-rw-r--r--  .mailmap                                                            |    8
-rw-r--r--  Documentation/devicetree/bindings/display/msm/dp-controller.yaml    |    2
-rw-r--r--  Documentation/devicetree/bindings/display/msm/edp.txt               |   56
-rw-r--r--  MAINTAINERS                                                         |    1
-rw-r--r--  drivers/gpu/drm/msm/Kconfig                                         |    2
-rw-r--r--  drivers/gpu/drm/msm/Makefile                                        |   12
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx_gpu.c                               |    3
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c                               |    3
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c                               |    3
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_debugfs.c                           |    4
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c                               |   16
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c                               |   25
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c                               |   35
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h                               |   10
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c                         |   99
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.c                               |   10
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.h                               |   11
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c                          |    4
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c                             |    5
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h                             |    2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c                            |   11
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h                            |    2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c                      |    4
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c                   |   18
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h                   |    3
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c                         |   17
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h                         |   18
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c                         |    2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h                         |    2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c                             |    2
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c                           |  304
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h                           |    2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c                            |   19
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c                           |   24
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_aux.c                                     |   17
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_ctrl.c                                    |   23
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_display.c                                 |   15
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_hpd.c                                     |    2
-rw-r--r--  drivers/gpu/drm/msm/dp/dp_hpd.h                                     |    2
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h                                       |    4
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c                                  |   49
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c                               |   14
-rw-r--r--  drivers/gpu/drm/msm/dsi/phy/dsi_phy.c                               |   11
-rw-r--r--  drivers/gpu/drm/msm/edp/edp.c                                       |  198
-rw-r--r--  drivers/gpu/drm/msm/edp/edp.h                                       |   77
-rw-r--r--  drivers/gpu/drm/msm/edp/edp.xml.h                                   |  388
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_aux.c                                   |  265
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_bridge.c                                |  111
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_connector.c                             |  132
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_ctrl.c                                  | 1373
-rw-r--r--  drivers/gpu/drm/msm/edp/edp_phy.c                                   |   98
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c                                   |   53
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c                                       |   64
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h                                       |   23
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c                                     |   13
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.h                                     |   12
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c                                       |    5
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c                                |    2
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c                                       |   22
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h                                       |   69
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu_devfreq.c                               |  100
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h                                       |    3
-rw-r--r--  drivers/gpu/drm/msm/msm_perf.c                                      |    9
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c                                        |   16
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c                                |    4
-rw-r--r--  include/drm/drm_dp_helper.h                                         |    7
66 files changed, 685 insertions, 3235 deletions
diff --git a/.mailmap b/.mailmap
index 6277bb27b4bf..6d6e144cc3c3 100644
--- a/.mailmap
+++ b/.mailmap
@@ -10,10 +10,12 @@
# Please keep this list dictionary sorted.
#
Aaron Durbin <adurbin@google.com>
+Abhinav Kumar <quic_abhinavk@quicinc.com> <abhinavk@codeaurora.org>
Adam Oldham <oldhamca@gmail.com>
Adam Radford <aradford@gmail.com>
Adriana Reus <adi.reus@gmail.com> <adriana.reus@intel.com>
Adrian Bunk <bunk@stusta.de>
+Akhil P Oommen <quic_akhilpo@quicinc.com> <akhilpo@codeaurora.org>
Alan Cox <alan@lxorguk.ukuu.org.uk>
Alan Cox <root@hraefn.swansea.linux.org.uk>
Aleksandar Markovic <aleksandar.markovic@mips.com> <aleksandar.markovic@imgtec.com>
@@ -170,6 +172,7 @@ Jeff Layton <jlayton@kernel.org> <jlayton@redhat.com>
Jens Axboe <axboe@suse.de>
Jens Osterkamp <Jens.Osterkamp@de.ibm.com>
Jernej Skrabec <jernej.skrabec@gmail.com> <jernej.skrabec@siol.net>
+Jessica Zhang <quic_jesszhan@quicinc.com> <jesszhan@codeaurora.org>
Jiri Slaby <jirislaby@kernel.org> <jirislaby@gmail.com>
Jiri Slaby <jirislaby@kernel.org> <jslaby@novell.com>
Jiri Slaby <jirislaby@kernel.org> <jslaby@suse.com>
@@ -189,6 +192,7 @@ Juha Yrjola <at solidboot.com>
Juha Yrjola <juha.yrjola@nokia.com>
Juha Yrjola <juha.yrjola@solidboot.com>
Julien Thierry <julien.thierry.kdev@gmail.com> <julien.thierry@arm.com>
+Kalyan Thota <quic_kalyant@quicinc.com> <kalyan_t@codeaurora.org>
Kay Sievers <kay.sievers@vrfy.org>
Kees Cook <keescook@chromium.org> <kees.cook@canonical.com>
Kees Cook <keescook@chromium.org> <keescook@google.com>
@@ -200,9 +204,11 @@ Kenneth W Chen <kenneth.w.chen@intel.com>
Konstantin Khlebnikov <koct9i@gmail.com> <khlebnikov@yandex-team.ru>
Konstantin Khlebnikov <koct9i@gmail.com> <k.khlebnikov@samsung.com>
Koushik <raghavendra.koushik@neterion.com>
+Krishna Manikandan <quic_mkrishn@quicinc.com> <mkrishn@codeaurora.org>
Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski.k@gmail.com>
Krzysztof Kozlowski <krzk@kernel.org> <k.kozlowski@samsung.com>
Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
+Kuogee Hsieh <quic_khsieh@quicinc.com> <khsieh@codeaurora.org>
Leonardo Bras <leobras.c@gmail.com> <leonardo@linux.ibm.com>
Leonid I Ananiev <leonid.i.ananiev@intel.com>
Leon Romanovsky <leon@kernel.org> <leon@leon.nu>
@@ -309,6 +315,7 @@ Qais Yousef <qsyousef@gmail.com> <qais.yousef@imgtec.com>
Quentin Monnet <quentin@isovalent.com> <quentin.monnet@netronome.com>
Quentin Perret <qperret@qperret.net> <quentin.perret@arm.com>
Rafael J. Wysocki <rjw@rjwysocki.net> <rjw@sisk.pl>
+Rajeev Nandan <quic_rajeevny@quicinc.com> <rajeevny@codeaurora.org>
Rajesh Shah <rajesh.shah@intel.com>
Ralf Baechle <ralf@linux-mips.org>
Ralf Wildenhues <Ralf.Wildenhues@gmx.de>
@@ -323,6 +330,7 @@ Rui Saraiva <rmps@joel.ist.utl.pt>
Sachin P Sant <ssant@in.ibm.com>
Sakari Ailus <sakari.ailus@linux.intel.com> <sakari.ailus@iki.fi>
Sam Ravnborg <sam@mars.ravnborg.org>
+Sankeerth Billakanti <quic_sbillaka@quicinc.com> <sbillaka@codeaurora.org>
Santosh Shilimkar <santosh.shilimkar@oracle.org>
Santosh Shilimkar <ssantosh@kernel.org>
Sarangdhar Joshi <spjoshi@codeaurora.org>
diff --git a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
index 63e585f48789..5457612ab136 100644
--- a/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
+++ b/Documentation/devicetree/bindings/display/msm/dp-controller.yaml
@@ -17,6 +17,8 @@ properties:
compatible:
enum:
- qcom,sc7180-dp
+ - qcom,sc7280-dp
+ - qcom,sc7280-edp
- qcom,sc8180x-dp
- qcom,sc8180x-edp
diff --git a/Documentation/devicetree/bindings/display/msm/edp.txt b/Documentation/devicetree/bindings/display/msm/edp.txt
deleted file mode 100644
index eff9daff418c..000000000000
--- a/Documentation/devicetree/bindings/display/msm/edp.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-Qualcomm Technologies Inc. adreno/snapdragon eDP output
-
-Required properties:
-- compatible:
- * "qcom,mdss-edp"
-- reg: Physical base address and length of the registers of controller and PLL
-- reg-names: The names of register regions. The following regions are required:
- * "edp"
- * "pll_base"
-- interrupts: The interrupt signal from the eDP block.
-- power-domains: Should be <&mmcc MDSS_GDSC>.
-- clocks: device clocks
- See Documentation/devicetree/bindings/clock/clock-bindings.txt for details.
-- clock-names: the following clocks are required:
- * "core"
- * "iface"
- * "mdp_core"
- * "pixel"
- * "link"
-- #clock-cells: The value should be 1.
-- vdda-supply: phandle to vdda regulator device node
-- lvl-vdd-supply: phandle to regulator device node which is used to supply power
- to HPD receiving chip
-- panel-en-gpios: GPIO pin to supply power to panel.
-- panel-hpd-gpios: GPIO pin used for eDP hpd.
-
-
-Example:
- mdss_edp: qcom,mdss_edp@fd923400 {
- compatible = "qcom,mdss-edp";
- reg-names =
- "edp",
- "pll_base";
- reg = <0xfd923400 0x700>,
- <0xfd923a00 0xd4>;
- interrupt-parent = <&mdss_mdp>;
- interrupts = <12 0>;
- power-domains = <&mmcc MDSS_GDSC>;
- clock-names =
- "core",
- "pixel",
- "iface",
- "link",
- "mdp_core";
- clocks =
- <&mmcc MDSS_EDPAUX_CLK>,
- <&mmcc MDSS_EDPPIXEL_CLK>,
- <&mmcc MDSS_AHB_CLK>,
- <&mmcc MDSS_EDPLINK_CLK>,
- <&mmcc MDSS_MDP_CLK>;
- #clock-cells = <1>;
- vdda-supply = <&pma8084_l12>;
- lvl-vdd-supply = <&lvl_vreg>;
- panel-en-gpios = <&tlmm 137 0>;
- panel-hpd-gpios = <&tlmm 103 0>;
- };
diff --git a/MAINTAINERS b/MAINTAINERS
index 53b859d10de6..540f4be2ef74 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6038,6 +6038,7 @@ F: drivers/gpu/drm/tiny/mi0283qt.c
DRM DRIVER FOR MSM ADRENO GPU
M: Rob Clark <robdclark@gmail.com>
M: Sean Paul <sean@poorly.run>
+R: Abhinav Kumar <quic_abhinavk@quicinc.com>
L: linux-arm-msm@vger.kernel.org
L: dri-devel@lists.freedesktop.org
L: freedreno@lists.freedesktop.org
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index ae11061727ff..39197b4beea7 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -4,8 +4,8 @@ config DRM_MSM
tristate "MSM DRM"
depends on DRM
depends on ARCH_QCOM || SOC_IMX5 || COMPILE_TEST
+ depends on COMMON_CLK
depends on IOMMU_SUPPORT
- depends on (OF && COMMON_CLK) || COMPILE_TEST
depends on QCOM_OCMEM || QCOM_OCMEM=n
depends on QCOM_LLCC || QCOM_LLCC=n
depends on QCOM_COMMAND_DB || QCOM_COMMAND_DB=n
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 40577f8856d8..edc53aa004c5 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -23,20 +23,17 @@ msm-y := \
hdmi/hdmi_i2c.o \
hdmi/hdmi_phy.o \
hdmi/hdmi_phy_8960.o \
+ hdmi/hdmi_phy_8996.o \
hdmi/hdmi_phy_8x60.o \
hdmi/hdmi_phy_8x74.o \
- edp/edp.o \
- edp/edp_aux.o \
- edp/edp_bridge.o \
- edp/edp_connector.o \
- edp/edp_ctrl.o \
- edp/edp_phy.o \
+ hdmi/hdmi_pll_8960.o \
disp/mdp_format.o \
disp/mdp_kms.o \
disp/mdp4/mdp4_crtc.o \
disp/mdp4/mdp4_dtv_encoder.o \
disp/mdp4/mdp4_lcdc_encoder.o \
disp/mdp4/mdp4_lvds_connector.o \
+ disp/mdp4/mdp4_lvds_pll.o \
disp/mdp4/mdp4_irq.o \
disp/mdp4/mdp4_kms.o \
disp/mdp4/mdp4_plane.o \
@@ -116,9 +113,6 @@ msm-$(CONFIG_DRM_MSM_DP)+= dp/dp_aux.o \
dp/dp_audio.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_pll_8960.o
-msm-$(CONFIG_COMMON_CLK) += hdmi/hdmi_phy_8996.o
msm-$(CONFIG_DRM_MSM_HDMI_HDCP) += hdmi/hdmi_hdcp.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
index bdc989183c64..22e8295a5e2b 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c
@@ -12,7 +12,6 @@ static bool a2xx_idle(struct msm_gpu *gpu);
static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
@@ -23,7 +22,7 @@ static void a2xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 8fb847c174ff..2e481e2692ba 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -30,7 +30,6 @@ static bool a3xx_idle(struct msm_gpu *gpu);
static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
@@ -41,7 +40,7 @@ static void a3xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index a96ee79cc5e0..c5524d6e8705 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -24,7 +24,6 @@ static bool a4xx_idle(struct msm_gpu *gpu);
static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i;
@@ -35,7 +34,7 @@ static void a4xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
/* ignore if there has not been a ctx switch: */
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
index dd593ec2bc56..6bd397a85834 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_debugfs.c
@@ -107,7 +107,7 @@ reset_set(void *data, u64 val)
* try to reset an active GPU.
*/
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&gpu->lock);
release_firmware(adreno_gpu->fw[ADRENO_FW_PM4]);
adreno_gpu->fw[ADRENO_FW_PM4] = NULL;
@@ -133,7 +133,7 @@ reset_set(void *data, u64 val)
gpu->funcs->recover(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
return 0;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index 5e2750eb3810..a95977e8ad98 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -65,7 +65,6 @@ void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
struct msm_gem_object *obj;
uint32_t *ptr, dwords;
@@ -76,7 +75,7 @@ static void a5xx_submit_in_rb(struct msm_gpu *gpu, struct msm_gem_submit *submit
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -126,12 +125,11 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct msm_ringbuffer *ring = submit->ring;
unsigned int i, ibs = 0;
if (IS_ENABLED(CONFIG_DRM_MSM_GPU_SUDO) && submit->in_rb) {
- priv->lastctx = NULL;
+ gpu->cur_ctx_seqno = 0;
a5xx_submit_in_rb(gpu, submit);
return;
}
@@ -166,7 +164,7 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -927,6 +925,8 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
if (IS_ERR(a5xx_gpu->shadow))
return PTR_ERR(a5xx_gpu->shadow);
+
+ msm_gem_object_set_name(a5xx_gpu->shadow_bo, "shadow");
}
gpu_write64(gpu, REG_A5XX_CP_RB_RPTR_ADDR,
@@ -1254,6 +1254,7 @@ static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
/*
@@ -1263,6 +1264,11 @@ static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+ if (priv->disable_err_irq) {
+ status &= A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS |
+ A5XX_RBBM_INT_0_MASK_CP_SW;
+ }
+
/* Pass status to a5xx_rbbm_err_irq because we've already cleared it */
if (status & RBBM_ERROR_MASK)
a5xx_rbbm_err_irq(gpu, status);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
index 71e52b2b2025..3e325e2a2b1b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -1146,7 +1146,7 @@ static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu)
}
static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
- size_t size, u64 iova)
+ size_t size, u64 iova, const char *name)
{
struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
struct drm_device *dev = a6xx_gpu->base.base.dev;
@@ -1181,6 +1181,8 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo,
bo->virt = msm_gem_get_vaddr(bo->obj);
bo->size = size;
+ msm_gem_object_set_name(bo->obj, name);
+
return 0;
}
@@ -1515,7 +1517,8 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
*/
gmu->dummy.size = SZ_4K;
if (adreno_is_a660_family(adreno_gpu)) {
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7, 0x60400000);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_4K * 7,
+ 0x60400000, "debug");
if (ret)
goto err_memory;
@@ -1523,42 +1526,46 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
}
/* Allocate memory for the GMU dummy page */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size, 0x60000000);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->dummy, gmu->dummy.size,
+ 0x60000000, "dummy");
if (ret)
goto err_memory;
+ /* Note that a650 family also includes a660 family: */
if (adreno_is_a650_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
- SZ_16M - SZ_16K, 0x04000);
+ SZ_16M - SZ_16K, 0x04000, "icache");
if (ret)
goto err_memory;
} else if (adreno_is_a640_family(adreno_gpu)) {
ret = a6xx_gmu_memory_alloc(gmu, &gmu->icache,
- SZ_256K - SZ_16K, 0x04000);
+ SZ_256K - SZ_16K, 0x04000, "icache");
if (ret)
goto err_memory;
ret = a6xx_gmu_memory_alloc(gmu, &gmu->dcache,
- SZ_256K - SZ_16K, 0x44000);
+ SZ_256K - SZ_16K, 0x44000, "dcache");
if (ret)
goto err_memory;
} else {
+ BUG_ON(adreno_is_a660_family(adreno_gpu));
+
/* HFI v1, has sptprac */
gmu->legacy = true;
/* Allocate memory for the GMU debug region */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->debug, SZ_16K, 0, "debug");
if (ret)
goto err_memory;
}
/* Allocate memory for for the HFI queues */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->hfi, SZ_16K, 0, "hfi");
if (ret)
goto err_memory;
/* Allocate memory for the GMU log region */
- ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0);
+ ret = a6xx_gmu_memory_alloc(gmu, &gmu->log, SZ_4K, 0, "log");
if (ret)
goto err_memory;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
index 267a880811d6..51b83776951b 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -106,7 +106,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
u32 asid;
u64 memptr = rbmemptr(ring, ttbr0);
- if (ctx->seqno == a6xx_gpu->cur_ctx_seqno)
+ if (ctx->seqno == a6xx_gpu->base.base.cur_ctx_seqno)
return;
if (msm_iommu_pagetable_params(ctx->aspace->mmu, &ttbr, &asid))
@@ -138,14 +138,11 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu,
OUT_PKT7(ring, CP_EVENT_WRITE, 1);
OUT_RING(ring, 0x31);
-
- a6xx_gpu->cur_ctx_seqno = ctx->seqno;
}
static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
unsigned int index = submit->seqno % MSM_GPU_SUBMIT_STATS_COUNT;
- struct msm_drm_private *priv = gpu->dev->dev_private;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
struct msm_ringbuffer *ring = submit->ring;
@@ -177,7 +174,7 @@ static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- if (priv->lastctx == submit->queue->ctx)
+ if (gpu->cur_ctx_seqno == submit->queue->ctx->seqno)
break;
fallthrough;
case MSM_SUBMIT_CMD_BUF:
@@ -1071,6 +1068,8 @@ static int hw_init(struct msm_gpu *gpu)
if (IS_ERR(a6xx_gpu->shadow))
return PTR_ERR(a6xx_gpu->shadow);
+
+ msm_gem_object_set_name(a6xx_gpu->shadow_bo, "shadow");
}
gpu_write64(gpu, REG_A6XX_CP_RB_RPTR_ADDR_LO,
@@ -1081,7 +1080,7 @@ static int hw_init(struct msm_gpu *gpu)
/* Always come up on rb 0 */
a6xx_gpu->cur_ring = gpu->rb[0];
- a6xx_gpu->cur_ctx_seqno = 0;
+ gpu->cur_ctx_seqno = 0;
/* Enable the SQE_to start the CP engine */
gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
@@ -1376,10 +1375,14 @@ static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
+ if (priv->disable_err_irq)
+ status &= A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS;
+
if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
a6xx_fault_detect_irq(gpu);
@@ -1424,17 +1427,24 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
{
struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
struct msm_gpu *gpu = &adreno_gpu->base;
- u32 gpu_scid, cntl1_regval = 0;
+ u32 cntl1_regval = 0;
if (IS_ERR(a6xx_gpu->llc_mmio))
return;
if (!llcc_slice_activate(a6xx_gpu->llc_slice)) {
- gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
+ u32 gpu_scid = llcc_get_slice_id(a6xx_gpu->llc_slice);
gpu_scid &= 0x1f;
cntl1_regval = (gpu_scid << 0) | (gpu_scid << 5) | (gpu_scid << 10) |
(gpu_scid << 15) | (gpu_scid << 20);
+
+ /* On A660, the SCID programming for UCHE traffic is done in
+ * A6XX_GBIF_SCACHE_CNTL0[14:10]
+ */
+ if (adreno_is_a660_family(adreno_gpu))
+ gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
+ (1 << 8), (gpu_scid << 10) | (1 << 8));
}
/*
@@ -1471,13 +1481,6 @@ static void a6xx_llc_activate(struct a6xx_gpu *a6xx_gpu)
}
gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL1, GENMASK(24, 0), cntl1_regval);
-
- /* On A660, the SCID programming for UCHE traffic is done in
- * A6XX_GBIF_SCACHE_CNTL0[14:10]
- */
- if (adreno_is_a660_family(adreno_gpu))
- gpu_rmw(gpu, REG_A6XX_GBIF_SCACHE_CNTL0, (0x1f << 10) |
- (1 << 8), (gpu_scid << 10) | (1 << 8));
}
static void a6xx_llc_slices_destroy(struct a6xx_gpu *a6xx_gpu)
@@ -1640,7 +1643,7 @@ static unsigned long a6xx_gpu_busy(struct msm_gpu *gpu)
return (unsigned long)busy_time;
}
-void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
+static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
index 8e5527c881b1..86e0a7c3fe6d 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -20,16 +20,6 @@ struct a6xx_gpu {
struct msm_ringbuffer *cur_ring;
- /**
- * cur_ctx_seqno:
- *
- * The ctx->seqno value of the context with current pgtables
- * installed. Tracked by seqno rather than pointer value to
- * avoid dangling pointers, and cases where a ctx can be freed
- * and a new one created with the same address.
- */
- int cur_ctx_seqno;
-
struct a6xx_gmu gmu;
struct drm_gem_object *shadow_bo;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
index 7501849ed15d..bdd0059a81ff 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu_state.c
@@ -42,6 +42,12 @@ struct a6xx_gpu_state {
struct a6xx_gpu_state_obj *cx_debugbus;
int nr_cx_debugbus;
+ struct msm_gpu_state_bo *gmu_log;
+ struct msm_gpu_state_bo *gmu_hfi;
+ struct msm_gpu_state_bo *gmu_debug;
+
+ s32 hfi_queue_history[2][HFI_HISTORY_SZ];
+
struct list_head objs;
};
@@ -777,12 +783,12 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
a6xx_state->gmu_registers = state_kcalloc(a6xx_state,
- 2, sizeof(*a6xx_state->gmu_registers));
+ 3, sizeof(*a6xx_state->gmu_registers));
if (!a6xx_state->gmu_registers)
return;
- a6xx_state->nr_gmu_registers = 2;
+ a6xx_state->nr_gmu_registers = 3;
/* Get the CX GMU registers from AHB */
_a6xx_get_gmu_registers(gpu, a6xx_state, &a6xx_gmu_reglist[0],
@@ -800,6 +806,45 @@ static void a6xx_get_gmu_registers(struct msm_gpu *gpu,
&a6xx_state->gmu_registers[2], false);
}
+static struct msm_gpu_state_bo *a6xx_snapshot_gmu_bo(
+ struct a6xx_gpu_state *a6xx_state, struct a6xx_gmu_bo *bo)
+{
+ struct msm_gpu_state_bo *snapshot;
+
+ snapshot = state_kcalloc(a6xx_state, 1, sizeof(*snapshot));
+ if (!snapshot)
+ return NULL;
+
+ snapshot->iova = bo->iova;
+ snapshot->size = bo->size;
+ snapshot->data = kvzalloc(snapshot->size, GFP_KERNEL);
+ if (!snapshot->data)
+ return NULL;
+
+ memcpy(snapshot->data, bo->virt, bo->size);
+
+ return snapshot;
+}
+
+static void a6xx_snapshot_gmu_hfi_history(struct msm_gpu *gpu,
+ struct a6xx_gpu_state *a6xx_state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned i, j;
+
+ BUILD_BUG_ON(ARRAY_SIZE(gmu->queues) != ARRAY_SIZE(a6xx_state->hfi_queue_history));
+
+ for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
+ struct a6xx_hfi_queue *queue = &gmu->queues[i];
+ for (j = 0; j < HFI_HISTORY_SZ; j++) {
+ unsigned idx = (j + queue->history_idx) % HFI_HISTORY_SZ;
+ a6xx_state->hfi_queue_history[i][j] = queue->history[idx];
+ }
+ }
+}
+
#define A6XX_GBIF_REGLIST_SIZE 1
static void a6xx_get_registers(struct msm_gpu *gpu,
struct a6xx_gpu_state *a6xx_state,
@@ -937,6 +982,12 @@ struct msm_gpu_state *a6xx_gpu_state_get(struct msm_gpu *gpu)
a6xx_get_gmu_registers(gpu, a6xx_state);
+ a6xx_state->gmu_log = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.log);
+ a6xx_state->gmu_hfi = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.hfi);
+ a6xx_state->gmu_debug = a6xx_snapshot_gmu_bo(a6xx_state, &a6xx_gpu->gmu.debug);
+
+ a6xx_snapshot_gmu_hfi_history(gpu, a6xx_state);
+
/* If GX isn't on the rest of the data isn't going to be accessible */
if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
return &a6xx_state->base;
@@ -978,6 +1029,12 @@ static void a6xx_gpu_state_destroy(struct kref *kref)
struct a6xx_gpu_state *a6xx_state = container_of(state,
struct a6xx_gpu_state, base);
+ if (a6xx_state->gmu_log)
+ kvfree(a6xx_state->gmu_log->data);
+
+ if (a6xx_state->gmu_hfi)
+ kvfree(a6xx_state->gmu_hfi->data);
+
list_for_each_entry_safe(obj, tmp, &a6xx_state->objs, node)
kfree(obj);
@@ -1191,6 +1248,44 @@ void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
adreno_show(gpu, state, p);
+ drm_puts(p, "gmu-log:\n");
+ if (a6xx_state->gmu_log) {
+ struct msm_gpu_state_bo *gmu_log = a6xx_state->gmu_log;
+
+ drm_printf(p, " iova: 0x%016llx\n", gmu_log->iova);
+ drm_printf(p, " size: %zu\n", gmu_log->size);
+ adreno_show_object(p, &gmu_log->data, gmu_log->size,
+ &gmu_log->encoded);
+ }
+
+ drm_puts(p, "gmu-hfi:\n");
+ if (a6xx_state->gmu_hfi) {
+ struct msm_gpu_state_bo *gmu_hfi = a6xx_state->gmu_hfi;
+ unsigned i, j;
+
+ drm_printf(p, " iova: 0x%016llx\n", gmu_hfi->iova);
+ drm_printf(p, " size: %zu\n", gmu_hfi->size);
+ for (i = 0; i < ARRAY_SIZE(a6xx_state->hfi_queue_history); i++) {
+ drm_printf(p, " queue-history[%u]:", i);
+ for (j = 0; j < HFI_HISTORY_SZ; j++) {
+ drm_printf(p, " %d", a6xx_state->hfi_queue_history[i][j]);
+ }
+ drm_printf(p, "\n");
+ }
+ adreno_show_object(p, &gmu_hfi->data, gmu_hfi->size,
+ &gmu_hfi->encoded);
+ }
+
+ drm_puts(p, "gmu-debug:\n");
+ if (a6xx_state->gmu_debug) {
+ struct msm_gpu_state_bo *gmu_debug = a6xx_state->gmu_debug;
+
+ drm_printf(p, " iova: 0x%016llx\n", gmu_debug->iova);
+ drm_printf(p, " size: %zu\n", gmu_debug->size);
+ adreno_show_object(p, &gmu_debug->data, gmu_debug->size,
+ &gmu_debug->encoded);
+ }
+
drm_puts(p, "registers:\n");
for (i = 0; i < a6xx_state->nr_registers; i++) {
struct a6xx_gpu_state_obj *obj = &a6xx_state->registers[i];
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
index d4c65bf0a1b7..d73fce5fdf1f 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -36,6 +36,8 @@ static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,
hdr = queue->data[index];
+ queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;
+
/*
* If we are to assume that the GMU firmware is in fact a rational actor
* and is programmed to not send us a larger response than we expect
@@ -75,6 +77,8 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
return -ENOSPC;
}
+ queue->history[(queue->history_idx++) % HFI_HISTORY_SZ] = index;
+
for (i = 0; i < dwords; i++) {
queue->data[index] = data[i];
index = (index + 1) % header->size;
@@ -600,6 +604,9 @@ void a6xx_hfi_stop(struct a6xx_gmu *gmu)
queue->header->read_index = 0;
queue->header->write_index = 0;
+
+ memset(&queue->history, 0xff, sizeof(queue->history));
+ queue->history_idx = 0;
}
}
@@ -612,6 +619,9 @@ static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
queue->data = virt;
atomic_set(&queue->seqnum, 0);
+ memset(&queue->history, 0xff, sizeof(queue->history));
+ queue->history_idx = 0;
+
/* Set up the shared memory header */
header->iova = iova;
header->type = 10 << 8 | id;
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
index 2bd670ca42d6..528110169398 100644
--- a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -33,6 +33,17 @@ struct a6xx_hfi_queue {
spinlock_t lock;
u32 *data;
atomic_t seqnum;
+
+ /*
+ * Tracking for the start index of the last N messages in the
+ * queue, for the benefit of devcore dump / crashdec (since
+ * parsing in the reverse direction to decode the last N
+ * messages is difficult to do and would rely on heuristics
+ * which are not guaranteed to be correct)
+ */
+#define HFI_HISTORY_SZ 8
+ s32 history[HFI_HISTORY_SZ];
+ u8 history_idx;
};
/* This is the outgoing queue to the GMU */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 2a6ce76656aa..9e01ccc800a6 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -408,9 +408,9 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
return NULL;
}
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&gpu->lock);
ret = msm_gpu_hw_init(gpu);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
pm_runtime_put_autosuspend(&pdev->dev);
if (ret) {
DRM_DEV_ERROR(dev->dev, "gpu hw init failed: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 748665232d29..47cb40bdbd43 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -504,6 +504,9 @@ int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
int i, count = 0;
+ WARN_ON(gpu->needs_hw_init);
+ WARN_ON(!mutex_is_locked(&gpu->lock));
+
kref_init(&state->ref);
ktime_get_real_ts64(&state->time);
@@ -630,7 +633,7 @@ static char *adreno_gpu_ascii85_encode(u32 *src, size_t len)
}
/* len is expected to be in bytes */
-static void adreno_show_object(struct drm_printer *p, void **ptr, int len,
+void adreno_show_object(struct drm_printer *p, void **ptr, int len,
bool *encoded)
{
if (!*ptr || !len)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 225c277a6223..676230862671 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -306,6 +306,8 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state);
int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
int adreno_gpu_state_put(struct msm_gpu_state *state);
+void adreno_show_object(struct drm_printer *p, void **ptr, int len,
+ bool *encoded);
/*
* Common helper function to initialize the default address space for arm-smmu
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
index 967245b8cc02..d290809d59bd 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -337,7 +337,8 @@ static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
}
static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
- struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
+ struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer,
+ struct dpu_hw_stage_cfg *stage_cfg)
{
struct drm_plane *plane;
struct drm_framebuffer *fb;
@@ -346,7 +347,6 @@ static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
struct dpu_plane_state *pstate = NULL;
struct dpu_format *format;
struct dpu_hw_ctl *ctl = mixer->lm_ctl;
- struct dpu_hw_stage_cfg *stage_cfg = &dpu_crtc->stage_cfg;
u32 flush_mask;
uint32_t stage_idx, lm_idx;
@@ -422,6 +422,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
struct dpu_crtc_mixer *mixer = cstate->mixers;
struct dpu_hw_ctl *ctl;
struct dpu_hw_mixer *lm;
+ struct dpu_hw_stage_cfg stage_cfg;
int i;
DRM_DEBUG_ATOMIC("%s\n", dpu_crtc->name);
@@ -435,9 +436,9 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
}
/* initialize stage cfg */
- memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+ memset(&stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
- _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);
+ _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer, &stage_cfg);
for (i = 0; i < cstate->num_mixers; i++) {
ctl = mixer[i].lm_ctl;
@@ -458,7 +459,7 @@ static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
mixer[i].flush_mask);
ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
- &dpu_crtc->stage_cfg);
+ &stage_cfg);
}
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
index ae9546ca1359..4328e133d71c 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -129,7 +129,6 @@ struct dpu_crtc_frame_event {
* @drm_requested_vblank : Whether vblanks have been enabled in the encoder
* @property_info : Opaque structure for generic property support
* @property_defaults : Array of default values for generic property support
- * @stage_cfg : H/w mixer stage configuration
* @debugfs_root : Parent of debugfs node
* @vblank_cb_count : count of vblank callback since last reset
* @play_count : frame count between crtc enable and disable
@@ -161,7 +160,6 @@ struct dpu_crtc {
struct drm_pending_vblank_event *event;
u32 vsync_count;
- struct dpu_hw_stage_cfg stage_cfg;
struct dentry *debugfs_root;
u32 vblank_cb_count;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
index ce6f32a919e5..2b4dc9d852b3 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -856,9 +856,9 @@ static const struct dpu_intf_cfg sm8150_intf[] = {
};
static const struct dpu_intf_cfg sc7280_intf[] = {
- INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
+ INTF_BLK("intf_0", INTF_0, 0x34000, INTF_DP, MSM_DP_CONTROLLER_0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 24, 25),
INTF_BLK("intf_1", INTF_1, 0x35000, INTF_DSI, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 26, 27),
- INTF_BLK("intf_5", INTF_5, 0x39000, INTF_EDP, 0, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 22, 23),
+ INTF_BLK("intf_5", INTF_5, 0x39000, INTF_DP, MSM_DP_CONTROLLER_1, 24, INTF_SC7280_MASK, MDP_SSPP_TOP0_INTR, 22, 23),
};
/*************************************************************
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
index d2b6dca487e3..a77a5eaa78ad 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -30,6 +30,9 @@
#define MDP_AD4_INTR_STATUS_OFF 0x420
#define MDP_INTF_0_OFF_REV_7xxx 0x34000
#define MDP_INTF_1_OFF_REV_7xxx 0x35000
+#define MDP_INTF_2_OFF_REV_7xxx 0x36000
+#define MDP_INTF_3_OFF_REV_7xxx 0x37000
+#define MDP_INTF_4_OFF_REV_7xxx 0x38000
#define MDP_INTF_5_OFF_REV_7xxx 0x39000
/**
@@ -111,6 +114,21 @@ static const struct dpu_intr_reg dpu_intr_set[] = {
MDP_INTF_1_OFF_REV_7xxx+INTF_INTR_STATUS
},
{
+ MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_2_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_3_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_CLEAR,
+ MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_EN,
+ MDP_INTF_4_OFF_REV_7xxx+INTF_INTR_STATUS
+ },
+ {
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_CLEAR,
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_EN,
MDP_INTF_5_OFF_REV_7xxx+INTF_INTR_STATUS
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
index d50e78c9f148..1ab75cccd145 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -26,6 +26,9 @@ enum dpu_hw_intr_reg {
MDP_AD4_1_INTR,
MDP_INTF0_7xxx_INTR,
MDP_INTF1_7xxx_INTR,
+ MDP_INTF2_7xxx_INTR,
+ MDP_INTF3_7xxx_INTR,
+ MDP_INTF4_7xxx_INTR,
MDP_INTF5_7xxx_INTR,
MDP_INTR_MAX,
};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
index f9460672176a..d77eb7da5daf 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -539,7 +539,7 @@ static void dpu_hw_sspp_setup_sourceaddress(struct dpu_hw_pipe *ctx,
}
static void dpu_hw_sspp_setup_csc(struct dpu_hw_pipe *ctx,
- struct dpu_csc_cfg *data)
+ const struct dpu_csc_cfg *data)
{
u32 idx;
bool csc10 = false;
@@ -571,19 +571,20 @@ static void dpu_hw_sspp_setup_solidfill(struct dpu_hw_pipe *ctx, u32 color, enum
}
static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_qos_cfg *cfg)
+ u32 danger_lut,
+ u32 safe_lut)
{
u32 idx;
if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
return;
- DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut);
- DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, danger_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, safe_lut);
}
static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_qos_cfg *cfg)
+ u64 creq_lut)
{
u32 idx;
@@ -591,11 +592,11 @@ static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
return;
if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) {
- DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, creq_lut);
DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
- cfg->creq_lut >> 32);
+ creq_lut >> 32);
} else {
- DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, creq_lut);
}
}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
index fdfd4b46e2c6..e8939d7387cb 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -166,18 +166,12 @@ struct dpu_hw_pipe_cfg {
/**
* struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration
- * @danger_lut: LUT for generate danger level based on fill level
- * @safe_lut: LUT for generate safe level based on fill level
- * @creq_lut: LUT for generate creq level based on fill level
* @creq_vblank: creq value generated to vbif during vertical blanking
* @danger_vblank: danger value generated during vertical blanking
* @vblank_en: enable creq_vblank and danger_vblank during vblank
* @danger_safe_en: enable danger safe generation
*/
struct dpu_hw_pipe_qos_cfg {
- u32 danger_lut;
- u32 safe_lut;
- u64 creq_lut;
u32 creq_vblank;
u32 danger_vblank;
bool vblank_en;
@@ -268,7 +262,7 @@ struct dpu_hw_sspp_ops {
* @ctx: Pointer to pipe context
* @data: Pointer to config structure
*/
- void (*setup_csc)(struct dpu_hw_pipe *ctx, struct dpu_csc_cfg *data);
+ void (*setup_csc)(struct dpu_hw_pipe *ctx, const struct dpu_csc_cfg *data);
/**
* setup_solidfill - enable/disable colorfill
@@ -302,20 +296,22 @@ struct dpu_hw_sspp_ops {
/**
* setup_danger_safe_lut - setup danger safe LUTs
* @ctx: Pointer to pipe context
- * @cfg: Pointer to pipe QoS configuration
+ * @danger_lut: LUT for generate danger level based on fill level
+ * @safe_lut: LUT for generate safe level based on fill level
*
*/
void (*setup_danger_safe_lut)(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_qos_cfg *cfg);
+ u32 danger_lut,
+ u32 safe_lut);
/**
* setup_creq_lut - setup CREQ LUT
* @ctx: Pointer to pipe context
- * @cfg: Pointer to pipe QoS configuration
+ * @creq_lut: LUT for generate creq level based on fill level
*
*/
void (*setup_creq_lut)(struct dpu_hw_pipe *ctx,
- struct dpu_hw_pipe_qos_cfg *cfg);
+ u64 creq_lut);
/**
* setup_qos_ctrl - setup QoS control
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
index f94584c982cd..aad85116b0a0 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -374,7 +374,7 @@ u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
- struct dpu_csc_cfg *data, bool csc10)
+ const struct dpu_csc_cfg *data, bool csc10)
{
static const u32 matrix_shift = 7;
u32 clamp_shift = csc10 ? 16 : 8;
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
index 6d4911957e33..39134754579e 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -322,6 +322,6 @@ u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
u32 csc_reg_off,
- struct dpu_csc_cfg *data, bool csc10);
+ const struct dpu_csc_cfg *data, bool csc10);
#endif /* _DPU_HW_UTIL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
index a15b26428280..6c457c419412 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -1285,7 +1285,7 @@ static const struct dev_pm_ops dpu_pm_ops = {
pm_runtime_force_resume)
};
-static const struct of_device_id dpu_dt_match[] = {
+const struct of_device_id dpu_dt_match[] = {
{ .compatible = "qcom,sdm845-dpu", },
{ .compatible = "qcom,sc7180-dpu", },
{ .compatible = "qcom,sc7280-dpu", },
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
index a3e3b9d1b82e..ca190d92f0d5 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -90,7 +90,6 @@ enum dpu_plane_qos {
/*
* struct dpu_plane - local dpu plane structure
* @aspace: address space pointer
- * @csc_ptr: Points to dpu_csc_cfg structure to use for current
* @mplane_list: List of multirect planes of the same pipe
* @catalog: Points to dpu catalog structure
* @revalidate: force revalidation of all the plane properties
@@ -101,11 +100,8 @@ struct dpu_plane {
struct mutex lock;
enum dpu_sspp pipe;
- uint32_t features; /* capabilities from catalog */
struct dpu_hw_pipe *pipe_hw;
- struct dpu_hw_pipe_cfg pipe_cfg;
- struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
uint32_t color_fill;
bool is_error;
bool is_rt_pipe;
@@ -113,11 +109,6 @@ struct dpu_plane {
struct list_head mplane_list;
struct dpu_mdss_cfg *catalog;
- struct dpu_csc_cfg *csc_ptr;
-
- const struct dpu_sspp_sub_blks *pipe_sblk;
- char pipe_name[DPU_NAME_SIZE];
-
/* debugfs related stuff */
struct dentry *debugfs_root;
struct dpu_debugfs_regset32 debugfs_src;
@@ -145,14 +136,15 @@ static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
* _dpu_plane_calc_bw - calculate bandwidth required for a plane
* @plane: Pointer to drm plane.
* @fb: Pointer to framebuffer associated with the given plane
+ * @pipe_cfg: Pointer to pipe configuration
* Result: Updates calculated bandwidth in the plane state.
* BW Equation: src_w * src_h * bpp * fps * (v_total / v_dest)
* Prefill BW Equation: line src bytes * line_time
*/
static void _dpu_plane_calc_bw(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb,
+ struct dpu_hw_pipe_cfg *pipe_cfg)
{
- struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate;
struct drm_display_mode *mode;
const struct dpu_format *fmt = NULL;
@@ -169,9 +161,9 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane,
fmt = dpu_get_dpu_format_ext(fb->format->format, fb->modifier);
- src_width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
- src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
- dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+ src_width = drm_rect_width(&pipe_cfg->src_rect);
+ src_height = drm_rect_height(&pipe_cfg->src_rect);
+ dst_height = drm_rect_height(&pipe_cfg->dst_rect);
fps = drm_mode_vrefresh(mode);
vbp = mode->vtotal - mode->vsync_end;
vpw = mode->vsync_end - mode->vsync_start;
@@ -202,12 +194,12 @@ static void _dpu_plane_calc_bw(struct drm_plane *plane,
/**
* _dpu_plane_calc_clk - calculate clock required for a plane
* @plane: Pointer to drm plane.
+ * @pipe_cfg: Pointer to pipe configuration
* Result: Updates calculated clock in the plane state.
* Clock equation: dst_w * v_total * fps * (src_h / dst_h)
*/
-static void _dpu_plane_calc_clk(struct drm_plane *plane)
+static void _dpu_plane_calc_clk(struct drm_plane *plane, struct dpu_hw_pipe_cfg *pipe_cfg)
{
- struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_plane_state *pstate;
struct drm_display_mode *mode;
int dst_width, src_height, dst_height, fps;
@@ -215,9 +207,9 @@ static void _dpu_plane_calc_clk(struct drm_plane *plane)
pstate = to_dpu_plane_state(plane->state);
mode = &plane->state->crtc->mode;
- src_height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
- dst_width = drm_rect_width(&pdpu->pipe_cfg.dst_rect);
- dst_height = drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+ src_height = drm_rect_height(&pipe_cfg->src_rect);
+ dst_width = drm_rect_width(&pipe_cfg->dst_rect);
+ dst_height = drm_rect_height(&pipe_cfg->dst_rect);
fps = drm_mode_vrefresh(mode);
pstate->plane_clk =
@@ -254,14 +246,17 @@ static int _dpu_plane_calc_fill_level(struct drm_plane *plane,
fixed_buff_size = pdpu->catalog->caps->pixel_ram_size;
list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
+ u32 tmp_width;
+
if (!tmp->base.state->visible)
continue;
+ tmp_width = drm_rect_width(&tmp->base.state->src) >> 16;
DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
pdpu->base.base.id, tmp->base.base.id,
src_width,
- drm_rect_width(&tmp->pipe_cfg.src_rect));
+ tmp_width);
src_width = max_t(u32, src_width,
- drm_rect_width(&tmp->pipe_cfg.src_rect));
+ tmp_width);
}
if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
@@ -321,9 +316,10 @@ static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
* _dpu_plane_set_qos_lut - set QoS LUT of the given plane
* @plane: Pointer to drm plane
* @fb: Pointer to framebuffer associated with the given plane
+ * @pipe_cfg: Pointer to pipe configuration
*/
static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
- struct drm_framebuffer *fb)
+ struct drm_framebuffer *fb, struct dpu_hw_pipe_cfg *pipe_cfg)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
const struct dpu_format *fmt = NULL;
@@ -337,7 +333,7 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
fb->format->format,
fb->modifier);
total_fl = _dpu_plane_calc_fill_level(plane, fmt,
- drm_rect_width(&pdpu->pipe_cfg.src_rect));
+ drm_rect_width(&pipe_cfg->src_rect));
if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
@@ -348,8 +344,6 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
qos_lut = _dpu_plane_get_qos_lut(
&pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
- pdpu->pipe_qos_cfg.creq_lut = qos_lut;
-
trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0,
(fmt) ? fmt->base.pixel_format : 0,
pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage);
@@ -359,7 +353,7 @@ static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
fmt ? (char *)&fmt->base.pixel_format : NULL,
pdpu->is_rt_pipe, total_fl, qos_lut);
- pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, &pdpu->pipe_qos_cfg);
+ pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, qos_lut);
}
/**
@@ -397,24 +391,21 @@ static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
}
}
- pdpu->pipe_qos_cfg.danger_lut = danger_lut;
- pdpu->pipe_qos_cfg.safe_lut = safe_lut;
-
trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
(fmt) ? fmt->base.pixel_format : 0,
(fmt) ? fmt->fetch_mode : 0,
- pdpu->pipe_qos_cfg.danger_lut,
- pdpu->pipe_qos_cfg.safe_lut);
+ danger_lut,
+ safe_lut);
DPU_DEBUG_PLANE(pdpu, "pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
pdpu->pipe - SSPP_VIG0,
fmt ? (char *)&fmt->base.pixel_format : NULL,
fmt ? fmt->fetch_mode : -1,
- pdpu->pipe_qos_cfg.danger_lut,
- pdpu->pipe_qos_cfg.safe_lut);
+ danger_lut,
+ safe_lut);
pdpu->pipe_hw->ops.setup_danger_safe_lut(pdpu->pipe_hw,
- &pdpu->pipe_qos_cfg);
+ danger_lut, safe_lut);
}
/**
@@ -427,47 +418,51 @@ static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
bool enable, u32 flags)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
+
+ memset(&pipe_qos_cfg, 0, sizeof(pipe_qos_cfg));
if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
- pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank;
- pdpu->pipe_qos_cfg.danger_vblank =
- pdpu->pipe_sblk->danger_vblank;
- pdpu->pipe_qos_cfg.vblank_en = enable;
+ pipe_qos_cfg.creq_vblank = pdpu->pipe_hw->cap->sblk->creq_vblank;
+ pipe_qos_cfg.danger_vblank =
+ pdpu->pipe_hw->cap->sblk->danger_vblank;
+ pipe_qos_cfg.vblank_en = enable;
}
if (flags & DPU_PLANE_QOS_VBLANK_AMORTIZE) {
/* this feature overrules previous VBLANK_CTRL */
- pdpu->pipe_qos_cfg.vblank_en = false;
- pdpu->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
+ pipe_qos_cfg.vblank_en = false;
+ pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
}
if (flags & DPU_PLANE_QOS_PANIC_CTRL)
- pdpu->pipe_qos_cfg.danger_safe_en = enable;
+ pipe_qos_cfg.danger_safe_en = enable;
if (!pdpu->is_rt_pipe) {
- pdpu->pipe_qos_cfg.vblank_en = false;
- pdpu->pipe_qos_cfg.danger_safe_en = false;
+ pipe_qos_cfg.vblank_en = false;
+ pipe_qos_cfg.danger_safe_en = false;
}
DPU_DEBUG_PLANE(pdpu, "pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
pdpu->pipe - SSPP_VIG0,
- pdpu->pipe_qos_cfg.danger_safe_en,
- pdpu->pipe_qos_cfg.vblank_en,
- pdpu->pipe_qos_cfg.creq_vblank,
- pdpu->pipe_qos_cfg.danger_vblank,
+ pipe_qos_cfg.danger_safe_en,
+ pipe_qos_cfg.vblank_en,
+ pipe_qos_cfg.creq_vblank,
+ pipe_qos_cfg.danger_vblank,
pdpu->is_rt_pipe);
pdpu->pipe_hw->ops.setup_qos_ctrl(pdpu->pipe_hw,
- &pdpu->pipe_qos_cfg);
+ &pipe_qos_cfg);
}
/**
* _dpu_plane_set_ot_limit - set OT limit for the given plane
* @plane: Pointer to drm plane
* @crtc: Pointer to drm crtc
+ * @pipe_cfg: Pointer to pipe configuration
*/
static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
- struct drm_crtc *crtc)
+ struct drm_crtc *crtc, struct dpu_hw_pipe_cfg *pipe_cfg)
{
struct dpu_plane *pdpu = to_dpu_plane(plane);
struct dpu_vbif_set_ot_params ot_params;
@@ -476,8 +471,8 @@ static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
memset(&ot_params, 0, sizeof(ot_params));
ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
ot_params.num = pdpu->pipe_hw->idx - SSPP_NONE;
- ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
- ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+ ot_params.width = drm_rect_width(&pipe_cfg->src_rect);
+ ot_params.height = drm_rect_height(&pipe_cfg->src_rect);
ot_params.is_wfd = !pdpu->is_rt_pipe;
ot_params.frame_rate = drm_mode_vrefresh(&crtc->mode);
ot_params.vbif_idx = VBIF_RT;
@@ -606,66 +601,75 @@ static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
scale_cfg->enable = 1;
}
-static void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
-{
- static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
- {
- /* S15.16 format */
- 0x00012A00, 0x00000000, 0x00019880,
- 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
- 0x00012A00, 0x00020480, 0x00000000,
+static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+};
+
+static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
},
- /* signed bias */
- { 0xfff0, 0xff80, 0xff80,},
- { 0x0, 0x0, 0x0,},
- /* unsigned clamp */
- { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
- { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
- };
- static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
- {
- /* S15.16 format */
- 0x00012A00, 0x00000000, 0x00019880,
- 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
- 0x00012A00, 0x00020480, 0x00000000,
- },
- /* signed bias */
- { 0xffc0, 0xfe00, 0xfe00,},
- { 0x0, 0x0, 0x0,},
- /* unsigned clamp */
- { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
- { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
- };
+ /* signed bias */
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+};
+
+static const struct dpu_csc_cfg *_dpu_plane_get_csc(struct dpu_plane *pdpu, const struct dpu_format *fmt)
+{
+ const struct dpu_csc_cfg *csc_ptr;
if (!pdpu) {
DPU_ERROR("invalid plane\n");
- return;
+ return NULL;
}
- if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->features)
- pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc10_YUV2RGB_601L;
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ return NULL;
+
+ if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->pipe_hw->cap->features)
+ csc_ptr = &dpu_csc10_YUV2RGB_601L;
else
- pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc_YUV2RGB_601L;
+ csc_ptr = &dpu_csc_YUV2RGB_601L;
DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n",
- pdpu->csc_ptr->csc_mv[0],
- pdpu->csc_ptr->csc_mv[1],
- pdpu->csc_ptr->csc_mv[2]);
+ csc_ptr->csc_mv[0],
+ csc_ptr->csc_mv[1],
+ csc_ptr->csc_mv[2]);
+
+ return csc_ptr;
}
static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
struct dpu_plane_state *pstate,
- const struct dpu_format *fmt, bool color_fill)
+ const struct dpu_format *fmt, bool color_fill,
+ struct dpu_hw_pipe_cfg *pipe_cfg)
{
const struct drm_format_info *info = drm_format_info(fmt->base.pixel_format);
/* don't chroma subsample if decimating */
/* update scaler. calculate default config for QSEED3 */
_dpu_plane_setup_scaler3(pdpu, pstate,
- drm_rect_width(&pdpu->pipe_cfg.src_rect),
- drm_rect_height(&pdpu->pipe_cfg.src_rect),
- drm_rect_width(&pdpu->pipe_cfg.dst_rect),
- drm_rect_height(&pdpu->pipe_cfg.dst_rect),
+ drm_rect_width(&pipe_cfg->src_rect),
+ drm_rect_height(&pipe_cfg->src_rect),
+ drm_rect_width(&pipe_cfg->dst_rect),
+ drm_rect_height(&pipe_cfg->dst_rect),
&pstate->scaler3_cfg, fmt,
info->hsub, info->vsub);
}
@@ -683,6 +687,7 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
const struct dpu_format *fmt;
const struct drm_plane *plane = &pdpu->base;
struct dpu_plane_state *pstate = to_dpu_plane_state(plane->state);
+ struct dpu_hw_pipe_cfg pipe_cfg;
DPU_DEBUG_PLANE(pdpu, "\n");
@@ -699,13 +704,15 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
pstate->multirect_index);
/* override scaler/decimation if solid fill */
- pdpu->pipe_cfg.src_rect.x1 = 0;
- pdpu->pipe_cfg.src_rect.y1 = 0;
- pdpu->pipe_cfg.src_rect.x2 =
- drm_rect_width(&pdpu->pipe_cfg.dst_rect);
- pdpu->pipe_cfg.src_rect.y2 =
- drm_rect_height(&pdpu->pipe_cfg.dst_rect);
- _dpu_plane_setup_scaler(pdpu, pstate, fmt, true);
+ pipe_cfg.dst_rect = pstate->base.dst;
+
+ pipe_cfg.src_rect.x1 = 0;
+ pipe_cfg.src_rect.y1 = 0;
+ pipe_cfg.src_rect.x2 =
+ drm_rect_width(&pipe_cfg.dst_rect);
+ pipe_cfg.src_rect.y2 =
+ drm_rect_height(&pipe_cfg.dst_rect);
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, true, &pipe_cfg);
if (pdpu->pipe_hw->ops.setup_format)
pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw,
@@ -714,7 +721,7 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
if (pdpu->pipe_hw->ops.setup_rects)
pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
- &pdpu->pipe_cfg,
+ &pipe_cfg,
pstate->multirect_index);
if (pdpu->pipe_hw->ops.setup_pe)
@@ -724,7 +731,7 @@ static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
if (pdpu->pipe_hw->ops.setup_scaler &&
pstate->multirect_index != DPU_SSPP_RECT_1)
pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
- &pdpu->pipe_cfg, &pstate->pixel_ext,
+ &pipe_cfg, &pstate->pixel_ext,
&pstate->scaler3_cfg);
}
@@ -964,10 +971,10 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
crtc_state = drm_atomic_get_new_crtc_state(state,
new_plane_state->crtc);
- min_scale = FRAC_16_16(1, pdpu->pipe_sblk->maxupscale);
+ min_scale = FRAC_16_16(1, pdpu->pipe_hw->cap->sblk->maxupscale);
ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state,
min_scale,
- pdpu->pipe_sblk->maxdwnscale << 16,
+ pdpu->pipe_hw->cap->sblk->maxdwnscale << 16,
true, true);
if (ret) {
DPU_DEBUG_PLANE(pdpu, "Check plane state failed (%d)\n", ret);
@@ -993,8 +1000,8 @@ static int dpu_plane_atomic_check(struct drm_plane *plane,
min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
if (DPU_FORMAT_IS_YUV(fmt) &&
- (!(pdpu->features & DPU_SSPP_SCALER) ||
- !(pdpu->features & (BIT(DPU_SSPP_CSC)
+ (!(pdpu->pipe_hw->cap->features & DPU_SSPP_SCALER) ||
+ !(pdpu->pipe_hw->cap->features & (BIT(DPU_SSPP_CSC)
| BIT(DPU_SSPP_CSC_10BIT))))) {
DPU_DEBUG_PLANE(pdpu,
"plane doesn't have scaler/csc for yuv\n");
@@ -1056,8 +1063,13 @@ void dpu_plane_flush(struct drm_plane *plane)
else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG)
/* force 100% alpha */
_dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF);
- else if (pdpu->pipe_hw && pdpu->csc_ptr && pdpu->pipe_hw->ops.setup_csc)
- pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, pdpu->csc_ptr);
+ else if (pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_csc) {
+ const struct dpu_format *fmt = to_dpu_format(msm_framebuffer_format(plane->state->fb));
+ const struct dpu_csc_cfg *csc_ptr = _dpu_plane_get_csc(pdpu, fmt);
+
+ if (csc_ptr)
+ pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, csc_ptr);
+ }
/* flag h/w flush complete */
if (plane->state)
@@ -1091,10 +1103,11 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
bool is_rt_pipe, update_qos_remap;
const struct dpu_format *fmt =
to_dpu_format(msm_framebuffer_format(fb));
+ struct dpu_hw_pipe_cfg pipe_cfg;
- memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg));
+ memset(&pipe_cfg, 0, sizeof(struct dpu_hw_pipe_cfg));
- _dpu_plane_set_scanout(plane, pstate, &pdpu->pipe_cfg, fb);
+ _dpu_plane_set_scanout(plane, pstate, &pipe_cfg, fb);
pstate->pending = true;
@@ -1106,17 +1119,17 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
crtc->base.id, DRM_RECT_ARG(&state->dst),
(char *)&fmt->base.pixel_format, DPU_FORMAT_IS_UBWC(fmt));
- pdpu->pipe_cfg.src_rect = state->src;
+ pipe_cfg.src_rect = state->src;
/* state->src is 16.16, src_rect is not */
- pdpu->pipe_cfg.src_rect.x1 >>= 16;
- pdpu->pipe_cfg.src_rect.x2 >>= 16;
- pdpu->pipe_cfg.src_rect.y1 >>= 16;
- pdpu->pipe_cfg.src_rect.y2 >>= 16;
+ pipe_cfg.src_rect.x1 >>= 16;
+ pipe_cfg.src_rect.x2 >>= 16;
+ pipe_cfg.src_rect.y1 >>= 16;
+ pipe_cfg.src_rect.y2 >>= 16;
- pdpu->pipe_cfg.dst_rect = state->dst;
+ pipe_cfg.dst_rect = state->dst;
- _dpu_plane_setup_scaler(pdpu, pstate, fmt, false);
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, false, &pipe_cfg);
/* override for color fill */
if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
@@ -1126,7 +1139,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
if (pdpu->pipe_hw->ops.setup_rects) {
pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
- &pdpu->pipe_cfg,
+ &pipe_cfg,
pstate->multirect_index);
}
@@ -1143,7 +1156,7 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
if (pdpu->pipe_hw->ops.setup_scaler &&
pstate->multirect_index != DPU_SSPP_RECT_1)
pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
- &pdpu->pipe_cfg, &pstate->pixel_ext,
+ &pipe_cfg, &pstate->pixel_ext,
&pstate->scaler3_cfg);
if (pdpu->pipe_hw->ops.setup_multirect)
@@ -1173,35 +1186,29 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
pstate->multirect_index);
if (pdpu->pipe_hw->ops.setup_cdp) {
- struct dpu_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
+ struct dpu_hw_pipe_cdp_cfg cdp_cfg;
- memset(cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
+ memset(&cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
- cdp_cfg->enable = pdpu->catalog->perf.cdp_cfg
+ cdp_cfg.enable = pdpu->catalog->perf.cdp_cfg
[DPU_PERF_CDP_USAGE_RT].rd_enable;
- cdp_cfg->ubwc_meta_enable =
+ cdp_cfg.ubwc_meta_enable =
DPU_FORMAT_IS_UBWC(fmt);
- cdp_cfg->tile_amortize_enable =
+ cdp_cfg.tile_amortize_enable =
DPU_FORMAT_IS_UBWC(fmt) ||
DPU_FORMAT_IS_TILE(fmt);
- cdp_cfg->preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
+ cdp_cfg.preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
- pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, cdp_cfg);
+ pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, &cdp_cfg);
}
-
- /* update csc */
- if (DPU_FORMAT_IS_YUV(fmt))
- _dpu_plane_setup_csc(pdpu);
- else
- pdpu->csc_ptr = NULL;
}
- _dpu_plane_set_qos_lut(plane, fb);
+ _dpu_plane_set_qos_lut(plane, fb, &pipe_cfg);
_dpu_plane_set_danger_lut(plane, fb);
if (plane->type != DRM_PLANE_TYPE_CURSOR) {
_dpu_plane_set_qos_ctrl(plane, true, DPU_PLANE_QOS_PANIC_CTRL);
- _dpu_plane_set_ot_limit(plane, crtc);
+ _dpu_plane_set_ot_limit(plane, crtc, &pipe_cfg);
}
update_qos_remap = (is_rt_pipe != pdpu->is_rt_pipe) ||
@@ -1215,9 +1222,9 @@ static void dpu_plane_sspp_atomic_update(struct drm_plane *plane)
_dpu_plane_set_qos_remap(plane);
}
- _dpu_plane_calc_bw(plane, fb);
+ _dpu_plane_calc_bw(plane, fb, &pipe_cfg);
- _dpu_plane_calc_clk(plane);
+ _dpu_plane_calc_clk(plane, &pipe_cfg);
}
static void _dpu_plane_atomic_disable(struct drm_plane *plane)
@@ -1432,12 +1439,12 @@ static int _dpu_plane_init_debugfs(struct drm_plane *plane)
/* create overall sub-directory for the pipe */
pdpu->debugfs_root =
- debugfs_create_dir(pdpu->pipe_name,
+ debugfs_create_dir(plane->name,
plane->dev->primary->debugfs_root);
/* don't error check these */
- debugfs_create_x32("features", 0600,
- pdpu->debugfs_root, &pdpu->features);
+ debugfs_create_xul("features", 0600,
+ pdpu->debugfs_root, (unsigned long *)&pdpu->pipe_hw->cap->features);
/* add register dump support */
dpu_debugfs_setup_regset32(&pdpu->debugfs_src,
@@ -1609,21 +1616,13 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
goto clean_sspp;
}
- /* cache features mask for later */
- pdpu->features = pdpu->pipe_hw->cap->features;
- pdpu->pipe_sblk = pdpu->pipe_hw->cap->sblk;
- if (!pdpu->pipe_sblk) {
- DPU_ERROR("[%u]invalid sblk\n", pipe);
- goto clean_sspp;
- }
-
if (pdpu->is_virtual) {
- format_list = pdpu->pipe_sblk->virt_format_list;
- num_formats = pdpu->pipe_sblk->virt_num_formats;
+ format_list = pdpu->pipe_hw->cap->sblk->virt_format_list;
+ num_formats = pdpu->pipe_hw->cap->sblk->virt_num_formats;
}
else {
- format_list = pdpu->pipe_sblk->format_list;
- num_formats = pdpu->pipe_sblk->num_formats;
+ format_list = pdpu->pipe_hw->cap->sblk->format_list;
+ num_formats = pdpu->pipe_hw->cap->sblk->num_formats;
}
ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
@@ -1663,12 +1662,9 @@ struct drm_plane *dpu_plane_init(struct drm_device *dev,
/* success! finalize initialization */
drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
- /* save user friendly pipe name for later */
- snprintf(pdpu->pipe_name, DPU_NAME_SIZE, "plane%u", plane->base.id);
-
mutex_init(&pdpu->lock);
- DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", pdpu->pipe_name,
+ DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", plane->name,
pipe, plane->base.id, master_plane_id);
return plane;
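
The dpu_plane.c hunks above stop caching a CSC pointer on the plane and instead derive it on demand: _dpu_plane_get_csc() returns a const table chosen from whether the format is YUV and whether the SSPP advertises 10-bit CSC, and dpu_plane_flush() simply skips the setup_csc call when no table applies. Below is a minimal stand-alone sketch of that "pure lookup of a const table" pattern; the names, the capability bit and the placeholder coefficients are illustrative, not the driver's.

/* Hypothetical sketch of the const-table lookup _dpu_plane_get_csc() now
 * performs: no cached pointer, just a function of format and HW caps. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CAP_CSC_10BIT (1u << 0)            /* stand-in for BIT(DPU_SSPP_CSC_10BIT) */

struct csc_cfg {
	const char *name;
	uint32_t csc_mv[9];                /* truncated placeholder coefficients */
};

static const struct csc_cfg csc8_yuv2rgb  = { "8-bit YUV->RGB",  { 0x0254, 0x0000, 0x0331 } };
static const struct csc_cfg csc10_yuv2rgb = { "10-bit YUV->RGB", { 0x00012A00, 0x0, 0x00019880 } };

static const struct csc_cfg *get_csc(bool is_yuv, uint32_t caps)
{
	if (!is_yuv)
		return NULL;               /* RGB needs no colour-space conversion */
	return (caps & CAP_CSC_10BIT) ? &csc10_yuv2rgb : &csc8_yuv2rgb;
}

int main(void)
{
	const struct csc_cfg *csc = get_csc(true, CAP_CSC_10BIT);

	printf("selected: %s\n", csc ? csc->name : "none");
	return 0;
}
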
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
index 34e03ac05f4a..52792526e904 100644
--- a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -25,7 +25,6 @@
* @pending: whether the current update is still pending
* @scaler3_cfg: configuration data for scaler3
* @pixel_ext: configuration data for pixel extensions
- * @cdp_cfg: CDP configuration
* @plane_fetch_bw: calculated BW per plane
* @plane_clk: calculated clk per plane
*/
@@ -42,7 +41,6 @@ struct dpu_plane_state {
struct dpu_hw_scaler3_cfg scaler3_cfg;
struct dpu_hw_pixel_ext pixel_ext;
- struct dpu_hw_pipe_cdp_cfg cdp_cfg;
u64 plane_fetch_bw;
u64 plane_clk;
};
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 7b242246d4e7..f78a6eec0ddd 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -370,22 +370,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms,
switch (intf->type) {
case INTF_eDP:
- if (!priv->edp)
- break;
-
- ctl = mdp5_ctlm_request(ctlm, intf->num);
- if (!ctl) {
- ret = -EINVAL;
- break;
- }
-
- encoder = construct_encoder(mdp5_kms, intf, ctl);
- if (IS_ERR(encoder)) {
- ret = PTR_ERR(encoder);
- break;
- }
-
- ret = msm_edp_modeset_init(priv->edp, dev, encoder);
+ DRM_DEV_INFO(dev->dev, "Skipping eDP interface %d\n", intf->num);
break;
case INTF_HDMI:
if (!priv->hdmi)
@@ -1031,7 +1016,7 @@ static const struct dev_pm_ops mdp5_pm_ops = {
SET_RUNTIME_PM_OPS(mdp5_runtime_suspend, mdp5_runtime_resume, NULL)
};
-static const struct of_device_id mdp5_dt_match[] = {
+const struct of_device_id mdp5_dt_match[] = {
{ .compatible = "qcom,mdp5", },
/* to support downstream DT files */
{ .compatible = "qcom,mdss_mdp", },
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index 0ea53420bc40..c34760d981b8 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -16,8 +16,6 @@ struct mdp5_mdss {
void __iomem *mmio, *vbif;
- struct regulator *vdd;
-
struct clk *ahb_clk;
struct clk *axi_clk;
struct clk *vsync_clk;
@@ -185,8 +183,6 @@ static void mdp5_mdss_destroy(struct drm_device *dev)
irq_domain_remove(mdp5_mdss->irqcontroller.domain);
mdp5_mdss->irqcontroller.domain = NULL;
- regulator_disable(mdp5_mdss->vdd);
-
pm_runtime_disable(dev->dev);
}
@@ -234,31 +230,17 @@ int mdp5_mdss_init(struct drm_device *dev)
goto fail;
}
- /* Regulator to enable GDSCs in downstream kernels */
- mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd");
- if (IS_ERR(mdp5_mdss->vdd)) {
- ret = PTR_ERR(mdp5_mdss->vdd);
- goto fail;
- }
-
- ret = regulator_enable(mdp5_mdss->vdd);
- if (ret) {
- DRM_DEV_ERROR(dev->dev, "failed to enable regulator vdd: %d\n",
- ret);
- goto fail;
- }
-
ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
mdss_irq, 0, "mdss_isr", mdp5_mdss);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
- goto fail_irq;
+ goto fail;
}
ret = mdss_irq_domain_init(mdp5_mdss);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to init sub-block irqs: %d\n", ret);
- goto fail_irq;
+ goto fail;
}
mdp5_mdss->base.funcs = &mdss_funcs;
@@ -267,8 +249,6 @@ int mdp5_mdss_init(struct drm_device *dev)
pm_runtime_enable(dev->dev);
return 0;
-fail_irq:
- regulator_disable(mdp5_mdss->vdd);
fail:
return ret;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_aux.c b/drivers/gpu/drm/msm/dp/dp_aux.c
index eb40d8413bca..6d36f63c3338 100644
--- a/drivers/gpu/drm/msm/dp/dp_aux.c
+++ b/drivers/gpu/drm/msm/dp/dp_aux.c
@@ -33,6 +33,7 @@ struct dp_aux_private {
bool read;
bool no_send_addr;
bool no_send_stop;
+ bool initted;
u32 offset;
u32 segment;
@@ -331,6 +332,10 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
}
mutex_lock(&aux->mutex);
+ if (!aux->initted) {
+ ret = -EIO;
+ goto exit;
+ }
dp_aux_update_offset_and_segment(aux, msg);
dp_aux_transfer_helper(aux, msg, true);
@@ -380,6 +385,8 @@ static ssize_t dp_aux_transfer(struct drm_dp_aux *dp_aux,
}
aux->cmd_busy = false;
+
+exit:
mutex_unlock(&aux->mutex);
return ret;
@@ -431,8 +438,13 @@ void dp_aux_init(struct drm_dp_aux *dp_aux)
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ mutex_lock(&aux->mutex);
+
dp_catalog_aux_enable(aux->catalog, true);
aux->retry_cnt = 0;
+ aux->initted = true;
+
+ mutex_unlock(&aux->mutex);
}
void dp_aux_deinit(struct drm_dp_aux *dp_aux)
@@ -441,7 +453,12 @@ void dp_aux_deinit(struct drm_dp_aux *dp_aux)
aux = container_of(dp_aux, struct dp_aux_private, dp_aux);
+ mutex_lock(&aux->mutex);
+
+ aux->initted = false;
dp_catalog_aux_enable(aux->catalog, false);
+
+ mutex_unlock(&aux->mutex);
}
int dp_aux_register(struct drm_dp_aux *dp_aux)
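
The dp_aux.c hunks above serialise init and deinit against AUX transfers with the existing mutex plus a new initted flag, so a transfer issued after teardown fails fast with -EIO instead of touching disabled hardware. A minimal userspace sketch of that guard pattern, using pthreads and hypothetical names:

/* Hypothetical sketch of the initted-flag guard added in dp_aux.c:
 * transfers take the lock, check the flag, and bail out with -EIO
 * once deinit has run. */
#include <errno.h>
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct aux_dev {
	pthread_mutex_t lock;
	bool initted;
};

static void aux_init(struct aux_dev *aux)
{
	pthread_mutex_lock(&aux->lock);
	aux->initted = true;               /* hardware would be enabled here */
	pthread_mutex_unlock(&aux->lock);
}

static void aux_deinit(struct aux_dev *aux)
{
	pthread_mutex_lock(&aux->lock);
	aux->initted = false;              /* hardware would be disabled here */
	pthread_mutex_unlock(&aux->lock);
}

static int aux_transfer(struct aux_dev *aux, const char *msg)
{
	int ret = 0;

	pthread_mutex_lock(&aux->lock);
	if (!aux->initted) {
		ret = -EIO;                /* mirror the new early-exit path */
		goto out;
	}
	printf("transfer: %s\n", msg);
out:
	pthread_mutex_unlock(&aux->lock);
	return ret;
}

int main(void)
{
	struct aux_dev aux = { .lock = PTHREAD_MUTEX_INITIALIZER, .initted = false };

	aux_init(&aux);
	aux_transfer(&aux, "read DPCD");   /* succeeds */
	aux_deinit(&aux);
	printf("after deinit: %d\n", aux_transfer(&aux, "read DPCD")); /* -EIO */
	return 0;
}
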
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 62e75dc8afc6..c724cb0bde9d 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -119,13 +119,13 @@ void dp_ctrl_push_idle(struct dp_ctrl *dp_ctrl)
static void dp_ctrl_config_ctrl(struct dp_ctrl_private *ctrl)
{
u32 config = 0, tbd;
- u8 *dpcd = ctrl->panel->dpcd;
+ const u8 *dpcd = ctrl->panel->dpcd;
/* Default-> LSCLK DIV: 1/4 LCLK */
config |= (2 << DP_CONFIGURATION_CTRL_LSCLK_DIV_SHIFT);
/* Scrambler reset enable */
- if (dpcd[DP_EDP_CONFIGURATION_CAP] & DP_ALTERNATE_SCRAMBLER_RESET_CAP)
+ if (drm_dp_alternate_scrambler_reset_cap(dpcd))
config |= DP_CONFIGURATION_CTRL_ASSR;
tbd = dp_link_get_test_bits_depth(ctrl->link,
@@ -1228,7 +1228,10 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
int *training_step)
{
int ret = 0;
+ const u8 *dpcd = ctrl->panel->dpcd;
u8 encoding = DP_SET_ANSI_8B10B;
+ u8 ssc;
+ u8 assr;
struct dp_link_info link_info = {0};
dp_ctrl_config_ctrl(ctrl);
@@ -1238,9 +1241,21 @@ static int dp_ctrl_link_train(struct dp_ctrl_private *ctrl,
link_info.capabilities = DP_LINK_CAP_ENHANCED_FRAMING;
dp_aux_link_configure(ctrl->aux, &link_info);
+
+ if (drm_dp_max_downspread(dpcd)) {
+ ssc = DP_SPREAD_AMP_0_5;
+ drm_dp_dpcd_write(ctrl->aux, DP_DOWNSPREAD_CTRL, &ssc, 1);
+ }
+
drm_dp_dpcd_write(ctrl->aux, DP_MAIN_LINK_CHANNEL_CODING_SET,
&encoding, 1);
+ if (drm_dp_alternate_scrambler_reset_cap(dpcd)) {
+ assr = DP_ALTERNATE_SCRAMBLER_RESET_ENABLE;
+ drm_dp_dpcd_write(ctrl->aux, DP_EDP_CONFIGURATION_SET,
+ &assr, 1);
+ }
+
ret = dp_ctrl_link_train_1(ctrl, training_step);
if (ret) {
DRM_ERROR("link training #1 failed. ret=%d\n", ret);
@@ -1312,9 +1327,11 @@ static int dp_ctrl_enable_mainlink_clocks(struct dp_ctrl_private *ctrl)
struct dp_io *dp_io = &ctrl->parser->io;
struct phy *phy = dp_io->phy;
struct phy_configure_opts_dp *opts_dp = &dp_io->phy_opts.dp;
+ const u8 *dpcd = ctrl->panel->dpcd;
opts_dp->lanes = ctrl->link->link_params.num_lanes;
opts_dp->link_rate = ctrl->link->link_params.rate / 100;
+ opts_dp->ssc = drm_dp_max_downspread(dpcd);
dp_ctrl_set_clock_rate(ctrl, DP_CTRL_PM, "ctrl_link",
ctrl->link->link_params.rate * 1000);
@@ -1406,7 +1423,7 @@ void dp_ctrl_host_deinit(struct dp_ctrl *dp_ctrl)
static bool dp_ctrl_use_fixed_nvid(struct dp_ctrl_private *ctrl)
{
- u8 *dpcd = ctrl->panel->dpcd;
+ const u8 *dpcd = ctrl->panel->dpcd;
/*
* For better interop experience, used a fixed NVID=0x8000
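
The dp_ctrl.c hunks above gate two optional sink settings on DPCD capability bits before link training: 0.5% downspread when the sink reports it, and the eDP alternate scrambler reset (ASSR) when advertised. A rough stand-alone sketch of that capability-gated write sequence follows; dpcd_write() and the fake_sink fields are stand-ins for illustration, not the drm_dp API.

/* Hypothetical sketch of the capability-gated DPCD writes done before
 * link training in the patch: only request downspread and ASSR when the
 * sink's capability bytes advertise them. */
#include <stdint.h>
#include <stdio.h>

#define MAX_DOWNSPREAD_0_5_PCT  (1u << 0)  /* assumed bit in DP_MAX_DOWNSPREAD */
#define ALT_SCRAMBLER_RESET_CAP (1u << 0)  /* assumed bit in DP_EDP_CONFIGURATION_CAP */

struct fake_sink {
	uint8_t max_downspread;
	uint8_t edp_cfg_cap;
};

static void dpcd_write(const char *reg, uint8_t val)
{
	printf("DPCD write %s = 0x%02x\n", reg, val); /* stand-in for drm_dp_dpcd_write() */
}

static void configure_link(const struct fake_sink *sink)
{
	if (sink->max_downspread & MAX_DOWNSPREAD_0_5_PCT)
		dpcd_write("DP_DOWNSPREAD_CTRL", 0x10);          /* DP_SPREAD_AMP_0_5 */

	dpcd_write("DP_MAIN_LINK_CHANNEL_CODING_SET", 0x01);     /* ANSI 8b/10b */

	if (sink->edp_cfg_cap & ALT_SCRAMBLER_RESET_CAP)
		dpcd_write("DP_EDP_CONFIGURATION_SET", 0x01);    /* enable ASSR */
}

int main(void)
{
	struct fake_sink edp_panel = { .max_downspread = 0x01, .edp_cfg_cap = 0x01 };

	configure_link(&edp_panel);
	return 0;
}
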
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index aba8aa47ed76..d44f18b96ff4 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -135,8 +135,17 @@ static const struct msm_dp_config sc7180_dp_cfg = {
.num_descs = 1,
};
+static const struct msm_dp_config sc7280_dp_cfg = {
+ .descs = (const struct msm_dp_desc[]) {
+ [MSM_DP_CONTROLLER_0] = { .io_start = 0x0ae90000, .connector_type = DRM_MODE_CONNECTOR_DisplayPort },
+ [MSM_DP_CONTROLLER_1] = { .io_start = 0x0aea0000, .connector_type = DRM_MODE_CONNECTOR_eDP },
+ },
+ .num_descs = 2,
+};
+
static const struct of_device_id dp_dt_match[] = {
{ .compatible = "qcom,sc7180-dp", .data = &sc7180_dp_cfg },
+ { .compatible = "qcom,sc7280-edp", .data = &sc7280_dp_cfg },
{}
};
@@ -522,11 +531,8 @@ static int dp_hpd_plug_handle(struct dp_display_private *dp, u32 data)
dp->hpd_state = ST_CONNECT_PENDING;
- hpd->hpd_high = 1;
-
ret = dp_display_usbpd_configure_cb(&dp->pdev->dev);
if (ret) { /* link train failed */
- hpd->hpd_high = 0;
dp->hpd_state = ST_DISCONNECTED;
if (ret == -ECONNRESET) { /* cable unplugged */
@@ -603,7 +609,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
/* triggered by irq_hdp with sink_count = 0 */
if (dp->link->sink_count == 0) {
dp_ctrl_off_phy(dp->ctrl);
- hpd->hpd_high = 0;
dp->core_initialized = false;
}
mutex_unlock(&dp->event_mutex);
@@ -627,8 +632,6 @@ static int dp_hpd_unplug_handle(struct dp_display_private *dp, u32 data)
/* disable HPD plug interrupts */
dp_catalog_hpd_config_intr(dp->catalog, DP_DP_HPD_PLUG_INT_MASK, false);
- hpd->hpd_high = 0;
-
/*
* We don't need separate work for disconnect as
* connect/attention interrupts are disabled
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.c b/drivers/gpu/drm/msm/dp/dp_hpd.c
index e1c90fa47411..db98a1d431eb 100644
--- a/drivers/gpu/drm/msm/dp/dp_hpd.c
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.c
@@ -32,8 +32,6 @@ int dp_hpd_connect(struct dp_usbpd *dp_usbpd, bool hpd)
hpd_priv = container_of(dp_usbpd, struct dp_hpd_private,
dp_usbpd);
- dp_usbpd->hpd_high = hpd;
-
if (!hpd_priv->dp_cb || !hpd_priv->dp_cb->configure
|| !hpd_priv->dp_cb->disconnect) {
pr_err("hpd dp_cb not initialized\n");
diff --git a/drivers/gpu/drm/msm/dp/dp_hpd.h b/drivers/gpu/drm/msm/dp/dp_hpd.h
index 5bc5bb64680f..8feec5aa5027 100644
--- a/drivers/gpu/drm/msm/dp/dp_hpd.h
+++ b/drivers/gpu/drm/msm/dp/dp_hpd.h
@@ -26,7 +26,6 @@ enum plug_orientation {
* @multi_func: multi-function preferred
* @usb_config_req: request to switch to usb
* @exit_dp_mode: request exit from displayport mode
- * @hpd_high: Hot Plug Detect signal is high.
* @hpd_irq: Change in the status since last message
* @alt_mode_cfg_done: bool to specify alt mode status
* @debug_en: bool to specify debug mode
@@ -39,7 +38,6 @@ struct dp_usbpd {
bool multi_func;
bool usb_config_req;
bool exit_dp_mode;
- bool hpd_high;
bool hpd_irq;
bool alt_mode_cfg_done;
bool debug_en;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index bb39e7ca802d..f46df52c6985 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -120,6 +120,8 @@ unsigned long msm_dsi_host_get_mode_flags(struct mipi_dsi_host *host);
struct drm_bridge *msm_dsi_host_get_bridge(struct mipi_dsi_host *host);
int msm_dsi_host_register(struct mipi_dsi_host *host);
void msm_dsi_host_unregister(struct mipi_dsi_host *host);
+void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host,
+ struct msm_dsi_phy *src_phy);
int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_phy *src_phy);
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
@@ -173,8 +175,6 @@ int msm_dsi_phy_enable(struct msm_dsi_phy *phy,
void msm_dsi_phy_disable(struct msm_dsi_phy *phy);
void msm_dsi_phy_set_usecase(struct msm_dsi_phy *phy,
enum msm_dsi_phy_usecase uc);
-int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
- struct clk **byte_clk_provider, struct clk **pixel_clk_provider);
void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy);
int msm_dsi_phy_pll_restore_state(struct msm_dsi_phy *phy);
void msm_dsi_phy_snapshot(struct msm_disp_state *disp_state, struct msm_dsi_phy *phy);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4c7b6944fc0d..5b4bb722f750 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1664,6 +1664,8 @@ static int dsi_host_parse_lane_data(struct msm_dsi_host *msm_host,
if (!prop) {
DRM_DEV_DEBUG(dev,
"failed to find data lane mapping, using default\n");
+ /* Set the number of data lanes to 4 by default. */
+ msm_host->num_data_lanes = 4;
return 0;
}
@@ -2177,57 +2179,12 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 dma_base,
wmb();
}
-int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
+void msm_dsi_host_set_phy_mode(struct mipi_dsi_host *host,
struct msm_dsi_phy *src_phy)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- struct clk *byte_clk_provider, *pixel_clk_provider;
- int ret;
msm_host->cphy_mode = src_phy->cphy_mode;
-
- ret = msm_dsi_phy_get_clk_provider(src_phy,
- &byte_clk_provider, &pixel_clk_provider);
- if (ret) {
- pr_info("%s: can't get provider from pll, don't set parent\n",
- __func__);
- return 0;
- }
-
- ret = clk_set_parent(msm_host->byte_clk_src, byte_clk_provider);
- if (ret) {
- pr_err("%s: can't set parent to byte_clk_src. ret=%d\n",
- __func__, ret);
- goto exit;
- }
-
- ret = clk_set_parent(msm_host->pixel_clk_src, pixel_clk_provider);
- if (ret) {
- pr_err("%s: can't set parent to pixel_clk_src. ret=%d\n",
- __func__, ret);
- goto exit;
- }
-
- if (msm_host->dsi_clk_src) {
- ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
- if (ret) {
- pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
- __func__, ret);
- goto exit;
- }
- }
-
- if (msm_host->esc_clk_src) {
- ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
- if (ret) {
- pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
- __func__, ret);
- goto exit;
- }
- }
-
-exit:
- return ret;
}
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 01bf8d907933..681ca74fe410 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -79,10 +79,8 @@ static int dsi_mgr_setup_components(int id)
return ret;
msm_dsi_phy_set_usecase(msm_dsi->phy, MSM_DSI_PHY_STANDALONE);
- ret = msm_dsi_host_set_src_pll(msm_dsi->host, msm_dsi->phy);
- } else if (!other_dsi) {
- ret = 0;
- } else {
+ msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+ } else if (other_dsi) {
struct msm_dsi *master_link_dsi = IS_MASTER_DSI_LINK(id) ?
msm_dsi : other_dsi;
struct msm_dsi *slave_link_dsi = IS_MASTER_DSI_LINK(id) ?
@@ -106,13 +104,11 @@ static int dsi_mgr_setup_components(int id)
MSM_DSI_PHY_MASTER);
msm_dsi_phy_set_usecase(clk_slave_dsi->phy,
MSM_DSI_PHY_SLAVE);
- ret = msm_dsi_host_set_src_pll(msm_dsi->host, clk_master_dsi->phy);
- if (ret)
- return ret;
- ret = msm_dsi_host_set_src_pll(other_dsi->host, clk_master_dsi->phy);
+ msm_dsi_host_set_phy_mode(msm_dsi->host, msm_dsi->phy);
+ msm_dsi_host_set_phy_mode(other_dsi->host, other_dsi->phy);
}
- return ret;
+ return 0;
}
static int enable_phy(struct msm_dsi *msm_dsi,
diff --git a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
index 9842e04b5858..0b2ae5c15240 100644
--- a/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
+++ b/drivers/gpu/drm/msm/dsi/phy/dsi_phy.c
@@ -892,17 +892,6 @@ bool msm_dsi_phy_set_continuous_clock(struct msm_dsi_phy *phy, bool enable)
return phy->cfg->ops.set_continuous_clock(phy, enable);
}
-int msm_dsi_phy_get_clk_provider(struct msm_dsi_phy *phy,
- struct clk **byte_clk_provider, struct clk **pixel_clk_provider)
-{
- if (byte_clk_provider)
- *byte_clk_provider = phy->provided_clocks->hws[DSI_BYTE_PLL_CLK]->clk;
- if (pixel_clk_provider)
- *pixel_clk_provider = phy->provided_clocks->hws[DSI_PIXEL_PLL_CLK]->clk;
-
- return 0;
-}
-
void msm_dsi_phy_pll_save_state(struct msm_dsi_phy *phy)
{
if (phy->cfg->ops.save_pll_state) {
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
deleted file mode 100644
index 106a67473af5..000000000000
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ /dev/null
@@ -1,198 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/of_irq.h>
-#include "edp.h"
-
-static irqreturn_t edp_irq(int irq, void *dev_id)
-{
- struct msm_edp *edp = dev_id;
-
- /* Process eDP irq */
- return msm_edp_ctrl_irq(edp->ctrl);
-}
-
-static void edp_destroy(struct platform_device *pdev)
-{
- struct msm_edp *edp = platform_get_drvdata(pdev);
-
- if (!edp)
- return;
-
- if (edp->ctrl) {
- msm_edp_ctrl_destroy(edp->ctrl);
- edp->ctrl = NULL;
- }
-
- platform_set_drvdata(pdev, NULL);
-}
-
-/* construct eDP at bind/probe time, grab all the resources. */
-static struct msm_edp *edp_init(struct platform_device *pdev)
-{
- struct msm_edp *edp = NULL;
- int ret;
-
- if (!pdev) {
- pr_err("no eDP device\n");
- ret = -ENXIO;
- goto fail;
- }
-
- edp = devm_kzalloc(&pdev->dev, sizeof(*edp), GFP_KERNEL);
- if (!edp) {
- ret = -ENOMEM;
- goto fail;
- }
- DBG("eDP probed=%p", edp);
-
- edp->pdev = pdev;
- platform_set_drvdata(pdev, edp);
-
- ret = msm_edp_ctrl_init(edp);
- if (ret)
- goto fail;
-
- return edp;
-
-fail:
- if (edp)
- edp_destroy(pdev);
-
- return ERR_PTR(ret);
-}
-
-static int edp_bind(struct device *dev, struct device *master, void *data)
-{
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
- struct msm_edp *edp;
-
- DBG("");
- edp = edp_init(to_platform_device(dev));
- if (IS_ERR(edp))
- return PTR_ERR(edp);
- priv->edp = edp;
-
- return 0;
-}
-
-static void edp_unbind(struct device *dev, struct device *master, void *data)
-{
- struct drm_device *drm = dev_get_drvdata(master);
- struct msm_drm_private *priv = drm->dev_private;
-
- DBG("");
- if (priv->edp) {
- edp_destroy(to_platform_device(dev));
- priv->edp = NULL;
- }
-}
-
-static const struct component_ops edp_ops = {
- .bind = edp_bind,
- .unbind = edp_unbind,
-};
-
-static int edp_dev_probe(struct platform_device *pdev)
-{
- DBG("");
- return component_add(&pdev->dev, &edp_ops);
-}
-
-static int edp_dev_remove(struct platform_device *pdev)
-{
- DBG("");
- component_del(&pdev->dev, &edp_ops);
- return 0;
-}
-
-static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,mdss-edp" },
- {}
-};
-
-static struct platform_driver edp_driver = {
- .probe = edp_dev_probe,
- .remove = edp_dev_remove,
- .driver = {
- .name = "msm_edp",
- .of_match_table = dt_match,
- },
-};
-
-void __init msm_edp_register(void)
-{
- DBG("");
- platform_driver_register(&edp_driver);
-}
-
-void __exit msm_edp_unregister(void)
-{
- DBG("");
- platform_driver_unregister(&edp_driver);
-}
-
-/* Second part of initialization, the drm/kms level modeset_init */
-int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
- struct drm_encoder *encoder)
-{
- struct platform_device *pdev = edp->pdev;
- struct msm_drm_private *priv = dev->dev_private;
- int ret;
-
- edp->encoder = encoder;
- edp->dev = dev;
-
- edp->bridge = msm_edp_bridge_init(edp);
- if (IS_ERR(edp->bridge)) {
- ret = PTR_ERR(edp->bridge);
- DRM_DEV_ERROR(dev->dev, "failed to create eDP bridge: %d\n", ret);
- edp->bridge = NULL;
- goto fail;
- }
-
- edp->connector = msm_edp_connector_init(edp);
- if (IS_ERR(edp->connector)) {
- ret = PTR_ERR(edp->connector);
- DRM_DEV_ERROR(dev->dev, "failed to create eDP connector: %d\n", ret);
- edp->connector = NULL;
- goto fail;
- }
-
- edp->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
- if (edp->irq < 0) {
- ret = edp->irq;
- DRM_DEV_ERROR(dev->dev, "failed to get IRQ: %d\n", ret);
- goto fail;
- }
-
- ret = devm_request_irq(&pdev->dev, edp->irq,
- edp_irq, IRQF_TRIGGER_HIGH | IRQF_ONESHOT,
- "edp_isr", edp);
- if (ret < 0) {
- DRM_DEV_ERROR(dev->dev, "failed to request IRQ%u: %d\n",
- edp->irq, ret);
- goto fail;
- }
-
- priv->bridges[priv->num_bridges++] = edp->bridge;
- priv->connectors[priv->num_connectors++] = edp->connector;
-
- return 0;
-
-fail:
- /* bridge/connector are normally destroyed by drm */
- if (edp->bridge) {
- edp_bridge_destroy(edp->bridge);
- edp->bridge = NULL;
- }
- if (edp->connector) {
- edp->connector->funcs->destroy(edp->connector);
- edp->connector = NULL;
- }
-
- return ret;
-}
diff --git a/drivers/gpu/drm/msm/edp/edp.h b/drivers/gpu/drm/msm/edp/edp.h
deleted file mode 100644
index 8590f2ce274d..000000000000
--- a/drivers/gpu/drm/msm/edp/edp.h
+++ /dev/null
@@ -1,77 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only */
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#ifndef __EDP_CONNECTOR_H__
-#define __EDP_CONNECTOR_H__
-
-#include <linux/i2c.h>
-#include <linux/interrupt.h>
-#include <linux/kernel.h>
-#include <linux/platform_device.h>
-#include <drm/drm_bridge.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_dp_helper.h>
-
-#include "msm_drv.h"
-
-#define edp_read(offset) msm_readl((offset))
-#define edp_write(offset, data) msm_writel((data), (offset))
-
-struct edp_ctrl;
-struct edp_aux;
-struct edp_phy;
-
-struct msm_edp {
- struct drm_device *dev;
- struct platform_device *pdev;
-
- struct drm_connector *connector;
- struct drm_bridge *bridge;
-
- /* the encoder we are hooked to (outside of eDP block) */
- struct drm_encoder *encoder;
-
- struct edp_ctrl *ctrl;
-
- int irq;
-};
-
-/* eDP bridge */
-struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp);
-void edp_bridge_destroy(struct drm_bridge *bridge);
-
-/* eDP connector */
-struct drm_connector *msm_edp_connector_init(struct msm_edp *edp);
-
-/* AUX */
-void *msm_edp_aux_init(struct msm_edp *edp, void __iomem *regbase, struct drm_dp_aux **drm_aux);
-void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux);
-irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr);
-void msm_edp_aux_ctrl(struct edp_aux *aux, int enable);
-
-/* Phy */
-bool msm_edp_phy_ready(struct edp_phy *phy);
-void msm_edp_phy_ctrl(struct edp_phy *phy, int enable);
-void msm_edp_phy_vm_pe_init(struct edp_phy *phy);
-void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1);
-void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane);
-void *msm_edp_phy_init(struct device *dev, void __iomem *regbase);
-
-/* Ctrl */
-irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl);
-void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on);
-int msm_edp_ctrl_init(struct msm_edp *edp);
-void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl);
-bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl);
-int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
- struct drm_connector *connector, struct edid **edid);
-int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl,
- const struct drm_display_mode *mode,
- const struct drm_display_info *info);
-/* @pixel_rate is in kHz */
-bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl,
- u32 pixel_rate, u32 *pm, u32 *pn);
-
-#endif /* __EDP_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
deleted file mode 100644
index 7907e0f5988f..000000000000
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ /dev/null
@@ -1,388 +0,0 @@
-#ifndef EDP_XML
-#define EDP_XML
-
-/* Autogenerated file, DO NOT EDIT manually!
-
-This file was generated by the rules-ng-ng headergen tool in this git repository:
-http://github.com/freedreno/envytools/
-git clone https://github.com/freedreno/envytools.git
-
-The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/msm.xml ( 981 bytes, from 2021-06-05 21:37:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/freedreno_copyright.xml ( 1572 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp4.xml ( 20912 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp_common.xml ( 2849 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/mdp/mdp5.xml ( 37461 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi.xml ( 15291 bytes, from 2021-06-15 22:36:13)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_v2.xml ( 3236 bytes, from 2021-06-05 21:37:42)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm_8960.xml ( 4935 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_28nm.xml ( 7004 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_20nm.xml ( 3712 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_14nm.xml ( 5381 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_10nm.xml ( 4499 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_7nm.xml ( 10953 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/dsi_phy_5nm.xml ( 10900 bytes, from 2021-05-21 19:18:08)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/sfpb.xml ( 602 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/dsi/mmss_cc.xml ( 1686 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/qfprom.xml ( 600 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/hdmi/hdmi.xml ( 41874 bytes, from 2021-02-18 16:45:44)
-- /home/robclark/src/mesa/mesa/src/freedreno/registers/edp/edp.xml ( 10416 bytes, from 2021-02-18 16:45:44)
-
-Copyright (C) 2013-2021 by the following authors:
-- Rob Clark <robdclark@gmail.com> (robclark)
-- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice (including the
-next paragraph) shall be included in all copies or substantial
-portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
-IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-*/
-
-
-enum edp_color_depth {
- EDP_6BIT = 0,
- EDP_8BIT = 1,
- EDP_10BIT = 2,
- EDP_12BIT = 3,
- EDP_16BIT = 4,
-};
-
-enum edp_component_format {
- EDP_RGB = 0,
- EDP_YUV422 = 1,
- EDP_YUV444 = 2,
-};
-
-#define REG_EDP_MAINLINK_CTRL 0x00000004
-#define EDP_MAINLINK_CTRL_ENABLE 0x00000001
-#define EDP_MAINLINK_CTRL_RESET 0x00000002
-
-#define REG_EDP_STATE_CTRL 0x00000008
-#define EDP_STATE_CTRL_TRAIN_PATTERN_1 0x00000001
-#define EDP_STATE_CTRL_TRAIN_PATTERN_2 0x00000002
-#define EDP_STATE_CTRL_TRAIN_PATTERN_3 0x00000004
-#define EDP_STATE_CTRL_SYMBOL_ERR_RATE_MEAS 0x00000008
-#define EDP_STATE_CTRL_PRBS7 0x00000010
-#define EDP_STATE_CTRL_CUSTOM_80_BIT_PATTERN 0x00000020
-#define EDP_STATE_CTRL_SEND_VIDEO 0x00000040
-#define EDP_STATE_CTRL_PUSH_IDLE 0x00000080
-
-#define REG_EDP_CONFIGURATION_CTRL 0x0000000c
-#define EDP_CONFIGURATION_CTRL_SYNC_CLK 0x00000001
-#define EDP_CONFIGURATION_CTRL_STATIC_MVID 0x00000002
-#define EDP_CONFIGURATION_CTRL_PROGRESSIVE 0x00000004
-#define EDP_CONFIGURATION_CTRL_LANES__MASK 0x00000030
-#define EDP_CONFIGURATION_CTRL_LANES__SHIFT 4
-static inline uint32_t EDP_CONFIGURATION_CTRL_LANES(uint32_t val)
-{
- return ((val) << EDP_CONFIGURATION_CTRL_LANES__SHIFT) & EDP_CONFIGURATION_CTRL_LANES__MASK;
-}
-#define EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING 0x00000040
-#define EDP_CONFIGURATION_CTRL_COLOR__MASK 0x00000100
-#define EDP_CONFIGURATION_CTRL_COLOR__SHIFT 8
-static inline uint32_t EDP_CONFIGURATION_CTRL_COLOR(enum edp_color_depth val)
-{
- return ((val) << EDP_CONFIGURATION_CTRL_COLOR__SHIFT) & EDP_CONFIGURATION_CTRL_COLOR__MASK;
-}
-
-#define REG_EDP_SOFTWARE_MVID 0x00000014
-
-#define REG_EDP_SOFTWARE_NVID 0x00000018
-
-#define REG_EDP_TOTAL_HOR_VER 0x0000001c
-#define EDP_TOTAL_HOR_VER_HORIZ__MASK 0x0000ffff
-#define EDP_TOTAL_HOR_VER_HORIZ__SHIFT 0
-static inline uint32_t EDP_TOTAL_HOR_VER_HORIZ(uint32_t val)
-{
- return ((val) << EDP_TOTAL_HOR_VER_HORIZ__SHIFT) & EDP_TOTAL_HOR_VER_HORIZ__MASK;
-}
-#define EDP_TOTAL_HOR_VER_VERT__MASK 0xffff0000
-#define EDP_TOTAL_HOR_VER_VERT__SHIFT 16
-static inline uint32_t EDP_TOTAL_HOR_VER_VERT(uint32_t val)
-{
- return ((val) << EDP_TOTAL_HOR_VER_VERT__SHIFT) & EDP_TOTAL_HOR_VER_VERT__MASK;
-}
-
-#define REG_EDP_START_HOR_VER_FROM_SYNC 0x00000020
-#define EDP_START_HOR_VER_FROM_SYNC_HORIZ__MASK 0x0000ffff
-#define EDP_START_HOR_VER_FROM_SYNC_HORIZ__SHIFT 0
-static inline uint32_t EDP_START_HOR_VER_FROM_SYNC_HORIZ(uint32_t val)
-{
- return ((val) << EDP_START_HOR_VER_FROM_SYNC_HORIZ__SHIFT) & EDP_START_HOR_VER_FROM_SYNC_HORIZ__MASK;
-}
-#define EDP_START_HOR_VER_FROM_SYNC_VERT__MASK 0xffff0000
-#define EDP_START_HOR_VER_FROM_SYNC_VERT__SHIFT 16
-static inline uint32_t EDP_START_HOR_VER_FROM_SYNC_VERT(uint32_t val)
-{
- return ((val) << EDP_START_HOR_VER_FROM_SYNC_VERT__SHIFT) & EDP_START_HOR_VER_FROM_SYNC_VERT__MASK;
-}
-
-#define REG_EDP_HSYNC_VSYNC_WIDTH_POLARITY 0x00000024
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__MASK 0x00007fff
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__SHIFT 0
-static inline uint32_t EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ(uint32_t val)
-{
- return ((val) << EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__SHIFT) & EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ__MASK;
-}
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_NHSYNC 0x00008000
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__MASK 0x7fff0000
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__SHIFT 16
-static inline uint32_t EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT(uint32_t val)
-{
- return ((val) << EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__SHIFT) & EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT__MASK;
-}
-#define EDP_HSYNC_VSYNC_WIDTH_POLARITY_NVSYNC 0x80000000
-
-#define REG_EDP_ACTIVE_HOR_VER 0x00000028
-#define EDP_ACTIVE_HOR_VER_HORIZ__MASK 0x0000ffff
-#define EDP_ACTIVE_HOR_VER_HORIZ__SHIFT 0
-static inline uint32_t EDP_ACTIVE_HOR_VER_HORIZ(uint32_t val)
-{
- return ((val) << EDP_ACTIVE_HOR_VER_HORIZ__SHIFT) & EDP_ACTIVE_HOR_VER_HORIZ__MASK;
-}
-#define EDP_ACTIVE_HOR_VER_VERT__MASK 0xffff0000
-#define EDP_ACTIVE_HOR_VER_VERT__SHIFT 16
-static inline uint32_t EDP_ACTIVE_HOR_VER_VERT(uint32_t val)
-{
- return ((val) << EDP_ACTIVE_HOR_VER_VERT__SHIFT) & EDP_ACTIVE_HOR_VER_VERT__MASK;
-}
-
-#define REG_EDP_MISC1_MISC0 0x0000002c
-#define EDP_MISC1_MISC0_MISC0__MASK 0x000000ff
-#define EDP_MISC1_MISC0_MISC0__SHIFT 0
-static inline uint32_t EDP_MISC1_MISC0_MISC0(uint32_t val)
-{
- return ((val) << EDP_MISC1_MISC0_MISC0__SHIFT) & EDP_MISC1_MISC0_MISC0__MASK;
-}
-#define EDP_MISC1_MISC0_SYNC 0x00000001
-#define EDP_MISC1_MISC0_COMPONENT_FORMAT__MASK 0x00000006
-#define EDP_MISC1_MISC0_COMPONENT_FORMAT__SHIFT 1
-static inline uint32_t EDP_MISC1_MISC0_COMPONENT_FORMAT(enum edp_component_format val)
-{
- return ((val) << EDP_MISC1_MISC0_COMPONENT_FORMAT__SHIFT) & EDP_MISC1_MISC0_COMPONENT_FORMAT__MASK;
-}
-#define EDP_MISC1_MISC0_CEA 0x00000008
-#define EDP_MISC1_MISC0_BT709_5 0x00000010
-#define EDP_MISC1_MISC0_COLOR__MASK 0x000000e0
-#define EDP_MISC1_MISC0_COLOR__SHIFT 5
-static inline uint32_t EDP_MISC1_MISC0_COLOR(enum edp_color_depth val)
-{
- return ((val) << EDP_MISC1_MISC0_COLOR__SHIFT) & EDP_MISC1_MISC0_COLOR__MASK;
-}
-#define EDP_MISC1_MISC0_MISC1__MASK 0x0000ff00
-#define EDP_MISC1_MISC0_MISC1__SHIFT 8
-static inline uint32_t EDP_MISC1_MISC0_MISC1(uint32_t val)
-{
- return ((val) << EDP_MISC1_MISC0_MISC1__SHIFT) & EDP_MISC1_MISC0_MISC1__MASK;
-}
-#define EDP_MISC1_MISC0_INTERLACED_ODD 0x00000100
-#define EDP_MISC1_MISC0_STEREO__MASK 0x00000600
-#define EDP_MISC1_MISC0_STEREO__SHIFT 9
-static inline uint32_t EDP_MISC1_MISC0_STEREO(uint32_t val)
-{
- return ((val) << EDP_MISC1_MISC0_STEREO__SHIFT) & EDP_MISC1_MISC0_STEREO__MASK;
-}
-
-#define REG_EDP_PHY_CTRL 0x00000074
-#define EDP_PHY_CTRL_SW_RESET_PLL 0x00000001
-#define EDP_PHY_CTRL_SW_RESET 0x00000004
-
-#define REG_EDP_MAINLINK_READY 0x00000084
-#define EDP_MAINLINK_READY_TRAIN_PATTERN_1_READY 0x00000008
-#define EDP_MAINLINK_READY_TRAIN_PATTERN_2_READY 0x00000010
-#define EDP_MAINLINK_READY_TRAIN_PATTERN_3_READY 0x00000020
-
-#define REG_EDP_AUX_CTRL 0x00000300
-#define EDP_AUX_CTRL_ENABLE 0x00000001
-#define EDP_AUX_CTRL_RESET 0x00000002
-
-#define REG_EDP_INTERRUPT_REG_1 0x00000308
-#define EDP_INTERRUPT_REG_1_HPD 0x00000001
-#define EDP_INTERRUPT_REG_1_HPD_ACK 0x00000002
-#define EDP_INTERRUPT_REG_1_HPD_EN 0x00000004
-#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE 0x00000008
-#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE_ACK 0x00000010
-#define EDP_INTERRUPT_REG_1_AUX_I2C_DONE_EN 0x00000020
-#define EDP_INTERRUPT_REG_1_WRONG_ADDR 0x00000040
-#define EDP_INTERRUPT_REG_1_WRONG_ADDR_ACK 0x00000080
-#define EDP_INTERRUPT_REG_1_WRONG_ADDR_EN 0x00000100
-#define EDP_INTERRUPT_REG_1_TIMEOUT 0x00000200
-#define EDP_INTERRUPT_REG_1_TIMEOUT_ACK 0x00000400
-#define EDP_INTERRUPT_REG_1_TIMEOUT_EN 0x00000800
-#define EDP_INTERRUPT_REG_1_NACK_DEFER 0x00001000
-#define EDP_INTERRUPT_REG_1_NACK_DEFER_ACK 0x00002000
-#define EDP_INTERRUPT_REG_1_NACK_DEFER_EN 0x00004000
-#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT 0x00008000
-#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT_ACK 0x00010000
-#define EDP_INTERRUPT_REG_1_WRONG_DATA_CNT_EN 0x00020000
-#define EDP_INTERRUPT_REG_1_I2C_NACK 0x00040000
-#define EDP_INTERRUPT_REG_1_I2C_NACK_ACK 0x00080000
-#define EDP_INTERRUPT_REG_1_I2C_NACK_EN 0x00100000
-#define EDP_INTERRUPT_REG_1_I2C_DEFER 0x00200000
-#define EDP_INTERRUPT_REG_1_I2C_DEFER_ACK 0x00400000
-#define EDP_INTERRUPT_REG_1_I2C_DEFER_EN 0x00800000
-#define EDP_INTERRUPT_REG_1_PLL_UNLOCK 0x01000000
-#define EDP_INTERRUPT_REG_1_PLL_UNLOCK_ACK 0x02000000
-#define EDP_INTERRUPT_REG_1_PLL_UNLOCK_EN 0x04000000
-#define EDP_INTERRUPT_REG_1_AUX_ERROR 0x08000000
-#define EDP_INTERRUPT_REG_1_AUX_ERROR_ACK 0x10000000
-#define EDP_INTERRUPT_REG_1_AUX_ERROR_EN 0x20000000
-
-#define REG_EDP_INTERRUPT_REG_2 0x0000030c
-#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO 0x00000001
-#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO_ACK 0x00000002
-#define EDP_INTERRUPT_REG_2_READY_FOR_VIDEO_EN 0x00000004
-#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT 0x00000008
-#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT_ACK 0x00000010
-#define EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT_EN 0x00000020
-#define EDP_INTERRUPT_REG_2_FRAME_END 0x00000200
-#define EDP_INTERRUPT_REG_2_FRAME_END_ACK 0x00000080
-#define EDP_INTERRUPT_REG_2_FRAME_END_EN 0x00000100
-#define EDP_INTERRUPT_REG_2_CRC_UPDATED 0x00000200
-#define EDP_INTERRUPT_REG_2_CRC_UPDATED_ACK 0x00000400
-#define EDP_INTERRUPT_REG_2_CRC_UPDATED_EN 0x00000800
-
-#define REG_EDP_INTERRUPT_TRANS_NUM 0x00000310
-
-#define REG_EDP_AUX_DATA 0x00000314
-#define EDP_AUX_DATA_READ 0x00000001
-#define EDP_AUX_DATA_DATA__MASK 0x0000ff00
-#define EDP_AUX_DATA_DATA__SHIFT 8
-static inline uint32_t EDP_AUX_DATA_DATA(uint32_t val)
-{
- return ((val) << EDP_AUX_DATA_DATA__SHIFT) & EDP_AUX_DATA_DATA__MASK;
-}
-#define EDP_AUX_DATA_INDEX__MASK 0x00ff0000
-#define EDP_AUX_DATA_INDEX__SHIFT 16
-static inline uint32_t EDP_AUX_DATA_INDEX(uint32_t val)
-{
- return ((val) << EDP_AUX_DATA_INDEX__SHIFT) & EDP_AUX_DATA_INDEX__MASK;
-}
-#define EDP_AUX_DATA_INDEX_WRITE 0x80000000
-
-#define REG_EDP_AUX_TRANS_CTRL 0x00000318
-#define EDP_AUX_TRANS_CTRL_I2C 0x00000100
-#define EDP_AUX_TRANS_CTRL_GO 0x00000200
-
-#define REG_EDP_AUX_STATUS 0x00000324
-
-static inline uint32_t REG_EDP_PHY_LN(uint32_t i0) { return 0x00000400 + 0x40*i0; }
-
-static inline uint32_t REG_EDP_PHY_LN_PD_CTL(uint32_t i0) { return 0x00000404 + 0x40*i0; }
-
-#define REG_EDP_PHY_GLB_VM_CFG0 0x00000510
-
-#define REG_EDP_PHY_GLB_VM_CFG1 0x00000514
-
-#define REG_EDP_PHY_GLB_MISC9 0x00000518
-
-#define REG_EDP_PHY_GLB_CFG 0x00000528
-
-#define REG_EDP_PHY_GLB_PD_CTL 0x0000052c
-
-#define REG_EDP_PHY_GLB_PHY_STATUS 0x00000598
-
-#define REG_EDP_28nm_PHY_PLL_REFCLK_CFG 0x00000000
-
-#define REG_EDP_28nm_PHY_PLL_POSTDIV1_CFG 0x00000004
-
-#define REG_EDP_28nm_PHY_PLL_CHGPUMP_CFG 0x00000008
-
-#define REG_EDP_28nm_PHY_PLL_VCOLPF_CFG 0x0000000c
-
-#define REG_EDP_28nm_PHY_PLL_VREG_CFG 0x00000010
-
-#define REG_EDP_28nm_PHY_PLL_PWRGEN_CFG 0x00000014
-
-#define REG_EDP_28nm_PHY_PLL_DMUX_CFG 0x00000018
-
-#define REG_EDP_28nm_PHY_PLL_AMUX_CFG 0x0000001c
-
-#define REG_EDP_28nm_PHY_PLL_GLB_CFG 0x00000020
-#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_PWRDN_B 0x00000001
-#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_LDO_PWRDN_B 0x00000002
-#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_PWRGEN_PWRDN_B 0x00000004
-#define EDP_28nm_PHY_PLL_GLB_CFG_PLL_ENABLE 0x00000008
-
-#define REG_EDP_28nm_PHY_PLL_POSTDIV2_CFG 0x00000024
-
-#define REG_EDP_28nm_PHY_PLL_POSTDIV3_CFG 0x00000028
-
-#define REG_EDP_28nm_PHY_PLL_LPFR_CFG 0x0000002c
-
-#define REG_EDP_28nm_PHY_PLL_LPFC1_CFG 0x00000030
-
-#define REG_EDP_28nm_PHY_PLL_LPFC2_CFG 0x00000034
-
-#define REG_EDP_28nm_PHY_PLL_SDM_CFG0 0x00000038
-
-#define REG_EDP_28nm_PHY_PLL_SDM_CFG1 0x0000003c
-
-#define REG_EDP_28nm_PHY_PLL_SDM_CFG2 0x00000040
-
-#define REG_EDP_28nm_PHY_PLL_SDM_CFG3 0x00000044
-
-#define REG_EDP_28nm_PHY_PLL_SDM_CFG4 0x00000048
-
-#define REG_EDP_28nm_PHY_PLL_SSC_CFG0 0x0000004c
-
-#define REG_EDP_28nm_PHY_PLL_SSC_CFG1 0x00000050
-
-#define REG_EDP_28nm_PHY_PLL_SSC_CFG2 0x00000054
-
-#define REG_EDP_28nm_PHY_PLL_SSC_CFG3 0x00000058
-
-#define REG_EDP_28nm_PHY_PLL_LKDET_CFG0 0x0000005c
-
-#define REG_EDP_28nm_PHY_PLL_LKDET_CFG1 0x00000060
-
-#define REG_EDP_28nm_PHY_PLL_LKDET_CFG2 0x00000064
-
-#define REG_EDP_28nm_PHY_PLL_TEST_CFG 0x00000068
-#define EDP_28nm_PHY_PLL_TEST_CFG_PLL_SW_RESET 0x00000001
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG0 0x0000006c
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG1 0x00000070
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG2 0x00000074
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG3 0x00000078
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG4 0x0000007c
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG5 0x00000080
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG6 0x00000084
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG7 0x00000088
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG8 0x0000008c
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG9 0x00000090
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG10 0x00000094
-
-#define REG_EDP_28nm_PHY_PLL_CAL_CFG11 0x00000098
-
-#define REG_EDP_28nm_PHY_PLL_EFUSE_CFG 0x0000009c
-
-#define REG_EDP_28nm_PHY_PLL_DEBUG_BUS_SEL 0x000000a0
-
-
-#endif /* EDP_XML */
diff --git a/drivers/gpu/drm/msm/edp/edp_aux.c b/drivers/gpu/drm/msm/edp/edp_aux.c
deleted file mode 100644
index e3d85c622cfb..000000000000
--- a/drivers/gpu/drm/msm/edp/edp_aux.c
+++ /dev/null
@@ -1,265 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include "edp.h"
-#include "edp.xml.h"
-
-#define AUX_CMD_FIFO_LEN 144
-#define AUX_CMD_NATIVE_MAX 16
-#define AUX_CMD_I2C_MAX 128
-
-#define EDP_INTR_AUX_I2C_ERR \
- (EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \
- EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \
- EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER)
-#define EDP_INTR_TRANS_STATUS \
- (EDP_INTERRUPT_REG_1_AUX_I2C_DONE | EDP_INTR_AUX_I2C_ERR)
-
-struct edp_aux {
- void __iomem *base;
- bool msg_err;
-
- struct completion msg_comp;
-
- /* To prevent the message transaction routine from reentry. */
- struct mutex msg_mutex;
-
- struct drm_dp_aux drm_aux;
-};
-#define to_edp_aux(x) container_of(x, struct edp_aux, drm_aux)
-
-static int edp_msg_fifo_tx(struct edp_aux *aux, struct drm_dp_aux_msg *msg)
-{
- u32 data[4];
- u32 reg, len;
- bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
- bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
- u8 *msgdata = msg->buffer;
- int i;
-
- if (read)
- len = 4;
- else
- len = msg->size + 4;
-
- /*
- * cmd fifo only has depth of 144 bytes
- */
- if (len > AUX_CMD_FIFO_LEN)
- return -EINVAL;
-
- /* Pack cmd and write to HW */
- data[0] = (msg->address >> 16) & 0xf; /* addr[19:16] */
- if (read)
- data[0] |= BIT(4); /* R/W */
-
- data[1] = (msg->address >> 8) & 0xff; /* addr[15:8] */
- data[2] = msg->address & 0xff; /* addr[7:0] */
- data[3] = (msg->size - 1) & 0xff; /* len[7:0] */
-
- for (i = 0; i < len; i++) {
- reg = (i < 4) ? data[i] : msgdata[i - 4];
- reg = EDP_AUX_DATA_DATA(reg); /* index = 0, write */
- if (i == 0)
- reg |= EDP_AUX_DATA_INDEX_WRITE;
- edp_write(aux->base + REG_EDP_AUX_DATA, reg);
- }
-
- reg = 0; /* Transaction number is always 1 */
- if (!native) /* i2c */
- reg |= EDP_AUX_TRANS_CTRL_I2C;
-
- reg |= EDP_AUX_TRANS_CTRL_GO;
- edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, reg);
-
- return 0;
-}
-
-static int edp_msg_fifo_rx(struct edp_aux *aux, struct drm_dp_aux_msg *msg)
-{
- u32 data;
- u8 *dp;
- int i;
- u32 len = msg->size;
-
- edp_write(aux->base + REG_EDP_AUX_DATA,
- EDP_AUX_DATA_INDEX_WRITE | EDP_AUX_DATA_READ); /* index = 0 */
-
- dp = msg->buffer;
-
- /* discard first byte */
- data = edp_read(aux->base + REG_EDP_AUX_DATA);
- for (i = 0; i < len; i++) {
- data = edp_read(aux->base + REG_EDP_AUX_DATA);
- dp[i] = (u8)((data >> 8) & 0xff);
- }
-
- return 0;
-}
-
-/*
- * This function does the real job to process an AUX transaction.
- * It will call msm_edp_aux_ctrl() function to reset the AUX channel,
- * if the waiting is timeout.
- * The caller who triggers the transaction should avoid the
- * msm_edp_aux_ctrl() running concurrently in other threads, i.e.
- * start transaction only when AUX channel is fully enabled.
- */
-static ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux,
- struct drm_dp_aux_msg *msg)
-{
- struct edp_aux *aux = to_edp_aux(drm_aux);
- ssize_t ret;
- unsigned long time_left;
- bool native = msg->request & (DP_AUX_NATIVE_WRITE & DP_AUX_NATIVE_READ);
- bool read = msg->request & (DP_AUX_I2C_READ & DP_AUX_NATIVE_READ);
-
- /* Ignore address only message */
- if ((msg->size == 0) || (msg->buffer == NULL)) {
- msg->reply = native ?
- DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
- return msg->size;
- }
-
- /* msg sanity check */
- if ((native && (msg->size > AUX_CMD_NATIVE_MAX)) ||
- (msg->size > AUX_CMD_I2C_MAX)) {
- pr_err("%s: invalid msg: size(%zu), request(%x)\n",
- __func__, msg->size, msg->request);
- return -EINVAL;
- }
-
- mutex_lock(&aux->msg_mutex);
-
- aux->msg_err = false;
- reinit_completion(&aux->msg_comp);
-
- ret = edp_msg_fifo_tx(aux, msg);
- if (ret < 0)
- goto unlock_exit;
-
- DBG("wait_for_completion");
- time_left = wait_for_completion_timeout(&aux->msg_comp,
- msecs_to_jiffies(300));
- if (!time_left) {
- /*
- * Clear GO and reset AUX channel
- * to cancel the current transaction.
- */
- edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0);
- msm_edp_aux_ctrl(aux, 1);
- pr_err("%s: aux timeout,\n", __func__);
- ret = -ETIMEDOUT;
- goto unlock_exit;
- }
- DBG("completion");
-
- if (!aux->msg_err) {
- if (read) {
- ret = edp_msg_fifo_rx(aux, msg);
- if (ret < 0)
- goto unlock_exit;
- }
-
- msg->reply = native ?
- DP_AUX_NATIVE_REPLY_ACK : DP_AUX_I2C_REPLY_ACK;
- } else {
- /* Reply defer to retry */
- msg->reply = native ?
- DP_AUX_NATIVE_REPLY_DEFER : DP_AUX_I2C_REPLY_DEFER;
- /*
- * The sleep time in caller is not long enough to make sure
- * our H/W completes transactions. Add more defer time here.
- */
- msleep(100);
- }
-
- /* Return requested size for success or retry */
- ret = msg->size;
-
-unlock_exit:
- mutex_unlock(&aux->msg_mutex);
- return ret;
-}
-
-void *msm_edp_aux_init(struct msm_edp *edp, void __iomem *regbase, struct drm_dp_aux **drm_aux)
-{
- struct device *dev = &edp->pdev->dev;
- struct edp_aux *aux = NULL;
- int ret;
-
- DBG("");
- aux = devm_kzalloc(dev, sizeof(*aux), GFP_KERNEL);
- if (!aux)
- return NULL;
-
- aux->base = regbase;
- mutex_init(&aux->msg_mutex);
- init_completion(&aux->msg_comp);
-
- aux->drm_aux.name = "msm_edp_aux";
- aux->drm_aux.dev = dev;
- aux->drm_aux.drm_dev = edp->dev;
- aux->drm_aux.transfer = edp_aux_transfer;
- ret = drm_dp_aux_register(&aux->drm_aux);
- if (ret) {
- pr_err("%s: failed to register drm aux: %d\n", __func__, ret);
- mutex_destroy(&aux->msg_mutex);
- }
-
- if (drm_aux && aux)
- *drm_aux = &aux->drm_aux;
-
- return aux;
-}
-
-void msm_edp_aux_destroy(struct device *dev, struct edp_aux *aux)
-{
- if (aux) {
- drm_dp_aux_unregister(&aux->drm_aux);
- mutex_destroy(&aux->msg_mutex);
- }
-}
-
-irqreturn_t msm_edp_aux_irq(struct edp_aux *aux, u32 isr)
-{
- if (isr & EDP_INTR_TRANS_STATUS) {
- DBG("isr=%x", isr);
- edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0);
-
- if (isr & EDP_INTR_AUX_I2C_ERR)
- aux->msg_err = true;
- else
- aux->msg_err = false;
-
- complete(&aux->msg_comp);
- }
-
- return IRQ_HANDLED;
-}
-
-void msm_edp_aux_ctrl(struct edp_aux *aux, int enable)
-{
- u32 data;
-
- DBG("enable=%d", enable);
- data = edp_read(aux->base + REG_EDP_AUX_CTRL);
-
- if (enable) {
- data |= EDP_AUX_CTRL_RESET;
- edp_write(aux->base + REG_EDP_AUX_CTRL, data);
- /* Make sure full reset */
- wmb();
- usleep_range(500, 1000);
-
- data &= ~EDP_AUX_CTRL_RESET;
- data |= EDP_AUX_CTRL_ENABLE;
- edp_write(aux->base + REG_EDP_AUX_CTRL, data);
- } else {
- data &= ~EDP_AUX_CTRL_ENABLE;
- edp_write(aux->base + REG_EDP_AUX_CTRL, data);
- }
-}
-
diff --git a/drivers/gpu/drm/msm/edp/edp_bridge.c b/drivers/gpu/drm/msm/edp/edp_bridge.c
deleted file mode 100644
index c69a37e0c708..000000000000
--- a/drivers/gpu/drm/msm/edp/edp_bridge.c
+++ /dev/null
@@ -1,111 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include "edp.h"
-
-struct edp_bridge {
- struct drm_bridge base;
- struct msm_edp *edp;
-};
-#define to_edp_bridge(x) container_of(x, struct edp_bridge, base)
-
-void edp_bridge_destroy(struct drm_bridge *bridge)
-{
-}
-
-static void edp_bridge_pre_enable(struct drm_bridge *bridge)
-{
- struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
- struct msm_edp *edp = edp_bridge->edp;
-
- DBG("");
- msm_edp_ctrl_power(edp->ctrl, true);
-}
-
-static void edp_bridge_enable(struct drm_bridge *bridge)
-{
- DBG("");
-}
-
-static void edp_bridge_disable(struct drm_bridge *bridge)
-{
- DBG("");
-}
-
-static void edp_bridge_post_disable(struct drm_bridge *bridge)
-{
- struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
- struct msm_edp *edp = edp_bridge->edp;
-
- DBG("");
- msm_edp_ctrl_power(edp->ctrl, false);
-}
-
-static void edp_bridge_mode_set(struct drm_bridge *bridge,
- const struct drm_display_mode *mode,
- const struct drm_display_mode *adjusted_mode)
-{
- struct drm_device *dev = bridge->dev;
- struct drm_connector *connector;
- struct edp_bridge *edp_bridge = to_edp_bridge(bridge);
- struct msm_edp *edp = edp_bridge->edp;
-
- DBG("set mode: " DRM_MODE_FMT, DRM_MODE_ARG(mode));
-
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- struct drm_encoder *encoder = connector->encoder;
- struct drm_bridge *first_bridge;
-
- if (!connector->encoder)
- continue;
-
- first_bridge = drm_bridge_chain_get_first_bridge(encoder);
- if (bridge == first_bridge) {
- msm_edp_ctrl_timing_cfg(edp->ctrl,
- adjusted_mode, &connector->display_info);
- break;
- }
- }
-}
-
-static const struct drm_bridge_funcs edp_bridge_funcs = {
- .pre_enable = edp_bridge_pre_enable,
- .enable = edp_bridge_enable,
- .disable = edp_bridge_disable,
- .post_disable = edp_bridge_post_disable,
- .mode_set = edp_bridge_mode_set,
-};
-
-/* initialize bridge */
-struct drm_bridge *msm_edp_bridge_init(struct msm_edp *edp)
-{
- struct drm_bridge *bridge = NULL;
- struct edp_bridge *edp_bridge;
- int ret;
-
- edp_bridge = devm_kzalloc(edp->dev->dev,
- sizeof(*edp_bridge), GFP_KERNEL);
- if (!edp_bridge) {
- ret = -ENOMEM;
- goto fail;
- }
-
- edp_bridge->edp = edp;
-
- bridge = &edp_bridge->base;
- bridge->funcs = &edp_bridge_funcs;
-
- ret = drm_bridge_attach(edp->encoder, bridge, NULL, 0);
- if (ret)
- goto fail;
-
- return bridge;
-
-fail:
- if (bridge)
- edp_bridge_destroy(bridge);
-
- return ERR_PTR(ret);
-}
diff --git a/drivers/gpu/drm/msm/edp/edp_connector.c b/drivers/gpu/drm/msm/edp/edp_connector.c
deleted file mode 100644
index 73cb5fd97a5a..000000000000
--- a/drivers/gpu/drm/msm/edp/edp_connector.c
+++ /dev/null
@@ -1,132 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include "drm/drm_edid.h"
-#include "msm_kms.h"
-#include "edp.h"
-
-struct edp_connector {
- struct drm_connector base;
- struct msm_edp *edp;
-};
-#define to_edp_connector(x) container_of(x, struct edp_connector, base)
-
-static enum drm_connector_status edp_connector_detect(
- struct drm_connector *connector, bool force)
-{
- struct edp_connector *edp_connector = to_edp_connector(connector);
- struct msm_edp *edp = edp_connector->edp;
-
- DBG("");
- return msm_edp_ctrl_panel_connected(edp->ctrl) ?
- connector_status_connected : connector_status_disconnected;
-}
-
-static void edp_connector_destroy(struct drm_connector *connector)
-{
- struct edp_connector *edp_connector = to_edp_connector(connector);
-
- DBG("");
-
- drm_connector_cleanup(connector);
-
- kfree(edp_connector);
-}
-
-static int edp_connector_get_modes(struct drm_connector *connector)
-{
- struct edp_connector *edp_connector = to_edp_connector(connector);
- struct msm_edp *edp = edp_connector->edp;
-
- struct edid *drm_edid = NULL;
- int ret = 0;
-
- DBG("");
- ret = msm_edp_ctrl_get_panel_info(edp->ctrl, connector, &drm_edid);
- if (ret)
- return ret;
-
- drm_connector_update_edid_property(connector, drm_edid);
- if (drm_edid)
- ret = drm_add_edid_modes(connector, drm_edid);
-
- return ret;
-}
-
-static int edp_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- struct edp_connector *edp_connector = to_edp_connector(connector);
- struct msm_edp *edp = edp_connector->edp;
- struct msm_drm_private *priv = connector->dev->dev_private;
- struct msm_kms *kms = priv->kms;
- long actual, requested;
-
- requested = 1000 * mode->clock;
- actual = kms->funcs->round_pixclk(kms,
- requested, edp_connector->edp->encoder);
-
- DBG("requested=%ld, actual=%ld", requested, actual);
- if (actual != requested)
- return MODE_CLOCK_RANGE;
-
- if (!msm_edp_ctrl_pixel_clock_valid(
- edp->ctrl, mode->clock, NULL, NULL))
- return MODE_CLOCK_RANGE;
-
- /* Invalidate all modes if color format is not supported */
- if (connector->display_info.bpc > 8)
- return MODE_BAD;
-
- return MODE_OK;
-}
-
-static const struct drm_connector_funcs edp_connector_funcs = {
- .detect = edp_connector_detect,
- .fill_modes = drm_helper_probe_single_connector_modes,
- .destroy = edp_connector_destroy,
- .reset = drm_atomic_helper_connector_reset,
- .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
-};
-
-static const struct drm_connector_helper_funcs edp_connector_helper_funcs = {
- .get_modes = edp_connector_get_modes,
- .mode_valid = edp_connector_mode_valid,
-};
-
-/* initialize connector */
-struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
-{
- struct drm_connector *connector = NULL;
- struct edp_connector *edp_connector;
- int ret;
-
- edp_connector = kzalloc(sizeof(*edp_connector), GFP_KERNEL);
- if (!edp_connector)
- return ERR_PTR(-ENOMEM);
-
- edp_connector->edp = edp;
-
- connector = &edp_connector->base;
-
- ret = drm_connector_init(edp->dev, connector, &edp_connector_funcs,
- DRM_MODE_CONNECTOR_eDP);
- if (ret)
- return ERR_PTR(ret);
-
- drm_connector_helper_add(connector, &edp_connector_helper_funcs);
-
- /* We don't support HPD, so only poll status until connected. */
- connector->polled = DRM_CONNECTOR_POLL_CONNECT;
-
-	/* The display driver does not support interlaced modes yet. */
- connector->interlace_allowed = false;
- connector->doublescan_allowed = false;
-
- drm_connector_attach_encoder(connector, edp->encoder);
-
- return connector;
-}
diff --git a/drivers/gpu/drm/msm/edp/edp_ctrl.c b/drivers/gpu/drm/msm/edp/edp_ctrl.c
deleted file mode 100644
index a68a4a1867c1..000000000000
--- a/drivers/gpu/drm/msm/edp/edp_ctrl.c
+++ /dev/null
@@ -1,1373 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include <linux/clk.h>
-#include <linux/gpio/consumer.h>
-#include <linux/regulator/consumer.h>
-#include <drm/drm_crtc.h>
-#include <drm/drm_dp_helper.h>
-#include <drm/drm_edid.h>
-
-#include "edp.h"
-#include "edp.xml.h"
-
-#define VDDA_UA_ON_LOAD 100000 /* uA units */
-#define VDDA_UA_OFF_LOAD 100 /* uA units */
-
-#define DPCD_LINK_VOLTAGE_MAX 4
-#define DPCD_LINK_PRE_EMPHASIS_MAX 4
-
-#define EDP_LINK_BW_MAX DP_LINK_BW_2_7
-
-/* Link training return value */
-#define EDP_TRAIN_FAIL -1
-#define EDP_TRAIN_SUCCESS 0
-#define EDP_TRAIN_RECONFIG 1
-
-#define EDP_CLK_MASK_AHB BIT(0)
-#define EDP_CLK_MASK_AUX BIT(1)
-#define EDP_CLK_MASK_LINK BIT(2)
-#define EDP_CLK_MASK_PIXEL BIT(3)
-#define EDP_CLK_MASK_MDP_CORE BIT(4)
-#define EDP_CLK_MASK_LINK_CHAN (EDP_CLK_MASK_LINK | EDP_CLK_MASK_PIXEL)
-#define EDP_CLK_MASK_AUX_CHAN \
- (EDP_CLK_MASK_AHB | EDP_CLK_MASK_AUX | EDP_CLK_MASK_MDP_CORE)
-#define EDP_CLK_MASK_ALL (EDP_CLK_MASK_AUX_CHAN | EDP_CLK_MASK_LINK_CHAN)
-
-#define EDP_BACKLIGHT_MAX 255
-
-#define EDP_INTR_STATUS1 \
- (EDP_INTERRUPT_REG_1_HPD | EDP_INTERRUPT_REG_1_AUX_I2C_DONE | \
- EDP_INTERRUPT_REG_1_WRONG_ADDR | EDP_INTERRUPT_REG_1_TIMEOUT | \
- EDP_INTERRUPT_REG_1_NACK_DEFER | EDP_INTERRUPT_REG_1_WRONG_DATA_CNT | \
- EDP_INTERRUPT_REG_1_I2C_NACK | EDP_INTERRUPT_REG_1_I2C_DEFER | \
- EDP_INTERRUPT_REG_1_PLL_UNLOCK | EDP_INTERRUPT_REG_1_AUX_ERROR)
-#define EDP_INTR_MASK1 (EDP_INTR_STATUS1 << 2)
-#define EDP_INTR_STATUS2 \
- (EDP_INTERRUPT_REG_2_READY_FOR_VIDEO | \
- EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT | \
- EDP_INTERRUPT_REG_2_FRAME_END | EDP_INTERRUPT_REG_2_CRC_UPDATED)
-#define EDP_INTR_MASK2 (EDP_INTR_STATUS2 << 2)
-
-struct edp_ctrl {
- struct platform_device *pdev;
-
- void __iomem *base;
-
- /* regulators */
- struct regulator *vdda_vreg; /* 1.8 V */
- struct regulator *lvl_vreg;
-
- /* clocks */
- struct clk *aux_clk;
- struct clk *pixel_clk;
- struct clk *ahb_clk;
- struct clk *link_clk;
- struct clk *mdp_core_clk;
-
- /* gpios */
- struct gpio_desc *panel_en_gpio;
- struct gpio_desc *panel_hpd_gpio;
-
- /* completion and mutex */
- struct completion idle_comp;
- struct mutex dev_mutex; /* To protect device power status */
-
- /* work queue */
- struct work_struct on_work;
- struct work_struct off_work;
- struct workqueue_struct *workqueue;
-
- /* Interrupt register lock */
- spinlock_t irq_lock;
-
- bool edp_connected;
- bool power_on;
-
- /* edid raw data */
- struct edid *edid;
-
- struct drm_dp_aux *drm_aux;
-
- /* dpcd raw data */
- u8 dpcd[DP_RECEIVER_CAP_SIZE];
-
- /* Link status */
- u8 link_rate;
- u8 lane_cnt;
- u8 v_level;
- u8 p_level;
-
- /* Timing status */
- u8 interlaced;
- u32 pixel_rate; /* in kHz */
- u32 color_depth;
-
- struct edp_aux *aux;
- struct edp_phy *phy;
-};
-
-struct edp_pixel_clk_div {
- u32 rate; /* in kHz */
- u32 m;
- u32 n;
-};
-
-#define EDP_PIXEL_CLK_NUM 8
-static const struct edp_pixel_clk_div clk_divs[2][EDP_PIXEL_CLK_NUM] = {
- { /* Link clock = 162MHz, source clock = 810MHz */
- {119000, 31, 211}, /* WSXGA+ 1680x1050@60Hz CVT */
- {130250, 32, 199}, /* UXGA 1600x1200@60Hz CVT */
- {148500, 11, 60}, /* FHD 1920x1080@60Hz */
- {154000, 50, 263}, /* WUXGA 1920x1200@60Hz CVT */
- {209250, 31, 120}, /* QXGA 2048x1536@60Hz CVT */
- {268500, 119, 359}, /* WQXGA 2560x1600@60Hz CVT */
- {138530, 33, 193}, /* AUO B116HAN03.0 Panel */
- {141400, 48, 275}, /* AUO B133HTN01.2 Panel */
- },
- { /* Link clock = 270MHz, source clock = 675MHz */
- {119000, 52, 295}, /* WSXGA+ 1680x1050@60Hz CVT */
- {130250, 11, 57}, /* UXGA 1600x1200@60Hz CVT */
- {148500, 11, 50}, /* FHD 1920x1080@60Hz */
- {154000, 47, 206}, /* WUXGA 1920x1200@60Hz CVT */
- {209250, 31, 100}, /* QXGA 2048x1536@60Hz CVT */
- {268500, 107, 269}, /* WQXGA 2560x1600@60Hz CVT */
- {138530, 63, 307}, /* AUO B116HAN03.0 Panel */
- {141400, 53, 253}, /* AUO B133HTN01.2 Panel */
- },
-};
-
-static int edp_clk_init(struct edp_ctrl *ctrl)
-{
- struct platform_device *pdev = ctrl->pdev;
- int ret;
-
- ctrl->aux_clk = msm_clk_get(pdev, "core");
- if (IS_ERR(ctrl->aux_clk)) {
- ret = PTR_ERR(ctrl->aux_clk);
- pr_err("%s: Can't find core clock, %d\n", __func__, ret);
- ctrl->aux_clk = NULL;
- return ret;
- }
-
- ctrl->pixel_clk = msm_clk_get(pdev, "pixel");
- if (IS_ERR(ctrl->pixel_clk)) {
- ret = PTR_ERR(ctrl->pixel_clk);
- pr_err("%s: Can't find pixel clock, %d\n", __func__, ret);
- ctrl->pixel_clk = NULL;
- return ret;
- }
-
- ctrl->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(ctrl->ahb_clk)) {
- ret = PTR_ERR(ctrl->ahb_clk);
- pr_err("%s: Can't find iface clock, %d\n", __func__, ret);
- ctrl->ahb_clk = NULL;
- return ret;
- }
-
- ctrl->link_clk = msm_clk_get(pdev, "link");
- if (IS_ERR(ctrl->link_clk)) {
- ret = PTR_ERR(ctrl->link_clk);
- pr_err("%s: Can't find link clock, %d\n", __func__, ret);
- ctrl->link_clk = NULL;
- return ret;
- }
-
- /* need mdp core clock to receive irq */
- ctrl->mdp_core_clk = msm_clk_get(pdev, "mdp_core");
- if (IS_ERR(ctrl->mdp_core_clk)) {
- ret = PTR_ERR(ctrl->mdp_core_clk);
- pr_err("%s: Can't find mdp_core clock, %d\n", __func__, ret);
- ctrl->mdp_core_clk = NULL;
- return ret;
- }
-
- return 0;
-}
-
-static int edp_clk_enable(struct edp_ctrl *ctrl, u32 clk_mask)
-{
- int ret;
-
- DBG("mask=%x", clk_mask);
- /* ahb_clk should be enabled first */
- if (clk_mask & EDP_CLK_MASK_AHB) {
- ret = clk_prepare_enable(ctrl->ahb_clk);
- if (ret) {
- pr_err("%s: Failed to enable ahb clk\n", __func__);
- goto f0;
- }
- }
- if (clk_mask & EDP_CLK_MASK_AUX) {
- ret = clk_set_rate(ctrl->aux_clk, 19200000);
- if (ret) {
- pr_err("%s: Failed to set rate aux clk\n", __func__);
- goto f1;
- }
- ret = clk_prepare_enable(ctrl->aux_clk);
- if (ret) {
- pr_err("%s: Failed to enable aux clk\n", __func__);
- goto f1;
- }
- }
- /* Need to set rate and enable link_clk prior to pixel_clk */
- if (clk_mask & EDP_CLK_MASK_LINK) {
- DBG("edp->link_clk, set_rate %ld",
- (unsigned long)ctrl->link_rate * 27000000);
- ret = clk_set_rate(ctrl->link_clk,
- (unsigned long)ctrl->link_rate * 27000000);
- if (ret) {
- pr_err("%s: Failed to set rate to link clk\n",
- __func__);
- goto f2;
- }
-
- ret = clk_prepare_enable(ctrl->link_clk);
- if (ret) {
- pr_err("%s: Failed to enable link clk\n", __func__);
- goto f2;
- }
- }
- if (clk_mask & EDP_CLK_MASK_PIXEL) {
- DBG("edp->pixel_clk, set_rate %ld",
- (unsigned long)ctrl->pixel_rate * 1000);
- ret = clk_set_rate(ctrl->pixel_clk,
- (unsigned long)ctrl->pixel_rate * 1000);
- if (ret) {
- pr_err("%s: Failed to set rate to pixel clk\n",
- __func__);
- goto f3;
- }
-
- ret = clk_prepare_enable(ctrl->pixel_clk);
- if (ret) {
- pr_err("%s: Failed to enable pixel clk\n", __func__);
- goto f3;
- }
- }
- if (clk_mask & EDP_CLK_MASK_MDP_CORE) {
- ret = clk_prepare_enable(ctrl->mdp_core_clk);
- if (ret) {
- pr_err("%s: Failed to enable mdp core clk\n", __func__);
- goto f4;
- }
- }
-
- return 0;
-
-f4:
- if (clk_mask & EDP_CLK_MASK_PIXEL)
- clk_disable_unprepare(ctrl->pixel_clk);
-f3:
- if (clk_mask & EDP_CLK_MASK_LINK)
- clk_disable_unprepare(ctrl->link_clk);
-f2:
- if (clk_mask & EDP_CLK_MASK_AUX)
- clk_disable_unprepare(ctrl->aux_clk);
-f1:
- if (clk_mask & EDP_CLK_MASK_AHB)
- clk_disable_unprepare(ctrl->ahb_clk);
-f0:
- return ret;
-}
-
-static void edp_clk_disable(struct edp_ctrl *ctrl, u32 clk_mask)
-{
- if (clk_mask & EDP_CLK_MASK_MDP_CORE)
- clk_disable_unprepare(ctrl->mdp_core_clk);
- if (clk_mask & EDP_CLK_MASK_PIXEL)
- clk_disable_unprepare(ctrl->pixel_clk);
- if (clk_mask & EDP_CLK_MASK_LINK)
- clk_disable_unprepare(ctrl->link_clk);
- if (clk_mask & EDP_CLK_MASK_AUX)
- clk_disable_unprepare(ctrl->aux_clk);
- if (clk_mask & EDP_CLK_MASK_AHB)
- clk_disable_unprepare(ctrl->ahb_clk);
-}
-
-static int edp_regulator_init(struct edp_ctrl *ctrl)
-{
- struct device *dev = &ctrl->pdev->dev;
- int ret;
-
- DBG("");
- ctrl->vdda_vreg = devm_regulator_get(dev, "vdda");
- ret = PTR_ERR_OR_ZERO(ctrl->vdda_vreg);
- if (ret) {
- pr_err("%s: Could not get vdda reg, ret = %d\n", __func__,
- ret);
- ctrl->vdda_vreg = NULL;
- return ret;
- }
- ctrl->lvl_vreg = devm_regulator_get(dev, "lvl-vdd");
- ret = PTR_ERR_OR_ZERO(ctrl->lvl_vreg);
- if (ret) {
- pr_err("%s: Could not get lvl-vdd reg, ret = %d\n", __func__,
- ret);
- ctrl->lvl_vreg = NULL;
- return ret;
- }
-
- return 0;
-}
-
-static int edp_regulator_enable(struct edp_ctrl *ctrl)
-{
- int ret;
-
- ret = regulator_set_load(ctrl->vdda_vreg, VDDA_UA_ON_LOAD);
- if (ret < 0) {
- pr_err("%s: vdda_vreg set regulator mode failed.\n", __func__);
- goto vdda_set_fail;
- }
-
- ret = regulator_enable(ctrl->vdda_vreg);
- if (ret) {
- pr_err("%s: Failed to enable vdda_vreg regulator.\n", __func__);
- goto vdda_enable_fail;
- }
-
- ret = regulator_enable(ctrl->lvl_vreg);
- if (ret) {
- pr_err("Failed to enable lvl-vdd reg regulator, %d", ret);
- goto lvl_enable_fail;
- }
-
- DBG("exit");
- return 0;
-
-lvl_enable_fail:
- regulator_disable(ctrl->vdda_vreg);
-vdda_enable_fail:
- regulator_set_load(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD);
-vdda_set_fail:
- return ret;
-}
-
-static void edp_regulator_disable(struct edp_ctrl *ctrl)
-{
- regulator_disable(ctrl->lvl_vreg);
- regulator_disable(ctrl->vdda_vreg);
- regulator_set_load(ctrl->vdda_vreg, VDDA_UA_OFF_LOAD);
-}
-
-static int edp_gpio_config(struct edp_ctrl *ctrl)
-{
- struct device *dev = &ctrl->pdev->dev;
- int ret;
-
- ctrl->panel_hpd_gpio = devm_gpiod_get(dev, "panel-hpd", GPIOD_IN);
- if (IS_ERR(ctrl->panel_hpd_gpio)) {
- ret = PTR_ERR(ctrl->panel_hpd_gpio);
- ctrl->panel_hpd_gpio = NULL;
- pr_err("%s: cannot get panel-hpd-gpios, %d\n", __func__, ret);
- return ret;
- }
-
- ctrl->panel_en_gpio = devm_gpiod_get(dev, "panel-en", GPIOD_OUT_LOW);
- if (IS_ERR(ctrl->panel_en_gpio)) {
- ret = PTR_ERR(ctrl->panel_en_gpio);
- ctrl->panel_en_gpio = NULL;
- pr_err("%s: cannot get panel-en-gpios, %d\n", __func__, ret);
- return ret;
- }
-
- DBG("gpio on");
-
- return 0;
-}
-
-static void edp_ctrl_irq_enable(struct edp_ctrl *ctrl, int enable)
-{
- unsigned long flags;
-
- DBG("%d", enable);
- spin_lock_irqsave(&ctrl->irq_lock, flags);
- if (enable) {
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, EDP_INTR_MASK1);
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, EDP_INTR_MASK2);
- } else {
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, 0x0);
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, 0x0);
- }
- spin_unlock_irqrestore(&ctrl->irq_lock, flags);
- DBG("exit");
-}
-
-static void edp_fill_link_cfg(struct edp_ctrl *ctrl)
-{
- u32 prate;
- u32 lrate;
- u32 bpp;
- u8 max_lane = drm_dp_max_lane_count(ctrl->dpcd);
- u8 lane;
-
- prate = ctrl->pixel_rate;
- bpp = ctrl->color_depth * 3;
-
- /*
- * By default, use the maximum link rate and minimum lane count,
- * so that we can do rate down shift during link training.
- */
- ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
-
- prate *= bpp;
- prate /= 8; /* in kByte */
-
- lrate = 270000; /* in kHz */
- lrate *= ctrl->link_rate;
- lrate /= 10; /* in kByte, 10 bits --> 8 bits */
-
- for (lane = 1; lane <= max_lane; lane <<= 1) {
- if (lrate >= prate)
- break;
- lrate <<= 1;
- }
-
- ctrl->lane_cnt = lane;
- DBG("rate=%d lane=%d", ctrl->link_rate, ctrl->lane_cnt);
-}
-
-static void edp_config_ctrl(struct edp_ctrl *ctrl)
-{
- u32 data;
- enum edp_color_depth depth;
-
- data = EDP_CONFIGURATION_CTRL_LANES(ctrl->lane_cnt - 1);
-
- if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
- data |= EDP_CONFIGURATION_CTRL_ENHANCED_FRAMING;
-
- depth = EDP_6BIT;
- if (ctrl->color_depth == 8)
- depth = EDP_8BIT;
-
- data |= EDP_CONFIGURATION_CTRL_COLOR(depth);
-
- if (!ctrl->interlaced) /* progressive */
- data |= EDP_CONFIGURATION_CTRL_PROGRESSIVE;
-
- data |= (EDP_CONFIGURATION_CTRL_SYNC_CLK |
- EDP_CONFIGURATION_CTRL_STATIC_MVID);
-
- edp_write(ctrl->base + REG_EDP_CONFIGURATION_CTRL, data);
-}
-
-static void edp_state_ctrl(struct edp_ctrl *ctrl, u32 state)
-{
- edp_write(ctrl->base + REG_EDP_STATE_CTRL, state);
- /* Make sure H/W status is set */
- wmb();
-}
-
-static int edp_lane_set_write(struct edp_ctrl *ctrl,
- u8 voltage_level, u8 pre_emphasis_level)
-{
- int i;
- u8 buf[4];
-
- if (voltage_level >= DPCD_LINK_VOLTAGE_MAX)
- voltage_level |= 0x04;
-
- if (pre_emphasis_level >= DPCD_LINK_PRE_EMPHASIS_MAX)
- pre_emphasis_level |= 0x04;
-
- pre_emphasis_level <<= 3;
-
- for (i = 0; i < 4; i++)
- buf[i] = voltage_level | pre_emphasis_level;
-
- DBG("%s: p|v=0x%x", __func__, voltage_level | pre_emphasis_level);
- if (drm_dp_dpcd_write(ctrl->drm_aux, 0x103, buf, 4) < 4) {
- pr_err("%s: Set sw/pe to panel failed\n", __func__);
- return -ENOLINK;
- }
-
- return 0;
-}
-
-static int edp_train_pattern_set_write(struct edp_ctrl *ctrl, u8 pattern)
-{
- u8 p = pattern;
-
- DBG("pattern=%x", p);
- if (drm_dp_dpcd_write(ctrl->drm_aux,
- DP_TRAINING_PATTERN_SET, &p, 1) < 1) {
- pr_err("%s: Set training pattern to panel failed\n", __func__);
- return -ENOLINK;
- }
-
- return 0;
-}
-
-static void edp_sink_train_set_adjust(struct edp_ctrl *ctrl,
- const u8 *link_status)
-{
- int i;
- u8 max = 0;
- u8 data;
-
- /* use the max level across lanes */
- for (i = 0; i < ctrl->lane_cnt; i++) {
- data = drm_dp_get_adjust_request_voltage(link_status, i);
- DBG("lane=%d req_voltage_swing=0x%x", i, data);
- if (max < data)
- max = data;
- }
-
- ctrl->v_level = max >> DP_TRAIN_VOLTAGE_SWING_SHIFT;
-
- /* use the max level across lanes */
- max = 0;
- for (i = 0; i < ctrl->lane_cnt; i++) {
- data = drm_dp_get_adjust_request_pre_emphasis(link_status, i);
- DBG("lane=%d req_pre_emphasis=0x%x", i, data);
- if (max < data)
- max = data;
- }
-
- ctrl->p_level = max >> DP_TRAIN_PRE_EMPHASIS_SHIFT;
- DBG("v_level=%d, p_level=%d", ctrl->v_level, ctrl->p_level);
-}
-
-static void edp_host_train_set(struct edp_ctrl *ctrl, u32 train)
-{
- int cnt = 10;
- u32 data;
- u32 shift = train - 1;
-
- DBG("train=%d", train);
-
- edp_state_ctrl(ctrl, EDP_STATE_CTRL_TRAIN_PATTERN_1 << shift);
- while (--cnt) {
- data = edp_read(ctrl->base + REG_EDP_MAINLINK_READY);
- if (data & (EDP_MAINLINK_READY_TRAIN_PATTERN_1_READY << shift))
- break;
- }
-
- if (cnt == 0)
- pr_err("%s: set link_train=%d failed\n", __func__, train);
-}
-
-static const u8 vm_pre_emphasis[4][4] = {
- {0x03, 0x06, 0x09, 0x0C}, /* pe0, 0 db */
- {0x03, 0x06, 0x09, 0xFF}, /* pe1, 3.5 db */
- {0x03, 0x06, 0xFF, 0xFF}, /* pe2, 6.0 db */
- {0x03, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */
-};
-
-/* voltage swing, 0.2v and 1.0v are not supported */
-static const u8 vm_voltage_swing[4][4] = {
- {0x14, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v */
-	{0x18, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */
-	{0x1A, 0x1E, 0xFF, 0xFF}, /* sw2, 0.8 v */
-	{0x1E, 0xFF, 0xFF, 0xFF}  /* sw3, 1.2 v, optional */
-};
-
-static int edp_voltage_pre_emphasise_set(struct edp_ctrl *ctrl)
-{
- u32 value0;
- u32 value1;
-
- DBG("v=%d p=%d", ctrl->v_level, ctrl->p_level);
-
- value0 = vm_pre_emphasis[(int)(ctrl->v_level)][(int)(ctrl->p_level)];
- value1 = vm_voltage_swing[(int)(ctrl->v_level)][(int)(ctrl->p_level)];
-
- /* Configure host and panel only if both values are allowed */
- if (value0 != 0xFF && value1 != 0xFF) {
- msm_edp_phy_vm_pe_cfg(ctrl->phy, value0, value1);
- return edp_lane_set_write(ctrl, ctrl->v_level, ctrl->p_level);
- }
-
- return -EINVAL;
-}
-
-static int edp_start_link_train_1(struct edp_ctrl *ctrl)
-{
- u8 link_status[DP_LINK_STATUS_SIZE];
- u8 old_v_level;
- int tries;
- int ret;
- int rlen;
-
- DBG("");
-
- edp_host_train_set(ctrl, DP_TRAINING_PATTERN_1);
- ret = edp_voltage_pre_emphasise_set(ctrl);
- if (ret)
- return ret;
- ret = edp_train_pattern_set_write(ctrl,
- DP_TRAINING_PATTERN_1 | DP_RECOVERED_CLOCK_OUT_EN);
- if (ret)
- return ret;
-
- tries = 0;
- old_v_level = ctrl->v_level;
- while (1) {
- drm_dp_link_train_clock_recovery_delay(ctrl->drm_aux, ctrl->dpcd);
-
- rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
- if (rlen < DP_LINK_STATUS_SIZE) {
- pr_err("%s: read link status failed\n", __func__);
- return -ENOLINK;
- }
- if (drm_dp_clock_recovery_ok(link_status, ctrl->lane_cnt)) {
- ret = 0;
- break;
- }
-
- if (ctrl->v_level == DPCD_LINK_VOLTAGE_MAX) {
- ret = -1;
- break;
- }
-
- if (old_v_level == ctrl->v_level) {
- tries++;
- if (tries >= 5) {
- ret = -1;
- break;
- }
- } else {
- tries = 0;
- old_v_level = ctrl->v_level;
- }
-
- edp_sink_train_set_adjust(ctrl, link_status);
- ret = edp_voltage_pre_emphasise_set(ctrl);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static int edp_start_link_train_2(struct edp_ctrl *ctrl)
-{
- u8 link_status[DP_LINK_STATUS_SIZE];
- int tries = 0;
- int ret;
- int rlen;
-
- DBG("");
-
- edp_host_train_set(ctrl, DP_TRAINING_PATTERN_2);
- ret = edp_voltage_pre_emphasise_set(ctrl);
- if (ret)
- return ret;
-
- ret = edp_train_pattern_set_write(ctrl,
- DP_TRAINING_PATTERN_2 | DP_RECOVERED_CLOCK_OUT_EN);
- if (ret)
- return ret;
-
- while (1) {
- drm_dp_link_train_channel_eq_delay(ctrl->drm_aux, ctrl->dpcd);
-
- rlen = drm_dp_dpcd_read_link_status(ctrl->drm_aux, link_status);
- if (rlen < DP_LINK_STATUS_SIZE) {
- pr_err("%s: read link status failed\n", __func__);
- return -ENOLINK;
- }
- if (drm_dp_channel_eq_ok(link_status, ctrl->lane_cnt)) {
- ret = 0;
- break;
- }
-
- tries++;
- if (tries > 10) {
- ret = -1;
- break;
- }
-
- edp_sink_train_set_adjust(ctrl, link_status);
- ret = edp_voltage_pre_emphasise_set(ctrl);
- if (ret)
- return ret;
- }
-
- return ret;
-}
-
-static int edp_link_rate_down_shift(struct edp_ctrl *ctrl)
-{
- u32 prate, lrate, bpp;
- u8 rate, lane, max_lane;
- int changed = 0;
-
- rate = ctrl->link_rate;
- lane = ctrl->lane_cnt;
- max_lane = drm_dp_max_lane_count(ctrl->dpcd);
-
- bpp = ctrl->color_depth * 3;
- prate = ctrl->pixel_rate;
- prate *= bpp;
- prate /= 8; /* in kByte */
-
- if (rate > DP_LINK_BW_1_62 && rate <= EDP_LINK_BW_MAX) {
- rate -= 4; /* reduce rate */
- changed++;
- }
-
- if (changed) {
- if (lane >= 1 && lane < max_lane)
- lane <<= 1; /* increase lane */
-
- lrate = 270000; /* in kHz */
- lrate *= rate;
- lrate /= 10; /* kByte, 10 bits --> 8 bits */
- lrate *= lane;
-
- DBG("new lrate=%u prate=%u(kHz) rate=%d lane=%d p=%u b=%d",
- lrate, prate, rate, lane,
- ctrl->pixel_rate,
- bpp);
-
- if (lrate > prate) {
- ctrl->link_rate = rate;
- ctrl->lane_cnt = lane;
- DBG("new rate=%d %d", rate, lane);
- return 0;
- }
- }
-
- return -EINVAL;
-}
-
-static int edp_clear_training_pattern(struct edp_ctrl *ctrl)
-{
- int ret;
-
- ret = edp_train_pattern_set_write(ctrl, 0);
-
- drm_dp_link_train_channel_eq_delay(ctrl->drm_aux, ctrl->dpcd);
-
- return ret;
-}
-
-static int edp_do_link_train(struct edp_ctrl *ctrl)
-{
- u8 values[2];
- int ret;
-
- DBG("");
- /*
-	 * Write the current link rate and lane count to the panel. They may have
-	 * been adjusted and can differ from the values in the DPCD capabilities.
- */
- values[0] = ctrl->lane_cnt;
- values[1] = ctrl->link_rate;
-
- if (drm_dp_enhanced_frame_cap(ctrl->dpcd))
- values[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN;
-
- if (drm_dp_dpcd_write(ctrl->drm_aux, DP_LINK_BW_SET, values,
- sizeof(values)) < 0)
- return EDP_TRAIN_FAIL;
-
- ctrl->v_level = 0; /* start from default level */
- ctrl->p_level = 0;
-
- edp_state_ctrl(ctrl, 0);
- if (edp_clear_training_pattern(ctrl))
- return EDP_TRAIN_FAIL;
-
- ret = edp_start_link_train_1(ctrl);
- if (ret < 0) {
- if (edp_link_rate_down_shift(ctrl) == 0) {
- DBG("link reconfig");
- ret = EDP_TRAIN_RECONFIG;
- goto clear;
- } else {
-			pr_err("%s: Training 1 failed\n", __func__);
- ret = EDP_TRAIN_FAIL;
- goto clear;
- }
- }
- DBG("Training 1 completed successfully");
-
- edp_state_ctrl(ctrl, 0);
- if (edp_clear_training_pattern(ctrl))
- return EDP_TRAIN_FAIL;
-
- ret = edp_start_link_train_2(ctrl);
- if (ret < 0) {
- if (edp_link_rate_down_shift(ctrl) == 0) {
- DBG("link reconfig");
- ret = EDP_TRAIN_RECONFIG;
- goto clear;
- } else {
-			pr_err("%s: Training 2 failed\n", __func__);
- ret = EDP_TRAIN_FAIL;
- goto clear;
- }
- }
- DBG("Training 2 completed successfully");
-
- edp_state_ctrl(ctrl, EDP_STATE_CTRL_SEND_VIDEO);
-clear:
- edp_clear_training_pattern(ctrl);
-
- return ret;
-}
-
-static void edp_clock_synchrous(struct edp_ctrl *ctrl, int sync)
-{
- u32 data;
- enum edp_color_depth depth;
-
- data = edp_read(ctrl->base + REG_EDP_MISC1_MISC0);
-
- if (sync)
- data |= EDP_MISC1_MISC0_SYNC;
- else
- data &= ~EDP_MISC1_MISC0_SYNC;
-
- /* only legacy rgb mode supported */
- depth = EDP_6BIT; /* Default */
- if (ctrl->color_depth == 8)
- depth = EDP_8BIT;
- else if (ctrl->color_depth == 10)
- depth = EDP_10BIT;
- else if (ctrl->color_depth == 12)
- depth = EDP_12BIT;
- else if (ctrl->color_depth == 16)
- depth = EDP_16BIT;
-
- data |= EDP_MISC1_MISC0_COLOR(depth);
-
- edp_write(ctrl->base + REG_EDP_MISC1_MISC0, data);
-}
-
-static int edp_sw_mvid_nvid(struct edp_ctrl *ctrl, u32 m, u32 n)
-{
- u32 n_multi, m_multi = 5;
-
- if (ctrl->link_rate == DP_LINK_BW_1_62) {
- n_multi = 1;
- } else if (ctrl->link_rate == DP_LINK_BW_2_7) {
- n_multi = 2;
- } else {
- pr_err("%s: Invalid link rate, %d\n", __func__,
- ctrl->link_rate);
- return -EINVAL;
- }
-
- edp_write(ctrl->base + REG_EDP_SOFTWARE_MVID, m * m_multi);
- edp_write(ctrl->base + REG_EDP_SOFTWARE_NVID, n * n_multi);
-
- return 0;
-}
-
-static void edp_mainlink_ctrl(struct edp_ctrl *ctrl, int enable)
-{
- u32 data = 0;
-
- edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, EDP_MAINLINK_CTRL_RESET);
- /* Make sure fully reset */
- wmb();
- usleep_range(500, 1000);
-
- if (enable)
- data |= EDP_MAINLINK_CTRL_ENABLE;
-
- edp_write(ctrl->base + REG_EDP_MAINLINK_CTRL, data);
-}
-
-static void edp_ctrl_phy_aux_enable(struct edp_ctrl *ctrl, int enable)
-{
- if (enable) {
- edp_regulator_enable(ctrl);
- edp_clk_enable(ctrl, EDP_CLK_MASK_AUX_CHAN);
- msm_edp_phy_ctrl(ctrl->phy, 1);
- msm_edp_aux_ctrl(ctrl->aux, 1);
- gpiod_set_value(ctrl->panel_en_gpio, 1);
- } else {
- gpiod_set_value(ctrl->panel_en_gpio, 0);
- msm_edp_aux_ctrl(ctrl->aux, 0);
- msm_edp_phy_ctrl(ctrl->phy, 0);
- edp_clk_disable(ctrl, EDP_CLK_MASK_AUX_CHAN);
- edp_regulator_disable(ctrl);
- }
-}
-
-static void edp_ctrl_link_enable(struct edp_ctrl *ctrl, int enable)
-{
- u32 m, n;
-
- if (enable) {
- /* Enable link channel clocks */
- edp_clk_enable(ctrl, EDP_CLK_MASK_LINK_CHAN);
-
- msm_edp_phy_lane_power_ctrl(ctrl->phy, true, ctrl->lane_cnt);
-
- msm_edp_phy_vm_pe_init(ctrl->phy);
-
-		/* Make sure phy is programmed */
- wmb();
- msm_edp_phy_ready(ctrl->phy);
-
- edp_config_ctrl(ctrl);
- msm_edp_ctrl_pixel_clock_valid(ctrl, ctrl->pixel_rate, &m, &n);
- edp_sw_mvid_nvid(ctrl, m, n);
- edp_mainlink_ctrl(ctrl, 1);
- } else {
- edp_mainlink_ctrl(ctrl, 0);
-
- msm_edp_phy_lane_power_ctrl(ctrl->phy, false, 0);
- edp_clk_disable(ctrl, EDP_CLK_MASK_LINK_CHAN);
- }
-}
-
-static int edp_ctrl_training(struct edp_ctrl *ctrl)
-{
- int ret;
-
- /* Do link training only when power is on */
- if (!ctrl->power_on)
- return -EINVAL;
-
-train_start:
- ret = edp_do_link_train(ctrl);
- if (ret == EDP_TRAIN_RECONFIG) {
- /* Re-configure main link */
- edp_ctrl_irq_enable(ctrl, 0);
- edp_ctrl_link_enable(ctrl, 0);
- msm_edp_phy_ctrl(ctrl->phy, 0);
-
- /* Make sure link is fully disabled */
- wmb();
- usleep_range(500, 1000);
-
- msm_edp_phy_ctrl(ctrl->phy, 1);
- edp_ctrl_link_enable(ctrl, 1);
- edp_ctrl_irq_enable(ctrl, 1);
- goto train_start;
- }
-
- return ret;
-}
-
-static void edp_ctrl_on_worker(struct work_struct *work)
-{
- struct edp_ctrl *ctrl = container_of(
- work, struct edp_ctrl, on_work);
- u8 value;
- int ret;
-
- mutex_lock(&ctrl->dev_mutex);
-
- if (ctrl->power_on) {
- DBG("already on");
- goto unlock_ret;
- }
-
- edp_ctrl_phy_aux_enable(ctrl, 1);
- edp_ctrl_link_enable(ctrl, 1);
-
- edp_ctrl_irq_enable(ctrl, 1);
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
- ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
- if (ret < 0)
- goto fail;
-
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D0;
-
- ret = drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
- if (ret < 0)
- goto fail;
-
- /*
- * According to the DP 1.1 specification, a "Sink Device must
- * exit the power saving state within 1 ms" (Section 2.5.3.1,
-		 * Table 5-52, "Sink Control Field", register 0x600).
- */
- usleep_range(1000, 2000);
- }
-
- ctrl->power_on = true;
-
- /* Start link training */
- ret = edp_ctrl_training(ctrl);
- if (ret != EDP_TRAIN_SUCCESS)
- goto fail;
-
- DBG("DONE");
- goto unlock_ret;
-
-fail:
- edp_ctrl_irq_enable(ctrl, 0);
- edp_ctrl_link_enable(ctrl, 0);
- edp_ctrl_phy_aux_enable(ctrl, 0);
- ctrl->power_on = false;
-unlock_ret:
- mutex_unlock(&ctrl->dev_mutex);
-}
-
-static void edp_ctrl_off_worker(struct work_struct *work)
-{
- struct edp_ctrl *ctrl = container_of(
- work, struct edp_ctrl, off_work);
- unsigned long time_left;
-
- mutex_lock(&ctrl->dev_mutex);
-
- if (!ctrl->power_on) {
- DBG("already off");
- goto unlock_ret;
- }
-
- reinit_completion(&ctrl->idle_comp);
- edp_state_ctrl(ctrl, EDP_STATE_CTRL_PUSH_IDLE);
-
- time_left = wait_for_completion_timeout(&ctrl->idle_comp,
- msecs_to_jiffies(500));
- if (!time_left)
-		DBG("%s: idle pattern timed out\n", __func__);
-
- edp_state_ctrl(ctrl, 0);
-
- /* DP_SET_POWER register is only available on DPCD v1.1 and later */
- if (ctrl->dpcd[DP_DPCD_REV] >= 0x11) {
- u8 value;
- int ret;
-
- ret = drm_dp_dpcd_readb(ctrl->drm_aux, DP_SET_POWER, &value);
- if (ret > 0) {
- value &= ~DP_SET_POWER_MASK;
- value |= DP_SET_POWER_D3;
-
- drm_dp_dpcd_writeb(ctrl->drm_aux, DP_SET_POWER, value);
- }
- }
-
- edp_ctrl_irq_enable(ctrl, 0);
-
- edp_ctrl_link_enable(ctrl, 0);
-
- edp_ctrl_phy_aux_enable(ctrl, 0);
-
- ctrl->power_on = false;
-
-unlock_ret:
- mutex_unlock(&ctrl->dev_mutex);
-}
-
-irqreturn_t msm_edp_ctrl_irq(struct edp_ctrl *ctrl)
-{
- u32 isr1, isr2, mask1, mask2;
- u32 ack;
-
- DBG("");
- spin_lock(&ctrl->irq_lock);
- isr1 = edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_1);
- isr2 = edp_read(ctrl->base + REG_EDP_INTERRUPT_REG_2);
-
- mask1 = isr1 & EDP_INTR_MASK1;
- mask2 = isr2 & EDP_INTR_MASK2;
-
-	isr1 &= ~mask1;	/* strip the mask bits */
- isr2 &= ~mask2;
-
- DBG("isr=%x mask=%x isr2=%x mask2=%x",
- isr1, mask1, isr2, mask2);
-
- ack = isr1 & EDP_INTR_STATUS1;
- ack <<= 1; /* ack bits */
- ack |= mask1;
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_1, ack);
-
- ack = isr2 & EDP_INTR_STATUS2;
- ack <<= 1; /* ack bits */
- ack |= mask2;
- edp_write(ctrl->base + REG_EDP_INTERRUPT_REG_2, ack);
- spin_unlock(&ctrl->irq_lock);
-
- if (isr1 & EDP_INTERRUPT_REG_1_HPD)
- DBG("edp_hpd");
-
- if (isr2 & EDP_INTERRUPT_REG_2_READY_FOR_VIDEO)
- DBG("edp_video_ready");
-
- if (isr2 & EDP_INTERRUPT_REG_2_IDLE_PATTERNs_SENT) {
- DBG("idle_patterns_sent");
- complete(&ctrl->idle_comp);
- }
-
- msm_edp_aux_irq(ctrl->aux, isr1);
-
- return IRQ_HANDLED;
-}
-
-void msm_edp_ctrl_power(struct edp_ctrl *ctrl, bool on)
-{
- if (on)
- queue_work(ctrl->workqueue, &ctrl->on_work);
- else
- queue_work(ctrl->workqueue, &ctrl->off_work);
-}
-
-int msm_edp_ctrl_init(struct msm_edp *edp)
-{
- struct edp_ctrl *ctrl = NULL;
- struct device *dev;
- int ret;
-
- if (!edp) {
- pr_err("%s: edp is NULL!\n", __func__);
- return -EINVAL;
- }
-
- dev = &edp->pdev->dev;
- ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL);
- if (!ctrl)
- return -ENOMEM;
-
- edp->ctrl = ctrl;
- ctrl->pdev = edp->pdev;
-
- ctrl->base = msm_ioremap(ctrl->pdev, "edp", "eDP");
- if (IS_ERR(ctrl->base))
- return PTR_ERR(ctrl->base);
-
- /* Get regulator, clock, gpio, pwm */
- ret = edp_regulator_init(ctrl);
- if (ret) {
- pr_err("%s:regulator init fail\n", __func__);
- return ret;
- }
- ret = edp_clk_init(ctrl);
- if (ret) {
- pr_err("%s:clk init fail\n", __func__);
- return ret;
- }
- ret = edp_gpio_config(ctrl);
- if (ret) {
- pr_err("%s:failed to configure GPIOs: %d", __func__, ret);
- return ret;
- }
-
- /* Init aux and phy */
- ctrl->aux = msm_edp_aux_init(edp, ctrl->base, &ctrl->drm_aux);
- if (!ctrl->aux || !ctrl->drm_aux) {
- pr_err("%s:failed to init aux\n", __func__);
- return -ENOMEM;
- }
-
- ctrl->phy = msm_edp_phy_init(dev, ctrl->base);
- if (!ctrl->phy) {
- pr_err("%s:failed to init phy\n", __func__);
- ret = -ENOMEM;
-		goto err_destroy_aux;
- }
-
- spin_lock_init(&ctrl->irq_lock);
- mutex_init(&ctrl->dev_mutex);
- init_completion(&ctrl->idle_comp);
-
- /* setup workqueue */
- ctrl->workqueue = alloc_ordered_workqueue("edp_drm_work", 0);
- INIT_WORK(&ctrl->on_work, edp_ctrl_on_worker);
- INIT_WORK(&ctrl->off_work, edp_ctrl_off_worker);
-
- return 0;
-
-err_destroy_aux:
- msm_edp_aux_destroy(dev, ctrl->aux);
- ctrl->aux = NULL;
- return ret;
-}
-
-void msm_edp_ctrl_destroy(struct edp_ctrl *ctrl)
-{
- if (!ctrl)
- return;
-
- if (ctrl->workqueue) {
- destroy_workqueue(ctrl->workqueue);
- ctrl->workqueue = NULL;
- }
-
- if (ctrl->aux) {
- msm_edp_aux_destroy(&ctrl->pdev->dev, ctrl->aux);
- ctrl->aux = NULL;
- }
-
- kfree(ctrl->edid);
- ctrl->edid = NULL;
-
- mutex_destroy(&ctrl->dev_mutex);
-}
-
-bool msm_edp_ctrl_panel_connected(struct edp_ctrl *ctrl)
-{
- mutex_lock(&ctrl->dev_mutex);
- DBG("connect status = %d", ctrl->edp_connected);
- if (ctrl->edp_connected) {
- mutex_unlock(&ctrl->dev_mutex);
- return true;
- }
-
- if (!ctrl->power_on) {
- edp_ctrl_phy_aux_enable(ctrl, 1);
- edp_ctrl_irq_enable(ctrl, 1);
- }
-
- if (drm_dp_dpcd_read(ctrl->drm_aux, DP_DPCD_REV, ctrl->dpcd,
- DP_RECEIVER_CAP_SIZE) < DP_RECEIVER_CAP_SIZE) {
- pr_err("%s: AUX channel is NOT ready\n", __func__);
- memset(ctrl->dpcd, 0, DP_RECEIVER_CAP_SIZE);
- } else {
- ctrl->edp_connected = true;
- }
-
- if (!ctrl->power_on) {
- edp_ctrl_irq_enable(ctrl, 0);
- edp_ctrl_phy_aux_enable(ctrl, 0);
- }
-
- DBG("exit: connect status=%d", ctrl->edp_connected);
-
- mutex_unlock(&ctrl->dev_mutex);
-
- return ctrl->edp_connected;
-}
-
-int msm_edp_ctrl_get_panel_info(struct edp_ctrl *ctrl,
- struct drm_connector *connector, struct edid **edid)
-{
- mutex_lock(&ctrl->dev_mutex);
-
- if (ctrl->edid) {
- if (edid) {
- DBG("Just return edid buffer");
- *edid = ctrl->edid;
- }
- goto unlock_ret;
- }
-
- if (!ctrl->power_on) {
- edp_ctrl_phy_aux_enable(ctrl, 1);
- edp_ctrl_irq_enable(ctrl, 1);
- }
-
- /* Initialize link rate as panel max link rate */
- ctrl->link_rate = ctrl->dpcd[DP_MAX_LINK_RATE];
-
- ctrl->edid = drm_get_edid(connector, &ctrl->drm_aux->ddc);
- if (!ctrl->edid) {
- pr_err("%s: edid read fail\n", __func__);
- goto disable_ret;
- }
-
- if (edid)
- *edid = ctrl->edid;
-
-disable_ret:
- if (!ctrl->power_on) {
- edp_ctrl_irq_enable(ctrl, 0);
- edp_ctrl_phy_aux_enable(ctrl, 0);
- }
-unlock_ret:
- mutex_unlock(&ctrl->dev_mutex);
- return 0;
-}
-
-int msm_edp_ctrl_timing_cfg(struct edp_ctrl *ctrl,
- const struct drm_display_mode *mode,
- const struct drm_display_info *info)
-{
- u32 hstart_from_sync, vstart_from_sync;
- u32 data;
- int ret = 0;
-
- mutex_lock(&ctrl->dev_mutex);
- /*
- * Need to keep color depth, pixel rate and
- * interlaced information in ctrl context
- */
- ctrl->color_depth = info->bpc;
- ctrl->pixel_rate = mode->clock;
- ctrl->interlaced = !!(mode->flags & DRM_MODE_FLAG_INTERLACE);
-
- /* Fill initial link config based on passed in timing */
- edp_fill_link_cfg(ctrl);
-
- if (edp_clk_enable(ctrl, EDP_CLK_MASK_AHB)) {
- pr_err("%s, fail to prepare enable ahb clk\n", __func__);
- ret = -EINVAL;
- goto unlock_ret;
- }
- edp_clock_synchrous(ctrl, 1);
-
- /* Configure eDP timing to HW */
- edp_write(ctrl->base + REG_EDP_TOTAL_HOR_VER,
- EDP_TOTAL_HOR_VER_HORIZ(mode->htotal) |
- EDP_TOTAL_HOR_VER_VERT(mode->vtotal));
-
- vstart_from_sync = mode->vtotal - mode->vsync_start;
- hstart_from_sync = mode->htotal - mode->hsync_start;
- edp_write(ctrl->base + REG_EDP_START_HOR_VER_FROM_SYNC,
- EDP_START_HOR_VER_FROM_SYNC_HORIZ(hstart_from_sync) |
- EDP_START_HOR_VER_FROM_SYNC_VERT(vstart_from_sync));
-
- data = EDP_HSYNC_VSYNC_WIDTH_POLARITY_VERT(
- mode->vsync_end - mode->vsync_start);
- data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_HORIZ(
- mode->hsync_end - mode->hsync_start);
- if (mode->flags & DRM_MODE_FLAG_NVSYNC)
- data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NVSYNC;
- if (mode->flags & DRM_MODE_FLAG_NHSYNC)
- data |= EDP_HSYNC_VSYNC_WIDTH_POLARITY_NHSYNC;
- edp_write(ctrl->base + REG_EDP_HSYNC_VSYNC_WIDTH_POLARITY, data);
-
- edp_write(ctrl->base + REG_EDP_ACTIVE_HOR_VER,
- EDP_ACTIVE_HOR_VER_HORIZ(mode->hdisplay) |
- EDP_ACTIVE_HOR_VER_VERT(mode->vdisplay));
-
- edp_clk_disable(ctrl, EDP_CLK_MASK_AHB);
-
-unlock_ret:
- mutex_unlock(&ctrl->dev_mutex);
- return ret;
-}
-
-bool msm_edp_ctrl_pixel_clock_valid(struct edp_ctrl *ctrl,
- u32 pixel_rate, u32 *pm, u32 *pn)
-{
- const struct edp_pixel_clk_div *divs;
- u32 err = 1; /* 1% error tolerance */
- u32 clk_err;
- int i;
-
- if (ctrl->link_rate == DP_LINK_BW_1_62) {
- divs = clk_divs[0];
- } else if (ctrl->link_rate == DP_LINK_BW_2_7) {
- divs = clk_divs[1];
- } else {
- pr_err("%s: Invalid link rate,%d\n", __func__, ctrl->link_rate);
- return false;
- }
-
- for (i = 0; i < EDP_PIXEL_CLK_NUM; i++) {
- clk_err = abs(divs[i].rate - pixel_rate);
- if ((divs[i].rate * err / 100) >= clk_err) {
- if (pm)
- *pm = divs[i].m;
- if (pn)
- *pn = divs[i].n;
- return true;
- }
- }
-
- DBG("pixel clock %d(kHz) not supported", pixel_rate);
-
- return false;
-}
-
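For context on the controller code removed above: the lane selection in edp_fill_link_cfg() simply compares the video bandwidth against the per-lane link bandwidth (the divide by 10 accounts for 8b/10b coding) and doubles the lane count until the link is fast enough. The following standalone user-space sketch is not part of the kernel sources; it only reworks that arithmetic for a typical 1080p60 panel so the numbers are easy to follow.

#include <stdio.h>

int main(void)
{
	unsigned int pixel_rate = 148500;  /* kHz, 1920x1080@60 */
	unsigned int bpp = 8 * 3;          /* 8 bpc, RGB */
	unsigned int link_rate = 0x0a;     /* DP_LINK_BW_2_7 encoding */
	unsigned int max_lane = 4;
	unsigned int lane;

	unsigned int prate = pixel_rate * bpp / 8;     /* video bandwidth, kByte/s */
	unsigned int lrate = 270000 * link_rate / 10;  /* one lane, kByte/s (10 link bits per byte) */

	for (lane = 1; lane <= max_lane; lane <<= 1) {
		if (lrate >= prate)
			break;
		lrate <<= 1;  /* doubling the lane count doubles the link bandwidth */
	}

	/* prints "2 lanes: 540000 >= 445500 kByte/s" for this mode */
	printf("%u lanes: %u >= %u kByte/s\n", lane, lrate, prate);
	return 0;
}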
diff --git a/drivers/gpu/drm/msm/edp/edp_phy.c b/drivers/gpu/drm/msm/edp/edp_phy.c
deleted file mode 100644
index fcaf7b7ecdd2..000000000000
--- a/drivers/gpu/drm/msm/edp/edp_phy.c
+++ /dev/null
@@ -1,98 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
- */
-
-#include "edp.h"
-#include "edp.xml.h"
-
-#define EDP_MAX_LANE 4
-
-struct edp_phy {
- void __iomem *base;
-};
-
-bool msm_edp_phy_ready(struct edp_phy *phy)
-{
- u32 status;
- int cnt = 100;
-
- while (--cnt) {
- status = edp_read(phy->base +
- REG_EDP_PHY_GLB_PHY_STATUS);
- if (status & 0x01)
- break;
- usleep_range(500, 1000);
- }
-
- if (cnt == 0) {
- pr_err("%s: PHY NOT ready\n", __func__);
- return false;
- } else {
- return true;
- }
-}
-
-void msm_edp_phy_ctrl(struct edp_phy *phy, int enable)
-{
- DBG("enable=%d", enable);
- if (enable) {
- /* Reset */
- edp_write(phy->base + REG_EDP_PHY_CTRL,
- EDP_PHY_CTRL_SW_RESET | EDP_PHY_CTRL_SW_RESET_PLL);
- /* Make sure fully reset */
- wmb();
- usleep_range(500, 1000);
- edp_write(phy->base + REG_EDP_PHY_CTRL, 0x000);
- edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0x3f);
- edp_write(phy->base + REG_EDP_PHY_GLB_CFG, 0x1);
- } else {
- edp_write(phy->base + REG_EDP_PHY_GLB_PD_CTL, 0xc0);
- }
-}
-
-/* voltage mode and pre emphasis cfg */
-void msm_edp_phy_vm_pe_init(struct edp_phy *phy)
-{
- edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, 0x3);
- edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, 0x64);
- edp_write(phy->base + REG_EDP_PHY_GLB_MISC9, 0x6c);
-}
-
-void msm_edp_phy_vm_pe_cfg(struct edp_phy *phy, u32 v0, u32 v1)
-{
- edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG0, v0);
- edp_write(phy->base + REG_EDP_PHY_GLB_VM_CFG1, v1);
-}
-
-void msm_edp_phy_lane_power_ctrl(struct edp_phy *phy, bool up, u32 max_lane)
-{
- u32 i;
- u32 data;
-
- if (up)
- data = 0; /* power up */
- else
- data = 0x7; /* power down */
-
- for (i = 0; i < max_lane; i++)
-		edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i), data);
-
-	/* power down unused lanes */
-	data = 0x7; /* power down */
-	for (i = max_lane; i < EDP_MAX_LANE; i++)
-		edp_write(phy->base + REG_EDP_PHY_LN_PD_CTL(i), data);
-}
-
-void *msm_edp_phy_init(struct device *dev, void __iomem *regbase)
-{
- struct edp_phy *phy = NULL;
-
- phy = devm_kzalloc(dev, sizeof(*phy), GFP_KERNEL);
- if (!phy)
- return NULL;
-
- phy->base = regbase;
- return phy;
-}
-
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 09d2d279c30a..956b1efc3721 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -29,14 +29,14 @@ static int msm_gpu_show(struct seq_file *m, void *arg)
struct msm_gpu *gpu = priv->gpu;
int ret;
- ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+ ret = mutex_lock_interruptible(&gpu->lock);
if (ret)
return ret;
drm_printf(&p, "%s Status:\n", gpu->name);
gpu->funcs->show(gpu, show_priv->state, &p);
- mutex_unlock(&show_priv->dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
return 0;
}
@@ -48,9 +48,9 @@ static int msm_gpu_release(struct inode *inode, struct file *file)
struct msm_drm_private *priv = show_priv->dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
- mutex_lock(&show_priv->dev->struct_mutex);
+ mutex_lock(&gpu->lock);
gpu->funcs->gpu_state_put(show_priv->state);
- mutex_unlock(&show_priv->dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
kfree(show_priv);
@@ -72,15 +72,16 @@ static int msm_gpu_open(struct inode *inode, struct file *file)
if (!show_priv)
return -ENOMEM;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&gpu->lock);
if (ret)
goto free_priv;
pm_runtime_get_sync(&gpu->pdev->dev);
+ msm_gpu_hw_init(gpu);
show_priv->state = gpu->funcs->gpu_state_get(gpu);
pm_runtime_put_sync(&gpu->pdev->dev);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
if (IS_ERR(show_priv->state)) {
ret = PTR_ERR(show_priv->state);
@@ -133,8 +134,10 @@ DEFINE_SIMPLE_ATTRIBUTE(shrink_fops,
"0x%08llx\n");
-static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
+static int msm_gem_show(struct seq_file *m, void *arg)
{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
struct msm_drm_private *priv = dev->dev_private;
int ret;
@@ -149,8 +152,10 @@ static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
return 0;
}
-static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
+static int msm_mm_show(struct seq_file *m, void *arg)
{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
struct drm_printer p = drm_seq_file_printer(m);
drm_mm_print(&dev->vma_offset_manager->vm_addr_space_mm, &p);
@@ -158,8 +163,10 @@ static int msm_mm_show(struct drm_device *dev, struct seq_file *m)
return 0;
}
-static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
+static int msm_fb_show(struct seq_file *m, void *arg)
{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
struct msm_drm_private *priv = dev->dev_private;
struct drm_framebuffer *fb, *fbdev_fb = NULL;
@@ -182,29 +189,10 @@ static int msm_fb_show(struct drm_device *dev, struct seq_file *m)
return 0;
}
-static int show_locked(struct seq_file *m, void *arg)
-{
- struct drm_info_node *node = (struct drm_info_node *) m->private;
- struct drm_device *dev = node->minor->dev;
- int (*show)(struct drm_device *dev, struct seq_file *m) =
- node->info_ent->data;
- int ret;
-
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
- ret = show(dev, m);
-
- mutex_unlock(&dev->struct_mutex);
-
- return ret;
-}
-
static struct drm_info_list msm_debugfs_list[] = {
- {"gem", show_locked, 0, msm_gem_show},
- { "mm", show_locked, 0, msm_mm_show },
- { "fb", show_locked, 0, msm_fb_show },
+ {"gem", msm_gem_show},
+ { "mm", msm_mm_show },
+ { "fb", msm_fb_show },
};
static int late_init_minor(struct drm_minor *minor)
@@ -254,6 +242,9 @@ void msm_debugfs_init(struct drm_minor *minor)
debugfs_create_u32("hangcheck_period_ms", 0600, minor->debugfs_root,
&priv->hangcheck_period);
+ debugfs_create_bool("disable_err_irq", 0600, minor->debugfs_root,
+ &priv->disable_err_irq);
+
debugfs_create_file("shrink", S_IRWXU, minor->debugfs_root,
dev, &shrink_fops);
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 7936e8d498dd..f81cf7097494 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -752,14 +752,8 @@ static void context_close(struct msm_file_private *ctx)
static void msm_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct msm_drm_private *priv = dev->dev_private;
struct msm_file_private *ctx = file->driver_priv;
- mutex_lock(&dev->struct_mutex);
- if (ctx == priv->lastctx)
- priv->lastctx = NULL;
- mutex_unlock(&dev->struct_mutex);
-
context_close(ctx);
}
@@ -967,29 +961,18 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
return ret;
}
-static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
- struct drm_file *file)
+static int wait_fence(struct msm_gpu_submitqueue *queue, uint32_t fence_id,
+ ktime_t timeout)
{
- struct msm_drm_private *priv = dev->dev_private;
- struct drm_msm_wait_fence *args = data;
- ktime_t timeout = to_ktime(args->timeout);
- struct msm_gpu_submitqueue *queue;
- struct msm_gpu *gpu = priv->gpu;
struct dma_fence *fence;
int ret;
- if (args->pad) {
- DRM_ERROR("invalid pad: %08x\n", args->pad);
+ if (fence_after(fence_id, queue->last_fence)) {
+ DRM_ERROR_RATELIMITED("waiting on invalid fence: %u (of %u)\n",
+ fence_id, queue->last_fence);
return -EINVAL;
}
- if (!gpu)
- return 0;
-
- queue = msm_submitqueue_get(file->driver_priv, args->queueid);
- if (!queue)
- return -ENOENT;
-
/*
* Map submitqueue scoped "seqno" (which is actually an idr key)
* back to underlying dma-fence
@@ -1001,7 +984,7 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
ret = mutex_lock_interruptible(&queue->lock);
if (ret)
return ret;
- fence = idr_find(&queue->fence_idr, args->fence);
+ fence = idr_find(&queue->fence_idr, fence_id);
if (fence)
fence = dma_fence_get_rcu(fence);
mutex_unlock(&queue->lock);
@@ -1017,6 +1000,32 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
}
dma_fence_put(fence);
+
+ return ret;
+}
+
+static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_wait_fence *args = data;
+ struct msm_gpu_submitqueue *queue;
+ int ret;
+
+ if (args->pad) {
+ DRM_ERROR("invalid pad: %08x\n", args->pad);
+ return -EINVAL;
+ }
+
+ if (!priv->gpu)
+ return 0;
+
+ queue = msm_submitqueue_get(file->driver_priv, args->queueid);
+ if (!queue)
+ return -ENOENT;
+
+ ret = wait_fence(queue, args->fence, to_ktime(args->timeout));
+
msm_submitqueue_put(queue);
return ret;
@@ -1277,9 +1286,10 @@ static int add_components_mdp(struct device *mdp_dev,
return 0;
}
-static int compare_name_mdp(struct device *dev, void *data)
+static int find_mdp_node(struct device *dev, void *data)
{
- return (strstr(dev_name(dev), "mdp") != NULL);
+ return of_match_node(dpu_dt_match, dev->of_node) ||
+ of_match_node(mdp5_dt_match, dev->of_node);
}
static int add_display_components(struct platform_device *pdev,
@@ -1304,7 +1314,7 @@ static int add_display_components(struct platform_device *pdev,
return ret;
}
- mdp_dev = device_find_child(dev, NULL, compare_name_mdp);
+ mdp_dev = device_find_child(dev, NULL, find_mdp_node);
if (!mdp_dev) {
DRM_DEV_ERROR(dev, "failed to find MDSS MDP node\n");
of_platform_depopulate(dev);
@@ -1463,7 +1473,6 @@ static int __init msm_drm_register(void)
msm_mdp_register();
msm_dpu_register();
msm_dsi_register();
- msm_edp_register();
msm_hdmi_register();
msm_dp_register();
adreno_register();
@@ -1477,7 +1486,6 @@ static void __exit msm_drm_unregister(void)
msm_dp_unregister();
msm_hdmi_unregister();
adreno_unregister();
- msm_edp_unregister();
msm_dsi_unregister();
msm_mdp_unregister();
msm_dpu_unregister();
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index eb984d925f4d..abc668f15f3d 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -151,12 +151,6 @@ struct msm_drm_private {
*/
struct hdmi *hdmi;
- /* eDP is for mdp5 only, but kms has not been created
- * when edp_bind() and edp_init() are called. Here is the only
- * place to keep the edp instance.
- */
- struct msm_edp *edp;
-
/* DSI is shared by mdp4 and mdp5 */
struct msm_dsi *dsi[2];
@@ -164,7 +158,7 @@ struct msm_drm_private {
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
- struct msm_file_private *lastctx;
+
/* gpu is only set on open(), but we need this info earlier */
bool is_a2xx;
bool has_cached_coherent;
@@ -246,6 +240,15 @@ struct msm_drm_private {
/* For hang detection, in ms */
unsigned int hangcheck_period;
+
+ /**
+ * disable_err_irq:
+ *
+ * Disable handling of GPU hw error interrupts, to force fallback to
+ * sw hangcheck timer. Written (via debugfs) by igt tests to test
+ * the sw hangcheck mechanism.
+ */
+ bool disable_err_irq;
};
struct msm_format {
@@ -335,12 +338,6 @@ int msm_hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
void __init msm_hdmi_register(void);
void __exit msm_hdmi_unregister(void);
-struct msm_edp;
-void __init msm_edp_register(void);
-void __exit msm_edp_unregister(void);
-int msm_edp_modeset_init(struct msm_edp *edp, struct drm_device *dev,
- struct drm_encoder *encoder);
-
struct msm_dsi;
#ifdef CONFIG_DRM_MSM_DSI
int dsi_dev_attach(struct platform_device *pdev);
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 0daaeb54ff6f..4c39ef9dd75d 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -81,8 +81,6 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
bo = msm_framebuffer_bo(fb, 0);
- mutex_lock(&dev->struct_mutex);
-
/*
* NOTE: if we can be guaranteed to be able to map buffer
* in panic (ie. lock-safe, etc) we could avoid pinning the
@@ -91,14 +89,14 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
ret = msm_gem_get_and_pin_iova(bo, priv->kms->aspace, &paddr);
if (ret) {
DRM_DEV_ERROR(dev->dev, "failed to get buffer obj iova: %d\n", ret);
- goto fail_unlock;
+ goto fail;
}
fbi = drm_fb_helper_alloc_fbi(helper);
if (IS_ERR(fbi)) {
DRM_DEV_ERROR(dev->dev, "failed to allocate fb info\n");
ret = PTR_ERR(fbi);
- goto fail_unlock;
+ goto fail;
}
DBG("fbi=%p, dev=%p", fbi, dev);
@@ -115,7 +113,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
fbi->screen_base = msm_gem_get_vaddr(bo);
if (IS_ERR(fbi->screen_base)) {
ret = PTR_ERR(fbi->screen_base);
- goto fail_unlock;
+ goto fail;
}
fbi->screen_size = bo->size;
fbi->fix.smem_start = paddr;
@@ -124,12 +122,9 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
- mutex_unlock(&dev->struct_mutex);
-
return 0;
-fail_unlock:
- mutex_unlock(&dev->struct_mutex);
+fail:
drm_framebuffer_remove(fb);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_fence.h b/drivers/gpu/drm/msm/msm_fence.h
index 4783db528bcc..17ee3822b423 100644
--- a/drivers/gpu/drm/msm/msm_fence.h
+++ b/drivers/gpu/drm/msm/msm_fence.h
@@ -60,4 +60,16 @@ void msm_update_fence(struct msm_fence_context *fctx, uint32_t fence);
struct dma_fence * msm_fence_alloc(struct msm_fence_context *fctx);
+static inline bool
+fence_before(uint32_t a, uint32_t b)
+{
+ return (int32_t)(a - b) < 0;
+}
+
+static inline bool
+fence_after(uint32_t a, uint32_t b)
+{
+ return (int32_t)(a - b) > 0;
+}
+
#endif
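The fence_before()/fence_after() helpers added above compare 32-bit sequence numbers through their signed difference, so the ordering stays correct across counter wraparound; the hangcheck, retire and wait-fence paths later in this patch rely on that. A small stand-alone sketch (not part of the kernel sources) of why the signed-difference test works:

#include <assert.h>
#include <stdint.h>
#include <stdbool.h>

static bool fence_after(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;   /* same test as the kernel helper */
}

int main(void)
{
	/* ordinary case: seqno 5 was allocated after seqno 3 */
	assert(fence_after(5, 3));

	/* wraparound: 0x00000002 comes after 0xfffffffe even though it is
	 * numerically smaller, because the unsigned difference is small */
	assert(fence_after(0x00000002u, 0xfffffffeu));

	/* a naive '>' comparison gets the wrapped case wrong */
	assert(!(0x00000002u > 0xfffffffeu));

	return 0;
}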
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 2916480d9115..02b9ae65a96a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -1029,8 +1029,7 @@ static int msm_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- vma->vm_flags &= ~VM_PFNMAP;
- vma->vm_flags |= VM_MIXEDMAP | VM_DONTEXPAND;
+ vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND | VM_DONTDUMP;
vma->vm_page_prot = msm_gem_pgprot(msm_obj, vm_get_page_prot(vma->vm_flags));
return 0;
@@ -1094,7 +1093,7 @@ static int msm_gem_new_impl(struct drm_device *dev,
break;
fallthrough;
default:
- DRM_DEV_ERROR(dev->dev, "invalid cache flag: %x\n",
+ DRM_DEV_DEBUG(dev->dev, "invalid cache flag: %x\n",
(flags & MSM_BO_CACHE_MASK));
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 3cb029f10925..282628d6b72c 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -772,6 +772,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
args->nr_cmds);
if (IS_ERR(submit)) {
ret = PTR_ERR(submit);
+ submit = NULL;
goto out_unlock;
}
@@ -904,6 +905,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
drm_sched_entity_push_job(&submit->base);
args->fence = submit->fence_id;
+ queue->last_fence = submit->fence_id;
msm_reset_syncobjs(syncobjs_to_reset, args->nr_in_syncobjs);
msm_process_post_deps(post_deps, args->nr_out_syncobjs,
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 2c46cd968ac4..0f78c2615272 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -150,7 +150,7 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
{
int ret;
- WARN_ON(!mutex_is_locked(&gpu->dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&gpu->lock));
if (!gpu->needs_hw_init)
return 0;
@@ -172,7 +172,7 @@ static void update_fences(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
spin_lock_irqsave(&ring->submit_lock, flags);
list_for_each_entry(submit, &ring->submits, node) {
- if (submit->seqno > fence)
+ if (fence_after(submit->seqno, fence))
break;
msm_update_fence(submit->ring->fctx,
@@ -361,7 +361,7 @@ static void recover_worker(struct kthread_work *work)
char *comm = NULL, *cmd = NULL;
int i;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&gpu->lock);
DRM_DEV_ERROR(dev->dev, "%s: hangcheck recover!\n", gpu->name);
@@ -442,7 +442,7 @@ static void recover_worker(struct kthread_work *work)
}
}
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
msm_gpu_retire(gpu);
}
@@ -450,12 +450,11 @@ static void recover_worker(struct kthread_work *work)
static void fault_worker(struct kthread_work *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, fault_work);
- struct drm_device *dev = gpu->dev;
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
char *comm = NULL, *cmd = NULL;
- mutex_lock(&dev->struct_mutex);
+ mutex_lock(&gpu->lock);
submit = find_submit(cur_ring, cur_ring->memptrs->fence + 1);
if (submit && submit->fault_dumped)
@@ -490,7 +489,7 @@ resume_smmu:
memset(&gpu->fault_info, 0, sizeof(gpu->fault_info));
gpu->aspace->mmu->funcs->resume_translation(gpu->aspace->mmu);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
}
static void hangcheck_timer_reset(struct msm_gpu *gpu)
@@ -510,7 +509,7 @@ static void hangcheck_handler(struct timer_list *t)
if (fence != ring->hangcheck_fence) {
/* some progress has been made.. ya! */
ring->hangcheck_fence = fence;
- } else if (fence < ring->seqno) {
+ } else if (fence_before(fence, ring->seqno)) {
/* no progress and not done.. hung! */
ring->hangcheck_fence = fence;
DRM_DEV_ERROR(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
@@ -524,7 +523,7 @@ static void hangcheck_handler(struct timer_list *t)
}
/* if still more pending work, reset the hangcheck timer: */
- if (ring->seqno > ring->hangcheck_fence)
+ if (fence_after(ring->seqno, ring->hangcheck_fence))
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
@@ -733,7 +732,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
struct msm_ringbuffer *ring = submit->ring;
unsigned long flags;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&gpu->lock));
pm_runtime_get_sync(&gpu->pdev->dev);
@@ -763,7 +762,7 @@ void msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
mutex_unlock(&gpu->active_lock);
gpu->funcs->submit(gpu, submit);
- priv->lastctx = submit->queue->ctx;
+ gpu->cur_ctx_seqno = submit->queue->ctx->seqno;
hangcheck_timer_reset(gpu);
}
@@ -848,6 +847,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
INIT_LIST_HEAD(&gpu->active_list);
mutex_init(&gpu->active_lock);
+ mutex_init(&gpu->lock);
kthread_init_work(&gpu->retire_work, retire_worker);
kthread_init_work(&gpu->recover_work, recover_worker);
kthread_init_work(&gpu->fault_work, fault_worker);
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 59cdd00b69d0..445c6bfd4b6b 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -88,6 +88,21 @@ struct msm_gpu_devfreq {
struct devfreq *devfreq;
/**
+ * idle_constraint:
+	 * idle_freq:
+ * A PM QoS constraint to limit max freq while the GPU is idle.
+ */
+ struct dev_pm_qos_request idle_freq;
+
+ /**
+	 * boost_freq:
+ *
+ * A PM QoS constraint to boost min freq for a period of time
+ * until the boost expires.
+ */
+ struct dev_pm_qos_request boost_freq;
+
+ /**
* busy_cycles:
*
* Used by implementation of gpu->gpu_busy() to track the last
@@ -103,22 +118,19 @@ struct msm_gpu_devfreq {
ktime_t idle_time;
/**
- * idle_freq:
+ * idle_work:
*
- * Shadow frequency used while the GPU is idle. From the PoV of
- * the devfreq governor, we are continuing to sample busyness and
- * adjust frequency while the GPU is idle, but we use this shadow
- * value as the GPU is actually clamped to minimum frequency while
- * it is inactive.
+ * Used to delay clamping to idle freq on active->idle transition.
*/
- unsigned long idle_freq;
+ struct msm_hrtimer_work idle_work;
/**
- * idle_work:
+ * boost_work:
*
- * Used to delay clamping to idle freq on active->idle transition.
+	 * Used to reset the boost_freq constraint after the boost period
+	 * has elapsed.
*/
- struct msm_hrtimer_work idle_work;
+ struct msm_hrtimer_work boost_work;
};
struct msm_gpu {
@@ -144,6 +156,17 @@ struct msm_gpu {
struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
int nr_rings;
+ /**
+ * cur_ctx_seqno:
+ *
+ * The ctx->seqno value of the last context to submit rendering,
+ * and the one with current pgtables installed (for generations
+ * that support per-context pgtables). Tracked by seqno rather
+ * than pointer value to avoid dangling pointers, and cases where
+ * a ctx can be freed and a new one created with the same address.
+ */
+ int cur_ctx_seqno;
+
/*
* List of GEM active objects on this gpu. Protected by
* msm_drm_private::mm_lock
@@ -151,12 +174,22 @@ struct msm_gpu {
struct list_head active_list;
/**
+ * lock:
+ *
+ * General lock for serializing all the gpu things.
+ *
+ * TODO move to per-ring locking where feasible (ie. submit/retire
+ * path, etc)
+ */
+ struct mutex lock;
+
+ /**
* active_submits:
*
* The number of submitted but not yet retired submits, used to
* determine transitions between active and idle.
*
- * Protected by lock
+ * Protected by active_lock
*/
int active_submits;
@@ -241,7 +274,7 @@ static inline bool msm_gpu_active(struct msm_gpu *gpu)
for (i = 0; i < gpu->nr_rings; i++) {
struct msm_ringbuffer *ring = gpu->rb[i];
- if (ring->seqno > ring->memptrs->fence)
+ if (fence_after(ring->seqno, ring->memptrs->fence))
return true;
}
@@ -359,6 +392,8 @@ static inline int msm_gpu_convert_priority(struct msm_gpu *gpu, int prio,
* @ring_nr: the ringbuffer used by this submitqueue, which is determined
* by the submitqueue's priority
* @faults: the number of GPU hangs associated with this submitqueue
+ * @last_fence: the sequence number of the last allocated fence (for error
+ * checking)
* @ctx: the per-drm_file context associated with the submitqueue (ie.
* which set of pgtables do jobs associated with the
* submitqueue use)
@@ -374,6 +409,7 @@ struct msm_gpu_submitqueue {
u32 flags;
u32 ring_nr;
int faults;
+ uint32_t last_fence;
struct msm_file_private *ctx;
struct list_head node;
struct idr fence_idr;
@@ -498,6 +534,7 @@ void msm_devfreq_init(struct msm_gpu *gpu);
void msm_devfreq_cleanup(struct msm_gpu *gpu);
void msm_devfreq_resume(struct msm_gpu *gpu);
void msm_devfreq_suspend(struct msm_gpu *gpu);
+void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor);
void msm_devfreq_active(struct msm_gpu *gpu);
void msm_devfreq_idle(struct msm_gpu *gpu);
@@ -534,28 +571,28 @@ static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
{
struct msm_gpu_state *state = NULL;
- mutex_lock(&gpu->dev->struct_mutex);
+ mutex_lock(&gpu->lock);
if (gpu->crashstate) {
kref_get(&gpu->crashstate->ref);
state = gpu->crashstate;
}
- mutex_unlock(&gpu->dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
return state;
}
static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
{
- mutex_lock(&gpu->dev->struct_mutex);
+ mutex_lock(&gpu->lock);
if (gpu->crashstate) {
if (gpu->funcs->gpu_state_put(gpu->crashstate))
gpu->crashstate = NULL;
}
- mutex_unlock(&gpu->dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
}
/*
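Editorial note: the cur_ctx_seqno field documented above replaces the old priv->lastctx pointer comparison in the per-generation submit paths. A hedged sketch of how a backend might use it to skip redundant pagetable switches; set_pagetable() is an illustrative stand-in, not code from this patch:

static void set_pagetable(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
	struct msm_file_private *ctx = submit->queue->ctx;

	/* Same context as the previous submit: its pgtables are already
	 * installed, so there is nothing to emit.  cur_ctx_seqno itself is
	 * updated in msm_gpu_submit() after gpu->funcs->submit() returns.
	 */
	if (ctx->seqno == gpu->cur_ctx_seqno)
		return;

	/* ... emit commands to install ctx's pgtables ... */
}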
diff --git a/drivers/gpu/drm/msm/msm_gpu_devfreq.c b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
index 8b7473f69cb8..62405e980925 100644
--- a/drivers/gpu/drm/msm/msm_gpu_devfreq.c
+++ b/drivers/gpu/drm/msm/msm_gpu_devfreq.c
@@ -9,6 +9,7 @@
#include <linux/devfreq.h>
#include <linux/devfreq_cooling.h>
+#include <linux/units.h>
/*
* Power Management:
@@ -20,17 +21,11 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
struct msm_gpu *gpu = dev_to_gpu(dev);
struct dev_pm_opp *opp;
- opp = devfreq_recommended_opp(dev, freq, flags);
-
/*
- * If the GPU is idle, devfreq is not aware, so just ignore
- * it's requests
+ * Note that devfreq_recommended_opp() can modify the freq
+ * to something that actually is in the opp table:
*/
- if (gpu->devfreq.idle_freq) {
- gpu->devfreq.idle_freq = *freq;
- return 0;
- }
-
+ opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
@@ -48,9 +43,6 @@ static int msm_devfreq_target(struct device *dev, unsigned long *freq,
static unsigned long get_freq(struct msm_gpu *gpu)
{
- if (gpu->devfreq.idle_freq)
- return gpu->devfreq.idle_freq;
-
if (gpu->funcs->gpu_get_freq)
return gpu->funcs->gpu_get_freq(gpu);
@@ -88,6 +80,7 @@ static struct devfreq_dev_profile msm_devfreq_profile = {
.get_cur_freq = msm_devfreq_get_cur_freq,
};
+static void msm_devfreq_boost_work(struct kthread_work *work);
static void msm_devfreq_idle_work(struct kthread_work *work);
void msm_devfreq_init(struct msm_gpu *gpu)
@@ -98,6 +91,12 @@ void msm_devfreq_init(struct msm_gpu *gpu)
if (!gpu->funcs->gpu_busy)
return;
+ dev_pm_qos_add_request(&gpu->pdev->dev, &df->idle_freq,
+ DEV_PM_QOS_MAX_FREQUENCY,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
+ dev_pm_qos_add_request(&gpu->pdev->dev, &df->boost_freq,
+ DEV_PM_QOS_MIN_FREQUENCY, 0);
+
msm_devfreq_profile.initial_freq = gpu->fast_rate;
/*
@@ -128,13 +127,19 @@ void msm_devfreq_init(struct msm_gpu *gpu)
gpu->cooling = NULL;
}
+ msm_hrtimer_work_init(&df->boost_work, gpu->worker, msm_devfreq_boost_work,
+ CLOCK_MONOTONIC, HRTIMER_MODE_REL);
msm_hrtimer_work_init(&df->idle_work, gpu->worker, msm_devfreq_idle_work,
CLOCK_MONOTONIC, HRTIMER_MODE_REL);
}
void msm_devfreq_cleanup(struct msm_gpu *gpu)
{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+
devfreq_cooling_unregister(gpu->cooling);
+ dev_pm_qos_remove_request(&df->boost_freq);
+ dev_pm_qos_remove_request(&df->idle_freq);
}
void msm_devfreq_resume(struct msm_gpu *gpu)
@@ -150,12 +155,40 @@ void msm_devfreq_suspend(struct msm_gpu *gpu)
devfreq_suspend_device(gpu->devfreq.devfreq);
}
+static void msm_devfreq_boost_work(struct kthread_work *work)
+{
+ struct msm_gpu_devfreq *df = container_of(work,
+ struct msm_gpu_devfreq, boost_work.work);
+
+ dev_pm_qos_update_request(&df->boost_freq, 0);
+}
+
+void msm_devfreq_boost(struct msm_gpu *gpu, unsigned factor)
+{
+ struct msm_gpu_devfreq *df = &gpu->devfreq;
+ uint64_t freq;
+
+ freq = get_freq(gpu);
+ freq *= factor;
+
+ /*
+ * A nice little trap is that PM QoS operates in terms of kHz,
+ * while devfreq operates in terms of Hz:
+ */
+ do_div(freq, HZ_PER_KHZ);
+
+ dev_pm_qos_update_request(&df->boost_freq, freq);
+
+ msm_hrtimer_queue_work(&df->boost_work,
+ ms_to_ktime(msm_devfreq_profile.polling_ms),
+ HRTIMER_MODE_REL);
+}
+
void msm_devfreq_active(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
struct devfreq_dev_status status;
unsigned int idle_time;
- unsigned long target_freq = df->idle_freq;
if (!df->devfreq)
return;
@@ -165,12 +198,6 @@ void msm_devfreq_active(struct msm_gpu *gpu)
*/
hrtimer_cancel(&df->idle_work.timer);
- /*
- * Hold devfreq lock to synchronize with get_dev_status()/
- * target() callbacks
- */
- mutex_lock(&df->devfreq->lock);
-
idle_time = ktime_to_ms(ktime_sub(ktime_get(), df->idle_time));
/*
@@ -178,21 +205,18 @@ void msm_devfreq_active(struct msm_gpu *gpu)
* interval, then we won't meet the threshold of busyness for
* the governor to ramp up the freq.. so give some boost
*/
- if (idle_time > msm_devfreq_profile.polling_ms/2) {
- target_freq *= 2;
+ if (idle_time > msm_devfreq_profile.polling_ms) {
+ msm_devfreq_boost(gpu, 2);
}
- df->idle_freq = 0;
-
- msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
+ dev_pm_qos_update_request(&df->idle_freq,
+ PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
/*
* Reset the polling interval so we aren't inconsistent
* about freq vs busy/total cycles
*/
msm_devfreq_get_dev_status(&gpu->pdev->dev, &status);
-
- mutex_unlock(&df->devfreq->lock);
}
@@ -201,32 +225,20 @@ static void msm_devfreq_idle_work(struct kthread_work *work)
struct msm_gpu_devfreq *df = container_of(work,
struct msm_gpu_devfreq, idle_work.work);
struct msm_gpu *gpu = container_of(df, struct msm_gpu, devfreq);
- unsigned long idle_freq, target_freq = 0;
-
- if (!df->devfreq)
- return;
-
- /*
- * Hold devfreq lock to synchronize with get_dev_status()/
- * target() callbacks
- */
- mutex_lock(&df->devfreq->lock);
-
- idle_freq = get_freq(gpu);
-
- if (gpu->clamp_to_idle)
- msm_devfreq_target(&gpu->pdev->dev, &target_freq, 0);
df->idle_time = ktime_get();
- df->idle_freq = idle_freq;
- mutex_unlock(&df->devfreq->lock);
+ if (gpu->clamp_to_idle)
+ dev_pm_qos_update_request(&df->idle_freq, 0);
}
void msm_devfreq_idle(struct msm_gpu *gpu)
{
struct msm_gpu_devfreq *df = &gpu->devfreq;
+ if (!df->devfreq)
+ return;
+
msm_hrtimer_queue_work(&df->idle_work, ms_to_ktime(1),
- HRTIMER_MODE_ABS);
+ HRTIMER_MODE_REL);
}
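Editorial note: the devfreq rework above drops the shadow idle_freq bookkeeping in favor of two dev_pm_qos requests: idle_freq caps the maximum frequency while the GPU is idle, and boost_freq raises the minimum frequency until the boost timer fires. A minimal, self-contained sketch of that PM QoS pattern (illustrative only; example_init/example_boost/example_idle are placeholder names, and error handling is omitted):

#include <linux/device.h>
#include <linux/pm_qos.h>
#include <linux/units.h>

static struct dev_pm_qos_request max_req, min_req;

static void example_init(struct device *dev)
{
	/* Frequency-type PM QoS requests are expressed in kHz. */
	dev_pm_qos_add_request(dev, &max_req, DEV_PM_QOS_MAX_FREQUENCY,
			       PM_QOS_MAX_FREQUENCY_DEFAULT_VALUE);
	dev_pm_qos_add_request(dev, &min_req, DEV_PM_QOS_MIN_FREQUENCY, 0);
}

static void example_boost(unsigned long freq_hz)
{
	/* devfreq works in Hz, PM QoS in kHz, hence the conversion. */
	dev_pm_qos_update_request(&min_req, freq_hz / HZ_PER_KHZ);
}

static void example_idle(void)
{
	/* Clamping the max request to 0 drops to the lowest available OPP. */
	dev_pm_qos_update_request(&max_req, 0);
}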
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 6a42b819abc4..8b132c8b1513 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -198,6 +198,9 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
struct msm_kms *dpu_kms_init(struct drm_device *dev);
+extern const struct of_device_id dpu_dt_match[];
+extern const struct of_device_id mdp5_dt_match[];
+
struct msm_mdss_funcs {
int (*enable)(struct msm_mdss *mdss);
int (*disable)(struct msm_mdss *mdss);
diff --git a/drivers/gpu/drm/msm/msm_perf.c b/drivers/gpu/drm/msm/msm_perf.c
index 3a27153eef08..3d3da79fec2a 100644
--- a/drivers/gpu/drm/msm/msm_perf.c
+++ b/drivers/gpu/drm/msm/msm_perf.c
@@ -155,9 +155,12 @@ static int perf_open(struct inode *inode, struct file *file)
struct msm_gpu *gpu = priv->gpu;
int ret = 0;
- mutex_lock(&dev->struct_mutex);
+ if (!gpu)
+ return -ENODEV;
- if (perf->open || !gpu) {
+ mutex_lock(&gpu->lock);
+
+ if (perf->open) {
ret = -EBUSY;
goto out;
}
@@ -171,7 +174,7 @@ static int perf_open(struct inode *inode, struct file *file)
perf->next_jiffies = jiffies + SAMPLE_TIME;
out:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index b55398a34fa4..81432ec07012 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -86,7 +86,7 @@ struct msm_rd_state {
struct msm_gem_submit *submit;
/* fifo access is synchronized on the producer side by
- * struct_mutex held by submit code (otherwise we could
+ * gpu->lock held by submit code (otherwise we could
* end up w/ cmds logged in different order than they
* were executed). And read_lock synchronizes the reads
*/
@@ -181,9 +181,12 @@ static int rd_open(struct inode *inode, struct file *file)
uint32_t gpu_id;
int ret = 0;
- mutex_lock(&dev->struct_mutex);
+ if (!gpu)
+ return -ENODEV;
- if (rd->open || !gpu) {
+ mutex_lock(&gpu->lock);
+
+ if (rd->open) {
ret = -EBUSY;
goto out;
}
@@ -200,7 +203,7 @@ static int rd_open(struct inode *inode, struct file *file)
rd_write_section(rd, RD_GPU_ID, &gpu_id, sizeof(gpu_id));
out:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
return ret;
}
@@ -340,11 +343,10 @@ out_unlock:
msm_gem_unlock(&obj->base);
}
-/* called under struct_mutex */
+/* called under gpu->lock */
void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
const char *fmt, ...)
{
- struct drm_device *dev = submit->dev;
struct task_struct *task;
char msg[256];
int i, n;
@@ -355,7 +357,7 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit,
/* writing into fifo is serialized by caller, and
* rd->read_lock is used to serialize the reads
*/
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&submit->gpu->lock));
if (fmt) {
va_list args;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 652b1dedd7c1..3bbf574c3bdc 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -21,11 +21,11 @@ static struct dma_fence *msm_job_run(struct drm_sched_job *job)
pm_runtime_get_sync(&gpu->pdev->dev);
/* TODO move submit path over to using a per-ring lock.. */
- mutex_lock(&gpu->dev->struct_mutex);
+ mutex_lock(&gpu->lock);
msm_gpu_submit(gpu, submit);
- mutex_unlock(&gpu->dev->struct_mutex);
+ mutex_unlock(&gpu->lock);
pm_runtime_put(&gpu->pdev->dev);
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 8b2ed4199284..30359e434c3f 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -1784,6 +1784,13 @@ drm_dp_tps3_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
}
static inline bool
+drm_dp_max_downspread(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
+{
+ return dpcd[DP_DPCD_REV] >= 0x11 ||
+ dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5;
+}
+
+static inline bool
drm_dp_tps4_supported(const u8 dpcd[DP_RECEIVER_CAP_SIZE])
{
return dpcd[DP_DPCD_REV] >= 0x14 &&