Diffstat (limited to 'drivers/ufs/host/ufs-qcom.c')
-rw-r--r--  drivers/ufs/host/ufs-qcom.c | 226
1 file changed, 109 insertions, 117 deletions
diff --git a/drivers/ufs/host/ufs-qcom.c b/drivers/ufs/host/ufs-qcom.c
index 9574fdc2bb0f..3e83dc51d538 100644
--- a/drivers/ufs/host/ufs-qcom.c
+++ b/drivers/ufs/host/ufs-qcom.c
@@ -38,6 +38,9 @@
#define DEEMPHASIS_3_5_dB 0x04
#define NO_DEEMPHASIS 0x0
+#define UFS_ICE_SYNC_RST_SEL BIT(3)
+#define UFS_ICE_SYNC_RST_SW BIT(4)
+
enum {
TSTBUS_UAWM,
TSTBUS_UARM,
@@ -494,12 +497,8 @@ static int ufs_qcom_power_up_sequence(struct ufs_hba *hba)
* If the HS-G5 PHY gear is used, update host_params->hs_rate to Rate-A,
* so that the subsequent power mode change shall stick to Rate-A.
*/
- if (host->hw_ver.major == 0x5) {
- if (host->phy_gear == UFS_HS_G5)
- host_params->hs_rate = PA_HS_MODE_A;
- else
- host_params->hs_rate = PA_HS_MODE_B;
- }
+ if (host->hw_ver.major == 0x5 && host->phy_gear == UFS_HS_G5)
+ host_params->hs_rate = PA_HS_MODE_A;
mode = host_params->hs_rate == PA_HS_MODE_B ? PHY_MODE_UFS_HS_B : PHY_MODE_UFS_HS_A;
@@ -751,11 +750,29 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int err;
+ u32 reg_val;
err = ufs_qcom_enable_lane_clks(host);
if (err)
return err;
+ if (!ufs_qcom_is_link_active(hba) &&
+ host->hw_ver.major == 5 &&
+ host->hw_ver.minor == 0 &&
+ host->hw_ver.step == 0) {
+ ufshcd_writel(hba, UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW, UFS_MEM_ICE_CFG);
+ reg_val = ufshcd_readl(hba, UFS_MEM_ICE_CFG);
+ reg_val &= ~(UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW);
+ /*
+ * The HW documentation does not call for any delay between asserting
+ * and de-asserting the reset, but enforce an arbitrary delay here to
+ * give the flops enough time to settle.
+ */
+ usleep_range(50, 100);
+ ufshcd_writel(hba, reg_val, UFS_MEM_ICE_CFG);
+ ufshcd_readl(hba, UFS_MEM_ICE_CFG);
+ }
+
return ufs_qcom_ice_resume(host);
}
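The reset sequence added to ufs_qcom_resume() above is a plain set-then-clear toggle with a settle delay and a flushing read-back. Below is a minimal userspace model of that pattern; the UFS_ICE_SYNC_RST_SEL/UFS_ICE_SYNC_RST_SW bits and the UFS_MEM_ICE_CFG name come from the patch, while the stand-in register variable and accessors are hypothetical.

/*
 * Minimal userspace model of the ICE sync reset toggle above: assert both
 * reset bits, give the flops time to settle, de-assert them, then issue a
 * dummy read to flush the write. The bit values mirror the new defines;
 * the stand-in register and accessors are hypothetical, not driver API.
 */
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define UFS_ICE_SYNC_RST_SEL (1u << 3)
#define UFS_ICE_SYNC_RST_SW  (1u << 4)

static uint32_t fake_ice_cfg;               /* stands in for UFS_MEM_ICE_CFG */

static void     reg_writel(uint32_t val) { fake_ice_cfg = val; }
static uint32_t reg_readl(void)          { return fake_ice_cfg; }

int main(void)
{
	uint32_t val;

	reg_writel(UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW);   /* assert */
	val = reg_readl();
	val &= ~(UFS_ICE_SYNC_RST_SEL | UFS_ICE_SYNC_RST_SW);
	usleep(100);                      /* mirrors the usleep_range(50, 100) */
	reg_writel(val);                                        /* de-assert */
	(void)reg_readl();                     /* read back, as in the patch */

	printf("ICE cfg after toggle: 0x%x\n", (unsigned int)fake_ice_cfg);
	return 0;
}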
@@ -1096,6 +1113,18 @@ static void ufs_qcom_set_phy_gear(struct ufs_qcom_host *host)
}
}
+static void ufs_qcom_parse_gear_limits(struct ufs_hba *hba)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct ufs_host_params *host_params = &host->host_params;
+ u32 hs_gear_old = host_params->hs_tx_gear;
+
+ ufshcd_parse_gear_limits(hba, host_params);
+ if (host_params->hs_tx_gear != hs_gear_old)
+ host->phy_gear = host_params->hs_tx_gear;
+}
+
static void ufs_qcom_set_host_params(struct ufs_hba *hba)
{
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
@@ -1162,6 +1191,13 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
case PRE_CHANGE:
if (on) {
ufs_qcom_icc_update_bw(host);
+ if (ufs_qcom_is_link_hibern8(hba)) {
+ err = ufs_qcom_enable_lane_clks(host);
+ if (err) {
+ dev_err(hba->dev, "enable lane clks failed, ret=%d\n", err);
+ return err;
+ }
+ }
} else {
if (!ufs_qcom_is_link_active(hba)) {
/* disable device ref_clk */
@@ -1187,6 +1223,9 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
if (ufshcd_is_hs_mode(&hba->pwr_info))
ufs_qcom_dev_ref_clk_ctrl(host, true);
} else {
+ if (ufs_qcom_is_link_hibern8(hba))
+ ufs_qcom_disable_lane_clks(host);
+
ufs_qcom_icc_set_bw(host, ufs_qcom_bw_table[MODE_MIN][0][0].mem_bw,
ufs_qcom_bw_table[MODE_MIN][0][0].cfg_bw);
}
@@ -1337,6 +1376,7 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_advertise_quirks(hba);
ufs_qcom_set_host_params(hba);
ufs_qcom_set_phy_gear(host);
+ ufs_qcom_parse_gear_limits(hba);
err = ufs_qcom_ice_init(host);
if (err)
@@ -1742,7 +1782,7 @@ static void ufs_qcom_dump_testbus(struct ufs_hba *hba)
}
static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
- const char *prefix, enum ufshcd_res id)
+ const char *prefix, void __iomem *base)
{
u32 *regs __free(kfree) = NULL;
size_t pos;
@@ -1755,7 +1795,7 @@ static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
return -ENOMEM;
for (pos = 0; pos < len; pos += 4)
- regs[pos / 4] = readl(hba->res[id].base + offset + pos);
+ regs[pos / 4] = readl(base + offset + pos);
print_hex_dump(KERN_ERR, prefix,
len > 4 ? DUMP_PREFIX_OFFSET : DUMP_PREFIX_NONE,
@@ -1766,30 +1806,34 @@ static int ufs_qcom_dump_regs(struct ufs_hba *hba, size_t offset, size_t len,
static void ufs_qcom_dump_mcq_hci_regs(struct ufs_hba *hba)
{
+ struct ufshcd_mcq_opr_info_t *opr = &hba->mcq_opr[0];
+ void __iomem *mcq_vs_base = hba->mcq_base + UFS_MEM_VS_BASE;
+
struct dump_info {
+ void __iomem *base;
size_t offset;
size_t len;
const char *prefix;
- enum ufshcd_res id;
};
struct dump_info mcq_dumps[] = {
- {0x0, 256 * 4, "MCQ HCI-0 ", RES_MCQ},
- {0x400, 256 * 4, "MCQ HCI-1 ", RES_MCQ},
- {0x0, 5 * 4, "MCQ VS-0 ", RES_MCQ_VS},
- {0x0, 256 * 4, "MCQ SQD-0 ", RES_MCQ_SQD},
- {0x400, 256 * 4, "MCQ SQD-1 ", RES_MCQ_SQD},
- {0x800, 256 * 4, "MCQ SQD-2 ", RES_MCQ_SQD},
- {0xc00, 256 * 4, "MCQ SQD-3 ", RES_MCQ_SQD},
- {0x1000, 256 * 4, "MCQ SQD-4 ", RES_MCQ_SQD},
- {0x1400, 256 * 4, "MCQ SQD-5 ", RES_MCQ_SQD},
- {0x1800, 256 * 4, "MCQ SQD-6 ", RES_MCQ_SQD},
- {0x1c00, 256 * 4, "MCQ SQD-7 ", RES_MCQ_SQD},
+ {hba->mcq_base, 0x0, 256 * 4, "MCQ HCI-0 "},
+ {hba->mcq_base, 0x400, 256 * 4, "MCQ HCI-1 "},
+ {mcq_vs_base, 0x0, 5 * 4, "MCQ VS-0 "},
+ {opr->base, 0x0, 256 * 4, "MCQ SQD-0 "},
+ {opr->base, 0x400, 256 * 4, "MCQ SQD-1 "},
+ {opr->base, 0x800, 256 * 4, "MCQ SQD-2 "},
+ {opr->base, 0xc00, 256 * 4, "MCQ SQD-3 "},
+ {opr->base, 0x1000, 256 * 4, "MCQ SQD-4 "},
+ {opr->base, 0x1400, 256 * 4, "MCQ SQD-5 "},
+ {opr->base, 0x1800, 256 * 4, "MCQ SQD-6 "},
+ {opr->base, 0x1c00, 256 * 4, "MCQ SQD-7 "},
};
for (int i = 0; i < ARRAY_SIZE(mcq_dumps); i++) {
ufs_qcom_dump_regs(hba, mcq_dumps[i].offset, mcq_dumps[i].len,
- mcq_dumps[i].prefix, mcq_dumps[i].id);
+ mcq_dumps[i].prefix, mcq_dumps[i].base);
cond_resched();
}
}
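With the dump helper now taking the iomem base directly, the table above only has to pair a base pointer with an offset, length and prefix. A standalone model of the same table-driven walk over plain buffers follows; the buffers, sizes and entries are fabricated for illustration only.

/*
 * Userspace model of the table-driven register dump above: each entry pairs
 * a base pointer with an offset, length and prefix, and the helper walks the
 * range in 4-byte words. The backing arrays are fabricated stand-ins for the
 * MCQ HCI and SQD register windows.
 */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void dump_regs(const void *base, size_t offset, size_t len,
		      const char *prefix)
{
	const uint32_t *regs = (const uint32_t *)((const uint8_t *)base + offset);

	for (size_t pos = 0; pos < len; pos += 4)
		printf("%s%04zx: %08x\n", prefix, offset + pos,
		       (unsigned int)regs[pos / 4]);
}

int main(void)
{
	static uint32_t fake_hci[256], fake_sqd[512];      /* stand-in windows */

	struct dump_info {
		const void *base;
		size_t offset;
		size_t len;
		const char *prefix;
	} dumps[] = {
		{ fake_hci, 0x0,   8 * 4, "MCQ HCI-0 " },
		{ fake_sqd, 0x0,   8 * 4, "MCQ SQD-0 " },
		{ fake_sqd, 0x400, 8 * 4, "MCQ SQD-1 " },
	};

	for (size_t i = 0; i < sizeof(dumps) / sizeof(dumps[0]); i++)
		dump_regs(dumps[i].base, dumps[i].offset, dumps[i].len,
			  dumps[i].prefix);
	return 0;
}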
@@ -1910,116 +1954,68 @@ static void ufs_qcom_config_scaling_param(struct ufs_hba *hba,
hba->clk_scaling.suspend_on_no_request = true;
}
-/* Resources */
-static const struct ufshcd_res_info ufs_res_info[RES_MAX] = {
- {.name = "ufs_mem",},
- {.name = "mcq",},
- /* Submission Queue DAO */
- {.name = "mcq_sqd",},
- /* Submission Queue Interrupt Status */
- {.name = "mcq_sqis",},
- /* Completion Queue DAO */
- {.name = "mcq_cqd",},
- /* Completion Queue Interrupt Status */
- {.name = "mcq_cqis",},
- /* MCQ vendor specific */
- {.name = "mcq_vs",},
-};
-
static int ufs_qcom_mcq_config_resource(struct ufs_hba *hba)
{
struct platform_device *pdev = to_platform_device(hba->dev);
- struct ufshcd_res_info *res;
- struct resource *res_mem, *res_mcq;
- int i, ret;
-
- memcpy(hba->res, ufs_res_info, sizeof(ufs_res_info));
-
- for (i = 0; i < RES_MAX; i++) {
- res = &hba->res[i];
- res->resource = platform_get_resource_byname(pdev,
- IORESOURCE_MEM,
- res->name);
- if (!res->resource) {
- dev_info(hba->dev, "Resource %s not provided\n", res->name);
- if (i == RES_UFS)
- return -ENODEV;
- continue;
- } else if (i == RES_UFS) {
- res_mem = res->resource;
- res->base = hba->mmio_base;
- continue;
- }
+ struct resource *res;
- res->base = devm_ioremap_resource(hba->dev, res->resource);
- if (IS_ERR(res->base)) {
- dev_err(hba->dev, "Failed to map res %s, err=%d\n",
- res->name, (int)PTR_ERR(res->base));
- ret = PTR_ERR(res->base);
- res->base = NULL;
- return ret;
- }
+ /* Map the MCQ configuration region */
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mcq");
+ if (!res) {
+ dev_err(hba->dev, "MCQ resource not found in device tree\n");
+ return -ENODEV;
}
- /* MCQ resource provided in DT */
- res = &hba->res[RES_MCQ];
- /* Bail if MCQ resource is provided */
- if (res->base)
- goto out;
-
- /* Explicitly allocate MCQ resource from ufs_mem */
- res_mcq = devm_kzalloc(hba->dev, sizeof(*res_mcq), GFP_KERNEL);
- if (!res_mcq)
- return -ENOMEM;
-
- res_mcq->start = res_mem->start +
- MCQ_SQATTR_OFFSET(hba->mcq_capabilities);
- res_mcq->end = res_mcq->start + hba->nr_hw_queues * MCQ_QCFG_SIZE - 1;
- res_mcq->flags = res_mem->flags;
- res_mcq->name = "mcq";
-
- ret = insert_resource(&iomem_resource, res_mcq);
- if (ret) {
- dev_err(hba->dev, "Failed to insert MCQ resource, err=%d\n",
- ret);
- return ret;
- }
-
- res->base = devm_ioremap_resource(hba->dev, res_mcq);
- if (IS_ERR(res->base)) {
- dev_err(hba->dev, "MCQ registers mapping failed, err=%d\n",
- (int)PTR_ERR(res->base));
- ret = PTR_ERR(res->base);
- goto ioremap_err;
+ hba->mcq_base = devm_ioremap_resource(hba->dev, res);
+ if (IS_ERR(hba->mcq_base)) {
+ dev_err(hba->dev, "Failed to map MCQ region: %ld\n",
+ PTR_ERR(hba->mcq_base));
+ return PTR_ERR(hba->mcq_base);
}
-out:
- hba->mcq_base = res->base;
return 0;
-ioremap_err:
- res->base = NULL;
- remove_resource(res_mcq);
- return ret;
}
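The simplified ufs_qcom_mcq_config_resource() leans on the kernel's error-pointer convention: devm_ioremap_resource() hands back either a usable mapping or an errno encoded in the pointer value, which is why the caller only needs IS_ERR()/PTR_ERR(). Below is a tiny standalone model of that convention; the helpers are simplified re-implementations for illustration (the kernel's live in include/linux/err.h), and fake_ioremap_byname() is a made-up stand-in for the lookup-and-map step.

/*
 * Standalone model of the ERR_PTR()/IS_ERR()/PTR_ERR() convention relied on
 * by the mapping code above. Simplified re-implementations, not the kernel's.
 */
#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MAX_ERRNO 4095

static void *err_ptr(long error)     { return (void *)(intptr_t)error; }
static long ptr_err(const void *ptr) { return (long)(intptr_t)ptr; }
static int is_err(const void *ptr)
{
	return (uintptr_t)ptr >= (uintptr_t)-MAX_ERRNO;
}

/* Pretend to look up and map a named MEM resource; only "mcq" exists here. */
static void *fake_ioremap_byname(const char *name)
{
	static uint32_t fake_mcq_regs[64];

	if (strcmp(name, "mcq") == 0)
		return fake_mcq_regs;
	return err_ptr(-ENODEV);
}

int main(void)
{
	void *base = fake_ioremap_byname("mcq_sqd");   /* no longer required */

	if (is_err(base))
		printf("mapping failed, err=%ld\n", ptr_err(base));

	base = fake_ioremap_byname("mcq");
	if (!is_err(base))
		printf("mapped mcq region at %p\n", base);
	return 0;
}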
static int ufs_qcom_op_runtime_config(struct ufs_hba *hba)
{
- struct ufshcd_res_info *mem_res, *sqdao_res;
struct ufshcd_mcq_opr_info_t *opr;
int i;
+ u32 doorbell_offsets[OPR_MAX];
- mem_res = &hba->res[RES_UFS];
- sqdao_res = &hba->res[RES_MCQ_SQD];
+ /*
+ * Configure doorbell address offsets in MCQ configuration registers.
+ * These values are offsets relative to mmio_base (UFS_HCI_BASE).
+ *
+ * Memory Layout:
+ * - mmio_base = UFS_HCI_BASE
+ * - mcq_base = MCQ_CONFIG_BASE = mmio_base + (UFS_QCOM_MCQCAP_QCFGPTR * 0x200)
+ * - Doorbell registers are at: mmio_base + (UFS_QCOM_MCQCAP_QCFGPTR * 0x200) +
+ *   UFS_QCOM_MCQ_SQD_OFFSET, which is also: mcq_base + UFS_QCOM_MCQ_SQD_OFFSET
+ */
- if (!mem_res->base || !sqdao_res->base)
- return -EINVAL;
+ doorbell_offsets[OPR_SQD] = UFS_QCOM_SQD_ADDR_OFFSET;
+ doorbell_offsets[OPR_SQIS] = UFS_QCOM_SQIS_ADDR_OFFSET;
+ doorbell_offsets[OPR_CQD] = UFS_QCOM_CQD_ADDR_OFFSET;
+ doorbell_offsets[OPR_CQIS] = UFS_QCOM_CQIS_ADDR_OFFSET;
+ /*
+ * Configure MCQ operation registers.
+ *
+ * The doorbell registers are physically located within the MCQ region:
+ * - doorbell_physical_addr = mmio_base + doorbell_offset
+ * - doorbell_physical_addr = mcq_base + (doorbell_offset - MCQ_CONFIG_OFFSET)
+ */
for (i = 0; i < OPR_MAX; i++) {
opr = &hba->mcq_opr[i];
- opr->offset = sqdao_res->resource->start -
- mem_res->resource->start + 0x40 * i;
- opr->stride = 0x100;
- opr->base = sqdao_res->base + 0x40 * i;
+ opr->offset = doorbell_offsets[i]; /* Offset relative to mmio_base */
+ opr->stride = UFS_QCOM_MCQ_STRIDE; /* 256 bytes between queues */
+
+ /*
+ * Calculate the actual doorbell base address within MCQ region:
+ * base = mcq_base + (doorbell_offset - MCQ_CONFIG_OFFSET)
+ */
+ opr->base = hba->mcq_base + (opr->offset - UFS_QCOM_MCQ_CONFIG_OFFSET);
}
return 0;
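The two comment blocks above describe the same doorbell location twice: once as an offset from mmio_base (stored in opr->offset) and once as an address derived from mcq_base (stored in opr->base). The sketch below works through that arithmetic with placeholder numbers; the real UFS_QCOM_*_ADDR_OFFSET, UFS_QCOM_MCQ_CONFIG_OFFSET and UFS_QCOM_MCQ_STRIDE values are not part of this diff, only the relationship between them is taken from the patch.

/*
 * Worked model of the doorbell offset arithmetic above. All numeric values
 * are placeholders; only "base = mcq_base + (offset - MCQ_CONFIG_OFFSET)"
 * and the 0x100 per-queue stride come from the hunk.
 */
#include <stdint.h>
#include <stdio.h>

#define FAKE_MCQ_CONFIG_OFFSET 0x8000u   /* mcq_base - mmio_base (placeholder) */
#define FAKE_SQD_ADDR_OFFSET   0x9000u   /* SQ doorbells, rel. to mmio_base    */
#define FAKE_MCQ_STRIDE        0x100u    /* 256 bytes between queues           */

int main(void)
{
	uintptr_t mmio_base = 0x1d84000;              /* placeholder HCI base */
	uintptr_t mcq_base = mmio_base + FAKE_MCQ_CONFIG_OFFSET;

	/* opr->offset stays relative to mmio_base ... */
	uint32_t offset = FAKE_SQD_ADDR_OFFSET;
	/* ... while opr->base reaches the same location via mcq_base. */
	uintptr_t base = mcq_base + (offset - FAKE_MCQ_CONFIG_OFFSET);

	for (unsigned int q = 0; q < 4; q++)
		printf("SQD doorbell, queue %u: mmio_base + %#x == %#lx\n",
		       q, offset + q * FAKE_MCQ_STRIDE,
		       (unsigned long)(base + q * FAKE_MCQ_STRIDE));
	return 0;
}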
@@ -2034,12 +2030,8 @@ static int ufs_qcom_get_hba_mac(struct ufs_hba *hba)
static int ufs_qcom_get_outstanding_cqs(struct ufs_hba *hba,
unsigned long *ocqs)
{
- struct ufshcd_res_info *mcq_vs_res = &hba->res[RES_MCQ_VS];
-
- if (!mcq_vs_res->base)
- return -EINVAL;
-
- *ocqs = readl(mcq_vs_res->base + UFS_MEM_CQIS_VS);
+ /* Read from MCQ vendor-specific register in MCQ region */
+ *ocqs = readl(hba->mcq_base + UFS_MEM_CQIS_VS);
return 0;
}