Diffstat (limited to 'drivers/ufs/host/ufs-mediatek.c')
-rw-r--r-- | drivers/ufs/host/ufs-mediatek.c | 354
1 file changed, 296 insertions(+), 58 deletions(-)
diff --git a/drivers/ufs/host/ufs-mediatek.c b/drivers/ufs/host/ufs-mediatek.c
index 86ae73b89d4d..758a393a9de1 100644
--- a/drivers/ufs/host/ufs-mediatek.c
+++ b/drivers/ufs/host/ufs-mediatek.c
@@ -29,6 +29,7 @@
 #include "ufs-mediatek-sip.h"
 
 static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
+static void _ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up);
 
 #define CREATE_TRACE_POINTS
 #include "ufs-mediatek-trace.h"
@@ -415,7 +416,7 @@ static void ufs_mtk_dbg_sel(struct ufs_hba *hba)
 	}
 }
 
-static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
+static int ufs_mtk_wait_idle_state(struct ufs_hba *hba,
 				    unsigned long retry_ms)
 {
 	u64 timeout, time_checked;
@@ -451,8 +452,12 @@ static void ufs_mtk_wait_idle_state(struct ufs_hba *hba,
 			break;
 	} while (time_checked < timeout);
 
-	if (wait_idle && sm != VS_HCE_BASE)
+	if (wait_idle && sm != VS_HCE_BASE) {
 		dev_info(hba->dev, "wait idle tmo: 0x%x\n", val);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
 }
 
 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state,
@@ -798,8 +803,14 @@ static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on,
 				clk_pwr_off = true;
 		}
 
-		if (clk_pwr_off)
+		if (clk_pwr_off) {
 			ufs_mtk_pwr_ctrl(hba, false);
+		} else {
+			dev_warn(hba->dev, "Clock is not turned off, hba->ahit = 0x%x, AHIT = 0x%x\n",
+				 hba->ahit,
+				 ufshcd_readl(hba,
+					      REG_AUTO_HIBERNATE_IDLE_TIMER));
+		}
 		ufs_mtk_mcq_disable_irq(hba);
 	} else if (on && status == POST_CHANGE) {
 		ufs_mtk_pwr_ctrl(hba, true);
@@ -818,7 +829,7 @@ static u32 ufs_mtk_mcq_get_irq(struct ufs_hba *hba, unsigned int cpu)
 	unsigned int q_index;
 
 	q_index = map->mq_map[cpu];
-	if (q_index > nr) {
+	if (q_index >= nr) {
 		dev_err(hba->dev, "hwq index %d exceed %d\n",
 			q_index, nr);
 		return MTK_MCQ_INVALID_IRQ;
@@ -1018,7 +1029,7 @@ static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba)
 	struct arm_smccc_res res;
 	int err, ver;
 
-	if (hba->vreg_info.vcc)
+	if (info->vcc)
 		return 0;
 
 	if (of_property_read_bool(np, "mediatek,ufs-vcc-by-num")) {
@@ -1075,6 +1086,80 @@ static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba)
 	}
 }
 
+static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+{
+	unsigned long flags;
+	u32 ah_ms = 10;
+	u32 ah_scale, ah_timer;
+	u32 scale_us[] = {1, 10, 100, 1000, 10000, 100000};
+
+	if (ufshcd_is_clkgating_allowed(hba)) {
+		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) {
+			ah_scale = FIELD_GET(UFSHCI_AHIBERN8_SCALE_MASK,
+					     hba->ahit);
+			ah_timer = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
+					     hba->ahit);
+			if (ah_scale <= 5)
+				ah_ms = ah_timer * scale_us[ah_scale] / 1000;
+		}
+
+		spin_lock_irqsave(hba->host->host_lock, flags);
+		hba->clk_gating.delay_ms = max(ah_ms, 10U);
+		spin_unlock_irqrestore(hba->host->host_lock, flags);
+	}
+}
+
+/* Convert microseconds to Auto-Hibernate Idle Timer register value */
+static u32 ufs_mtk_us_to_ahit(unsigned int timer)
+{
+	unsigned int scale;
+
+	for (scale = 0; timer > UFSHCI_AHIBERN8_TIMER_MASK; ++scale)
+		timer /= UFSHCI_AHIBERN8_SCALE_FACTOR;
+
+	return FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, timer) |
+	       FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, scale);
+}
+
+static void ufs_mtk_fix_ahit(struct ufs_hba *hba)
+{
+	unsigned int us;
+
+	if (ufshcd_is_auto_hibern8_supported(hba)) {
+		switch (hba->dev_info.wmanufacturerid) {
+		case UFS_VENDOR_SAMSUNG:
+			/* configure auto-hibern8 timer to 3.5 ms */
+			us = 3500;
+			break;
+
+		case UFS_VENDOR_MICRON:
+			/* configure auto-hibern8 timer to 2 ms */
+			us = 2000;
+			break;
+
+		default:
+			/* configure auto-hibern8 timer to 1 ms */
+			us = 1000;
+			break;
+		}
+
+		hba->ahit = ufs_mtk_us_to_ahit(us);
+	}
+
+	ufs_mtk_setup_clk_gating(hba);
+}
+
+static void ufs_mtk_fix_clock_scaling(struct ufs_hba *hba)
+{
+	/* UFS version is below 4.0, clock scaling is not necessary */
+	if ((hba->dev_info.wspecversion < 0x0400) &&
+	    ufs_mtk_is_clk_scale_ready(hba)) {
+		hba->caps &= ~UFSHCD_CAP_CLK_SCALING;
+
+		_ufs_mtk_clk_scale(hba, false);
+	}
+}
+
 static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba)
 {
 	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
@@ -1240,6 +1325,10 @@ static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba,
 	    dev_req_params->gear_rx < UFS_HS_G4)
 		return false;
 
+	if (dev_req_params->pwr_tx == SLOW_MODE ||
+	    dev_req_params->pwr_rx == SLOW_MODE)
+		return false;
+
 	return true;
 }
 
@@ -1255,6 +1344,10 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
 	host_params.hs_rx_gear = UFS_HS_G5;
 	host_params.hs_tx_gear = UFS_HS_G5;
 
+	if (dev_max_params->pwr_rx == SLOW_MODE ||
+	    dev_max_params->pwr_tx == SLOW_MODE)
+		host_params.desired_working_mode = UFS_PWM_MODE;
+
 	ret = ufshcd_negotiate_pwr_params(&host_params, dev_max_params, dev_req_params);
 	if (ret) {
 		pr_info("%s: failed to determine capabilities\n",
@@ -1278,6 +1371,28 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
 		ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE),
 			       PA_NO_ADAPT);
 
+		if (!(hba->quirks & UFSHCD_QUIRK_SKIP_DEF_UNIPRO_TIMEOUT_SETTING)) {
+			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA0),
+				       DL_FC0ProtectionTimeOutVal_Default);
+			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA1),
+				       DL_TC0ReplayTimeOutVal_Default);
+			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA2),
+				       DL_AFC0ReqTimeOutVal_Default);
+			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA3),
+				       DL_FC1ProtectionTimeOutVal_Default);
+			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA4),
+				       DL_TC1ReplayTimeOutVal_Default);
+			ufshcd_dme_set(hba, UIC_ARG_MIB(PA_PWRMODEUSERDATA5),
+				       DL_AFC1ReqTimeOutVal_Default);
+
+			ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalFC0ProtectionTimeOutVal),
+				       DL_FC0ProtectionTimeOutVal_Default);
+			ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalTC0ReplayTimeOutVal),
+				       DL_TC0ReplayTimeOutVal_Default);
+			ufshcd_dme_set(hba, UIC_ARG_MIB(DME_LocalAFC0ReqTimeOutVal),
+				       DL_AFC0ReqTimeOutVal_Default);
+		}
+
 		ret = ufshcd_uic_change_pwr_mode(hba,
 					FASTAUTO_MODE << 4 | FASTAUTO_MODE);
 
@@ -1287,10 +1402,59 @@ static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba,
 		}
 	}
 
-	if (host->hw_ver.major >= 3) {
+	/* if already configured to the requested pwr_mode, skip adapt */
+	if (dev_req_params->gear_rx == hba->pwr_info.gear_rx &&
+	    dev_req_params->gear_tx == hba->pwr_info.gear_tx &&
+	    dev_req_params->lane_rx == hba->pwr_info.lane_rx &&
+	    dev_req_params->lane_tx == hba->pwr_info.lane_tx &&
+	    dev_req_params->pwr_rx == hba->pwr_info.pwr_rx &&
+	    dev_req_params->pwr_tx == hba->pwr_info.pwr_tx &&
+	    dev_req_params->hs_rate == hba->pwr_info.hs_rate) {
+		return ret;
+	}
+
+	if (dev_req_params->pwr_rx == FAST_MODE ||
+	    dev_req_params->pwr_rx == FASTAUTO_MODE) {
+		if (host->hw_ver.major >= 3) {
+			ret = ufshcd_dme_configure_adapt(hba,
+						dev_req_params->gear_tx,
+						PA_INITIAL_ADAPT);
+		} else {
+			ret = ufshcd_dme_configure_adapt(hba,
+						dev_req_params->gear_tx,
+						PA_NO_ADAPT);
+		}
+	} else {
 		ret = ufshcd_dme_configure_adapt(hba,
-				   dev_req_params->gear_tx,
-				   PA_INITIAL_ADAPT);
+					dev_req_params->gear_tx,
+					PA_NO_ADAPT);
+	}
+
+	return ret;
+}
+
+static int ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
+{
+	int ret;
+
+	/* disable auto-hibern8 */
+	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
+
+	/* wait host return to idle state when auto-hibern8 off */
+	ret = ufs_mtk_wait_idle_state(hba, 5);
+	if (ret)
+		goto out;
+
+	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
+
+out:
+	if (ret) {
+		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
+
+		ufshcd_force_error_recovery(hba);
+
+		/* trigger error handler and break suspend */
+		ret = -EBUSY;
 	}
 
 	return ret;
@@ -1302,13 +1466,20 @@ static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba,
 				     struct ufs_pa_layer_attr *dev_req_params)
 {
 	int ret = 0;
+	static u32 reg;
 
 	switch (stage) {
 	case PRE_CHANGE:
+		if (ufshcd_is_auto_hibern8_supported(hba)) {
+			reg = ufshcd_readl(hba, REG_AUTO_HIBERNATE_IDLE_TIMER);
+			ufs_mtk_auto_hibern8_disable(hba);
+		}
 		ret = ufs_mtk_pre_pwr_change(hba, dev_max_params,
 					     dev_req_params);
 		break;
 	case POST_CHANGE:
+		if (ufshcd_is_auto_hibern8_supported(hba))
+			ufshcd_writel(hba, reg, REG_AUTO_HIBERNATE_IDLE_TIMER);
 		break;
 	default:
 		ret = -EINVAL;
@@ -1342,6 +1513,7 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
 {
 	int ret;
 	u32 tmp;
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 
 	ufs_mtk_get_controller_version(hba);
 
@@ -1367,34 +1539,33 @@ static int ufs_mtk_pre_link(struct ufs_hba *hba)
 	ret = ufshcd_dme_set(hba,
 			     UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp);
 
+	/* Enable the 1144 functions setting */
+	if (host->ip_ver == IP_VER_MT6989) {
+		ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
+		if (ret)
+			return ret;
+
+		tmp |= 0x10;
+		ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
+	}
+
 	return ret;
 }
 
-static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba)
+static void ufs_mtk_post_link(struct ufs_hba *hba)
 {
-	u32 ah_ms;
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
+	u32 tmp;
 
-	if (ufshcd_is_clkgating_allowed(hba)) {
-		if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit)
-			ah_ms = FIELD_GET(UFSHCI_AHIBERN8_TIMER_MASK,
-					  hba->ahit);
-		else
-			ah_ms = 10;
-		ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5);
+	/* fix device PA_INIT no adapt */
+	if (host->ip_ver >= IP_VER_MT6899) {
+		ufshcd_dme_get(hba, UIC_ARG_MIB(VS_DEBUGOMC), &tmp);
+		tmp |= 0x100;
+		ufshcd_dme_set(hba, UIC_ARG_MIB(VS_DEBUGOMC), tmp);
 	}
-}
 
-static void ufs_mtk_post_link(struct ufs_hba *hba)
-{
 	/* enable unipro clock gating feature */
 	ufs_mtk_cfg_unipro_cg(hba, true);
-
-	/* will be configured during probe hba */
-	if (ufshcd_is_auto_hibern8_supported(hba))
-		hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
-			    FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);
-
-	ufs_mtk_setup_clk_gating(hba);
 }
 
 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba,
@@ -1421,11 +1592,11 @@ static int ufs_mtk_device_reset(struct ufs_hba *hba)
 {
 	struct arm_smccc_res res;
 
-	/* disable hba before device reset */
-	ufshcd_hba_stop(hba);
-
 	ufs_mtk_device_reset_ctrl(0, res);
 
+	/* disable hba in middle of device reset */
+	ufshcd_hba_stop(hba);
+
 	/*
 	 * The reset signal is active low. UFS devices shall detect
 	 * more than or equal to 1us of positive or negative RST_n
@@ -1462,7 +1633,11 @@ static int ufs_mtk_link_set_hpm(struct ufs_hba *hba)
 		return err;
 
 	/* Check link state to make sure exit h8 success */
-	ufs_mtk_wait_idle_state(hba, 5);
+	err = ufs_mtk_wait_idle_state(hba, 5);
+	if (err) {
+		dev_warn(hba->dev, "wait idle fail, err=%d\n", err);
+		return err;
+	}
 	err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
 	if (err) {
 		dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
@@ -1507,6 +1682,9 @@ static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm)
 {
 	struct ufs_vreg *vccqx = NULL;
 
+	if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2)
+		return;
+
 	if (hba->vreg_info.vccq)
 		vccqx = hba->vreg_info.vccq;
 	else
@@ -1561,21 +1739,6 @@ static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm)
 	}
 }
 
-static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba)
-{
-	int ret;
-
-	/* disable auto-hibern8 */
-	ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER);
-
-	/* wait host return to idle state when auto-hibern8 off */
-	ufs_mtk_wait_idle_state(hba, 5);
-
-	ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
-	if (ret)
-		dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret);
-}
-
 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
 	enum ufs_notify_change_status status)
 {
@@ -1584,7 +1747,7 @@ static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op,
 
 	if (status == PRE_CHANGE) {
 		if (ufshcd_is_auto_hibern8_supported(hba))
-			ufs_mtk_auto_hibern8_disable(hba);
+			return ufs_mtk_auto_hibern8_disable(hba);
 
 		return 0;
 	}
@@ -1642,8 +1805,21 @@ static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
 	}
 
 	return 0;
+
 fail:
-	return ufshcd_link_recovery(hba);
+	/*
+	 * Check if the platform (parent) device has resumed, and ensure that
+	 * power, clock, and MTCMOS are all turned on.
+	 */
+	err = ufshcd_link_recovery(hba);
+	if (err) {
+		dev_err(hba->dev, "Device PM: req=%d, status:%d, err:%d\n",
+			hba->dev->power.request,
+			hba->dev->power.runtime_status,
+			hba->dev->power.runtime_error);
+	}
+
+	return 0; /* Cannot return a failure, otherwise, the I/O will hang. */
 }
 
 static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba)
@@ -1726,6 +1902,8 @@ static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba)
 
 	ufs_mtk_vreg_fix_vcc(hba);
 	ufs_mtk_vreg_fix_vccqx(hba);
+	ufs_mtk_fix_ahit(hba);
+	ufs_mtk_fix_clock_scaling(hba);
 }
 
 static void ufs_mtk_event_notify(struct ufs_hba *hba,
@@ -2012,6 +2190,7 @@ static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba)
 			return ret;
 		}
 	}
+	host->is_mcq_intr_enabled = true;
 
 	return 0;
 }
@@ -2095,10 +2274,12 @@ static const struct ufs_hba_variant_ops ufs_hba_mtk_vops = {
 static int ufs_mtk_probe(struct platform_device *pdev)
 {
 	int err;
-	struct device *dev = &pdev->dev;
-	struct device_node *reset_node;
-	struct platform_device *reset_pdev;
+	struct device *dev = &pdev->dev, *phy_dev = NULL;
+	struct device_node *reset_node, *phy_node = NULL;
+	struct platform_device *reset_pdev, *phy_pdev = NULL;
 	struct device_link *link;
+	struct ufs_hba *hba;
+	struct ufs_mtk_host *host;
 
 	reset_node = of_find_compatible_node(NULL, NULL,
 					     "ti,syscon-reset");
@@ -2125,13 +2306,51 @@ static int ufs_mtk_probe(struct platform_device *pdev)
 	}
 
 skip_reset:
+	/* find phy node */
+	phy_node = of_parse_phandle(dev->of_node, "phys", 0);
+
+	if (phy_node) {
+		phy_pdev = of_find_device_by_node(phy_node);
+		if (!phy_pdev)
+			goto skip_phy;
+		phy_dev = &phy_pdev->dev;
+
+		pm_runtime_set_active(phy_dev);
+		pm_runtime_enable(phy_dev);
+		pm_runtime_get_sync(phy_dev);
+
+		put_device(phy_dev);
+		dev_info(dev, "phys node found\n");
+	} else {
+		dev_notice(dev, "phys node not found\n");
+	}
+
+skip_phy:
 	/* perform generic probe */
 	err = ufshcd_pltfrm_init(pdev, &ufs_hba_mtk_vops);
-
-out:
-	if (err)
+	if (err) {
 		dev_err(dev, "probe failed %d\n", err);
+		goto out;
+	}
+
+	hba = platform_get_drvdata(pdev);
+	if (!hba)
+		goto out;
+
+	if (phy_node && phy_dev) {
+		host = ufshcd_get_variant(hba);
+		host->phy_dev = phy_dev;
+	}
+
+	/*
+	 * Because the default power setting of VSx (the upper layer of
+	 * VCCQ/VCCQ2) is HWLP, we need to prevent VCCQ/VCCQ2 from
+	 * entering LPM.
+	 */
+	ufs_mtk_dev_vreg_set_lpm(hba, false);
 
+out:
+	of_node_put(phy_node);
 	of_node_put(reset_node);
 	return err;
 }
@@ -2156,27 +2375,38 @@ static int ufs_mtk_system_suspend(struct device *dev)
 
 	ret = ufshcd_system_suspend(dev);
 	if (ret)
-		return ret;
+		goto out;
+
+	if (pm_runtime_suspended(hba->dev))
+		goto out;
 
 	ufs_mtk_dev_vreg_set_lpm(hba, true);
 
 	if (ufs_mtk_is_rtff_mtcmos(hba))
 		ufs_mtk_mtcmos_ctrl(false, res);
 
-	return 0;
+out:
+	return ret;
 }
 
 static int ufs_mtk_system_resume(struct device *dev)
 {
+	int ret = 0;
 	struct ufs_hba *hba = dev_get_drvdata(dev);
 	struct arm_smccc_res res;
 
-	ufs_mtk_dev_vreg_set_lpm(hba, false);
+	if (pm_runtime_suspended(hba->dev))
+		goto out;
 
 	if (ufs_mtk_is_rtff_mtcmos(hba))
 		ufs_mtk_mtcmos_ctrl(true, res);
 
-	return ufshcd_system_resume(dev);
+	ufs_mtk_dev_vreg_set_lpm(hba, false);
+
+out:
+	ret = ufshcd_system_resume(dev);
+
+	return ret;
 }
 #endif
 
@@ -2184,6 +2414,7 @@ static int ufs_mtk_system_resume(struct device *dev)
 static int ufs_mtk_runtime_suspend(struct device *dev)
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 	struct arm_smccc_res res;
 	int ret = 0;
 
@@ -2196,17 +2427,24 @@ static int ufs_mtk_runtime_suspend(struct device *dev)
 	if (ufs_mtk_is_rtff_mtcmos(hba))
 		ufs_mtk_mtcmos_ctrl(false, res);
 
+	if (host->phy_dev)
+		pm_runtime_put_sync(host->phy_dev);
+
 	return 0;
 }
 
 static int ufs_mtk_runtime_resume(struct device *dev)
 {
 	struct ufs_hba *hba = dev_get_drvdata(dev);
+	struct ufs_mtk_host *host = ufshcd_get_variant(hba);
 	struct arm_smccc_res res;
 
 	if (ufs_mtk_is_rtff_mtcmos(hba))
 		ufs_mtk_mtcmos_ctrl(true, res);
 
+	if (host->phy_dev)
+		pm_runtime_get_sync(host->phy_dev);
+
 	ufs_mtk_dev_vreg_set_lpm(hba, false);
 
 	return ufshcd_runtime_resume(dev);
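Note on the auto-hibern8 timer encoding added above: ufs_mtk_us_to_ahit() packs a microsecond value into the AHIT register's 10-bit timer field plus a scale field, where each scale step multiplies the timer unit by 10. The standalone sketch below mirrors that conversion for the vendor timeouts chosen in ufs_mtk_fix_ahit(); it assumes the UFSHCI AHIT layout (timer in bits 9:0, scale in bits 12:10) and is an illustration, not the driver code.

/*
 * Minimal userspace sketch of the microseconds -> AHIT encoding used by
 * the patch's ufs_mtk_us_to_ahit(). Assumes the UFSHCI AHIT layout:
 * timer in bits 9:0 (max 1023), scale in bits 12:10, each scale step
 * multiplying the timer unit by 10 (1us, 10us, 100us, ...).
 */
#include <stdio.h>

#define AHIT_TIMER_MAX		1023u	/* 10-bit timer field */
#define AHIT_SCALE_SHIFT	10
#define AHIT_SCALE_FACTOR	10u

static unsigned int us_to_ahit(unsigned int us)
{
	unsigned int scale = 0;

	/* Divide down until the value fits the 10-bit timer field. */
	while (us > AHIT_TIMER_MAX) {
		us /= AHIT_SCALE_FACTOR;
		scale++;
	}

	return (scale << AHIT_SCALE_SHIFT) | us;
}

int main(void)
{
	/* Vendor timeouts picked by ufs_mtk_fix_ahit() in the patch. */
	unsigned int samples[] = { 3500, 2000, 1000 };

	for (unsigned int i = 0; i < 3; i++)
		printf("%u us -> AHIT 0x%x\n", samples[i], us_to_ahit(samples[i]));

	return 0;
}

For example, 3500 us does not fit the 10-bit timer, so it becomes timer = 350 with scale = 1 (10 us units), i.e. an AHIT value of 0x55e; 1000 us fits directly as 0x3e8 with scale = 0.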