Lines Matching full:hba (all hits below are in drivers/ufs/host/ufs-mediatek.c)
31 static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq);
95 static bool ufs_mtk_is_boost_crypt_enabled(struct ufs_hba *hba) in ufs_mtk_is_boost_crypt_enabled() argument
97 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_is_boost_crypt_enabled()
102 static bool ufs_mtk_is_va09_supported(struct ufs_hba *hba) in ufs_mtk_is_va09_supported() argument
104 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_is_va09_supported()
109 static bool ufs_mtk_is_broken_vcc(struct ufs_hba *hba) in ufs_mtk_is_broken_vcc() argument
111 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_is_broken_vcc()
116 static bool ufs_mtk_is_pmc_via_fastauto(struct ufs_hba *hba) in ufs_mtk_is_pmc_via_fastauto() argument
118 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_is_pmc_via_fastauto()
123 static bool ufs_mtk_is_tx_skew_fix(struct ufs_hba *hba) in ufs_mtk_is_tx_skew_fix() argument
125 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_is_tx_skew_fix()
130 static bool ufs_mtk_is_rtff_mtcmos(struct ufs_hba *hba) in ufs_mtk_is_rtff_mtcmos() argument
132 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_is_rtff_mtcmos()
137 static bool ufs_mtk_is_allow_vccqx_lpm(struct ufs_hba *hba) in ufs_mtk_is_allow_vccqx_lpm() argument
139 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_is_allow_vccqx_lpm()
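The predicates above, ufs_mtk_is_boost_crypt_enabled() through ufs_mtk_is_allow_vccqx_lpm(), all share one shape: fetch the vendor-private data stored by ufshcd_set_variant() and test a capability flag. A minimal sketch of that pattern, with UFS_MTK_CAP_EXAMPLE as a placeholder for whichever UFS_MTK_CAP_* bit the real helper tests:

    /*
     * Capability-predicate pattern shared by the ufs_mtk_is_*()
     * helpers above; UFS_MTK_CAP_EXAMPLE is a placeholder, not a
     * real flag.
     */
    static bool ufs_mtk_is_example_cap(struct ufs_hba *hba)
    {
            struct ufs_mtk_host *host = ufshcd_get_variant(hba);

            return !!(host->caps & UFS_MTK_CAP_EXAMPLE);
    }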
144 static void ufs_mtk_cfg_unipro_cg(struct ufs_hba *hba, bool enable) in ufs_mtk_cfg_unipro_cg() argument
149 ufshcd_dme_get(hba, in ufs_mtk_cfg_unipro_cg()
155 ufshcd_dme_set(hba, in ufs_mtk_cfg_unipro_cg()
158 ufshcd_dme_get(hba, in ufs_mtk_cfg_unipro_cg()
161 ufshcd_dme_set(hba, in ufs_mtk_cfg_unipro_cg()
164 ufshcd_dme_get(hba, in ufs_mtk_cfg_unipro_cg()
169 ufshcd_dme_set(hba, in ufs_mtk_cfg_unipro_cg()
172 ufshcd_dme_get(hba, in ufs_mtk_cfg_unipro_cg()
175 ufshcd_dme_set(hba, in ufs_mtk_cfg_unipro_cg()
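ufs_mtk_cfg_unipro_cg() is a run of ufshcd_dme_get()/ufshcd_dme_set() pairs: each pair is a read-modify-write of one vendor UniPro attribute, setting or clearing clock-gating bits according to the enable flag. One such cycle, sketched with placeholder attribute and bit names (VS_EXAMPLE_ATTR, EXAMPLE_CG_BIT):

    static void cfg_one_cg_attr(struct ufs_hba *hba, bool enable)
    {
            u32 tmp;

            /* Read the current attribute value over the DME interface. */
            ufshcd_dme_get(hba, UIC_ARG_MIB(VS_EXAMPLE_ATTR), &tmp);
            if (enable)
                    tmp |= EXAMPLE_CG_BIT;
            else
                    tmp &= ~EXAMPLE_CG_BIT;
            /* Write the modified value back. */
            ufshcd_dme_set(hba, UIC_ARG_MIB(VS_EXAMPLE_ATTR), tmp);
    }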
180 static void ufs_mtk_crypto_enable(struct ufs_hba *hba) in ufs_mtk_crypto_enable() argument
186 dev_info(hba->dev, "%s: crypto enable failed, err: %lu\n", in ufs_mtk_crypto_enable()
188 hba->caps &= ~UFSHCD_CAP_CRYPTO; in ufs_mtk_crypto_enable()
192 static void ufs_mtk_host_reset(struct ufs_hba *hba) in ufs_mtk_host_reset() argument
194 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_host_reset()
214 static void ufs_mtk_init_reset_control(struct ufs_hba *hba, in ufs_mtk_init_reset_control() argument
218 *rc = devm_reset_control_get(hba->dev, str); in ufs_mtk_init_reset_control()
220 dev_info(hba->dev, "Failed to get reset control %s: %ld\n", in ufs_mtk_init_reset_control()
226 static void ufs_mtk_init_reset(struct ufs_hba *hba) in ufs_mtk_init_reset() argument
228 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_reset()
230 ufs_mtk_init_reset_control(hba, &host->hci_reset, in ufs_mtk_init_reset()
232 ufs_mtk_init_reset_control(hba, &host->unipro_reset, in ufs_mtk_init_reset()
234 ufs_mtk_init_reset_control(hba, &host->crypto_reset, in ufs_mtk_init_reset()
236 ufs_mtk_init_reset_control(hba, &host->mphy_reset, in ufs_mtk_init_reset()
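ufs_mtk_init_reset() acquires four named reset lines (hci, unipro, crypto, mphy) through a single helper. A reconstruction of that helper consistent with the fragments at file lines 214-222 (the exact upstream body may differ slightly):

    static void ufs_mtk_init_reset_control(struct ufs_hba *hba,
                                           struct reset_control **rc,
                                           char *str)
    {
            *rc = devm_reset_control_get(hba->dev, str);
            if (IS_ERR(*rc)) {
                    dev_info(hba->dev, "Failed to get reset control %s: %ld\n",
                             str, PTR_ERR(*rc));
                    *rc = NULL;
            }
    }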
240 static int ufs_mtk_hce_enable_notify(struct ufs_hba *hba, in ufs_mtk_hce_enable_notify() argument
243 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_hce_enable_notify()
247 hba->vps->hba_enable_delay_us = 0; in ufs_mtk_hce_enable_notify()
249 hba->vps->hba_enable_delay_us = 600; in ufs_mtk_hce_enable_notify()
250 ufs_mtk_host_reset(hba); in ufs_mtk_hce_enable_notify()
253 if (hba->caps & UFSHCD_CAP_CRYPTO) in ufs_mtk_hce_enable_notify()
254 ufs_mtk_crypto_enable(hba); in ufs_mtk_hce_enable_notify()
257 ufshcd_writel(hba, 0, in ufs_mtk_hce_enable_notify()
259 hba->capabilities &= ~MASK_AUTO_HIBERN8_SUPPORT; in ufs_mtk_hce_enable_notify()
260 hba->ahit = 0; in ufs_mtk_hce_enable_notify()
267 ufshcd_writel(hba, in ufs_mtk_hce_enable_notify()
268 ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) | 0x80, in ufs_mtk_hce_enable_notify()
275 static int ufs_mtk_bind_mphy(struct ufs_hba *hba) in ufs_mtk_bind_mphy() argument
277 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_bind_mphy()
278 struct device *dev = hba->dev; in ufs_mtk_bind_mphy()
313 static int ufs_mtk_setup_ref_clk(struct ufs_hba *hba, bool on) in ufs_mtk_setup_ref_clk() argument
315 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_setup_ref_clk()
326 ufshcd_writel(hba, REFCLK_REQUEST, REG_UFS_REFCLK_CTRL); in ufs_mtk_setup_ref_clk()
329 ufshcd_writel(hba, REFCLK_RELEASE, REG_UFS_REFCLK_CTRL); in ufs_mtk_setup_ref_clk()
336 value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL); in ufs_mtk_setup_ref_clk()
345 dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value); in ufs_mtk_setup_ref_clk()
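ufs_mtk_setup_ref_clk() is a request/ack handshake on REG_UFS_REFCLK_CTRL: write REFCLK_REQUEST (or REFCLK_RELEASE), then poll the same register until the controller acknowledges, with the dev_err() at file line 345 on timeout. A sketch of the polling half, assuming an ack bit named REFCLK_ACK and an arbitrary 300 us budget:

    static int ufs_mtk_wait_refclk_ack(struct ufs_hba *hba)
    {
            ktime_t timeout = ktime_add_us(ktime_get(), 300);
            u32 value;

            do {
                    value = ufshcd_readl(hba, REG_UFS_REFCLK_CTRL);
                    if (value & REFCLK_ACK)     /* assumed bit name */
                            return 0;
                    udelay(10);
            } while (ktime_before(ktime_get(), timeout));

            dev_err(hba->dev, "missing ack of refclk req, reg: 0x%x\n", value);
            return -ETIMEDOUT;
    }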
361 static void ufs_mtk_setup_ref_clk_wait_us(struct ufs_hba *hba, in ufs_mtk_setup_ref_clk_wait_us() argument
364 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_setup_ref_clk_wait_us()
366 if (hba->dev_info.clk_gating_wait_us) { in ufs_mtk_setup_ref_clk_wait_us()
368 hba->dev_info.clk_gating_wait_us; in ufs_mtk_setup_ref_clk_wait_us()
376 static void ufs_mtk_dbg_sel(struct ufs_hba *hba) in ufs_mtk_dbg_sel() argument
378 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_dbg_sel()
381 ufshcd_writel(hba, 0x820820, REG_UFS_DEBUG_SEL); in ufs_mtk_dbg_sel()
382 ufshcd_writel(hba, 0x0, REG_UFS_DEBUG_SEL_B0); in ufs_mtk_dbg_sel()
383 ufshcd_writel(hba, 0x55555555, REG_UFS_DEBUG_SEL_B1); in ufs_mtk_dbg_sel()
384 ufshcd_writel(hba, 0xaaaaaaaa, REG_UFS_DEBUG_SEL_B2); in ufs_mtk_dbg_sel()
385 ufshcd_writel(hba, 0xffffffff, REG_UFS_DEBUG_SEL_B3); in ufs_mtk_dbg_sel()
387 ufshcd_writel(hba, 0x20, REG_UFS_DEBUG_SEL); in ufs_mtk_dbg_sel()
391 static void ufs_mtk_wait_idle_state(struct ufs_hba *hba, in ufs_mtk_wait_idle_state() argument
407 ufs_mtk_dbg_sel(hba); in ufs_mtk_wait_idle_state()
408 val = ufshcd_readl(hba, REG_UFS_PROBE); in ufs_mtk_wait_idle_state()
428 dev_info(hba->dev, "wait idle tmo: 0x%x\n", val); in ufs_mtk_wait_idle_state()
431 static int ufs_mtk_wait_link_state(struct ufs_hba *hba, u32 state, in ufs_mtk_wait_link_state() argument
440 ufs_mtk_dbg_sel(hba); in ufs_mtk_wait_link_state()
441 val = ufshcd_readl(hba, REG_UFS_PROBE); in ufs_mtk_wait_link_state()
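ufs_mtk_wait_idle_state() and ufs_mtk_wait_link_state() poll the same way: reprogram the debug mux with ufs_mtk_dbg_sel() so that REG_UFS_PROBE exposes the state machine, read it, and retry until the expected state or a deadline. The shared loop shape, with PROBE_STATE() standing in for the real field extraction (the mask is not visible in the listing):

    static int ufs_mtk_wait_probe_state(struct ufs_hba *hba, u32 state,
                                        int max_ms)
    {
            ktime_t timeout = ktime_add_ms(ktime_get(), max_ms);
            u32 val;

            do {
                    ufs_mtk_dbg_sel(hba);
                    val = ufshcd_readl(hba, REG_UFS_PROBE);
                    if (PROBE_STATE(val) == state)  /* placeholder macro */
                            return 0;
                    usleep_range(100, 200);
            } while (ktime_before(ktime_get(), timeout));

            return -ETIMEDOUT;
    }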
454 static int ufs_mtk_mphy_power_on(struct ufs_hba *hba, bool on) in ufs_mtk_mphy_power_on() argument
456 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_mphy_power_on()
465 if (ufs_mtk_is_va09_supported(hba)) { in ufs_mtk_mphy_power_on()
476 if (ufs_mtk_is_va09_supported(hba)) { in ufs_mtk_mphy_power_on()
483 dev_info(hba->dev, in ufs_mtk_mphy_power_on()
509 static void ufs_mtk_boost_crypt(struct ufs_hba *hba, bool boost) in ufs_mtk_boost_crypt() argument
511 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_boost_crypt()
516 if (!ufs_mtk_is_boost_crypt_enabled(hba)) in ufs_mtk_boost_crypt()
525 dev_info(hba->dev, "clk_prepare_enable(): %d\n", in ufs_mtk_boost_crypt()
533 dev_info(hba->dev, in ufs_mtk_boost_crypt()
541 dev_info(hba->dev, in ufs_mtk_boost_crypt()
550 dev_info(hba->dev, in ufs_mtk_boost_crypt()
557 dev_info(hba->dev, in ufs_mtk_boost_crypt()
565 static int ufs_mtk_init_host_clk(struct ufs_hba *hba, const char *name, in ufs_mtk_init_host_clk() argument
570 ret = ufs_mtk_get_host_clk(hba->dev, name, clk); in ufs_mtk_init_host_clk()
572 dev_info(hba->dev, "%s: failed to get %s: %d", __func__, in ufs_mtk_init_host_clk()
579 static void ufs_mtk_init_boost_crypt(struct ufs_hba *hba) in ufs_mtk_init_boost_crypt() argument
581 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_boost_crypt()
583 struct device *dev = hba->dev; in ufs_mtk_init_boost_crypt()
606 if (ufs_mtk_init_host_clk(hba, "crypt_mux", in ufs_mtk_init_boost_crypt()
610 if (ufs_mtk_init_host_clk(hba, "crypt_lp", in ufs_mtk_init_boost_crypt()
614 if (ufs_mtk_init_host_clk(hba, "crypt_perf", in ufs_mtk_init_boost_crypt()
626 static void ufs_mtk_init_va09_pwr_ctrl(struct ufs_hba *hba) in ufs_mtk_init_va09_pwr_ctrl() argument
628 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_va09_pwr_ctrl()
630 host->reg_va09 = regulator_get(hba->dev, "va09"); in ufs_mtk_init_va09_pwr_ctrl()
632 dev_info(hba->dev, "failed to get va09"); in ufs_mtk_init_va09_pwr_ctrl()
637 static void ufs_mtk_init_host_caps(struct ufs_hba *hba) in ufs_mtk_init_host_caps() argument
639 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_host_caps()
640 struct device_node *np = hba->dev->of_node; in ufs_mtk_init_host_caps()
643 ufs_mtk_init_boost_crypt(hba); in ufs_mtk_init_host_caps()
646 ufs_mtk_init_va09_pwr_ctrl(hba); in ufs_mtk_init_host_caps()
666 dev_info(hba->dev, "caps: 0x%x", host->caps); in ufs_mtk_init_host_caps()
669 static void ufs_mtk_scale_perf(struct ufs_hba *hba, bool scale_up) in ufs_mtk_scale_perf() argument
671 ufs_mtk_boost_crypt(hba, scale_up); in ufs_mtk_scale_perf()
674 static void ufs_mtk_pwr_ctrl(struct ufs_hba *hba, bool on) in ufs_mtk_pwr_ctrl() argument
676 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_pwr_ctrl()
680 ufs_mtk_setup_ref_clk(hba, on); in ufs_mtk_pwr_ctrl()
681 if (!ufshcd_is_clkscaling_supported(hba)) in ufs_mtk_pwr_ctrl()
682 ufs_mtk_scale_perf(hba, on); in ufs_mtk_pwr_ctrl()
684 if (!ufshcd_is_clkscaling_supported(hba)) in ufs_mtk_pwr_ctrl()
685 ufs_mtk_scale_perf(hba, on); in ufs_mtk_pwr_ctrl()
686 ufs_mtk_setup_ref_clk(hba, on); in ufs_mtk_pwr_ctrl()
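Note the deliberate asymmetry in ufs_mtk_pwr_ctrl() (file lines 680-686): on power-up the reference clock comes first and performance scaling second; on power-down the order is reversed so the reference clock is the last thing released. Laid out flat from the fragments above:

    if (on) {
            ufs_mtk_setup_ref_clk(hba, true);
            if (!ufshcd_is_clkscaling_supported(hba))
                    ufs_mtk_scale_perf(hba, true);
    } else {
            if (!ufshcd_is_clkscaling_supported(hba))
                    ufs_mtk_scale_perf(hba, false);
            ufs_mtk_setup_ref_clk(hba, false);
    }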
691 static void ufs_mtk_mcq_disable_irq(struct ufs_hba *hba) in ufs_mtk_mcq_disable_irq() argument
693 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_mcq_disable_irq()
696 if (!hba->mcq_enabled) in ufs_mtk_mcq_disable_irq()
709 static void ufs_mtk_mcq_enable_irq(struct ufs_hba *hba) in ufs_mtk_mcq_enable_irq() argument
711 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_mcq_enable_irq()
714 if (!hba->mcq_enabled) in ufs_mtk_mcq_enable_irq()
732 * @hba: host controller instance
738 static int ufs_mtk_setup_clocks(struct ufs_hba *hba, bool on, in ufs_mtk_setup_clocks() argument
741 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_setup_clocks()
754 if (ufshcd_is_link_off(hba)) { in ufs_mtk_setup_clocks()
756 } else if (ufshcd_is_link_hibern8(hba) || in ufs_mtk_setup_clocks()
757 (!ufshcd_can_hibern8_during_gating(hba) && in ufs_mtk_setup_clocks()
758 ufshcd_is_auto_hibern8_enabled(hba))) { in ufs_mtk_setup_clocks()
764 ret = ufs_mtk_wait_link_state(hba, in ufs_mtk_setup_clocks()
772 ufs_mtk_pwr_ctrl(hba, false); in ufs_mtk_setup_clocks()
773 ufs_mtk_mcq_disable_irq(hba); in ufs_mtk_setup_clocks()
775 ufs_mtk_pwr_ctrl(hba, true); in ufs_mtk_setup_clocks()
776 ufs_mtk_mcq_enable_irq(hba); in ufs_mtk_setup_clocks()
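ufs_mtk_setup_clocks() powers down on the PRE_CHANGE leg of a clock-off transition, and only once the link is off or confirmed to be resting in hibern8; it powers back up on the POST_CHANGE leg of a clock-on transition. A condensed sketch; the 15 ms wait budget and the VS_LINK_HIBERN8 state name are assumptions filling the truncated fragment at file line 764:

    bool clk_pwr_off = false;

    if (!on && status == PRE_CHANGE) {
            if (ufshcd_is_link_off(hba)) {
                    clk_pwr_off = true;
            } else if (ufshcd_is_link_hibern8(hba) ||
                       (!ufshcd_can_hibern8_during_gating(hba) &&
                        ufshcd_is_auto_hibern8_enabled(hba))) {
                    if (!ufs_mtk_wait_link_state(hba, VS_LINK_HIBERN8, 15))
                            clk_pwr_off = true;
            }
            if (clk_pwr_off) {
                    ufs_mtk_pwr_ctrl(hba, false);
                    ufs_mtk_mcq_disable_irq(hba);
            }
    } else if (on && status == POST_CHANGE) {
            ufs_mtk_pwr_ctrl(hba, true);
            ufs_mtk_mcq_enable_irq(hba);
    }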
782 static void ufs_mtk_get_controller_version(struct ufs_hba *hba) in ufs_mtk_get_controller_version() argument
784 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_get_controller_version()
793 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_LOCALVERINFO), &ver); in ufs_mtk_get_controller_version()
801 if (hba->ufs_version < ufshci_version(3, 0)) in ufs_mtk_get_controller_version()
802 hba->ufs_version = ufshci_version(3, 0); in ufs_mtk_get_controller_version()
807 static u32 ufs_mtk_get_ufs_hci_version(struct ufs_hba *hba) in ufs_mtk_get_ufs_hci_version() argument
809 return hba->ufs_version; in ufs_mtk_get_ufs_hci_version()
815 * @hba: per adapter instance
817 static void ufs_mtk_init_clocks(struct ufs_hba *hba) in ufs_mtk_init_clocks() argument
819 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_clocks()
820 struct list_head *head = &hba->clk_list_head; in ufs_mtk_init_clocks()
845 hba->caps &= ~UFSHCD_CAP_CLK_SCALING; in ufs_mtk_init_clocks()
846 dev_info(hba->dev, in ufs_mtk_init_clocks()
853 static int ufs_mtk_vreg_fix_vcc(struct ufs_hba *hba) in ufs_mtk_vreg_fix_vcc() argument
855 struct ufs_vreg_info *info = &hba->vreg_info; in ufs_mtk_vreg_fix_vcc()
856 struct device_node *np = hba->dev->of_node; in ufs_mtk_vreg_fix_vcc()
857 struct device *dev = hba->dev; in ufs_mtk_vreg_fix_vcc()
862 if (hba->vreg_info.vcc) in ufs_mtk_vreg_fix_vcc()
872 ver = (hba->dev_info.wspecversion & 0xF00) >> 8; in ufs_mtk_vreg_fix_vcc()
895 static void ufs_mtk_vreg_fix_vccqx(struct ufs_hba *hba) in ufs_mtk_vreg_fix_vccqx() argument
897 struct ufs_vreg_info *info = &hba->vreg_info; in ufs_mtk_vreg_fix_vccqx()
900 if (hba->dev_info.wspecversion >= 0x0300) { in ufs_mtk_vreg_fix_vccqx()
913 devm_kfree(hba->dev, (*vreg_off)->name); in ufs_mtk_vreg_fix_vccqx()
914 devm_kfree(hba->dev, *vreg_off); in ufs_mtk_vreg_fix_vccqx()
919 static void ufs_mtk_init_mcq_irq(struct ufs_hba *hba) in ufs_mtk_init_mcq_irq() argument
921 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_init_mcq_irq()
927 pdev = container_of(hba->dev, struct platform_device, dev); in ufs_mtk_init_mcq_irq()
939 host->mcq_intr_info[i].hba = hba; in ufs_mtk_init_mcq_irq()
941 dev_info(hba->dev, "get platform mcq irq: %d, %d\n", i, irq); in ufs_mtk_init_mcq_irq()
955 * @hba: host controller instance
963 static int ufs_mtk_init(struct ufs_hba *hba) in ufs_mtk_init() argument
966 struct device *dev = hba->dev; in ufs_mtk_init()
968 struct Scsi_Host *shost = hba->host; in ufs_mtk_init()
979 host->hba = hba; in ufs_mtk_init()
980 ufshcd_set_variant(hba, host); in ufs_mtk_init()
989 ufs_mtk_init_host_caps(hba); in ufs_mtk_init()
991 ufs_mtk_init_mcq_irq(hba); in ufs_mtk_init()
993 err = ufs_mtk_bind_mphy(hba); in ufs_mtk_init()
997 ufs_mtk_init_reset(hba); in ufs_mtk_init()
1004 hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; in ufs_mtk_init()
1007 hba->caps |= UFSHCD_CAP_CLK_GATING; in ufs_mtk_init()
1010 hba->caps |= UFSHCD_CAP_CRYPTO; in ufs_mtk_init()
1013 hba->caps |= UFSHCD_CAP_WB_EN; in ufs_mtk_init()
1016 hba->caps |= UFSHCD_CAP_CLK_SCALING; in ufs_mtk_init()
1021 hba->quirks |= UFSHCI_QUIRK_SKIP_MANUAL_WB_FLUSH_CTRL; in ufs_mtk_init()
1022 hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_INTR; in ufs_mtk_init()
1023 hba->quirks |= UFSHCD_QUIRK_MCQ_BROKEN_RTC; in ufs_mtk_init()
1024 hba->vps->wb_flush_threshold = UFS_WB_BUF_REMAIN_PERCENT(80); in ufs_mtk_init()
1027 hba->caps |= UFSHCD_CAP_HIBERN8_WITH_CLK_GATING; in ufs_mtk_init()
1030 hba->quirks |= UFSHCD_QUIRK_BROKEN_LSDBS_CAP; in ufs_mtk_init()
1032 ufs_mtk_init_clocks(hba); in ufs_mtk_init()
1041 ufs_mtk_mphy_power_on(hba, true); in ufs_mtk_init()
1043 if (ufs_mtk_is_rtff_mtcmos(hba)) { in ufs_mtk_init()
1051 ufs_mtk_setup_clocks(hba, true, POST_CHANGE); in ufs_mtk_init()
1053 host->ip_ver = ufshcd_readl(hba, REG_UFS_MTK_IP_VER); in ufs_mtk_init()
1058 ufshcd_set_variant(hba, NULL); in ufs_mtk_init()
1063 static bool ufs_mtk_pmc_via_fastauto(struct ufs_hba *hba, in ufs_mtk_pmc_via_fastauto() argument
1066 if (!ufs_mtk_is_pmc_via_fastauto(hba)) in ufs_mtk_pmc_via_fastauto()
1069 if (dev_req_params->hs_rate == hba->pwr_info.hs_rate) in ufs_mtk_pmc_via_fastauto()
1083 static int ufs_mtk_pre_pwr_change(struct ufs_hba *hba, in ufs_mtk_pre_pwr_change() argument
1087 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_pre_pwr_change()
1101 if (ufs_mtk_pmc_via_fastauto(hba, dev_req_params)) { in ufs_mtk_pre_pwr_change()
1102 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXTERMINATION), true); in ufs_mtk_pre_pwr_change()
1103 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXGEAR), UFS_HS_G1); in ufs_mtk_pre_pwr_change()
1105 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXTERMINATION), true); in ufs_mtk_pre_pwr_change()
1106 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_RXGEAR), UFS_HS_G1); in ufs_mtk_pre_pwr_change()
1108 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVETXDATALANES), in ufs_mtk_pre_pwr_change()
1110 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_ACTIVERXDATALANES), in ufs_mtk_pre_pwr_change()
1112 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HSSERIES), in ufs_mtk_pre_pwr_change()
1115 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TXHSADAPTTYPE), in ufs_mtk_pre_pwr_change()
1118 ret = ufshcd_uic_change_pwr_mode(hba, in ufs_mtk_pre_pwr_change()
1122 dev_err(hba->dev, "%s: HSG1B FASTAUTO failed ret=%d\n", in ufs_mtk_pre_pwr_change()
1128 ret = ufshcd_dme_configure_adapt(hba, in ufs_mtk_pre_pwr_change()
1136 static int ufs_mtk_pwr_change_notify(struct ufs_hba *hba, in ufs_mtk_pwr_change_notify() argument
1145 ret = ufs_mtk_pre_pwr_change(hba, dev_max_params, in ufs_mtk_pwr_change_notify()
1158 static int ufs_mtk_unipro_set_lpm(struct ufs_hba *hba, bool lpm) in ufs_mtk_unipro_set_lpm() argument
1161 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_unipro_set_lpm()
1163 ret = ufshcd_dme_set(hba, in ufs_mtk_unipro_set_lpm()
1178 static int ufs_mtk_pre_link(struct ufs_hba *hba) in ufs_mtk_pre_link() argument
1183 ufs_mtk_get_controller_version(hba); in ufs_mtk_pre_link()
1185 ret = ufs_mtk_unipro_set_lpm(hba, false); in ufs_mtk_pre_link()
1194 ret = ufshcd_disable_host_tx_lcc(hba); in ufs_mtk_pre_link()
1199 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), &tmp); in ufs_mtk_pre_link()
1205 ret = ufshcd_dme_set(hba, UIC_ARG_MIB(VS_SAVEPOWERCONTROL), tmp); in ufs_mtk_pre_link()
1210 static void ufs_mtk_setup_clk_gating(struct ufs_hba *hba) in ufs_mtk_setup_clk_gating() argument
1214 if (ufshcd_is_clkgating_allowed(hba)) { in ufs_mtk_setup_clk_gating()
1215 if (ufshcd_is_auto_hibern8_supported(hba) && hba->ahit) in ufs_mtk_setup_clk_gating()
1217 hba->ahit); in ufs_mtk_setup_clk_gating()
1220 ufshcd_clkgate_delay_set(hba->dev, ah_ms + 5); in ufs_mtk_setup_clk_gating()
1224 static void ufs_mtk_post_link(struct ufs_hba *hba) in ufs_mtk_post_link() argument
1227 ufs_mtk_cfg_unipro_cg(hba, true); in ufs_mtk_post_link()
1229 /* will be configured during probe hba */ in ufs_mtk_post_link()
1230 if (ufshcd_is_auto_hibern8_supported(hba)) in ufs_mtk_post_link()
1231 hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) | in ufs_mtk_post_link()
1234 ufs_mtk_setup_clk_gating(hba); in ufs_mtk_post_link()
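The hba->ahit assignment at file lines 1231-1233 is truncated at the match; in the mainline driver it pairs a 10-unit timer with the 1 ms scale (scale code 3 in the UFSHCI AHIT register layout) via FIELD_PREP() from <linux/bitfield.h>:

    /* 10 units at the 1 ms scale, i.e. a 10 ms auto-hibern8 idle timer. */
    hba->ahit = FIELD_PREP(UFSHCI_AHIBERN8_TIMER_MASK, 10) |
                FIELD_PREP(UFSHCI_AHIBERN8_SCALE_MASK, 3);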
1237 static int ufs_mtk_link_startup_notify(struct ufs_hba *hba, in ufs_mtk_link_startup_notify() argument
1244 ret = ufs_mtk_pre_link(hba); in ufs_mtk_link_startup_notify()
1247 ufs_mtk_post_link(hba); in ufs_mtk_link_startup_notify()
1257 static int ufs_mtk_device_reset(struct ufs_hba *hba) in ufs_mtk_device_reset() argument
1261 /* disable hba before device reset */ in ufs_mtk_device_reset()
1262 ufshcd_hba_stop(hba); in ufs_mtk_device_reset()
1280 dev_info(hba->dev, "device reset done\n"); in ufs_mtk_device_reset()
1285 static int ufs_mtk_link_set_hpm(struct ufs_hba *hba) in ufs_mtk_link_set_hpm() argument
1289 err = ufshcd_hba_enable(hba); in ufs_mtk_link_set_hpm()
1293 err = ufs_mtk_unipro_set_lpm(hba, false); in ufs_mtk_link_set_hpm()
1297 err = ufshcd_uic_hibern8_exit(hba); in ufs_mtk_link_set_hpm()
1302 ufs_mtk_wait_idle_state(hba, 5); in ufs_mtk_link_set_hpm()
1303 err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100); in ufs_mtk_link_set_hpm()
1305 dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err); in ufs_mtk_link_set_hpm()
1308 ufshcd_set_link_active(hba); in ufs_mtk_link_set_hpm()
1310 err = ufshcd_make_hba_operational(hba); in ufs_mtk_link_set_hpm()
1314 if (hba->mcq_enabled) { in ufs_mtk_link_set_hpm()
1315 ufs_mtk_config_mcq(hba, false); in ufs_mtk_link_set_hpm()
1316 ufshcd_mcq_make_queues_operational(hba); in ufs_mtk_link_set_hpm()
1317 ufshcd_mcq_config_mac(hba, hba->nutrs); in ufs_mtk_link_set_hpm()
1318 ufshcd_mcq_enable(hba); in ufs_mtk_link_set_hpm()
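Read top to bottom, ufs_mtk_link_set_hpm() is the resume-side bring-up order: re-enable the host controller, take UniPro out of low power, exit hibern8, verify the exit through the debug probe, mark the link active, make the HBA operational, and finally rebuild MCQ state. Condensed from the fragments above (error handling between the truncated lines is assumed to bail out immediately):

    err = ufshcd_hba_enable(hba);
    if (err)
            return err;

    err = ufs_mtk_unipro_set_lpm(hba, false);
    if (err)
            return err;

    err = ufshcd_uic_hibern8_exit(hba);
    if (err)
            return err;

    /* Confirm via the debug probe that H8 exit really completed. */
    ufs_mtk_wait_idle_state(hba, 5);
    err = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100);
    if (err) {
            dev_warn(hba->dev, "exit h8 state fail, err=%d\n", err);
            return err;
    }

    ufshcd_set_link_active(hba);

    err = ufshcd_make_hba_operational(hba);
    if (err)
            return err;

    if (hba->mcq_enabled) {
            ufs_mtk_config_mcq(hba, false);
            ufshcd_mcq_make_queues_operational(hba);
            ufshcd_mcq_config_mac(hba, hba->nutrs);
            ufshcd_mcq_enable(hba);
    }

    return 0;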
1324 static int ufs_mtk_link_set_lpm(struct ufs_hba *hba) in ufs_mtk_link_set_lpm() argument
1329 ufshcd_writel(hba, in ufs_mtk_link_set_lpm()
1330 (ufshcd_readl(hba, REG_UFS_XOUFS_CTRL) & ~0x100), in ufs_mtk_link_set_lpm()
1333 err = ufs_mtk_unipro_set_lpm(hba, true); in ufs_mtk_link_set_lpm()
1336 ufs_mtk_unipro_set_lpm(hba, false); in ufs_mtk_link_set_lpm()
1343 static void ufs_mtk_vccqx_set_lpm(struct ufs_hba *hba, bool lpm) in ufs_mtk_vccqx_set_lpm() argument
1347 if (hba->vreg_info.vccq) in ufs_mtk_vccqx_set_lpm()
1348 vccqx = hba->vreg_info.vccq; in ufs_mtk_vccqx_set_lpm()
1350 vccqx = hba->vreg_info.vccq2; in ufs_mtk_vccqx_set_lpm()
1356 static void ufs_mtk_vsx_set_lpm(struct ufs_hba *hba, bool lpm) in ufs_mtk_vsx_set_lpm() argument
1361 (unsigned long)hba->dev_info.wspecversion, in ufs_mtk_vsx_set_lpm()
1365 static void ufs_mtk_dev_vreg_set_lpm(struct ufs_hba *hba, bool lpm) in ufs_mtk_dev_vreg_set_lpm() argument
1370 if (lpm && ufshcd_is_ufs_dev_active(hba)) in ufs_mtk_dev_vreg_set_lpm()
1374 if (!hba->vreg_info.vccq && !hba->vreg_info.vccq2) in ufs_mtk_dev_vreg_set_lpm()
1378 if (!hba->vreg_info.vcc) in ufs_mtk_dev_vreg_set_lpm()
1382 if (lpm && hba->vreg_info.vcc && hba->vreg_info.vcc->enabled) { in ufs_mtk_dev_vreg_set_lpm()
1384 if (ufs_mtk_is_allow_vccqx_lpm(hba)) in ufs_mtk_dev_vreg_set_lpm()
1392 ufs_mtk_vccqx_set_lpm(hba, lpm); in ufs_mtk_dev_vreg_set_lpm()
1393 ufs_mtk_vsx_set_lpm(hba, lpm); in ufs_mtk_dev_vreg_set_lpm()
1395 ufs_mtk_vsx_set_lpm(hba, lpm); in ufs_mtk_dev_vreg_set_lpm()
1397 ufs_mtk_vccqx_set_lpm(hba, lpm); in ufs_mtk_dev_vreg_set_lpm()
1401 static void ufs_mtk_auto_hibern8_disable(struct ufs_hba *hba) in ufs_mtk_auto_hibern8_disable() argument
1406 ufshcd_writel(hba, 0, REG_AUTO_HIBERNATE_IDLE_TIMER); in ufs_mtk_auto_hibern8_disable()
1409 ufs_mtk_wait_idle_state(hba, 5); in ufs_mtk_auto_hibern8_disable()
1411 ret = ufs_mtk_wait_link_state(hba, VS_LINK_UP, 100); in ufs_mtk_auto_hibern8_disable()
1413 dev_warn(hba->dev, "exit h8 state fail, ret=%d\n", ret); in ufs_mtk_auto_hibern8_disable()
1416 static int ufs_mtk_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op, in ufs_mtk_suspend() argument
1423 if (ufshcd_is_auto_hibern8_supported(hba)) in ufs_mtk_suspend()
1424 ufs_mtk_auto_hibern8_disable(hba); in ufs_mtk_suspend()
1428 if (ufshcd_is_link_hibern8(hba)) { in ufs_mtk_suspend()
1429 err = ufs_mtk_link_set_lpm(hba); in ufs_mtk_suspend()
1434 if (!ufshcd_is_link_active(hba)) { in ufs_mtk_suspend()
1440 err = ufs_mtk_mphy_power_on(hba, false); in ufs_mtk_suspend()
1445 if (ufshcd_is_link_off(hba)) in ufs_mtk_suspend()
1457 ufshcd_set_link_off(hba); in ufs_mtk_suspend()
1461 static int ufs_mtk_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op) in ufs_mtk_resume() argument
1466 if (hba->ufshcd_state != UFSHCD_STATE_OPERATIONAL) in ufs_mtk_resume()
1467 ufs_mtk_dev_vreg_set_lpm(hba, false); in ufs_mtk_resume()
1471 err = ufs_mtk_mphy_power_on(hba, true); in ufs_mtk_resume()
1475 if (ufshcd_is_link_hibern8(hba)) { in ufs_mtk_resume()
1476 err = ufs_mtk_link_set_hpm(hba); in ufs_mtk_resume()
1483 return ufshcd_link_recovery(hba); in ufs_mtk_resume()
1486 static void ufs_mtk_dbg_register_dump(struct ufs_hba *hba) in ufs_mtk_dbg_register_dump() argument
1489 ufshcd_dump_regs(hba, REG_UFS_XOUFS_CTRL, 0x10, in ufs_mtk_dbg_register_dump()
1492 ufshcd_dump_regs(hba, REG_UFS_EXTREG, 0x4, "Ext Reg "); in ufs_mtk_dbg_register_dump()
1495 ufshcd_dump_regs(hba, REG_UFS_MPHYCTRL, in ufs_mtk_dbg_register_dump()
1500 ufs_mtk_dbg_sel(hba); in ufs_mtk_dbg_register_dump()
1501 ufshcd_dump_regs(hba, REG_UFS_PROBE, 0x4, "Debug Probe "); in ufs_mtk_dbg_register_dump()
1504 static int ufs_mtk_apply_dev_quirks(struct ufs_hba *hba) in ufs_mtk_apply_dev_quirks() argument
1506 struct ufs_dev_info *dev_info = &hba->dev_info; in ufs_mtk_apply_dev_quirks()
1510 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 6); in ufs_mtk_apply_dev_quirks()
1511 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_HIBERN8TIME), 10); in ufs_mtk_apply_dev_quirks()
1514 if (ufs_mtk_is_tx_skew_fix(hba) && in ufs_mtk_apply_dev_quirks()
1521 ufshcd_dme_set(hba, UIC_ARG_MIB(PA_TACTIVATE), 8); in ufs_mtk_apply_dev_quirks()
1531 ufs_mtk_setup_ref_clk_wait_us(hba, 1); in ufs_mtk_apply_dev_quirks()
1533 ufs_mtk_setup_ref_clk_wait_us(hba, 30); in ufs_mtk_apply_dev_quirks()
1535 ufs_mtk_setup_ref_clk_wait_us(hba, 100); in ufs_mtk_apply_dev_quirks()
1537 ufs_mtk_setup_ref_clk_wait_us(hba, in ufs_mtk_apply_dev_quirks()
1542 static void ufs_mtk_fixup_dev_quirks(struct ufs_hba *hba) in ufs_mtk_fixup_dev_quirks() argument
1544 ufshcd_fixup_dev_quirks(hba, ufs_mtk_dev_fixups); in ufs_mtk_fixup_dev_quirks()
1546 if (ufs_mtk_is_broken_vcc(hba) && hba->vreg_info.vcc && in ufs_mtk_fixup_dev_quirks()
1547 (hba->dev_quirks & UFS_DEVICE_QUIRK_DELAY_AFTER_LPM)) { in ufs_mtk_fixup_dev_quirks()
1548 hba->vreg_info.vcc->always_on = true; in ufs_mtk_fixup_dev_quirks()
1553 hba->dev_quirks &= ~(UFS_DEVICE_QUIRK_DELAY_BEFORE_LPM | in ufs_mtk_fixup_dev_quirks()
1557 ufs_mtk_vreg_fix_vcc(hba); in ufs_mtk_fixup_dev_quirks()
1558 ufs_mtk_vreg_fix_vccqx(hba); in ufs_mtk_fixup_dev_quirks()
1561 static void ufs_mtk_event_notify(struct ufs_hba *hba, in ufs_mtk_event_notify() argument
1572 dev_info(hba->dev, in ufs_mtk_event_notify()
1580 dev_info(hba->dev, "%s\n", ufs_uic_pa_err_str[bit]); in ufs_mtk_event_notify()
1585 dev_info(hba->dev, "%s\n", ufs_uic_dl_err_str[bit]); in ufs_mtk_event_notify()
1589 static void ufs_mtk_config_scaling_param(struct ufs_hba *hba, in ufs_mtk_config_scaling_param() argument
1594 hba->clk_scaling.min_gear = UFS_HS_G4; in ufs_mtk_config_scaling_param()
1596 hba->vps->devfreq_profile.polling_ms = 200; in ufs_mtk_config_scaling_param()
1597 hba->vps->ondemand_data.upthreshold = 50; in ufs_mtk_config_scaling_param()
1598 hba->vps->ondemand_data.downdifferential = 20; in ufs_mtk_config_scaling_param()
1610 * @hba: per adapter instance
1613 static void ufs_mtk_clk_scale(struct ufs_hba *hba, bool scale_up) in ufs_mtk_clk_scale() argument
1615 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_clk_scale()
1622 dev_info(hba->dev, in ufs_mtk_clk_scale()
1636 dev_info(hba->dev, in ufs_mtk_clk_scale()
1645 static int ufs_mtk_clk_scale_notify(struct ufs_hba *hba, bool scale_up, in ufs_mtk_clk_scale_notify() argument
1648 if (!ufshcd_is_clkscaling_supported(hba)) in ufs_mtk_clk_scale_notify()
1653 ufs_mtk_clk_scale(hba, scale_up); in ufs_mtk_clk_scale_notify()
1656 ufs_mtk_scale_perf(hba, scale_up); in ufs_mtk_clk_scale_notify()
1662 static int ufs_mtk_get_hba_mac(struct ufs_hba *hba) in ufs_mtk_get_hba_mac() argument
1664 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_get_hba_mac()
1673 static int ufs_mtk_op_runtime_config(struct ufs_hba *hba) in ufs_mtk_op_runtime_config() argument
1678 hba->mcq_opr[OPR_SQD].offset = REG_UFS_MTK_SQD; in ufs_mtk_op_runtime_config()
1679 hba->mcq_opr[OPR_SQIS].offset = REG_UFS_MTK_SQIS; in ufs_mtk_op_runtime_config()
1680 hba->mcq_opr[OPR_CQD].offset = REG_UFS_MTK_CQD; in ufs_mtk_op_runtime_config()
1681 hba->mcq_opr[OPR_CQIS].offset = REG_UFS_MTK_CQIS; in ufs_mtk_op_runtime_config()
1684 opr = &hba->mcq_opr[i]; in ufs_mtk_op_runtime_config()
1686 opr->base = hba->mmio_base + opr->offset; in ufs_mtk_op_runtime_config()
1692 static int ufs_mtk_mcq_config_resource(struct ufs_hba *hba) in ufs_mtk_mcq_config_resource() argument
1694 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_mcq_config_resource()
1698 dev_info(hba->dev, "IRQs not ready. MCQ disabled."); in ufs_mtk_mcq_config_resource()
1702 hba->mcq_base = hba->mmio_base + MCQ_QUEUE_OFFSET(hba->mcq_capabilities); in ufs_mtk_mcq_config_resource()
1709 struct ufs_hba *hba = mcq_intr_info->hba; in ufs_mtk_mcq_intr() local
1714 hwq = &hba->uhq[qid]; in ufs_mtk_mcq_intr()
1716 events = ufshcd_mcq_read_cqis(hba, qid); in ufs_mtk_mcq_intr()
1718 ufshcd_mcq_write_cqis(hba, events, qid); in ufs_mtk_mcq_intr()
1721 ufshcd_mcq_poll_cqe_lock(hba, hwq); in ufs_mtk_mcq_intr()
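ufs_mtk_mcq_intr() is the per-queue handler that ufs_mtk_config_mcq_irq() below registers once per hardware queue: recover the queue id from the cookie, read and acknowledge CQIS, then drain completed CQEs. Reconstructed from the fragments; the qid field and the tail-entry-push test are assumptions about details the listing does not show:

    static irqreturn_t ufs_mtk_mcq_intr(int irq, void *__intr_info)
    {
            struct ufs_mtk_mcq_intr_info *mcq_intr_info = __intr_info;
            struct ufs_hba *hba = mcq_intr_info->hba;
            int qid = mcq_intr_info->qid;   /* assumed field name */
            struct ufs_hw_queue *hwq = &hba->uhq[qid];
            u32 events;

            events = ufshcd_mcq_read_cqis(hba, qid);
            if (events)
                    ufshcd_mcq_write_cqis(hba, events, qid);

            if (events & UFSHCD_MCQ_CQIS_TAIL_ENT_PUSH_STS)
                    ufshcd_mcq_poll_cqe_lock(hba, hwq);

            return IRQ_HANDLED;
    }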
1726 static int ufs_mtk_config_mcq_irq(struct ufs_hba *hba) in ufs_mtk_config_mcq_irq() argument
1728 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_config_mcq_irq()
1735 dev_err(hba->dev, "invalid irq. %d\n", i); in ufs_mtk_config_mcq_irq()
1740 ret = devm_request_irq(hba->dev, irq, ufs_mtk_mcq_intr, 0, UFSHCD, in ufs_mtk_config_mcq_irq()
1743 dev_dbg(hba->dev, "request irq %d intr %s\n", irq, ret ? "failed" : ""); in ufs_mtk_config_mcq_irq()
1746 dev_err(hba->dev, "Cannot request irq %d\n", ret); in ufs_mtk_config_mcq_irq()
1754 static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq) in ufs_mtk_config_mcq() argument
1756 struct ufs_mtk_host *host = ufshcd_get_variant(hba); in ufs_mtk_config_mcq()
1761 ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0); in ufs_mtk_config_mcq()
1764 ret = ufs_mtk_config_mcq_irq(hba); in ufs_mtk_config_mcq()
1772 ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0); in ufs_mtk_config_mcq()
1773 ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN, REG_UFS_MMIO_OPT_CTRL_0); in ufs_mtk_config_mcq()
1778 static int ufs_mtk_config_esi(struct ufs_hba *hba) in ufs_mtk_config_esi() argument
1780 return ufs_mtk_config_mcq(hba, true); in ufs_mtk_config_esi()
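ufs_mtk_config_mcq() masks the MCQ interrupt enables in REG_UFS_MMIO_OPT_CTRL_0, optionally requests the per-queue IRQs, then turns on the AH8 and multi-interrupt bits; ufs_mtk_config_esi() simply calls it with irq = true. Condensed from the fragments above:

    static int ufs_mtk_config_mcq(struct ufs_hba *hba, bool irq)
    {
            int ret;

            /* Mask all MCQ interrupt sources before (re)configuring. */
            ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, 0, REG_UFS_MMIO_OPT_CTRL_0);

            if (irq) {
                    ret = ufs_mtk_config_mcq_irq(hba);
                    if (ret)
                            return ret;
            }

            ufshcd_rmwl(hba, MCQ_AH8, MCQ_AH8, REG_UFS_MMIO_OPT_CTRL_0);
            ufshcd_rmwl(hba, MCQ_INTR_EN_MSK, MCQ_MULTI_INTR_EN,
                        REG_UFS_MMIO_OPT_CTRL_0);

            return 0;
    }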
1785 struct ufs_hba *hba = shost_priv(sdev->host); in ufs_mtk_config_scsi_dev() local
1787 dev_dbg(hba->dev, "lu %llu scsi device configured", sdev->lun); in ufs_mtk_config_scsi_dev()
1888 struct ufs_hba *hba = dev_get_drvdata(dev); in ufs_mtk_system_suspend() local
1896 ufs_mtk_dev_vreg_set_lpm(hba, true); in ufs_mtk_system_suspend()
1898 if (ufs_mtk_is_rtff_mtcmos(hba)) in ufs_mtk_system_suspend()
1906 struct ufs_hba *hba = dev_get_drvdata(dev); in ufs_mtk_system_resume() local
1909 ufs_mtk_dev_vreg_set_lpm(hba, false); in ufs_mtk_system_resume()
1911 if (ufs_mtk_is_rtff_mtcmos(hba)) in ufs_mtk_system_resume()
1921 struct ufs_hba *hba = dev_get_drvdata(dev); in ufs_mtk_runtime_suspend() local
1929 ufs_mtk_dev_vreg_set_lpm(hba, true); in ufs_mtk_runtime_suspend()
1931 if (ufs_mtk_is_rtff_mtcmos(hba)) in ufs_mtk_runtime_suspend()
1939 struct ufs_hba *hba = dev_get_drvdata(dev); in ufs_mtk_runtime_resume() local
1942 if (ufs_mtk_is_rtff_mtcmos(hba)) in ufs_mtk_runtime_resume()
1945 ufs_mtk_dev_vreg_set_lpm(hba, false); in ufs_mtk_runtime_resume()