Lines Matching full:hba

Full-text search hits for the identifier hba, by all appearances from the Linux kernel's UFS PCI host driver (drivers/ufs/host/ufshcd-pci.c). Each hit shows the source line number, the matching line, and the enclosing function; the trailing "argument"/"local" marks how hba is used at that site.
26 void (*late_init)(struct ufs_hba *hba);
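The late_init member at line 26 lives in the driver's small per-variant wrapper struct; the probe hits further down (lines 610-612) fetch it with ufshcd_get_variant() and call it after ufshcd_init() succeeds. A minimal sketch of that wrapper, assuming nothing beyond what the hits show:

struct ufs_host {
        void (*late_init)(struct ufs_hba *hba);        /* runs after ufshcd_init() */
};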
100 static int ufs_intel_hce_enable_notify(struct ufs_hba *hba, in ufs_intel_hce_enable_notify() argument
104 if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) { in ufs_intel_hce_enable_notify()
105 u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE); in ufs_intel_hce_enable_notify()
108 ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE); in ufs_intel_hce_enable_notify()
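Lines 100-108 gate the inline-encryption engine behind host-controller enable: only on the POST_CHANGE notification, and only when UFSHCD_CAP_CRYPTO is set, is the controller-enable register rewritten. A plausible reconstruction; the OR of CRYPTO_GENERAL_ENABLE on the elided lines 106-107 is an assumption:

static int ufs_intel_hce_enable_notify(struct ufs_hba *hba,
                                       enum ufs_notify_change_status status)
{
        /* The crypto engine can only be switched on after the HC itself is enabled */
        if (status == POST_CHANGE && hba->caps & UFSHCD_CAP_CRYPTO) {
                u32 hce = ufshcd_readl(hba, REG_CONTROLLER_ENABLE);

                hce |= CRYPTO_GENERAL_ENABLE;   /* assumed: elided lines 106-107 */
                ufshcd_writel(hba, hce, REG_CONTROLLER_ENABLE);
        }

        return 0;
}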
114 static int ufs_intel_disable_lcc(struct ufs_hba *hba) in ufs_intel_disable_lcc() argument
119 ufshcd_dme_get(hba, attr, &lcc_enable); in ufs_intel_disable_lcc()
121 ufshcd_disable_host_tx_lcc(hba); in ufs_intel_disable_lcc()
126 static int ufs_intel_link_startup_notify(struct ufs_hba *hba, in ufs_intel_link_startup_notify() argument
133 err = ufs_intel_disable_lcc(hba); in ufs_intel_link_startup_notify()
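Lines 114-133 pair up: during link startup, the PRE_CHANGE notification disables the host's local TX LCC if the DME attribute reports it enabled. A sketch with the elided guards filled in as assumptions:

static int ufs_intel_disable_lcc(struct ufs_hba *hba)
{
        u32 attr = UIC_ARG_MIB(PA_LOCAL_TX_LCC_ENABLE);
        u32 lcc_enable = 0;

        ufshcd_dme_get(hba, attr, &lcc_enable);
        if (lcc_enable)                         /* assumed guard around line 121 */
                ufshcd_disable_host_tx_lcc(hba);

        return 0;
}

static int ufs_intel_link_startup_notify(struct ufs_hba *hba,
                                         enum ufs_notify_change_status status)
{
        int err = 0;

        if (status == PRE_CHANGE)               /* assumed: POST_CHANGE is a no-op */
                err = ufs_intel_disable_lcc(hba);

        return err;
}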
144 static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes) in ufs_intel_set_lanes() argument
146 struct ufs_pa_layer_attr pwr_info = hba->pwr_info; in ufs_intel_set_lanes()
151 ret = ufshcd_config_pwr_mode(hba, &pwr_info); in ufs_intel_set_lanes()
153 dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n", in ufs_intel_set_lanes()
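ufs_intel_set_lanes() copies the currently negotiated power-mode attributes, overrides the lane counts, and renegotiates via ufshcd_config_pwr_mode(). The two assignments elided between lines 146 and 151 are presumably lane_rx/lane_tx:

static int ufs_intel_set_lanes(struct ufs_hba *hba, u32 lanes)
{
        struct ufs_pa_layer_attr pwr_info = hba->pwr_info;
        int ret;

        pwr_info.lane_rx = lanes;               /* assumed: elided lines 148-149 */
        pwr_info.lane_tx = lanes;
        ret = ufshcd_config_pwr_mode(hba, &pwr_info);
        if (ret)
                dev_err(hba->dev, "%s: Setting %u lanes, err = %d\n",
                        __func__, lanes, ret);
        return ret;
}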
158 static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba, in ufs_intel_lkf_pwr_change_notify() argument
168 (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2)) in ufs_intel_lkf_pwr_change_notify()
169 ufs_intel_set_lanes(hba, 2); in ufs_intel_lkf_pwr_change_notify()
177 err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), in ufs_intel_lkf_pwr_change_notify()
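On Lakefield, the power-change hook forces both lanes up before a high-speed gear change (lines 168-169) and later reads the peer's PA_GRANULARITY (line 177). A sketch of the notification skeleton those hits imply; the placement of the peer read in POST_CHANGE and the settle delay before it are assumptions:

static int ufs_intel_lkf_pwr_change_notify(struct ufs_hba *hba,
                                           enum ufs_notify_change_status status,
                                           struct ufs_pa_layer_attr *dev_max_params,
                                           struct ufs_pa_layer_attr *dev_req_params)
{
        int err = 0;

        switch (status) {
        case PRE_CHANGE:
                /* HS mode on this platform requires both lanes */
                if (ufshcd_is_hs_mode(dev_max_params) &&
                    (hba->pwr_info.lane_rx != 2 || hba->pwr_info.lane_tx != 2))
                        ufs_intel_set_lanes(hba, 2);
                /* negotiation of dev_req_params elided */
                break;
        case POST_CHANGE:
                if (ufshcd_is_hs_mode(dev_req_params)) {        /* assumed condition */
                        u32 peer_granularity;

                        usleep_range(1000, 1250);               /* assumed settle delay */
                        err = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY),
                                                  &peer_granularity);
                }
                break;
        default:
                break;
        }

        return err;
}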
188 static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba) in ufs_intel_lkf_apply_dev_quirks() argument
194 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity); in ufs_intel_lkf_apply_dev_quirks()
198 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity); in ufs_intel_lkf_apply_dev_quirks()
202 ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate); in ufs_intel_lkf_apply_dev_quirks()
206 ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate); in ufs_intel_lkf_apply_dev_quirks()
213 ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE), new_peer_pa_tactivate); in ufs_intel_lkf_apply_dev_quirks()
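The LKF device quirk reads PA_GRANULARITY and PA_TACTIVATE from both the local and the peer side, then rewrites the peer's PA_TACTIVATE (line 213). A plausible reconstruction; the matching-granularity condition and the +2 adjustment are assumptions:

static int ufs_intel_lkf_apply_dev_quirks(struct ufs_hba *hba)
{
        u32 granularity, peer_granularity;
        u32 pa_tactivate, peer_pa_tactivate;
        int ret;

        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &granularity);
        if (ret)
                return ret;

        ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_GRANULARITY), &peer_granularity);
        if (ret)
                return ret;

        ret = ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &pa_tactivate);
        if (ret)
                return ret;

        ret = ufshcd_dme_peer_get(hba, UIC_ARG_MIB(PA_TACTIVATE), &peer_pa_tactivate);
        if (ret)
                return ret;

        /* Assumed: pad the peer's TActivate when both sides use the same granularity */
        if (granularity == peer_granularity) {
                u32 new_peer_pa_tactivate = pa_tactivate + 2;

                ret = ufshcd_dme_peer_set(hba, UIC_ARG_MIB(PA_TACTIVATE),
                                          new_peer_pa_tactivate);
        }

        return ret;
}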
228 static void intel_cache_ltr(struct ufs_hba *hba) in intel_cache_ltr() argument
230 struct intel_host *host = ufshcd_get_variant(hba); in intel_cache_ltr()
232 host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR); in intel_cache_ltr()
233 host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR); in intel_cache_ltr()
238 struct ufs_hba *hba = dev_get_drvdata(dev); in intel_ltr_set() local
239 struct intel_host *host = ufshcd_get_variant(hba); in intel_ltr_set()
249 ltr = readl(hba->mmio_base + INTEL_ACTIVELTR); in intel_ltr_set()
271 writel(ltr, hba->mmio_base + INTEL_ACTIVELTR); in intel_ltr_set()
272 writel(ltr, hba->mmio_base + INTEL_IDLELTR); in intel_ltr_set()
275 intel_cache_ltr(hba); in intel_ltr_set()
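Lines 228-275 are Intel's LTR (Latency Tolerance Reporting) plumbing: intel_cache_ltr() snapshots the two MMIO registers for debugfs, and intel_ltr_set() reprograms them when the device's PM QoS latency target changes. A condensed sketch; the value/scale encoding elided between lines 249 and 271, and the runtime-PM bracketing, are assumptions:

static void intel_cache_ltr(struct ufs_hba *hba)
{
        struct intel_host *host = ufshcd_get_variant(hba);

        host->active_ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
        host->idle_ltr = readl(hba->mmio_base + INTEL_IDLELTR);
}

static void intel_ltr_set(struct device *dev, s32 val)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);
        struct intel_host *host = ufshcd_get_variant(hba);     /* used by the elided encoding */
        u32 ltr;

        pm_runtime_get_sync(dev);               /* assumed: registers need the HC awake */

        ltr = readl(hba->mmio_base + INTEL_ACTIVELTR);
        /* encode 'val' into the LTR value/scale fields (elided, lines 250-270) */
        writel(ltr, hba->mmio_base + INTEL_ACTIVELTR);
        writel(ltr, hba->mmio_base + INTEL_IDLELTR);

        /* Cache what was just programmed so debugfs reflects it */
        intel_cache_ltr(hba);

        pm_runtime_put(dev);
}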
292 static void intel_add_debugfs(struct ufs_hba *hba) in intel_add_debugfs() argument
294 struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL); in intel_add_debugfs()
295 struct intel_host *host = ufshcd_get_variant(hba); in intel_add_debugfs()
297 intel_cache_ltr(hba); in intel_add_debugfs()
304 static void intel_remove_debugfs(struct ufs_hba *hba) in intel_remove_debugfs() argument
306 struct intel_host *host = ufshcd_get_variant(hba); in intel_remove_debugfs()
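The debugfs pair publishes those cached LTR values read-only under a per-device directory. A sketch assuming host->debugfs_root stores the dentry from line 294 and the files are plain hex u32 attributes:

static void intel_add_debugfs(struct ufs_hba *hba)
{
        struct dentry *dir = debugfs_create_dir(dev_name(hba->dev), NULL);
        struct intel_host *host = ufshcd_get_variant(hba);

        intel_cache_ltr(hba);

        host->debugfs_root = dir;               /* assumed member name */
        debugfs_create_x32("active_ltr", 0444, dir, &host->active_ltr);
        debugfs_create_x32("idle_ltr", 0444, dir, &host->idle_ltr);
}

static void intel_remove_debugfs(struct ufs_hba *hba)
{
        struct intel_host *host = ufshcd_get_variant(hba);

        debugfs_remove_recursive(host->debugfs_root);
}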
311 static int ufs_intel_device_reset(struct ufs_hba *hba) in ufs_intel_device_reset() argument
313 struct intel_host *host = ufshcd_get_variant(hba); in ufs_intel_device_reset()
319 err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result); in ufs_intel_device_reset()
323 dev_err(hba->dev, "%s: DSM error %d result %u\n", in ufs_intel_device_reset()
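Device reset prefers an ACPI _DSM call (INTEL_DSM_RESET), with line 323 reporting both the call error and the returned result. Platforms without the DSM fall back to the reset GPIO acquired in common init. A plausible reconstruction:

static int ufs_intel_device_reset(struct ufs_hba *hba)
{
        struct intel_host *host = ufshcd_get_variant(hba);

        if (host->dsm_fns & INTEL_DSM_RESET) {  /* assumed capability bitmap */
                u32 result = 0;
                int err;

                err = intel_dsm(host, hba->dev, INTEL_DSM_RESET, &result);
                if (!err && !result)
                        err = -EIO;             /* assumed: a zero result means failure */
                if (err)
                        dev_err(hba->dev, "%s: DSM error %d result %u\n",
                                __func__, err, result);
                return err;
        }

        if (!host->reset_gpio)
                return -EOPNOTSUPP;

        /* Assumed GPIO fallback: pulse the reset line */
        gpiod_set_value_cansleep(host->reset_gpio, 1);
        usleep_range(10, 15);
        gpiod_set_value_cansleep(host->reset_gpio, 0);
        usleep_range(10, 15);

        return 0;
}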
346 static int ufs_intel_common_init(struct ufs_hba *hba) in ufs_intel_common_init() argument
350 hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND; in ufs_intel_common_init()
352 host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL); in ufs_intel_common_init()
355 ufshcd_set_variant(hba, host); in ufs_intel_common_init()
356 intel_dsm_init(host, hba->dev); in ufs_intel_common_init()
358 if (hba->vops->device_reset) in ufs_intel_common_init()
359 hba->caps |= UFSHCD_CAP_DEEPSLEEP; in ufs_intel_common_init()
361 if (hba->vops->device_reset) in ufs_intel_common_init()
362 host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev); in ufs_intel_common_init()
364 dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n", in ufs_intel_common_init()
370 hba->caps |= UFSHCD_CAP_DEEPSLEEP; in ufs_intel_common_init()
373 intel_ltr_expose(hba->dev); in ufs_intel_common_init()
374 intel_add_debugfs(hba); in ufs_intel_common_init()
378 static void ufs_intel_common_exit(struct ufs_hba *hba) in ufs_intel_common_exit() argument
380 intel_remove_debugfs(hba); in ufs_intel_common_exit()
381 intel_ltr_hide(hba->dev); in ufs_intel_common_exit()
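Common init allocates the per-host state, probes the ACPI DSM, and decides whether DeepSleep can be advertised: the duplicated hba->vops->device_reset checks at lines 358 and 361 suggest two branches, one where the DSM supplies the reset and one where a reset GPIO must be found. A sketch of that structure (the branch condition is an assumption), together with the straightforward exit path:

static int ufs_intel_common_init(struct ufs_hba *hba)
{
        struct intel_host *host;

        hba->caps |= UFSHCD_CAP_RPM_AUTOSUSPEND;

        host = devm_kzalloc(hba->dev, sizeof(*host), GFP_KERNEL);
        if (!host)
                return -ENOMEM;
        ufshcd_set_variant(hba, host);
        intel_dsm_init(host, hba->dev);

        if (host->dsm_fns & INTEL_DSM_RESET) {          /* assumed condition */
                if (hba->vops->device_reset)
                        hba->caps |= UFSHCD_CAP_DEEPSLEEP;
        } else {
                if (hba->vops->device_reset)
                        host->reset_gpio = ufs_intel_get_reset_gpio(hba->dev);
                if (IS_ERR(host->reset_gpio)) {
                        dev_err(hba->dev, "%s: failed to get reset GPIO, error %ld\n",
                                __func__, PTR_ERR(host->reset_gpio));
                        host->reset_gpio = NULL;
                }
                if (host->reset_gpio)                   /* GPIO reset works: DeepSleep OK */
                        hba->caps |= UFSHCD_CAP_DEEPSLEEP;
        }

        intel_ltr_expose(hba->dev);
        intel_add_debugfs(hba);
        return 0;
}

static void ufs_intel_common_exit(struct ufs_hba *hba)
{
        intel_remove_debugfs(hba);
        intel_ltr_hide(hba->dev);
}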
384 static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op) in ufs_intel_resume() argument
386 if (ufshcd_is_link_hibern8(hba)) { in ufs_intel_resume()
387 int ret = ufshcd_uic_hibern8_exit(hba); in ufs_intel_resume()
390 ufshcd_set_link_active(hba); in ufs_intel_resume()
392 dev_err(hba->dev, "%s: hibern8 exit failed %d\n", in ufs_intel_resume()
398 ufshcd_set_link_off(hba); in ufs_intel_resume()
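The resume hook only repairs link state: if the link was left in HIBERN8 across suspend, exit it and mark the link active; if the exit fails, force the link off so the core falls back to a full reset and restore. A reconstruction consistent with lines 384-398:

static int ufs_intel_resume(struct ufs_hba *hba, enum ufs_pm_op op)
{
        if (ufshcd_is_link_hibern8(hba)) {
                int ret = ufshcd_uic_hibern8_exit(hba);

                if (!ret) {
                        ufshcd_set_link_active(hba);
                } else {
                        dev_err(hba->dev, "%s: hibern8 exit failed %d\n",
                                __func__, ret);
                        /* Anything short of a full reset risks an unrecoverable state */
                        ufshcd_set_link_off(hba);
                }
        }

        return 0;
}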
405 static int ufs_intel_ehl_init(struct ufs_hba *hba) in ufs_intel_ehl_init() argument
407 hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; in ufs_intel_ehl_init()
408 return ufs_intel_common_init(hba); in ufs_intel_ehl_init()
411 static void ufs_intel_lkf_late_init(struct ufs_hba *hba) in ufs_intel_lkf_late_init() argument
414 if (hba->caps & UFSHCD_CAP_DEEPSLEEP) { in ufs_intel_lkf_late_init()
415 hba->spm_lvl = UFS_PM_LVL_6; in ufs_intel_lkf_late_init()
416 hba->rpm_lvl = UFS_PM_LVL_6; in ufs_intel_lkf_late_init()
418 hba->spm_lvl = UFS_PM_LVL_5; in ufs_intel_lkf_late_init()
419 hba->rpm_lvl = UFS_PM_LVL_5; in ufs_intel_lkf_late_init()
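late_init runs after ufshcd_init(), by which point common init has decided whether DeepSleep is available, so LKF can pick the deepest workable PM levels: UFS_PM_LVL_6 is device DeepSleep with the link off, UFS_PM_LVL_5 is device power-down with the link off. Consolidated:

static void ufs_intel_lkf_late_init(struct ufs_hba *hba)
{
        if (hba->caps & UFSHCD_CAP_DEEPSLEEP) {
                hba->spm_lvl = UFS_PM_LVL_6;    /* DeepSleep + link off */
                hba->rpm_lvl = UFS_PM_LVL_6;
        } else {
                hba->spm_lvl = UFS_PM_LVL_5;    /* Power-down + link off */
                hba->rpm_lvl = UFS_PM_LVL_5;
        }
}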
423 static int ufs_intel_lkf_init(struct ufs_hba *hba) in ufs_intel_lkf_init() argument
428 hba->nop_out_timeout = 200; in ufs_intel_lkf_init()
429 hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; in ufs_intel_lkf_init()
430 hba->caps |= UFSHCD_CAP_CRYPTO; in ufs_intel_lkf_init()
431 err = ufs_intel_common_init(hba); in ufs_intel_lkf_init()
432 ufs_host = ufshcd_get_variant(hba); in ufs_intel_lkf_init()
437 static int ufs_intel_adl_init(struct ufs_hba *hba) in ufs_intel_adl_init() argument
439 hba->nop_out_timeout = 200; in ufs_intel_adl_init()
440 hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8; in ufs_intel_adl_init()
441 hba->caps |= UFSHCD_CAP_WB_EN; in ufs_intel_adl_init()
442 return ufs_intel_common_init(hba); in ufs_intel_adl_init()
445 static int ufs_intel_mtl_init(struct ufs_hba *hba) in ufs_intel_mtl_init() argument
447 hba->caps |= UFSHCD_CAP_CRYPTO | UFSHCD_CAP_WB_EN; in ufs_intel_mtl_init()
448 return ufs_intel_common_init(hba); in ufs_intel_mtl_init()
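Each SoC-specific init (EHL, LKF, ADL, MTL above) is common init plus quirks and capabilities; LKF additionally wires the late_init hook through the variant data, which is what makes the call at line 612 fire. A sketch of the LKF variant, with the late_init assignment after line 432 assumed:

static int ufs_intel_lkf_init(struct ufs_hba *hba)
{
        struct ufs_host *ufs_host;
        int err;

        hba->nop_out_timeout = 200;             /* NOP OUT can be slow on this part */
        hba->quirks |= UFSHCD_QUIRK_BROKEN_AUTO_HIBERN8;
        hba->caps |= UFSHCD_CAP_CRYPTO;
        err = ufs_intel_common_init(hba);
        ufs_host = ufshcd_get_variant(hba);
        ufs_host->late_init = ufs_intel_lkf_late_init;  /* assumed: elided line 433 */
        return err;
}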
451 static int ufs_qemu_get_hba_mac(struct ufs_hba *hba) in ufs_qemu_get_hba_mac() argument
456 static int ufs_qemu_mcq_config_resource(struct ufs_hba *hba) in ufs_qemu_mcq_config_resource() argument
458 hba->mcq_base = hba->mmio_base + ufshcd_mcq_queue_cfg_addr(hba); in ufs_qemu_mcq_config_resource()
463 static int ufs_qemu_op_runtime_config(struct ufs_hba *hba) in ufs_qemu_op_runtime_config() argument
468 u32 sqdao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_SQDAO, 0)); in ufs_qemu_op_runtime_config()
469 u32 sqisao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_SQISAO, 0)); in ufs_qemu_op_runtime_config()
470 u32 cqdao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_CQDAO, 0)); in ufs_qemu_op_runtime_config()
471 u32 cqisao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_CQISAO, 0)); in ufs_qemu_op_runtime_config()
473 hba->mcq_opr[OPR_SQD].offset = sqdao; in ufs_qemu_op_runtime_config()
474 hba->mcq_opr[OPR_SQIS].offset = sqisao; in ufs_qemu_op_runtime_config()
475 hba->mcq_opr[OPR_CQD].offset = cqdao; in ufs_qemu_op_runtime_config()
476 hba->mcq_opr[OPR_CQIS].offset = cqisao; in ufs_qemu_op_runtime_config()
479 opr = &hba->mcq_opr[i]; in ufs_qemu_op_runtime_config()
481 opr->base = hba->mmio_base + opr->offset; in ufs_qemu_op_runtime_config()
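The ufs_qemu_* hooks add MCQ (multi-queue) support when running on QEMU's UFS model: the queue-config space is located relative to the main MMIO BAR, and each operation-and-runtime region (submission/completion queue doorbells and interrupt status) gets its offset read from queue-0's config registers, then converted into a pointer. A sketch; the MAC value and per-queue stride are assumptions:

static int ufs_qemu_get_hba_mac(struct ufs_hba *hba)
{
        return MAX_SUPP_MAC;    /* assumed: fixed max active commands for the model */
}

static int ufs_qemu_mcq_config_resource(struct ufs_hba *hba)
{
        hba->mcq_base = hba->mmio_base + ufshcd_mcq_queue_cfg_addr(hba);

        return 0;
}

static int ufs_qemu_op_runtime_config(struct ufs_hba *hba)
{
        struct ufshcd_mcq_opr_info_t *opr;
        int i;

        u32 sqdao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_SQDAO, 0));
        u32 sqisao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_SQISAO, 0));
        u32 cqdao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_CQDAO, 0));
        u32 cqisao = ufsmcq_readl(hba, ufshcd_mcq_cfg_offset(REG_CQISAO, 0));

        hba->mcq_opr[OPR_SQD].offset = sqdao;
        hba->mcq_opr[OPR_SQIS].offset = sqisao;
        hba->mcq_opr[OPR_CQD].offset = cqdao;
        hba->mcq_opr[OPR_CQIS].offset = cqisao;

        for (i = 0; i < OPR_MAX; i++) {
                opr = &hba->mcq_opr[i];
                opr->stride = 48;               /* assumed per-queue register stride */
                opr->base = hba->mmio_base + opr->offset;
        }

        return 0;
}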
544 struct ufs_hba *hba = dev_get_drvdata(dev); in ufshcd_pci_restore() local
547 ufshcd_set_link_off(hba); in ufshcd_pci_restore()
560 struct ufs_hba *hba = pci_get_drvdata(pdev); in ufshcd_pci_remove() local
564 ufshcd_remove(hba); in ufshcd_pci_remove()
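On the PM side, restore after hibernation forces the link off first so the generic resume path performs a full reset and restore; remove disables runtime PM before unregistering the host. A sketch with the runtime-PM calls and the generic resume call assumed:

static int ufshcd_pci_restore(struct device *dev)
{
        struct ufs_hba *hba = dev_get_drvdata(dev);

        /* Force a full reset and restore */
        ufshcd_set_link_off(hba);

        return ufshcd_system_resume(dev);       /* assumed: generic resume follows */
}

static void ufshcd_pci_remove(struct pci_dev *pdev)
{
        struct ufs_hba *hba = pci_get_drvdata(pdev);

        pm_runtime_forbid(&pdev->dev);          /* assumed runtime-PM teardown */
        pm_runtime_get_noresume(&pdev->dev);
        ufshcd_remove(hba);
}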
578 struct ufs_hba *hba; in ufshcd_pci_probe() local
596 err = ufshcd_alloc_host(&pdev->dev, &hba); in ufshcd_pci_probe()
602 hba->vops = (struct ufs_hba_variant_ops *)id->driver_data; in ufshcd_pci_probe()
604 err = ufshcd_init(hba, mmio_base, pdev->irq); in ufshcd_pci_probe()
610 ufs_host = ufshcd_get_variant(hba); in ufshcd_pci_probe()
612 ufs_host->late_init(hba); in ufshcd_pci_probe()
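Probe follows the standard managed-PCI pattern: enable the device, map BAR 0, allocate the HBA, hand the variant ops over from the PCI ID table's driver_data, initialize, and finally run the variant's optional late_init hook (lines 610-612). A sketch; the pcim_* setup and runtime-PM tail are assumptions, and UFSHCD stands in for the driver-name string:

static int ufshcd_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
        struct ufs_host *ufs_host;
        struct ufs_hba *hba;
        void __iomem *mmio_base;
        int err;

        err = pcim_enable_device(pdev);
        if (err)
                return err;

        pci_set_master(pdev);

        err = pcim_iomap_regions(pdev, 1 << 0, UFSHCD); /* BAR 0 only */
        if (err < 0)
                return err;

        mmio_base = pcim_iomap_table(pdev)[0];

        err = ufshcd_alloc_host(&pdev->dev, &hba);
        if (err) {
                dev_err(&pdev->dev, "Allocation failed\n");
                return err;
        }

        hba->vops = (struct ufs_hba_variant_ops *)id->driver_data;

        err = ufshcd_init(hba, mmio_base, pdev->irq);
        if (err) {
                dev_err(&pdev->dev, "Initialization failed\n");
                return err;
        }

        ufs_host = ufshcd_get_variant(hba);
        if (ufs_host && ufs_host->late_init)
                ufs_host->late_init(hba);

        pm_runtime_put_noidle(&pdev->dev);
        pm_runtime_allow(&pdev->dev);

        return 0;
}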