Lines matching +full:deep +full:-sleep (MediaTek t7xx WWAN PCIe driver, t7xx_pci.c)
1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (c) 2021-2022, Intel Corporation.
23 #include <linux/dma-mapping.h>
81 return -ENODEV; in t7xx_mode_store()
83 mode = READ_ONCE(t7xx_dev->mode); in t7xx_mode_store()
87 return -EBUSY; in t7xx_mode_store()
93 WRITE_ONCE(t7xx_dev->mode, T7XX_FASTBOOT_SWITCHING); in t7xx_mode_store()
115 return -ENODEV; in t7xx_mode_show()
117 mode = READ_ONCE(t7xx_dev->mode); in t7xx_mode_show()
138 return -ENODEV; in t7xx_debug_ports_store()
145 WRITE_ONCE(t7xx_dev->debug_ports_show, show); in t7xx_debug_ports_store()
161 return -ENODEV; in t7xx_debug_ports_show()
163 show = READ_ONCE(t7xx_dev->debug_ports_show); in t7xx_debug_ports_show()
185 WRITE_ONCE(t7xx_dev->mode, mode); in t7xx_mode_update()
186 sysfs_notify(&t7xx_dev->pdev->dev.kobj, NULL, "t7xx_mode"); in t7xx_mode_update()
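The t7xx_mode_store()/t7xx_mode_show() and t7xx_debug_ports_store()/t7xx_debug_ports_show() handlers above are ordinary sysfs device attributes, and sysfs_notify() wakes any userspace poller of the "t7xx_mode" file when the mode changes. Below is a minimal sketch of how such handlers are typically wired into the attribute group created later in t7xx_pci_probe(); the group layout and names here are assumptions based on common kernel practice, not copied from the driver.

/* Sketch only: typical attribute wiring for the handlers above (assumed layout). */
static DEVICE_ATTR_RW(t7xx_mode);
static DEVICE_ATTR_RW(t7xx_debug_ports);

static struct attribute *t7xx_example_attrs[] = {
	&dev_attr_t7xx_mode.attr,
	&dev_attr_t7xx_debug_ports.attr,
	NULL,
};

static const struct attribute_group t7xx_example_attr_group = {
	.attrs = t7xx_example_attrs,
};

With a group like this, sysfs_create_group() in probe exposes both files under the PCI device's sysfs directory, and sysfs_remove_group() in remove takes them down again.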
219 if (ret == -ETIMEDOUT) in t7xx_wait_pm_config()
220 dev_err(&t7xx_dev->pdev->dev, "PM configuration timed out\n"); in t7xx_wait_pm_config()
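Only the timeout handling of t7xx_wait_pm_config() is visible here; it implies a bounded poll on a device status register. The function below is a generic sketch of such a wait using read_poll_timeout() from <linux/iopoll.h>; the register and ready bit are hypothetical, not the driver's actual PM-configuration check.

/* Sketch only: bounded poll on a (hypothetical) PM status register. */
static int example_wait_pm_config(void __iomem *status_reg)
{
	u32 val;

	/* Poll every 20 us for up to 20 ms; returns -ETIMEDOUT on failure. */
	return read_poll_timeout(ioread32, val, val & BIT(0),
				 20, 20000, false, status_reg);
}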
227 struct pci_dev *pdev = t7xx_dev->pdev; in t7xx_pci_pm_init()
229 INIT_LIST_HEAD(&t7xx_dev->md_pm_entities); in t7xx_pci_pm_init()
230 mutex_init(&t7xx_dev->md_pm_entity_mtx); in t7xx_pci_pm_init()
231 spin_lock_init(&t7xx_dev->md_pm_lock); in t7xx_pci_pm_init()
232 init_completion(&t7xx_dev->sleep_lock_acquire); in t7xx_pci_pm_init()
233 init_completion(&t7xx_dev->pm_sr_ack); in t7xx_pci_pm_init()
234 init_completion(&t7xx_dev->init_done); in t7xx_pci_pm_init()
235 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); in t7xx_pci_pm_init()
237 device_init_wakeup(&pdev->dev, true); in t7xx_pci_pm_init()
238 dev_pm_set_driver_flags(&pdev->dev, pdev->dev.power.driver_flags | in t7xx_pci_pm_init()
242 pm_runtime_set_autosuspend_delay(&pdev->dev, PM_AUTOSUSPEND_MS); in t7xx_pci_pm_init()
243 pm_runtime_use_autosuspend(&pdev->dev); in t7xx_pci_pm_init()
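t7xx_pci_pm_init() enables wakeup and configures runtime-PM autosuspend, and t7xx_pci_pm_init_late() below allows runtime PM later in bring-up. Code that touches the device is therefore expected to bracket hardware access with runtime-PM get/put calls. The function below is a sketch of that typical consumer-side pattern, not code taken from this driver.

/* Sketch only: typical runtime-PM bracket around hardware access. */
static int example_runtime_pm_bracket(struct t7xx_pci_dev *t7xx_dev)
{
	struct device *dev = &t7xx_dev->pdev->dev;
	int ret;

	ret = pm_runtime_resume_and_get(dev);
	if (ret < 0)
		return ret;

	/* ... access queues/registers while the device is guaranteed awake ... */

	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);
	return 0;
}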
250 /* Enable the PCIe resource lock only after MD deep sleep is done */ in t7xx_pci_pm_init_late()
258 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); in t7xx_pci_pm_init_late()
260 pm_runtime_mark_last_busy(&t7xx_dev->pdev->dev); in t7xx_pci_pm_init_late()
261 pm_runtime_allow(&t7xx_dev->pdev->dev); in t7xx_pci_pm_init_late()
262 pm_runtime_put_noidle(&t7xx_dev->pdev->dev); in t7xx_pci_pm_init_late()
263 complete_all(&t7xx_dev->init_done); in t7xx_pci_pm_init_late()
268 /* The device is kept in FSM re-init flow in t7xx_pci_pm_reinit()
271 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT); in t7xx_pci_pm_reinit()
273 pm_runtime_get_noresume(&t7xx_dev->pdev->dev); in t7xx_pci_pm_reinit()
283 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_EXCEPTION); in t7xx_pci_pm_exp_detected()
290 mutex_lock(&t7xx_dev->md_pm_entity_mtx); in t7xx_pci_pm_entity_register()
291 list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { in t7xx_pci_pm_entity_register()
292 if (entity->id == pm_entity->id) { in t7xx_pci_pm_entity_register()
293 mutex_unlock(&t7xx_dev->md_pm_entity_mtx); in t7xx_pci_pm_entity_register()
294 return -EEXIST; in t7xx_pci_pm_entity_register()
298 list_add_tail(&pm_entity->entity, &t7xx_dev->md_pm_entities); in t7xx_pci_pm_entity_register()
299 mutex_unlock(&t7xx_dev->md_pm_entity_mtx); in t7xx_pci_pm_entity_register()
307 mutex_lock(&t7xx_dev->md_pm_entity_mtx); in t7xx_pci_pm_entity_unregister()
308 list_for_each_entry_safe(entity, tmp_entity, &t7xx_dev->md_pm_entities, entity) { in t7xx_pci_pm_entity_unregister()
309 if (entity->id == pm_entity->id) { in t7xx_pci_pm_entity_unregister()
310 list_del(&pm_entity->entity); in t7xx_pci_pm_entity_unregister()
311 mutex_unlock(&t7xx_dev->md_pm_entity_mtx); in t7xx_pci_pm_entity_unregister()
316 mutex_unlock(&t7xx_dev->md_pm_entity_mtx); in t7xx_pci_pm_entity_unregister()
318 return -ENXIO; in t7xx_pci_pm_entity_unregister()
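The register/unregister helpers above, and the suspend/resume loops later in this file, imply a PM entity descriptor carrying a list node, a unique ID, an opaque parameter, and optional callbacks. The structure below is reconstructed purely from those call sites; its name, field order, and exact types are assumptions.

/* Sketch reconstructed from the call sites in this listing (names/types assumed). */
struct example_md_pm_entity {
	struct list_head entity;	/* linked on t7xx_dev->md_pm_entities */
	int id;				/* unique ID; duplicate registration returns -EEXIST */
	void *entity_param;		/* opaque cookie passed back to the callbacks */
	int (*suspend)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
	void (*suspend_late)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
	void (*resume_early)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
	int (*resume)(struct t7xx_pci_dev *t7xx_dev, void *entity_param);
};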
323 struct device *dev = &t7xx_dev->pdev->dev; in t7xx_pci_sleep_disable_complete()
326 ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire, in t7xx_pci_sleep_disable_complete()
335 * t7xx_pci_disable_sleep() - Disable deep sleep capability.
 338 * Lock the deep sleep capability; note that the device can still go into deep sleep
 339 * state while it is in D0 state, from the host's point of view.
 341 * If the device is in deep sleep state, wake it up and disable the deep sleep capability.
347 spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags); in t7xx_pci_disable_sleep()
348 t7xx_dev->sleep_disable_count++; in t7xx_pci_disable_sleep()
349 if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED) in t7xx_pci_disable_sleep()
352 if (t7xx_dev->sleep_disable_count == 1) { in t7xx_pci_disable_sleep()
355 reinit_completion(&t7xx_dev->sleep_lock_acquire); in t7xx_pci_disable_sleep()
364 spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); in t7xx_pci_disable_sleep()
368 spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); in t7xx_pci_disable_sleep()
369 complete_all(&t7xx_dev->sleep_lock_acquire); in t7xx_pci_disable_sleep()
373 * t7xx_pci_enable_sleep() - Enable deep sleep capability.
 376 * After enabling deep sleep, the device can enter deep sleep state.
382 spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags); in t7xx_pci_enable_sleep()
383 t7xx_dev->sleep_disable_count--; in t7xx_pci_enable_sleep()
384 if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED) in t7xx_pci_enable_sleep()
387 if (t7xx_dev->sleep_disable_count == 0) in t7xx_pci_enable_sleep()
391 spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags); in t7xx_pci_enable_sleep()
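Judging by the counting logic above, t7xx_pci_disable_sleep(), t7xx_pci_sleep_disable_complete() and t7xx_pci_enable_sleep() form a reference-counted deep-sleep lock: the first disable arms sleep_lock_acquire and asks the device for the resource lock, later disables only bump sleep_disable_count, and the final enable releases it again. Below is a sketch of the expected calling pattern; the wrapper function is hypothetical and it assumes a zero return from t7xx_pci_sleep_disable_complete() means the wait above timed out.

/* Sketch only: bracketing hardware access with the deep-sleep lock. */
static int example_deep_sleep_locked_access(struct t7xx_pci_dev *t7xx_dev)
{
	int ret = 0;

	t7xx_pci_disable_sleep(t7xx_dev);

	/* Wait until the device has actually left deep sleep (or time out). */
	if (!t7xx_pci_sleep_disable_complete(t7xx_dev)) {
		ret = -ETIMEDOUT;
		goto unlock;
	}

	/* ... safe to program DMA queues / doorbells here ... */

unlock:
	t7xx_pci_enable_sleep(t7xx_dev);
	return ret;
}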
398 reinit_completion(&t7xx_dev->pm_sr_ack); in t7xx_send_pm_request()
400 wait_ret = wait_for_completion_timeout(&t7xx_dev->pm_sr_ack, in t7xx_send_pm_request()
403 return -ETIMEDOUT; in t7xx_send_pm_request()
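t7xx_send_pm_request() re-arms the pm_sr_ack completion, signals the device, and waits for the acknowledgement with a timeout; the matching complete() has to come from the interrupt path that receives the device's ack. The handler below is a generic sketch of that other half of the handshake, with a hypothetical name and no claim about which interrupt actually delivers the ack in this driver.

/* Sketch only: the ack side of the pm_sr_ack completion handshake. */
static irqreturn_t example_pm_ack_isr(int irq, void *data)
{
	struct t7xx_pci_dev *t7xx_dev = data;

	complete_all(&t7xx_dev->pm_sr_ack);
	return IRQ_HANDLED;
}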
416 if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT || in __t7xx_pci_pm_suspend()
417 READ_ONCE(t7xx_dev->mode) != T7XX_READY) { in __t7xx_pci_pm_suspend()
418 dev_err(&pdev->dev, "[PM] Exiting suspend, modem in invalid state\n"); in __t7xx_pci_pm_suspend()
419 return -EFAULT; in __t7xx_pci_pm_suspend()
429 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); in __t7xx_pci_pm_suspend()
431 t7xx_dev->rgu_pci_irq_en = false; in __t7xx_pci_pm_suspend()
433 list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { in __t7xx_pci_pm_suspend()
434 if (!entity->suspend) in __t7xx_pci_pm_suspend()
437 ret = entity->suspend(t7xx_dev, entity->entity_param); in __t7xx_pci_pm_suspend()
439 entity_id = entity->id; in __t7xx_pci_pm_suspend()
440 dev_err(&pdev->dev, "[PM] Suspend error: %d, id: %d\n", ret, entity_id); in __t7xx_pci_pm_suspend()
447 dev_err(&pdev->dev, "[PM] MD suspend error: %d\n", ret); in __t7xx_pci_pm_suspend()
454 dev_err(&pdev->dev, "[PM] SAP suspend error: %d\n", ret); in __t7xx_pci_pm_suspend()
458 list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { in __t7xx_pci_pm_suspend()
459 if (entity->suspend_late) in __t7xx_pci_pm_suspend()
460 entity->suspend_late(t7xx_dev, entity->entity_param); in __t7xx_pci_pm_suspend()
467 list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { in __t7xx_pci_pm_suspend()
468 if (entity_id == entity->id) in __t7xx_pci_pm_suspend()
471 if (entity->resume) in __t7xx_pci_pm_suspend()
472 entity->resume(t7xx_dev, entity->entity_param); in __t7xx_pci_pm_suspend()
476 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); in __t7xx_pci_pm_suspend()
489 * following function will re-enable PCIe interrupts. in t7xx_pcie_interrupt_reinit()
499 ret = pcim_enable_device(t7xx_dev->pdev); in t7xx_pcie_reinit()
516 struct t7xx_fsm_ctl *fsm_ctl = t7xx_dev->md->fsm_ctl; in t7xx_send_fsm_command()
517 struct device *dev = &t7xx_dev->pdev->dev; in t7xx_send_fsm_command()
518 int ret = -EINVAL; in t7xx_send_fsm_command()
528 t7xx_dev->rgu_pci_irq_en = true; in t7xx_send_fsm_command()
545 enum t7xx_mode mode = READ_ONCE(t7xx_dev->mode); in t7xx_pci_reprobe_early()
549 pm_runtime_put_noidle(&t7xx_dev->pdev->dev); in t7xx_pci_reprobe_early()
578 if (atomic_read(&t7xx_dev->md_pm_state) <= MTK_PM_INIT) { in __t7xx_pci_pm_resume()
611 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); in __t7xx_pci_pm_resume()
612 t7xx_dev->rgu_pci_irq_en = true; in __t7xx_pci_pm_resume()
637 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_SUSPENDED); in __t7xx_pci_pm_resume()
645 list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { in __t7xx_pci_pm_resume()
646 if (entity->resume_early) in __t7xx_pci_pm_resume()
647 entity->resume_early(t7xx_dev, entity->entity_param); in __t7xx_pci_pm_resume()
652 dev_err(&pdev->dev, "[PM] MD resume error: %d\n", ret); in __t7xx_pci_pm_resume()
656 dev_err(&pdev->dev, "[PM] SAP resume error: %d\n", ret); in __t7xx_pci_pm_resume()
658 list_for_each_entry(entity, &t7xx_dev->md_pm_entities, entity) { in __t7xx_pci_pm_resume()
659 if (entity->resume) { in __t7xx_pci_pm_resume()
660 ret = entity->resume(t7xx_dev, entity->entity_param); in __t7xx_pci_pm_resume()
662 dev_err(&pdev->dev, "[PM] Resume entry ID: %d error: %d\n", in __t7xx_pci_pm_resume()
663 entity->id, ret); in __t7xx_pci_pm_resume()
667 t7xx_dev->rgu_pci_irq_en = true; in __t7xx_pci_pm_resume()
670 pm_runtime_mark_last_busy(&pdev->dev); in __t7xx_pci_pm_resume()
671 atomic_set(&t7xx_dev->md_pm_state, MTK_PM_RESUMED); in __t7xx_pci_pm_resume()
698 if (!wait_for_completion_timeout(&t7xx_dev->init_done, T7XX_INIT_TIMEOUT * HZ)) { in t7xx_pci_pm_prepare()
699 dev_warn(dev, "Not ready for system sleep.\n"); in t7xx_pci_pm_prepare()
700 return -ETIMEDOUT; in t7xx_pci_pm_prepare()
756 if (!t7xx_dev->intr_handler[i]) in t7xx_request_irq()
759 irq_descr = devm_kasprintf(&pdev->dev, GFP_KERNEL, "%s_%d", in t7xx_request_irq()
760 dev_driver_string(&pdev->dev), i); in t7xx_request_irq()
762 ret = -ENOMEM; in t7xx_request_irq()
767 ret = request_threaded_irq(irq_vec, t7xx_dev->intr_handler[i], in t7xx_request_irq()
768 t7xx_dev->intr_thread[i], 0, irq_descr, in t7xx_request_irq()
769 t7xx_dev->callback_param[i]); in t7xx_request_irq()
771 dev_err(&pdev->dev, "Failed to request IRQ: %d\n", ret); in t7xx_request_irq()
777 while (i--) { in t7xx_request_irq()
778 if (!t7xx_dev->intr_handler[i]) in t7xx_request_irq()
781 free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]); in t7xx_request_irq()
790 struct pci_dev *pdev = t7xx_dev->pdev; in t7xx_setup_msix()
 793 /* Only 6 interrupts are used, but the HW design requires a power-of-2 IRQ allocation */ in t7xx_setup_msix()
796 dev_err(&pdev->dev, "Failed to allocate MSI-X entry: %d\n", ret); in t7xx_setup_msix()
814 if (!t7xx_dev->pdev->msix_cap) in t7xx_interrupt_init()
815 return -EINVAL; in t7xx_interrupt_init()
830 t7xx_dev->base_addr.infracfg_ao_base = t7xx_dev->base_addr.pcie_ext_reg_base + in t7xx_pci_infracfg_ao_calc()
831 INFRACFG_AO_DEV_CHIP - in t7xx_pci_infracfg_ao_calc()
832 t7xx_dev->base_addr.pcie_dev_reg_trsl_addr; in t7xx_pci_infracfg_ao_calc()
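The computation above derives the host-side pointer for the always-on infracfg block: it takes the device-side address INFRACFG_AO_DEV_CHIP, subtracts the device-side base of the translated window (pcie_dev_reg_trsl_addr), and adds the host ioremapped base of that window (pcie_ext_reg_base). In general form, with a hypothetical helper name:

/* Sketch only: generic device-address to host-pointer translation. */
static void __iomem *example_dev_addr_to_host(void __iomem *ext_reg_base,
					      u64 window_dev_base, u64 dev_addr)
{
	/* dev_addr must lie inside the window starting at window_dev_base. */
	return ext_reg_base + (dev_addr - window_dev_base);
}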
841 t7xx_dev = devm_kzalloc(&pdev->dev, sizeof(*t7xx_dev), GFP_KERNEL); in t7xx_pci_probe()
843 return -ENOMEM; in t7xx_pci_probe()
846 t7xx_dev->pdev = pdev; in t7xx_pci_probe()
857 dev_err(&pdev->dev, "Could not request IREG BAR: %d\n", ret); in t7xx_pci_probe()
858 return -ENOMEM; in t7xx_pci_probe()
865 dev_err(&pdev->dev, "Could not request EREG BAR: %d\n", ret); in t7xx_pci_probe()
866 return -ENOMEM; in t7xx_pci_probe()
868 t7xx_dev->base_addr.pcie_ext_reg_base = iomem; in t7xx_pci_probe()
870 ret = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)); in t7xx_pci_probe()
872 dev_err(&pdev->dev, "Could not set PCI DMA mask: %d\n", ret); in t7xx_pci_probe()
876 ret = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)); in t7xx_pci_probe()
878 dev_err(&pdev->dev, "Could not set consistent PCI DMA mask: %d\n", ret); in t7xx_pci_probe()
896 ret = sysfs_create_group(&t7xx_dev->pdev->dev.kobj, in t7xx_pci_probe()
912 sysfs_remove_group(&t7xx_dev->pdev->dev.kobj, in t7xx_pci_probe()
927 sysfs_remove_group(&t7xx_dev->pdev->dev.kobj, in t7xx_pci_remove()
932 if (!t7xx_dev->intr_handler[i]) in t7xx_pci_remove()
935 free_irq(pci_irq_vector(pdev, i), t7xx_dev->callback_param[i]); in t7xx_pci_remove()
938 pci_free_irq_vectors(t7xx_dev->pdev); in t7xx_pci_remove()