Lines Matching +full:pm +full:- +full:alive

1 // SPDX-License-Identifier: MIT
8 #include <linux/fault-inject.h>
31 * Xe PM implements the main routines for both system-level suspend states and
34 * System Level Suspend (S-States) - In general, this is OS-initiated suspend
39 * PCI Device Suspend (D-States) - This is the opportunistic PCIe device low power
43 * alive for a quicker, low-latency resume, or D3Cold where Vcc power is off for
48 * the transition towards D3Cold. The lowest runtime PM possible from the PCI
56 * Runtime PM - This infrastructure provided by the Linux kernel allows the
59 * (PC-states), and/or other low-level power states. The Xe PM component provides
63 * Also, Xe PM provides get and put functions that the Xe driver will use to
68 * sysfs, debugfs, dma-buf sharing, and GPU execution.
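
To make the outer-level protection concrete: a minimal sketch of a DRM ioctl wrapper that takes the runtime PM reference before any work and drops it unconditionally afterwards, using the xe_pm_runtime_get_ioctl()/xe_pm_runtime_put() helpers shown further below (to_xe_device() is assumed from the driver's headers). Note pm_runtime_get_sync() raises the usage count even on failure, which is why the put is unconditional.

    #include <drm/drm_file.h>
    #include <drm/drm_ioctl.h>

    static long example_drm_ioctl(struct file *file, unsigned int cmd,
                                  unsigned long arg)
    {
            struct drm_file *file_priv = file->private_data;
            struct xe_device *xe = to_xe_device(file_priv->minor->dev);
            long ret;

            /* Outermost level: no locks held yet, safe to resume here */
            ret = xe_pm_runtime_get_ioctl(xe);
            if (ret >= 0)
                    ret = drm_ioctl(file, cmd, arg);
            xe_pm_runtime_put(xe);  /* count was raised even on failure */

            return ret;
    }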
85 * xe_rpm_reclaim_safe() - Whether runtime resume can be done from reclaim context
93 return !xe->d3cold.capable && !xe->info.has_sriov; in xe_rpm_reclaim_safe()
111 * xe_pm_suspend - Helper for System suspend, i.e. S0->S3 / S0->S2idle
122 drm_dbg(&xe->drm, "Suspending device\n"); in xe_pm_suspend()
147 drm_dbg(&xe->drm, "Device suspended\n"); in xe_pm_suspend()
150 drm_dbg(&xe->drm, "Device suspend failed %d\n", err); in xe_pm_suspend()
155 * xe_pm_resume - Helper for System resume S3->S0 / S2idle->S0
167 drm_dbg(&xe->drm, "Resuming device\n"); in xe_pm_resume()
198 drm_dbg(&xe->drm, "Device resumed\n"); in xe_pm_resume()
201 drm_dbg(&xe->drm, "Device resume failed %d\n", err); in xe_pm_resume()
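
On the PCI driver side, these suspend/resume helpers are reached through dev_pm_ops; a sketch of the usual wiring, where the xe_pci_* names are thin wrappers assumed for illustration:

    #include <linux/pm.h>

    static const struct dev_pm_ops xe_pm_ops = {
            /* System sleep: S0 -> S3 / S2idle and back */
            SET_SYSTEM_SLEEP_PM_OPS(xe_pci_suspend, xe_pci_resume)
            /* Opportunistic device states: D0 <-> D3hot/D3cold */
            SET_RUNTIME_PM_OPS(xe_pci_runtime_suspend, xe_pci_runtime_resume,
                               xe_pci_runtime_idle)
    };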
207 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_pm_pci_d3cold_capable()
216 drm_dbg(&xe->drm, "d3cold: PME# not supported\n"); in xe_pm_pci_d3cold_capable()
222 drm_dbg(&xe->drm, "d3cold: ACPI _PR3 not present\n"); in xe_pm_pci_d3cold_capable()
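
Both checks above are standard D3cold prerequisites, evaluated at the PCI root port since that is where Vcc is actually cut; a condensed sketch using the PCI core helpers:

    #include <linux/pci.h>

    static bool example_d3cold_capable(struct pci_dev *pdev)
    {
            struct pci_dev *root = pcie_find_root_port(pdev);

            if (!root)
                    return false;

            /* Must be able to signal wake (PME#) from D3cold */
            if (!pci_pme_capable(root, PCI_D3cold))
                    return false;

            /* Platform must expose _PR3 power resources for the port */
            if (!pci_pr3_present(root))
                    return false;

            return true;
    }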
231 struct device *dev = xe->drm.dev; in xe_pm_runtime_init()
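
Runtime PM bring-up for a device like this typically declares the post-probe state, enables autosuspend, and drops the reference that probe has been holding; a generic sketch (the 1000 ms delay is illustrative, not the driver's actual value):

    #include <linux/pm_runtime.h>

    static void example_runtime_init(struct device *dev)
    {
            pm_runtime_use_autosuspend(dev);
            pm_runtime_set_autosuspend_delay(dev, 1000);    /* illustrative */
            pm_runtime_set_active(dev);     /* device is powered after probe */
            pm_runtime_allow(dev);          /* permit runtime suspend */
            pm_runtime_mark_last_busy(dev);
            pm_runtime_put_autosuspend(dev); /* balance probe's usage count */
    }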
256 INIT_LIST_HEAD(&xe->mem_access.vram_userfault.list); in xe_pm_init_early()
258 err = drmm_mutex_init(&xe->drm, &xe->mem_access.vram_userfault.lock); in xe_pm_init_early()
262 err = drmm_mutex_init(&xe->drm, &xe->d3cold.lock); in xe_pm_init_early()
273 if (xe->info.platform == XE_BATTLEMAGE) in vram_threshold_value()
280 * xe_pm_init - Initialize Xe Power Management
296 xe->d3cold.capable = xe_pm_pci_d3cold_capable(xe); in xe_pm_init()
298 if (xe->d3cold.capable) { in xe_pm_init()
315 * xe_pm_runtime_fini - Finalize Runtime PM
320 struct device *dev = xe->drm.dev; in xe_pm_runtime_fini()
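
Teardown is the mirror image: wake the device one last time and forbid further runtime suspend so nothing races the removal path; a minimal sketch of that sequence (assumed shape, not visible in these fragments):

            pm_runtime_get_sync(dev);   /* resume for teardown */
            pm_runtime_forbid(dev);     /* block runtime suspend from here on */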
329 WRITE_ONCE(xe->pm_callback_task, task); in xe_pm_write_callback_task()
333 * the extent that something else re-uses the task written in in xe_pm_write_callback_task()
344 return READ_ONCE(xe->pm_callback_task); in xe_pm_read_callback_task()
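
The stored task pointer is what lets other helpers detect that they are being called from inside a suspend/resume callback on the same thread and bail out instead of deadlocking; a sketch of the check built from the accessor above (current comes from <linux/sched.h>):

            if (xe_pm_read_callback_task(xe) == current) {
                    /* We are running inside the PM callback itself;
                     * requesting a resume here would recurse. */
                    return;
            }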
348 * xe_pm_runtime_suspended - Check if runtime_pm state is suspended
353 * It can be used only as an unreliable assertion, to ensure that we are not in
360 return pm_runtime_suspended(xe->drm.dev); in xe_pm_runtime_suspended()
364 * xe_pm_runtime_suspend - Prepare our device for D3hot/D3Cold
377 /* Disable access_ongoing asserts and prevent recursive pm calls */ in xe_pm_runtime_suspend()
407 mutex_lock(&xe->mem_access.vram_userfault.lock); in xe_pm_runtime_suspend()
409 &xe->mem_access.vram_userfault.list, vram_userfault_link) in xe_pm_runtime_suspend()
411 mutex_unlock(&xe->mem_access.vram_userfault.lock); in xe_pm_runtime_suspend()
415 if (xe->d3cold.allowed) { in xe_pm_runtime_suspend()
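
The d3cold branch matters because Vcc is cut in D3cold, so VRAM contents do not survive and must be moved out first; a sketch of that branch, assuming an eviction helper named along the lines of xe_bo_evict_all() (treat the name as an assumption here):

            if (xe->d3cold.allowed) {
                    /* VRAM loses power in D3cold: evict everything
                     * to system memory before going down. */
                    err = xe_bo_evict_all(xe);
                    if (err)
                            goto out;
            }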
440 * xe_pm_runtime_resume - Waking up from D3hot/D3Cold
452 /* Disable access_ongoing asserts and prevent recursive pm calls */ in xe_pm_runtime_resume()
457 if (xe->d3cold.allowed) { in xe_pm_runtime_resume()
480 if (xe->d3cold.allowed) { in xe_pm_runtime_resume()
495 * sensitive to ever see the 0 -> 1 transition with the caller's locks
500 * non-debug builds). Lockdep then only needs to see the
501 * xe_pm_runtime_xxx_map -> runtime_resume callback once, and then can
502 * hopefully validate all the (callers_locks) -> xe_pm_runtime_xxx_map.
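
The annotation described above is built from a static lockdep map that is acquired and immediately released around every get/put, so lockdep records the caller-lock -> resume ordering even on calls where no resume actually happens. A condensed sketch of the pattern (the driver keeps separate maps for the D3cold and non-D3cold paths; one is shown for brevity):

    #include <linux/lockdep.h>

    static struct lockdep_map xe_pm_runtime_lockdep_map = {
            .name = "xe_pm_runtime_lockdep_map"
    };

    static void example_rpm_lockmap_poke(void)
    {
            /* Pretend-take the map so lockdep sees the dependency */
            lock_map_acquire(&xe_pm_runtime_lockdep_map);
            lock_map_release(&xe_pm_runtime_lockdep_map);
    }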
535 * xe_pm_runtime_get - Get a runtime_pm reference and resume synchronously
541 pm_runtime_get_noresume(xe->drm.dev); in xe_pm_runtime_get()
547 pm_runtime_resume(xe->drm.dev); in xe_pm_runtime_get()
551 * xe_pm_runtime_put - Put the runtime_pm reference back and mark as idle
558 pm_runtime_put_noidle(xe->drm.dev); in xe_pm_runtime_put()
560 pm_runtime_mark_last_busy(xe->drm.dev); in xe_pm_runtime_put()
561 pm_runtime_put(xe->drm.dev); in xe_pm_runtime_put()
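
A typical non-ioctl caller simply brackets a bounded stretch of hardware access with this pair; minimal sketch (example_touch_hw() is a placeholder):

            xe_pm_runtime_get(xe);      /* resumes synchronously if needed */
            example_touch_hw(xe);       /* placeholder for real HW access */
            xe_pm_runtime_put(xe);      /* marks last busy, may autosuspend */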
566 * xe_pm_runtime_get_ioctl - Get a runtime_pm reference before ioctl
576 return -ELOOP; in xe_pm_runtime_get_ioctl()
579 return pm_runtime_get_sync(xe->drm.dev); in xe_pm_runtime_get_ioctl()
583 * xe_pm_runtime_get_if_active - Get a runtime_pm reference if device active
591 return pm_runtime_get_if_active(xe->drm.dev) > 0; in xe_pm_runtime_get_if_active()
595 * xe_pm_runtime_get_if_in_use - Get a new reference if the device is active and a previous reference is held
605 pm_runtime_get_noresume(xe->drm.dev); in xe_pm_runtime_get_if_in_use()
609 return pm_runtime_get_if_in_use(xe->drm.dev) > 0; in xe_pm_runtime_get_if_in_use()
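
The conditional variants fit contexts that must never block on a resume, e.g. sampling paths that are meaningless while the device sleeps; a sketch of the usual pattern (sample_counters() is a placeholder):

            if (!xe_pm_runtime_get_if_active(xe))
                    return;             /* suspended: nothing to sample */

            sample_counters(xe);        /* HW guaranteed awake here */
            xe_pm_runtime_put(xe);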
619 struct device *dev = xe->drm.dev; in xe_pm_suspending_or_resuming()
621 return dev->power.runtime_status == RPM_SUSPENDING || in xe_pm_suspending_or_resuming()
622 dev->power.runtime_status == RPM_RESUMING || in xe_pm_suspending_or_resuming()
630 * xe_pm_runtime_get_noresume - Bump runtime PM usage counter without resuming
634 * protected by outer-bound callers of `xe_pm_runtime_get`.
646 pm_runtime_get_noresume(xe->drm.dev); in xe_pm_runtime_get_noresume()
647 drm_WARN(&xe->drm, !xe_pm_suspending_or_resuming(xe), in xe_pm_runtime_get_noresume()
648 "Missing outer runtime PM protection\n"); in xe_pm_runtime_get_noresume()
653 * xe_pm_runtime_resume_and_get - Resume, then get a runtime_pm ref if awake.
662 pm_runtime_get_noresume(xe->drm.dev); in xe_pm_runtime_resume_and_get()
667 return pm_runtime_resume_and_get(xe->drm.dev) >= 0; in xe_pm_runtime_resume_and_get()
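
Since the wrapper folds pm_runtime_resume_and_get()'s errno into a bool, callers read as a simple guard; sketch (the -EIO choice is illustrative):

            if (!xe_pm_runtime_resume_and_get(xe))
                    return -EIO;        /* wake-up failed, no ref held */

            /* ... device awake, reference held ... */
            xe_pm_runtime_put(xe);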
671 * xe_pm_assert_unbounded_bridge - Disable PM on an unbound PCIe parent bridge
676 struct pci_dev *pdev = to_pci_dev(xe->drm.dev); in xe_pm_assert_unbounded_bridge()
682 if (!bridge->driver) { in xe_pm_assert_unbounded_bridge()
683 drm_warn(&xe->drm, "unbound parent pci bridge, device won't support any PM.\n"); in xe_pm_assert_unbounded_bridge()
684 device_set_pm_not_required(&pdev->dev); in xe_pm_assert_unbounded_bridge()
689 * xe_pm_set_vram_threshold - Set a vram threshold for allowing/blocking D3Cold
702 man = ttm_manager_type(&xe->ttm, i); in xe_pm_set_vram_threshold()
704 vram_total_mb += DIV_ROUND_UP_ULL(man->size, 1024 * 1024); in xe_pm_set_vram_threshold()
707 drm_dbg(&xe->drm, "Total vram %u mb\n", vram_total_mb); in xe_pm_set_vram_threshold()
710 return -EINVAL; in xe_pm_set_vram_threshold()
712 mutex_lock(&xe->d3cold.lock); in xe_pm_set_vram_threshold()
713 xe->d3cold.vram_threshold = threshold; in xe_pm_set_vram_threshold()
714 mutex_unlock(&xe->d3cold.lock); in xe_pm_set_vram_threshold()
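
The threshold is in MiB and validated against the summed size of all VRAM managers; a sketch of a caller, e.g. a sysfs store handler, parsing and handing the value over (the sysfs plumbing itself is assumed, not shown in these fragments):

            u32 threshold_mb;
            int ret;

            if (kstrtou32(buf, 0, &threshold_mb))
                    return -EINVAL;

            /* Returns -EINVAL if the value exceeds total VRAM */
            ret = xe_pm_set_vram_threshold(xe, threshold_mb);
            if (ret)
                    return ret;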
720 * xe_pm_d3cold_allowed_toggle - Check conditions to toggle d3cold.allowed
733 if (!xe->d3cold.capable) { in xe_pm_d3cold_allowed_toggle()
734 xe->d3cold.allowed = false; in xe_pm_d3cold_allowed_toggle()
739 man = ttm_manager_type(&xe->ttm, i); in xe_pm_d3cold_allowed_toggle()
746 mutex_lock(&xe->d3cold.lock); in xe_pm_d3cold_allowed_toggle()
748 if (total_vram_used_mb < xe->d3cold.vram_threshold) in xe_pm_d3cold_allowed_toggle()
749 xe->d3cold.allowed = true; in xe_pm_d3cold_allowed_toggle()
751 xe->d3cold.allowed = false; in xe_pm_d3cold_allowed_toggle()
753 mutex_unlock(&xe->d3cold.lock); in xe_pm_d3cold_allowed_toggle()
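
The toggle runs on the way into runtime suspend, typically from the runtime idle callback, so the D3hot-vs-D3cold decision tracks current VRAM usage; a sketch (the xe_pci_runtime_idle name and pdev_to_xe_device() helper are assumptions about the PCI glue):

    static int xe_pci_runtime_idle(struct device *dev)
    {
            struct xe_device *xe = pdev_to_xe_device(to_pci_dev(dev));

            xe_pm_d3cold_allowed_toggle(xe);

            return 0;
    }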
757 * xe_pm_module_init() - Perform xe_pm specific module initialization.