// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <[email protected]> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <[email protected]> */
/* Copyright 2023 Collabora ltd. */

#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

#include "panthor_devfreq.h"
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gpu.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"

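/* Check that the coherency protocol advertised by the bus (queried with
 * device_get_dma_attr()) is actually supported by the GPU before claiming
 * the device is coherent.
 */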
static int panthor_gpu_coherency_init(struct panthor_device *ptdev)
{
	ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;

	if (!ptdev->coherent)
		return 0;

	/* Check if the ACE-Lite coherency protocol is actually supported by the GPU.
	 * ACE protocol has never been supported for command stream frontend GPUs.
	 */
	if ((gpu_read(ptdev, GPU_COHERENCY_FEATURES) &
		      GPU_COHERENCY_PROT_BIT(ACE_LITE)))
		return 0;

	drm_err(&ptdev->base, "Coherency not supported by the device");
	return -ENOTSUPP;
}

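/* Grab the GPU clocks: 'core' is mandatory, while 'stacks' and 'coregroup'
 * are optional and may be absent on some platforms.
 */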
static int panthor_clk_init(struct panthor_device *ptdev)
{
	ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
	if (IS_ERR(ptdev->clks.core))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.core),
				     "get 'core' clock failed");

	ptdev->clks.stacks = devm_clk_get_optional(ptdev->base.dev, "stacks");
	if (IS_ERR(ptdev->clks.stacks))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.stacks),
				     "get 'stacks' clock failed");

	ptdev->clks.coregroup = devm_clk_get_optional(ptdev->base.dev, "coregroup");
	if (IS_ERR(ptdev->clks.coregroup))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.coregroup),
				     "get 'coregroup' clock failed");

	drm_info(&ptdev->base, "clock rate = %lu\n", clk_get_rate(ptdev->clks.core));
	return 0;
}

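/* Unplug the device: reject further HW access and tear the blocks down.
 * Can be entered concurrently from the reset work and the platform device
 * remove callback; only the first caller does the teardown, the others wait
 * on unplug.done.
 */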
void panthor_device_unplug(struct panthor_device *ptdev)
{
	/* This function can be called from two different paths: the reset work
	 * and the platform device remove callback. drm_dev_unplug() doesn't
	 * deal with concurrent callers, so we have to protect drm_dev_unplug()
	 * calls with our own lock, and bail out if the device is already
	 * unplugged.
	 */
	mutex_lock(&ptdev->unplug.lock);
	if (drm_dev_is_unplugged(&ptdev->base)) {
		/* Someone beat us, release the lock and wait for the unplug
		 * operation to be reported as done.
		 */
		mutex_unlock(&ptdev->unplug.lock);
		wait_for_completion(&ptdev->unplug.done);
		return;
	}

	/* Call drm_dev_unplug() so any access to HW blocks happening after
	 * that point gets rejected.
	 */
	drm_dev_unplug(&ptdev->base);

	/* We do the rest of the unplug with the unplug lock released,
	 * future callers will wait on ptdev->unplug.done anyway.
	 */
	mutex_unlock(&ptdev->unplug.lock);

	drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0);

	/* Now, try to cleanly shut down the GPU before the device resources
	 * get reclaimed.
	 */
	panthor_sched_unplug(ptdev);
	panthor_fw_unplug(ptdev);
	panthor_mmu_unplug(ptdev);
	panthor_gpu_unplug(ptdev);

	pm_runtime_dont_use_autosuspend(ptdev->base.dev);
	pm_runtime_put_sync_suspend(ptdev->base.dev);

	/* If PM is disabled, we need to call the suspend handler manually. */
	if (!IS_ENABLED(CONFIG_PM))
		panthor_device_suspend(ptdev->base.dev);

	/* Report the unplug operation as done to unblock concurrent
	 * panthor_device_unplug() callers.
	 */
	complete_all(&ptdev->unplug.done);
}

static void panthor_device_reset_cleanup(struct drm_device *ddev, void *data)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);

	cancel_work_sync(&ptdev->reset.work);
	destroy_workqueue(ptdev->reset.wq);
}

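/* Reset handler: quiesce the scheduler, FW and MMU, soft-reset the GPU,
 * power the L2 back on and restore the MMU/FW state. If the MCU can't be
 * booted again, the device is unplugged and becomes unusable.
 */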
static void panthor_device_reset_work(struct work_struct *work)
{
	struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work);
	int ret = 0, cookie;

	/* If the device is entering suspend, we don't reset. A slow reset will
	 * be forced at resume time instead.
	 */
	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
		return;

	if (!drm_dev_enter(&ptdev->base, &cookie))
		return;

	panthor_sched_pre_reset(ptdev);
	panthor_fw_pre_reset(ptdev, true);
	panthor_mmu_pre_reset(ptdev);
	panthor_gpu_soft_reset(ptdev);
	panthor_gpu_l2_power_on(ptdev);
	panthor_mmu_post_reset(ptdev);
	ret = panthor_fw_post_reset(ptdev);
	atomic_set(&ptdev->reset.pending, 0);
	panthor_sched_post_reset(ptdev, ret != 0);
	drm_dev_exit(cookie);

	if (ret) {
		panthor_device_unplug(ptdev);
		drm_err(&ptdev->base, "Failed to boot MCU after reset, making device unusable.");
	}
}

static bool panthor_device_is_initialized(struct panthor_device *ptdev)
{
	return !!ptdev->scheduler;
}

static void panthor_device_free_page(struct drm_device *ddev, void *data)
{
	__free_page(data);
}

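/* Bring the device up: PM/unplug state, the dummy LATEST_FLUSH page, the
 * reset workqueue, clocks, devfreq and register iomem, then the GPU, MMU,
 * FW and scheduler blocks, before finally registering the DRM device.
 */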
int panthor_device_init(struct panthor_device *ptdev)
{
	u32 *dummy_page_virt;
	struct resource *res;
	struct page *p;
	int ret;

	ret = panthor_gpu_coherency_init(ptdev);
	if (ret)
		return ret;

	init_completion(&ptdev->unplug.done);
	ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
	if (ret)
		return ret;

	ret = drmm_mutex_init(&ptdev->base, &ptdev->pm.mmio_lock);
	if (ret)
		return ret;

	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	p = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	ptdev->pm.dummy_latest_flush = p;
	dummy_page_virt = page_address(p);
	ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_free_page,
				       ptdev->pm.dummy_latest_flush);
	if (ret)
		return ret;

	/*
	 * Set the dummy page holding the latest flush to 1. This will cause the
	 * flush to be avoided as we know it isn't necessary if the submission
	 * happens while the dummy page is mapped. Zero cannot be used because
	 * that means 'always flush'.
	 */
	*dummy_page_virt = 1;

	INIT_WORK(&ptdev->reset.work, panthor_device_reset_work);
	ptdev->reset.wq = alloc_ordered_workqueue("panthor-reset-wq", 0);
	if (!ptdev->reset.wq)
		return -ENOMEM;

	ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_reset_cleanup, NULL);
	if (ret)
		return ret;

	ret = panthor_clk_init(ptdev);
	if (ret)
		return ret;

	ret = panthor_devfreq_init(ptdev);
	if (ret)
		return ret;

	ptdev->iomem = devm_platform_get_and_ioremap_resource(to_platform_device(ptdev->base.dev),
							      0, &res);
	if (IS_ERR(ptdev->iomem))
		return PTR_ERR(ptdev->iomem);

	ptdev->phys_addr = res->start;

	ret = devm_pm_runtime_enable(ptdev->base.dev);
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(ptdev->base.dev);
	if (ret)
		return ret;

	/* If PM is disabled, we need to call panthor_device_resume() manually. */
	if (!IS_ENABLED(CONFIG_PM)) {
		ret = panthor_device_resume(ptdev->base.dev);
		if (ret)
			return ret;
	}

	ret = panthor_gpu_init(ptdev);
	if (ret)
		goto err_rpm_put;

	ret = panthor_mmu_init(ptdev);
	if (ret)
		goto err_unplug_gpu;

	ret = panthor_fw_init(ptdev);
	if (ret)
		goto err_unplug_mmu;

	ret = panthor_sched_init(ptdev);
	if (ret)
		goto err_unplug_fw;
	/* 50ms autosuspend delay, roughly 3 frames at 60 Hz */
	pm_runtime_set_autosuspend_delay(ptdev->base.dev, 50);
	pm_runtime_use_autosuspend(ptdev->base.dev);

	ret = drm_dev_register(&ptdev->base, 0);
	if (ret)
		goto err_disable_autosuspend;

	pm_runtime_put_autosuspend(ptdev->base.dev);
	return 0;

err_disable_autosuspend:
	pm_runtime_dont_use_autosuspend(ptdev->base.dev);
	panthor_sched_unplug(ptdev);

err_unplug_fw:
	panthor_fw_unplug(ptdev);

err_unplug_mmu:
	panthor_mmu_unplug(ptdev);

err_unplug_gpu:
	panthor_gpu_unplug(ptdev);

err_rpm_put:
	pm_runtime_put_sync_suspend(ptdev->base.dev);
	return ret;
}

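/* Table mapping DRM_PANTHOR_EXCEPTION_* codes to human-readable names,
 * looked up by panthor_exception_name().
 */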
#define PANTHOR_EXCEPTION(id) \
	[DRM_PANTHOR_EXCEPTION_ ## id] = { \
		.name = #id, \
	}

struct panthor_exception_info {
	const char *name;
};

static const struct panthor_exception_info panthor_exception_infos[] = {
	PANTHOR_EXCEPTION(OK),
	PANTHOR_EXCEPTION(TERMINATED),
	PANTHOR_EXCEPTION(KABOOM),
	PANTHOR_EXCEPTION(EUREKA),
	PANTHOR_EXCEPTION(ACTIVE),
	PANTHOR_EXCEPTION(CS_RES_TERM),
	PANTHOR_EXCEPTION(CS_CONFIG_FAULT),
	PANTHOR_EXCEPTION(CS_UNRECOVERABLE),
	PANTHOR_EXCEPTION(CS_ENDPOINT_FAULT),
	PANTHOR_EXCEPTION(CS_BUS_FAULT),
	PANTHOR_EXCEPTION(CS_INSTR_INVALID),
	PANTHOR_EXCEPTION(CS_CALL_STACK_OVERFLOW),
	PANTHOR_EXCEPTION(CS_INHERIT_FAULT),
	PANTHOR_EXCEPTION(INSTR_INVALID_PC),
	PANTHOR_EXCEPTION(INSTR_INVALID_ENC),
	PANTHOR_EXCEPTION(INSTR_BARRIER_FAULT),
	PANTHOR_EXCEPTION(DATA_INVALID_FAULT),
	PANTHOR_EXCEPTION(TILE_RANGE_FAULT),
	PANTHOR_EXCEPTION(ADDR_RANGE_FAULT),
	PANTHOR_EXCEPTION(IMPRECISE_FAULT),
	PANTHOR_EXCEPTION(OOM),
	PANTHOR_EXCEPTION(CSF_FW_INTERNAL_ERROR),
	PANTHOR_EXCEPTION(CSF_RES_EVICTION_TIMEOUT),
	PANTHOR_EXCEPTION(GPU_BUS_FAULT),
	PANTHOR_EXCEPTION(GPU_SHAREABILITY_FAULT),
	PANTHOR_EXCEPTION(SYS_SHAREABILITY_FAULT),
	PANTHOR_EXCEPTION(GPU_CACHEABILITY_FAULT),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_0),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_1),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_2),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_3),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_4),
	PANTHOR_EXCEPTION(PERM_FAULT_0),
	PANTHOR_EXCEPTION(PERM_FAULT_1),
	PANTHOR_EXCEPTION(PERM_FAULT_2),
	PANTHOR_EXCEPTION(PERM_FAULT_3),
	PANTHOR_EXCEPTION(ACCESS_FLAG_1),
	PANTHOR_EXCEPTION(ACCESS_FLAG_2),
	PANTHOR_EXCEPTION(ACCESS_FLAG_3),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_IN),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT0),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT1),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT2),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT3),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_0),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_1),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_2),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_3),
};

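/* Codes without an entry in the table above are reported as
 * "Unknown exception type".
 */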
const char *panthor_exception_name(struct panthor_device *ptdev, u32 exception_code)
{
	if (exception_code >= ARRAY_SIZE(panthor_exception_infos) ||
	    !panthor_exception_infos[exception_code].name)
		return "Unknown exception type";

	return panthor_exception_infos[exception_code].name;
}

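/* Fault handler for the user MMIO region. While the device is active, the
 * LATEST_FLUSH_ID register page is mapped uncached straight from the GPU
 * iomem; while it's suspended, the pre-filled dummy page is mapped instead,
 * so userspace never touches powered-down iomem.
 */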
static vm_fault_t panthor_mmio_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct panthor_device *ptdev = vma->vm_private_data;
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long pfn;
	pgprot_t pgprot;
	vm_fault_t ret;
	bool active;
	int cookie;

	if (!drm_dev_enter(&ptdev->base, &cookie))
		return VM_FAULT_SIGBUS;

	mutex_lock(&ptdev->pm.mmio_lock);
	active = atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE;

	switch (offset) {
	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
		if (active)
			pfn = __phys_to_pfn(ptdev->phys_addr + CSF_GPU_LATEST_FLUSH_ID);
		else
			pfn = page_to_pfn(ptdev->pm.dummy_latest_flush);
		break;

	default:
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	pgprot = vma->vm_page_prot;
	if (active)
		pgprot = pgprot_noncached(pgprot);

	ret = vmf_insert_pfn_prot(vma, vmf->address, pfn, pgprot);

out_unlock:
	mutex_unlock(&ptdev->pm.mmio_lock);
	drm_dev_exit(cookie);
	return ret;
}

static const struct vm_operations_struct panthor_mmio_vm_ops = {
	.fault = panthor_mmio_vm_fault,
};

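/* mmap handler for the user MMIO region. Only the shared, read-only,
 * single-page FLUSH_ID mapping is accepted; the actual PFN insertion is
 * deferred to the fault handler above so it can depend on the current
 * PM state.
 */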
int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *vma)
{
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;

	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	switch (offset) {
	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
		    (vma->vm_flags & (VM_WRITE | VM_EXEC)))
			return -EINVAL;
		vm_flags_clear(vma, VM_MAYWRITE);

		break;

	default:
		return -EINVAL;
	}

	/* Defer actual mapping to the fault handler. */
	vma->vm_private_data = ptdev;
	vma->vm_ops = &panthor_mmio_vm_ops;
	vm_flags_set(vma,
		     VM_IO | VM_DONTCOPY | VM_DONTEXPAND |
		     VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
	return 0;
}

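/* Resume the GPU, MMU and FW blocks, in that order. If the FW fails to
 * resume, the MMU and GPU are suspended again so the caller sees a
 * consistent (suspended) HW state.
 */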
static int panthor_device_resume_hw_components(struct panthor_device *ptdev)
{
	int ret;

	panthor_gpu_resume(ptdev);
	panthor_mmu_resume(ptdev);

	ret = panthor_fw_resume(ptdev);
	if (!ret)
		return 0;

	panthor_mmu_suspend(ptdev);
	panthor_gpu_suspend(ptdev);
	return ret;
}

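/* Runtime/system resume handler: re-enable clocks and devfreq, resume the
 * HW blocks (falling back to a slow reset if a fast reset fails), then
 * invalidate the dummy MMIO mappings so the next fault maps the real
 * registers again.
 */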
int panthor_device_resume(struct device *dev)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	int ret, cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_SUSPENDED)
		return -EINVAL;

	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_RESUMING);

	ret = clk_prepare_enable(ptdev->clks.core);
	if (ret)
		goto err_set_suspended;

	ret = clk_prepare_enable(ptdev->clks.stacks);
	if (ret)
		goto err_disable_core_clk;

	ret = clk_prepare_enable(ptdev->clks.coregroup);
	if (ret)
		goto err_disable_stacks_clk;

	panthor_devfreq_resume(ptdev);

	if (panthor_device_is_initialized(ptdev) &&
	    drm_dev_enter(&ptdev->base, &cookie)) {
		/* If there was a reset pending at the time we suspended the
		 * device, we force a slow reset.
		 */
		if (atomic_read(&ptdev->reset.pending)) {
			ptdev->reset.fast = false;
			atomic_set(&ptdev->reset.pending, 0);
		}

		ret = panthor_device_resume_hw_components(ptdev);
		if (ret && ptdev->reset.fast) {
			drm_err(&ptdev->base, "Fast reset failed, trying a slow reset");
			ptdev->reset.fast = false;
			ret = panthor_device_resume_hw_components(ptdev);
		}

		if (!ret)
			panthor_sched_resume(ptdev);

		drm_dev_exit(cookie);

		if (ret)
			goto err_suspend_devfreq;
	}

	/* Clear all IOMEM mappings pointing to this device after we've
	 * resumed. This way the fake mappings pointing to the dummy pages
	 * are removed and the real iomem mapping will be restored on next
	 * access.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
	mutex_unlock(&ptdev->pm.mmio_lock);
	return 0;

err_suspend_devfreq:
	panthor_devfreq_suspend(ptdev);
	clk_disable_unprepare(ptdev->clks.coregroup);

err_disable_stacks_clk:
	clk_disable_unprepare(ptdev->clks.stacks);

err_disable_core_clk:
	clk_disable_unprepare(ptdev->clks.core);

err_set_suspended:
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	atomic_set(&ptdev->pm.recovery_needed, 1);
	return ret;
}

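/* Runtime/system suspend handler: tear down the user MMIO mappings first
 * (so a concurrent fault gets the dummy page instead of soon-to-be dead
 * iomem), then suspend the scheduler, FW, MMU and GPU, and finally devfreq
 * and the clocks.
 */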
int panthor_device_suspend(struct device *dev)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	int cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
		return -EINVAL;

	/* Clear all IOMEM mappings pointing to this device before we
	 * shut down the power-domain and clocks. Failing to do that results
	 * in external aborts when the process accesses the iomem region.
	 * We change the state and call unmap_mapping_range() with the
	 * mmio_lock held to make sure the vm_fault handler won't set up
	 * invalid mappings.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDING);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	mutex_unlock(&ptdev->pm.mmio_lock);

	if (panthor_device_is_initialized(ptdev) &&
	    drm_dev_enter(&ptdev->base, &cookie)) {
		cancel_work_sync(&ptdev->reset.work);

		/* We prepare everything as if we were resetting the GPU.
		 * The end of the reset will happen in the resume path though.
		 */
		panthor_sched_suspend(ptdev);
		panthor_fw_suspend(ptdev);
		panthor_mmu_suspend(ptdev);
		panthor_gpu_suspend(ptdev);
		drm_dev_exit(cookie);
	}

	panthor_devfreq_suspend(ptdev);

	clk_disable_unprepare(ptdev->clks.coregroup);
	clk_disable_unprepare(ptdev->clks.stacks);
	clk_disable_unprepare(ptdev->clks.core);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	return 0;
}