// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */

#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "panthor_devfreq.h"
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gpu.h"
#include "panthor_hw.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"

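/* Initialize the coherency state: ptdev->coherent is derived from the DMA
 * attributes exposed by the platform, and, when coherency is requested, we
 * check that the GPU actually advertises ACE-Lite support.
 */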
static int panthor_gpu_coherency_init(struct panthor_device *ptdev)
{
	ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;

	if (!ptdev->coherent)
		return 0;

	/* Check if the ACE-Lite coherency protocol is actually supported by the GPU.
	 * ACE protocol has never been supported for command stream frontend GPUs.
	 */
	if ((gpu_read(ptdev, GPU_COHERENCY_FEATURES) &
		      GPU_COHERENCY_PROT_BIT(ACE_LITE)))
		return 0;

	drm_err(&ptdev->base, "Coherency not supported by the device");
	return -ENOTSUPP;
}

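/* Acquire the GPU clocks: the 'core' clock is mandatory, while the 'stacks'
 * and 'coregroup' clocks are optional and only present on some SoCs.
 */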
static int panthor_clk_init(struct panthor_device *ptdev)
{
	ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
	if (IS_ERR(ptdev->clks.core))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.core),
				     "get 'core' clock failed");

	ptdev->clks.stacks = devm_clk_get_optional(ptdev->base.dev, "stacks");
	if (IS_ERR(ptdev->clks.stacks))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.stacks),
				     "get 'stacks' clock failed");

	ptdev->clks.coregroup = devm_clk_get_optional(ptdev->base.dev, "coregroup");
	if (IS_ERR(ptdev->clks.coregroup))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.coregroup),
				     "get 'coregroup' clock failed");

	drm_info(&ptdev->base, "clock rate = %lu\n", clk_get_rate(ptdev->clks.core));
	return 0;
}

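/* Attach all power domains listed for the device. Nothing to do if the core
 * already attached a single PM domain to the device.
 */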
static int panthor_init_power(struct device *dev)
{
	struct dev_pm_domain_list *pd_list = NULL;

	if (dev->pm_domain)
		return 0;

	return devm_pm_domain_attach_list(dev, NULL, &pd_list);
}

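/**
 * panthor_device_unplug() - Unplug the device
 * @ptdev: Device to unplug.
 *
 * Mark the device as unplugged and tear down the HW-related resources
 * (scheduler, FW, MMU, GPU) while the device is still powered. This can be
 * called concurrently from the reset path and the platform device remove
 * callback: only the first caller does the teardown, the others wait for it
 * to complete.
 */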
void panthor_device_unplug(struct panthor_device *ptdev)
{
	/* This function can be called from two different paths: the reset work
	 * and the platform device remove callback. drm_dev_unplug() doesn't
	 * deal with concurrent callers, so we have to protect drm_dev_unplug()
	 * calls with our own lock, and bail out if the device is already
	 * unplugged.
	 */
	mutex_lock(&ptdev->unplug.lock);
	if (drm_dev_is_unplugged(&ptdev->base)) {
		/* Someone beat us to it, release the lock and wait for the
		 * unplug operation to be reported as done.
		 */
		mutex_unlock(&ptdev->unplug.lock);
		wait_for_completion(&ptdev->unplug.done);
		return;
	}

	drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0);

	/* Call drm_dev_unplug() so any access to HW blocks happening after
	 * that point gets rejected.
	 */
	drm_dev_unplug(&ptdev->base);

	/* We do the rest of the unplug with the unplug lock released;
	 * future callers will wait on ptdev->unplug.done anyway.
	 */
	mutex_unlock(&ptdev->unplug.lock);

	/* Now, try to cleanly shut down the GPU before the device resources
	 * get reclaimed.
	 */
	panthor_sched_unplug(ptdev);
	panthor_fw_unplug(ptdev);
	panthor_mmu_unplug(ptdev);
	panthor_gpu_unplug(ptdev);

	pm_runtime_dont_use_autosuspend(ptdev->base.dev);
	pm_runtime_put_sync_suspend(ptdev->base.dev);

	/* If PM is disabled, we need to call the suspend handler manually. */
	if (!IS_ENABLED(CONFIG_PM))
		panthor_device_suspend(ptdev->base.dev);

	/* Report the unplug operation as done to unblock concurrent
	 * panthor_device_unplug() callers.
	 */
	complete_all(&ptdev->unplug.done);
}

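/* drmm action disabling the reset work and releasing its workqueue when the
 * DRM device is destroyed.
 */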
static void panthor_device_reset_cleanup(struct drm_device *ddev, void *data)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);

	disable_work_sync(&ptdev->reset.work);
	destroy_workqueue(ptdev->reset.wq);
}

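/* Reset handler: stop the scheduler, FW and MMU, soft-reset the GPU and
 * bring everything back up. If the MCU can't be rebooted, the device is
 * unplugged and becomes unusable.
 */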
static void panthor_device_reset_work(struct work_struct *work)
{
	struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work);
	int ret = 0, cookie;

	/* If the device is entering suspend, we don't reset. A slow reset will
	 * be forced at resume time instead.
	 */
	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
		return;

	if (!drm_dev_enter(&ptdev->base, &cookie))
		return;

	panthor_sched_pre_reset(ptdev);
	panthor_fw_pre_reset(ptdev, true);
	panthor_mmu_pre_reset(ptdev);
	panthor_gpu_soft_reset(ptdev);
	panthor_gpu_l2_power_on(ptdev);
	panthor_mmu_post_reset(ptdev);
	ret = panthor_fw_post_reset(ptdev);
	atomic_set(&ptdev->reset.pending, 0);
	panthor_sched_post_reset(ptdev, ret != 0);
	drm_dev_exit(cookie);

	if (ret) {
		panthor_device_unplug(ptdev);
		drm_err(&ptdev->base, "Failed to boot MCU after reset, making device unusable.");
	}
}

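/* The scheduler is the last component initialized in panthor_device_init(),
 * so its presence is used as an 'everything is initialized' marker.
 */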
static bool panthor_device_is_initialized(struct panthor_device *ptdev)
{
	return !!ptdev->scheduler;
}

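/* drmm action freeing the dummy LATEST_FLUSH page. */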
static void panthor_device_free_page(struct drm_device *ddev, void *data)
{
	__free_page(data);
}

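/**
 * panthor_device_init() - Initialize the GPU device
 * @ptdev: Device to initialize.
 *
 * Set up the clocks, power domains, devfreq and MMIO region, then bring up
 * the GPU, MMU, FW and scheduler blocks and register the DRM device. The
 * device is kept resumed for the duration of the initialization and put
 * back on autosuspend on success.
 *
 * Return: 0 on success, a negative error code otherwise.
 */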
int panthor_device_init(struct panthor_device *ptdev)
{
	u32 *dummy_page_virt;
	struct resource *res;
	struct page *p;
	int ret;

	ptdev->soc_data = of_device_get_match_data(ptdev->base.dev);

	init_completion(&ptdev->unplug.done);
	ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
	if (ret)
		return ret;

	ret = drmm_mutex_init(&ptdev->base, &ptdev->pm.mmio_lock);
	if (ret)
		return ret;

#ifdef CONFIG_DEBUG_FS
	drmm_mutex_init(&ptdev->base, &ptdev->gems.lock);
	INIT_LIST_HEAD(&ptdev->gems.node);
#endif

	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	p = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	ptdev->pm.dummy_latest_flush = p;
	dummy_page_virt = page_address(p);
	ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_free_page,
				       ptdev->pm.dummy_latest_flush);
	if (ret)
		return ret;

	/*
	 * Set the dummy page holding the latest flush to 1. This will cause the
	 * flush to be avoided, as we know it isn't necessary if the submission
	 * happens while the dummy page is mapped. Zero cannot be used because
	 * that means 'always flush'.
	 */
	*dummy_page_virt = 1;

	INIT_WORK(&ptdev->reset.work, panthor_device_reset_work);
	ptdev->reset.wq = alloc_ordered_workqueue("panthor-reset-wq", 0);
	if (!ptdev->reset.wq)
		return -ENOMEM;

	ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_reset_cleanup, NULL);
	if (ret)
		return ret;

	ret = panthor_clk_init(ptdev);
	if (ret)
		return ret;

	ret = panthor_init_power(ptdev->base.dev);
	if (ret < 0) {
		drm_err(&ptdev->base, "init power domains failed, ret=%d", ret);
		return ret;
	}

	ret = panthor_devfreq_init(ptdev);
	if (ret)
		return ret;

	ptdev->iomem = devm_platform_get_and_ioremap_resource(to_platform_device(ptdev->base.dev),
							      0, &res);
	if (IS_ERR(ptdev->iomem))
		return PTR_ERR(ptdev->iomem);

	ptdev->phys_addr = res->start;

	ret = devm_pm_runtime_enable(ptdev->base.dev);
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(ptdev->base.dev);
	if (ret)
		return ret;

	/* If PM is disabled, we need to call panthor_device_resume() manually. */
	if (!IS_ENABLED(CONFIG_PM)) {
		ret = panthor_device_resume(ptdev->base.dev);
		if (ret)
			return ret;
	}

	ret = panthor_hw_init(ptdev);
	if (ret)
		goto err_rpm_put;

	ret = panthor_gpu_init(ptdev);
	if (ret)
		goto err_rpm_put;

	ret = panthor_gpu_coherency_init(ptdev);
	if (ret)
		goto err_unplug_gpu;

	ret = panthor_mmu_init(ptdev);
	if (ret)
		goto err_unplug_gpu;

	ret = panthor_fw_init(ptdev);
	if (ret)
		goto err_unplug_mmu;

	ret = panthor_sched_init(ptdev);
	if (ret)
		goto err_unplug_fw;

	/* ~3 frames */
	pm_runtime_set_autosuspend_delay(ptdev->base.dev, 50);
	pm_runtime_use_autosuspend(ptdev->base.dev);

	ret = drm_dev_register(&ptdev->base, 0);
	if (ret)
		goto err_disable_autosuspend;

	pm_runtime_put_autosuspend(ptdev->base.dev);
	return 0;

err_disable_autosuspend:
	pm_runtime_dont_use_autosuspend(ptdev->base.dev);
	panthor_sched_unplug(ptdev);

err_unplug_fw:
	panthor_fw_unplug(ptdev);

err_unplug_mmu:
	panthor_mmu_unplug(ptdev);

err_unplug_gpu:
	panthor_gpu_unplug(ptdev);

err_rpm_put:
	pm_runtime_put_sync_suspend(ptdev->base.dev);
	return ret;
}

#define PANTHOR_EXCEPTION(id) \
	[DRM_PANTHOR_EXCEPTION_ ## id] = { \
		.name = #id, \
	}

struct panthor_exception_info {
	const char *name;
};

static const struct panthor_exception_info panthor_exception_infos[] = {
	PANTHOR_EXCEPTION(OK),
	PANTHOR_EXCEPTION(TERMINATED),
	PANTHOR_EXCEPTION(KABOOM),
	PANTHOR_EXCEPTION(EUREKA),
	PANTHOR_EXCEPTION(ACTIVE),
	PANTHOR_EXCEPTION(CS_RES_TERM),
	PANTHOR_EXCEPTION(CS_CONFIG_FAULT),
	PANTHOR_EXCEPTION(CS_UNRECOVERABLE),
	PANTHOR_EXCEPTION(CS_ENDPOINT_FAULT),
	PANTHOR_EXCEPTION(CS_BUS_FAULT),
	PANTHOR_EXCEPTION(CS_INSTR_INVALID),
	PANTHOR_EXCEPTION(CS_CALL_STACK_OVERFLOW),
	PANTHOR_EXCEPTION(CS_INHERIT_FAULT),
	PANTHOR_EXCEPTION(INSTR_INVALID_PC),
	PANTHOR_EXCEPTION(INSTR_INVALID_ENC),
	PANTHOR_EXCEPTION(INSTR_BARRIER_FAULT),
	PANTHOR_EXCEPTION(DATA_INVALID_FAULT),
	PANTHOR_EXCEPTION(TILE_RANGE_FAULT),
	PANTHOR_EXCEPTION(ADDR_RANGE_FAULT),
	PANTHOR_EXCEPTION(IMPRECISE_FAULT),
	PANTHOR_EXCEPTION(OOM),
	PANTHOR_EXCEPTION(CSF_FW_INTERNAL_ERROR),
	PANTHOR_EXCEPTION(CSF_RES_EVICTION_TIMEOUT),
	PANTHOR_EXCEPTION(GPU_BUS_FAULT),
	PANTHOR_EXCEPTION(GPU_SHAREABILITY_FAULT),
	PANTHOR_EXCEPTION(SYS_SHAREABILITY_FAULT),
	PANTHOR_EXCEPTION(GPU_CACHEABILITY_FAULT),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_0),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_1),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_2),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_3),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_4),
	PANTHOR_EXCEPTION(PERM_FAULT_0),
	PANTHOR_EXCEPTION(PERM_FAULT_1),
	PANTHOR_EXCEPTION(PERM_FAULT_2),
	PANTHOR_EXCEPTION(PERM_FAULT_3),
	PANTHOR_EXCEPTION(ACCESS_FLAG_1),
	PANTHOR_EXCEPTION(ACCESS_FLAG_2),
	PANTHOR_EXCEPTION(ACCESS_FLAG_3),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_IN),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT0),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT1),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT2),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT3),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_0),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_1),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_2),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_3),
};

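/**
 * panthor_exception_name() - Query the human-readable name of an exception
 * @ptdev: Device.
 * @exception_code: Exception code reported by the GPU/FW.
 *
 * Return: A stable string describing the exception, or a placeholder string
 * if the code is unknown.
 */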
const char *panthor_exception_name(struct panthor_device *ptdev, u32 exception_code)
{
	if (exception_code >= ARRAY_SIZE(panthor_exception_infos) ||
	    !panthor_exception_infos[exception_code].name)
		return "Unknown exception type";

	return panthor_exception_infos[exception_code].name;
}

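/* Fault handler backing the USER MMIO mapping. When the device is active,
 * the real LATEST_FLUSH register page is inserted (uncached); when it's
 * suspended, the dummy page is used instead, so userspace reads a value
 * meaning 'no flush needed'. pm.mmio_lock serializes this against the
 * suspend/resume transitions that invalidate these mappings.
 */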
static vm_fault_t panthor_mmio_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct panthor_device *ptdev = vma->vm_private_data;
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long pfn;
	pgprot_t pgprot;
	vm_fault_t ret;
	bool active;
	int cookie;

	if (!drm_dev_enter(&ptdev->base, &cookie))
		return VM_FAULT_SIGBUS;

	mutex_lock(&ptdev->pm.mmio_lock);
	active = atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE;

	switch (offset) {
	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
		if (active)
			pfn = __phys_to_pfn(ptdev->phys_addr + CSF_GPU_LATEST_FLUSH_ID);
		else
			pfn = page_to_pfn(ptdev->pm.dummy_latest_flush);
		break;

	default:
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	pgprot = vma->vm_page_prot;
	if (active)
		pgprot = pgprot_noncached(pgprot);

	ret = vmf_insert_pfn_prot(vma, vmf->address, pfn, pgprot);

out_unlock:
	mutex_unlock(&ptdev->pm.mmio_lock);
	drm_dev_exit(cookie);
	return ret;
}

static const struct vm_operations_struct panthor_mmio_vm_ops = {
	.fault = panthor_mmio_vm_fault,
};

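/**
 * panthor_device_mmap_io() - Mmap the USER MMIO region
 * @ptdev: Device.
 * @vma: VMA describing the user mapping.
 *
 * Only the FLUSH_ID page can be mapped, shared and read-only. The actual PFN
 * insertion is deferred to the fault handler so the mapping can be redirected
 * to a dummy page while the device is suspended.
 *
 * Return: 0 on success, a negative error code otherwise.
 */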
int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *vma)
{
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;

	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	switch (offset) {
	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
		    (vma->vm_flags & (VM_WRITE | VM_EXEC)))
			return -EINVAL;
		vm_flags_clear(vma, VM_MAYWRITE);

		break;

	default:
		return -EINVAL;
	}

	/* Defer actual mapping to the fault handler. */
	vma->vm_private_data = ptdev;
	vma->vm_ops = &panthor_mmio_vm_ops;
	vm_flags_set(vma,
		     VM_IO | VM_DONTCOPY | VM_DONTEXPAND |
		     VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
	return 0;
}

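/* Resume the GPU, MMU and FW blocks. If the FW fails to come back, the MMU
 * and GPU are suspended again so the caller can retry (with a slow reset)
 * or bail out cleanly.
 */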
static int panthor_device_resume_hw_components(struct panthor_device *ptdev)
{
	int ret;

	panthor_gpu_resume(ptdev);
	panthor_mmu_resume(ptdev);

	ret = panthor_fw_resume(ptdev);
	if (!ret)
		return 0;

	panthor_mmu_suspend(ptdev);
	panthor_gpu_suspend(ptdev);
	return ret;
}

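/**
 * panthor_device_resume() - Runtime/system resume handler
 * @dev: Device to resume.
 *
 * Re-enable the clocks and devfreq, resume the HW blocks (forcing a slow
 * reset if one was pending at suspend time), then invalidate the dummy-backed
 * USER MMIO mappings so the next access faults in the real register page.
 *
 * Return: 0 on success, a negative error code otherwise.
 */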
int panthor_device_resume(struct device *dev)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	int ret, cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_SUSPENDED)
		return -EINVAL;

	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_RESUMING);

	ret = clk_prepare_enable(ptdev->clks.core);
	if (ret)
		goto err_set_suspended;

	ret = clk_prepare_enable(ptdev->clks.stacks);
	if (ret)
		goto err_disable_core_clk;

	ret = clk_prepare_enable(ptdev->clks.coregroup);
	if (ret)
		goto err_disable_stacks_clk;

	panthor_devfreq_resume(ptdev);

	if (panthor_device_is_initialized(ptdev) &&
	    drm_dev_enter(&ptdev->base, &cookie)) {
		/* If there was a reset pending at the time we suspended the
		 * device, we force a slow reset.
		 */
		if (atomic_read(&ptdev->reset.pending)) {
			ptdev->reset.fast = false;
			atomic_set(&ptdev->reset.pending, 0);
		}

		ret = panthor_device_resume_hw_components(ptdev);
		if (ret && ptdev->reset.fast) {
			drm_err(&ptdev->base, "Fast reset failed, trying a slow reset");
			ptdev->reset.fast = false;
			ret = panthor_device_resume_hw_components(ptdev);
		}

		if (!ret)
			panthor_sched_resume(ptdev);

		drm_dev_exit(cookie);

		if (ret)
			goto err_suspend_devfreq;
	}

	/* Clear all IOMEM mappings pointing to this device after we've
	 * resumed. This way the fake mappings pointing to the dummy pages
	 * are removed and the real iomem mapping will be restored on next
	 * access.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
	mutex_unlock(&ptdev->pm.mmio_lock);
	return 0;

err_suspend_devfreq:
	panthor_devfreq_suspend(ptdev);
	clk_disable_unprepare(ptdev->clks.coregroup);

err_disable_stacks_clk:
	clk_disable_unprepare(ptdev->clks.stacks);

err_disable_core_clk:
	clk_disable_unprepare(ptdev->clks.core);

err_set_suspended:
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	atomic_set(&ptdev->pm.recovery_needed, 1);
	return ret;
}

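/**
 * panthor_device_suspend() - Runtime/system suspend handler
 * @dev: Device to suspend.
 *
 * Tear down the USER MMIO mappings before the clocks and power domains go
 * away, suspend the scheduler, FW, MMU and GPU blocks, then devfreq and the
 * clocks.
 *
 * Return: 0 on success, -EINVAL if the device wasn't active.
 */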
int panthor_device_suspend(struct device *dev)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	int cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
		return -EINVAL;

	/* Clear all IOMEM mappings pointing to this device before we
	 * shut down the power domain and clocks. Failing to do that results
	 * in external aborts when the process accesses the iomem region.
	 * We change the state and call unmap_mapping_range() with the
	 * mmio_lock held to make sure the vm_fault handler won't set up
	 * invalid mappings.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDING);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	mutex_unlock(&ptdev->pm.mmio_lock);

	if (panthor_device_is_initialized(ptdev) &&
	    drm_dev_enter(&ptdev->base, &cookie)) {
		cancel_work_sync(&ptdev->reset.work);

		/* We prepare everything as if we were resetting the GPU.
		 * The end of the reset will happen in the resume path though.
		 */
		panthor_sched_suspend(ptdev);
		panthor_fw_suspend(ptdev);
		panthor_mmu_suspend(ptdev);
		panthor_gpu_suspend(ptdev);
		drm_dev_exit(cookie);
	}

	panthor_devfreq_suspend(ptdev);

	clk_disable_unprepare(ptdev->clks.coregroup);
	clk_disable_unprepare(ptdev->clks.stacks);
	clk_disable_unprepare(ptdev->clks.core);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	return 0;
}
601