xref: /linux/drivers/gpu/drm/panthor/panthor_device.c (revision f6e8dc9edf963dbc99085e54f6ced6da9daa6100)
// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */

#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "panthor_devfreq.h"
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gpu.h"
#include "panthor_hw.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"

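/* Cache the device's DMA coherency attribute and, if it claims to be
 * coherent, make sure the GPU actually advertises ACE-Lite support.
 */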
static int panthor_gpu_coherency_init(struct panthor_device *ptdev)
{
	ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;

	if (!ptdev->coherent)
		return 0;

	/* Check if the ACE-Lite coherency protocol is actually supported by the GPU.
	 * ACE protocol has never been supported for command stream frontend GPUs.
	 */
	if ((gpu_read(ptdev, GPU_COHERENCY_FEATURES) &
		      GPU_COHERENCY_PROT_BIT(ACE_LITE)))
		return 0;

	drm_err(&ptdev->base, "Coherency not supported by the device");
	return -ENOTSUPP;
}

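/* Grab the mandatory 'core' clock and the optional 'stacks' and 'coregroup'
 * clocks. All of them are devm-managed, so no explicit cleanup is needed.
 */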
static int panthor_clk_init(struct panthor_device *ptdev)
{
	ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
	if (IS_ERR(ptdev->clks.core))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.core),
				     "get 'core' clock failed");

	ptdev->clks.stacks = devm_clk_get_optional(ptdev->base.dev, "stacks");
	if (IS_ERR(ptdev->clks.stacks))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.stacks),
				     "get 'stacks' clock failed");

	ptdev->clks.coregroup = devm_clk_get_optional(ptdev->base.dev, "coregroup");
	if (IS_ERR(ptdev->clks.coregroup))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.coregroup),
				     "get 'coregroup' clock failed");

	drm_info(&ptdev->base, "clock rate = %lu\n", clk_get_rate(ptdev->clks.core));
	return 0;
}

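/* Tear down all HW-facing components and mark the DRM device as unplugged.
 * Safe to call concurrently from the reset path and the platform remove
 * callback: only the first caller does the actual teardown, subsequent
 * callers wait for it to complete.
 */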
void panthor_device_unplug(struct panthor_device *ptdev)
{
	/* This function can be called from two different paths: the reset work
	 * and the platform device remove callback. drm_dev_unplug() doesn't
	 * deal with concurrent callers, so we have to protect drm_dev_unplug()
	 * calls with our own lock, and bail out if the device is already
	 * unplugged.
	 */
	mutex_lock(&ptdev->unplug.lock);
	if (drm_dev_is_unplugged(&ptdev->base)) {
		/* Someone beat us to it, release the lock and wait for the
		 * unplug operation to be reported as done.
		 */
		mutex_unlock(&ptdev->unplug.lock);
		wait_for_completion(&ptdev->unplug.done);
		return;
	}

	/* Call drm_dev_unplug() so any access to HW blocks happening after
	 * that point gets rejected.
	 */
	drm_dev_unplug(&ptdev->base);

	/* We do the rest of the unplug with the unplug lock released,
	 * future callers will wait on ptdev->unplug.done anyway.
	 */
	mutex_unlock(&ptdev->unplug.lock);

	drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0);

	/* Now, try to cleanly shut down the GPU before the device resources
	 * get reclaimed.
	 */
	panthor_sched_unplug(ptdev);
	panthor_fw_unplug(ptdev);
	panthor_mmu_unplug(ptdev);
	panthor_gpu_unplug(ptdev);

	pm_runtime_dont_use_autosuspend(ptdev->base.dev);
	pm_runtime_put_sync_suspend(ptdev->base.dev);

	/* If PM is disabled, we need to call the suspend handler manually. */
	if (!IS_ENABLED(CONFIG_PM))
		panthor_device_suspend(ptdev->base.dev);

	/* Report the unplug operation as done to unblock concurrent
	 * panthor_device_unplug() callers.
	 */
	complete_all(&ptdev->unplug.done);
}

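/* drmm cleanup action: make sure no reset work is running or queued before
 * the reset workqueue is destroyed.
 */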
static void panthor_device_reset_cleanup(struct drm_device *ddev, void *data)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);

	cancel_work_sync(&ptdev->reset.work);
	destroy_workqueue(ptdev->reset.wq);
}

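/* Reset handler: quiesce the scheduler, FW and MMU, soft-reset the GPU, then
 * bring everything back up. If the MCU can't be rebooted, the device is
 * unplugged and becomes unusable.
 */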
static void panthor_device_reset_work(struct work_struct *work)
{
	struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work);
	int ret = 0, cookie;

	/* If the device is entering suspend, we don't reset. A slow reset will
	 * be forced at resume time instead.
	 */
	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
		return;

	if (!drm_dev_enter(&ptdev->base, &cookie))
		return;

	panthor_sched_pre_reset(ptdev);
	panthor_fw_pre_reset(ptdev, true);
	panthor_mmu_pre_reset(ptdev);
	panthor_gpu_soft_reset(ptdev);
	panthor_gpu_l2_power_on(ptdev);
	panthor_mmu_post_reset(ptdev);
	ret = panthor_fw_post_reset(ptdev);
	atomic_set(&ptdev->reset.pending, 0);
	panthor_sched_post_reset(ptdev, ret != 0);
	drm_dev_exit(cookie);

	if (ret) {
		panthor_device_unplug(ptdev);
		drm_err(&ptdev->base, "Failed to boot MCU after reset, making device unusable.");
	}
}

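/* The scheduler is the last component set up by panthor_device_init(), so its
 * presence is used as the 'fully initialized' marker.
 */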
static bool panthor_device_is_initialized(struct panthor_device *ptdev)
{
	return !!ptdev->scheduler;
}

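/* drmm cleanup action releasing the dummy 'latest flush' page. */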
static void panthor_device_free_page(struct drm_device *ddev, void *data)
{
	__free_page(data);
}

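/* Bring up the device: unplug/PM state and reset infrastructure first, then
 * clocks, devfreq and MMIO, and finally the GPU, MMU, FW and scheduler blocks,
 * before registering the DRM device. Errors unwind in reverse order.
 */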
int panthor_device_init(struct panthor_device *ptdev)
{
	u32 *dummy_page_virt;
	struct resource *res;
	struct page *p;
	int ret;

	ptdev->soc_data = of_device_get_match_data(ptdev->base.dev);

	init_completion(&ptdev->unplug.done);
	ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
	if (ret)
		return ret;

	ret = drmm_mutex_init(&ptdev->base, &ptdev->pm.mmio_lock);
	if (ret)
		return ret;

#ifdef CONFIG_DEBUG_FS
	drmm_mutex_init(&ptdev->base, &ptdev->gems.lock);
	INIT_LIST_HEAD(&ptdev->gems.node);
#endif

	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	p = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	ptdev->pm.dummy_latest_flush = p;
	dummy_page_virt = page_address(p);
	ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_free_page,
				       ptdev->pm.dummy_latest_flush);
	if (ret)
		return ret;

	/*
	 * Set the dummy page holding the latest flush to 1. This will cause the
	 * flush to be avoided, as we know it isn't necessary if the submission
	 * happens while the dummy page is mapped. Zero cannot be used because
	 * that means 'always flush'.
	 */
	*dummy_page_virt = 1;

	INIT_WORK(&ptdev->reset.work, panthor_device_reset_work);
	ptdev->reset.wq = alloc_ordered_workqueue("panthor-reset-wq", 0);
	if (!ptdev->reset.wq)
		return -ENOMEM;

	ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_reset_cleanup, NULL);
	if (ret)
		return ret;

	ret = panthor_clk_init(ptdev);
	if (ret)
		return ret;

	ret = panthor_devfreq_init(ptdev);
	if (ret)
		return ret;

	ptdev->iomem = devm_platform_get_and_ioremap_resource(to_platform_device(ptdev->base.dev),
							      0, &res);
	if (IS_ERR(ptdev->iomem))
		return PTR_ERR(ptdev->iomem);

	ptdev->phys_addr = res->start;

	ret = devm_pm_runtime_enable(ptdev->base.dev);
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(ptdev->base.dev);
	if (ret)
		return ret;

	/* If PM is disabled, we need to call panthor_device_resume() manually. */
	if (!IS_ENABLED(CONFIG_PM)) {
		ret = panthor_device_resume(ptdev->base.dev);
		if (ret)
			return ret;
	}

	ret = panthor_hw_init(ptdev);
	if (ret)
		goto err_rpm_put;

	ret = panthor_gpu_init(ptdev);
	if (ret)
		goto err_rpm_put;

	ret = panthor_gpu_coherency_init(ptdev);
	if (ret)
		goto err_unplug_gpu;

	ret = panthor_mmu_init(ptdev);
	if (ret)
		goto err_unplug_gpu;

	ret = panthor_fw_init(ptdev);
	if (ret)
		goto err_unplug_mmu;

	ret = panthor_sched_init(ptdev);
	if (ret)
		goto err_unplug_fw;

	/* ~3 frames at 60 Hz */
	pm_runtime_set_autosuspend_delay(ptdev->base.dev, 50);
	pm_runtime_use_autosuspend(ptdev->base.dev);

	ret = drm_dev_register(&ptdev->base, 0);
	if (ret)
		goto err_disable_autosuspend;

	pm_runtime_put_autosuspend(ptdev->base.dev);
	return 0;

err_disable_autosuspend:
	pm_runtime_dont_use_autosuspend(ptdev->base.dev);
	panthor_sched_unplug(ptdev);

err_unplug_fw:
	panthor_fw_unplug(ptdev);

err_unplug_mmu:
	panthor_mmu_unplug(ptdev);

err_unplug_gpu:
	panthor_gpu_unplug(ptdev);

err_rpm_put:
	pm_runtime_put_sync_suspend(ptdev->base.dev);
	return ret;
}

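/* Human-readable names for the DRM_PANTHOR_EXCEPTION_* codes, used when
 * reporting faults. Holes in the table are reported as
 * "Unknown exception type" by panthor_exception_name().
 */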
#define PANTHOR_EXCEPTION(id) \
	[DRM_PANTHOR_EXCEPTION_ ## id] = { \
		.name = #id, \
	}

struct panthor_exception_info {
	const char *name;
};

static const struct panthor_exception_info panthor_exception_infos[] = {
	PANTHOR_EXCEPTION(OK),
	PANTHOR_EXCEPTION(TERMINATED),
	PANTHOR_EXCEPTION(KABOOM),
	PANTHOR_EXCEPTION(EUREKA),
	PANTHOR_EXCEPTION(ACTIVE),
	PANTHOR_EXCEPTION(CS_RES_TERM),
	PANTHOR_EXCEPTION(CS_CONFIG_FAULT),
	PANTHOR_EXCEPTION(CS_UNRECOVERABLE),
	PANTHOR_EXCEPTION(CS_ENDPOINT_FAULT),
	PANTHOR_EXCEPTION(CS_BUS_FAULT),
	PANTHOR_EXCEPTION(CS_INSTR_INVALID),
	PANTHOR_EXCEPTION(CS_CALL_STACK_OVERFLOW),
	PANTHOR_EXCEPTION(CS_INHERIT_FAULT),
	PANTHOR_EXCEPTION(INSTR_INVALID_PC),
	PANTHOR_EXCEPTION(INSTR_INVALID_ENC),
	PANTHOR_EXCEPTION(INSTR_BARRIER_FAULT),
	PANTHOR_EXCEPTION(DATA_INVALID_FAULT),
	PANTHOR_EXCEPTION(TILE_RANGE_FAULT),
	PANTHOR_EXCEPTION(ADDR_RANGE_FAULT),
	PANTHOR_EXCEPTION(IMPRECISE_FAULT),
	PANTHOR_EXCEPTION(OOM),
	PANTHOR_EXCEPTION(CSF_FW_INTERNAL_ERROR),
	PANTHOR_EXCEPTION(CSF_RES_EVICTION_TIMEOUT),
	PANTHOR_EXCEPTION(GPU_BUS_FAULT),
	PANTHOR_EXCEPTION(GPU_SHAREABILITY_FAULT),
	PANTHOR_EXCEPTION(SYS_SHAREABILITY_FAULT),
	PANTHOR_EXCEPTION(GPU_CACHEABILITY_FAULT),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_0),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_1),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_2),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_3),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_4),
	PANTHOR_EXCEPTION(PERM_FAULT_0),
	PANTHOR_EXCEPTION(PERM_FAULT_1),
	PANTHOR_EXCEPTION(PERM_FAULT_2),
	PANTHOR_EXCEPTION(PERM_FAULT_3),
	PANTHOR_EXCEPTION(ACCESS_FLAG_1),
	PANTHOR_EXCEPTION(ACCESS_FLAG_2),
	PANTHOR_EXCEPTION(ACCESS_FLAG_3),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_IN),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT0),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT1),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT2),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT3),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_0),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_1),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_2),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_3),
};

const char *panthor_exception_name(struct panthor_device *ptdev, u32 exception_code)
{
	if (exception_code >= ARRAY_SIZE(panthor_exception_infos) ||
	    !panthor_exception_infos[exception_code].name)
		return "Unknown exception type";

	return panthor_exception_infos[exception_code].name;
}

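/* Fault handler for the user MMIO region. While the GPU is active, the real
 * LATEST_FLUSH register page is mapped; while it is suspended, the dummy page
 * holding a non-zero flush ID is mapped instead, so userspace can keep
 * reading a valid flush ID.
 */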
static vm_fault_t panthor_mmio_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct panthor_device *ptdev = vma->vm_private_data;
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long pfn;
	pgprot_t pgprot;
	vm_fault_t ret;
	bool active;
	int cookie;

	if (!drm_dev_enter(&ptdev->base, &cookie))
		return VM_FAULT_SIGBUS;

	mutex_lock(&ptdev->pm.mmio_lock);
	active = atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE;

	switch (offset) {
	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
		if (active)
			pfn = __phys_to_pfn(ptdev->phys_addr + CSF_GPU_LATEST_FLUSH_ID);
		else
			pfn = page_to_pfn(ptdev->pm.dummy_latest_flush);
		break;

	default:
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	pgprot = vma->vm_page_prot;
	if (active)
		pgprot = pgprot_noncached(pgprot);

	ret = vmf_insert_pfn_prot(vma, vmf->address, pfn, pgprot);

out_unlock:
	mutex_unlock(&ptdev->pm.mmio_lock);
	drm_dev_exit(cookie);
	return ret;
}

static const struct vm_operations_struct panthor_mmio_vm_ops = {
	.fault = panthor_mmio_vm_fault,
};

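/* Validate userspace mappings of the MMIO region. Only the read-only FLUSH_ID
 * page is supported, and the actual PFN insertion is deferred to the fault
 * handler so it can depend on the current PM state.
 */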
int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *vma)
{
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;

	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	switch (offset) {
	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
		    (vma->vm_flags & (VM_WRITE | VM_EXEC)))
			return -EINVAL;
		vm_flags_clear(vma, VM_MAYWRITE);

		break;

	default:
		return -EINVAL;
	}

	/* Defer actual mapping to the fault handler. */
	vma->vm_private_data = ptdev;
	vma->vm_ops = &panthor_mmio_vm_ops;
	vm_flags_set(vma,
		     VM_IO | VM_DONTCOPY | VM_DONTEXPAND |
		     VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
	return 0;
}

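/* Resume the GPU, MMU and FW in that order. If the FW fails to resume, the
 * MMU and GPU are suspended again so the caller sees a consistent
 * powered-down state.
 */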
static int panthor_device_resume_hw_components(struct panthor_device *ptdev)
{
	int ret;

	panthor_gpu_resume(ptdev);
	panthor_mmu_resume(ptdev);

	ret = panthor_fw_resume(ptdev);
	if (!ret)
		return 0;

	panthor_mmu_suspend(ptdev);
	panthor_gpu_suspend(ptdev);
	return ret;
}

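/* Runtime/system resume handler: re-enable clocks and devfreq, resume the HW
 * blocks (falling back to a slow reset if a fast reset fails), then drop any
 * dummy MMIO mappings set up while suspended so the real registers get
 * faulted back in.
 */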
int panthor_device_resume(struct device *dev)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	int ret, cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_SUSPENDED)
		return -EINVAL;

	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_RESUMING);

	ret = clk_prepare_enable(ptdev->clks.core);
	if (ret)
		goto err_set_suspended;

	ret = clk_prepare_enable(ptdev->clks.stacks);
	if (ret)
		goto err_disable_core_clk;

	ret = clk_prepare_enable(ptdev->clks.coregroup);
	if (ret)
		goto err_disable_stacks_clk;

	panthor_devfreq_resume(ptdev);

	if (panthor_device_is_initialized(ptdev) &&
	    drm_dev_enter(&ptdev->base, &cookie)) {
		/* If there was a reset pending at the time we suspended the
		 * device, we force a slow reset.
		 */
		if (atomic_read(&ptdev->reset.pending)) {
			ptdev->reset.fast = false;
			atomic_set(&ptdev->reset.pending, 0);
		}

		ret = panthor_device_resume_hw_components(ptdev);
		if (ret && ptdev->reset.fast) {
			drm_err(&ptdev->base, "Fast reset failed, trying a slow reset");
			ptdev->reset.fast = false;
			ret = panthor_device_resume_hw_components(ptdev);
		}

		if (!ret)
			panthor_sched_resume(ptdev);

		drm_dev_exit(cookie);

		if (ret)
			goto err_suspend_devfreq;
	}

	/* Clear all IOMEM mappings pointing to this device after we've
	 * resumed. This way the fake mappings pointing to the dummy pages
	 * are removed and the real iomem mapping will be restored on next
	 * access.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
	mutex_unlock(&ptdev->pm.mmio_lock);
	return 0;

err_suspend_devfreq:
	panthor_devfreq_suspend(ptdev);
	clk_disable_unprepare(ptdev->clks.coregroup);

err_disable_stacks_clk:
	clk_disable_unprepare(ptdev->clks.stacks);

err_disable_core_clk:
	clk_disable_unprepare(ptdev->clks.core);

err_set_suspended:
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	atomic_set(&ptdev->pm.recovery_needed, 1);
	return ret;
}

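/* Runtime/system suspend handler: tear down userspace MMIO mappings first so
 * later accesses fault on the dummy page instead of dead registers, then
 * suspend the scheduler, FW, MMU and GPU, and finally devfreq and the clocks.
 */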
int panthor_device_suspend(struct device *dev)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	int cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
		return -EINVAL;

	/* Clear all IOMEM mappings pointing to this device before we
	 * shut down the power-domain and clocks. Failing to do that results
	 * in external aborts when the process accesses the iomem region.
	 * We change the state and call unmap_mapping_range() with the
	 * mmio_lock held to make sure the vm_fault handler won't set up
	 * invalid mappings.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDING);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	mutex_unlock(&ptdev->pm.mmio_lock);

	if (panthor_device_is_initialized(ptdev) &&
	    drm_dev_enter(&ptdev->base, &cookie)) {
		cancel_work_sync(&ptdev->reset.work);

		/* We prepare everything as if we were resetting the GPU.
		 * The end of the reset will happen in the resume path though.
		 */
		panthor_sched_suspend(ptdev);
		panthor_fw_suspend(ptdev);
		panthor_mmu_suspend(ptdev);
		panthor_gpu_suspend(ptdev);
		drm_dev_exit(cookie);
	}

	panthor_devfreq_suspend(ptdev);

	clk_disable_unprepare(ptdev->clks.coregroup);
	clk_disable_unprepare(ptdev->clks.stacks);
	clk_disable_unprepare(ptdev->clks.core);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	return 0;
}