xref: /linux/drivers/gpu/drm/panthor/panthor_device.c (revision face6a3615a649456eb4549f6d474221d877d604)
// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */

#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

#include "panthor_devfreq.h"
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gpu.h"
#include "panthor_hw.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"

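/* Cache the DMA coherency state of the device and, when the firmware
 * described it as coherent, make sure the GPU actually supports the
 * ACE-Lite coherency protocol.
 */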
static int panthor_gpu_coherency_init(struct panthor_device *ptdev)
{
	ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;

	if (!ptdev->coherent)
		return 0;

	/* Check if the ACE-Lite coherency protocol is actually supported by the GPU.
	 * ACE protocol has never been supported for command stream frontend GPUs.
	 */
	if ((gpu_read(ptdev, GPU_COHERENCY_FEATURES) &
		      GPU_COHERENCY_PROT_BIT(ACE_LITE)))
		return 0;

	drm_err(&ptdev->base, "Coherency not supported by the device");
	return -ENOTSUPP;
}

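/* Get the clocks feeding the GPU: 'core' is mandatory, while 'stacks' and
 * 'coregroup' are optional and may not exist on all platforms. All of them
 * are devm-managed, so no explicit cleanup is needed.
 */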
static int panthor_clk_init(struct panthor_device *ptdev)
{
	ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
	if (IS_ERR(ptdev->clks.core))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.core),
				     "get 'core' clock failed");

	ptdev->clks.stacks = devm_clk_get_optional(ptdev->base.dev, "stacks");
	if (IS_ERR(ptdev->clks.stacks))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.stacks),
				     "get 'stacks' clock failed");

	ptdev->clks.coregroup = devm_clk_get_optional(ptdev->base.dev, "coregroup");
	if (IS_ERR(ptdev->clks.coregroup))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.coregroup),
				     "get 'coregroup' clock failed");

	drm_info(&ptdev->base, "clock rate = %lu\n", clk_get_rate(ptdev->clks.core));
	return 0;
}

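/**
 * panthor_device_unplug() - Unplug a device
 * @ptdev: Device to unplug.
 *
 * Marks the DRM device as unplugged and tears down the scheduler, FW, MMU
 * and GPU blocks. Can be called concurrently from the reset path and the
 * platform-device remove callback; late callers wait until the first one
 * reports completion.
 */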
void panthor_device_unplug(struct panthor_device *ptdev)
{
	/* This function can be called from two different paths: the reset work
	 * and the platform device remove callback. drm_dev_unplug() doesn't
	 * deal with concurrent callers, so we have to protect drm_dev_unplug()
	 * calls with our own lock, and bail out if the device is already
	 * unplugged.
	 */
	mutex_lock(&ptdev->unplug.lock);
	if (drm_dev_is_unplugged(&ptdev->base)) {
		/* Someone beat us to it; release the lock and wait for the
		 * unplug operation to be reported as done.
		 */
		mutex_unlock(&ptdev->unplug.lock);
		wait_for_completion(&ptdev->unplug.done);
		return;
	}

	/* Call drm_dev_unplug() so any access to HW blocks happening after
	 * that point gets rejected.
	 */
	drm_dev_unplug(&ptdev->base);

	/* We do the rest of the unplug with the unplug lock released;
	 * future callers will wait on ptdev->unplug.done anyway.
	 */
	mutex_unlock(&ptdev->unplug.lock);

	drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0);

	/* Now, try to cleanly shut down the GPU before the device resources
	 * get reclaimed.
	 */
	panthor_sched_unplug(ptdev);
	panthor_fw_unplug(ptdev);
	panthor_mmu_unplug(ptdev);
	panthor_gpu_unplug(ptdev);

	pm_runtime_dont_use_autosuspend(ptdev->base.dev);
	pm_runtime_put_sync_suspend(ptdev->base.dev);

	/* If PM is disabled, we need to call the suspend handler manually. */
	if (!IS_ENABLED(CONFIG_PM))
		panthor_device_suspend(ptdev->base.dev);

	/* Report the unplug operation as done to unblock concurrent
	 * panthor_device_unplug() callers.
	 */
	complete_all(&ptdev->unplug.done);
}

static void panthor_device_reset_cleanup(struct drm_device *ddev, void *data)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);

	cancel_work_sync(&ptdev->reset.work);
	destroy_workqueue(ptdev->reset.wq);
}

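/* Reset work: quiesce the scheduler, FW and MMU, soft-reset the GPU, power
 * the L2 back on and restore the MMU/FW state. If the MCU cannot be
 * rebooted, the device is unplugged and becomes unusable.
 */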
static void panthor_device_reset_work(struct work_struct *work)
{
	struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work);
	int ret = 0, cookie;

	/* If the device is entering suspend, we don't reset. A slow reset will
	 * be forced at resume time instead.
	 */
	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
		return;

	if (!drm_dev_enter(&ptdev->base, &cookie))
		return;

	panthor_sched_pre_reset(ptdev);
	panthor_fw_pre_reset(ptdev, true);
	panthor_mmu_pre_reset(ptdev);
	panthor_gpu_soft_reset(ptdev);
	panthor_gpu_l2_power_on(ptdev);
	panthor_mmu_post_reset(ptdev);
	ret = panthor_fw_post_reset(ptdev);
	atomic_set(&ptdev->reset.pending, 0);
	panthor_sched_post_reset(ptdev, ret != 0);
	drm_dev_exit(cookie);

	if (ret) {
		panthor_device_unplug(ptdev);
		drm_err(&ptdev->base, "Failed to boot MCU after reset, making device unusable.");
	}
}

static bool panthor_device_is_initialized(struct panthor_device *ptdev)
{
	return !!ptdev->scheduler;
}

static void panthor_device_free_page(struct drm_device *ddev, void *data)
{
	__free_page(data);
}

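/**
 * panthor_device_init() - Initialize a panthor device
 * @ptdev: Device to initialize.
 *
 * Sets up the locks, the dummy LATEST_FLUSH page, the reset workqueue, the
 * clocks, devfreq, the register mapping and runtime PM, then brings up the
 * GPU, MMU, FW and scheduler blocks before registering the DRM device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */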
int panthor_device_init(struct panthor_device *ptdev)
{
	u32 *dummy_page_virt;
	struct resource *res;
	struct page *p;
	int ret;

	ptdev->soc_data = of_device_get_match_data(ptdev->base.dev);

	init_completion(&ptdev->unplug.done);
	ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
	if (ret)
		return ret;

	ret = drmm_mutex_init(&ptdev->base, &ptdev->pm.mmio_lock);
	if (ret)
		return ret;

#ifdef CONFIG_DEBUG_FS
	drmm_mutex_init(&ptdev->base, &ptdev->gems.lock);
	INIT_LIST_HEAD(&ptdev->gems.node);
#endif

	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	p = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	ptdev->pm.dummy_latest_flush = p;
	dummy_page_virt = page_address(p);
	ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_free_page,
				       ptdev->pm.dummy_latest_flush);
	if (ret)
		return ret;

	/*
	 * Set the dummy page holding the latest flush to 1. This will cause the
	 * flush to be skipped, as we know it isn't necessary if the submission
	 * happens while the dummy page is mapped. Zero cannot be used because
	 * that means 'always flush'.
	 */
	*dummy_page_virt = 1;

	INIT_WORK(&ptdev->reset.work, panthor_device_reset_work);
	ptdev->reset.wq = alloc_ordered_workqueue("panthor-reset-wq", 0);
	if (!ptdev->reset.wq)
		return -ENOMEM;

	ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_reset_cleanup, NULL);
	if (ret)
		return ret;

	ret = panthor_clk_init(ptdev);
	if (ret)
		return ret;

	ret = panthor_devfreq_init(ptdev);
	if (ret)
		return ret;

	ptdev->iomem = devm_platform_get_and_ioremap_resource(to_platform_device(ptdev->base.dev),
							      0, &res);
	if (IS_ERR(ptdev->iomem))
		return PTR_ERR(ptdev->iomem);

	ptdev->phys_addr = res->start;

	ret = devm_pm_runtime_enable(ptdev->base.dev);
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(ptdev->base.dev);
	if (ret)
		return ret;

	/* If PM is disabled, we need to call panthor_device_resume() manually. */
	if (!IS_ENABLED(CONFIG_PM)) {
		ret = panthor_device_resume(ptdev->base.dev);
		if (ret)
			return ret;
	}

	ret = panthor_hw_init(ptdev);
	if (ret)
		goto err_rpm_put;

	ret = panthor_gpu_init(ptdev);
	if (ret)
		goto err_rpm_put;

	ret = panthor_gpu_coherency_init(ptdev);
	if (ret)
		goto err_unplug_gpu;

	ret = panthor_mmu_init(ptdev);
	if (ret)
		goto err_unplug_gpu;

	ret = panthor_fw_init(ptdev);
	if (ret)
		goto err_unplug_mmu;

	ret = panthor_sched_init(ptdev);
	if (ret)
		goto err_unplug_fw;

	/* ~3 frames */
	pm_runtime_set_autosuspend_delay(ptdev->base.dev, 50);
	pm_runtime_use_autosuspend(ptdev->base.dev);

	ret = drm_dev_register(&ptdev->base, 0);
	if (ret)
		goto err_disable_autosuspend;

	pm_runtime_put_autosuspend(ptdev->base.dev);
	return 0;

err_disable_autosuspend:
	pm_runtime_dont_use_autosuspend(ptdev->base.dev);
	panthor_sched_unplug(ptdev);

err_unplug_fw:
	panthor_fw_unplug(ptdev);

err_unplug_mmu:
	panthor_mmu_unplug(ptdev);

err_unplug_gpu:
	panthor_gpu_unplug(ptdev);

err_rpm_put:
	pm_runtime_put_sync_suspend(ptdev->base.dev);
	return ret;
}

#define PANTHOR_EXCEPTION(id) \
	[DRM_PANTHOR_EXCEPTION_ ## id] = { \
		.name = #id, \
	}

struct panthor_exception_info {
	const char *name;
};

static const struct panthor_exception_info panthor_exception_infos[] = {
	PANTHOR_EXCEPTION(OK),
	PANTHOR_EXCEPTION(TERMINATED),
	PANTHOR_EXCEPTION(KABOOM),
	PANTHOR_EXCEPTION(EUREKA),
	PANTHOR_EXCEPTION(ACTIVE),
	PANTHOR_EXCEPTION(CS_RES_TERM),
	PANTHOR_EXCEPTION(CS_CONFIG_FAULT),
	PANTHOR_EXCEPTION(CS_UNRECOVERABLE),
	PANTHOR_EXCEPTION(CS_ENDPOINT_FAULT),
	PANTHOR_EXCEPTION(CS_BUS_FAULT),
	PANTHOR_EXCEPTION(CS_INSTR_INVALID),
	PANTHOR_EXCEPTION(CS_CALL_STACK_OVERFLOW),
	PANTHOR_EXCEPTION(CS_INHERIT_FAULT),
	PANTHOR_EXCEPTION(INSTR_INVALID_PC),
	PANTHOR_EXCEPTION(INSTR_INVALID_ENC),
	PANTHOR_EXCEPTION(INSTR_BARRIER_FAULT),
	PANTHOR_EXCEPTION(DATA_INVALID_FAULT),
	PANTHOR_EXCEPTION(TILE_RANGE_FAULT),
	PANTHOR_EXCEPTION(ADDR_RANGE_FAULT),
	PANTHOR_EXCEPTION(IMPRECISE_FAULT),
	PANTHOR_EXCEPTION(OOM),
	PANTHOR_EXCEPTION(CSF_FW_INTERNAL_ERROR),
	PANTHOR_EXCEPTION(CSF_RES_EVICTION_TIMEOUT),
	PANTHOR_EXCEPTION(GPU_BUS_FAULT),
	PANTHOR_EXCEPTION(GPU_SHAREABILITY_FAULT),
	PANTHOR_EXCEPTION(SYS_SHAREABILITY_FAULT),
	PANTHOR_EXCEPTION(GPU_CACHEABILITY_FAULT),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_0),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_1),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_2),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_3),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_4),
	PANTHOR_EXCEPTION(PERM_FAULT_0),
	PANTHOR_EXCEPTION(PERM_FAULT_1),
	PANTHOR_EXCEPTION(PERM_FAULT_2),
	PANTHOR_EXCEPTION(PERM_FAULT_3),
	PANTHOR_EXCEPTION(ACCESS_FLAG_1),
	PANTHOR_EXCEPTION(ACCESS_FLAG_2),
	PANTHOR_EXCEPTION(ACCESS_FLAG_3),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_IN),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT0),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT1),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT2),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT3),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_0),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_1),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_2),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_3),
};

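/**
 * panthor_exception_name() - Convert an exception code to a string
 * @ptdev: Device.
 * @exception_code: Exception code reported by the HW/FW.
 *
 * Return: A human-readable name for the exception, or "Unknown exception
 * type" if the code is not part of the table above.
 */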
const char *panthor_exception_name(struct panthor_device *ptdev, u32 exception_code)
{
	if (exception_code >= ARRAY_SIZE(panthor_exception_infos) ||
	    !panthor_exception_infos[exception_code].name)
		return "Unknown exception type";

	return panthor_exception_infos[exception_code].name;
}

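/* Fault handler for the user MMIO region. When the device is active, the
 * real LATEST_FLUSH_ID register page is mapped non-cached; when it is
 * suspended, the pre-initialized dummy page is mapped instead so userspace
 * keeps reading a sensible value.
 */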
static vm_fault_t panthor_mmio_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct panthor_device *ptdev = vma->vm_private_data;
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long pfn;
	pgprot_t pgprot;
	vm_fault_t ret;
	bool active;
	int cookie;

	if (!drm_dev_enter(&ptdev->base, &cookie))
		return VM_FAULT_SIGBUS;

	mutex_lock(&ptdev->pm.mmio_lock);
	active = atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE;

	switch (offset) {
	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
		if (active)
			pfn = __phys_to_pfn(ptdev->phys_addr + CSF_GPU_LATEST_FLUSH_ID);
		else
			pfn = page_to_pfn(ptdev->pm.dummy_latest_flush);
		break;

	default:
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	pgprot = vma->vm_page_prot;
	if (active)
		pgprot = pgprot_noncached(pgprot);

	ret = vmf_insert_pfn_prot(vma, vmf->address, pfn, pgprot);

out_unlock:
	mutex_unlock(&ptdev->pm.mmio_lock);
	drm_dev_exit(cookie);
	return ret;
}

static const struct vm_operations_struct panthor_mmio_vm_ops = {
	.fault = panthor_mmio_vm_fault,
};

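/**
 * panthor_device_mmap_io() - Mmap the user MMIO region
 * @ptdev: Device.
 * @vma: The vma caught by the mmap() call.
 *
 * Only the LATEST_FLUSH_ID page is exposed for now, as a shared, read-only,
 * single-page mapping. The PFN insertion is deferred to the fault handler
 * so the backing page can follow the device PM state.
 *
 * Illustrative userspace sketch (not part of this file, assumes a 64-bit
 * process and an open DRM fd):
 *
 *   const uint32_t *latest_flush =
 *           mmap(NULL, page_size, PROT_READ, MAP_SHARED, drm_fd,
 *                DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET);
 *
 * Return: 0 on success, a negative error code otherwise.
 */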
int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *vma)
{
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;

	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	switch (offset) {
	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
		    (vma->vm_flags & (VM_WRITE | VM_EXEC)))
			return -EINVAL;
		vm_flags_clear(vma, VM_MAYWRITE);

		break;

	default:
		return -EINVAL;
	}

	/* Defer actual mapping to the fault handler. */
	vma->vm_private_data = ptdev;
	vma->vm_ops = &panthor_mmio_vm_ops;
	vm_flags_set(vma,
		     VM_IO | VM_DONTCOPY | VM_DONTEXPAND |
		     VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
	return 0;
}

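/* Resume the GPU, MMU and FW blocks, in that order. If the FW fails to
 * resume, the MMU and GPU are suspended again so the caller sees a
 * consistent "still suspended" state.
 */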
static int panthor_device_resume_hw_components(struct panthor_device *ptdev)
{
	int ret;

	panthor_gpu_resume(ptdev);
	panthor_mmu_resume(ptdev);

	ret = panthor_fw_resume(ptdev);
	if (!ret)
		return 0;

	panthor_mmu_suspend(ptdev);
	panthor_gpu_suspend(ptdev);
	return ret;
}

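/**
 * panthor_device_resume() - PM resume handler
 * @dev: Device to resume.
 *
 * Re-enables the clocks and devfreq, resumes the HW blocks (forcing a slow
 * reset if one was pending at suspend time), then drops the dummy MMIO
 * mappings so the real IO region gets faulted back in, and marks the
 * device active.
 *
 * Return: 0 on success, a negative error code otherwise.
 */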
int panthor_device_resume(struct device *dev)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	int ret, cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_SUSPENDED)
		return -EINVAL;

	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_RESUMING);

	ret = clk_prepare_enable(ptdev->clks.core);
	if (ret)
		goto err_set_suspended;

	ret = clk_prepare_enable(ptdev->clks.stacks);
	if (ret)
		goto err_disable_core_clk;

	ret = clk_prepare_enable(ptdev->clks.coregroup);
	if (ret)
		goto err_disable_stacks_clk;

	panthor_devfreq_resume(ptdev);

	if (panthor_device_is_initialized(ptdev) &&
	    drm_dev_enter(&ptdev->base, &cookie)) {
		/* If there was a reset pending at the time we suspended the
		 * device, we force a slow reset.
		 */
		if (atomic_read(&ptdev->reset.pending)) {
			ptdev->reset.fast = false;
			atomic_set(&ptdev->reset.pending, 0);
		}

		ret = panthor_device_resume_hw_components(ptdev);
		if (ret && ptdev->reset.fast) {
			drm_err(&ptdev->base, "Fast reset failed, trying a slow reset");
			ptdev->reset.fast = false;
			ret = panthor_device_resume_hw_components(ptdev);
		}

		if (!ret)
			panthor_sched_resume(ptdev);

		drm_dev_exit(cookie);

		if (ret)
			goto err_suspend_devfreq;
	}

	/* Clear all IOMEM mappings pointing to this device after we've
	 * resumed. This way the fake mappings pointing to the dummy page
	 * are removed and the real iomem mapping will be restored on the
	 * next access.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
	mutex_unlock(&ptdev->pm.mmio_lock);
	return 0;

err_suspend_devfreq:
	panthor_devfreq_suspend(ptdev);
	clk_disable_unprepare(ptdev->clks.coregroup);

err_disable_stacks_clk:
	clk_disable_unprepare(ptdev->clks.stacks);

err_disable_core_clk:
	clk_disable_unprepare(ptdev->clks.core);

err_set_suspended:
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	atomic_set(&ptdev->pm.recovery_needed, 1);
	return ret;
}

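/**
 * panthor_device_suspend() - PM suspend handler
 * @dev: Device to suspend.
 *
 * Unmaps the user MMIO region first to avoid external aborts on later
 * accesses, then suspends the scheduler, FW, MMU and GPU blocks, devfreq,
 * and finally the clocks.
 *
 * Return: 0 on success, -EINVAL if the device is not active.
 */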
int panthor_device_suspend(struct device *dev)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	int cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
		return -EINVAL;

	/* Clear all IOMEM mappings pointing to this device before we
	 * shut down the power-domain and clocks. Failing to do that results
	 * in external aborts when the process accesses the iomem region.
	 * We change the state and call unmap_mapping_range() with the
	 * mmio_lock held to make sure the vm_fault handler won't set up
	 * invalid mappings.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDING);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	mutex_unlock(&ptdev->pm.mmio_lock);

	if (panthor_device_is_initialized(ptdev) &&
	    drm_dev_enter(&ptdev->base, &cookie)) {
		cancel_work_sync(&ptdev->reset.work);

		/* We prepare everything as if we were resetting the GPU.
		 * The end of the reset will happen in the resume path though.
		 */
		panthor_sched_suspend(ptdev);
		panthor_fw_suspend(ptdev);
		panthor_mmu_suspend(ptdev);
		panthor_gpu_suspend(ptdev);
		drm_dev_exit(cookie);
	}

	panthor_devfreq_suspend(ptdev);

	clk_disable_unprepare(ptdev->clks.coregroup);
	clk_disable_unprepare(ptdev->clks.stacks);
	clk_disable_unprepare(ptdev->clks.core);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	return 0;
}
584