// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */

#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "panthor_devfreq.h"
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gem.h"
#include "panthor_gpu.h"
#include "panthor_hw.h"
#include "panthor_mmu.h"
#include "panthor_pwr.h"
#include "panthor_regs.h"
#include "panthor_sched.h"

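/* Select the GPU coherency protocol: start with no coherency, and switch to
 * ACE-Lite when the device is marked DMA-coherent and the GPU advertises
 * ACE-Lite support in GPU_COHERENCY_FEATURES.
 */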
static int panthor_gpu_coherency_init(struct panthor_device *ptdev)
{
	BUILD_BUG_ON(GPU_COHERENCY_NONE != DRM_PANTHOR_GPU_COHERENCY_NONE);
	BUILD_BUG_ON(GPU_COHERENCY_ACE_LITE != DRM_PANTHOR_GPU_COHERENCY_ACE_LITE);
	BUILD_BUG_ON(GPU_COHERENCY_ACE != DRM_PANTHOR_GPU_COHERENCY_ACE);

	/* Start with no coherency, and update it if the device is flagged coherent. */
	ptdev->gpu_info.selected_coherency = GPU_COHERENCY_NONE;
	ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;

	if (!ptdev->coherent)
		return 0;

	/* Check if the ACE-Lite coherency protocol is actually supported by the GPU.
	 * ACE protocol has never been supported for command stream frontend GPUs.
	 */
	if ((gpu_read(ptdev, GPU_COHERENCY_FEATURES) &
		      GPU_COHERENCY_PROT_BIT(ACE_LITE))) {
		ptdev->gpu_info.selected_coherency = GPU_COHERENCY_ACE_LITE;
		return 0;
	}

	drm_err(&ptdev->base, "Coherency not supported by the device");
	return -ENOTSUPP;
}

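/* Grab the mandatory 'core' clock and the optional 'stacks' and 'coregroup'
 * clocks. All three are devm-managed, so there's nothing to undo on failure.
 */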
static int panthor_clk_init(struct panthor_device *ptdev)
{
	ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
	if (IS_ERR(ptdev->clks.core))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.core),
				     "get 'core' clock failed");

	ptdev->clks.stacks = devm_clk_get_optional(ptdev->base.dev, "stacks");
	if (IS_ERR(ptdev->clks.stacks))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.stacks),
				     "get 'stacks' clock failed");

	ptdev->clks.coregroup = devm_clk_get_optional(ptdev->base.dev, "coregroup");
	if (IS_ERR(ptdev->clks.coregroup))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.coregroup),
				     "get 'coregroup' clock failed");

	drm_info(&ptdev->base, "clock rate = %lu\n", clk_get_rate(ptdev->clks.core));
	return 0;
}

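/* Attach the device to its power domains. When there's a single domain, the
 * PM core is expected to have attached it already (dev->pm_domain is set), so
 * there's nothing to do; otherwise attach the whole list of domains described
 * for the device.
 */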
static int panthor_init_power(struct device *dev)
{
	struct dev_pm_domain_list *pd_list = NULL;

	if (dev->pm_domain)
		return 0;

	return devm_pm_domain_attach_list(dev, NULL, &pd_list);
}

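/**
 * panthor_device_unplug() - Cleanly shut the GPU down and mark the device unplugged
 * @ptdev: Device to unplug.
 *
 * Safe to call concurrently from the reset path and the platform remove
 * callback: only the first caller does the teardown, the others wait for it
 * to complete.
 */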
void panthor_device_unplug(struct panthor_device *ptdev)
{
	/* This function can be called from two different paths: the reset work
	 * and the platform device remove callback. drm_dev_unplug() doesn't
	 * deal with concurrent callers, so we have to protect drm_dev_unplug()
	 * calls with our own lock, and bail out if the device is already
	 * unplugged.
	 */
	mutex_lock(&ptdev->unplug.lock);
	if (drm_dev_is_unplugged(&ptdev->base)) {
		/* Someone beat us to it; release the lock and wait for the
		 * unplug operation to be reported as done.
		 */
		mutex_unlock(&ptdev->unplug.lock);
		wait_for_completion(&ptdev->unplug.done);
		return;
	}

	drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0);

	/* Call drm_dev_unplug() so any access to HW blocks happening after
	 * that point gets rejected.
	 */
	drm_dev_unplug(&ptdev->base);

	/* We do the rest of the unplug with the unplug lock released;
	 * future callers will wait on ptdev->unplug.done anyway.
	 */
	mutex_unlock(&ptdev->unplug.lock);

	/* Now, try to cleanly shut down the GPU before the device resources
	 * get reclaimed.
	 */
	panthor_sched_unplug(ptdev);
	panthor_fw_unplug(ptdev);
	panthor_mmu_unplug(ptdev);
	panthor_gpu_unplug(ptdev);
	panthor_pwr_unplug(ptdev);

	pm_runtime_dont_use_autosuspend(ptdev->base.dev);
	pm_runtime_put_sync_suspend(ptdev->base.dev);

	/* If PM is disabled, we need to call the suspend handler manually. */
	if (!IS_ENABLED(CONFIG_PM))
		panthor_device_suspend(ptdev->base.dev);

	/* Report the unplug operation as done to unblock concurrent
	 * panthor_device_unplug() callers.
	 */
	complete_all(&ptdev->unplug.done);
}

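/* drmm action: make sure the reset worker can't run anymore and release its
 * workqueue when the DRM device goes away.
 */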
static void panthor_device_reset_cleanup(struct drm_device *ddev, void *data)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);

	disable_work_sync(&ptdev->reset.work);
	destroy_workqueue(ptdev->reset.wq);
}

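/* Reset worker: quiesce the scheduler, FW and MMU, soft-reset the GPU, then
 * bring everything back up. If the MCU can't be rebooted, the device is
 * unplugged and becomes unusable.
 */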
static void panthor_device_reset_work(struct work_struct *work)
{
	struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work);
	int ret = 0, cookie;

	/* If the device is entering suspend, we don't reset. A slow reset will
	 * be forced at resume time instead.
	 */
	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
		return;

	if (!drm_dev_enter(&ptdev->base, &cookie))
		return;

	panthor_sched_pre_reset(ptdev);
	panthor_fw_pre_reset(ptdev, true);
	panthor_mmu_pre_reset(ptdev);
	panthor_hw_soft_reset(ptdev);
	panthor_hw_l2_power_on(ptdev);
	panthor_mmu_post_reset(ptdev);
	ret = panthor_fw_post_reset(ptdev);
	atomic_set(&ptdev->reset.pending, 0);
	panthor_sched_post_reset(ptdev, ret != 0);
	drm_dev_exit(cookie);

	if (ret) {
		panthor_device_unplug(ptdev);
		drm_err(&ptdev->base, "Failed to boot MCU after reset, making device unusable.");
	}
}

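/* The scheduler is the last component brought up in panthor_device_init(),
 * so use its presence as the 'fully initialized' marker.
 */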
static bool panthor_device_is_initialized(struct panthor_device *ptdev)
{
	return !!ptdev->scheduler;
}

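/* drmm action releasing the dummy latest-flush page allocated in
 * panthor_device_init().
 */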
static void panthor_device_free_page(struct drm_device *ddev, void *data)
{
	__free_page(data);
}

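/**
 * panthor_device_init() - Initialize the GPU device
 * @ptdev: Device to initialize.
 *
 * Sets up locks, the dummy flush page, the reset workqueue, clocks, power
 * domains, devfreq and the MMIO mapping, then brings up the HW blocks
 * (pwr, gpu, mmu, fw, scheduler) with the device resumed, and finally
 * registers the DRM device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */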
int panthor_device_init(struct panthor_device *ptdev)
{
	u32 *dummy_page_virt;
	struct resource *res;
	struct page *p;
	int ret;

	ptdev->soc_data = of_device_get_match_data(ptdev->base.dev);

	init_completion(&ptdev->unplug.done);
	ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
	if (ret)
		return ret;

	ret = drmm_mutex_init(&ptdev->base, &ptdev->pm.mmio_lock);
	if (ret)
		return ret;

#ifdef CONFIG_DEBUG_FS
	drmm_mutex_init(&ptdev->base, &ptdev->gems.lock);
	INIT_LIST_HEAD(&ptdev->gems.node);
#endif

	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	p = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	ptdev->pm.dummy_latest_flush = p;
	dummy_page_virt = page_address(p);
	ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_free_page,
				       ptdev->pm.dummy_latest_flush);
	if (ret)
		return ret;

	/*
	 * Set the dummy page holding the latest flush to 1. This will cause the
	 * flush to be avoided, as we know it isn't necessary if the submission
	 * happens while the dummy page is mapped. Zero cannot be used because
	 * that means 'always flush'.
	 */
	*dummy_page_virt = 1;

	INIT_WORK(&ptdev->reset.work, panthor_device_reset_work);
	ptdev->reset.wq = alloc_ordered_workqueue("panthor-reset-wq", 0);
	if (!ptdev->reset.wq)
		return -ENOMEM;

	ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_reset_cleanup, NULL);
	if (ret)
		return ret;

	ret = panthor_clk_init(ptdev);
	if (ret)
		return ret;

	ret = panthor_init_power(ptdev->base.dev);
	if (ret < 0) {
		drm_err(&ptdev->base, "init power domains failed, ret=%d", ret);
		return ret;
	}

	ret = panthor_devfreq_init(ptdev);
	if (ret)
		return ret;

	ptdev->iomem = devm_platform_get_and_ioremap_resource(to_platform_device(ptdev->base.dev),
							      0, &res);
	if (IS_ERR(ptdev->iomem))
		return PTR_ERR(ptdev->iomem);

	ptdev->phys_addr = res->start;

	ret = devm_pm_runtime_enable(ptdev->base.dev);
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(ptdev->base.dev);
	if (ret)
		return ret;

	/* If PM is disabled, we need to call panthor_device_resume() manually. */
	if (!IS_ENABLED(CONFIG_PM)) {
		ret = panthor_device_resume(ptdev->base.dev);
		if (ret)
			return ret;
	}

	ret = panthor_hw_init(ptdev);
	if (ret)
		goto err_rpm_put;

	ret = panthor_pwr_init(ptdev);
	if (ret)
		goto err_rpm_put;

	ret = panthor_gpu_init(ptdev);
	if (ret)
		goto err_unplug_pwr;

	ret = panthor_gpu_coherency_init(ptdev);
	if (ret)
		goto err_unplug_gpu;

	ret = panthor_mmu_init(ptdev);
	if (ret)
		goto err_unplug_gpu;

	ret = panthor_fw_init(ptdev);
	if (ret)
		goto err_unplug_mmu;

	ret = panthor_sched_init(ptdev);
	if (ret)
		goto err_unplug_fw;

	panthor_gem_init(ptdev);

	/* ~3 frames */
	pm_runtime_set_autosuspend_delay(ptdev->base.dev, 50);
	pm_runtime_use_autosuspend(ptdev->base.dev);

	ret = drm_dev_register(&ptdev->base, 0);
	if (ret)
		goto err_disable_autosuspend;

	pm_runtime_put_autosuspend(ptdev->base.dev);
	return 0;

err_disable_autosuspend:
	pm_runtime_dont_use_autosuspend(ptdev->base.dev);
	panthor_sched_unplug(ptdev);

err_unplug_fw:
	panthor_fw_unplug(ptdev);

err_unplug_mmu:
	panthor_mmu_unplug(ptdev);

err_unplug_gpu:
	panthor_gpu_unplug(ptdev);

err_unplug_pwr:
	panthor_pwr_unplug(ptdev);

err_rpm_put:
	pm_runtime_put_sync_suspend(ptdev->base.dev);
	return ret;
}

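/* Map DRM_PANTHOR_EXCEPTION_* codes to human-readable names, used when
 * decoding fault/exception information.
 */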
#define PANTHOR_EXCEPTION(id) \
	[DRM_PANTHOR_EXCEPTION_ ## id] = { \
		.name = #id, \
	}

struct panthor_exception_info {
	const char *name;
};

static const struct panthor_exception_info panthor_exception_infos[] = {
	PANTHOR_EXCEPTION(OK),
	PANTHOR_EXCEPTION(TERMINATED),
	PANTHOR_EXCEPTION(KABOOM),
	PANTHOR_EXCEPTION(EUREKA),
	PANTHOR_EXCEPTION(ACTIVE),
	PANTHOR_EXCEPTION(CS_RES_TERM),
	PANTHOR_EXCEPTION(CS_CONFIG_FAULT),
	PANTHOR_EXCEPTION(CS_UNRECOVERABLE),
	PANTHOR_EXCEPTION(CS_ENDPOINT_FAULT),
	PANTHOR_EXCEPTION(CS_BUS_FAULT),
	PANTHOR_EXCEPTION(CS_INSTR_INVALID),
	PANTHOR_EXCEPTION(CS_CALL_STACK_OVERFLOW),
	PANTHOR_EXCEPTION(CS_INHERIT_FAULT),
	PANTHOR_EXCEPTION(INSTR_INVALID_PC),
	PANTHOR_EXCEPTION(INSTR_INVALID_ENC),
	PANTHOR_EXCEPTION(INSTR_BARRIER_FAULT),
	PANTHOR_EXCEPTION(DATA_INVALID_FAULT),
	PANTHOR_EXCEPTION(TILE_RANGE_FAULT),
	PANTHOR_EXCEPTION(ADDR_RANGE_FAULT),
	PANTHOR_EXCEPTION(IMPRECISE_FAULT),
	PANTHOR_EXCEPTION(OOM),
	PANTHOR_EXCEPTION(CSF_FW_INTERNAL_ERROR),
	PANTHOR_EXCEPTION(CSF_RES_EVICTION_TIMEOUT),
	PANTHOR_EXCEPTION(GPU_BUS_FAULT),
	PANTHOR_EXCEPTION(GPU_SHAREABILITY_FAULT),
	PANTHOR_EXCEPTION(SYS_SHAREABILITY_FAULT),
	PANTHOR_EXCEPTION(GPU_CACHEABILITY_FAULT),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_0),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_1),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_2),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_3),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_4),
	PANTHOR_EXCEPTION(PERM_FAULT_0),
	PANTHOR_EXCEPTION(PERM_FAULT_1),
	PANTHOR_EXCEPTION(PERM_FAULT_2),
	PANTHOR_EXCEPTION(PERM_FAULT_3),
	PANTHOR_EXCEPTION(ACCESS_FLAG_1),
	PANTHOR_EXCEPTION(ACCESS_FLAG_2),
	PANTHOR_EXCEPTION(ACCESS_FLAG_3),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_IN),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT0),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT1),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT2),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT3),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_0),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_1),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_2),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_3),
};

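/**
 * panthor_exception_name() - Convert an exception code to a human-readable name
 * @ptdev: Device.
 * @exception_code: Exception code.
 *
 * Return: The exception name, or "Unknown exception type" if the code isn't
 * known.
 */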
const char *panthor_exception_name(struct panthor_device *ptdev, u32 exception_code)
{
	if (exception_code >= ARRAY_SIZE(panthor_exception_infos) ||
	    !panthor_exception_infos[exception_code].name)
		return "Unknown exception type";

	return panthor_exception_infos[exception_code].name;
}

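/* Fault handler for the user MMIO region. While the device is active, the
 * flush-ID page is backed by the page containing the GPU's LATEST_FLUSH
 * register; while it's suspended, it's backed by the dummy page, so reads hit
 * RAM instead of a powered-down register. The mappings are torn down on
 * suspend/resume transitions (see the unmap_mapping_range() calls below),
 * which forces a re-fault with the right backing.
 */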
static vm_fault_t panthor_mmio_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct panthor_device *ptdev = vma->vm_private_data;
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long pfn;
	pgprot_t pgprot;
	vm_fault_t ret;
	bool active;
	int cookie;

	if (!drm_dev_enter(&ptdev->base, &cookie))
		return VM_FAULT_SIGBUS;

	mutex_lock(&ptdev->pm.mmio_lock);
	active = atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE;

	switch (offset) {
	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
		if (active)
			pfn = __phys_to_pfn(ptdev->phys_addr + CSF_GPU_LATEST_FLUSH_ID);
		else
			pfn = page_to_pfn(ptdev->pm.dummy_latest_flush);
		break;

	default:
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	pgprot = vma->vm_page_prot;
	if (active)
		pgprot = pgprot_noncached(pgprot);

	ret = vmf_insert_pfn_prot(vma, vmf->address, pfn, pgprot);

out_unlock:
	mutex_unlock(&ptdev->pm.mmio_lock);
	drm_dev_exit(cookie);
	return ret;
}

static const struct vm_operations_struct panthor_mmio_vm_ops = {
	.fault = panthor_mmio_vm_fault,
};

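/**
 * panthor_device_mmap_io() - Handle mmap() on the user MMIO region
 * @ptdev: Device.
 * @vma: The vma caught by the mmap() call.
 *
 * Only the read-only flush-ID page is currently exposed. The actual PFN
 * insertion is deferred to the fault handler so the backing can follow the
 * device power state.
 *
 * Hypothetical userspace sketch (the offset macro comes from the panthor
 * uAPI, the rest is illustrative only):
 *
 *   uint32_t *flush_id = mmap(NULL, page_size, PROT_READ, MAP_SHARED, drm_fd,
 *                             DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET);
 *
 * Return: 0 on success, a negative error code otherwise.
 */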
int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *vma)
{
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;

	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	switch (offset) {
	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
		    (vma->vm_flags & (VM_WRITE | VM_EXEC)))
			return -EINVAL;
		vm_flags_clear(vma, VM_MAYWRITE);

		break;

	default:
		return -EINVAL;
	}

	/* Defer actual mapping to the fault handler. */
	vma->vm_private_data = ptdev;
	vma->vm_ops = &panthor_mmio_vm_ops;
	vm_flags_set(vma,
		     VM_IO | VM_DONTCOPY | VM_DONTEXPAND |
		     VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
	return 0;
}

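/* Resume the HW blocks in bottom-up order (pwr, gpu, mmu, fw). If the FW
 * fails to resume, roll the other blocks back so the caller can retry with a
 * slow reset or bail out cleanly.
 */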
static int panthor_device_resume_hw_components(struct panthor_device *ptdev)
{
	int ret;

	panthor_pwr_resume(ptdev);
	panthor_gpu_resume(ptdev);
	panthor_mmu_resume(ptdev);

	ret = panthor_fw_resume(ptdev);
	if (!ret)
		return 0;

	panthor_mmu_suspend(ptdev);
	panthor_gpu_suspend(ptdev);
	panthor_pwr_suspend(ptdev);
	return ret;
}

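/**
 * panthor_device_resume() - Runtime/system resume handler
 * @dev: Device to resume.
 *
 * Re-enables the clocks, resumes devfreq and the HW components (falling back
 * to a slow reset if a fast reset fails), then invalidates the dummy MMIO
 * mappings so the next fault maps the real registers.
 *
 * Return: 0 on success, a negative error code otherwise.
 */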
int panthor_device_resume(struct device *dev)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	int ret, cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_SUSPENDED)
		return -EINVAL;

	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_RESUMING);

	ret = clk_prepare_enable(ptdev->clks.core);
	if (ret)
		goto err_set_suspended;

	ret = clk_prepare_enable(ptdev->clks.stacks);
	if (ret)
		goto err_disable_core_clk;

	ret = clk_prepare_enable(ptdev->clks.coregroup);
	if (ret)
		goto err_disable_stacks_clk;

	panthor_devfreq_resume(ptdev);

	if (panthor_device_is_initialized(ptdev) &&
	    drm_dev_enter(&ptdev->base, &cookie)) {
		/* If there was a reset pending at the time we suspended the
		 * device, we force a slow reset.
		 */
		if (atomic_read(&ptdev->reset.pending)) {
			ptdev->reset.fast = false;
			atomic_set(&ptdev->reset.pending, 0);
		}

		ret = panthor_device_resume_hw_components(ptdev);
		if (ret && ptdev->reset.fast) {
			drm_err(&ptdev->base, "Fast reset failed, trying a slow reset");
			ptdev->reset.fast = false;
			ret = panthor_device_resume_hw_components(ptdev);
		}

		if (!ret)
			panthor_sched_resume(ptdev);

		drm_dev_exit(cookie);

		if (ret)
			goto err_suspend_devfreq;
	}

	/* Clear all IOMEM mappings pointing to this device after we've
	 * resumed. This way the fake mappings pointing to the dummy pages
	 * are removed and the real iomem mapping will be restored on next
	 * access.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
	mutex_unlock(&ptdev->pm.mmio_lock);
	return 0;

err_suspend_devfreq:
	panthor_devfreq_suspend(ptdev);
	clk_disable_unprepare(ptdev->clks.coregroup);

err_disable_stacks_clk:
	clk_disable_unprepare(ptdev->clks.stacks);

err_disable_core_clk:
	clk_disable_unprepare(ptdev->clks.core);

err_set_suspended:
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	atomic_set(&ptdev->pm.recovery_needed, 1);
	return ret;
}

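/**
 * panthor_device_suspend() - Runtime/system suspend handler
 * @dev: Device to suspend.
 *
 * Tears down the user MMIO mappings (so later faults hit the dummy page),
 * suspends the scheduler, FW, MMU and GPU, then stops devfreq and gates the
 * clocks.
 *
 * Return: 0 on success, a negative error code otherwise.
 */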
int panthor_device_suspend(struct device *dev)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	int cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
		return -EINVAL;

	/* Clear all IOMEM mappings pointing to this device before we
	 * shut down the power-domain and clocks. Failing to do that results
	 * in external aborts when the process accesses the iomem region.
	 * We change the state and call unmap_mapping_range() with the
	 * mmio_lock held to make sure the vm_fault handler won't set up
	 * invalid mappings.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDING);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	mutex_unlock(&ptdev->pm.mmio_lock);

	if (panthor_device_is_initialized(ptdev) &&
	    drm_dev_enter(&ptdev->base, &cookie)) {
		cancel_work_sync(&ptdev->reset.work);

		/* We prepare everything as if we were resetting the GPU.
		 * The end of the reset will happen in the resume path though.
		 */
		panthor_sched_suspend(ptdev);
		panthor_fw_suspend(ptdev);
		panthor_mmu_suspend(ptdev);
		panthor_gpu_suspend(ptdev);
		panthor_pwr_suspend(ptdev);
		drm_dev_exit(cookie);
	}

	panthor_devfreq_suspend(ptdev);

	clk_disable_unprepare(ptdev->clks.coregroup);
	clk_disable_unprepare(ptdev->clks.stacks);
	clk_disable_unprepare(ptdev->clks.core);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	return 0;
}
624