// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */

#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>
#include <drm/drm_print.h>

#include "panthor_devfreq.h"
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gpu.h"
#include "panthor_hw.h"
#include "panthor_mmu.h"
#include "panthor_pwr.h"
#include "panthor_regs.h"
#include "panthor_sched.h"

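/* Detect whether the device is I/O coherent and, if so, make sure the GPU
 * side supports the ACE-Lite coherency protocol.
 */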
static int panthor_gpu_coherency_init(struct panthor_device *ptdev)
{
	ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;

	if (!ptdev->coherent)
		return 0;

	/* Check if the ACE-Lite coherency protocol is actually supported by the GPU.
	 * ACE protocol has never been supported for command stream frontend GPUs.
	 */
	if ((gpu_read(ptdev, GPU_COHERENCY_FEATURES) &
		      GPU_COHERENCY_PROT_BIT(ACE_LITE)))
		return 0;

	drm_err(&ptdev->base, "Coherency not supported by the device");
	return -ENOTSUPP;
}

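/* Get the mandatory 'core' clock and the optional 'stacks' and 'coregroup'
 * clocks.
 */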
static int panthor_clk_init(struct panthor_device *ptdev)
{
	ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
	if (IS_ERR(ptdev->clks.core))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.core),
				     "get 'core' clock failed");

	ptdev->clks.stacks = devm_clk_get_optional(ptdev->base.dev, "stacks");
	if (IS_ERR(ptdev->clks.stacks))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.stacks),
				     "get 'stacks' clock failed");

	ptdev->clks.coregroup = devm_clk_get_optional(ptdev->base.dev, "coregroup");
	if (IS_ERR(ptdev->clks.coregroup))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.coregroup),
				     "get 'coregroup' clock failed");

	drm_info(&ptdev->base, "clock rate = %lu\n", clk_get_rate(ptdev->clks.core));
	return 0;
}

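/* Attach the device to its power domains, unless the platform already
 * assigned a PM domain.
 */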
static int panthor_init_power(struct device *dev)
{
	struct dev_pm_domain_list *pd_list = NULL;

	if (dev->pm_domain)
		return 0;

	return devm_pm_domain_attach_list(dev, NULL, &pd_list);
}

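/**
 * panthor_device_unplug() - Unplug the device
 * @ptdev: Device to unplug.
 *
 * Rejects any further HW access and cleanly shuts down the scheduler, FW,
 * MMU, GPU and PWR blocks. Safe against concurrent callers coming from the
 * reset path and the platform device remove callback.
 */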
void panthor_device_unplug(struct panthor_device *ptdev)
{
	/* This function can be called from two different paths: the reset work
	 * and the platform device remove callback. drm_dev_unplug() doesn't
	 * deal with concurrent callers, so we have to protect drm_dev_unplug()
	 * calls with our own lock, and bail out if the device is already
	 * unplugged.
	 */
	mutex_lock(&ptdev->unplug.lock);
	if (drm_dev_is_unplugged(&ptdev->base)) {
		/* Someone beat us to it, release the lock and wait for the
		 * unplug operation to be reported as done.
		 */
		mutex_unlock(&ptdev->unplug.lock);
		wait_for_completion(&ptdev->unplug.done);
		return;
	}

	drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0);

	/* Call drm_dev_unplug() so any access to HW blocks happening after
	 * that point gets rejected.
	 */
	drm_dev_unplug(&ptdev->base);

	/* We do the rest of the unplug with the unplug lock released, since
	 * future callers will wait on ptdev->unplug.done anyway.
	 */
	mutex_unlock(&ptdev->unplug.lock);

	/* Now, try to cleanly shutdown the GPU before the device resources
	 * get reclaimed.
	 */
	panthor_sched_unplug(ptdev);
	panthor_fw_unplug(ptdev);
	panthor_mmu_unplug(ptdev);
	panthor_gpu_unplug(ptdev);
	panthor_pwr_unplug(ptdev);

	pm_runtime_dont_use_autosuspend(ptdev->base.dev);
	pm_runtime_put_sync_suspend(ptdev->base.dev);

	/* If PM is disabled, we need to call the suspend handler manually. */
	if (!IS_ENABLED(CONFIG_PM))
		panthor_device_suspend(ptdev->base.dev);

	/* Report the unplug operation as done to unblock concurrent
	 * panthor_device_unplug() callers.
	 */
	complete_all(&ptdev->unplug.done);
}

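/* DRM-managed cleanup for the reset machinery: disable the reset work and
 * destroy the ordered workqueue it runs on.
 */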
static void panthor_device_reset_cleanup(struct drm_device *ddev, void *data)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);

	disable_work_sync(&ptdev->reset.work);
	destroy_workqueue(ptdev->reset.wq);
}

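/* Reset work: quiesce the scheduler, FW and MMU, soft-reset the GPU, then
 * bring everything back up. If the MCU can't be rebooted, the device is
 * unplugged.
 */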
static void panthor_device_reset_work(struct work_struct *work)
{
	struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work);
	int ret = 0, cookie;

	/* If the device is entering suspend, we don't reset. A slow reset will
	 * be forced at resume time instead.
	 */
	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
		return;

	if (!drm_dev_enter(&ptdev->base, &cookie))
		return;

	panthor_sched_pre_reset(ptdev);
	panthor_fw_pre_reset(ptdev, true);
	panthor_mmu_pre_reset(ptdev);
	panthor_hw_soft_reset(ptdev);
	panthor_hw_l2_power_on(ptdev);
	panthor_mmu_post_reset(ptdev);
	ret = panthor_fw_post_reset(ptdev);
	atomic_set(&ptdev->reset.pending, 0);
	panthor_sched_post_reset(ptdev, ret != 0);
	drm_dev_exit(cookie);

	if (ret) {
		panthor_device_unplug(ptdev);
		drm_err(&ptdev->base, "Failed to boot MCU after reset, making device unusable.");
	}
}

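/* The scheduler is the last component initialized in panthor_device_init(),
 * so a non-NULL scheduler pointer means the device is fully initialized.
 */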
static bool panthor_device_is_initialized(struct panthor_device *ptdev)
{
	return !!ptdev->scheduler;
}

static void panthor_device_free_page(struct drm_device *ddev, void *data)
{
	__free_page(data);
}

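/**
 * panthor_device_init() - Initialize the panthor device
 * @ptdev: Device to initialize.
 *
 * Sets up the PM state, reset machinery, clocks, power domains, devfreq and
 * MMIO region, then brings up the HW blocks (PWR, GPU, MMU, FW, scheduler)
 * and registers the DRM device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */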
int panthor_device_init(struct panthor_device *ptdev)
{
	u32 *dummy_page_virt;
	struct resource *res;
	struct page *p;
	int ret;

	ptdev->soc_data = of_device_get_match_data(ptdev->base.dev);

	init_completion(&ptdev->unplug.done);
	ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
	if (ret)
		return ret;

	ret = drmm_mutex_init(&ptdev->base, &ptdev->pm.mmio_lock);
	if (ret)
		return ret;

#ifdef CONFIG_DEBUG_FS
	drmm_mutex_init(&ptdev->base, &ptdev->gems.lock);
	INIT_LIST_HEAD(&ptdev->gems.node);
#endif

	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	p = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	ptdev->pm.dummy_latest_flush = p;
	dummy_page_virt = page_address(p);
	ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_free_page,
				       ptdev->pm.dummy_latest_flush);
	if (ret)
		return ret;

	/*
	 * Set the dummy page holding the latest flush to 1. This will cause the
	 * flush to be avoided, as we know it isn't necessary if the submission
	 * happens while the dummy page is mapped. Zero cannot be used because
	 * that means 'always flush'.
	 */
	*dummy_page_virt = 1;
	INIT_WORK(&ptdev->reset.work, panthor_device_reset_work);
	ptdev->reset.wq = alloc_ordered_workqueue("panthor-reset-wq", 0);
	if (!ptdev->reset.wq)
		return -ENOMEM;

	ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_reset_cleanup, NULL);
	if (ret)
		return ret;

	ret = panthor_clk_init(ptdev);
	if (ret)
		return ret;

	ret = panthor_init_power(ptdev->base.dev);
	if (ret < 0) {
		drm_err(&ptdev->base, "init power domains failed, ret=%d", ret);
		return ret;
	}

	ret = panthor_devfreq_init(ptdev);
	if (ret)
		return ret;

	ptdev->iomem = devm_platform_get_and_ioremap_resource(to_platform_device(ptdev->base.dev),
							      0, &res);
	if (IS_ERR(ptdev->iomem))
		return PTR_ERR(ptdev->iomem);

	ptdev->phys_addr = res->start;

	ret = devm_pm_runtime_enable(ptdev->base.dev);
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(ptdev->base.dev);
	if (ret)
		return ret;

	/* If PM is disabled, we need to call panthor_device_resume() manually. */
	if (!IS_ENABLED(CONFIG_PM)) {
		ret = panthor_device_resume(ptdev->base.dev);
		if (ret)
			return ret;
	}

	ret = panthor_hw_init(ptdev);
	if (ret)
		goto err_rpm_put;

	ret = panthor_pwr_init(ptdev);
	if (ret)
		goto err_rpm_put;

	ret = panthor_gpu_init(ptdev);
	if (ret)
		goto err_unplug_pwr;

	ret = panthor_gpu_coherency_init(ptdev);
	if (ret)
		goto err_unplug_gpu;

	ret = panthor_mmu_init(ptdev);
	if (ret)
		goto err_unplug_gpu;

	ret = panthor_fw_init(ptdev);
	if (ret)
		goto err_unplug_mmu;

	ret = panthor_sched_init(ptdev);
	if (ret)
		goto err_unplug_fw;

	/* ~3 frames */
	pm_runtime_set_autosuspend_delay(ptdev->base.dev, 50);
	pm_runtime_use_autosuspend(ptdev->base.dev);

	ret = drm_dev_register(&ptdev->base, 0);
	if (ret)
		goto err_disable_autosuspend;

	pm_runtime_put_autosuspend(ptdev->base.dev);
	return 0;

err_disable_autosuspend:
	pm_runtime_dont_use_autosuspend(ptdev->base.dev);
	panthor_sched_unplug(ptdev);

err_unplug_fw:
	panthor_fw_unplug(ptdev);

err_unplug_mmu:
	panthor_mmu_unplug(ptdev);

err_unplug_gpu:
	panthor_gpu_unplug(ptdev);

err_unplug_pwr:
	panthor_pwr_unplug(ptdev);

err_rpm_put:
	pm_runtime_put_sync_suspend(ptdev->base.dev);
	return ret;
}

#define PANTHOR_EXCEPTION(id) \
	[DRM_PANTHOR_EXCEPTION_ ## id] = { \
		.name = #id, \
	}

struct panthor_exception_info {
	const char *name;
};

static const struct panthor_exception_info panthor_exception_infos[] = {
	PANTHOR_EXCEPTION(OK),
	PANTHOR_EXCEPTION(TERMINATED),
	PANTHOR_EXCEPTION(KABOOM),
	PANTHOR_EXCEPTION(EUREKA),
	PANTHOR_EXCEPTION(ACTIVE),
	PANTHOR_EXCEPTION(CS_RES_TERM),
	PANTHOR_EXCEPTION(CS_CONFIG_FAULT),
	PANTHOR_EXCEPTION(CS_UNRECOVERABLE),
	PANTHOR_EXCEPTION(CS_ENDPOINT_FAULT),
	PANTHOR_EXCEPTION(CS_BUS_FAULT),
	PANTHOR_EXCEPTION(CS_INSTR_INVALID),
	PANTHOR_EXCEPTION(CS_CALL_STACK_OVERFLOW),
	PANTHOR_EXCEPTION(CS_INHERIT_FAULT),
	PANTHOR_EXCEPTION(INSTR_INVALID_PC),
	PANTHOR_EXCEPTION(INSTR_INVALID_ENC),
	PANTHOR_EXCEPTION(INSTR_BARRIER_FAULT),
	PANTHOR_EXCEPTION(DATA_INVALID_FAULT),
	PANTHOR_EXCEPTION(TILE_RANGE_FAULT),
	PANTHOR_EXCEPTION(ADDR_RANGE_FAULT),
	PANTHOR_EXCEPTION(IMPRECISE_FAULT),
	PANTHOR_EXCEPTION(OOM),
	PANTHOR_EXCEPTION(CSF_FW_INTERNAL_ERROR),
	PANTHOR_EXCEPTION(CSF_RES_EVICTION_TIMEOUT),
	PANTHOR_EXCEPTION(GPU_BUS_FAULT),
	PANTHOR_EXCEPTION(GPU_SHAREABILITY_FAULT),
	PANTHOR_EXCEPTION(SYS_SHAREABILITY_FAULT),
	PANTHOR_EXCEPTION(GPU_CACHEABILITY_FAULT),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_0),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_1),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_2),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_3),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_4),
	PANTHOR_EXCEPTION(PERM_FAULT_0),
	PANTHOR_EXCEPTION(PERM_FAULT_1),
	PANTHOR_EXCEPTION(PERM_FAULT_2),
	PANTHOR_EXCEPTION(PERM_FAULT_3),
	PANTHOR_EXCEPTION(ACCESS_FLAG_1),
	PANTHOR_EXCEPTION(ACCESS_FLAG_2),
	PANTHOR_EXCEPTION(ACCESS_FLAG_3),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_IN),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT0),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT1),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT2),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT3),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_0),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_1),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_2),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_3),
};

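/**
 * panthor_exception_name() - Convert an exception code to a human-readable string
 * @ptdev: Device.
 * @exception_code: Exception code reported by the HW/FW.
 *
 * Return: The exception name, or "Unknown exception type" if the code is not
 * recognized.
 */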
const char *panthor_exception_name(struct panthor_device *ptdev, u32 exception_code)
{
	if (exception_code >= ARRAY_SIZE(panthor_exception_infos) ||
	    !panthor_exception_infos[exception_code].name)
		return "Unknown exception type";

	return panthor_exception_infos[exception_code].name;
}

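/* Fault handler for the user MMIO region: map the real LATEST_FLUSH register
 * when the device is active, and the dummy flush page when it is suspended.
 */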
static vm_fault_t panthor_mmio_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct panthor_device *ptdev = vma->vm_private_data;
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long pfn;
	pgprot_t pgprot;
	vm_fault_t ret;
	bool active;
	int cookie;

	if (!drm_dev_enter(&ptdev->base, &cookie))
		return VM_FAULT_SIGBUS;

	mutex_lock(&ptdev->pm.mmio_lock);
	active = atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE;

	switch (offset) {
	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
		if (active)
			pfn = __phys_to_pfn(ptdev->phys_addr + CSF_GPU_LATEST_FLUSH_ID);
		else
			pfn = page_to_pfn(ptdev->pm.dummy_latest_flush);
		break;

	default:
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	pgprot = vma->vm_page_prot;
	if (active)
		pgprot = pgprot_noncached(pgprot);

	ret = vmf_insert_pfn_prot(vma, vmf->address, pfn, pgprot);

out_unlock:
	mutex_unlock(&ptdev->pm.mmio_lock);
	drm_dev_exit(cookie);
	return ret;
}

static const struct vm_operations_struct panthor_mmio_vm_ops = {
	.fault = panthor_mmio_vm_fault,
};

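/**
 * panthor_device_mmap_io() - Map the user MMIO region
 * @ptdev: Device.
 * @vma: VMA describing the mapping requested by userspace.
 *
 * Validates the request and defers the actual mapping to the fault handler
 * above.
 *
 * Return: 0 on success, a negative error code otherwise.
 */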
int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *vma)
{
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;

	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	switch (offset) {
	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
		    (vma->vm_flags & (VM_WRITE | VM_EXEC)))
			return -EINVAL;
		vm_flags_clear(vma, VM_MAYWRITE);

		break;

	default:
		return -EINVAL;
	}

	/* Defer actual mapping to the fault handler. */
	vma->vm_private_data = ptdev;
	vma->vm_ops = &panthor_mmio_vm_ops;
	vm_flags_set(vma,
		     VM_IO | VM_DONTCOPY | VM_DONTEXPAND |
		     VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
	return 0;
}

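/* Resume the PWR, GPU, MMU and FW blocks in order. If the FW fails to resume,
 * suspend the blocks that were already brought back up.
 */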
static int panthor_device_resume_hw_components(struct panthor_device *ptdev)
{
	int ret;

	panthor_pwr_resume(ptdev);
	panthor_gpu_resume(ptdev);
	panthor_mmu_resume(ptdev);

	ret = panthor_fw_resume(ptdev);
	if (!ret)
		return 0;

	panthor_mmu_suspend(ptdev);
	panthor_gpu_suspend(ptdev);
	panthor_pwr_suspend(ptdev);
	return ret;
}

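/**
 * panthor_device_resume() - PM resume handler
 * @dev: Device to resume.
 *
 * Re-enables the clocks and devfreq, resumes the HW blocks and the scheduler,
 * then invalidates the dummy user MMIO mappings so the real ones get faulted
 * back in on the next access.
 *
 * Return: 0 on success, a negative error code otherwise.
 */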
int panthor_device_resume(struct device *dev)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	int ret, cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_SUSPENDED)
		return -EINVAL;

	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_RESUMING);

	ret = clk_prepare_enable(ptdev->clks.core);
	if (ret)
		goto err_set_suspended;

	ret = clk_prepare_enable(ptdev->clks.stacks);
	if (ret)
		goto err_disable_core_clk;

	ret = clk_prepare_enable(ptdev->clks.coregroup);
	if (ret)
		goto err_disable_stacks_clk;

	panthor_devfreq_resume(ptdev);

	if (panthor_device_is_initialized(ptdev) &&
	    drm_dev_enter(&ptdev->base, &cookie)) {
		/* If there was a reset pending at the time we suspended the
		 * device, we force a slow reset.
		 */
		if (atomic_read(&ptdev->reset.pending)) {
			ptdev->reset.fast = false;
			atomic_set(&ptdev->reset.pending, 0);
		}

		ret = panthor_device_resume_hw_components(ptdev);
		if (ret && ptdev->reset.fast) {
			drm_err(&ptdev->base, "Fast reset failed, trying a slow reset");
			ptdev->reset.fast = false;
			ret = panthor_device_resume_hw_components(ptdev);
		}

		if (!ret)
			panthor_sched_resume(ptdev);

		drm_dev_exit(cookie);

		if (ret)
			goto err_suspend_devfreq;
	}

	/* Clear all IOMEM mappings pointing to this device after we've
	 * resumed. This way the fake mappings pointing to the dummy pages
	 * are removed and the real iomem mapping will be restored on next
	 * access.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
	mutex_unlock(&ptdev->pm.mmio_lock);
	return 0;

err_suspend_devfreq:
	panthor_devfreq_suspend(ptdev);
	clk_disable_unprepare(ptdev->clks.coregroup);

err_disable_stacks_clk:
	clk_disable_unprepare(ptdev->clks.stacks);

err_disable_core_clk:
	clk_disable_unprepare(ptdev->clks.core);

err_set_suspended:
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	atomic_set(&ptdev->pm.recovery_needed, 1);
	return ret;
}

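/**
 * panthor_device_suspend() - PM suspend handler
 * @dev: Device to suspend.
 *
 * Invalidates the user MMIO mappings, suspends the scheduler and the HW
 * blocks, then shuts down devfreq and the clocks.
 *
 * Return: 0 on success, a negative error code otherwise.
 */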
int panthor_device_suspend(struct device *dev)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	int cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
		return -EINVAL;

	/* Clear all IOMEM mappings pointing to this device before we
	 * shut down the power-domain and clocks. Failing to do that results
	 * in external aborts when the process accesses the iomem region.
	 * We change the state and call unmap_mapping_range() with the
	 * mmio_lock held to make sure the vm_fault handler won't set up
	 * invalid mappings.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDING);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	mutex_unlock(&ptdev->pm.mmio_lock);

	if (panthor_device_is_initialized(ptdev) &&
	    drm_dev_enter(&ptdev->base, &cookie)) {
		cancel_work_sync(&ptdev->reset.work);

		/* We prepare everything as if we were resetting the GPU.
		 * The end of the reset will happen in the resume path though.
		 */
		panthor_sched_suspend(ptdev);
		panthor_fw_suspend(ptdev);
		panthor_mmu_suspend(ptdev);
		panthor_gpu_suspend(ptdev);
		panthor_pwr_suspend(ptdev);
		drm_dev_exit(cookie);
	}

	panthor_devfreq_suspend(ptdev);

	clk_disable_unprepare(ptdev->clks.coregroup);
	clk_disable_unprepare(ptdev->clks.stacks);
	clk_disable_unprepare(ptdev->clks.core);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	return 0;
}