// SPDX-License-Identifier: GPL-2.0 or MIT
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
/* Copyright 2023 Collabora ltd. */

#include <linux/clk.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include <linux/reset.h>

#include <drm/drm_drv.h>
#include <drm/drm_managed.h>

#include "panthor_devfreq.h"
#include "panthor_device.h"
#include "panthor_fw.h"
#include "panthor_gpu.h"
#include "panthor_mmu.h"
#include "panthor_regs.h"
#include "panthor_sched.h"

static int panthor_gpu_coherency_init(struct panthor_device *ptdev)
{
	ptdev->coherent = device_get_dma_attr(ptdev->base.dev) == DEV_DMA_COHERENT;

	if (!ptdev->coherent)
		return 0;

	/* Check if the ACE-Lite coherency protocol is actually supported by the GPU.
	 * ACE protocol has never been supported for command stream frontend GPUs.
	 */
	if ((gpu_read(ptdev, GPU_COHERENCY_FEATURES) &
	     GPU_COHERENCY_PROT_BIT(ACE_LITE)))
		return 0;

	drm_err(&ptdev->base, "Coherency not supported by the device");
	return -ENOTSUPP;
}

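/* Grab the GPU clocks: the core clock is mandatory, while the "stacks"
 * and "coregroup" clocks are optional and are left NULL when the platform
 * doesn't describe them.
 */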
static int panthor_clk_init(struct panthor_device *ptdev)
{
	ptdev->clks.core = devm_clk_get(ptdev->base.dev, NULL);
	if (IS_ERR(ptdev->clks.core))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.core),
				     "get 'core' clock failed");

	ptdev->clks.stacks = devm_clk_get_optional(ptdev->base.dev, "stacks");
	if (IS_ERR(ptdev->clks.stacks))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.stacks),
				     "get 'stacks' clock failed");

	ptdev->clks.coregroup = devm_clk_get_optional(ptdev->base.dev, "coregroup");
	if (IS_ERR(ptdev->clks.coregroup))
		return dev_err_probe(ptdev->base.dev,
				     PTR_ERR(ptdev->clks.coregroup),
				     "get 'coregroup' clock failed");

	drm_info(&ptdev->base, "clock rate = %lu\n", clk_get_rate(ptdev->clks.core));
	return 0;
}

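/**
 * panthor_device_unplug() - Unplug the GPU device
 * @ptdev: Device to unplug.
 *
 * Unplugs the DRM device so further HW accesses are rejected, then tears
 * down the scheduler, FW, MMU and GPU blocks. Safe to call concurrently
 * from the reset path and the platform device removal path: the first
 * caller does the teardown, later callers wait on ptdev->unplug.done.
 */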
void panthor_device_unplug(struct panthor_device *ptdev)
{
	/* This function can be called from two different paths: the reset work
	 * and the platform device remove callback. drm_dev_unplug() doesn't
	 * deal with concurrent callers, so we have to protect drm_dev_unplug()
	 * calls with our own lock, and bail out if the device is already
	 * unplugged.
	 */
	mutex_lock(&ptdev->unplug.lock);
	if (drm_dev_is_unplugged(&ptdev->base)) {
		/* Someone beat us to it; release the lock and wait for the
		 * unplug operation to be reported as done.
		 */
		mutex_unlock(&ptdev->unplug.lock);
		wait_for_completion(&ptdev->unplug.done);
		return;
	}

	/* Call drm_dev_unplug() so any access to HW blocks happening after
	 * that point gets rejected.
	 */
	drm_dev_unplug(&ptdev->base);

	/* We do the rest of the unplug with the unplug lock released, since
	 * future callers will wait on ptdev->unplug.done anyway.
	 */
	mutex_unlock(&ptdev->unplug.lock);

	drm_WARN_ON(&ptdev->base, pm_runtime_get_sync(ptdev->base.dev) < 0);

	/* Now, try to cleanly shut down the GPU before the device resources
	 * get reclaimed.
	 */
	panthor_sched_unplug(ptdev);
	panthor_fw_unplug(ptdev);
	panthor_mmu_unplug(ptdev);
	panthor_gpu_unplug(ptdev);

	pm_runtime_dont_use_autosuspend(ptdev->base.dev);
	pm_runtime_put_sync_suspend(ptdev->base.dev);

	/* If PM is disabled, we need to call the suspend handler manually. */
	if (!IS_ENABLED(CONFIG_PM))
		panthor_device_suspend(ptdev->base.dev);

	/* Report the unplug operation as done to unblock concurrent
	 * panthor_device_unplug() callers.
	 */
	complete_all(&ptdev->unplug.done);
}

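/* drmm cleanup action: make sure no reset work is pending or running
 * before the reset workqueue goes away with the DRM device.
 */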
static void panthor_device_reset_cleanup(struct drm_device *ddev, void *data)
{
	struct panthor_device *ptdev = container_of(ddev, struct panthor_device, base);

	cancel_work_sync(&ptdev->reset.work);
	destroy_workqueue(ptdev->reset.wq);
}

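/* Reset work: nothing to do if the device isn't active; otherwise the
 * scheduler, FW and MMU are quiesced, the GPU is soft-reset and everything
 * is brought back up. If the MCU can't be rebooted afterwards, the device
 * is unplugged and becomes unusable.
 */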
static void panthor_device_reset_work(struct work_struct *work)
{
	struct panthor_device *ptdev = container_of(work, struct panthor_device, reset.work);
	int ret = 0, cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE) {
		/*
		 * No need for a reset as the device has been (or will be)
		 * powered down
		 */
		atomic_set(&ptdev->reset.pending, 0);
		return;
	}

	if (!drm_dev_enter(&ptdev->base, &cookie))
		return;

	panthor_sched_pre_reset(ptdev);
	panthor_fw_pre_reset(ptdev, true);
	panthor_mmu_pre_reset(ptdev);
	panthor_gpu_soft_reset(ptdev);
	panthor_gpu_l2_power_on(ptdev);
	panthor_mmu_post_reset(ptdev);
	ret = panthor_fw_post_reset(ptdev);
	atomic_set(&ptdev->reset.pending, 0);
	panthor_sched_post_reset(ptdev, ret != 0);
	drm_dev_exit(cookie);

	if (ret) {
		panthor_device_unplug(ptdev);
		drm_err(&ptdev->base, "Failed to boot MCU after reset, making device unusable.");
	}
}

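/* The scheduler is the last component brought up in panthor_device_init(),
 * so a non-NULL scheduler pointer means initialization completed.
 */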
static bool panthor_device_is_initialized(struct panthor_device *ptdev)
{
	return !!ptdev->scheduler;
}

static void panthor_device_free_page(struct drm_device *ddev, void *data)
{
	__free_page(data);
}

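/**
 * panthor_device_init() - Initialize a panthor device
 * @ptdev: Device to initialize.
 *
 * Sets up coherency, the unplug/MMIO locks, the dummy flush page, the
 * reset workqueue, clocks, devfreq, the MMIO region and runtime PM, then
 * brings up the GPU, MMU, FW and scheduler blocks before registering the
 * DRM device.
 *
 * Return: 0 on success, a negative error code otherwise.
 */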
int panthor_device_init(struct panthor_device *ptdev)
{
	u32 *dummy_page_virt;
	struct resource *res;
	struct page *p;
	int ret;

	ret = panthor_gpu_coherency_init(ptdev);
	if (ret)
		return ret;

	init_completion(&ptdev->unplug.done);
	ret = drmm_mutex_init(&ptdev->base, &ptdev->unplug.lock);
	if (ret)
		return ret;

	ret = drmm_mutex_init(&ptdev->base, &ptdev->pm.mmio_lock);
	if (ret)
		return ret;

	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	p = alloc_page(GFP_KERNEL | __GFP_ZERO);
	if (!p)
		return -ENOMEM;

	ptdev->pm.dummy_latest_flush = p;
	dummy_page_virt = page_address(p);
	ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_free_page,
				       ptdev->pm.dummy_latest_flush);
	if (ret)
		return ret;

	/*
	 * Set the dummy page holding the latest flush to 1. This will cause the
	 * flush to be avoided, as we know it isn't necessary if the submission
	 * happens while the dummy page is mapped. Zero cannot be used because
	 * that means 'always flush'.
	 */
	*dummy_page_virt = 1;

	INIT_WORK(&ptdev->reset.work, panthor_device_reset_work);
	ptdev->reset.wq = alloc_ordered_workqueue("panthor-reset-wq", 0);
	if (!ptdev->reset.wq)
		return -ENOMEM;

	ret = drmm_add_action_or_reset(&ptdev->base, panthor_device_reset_cleanup, NULL);
	if (ret)
		return ret;

	ret = panthor_clk_init(ptdev);
	if (ret)
		return ret;

	ret = panthor_devfreq_init(ptdev);
	if (ret)
		return ret;

	ptdev->iomem = devm_platform_get_and_ioremap_resource(to_platform_device(ptdev->base.dev),
							      0, &res);
	if (IS_ERR(ptdev->iomem))
		return PTR_ERR(ptdev->iomem);

	ptdev->phys_addr = res->start;

	ret = devm_pm_runtime_enable(ptdev->base.dev);
	if (ret)
		return ret;

	ret = pm_runtime_resume_and_get(ptdev->base.dev);
	if (ret)
		return ret;

	/* If PM is disabled, we need to call panthor_device_resume() manually. */
	if (!IS_ENABLED(CONFIG_PM)) {
		ret = panthor_device_resume(ptdev->base.dev);
		if (ret)
			return ret;
	}

	ret = panthor_gpu_init(ptdev);
	if (ret)
		goto err_rpm_put;

	ret = panthor_mmu_init(ptdev);
	if (ret)
		goto err_unplug_gpu;

	ret = panthor_fw_init(ptdev);
	if (ret)
		goto err_unplug_mmu;

	ret = panthor_sched_init(ptdev);
	if (ret)
		goto err_unplug_fw;

	/* ~3 frames */
	pm_runtime_set_autosuspend_delay(ptdev->base.dev, 50);
	pm_runtime_use_autosuspend(ptdev->base.dev);

	ret = drm_dev_register(&ptdev->base, 0);
	if (ret)
		goto err_disable_autosuspend;

	pm_runtime_put_autosuspend(ptdev->base.dev);
	return 0;

err_disable_autosuspend:
	pm_runtime_dont_use_autosuspend(ptdev->base.dev);
	panthor_sched_unplug(ptdev);

err_unplug_fw:
	panthor_fw_unplug(ptdev);

err_unplug_mmu:
	panthor_mmu_unplug(ptdev);

err_unplug_gpu:
	panthor_gpu_unplug(ptdev);

err_rpm_put:
	pm_runtime_put_sync_suspend(ptdev->base.dev);
	return ret;
}

#define PANTHOR_EXCEPTION(id) \
	[DRM_PANTHOR_EXCEPTION_ ## id] = { \
		.name = #id, \
	}

struct panthor_exception_info {
	const char *name;
};

static const struct panthor_exception_info panthor_exception_infos[] = {
	PANTHOR_EXCEPTION(OK),
	PANTHOR_EXCEPTION(TERMINATED),
	PANTHOR_EXCEPTION(KABOOM),
	PANTHOR_EXCEPTION(EUREKA),
	PANTHOR_EXCEPTION(ACTIVE),
	PANTHOR_EXCEPTION(CS_RES_TERM),
	PANTHOR_EXCEPTION(CS_CONFIG_FAULT),
	PANTHOR_EXCEPTION(CS_UNRECOVERABLE),
	PANTHOR_EXCEPTION(CS_ENDPOINT_FAULT),
	PANTHOR_EXCEPTION(CS_BUS_FAULT),
	PANTHOR_EXCEPTION(CS_INSTR_INVALID),
	PANTHOR_EXCEPTION(CS_CALL_STACK_OVERFLOW),
	PANTHOR_EXCEPTION(CS_INHERIT_FAULT),
	PANTHOR_EXCEPTION(INSTR_INVALID_PC),
	PANTHOR_EXCEPTION(INSTR_INVALID_ENC),
	PANTHOR_EXCEPTION(INSTR_BARRIER_FAULT),
	PANTHOR_EXCEPTION(DATA_INVALID_FAULT),
	PANTHOR_EXCEPTION(TILE_RANGE_FAULT),
	PANTHOR_EXCEPTION(ADDR_RANGE_FAULT),
	PANTHOR_EXCEPTION(IMPRECISE_FAULT),
	PANTHOR_EXCEPTION(OOM),
	PANTHOR_EXCEPTION(CSF_FW_INTERNAL_ERROR),
	PANTHOR_EXCEPTION(CSF_RES_EVICTION_TIMEOUT),
	PANTHOR_EXCEPTION(GPU_BUS_FAULT),
	PANTHOR_EXCEPTION(GPU_SHAREABILITY_FAULT),
	PANTHOR_EXCEPTION(SYS_SHAREABILITY_FAULT),
	PANTHOR_EXCEPTION(GPU_CACHEABILITY_FAULT),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_0),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_1),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_2),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_3),
	PANTHOR_EXCEPTION(TRANSLATION_FAULT_4),
	PANTHOR_EXCEPTION(PERM_FAULT_0),
	PANTHOR_EXCEPTION(PERM_FAULT_1),
	PANTHOR_EXCEPTION(PERM_FAULT_2),
	PANTHOR_EXCEPTION(PERM_FAULT_3),
	PANTHOR_EXCEPTION(ACCESS_FLAG_1),
	PANTHOR_EXCEPTION(ACCESS_FLAG_2),
	PANTHOR_EXCEPTION(ACCESS_FLAG_3),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_IN),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT0),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT1),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT2),
	PANTHOR_EXCEPTION(ADDR_SIZE_FAULT_OUT3),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_0),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_1),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_2),
	PANTHOR_EXCEPTION(MEM_ATTR_FAULT_3),
};

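/**
 * panthor_exception_name() - Translate an exception code into a human-readable string
 * @ptdev: Device.
 * @exception_code: One of the DRM_PANTHOR_EXCEPTION_* codes.
 *
 * Return: the name registered in panthor_exception_infos[], or
 * "Unknown exception type" for out-of-range codes and holes in the table.
 */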
const char *panthor_exception_name(struct panthor_device *ptdev, u32 exception_code)
{
	if (exception_code >= ARRAY_SIZE(panthor_exception_infos) ||
	    !panthor_exception_infos[exception_code].name)
		return "Unknown exception type";

	return panthor_exception_infos[exception_code].name;
}

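/* Fault handler for the user MMIO region. When the GPU is active, the
 * FLUSH_ID page maps the real CSF_GPU_LATEST_FLUSH_ID register; when it
 * is suspended, the dummy page (holding a non-zero flush ID) is mapped
 * instead, so userspace never touches powered-down I/O memory.
 */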
static vm_fault_t panthor_mmio_vm_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct panthor_device *ptdev = vma->vm_private_data;
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;
	unsigned long pfn;
	pgprot_t pgprot;
	vm_fault_t ret;
	bool active;
	int cookie;

	if (!drm_dev_enter(&ptdev->base, &cookie))
		return VM_FAULT_SIGBUS;

	mutex_lock(&ptdev->pm.mmio_lock);
	active = atomic_read(&ptdev->pm.state) == PANTHOR_DEVICE_PM_STATE_ACTIVE;

	switch (offset) {
	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
		if (active)
			pfn = __phys_to_pfn(ptdev->phys_addr + CSF_GPU_LATEST_FLUSH_ID);
		else
			pfn = page_to_pfn(ptdev->pm.dummy_latest_flush);
		break;

	default:
		ret = VM_FAULT_SIGBUS;
		goto out_unlock;
	}

	pgprot = vma->vm_page_prot;
	if (active)
		pgprot = pgprot_noncached(pgprot);

	ret = vmf_insert_pfn_prot(vma, vmf->address, pfn, pgprot);

out_unlock:
	mutex_unlock(&ptdev->pm.mmio_lock);
	drm_dev_exit(cookie);
	return ret;
}

static const struct vm_operations_struct panthor_mmio_vm_ops = {
	.fault = panthor_mmio_vm_fault,
};

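/**
 * panthor_device_mmap_io() - mmap the device's user MMIO region
 * @ptdev: Device.
 * @vma: The VMA describing the mapping.
 *
 * Only the read-only FLUSH_ID page is currently supported. The actual PFN
 * insertion is deferred to panthor_mmio_vm_fault() so the mapping can be
 * switched between the real register and the dummy page across
 * suspend/resume.
 *
 * Return: 0 on success, a negative error code otherwise.
 */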
int panthor_device_mmap_io(struct panthor_device *ptdev, struct vm_area_struct *vma)
{
	u64 offset = (u64)vma->vm_pgoff << PAGE_SHIFT;

	if ((vma->vm_flags & VM_SHARED) == 0)
		return -EINVAL;

	switch (offset) {
	case DRM_PANTHOR_USER_FLUSH_ID_MMIO_OFFSET:
		if (vma->vm_end - vma->vm_start != PAGE_SIZE ||
		    (vma->vm_flags & (VM_WRITE | VM_EXEC)))
			return -EINVAL;
		vm_flags_clear(vma, VM_MAYWRITE);

		break;

	default:
		return -EINVAL;
	}

	/* Defer actual mapping to the fault handler. */
	vma->vm_private_data = ptdev;
	vma->vm_ops = &panthor_mmio_vm_ops;
	vm_flags_set(vma,
		     VM_IO | VM_DONTCOPY | VM_DONTEXPAND |
		     VM_NORESERVE | VM_DONTDUMP | VM_PFNMAP);
	return 0;
}

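/* Resume the GPU, MMU and FW blocks in that order. If the FW fails to
 * resume, the MMU and GPU are suspended again so the caller sees a
 * consistent state.
 */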
static int panthor_device_resume_hw_components(struct panthor_device *ptdev)
{
	int ret;

	panthor_gpu_resume(ptdev);
	panthor_mmu_resume(ptdev);

	ret = panthor_fw_resume(ptdev);
	if (!ret)
		return 0;

	panthor_mmu_suspend(ptdev);
	panthor_gpu_suspend(ptdev);
	return ret;
}

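/**
 * panthor_device_resume() - System/runtime resume handler
 * @dev: Device to resume.
 *
 * Re-enables the clocks and devfreq, resumes the HW components (falling
 * back to a slow reset if a fast reset fails), resumes the scheduler, and
 * finally invalidates the dummy MMIO mappings so the next userspace access
 * faults in the real register page.
 *
 * Return: 0 on success, a negative error code otherwise.
 */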
int panthor_device_resume(struct device *dev)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	int ret, cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_SUSPENDED)
		return -EINVAL;

	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_RESUMING);

	ret = clk_prepare_enable(ptdev->clks.core);
	if (ret)
		goto err_set_suspended;

	ret = clk_prepare_enable(ptdev->clks.stacks);
	if (ret)
		goto err_disable_core_clk;

	ret = clk_prepare_enable(ptdev->clks.coregroup);
	if (ret)
		goto err_disable_stacks_clk;

	panthor_devfreq_resume(ptdev);

	if (panthor_device_is_initialized(ptdev) &&
	    drm_dev_enter(&ptdev->base, &cookie)) {
		ret = panthor_device_resume_hw_components(ptdev);
		if (ret && ptdev->reset.fast) {
			drm_err(&ptdev->base, "Fast reset failed, trying a slow reset");
			ptdev->reset.fast = false;
			ret = panthor_device_resume_hw_components(ptdev);
		}

		if (!ret)
			panthor_sched_resume(ptdev);

		drm_dev_exit(cookie);

		if (ret)
			goto err_suspend_devfreq;
	}

	if (atomic_read(&ptdev->reset.pending))
		queue_work(ptdev->reset.wq, &ptdev->reset.work);

	/* Clear all IOMEM mappings pointing to this device after we've
	 * resumed. This way the fake mappings pointing to the dummy pages
	 * are removed and the real iomem mapping will be restored on next
	 * access.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_ACTIVE);
	mutex_unlock(&ptdev->pm.mmio_lock);
	return 0;

err_suspend_devfreq:
	panthor_devfreq_suspend(ptdev);
	clk_disable_unprepare(ptdev->clks.coregroup);

err_disable_stacks_clk:
	clk_disable_unprepare(ptdev->clks.stacks);

err_disable_core_clk:
	clk_disable_unprepare(ptdev->clks.core);

err_set_suspended:
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	atomic_set(&ptdev->pm.recovery_needed, 1);
	return ret;
}

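/**
 * panthor_device_suspend() - System/runtime suspend handler
 * @dev: Device to suspend.
 *
 * Tears down the user MMIO mappings first, so faults hit the dummy page
 * instead of powered-down I/O memory, then suspends the scheduler, FW,
 * MMU, GPU, devfreq and clocks.
 *
 * Return: 0 on success, -EINVAL if the device wasn't active.
 */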
int panthor_device_suspend(struct device *dev)
{
	struct panthor_device *ptdev = dev_get_drvdata(dev);
	int cookie;

	if (atomic_read(&ptdev->pm.state) != PANTHOR_DEVICE_PM_STATE_ACTIVE)
		return -EINVAL;

	/* Clear all IOMEM mappings pointing to this device before we
	 * shut down the power-domain and clocks. Failing to do that results
	 * in external aborts when the process accesses the iomem region.
	 * We change the state and call unmap_mapping_range() with the
	 * mmio_lock held to make sure the vm_fault handler won't set up
	 * invalid mappings.
	 */
	mutex_lock(&ptdev->pm.mmio_lock);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDING);
	unmap_mapping_range(ptdev->base.anon_inode->i_mapping,
			    DRM_PANTHOR_USER_MMIO_OFFSET, 0, 1);
	mutex_unlock(&ptdev->pm.mmio_lock);

	if (panthor_device_is_initialized(ptdev) &&
	    drm_dev_enter(&ptdev->base, &cookie)) {
		cancel_work_sync(&ptdev->reset.work);

		/* We prepare everything as if we were resetting the GPU.
		 * The end of the reset will happen in the resume path though.
		 */
		panthor_sched_suspend(ptdev);
		panthor_fw_suspend(ptdev);
		panthor_mmu_suspend(ptdev);
		panthor_gpu_suspend(ptdev);
		drm_dev_exit(cookie);
	}

	panthor_devfreq_suspend(ptdev);

	clk_disable_unprepare(ptdev->clks.coregroup);
	clk_disable_unprepare(ptdev->clks.stacks);
	clk_disable_unprepare(ptdev->clks.core);
	atomic_set(&ptdev->pm.state, PANTHOR_DEVICE_PM_STATE_SUSPENDED);
	return 0;
}