// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <drm/panfrost_drm.h>
#include <drm/drm_print.h>

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)

static u64 mair_to_memattr(u64 mair, bool coherent)
{
        u64 memattr = 0;
        u32 i;

        for (i = 0; i < 8; i++) {
                u8 in_attr = mair >> (8 * i), out_attr;
                u8 outer = in_attr >> 4, inner = in_attr & 0xf;

                /* For caching to be enabled, the inner and outer caching
                 * policies both have to be write-back; if either of them is
                 * write-through or non-cacheable, we just choose
                 * non-cacheable. Device memory is also translated to
                 * non-cacheable.
                 */
                if (!(outer & 3) || !(outer & 4) || !(inner & 4)) {
                        out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_NC |
                                   AS_MEMATTR_AARCH64_SH_MIDGARD_INNER |
                                   AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(false, false);
                } else {
                        out_attr = AS_MEMATTR_AARCH64_INNER_OUTER_WB |
                                   AS_MEMATTR_AARCH64_INNER_ALLOC_EXPL(inner & 1, inner & 2);
                        /* Use SH_MIDGARD_INNER mode when the device isn't
                         * coherent, so SH_IS, which is used when IOMMU_CACHE
                         * is set, maps to Mali's internal-shareable mode. As
                         * per the Mali spec, inner and outer-shareable modes
                         * aren't allowed for WB memory when coherency is
                         * disabled.
                         * Use SH_CPU_INNER mode when coherency is enabled, so
                         * that SH_IS actually maps to the standard definition
                         * of inner-shareable.
                         */
                        if (!coherent)
                                out_attr |= AS_MEMATTR_AARCH64_SH_MIDGARD_INNER;
                        else
                                out_attr |= AS_MEMATTR_AARCH64_SH_CPU_INNER;
                }

                memattr |= (u64)out_attr << (8 * i);
        }

        return memattr;
}

static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
        int ret;
        u32 val;

        /* Wait for the MMU status to indicate there is no active command, in
         * case one is pending.
         */
        ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
                        val, !(val & AS_STATUS_AS_ACTIVE), 10, 100000);

        if (ret) {
                /* The GPU hung, let's trigger a reset */
                panfrost_device_schedule_reset(pfdev);
                dev_err(pfdev->base.dev, "AS_ACTIVE bit stuck\n");
        }

        return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
        int status;

        /* write AS_COMMAND when MMU is ready to accept another command */
        status = wait_ready(pfdev, as_nr);
        if (!status)
                mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

        return status;
}

static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
                        u64 region_start, u64 size)
{
        u8 region_width;
        u64 region;
        u64 region_end = region_start + size;

        if (!size)
                return;

        /*
         * The locked region is a naturally aligned power-of-2 block encoded
         * as its log2 minus 1.
         * Calculate the desired start/end and look for the highest bit which
         * differs. The smallest naturally aligned block must include this bit
         * change; the desired region starts with this bit (and subsequent
         * bits) zeroed and ends with the bit (and subsequent bits) set to one.
         */
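        /*
         * Worked example (assuming AS_LOCK_REGION_MIN_SIZE is no larger than
         * 2 MiB): locking 2 MiB at 0x200000 gives region_start ^
         * (region_end - 1) == 0x200000 ^ 0x3fffff == 0x1fffff, fls64()
         * returns 21 and region_width is encoded as 20, i.e. a naturally
         * aligned 2^21-byte (2 MiB) lock region starting at 0x200000.
         */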
        region_width = max(fls64(region_start ^ (region_end - 1)),
                           const_ilog2(AS_LOCK_REGION_MIN_SIZE)) - 1;

        /*
         * Mask off the low bits of region_start (which would be ignored by
         * the hardware anyway)
         */
        region_start &= GENMASK_ULL(63, region_width);

        region = region_width | region_start;

        /* Lock the region that needs to be updated */
        mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), lower_32_bits(region));
        mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), upper_32_bits(region));
        write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}

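/*
 * Issue an MMU command on an address space: lock the target region (unless
 * the command is an unlock), write the command and wait for it to complete.
 * The "_locked" variant expects the caller to hold pfdev->as_lock already.
 */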
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
                                      u64 iova, u64 size, u32 op)
{
        if (as_nr < 0)
                return 0;

        if (op != AS_COMMAND_UNLOCK)
                lock_region(pfdev, as_nr, iova, size);

        /* Run the MMU operation */
        write_cmd(pfdev, as_nr, op);

        /* Wait for the flush to complete */
        return wait_ready(pfdev, as_nr);
}

static int mmu_hw_do_operation(struct panfrost_device *pfdev,
                               struct panfrost_mmu *mmu,
                               u64 iova, u64 size, u32 op)
{
        int ret;

        spin_lock(&pfdev->as_lock);
        ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
        spin_unlock(&pfdev->as_lock);
        return ret;
}

static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
        int as_nr = mmu->as;
        u64 transtab = mmu->cfg.transtab;
        u64 memattr = mmu->cfg.memattr;
        u64 transcfg = mmu->cfg.transcfg;

        mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

        mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), lower_32_bits(transtab));
        mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), upper_32_bits(transtab));

        /* Need to revisit mem attrs.
         * NC is the default, Mali driver is inner WT.
         */
        mmu_write(pfdev, AS_MEMATTR_LO(as_nr), lower_32_bits(memattr));
        mmu_write(pfdev, AS_MEMATTR_HI(as_nr), upper_32_bits(memattr));

        mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), lower_32_bits(transcfg));
        mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), upper_32_bits(transcfg));

        write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
        mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0ULL, AS_COMMAND_FLUSH_MEM);

        mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
        mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

        mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
        mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

        mmu_write(pfdev, AS_TRANSCFG_LO(as_nr), AS_TRANSCFG_ADRMODE_UNMAPPED);
        mmu_write(pfdev, AS_TRANSCFG_HI(as_nr), 0);

        write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static int mmu_cfg_init_mali_lpae(struct panfrost_mmu *mmu)
{
        struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;

        /* TODO: The following fields are duplicated between the MMU and Page
         * Table config structs. Ideally, they should be kept in one place.
         */
        mmu->cfg.transtab = pgtbl_cfg->arm_mali_lpae_cfg.transtab;
        mmu->cfg.memattr = pgtbl_cfg->arm_mali_lpae_cfg.memattr;
        mmu->cfg.transcfg = AS_TRANSCFG_ADRMODE_LEGACY;

        return 0;
}

static int mmu_cfg_init_aarch64_4k(struct panfrost_mmu *mmu)
{
        struct io_pgtable_cfg *pgtbl_cfg = &mmu->pgtbl_cfg;
        struct panfrost_device *pfdev = mmu->pfdev;

        if (drm_WARN_ON(&pfdev->base, pgtbl_cfg->arm_lpae_s1_cfg.ttbr &
                        ~AS_TRANSTAB_AARCH64_4K_ADDR_MASK))
                return -EINVAL;

        mmu->cfg.transtab = pgtbl_cfg->arm_lpae_s1_cfg.ttbr;

        mmu->cfg.memattr = mair_to_memattr(pgtbl_cfg->arm_lpae_s1_cfg.mair,
                                           pgtbl_cfg->coherent_walk);

        mmu->cfg.transcfg = AS_TRANSCFG_PTW_MEMATTR_WB |
                            AS_TRANSCFG_PTW_RA |
                            AS_TRANSCFG_ADRMODE_AARCH64_4K |
                            AS_TRANSCFG_INA_BITS(55 - pgtbl_cfg->ias);
        if (pgtbl_cfg->coherent_walk)
                mmu->cfg.transcfg |= AS_TRANSCFG_PTW_SH_OS;

        return 0;
}

static int panfrost_mmu_cfg_init(struct panfrost_mmu *mmu,
                                 enum io_pgtable_fmt fmt)
{
        struct panfrost_device *pfdev = mmu->pfdev;

        switch (fmt) {
        case ARM_64_LPAE_S1:
                return mmu_cfg_init_aarch64_4k(mmu);
        case ARM_MALI_LPAE:
                return mmu_cfg_init_mali_lpae(mmu);
        default:
                /* This should never happen */
                drm_WARN(&pfdev->base, 1, "Invalid pgtable format");
                return -EINVAL;
        }
}

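/*
 * Assign a hardware address space (AS) to an MMU context. If the context
 * already owns an AS, its reference count is bumped and it is moved to the
 * head of the LRU list; otherwise a free AS is picked, or the least recently
 * used idle one is reclaimed from its current owner.
 */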
int panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
        int as;

        spin_lock(&pfdev->as_lock);

        as = mmu->as;
        if (as >= 0) {
                int en = atomic_inc_return(&mmu->as_count);
                u32 mask = BIT(as) | BIT(16 + as);

                /*
                 * AS can be retained by active jobs or a perfcnt context,
                 * hence the '+ 1' here.
                 */
                WARN_ON(en >= (NUM_JOB_SLOTS + 1));

                list_move(&mmu->list, &pfdev->as_lru_list);

                if (pfdev->as_faulty_mask & mask) {
                        /* Unhandled pagefault on this AS, the MMU was
                         * disabled. We need to re-enable the MMU after
                         * clearing+unmasking the AS interrupts.
                         */
                        mmu_write(pfdev, MMU_INT_CLEAR, mask);
                        mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
                        pfdev->as_faulty_mask &= ~mask;
                        panfrost_mmu_enable(pfdev, mmu);
                }

                goto out;
        }

        /* Check for a free AS */
        as = ffz(pfdev->as_alloc_mask);
        if (!(BIT(as) & pfdev->features.as_present)) {
                struct panfrost_mmu *lru_mmu;

                list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
                        if (!atomic_read(&lru_mmu->as_count))
                                break;
                }
                if (WARN_ON(&lru_mmu->list == &pfdev->as_lru_list)) {
                        as = -EBUSY;
                        goto out;
                }

                list_del_init(&lru_mmu->list);
                as = lru_mmu->as;

                WARN_ON(as < 0);
                lru_mmu->as = -1;
        }

        /* Assign the free or reclaimed AS to the FD */
        mmu->as = as;
        set_bit(as, &pfdev->as_alloc_mask);
        atomic_set(&mmu->as_count, 1);
        list_add(&mmu->list, &pfdev->as_lru_list);

        dev_dbg(pfdev->base.dev,
                "Assigned AS%d to mmu %p, alloc_mask=%lx",
                as, mmu, pfdev->as_alloc_mask);

        panfrost_mmu_enable(pfdev, mmu);

out:
        spin_unlock(&pfdev->as_lock);
        return as;
}

void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
        atomic_dec(&mmu->as_count);
        WARN_ON(atomic_read(&mmu->as_count) < 0);
}

void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
        struct panfrost_mmu *mmu, *mmu_tmp;

        clear_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);

        spin_lock(&pfdev->as_lock);

        pfdev->as_alloc_mask = 0;
        pfdev->as_faulty_mask = 0;

        list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
                mmu->as = -1;
                atomic_set(&mmu->as_count, 0);
                list_del_init(&mmu->list);
        }

        spin_unlock(&pfdev->as_lock);

        mmu_write(pfdev, MMU_INT_CLEAR, ~0);
        mmu_write(pfdev, MMU_INT_MASK, ~0);
}

static size_t get_pgsize(u64 addr, size_t size, size_t *count)
{
        /*
         * io-pgtable only operates on multiple pages within a single table
         * entry, so we need to split at boundaries of the table size, i.e.
         * the next block size up. The distance from address A to the next
         * boundary of block size B is logically B - A % B, but in unsigned
         * two's complement where B is a power of two we get the equivalence
         * B - A % B == (B - A) % B == (n * B - A) % B, and choose n = 0 :)
         */
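        /*
         * For example, addr == 0x201000 and size == 0x400000 yields
         * blk_offset == 0x1ff000, so the first call returns SZ_4K with
         * *count == 511; once the address is 2 MiB aligned, subsequent
         * calls can return SZ_2M blocks.
         */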
        size_t blk_offset = -addr % SZ_2M;

        if (blk_offset || size < SZ_2M) {
                *count = min_not_zero(blk_offset, size) / SZ_4K;
                return SZ_4K;
        }
        blk_offset = -addr % SZ_1G ?: SZ_1G;
        *count = min(blk_offset, size) / SZ_2M;
        return SZ_2M;
}

static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
                                     struct panfrost_mmu *mmu,
                                     u64 iova, u64 size)
{
        if (mmu->as < 0)
                return;

        pm_runtime_get_noresume(pfdev->base.dev);

        /* Flush the PTs only if we're already awake */
        if (pm_runtime_active(pfdev->base.dev))
                mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

        pm_runtime_put_autosuspend(pfdev->base.dev);
}

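/*
 * Unmap @len bytes of GPU VA starting at @iova, chunking the range with
 * get_pgsize() so each io-pgtable call stays within a single table entry.
 */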
static void mmu_unmap_range(struct panfrost_mmu *mmu, u64 iova, size_t len)
{
        struct io_pgtable_ops *ops = mmu->pgtbl_ops;
        size_t pgsize, unmapped_len = 0;
        size_t unmapped_page, pgcount;

        while (unmapped_len < len) {
                pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);

                unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
                WARN_ON(unmapped_page != pgsize * pgcount);

                iova += pgsize * pgcount;
                unmapped_len += pgsize * pgcount;
        }
}

static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
                      u64 iova, int prot, struct sg_table *sgt)
{
        unsigned int count;
        struct scatterlist *sgl;
        struct io_pgtable_ops *ops = mmu->pgtbl_ops;
        size_t total_mapped = 0;
        u64 start_iova = iova;
        int ret;

        for_each_sgtable_dma_sg(sgt, sgl, count) {
                unsigned long paddr = sg_dma_address(sgl);
                size_t len = sg_dma_len(sgl);

                dev_dbg(pfdev->base.dev,
                        "map: as=%d, iova=%llx, paddr=%lx, len=%zx",
                        mmu->as, iova, paddr, len);

                while (len) {
                        size_t pgcount, mapped = 0;
                        size_t pgsize = get_pgsize(iova | paddr, len, &pgcount);

                        ret = ops->map_pages(ops, iova, paddr, pgsize, pgcount, prot,
                                             GFP_KERNEL, &mapped);
                        if (ret)
                                goto err_unmap_pages;

                        /* Don't get stuck if things have gone wrong */
                        mapped = max(mapped, pgsize);
                        total_mapped += mapped;
                        iova += mapped;
                        paddr += mapped;
                        len -= mapped;
                }
        }

        panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

        return 0;

err_unmap_pages:
        mmu_unmap_range(mmu, start_iova, total_mapped);
        return ret;
}

int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
        struct panfrost_gem_object *bo = mapping->obj;
        struct drm_gem_shmem_object *shmem = &bo->base;
        struct drm_gem_object *obj = &shmem->base;
        struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
        struct sg_table *sgt;
        int prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE;
        int ret;

        if (WARN_ON(mapping->active))
                return 0;

        if (bo->noexec)
                prot |= IOMMU_NOEXEC;

        sgt = drm_gem_shmem_get_pages_sgt(shmem);
        if (WARN_ON(IS_ERR(sgt)))
                return PTR_ERR(sgt);

        ret = mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
                         prot, sgt);
        if (ret)
                goto err_put_pages;

        mapping->active = true;

        return 0;

err_put_pages:
        drm_gem_shmem_put_pages_locked(shmem);
        return ret;
}

void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
        struct panfrost_gem_object *bo = mapping->obj;
        struct drm_gem_object *obj = &bo->base.base;
        struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
        struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
        u64 iova = mapping->mmnode.start << PAGE_SHIFT;
        size_t len = mapping->mmnode.size << PAGE_SHIFT;
        size_t unmapped_len = 0;

        if (WARN_ON(!mapping->active))
                return;

        dev_dbg(pfdev->base.dev, "unmap: as=%d, iova=%llx, len=%zx",
                mapping->mmu->as, iova, len);

        while (unmapped_len < len) {
                size_t unmapped_page, pgcount;
                size_t pgsize = get_pgsize(iova, len - unmapped_len, &pgcount);

                if (bo->is_heap)
                        pgcount = 1;
                if (!bo->is_heap || ops->iova_to_phys(ops, iova)) {
                        unmapped_page = ops->unmap_pages(ops, iova, pgsize, pgcount, NULL);
                        WARN_ON(unmapped_page != pgsize * pgcount);
                }
                iova += pgsize * pgcount;
                unmapped_len += pgsize * pgcount;
        }

        panfrost_mmu_flush_range(pfdev, mapping->mmu,
                                 mapping->mmnode.start << PAGE_SHIFT, len);
        mapping->active = false;
}

static void mmu_tlb_inv_context_s1(void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
        //struct panfrost_mmu *mmu = cookie;
        // TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}

static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
                               void *cookie)
{
        mmu_tlb_sync_context(cookie);
}

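/*
 * The io-pgtable TLB callbacks are effectively no-ops here: the driver
 * flushes the GPU MMU explicitly with AS_COMMAND_FLUSH_PT through
 * panfrost_mmu_flush_range() after map/unmap instead.
 */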
static const struct iommu_flush_ops mmu_tlb_ops = {
        .tlb_flush_all = mmu_tlb_inv_context_s1,
        .tlb_flush_walk = mmu_tlb_flush_walk,
};

static struct panfrost_gem_mapping *
addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
        struct panfrost_gem_mapping *mapping = NULL;
        struct drm_mm_node *node;
        u64 offset = addr >> PAGE_SHIFT;
        struct panfrost_mmu *mmu;

        spin_lock(&pfdev->as_lock);
        list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
                if (as == mmu->as)
                        goto found_mmu;
        }
        goto out;

found_mmu:

        spin_lock(&mmu->mm_lock);

        drm_mm_for_each_node(node, &mmu->mm) {
                if (offset >= node->start &&
                    offset < (node->start + node->size)) {
                        mapping = drm_mm_node_to_panfrost_mapping(node);

                        kref_get(&mapping->refcount);
                        break;
                }
        }

        spin_unlock(&mmu->mm_lock);
out:
        spin_unlock(&pfdev->as_lock);
        return mapping;
}

#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)

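/*
 * Grow a heap BO on GPU page fault: back the 2 MiB-aligned region around the
 * faulting address with shmem pages and map it into the faulting AS.
 */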
static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
                                       u64 addr)
{
        int ret;
        struct panfrost_gem_mapping *bomapping;
        struct panfrost_gem_object *bo;
        struct address_space *mapping;
        struct drm_gem_object *obj;
        pgoff_t page_offset, nr_pages;
        struct sg_table *sgt;
        struct page **pages;

        bomapping = addr_to_mapping(pfdev, as, addr);
        if (!bomapping)
                return -ENOENT;

        bo = bomapping->obj;
        if (!bo->is_heap) {
                dev_WARN(pfdev->base.dev, "matching BO is not heap type (GPU VA = %llx)",
                         bomapping->mmnode.start << PAGE_SHIFT);
                ret = -EINVAL;
                goto err_bo;
        }
        WARN_ON(bomapping->mmu->as != as);

        /* Assume 2MB alignment and size multiple */
        addr &= ~((u64)SZ_2M - 1);
        page_offset = addr >> PAGE_SHIFT;
        page_offset -= bomapping->mmnode.start;
        nr_pages = bo->base.base.size >> PAGE_SHIFT;

        obj = &bo->base.base;

        dma_resv_lock(obj->resv, NULL);

        if (!bo->base.pages) {
                bo->sgts = kvmalloc_objs(struct sg_table,
                                         bo->base.base.size / SZ_2M,
                                         GFP_KERNEL | __GFP_ZERO);
                if (!bo->sgts) {
                        ret = -ENOMEM;
                        goto err_unlock;
                }

                pages = kvmalloc_objs(struct page *, nr_pages,
                                      GFP_KERNEL | __GFP_ZERO);
                if (!pages) {
                        kvfree(bo->sgts);
                        bo->sgts = NULL;
                        ret = -ENOMEM;
                        goto err_unlock;
                }
                bo->base.pages = pages;
                refcount_set(&bo->base.pages_use_count, 1);
        } else {
                pages = bo->base.pages;
        }

        sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
        if (sgt->sgl) {
                /* Pages are already mapped, bail out. */
                goto out;
        }

        mapping = bo->base.base.filp->f_mapping;
        mapping_set_unevictable(mapping);

        for (pgoff_t pg = page_offset; pg < page_offset + NUM_FAULT_PAGES;) {
                bool already_owned = false;
                struct folio *folio;

                folio = shmem_read_folio(mapping, pg);
                if (IS_ERR(folio)) {
                        ret = PTR_ERR(folio);
                        goto err_unlock;
                }

                pg &= ~(folio_nr_pages(folio) - 1);
                for (u32 i = 0; i < folio_nr_pages(folio) && pg < nr_pages; i++) {
                        if (pages[pg])
                                already_owned = true;

                        pages[pg++] = folio_page(folio, i);
                }

                /* We always fill the page array at a folio granularity so
                 * there's no valid reason for a folio range to be partially
                 * populated.
                 */
                if (drm_WARN_ON(&pfdev->base, already_owned))
                        folio_put(folio);
        }

        ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
                                        NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
        if (ret)
                goto err_unlock;

        ret = dma_map_sgtable(pfdev->base.dev, sgt, DMA_BIDIRECTIONAL, 0);
        if (ret)
                goto err_map;

        ret = mmu_map_sg(pfdev, bomapping->mmu, addr,
                         IOMMU_WRITE | IOMMU_READ | IOMMU_CACHE | IOMMU_NOEXEC, sgt);
        if (ret)
                goto err_mmu_map_sg;

        bomapping->active = true;
        bo->heap_rss_size += SZ_2M;

        dev_dbg(pfdev->base.dev, "mapped page fault @ AS%d %llx", as, addr);

out:
        dma_resv_unlock(obj->resv);

        panfrost_gem_mapping_put(bomapping);

        return 0;

err_mmu_map_sg:
        dma_unmap_sgtable(pfdev->base.dev, sgt, DMA_BIDIRECTIONAL, 0);
err_map:
        sg_free_table(sgt);
err_unlock:
        dma_resv_unlock(obj->resv);
err_bo:
        panfrost_gem_mapping_put(bomapping);
        return ret;
}

static void panfrost_mmu_release_ctx(struct kref *kref)
{
        struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
                                                refcount);
        struct panfrost_device *pfdev = mmu->pfdev;

        spin_lock(&pfdev->as_lock);
        if (mmu->as >= 0) {
                pm_runtime_get_noresume(pfdev->base.dev);
                if (pm_runtime_active(pfdev->base.dev))
                        panfrost_mmu_disable(pfdev, mmu->as);
                pm_runtime_put_autosuspend(pfdev->base.dev);

                clear_bit(mmu->as, &pfdev->as_alloc_mask);
                list_del(&mmu->list);
        }
        spin_unlock(&pfdev->as_lock);

        free_io_pgtable_ops(mmu->pgtbl_ops);
        drm_mm_takedown(&mmu->mm);
        kfree(mmu);
}

void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
{
        kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
}

struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
{
        kref_get(&mmu->refcount);

        return mmu;
}

#define PFN_4G (SZ_4G >> PAGE_SHIFT)
#define PFN_4G_MASK (PFN_4G - 1)
#define PFN_16M (SZ_16M >> PAGE_SHIFT)

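/*
 * drm_mm color callback: in addition to not starting or ending on a 4 GB
 * boundary, executable mappings are not allowed to cross one, and are kept
 * out of the last 16 MB before it (the start is pushed into the next 4 GB
 * segment instead).
 */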
static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
                                         unsigned long color,
                                         u64 *start, u64 *end)
{
        /* Executable buffers can't start or end on a 4GB boundary */
        if (!(color & PANFROST_BO_NOEXEC)) {
                u64 next_seg;

                if ((*start & PFN_4G_MASK) == 0)
                        (*start)++;

                if ((*end & PFN_4G_MASK) == 0)
                        (*end)--;

                next_seg = ALIGN(*start, PFN_4G);
                if (next_seg - *start <= PFN_16M)
                        *start = next_seg + 1;

                *end = min(*end, ALIGN(*start, PFN_4G) - 1);
        }
}

struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
        u32 va_bits = GPU_MMU_FEATURES_VA_BITS(pfdev->features.mmu_features);
        u32 pa_bits = GPU_MMU_FEATURES_PA_BITS(pfdev->features.mmu_features);
        struct panfrost_mmu *mmu;
        enum io_pgtable_fmt fmt;
        int ret;

        if (pfdev->comp->gpu_quirks & BIT(GPU_QUIRK_FORCE_AARCH64_PGTABLE)) {
                if (!panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU)) {
                        dev_err_once(pfdev->base.dev,
                                     "AARCH64_4K page table not supported\n");
                        return ERR_PTR(-EINVAL);
                }
                fmt = ARM_64_LPAE_S1;
        } else {
                fmt = ARM_MALI_LPAE;
        }

        mmu = kzalloc_obj(*mmu);
        if (!mmu)
                return ERR_PTR(-ENOMEM);

        mmu->pfdev = pfdev;
        spin_lock_init(&mmu->mm_lock);

        /* 4G enough for now. can be 48-bit */
        drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
        mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;

        INIT_LIST_HEAD(&mmu->list);
        mmu->as = -1;

        mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
                .pgsize_bitmap = SZ_4K | SZ_2M,
                .ias = va_bits,
                .oas = pa_bits,
                .coherent_walk = pfdev->coherent,
                .tlb = &mmu_tlb_ops,
                .iommu_dev = pfdev->base.dev,
        };

        mmu->pgtbl_ops = alloc_io_pgtable_ops(fmt, &mmu->pgtbl_cfg, mmu);
        if (!mmu->pgtbl_ops) {
                ret = -EINVAL;
                goto err_free_mmu;
        }

        ret = panfrost_mmu_cfg_init(mmu, fmt);
        if (ret)
                goto err_free_io_pgtable;

        kref_init(&mmu->refcount);

        return mmu;

err_free_io_pgtable:
        free_io_pgtable_ops(mmu->pgtbl_ops);

err_free_mmu:
        kfree(mmu);
        return ERR_PTR(ret);
}

static const char *access_type_name(struct panfrost_device *pfdev,
                                    u32 fault_status)
{
        switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
        case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
                if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
                        return "ATOMIC";
                else
                        return "UNKNOWN";
        case AS_FAULTSTATUS_ACCESS_TYPE_READ:
                return "READ";
        case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
                return "WRITE";
        case AS_FAULTSTATUS_ACCESS_TYPE_EX:
                return "EXECUTE";
        default:
                WARN_ON(1);
                return NULL;
        }
}

static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
        struct panfrost_device *pfdev = data;

        if (test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended))
                return IRQ_NONE;

        if (!mmu_read(pfdev, MMU_INT_STAT))
                return IRQ_NONE;

        mmu_write(pfdev, MMU_INT_MASK, 0);
        return IRQ_WAKE_THREAD;
}

static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
        struct panfrost_device *pfdev = data;
        u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
        int ret;

        while (status) {
                u32 as = ffs(status | (status >> 16)) - 1;
                u32 mask = BIT(as) | BIT(as + 16);
                u64 addr;
                u32 fault_status;
                u32 exception_type;
                u32 access_type;
                u32 source_id;

                fault_status = mmu_read(pfdev, AS_FAULTSTATUS(as));
                addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as));
                addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32;

                /* decode the fault status */
                exception_type = fault_status & 0xFF;
                access_type = (fault_status >> 8) & 0x3;
                source_id = (fault_status >> 16);

                mmu_write(pfdev, MMU_INT_CLEAR, mask);

                /* Page fault only */
                ret = -1;
                if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 0xC0)
                        ret = panfrost_mmu_map_fault_addr(pfdev, as, addr);

                if (ret) {
                        /* terminal fault, print info about the fault */
                        dev_err(pfdev->base.dev,
                                "Unhandled Page fault in AS%d at VA 0x%016llX\n"
                                "Reason: %s\n"
                                "raw fault status: 0x%X\n"
                                "decoded fault status: %s\n"
                                "exception type 0x%X: %s\n"
                                "access type 0x%X: %s\n"
                                "source id 0x%X\n",
                                as, addr,
                                "TODO",
                                fault_status,
                                (fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
                                exception_type, panfrost_exception_name(exception_type),
                                access_type, access_type_name(pfdev, fault_status),
                                source_id);

                        spin_lock(&pfdev->as_lock);
                        /* Ignore MMU interrupts on this AS until it's been
                         * re-enabled.
                         */
                        pfdev->as_faulty_mask |= mask;

                        /* Disable the MMU to kill jobs on this AS. */
                        panfrost_mmu_disable(pfdev, as);
                        spin_unlock(&pfdev->as_lock);
                }

                status &= ~mask;

                /* If we received new MMU interrupts, process them before returning. */
                if (!status)
                        status = mmu_read(pfdev, MMU_INT_RAWSTAT) & ~pfdev->as_faulty_mask;
        }

        /* Enable interrupts only if we're not about to get suspended */
        if (!test_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended)) {
                spin_lock(&pfdev->as_lock);
                mmu_write(pfdev, MMU_INT_MASK, ~pfdev->as_faulty_mask);
                spin_unlock(&pfdev->as_lock);
        }

        return IRQ_HANDLED;
}

int panfrost_mmu_init(struct panfrost_device *pfdev)
{
        int err;

        pfdev->mmu_irq = platform_get_irq_byname(to_platform_device(pfdev->base.dev), "mmu");
        if (pfdev->mmu_irq < 0)
                return pfdev->mmu_irq;

        err = devm_request_threaded_irq(pfdev->base.dev, pfdev->mmu_irq,
                                        panfrost_mmu_irq_handler,
                                        panfrost_mmu_irq_handler_thread,
                                        IRQF_SHARED, KBUILD_MODNAME "-mmu",
                                        pfdev);

        if (err) {
                dev_err(pfdev->base.dev, "failed to request mmu irq");
                return err;
        }

        return 0;
}

void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
        mmu_write(pfdev, MMU_INT_MASK, 0);
}

void panfrost_mmu_suspend_irq(struct panfrost_device *pfdev)
{
        set_bit(PANFROST_COMP_BIT_MMU, pfdev->is_suspended);

        mmu_write(pfdev, MMU_INT_MASK, 0);
        synchronize_irq(pfdev->mmu_irq);
}
988