xref: /linux/drivers/gpu/drm/panfrost/panfrost_mmu.c (revision 6ef2f37f4016a482fd31d21bb295494d26794a7a)
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */

#include <drm/panfrost_drm.h>

#include <linux/atomic.h>
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/shmem_fs.h>
#include <linux/sizes.h>

#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"

#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)

static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
	int ret;
	u32 val;

	/* Wait for the MMU status to indicate there is no active command, in
	 * case one is pending. */
	ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
		val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);

	if (ret)
		dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");

	return ret;
}

static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
	int status;

	/* write AS_COMMAND when MMU is ready to accept another command */
	status = wait_ready(pfdev, as_nr);
	if (!status)
		mmu_write(pfdev, AS_COMMAND(as_nr), cmd);

	return status;
}

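/*
 * Lock an IOVA range in the AS so that the next flush/update command only
 * operates on that region. AS_LOCKADDR takes the region base address with
 * the region size encoded as a power of two in its low bits, which is what
 * the region_width computation below produces (mirroring the vendor kbase
 * driver).
 */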
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
			u64 iova, size_t size)
{
	u8 region_width;
	u64 region = iova & PAGE_MASK;
	/*
	 * fls returns:
	 * 1 .. 32
	 *
	 * 10 + fls(num_pages)
	 * results in the range (11 .. 42)
	 */

	size = round_up(size, PAGE_SIZE);

	region_width = 10 + fls(size >> PAGE_SHIFT);
	if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
		/* not pow2, so must go up to the next pow2 */
		region_width += 1;
	}
	region |= region_width;

	/* Lock the region that needs to be updated */
	mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
	mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
	write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}

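/*
 * Run an MMU command on one address space, locking the target region first
 * for everything but UNLOCK. Callers must hold pfdev->as_lock (hence the
 * _locked suffix); mmu_hw_do_operation() below takes it for them.
 */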
static int mmu_hw_do_operation_locked(struct panfrost_device *pfdev, int as_nr,
				      u64 iova, size_t size, u32 op)
{
	if (as_nr < 0)
		return 0;

	if (op != AS_COMMAND_UNLOCK)
		lock_region(pfdev, as_nr, iova, size);

	/* Run the MMU operation */
	write_cmd(pfdev, as_nr, op);

	/* Wait for the flush to complete */
	return wait_ready(pfdev, as_nr);
}

static int mmu_hw_do_operation(struct panfrost_device *pfdev,
			       struct panfrost_mmu *mmu,
			       u64 iova, size_t size, u32 op)
{
	int ret;

	spin_lock(&pfdev->as_lock);
	ret = mmu_hw_do_operation_locked(pfdev, mmu->as, iova, size, op);
	spin_unlock(&pfdev->as_lock);
	return ret;
}

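/*
 * Program the AS with the page table base (TRANSTAB) and memory attributes
 * (MEMATTR) computed by the ARM_MALI_LPAE io-pgtable backend, then issue an
 * UPDATE command to make the new configuration take effect.
 */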
static void panfrost_mmu_enable(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as_nr = mmu->as;
	struct io_pgtable_cfg *cfg = &mmu->pgtbl_cfg;
	u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
	u64 memattr = cfg->arm_mali_lpae_cfg.memattr;

	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);

	/* Need to revisit mem attrs.
	 * NC is the default, Mali driver is inner WT.
	 */
	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

static void panfrost_mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
	mmu_hw_do_operation_locked(pfdev, as_nr, 0, ~0UL, AS_COMMAND_FLUSH_MEM);

	mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
	mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);

	mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
	mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);

	write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}

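/*
 * Assign a hardware address space (AS) to this MMU context: reuse the AS it
 * already holds if any, otherwise grab a free one, and as a last resort
 * reclaim the least-recently-used AS that has no active users.
 */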
u32 panfrost_mmu_as_get(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	int as;

	spin_lock(&pfdev->as_lock);

	as = mmu->as;
	if (as >= 0) {
		int en = atomic_inc_return(&mmu->as_count);

		/*
		 * AS can be retained by active jobs or a perfcnt context,
		 * hence the '+ 1' here.
		 */
		WARN_ON(en >= (NUM_JOB_SLOTS + 1));

		list_move(&mmu->list, &pfdev->as_lru_list);
		goto out;
	}

	/* Check for a free AS */
	as = ffz(pfdev->as_alloc_mask);
	if (!(BIT(as) & pfdev->features.as_present)) {
		struct panfrost_mmu *lru_mmu;

		list_for_each_entry_reverse(lru_mmu, &pfdev->as_lru_list, list) {
			if (!atomic_read(&lru_mmu->as_count))
				break;
		}
		WARN_ON(&lru_mmu->list == &pfdev->as_lru_list);

		list_del_init(&lru_mmu->list);
		as = lru_mmu->as;

		WARN_ON(as < 0);
		lru_mmu->as = -1;
	}

	/* Assign the free or reclaimed AS to the FD */
	mmu->as = as;
	set_bit(as, &pfdev->as_alloc_mask);
	atomic_set(&mmu->as_count, 1);
	list_add(&mmu->list, &pfdev->as_lru_list);

	dev_dbg(pfdev->dev, "Assigned AS%d to mmu %p, alloc_mask=%lx\n", as, mmu, pfdev->as_alloc_mask);

	panfrost_mmu_enable(pfdev, mmu);

out:
	spin_unlock(&pfdev->as_lock);
	return as;
}

void panfrost_mmu_as_put(struct panfrost_device *pfdev, struct panfrost_mmu *mmu)
{
	atomic_dec(&mmu->as_count);
	WARN_ON(atomic_read(&mmu->as_count) < 0);
}

void panfrost_mmu_reset(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu, *mmu_tmp;

	spin_lock(&pfdev->as_lock);

	pfdev->as_alloc_mask = 0;

	list_for_each_entry_safe(mmu, mmu_tmp, &pfdev->as_lru_list, list) {
		mmu->as = -1;
		atomic_set(&mmu->as_count, 0);
		list_del_init(&mmu->list);
	}

	spin_unlock(&pfdev->as_lock);

	mmu_write(pfdev, MMU_INT_CLEAR, ~0);
	mmu_write(pfdev, MMU_INT_MASK, ~0);
}

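/*
 * Pick the largest page size usable at this point of a mapping: 2MB blocks
 * when the address is 2MB-aligned and at least 2MB remains, 4KB otherwise.
 */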
static size_t get_pgsize(u64 addr, size_t size)
{
	if (addr & (SZ_2M - 1) || size < SZ_2M)
		return SZ_4K;

	return SZ_2M;
}

static void panfrost_mmu_flush_range(struct panfrost_device *pfdev,
				     struct panfrost_mmu *mmu,
				     u64 iova, size_t size)
{
	if (mmu->as < 0)
		return;

	pm_runtime_get_noresume(pfdev->dev);

	/* Flush the PTs only if we're already awake */
	if (pm_runtime_active(pfdev->dev))
		mmu_hw_do_operation(pfdev, mmu, iova, size, AS_COMMAND_FLUSH_PT);

	pm_runtime_put_sync_autosuspend(pfdev->dev);
}

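/*
 * Map a DMA-mapped sg table at @iova in the context's page tables, using 2MB
 * blocks where alignment allows, then flush the affected IOVA range.
 */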
static int mmu_map_sg(struct panfrost_device *pfdev, struct panfrost_mmu *mmu,
		      u64 iova, int prot, struct sg_table *sgt)
{
	unsigned int count;
	struct scatterlist *sgl;
	struct io_pgtable_ops *ops = mmu->pgtbl_ops;
	u64 start_iova = iova;

	for_each_sgtable_dma_sg(sgt, sgl, count) {
		unsigned long paddr = sg_dma_address(sgl);
		size_t len = sg_dma_len(sgl);

		dev_dbg(pfdev->dev, "map: as=%d, iova=%llx, paddr=%lx, len=%zx\n", mmu->as, iova, paddr, len);

		while (len) {
			size_t pgsize = get_pgsize(iova | paddr, len);

			ops->map(ops, iova, paddr, pgsize, prot, GFP_KERNEL);
			iova += pgsize;
			paddr += pgsize;
			len -= pgsize;
		}
	}

	panfrost_mmu_flush_range(pfdev, mmu, start_iova, iova - start_iova);

	return 0;
}

int panfrost_mmu_map(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct sg_table *sgt;
	int prot = IOMMU_READ | IOMMU_WRITE;

	if (WARN_ON(mapping->active))
		return 0;

	if (bo->noexec)
		prot |= IOMMU_NOEXEC;

	sgt = drm_gem_shmem_get_pages_sgt(obj);
	if (WARN_ON(IS_ERR(sgt)))
		return PTR_ERR(sgt);

	mmu_map_sg(pfdev, mapping->mmu, mapping->mmnode.start << PAGE_SHIFT,
		   prot, sgt);
	mapping->active = true;

	return 0;
}

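/*
 * Tear the mapping down page by page. Heap BOs are populated on demand, so
 * IOVAs with no physical backing are skipped via iova_to_phys().
 */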
void panfrost_mmu_unmap(struct panfrost_gem_mapping *mapping)
{
	struct panfrost_gem_object *bo = mapping->obj;
	struct drm_gem_object *obj = &bo->base.base;
	struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
	struct io_pgtable_ops *ops = mapping->mmu->pgtbl_ops;
	u64 iova = mapping->mmnode.start << PAGE_SHIFT;
	size_t len = mapping->mmnode.size << PAGE_SHIFT;
	size_t unmapped_len = 0;

	if (WARN_ON(!mapping->active))
		return;

	dev_dbg(pfdev->dev, "unmap: as=%d, iova=%llx, len=%zx\n",
		mapping->mmu->as, iova, len);

	while (unmapped_len < len) {
		size_t unmapped_page;
		size_t pgsize = get_pgsize(iova, len - unmapped_len);

		if (ops->iova_to_phys(ops, iova)) {
			unmapped_page = ops->unmap(ops, iova, pgsize, NULL);
			WARN_ON(unmapped_page != pgsize);
		}
		iova += pgsize;
		unmapped_len += pgsize;
	}

	panfrost_mmu_flush_range(pfdev, mapping->mmu,
				 mapping->mmnode.start << PAGE_SHIFT, len);
	mapping->active = false;
}

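/*
 * TLB maintenance is driven explicitly through AS_COMMAND_FLUSH_* in
 * panfrost_mmu_flush_range(), so the io-pgtable callbacks below can be
 * no-ops.
 */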
static void mmu_tlb_inv_context_s1(void *cookie)
{}

static void mmu_tlb_sync_context(void *cookie)
{
	//struct panfrost_mmu *mmu = cookie;
	// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}

static void mmu_tlb_flush_walk(unsigned long iova, size_t size, size_t granule,
			       void *cookie)
{
	mmu_tlb_sync_context(cookie);
}

static const struct iommu_flush_ops mmu_tlb_ops = {
	.tlb_flush_all	= mmu_tlb_inv_context_s1,
	.tlb_flush_walk = mmu_tlb_flush_walk,
};

static struct panfrost_gem_mapping *
addr_to_mapping(struct panfrost_device *pfdev, int as, u64 addr)
{
	struct panfrost_gem_mapping *mapping = NULL;
	struct drm_mm_node *node;
	u64 offset = addr >> PAGE_SHIFT;
	struct panfrost_mmu *mmu;

	spin_lock(&pfdev->as_lock);
	list_for_each_entry(mmu, &pfdev->as_lru_list, list) {
		if (as == mmu->as)
			goto found_mmu;
	}
	goto out;

found_mmu:

	spin_lock(&mmu->mm_lock);

	drm_mm_for_each_node(node, &mmu->mm) {
		if (offset >= node->start &&
		    offset < (node->start + node->size)) {
			mapping = drm_mm_node_to_panfrost_mapping(node);

			kref_get(&mapping->refcount);
			break;
		}
	}

	spin_unlock(&mmu->mm_lock);
out:
	spin_unlock(&pfdev->as_lock);
	return mapping;
}

#define NUM_FAULT_PAGES (SZ_2M / PAGE_SIZE)

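/*
 * Handle a translation fault on a growable heap BO: allocate shmem pages for
 * the 2MB-aligned chunk containing the faulting address, DMA-map them and
 * insert them into the faulting context's page tables.
 */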
static int panfrost_mmu_map_fault_addr(struct panfrost_device *pfdev, int as,
				       u64 addr)
{
	int ret, i;
	struct panfrost_gem_mapping *bomapping;
	struct panfrost_gem_object *bo;
	struct address_space *mapping;
	pgoff_t page_offset;
	struct sg_table *sgt;
	struct page **pages;

	bomapping = addr_to_mapping(pfdev, as, addr);
	if (!bomapping)
		return -ENOENT;

	bo = bomapping->obj;
	if (!bo->is_heap) {
		dev_WARN(pfdev->dev, "matching BO is not heap type (GPU VA = %llx)",
			 bomapping->mmnode.start << PAGE_SHIFT);
		ret = -EINVAL;
		goto err_bo;
	}
	WARN_ON(bomapping->mmu->as != as);

	/* Assume 2MB alignment and size multiple */
	addr &= ~((u64)SZ_2M - 1);
	page_offset = addr >> PAGE_SHIFT;
	page_offset -= bomapping->mmnode.start;

	mutex_lock(&bo->base.pages_lock);

	if (!bo->base.pages) {
		bo->sgts = kvmalloc_array(bo->base.base.size / SZ_2M,
				     sizeof(struct sg_table), GFP_KERNEL | __GFP_ZERO);
		if (!bo->sgts) {
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}

		pages = kvmalloc_array(bo->base.base.size >> PAGE_SHIFT,
				       sizeof(struct page *), GFP_KERNEL | __GFP_ZERO);
		if (!pages) {
			kvfree(bo->sgts);
			bo->sgts = NULL;
			mutex_unlock(&bo->base.pages_lock);
			ret = -ENOMEM;
			goto err_bo;
		}
		bo->base.pages = pages;
		bo->base.pages_use_count = 1;
	} else {
		pages = bo->base.pages;
		if (pages[page_offset]) {
			/* Pages are already mapped, bail out. */
			mutex_unlock(&bo->base.pages_lock);
			goto out;
		}
	}

	mapping = bo->base.base.filp->f_mapping;
	mapping_set_unevictable(mapping);

	for (i = page_offset; i < page_offset + NUM_FAULT_PAGES; i++) {
		pages[i] = shmem_read_mapping_page(mapping, i);
		if (IS_ERR(pages[i])) {
			mutex_unlock(&bo->base.pages_lock);
			ret = PTR_ERR(pages[i]);
			goto err_pages;
		}
	}

	mutex_unlock(&bo->base.pages_lock);

	sgt = &bo->sgts[page_offset / (SZ_2M / PAGE_SIZE)];
	ret = sg_alloc_table_from_pages(sgt, pages + page_offset,
					NUM_FAULT_PAGES, 0, SZ_2M, GFP_KERNEL);
	if (ret)
		goto err_pages;

	ret = dma_map_sgtable(pfdev->dev, sgt, DMA_BIDIRECTIONAL, 0);
	if (ret)
		goto err_map;

	mmu_map_sg(pfdev, bomapping->mmu, addr,
		   IOMMU_WRITE | IOMMU_READ | IOMMU_NOEXEC, sgt);

	bomapping->active = true;

	dev_dbg(pfdev->dev, "mapped page fault @ AS%d %llx\n", as, addr);

out:
	panfrost_gem_mapping_put(bomapping);

	return 0;

err_map:
	sg_free_table(sgt);
err_pages:
	drm_gem_shmem_put_pages(&bo->base);
err_bo:
	drm_gem_object_put(&bo->base.base);
	return ret;
}

static void panfrost_mmu_release_ctx(struct kref *kref)
{
	struct panfrost_mmu *mmu = container_of(kref, struct panfrost_mmu,
						refcount);
	struct panfrost_device *pfdev = mmu->pfdev;

	spin_lock(&pfdev->as_lock);
	if (mmu->as >= 0) {
		pm_runtime_get_noresume(pfdev->dev);
		if (pm_runtime_active(pfdev->dev))
			panfrost_mmu_disable(pfdev, mmu->as);
		pm_runtime_put_autosuspend(pfdev->dev);

		clear_bit(mmu->as, &pfdev->as_alloc_mask);
		clear_bit(mmu->as, &pfdev->as_in_use_mask);
		list_del(&mmu->list);
	}
	spin_unlock(&pfdev->as_lock);

	free_io_pgtable_ops(mmu->pgtbl_ops);
	drm_mm_takedown(&mmu->mm);
	kfree(mmu);
}

void panfrost_mmu_ctx_put(struct panfrost_mmu *mmu)
{
	kref_put(&mmu->refcount, panfrost_mmu_release_ctx);
}

struct panfrost_mmu *panfrost_mmu_ctx_get(struct panfrost_mmu *mmu)
{
	kref_get(&mmu->refcount);

	return mmu;
}

#define PFN_4G		(SZ_4G >> PAGE_SHIFT)
#define PFN_4G_MASK	(PFN_4G - 1)
#define PFN_16M		(SZ_16M >> PAGE_SHIFT)

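/*
 * Placement constraints for executable buffers (PANFROST_BO_NOEXEC clear):
 * besides not starting or ending on a 4GB boundary, they must not start in
 * the last 16MB of a 4GB segment and must not cross a 4GB boundary,
 * presumably to stay clear of hardware restrictions on shader execution
 * near those addresses.
 */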
static void panfrost_drm_mm_color_adjust(const struct drm_mm_node *node,
					 unsigned long color,
					 u64 *start, u64 *end)
{
	/* Executable buffers can't start or end on a 4GB boundary */
	if (!(color & PANFROST_BO_NOEXEC)) {
		u64 next_seg;

		if ((*start & PFN_4G_MASK) == 0)
			(*start)++;

		if ((*end & PFN_4G_MASK) == 0)
			(*end)--;

		next_seg = ALIGN(*start, PFN_4G);
		if (next_seg - *start <= PFN_16M)
			*start = next_seg + 1;

		*end = min(*end, ALIGN(*start, PFN_4G) - 1);
	}
}

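/*
 * Create a per-FD MMU context: its own drm_mm VA allocator and an
 * ARM_MALI_LPAE io-pgtable. The input/output address widths come from the
 * GPU's MMU_FEATURES register (VA bits in 7:0, PA bits in 15:8, as decoded
 * by the FIELD_GET()s below).
 */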
struct panfrost_mmu *panfrost_mmu_ctx_create(struct panfrost_device *pfdev)
{
	struct panfrost_mmu *mmu;

	mmu = kzalloc(sizeof(*mmu), GFP_KERNEL);
	if (!mmu)
		return ERR_PTR(-ENOMEM);

	mmu->pfdev = pfdev;
	spin_lock_init(&mmu->mm_lock);

	/* 4G is enough for now; this can be extended to 48-bit later. */
	drm_mm_init(&mmu->mm, SZ_32M >> PAGE_SHIFT, (SZ_4G - SZ_32M) >> PAGE_SHIFT);
	mmu->mm.color_adjust = panfrost_drm_mm_color_adjust;

	INIT_LIST_HEAD(&mmu->list);
	mmu->as = -1;

	mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap	= SZ_4K | SZ_2M,
		.ias		= FIELD_GET(0xff, pfdev->features.mmu_features),
		.oas		= FIELD_GET(0xff00, pfdev->features.mmu_features),
		.coherent_walk	= pfdev->coherent,
		.tlb		= &mmu_tlb_ops,
		.iommu_dev	= pfdev->dev,
	};

	mmu->pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &mmu->pgtbl_cfg,
					      mmu);
	if (!mmu->pgtbl_ops) {
		kfree(mmu);
		return ERR_PTR(-EINVAL);
	}

	kref_init(&mmu->refcount);

	return mmu;
}

static const char *access_type_name(struct panfrost_device *pfdev,
		u32 fault_status)
{
	switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
	case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
		if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
			return "ATOMIC";
		else
			return "UNKNOWN";
	case AS_FAULTSTATUS_ACCESS_TYPE_READ:
		return "READ";
	case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
		return "WRITE";
	case AS_FAULTSTATUS_ACCESS_TYPE_EX:
		return "EXECUTE";
	default:
		WARN_ON(1);
		return NULL;
	}
}

static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
	struct panfrost_device *pfdev = data;

	if (!mmu_read(pfdev, MMU_INT_STAT))
		return IRQ_NONE;

	mmu_write(pfdev, MMU_INT_MASK, 0);
	return IRQ_WAKE_THREAD;
}

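/*
 * The interrupt status has one bit per AS: bit N flags a page fault on
 * address space N and bit N+16 the matching bus error (the layout used by
 * the vendor kbase driver), which is why both bits are handled and cleared
 * as a pair below.
 */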
static irqreturn_t panfrost_mmu_irq_handler_thread(int irq, void *data)
{
	struct panfrost_device *pfdev = data;
	u32 status = mmu_read(pfdev, MMU_INT_RAWSTAT);
	int ret;

	while (status) {
		u32 as = ffs(status | (status >> 16)) - 1;
		u32 mask = BIT(as) | BIT(as + 16);
		u64 addr;
		u32 fault_status;
		u32 exception_type;
		u32 access_type;
		u32 source_id;

		fault_status = mmu_read(pfdev, AS_FAULTSTATUS(as));
		addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(as));
		addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(as)) << 32;

		/* decode the fault status */
		exception_type = fault_status & 0xFF;
		access_type = (fault_status >> 8) & 0x3;
		source_id = (fault_status >> 16);

		mmu_write(pfdev, MMU_INT_CLEAR, mask);

		/* Page fault only */
		ret = -1;
		if ((status & mask) == BIT(as) && (exception_type & 0xF8) == 0xC0)
			ret = panfrost_mmu_map_fault_addr(pfdev, as, addr);

		if (ret)
			/* terminal fault, print info about the fault */
			dev_err(pfdev->dev,
				"Unhandled Page fault in AS%d at VA 0x%016llX\n"
				"Reason: %s\n"
				"raw fault status: 0x%X\n"
				"decoded fault status: %s\n"
				"exception type 0x%X: %s\n"
				"access type 0x%X: %s\n"
				"source id 0x%X\n",
				as, addr,
				"TODO",
				fault_status,
				(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
				exception_type, panfrost_exception_name(exception_type),
				access_type, access_type_name(pfdev, fault_status),
				source_id);

		status &= ~mask;

		/* If we received new MMU interrupts, process them before returning. */
		if (!status)
			status = mmu_read(pfdev, MMU_INT_RAWSTAT);
	}

	mmu_write(pfdev, MMU_INT_MASK, ~0);
	return IRQ_HANDLED;
}

int panfrost_mmu_init(struct panfrost_device *pfdev)
{
	int err, irq;

	irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
	if (irq <= 0)
		return -ENODEV;

	err = devm_request_threaded_irq(pfdev->dev, irq,
					panfrost_mmu_irq_handler,
					panfrost_mmu_irq_handler_thread,
					IRQF_SHARED, KBUILD_MODNAME "-mmu",
					pfdev);

	if (err) {
		dev_err(pfdev->dev, "failed to request mmu irq\n");
		return err;
	}

	return 0;
}

void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
	mmu_write(pfdev, MMU_INT_MASK, 0);
}
719