// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>
#include <linux/kmemleak.h>
#include "msm_drv.h"
#include "msm_gpu_trace.h"
#include "msm_mmu.h"

struct msm_iommu {
	struct msm_mmu base;
	struct iommu_domain *domain;
	atomic_t pagetables;
	struct page *prr_page;

	struct kmem_cache *pt_cache;
};

#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)

struct msm_iommu_pagetable {
	struct msm_mmu base;
	struct msm_mmu *parent;
	struct io_pgtable_ops *pgtbl_ops;
	const struct iommu_flush_ops *tlb;
	struct device *iommu_dev;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	phys_addr_t ttbr;
	u32 asid;

	/** @root_page_table: Stores the root page table pointer. */
	void *root_page_table;
};

static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
	return container_of(mmu, struct msm_iommu_pagetable, base);
}

/* based on iommu_pgsize() in iommu.c: */
static size_t calc_pgsize(struct msm_iommu_pagetable *pagetable,
			   unsigned long iova, phys_addr_t paddr,
			   size_t size, size_t *count)
{
	unsigned int pgsize_idx, pgsize_idx_next;
	unsigned long pgsizes;
	size_t offset, pgsize, pgsize_next;
	unsigned long addr_merge = paddr | iova;

	/* Page sizes supported by the hardware and small enough for @size */
	pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0);

	/* Constrain the page sizes further based on the maximum alignment */
	if (likely(addr_merge))
		pgsizes &= GENMASK(__ffs(addr_merge), 0);

	/* Make sure we have at least one suitable page size */
	BUG_ON(!pgsizes);

	/* Pick the biggest page size remaining */
	pgsize_idx = __fls(pgsizes);
	pgsize = BIT(pgsize_idx);
	if (!count)
		return pgsize;

	/* Find the next biggest supported page size, if it exists */
	pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
	if (!pgsizes)
		goto out_set_count;

	pgsize_idx_next = __ffs(pgsizes);
	pgsize_next = BIT(pgsize_idx_next);

	/*
	 * There's no point trying a bigger page size unless the virtual
	 * and physical addresses are similarly offset within the larger page.
	 */
	if ((iova ^ paddr) & (pgsize_next - 1))
		goto out_set_count;

	/* Calculate the offset to the next page size alignment boundary */
	offset = pgsize_next - (addr_merge & (pgsize_next - 1));

	/*
	 * If size is big enough to accommodate the larger page, reduce
	 * the number of smaller pages.
	 */
	if (offset + pgsize_next <= size)
		size = offset;

out_set_count:
	*count = size >> pgsize_idx;
	return pgsize;
}
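
/*
 * Worked example (illustrative only): assume pgsize_bitmap contains 4K, 2M
 * and 1G, with iova = 0x1000000 (16M), paddr = 0x2000000 (32M) and
 * size = 0x400000 (4M).  The sizes small enough for @size are 4K and 2M,
 * and the 16M alignment of addr_merge rules neither out, so pgsize = 2M.
 * The next bigger supported size is 1G, but iova and paddr are offset
 * differently within a 1G page, so no merging is attempted and
 * *count = 0x400000 >> 21 = 2, i.e. two 2M mappings.
 */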

static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
		size_t size)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	int ret = 0;

	while (size) {
		size_t pgsize, count;
		ssize_t unmapped;

		pgsize = calc_pgsize(pagetable, iova, iova, size, &count);

		unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
		if (unmapped <= 0) {
			ret = -EINVAL;
			/*
			 * Continue attempting to unmap the remainder of the
			 * range, so we don't end up with dangling mapped
			 * pages.
			 */
			unmapped = PAGE_SIZE;
		}

		iova += unmapped;
		size -= unmapped;
	}

	iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);

	return ret;
}

static int msm_iommu_pagetable_map_prr(struct msm_mmu *mmu, u64 iova, size_t len, int prot)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
	phys_addr_t phys = page_to_phys(iommu->prr_page);
	u64 addr = iova;

	while (len) {
		size_t mapped = 0;
		size_t size = PAGE_SIZE;
		int ret;

		ret = ops->map_pages(ops, addr, phys, size, 1, prot, GFP_KERNEL, &mapped);

		/* map_pages could fail after mapping some of the pages,
		 * so update the counters before error handling.
		 */
		addr += mapped;
		len  -= mapped;

		if (ret) {
			msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
			return -EINVAL;
		}
	}

	return 0;
}

static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
				   struct sg_table *sgt, size_t off, size_t len,
				   int prot)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	struct scatterlist *sg;
	u64 addr = iova;
	unsigned int i;

	if (!sgt)
		return msm_iommu_pagetable_map_prr(mmu, iova, len, prot);

	for_each_sgtable_sg(sgt, sg, i) {
		size_t size = sg->length;
		phys_addr_t phys = sg_phys(sg);

		if (!len)
			break;

		if (size <= off) {
			off -= size;
			continue;
		}

		phys += off;
		size -= off;
		size = min_t(size_t, size, len);
		off = 0;

		while (size) {
			size_t pgsize, count, mapped = 0;
			int ret;

			pgsize = calc_pgsize(pagetable, addr, phys, size, &count);

			ret = ops->map_pages(ops, addr, phys, pgsize, count,
					     prot, GFP_KERNEL, &mapped);

			/* map_pages could fail after mapping some of the pages,
			 * so update the counters before error handling.
			 */
			phys += mapped;
			addr += mapped;
			size -= mapped;
			len  -= mapped;

			if (ret) {
				msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
	struct adreno_smmu_priv *adreno_smmu =
		dev_get_drvdata(pagetable->parent->dev);

	/*
	 * If this is the last attached pagetable for the parent,
	 * disable TTBR0 in the arm-smmu driver
	 */
	if (atomic_dec_return(&iommu->pagetables) == 0) {
		adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);

		if (adreno_smmu->set_prr_bit) {
			adreno_smmu->set_prr_bit(adreno_smmu->cookie, false);
			__free_page(iommu->prr_page);
			iommu->prr_page = NULL;
		}
	}

	free_io_pgtable_ops(pagetable->pgtbl_ops);
	kfree(pagetable);
}

int msm_iommu_pagetable_params(struct msm_mmu *mmu,
		phys_addr_t *ttbr, int *asid)
{
	struct msm_iommu_pagetable *pagetable;

	if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
		return -EINVAL;

	pagetable = to_pagetable(mmu);

	if (ttbr)
		*ttbr = pagetable->ttbr;

	if (asid)
		*asid = pagetable->asid;

	return 0;
}
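
/*
 * Hypothetical caller sketch (not taken from this file): a GPU backend that
 * switches per-process pagetables would typically read these values and
 * hand them to the hardware, e.g.:
 *
 *	phys_addr_t ttbr;
 *	int asid;
 *
 *	if (!msm_iommu_pagetable_params(pt_mmu, &ttbr, &asid))
 *		program_pagetable_switch(ttbr, asid);  // hypothetical helper
 */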

struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	return &iommu->domain->geometry;
}

int
msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4])
{
	struct msm_iommu_pagetable *pagetable;
	struct arm_lpae_io_pgtable_walk_data wd = {};

	if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
		return -EINVAL;

	pagetable = to_pagetable(mmu);

	if (!pagetable->pgtbl_ops->pgtable_walk)
		return -EINVAL;

	pagetable->pgtbl_ops->pgtable_walk(pagetable->pgtbl_ops, iova, &wd);

	for (int i = 0; i < ARRAY_SIZE(wd.ptes); i++)
		ptes[i] = wd.ptes[i];

	return 0;
}
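
/*
 * The ptes[] array above is filled with the descriptor observed at each
 * level of the walk; since @wd is zero-initialized, levels the walk never
 * reaches are left as zero.  Callers (for example GPU fault-dump code) can
 * use this to report the page table walk for a faulting address.
 */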

static void
msm_iommu_pagetable_prealloc_count(struct msm_mmu *mmu, struct msm_mmu_prealloc *p,
				   uint64_t iova, size_t len)
{
	u64 pt_count;

	/*
	 * L1, L2 and L3 page tables.
	 *
	 * We could optimize L3 allocation by iterating over the sgt and merging
	 * 2M contiguous blocks, but it's simpler to over-provision and return
	 * the pages if they're not used.
	 *
	 * With a 4K granule the root table is set up at init time, and each
	 * remaining level of table maps a fixed span of VA (v8 / v7-lpae long
	 * descriptor format): an L1 table covers 2^39 bytes, an L2 table 2^30,
	 * and an L3 table 2^21, which is why the sums below shift by 39, 30
	 * and 21.
	 *
	 * https://developer.arm.com/documentation/ddi0406/c/System-Level-Architecture/Virtual-Memory-System-Architecture--VMSA-/Long-descriptor-translation-table-format/Long-descriptor-translation-table-format-descriptors?lang=en#BEIHEFFB
	 */
	pt_count = ((ALIGN(iova + len, 1ull << 39) - ALIGN_DOWN(iova, 1ull << 39)) >> 39) +
		   ((ALIGN(iova + len, 1ull << 30) - ALIGN_DOWN(iova, 1ull << 30)) >> 30) +
		   ((ALIGN(iova + len, 1ull << 21) - ALIGN_DOWN(iova, 1ull << 21)) >> 21);

	p->count += pt_count;
}
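
/*
 * Worked example (illustrative only): for iova = 4G and len = 4M the three
 * terms above evaluate to 1 (the range sits inside one 512G block, so one
 * L1 table), 1 (one 1G block, so one L2 table) and 2 (the 2M-aligned 4M
 * range spans two 2M blocks, so two L3 tables), so p->count grows by 4.
 */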

static struct kmem_cache *
get_pt_cache(struct msm_mmu *mmu)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	return to_msm_iommu(pagetable->parent)->pt_cache;
}

static int
msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_prealloc *p)
{
	struct kmem_cache *pt_cache = get_pt_cache(mmu);
	int ret;

	p->pages = kvmalloc_array(p->count, sizeof(p->pages), GFP_KERNEL);
	if (!p->pages)
		return -ENOMEM;

	ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages);
	if (ret != p->count) {
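		/*
		 * kmem_cache_alloc_bulk() may succeed only partially; record
		 * how many objects were actually allocated so that
		 * prealloc_cleanup() frees exactly those and no more.
		 */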
		p->count = ret;
		return -ENOMEM;
	}

	return 0;
}

static void
msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_prealloc *p)
{
	struct kmem_cache *pt_cache = get_pt_cache(mmu);
	uint32_t remaining_pt_count = p->count - p->ptr;

	if (p->count > 0)
		trace_msm_mmu_prealloc_cleanup(p->count, remaining_pt_count);

	kmem_cache_free_bulk(pt_cache, remaining_pt_count, &p->pages[p->ptr]);
	kvfree(p->pages);
}
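
/*
 * Rough sketch of the pre-allocation lifecycle, assuming a caller along the
 * lines of the VM_BIND paths (the call sites shown here are illustrative
 * only):
 *
 *	mmu->funcs->prealloc_count(mmu, p, iova, len);	// size the reservation
 *	mmu->funcs->prealloc_allocate(mmu, p);		// fill p->pages from pt_cache
 *	... map/unmap, during which msm_iommu_pagetable_alloc_pt()
 *	    below consumes p->pages ...
 *	mmu->funcs->prealloc_cleanup(mmu, p);		// return unused pages
 */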

/**
 * msm_iommu_pagetable_alloc_pt() - Custom page table allocator
 * @cookie: Cookie passed at page table allocation time.
 * @size: Size of the page table. This size should be fixed,
 * and determined at creation time based on the granule size.
 * @gfp: GFP flags.
 *
 * We want a custom allocator so we can use a cache for page table
 * allocations and amortize the cost of the over-reservation that's
 * done to allow asynchronous VM operations.
 *
 * Return: non-NULL on success, NULL if the allocation failed for any
 * reason.
 */
static void *
msm_iommu_pagetable_alloc_pt(void *cookie, size_t size, gfp_t gfp)
{
	struct msm_iommu_pagetable *pagetable = cookie;
	struct msm_mmu_prealloc *p = pagetable->base.prealloc;
	void *page;

	/* Allocation of the root page table happens during init. */
	if (unlikely(!pagetable->root_page_table)) {
		struct page *p;

		p = alloc_pages_node(dev_to_node(pagetable->iommu_dev),
				     gfp | __GFP_ZERO, get_order(size));
		page = p ? page_address(p) : NULL;
		pagetable->root_page_table = page;
		return page;
	}

	if (WARN_ON(!p) || WARN_ON(p->ptr >= p->count))
		return NULL;

	page = p->pages[p->ptr++];
	memset(page, 0, size);

	/*
	 * Page table entries don't use virtual addresses, which trips out
	 * kmemleak. kmemleak_alloc_phys() might work, but physical addresses
	 * are mixed with other fields, and I fear kmemleak won't detect that
	 * either.
	 *
	 * Let's just ignore memory passed to the page-table driver for now.
	 */
	kmemleak_ignore(page);

	return page;
}

/**
 * msm_iommu_pagetable_free_pt() - Custom page table free function
 * @cookie: Cookie passed at page table allocation time.
 * @data: Page table to free.
 * @size: Size of the page table. This size should be fixed,
 * and determined at creation time based on the granule size.
 */
static void
msm_iommu_pagetable_free_pt(void *cookie, void *data, size_t size)
{
	struct msm_iommu_pagetable *pagetable = cookie;

	if (unlikely(pagetable->root_page_table == data)) {
		free_pages((unsigned long)data, get_order(size));
		pagetable->root_page_table = NULL;
		return;
	}

	kmem_cache_free(get_pt_cache(&pagetable->base), data);
}

static const struct msm_mmu_funcs pagetable_funcs = {
		.prealloc_count = msm_iommu_pagetable_prealloc_count,
		.prealloc_allocate = msm_iommu_pagetable_prealloc_allocate,
		.prealloc_cleanup = msm_iommu_pagetable_prealloc_cleanup,
		.map = msm_iommu_pagetable_map,
		.unmap = msm_iommu_pagetable_unmap,
		.destroy = msm_iommu_pagetable_destroy,
};

static void msm_iommu_tlb_flush_all(void *cookie)
{
	struct msm_iommu_pagetable *pagetable = cookie;
	struct adreno_smmu_priv *adreno_smmu;

	if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
		return;

	adreno_smmu = dev_get_drvdata(pagetable->parent->dev);

	pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie);

	pm_runtime_put_autosuspend(pagetable->iommu_dev);
}

static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
		size_t granule, void *cookie)
{
	struct msm_iommu_pagetable *pagetable = cookie;
	struct adreno_smmu_priv *adreno_smmu;

	if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
		return;

	adreno_smmu = dev_get_drvdata(pagetable->parent->dev);

	pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie);

	pm_runtime_put_autosuspend(pagetable->iommu_dev);
}

static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
		unsigned long iova, size_t granule, void *cookie)
{
}

static const struct iommu_flush_ops tlb_ops = {
	.tlb_flush_all = msm_iommu_tlb_flush_all,
	.tlb_flush_walk = msm_iommu_tlb_flush_walk,
	.tlb_add_page = msm_iommu_tlb_add_page,
};

static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg);

static size_t get_tblsz(const struct io_pgtable_cfg *cfg)
{
	int pg_shift, bits_per_level;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	/* arm_lpae_iopte is u64: */
	bits_per_level = pg_shift - ilog2(sizeof(u64));

	return sizeof(u64) << bits_per_level;
}
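
/*
 * Illustrative example: with a 4K granule the smallest bit in pgsize_bitmap
 * is 4K, so pg_shift = 12 and bits_per_level = 12 - 3 = 9, giving 512 u64
 * entries per non-root table, i.e. get_tblsz() = 4096 bytes.  That is the
 * object size used for the "msm-mmu-pt" kmem_cache created in
 * msm_iommu_gpu_new() below.
 */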

struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
	struct msm_iommu *iommu = to_msm_iommu(parent);
	struct msm_iommu_pagetable *pagetable;
	const struct io_pgtable_cfg *ttbr1_cfg = NULL;
	struct io_pgtable_cfg ttbr0_cfg;
	int ret;

	/* Get the pagetable configuration from the domain */
	if (adreno_smmu->cookie)
		ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);

	/*
	 * If you hit this WARN_ONCE() you are probably missing an entry in
	 * qcom_smmu_impl_of_match[] in arm-smmu-qcom.c
	 */
	if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables"))
		return ERR_PTR(-ENODEV);

	pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
	if (!pagetable)
		return ERR_PTR(-ENOMEM);

	msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
		MSM_MMU_IOMMU_PAGETABLE);

	/* Clone the TTBR1 cfg as starting point for TTBR0 cfg: */
	ttbr0_cfg = *ttbr1_cfg;

	/* The incoming cfg will have the TTBR1 quirk enabled */
	ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
	ttbr0_cfg.tlb = &tlb_ops;

	if (!kernel_managed) {
		ttbr0_cfg.quirks |= IO_PGTABLE_QUIRK_NO_WARN;

		/*
		 * With userspace managed VM (aka VM_BIND), we need to pre-
		 * allocate pages ahead of time for map/unmap operations,
		 * handing them to io-pgtable via custom alloc/free ops as
		 * needed:
		 */
		ttbr0_cfg.alloc = msm_iommu_pagetable_alloc_pt;
		ttbr0_cfg.free  = msm_iommu_pagetable_free_pt;

		/*
		 * Restrict to single page granules.  Otherwise we may run
		 * into a situation where userspace wants to unmap/remap
		 * only a part of a larger block mapping, which is not
		 * possible without unmapping the entire block.  Which in
		 * turn could cause faults if the GPU is accessing other
		 * parts of the block mapping.
		 *
		 * Note that prior to commit 33729a5fc0ca ("iommu/io-pgtable-arm:
		 * Remove split on unmap behavior") this was handled in
		 * io-pgtable-arm.  But this apparently does not work
		 * correctly on SMMUv3.
		 */
		WARN_ON(!(ttbr0_cfg.pgsize_bitmap & PAGE_SIZE));
		ttbr0_cfg.pgsize_bitmap = PAGE_SIZE;
	}

	pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
	pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
		&ttbr0_cfg, pagetable);

	if (!pagetable->pgtbl_ops) {
		kfree(pagetable);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * If this is the first pagetable that we've allocated, send it back to
	 * the arm-smmu driver as a trigger to set up TTBR0
	 */
	if (atomic_inc_return(&iommu->pagetables) == 1) {
		ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
		if (ret) {
			free_io_pgtable_ops(pagetable->pgtbl_ops);
			kfree(pagetable);
			return ERR_PTR(ret);
		}

		BUG_ON(iommu->prr_page);
		if (adreno_smmu->set_prr_bit) {
			/*
			 * We need a zero'd page for two reasons:
			 *
			 * 1) Reserve a known physical address to use when
			 *    mapping NULL / sparsely resident regions
			 * 2) Read back zero
			 *
			 * It appears the hw drops writes to the PRR region
			 * on the floor, but reads actually return whatever
			 * is in the PRR page.
			 */
			iommu->prr_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
			adreno_smmu->set_prr_addr(adreno_smmu->cookie,
						  page_to_phys(iommu->prr_page));
			adreno_smmu->set_prr_bit(adreno_smmu->cookie, true);
		}
	}

	/* Needed later for TLB flush */
	pagetable->parent = parent;
	pagetable->tlb = ttbr1_cfg->tlb;
	pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
	pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;

	/*
	 * TODO we would like each set of page tables to have a unique ASID
	 * to optimize TLB invalidation.  But iommu_flush_iotlb_all() will
	 * end up flushing the ASID used for TTBR1 pagetables, which is not
	 * what we want.  So for now just use the same ASID as TTBR1.
	 */
	pagetable->asid = 0;

	return &pagetable->base;
}

static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	struct msm_iommu *iommu = arg;
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
	struct adreno_smmu_fault_info info, *ptr = NULL;

	if (adreno_smmu->get_fault_info) {
		adreno_smmu->get_fault_info(adreno_smmu->cookie, &info);
		ptr = &info;
	}

	if (iommu->base.handler)
		return iommu->base.handler(iommu->base.arg, iova, flags, ptr);

	pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);

	return 0;
}

static int msm_disp_fault_handler(struct iommu_domain *domain, struct device *dev,
				  unsigned long iova, int flags, void *arg)
{
	struct msm_iommu *iommu = arg;

	if (iommu->base.handler)
		return iommu->base.handler(iommu->base.arg, iova, flags, NULL);

	return -ENOSYS;
}

static void msm_iommu_set_stall(struct msm_mmu *mmu, bool enable)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);

	if (adreno_smmu->set_stall)
		adreno_smmu->set_stall(adreno_smmu->cookie, enable);
}

static void msm_iommu_detach(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_detach_device(iommu->domain, mmu->dev);
}

static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
			 struct sg_table *sgt, size_t off, size_t len,
			 int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	size_t ret;

	WARN_ON(off != 0);

	/* The arm-smmu driver expects the addresses to be sign extended */
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);
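	/*
	 * e.g. (illustrative) an iova of 0x0001234567000000, which has bit 48
	 * set, becomes 0xffff234567000000 after the sign extension above.
	 */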

	ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
	WARN_ON(!ret);

	return (ret == len) ? 0 : -EINVAL;
}

static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	iommu_unmap(iommu->domain, iova, len);

	return 0;
}

static void msm_iommu_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	iommu_domain_free(iommu->domain);
	kmem_cache_destroy(iommu->pt_cache);
	kfree(iommu);
}

static const struct msm_mmu_funcs funcs = {
		.detach = msm_iommu_detach,
		.map = msm_iommu_map,
		.unmap = msm_iommu_unmap,
		.destroy = msm_iommu_destroy,
		.set_stall = msm_iommu_set_stall,
};

struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
{
	struct iommu_domain *domain;
	struct msm_iommu *iommu;
	int ret;

	if (!device_iommu_mapped(dev))
		return NULL;

	domain = iommu_paging_domain_alloc(dev);
	if (IS_ERR(domain))
		return ERR_CAST(domain);

	iommu_set_pgtable_quirks(domain, quirks);

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu) {
		iommu_domain_free(domain);
		return ERR_PTR(-ENOMEM);
	}

	iommu->domain = domain;
	msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);

	atomic_set(&iommu->pagetables, 0);

	ret = iommu_attach_device(iommu->domain, dev);
	if (ret) {
		iommu_domain_free(domain);
		kfree(iommu);
		return ERR_PTR(ret);
	}

	return &iommu->base;
}

struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks)
{
	struct msm_iommu *iommu;
	struct msm_mmu *mmu;

	mmu = msm_iommu_new(dev, quirks);
	if (IS_ERR_OR_NULL(mmu))
		return mmu;

	iommu = to_msm_iommu(mmu);
	iommu_set_fault_handler(iommu->domain, msm_disp_fault_handler, iommu);

	return mmu;
}

struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
	struct msm_iommu *iommu;
	struct msm_mmu *mmu;

	mmu = msm_iommu_new(dev, quirks);
	if (IS_ERR_OR_NULL(mmu))
		return mmu;

	iommu = to_msm_iommu(mmu);
	if (adreno_smmu && adreno_smmu->cookie) {
		const struct io_pgtable_cfg *cfg =
			adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);
		size_t tblsz = get_tblsz(cfg);

		iommu->pt_cache =
			kmem_cache_create("msm-mmu-pt", tblsz, tblsz, 0, NULL);
	}
	iommu_set_fault_handler(iommu->domain, msm_gpu_fault_handler, iommu);

	/* Enable stall on iommu fault: */
	if (adreno_smmu->set_stall)
		adreno_smmu->set_stall(adreno_smmu->cookie, true);

	return mmu;
}