xref: /linux/drivers/gpu/drm/msm/msm_iommu.c (revision d7a5ac67d82c50c1f909c7056f78b1630a0f71cf)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/adreno-smmu-priv.h>
#include <linux/io-pgtable.h>
#include "msm_drv.h"
#include "msm_mmu.h"

struct msm_iommu {
	struct msm_mmu base;
	struct iommu_domain *domain;
	atomic_t pagetables;
};

#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)

struct msm_iommu_pagetable {
	struct msm_mmu base;
	struct msm_mmu *parent;
	struct io_pgtable_ops *pgtbl_ops;
	const struct iommu_flush_ops *tlb;
	struct device *iommu_dev;
	unsigned long pgsize_bitmap;	/* Bitmap of page sizes in use */
	phys_addr_t ttbr;
	u32 asid;
};

static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu)
{
	return container_of(mmu, struct msm_iommu_pagetable, base);
}

/* based on iommu_pgsize() in iommu.c: */
static size_t calc_pgsize(struct msm_iommu_pagetable *pagetable,
			   unsigned long iova, phys_addr_t paddr,
			   size_t size, size_t *count)
{
	unsigned int pgsize_idx, pgsize_idx_next;
	unsigned long pgsizes;
	size_t offset, pgsize, pgsize_next;
	unsigned long addr_merge = paddr | iova;

	/* Page sizes supported by the hardware and small enough for @size */
	pgsizes = pagetable->pgsize_bitmap & GENMASK(__fls(size), 0);

	/* Constrain the page sizes further based on the maximum alignment */
	if (likely(addr_merge))
		pgsizes &= GENMASK(__ffs(addr_merge), 0);

	/* Make sure we have at least one suitable page size */
	BUG_ON(!pgsizes);

	/* Pick the biggest page size remaining */
	pgsize_idx = __fls(pgsizes);
	pgsize = BIT(pgsize_idx);
	if (!count)
		return pgsize;
	/* Find the next biggest supported page size, if it exists */
	pgsizes = pagetable->pgsize_bitmap & ~GENMASK(pgsize_idx, 0);
	if (!pgsizes)
		goto out_set_count;

	pgsize_idx_next = __ffs(pgsizes);
	pgsize_next = BIT(pgsize_idx_next);

	/*
	 * There's no point trying a bigger page size unless the virtual
	 * and physical addresses are similarly offset within the larger page.
	 */
	if ((iova ^ paddr) & (pgsize_next - 1))
		goto out_set_count;

	/* Calculate the offset to the next page size alignment boundary */
	offset = pgsize_next - (addr_merge & (pgsize_next - 1));

	/*
	 * If size is big enough to accommodate the larger page, reduce
	 * the number of smaller pages.
	 */
	if (offset + pgsize_next <= size)
		size = offset;

out_set_count:
	*count = size >> pgsize_idx;
	return pgsize;
}
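
/*
 * Worked example (illustrative numbers, not from the original source):
 * with a pgsize_bitmap of 4K | 2M | 1G and iova == paddr == 0x201000,
 * size == 4M, the lowest set bit of addr_merge restricts us to 4K
 * pages, and the clamp against the next 2M boundary yields
 * size = offset = 0x1ff000, so count == 511.  The follow-up call then
 * starts 2M-aligned at 0x400000 and can map a single 2M entry, leaving
 * a final 4K tail.
 */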

static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova,
		size_t size)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;

	while (size) {
		size_t unmapped, pgsize, count;

		pgsize = calc_pgsize(pagetable, iova, iova, size, &count);

		unmapped = ops->unmap_pages(ops, iova, pgsize, count, NULL);
		if (!unmapped)
			break;

		iova += unmapped;
		size -= unmapped;
	}

	iommu_flush_iotlb_all(to_msm_iommu(pagetable->parent)->domain);

	return (size == 0) ? 0 : -EINVAL;
}

static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct io_pgtable_ops *ops = pagetable->pgtbl_ops;
	struct scatterlist *sg;
	u64 addr = iova;
	unsigned int i;

	for_each_sgtable_sg(sgt, sg, i) {
		size_t size = sg->length;
		phys_addr_t phys = sg_phys(sg);

		while (size) {
			size_t pgsize, count, mapped = 0;
			int ret;

			pgsize = calc_pgsize(pagetable, addr, phys, size, &count);

			ret = ops->map_pages(ops, addr, phys, pgsize, count,
					     prot, GFP_KERNEL, &mapped);

			/*
			 * map_pages could fail after mapping some of the
			 * pages, so update the counters before error
			 * handling.
			 */
			phys += mapped;
			addr += mapped;
			size -= mapped;

			if (ret) {
				msm_iommu_pagetable_unmap(mmu, iova, addr - iova);
				return -EINVAL;
			}
		}
	}

	return 0;
}

static void msm_iommu_pagetable_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu_pagetable *pagetable = to_pagetable(mmu);
	struct msm_iommu *iommu = to_msm_iommu(pagetable->parent);
	struct adreno_smmu_priv *adreno_smmu =
		dev_get_drvdata(pagetable->parent->dev);

	/*
	 * If this is the last attached pagetable for the parent,
	 * disable TTBR0 in the arm-smmu driver
	 */
	if (atomic_dec_return(&iommu->pagetables) == 0)
		adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, NULL);

	free_io_pgtable_ops(pagetable->pgtbl_ops);
	kfree(pagetable);
}

int msm_iommu_pagetable_params(struct msm_mmu *mmu,
		phys_addr_t *ttbr, int *asid)
{
	struct msm_iommu_pagetable *pagetable;

	if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
		return -EINVAL;

	pagetable = to_pagetable(mmu);

	if (ttbr)
		*ttbr = pagetable->ttbr;

	if (asid)
		*asid = pagetable->asid;

	return 0;
}
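
/*
 * Illustrative consumer (a sketch; emit_smmu_table_update() is a
 * hypothetical stand-in for the GPU-side mechanism that retargets
 * TTBR0, not a function in this driver):
 *
 *	phys_addr_t ttbr;
 *	int asid;
 *
 *	if (!msm_iommu_pagetable_params(mmu, &ttbr, &asid))
 *		emit_smmu_table_update(ring, ttbr, asid);
 */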

struct iommu_domain_geometry *msm_iommu_get_geometry(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	return &iommu->domain->geometry;
}
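
/*
 * Walk the page table for @iova and report the raw descriptor observed
 * at each translation level in @ptes (one entry per LPAE level).  A
 * debug helper; the exact layout of arm_lpae_io_pgtable_walk_data is
 * owned by io-pgtable-arm.
 */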
int
msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[4])
{
	struct msm_iommu_pagetable *pagetable;
	struct arm_lpae_io_pgtable_walk_data wd = {};

	if (mmu->type != MSM_MMU_IOMMU_PAGETABLE)
		return -EINVAL;

	pagetable = to_pagetable(mmu);

	if (!pagetable->pgtbl_ops->pgtable_walk)
		return -EINVAL;

	pagetable->pgtbl_ops->pgtable_walk(pagetable->pgtbl_ops, iova, &wd);

	for (int i = 0; i < ARRAY_SIZE(wd.ptes); i++)
		ptes[i] = wd.ptes[i];

	return 0;
}

static const struct msm_mmu_funcs pagetable_funcs = {
	.map = msm_iommu_pagetable_map,
	.unmap = msm_iommu_pagetable_unmap,
	.destroy = msm_iommu_pagetable_destroy,
};

static void msm_iommu_tlb_flush_all(void *cookie)
{
	struct msm_iommu_pagetable *pagetable = cookie;
	struct adreno_smmu_priv *adreno_smmu;

	if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
		return;

	adreno_smmu = dev_get_drvdata(pagetable->parent->dev);

	pagetable->tlb->tlb_flush_all((void *)adreno_smmu->cookie);

	pm_runtime_put_autosuspend(pagetable->iommu_dev);
}

static void msm_iommu_tlb_flush_walk(unsigned long iova, size_t size,
		size_t granule, void *cookie)
{
	struct msm_iommu_pagetable *pagetable = cookie;
	struct adreno_smmu_priv *adreno_smmu;

	if (!pm_runtime_get_if_in_use(pagetable->iommu_dev))
		return;

	adreno_smmu = dev_get_drvdata(pagetable->parent->dev);

	pagetable->tlb->tlb_flush_walk(iova, size, granule, (void *)adreno_smmu->cookie);

	pm_runtime_put_autosuspend(pagetable->iommu_dev);
}

static void msm_iommu_tlb_add_page(struct iommu_iotlb_gather *gather,
		unsigned long iova, size_t granule, void *cookie)
{
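	/*
	 * Deliberately a no-op: unmap-side TLB maintenance is done
	 * explicitly via iommu_flush_iotlb_all() in
	 * msm_iommu_pagetable_unmap().
	 */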
}

static const struct iommu_flush_ops tlb_ops = {
	.tlb_flush_all = msm_iommu_tlb_flush_all,
	.tlb_flush_walk = msm_iommu_tlb_flush_walk,
	.tlb_add_page = msm_iommu_tlb_add_page,
};

static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg);

struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev);
	struct msm_iommu *iommu = to_msm_iommu(parent);
	struct msm_iommu_pagetable *pagetable;
	const struct io_pgtable_cfg *ttbr1_cfg = NULL;
	struct io_pgtable_cfg ttbr0_cfg;
	int ret;

	/* Get the pagetable configuration from the domain */
	if (adreno_smmu->cookie)
		ttbr1_cfg = adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie);

	/*
	 * If you hit this WARN_ONCE() you are probably missing an entry in
	 * qcom_smmu_impl_of_match[] in arm-smmu-qcom.c
	 */
	if (WARN_ONCE(!ttbr1_cfg, "No per-process page tables"))
		return ERR_PTR(-ENODEV);

	pagetable = kzalloc(sizeof(*pagetable), GFP_KERNEL);
	if (!pagetable)
		return ERR_PTR(-ENOMEM);

	msm_mmu_init(&pagetable->base, parent->dev, &pagetable_funcs,
		MSM_MMU_IOMMU_PAGETABLE);

	/* Clone the TTBR1 cfg as a starting point for the TTBR0 cfg: */
	ttbr0_cfg = *ttbr1_cfg;

	/* The incoming cfg will have the TTBR1 quirk enabled */
	ttbr0_cfg.quirks &= ~IO_PGTABLE_QUIRK_ARM_TTBR1;
	ttbr0_cfg.tlb = &tlb_ops;

	pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1,
		&ttbr0_cfg, pagetable);

	if (!pagetable->pgtbl_ops) {
		kfree(pagetable);
		return ERR_PTR(-ENOMEM);
	}

	/*
	 * If this is the first pagetable that we've allocated, send it back to
	 * the arm-smmu driver as a trigger to set up TTBR0
	 */
	if (atomic_inc_return(&iommu->pagetables) == 1) {
		ret = adreno_smmu->set_ttbr0_cfg(adreno_smmu->cookie, &ttbr0_cfg);
		if (ret) {
			free_io_pgtable_ops(pagetable->pgtbl_ops);
			kfree(pagetable);
			return ERR_PTR(ret);
		}
	}

	/* Needed later for TLB flush */
	pagetable->parent = parent;
	pagetable->tlb = ttbr1_cfg->tlb;
	pagetable->iommu_dev = ttbr1_cfg->iommu_dev;
	pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap;
	pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr;

	/*
	 * TODO we would like each set of page tables to have a unique ASID
	 * to optimize TLB invalidation.  But iommu_flush_iotlb_all() will
	 * end up flushing the ASID used for TTBR1 pagetables, which is not
	 * what we want.  So for now just use the same ASID as TTBR1.
	 */
	pagetable->asid = 0;

	return &pagetable->base;
}
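
/*
 * Illustrative usage (a sketch; gpu->aspace is an assumption about the
 * caller's structure, not code from this file):
 *
 *	struct msm_mmu *pt = msm_iommu_pagetable_create(gpu->aspace->mmu);
 *
 *	if (IS_ERR(pt))
 *		return PTR_ERR(pt);
 *	... hand pt to a per-process address space ...
 */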

static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev,
		unsigned long iova, int flags, void *arg)
{
	struct msm_iommu *iommu = arg;
	struct msm_mmu *mmu = &iommu->base;
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(iommu->base.dev);
	struct adreno_smmu_fault_info info, *ptr = NULL;

	if (adreno_smmu->get_fault_info) {
		adreno_smmu->get_fault_info(adreno_smmu->cookie, &info);
		ptr = &info;
	}

	if (iommu->base.handler)
		return iommu->base.handler(iommu->base.arg, iova, flags, ptr);

	pr_warn_ratelimited("*** fault: iova=%16lx, flags=%d\n", iova, flags);

	if (mmu->funcs->resume_translation)
		mmu->funcs->resume_translation(mmu);

	return 0;
}

static int msm_disp_fault_handler(struct iommu_domain *domain, struct device *dev,
				  unsigned long iova, int flags, void *arg)
{
	struct msm_iommu *iommu = arg;

	if (iommu->base.handler)
		return iommu->base.handler(iommu->base.arg, iova, flags, NULL);

	return -ENOSYS;
}

static void msm_iommu_resume_translation(struct msm_mmu *mmu)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(mmu->dev);

	if (adreno_smmu->resume_translation)
		adreno_smmu->resume_translation(adreno_smmu->cookie, true);
}

static void msm_iommu_detach(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_detach_device(iommu->domain, mmu->dev);
}

static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
		struct sg_table *sgt, size_t len, int prot)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	size_t ret;

	/* The arm-smmu driver expects the addresses to be sign extended */
	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);
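
	/*
	 * e.g. an iova of 0x0001000000000000 (bit 48 set) becomes
	 * 0xffff000000000000 after the OR above.
	 */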
	ret = iommu_map_sgtable(iommu->domain, iova, sgt, prot);
	WARN_ON(!ret);

	return (ret == len) ? 0 : -EINVAL;
}

static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova, size_t len)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	if (iova & BIT_ULL(48))
		iova |= GENMASK_ULL(63, 49);

	iommu_unmap(iommu->domain, iova, len);

	return 0;
}

static void msm_iommu_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_domain_free(iommu->domain);
	kfree(iommu);
}

static const struct msm_mmu_funcs funcs = {
	.detach = msm_iommu_detach,
	.map = msm_iommu_map,
	.unmap = msm_iommu_unmap,
	.destroy = msm_iommu_destroy,
	.resume_translation = msm_iommu_resume_translation,
};
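
/*
 * Returns NULL if the device has no IOMMU attached, an ERR_PTR() on
 * failure, or a valid msm_mmu pointer otherwise; callers use
 * IS_ERR_OR_NULL() to handle the first two cases (see
 * msm_iommu_disp_new() and msm_iommu_gpu_new() below).
 */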
struct msm_mmu *msm_iommu_new(struct device *dev, unsigned long quirks)
{
	struct iommu_domain *domain;
	struct msm_iommu *iommu;
	int ret;

	if (!device_iommu_mapped(dev))
		return NULL;

	domain = iommu_paging_domain_alloc(dev);
	if (IS_ERR(domain))
		return ERR_CAST(domain);

	iommu_set_pgtable_quirks(domain, quirks);

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu) {
		iommu_domain_free(domain);
		return ERR_PTR(-ENOMEM);
	}

	iommu->domain = domain;
	msm_mmu_init(&iommu->base, dev, &funcs, MSM_MMU_IOMMU);

	atomic_set(&iommu->pagetables, 0);

	ret = iommu_attach_device(iommu->domain, dev);
	if (ret) {
		iommu_domain_free(domain);
		kfree(iommu);
		return ERR_PTR(ret);
	}

	return &iommu->base;
}

struct msm_mmu *msm_iommu_disp_new(struct device *dev, unsigned long quirks)
{
	struct msm_iommu *iommu;
	struct msm_mmu *mmu;

	mmu = msm_iommu_new(dev, quirks);
	if (IS_ERR_OR_NULL(mmu))
		return mmu;

	iommu = to_msm_iommu(mmu);
	iommu_set_fault_handler(iommu->domain, msm_disp_fault_handler, iommu);

	return mmu;
}

struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsigned long quirks)
{
	struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(dev);
	struct msm_iommu *iommu;
	struct msm_mmu *mmu;

	mmu = msm_iommu_new(dev, quirks);
	if (IS_ERR_OR_NULL(mmu))
		return mmu;

	iommu = to_msm_iommu(mmu);
	iommu_set_fault_handler(iommu->domain, msm_gpu_fault_handler, iommu);

	/* Enable stall on iommu fault: */
	if (adreno_smmu->set_stall)
		adreno_smmu->set_stall(adreno_smmu->cookie, true);

	return mmu;
}