// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"
#include "iommu-pages.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

#define ARM_LPAE_PTES_PER_TABLE(d)					\
	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
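
/*
 * Worked example (illustrative): with a 4K granule, bits_per_level = 9
 * (512 eight-byte PTEs per table) and ilog2(sizeof(arm_lpae_iopte)) = 3,
 * so for a four-level walk:
 *
 *   ARM_LPAE_LVL_SHIFT(3, d) = (4 - 3) * 9 + 3 = 12   -> 4K pages
 *   ARM_LPAE_LVL_SHIFT(2, d) = (4 - 2) * 9 + 3 = 21   -> 2M blocks
 *   ARM_LPAE_LVL_SHIFT(1, d) = (4 - 1) * 9 + 3 = 30   -> 1G blocks
 *
 * and e.g. ARM_LPAE_LVL_IDX(0x40201000, 3, d) = (0x40201000 >> 12) & 0x1ff = 1.
 */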

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_DBM		(((arm_lpae_iopte)1) << 51)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(ARM_LPAE_PTE_XN | ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY_BIT	7
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)1) << \
					   ARM_LPAE_PTE_AP_RDONLY_BIT)
#define ARM_LPAE_PTE_AP_WR_CLEAN_MASK	(ARM_LPAE_PTE_AP_RDONLY | \
					 ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
/*
 * Without FWB these encode as:
 *  1111 = Normal Outer Write-Back Cacheable / Inner Write-Back Cacheable
 *         (stage 1 is permitted to override)
 *  0101 = Normal Non-cacheable / Inner Non-cacheable
 *  0001 = Device / Device-nGnRE
 * With S2FWB these encode as:
 *  0110 = Force Normal Write-Back
 *  0101 = Normal* is forced Normal-NC, Device unchanged
 *  0001 = Force Device-nGnRE
 */
#define ARM_LPAE_PTE_MEMATTR_FWB_WB	(((arm_lpae_iopte)0x6) << 2)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_writeable_dirty(pte)				\
	(((pte) & ARM_LPAE_PTE_AP_WR_CLEAN_MASK) == ARM_LPAE_PTE_DBM)

#define iopte_set_writeable_clean(ptep)				\
	set_bit(ARM_LPAE_PTE_AP_RDONLY_BIT, (unsigned long *)(ptep))
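
/*
 * Illustrative note: with IO_PGTABLE_QUIRK_ARM_HD, a stage-1 leaf PTE that
 * has ARM_LPAE_PTE_DBM set and ARM_LPAE_PTE_AP_RDONLY clear has been
 * written to by the device, so iopte_writeable_dirty() reports it dirty;
 * iopte_set_writeable_clean() then re-sets AP[2] (bit 7), returning the
 * entry to the writeable-clean state so that the next write marks it
 * dirty again.
 */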

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static inline bool iopte_table(arm_lpae_iopte pte, int lvl)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1))
		return false;
	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
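
/*
 * Illustrative example of the 52-bit packing above, for a 64K granule:
 * paddr = 0x000f_2345_6789_0000 has PA[51:48] = 0xf, so paddr_to_iopte()
 * folds those bits down into PTE bits 15:12 (pte >> 36), giving an
 * address field of 0x2345_6789_f000 within bits 47:12. iopte_to_paddr()
 * undoes this with (paddr << 36), masking with ARM_LPAE_PTE_ADDR_MASK << 4,
 * i.e. bits 51:16.
 */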

/*
 * Convert an index returned by ARM_LPAE_PGD_IDX(), which can point into
 * a concatenated PGD, into the maximum number of entries that can be
 * mapped in the same table page.
 */
static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
{
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);

	return ptes_per_table - (i & (ptes_per_table - 1));
}
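
/*
 * For example (illustrative): with a 4K granule there are 512 PTEs per
 * table page. In a two-page concatenated stage-2 PGD, index i = 700 lands
 * at offset 700 & 511 = 188 within the second page, so at most
 * 512 - 188 = 324 entries can be mapped before crossing a table page
 * boundary.
 */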

static bool selftest_running = false;

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg,
				    void *cookie)
{
	struct device *dev = cfg->iommu_dev;
	int order = get_order(size);
	dma_addr_t dma;
	void *pages;

	VM_BUG_ON((gfp & __GFP_HIGHMEM));

	if (cfg->alloc)
		pages = cfg->alloc(cookie, size, gfp);
	else
		pages = iommu_alloc_pages_node(dev_to_node(dev), gfp, order);

	if (!pages)
		return NULL;

	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);

out_free:
	if (cfg->free)
		cfg->free(cookie, pages, size);
	else
		iommu_free_pages(pages, order);

	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg,
				  void *cookie)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);

	if (cfg->free)
		cfg->free(cookie, pages, size);
	else
		iommu_free_pages(pages, get_order(size));
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}

static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg, int num_entries)
{
	for (int i = 0; i < num_entries; i++)
		ptep[i] = 0;

	if (!cfg->coherent_walk && num_entries)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, int num_entries, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl, int num_entries,
			     arm_lpae_iopte *ptep)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
			/* We require an unmap first */
			WARN_ON(!selftest_running);
			return -EEXIST;
		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_lpae_iopte *tblp;
			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
					     lvl, tblp) != sz) {
				WARN_ON(1);
				return -EINVAL;
			}
		}

	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, 1, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t pgcount,
			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
			  gfp_t gfp, size_t *mapped)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = 0, num_entries, max_entries, map_idx_start;

	/* Find our entry at the current level */
	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += map_idx_start;

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size) {
		max_entries = arm_lpae_max_entries(map_idx_start, data);
		num_entries = min_t(int, pgcount, max_entries);
		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
		if (!ret)
			*mapped += num_entries * size;

		return ret;
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg, data->iop.cookie);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg, data->iop.cookie);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, 1, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!selftest_running);
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
			      cptep, gfp, mapped);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		else if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_HD)
			pte |= ARM_LPAE_PTE_DBM;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO) {
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		} else if (prot & IOMMU_CACHE) {
			if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_S2FWB)
				pte |= ARM_LPAE_PTE_MEMATTR_FWB_WB;
			else
				pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		} else {
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
		}
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}

static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return -EINVAL;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
			     ptep, gfp, mapped);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}
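
/*
 * Minimal usage sketch (illustrative only, not part of this driver): an
 * IOMMU driver obtains these callbacks via alloc_io_pgtable_ops() and maps
 * through them, with cfg, cookie, iova, paddr and ret being the caller's
 * own:
 *
 *	struct io_pgtable_ops *ops;
 *	size_t mapped = 0;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *	if (!ops)
 *		return -ENOMEM;
 *	ret = ops->map_pages(ops, iova, paddr, SZ_4K, 16,
 *			     IOMMU_READ | IOMMU_WRITE, GFP_KERNEL, &mapped);
 *
 * See the selftests at the bottom of this file for a complete in-tree
 * example.
 */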

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg, data->iop.cookie);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	int i = 0, num_entries, max_entries, unmap_idx_start;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += unmap_idx_start;
	pte = READ_ONCE(*ptep);
	if (WARN_ON(!pte))
		return 0;

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		max_entries = arm_lpae_max_entries(unmap_idx_start, data);
		num_entries = min_t(int, pgcount, max_entries);

		/* Find and handle non-leaf entries */
		for (i = 0; i < num_entries; i++) {
			pte = READ_ONCE(ptep[i]);
			if (WARN_ON(!pte))
				break;

			if (!iopte_leaf(pte, lvl, iop->fmt)) {
				__arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);

				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
							  ARM_LPAE_GRANULE(data));
				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
			}
		}

		/* Clear the remaining entries */
		__arm_lpae_clear_pte(ptep, &iop->cfg, i);

		if (gather && !iommu_iotlb_gather_queued(gather))
			for (int j = 0; j < i; j++)
				io_pgtable_tlb_add_page(iop, gather, iova + j * size, size);

		return i * size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		WARN_ONCE(true, "Unmap of a partial large IOPTE is not allowed");
		return 0;
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}

static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
				data->start_level, ptep);
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	arm_lpae_iopte pte, *ptep = data->pgd;
	int lvl = data->start_level;

	do {
		/* Valid IOPTE pointer? */
		if (!ptep)
			return 0;

		/* Grab the IOPTE we're interested in */
		ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);
		pte = READ_ONCE(*ptep);

		/* Valid entry? */
		if (!pte)
			return 0;

		/* Leaf entry? */
		if (iopte_leaf(pte, lvl, data->iop.fmt))
			goto found_translation;

		/* Take it to the next level */
		ptep = iopte_deref(pte, data);
	} while (++lvl < ARM_LPAE_MAX_LEVELS);

	/* Ran out of page tables to walk */
	return 0;

found_translation:
	iova &= (ARM_LPAE_BLOCK_SIZE(lvl, data) - 1);
	return iopte_to_paddr(pte, data) | iova;
}

struct io_pgtable_walk_data {
	struct iommu_dirty_bitmap	*dirty;
	unsigned long			flags;
	u64				addr;
	const u64			end;
};

static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
				       struct io_pgtable_walk_data *walk_data,
				       arm_lpae_iopte *ptep,
				       int lvl);

static int io_pgtable_visit_dirty(struct arm_lpae_io_pgtable *data,
				  struct io_pgtable_walk_data *walk_data,
				  arm_lpae_iopte *ptep, int lvl)
{
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte pte = READ_ONCE(*ptep);

	if (iopte_leaf(pte, lvl, iop->fmt)) {
		size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);

		if (iopte_writeable_dirty(pte)) {
			iommu_dirty_bitmap_record(walk_data->dirty,
						  walk_data->addr, size);
			if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
				iopte_set_writeable_clean(ptep);
		}
		walk_data->addr += size;
		return 0;
	}

	if (WARN_ON(!iopte_table(pte, lvl)))
		return -EINVAL;

	ptep = iopte_deref(pte, data);
	return __arm_lpae_iopte_walk_dirty(data, walk_data, ptep, lvl + 1);
}

static int __arm_lpae_iopte_walk_dirty(struct arm_lpae_io_pgtable *data,
				       struct io_pgtable_walk_data *walk_data,
				       arm_lpae_iopte *ptep,
				       int lvl)
{
	u32 idx;
	int max_entries, ret;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return -EINVAL;

	if (lvl == data->start_level)
		max_entries = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
	else
		max_entries = ARM_LPAE_PTES_PER_TABLE(data);

	for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data);
	     (idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) {
		ret = io_pgtable_visit_dirty(data, walk_data, ptep + idx, lvl);
		if (ret)
			return ret;
	}

	return 0;
}

static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
					 unsigned long iova, size_t size,
					 unsigned long flags,
					 struct iommu_dirty_bitmap *dirty)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct io_pgtable_walk_data walk_data = {
		.dirty = dirty,
		.flags = flags,
		.addr = iova,
		.end = iova + size,
	};
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = data->start_level;

	if (WARN_ON(!size))
		return -EINVAL;
	if (WARN_ON((iova + size - 1) & ~(BIT(cfg->ias) - 1)))
		return -EINVAL;
	if (data->iop.fmt != ARM_64_LPAE_S1)
		return -EINVAL;

	return __arm_lpae_iopte_walk_dirty(data, &walk_data, ptep, lvl);
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}
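
/*
 * Illustrative example: with PAGE_SIZE == SZ_4K and a requested
 * pgsize_bitmap of (SZ_4K | SZ_64K), the granule chosen above is 4K and
 * page_sizes becomes (SZ_4K | SZ_2M | SZ_1G), so the intersection leaves
 * just SZ_4K: 64K is not a valid page or block size in the 4K regime.
 */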

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map_pages	= arm_lpae_map_pages,
		.unmap_pages	= arm_lpae_unmap_pages,
		.iova_to_phys	= arm_lpae_iova_to_phys,
		.read_and_clear_dirty = arm_lpae_read_and_clear_dirty,
	};

	return data;
}
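
/*
 * Illustrative example of the geometry above: for ias = 48 with a 4K
 * granule, pg_shift = 12 and bits_per_level = 12 - 3 = 9, so va_bits = 36,
 * levels = DIV_ROUND_UP(36, 9) = 4, start_level = 0 and
 * pgd_bits = 36 - 9 * 3 = 9, i.e. a single 512-entry (4K) pgd.
 */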

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
			    IO_PGTABLE_QUIRK_ARM_HD))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
			goto out_free_data;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		else
			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/*
	 * Concatenate PGDs at level 1 if possible in order to reduce
	 * the depth of the stage-2 walk.
	 */
	if (data->start_level == 0) {
		unsigned long pgd_pages;

		pgd_pages = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
		if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) {
			data->pgd_bits += data->bits_per_level;
			data->start_level++;
		}
	}
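
	/*
	 * Illustrative example: for ias = 40 with a 4K granule, va_bits = 28
	 * gives levels = 4, start_level = 0 and pgd_bits = 28 - 9 * 3 = 1,
	 * i.e. a 2-entry level-0 pgd. Since 2 <= ARM_LPAE_S2_MAX_CONCAT_PAGES,
	 * two level-1 tables are concatenated instead (pgd_bits = 10,
	 * start_level = 1), saving one level of walk.
	 */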

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		selftest_running = false;				\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size, mapped;
	struct io_pgtable_ops *ops;

	selftest_running = true;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_READ | IOMMU_WRITE |
					   IOMMU_NOEXEC | IOMMU_CACHE,
					   GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map_pages(ops, iova, iova + size, size, 1,
					    IOMMU_READ | IOMMU_NOEXEC,
					    GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_WRITE, GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/*
		 * Map/unmap the largest supported page at the very top of
		 * the IAS; this can trigger corner cases in the concatenated
		 * page tables.
		 */
		mapped = 0;
		size = 1UL << __fls(cfg->pgsize_bitmap);
		iova = (1UL << cfg->ias) - size;
		if (ops->map_pages(ops, iova, iova, size, 1,
				   IOMMU_READ | IOMMU_WRITE |
				   IOMMU_NOEXEC | IOMMU_CACHE,
				   GFP_KERNEL, &mapped))
			return __FAIL(ops, i);
		if (mapped != size)
			return __FAIL(ops, i);
		if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
			return __FAIL(ops, i);

		free_io_pgtable_ops(ops);
	}

	selftest_running = false;
	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int ias[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, pass = 0, fail = 0;
	struct device dev;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.oas = 48,
		.coherent_walk = true,
		.iommu_dev = &dev,
	};

	/* __arm_lpae_alloc_pages() merely needs dev_to_node() to work */
	set_dev_node(&dev, NUMA_NO_NODE);

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(ias); ++j) {
			cfg.pgsize_bitmap = pgsize[i];
			cfg.ias = ias[j];
			pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n",
				pgsize[i], ias[j]);
			if (arm_lpae_run_tests(&cfg))
				fail++;
			else
				pass++;
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif