// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/device/faux.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"
#include "iommu-pages.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

#define ARM_LPAE_PTES_PER_TABLE(d)					\
	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))

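/*
 * Worked example, assuming a 4K granule (so bits_per_level = 9,
 * ilog2(sizeof(arm_lpae_iopte)) = 3 and ARM_LPAE_MAX_LEVELS = 4):
 *   ARM_LPAE_LVL_SHIFT(3,d) = (4 - 3) * 9 + 3 = 12 -> 4K pages
 *   ARM_LPAE_LVL_SHIFT(2,d) = (4 - 2) * 9 + 3 = 21 -> 2M blocks
 *   ARM_LPAE_LVL_SHIFT(1,d) = (4 - 1) * 9 + 3 = 30 -> 1G blocks
 * so ARM_LPAE_BLOCK_SIZE(2,d) = 1ULL << 21 = SZ_2M, and
 * ARM_LPAE_LVL_IDX() masks out the 9 index bits (plus any extra PGD
 * bits at the start level) sitting above that shift.
 */
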
/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_DBM		(((arm_lpae_iopte)1) << 51)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY_BIT	7
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)1) << \
					   ARM_LPAE_PTE_AP_RDONLY_BIT)
#define ARM_LPAE_PTE_AP_WR_CLEAN_MASK	(ARM_LPAE_PTE_AP_RDONLY | \
					 ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
/*
 * For !FWB these encode to:
 *  1111 = Normal outer write-back cacheable / Inner Write-Back Cacheable
 *         Permit S1 to override
 *  0101 = Normal Non-cacheable / Inner Non-cacheable
 *  0001 = Device / Device-nGnRE
 * For S2FWB these encode to:
 *  0110 Force Normal Write Back
 *  0101 Normal* is forced Normal-NC, Device unchanged
 *  0001 Force Device-nGnRE
 */
#define ARM_LPAE_PTE_MEMATTR_FWB_WB	(((arm_lpae_iopte)0x6) << 2)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_writeable_dirty(pte)				\
	(((pte) & ARM_LPAE_PTE_AP_WR_CLEAN_MASK) == ARM_LPAE_PTE_DBM)

#define iopte_set_writeable_clean(ptep)				\
	set_bit(ARM_LPAE_PTE_AP_RDONLY_BIT, (unsigned long *)(ptep))

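/*
 * With DBM set, hardware manages the AP_RDONLY bit: a writeable-clean
 * PTE has both DBM and RDONLY set, and the first DMA write clears
 * RDONLY to mark the page dirty. Hence "writeable and dirty" above
 * means DBM set with RDONLY clear, and cleaning simply sets RDONLY
 * again.
 */
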
struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;

static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static inline bool iopte_table(arm_lpae_iopte pte, int lvl)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1))
		return false;
	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}

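/*
 * Example of the packing above for a 64K granule, where the PTE output
 * address field starts at bit 16 and bits 15:12 are RES0: for a 52-bit
 * paddr, paddr_to_iopte() ORs in (paddr >> 36) so that PA[51:48] land
 * in PTE bits 15:12, and iopte_to_paddr() shifts them back up before
 * masking with ARM_LPAE_PTE_ADDR_MASK << 4 (i.e. bits 51:16).
 */
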
/*
 * Convert an index returned by ARM_LPAE_PGD_IDX(), which can point into
 * a concatenated PGD, into the maximum number of entries that can be
 * mapped in the same table page.
 */
static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
{
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);

	return ptes_per_table - (i & (ptes_per_table - 1));
}

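/*
 * E.g. with 512 PTEs per table and a concatenated PGD of 2048 entries,
 * an index of 1000 yields 512 - (1000 & 511) = 24 entries: a contiguous
 * run must not cross into the next underlying table page.
 */
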
/*
 * Check if concatenated PGDs are mandatory according to Arm DDI0487 (K.a)
 * 1) R_DXBSH: For 16KB, and 48-bit input size, use level 1 instead of 0.
 * 2) R_SRKBC: After deciphering the table of PA sizes and valid initial lookups
 *   a) 40 bits PA size with 4K: use level 1 instead of level 0 (2 tables for ias = oas)
 *   b) 40 bits PA size with 16K: use level 2 instead of level 1 (16 tables for ias = oas)
 *   c) 42 bits PA size with 4K: use level 1 instead of level 0 (8 tables for ias = oas)
 *   d) 48 bits PA size with 16K: use level 1 instead of level 0 (2 tables for ias = oas)
 */
static inline bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg,
					     struct arm_lpae_io_pgtable *data)
{
	unsigned int ias = cfg->ias;
	unsigned int oas = cfg->oas;

	/* Covers 1 and 2.d */
	if ((ARM_LPAE_GRANULE(data) == SZ_16K) && (data->start_level == 0))
		return (oas == 48) || (ias == 48);

	/* Covers 2.a and 2.c */
	if ((ARM_LPAE_GRANULE(data) == SZ_4K) && (data->start_level == 0))
		return (oas == 40) || (oas == 42);

	/* Case 2.b */
	return (ARM_LPAE_GRANULE(data) == SZ_16K) &&
	       (data->start_level == 1) && (oas == 40);
}

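/*
 * E.g. case 2.a: ias = oas = 40 with a 4K granule gives va_bits = 28,
 * hence four levels with pgd_bits = 1. The stage-2 allocation path then
 * bumps start_level to 1 and pgd_bits to 10, i.e. two concatenated
 * level-1 table pages replace the single-entry level 0.
 */
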
static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg,
				    void *cookie)
{
	struct device *dev = cfg->iommu_dev;
	size_t alloc_size;
	dma_addr_t dma;
	void *pages;

	/*
	 * For very small starting-level translation tables the HW requires a
	 * minimum alignment of at least 64 to cover all cases.
	 */
	alloc_size = max(size, 64);
	if (cfg->alloc)
		pages = cfg->alloc(cookie, alloc_size, gfp);
	else
		pages = iommu_alloc_pages_node_sz(dev_to_node(dev), gfp,
						  alloc_size);

	if (!pages)
		return NULL;

	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);

out_free:
	if (cfg->free)
		cfg->free(cookie, pages, size);
	else
		iommu_free_pages(pages);

	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg,
				  void *cookie)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);

	if (cfg->free)
		cfg->free(cookie, pages, size);
	else
		iommu_free_pages(pages);
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}

static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg, int num_entries)
{
	for (int i = 0; i < num_entries; i++)
		ptep[i] = 0;

	if (!cfg->coherent_walk && num_entries)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, int num_entries, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl, int num_entries,
			     arm_lpae_iopte *ptep)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
			/* We require an unmap first */
			WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
			return -EEXIST;
		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_lpae_iopte *tblp;
			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
					     lvl, tblp) != sz) {
				WARN_ON(1);
				return -EINVAL;
			}
		}

	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, 1, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}

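/*
 * The cmpxchg in arm_lpae_install_table() makes racing table
 * installations safe: the loser observes a non-zero "old", frees its
 * own table and walks the winner's instead. For a non-coherent walk,
 * whoever takes the slow path also cleans the PTE to the point of
 * coherency, and the winner then rewrites it with ARM_LPAE_PTE_SW_SYNC
 * set so later walkers can skip the sync.
 */
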
static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t pgcount,
			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
			  gfp_t gfp, size_t *mapped)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = 0, num_entries, max_entries, map_idx_start;

	/* Find our entry at the current level */
	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += map_idx_start;

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size) {
		max_entries = arm_lpae_max_entries(map_idx_start, data);
		num_entries = min_t(int, pgcount, max_entries);
		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
		if (!ret)
			*mapped += num_entries * size;

		return ret;
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg, data->iop.cookie);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg, data->iop.cookie);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, 1, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!(cfg->quirks & IO_PGTABLE_QUIRK_NO_WARN));
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
			      cptep, gfp, mapped);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		else if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_HD)
			pte |= ARM_LPAE_PTE_DBM;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO) {
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		} else if (prot & IOMMU_CACHE) {
			if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_S2FWB)
				pte |= ARM_LPAE_PTE_MEMATTR_FWB_WB;
			else
				pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		} else {
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
		}
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}

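/*
 * E.g. IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE on ARM_64_LPAE_S1 with no
 * quirks composes to ARM_LPAE_PTE_nG | ARM_LPAE_PTE_AP_UNPRIV |
 * (ARM_LPAE_MAIR_ATTR_IDX_CACHE << ARM_LPAE_PTE_ATTRINDX_SHIFT) |
 * ARM_LPAE_PTE_SH_IS | ARM_LPAE_PTE_AF: a non-global, unprivileged,
 * write-back cacheable, inner-shareable entry with the access flag
 * preset.
 */
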
static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return -EINVAL;

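	/*
	 * iaext relies on the arithmetic shift above: a valid TTBR0 iova
	 * has all bits above ias clear, so (s64)iova >> ias == 0, while a
	 * valid TTBR1 iova has them all set, yielding -1, which the quirk
	 * case below inverts back to 0; anything else trips the WARN_ON.
	 */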
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return -EINVAL;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
			     ptep, gfp, mapped);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg, data->iop.cookie);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	int i = 0, num_entries, max_entries, unmap_idx_start;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += unmap_idx_start;
	pte = READ_ONCE(*ptep);
	if (!pte) {
		WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
		return 0;
	}

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		max_entries = arm_lpae_max_entries(unmap_idx_start, data);
		num_entries = min_t(int, pgcount, max_entries);

		/* Find and handle non-leaf entries */
		for (i = 0; i < num_entries; i++) {
			pte = READ_ONCE(ptep[i]);
			if (!pte) {
				WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
				break;
			}

			if (!iopte_leaf(pte, lvl, iop->fmt)) {
				__arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);

				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
							  ARM_LPAE_GRANULE(data));
				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
			}
		}

		/* Clear the remaining entries */
		__arm_lpae_clear_pte(ptep, &iop->cfg, i);

		if (gather && !iommu_iotlb_gather_queued(gather))
			for (int j = 0; j < i; j++)
				io_pgtable_tlb_add_page(iop, gather, iova + j * size, size);

		return i * size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		WARN_ONCE(true, "Unmap of a partial large IOPTE is not allowed");
		return 0;
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}

static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
				data->start_level, ptep);
}

struct io_pgtable_walk_data {
	struct io_pgtable		*iop;
	void				*data;
	int (*visit)(struct io_pgtable_walk_data *walk_data, int lvl,
		     arm_lpae_iopte *ptep, size_t size);
	unsigned long			flags;
	u64				addr;
	const u64			end;
};

static int __arm_lpae_iopte_walk(struct arm_lpae_io_pgtable *data,
				 struct io_pgtable_walk_data *walk_data,
				 arm_lpae_iopte *ptep,
				 int lvl);

struct iova_to_phys_data {
	arm_lpae_iopte pte;
	int lvl;
};

static int visit_iova_to_phys(struct io_pgtable_walk_data *walk_data, int lvl,
			      arm_lpae_iopte *ptep, size_t size)
{
	struct iova_to_phys_data *data = walk_data->data;
	data->pte = *ptep;
	data->lvl = lvl;
	return 0;
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct iova_to_phys_data d;
	struct io_pgtable_walk_data walk_data = {
		.data = &d,
		.visit = visit_iova_to_phys,
		.addr = iova,
		.end = iova + 1,
	};
	int ret;

	ret = __arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);
	if (ret)
		return 0;

	iova &= (ARM_LPAE_BLOCK_SIZE(d.lvl, data) - 1);
	return iopte_to_paddr(d.pte, data) | iova;
}

static int visit_pgtable_walk(struct io_pgtable_walk_data *walk_data, int lvl,
			      arm_lpae_iopte *ptep, size_t size)
{
	struct arm_lpae_io_pgtable_walk_data *data = walk_data->data;
	data->ptes[lvl] = *ptep;
	return 0;
}

static int arm_lpae_pgtable_walk(struct io_pgtable_ops *ops, unsigned long iova,
				 void *wd)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_walk_data walk_data = {
		.data = wd,
		.visit = visit_pgtable_walk,
		.addr = iova,
		.end = iova + 1,
	};

	return __arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);
}

static int io_pgtable_visit(struct arm_lpae_io_pgtable *data,
			    struct io_pgtable_walk_data *walk_data,
			    arm_lpae_iopte *ptep, int lvl)
{
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte pte = READ_ONCE(*ptep);

	size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int ret = walk_data->visit(walk_data, lvl, ptep, size);
	if (ret)
		return ret;

	if (iopte_leaf(pte, lvl, iop->fmt)) {
		walk_data->addr += size;
		return 0;
	}

	if (!iopte_table(pte, lvl)) {
		return -EINVAL;
	}

	ptep = iopte_deref(pte, data);
	return __arm_lpae_iopte_walk(data, walk_data, ptep, lvl + 1);
}

static int __arm_lpae_iopte_walk(struct arm_lpae_io_pgtable *data,
				 struct io_pgtable_walk_data *walk_data,
				 arm_lpae_iopte *ptep,
				 int lvl)
{
	u32 idx;
	int max_entries, ret;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return -EINVAL;

	if (lvl == data->start_level)
		max_entries = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
	else
		max_entries = ARM_LPAE_PTES_PER_TABLE(data);

	for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data);
	     (idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) {
		ret = io_pgtable_visit(data, walk_data, ptep + idx, lvl);
		if (ret)
			return ret;
	}

	return 0;
}

static int visit_dirty(struct io_pgtable_walk_data *walk_data, int lvl,
		       arm_lpae_iopte *ptep, size_t size)
{
	struct iommu_dirty_bitmap *dirty = walk_data->data;

	if (!iopte_leaf(*ptep, lvl, walk_data->iop->fmt))
		return 0;

	if (iopte_writeable_dirty(*ptep)) {
		iommu_dirty_bitmap_record(dirty, walk_data->addr, size);
		if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
			iopte_set_writeable_clean(ptep);
	}

	return 0;
}

static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
					 unsigned long iova, size_t size,
					 unsigned long flags,
					 struct iommu_dirty_bitmap *dirty)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct io_pgtable_walk_data walk_data = {
		.iop = &data->iop,
		.data = dirty,
		.visit = visit_dirty,
		.flags = flags,
		.addr = iova,
		.end = iova + size,
	};
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = data->start_level;

	if (WARN_ON(!size))
		return -EINVAL;
	if (WARN_ON((iova + size - 1) & ~(BIT(cfg->ias) - 1)))
		return -EINVAL;
	if (data->iop.fmt != ARM_64_LPAE_S1)
		return -EINVAL;

	return __arm_lpae_iopte_walk(data, &walk_data, ptep, lvl);
}

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}

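/*
 * E.g. on a 4K-page host, an incoming pgsize_bitmap of SZ_4K | SZ_64K
 * selects a 4K granule (matching PAGE_SIZE) and is then masked against
 * SZ_4K | SZ_2M | SZ_1G, leaving just SZ_4K: 64K is not a valid
 * page/block size within the 4K translation regime.
 */
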
static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map_pages	= arm_lpae_map_pages,
		.unmap_pages	= arm_lpae_unmap_pages,
		.iova_to_phys	= arm_lpae_iova_to_phys,
		.read_and_clear_dirty = arm_lpae_read_and_clear_dirty,
		.pgtable_walk	= arm_lpae_pgtable_walk,
	};

	return data;
}

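/*
 * E.g. a 4K granule (pg_shift = 12, bits_per_level = 9) with a 48-bit
 * IAS gives va_bits = 36, levels = DIV_ROUND_UP(36, 9) = 4, hence
 * start_level = 0 and pgd_bits = 36 - 9 * 3 = 9: a single 4K level-0
 * table with 512 entries.
 */
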
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
			    IO_PGTABLE_QUIRK_ARM_HD |
			    IO_PGTABLE_QUIRK_NO_WARN))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
			goto out_free_data;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		else
			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB |
			    IO_PGTABLE_QUIRK_NO_WARN))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	if (arm_lpae_concat_mandatory(cfg, data)) {
		if (WARN_ON((ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte)) >
			    ARM_LPAE_S2_MAX_CONCAT_PAGES))
			return NULL;
		data->pgd_bits += data->bits_per_level;
		data->start_level++;
	}

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;

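	/*
	 * E.g. a 4K granule starting at level 0: sl was bumped to 1 above,
	 * and ~1 & 0x3 = 2, matching the VTCR SL0 encoding where 2 selects
	 * a level-0 and 1 a level-1 initial lookup for the 4K granule.
	 */
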
	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size, mapped;
	struct io_pgtable_ops *ops;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_READ | IOMMU_WRITE |
					   IOMMU_NOEXEC | IOMMU_CACHE,
					   GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map_pages(ops, iova, iova + size, size, 1,
					    IOMMU_READ | IOMMU_NOEXEC,
					    GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_WRITE, GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/*
		 * Map/unmap the largest supported page at the very top of the
		 * IAS; this can trigger corner cases in the concatenated page
		 * tables.
		 */
		mapped = 0;
		size = 1UL << __fls(cfg->pgsize_bitmap);
		iova = (1UL << cfg->ias) - size;
		if (ops->map_pages(ops, iova, iova, size, 1,
				   IOMMU_READ | IOMMU_WRITE |
				   IOMMU_NOEXEC | IOMMU_CACHE,
				   GFP_KERNEL, &mapped))
			return __FAIL(ops, i);
		if (mapped != size)
			return __FAIL(ops, i);
		if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
			return __FAIL(ops, i);

		free_io_pgtable_ops(ops);
	}

	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int address_size[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, k, pass = 0, fail = 0;
	struct faux_device *dev;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.coherent_walk = true,
		.quirks = IO_PGTABLE_QUIRK_NO_WARN,
	};

	dev = faux_device_create("io-pgtable-test", NULL, 0);
	if (!dev)
		return -ENOMEM;

	cfg.iommu_dev = &dev->dev;

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(address_size); ++j) {
			/* Don't use ias > oas as it is not valid for stage-2. */
			for (k = 0; k <= j; ++k) {
				cfg.pgsize_bitmap = pgsize[i];
				cfg.ias = address_size[k];
				cfg.oas = address_size[j];
				pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u OAS %u\n",
					pgsize[i], cfg.ias, cfg.oas);
				if (arm_lpae_run_tests(&cfg))
					fail++;
				else
					pass++;
			}
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	faux_device_destroy(dev);

	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif