/linux/drivers/iommu/io-pgtable-arm.c (revision 8477ab143069c6b05d6da4a8184ded8b969240f5)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPU-agnostic ARM page table allocator.
 *
 * Copyright (C) 2014 ARM Limited
 *
 * Author: Will Deacon <will.deacon@arm.com>
 */

#define pr_fmt(fmt)	"arm-lpae io-pgtable: " fmt

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/io-pgtable.h>
#include <linux/kernel.h>
#include <linux/device/faux.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/dma-mapping.h>

#include <asm/barrier.h>

#include "io-pgtable-arm.h"
#include "iommu-pages.h"

#define ARM_LPAE_MAX_ADDR_BITS		52
#define ARM_LPAE_S2_MAX_CONCAT_PAGES	16
#define ARM_LPAE_MAX_LEVELS		4

/* Struct accessors */
#define io_pgtable_to_data(x)						\
	container_of((x), struct arm_lpae_io_pgtable, iop)

#define io_pgtable_ops_to_data(x)					\
	io_pgtable_to_data(io_pgtable_ops_to_pgtable(x))

/*
 * Calculate the right shift amount to get to the portion describing level l
 * in a virtual address mapped by the pagetable in d.
 */
#define ARM_LPAE_LVL_SHIFT(l,d)						\
	(((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level) +		\
	ilog2(sizeof(arm_lpae_iopte)))

#define ARM_LPAE_GRANULE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->bits_per_level)
#define ARM_LPAE_PGD_SIZE(d)						\
	(sizeof(arm_lpae_iopte) << (d)->pgd_bits)

#define ARM_LPAE_PTES_PER_TABLE(d)					\
	(ARM_LPAE_GRANULE(d) >> ilog2(sizeof(arm_lpae_iopte)))

/*
 * Calculate the index at level l used to map virtual address a using the
 * pagetable in d.
 */
#define ARM_LPAE_PGD_IDX(l,d)						\
	((l) == (d)->start_level ? (d)->pgd_bits - (d)->bits_per_level : 0)

#define ARM_LPAE_LVL_IDX(a,l,d)						\
	(((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) &			\
	 ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1))

/* Calculate the block/page mapping size at level l for pagetable in d. */
#define ARM_LPAE_BLOCK_SIZE(l,d)	(1ULL << ARM_LPAE_LVL_SHIFT(l,d))
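
/*
 * Worked example of the macros above (a sketch, assuming a 4K granule on
 * a 64-bit format): pg_shift = 12, so bits_per_level = 12 - ilog2(8) = 9
 * and ARM_LPAE_LVL_SHIFT(l,d) = (4 - l) * 9 + 3, giving shifts of
 * 39/30/21/12 for levels 0-3. ARM_LPAE_BLOCK_SIZE(2,d) is therefore
 * 1ULL << 21 = 2M, the familiar level-2 block size, and
 * ARM_LPAE_LVL_IDX() extracts 9 bits per level (more at a concatenated
 * PGD, where ARM_LPAE_PGD_IDX() widens the top-level index).
 */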

/* Page table bits */
#define ARM_LPAE_PTE_TYPE_SHIFT		0
#define ARM_LPAE_PTE_TYPE_MASK		0x3

#define ARM_LPAE_PTE_TYPE_BLOCK		1
#define ARM_LPAE_PTE_TYPE_TABLE		3
#define ARM_LPAE_PTE_TYPE_PAGE		3

#define ARM_LPAE_PTE_ADDR_MASK		GENMASK_ULL(47,12)

#define ARM_LPAE_PTE_NSTABLE		(((arm_lpae_iopte)1) << 63)
#define ARM_LPAE_PTE_XN			(((arm_lpae_iopte)3) << 53)
#define ARM_LPAE_PTE_DBM		(((arm_lpae_iopte)1) << 51)
#define ARM_LPAE_PTE_AF			(((arm_lpae_iopte)1) << 10)
#define ARM_LPAE_PTE_SH_NS		(((arm_lpae_iopte)0) << 8)
#define ARM_LPAE_PTE_SH_OS		(((arm_lpae_iopte)2) << 8)
#define ARM_LPAE_PTE_SH_IS		(((arm_lpae_iopte)3) << 8)
#define ARM_LPAE_PTE_NS			(((arm_lpae_iopte)1) << 5)
#define ARM_LPAE_PTE_VALID		(((arm_lpae_iopte)1) << 0)

#define ARM_LPAE_PTE_ATTR_LO_MASK	(((arm_lpae_iopte)0x3ff) << 2)
/* Ignore the contiguous bit for block splitting */
#define ARM_LPAE_PTE_ATTR_HI_MASK	(ARM_LPAE_PTE_XN | ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTR_MASK		(ARM_LPAE_PTE_ATTR_LO_MASK |	\
					 ARM_LPAE_PTE_ATTR_HI_MASK)
/* Software bit for solving coherency races */
#define ARM_LPAE_PTE_SW_SYNC		(((arm_lpae_iopte)1) << 55)

/* Stage-1 PTE */
#define ARM_LPAE_PTE_AP_UNPRIV		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_AP_RDONLY_BIT	7
#define ARM_LPAE_PTE_AP_RDONLY		(((arm_lpae_iopte)1) << \
					   ARM_LPAE_PTE_AP_RDONLY_BIT)
#define ARM_LPAE_PTE_AP_WR_CLEAN_MASK	(ARM_LPAE_PTE_AP_RDONLY | \
					 ARM_LPAE_PTE_DBM)
#define ARM_LPAE_PTE_ATTRINDX_SHIFT	2
#define ARM_LPAE_PTE_nG			(((arm_lpae_iopte)1) << 11)

/* Stage-2 PTE */
#define ARM_LPAE_PTE_HAP_FAULT		(((arm_lpae_iopte)0) << 6)
#define ARM_LPAE_PTE_HAP_READ		(((arm_lpae_iopte)1) << 6)
#define ARM_LPAE_PTE_HAP_WRITE		(((arm_lpae_iopte)2) << 6)
/*
 * For !FWB these encode as:
 *  1111 = Normal outer write-back cacheable / Inner write-back cacheable
 *         Permit S1 to override
 *  0101 = Normal non-cacheable / Inner non-cacheable
 *  0001 = Device / Device-nGnRE
 * For S2FWB these encode as:
 *  0110 Force Normal write-back
 *  0101 Normal* is forced Normal-NC, Device unchanged
 *  0001 Force Device-nGnRE
 */
#define ARM_LPAE_PTE_MEMATTR_FWB_WB	(((arm_lpae_iopte)0x6) << 2)
#define ARM_LPAE_PTE_MEMATTR_OIWB	(((arm_lpae_iopte)0xf) << 2)
#define ARM_LPAE_PTE_MEMATTR_NC		(((arm_lpae_iopte)0x5) << 2)
#define ARM_LPAE_PTE_MEMATTR_DEV	(((arm_lpae_iopte)0x1) << 2)

/* Register bits */
#define ARM_LPAE_VTCR_SL0_MASK		0x3

#define ARM_LPAE_TCR_T0SZ_SHIFT		0

#define ARM_LPAE_VTCR_PS_SHIFT		16
#define ARM_LPAE_VTCR_PS_MASK		0x7

#define ARM_LPAE_MAIR_ATTR_SHIFT(n)	((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK		0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE	0x04
#define ARM_LPAE_MAIR_ATTR_NC		0x44
#define ARM_LPAE_MAIR_ATTR_INC_OWBRWA	0xf4
#define ARM_LPAE_MAIR_ATTR_WBRWA	0xff
#define ARM_LPAE_MAIR_ATTR_IDX_NC	0
#define ARM_LPAE_MAIR_ATTR_IDX_CACHE	1
#define ARM_LPAE_MAIR_ATTR_IDX_DEV	2
#define ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE	3

#define ARM_MALI_LPAE_TTBR_ADRMODE_TABLE (3u << 0)
#define ARM_MALI_LPAE_TTBR_READ_INNER	BIT(2)
#define ARM_MALI_LPAE_TTBR_SHARE_OUTER	BIT(4)

#define ARM_MALI_LPAE_MEMATTR_IMP_DEF	0x88ULL
#define ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC 0x8DULL

/* IOPTE accessors */
#define iopte_deref(pte,d) __va(iopte_to_paddr(pte, d))

#define iopte_type(pte)					\
	(((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK)

#define iopte_prot(pte)	((pte) & ARM_LPAE_PTE_ATTR_MASK)

#define iopte_writeable_dirty(pte)				\
	(((pte) & ARM_LPAE_PTE_AP_WR_CLEAN_MASK) == ARM_LPAE_PTE_DBM)

#define iopte_set_writeable_clean(ptep)				\
	set_bit(ARM_LPAE_PTE_AP_RDONLY_BIT, (unsigned long *)(ptep))

struct arm_lpae_io_pgtable {
	struct io_pgtable	iop;

	int			pgd_bits;
	int			start_level;
	int			bits_per_level;

	void			*pgd;
};

typedef u64 arm_lpae_iopte;
static inline bool iopte_leaf(arm_lpae_iopte pte, int lvl,
			      enum io_pgtable_fmt fmt)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1) && fmt != ARM_MALI_LPAE)
		return iopte_type(pte) == ARM_LPAE_PTE_TYPE_PAGE;

	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_BLOCK;
}

static inline bool iopte_table(arm_lpae_iopte pte, int lvl)
{
	if (lvl == (ARM_LPAE_MAX_LEVELS - 1))
		return false;
	return iopte_type(pte) == ARM_LPAE_PTE_TYPE_TABLE;
}

static arm_lpae_iopte paddr_to_iopte(phys_addr_t paddr,
				     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte pte = paddr;

	/* Of the bits which overlap, either 51:48 or 15:12 are always RES0 */
	return (pte | (pte >> (48 - 12))) & ARM_LPAE_PTE_ADDR_MASK;
}

static phys_addr_t iopte_to_paddr(arm_lpae_iopte pte,
				  struct arm_lpae_io_pgtable *data)
{
	u64 paddr = pte & ARM_LPAE_PTE_ADDR_MASK;

	if (ARM_LPAE_GRANULE(data) < SZ_64K)
		return paddr;

	/* Rotate the packed high-order bits back to the top */
	return (paddr | (paddr << (48 - 12))) & (ARM_LPAE_PTE_ADDR_MASK << 4);
}
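
/*
 * Worked example of the 52-bit packing above (a sketch, assuming a 64K
 * granule): for paddr = 0x9876543210000, PA[51:48] = 0x9 and the low
 * PTE address bits 15:12 are guaranteed RES0, so paddr_to_iopte() folds
 * the top bits down with ">> 36" to give a PTE address field of
 * 0x876543219000, i.e. PTE[47:16] = PA[47:16] and PTE[15:12] = PA[51:48].
 * iopte_to_paddr() rotates them back up with "<< 36".
 */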

/*
 * Convert an index returned by ARM_LPAE_PGD_IDX(), which can point into
 * a concatenated PGD, into the maximum number of entries that can be
 * mapped in the same table page.
 */
static inline int arm_lpae_max_entries(int i, struct arm_lpae_io_pgtable *data)
{
	int ptes_per_table = ARM_LPAE_PTES_PER_TABLE(data);

	return ptes_per_table - (i & (ptes_per_table - 1));
}
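
/*
 * For example (assuming a 4K granule, i.e. 512 PTEs per table): an index
 * of 1000 into a two-page concatenated PGD masks down to 488, leaving
 * 512 - 488 = 24 entries before the end of that physical table page.
 */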

/*
 * Check if concatenated PGDs are mandatory according to Arm DDI0487 (K.a)
 * 1) R_DXBSH: For 16KB, and 48-bit input size, use level 1 instead of 0.
 * 2) R_SRKBC: After working through the table of PA sizes and valid
 *    initial lookup levels:
 *   a) 40 bits PA size with 4K: use level 1 instead of level 0 (2 tables for ias = oas)
 *   b) 40 bits PA size with 16K: use level 2 instead of level 1 (16 tables for ias = oas)
 *   c) 42 bits PA size with 4K: use level 1 instead of level 0 (8 tables for ias = oas)
 *   d) 48 bits PA size with 16K: use level 1 instead of level 0 (2 tables for ias = oas)
 */
static inline bool arm_lpae_concat_mandatory(struct io_pgtable_cfg *cfg,
					     struct arm_lpae_io_pgtable *data)
{
	unsigned int ias = cfg->ias;
	unsigned int oas = cfg->oas;

	/* Covers 1 and 2.d */
	if ((ARM_LPAE_GRANULE(data) == SZ_16K) && (data->start_level == 0))
		return (oas == 48) || (ias == 48);

	/* Covers 2.a and 2.c */
	if ((ARM_LPAE_GRANULE(data) == SZ_4K) && (data->start_level == 0))
		return (oas == 40) || (oas == 42);

	/* Case 2.b */
	return (ARM_LPAE_GRANULE(data) == SZ_16K) &&
	       (data->start_level == 1) && (oas == 40);
}

static dma_addr_t __arm_lpae_dma_addr(void *pages)
{
	return (dma_addr_t)virt_to_phys(pages);
}

static void *__arm_lpae_alloc_pages(size_t size, gfp_t gfp,
				    struct io_pgtable_cfg *cfg,
				    void *cookie)
{
	struct device *dev = cfg->iommu_dev;
	size_t alloc_size;
	dma_addr_t dma;
	void *pages;

	/*
	 * For very small starting-level translation tables the HW requires a
	 * minimum alignment of at least 64 to cover all cases.
	 */
	alloc_size = max(size, 64);
	if (cfg->alloc)
		pages = cfg->alloc(cookie, alloc_size, gfp);
	else
		pages = iommu_alloc_pages_node_sz(dev_to_node(dev), gfp,
						  alloc_size);

	if (!pages)
		return NULL;

	if (!cfg->coherent_walk) {
		dma = dma_map_single(dev, pages, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma))
			goto out_free;
		/*
		 * We depend on the IOMMU being able to work with any physical
		 * address directly, so if the DMA layer suggests otherwise by
		 * translating or truncating them, that bodes very badly...
		 */
		if (dma != virt_to_phys(pages))
			goto out_unmap;
	}

	return pages;

out_unmap:
	dev_err(dev, "Cannot accommodate DMA translation for IOMMU page tables\n");
	dma_unmap_single(dev, dma, size, DMA_TO_DEVICE);

out_free:
	if (cfg->free)
		cfg->free(cookie, pages, size);
	else
		iommu_free_pages(pages);

	return NULL;
}

static void __arm_lpae_free_pages(void *pages, size_t size,
				  struct io_pgtable_cfg *cfg,
				  void *cookie)
{
	if (!cfg->coherent_walk)
		dma_unmap_single(cfg->iommu_dev, __arm_lpae_dma_addr(pages),
				 size, DMA_TO_DEVICE);

	if (cfg->free)
		cfg->free(cookie, pages, size);
	else
		iommu_free_pages(pages);
}

static void __arm_lpae_sync_pte(arm_lpae_iopte *ptep, int num_entries,
				struct io_pgtable_cfg *cfg)
{
	dma_sync_single_for_device(cfg->iommu_dev, __arm_lpae_dma_addr(ptep),
				   sizeof(*ptep) * num_entries, DMA_TO_DEVICE);
}

static void __arm_lpae_clear_pte(arm_lpae_iopte *ptep, struct io_pgtable_cfg *cfg, int num_entries)
{
	for (int i = 0; i < num_entries; i++)
		ptep[i] = 0;

	if (!cfg->coherent_walk && num_entries)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep);

static void __arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
				phys_addr_t paddr, arm_lpae_iopte prot,
				int lvl, int num_entries, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte = prot;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int i;

	if (data->iop.fmt != ARM_MALI_LPAE && lvl == ARM_LPAE_MAX_LEVELS - 1)
		pte |= ARM_LPAE_PTE_TYPE_PAGE;
	else
		pte |= ARM_LPAE_PTE_TYPE_BLOCK;

	for (i = 0; i < num_entries; i++)
		ptep[i] = pte | paddr_to_iopte(paddr + i * sz, data);

	if (!cfg->coherent_walk)
		__arm_lpae_sync_pte(ptep, num_entries, cfg);
}

static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
			     unsigned long iova, phys_addr_t paddr,
			     arm_lpae_iopte prot, int lvl, int num_entries,
			     arm_lpae_iopte *ptep)
{
	int i;

	for (i = 0; i < num_entries; i++)
		if (iopte_leaf(ptep[i], lvl, data->iop.fmt)) {
			/* We require an unmap first */
			WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
			return -EEXIST;
		} else if (iopte_type(ptep[i]) == ARM_LPAE_PTE_TYPE_TABLE) {
			/*
			 * We need to unmap and free the old table before
			 * overwriting it with a block entry.
			 */
			arm_lpae_iopte *tblp;
			size_t sz = ARM_LPAE_BLOCK_SIZE(lvl, data);

			tblp = ptep - ARM_LPAE_LVL_IDX(iova, lvl, data);
			if (__arm_lpae_unmap(data, NULL, iova + i * sz, sz, 1,
					     lvl, tblp) != sz) {
				WARN_ON(1);
				return -EINVAL;
			}
		}

	__arm_lpae_init_pte(data, paddr, prot, lvl, num_entries, ptep);
	return 0;
}

static arm_lpae_iopte arm_lpae_install_table(arm_lpae_iopte *table,
					     arm_lpae_iopte *ptep,
					     arm_lpae_iopte curr,
					     struct arm_lpae_io_pgtable *data)
{
	arm_lpae_iopte old, new;
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	new = paddr_to_iopte(__pa(table), data) | ARM_LPAE_PTE_TYPE_TABLE;
	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_NS)
		new |= ARM_LPAE_PTE_NSTABLE;

	/*
	 * Ensure the table itself is visible before its PTE can be.
	 * Whilst we could get away with cmpxchg64_release below, this
	 * doesn't have any ordering semantics when !CONFIG_SMP.
	 */
	dma_wmb();

	old = cmpxchg64_relaxed(ptep, curr, new);

	if (cfg->coherent_walk || (old & ARM_LPAE_PTE_SW_SYNC))
		return old;

	/* Even if it's not ours, there's no point waiting; just kick it */
	__arm_lpae_sync_pte(ptep, 1, cfg);
	if (old == curr)
		WRITE_ONCE(*ptep, new | ARM_LPAE_PTE_SW_SYNC);

	return old;
}
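
/*
 * Race sketch for the non-coherent path above (illustrative only): CPUs
 * A and B both find an empty PTE and allocate tables. A's cmpxchg wins
 * and installs its table; B's cmpxchg returns A's PTE, so B frees its
 * own table. If B doesn't yet see ARM_LPAE_PTE_SW_SYNC in the old
 * value, it cannot tell whether A's DMA sync has completed, so it syncs
 * the PTE itself; only the winner (old == curr) then rewrites the PTE
 * with ARM_LPAE_PTE_SW_SYNC set to mark the sync as done.
 */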

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
			  phys_addr_t paddr, size_t size, size_t pgcount,
			  arm_lpae_iopte prot, int lvl, arm_lpae_iopte *ptep,
			  gfp_t gfp, size_t *mapped)
{
	arm_lpae_iopte *cptep, pte;
	size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	size_t tblsz = ARM_LPAE_GRANULE(data);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	int ret = 0, num_entries, max_entries, map_idx_start;

	/* Find our entry at the current level */
	map_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += map_idx_start;

	/* If we can install a leaf entry at this level, then do so */
	if (size == block_size) {
		max_entries = arm_lpae_max_entries(map_idx_start, data);
		num_entries = min_t(int, pgcount, max_entries);
		ret = arm_lpae_init_pte(data, iova, paddr, prot, lvl, num_entries, ptep);
		if (!ret)
			*mapped += num_entries * size;

		return ret;
	}

	/* We can't allocate tables at the final level */
	if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
		return -EINVAL;

	/* Grab a pointer to the next level */
	pte = READ_ONCE(*ptep);
	if (!pte) {
		cptep = __arm_lpae_alloc_pages(tblsz, gfp, cfg, data->iop.cookie);
		if (!cptep)
			return -ENOMEM;

		pte = arm_lpae_install_table(cptep, ptep, 0, data);
		if (pte)
			__arm_lpae_free_pages(cptep, tblsz, cfg, data->iop.cookie);
	} else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
		__arm_lpae_sync_pte(ptep, 1, cfg);
	}

	if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
		cptep = iopte_deref(pte, data);
	} else if (pte) {
		/* We require an unmap first */
		WARN_ON(!(cfg->quirks & IO_PGTABLE_QUIRK_NO_WARN));
		return -EEXIST;
	}

	/* Rinse, repeat */
	return __arm_lpae_map(data, iova, paddr, size, pgcount, prot, lvl + 1,
			      cptep, gfp, mapped);
}

static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
					   int prot)
{
	arm_lpae_iopte pte;

	if (data->iop.fmt == ARM_64_LPAE_S1 ||
	    data->iop.fmt == ARM_32_LPAE_S1) {
		pte = ARM_LPAE_PTE_nG;
		if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ))
			pte |= ARM_LPAE_PTE_AP_RDONLY;
		else if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_HD)
			pte |= ARM_LPAE_PTE_DBM;
		if (!(prot & IOMMU_PRIV))
			pte |= ARM_LPAE_PTE_AP_UNPRIV;
	} else {
		pte = ARM_LPAE_PTE_HAP_FAULT;
		if (prot & IOMMU_READ)
			pte |= ARM_LPAE_PTE_HAP_READ;
		if (prot & IOMMU_WRITE)
			pte |= ARM_LPAE_PTE_HAP_WRITE;
	}

	/*
	 * Note that this logic is structured to accommodate Mali LPAE
	 * having stage-1-like attributes but stage-2-like permissions.
	 */
	if (data->iop.fmt == ARM_64_LPAE_S2 ||
	    data->iop.fmt == ARM_32_LPAE_S2) {
		if (prot & IOMMU_MMIO) {
			pte |= ARM_LPAE_PTE_MEMATTR_DEV;
		} else if (prot & IOMMU_CACHE) {
			if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_S2FWB)
				pte |= ARM_LPAE_PTE_MEMATTR_FWB_WB;
			else
				pte |= ARM_LPAE_PTE_MEMATTR_OIWB;
		} else {
			pte |= ARM_LPAE_PTE_MEMATTR_NC;
		}
	} else {
		if (prot & IOMMU_MMIO)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
		else if (prot & IOMMU_CACHE)
			pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
				<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
	}

	/*
	 * Also Mali has its own notions of shareability wherein its Inner
	 * domain covers the cores within the GPU, and its Outer domain is
	 * "outside the GPU" (i.e. either the Inner or System domain in CPU
	 * terms, depending on coherency).
	 */
	if (prot & IOMMU_CACHE && data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_SH_IS;
	else
		pte |= ARM_LPAE_PTE_SH_OS;

	if (prot & IOMMU_NOEXEC)
		pte |= ARM_LPAE_PTE_XN;

	if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS)
		pte |= ARM_LPAE_PTE_NS;

	if (data->iop.fmt != ARM_MALI_LPAE)
		pte |= ARM_LPAE_PTE_AF;

	return pte;
}
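
/*
 * For example (a sketch, assuming ARM_64_LPAE_S1 with no quirks):
 * prot = IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE yields
 * nG | AP_UNPRIV | (ATTR_IDX_CACHE << ATTRINDX_SHIFT) | SH_IS | AF,
 * i.e. a writable, unprivileged, inner-shareable, write-back cacheable
 * mapping with the Access Flag preset so no HW AF update is needed.
 */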

static int arm_lpae_map_pages(struct io_pgtable_ops *ops, unsigned long iova,
			      phys_addr_t paddr, size_t pgsize, size_t pgcount,
			      int iommu_prot, gfp_t gfp, size_t *mapped)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	int ret, lvl = data->start_level;
	arm_lpae_iopte prot;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize))
		return -EINVAL;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext || paddr >> cfg->oas))
		return -ERANGE;

	if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
		return -EINVAL;

	prot = arm_lpae_prot_to_pte(data, iommu_prot);
	ret = __arm_lpae_map(data, iova, paddr, pgsize, pgcount, prot, lvl,
			     ptep, gfp, mapped);
	/*
	 * Synchronise all PTE updates for the new mapping before there's
	 * a chance for anything to kick off a table walk for the new iova.
	 */
	wmb();

	return ret;
}
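
/*
 * Typical use through the generic io-pgtable API (an illustrative
 * sketch, not part of this driver; "driver_tlb_ops", "dev" and "cookie"
 * are assumed to be provided by the calling IOMMU driver):
 *
 *	struct io_pgtable_cfg cfg = {
 *		.pgsize_bitmap	= SZ_4K | SZ_2M | SZ_1G,
 *		.ias		= 48,
 *		.oas		= 48,
 *		.coherent_walk	= true,
 *		.tlb		= &driver_tlb_ops,
 *		.iommu_dev	= dev,
 *	};
 *	size_t mapped = 0;
 *	struct io_pgtable_ops *ops;
 *
 *	ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &cfg, cookie);
 *	if (ops)
 *		ops->map_pages(ops, iova, paddr, SZ_4K, 1,
 *			       IOMMU_READ | IOMMU_WRITE, GFP_KERNEL,
 *			       &mapped);
 */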

static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl,
				    arm_lpae_iopte *ptep)
{
	arm_lpae_iopte *start, *end;
	unsigned long table_size;

	if (lvl == data->start_level)
		table_size = ARM_LPAE_PGD_SIZE(data);
	else
		table_size = ARM_LPAE_GRANULE(data);

	start = ptep;

	/* Only leaf entries at the last level */
	if (lvl == ARM_LPAE_MAX_LEVELS - 1)
		end = ptep;
	else
		end = (void *)ptep + table_size;

	while (ptep != end) {
		arm_lpae_iopte pte = *ptep++;

		if (!pte || iopte_leaf(pte, lvl, data->iop.fmt))
			continue;

		__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
	}

	__arm_lpae_free_pages(start, table_size, &data->iop.cfg, data->iop.cookie);
}

static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);

	__arm_lpae_free_pgtable(data, data->start_level, data->pgd);
	kfree(data);
}

static size_t __arm_lpae_unmap(struct arm_lpae_io_pgtable *data,
			       struct iommu_iotlb_gather *gather,
			       unsigned long iova, size_t size, size_t pgcount,
			       int lvl, arm_lpae_iopte *ptep)
{
	arm_lpae_iopte pte;
	struct io_pgtable *iop = &data->iop;
	int i = 0, num_entries, max_entries, unmap_idx_start;

	/* Something went horribly wrong and we ran out of page table */
	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return 0;

	unmap_idx_start = ARM_LPAE_LVL_IDX(iova, lvl, data);
	ptep += unmap_idx_start;
	pte = READ_ONCE(*ptep);
	if (!pte) {
		WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
		return -ENOENT;
	}

	/* If the size matches this level, we're in the right place */
	if (size == ARM_LPAE_BLOCK_SIZE(lvl, data)) {
		max_entries = arm_lpae_max_entries(unmap_idx_start, data);
		num_entries = min_t(int, pgcount, max_entries);

		/* Find and handle non-leaf entries */
		for (i = 0; i < num_entries; i++) {
			pte = READ_ONCE(ptep[i]);
			if (!pte) {
				WARN_ON(!(data->iop.cfg.quirks & IO_PGTABLE_QUIRK_NO_WARN));
				break;
			}

			if (!iopte_leaf(pte, lvl, iop->fmt)) {
				__arm_lpae_clear_pte(&ptep[i], &iop->cfg, 1);

				/* Also flush any partial walks */
				io_pgtable_tlb_flush_walk(iop, iova + i * size, size,
							  ARM_LPAE_GRANULE(data));
				__arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data));
			}
		}

		/* Clear the remaining entries */
		__arm_lpae_clear_pte(ptep, &iop->cfg, i);

		if (gather && !iommu_iotlb_gather_queued(gather))
			for (int j = 0; j < i; j++)
				io_pgtable_tlb_add_page(iop, gather, iova + j * size, size);

		return i * size;
	} else if (iopte_leaf(pte, lvl, iop->fmt)) {
		WARN_ONCE(true, "Unmap of a partial large IOPTE is not allowed");
		return 0;
	}

	/* Keep on walkin' */
	ptep = iopte_deref(pte, data);
	return __arm_lpae_unmap(data, gather, iova, size, pgcount, lvl + 1, ptep);
}

static size_t arm_lpae_unmap_pages(struct io_pgtable_ops *ops, unsigned long iova,
				   size_t pgsize, size_t pgcount,
				   struct iommu_iotlb_gather *gather)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	arm_lpae_iopte *ptep = data->pgd;
	long iaext = (s64)iova >> cfg->ias;

	if (WARN_ON(!pgsize || (pgsize & cfg->pgsize_bitmap) != pgsize || !pgcount))
		return 0;

	if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
		iaext = ~iaext;
	if (WARN_ON(iaext))
		return 0;

	return __arm_lpae_unmap(data, gather, iova, pgsize, pgcount,
				data->start_level, ptep);
}

struct io_pgtable_walk_data {
	struct io_pgtable		*iop;
	void				*data;
	int (*visit)(struct io_pgtable_walk_data *walk_data, int lvl,
		     arm_lpae_iopte *ptep, size_t size);
	unsigned long			flags;
	u64				addr;
	const u64			end;
};

static int __arm_lpae_iopte_walk(struct arm_lpae_io_pgtable *data,
				 struct io_pgtable_walk_data *walk_data,
				 arm_lpae_iopte *ptep,
				 int lvl);

struct iova_to_phys_data {
	arm_lpae_iopte pte;
	int lvl;
};

static int visit_iova_to_phys(struct io_pgtable_walk_data *walk_data, int lvl,
			      arm_lpae_iopte *ptep, size_t size)
{
	struct iova_to_phys_data *data = walk_data->data;

	data->pte = *ptep;
	data->lvl = lvl;
	return 0;
}

static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops,
					 unsigned long iova)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct iova_to_phys_data d;
	struct io_pgtable_walk_data walk_data = {
		.data = &d,
		.visit = visit_iova_to_phys,
		.addr = iova,
		.end = iova + 1,
	};
	int ret;

	ret = __arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);
	if (ret)
		return 0;

	iova &= (ARM_LPAE_BLOCK_SIZE(d.lvl, data) - 1);
	return iopte_to_paddr(d.pte, data) | iova;
}

static int visit_pgtable_walk(struct io_pgtable_walk_data *walk_data, int lvl,
			      arm_lpae_iopte *ptep, size_t size)
{
	struct arm_lpae_io_pgtable_walk_data *data = walk_data->data;

	data->ptes[lvl] = *ptep;
	return 0;
}

static int arm_lpae_pgtable_walk(struct io_pgtable_ops *ops, unsigned long iova,
				 void *wd)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_walk_data walk_data = {
		.data = wd,
		.visit = visit_pgtable_walk,
		.addr = iova,
		.end = iova + 1,
	};

	return __arm_lpae_iopte_walk(data, &walk_data, data->pgd, data->start_level);
}

static int io_pgtable_visit(struct arm_lpae_io_pgtable *data,
			    struct io_pgtable_walk_data *walk_data,
			    arm_lpae_iopte *ptep, int lvl)
{
	struct io_pgtable *iop = &data->iop;
	arm_lpae_iopte pte = READ_ONCE(*ptep);

	size_t size = ARM_LPAE_BLOCK_SIZE(lvl, data);
	int ret = walk_data->visit(walk_data, lvl, ptep, size);

	if (ret)
		return ret;

	if (iopte_leaf(pte, lvl, iop->fmt)) {
		walk_data->addr += size;
		return 0;
	}

	if (!iopte_table(pte, lvl))
		return -EINVAL;

	ptep = iopte_deref(pte, data);
	return __arm_lpae_iopte_walk(data, walk_data, ptep, lvl + 1);
}

static int __arm_lpae_iopte_walk(struct arm_lpae_io_pgtable *data,
				 struct io_pgtable_walk_data *walk_data,
				 arm_lpae_iopte *ptep,
				 int lvl)
{
	u32 idx;
	int max_entries, ret;

	if (WARN_ON(lvl == ARM_LPAE_MAX_LEVELS))
		return -EINVAL;

	if (lvl == data->start_level)
		max_entries = ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte);
	else
		max_entries = ARM_LPAE_PTES_PER_TABLE(data);

	for (idx = ARM_LPAE_LVL_IDX(walk_data->addr, lvl, data);
	     (idx < max_entries) && (walk_data->addr < walk_data->end); ++idx) {
		ret = io_pgtable_visit(data, walk_data, ptep + idx, lvl);
		if (ret)
			return ret;
	}

	return 0;
}

static int visit_dirty(struct io_pgtable_walk_data *walk_data, int lvl,
		       arm_lpae_iopte *ptep, size_t size)
{
	struct iommu_dirty_bitmap *dirty = walk_data->data;

	if (!iopte_leaf(*ptep, lvl, walk_data->iop->fmt))
		return 0;

	if (iopte_writeable_dirty(*ptep)) {
		iommu_dirty_bitmap_record(dirty, walk_data->addr, size);
		if (!(walk_data->flags & IOMMU_DIRTY_NO_CLEAR))
			iopte_set_writeable_clean(ptep);
	}

	return 0;
}

static int arm_lpae_read_and_clear_dirty(struct io_pgtable_ops *ops,
					 unsigned long iova, size_t size,
					 unsigned long flags,
					 struct iommu_dirty_bitmap *dirty)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;
	struct io_pgtable_walk_data walk_data = {
		.iop = &data->iop,
		.data = dirty,
		.visit = visit_dirty,
		.flags = flags,
		.addr = iova,
		.end = iova + size,
	};
	arm_lpae_iopte *ptep = data->pgd;
	int lvl = data->start_level;

	if (WARN_ON(!size))
		return -EINVAL;
	if (WARN_ON((iova + size - 1) & ~(BIT(cfg->ias) - 1)))
		return -EINVAL;
	if (data->iop.fmt != ARM_64_LPAE_S1)
		return -EINVAL;

	return __arm_lpae_iopte_walk(data, &walk_data, ptep, lvl);
}
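
/*
 * Illustrative dirty-tracking flow (a sketch, not part of this driver;
 * "bitmap" and "gather" are assumed to come from the caller, e.g. via
 * iommufd):
 *
 *	struct iommu_dirty_bitmap dirty;
 *
 *	iommu_dirty_bitmap_init(&dirty, bitmap, &gather);
 *	ops->read_and_clear_dirty(ops, iova, size, 0, &dirty);
 *
 * With IO_PGTABLE_QUIRK_ARM_HD, writable-clean PTEs carry DBM set and
 * AP[2] (RDONLY) set; hardware clears AP[2] on a write, which
 * iopte_writeable_dirty() reports and iopte_set_writeable_clean()
 * undoes unless IOMMU_DIRTY_NO_CLEAR is passed in flags.
 */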

static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg)
{
	unsigned long granule, page_sizes;
	unsigned int max_addr_bits = 48;

	/*
	 * We need to restrict the supported page sizes to match the
	 * translation regime for a particular granule. Aim to match
	 * the CPU page size if possible, otherwise prefer smaller sizes.
	 * While we're at it, restrict the block sizes to match the
	 * chosen granule.
	 */
	if (cfg->pgsize_bitmap & PAGE_SIZE)
		granule = PAGE_SIZE;
	else if (cfg->pgsize_bitmap & ~PAGE_MASK)
		granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK);
	else if (cfg->pgsize_bitmap & PAGE_MASK)
		granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK);
	else
		granule = 0;

	switch (granule) {
	case SZ_4K:
		page_sizes = (SZ_4K | SZ_2M | SZ_1G);
		break;
	case SZ_16K:
		page_sizes = (SZ_16K | SZ_32M);
		break;
	case SZ_64K:
		max_addr_bits = 52;
		page_sizes = (SZ_64K | SZ_512M);
		if (cfg->oas > 48)
			page_sizes |= 1ULL << 42; /* 4TB */
		break;
	default:
		page_sizes = 0;
	}

	cfg->pgsize_bitmap &= page_sizes;
	cfg->ias = min(cfg->ias, max_addr_bits);
	cfg->oas = min(cfg->oas, max_addr_bits);
}
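
/*
 * For example (assuming a kernel with PAGE_SIZE == 4K): a caller passing
 * pgsize_bitmap = SZ_4K | SZ_64K | SZ_2M matches the CPU page size, so
 * the 4K granule is chosen and the bitmap is cut down to SZ_4K | SZ_2M;
 * the stray 64K size is dropped along with any block sizes that don't
 * exist for a 4K granule.
 */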

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
	struct arm_lpae_io_pgtable *data;
	int levels, va_bits, pg_shift;

	arm_lpae_restrict_pgsizes(cfg);

	if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
		return NULL;

	if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
		return NULL;

	data = kmalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return NULL;

	pg_shift = __ffs(cfg->pgsize_bitmap);
	data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

	va_bits = cfg->ias - pg_shift;
	levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
	data->start_level = ARM_LPAE_MAX_LEVELS - levels;

	/* Calculate the actual size of our pgd (without concatenation) */
	data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

	data->iop.ops = (struct io_pgtable_ops) {
		.map_pages	= arm_lpae_map_pages,
		.unmap_pages	= arm_lpae_unmap_pages,
		.iova_to_phys	= arm_lpae_iova_to_phys,
		.read_and_clear_dirty = arm_lpae_read_and_clear_dirty,
		.pgtable_walk	= arm_lpae_pgtable_walk,
	};

	return data;
}
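
/*
 * Worked example of the geometry above (assuming a 4K granule and
 * ias = 48): pg_shift = 12, bits_per_level = 9, va_bits = 36,
 * levels = DIV_ROUND_UP(36, 9) = 4, so start_level = 0 and
 * pgd_bits = 36 - 9 * 3 = 9, i.e. a single 4K PGD page resolving
 * IOVA bits 47:39 at level 0.
 */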

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 reg;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s1_cfg.tcr) tcr = &cfg->arm_lpae_s1_cfg.tcr;
	bool tg1;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_NS |
			    IO_PGTABLE_QUIRK_ARM_TTBR1 |
			    IO_PGTABLE_QUIRK_ARM_OUTER_WBWA |
			    IO_PGTABLE_QUIRK_ARM_HD |
			    IO_PGTABLE_QUIRK_NO_WARN))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* TCR */
	if (cfg->coherent_walk) {
		tcr->sh = ARM_LPAE_TCR_SH_IS;
		tcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
		if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA)
			goto out_free_data;
	} else {
		tcr->sh = ARM_LPAE_TCR_SH_OS;
		tcr->irgn = ARM_LPAE_TCR_RGN_NC;
		if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_OUTER_WBWA))
			tcr->orgn = ARM_LPAE_TCR_RGN_NC;
		else
			tcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	}

	tg1 = cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1;
	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_4K : ARM_LPAE_TCR_TG0_4K;
		break;
	case SZ_16K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_16K : ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		tcr->tg = tg1 ? ARM_LPAE_TCR_TG1_64K : ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		tcr->ips = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		tcr->ips = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		tcr->ips = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		tcr->ips = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		tcr->ips = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		tcr->ips = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		tcr->ips = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	tcr->tsz = 64ULL - cfg->ias;

	/* MAIRs */
	reg = (ARM_LPAE_MAIR_ATTR_NC
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
	      (ARM_LPAE_MAIR_ATTR_WBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
	      (ARM_LPAE_MAIR_ATTR_DEVICE
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)) |
	      (ARM_LPAE_MAIR_ATTR_INC_OWBRWA
	       << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_INC_OCACHE));

	cfg->arm_lpae_s1_cfg.mair = reg;

	/* Looking good; allocate a pgd */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* TTBR */
	cfg->arm_lpae_s1_cfg.ttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	u64 sl;
	struct arm_lpae_io_pgtable *data;
	typeof(&cfg->arm_lpae_s2_cfg.vtcr) vtcr = &cfg->arm_lpae_s2_cfg.vtcr;

	if (cfg->quirks & ~(IO_PGTABLE_QUIRK_ARM_S2FWB |
			    IO_PGTABLE_QUIRK_NO_WARN))
		return NULL;

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	if (arm_lpae_concat_mandatory(cfg, data)) {
		if (WARN_ON((ARM_LPAE_PGD_SIZE(data) / sizeof(arm_lpae_iopte)) >
			    ARM_LPAE_S2_MAX_CONCAT_PAGES))
			return NULL;
		data->pgd_bits += data->bits_per_level;
		data->start_level++;
	}

	/* VTCR */
	if (cfg->coherent_walk) {
		vtcr->sh = ARM_LPAE_TCR_SH_IS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_WBWA;
		vtcr->orgn = ARM_LPAE_TCR_RGN_WBWA;
	} else {
		vtcr->sh = ARM_LPAE_TCR_SH_OS;
		vtcr->irgn = ARM_LPAE_TCR_RGN_NC;
		vtcr->orgn = ARM_LPAE_TCR_RGN_NC;
	}

	sl = data->start_level;

	switch (ARM_LPAE_GRANULE(data)) {
	case SZ_4K:
		vtcr->tg = ARM_LPAE_TCR_TG0_4K;
		sl++; /* SL0 format is different for 4K granule size */
		break;
	case SZ_16K:
		vtcr->tg = ARM_LPAE_TCR_TG0_16K;
		break;
	case SZ_64K:
		vtcr->tg = ARM_LPAE_TCR_TG0_64K;
		break;
	}

	switch (cfg->oas) {
	case 32:
		vtcr->ps = ARM_LPAE_TCR_PS_32_BIT;
		break;
	case 36:
		vtcr->ps = ARM_LPAE_TCR_PS_36_BIT;
		break;
	case 40:
		vtcr->ps = ARM_LPAE_TCR_PS_40_BIT;
		break;
	case 42:
		vtcr->ps = ARM_LPAE_TCR_PS_42_BIT;
		break;
	case 44:
		vtcr->ps = ARM_LPAE_TCR_PS_44_BIT;
		break;
	case 48:
		vtcr->ps = ARM_LPAE_TCR_PS_48_BIT;
		break;
	case 52:
		vtcr->ps = ARM_LPAE_TCR_PS_52_BIT;
		break;
	default:
		goto out_free_data;
	}

	vtcr->tsz = 64ULL - cfg->ias;
	vtcr->sl = ~sl & ARM_LPAE_VTCR_SL0_MASK;
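
	/*
	 * Worked example of the SL0 inversion above (assuming a 4K granule
	 * and start_level == 1): sl was bumped to 2, and ~2 & 0x3 == 1,
	 * the architectural SL0 encoding for "begin the stage-2 walk at
	 * level 1".
	 */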

	/* Allocate pgd pages */
	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data),
					   GFP_KERNEL, cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before any actual TTBR write */
	wmb();

	/* VTTBR */
	cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 32 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s1(cfg, cookie);
}

static struct io_pgtable *
arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
{
	if (cfg->ias > 40 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);
	return arm_64_lpae_alloc_pgtable_s2(cfg, cookie);
}

static struct io_pgtable *
arm_mali_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg, void *cookie)
{
	struct arm_lpae_io_pgtable *data;

	/* No quirks for Mali (hopefully) */
	if (cfg->quirks)
		return NULL;

	if (cfg->ias > 48 || cfg->oas > 40)
		return NULL;

	cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G);

	data = arm_lpae_alloc_pgtable(cfg);
	if (!data)
		return NULL;

	/* Mali seems to need a full 4-level table regardless of IAS */
	if (data->start_level > 0) {
		data->start_level = 0;
		data->pgd_bits = 0;
	}
	/*
	 * MEMATTR: Mali has no actual notion of a non-cacheable type, so the
	 * best we can do is mimic the out-of-tree driver and hope that the
	 * "implementation-defined caching policy" is good enough. Similarly,
	 * we'll use it for the sake of a valid attribute for our 'device'
	 * index, although callers should never request that in practice.
	 */
	cfg->arm_mali_lpae_cfg.memattr =
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) |
		(ARM_MALI_LPAE_MEMATTR_WRITE_ALLOC
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) |
		(ARM_MALI_LPAE_MEMATTR_IMP_DEF
		 << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV));

	data->pgd = __arm_lpae_alloc_pages(ARM_LPAE_PGD_SIZE(data), GFP_KERNEL,
					   cfg, cookie);
	if (!data->pgd)
		goto out_free_data;

	/* Ensure the empty pgd is visible before TRANSTAB can be written */
	wmb();

	cfg->arm_mali_lpae_cfg.transtab = virt_to_phys(data->pgd) |
					  ARM_MALI_LPAE_TTBR_READ_INNER |
					  ARM_MALI_LPAE_TTBR_ADRMODE_TABLE;
	if (cfg->coherent_walk)
		cfg->arm_mali_lpae_cfg.transtab |= ARM_MALI_LPAE_TTBR_SHARE_OUTER;

	return &data->iop;

out_free_data:
	kfree(data);
	return NULL;
}

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_64_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_64_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_32_lpae_alloc_pgtable_s1,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_32_lpae_alloc_pgtable_s2,
	.free	= arm_lpae_free_pgtable,
};

struct io_pgtable_init_fns io_pgtable_arm_mali_lpae_init_fns = {
	.caps	= IO_PGTABLE_CAP_CUSTOM_ALLOCATOR,
	.alloc	= arm_mali_lpae_alloc_pgtable,
	.free	= arm_lpae_free_pgtable,
};

#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST

static struct io_pgtable_cfg *cfg_cookie __initdata;

static void __init dummy_tlb_flush_all(void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
}

static void __init dummy_tlb_flush(unsigned long iova, size_t size,
				   size_t granule, void *cookie)
{
	WARN_ON(cookie != cfg_cookie);
	WARN_ON(!(size & cfg_cookie->pgsize_bitmap));
}

static void __init dummy_tlb_add_page(struct iommu_iotlb_gather *gather,
				      unsigned long iova, size_t granule,
				      void *cookie)
{
	dummy_tlb_flush(iova, granule, granule, cookie);
}

static const struct iommu_flush_ops dummy_tlb_ops __initconst = {
	.tlb_flush_all	= dummy_tlb_flush_all,
	.tlb_flush_walk	= dummy_tlb_flush,
	.tlb_add_page	= dummy_tlb_add_page,
};

static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
{
	struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
	struct io_pgtable_cfg *cfg = &data->iop.cfg;

	pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n",
		cfg->pgsize_bitmap, cfg->ias);
	pr_err("data: %d levels, 0x%zx pgd_size, %u pg_shift, %u bits_per_level, pgd @ %p\n",
		ARM_LPAE_MAX_LEVELS - data->start_level, ARM_LPAE_PGD_SIZE(data),
		ilog2(ARM_LPAE_GRANULE(data)), data->bits_per_level, data->pgd);
}

#define __FAIL(ops, i)	({						\
		WARN(1, "selftest: test failed for fmt idx %d\n", (i));	\
		arm_lpae_dump_ops(ops);					\
		-EFAULT;						\
})

static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg)
{
	static const enum io_pgtable_fmt fmts[] __initconst = {
		ARM_64_LPAE_S1,
		ARM_64_LPAE_S2,
	};

	int i, j;
	unsigned long iova;
	size_t size, mapped;
	struct io_pgtable_ops *ops;

	for (i = 0; i < ARRAY_SIZE(fmts); ++i) {
		cfg_cookie = cfg;
		ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg);
		if (!ops) {
			pr_err("selftest: failed to allocate io pgtable ops\n");
			return -ENOMEM;
		}

		/*
		 * Initial sanity checks.
		 * Empty page tables shouldn't provide any translations.
		 */
		if (ops->iova_to_phys(ops, 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_1G + 42))
			return __FAIL(ops, i);

		if (ops->iova_to_phys(ops, SZ_2G + 42))
			return __FAIL(ops, i);

		/*
		 * Distinct mappings of different granule sizes.
		 */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_READ | IOMMU_WRITE |
					   IOMMU_NOEXEC | IOMMU_CACHE,
					   GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			/* Overlapping mappings */
			if (!ops->map_pages(ops, iova, iova + size, size, 1,
					    IOMMU_READ | IOMMU_NOEXEC,
					    GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/* Full unmap */
		iova = 0;
		for_each_set_bit(j, &cfg->pgsize_bitmap, BITS_PER_LONG) {
			size = 1UL << j;

			if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42))
				return __FAIL(ops, i);

			/* Remap full block */
			if (ops->map_pages(ops, iova, iova, size, 1,
					   IOMMU_WRITE, GFP_KERNEL, &mapped))
				return __FAIL(ops, i);

			if (ops->iova_to_phys(ops, iova + 42) != (iova + 42))
				return __FAIL(ops, i);

			iova += SZ_1G;
		}

		/*
		 * Map/unmap the last largest supported page of the IAS, which
		 * can trigger corner cases in the concatenated page tables.
		 */
		mapped = 0;
		size = 1UL << __fls(cfg->pgsize_bitmap);
		iova = (1UL << cfg->ias) - size;
		if (ops->map_pages(ops, iova, iova, size, 1,
				   IOMMU_READ | IOMMU_WRITE |
				   IOMMU_NOEXEC | IOMMU_CACHE,
				   GFP_KERNEL, &mapped))
			return __FAIL(ops, i);
		if (mapped != size)
			return __FAIL(ops, i);
		if (ops->unmap_pages(ops, iova, size, 1, NULL) != size)
			return __FAIL(ops, i);

		free_io_pgtable_ops(ops);
	}

	return 0;
}

static int __init arm_lpae_do_selftests(void)
{
	static const unsigned long pgsize[] __initconst = {
		SZ_4K | SZ_2M | SZ_1G,
		SZ_16K | SZ_32M,
		SZ_64K | SZ_512M,
	};

	static const unsigned int address_size[] __initconst = {
		32, 36, 40, 42, 44, 48,
	};

	int i, j, k, pass = 0, fail = 0;
	struct faux_device *dev;
	struct io_pgtable_cfg cfg = {
		.tlb = &dummy_tlb_ops,
		.coherent_walk = true,
		.quirks = IO_PGTABLE_QUIRK_NO_WARN,
	};

	dev = faux_device_create("io-pgtable-test", NULL, 0);
	if (!dev)
		return -ENOMEM;

	cfg.iommu_dev = &dev->dev;

	for (i = 0; i < ARRAY_SIZE(pgsize); ++i) {
		for (j = 0; j < ARRAY_SIZE(address_size); ++j) {
			/* Don't use ias > oas as it is not valid for stage-2. */
			for (k = 0; k <= j; ++k) {
				cfg.pgsize_bitmap = pgsize[i];
				cfg.ias = address_size[k];
				cfg.oas = address_size[j];
				pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u OAS %u\n",
					pgsize[i], cfg.ias, cfg.oas);
				if (arm_lpae_run_tests(&cfg))
					fail++;
				else
					pass++;
			}
		}
	}

	pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail);
	faux_device_destroy(dev);

	return fail ? -EFAULT : 0;
}
subsys_initcall(arm_lpae_do_selftests);
#endif