/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * IOMMU API for ARM architected SMMUv3 implementations.
 *
 * Copyright (C) 2015 ARM Limited
 */

#ifndef _ARM_SMMU_V3_H
#define _ARM_SMMU_V3_H

#include <linux/bitfield.h>
#include <linux/iommu.h>
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/sizes.h>

struct arm_smmu_device;

/* MMIO registers */
#define ARM_SMMU_IDR0			0x0
#define IDR0_ST_LVL			GENMASK(28, 27)
#define IDR0_ST_LVL_2LVL		1
#define IDR0_STALL_MODEL		GENMASK(25, 24)
#define IDR0_STALL_MODEL_STALL		0
#define IDR0_STALL_MODEL_FORCE		2
#define IDR0_TTENDIAN			GENMASK(22, 21)
#define IDR0_TTENDIAN_MIXED		0
#define IDR0_TTENDIAN_LE		2
#define IDR0_TTENDIAN_BE		3
#define IDR0_CD2L			(1 << 19)
#define IDR0_VMID16			(1 << 18)
#define IDR0_PRI			(1 << 16)
#define IDR0_SEV			(1 << 14)
#define IDR0_MSI			(1 << 13)
#define IDR0_ASID16			(1 << 12)
#define IDR0_ATS			(1 << 10)
#define IDR0_HYP			(1 << 9)
#define IDR0_HTTU			GENMASK(7, 6)
#define IDR0_HTTU_ACCESS		1
#define IDR0_HTTU_ACCESS_DIRTY		2
#define IDR0_COHACC			(1 << 4)
#define IDR0_TTF			GENMASK(3, 2)
#define IDR0_TTF_AARCH64		2
#define IDR0_TTF_AARCH32_64		3
#define IDR0_S1P			(1 << 1)
#define IDR0_S2P			(1 << 0)
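
/*
 * Illustrative sketch (not verbatim driver code): multi-bit ID register
 * fields are decoded with FIELD_GET() from <linux/bitfield.h> rather than
 * tested directly, e.g. during hardware probe:
 *
 *	u32 reg = readl_relaxed(smmu->base + ARM_SMMU_IDR0);
 *
 *	if (FIELD_GET(IDR0_ST_LVL, reg) == IDR0_ST_LVL_2LVL)
 *		smmu->features |= ARM_SMMU_FEAT_2_LVL_STRTAB;
 */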

#define ARM_SMMU_IDR1			0x4
#define IDR1_TABLES_PRESET		(1 << 30)
#define IDR1_QUEUES_PRESET		(1 << 29)
#define IDR1_REL			(1 << 28)
#define IDR1_ATTR_TYPES_OVR		(1 << 27)
#define IDR1_CMDQS			GENMASK(25, 21)
#define IDR1_EVTQS			GENMASK(20, 16)
#define IDR1_PRIQS			GENMASK(15, 11)
#define IDR1_SSIDSIZE			GENMASK(10, 6)
#define IDR1_SIDSIZE			GENMASK(5, 0)

#define ARM_SMMU_IDR3			0xc
#define IDR3_RIL			(1 << 10)

#define ARM_SMMU_IDR5			0x14
#define IDR5_STALL_MAX			GENMASK(31, 16)
#define IDR5_GRAN64K			(1 << 6)
#define IDR5_GRAN16K			(1 << 5)
#define IDR5_GRAN4K			(1 << 4)
#define IDR5_OAS			GENMASK(2, 0)
#define IDR5_OAS_32_BIT			0
#define IDR5_OAS_36_BIT			1
#define IDR5_OAS_40_BIT			2
#define IDR5_OAS_42_BIT			3
#define IDR5_OAS_44_BIT			4
#define IDR5_OAS_48_BIT			5
#define IDR5_OAS_52_BIT			6
#define IDR5_VAX			GENMASK(11, 10)
#define IDR5_VAX_52_BIT			1

#define ARM_SMMU_IIDR			0x18
#define IIDR_PRODUCTID			GENMASK(31, 20)
#define IIDR_VARIANT			GENMASK(19, 16)
#define IIDR_REVISION			GENMASK(15, 12)
#define IIDR_IMPLEMENTER		GENMASK(11, 0)

#define ARM_SMMU_CR0			0x20
#define CR0_ATSCHK			(1 << 4)
#define CR0_CMDQEN			(1 << 3)
#define CR0_EVTQEN			(1 << 2)
#define CR0_PRIQEN			(1 << 1)
#define CR0_SMMUEN			(1 << 0)

#define ARM_SMMU_CR0ACK			0x24

#define ARM_SMMU_CR1			0x28
#define CR1_TABLE_SH			GENMASK(11, 10)
#define CR1_TABLE_OC			GENMASK(9, 8)
#define CR1_TABLE_IC			GENMASK(7, 6)
#define CR1_QUEUE_SH			GENMASK(5, 4)
#define CR1_QUEUE_OC			GENMASK(3, 2)
#define CR1_QUEUE_IC			GENMASK(1, 0)
/* CR1 cacheability fields don't quite follow the usual TCR-style encoding */
#define CR1_CACHE_NC			0
#define CR1_CACHE_WB			1
#define CR1_CACHE_WT			2
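
/*
 * Sketch of how CR1 is composed at device-reset time (cf. arm-smmu-v3.c),
 * using the shareability and cacheability values defined in this file:
 *
 *	reg = FIELD_PREP(CR1_TABLE_SH, ARM_SMMU_SH_ISH) |
 *	      FIELD_PREP(CR1_TABLE_OC, CR1_CACHE_WB) |
 *	      FIELD_PREP(CR1_TABLE_IC, CR1_CACHE_WB) |
 *	      FIELD_PREP(CR1_QUEUE_SH, ARM_SMMU_SH_ISH) |
 *	      FIELD_PREP(CR1_QUEUE_OC, CR1_CACHE_WB) |
 *	      FIELD_PREP(CR1_QUEUE_IC, CR1_CACHE_WB);
 *	writel_relaxed(reg, smmu->base + ARM_SMMU_CR1);
 */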

#define ARM_SMMU_CR2			0x2c
#define CR2_PTM				(1 << 2)
#define CR2_RECINVSID			(1 << 1)
#define CR2_E2H				(1 << 0)

#define ARM_SMMU_GBPA			0x44
#define GBPA_UPDATE			(1 << 31)
#define GBPA_ABORT			(1 << 20)

#define ARM_SMMU_IRQ_CTRL		0x50
#define IRQ_CTRL_EVTQ_IRQEN		(1 << 2)
#define IRQ_CTRL_PRIQ_IRQEN		(1 << 1)
#define IRQ_CTRL_GERROR_IRQEN		(1 << 0)

#define ARM_SMMU_IRQ_CTRLACK		0x54

#define ARM_SMMU_GERROR			0x60
#define GERROR_SFM_ERR			(1 << 8)
#define GERROR_MSI_GERROR_ABT_ERR	(1 << 7)
#define GERROR_MSI_PRIQ_ABT_ERR		(1 << 6)
#define GERROR_MSI_EVTQ_ABT_ERR		(1 << 5)
#define GERROR_MSI_CMDQ_ABT_ERR		(1 << 4)
#define GERROR_PRIQ_ABT_ERR		(1 << 3)
#define GERROR_EVTQ_ABT_ERR		(1 << 2)
#define GERROR_CMDQ_ERR			(1 << 0)
#define GERROR_ERR_MASK			0x1fd

#define ARM_SMMU_GERRORN		0x64

#define ARM_SMMU_GERROR_IRQ_CFG0	0x68
#define ARM_SMMU_GERROR_IRQ_CFG1	0x70
#define ARM_SMMU_GERROR_IRQ_CFG2	0x74

#define ARM_SMMU_STRTAB_BASE		0x80
#define STRTAB_BASE_RA			(1UL << 62)
#define STRTAB_BASE_ADDR_MASK		GENMASK_ULL(51, 6)

#define ARM_SMMU_STRTAB_BASE_CFG	0x88
#define STRTAB_BASE_CFG_FMT		GENMASK(17, 16)
#define STRTAB_BASE_CFG_FMT_LINEAR	0
#define STRTAB_BASE_CFG_FMT_2LVL	1
#define STRTAB_BASE_CFG_SPLIT		GENMASK(10, 6)
#define STRTAB_BASE_CFG_LOG2SIZE	GENMASK(5, 0)

#define ARM_SMMU_CMDQ_BASE		0x90
#define ARM_SMMU_CMDQ_PROD		0x98
#define ARM_SMMU_CMDQ_CONS		0x9c

#define ARM_SMMU_EVTQ_BASE		0xa0
#define ARM_SMMU_EVTQ_PROD		0xa8
#define ARM_SMMU_EVTQ_CONS		0xac
#define ARM_SMMU_EVTQ_IRQ_CFG0		0xb0
#define ARM_SMMU_EVTQ_IRQ_CFG1		0xb8
#define ARM_SMMU_EVTQ_IRQ_CFG2		0xbc

#define ARM_SMMU_PRIQ_BASE		0xc0
#define ARM_SMMU_PRIQ_PROD		0xc8
#define ARM_SMMU_PRIQ_CONS		0xcc
#define ARM_SMMU_PRIQ_IRQ_CFG0		0xd0
#define ARM_SMMU_PRIQ_IRQ_CFG1		0xd8
#define ARM_SMMU_PRIQ_IRQ_CFG2		0xdc

#define ARM_SMMU_REG_SZ			0xe00

/* Common MSI config fields */
#define MSI_CFG0_ADDR_MASK		GENMASK_ULL(51, 2)
#define MSI_CFG2_SH			GENMASK(5, 4)
#define MSI_CFG2_MEMATTR		GENMASK(3, 0)

/* Common memory attribute values */
#define ARM_SMMU_SH_NSH			0
#define ARM_SMMU_SH_OSH			2
#define ARM_SMMU_SH_ISH			3
#define ARM_SMMU_MEMATTR_DEVICE_nGnRE	0x1
#define ARM_SMMU_MEMATTR_OIWB		0xf

#define Q_IDX(llq, p)			((p) & ((1 << (llq)->max_n_shift) - 1))
#define Q_WRP(llq, p)			((p) & (1 << (llq)->max_n_shift))
#define Q_OVERFLOW_FLAG			(1U << 31)
#define Q_OVF(p)			((p) & Q_OVERFLOW_FLAG)
#define Q_ENT(q, p)			((q)->base +			\
					 Q_IDX(&((q)->llq), p) *	\
					 (q)->ent_dwords)
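
/*
 * Queue pointers encode a wrapping index: the low max_n_shift bits select
 * the entry, the next bit up is the wrap flag (which is how a full queue is
 * told apart from an empty one), and bit 31 is the overflow flag. For
 * example, with a 256-entry queue (max_n_shift == 8), prod == 0x105 refers
 * to entry 5 with the wrap bit set.
 */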

#define Q_BASE_RWA			(1UL << 62)
#define Q_BASE_ADDR_MASK		GENMASK_ULL(51, 5)
#define Q_BASE_LOG2SIZE			GENMASK(4, 0)

/* Ensure DMA allocations are naturally aligned */
#ifdef CONFIG_CMA_ALIGNMENT
#define Q_MAX_SZ_SHIFT			(PAGE_SHIFT + CONFIG_CMA_ALIGNMENT)
#else
#define Q_MAX_SZ_SHIFT			(PAGE_SHIFT + MAX_PAGE_ORDER)
#endif
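
/*
 * The hardware requires a queue base to be aligned to the queue's size, and
 * size-based ("natural") alignment is only guaranteed by the DMA allocator
 * up to the limit above, hence the cap on queue sizes.
 */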

/*
 * Stream table.
 *
 * Linear: Enough to cover 1 << IDR1.SIDSIZE entries
 * 2lvl: 128k L1 entries,
 *       256 lazy entries per table (each table covers a PCI bus)
 */
#define STRTAB_SPLIT			8

#define STRTAB_L1_DESC_SPAN		GENMASK_ULL(4, 0)
#define STRTAB_L1_DESC_L2PTR_MASK	GENMASK_ULL(51, 6)

#define STRTAB_STE_DWORDS		8

struct arm_smmu_ste {
	__le64 data[STRTAB_STE_DWORDS];
};

#define STRTAB_NUM_L2_STES		(1 << STRTAB_SPLIT)
struct arm_smmu_strtab_l2 {
	struct arm_smmu_ste stes[STRTAB_NUM_L2_STES];
};

struct arm_smmu_strtab_l1 {
	__le64 l2ptr;
};
#define STRTAB_MAX_L1_ENTRIES		(1 << 17)

static inline u32 arm_smmu_strtab_l1_idx(u32 sid)
{
	return sid / STRTAB_NUM_L2_STES;
}

static inline u32 arm_smmu_strtab_l2_idx(u32 sid)
{
	return sid % STRTAB_NUM_L2_STES;
}
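
/*
 * Worked example: with STRTAB_SPLIT == 8, SID 0x1234 selects L1 entry 0x12
 * (sid / 256) and STE 0x34 (sid % 256) within that L2 table.
 */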

#define STRTAB_STE_0_V			(1UL << 0)
#define STRTAB_STE_0_CFG		GENMASK_ULL(3, 1)
#define STRTAB_STE_0_CFG_ABORT		0
#define STRTAB_STE_0_CFG_BYPASS		4
#define STRTAB_STE_0_CFG_S1_TRANS	5
#define STRTAB_STE_0_CFG_S2_TRANS	6

#define STRTAB_STE_0_S1FMT		GENMASK_ULL(5, 4)
#define STRTAB_STE_0_S1FMT_LINEAR	0
#define STRTAB_STE_0_S1FMT_64K_L2	2
#define STRTAB_STE_0_S1CTXPTR_MASK	GENMASK_ULL(51, 6)
#define STRTAB_STE_0_S1CDMAX		GENMASK_ULL(63, 59)
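
/*
 * Sketch of a stage-1 STE's first word (cf. arm_smmu_make_cdtable_ste()):
 *
 *	ste->data[0] = cpu_to_le64(
 *		STRTAB_STE_0_V |
 *		FIELD_PREP(STRTAB_STE_0_CFG, STRTAB_STE_0_CFG_S1_TRANS) |
 *		FIELD_PREP(STRTAB_STE_0_S1FMT, cd_table->s1fmt) |
 *		(cd_table->cdtab_dma & STRTAB_STE_0_S1CTXPTR_MASK) |
 *		FIELD_PREP(STRTAB_STE_0_S1CDMAX, cd_table->s1cdmax));
 */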

#define STRTAB_STE_1_S1DSS		GENMASK_ULL(1, 0)
#define STRTAB_STE_1_S1DSS_TERMINATE	0x0
#define STRTAB_STE_1_S1DSS_BYPASS	0x1
#define STRTAB_STE_1_S1DSS_SSID0	0x2

#define STRTAB_STE_1_S1C_CACHE_NC	0UL
#define STRTAB_STE_1_S1C_CACHE_WBRA	1UL
#define STRTAB_STE_1_S1C_CACHE_WT	2UL
#define STRTAB_STE_1_S1C_CACHE_WB	3UL
#define STRTAB_STE_1_S1CIR		GENMASK_ULL(3, 2)
#define STRTAB_STE_1_S1COR		GENMASK_ULL(5, 4)
#define STRTAB_STE_1_S1CSH		GENMASK_ULL(7, 6)

#define STRTAB_STE_1_S1STALLD		(1UL << 27)

#define STRTAB_STE_1_EATS		GENMASK_ULL(29, 28)
#define STRTAB_STE_1_EATS_ABT		0UL
#define STRTAB_STE_1_EATS_TRANS		1UL
#define STRTAB_STE_1_EATS_S1CHK		2UL

#define STRTAB_STE_1_STRW		GENMASK_ULL(31, 30)
#define STRTAB_STE_1_STRW_NSEL1		0UL
#define STRTAB_STE_1_STRW_EL2		2UL

#define STRTAB_STE_1_SHCFG		GENMASK_ULL(45, 44)
#define STRTAB_STE_1_SHCFG_INCOMING	1UL

#define STRTAB_STE_2_S2VMID		GENMASK_ULL(15, 0)
#define STRTAB_STE_2_VTCR		GENMASK_ULL(50, 32)
#define STRTAB_STE_2_VTCR_S2T0SZ	GENMASK_ULL(5, 0)
#define STRTAB_STE_2_VTCR_S2SL0		GENMASK_ULL(7, 6)
#define STRTAB_STE_2_VTCR_S2IR0		GENMASK_ULL(9, 8)
#define STRTAB_STE_2_VTCR_S2OR0		GENMASK_ULL(11, 10)
#define STRTAB_STE_2_VTCR_S2SH0		GENMASK_ULL(13, 12)
#define STRTAB_STE_2_VTCR_S2TG		GENMASK_ULL(15, 14)
#define STRTAB_STE_2_VTCR_S2PS		GENMASK_ULL(18, 16)
#define STRTAB_STE_2_S2AA64		(1UL << 51)
#define STRTAB_STE_2_S2ENDI		(1UL << 52)
#define STRTAB_STE_2_S2PTW		(1UL << 54)
#define STRTAB_STE_2_S2S		(1UL << 57)
#define STRTAB_STE_2_S2R		(1UL << 58)

#define STRTAB_STE_3_S2TTB_MASK		GENMASK_ULL(51, 4)

/*
 * Context descriptors.
 *
 * Linear: when less than 1024 SSIDs are supported
 * 2lvl: at most 1024 L1 entries,
 *       1024 lazy entries per table.
 */
#define CTXDESC_L2_ENTRIES		1024

#define CTXDESC_L1_DESC_V		(1UL << 0)
#define CTXDESC_L1_DESC_L2PTR_MASK	GENMASK_ULL(51, 12)

#define CTXDESC_CD_DWORDS		8

struct arm_smmu_cd {
	__le64 data[CTXDESC_CD_DWORDS];
};

struct arm_smmu_cdtab_l2 {
	struct arm_smmu_cd cds[CTXDESC_L2_ENTRIES];
};

struct arm_smmu_cdtab_l1 {
	__le64 l2ptr;
};

static inline unsigned int arm_smmu_cdtab_l1_idx(unsigned int ssid)
{
	return ssid / CTXDESC_L2_ENTRIES;
}

static inline unsigned int arm_smmu_cdtab_l2_idx(unsigned int ssid)
{
	return ssid % CTXDESC_L2_ENTRIES;
}
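
/*
 * Worked example: SSID 3000 selects L1 entry 2 (3000 / 1024) and CD 952
 * (3000 % 1024) within that L2 table.
 */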

#define CTXDESC_CD_0_TCR_T0SZ		GENMASK_ULL(5, 0)
#define CTXDESC_CD_0_TCR_TG0		GENMASK_ULL(7, 6)
#define CTXDESC_CD_0_TCR_IRGN0		GENMASK_ULL(9, 8)
#define CTXDESC_CD_0_TCR_ORGN0		GENMASK_ULL(11, 10)
#define CTXDESC_CD_0_TCR_SH0		GENMASK_ULL(13, 12)
#define CTXDESC_CD_0_TCR_EPD0		(1ULL << 14)
#define CTXDESC_CD_0_TCR_EPD1		(1ULL << 30)

#define CTXDESC_CD_0_ENDI		(1UL << 15)
#define CTXDESC_CD_0_V			(1UL << 31)

#define CTXDESC_CD_0_TCR_IPS		GENMASK_ULL(34, 32)
#define CTXDESC_CD_0_TCR_TBI0		(1ULL << 38)

#define CTXDESC_CD_0_TCR_HA		(1UL << 43)
#define CTXDESC_CD_0_TCR_HD		(1UL << 42)

#define CTXDESC_CD_0_AA64		(1UL << 41)
#define CTXDESC_CD_0_S			(1UL << 44)
#define CTXDESC_CD_0_R			(1UL << 45)
#define CTXDESC_CD_0_A			(1UL << 46)
#define CTXDESC_CD_0_ASET		(1UL << 47)
#define CTXDESC_CD_0_ASID		GENMASK_ULL(63, 48)

#define CTXDESC_CD_1_TTB0_MASK		GENMASK_ULL(51, 4)

/*
 * When the SMMU only supports linear context descriptor tables, pick a
 * reasonable size limit (64kB).
 */
#define CTXDESC_LINEAR_CDMAX		ilog2(SZ_64K / sizeof(struct arm_smmu_cd))
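
/*
 * With 64-byte CDs this works out as ilog2(SZ_64K / 64) == ilog2(1024), so
 * a linear table is capped at 1024 CDs (an s1cdmax of 10).
 */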

/* Command queue */
#define CMDQ_ENT_SZ_SHIFT		4
#define CMDQ_ENT_DWORDS			((1 << CMDQ_ENT_SZ_SHIFT) >> 3)
#define CMDQ_MAX_SZ_SHIFT		(Q_MAX_SZ_SHIFT - CMDQ_ENT_SZ_SHIFT)
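/*
 * Each command is therefore 16 bytes (two 64-bit words), and
 * CMDQ_MAX_SZ_SHIFT is the log2 of the maximum number of entries that fit
 * in a Q_MAX_SZ_SHIFT-sized allocation.
 */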

#define CMDQ_CONS_ERR			GENMASK(30, 24)
#define CMDQ_ERR_CERROR_NONE_IDX	0
#define CMDQ_ERR_CERROR_ILL_IDX		1
#define CMDQ_ERR_CERROR_ABT_IDX		2
#define CMDQ_ERR_CERROR_ATC_INV_IDX	3

#define CMDQ_PROD_OWNED_FLAG		Q_OVERFLOW_FLAG

/*
 * This is used to size the command queue and therefore must be at least
 * BITS_PER_LONG so that the valid_map works correctly (it relies on the
 * total number of queue entries being a multiple of BITS_PER_LONG).
 */
#define CMDQ_BATCH_ENTRIES		BITS_PER_LONG

#define CMDQ_0_OP			GENMASK_ULL(7, 0)
#define CMDQ_0_SSV			(1UL << 11)

#define CMDQ_PREFETCH_0_SID		GENMASK_ULL(63, 32)
#define CMDQ_PREFETCH_1_SIZE		GENMASK_ULL(4, 0)
#define CMDQ_PREFETCH_1_ADDR_MASK	GENMASK_ULL(63, 12)

#define CMDQ_CFGI_0_SSID		GENMASK_ULL(31, 12)
#define CMDQ_CFGI_0_SID			GENMASK_ULL(63, 32)
#define CMDQ_CFGI_1_LEAF		(1UL << 0)
#define CMDQ_CFGI_1_RANGE		GENMASK_ULL(4, 0)

#define CMDQ_TLBI_0_NUM			GENMASK_ULL(16, 12)
#define CMDQ_TLBI_RANGE_NUM_MAX		31
#define CMDQ_TLBI_0_SCALE		GENMASK_ULL(24, 20)
#define CMDQ_TLBI_0_VMID		GENMASK_ULL(47, 32)
#define CMDQ_TLBI_0_ASID		GENMASK_ULL(63, 48)
#define CMDQ_TLBI_1_LEAF		(1UL << 0)
#define CMDQ_TLBI_1_TTL			GENMASK_ULL(9, 8)
#define CMDQ_TLBI_1_TG			GENMASK_ULL(11, 10)
#define CMDQ_TLBI_1_VA_MASK		GENMASK_ULL(63, 12)
#define CMDQ_TLBI_1_IPA_MASK		GENMASK_ULL(51, 12)

#define CMDQ_ATC_0_SSID			GENMASK_ULL(31, 12)
#define CMDQ_ATC_0_SID			GENMASK_ULL(63, 32)
#define CMDQ_ATC_0_GLOBAL		(1UL << 9)
#define CMDQ_ATC_1_SIZE			GENMASK_ULL(5, 0)
#define CMDQ_ATC_1_ADDR_MASK		GENMASK_ULL(63, 12)

#define CMDQ_PRI_0_SSID			GENMASK_ULL(31, 12)
#define CMDQ_PRI_0_SID			GENMASK_ULL(63, 32)
#define CMDQ_PRI_1_GRPID		GENMASK_ULL(8, 0)
#define CMDQ_PRI_1_RESP			GENMASK_ULL(13, 12)

#define CMDQ_RESUME_0_RESP_TERM		0UL
#define CMDQ_RESUME_0_RESP_RETRY	1UL
#define CMDQ_RESUME_0_RESP_ABORT	2UL
#define CMDQ_RESUME_0_RESP		GENMASK_ULL(13, 12)
#define CMDQ_RESUME_0_SID		GENMASK_ULL(63, 32)
#define CMDQ_RESUME_1_STAG		GENMASK_ULL(15, 0)

#define CMDQ_SYNC_0_CS			GENMASK_ULL(13, 12)
#define CMDQ_SYNC_0_CS_NONE		0
#define CMDQ_SYNC_0_CS_IRQ		1
#define CMDQ_SYNC_0_CS_SEV		2
#define CMDQ_SYNC_0_MSH			GENMASK_ULL(23, 22)
#define CMDQ_SYNC_0_MSIATTR		GENMASK_ULL(27, 24)
#define CMDQ_SYNC_0_MSIDATA		GENMASK_ULL(63, 32)
#define CMDQ_SYNC_1_MSIADDR_MASK	GENMASK_ULL(51, 2)

/* Event queue */
#define EVTQ_ENT_SZ_SHIFT		5
#define EVTQ_ENT_DWORDS			((1 << EVTQ_ENT_SZ_SHIFT) >> 3)
#define EVTQ_MAX_SZ_SHIFT		(Q_MAX_SZ_SHIFT - EVTQ_ENT_SZ_SHIFT)

#define EVTQ_0_ID			GENMASK_ULL(7, 0)

#define EVT_ID_TRANSLATION_FAULT	0x10
#define EVT_ID_ADDR_SIZE_FAULT		0x11
#define EVT_ID_ACCESS_FAULT		0x12
#define EVT_ID_PERMISSION_FAULT		0x13

#define EVTQ_0_SSV			(1UL << 11)
#define EVTQ_0_SSID			GENMASK_ULL(31, 12)
#define EVTQ_0_SID			GENMASK_ULL(63, 32)
#define EVTQ_1_STAG			GENMASK_ULL(15, 0)
#define EVTQ_1_STALL			(1UL << 31)
#define EVTQ_1_PnU			(1UL << 33)
#define EVTQ_1_InD			(1UL << 34)
#define EVTQ_1_RnW			(1UL << 35)
#define EVTQ_1_S2			(1UL << 39)
#define EVTQ_1_CLASS			GENMASK_ULL(41, 40)
#define EVTQ_1_TT_READ			(1UL << 44)
#define EVTQ_2_ADDR			GENMASK_ULL(63, 0)
#define EVTQ_3_IPA			GENMASK_ULL(51, 12)

/* PRI queue */
#define PRIQ_ENT_SZ_SHIFT		4
#define PRIQ_ENT_DWORDS			((1 << PRIQ_ENT_SZ_SHIFT) >> 3)
#define PRIQ_MAX_SZ_SHIFT		(Q_MAX_SZ_SHIFT - PRIQ_ENT_SZ_SHIFT)

#define PRIQ_0_SID			GENMASK_ULL(31, 0)
#define PRIQ_0_SSID			GENMASK_ULL(51, 32)
#define PRIQ_0_PERM_PRIV		(1UL << 58)
#define PRIQ_0_PERM_EXEC		(1UL << 59)
#define PRIQ_0_PERM_READ		(1UL << 60)
#define PRIQ_0_PERM_WRITE		(1UL << 61)
#define PRIQ_0_PRG_LAST			(1UL << 62)
#define PRIQ_0_SSID_V			(1UL << 63)

#define PRIQ_1_PRG_IDX			GENMASK_ULL(8, 0)
#define PRIQ_1_ADDR_MASK		GENMASK_ULL(63, 12)

/* High-level queue structures */
#define ARM_SMMU_POLL_TIMEOUT_US	1000000 /* 1s! */
#define ARM_SMMU_POLL_SPIN_COUNT	10

#define MSI_IOVA_BASE			0x8000000
#define MSI_IOVA_LENGTH			0x100000
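
/*
 * The window above is the software-reserved IOVA range reported to the
 * IOMMU core (as an IOMMU_RESV_SW_MSI region) for mapping MSI doorbells.
 */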

enum pri_resp {
	PRI_RESP_DENY = 0,
	PRI_RESP_FAIL = 1,
	PRI_RESP_SUCC = 2,
};

struct arm_smmu_cmdq_ent {
	/* Common fields */
	u8				opcode;
	bool				substream_valid;

	/* Command-specific fields */
	union {
		#define CMDQ_OP_PREFETCH_CFG	0x1
		struct {
			u32			sid;
		} prefetch;

		#define CMDQ_OP_CFGI_STE	0x3
		#define CMDQ_OP_CFGI_ALL	0x4
		#define CMDQ_OP_CFGI_CD		0x5
		#define CMDQ_OP_CFGI_CD_ALL	0x6
		struct {
			u32			sid;
			u32			ssid;
			union {
				bool		leaf;
				u8		span;
			};
		} cfgi;

		#define CMDQ_OP_TLBI_NH_ASID	0x11
		#define CMDQ_OP_TLBI_NH_VA	0x12
		#define CMDQ_OP_TLBI_EL2_ALL	0x20
		#define CMDQ_OP_TLBI_EL2_ASID	0x21
		#define CMDQ_OP_TLBI_EL2_VA	0x22
		#define CMDQ_OP_TLBI_S12_VMALL	0x28
		#define CMDQ_OP_TLBI_S2_IPA	0x2a
		#define CMDQ_OP_TLBI_NSNH_ALL	0x30
		struct {
			u8			num;
			u8			scale;
			u16			asid;
			u16			vmid;
			bool			leaf;
			u8			ttl;
			u8			tg;
			u64			addr;
		} tlbi;

		#define CMDQ_OP_ATC_INV		0x40
		#define ATC_INV_SIZE_ALL	52
		struct {
			u32			sid;
			u32			ssid;
			u64			addr;
			u8			size;
			bool			global;
		} atc;

		#define CMDQ_OP_PRI_RESP	0x41
		struct {
			u32			sid;
			u32			ssid;
			u16			grpid;
			enum pri_resp		resp;
		} pri;

		#define CMDQ_OP_RESUME		0x44
		struct {
			u32			sid;
			u16			stag;
			u8			resp;
		} resume;

		#define CMDQ_OP_CMD_SYNC	0x46
		struct {
			u64			msiaddr;
		} sync;
	};
};
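
/*
 * Commands are described in this abstract form and encoded into raw queue
 * entries when issued. An illustrative per-ASID invalidation, along the
 * lines of arm_smmu_tlb_inv_asid():
 *
 *	struct arm_smmu_cmdq_ent cmd = {
 *		.opcode		= CMDQ_OP_TLBI_NH_ASID,
 *		.tlbi.asid	= asid,
 *	};
 */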

struct arm_smmu_ll_queue {
	union {
		u64			val;
		struct {
			u32		prod;
			u32		cons;
		};
		struct {
			atomic_t	prod;
			atomic_t	cons;
		} atomic;
		u8			__pad[SMP_CACHE_BYTES];
	} ____cacheline_aligned_in_smp;
	u32				max_n_shift;
};
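
/*
 * prod and cons overlay a single 64-bit 'val' so that the whole queue state
 * can be snapshotted or advanced with one atomic access; the pad keeps the
 * union on its own cache line.
 */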

struct arm_smmu_queue {
	struct arm_smmu_ll_queue	llq;
	int				irq; /* Wired interrupt */

	__le64				*base;
	dma_addr_t			base_dma;
	u64				q_base;

	size_t				ent_dwords;

	u32 __iomem			*prod_reg;
	u32 __iomem			*cons_reg;
};

struct arm_smmu_queue_poll {
	ktime_t				timeout;
	unsigned int			delay;
	unsigned int			spin_cnt;
	bool				wfe;
};

struct arm_smmu_cmdq {
	struct arm_smmu_queue		q;
	atomic_long_t			*valid_map;
	atomic_t			owner_prod;
	atomic_t			lock;
	bool				(*supports_cmd)(struct arm_smmu_cmdq_ent *ent);
};

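/*
 * A NULL ->supports_cmd hook (the normal case for the main command queue)
 * means every command is accepted; secondary queues such as the Tegra241
 * CMDQV install a filter here, with unsupported commands falling back to
 * the main queue.
 */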
static inline bool arm_smmu_cmdq_supports_cmd(struct arm_smmu_cmdq *cmdq,
					      struct arm_smmu_cmdq_ent *ent)
{
	return cmdq->supports_cmd ? cmdq->supports_cmd(ent) : true;
}

struct arm_smmu_cmdq_batch {
	u64				cmds[CMDQ_BATCH_ENTRIES * CMDQ_ENT_DWORDS];
	struct arm_smmu_cmdq		*cmdq;
	int				num;
};

struct arm_smmu_evtq {
	struct arm_smmu_queue		q;
	struct iopf_queue		*iopf;
	u32				max_stalls;
};

struct arm_smmu_priq {
	struct arm_smmu_queue		q;
};

/* High-level stream table and context descriptor structures */
struct arm_smmu_ctx_desc {
	u16				asid;
};

struct arm_smmu_ctx_desc_cfg {
	union {
		struct {
			struct arm_smmu_cd *table;
			unsigned int num_ents;
		} linear;
		struct {
			struct arm_smmu_cdtab_l1 *l1tab;
			struct arm_smmu_cdtab_l2 **l2ptrs;
			unsigned int num_l1_ents;
		} l2;
	};
	dma_addr_t			cdtab_dma;
	unsigned int			used_ssids;
	u8				in_ste;
	u8				s1fmt;
	/* log2 of the maximum number of CDs supported by this table */
	u8				s1cdmax;
};
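
/*
 * Which union member is live follows s1fmt: STRTAB_STE_0_S1FMT_LINEAR
 * selects .linear, STRTAB_STE_0_S1FMT_64K_L2 selects .l2.
 */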

static inline bool
arm_smmu_cdtab_allocated(struct arm_smmu_ctx_desc_cfg *cfg)
{
	return cfg->linear.table || cfg->l2.l1tab;
}

/* True if the cd table has SSIDs > 0 in use. */
static inline bool arm_smmu_ssids_in_use(struct arm_smmu_ctx_desc_cfg *cd_table)
{
	return cd_table->used_ssids;
}

struct arm_smmu_s2_cfg {
	u16				vmid;
};

struct arm_smmu_strtab_cfg {
	union {
		struct {
			struct arm_smmu_ste *table;
			dma_addr_t ste_dma;
			unsigned int num_ents;
		} linear;
		struct {
			struct arm_smmu_strtab_l1 *l1tab;
			struct arm_smmu_strtab_l2 **l2ptrs;
			dma_addr_t l1_dma;
			unsigned int num_l1_ents;
		} l2;
	};
};
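
/*
 * Whether .linear or .l2 is in use mirrors the format programmed into
 * STRTAB_BASE_CFG (two-level when ARM_SMMU_FEAT_2_LVL_STRTAB is set).
 */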

struct arm_smmu_impl_ops {
	int (*device_reset)(struct arm_smmu_device *smmu);
	void (*device_remove)(struct arm_smmu_device *smmu);
	int (*init_structures)(struct arm_smmu_device *smmu);
	struct arm_smmu_cmdq *(*get_secondary_cmdq)(
		struct arm_smmu_device *smmu, struct arm_smmu_cmdq_ent *ent);
};

/* An SMMUv3 instance */
struct arm_smmu_device {
	struct device			*dev;
	struct device			*impl_dev;
	const struct arm_smmu_impl_ops	*impl_ops;

	void __iomem			*base;
	void __iomem			*page1;

#define ARM_SMMU_FEAT_2_LVL_STRTAB	(1 << 0)
#define ARM_SMMU_FEAT_2_LVL_CDTAB	(1 << 1)
#define ARM_SMMU_FEAT_TT_LE		(1 << 2)
#define ARM_SMMU_FEAT_TT_BE		(1 << 3)
#define ARM_SMMU_FEAT_PRI		(1 << 4)
#define ARM_SMMU_FEAT_ATS		(1 << 5)
#define ARM_SMMU_FEAT_SEV		(1 << 6)
#define ARM_SMMU_FEAT_MSI		(1 << 7)
#define ARM_SMMU_FEAT_COHERENCY		(1 << 8)
#define ARM_SMMU_FEAT_TRANS_S1		(1 << 9)
#define ARM_SMMU_FEAT_TRANS_S2		(1 << 10)
#define ARM_SMMU_FEAT_STALLS		(1 << 11)
#define ARM_SMMU_FEAT_HYP		(1 << 12)
#define ARM_SMMU_FEAT_STALL_FORCE	(1 << 13)
#define ARM_SMMU_FEAT_VAX		(1 << 14)
#define ARM_SMMU_FEAT_RANGE_INV		(1 << 15)
#define ARM_SMMU_FEAT_BTM		(1 << 16)
#define ARM_SMMU_FEAT_SVA		(1 << 17)
#define ARM_SMMU_FEAT_E2H		(1 << 18)
#define ARM_SMMU_FEAT_NESTING		(1 << 19)
#define ARM_SMMU_FEAT_ATTR_TYPES_OVR	(1 << 20)
#define ARM_SMMU_FEAT_HA		(1 << 21)
#define ARM_SMMU_FEAT_HD		(1 << 22)
	u32				features;

#define ARM_SMMU_OPT_SKIP_PREFETCH	(1 << 0)
#define ARM_SMMU_OPT_PAGE0_REGS_ONLY	(1 << 1)
#define ARM_SMMU_OPT_MSIPOLL		(1 << 2)
#define ARM_SMMU_OPT_CMDQ_FORCE_SYNC	(1 << 3)
#define ARM_SMMU_OPT_TEGRA241_CMDQV	(1 << 4)
	u32				options;

	struct arm_smmu_cmdq		cmdq;
	struct arm_smmu_evtq		evtq;
	struct arm_smmu_priq		priq;

	int				gerr_irq;
	int				combined_irq;

	unsigned long			ias; /* IPA */
	unsigned long			oas; /* PA */
	unsigned long			pgsize_bitmap;

#define ARM_SMMU_MAX_ASIDS		(1 << 16)
	unsigned int			asid_bits;

#define ARM_SMMU_MAX_VMIDS		(1 << 16)
	unsigned int			vmid_bits;
	struct ida			vmid_map;

	unsigned int			ssid_bits;
	unsigned int			sid_bits;

	struct arm_smmu_strtab_cfg	strtab_cfg;

	/* IOMMU core code handle */
	struct iommu_device		iommu;

	struct rb_root			streams;
	struct mutex			streams_mutex;
};

struct arm_smmu_stream {
	u32				id;
	struct arm_smmu_master		*master;
	struct rb_node			node;
};

/* SMMU private data for each master */
struct arm_smmu_master {
	struct arm_smmu_device		*smmu;
	struct device			*dev;
	struct arm_smmu_stream		*streams;
	/* Locked by the iommu core using the group mutex */
	struct arm_smmu_ctx_desc_cfg	cd_table;
	unsigned int			num_streams;
	bool				ats_enabled : 1;
	bool				ste_ats_enabled : 1;
	bool				stall_enabled;
	bool				sva_enabled;
	bool				iopf_enabled;
	unsigned int			ssid_bits;
};

/* SMMU private data for an IOMMU domain */
enum arm_smmu_domain_stage {
	ARM_SMMU_DOMAIN_S1 = 0,
	ARM_SMMU_DOMAIN_S2,
};

struct arm_smmu_domain {
	struct arm_smmu_device		*smmu;
	struct mutex			init_mutex; /* Protects smmu pointer */

	struct io_pgtable_ops		*pgtbl_ops;
	atomic_t			nr_ats_masters;

	enum arm_smmu_domain_stage	stage;
	union {
		struct arm_smmu_ctx_desc	cd;
		struct arm_smmu_s2_cfg		s2_cfg;
	};

	struct iommu_domain		domain;

	/* List of struct arm_smmu_master_domain */
	struct list_head		devices;
	spinlock_t			devices_lock;

	struct mmu_notifier		mmu_notifier;
};

/* The following are exposed for testing purposes. */
struct arm_smmu_entry_writer_ops;
struct arm_smmu_entry_writer {
	const struct arm_smmu_entry_writer_ops *ops;
	struct arm_smmu_master *master;
};

struct arm_smmu_entry_writer_ops {
	void (*get_used)(const __le64 *entry, __le64 *used);
	void (*sync)(struct arm_smmu_entry_writer *writer);
};

#if IS_ENABLED(CONFIG_KUNIT)
void arm_smmu_get_ste_used(const __le64 *ent, __le64 *used_bits);
void arm_smmu_write_entry(struct arm_smmu_entry_writer *writer, __le64 *cur,
			  const __le64 *target);
void arm_smmu_get_cd_used(const __le64 *ent, __le64 *used_bits);
void arm_smmu_make_abort_ste(struct arm_smmu_ste *target);
void arm_smmu_make_bypass_ste(struct arm_smmu_device *smmu,
			      struct arm_smmu_ste *target);
void arm_smmu_make_cdtable_ste(struct arm_smmu_ste *target,
			       struct arm_smmu_master *master, bool ats_enabled,
			       unsigned int s1dss);
void arm_smmu_make_s2_domain_ste(struct arm_smmu_ste *target,
				 struct arm_smmu_master *master,
				 struct arm_smmu_domain *smmu_domain,
				 bool ats_enabled);
void arm_smmu_make_sva_cd(struct arm_smmu_cd *target,
			  struct arm_smmu_master *master, struct mm_struct *mm,
			  u16 asid);
#endif

struct arm_smmu_master_domain {
	struct list_head devices_elm;
	struct arm_smmu_master *master;
	ioasid_t ssid;
};

static inline struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
	return container_of(dom, struct arm_smmu_domain, domain);
}

extern struct xarray arm_smmu_asid_xa;
extern struct mutex arm_smmu_asid_lock;

struct arm_smmu_domain *arm_smmu_domain_alloc(void);

void arm_smmu_clear_cd(struct arm_smmu_master *master, ioasid_t ssid);
struct arm_smmu_cd *arm_smmu_get_cd_ptr(struct arm_smmu_master *master,
					u32 ssid);
void arm_smmu_make_s1_cd(struct arm_smmu_cd *target,
			 struct arm_smmu_master *master,
			 struct arm_smmu_domain *smmu_domain);
void arm_smmu_write_cd_entry(struct arm_smmu_master *master, int ssid,
			     struct arm_smmu_cd *cdptr,
			     const struct arm_smmu_cd *target);

int arm_smmu_set_pasid(struct arm_smmu_master *master,
		       struct arm_smmu_domain *smmu_domain, ioasid_t pasid,
		       struct arm_smmu_cd *cd);

void arm_smmu_tlb_inv_asid(struct arm_smmu_device *smmu, u16 asid);
void arm_smmu_tlb_inv_range_asid(unsigned long iova, size_t size, int asid,
				 size_t granule, bool leaf,
				 struct arm_smmu_domain *smmu_domain);
int arm_smmu_atc_inv_domain(struct arm_smmu_domain *smmu_domain,
			    unsigned long iova, size_t size);

void __arm_smmu_cmdq_skip_err(struct arm_smmu_device *smmu,
			      struct arm_smmu_cmdq *cmdq);
int arm_smmu_init_one_queue(struct arm_smmu_device *smmu,
			    struct arm_smmu_queue *q, void __iomem *page,
			    unsigned long prod_off, unsigned long cons_off,
			    size_t dwords, const char *name);
int arm_smmu_cmdq_init(struct arm_smmu_device *smmu,
		       struct arm_smmu_cmdq *cmdq);

#ifdef CONFIG_ARM_SMMU_V3_SVA
bool arm_smmu_sva_supported(struct arm_smmu_device *smmu);
bool arm_smmu_master_sva_supported(struct arm_smmu_master *master);
bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master);
int arm_smmu_master_enable_sva(struct arm_smmu_master *master);
int arm_smmu_master_disable_sva(struct arm_smmu_master *master);
bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master);
void arm_smmu_sva_notifier_synchronize(void);
struct iommu_domain *arm_smmu_sva_domain_alloc(struct device *dev,
					       struct mm_struct *mm);
#else /* CONFIG_ARM_SMMU_V3_SVA */
static inline bool arm_smmu_sva_supported(struct arm_smmu_device *smmu)
{
	return false;
}

static inline bool arm_smmu_master_sva_supported(struct arm_smmu_master *master)
{
	return false;
}

static inline bool arm_smmu_master_sva_enabled(struct arm_smmu_master *master)
{
	return false;
}

static inline int arm_smmu_master_enable_sva(struct arm_smmu_master *master)
{
	return -ENODEV;
}

static inline int arm_smmu_master_disable_sva(struct arm_smmu_master *master)
{
	return -ENODEV;
}

static inline bool arm_smmu_master_iopf_supported(struct arm_smmu_master *master)
{
	return false;
}

static inline void arm_smmu_sva_notifier_synchronize(void) {}

#define arm_smmu_sva_domain_alloc NULL

#endif /* CONFIG_ARM_SMMU_V3_SVA */

#ifdef CONFIG_TEGRA241_CMDQV
struct arm_smmu_device *tegra241_cmdqv_probe(struct arm_smmu_device *smmu);
#else /* CONFIG_TEGRA241_CMDQV */
static inline struct arm_smmu_device *
tegra241_cmdqv_probe(struct arm_smmu_device *smmu)
{
	return ERR_PTR(-ENODEV);
}
#endif /* CONFIG_TEGRA241_CMDQV */
#endif /* _ARM_SMMU_V3_H */