1 /* SPDX-License-Identifier: GPL-2.0-only */
2 /*
3  * Copyright © 2006-2015, Intel Corporation.
4  *
5  * Authors: Ashok Raj <ashok.raj@intel.com>
6  *          Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
7  *          David Woodhouse <David.Woodhouse@intel.com>
8  */
9 
10 #ifndef _INTEL_IOMMU_H_
11 #define _INTEL_IOMMU_H_
12 
13 #include <linux/types.h>
14 #include <linux/iova.h>
15 #include <linux/io.h>
16 #include <linux/idr.h>
17 #include <linux/mmu_notifier.h>
18 #include <linux/list.h>
19 #include <linux/iommu.h>
20 #include <linux/io-64-nonatomic-lo-hi.h>
21 #include <linux/dmar.h>
22 #include <linux/bitfield.h>
23 #include <linux/xarray.h>
24 #include <linux/perf_event.h>
25 #include <linux/pci.h>
26 
27 #include <asm/cacheflush.h>
28 #include <asm/iommu.h>
29 #include <uapi/linux/iommufd.h>
30 
31 /*
32  * VT-d hardware uses 4KiB page size regardless of host page size.
33  */
34 #define VTD_PAGE_SHIFT		(12)
35 #define VTD_PAGE_SIZE		(1UL << VTD_PAGE_SHIFT)
36 #define VTD_PAGE_MASK		(((u64)-1) << VTD_PAGE_SHIFT)
37 #define VTD_PAGE_ALIGN(addr)	(((addr) + VTD_PAGE_SIZE - 1) & VTD_PAGE_MASK)
38 
39 #define IOVA_PFN(addr)		((addr) >> PAGE_SHIFT)
40 
41 #define VTD_STRIDE_SHIFT        (9)
42 #define VTD_STRIDE_MASK         (((u64)-1) << VTD_STRIDE_SHIFT)
43 
44 #define DMA_PTE_READ		BIT_ULL(0)
45 #define DMA_PTE_WRITE		BIT_ULL(1)
46 #define DMA_PTE_LARGE_PAGE	BIT_ULL(7)
47 #define DMA_PTE_SNP		BIT_ULL(11)
48 
49 #define DMA_FL_PTE_PRESENT	BIT_ULL(0)
50 #define DMA_FL_PTE_US		BIT_ULL(2)
51 #define DMA_FL_PTE_ACCESS	BIT_ULL(5)
52 #define DMA_FL_PTE_DIRTY	BIT_ULL(6)
53 
54 #define DMA_SL_PTE_DIRTY_BIT	9
55 #define DMA_SL_PTE_DIRTY	BIT_ULL(DMA_SL_PTE_DIRTY_BIT)
56 
57 #define ADDR_WIDTH_5LEVEL	(57)
58 #define ADDR_WIDTH_4LEVEL	(48)
59 
60 #define CONTEXT_TT_MULTI_LEVEL	0
61 #define CONTEXT_TT_DEV_IOTLB	1
62 #define CONTEXT_TT_PASS_THROUGH 2
63 #define CONTEXT_PASIDE		BIT_ULL(3)
64 
65 /*
66  * Intel IOMMU register specification per version 1.0 public spec.
67  */
68 #define	DMAR_VER_REG	0x0	/* Arch version supported by this IOMMU */
69 #define	DMAR_CAP_REG	0x8	/* Hardware supported capabilities */
70 #define	DMAR_ECAP_REG	0x10	/* Extended capabilities supported */
71 #define	DMAR_GCMD_REG	0x18	/* Global command register */
72 #define	DMAR_GSTS_REG	0x1c	/* Global status register */
73 #define	DMAR_RTADDR_REG	0x20	/* Root entry table */
74 #define	DMAR_CCMD_REG	0x28	/* Context command reg */
75 #define	DMAR_FSTS_REG	0x34	/* Fault Status register */
76 #define	DMAR_FECTL_REG	0x38	/* Fault control register */
77 #define	DMAR_FEDATA_REG	0x3c	/* Fault event interrupt data register */
78 #define	DMAR_FEADDR_REG	0x40	/* Fault event interrupt addr register */
79 #define	DMAR_FEUADDR_REG 0x44	/* Upper address register */
80 #define	DMAR_AFLOG_REG	0x58	/* Advanced Fault control */
81 #define	DMAR_PMEN_REG	0x64	/* Enable Protected Memory Region */
82 #define	DMAR_PLMBASE_REG 0x68	/* PMRR Low addr */
83 #define	DMAR_PLMLIMIT_REG 0x6c	/* PMRR low limit */
84 #define	DMAR_PHMBASE_REG 0x70	/* pmrr high base addr */
85 #define	DMAR_PHMLIMIT_REG 0x78	/* pmrr high limit */
86 #define DMAR_IQH_REG	0x80	/* Invalidation queue head register */
87 #define DMAR_IQT_REG	0x88	/* Invalidation queue tail register */
88 #define DMAR_IQ_SHIFT	4	/* Invalidation queue head/tail shift */
89 #define DMAR_IQA_REG	0x90	/* Invalidation queue addr register */
90 #define DMAR_ICS_REG	0x9c	/* Invalidation complete status register */
91 #define DMAR_IQER_REG	0xb0	/* Invalidation queue error record register */
92 #define DMAR_IRTA_REG	0xb8    /* Interrupt remapping table addr register */
93 #define DMAR_PQH_REG	0xc0	/* Page request queue head register */
94 #define DMAR_PQT_REG	0xc8	/* Page request queue tail register */
95 #define DMAR_PQA_REG	0xd0	/* Page request queue address register */
96 #define DMAR_PRS_REG	0xdc	/* Page request status register */
97 #define DMAR_PECTL_REG	0xe0	/* Page request event control register */
98 #define	DMAR_PEDATA_REG	0xe4	/* Page request event interrupt data register */
99 #define	DMAR_PEADDR_REG	0xe8	/* Page request event interrupt addr register */
100 #define	DMAR_PEUADDR_REG 0xec	/* Page request event Upper address register */
101 #define DMAR_MTRRCAP_REG 0x100	/* MTRR capability register */
102 #define DMAR_MTRRDEF_REG 0x108	/* MTRR default type register */
103 #define DMAR_MTRR_FIX64K_00000_REG 0x120 /* MTRR Fixed range registers */
104 #define DMAR_MTRR_FIX16K_80000_REG 0x128
105 #define DMAR_MTRR_FIX16K_A0000_REG 0x130
106 #define DMAR_MTRR_FIX4K_C0000_REG 0x138
107 #define DMAR_MTRR_FIX4K_C8000_REG 0x140
108 #define DMAR_MTRR_FIX4K_D0000_REG 0x148
109 #define DMAR_MTRR_FIX4K_D8000_REG 0x150
110 #define DMAR_MTRR_FIX4K_E0000_REG 0x158
111 #define DMAR_MTRR_FIX4K_E8000_REG 0x160
112 #define DMAR_MTRR_FIX4K_F0000_REG 0x168
113 #define DMAR_MTRR_FIX4K_F8000_REG 0x170
114 #define DMAR_MTRR_PHYSBASE0_REG 0x180 /* MTRR Variable range registers */
115 #define DMAR_MTRR_PHYSMASK0_REG 0x188
116 #define DMAR_MTRR_PHYSBASE1_REG 0x190
117 #define DMAR_MTRR_PHYSMASK1_REG 0x198
118 #define DMAR_MTRR_PHYSBASE2_REG 0x1a0
119 #define DMAR_MTRR_PHYSMASK2_REG 0x1a8
120 #define DMAR_MTRR_PHYSBASE3_REG 0x1b0
121 #define DMAR_MTRR_PHYSMASK3_REG 0x1b8
122 #define DMAR_MTRR_PHYSBASE4_REG 0x1c0
123 #define DMAR_MTRR_PHYSMASK4_REG 0x1c8
124 #define DMAR_MTRR_PHYSBASE5_REG 0x1d0
125 #define DMAR_MTRR_PHYSMASK5_REG 0x1d8
126 #define DMAR_MTRR_PHYSBASE6_REG 0x1e0
127 #define DMAR_MTRR_PHYSMASK6_REG 0x1e8
128 #define DMAR_MTRR_PHYSBASE7_REG 0x1f0
129 #define DMAR_MTRR_PHYSMASK7_REG 0x1f8
130 #define DMAR_MTRR_PHYSBASE8_REG 0x200
131 #define DMAR_MTRR_PHYSMASK8_REG 0x208
132 #define DMAR_MTRR_PHYSBASE9_REG 0x210
133 #define DMAR_MTRR_PHYSMASK9_REG 0x218
134 #define DMAR_PERFCAP_REG	0x300
135 #define DMAR_PERFCFGOFF_REG	0x310
136 #define DMAR_PERFOVFOFF_REG	0x318
137 #define DMAR_PERFCNTROFF_REG	0x31c
138 #define DMAR_PERFINTRSTS_REG	0x324
139 #define DMAR_PERFINTRCTL_REG	0x328
140 #define DMAR_PERFEVNTCAP_REG	0x380
141 #define DMAR_ECMD_REG		0x400
142 #define DMAR_ECEO_REG		0x408
143 #define DMAR_ECRSP_REG		0x410
144 #define DMAR_ECCAP_REG		0x430
145 
146 #define DMAR_IQER_REG_IQEI(reg)		FIELD_GET(GENMASK_ULL(3, 0), reg)
147 #define DMAR_IQER_REG_ITESID(reg)	FIELD_GET(GENMASK_ULL(47, 32), reg)
148 #define DMAR_IQER_REG_ICESID(reg)	FIELD_GET(GENMASK_ULL(63, 48), reg)
149 
150 #define OFFSET_STRIDE		(9)
151 
152 #define dmar_readq(a) readq(a)
153 #define dmar_writeq(a,v) writeq(v,a)
154 #define dmar_readl(a) readl(a)
155 #define dmar_writel(a, v) writel(v, a)
156 
157 #define DMAR_VER_MAJOR(v)		(((v) & 0xf0) >> 4)
158 #define DMAR_VER_MINOR(v)		((v) & 0x0f)
159 
160 /*
161  * Decoding Capability Register
162  */
163 #define cap_esrtps(c)		(((c) >> 63) & 1)
164 #define cap_esirtps(c)		(((c) >> 62) & 1)
165 #define cap_ecmds(c)		(((c) >> 61) & 1)
166 #define cap_fl5lp_support(c)	(((c) >> 60) & 1)
167 #define cap_pi_support(c)	(((c) >> 59) & 1)
168 #define cap_fl1gp_support(c)	(((c) >> 56) & 1)
169 #define cap_read_drain(c)	(((c) >> 55) & 1)
170 #define cap_write_drain(c)	(((c) >> 54) & 1)
171 #define cap_max_amask_val(c)	(((c) >> 48) & 0x3f)
172 #define cap_num_fault_regs(c)	((((c) >> 40) & 0xff) + 1)
173 #define cap_pgsel_inv(c)	(((c) >> 39) & 1)
174 
175 #define cap_super_page_val(c)	(((c) >> 34) & 0xf)
176 #define cap_super_offset(c)	(((find_first_bit(&cap_super_page_val(c), 4)) \
177 					* OFFSET_STRIDE) + 21)
178 
179 #define cap_fault_reg_offset(c)	((((c) >> 24) & 0x3ff) * 16)
180 #define cap_max_fault_reg_offset(c) \
181 	(cap_fault_reg_offset(c) + cap_num_fault_regs(c) * 16)
182 
183 #define cap_zlr(c)		(((c) >> 22) & 1)
184 #define cap_isoch(c)		(((c) >> 23) & 1)
185 #define cap_mgaw(c)		((((c) >> 16) & 0x3f) + 1)
186 #define cap_sagaw(c)		(((c) >> 8) & 0x1f)
187 #define cap_caching_mode(c)	(((c) >> 7) & 1)
188 #define cap_phmr(c)		(((c) >> 6) & 1)
189 #define cap_plmr(c)		(((c) >> 5) & 1)
190 #define cap_rwbf(c)		(((c) >> 4) & 1)
191 #define cap_afl(c)		(((c) >> 3) & 1)
192 #define cap_ndoms(c)		(((unsigned long)1) << (4 + 2 * ((c) & 0x7)))
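/*
 * Illustrative sketch (not part of the original header): decoding a raw
 * capability register value with the helpers above. The field values are
 * made up for the example, not read from real hardware.
 *
 *	u64 cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
 *	unsigned long ndoms = cap_ndoms(cap);	// e.g. ND field 2 -> 1 << 8 = 256 domain IDs
 *	int mgaw = cap_mgaw(cap);		// max guest address width = field value + 1
 *	bool cm = cap_caching_mode(cap);	// CM=1: invalidate even on not-present -> present
 */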
193 /*
194  * Extended Capability Register
195  */
196 
197 #define ecap_pms(e)		(((e) >> 51) & 0x1)
198 #define ecap_rps(e)		(((e) >> 49) & 0x1)
199 #define ecap_smpwc(e)		(((e) >> 48) & 0x1)
200 #define ecap_flts(e)		(((e) >> 47) & 0x1)
201 #define ecap_slts(e)		(((e) >> 46) & 0x1)
202 #define ecap_slads(e)		(((e) >> 45) & 0x1)
203 #define ecap_smts(e)		(((e) >> 43) & 0x1)
204 #define ecap_dit(e)		(((e) >> 41) & 0x1)
205 #define ecap_pds(e)		(((e) >> 42) & 0x1)
206 #define ecap_pasid(e)		(((e) >> 40) & 0x1)
207 #define ecap_pss(e)		(((e) >> 35) & 0x1f)
208 #define ecap_eafs(e)		(((e) >> 34) & 0x1)
209 #define ecap_nwfs(e)		(((e) >> 33) & 0x1)
210 #define ecap_srs(e)		(((e) >> 31) & 0x1)
211 #define ecap_ers(e)		(((e) >> 30) & 0x1)
212 #define ecap_prs(e)		(((e) >> 29) & 0x1)
213 #define ecap_broken_pasid(e)	(((e) >> 28) & 0x1)
214 #define ecap_dis(e)		(((e) >> 27) & 0x1)
215 #define ecap_nest(e)		(((e) >> 26) & 0x1)
216 #define ecap_mts(e)		(((e) >> 25) & 0x1)
217 #define ecap_iotlb_offset(e) 	((((e) >> 8) & 0x3ff) * 16)
218 #define ecap_max_iotlb_offset(e) (ecap_iotlb_offset(e) + 16)
219 #define ecap_coherent(e)	((e) & 0x1)
220 #define ecap_qis(e)		((e) & 0x2)
221 #define ecap_pass_through(e)	(((e) >> 6) & 0x1)
222 #define ecap_eim_support(e)	(((e) >> 4) & 0x1)
223 #define ecap_ir_support(e)	(((e) >> 3) & 0x1)
224 #define ecap_dev_iotlb_support(e)	(((e) >> 2) & 0x1)
225 #define ecap_max_handle_mask(e) (((e) >> 20) & 0xf)
226 #define ecap_sc_support(e)	(((e) >> 7) & 0x1) /* Snooping Control */
227 
228 /*
229  * Decoding Perf Capability Register
230  */
231 #define pcap_num_cntr(p)	((p) & 0xffff)
232 #define pcap_cntr_width(p)	(((p) >> 16) & 0x7f)
233 #define pcap_num_event_group(p)	(((p) >> 24) & 0x1f)
234 #define pcap_filters_mask(p)	(((p) >> 32) & 0x1f)
235 #define pcap_interrupt(p)	(((p) >> 50) & 0x1)
236 /* The counter stride is calculated as 2 ^ (x+10) bytes */
237 #define pcap_cntr_stride(p)	(1ULL << ((((p) >> 52) & 0x7) + 10))
238 
239 /*
240  * Decoding Perf Event Capability Register
241  */
242 #define pecap_es(p)		((p) & 0xfffffff)
243 
244 /* Virtual command interface capability */
245 #define vccap_pasid(v)		(((v) & DMA_VCS_PAS)) /* PASID allocation */
246 
247 /* IOTLB_REG */
248 #define DMA_TLB_FLUSH_GRANU_OFFSET  60
249 #define DMA_TLB_GLOBAL_FLUSH (((u64)1) << 60)
250 #define DMA_TLB_DSI_FLUSH (((u64)2) << 60)
251 #define DMA_TLB_PSI_FLUSH (((u64)3) << 60)
252 #define DMA_TLB_IIRG(type) ((type >> 60) & 3)
253 #define DMA_TLB_IAIG(val) (((val) >> 57) & 3)
254 #define DMA_TLB_READ_DRAIN (((u64)1) << 49)
255 #define DMA_TLB_WRITE_DRAIN (((u64)1) << 48)
256 #define DMA_TLB_DID(id)	(((u64)((id) & 0xffff)) << 32)
257 #define DMA_TLB_IVT (((u64)1) << 63)
258 #define DMA_TLB_IH_NONLEAF (((u64)1) << 6)
259 #define DMA_TLB_MAX_SIZE (0x3f)
260 
261 /* INVALID_DESC */
262 #define DMA_CCMD_INVL_GRANU_OFFSET  61
263 #define DMA_ID_TLB_GLOBAL_FLUSH	(((u64)1) << 4)
264 #define DMA_ID_TLB_DSI_FLUSH	(((u64)2) << 4)
265 #define DMA_ID_TLB_PSI_FLUSH	(((u64)3) << 4)
266 #define DMA_ID_TLB_READ_DRAIN	(((u64)1) << 7)
267 #define DMA_ID_TLB_WRITE_DRAIN	(((u64)1) << 6)
268 #define DMA_ID_TLB_DID(id)	(((u64)((id & 0xffff) << 16)))
269 #define DMA_ID_TLB_IH_NONLEAF	(((u64)1) << 6)
270 #define DMA_ID_TLB_ADDR(addr)	(addr)
271 #define DMA_ID_TLB_ADDR_MASK(mask)	(mask)
272 
273 /* PMEN_REG */
274 #define DMA_PMEN_EPM (((u32)1)<<31)
275 #define DMA_PMEN_PRS (((u32)1)<<0)
276 
277 /* GCMD_REG */
278 #define DMA_GCMD_TE (((u32)1) << 31)
279 #define DMA_GCMD_SRTP (((u32)1) << 30)
280 #define DMA_GCMD_SFL (((u32)1) << 29)
281 #define DMA_GCMD_EAFL (((u32)1) << 28)
282 #define DMA_GCMD_WBF (((u32)1) << 27)
283 #define DMA_GCMD_QIE (((u32)1) << 26)
284 #define DMA_GCMD_SIRTP (((u32)1) << 24)
285 #define DMA_GCMD_IRE (((u32) 1) << 25)
286 #define DMA_GCMD_CFI (((u32) 1) << 23)
287 
288 /* GSTS_REG */
289 #define DMA_GSTS_TES (((u32)1) << 31)
290 #define DMA_GSTS_RTPS (((u32)1) << 30)
291 #define DMA_GSTS_FLS (((u32)1) << 29)
292 #define DMA_GSTS_AFLS (((u32)1) << 28)
293 #define DMA_GSTS_WBFS (((u32)1) << 27)
294 #define DMA_GSTS_QIES (((u32)1) << 26)
295 #define DMA_GSTS_IRTPS (((u32)1) << 24)
296 #define DMA_GSTS_IRES (((u32)1) << 25)
297 #define DMA_GSTS_CFIS (((u32)1) << 23)
298 
299 /* DMA_RTADDR_REG */
300 #define DMA_RTADDR_SMT (((u64)1) << 10)
301 
302 /* CCMD_REG */
303 #define DMA_CCMD_ICC (((u64)1) << 63)
304 #define DMA_CCMD_GLOBAL_INVL (((u64)1) << 61)
305 #define DMA_CCMD_DOMAIN_INVL (((u64)2) << 61)
306 #define DMA_CCMD_DEVICE_INVL (((u64)3) << 61)
307 #define DMA_CCMD_FM(m) (((u64)((m) & 0x3)) << 32)
308 #define DMA_CCMD_MASK_NOBIT 0
309 #define DMA_CCMD_MASK_1BIT 1
310 #define DMA_CCMD_MASK_2BIT 2
311 #define DMA_CCMD_MASK_3BIT 3
312 #define DMA_CCMD_SID(s) (((u64)((s) & 0xffff)) << 16)
313 #define DMA_CCMD_DID(d) ((u64)((d) & 0xffff))
314 
315 /* ECMD_REG */
316 #define DMA_MAX_NUM_ECMD		256
317 #define DMA_MAX_NUM_ECMDCAP		(DMA_MAX_NUM_ECMD / 64)
318 #define DMA_ECMD_REG_STEP		8
319 #define DMA_ECMD_ENABLE			0xf0
320 #define DMA_ECMD_DISABLE		0xf1
321 #define DMA_ECMD_FREEZE			0xf4
322 #define DMA_ECMD_UNFREEZE		0xf5
323 #define DMA_ECMD_OA_SHIFT		16
324 #define DMA_ECMD_ECRSP_IP		0x1
325 #define DMA_ECMD_ECCAP3			3
326 #define DMA_ECMD_ECCAP3_ECNTS		BIT_ULL(48)
327 #define DMA_ECMD_ECCAP3_DCNTS		BIT_ULL(49)
328 #define DMA_ECMD_ECCAP3_FCNTS		BIT_ULL(52)
329 #define DMA_ECMD_ECCAP3_UFCNTS		BIT_ULL(53)
330 #define DMA_ECMD_ECCAP3_ESSENTIAL	(DMA_ECMD_ECCAP3_ECNTS |	\
331 					 DMA_ECMD_ECCAP3_DCNTS |	\
332 					 DMA_ECMD_ECCAP3_FCNTS |	\
333 					 DMA_ECMD_ECCAP3_UFCNTS)
334 
335 /* FECTL_REG */
336 #define DMA_FECTL_IM (((u32)1) << 31)
337 
338 /* FSTS_REG */
339 #define DMA_FSTS_PFO (1 << 0) /* Primary Fault Overflow */
340 #define DMA_FSTS_PPF (1 << 1) /* Primary Pending Fault */
341 #define DMA_FSTS_IQE (1 << 4) /* Invalidation Queue Error */
342 #define DMA_FSTS_ICE (1 << 5) /* Invalidation Completion Error */
343 #define DMA_FSTS_ITE (1 << 6) /* Invalidation Time-out Error */
344 #define DMA_FSTS_PRO (1 << 7) /* Page Request Overflow */
345 #define dma_fsts_fault_record_index(s) (((s) >> 8) & 0xff)
346 
347 /* FRCD_REG, 32 bits access */
348 #define DMA_FRCD_F (((u32)1) << 31)
349 #define dma_frcd_type(d) ((d >> 30) & 1)
350 #define dma_frcd_fault_reason(c) (c & 0xff)
351 #define dma_frcd_source_id(c) (c & 0xffff)
352 #define dma_frcd_pasid_value(c) (((c) >> 8) & 0xfffff)
353 #define dma_frcd_pasid_present(c) (((c) >> 31) & 1)
354 /* low 64 bit */
355 #define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
356 
357 /* PRS_REG */
358 #define DMA_PRS_PPR	((u32)1)
359 #define DMA_PRS_PRO	((u32)2)
360 
361 #define DMA_VCS_PAS	((u64)1)
362 
363 /* PERFINTRSTS_REG */
364 #define DMA_PERFINTRSTS_PIS	((u32)1)
365 
366 #define IOMMU_WAIT_OP(iommu, offset, op, cond, sts)			\
367 do {									\
368 	cycles_t start_time = get_cycles();				\
369 	while (1) {							\
370 		sts = op(iommu->reg + offset);				\
371 		if (cond)						\
372 			break;						\
373 		if (DMAR_OPERATION_TIMEOUT < (get_cycles() - start_time))\
374 			panic("DMAR hardware is malfunctioning\n");	\
375 		cpu_relax();						\
376 	}								\
377 } while (0)
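/*
 * Example use of IOMMU_WAIT_OP() (illustrative sketch): spin until the
 * translation-enable status bit shows up in the global status register
 * after DMA_GCMD_TE has been written to DMAR_GCMD_REG. "sts" is a local
 * u32 that receives the last value read.
 *
 *	u32 sts;
 *
 *	IOMMU_WAIT_OP(iommu, DMAR_GSTS_REG, readl, (sts & DMA_GSTS_TES), sts);
 */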
378 
379 #define QI_LENGTH	256	/* queue length */
380 
381 enum {
382 	QI_FREE,
383 	QI_IN_USE,
384 	QI_DONE,
385 	QI_ABORT
386 };
387 
388 #define QI_CC_TYPE		0x1
389 #define QI_IOTLB_TYPE		0x2
390 #define QI_DIOTLB_TYPE		0x3
391 #define QI_IEC_TYPE		0x4
392 #define QI_IWD_TYPE		0x5
393 #define QI_EIOTLB_TYPE		0x6
394 #define QI_PC_TYPE		0x7
395 #define QI_DEIOTLB_TYPE		0x8
396 #define QI_PGRP_RESP_TYPE	0x9
397 #define QI_PSTRM_RESP_TYPE	0xa
398 
399 #define QI_IEC_SELECTIVE	(((u64)1) << 4)
400 #define QI_IEC_IIDEX(idx)	(((u64)(idx & 0xffff) << 32))
401 #define QI_IEC_IM(m)		(((u64)(m & 0x1f) << 27))
402 
403 #define QI_IWD_STATUS_DATA(d)	(((u64)d) << 32)
404 #define QI_IWD_STATUS_WRITE	(((u64)1) << 5)
405 #define QI_IWD_FENCE		(((u64)1) << 6)
406 #define QI_IWD_PRQ_DRAIN	(((u64)1) << 7)
407 
408 #define QI_IOTLB_DID(did) 	(((u64)did) << 16)
409 #define QI_IOTLB_DR(dr) 	(((u64)dr) << 7)
410 #define QI_IOTLB_DW(dw) 	(((u64)dw) << 6)
411 #define QI_IOTLB_GRAN(gran) 	(((u64)gran) >> (DMA_TLB_FLUSH_GRANU_OFFSET-4))
412 #define QI_IOTLB_ADDR(addr)	(((u64)addr) & VTD_PAGE_MASK)
413 #define QI_IOTLB_IH(ih)		(((u64)ih) << 6)
414 #define QI_IOTLB_AM(am)		(((u8)am) & 0x3f)
415 
416 #define QI_CC_FM(fm)		(((u64)fm) << 48)
417 #define QI_CC_SID(sid)		(((u64)sid) << 32)
418 #define QI_CC_DID(did)		(((u64)did) << 16)
419 #define QI_CC_GRAN(gran)	(((u64)gran) >> (DMA_CCMD_INVL_GRANU_OFFSET-4))
420 
421 #define QI_DEV_IOTLB_SID(sid)	((u64)((sid) & 0xffff) << 32)
422 #define QI_DEV_IOTLB_QDEP(qdep)	(((qdep) & 0x1f) << 16)
423 #define QI_DEV_IOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
424 #define QI_DEV_IOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
425 				   ((u64)((pfsid >> 4) & 0xfff) << 52))
426 #define QI_DEV_IOTLB_SIZE	1
427 #define QI_DEV_IOTLB_MAX_INVS	32
428 
429 #define QI_PC_PASID(pasid)	(((u64)pasid) << 32)
430 #define QI_PC_DID(did)		(((u64)did) << 16)
431 #define QI_PC_GRAN(gran)	(((u64)gran) << 4)
432 
433 /* PASID cache invalidation granu */
434 #define QI_PC_ALL_PASIDS	0
435 #define QI_PC_PASID_SEL		1
436 #define QI_PC_GLOBAL		3
437 
438 #define QI_EIOTLB_ADDR(addr)	((u64)(addr) & VTD_PAGE_MASK)
439 #define QI_EIOTLB_IH(ih)	(((u64)ih) << 6)
440 #define QI_EIOTLB_AM(am)	(((u64)am) & 0x3f)
441 #define QI_EIOTLB_PASID(pasid) 	(((u64)pasid) << 32)
442 #define QI_EIOTLB_DID(did)	(((u64)did) << 16)
443 #define QI_EIOTLB_GRAN(gran) 	(((u64)gran) << 4)
444 
445 /* QI Dev-IOTLB inv granu */
446 #define QI_DEV_IOTLB_GRAN_ALL		1
447 #define QI_DEV_IOTLB_GRAN_PASID_SEL	0
448 
449 #define QI_DEV_EIOTLB_ADDR(a)	((u64)(a) & VTD_PAGE_MASK)
450 #define QI_DEV_EIOTLB_SIZE	(((u64)1) << 11)
451 #define QI_DEV_EIOTLB_PASID(p)	((u64)((p) & 0xfffff) << 32)
452 #define QI_DEV_EIOTLB_SID(sid)	((u64)((sid) & 0xffff) << 16)
453 #define QI_DEV_EIOTLB_QDEP(qd)	((u64)((qd) & 0x1f) << 4)
454 #define QI_DEV_EIOTLB_PFSID(pfsid) (((u64)(pfsid & 0xf) << 12) | \
455 				    ((u64)((pfsid >> 4) & 0xfff) << 52))
456 #define QI_DEV_EIOTLB_MAX_INVS	32
457 
458 /* Page group response descriptor QW0 */
459 #define QI_PGRP_PASID_P(p)	(((u64)(p)) << 4)
460 #define QI_PGRP_RESP_CODE(res)	(((u64)(res)) << 12)
461 #define QI_PGRP_DID(rid)	(((u64)(rid)) << 16)
462 #define QI_PGRP_PASID(pasid)	(((u64)(pasid)) << 32)
463 
464 /* Page group response descriptor QW1 */
465 #define QI_PGRP_LPIG(x)		(((u64)(x)) << 2)
466 #define QI_PGRP_IDX(idx)	(((u64)(idx)) << 3)
467 
468 
469 #define QI_RESP_SUCCESS		0x0
470 #define QI_RESP_INVALID		0x1
471 #define QI_RESP_FAILURE		0xf
472 
473 #define QI_GRAN_NONG_PASID		2
474 #define QI_GRAN_PSI_PASID		3
475 
476 #define qi_shift(iommu)		(DMAR_IQ_SHIFT + !!ecap_smts((iommu)->ecap))
477 
478 struct qi_desc {
479 	u64 qw0;
480 	u64 qw1;
481 	u64 qw2;
482 	u64 qw3;
483 };
484 
485 struct q_inval {
486 	raw_spinlock_t  q_lock;
487 	void		*desc;          /* invalidation queue */
488 	int             *desc_status;   /* desc status */
489 	int             free_head;      /* first free entry */
490 	int             free_tail;      /* last free entry */
491 	int             free_cnt;
492 };
493 
494 /* Page Request Queue depth */
495 #define PRQ_ORDER	4
496 #define PRQ_RING_MASK	((0x1000 << PRQ_ORDER) - 0x20)
497 #define PRQ_DEPTH	((0x1000 << PRQ_ORDER) >> 5)
498 
499 struct dmar_pci_notify_info;
500 
501 #ifdef CONFIG_IRQ_REMAP
502 /* 1MB - maximum possible interrupt remapping table size */
503 #define INTR_REMAP_PAGE_ORDER	8
504 #define INTR_REMAP_TABLE_REG_SIZE	0xf
505 #define INTR_REMAP_TABLE_REG_SIZE_MASK  0xf
506 
507 #define INTR_REMAP_TABLE_ENTRIES	65536
508 
509 struct irq_domain;
510 
511 struct ir_table {
512 	struct irte *base;
513 	unsigned long *bitmap;
514 };
515 
516 void intel_irq_remap_add_device(struct dmar_pci_notify_info *info);
517 #else
518 static inline void
519 intel_irq_remap_add_device(struct dmar_pci_notify_info *info) { }
520 #endif
521 
522 struct iommu_flush {
523 	void (*flush_context)(struct intel_iommu *iommu, u16 did, u16 sid,
524 			      u8 fm, u64 type);
525 	void (*flush_iotlb)(struct intel_iommu *iommu, u16 did, u64 addr,
526 			    unsigned int size_order, u64 type);
527 };
528 
529 enum {
530 	SR_DMAR_FECTL_REG,
531 	SR_DMAR_FEDATA_REG,
532 	SR_DMAR_FEADDR_REG,
533 	SR_DMAR_FEUADDR_REG,
534 	MAX_SR_DMAR_REGS
535 };
536 
537 #define VTD_FLAG_TRANS_PRE_ENABLED	(1 << 0)
538 #define VTD_FLAG_IRQ_REMAP_PRE_ENABLED	(1 << 1)
539 #define VTD_FLAG_SVM_CAPABLE		(1 << 2)
540 
541 #define sm_supported(iommu)	(intel_iommu_sm && ecap_smts((iommu)->ecap))
542 #define pasid_supported(iommu)	(sm_supported(iommu) &&			\
543 				 ecap_pasid((iommu)->ecap))
544 #define ssads_supported(iommu) (sm_supported(iommu) &&                 \
545 				ecap_slads((iommu)->ecap))
546 #define nested_supported(iommu)	(sm_supported(iommu) &&			\
547 				 ecap_nest((iommu)->ecap))
548 
549 struct pasid_entry;
550 struct pasid_state_entry;
551 struct page_req_dsc;
552 
553 /*
554  * 0: Present
555  * 1-11: Reserved
556  * 12-63: Context Ptr (12 - (haw-1))
557  * 64-127: Reserved
558  */
559 struct root_entry {
560 	u64     lo;
561 	u64     hi;
562 };
563 
564 /*
565  * low 64 bits:
566  * 0: present
567  * 1: fault processing disable
568  * 2-3: translation type
569  * 12-63: address space root
570  * high 64 bits:
571  * 0-2: address width
572  * 3-6: aval
573  * 8-23: domain id
574  */
575 struct context_entry {
576 	u64 lo;
577 	u64 hi;
578 };
579 
580 struct iommu_domain_info {
581 	struct intel_iommu *iommu;
582 	unsigned int refcnt;		/* Refcount of devices per iommu */
583 	u16 did;			/* Domain ids per IOMMU. Use u16 since
584 					 * domain ids are 16 bit wide according
585 					 * to VT-d spec, section 9.3 */
586 };
587 
588 /*
589  * We start simply by using a fixed size for the batched descriptors. This
590  * size is currently sufficient for our needs. Future improvements could
591  * involve dynamically allocating the batch buffer based on actual demand,
592  * allowing us to adjust the batch size for optimal performance in different
593  * scenarios.
594  */
595 #define QI_MAX_BATCHED_DESC_COUNT 16
596 struct qi_batch {
597 	struct qi_desc descs[QI_MAX_BATCHED_DESC_COUNT];
598 	unsigned int index;
599 };
600 
601 struct dmar_domain {
602 	int	nid;			/* node id */
603 	struct xarray iommu_array;	/* Attached IOMMU array */
604 
605 	u8 iommu_coherency: 1;		/* indicate coherency of iommu access */
606 	u8 force_snooping : 1;		/* Create IOPTEs with snoop control */
607 	u8 set_pte_snp:1;
608 	u8 use_first_level:1;		/* DMA translation for the domain goes
609 					 * through the first level page table,
610 					 * otherwise, goes through the second
611 					 * level.
612 					 */
613 	u8 dirty_tracking:1;		/* Dirty tracking is enabled */
614 	u8 nested_parent:1;		/* Has other domains nested on it */
615 	u8 has_mappings:1;		/* Has mappings configured through
616 					 * iommu_map() interface.
617 					 */
618 
619 	spinlock_t lock;		/* Protect device tracking lists */
620 	struct list_head devices;	/* all devices' list */
621 	struct list_head dev_pasids;	/* all attached pasids */
622 
623 	spinlock_t cache_lock;		/* Protect the cache tag list */
624 	struct list_head cache_tags;	/* Cache tag list */
625 	struct qi_batch *qi_batch;	/* Batched QI descriptors */
626 
627 	int		iommu_superpage;/* Level of superpages supported:
628 					   0 == 4KiB (no superpages), 1 == 2MiB,
629 					   2 == 1GiB, 3 == 512GiB, 4 == 1TiB */
630 	union {
631 		/* DMA remapping domain */
632 		struct {
633 			/* virtual address */
634 			struct dma_pte	*pgd;
635 			/* max guest address width */
636 			int		gaw;
637 			/*
638 			 * adjusted guest address width:
639 			 *   0: level 2 30-bit
640 			 *   1: level 3 39-bit
641 			 *   2: level 4 48-bit
642 			 *   3: level 5 57-bit
643 			 */
644 			int		agaw;
645 			/* maximum mapped address */
646 			u64		max_addr;
647 			/* Protect the s1_domains list */
648 			spinlock_t	s1_lock;
649 			/* Track s1_domains nested on this domain */
650 			struct list_head s1_domains;
651 		};
652 
653 		/* Nested user domain */
654 		struct {
655 			/* parent page table which the user domain is nested on */
656 			struct dmar_domain *s2_domain;
657 			/* page table attributes */
658 			struct iommu_hwpt_vtd_s1 s1_cfg;
659 			/* link to parent domain siblings */
660 			struct list_head s2_link;
661 		};
662 
663 		/* SVA domain */
664 		struct {
665 			struct mmu_notifier notifier;
666 		};
667 	};
668 
669 	struct iommu_domain domain;	/* generic domain data structure for
670 					   iommu core */
671 };
672 
673 /*
674  * In theory, the VT-d 4.0 spec can support up to 2 ^ 16 counters.
675  * But in practice, existing platforms implement only 14 counters.
676  * Setting the max number of counters to 64 should be good
677  * enough for a long time. Also, supporting more than 64 counters
678  * requires more extras, e.g., extra freeze and overflow registers,
679  * which is not necessary for now.
680  */
681 #define IOMMU_PMU_IDX_MAX		64
682 
683 struct iommu_pmu {
684 	struct intel_iommu	*iommu;
685 	u32			num_cntr;	/* Number of counters */
686 	u32			num_eg;		/* Number of event group */
687 	u32			cntr_width;	/* Counter width */
688 	u32			cntr_stride;	/* Counter Stride */
689 	u32			filter;		/* Bitmask of filter support */
690 	void __iomem		*base;		/* the PerfMon base address */
691 	void __iomem		*cfg_reg;	/* counter configuration base address */
692 	void __iomem		*cntr_reg;	/* counter 0 address*/
693 	void __iomem		*overflow;	/* overflow status register */
694 
695 	u64			*evcap;		/* Indicates all supported events */
696 	u32			**cntr_evcap;	/* Supported events of each counter. */
697 
698 	struct pmu		pmu;
699 	DECLARE_BITMAP(used_mask, IOMMU_PMU_IDX_MAX);
700 	struct perf_event	*event_list[IOMMU_PMU_IDX_MAX];
701 	unsigned char		irq_name[16];
702 };
703 
704 #define IOMMU_IRQ_ID_OFFSET_PRQ		(DMAR_UNITS_SUPPORTED)
705 #define IOMMU_IRQ_ID_OFFSET_PERF	(2 * DMAR_UNITS_SUPPORTED)
706 
707 struct intel_iommu {
708 	void __iomem	*reg; /* Pointer to hardware regs, virtual addr */
709 	u64 		reg_phys; /* physical address of hw register set */
710 	u64		reg_size; /* size of hw register set */
711 	u64		cap;
712 	u64		ecap;
713 	u64		vccap;
714 	u64		ecmdcap[DMA_MAX_NUM_ECMDCAP];
715 	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
716 	raw_spinlock_t	register_lock; /* protect register handling */
717 	int		seq_id;	/* sequence id of the iommu */
718 	int		agaw; /* agaw of this iommu */
719 	int		msagaw; /* max sagaw of this iommu */
720 	unsigned int	irq, pr_irq, perf_irq;
721 	u16		segment;     /* PCI segment# */
722 	unsigned char	name[16];    /* Device Name */
723 
724 #ifdef CONFIG_INTEL_IOMMU
725 	unsigned long 	*domain_ids; /* bitmap of domains */
726 	unsigned long	*copied_tables; /* bitmap of copied tables */
727 	spinlock_t	lock; /* protect context, domain ids */
728 	struct root_entry *root_entry; /* virtual address */
729 
730 	struct iommu_flush flush;
731 #endif
732 	struct page_req_dsc *prq;
733 	unsigned char prq_name[16];    /* Name for PRQ interrupt */
734 	unsigned long prq_seq_number;
735 	struct completion prq_complete;
736 	struct iopf_queue *iopf_queue;
737 	unsigned char iopfq_name[16];
738 	/* Synchronization between fault report and iommu device release. */
739 	struct mutex iopf_lock;
740 	struct q_inval  *qi;            /* Queued invalidation info */
741 	u32 iommu_state[MAX_SR_DMAR_REGS]; /* Store iommu states between suspend and resume.*/
742 
743 	/* rb tree for all probed devices */
744 	struct rb_root device_rbtree;
745 	/* protect the device_rbtree */
746 	spinlock_t device_rbtree_lock;
747 
748 #ifdef CONFIG_IRQ_REMAP
749 	struct ir_table *ir_table;	/* Interrupt remapping info */
750 	struct irq_domain *ir_domain;
751 #endif
752 	struct iommu_device iommu;  /* IOMMU core code handle */
753 	int		node;
754 	u32		flags;      /* Software defined flags */
755 
756 	struct dmar_drhd_unit *drhd;
757 	void *perf_statistic;
758 
759 	struct iommu_pmu *pmu;
760 };
761 
762 /* PCI domain-device relationship */
763 struct device_domain_info {
764 	struct list_head link;	/* link to domain siblings */
765 	u32 segment;		/* PCI segment number */
766 	u8 bus;			/* PCI bus number */
767 	u8 devfn;		/* PCI devfn number */
768 	u16 pfsid;		/* SRIOV physical function source ID */
769 	u8 pasid_supported:3;
770 	u8 pasid_enabled:1;
771 	u8 pri_supported:1;
772 	u8 pri_enabled:1;
773 	u8 ats_supported:1;
774 	u8 ats_enabled:1;
775 	u8 dtlb_extra_inval:1;	/* Quirk for devices need extra flush */
776 	u8 ats_qdep;
777 	unsigned int iopf_refcount;
778 	struct device *dev; /* it's NULL for PCIe-to-PCI bridge */
779 	struct intel_iommu *iommu; /* IOMMU used by this device */
780 	struct dmar_domain *domain; /* pointer to domain */
781 	struct pasid_table *pasid_table; /* pasid table */
782 	/* device tracking node(lookup by PCI RID) */
783 	struct rb_node node;
784 #ifdef CONFIG_INTEL_IOMMU_DEBUGFS
785 	struct dentry *debugfs_dentry; /* pointer to device directory dentry */
786 #endif
787 };
788 
789 struct dev_pasid_info {
790 	struct list_head link_domain;	/* link to domain siblings */
791 	struct device *dev;
792 	ioasid_t pasid;
793 #ifdef CONFIG_INTEL_IOMMU_DEBUGFS
794 	struct dentry *debugfs_dentry; /* pointer to pasid directory dentry */
795 #endif
796 };
797 
798 static inline void __iommu_flush_cache(
799 	struct intel_iommu *iommu, void *addr, int size)
800 {
801 	if (!ecap_coherent(iommu->ecap))
802 		clflush_cache_range(addr, size);
803 }
804 
805 /* Convert generic struct iommu_domain to private struct dmar_domain */
806 static inline struct dmar_domain *to_dmar_domain(struct iommu_domain *dom)
807 {
808 	return container_of(dom, struct dmar_domain, domain);
809 }
810 
811 /*
812  * Domain ID reserved for pasid entries programmed for first-level
813  * only and pass-through transfer modes.
814  */
815 #define FLPT_DEFAULT_DID		1
816 #define NUM_RESERVED_DID		2
817 
818 /* Retrieve the domain ID which has been allocated to the domain */
819 static inline u16
820 domain_id_iommu(struct dmar_domain *domain, struct intel_iommu *iommu)
821 {
822 	struct iommu_domain_info *info =
823 			xa_load(&domain->iommu_array, iommu->seq_id);
824 
825 	return info->did;
826 }
827 
828 static inline u16
829 iommu_domain_did(struct iommu_domain *domain, struct intel_iommu *iommu)
830 {
831 	if (domain->type == IOMMU_DOMAIN_SVA ||
832 	    domain->type == IOMMU_DOMAIN_IDENTITY)
833 		return FLPT_DEFAULT_DID;
834 	return domain_id_iommu(to_dmar_domain(domain), iommu);
835 }
836 
837 static inline bool dev_is_real_dma_subdevice(struct device *dev)
838 {
839 	return dev && dev_is_pci(dev) &&
840 	       pci_real_dma_dev(to_pci_dev(dev)) != to_pci_dev(dev);
841 }
842 
843 /*
844  * 0: readable
845  * 1: writable
846  * 2-6: reserved
847  * 7: super page
848  * 8-10: available
849  * 11: snoop behavior
850  * 12-63: Host physical address
851  */
852 struct dma_pte {
853 	u64 val;
854 };
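/*
 * Worked example (illustrative): a present, read/write, snooped 4KiB
 * second-level PTE for host physical address 0x12345000 would hold
 *
 *	0x12345000 | DMA_PTE_READ | DMA_PTE_WRITE | DMA_PTE_SNP == 0x12345803
 *
 * and dma_pte_addr() below recovers 0x12345000 by masking with VTD_PAGE_MASK.
 */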
855 
856 static inline void dma_clear_pte(struct dma_pte *pte)
857 {
858 	pte->val = 0;
859 }
860 
861 static inline u64 dma_pte_addr(struct dma_pte *pte)
862 {
863 #ifdef CONFIG_64BIT
864 	return pte->val & VTD_PAGE_MASK;
865 #else
866 	/* Must have a full atomic 64-bit read */
867 	return  __cmpxchg64(&pte->val, 0ULL, 0ULL) & VTD_PAGE_MASK;
868 #endif
869 }
870 
871 static inline bool dma_pte_present(struct dma_pte *pte)
872 {
873 	return (pte->val & 3) != 0;
874 }
875 
876 static inline bool dma_sl_pte_test_and_clear_dirty(struct dma_pte *pte,
877 						   unsigned long flags)
878 {
879 	if (flags & IOMMU_DIRTY_NO_CLEAR)
880 		return (pte->val & DMA_SL_PTE_DIRTY) != 0;
881 
882 	return test_and_clear_bit(DMA_SL_PTE_DIRTY_BIT,
883 				  (unsigned long *)&pte->val);
884 }
885 
886 static inline bool dma_pte_superpage(struct dma_pte *pte)
887 {
888 	return (pte->val & DMA_PTE_LARGE_PAGE);
889 }
890 
891 static inline bool first_pte_in_page(struct dma_pte *pte)
892 {
893 	return IS_ALIGNED((unsigned long)pte, VTD_PAGE_SIZE);
894 }
895 
896 static inline int nr_pte_to_next_page(struct dma_pte *pte)
897 {
898 	return first_pte_in_page(pte) ? BIT_ULL(VTD_STRIDE_SHIFT) :
899 		(struct dma_pte *)ALIGN((unsigned long)pte, VTD_PAGE_SIZE) - pte;
900 }
901 
902 static inline bool context_present(struct context_entry *context)
903 {
904 	return (context->lo & 1);
905 }
906 
907 #define LEVEL_STRIDE		(9)
908 #define LEVEL_MASK		(((u64)1 << LEVEL_STRIDE) - 1)
909 #define MAX_AGAW_WIDTH		(64)
910 #define MAX_AGAW_PFN_WIDTH	(MAX_AGAW_WIDTH - VTD_PAGE_SHIFT)
911 
912 static inline int agaw_to_level(int agaw)
913 {
914 	return agaw + 2;
915 }
916 
917 static inline int agaw_to_width(int agaw)
918 {
919 	return min_t(int, 30 + agaw * LEVEL_STRIDE, MAX_AGAW_WIDTH);
920 }
921 
922 static inline int width_to_agaw(int width)
923 {
924 	return DIV_ROUND_UP(width - 30, LEVEL_STRIDE);
925 }
926 
927 static inline unsigned int level_to_offset_bits(int level)
928 {
929 	return (level - 1) * LEVEL_STRIDE;
930 }
931 
932 static inline int pfn_level_offset(u64 pfn, int level)
933 {
934 	return (pfn >> level_to_offset_bits(level)) & LEVEL_MASK;
935 }
936 
937 static inline u64 level_mask(int level)
938 {
939 	return -1ULL << level_to_offset_bits(level);
940 }
941 
942 static inline u64 level_size(int level)
943 {
944 	return 1ULL << level_to_offset_bits(level);
945 }
946 
947 static inline u64 align_to_level(u64 pfn, int level)
948 {
949 	return (pfn + level_size(level) - 1) & level_mask(level);
950 }
951 
952 static inline unsigned long lvl_to_nr_pages(unsigned int lvl)
953 {
954 	return 1UL << min_t(int, (lvl - 1) * LEVEL_STRIDE, MAX_AGAW_PFN_WIDTH);
955 }
956 
957 static inline void context_set_present(struct context_entry *context)
958 {
959 	context->lo |= 1;
960 }
961 
962 static inline void context_set_fault_enable(struct context_entry *context)
963 {
964 	context->lo &= (((u64)-1) << 2) | 1;
965 }
966 
967 static inline void context_set_translation_type(struct context_entry *context,
968 						unsigned long value)
969 {
970 	context->lo &= (((u64)-1) << 4) | 3;
971 	context->lo |= (value & 3) << 2;
972 }
973 
974 static inline void context_set_address_root(struct context_entry *context,
975 					    unsigned long value)
976 {
977 	context->lo &= ~VTD_PAGE_MASK;
978 	context->lo |= value & VTD_PAGE_MASK;
979 }
980 
981 static inline void context_set_address_width(struct context_entry *context,
982 					     unsigned long value)
983 {
984 	context->hi |= value & 7;
985 }
986 
987 static inline void context_set_domain_id(struct context_entry *context,
988 					 unsigned long value)
989 {
990 	context->hi |= (value & ((1 << 16) - 1)) << 8;
991 }
992 
993 static inline void context_set_pasid(struct context_entry *context)
994 {
995 	context->lo |= CONTEXT_PASIDE;
996 }
997 
998 static inline int context_domain_id(struct context_entry *c)
999 {
1000 	return((c->hi >> 8) & 0xffff);
1001 }
1002 
1003 static inline void context_clear_entry(struct context_entry *context)
1004 {
1005 	context->lo = 0;
1006 	context->hi = 0;
1007 }
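/*
 * Illustrative sketch of how the legacy-mode helpers above can be combined
 * to build a context entry; "ce", "did" and "pgd_phys" are placeholder
 * names, and the agaw value 2 corresponds to a 4-level (48-bit) table.
 *
 *	context_clear_entry(ce);
 *	context_set_domain_id(ce, did);
 *	context_set_address_width(ce, 2);
 *	context_set_address_root(ce, pgd_phys);
 *	context_set_translation_type(ce, CONTEXT_TT_MULTI_LEVEL);
 *	context_set_fault_enable(ce);
 *	context_set_present(ce);
 */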
1008 
1009 #ifdef CONFIG_INTEL_IOMMU
1010 static inline bool context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
1011 {
1012 	if (!iommu->copied_tables)
1013 		return false;
1014 
1015 	return test_bit(((long)bus << 8) | devfn, iommu->copied_tables);
1016 }
1017 
1018 static inline void
1019 set_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
1020 {
1021 	set_bit(((long)bus << 8) | devfn, iommu->copied_tables);
1022 }
1023 
1024 static inline void
1025 clear_context_copied(struct intel_iommu *iommu, u8 bus, u8 devfn)
1026 {
1027 	clear_bit(((long)bus << 8) | devfn, iommu->copied_tables);
1028 }
1029 #endif /* CONFIG_INTEL_IOMMU */
1030 
1031 /*
1032  * Set the RID_PASID field of a scalable mode context entry. The
1033  * IOMMU hardware will use the PASID value set in this field for
1034  * DMA translations of DMA requests without PASID.
1035  */
1036 static inline void
1037 context_set_sm_rid2pasid(struct context_entry *context, unsigned long pasid)
1038 {
1039 	context->hi |= pasid & ((1 << 20) - 1);
1040 }
1041 
1042 /*
1043  * Set the DTE(Device-TLB Enable) field of a scalable mode context
1044  * entry.
1045  */
1046 static inline void context_set_sm_dte(struct context_entry *context)
1047 {
1048 	context->lo |= BIT_ULL(2);
1049 }
1050 
1051 /*
1052  * Set the PRE(Page Request Enable) field of a scalable mode context
1053  * entry.
1054  */
1055 static inline void context_set_sm_pre(struct context_entry *context)
1056 {
1057 	context->lo |= BIT_ULL(4);
1058 }
1059 
1060 /*
1061  * Clear the PRE(Page Request Enable) field of a scalable mode context
1062  * entry.
1063  */
1064 static inline void context_clear_sm_pre(struct context_entry *context)
1065 {
1066 	context->lo &= ~BIT_ULL(4);
1067 }
1068 
1069 /* Returns a number of VTD pages, but aligned to MM page size */
1070 static inline unsigned long aligned_nrpages(unsigned long host_addr, size_t size)
1071 {
1072 	host_addr &= ~PAGE_MASK;
1073 	return PAGE_ALIGN(host_addr + size) >> VTD_PAGE_SHIFT;
1074 }
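/*
 * Worked example (illustrative, assuming a 4KiB MM page size):
 * aligned_nrpages(0x1234, 0x2000) keeps the in-page offset 0x234, rounds
 * 0x234 + 0x2000 up to 0x3000, and returns 3, because a 0x2000-byte buffer
 * starting at offset 0x234 touches three 4KiB pages.
 */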
1075 
1076 /* Return a size from number of VTD pages. */
1077 static inline unsigned long nrpages_to_size(unsigned long npages)
1078 {
1079 	return npages << VTD_PAGE_SHIFT;
1080 }
1081 
1082 static inline void qi_desc_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1083 				 unsigned int size_order, u64 type,
1084 				 struct qi_desc *desc)
1085 {
1086 	u8 dw = 0, dr = 0;
1087 	int ih = 0;
1088 
1089 	if (cap_write_drain(iommu->cap))
1090 		dw = 1;
1091 
1092 	if (cap_read_drain(iommu->cap))
1093 		dr = 1;
1094 
1095 	desc->qw0 = QI_IOTLB_DID(did) | QI_IOTLB_DR(dr) | QI_IOTLB_DW(dw)
1096 		| QI_IOTLB_GRAN(type) | QI_IOTLB_TYPE;
1097 	desc->qw1 = QI_IOTLB_ADDR(addr) | QI_IOTLB_IH(ih)
1098 		| QI_IOTLB_AM(size_order);
1099 	desc->qw2 = 0;
1100 	desc->qw3 = 0;
1101 }
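/*
 * Illustrative sketch: building a domain-selective IOTLB invalidation
 * descriptor with the helper above and submitting it synchronously; this is
 * roughly what qi_flush_iotlb(), declared later in this header, is expected
 * to do. "did" is a domain ID such as the one returned by domain_id_iommu();
 * error handling is omitted.
 *
 *	struct qi_desc desc;
 *
 *	qi_desc_iotlb(iommu, did, 0, 0, DMA_TLB_DSI_FLUSH, &desc);
 *	qi_submit_sync(iommu, &desc, 1, 0);
 */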
1102 
1103 static inline void qi_desc_dev_iotlb(u16 sid, u16 pfsid, u16 qdep, u64 addr,
1104 				     unsigned int mask, struct qi_desc *desc)
1105 {
1106 	if (mask) {
1107 		addr |= (1ULL << (VTD_PAGE_SHIFT + mask - 1)) - 1;
1108 		desc->qw1 = QI_DEV_IOTLB_ADDR(addr) | QI_DEV_IOTLB_SIZE;
1109 	} else {
1110 		desc->qw1 = QI_DEV_IOTLB_ADDR(addr);
1111 	}
1112 
1113 	if (qdep >= QI_DEV_IOTLB_MAX_INVS)
1114 		qdep = 0;
1115 
1116 	desc->qw0 = QI_DEV_IOTLB_SID(sid) | QI_DEV_IOTLB_QDEP(qdep) |
1117 		   QI_DIOTLB_TYPE | QI_DEV_IOTLB_PFSID(pfsid);
1118 	desc->qw2 = 0;
1119 	desc->qw3 = 0;
1120 }
1121 
1122 static inline void qi_desc_piotlb(u16 did, u32 pasid, u64 addr,
1123 				  unsigned long npages, bool ih,
1124 				  struct qi_desc *desc)
1125 {
1126 	if (npages == -1) {
1127 		desc->qw0 = QI_EIOTLB_PASID(pasid) |
1128 				QI_EIOTLB_DID(did) |
1129 				QI_EIOTLB_GRAN(QI_GRAN_NONG_PASID) |
1130 				QI_EIOTLB_TYPE;
1131 		desc->qw1 = 0;
1132 	} else {
1133 		int mask = ilog2(__roundup_pow_of_two(npages));
1134 		unsigned long align = (1ULL << (VTD_PAGE_SHIFT + mask));
1135 
1136 		if (WARN_ON_ONCE(!IS_ALIGNED(addr, align)))
1137 			addr = ALIGN_DOWN(addr, align);
1138 
1139 		desc->qw0 = QI_EIOTLB_PASID(pasid) |
1140 				QI_EIOTLB_DID(did) |
1141 				QI_EIOTLB_GRAN(QI_GRAN_PSI_PASID) |
1142 				QI_EIOTLB_TYPE;
1143 		desc->qw1 = QI_EIOTLB_ADDR(addr) |
1144 				QI_EIOTLB_IH(ih) |
1145 				QI_EIOTLB_AM(mask);
1146 	}
1147 }
1148 
1149 static inline void qi_desc_dev_iotlb_pasid(u16 sid, u16 pfsid, u32 pasid,
1150 					   u16 qdep, u64 addr,
1151 					   unsigned int size_order,
1152 					   struct qi_desc *desc)
1153 {
1154 	unsigned long mask = 1UL << (VTD_PAGE_SHIFT + size_order - 1);
1155 
1156 	desc->qw0 = QI_DEV_EIOTLB_PASID(pasid) | QI_DEV_EIOTLB_SID(sid) |
1157 		QI_DEV_EIOTLB_QDEP(qdep) | QI_DEIOTLB_TYPE |
1158 		QI_DEV_IOTLB_PFSID(pfsid);
1159 
1160 	/*
1161 	 * If S bit is 0, we only flush a single page. If S bit is set,
1162 	 * the least significant zero bit indicates the invalidation address
1163 	 * range. VT-d spec 6.5.2.6.
1164 	 * e.g. address bit 12[0] indicates 8KB, 13[0] indicates 16KB.
1165 	 * size order = 0 is PAGE_SIZE 4KB
1166 	 * Max Invs Pending (MIP) is set to 0 for now until we have DIT in
1167 	 * ECAP.
1168 	 */
1169 	if (!IS_ALIGNED(addr, VTD_PAGE_SIZE << size_order))
1170 		pr_warn_ratelimited("Invalidate non-aligned address %llx, order %d\n",
1171 				    addr, size_order);
1172 
1173 	/* Take page address */
1174 	desc->qw1 = QI_DEV_EIOTLB_ADDR(addr);
1175 
1176 	if (size_order) {
1177 		/*
1178 		 * Existing 0s in the address below size_order would be read as
1179 		 * the least significant zero bit, so set them to 1s to avoid
1180 		 * indicating a smaller invalidation size than desired.
1181 		 */
1182 		desc->qw1 |= GENMASK_ULL(size_order + VTD_PAGE_SHIFT - 1,
1183 					VTD_PAGE_SHIFT);
1184 		/* Clear size_order bit to indicate size */
1185 		desc->qw1 &= ~mask;
1186 		/* Set the S bit to indicate flushing more than 1 page */
1187 		desc->qw1 |= QI_DEV_EIOTLB_SIZE;
1188 	}
1189 }
1190 
1191 /* Convert value to context PASID directory size field coding. */
1192 #define context_pdts(pds)	(((pds) & 0x7) << 9)
1193 
1194 struct dmar_drhd_unit *dmar_find_matched_drhd_unit(struct pci_dev *dev);
1195 
1196 int dmar_enable_qi(struct intel_iommu *iommu);
1197 void dmar_disable_qi(struct intel_iommu *iommu);
1198 int dmar_reenable_qi(struct intel_iommu *iommu);
1199 void qi_global_iec(struct intel_iommu *iommu);
1200 
1201 void qi_flush_context(struct intel_iommu *iommu, u16 did,
1202 		      u16 sid, u8 fm, u64 type);
1203 void qi_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1204 		    unsigned int size_order, u64 type);
1205 void qi_flush_dev_iotlb(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1206 			u16 qdep, u64 addr, unsigned mask);
1207 
1208 void qi_flush_piotlb(struct intel_iommu *iommu, u16 did, u32 pasid, u64 addr,
1209 		     unsigned long npages, bool ih);
1210 
1211 void qi_flush_dev_iotlb_pasid(struct intel_iommu *iommu, u16 sid, u16 pfsid,
1212 			      u32 pasid, u16 qdep, u64 addr,
1213 			      unsigned int size_order);
1214 void quirk_extra_dev_tlb_flush(struct device_domain_info *info,
1215 			       unsigned long address, unsigned long pages,
1216 			       u32 pasid, u16 qdep);
1217 void qi_flush_pasid_cache(struct intel_iommu *iommu, u16 did, u64 granu,
1218 			  u32 pasid);
1219 
1220 int qi_submit_sync(struct intel_iommu *iommu, struct qi_desc *desc,
1221 		   unsigned int count, unsigned long options);
1222 
1223 void __iommu_flush_iotlb(struct intel_iommu *iommu, u16 did, u64 addr,
1224 			 unsigned int size_order, u64 type);
1225 /*
1226  * Options used in qi_submit_sync:
1227  * QI_OPT_WAIT_DRAIN - Wait for PRQ drain completion, spec 6.5.2.8.
1228  */
1229 #define QI_OPT_WAIT_DRAIN		BIT(0)
1230 
1231 int domain_attach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
1232 void domain_detach_iommu(struct dmar_domain *domain, struct intel_iommu *iommu);
1233 void device_block_translation(struct device *dev);
1234 int paging_domain_compatible(struct iommu_domain *domain, struct device *dev);
1235 
1236 struct dev_pasid_info *
1237 domain_add_dev_pasid(struct iommu_domain *domain,
1238 		     struct device *dev, ioasid_t pasid);
1239 void domain_remove_dev_pasid(struct iommu_domain *domain,
1240 			     struct device *dev, ioasid_t pasid);
1241 
1242 int __domain_setup_first_level(struct intel_iommu *iommu,
1243 			       struct device *dev, ioasid_t pasid,
1244 			       u16 did, pgd_t *pgd, int flags,
1245 			       struct iommu_domain *old);
1246 
1247 int dmar_ir_support(void);
1248 
1249 void iommu_flush_write_buffer(struct intel_iommu *iommu);
1250 struct iommu_domain *
1251 intel_iommu_domain_alloc_nested(struct device *dev, struct iommu_domain *parent,
1252 				u32 flags,
1253 				const struct iommu_user_data *user_data);
1254 struct device *device_rbtree_find(struct intel_iommu *iommu, u16 rid);
1255 
1256 enum cache_tag_type {
1257 	CACHE_TAG_IOTLB,
1258 	CACHE_TAG_DEVTLB,
1259 	CACHE_TAG_NESTING_IOTLB,
1260 	CACHE_TAG_NESTING_DEVTLB,
1261 };
1262 
1263 struct cache_tag {
1264 	struct list_head node;
1265 	enum cache_tag_type type;
1266 	struct intel_iommu *iommu;
1267 	/*
1268 	 * The @dev field represents the location of the cache. For IOTLB, it
1269 	 * resides on the IOMMU hardware. @dev stores the device pointer to
1270 	 * the IOMMU hardware. For DevTLB, it resides in the PCIe endpoint.
1271 	 * @dev stores the device pointer to that endpoint.
1272 	 */
1273 	struct device *dev;
1274 	u16 domain_id;
1275 	ioasid_t pasid;
1276 	unsigned int users;
1277 };
1278 
1279 int cache_tag_assign_domain(struct dmar_domain *domain,
1280 			    struct device *dev, ioasid_t pasid);
1281 void cache_tag_unassign_domain(struct dmar_domain *domain,
1282 			       struct device *dev, ioasid_t pasid);
1283 void cache_tag_flush_range(struct dmar_domain *domain, unsigned long start,
1284 			   unsigned long end, int ih);
1285 void cache_tag_flush_all(struct dmar_domain *domain);
1286 void cache_tag_flush_range_np(struct dmar_domain *domain, unsigned long start,
1287 			      unsigned long end);
1288 
1289 void intel_context_flush_no_pasid(struct device_domain_info *info,
1290 				  struct context_entry *context, u16 did);
1291 
1292 int intel_iommu_enable_prq(struct intel_iommu *iommu);
1293 int intel_iommu_finish_prq(struct intel_iommu *iommu);
1294 void intel_iommu_page_response(struct device *dev, struct iopf_fault *evt,
1295 			       struct iommu_page_response *msg);
1296 void intel_iommu_drain_pasid_prq(struct device *dev, u32 pasid);
1297 
1298 int intel_iommu_enable_iopf(struct device *dev);
1299 void intel_iommu_disable_iopf(struct device *dev);
1300 
1301 #ifdef CONFIG_INTEL_IOMMU_SVM
1302 void intel_svm_check(struct intel_iommu *iommu);
1303 struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
1304 					    struct mm_struct *mm);
1305 #else
1306 static inline void intel_svm_check(struct intel_iommu *iommu) {}
1307 static inline struct iommu_domain *intel_svm_domain_alloc(struct device *dev,
1308 							  struct mm_struct *mm)
1309 {
1310 	return ERR_PTR(-ENODEV);
1311 }
1312 #endif
1313 
1314 #ifdef CONFIG_INTEL_IOMMU_DEBUGFS
1315 void intel_iommu_debugfs_init(void);
1316 void intel_iommu_debugfs_create_dev(struct device_domain_info *info);
1317 void intel_iommu_debugfs_remove_dev(struct device_domain_info *info);
1318 void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid);
1319 void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid);
1320 #else
1321 static inline void intel_iommu_debugfs_init(void) {}
1322 static inline void intel_iommu_debugfs_create_dev(struct device_domain_info *info) {}
1323 static inline void intel_iommu_debugfs_remove_dev(struct device_domain_info *info) {}
1324 static inline void intel_iommu_debugfs_create_dev_pasid(struct dev_pasid_info *dev_pasid) {}
1325 static inline void intel_iommu_debugfs_remove_dev_pasid(struct dev_pasid_info *dev_pasid) {}
1326 #endif /* CONFIG_INTEL_IOMMU_DEBUGFS */
1327 
1328 extern const struct attribute_group *intel_iommu_groups[];
1329 struct context_entry *iommu_context_addr(struct intel_iommu *iommu, u8 bus,
1330 					 u8 devfn, int alloc);
1331 
1332 extern const struct iommu_ops intel_iommu_ops;
1333 
1334 #ifdef CONFIG_INTEL_IOMMU
1335 extern int intel_iommu_sm;
1336 int iommu_calculate_agaw(struct intel_iommu *iommu);
1337 int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
1338 int ecmd_submit_sync(struct intel_iommu *iommu, u8 ecmd, u64 oa, u64 ob);
1339 
1340 static inline bool ecmd_has_pmu_essential(struct intel_iommu *iommu)
1341 {
1342 	return (iommu->ecmdcap[DMA_ECMD_ECCAP3] & DMA_ECMD_ECCAP3_ESSENTIAL) ==
1343 		DMA_ECMD_ECCAP3_ESSENTIAL;
1344 }
1345 
1346 extern int dmar_disabled;
1347 extern int intel_iommu_enabled;
1348 #else
1349 static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
1350 {
1351 	return 0;
1352 }
1353 static inline int iommu_calculate_max_sagaw(struct intel_iommu *iommu)
1354 {
1355 	return 0;
1356 }
1357 #define dmar_disabled	(1)
1358 #define intel_iommu_enabled (0)
1359 #define intel_iommu_sm (0)
1360 #endif
1361 
1362 static inline const char *decode_prq_descriptor(char *str, size_t size,
1363 		u64 dw0, u64 dw1, u64 dw2, u64 dw3)
1364 {
1365 	char *buf = str;
1366 	int bytes;
1367 
1368 	bytes = snprintf(buf, size,
1369 			 "rid=0x%llx addr=0x%llx %c%c%c%c%c pasid=0x%llx index=0x%llx",
1370 			 FIELD_GET(GENMASK_ULL(31, 16), dw0),
1371 			 FIELD_GET(GENMASK_ULL(63, 12), dw1),
1372 			 dw1 & BIT_ULL(0) ? 'r' : '-',
1373 			 dw1 & BIT_ULL(1) ? 'w' : '-',
1374 			 dw0 & BIT_ULL(52) ? 'x' : '-',
1375 			 dw0 & BIT_ULL(53) ? 'p' : '-',
1376 			 dw1 & BIT_ULL(2) ? 'l' : '-',
1377 			 FIELD_GET(GENMASK_ULL(51, 32), dw0),
1378 			 FIELD_GET(GENMASK_ULL(11, 3), dw1));
1379 
1380 	/* Private Data */
1381 	if (dw0 & BIT_ULL(9)) {
1382 		size -= bytes;
1383 		buf += bytes;
1384 		snprintf(buf, size, " private=0x%llx/0x%llx\n", dw2, dw3);
1385 	}
1386 
1387 	return str;
1388 }
1389 
1390 #endif
1391