xref: /illumos-gate/usr/src/uts/i86pc/sys/immu.h (revision 67d5a6e39bc4b0cb50b481a41e24adda06ef02ed)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Portions Copyright (c) 2010, Oracle and/or its affiliates.
23  * All rights reserved.
24  */
25 
26 /*
27  * Copyright (c) 2008, Intel Corporation.
28  * All rights reserved.
29  * Copyright 2025 RackTop Systems, Inc.
30  */
31 
32 #ifndef	_SYS_INTEL_IOMMU_H
33 #define	_SYS_INTEL_IOMMU_H
34 
35 /*
36  * Intel IOMMU implementation specific state
37  */
38 
39 #ifdef	__cplusplus
40 extern "C" {
41 #endif
42 
43 #include <sys/types.h>
44 #include <sys/bitset.h>
45 #include <sys/kstat.h>
46 #include <sys/kmem.h>
47 #include <sys/vmem.h>
48 #include <sys/rootnex.h>
49 #include <sys/iommulib.h>
50 #include <sys/sdt.h>
51 
52 /*
53  * Some ON drivers have bugs. Keep this define until all such drivers
54  * have been fixed
55  */
56 #define	BUGGY_DRIVERS 1
57 
58 /* PD(T)E entries */
59 typedef uint64_t hw_pdte_t;
60 
61 #define	IMMU_MAXNAMELEN (64)
62 #define	IMMU_MAXSEG	(1)
63 #define	IMMU_REGSZ	(1UL << 12)
64 #define	IMMU_PAGESIZE   (4096)
65 #define	IMMU_PAGESHIFT	(12)
66 #define	IMMU_PAGEOFFSET	(IMMU_PAGESIZE - 1)
67 #define	IMMU_PAGEMASK	(~IMMU_PAGEOFFSET)
68 #define	IMMU_BTOP(b)	(((uint64_t)b) >> IMMU_PAGESHIFT)
69 #define	IMMU_PTOB(p)	(((uint64_t)p) << IMMU_PAGESHIFT)
70 #define	IMMU_BTOPR(x)	((((x) + IMMU_PAGEOFFSET) >> IMMU_PAGESHIFT))
71 #define	IMMU_PGTABLE_MAX_LEVELS	(6)
72 #define	IMMU_ROUNDUP(size) (((size) + IMMU_PAGEOFFSET) & ~IMMU_PAGEOFFSET)
73 #define	IMMU_ROUNDOWN(addr) ((addr) & ~IMMU_PAGEOFFSET)
74 #define	IMMU_PGTABLE_LEVEL_STRIDE	(9)
75 #define	IMMU_PGTABLE_LEVEL_MASK	((1<<IMMU_PGTABLE_LEVEL_STRIDE) - 1)
76 #define	IMMU_PGTABLE_OFFSHIFT  (IMMU_PAGESHIFT - IMMU_PGTABLE_LEVEL_STRIDE)
77 #define	IMMU_PGTABLE_MAXIDX  ((IMMU_PAGESIZE / sizeof (hw_pdte_t)) - 1)
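/*
 * Illustrative example (not part of the original header): byte/page
 * conversions using the macros above, assuming the 4K IMMU page size.
 *
 *	uint64_t len = 8193;
 *	uint64_t npages = IMMU_BTOPR(len);	(rounds up to 3 pages)
 *	uint64_t bytes = IMMU_ROUNDUP(len);	(rounds up to 12288 bytes)
 *	uint64_t pfn = IMMU_BTOP(0x2000);	(page frame 2)
 */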
78 
79 /*
80  * DMAR global defines
81  */
82 #define	DMAR_TABLE	"dmar-table"
83 #define	DMAR_INTRMAP_SUPPORT	(0x01)
84 
85 /* DMAR unit types */
86 #define	DMAR_DRHD	0
87 #define	DMAR_RMRR	1
88 #define	DMAR_ATSR	2
89 #define	DMAR_RHSA	3
90 #define	DMAR_ANDD	4
91 #define	DMAR_SATC	5
92 #define	DMAR_SIDP	6
93 
94 /* DRHD flag values */
95 #define	DMAR_INCLUDE_ALL	(0x01)
96 
97 /* Device scope types */
98 #define	DMAR_ENDPOINT	1
99 #define	DMAR_SUBTREE	2
100 #define	DMAR_IOAPIC	3
101 #define	DMAR_HPET	4
102 
103 
104 /* Forward declarations for IOMMU state structure and DVMA domain struct */
105 struct immu;
106 struct domain;
107 
108 /*
109  * The following structures describe the format of the DMAR ACPI table.
110  * They are used to parse the DMAR ACPI table. Read the spec for the
111  * meaning of each member.
112  */
113 
114 /* lengths of various strings */
115 #define	DMAR_SIG_LEN    (4)	/* table signature */
116 #define	DMAR_OEMID_LEN  (6)	/* OEM ID */
117 #define	DMAR_TBLID_LEN  (8)	/* OEM table ID */
118 #define	DMAR_ASL_LEN    (4)	/* ASL len */
119 
120 typedef struct dmar_table {
121 	kmutex_t	tbl_lock;
122 	uint8_t		tbl_haw;
123 	boolean_t	tbl_intrmap;
124 	list_t		tbl_drhd_list[IMMU_MAXSEG];
125 	list_t		tbl_rmrr_list[IMMU_MAXSEG];
126 	char		*tbl_oem_id;
127 	char		*tbl_oem_tblid;
128 	uint32_t	tbl_oem_rev;
129 	caddr_t		tbl_raw;
130 	int		tbl_rawlen;
131 } dmar_table_t;
132 
133 typedef struct drhd {
134 	kmutex_t	dr_lock;   /* protects this DRHD's fields */
135 	struct immu	*dr_immu;
136 	dev_info_t	*dr_dip;
137 	uint16_t	dr_seg;
138 	uint64_t	dr_regs;
139 	boolean_t	dr_include_all;
140 	list_t		dr_scope_list;
141 	list_node_t	dr_node;
142 } drhd_t;
143 
144 typedef struct rmrr {
145 	kmutex_t	rm_lock;
146 	uint16_t	rm_seg;
147 	uint64_t	rm_base;
148 	uint64_t	rm_limit;
149 	list_t		rm_scope_list;
150 	list_node_t	rm_node;
151 } rmrr_t;
152 
153 #define	IMMU_UNIT_NAME	"iommu"
154 
155 /*
156  * Macros based on PCI spec
157  */
158 #define	IMMU_PCI_DEV(devfunc)    ((uint64_t)devfunc >> 3) /* dev from devfunc */
159 #define	IMMU_PCI_FUNC(devfunc)   (devfunc & 7)  /* get func from devfunc */
160 #define	IMMU_PCI_DEVFUNC(d, f)   (((d) << 3) | (f))  /* create devfunc */
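/*
 * Illustrative example: packing and unpacking a PCI device/function pair
 * with the macros above (hypothetical values).
 *
 *	int devfunc = IMMU_PCI_DEVFUNC(0x1f, 0x3);	(yields 0xfb)
 *	int dev = IMMU_PCI_DEV(devfunc);		(0x1f)
 *	int func = IMMU_PCI_FUNC(devfunc);		(0x3)
 */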
161 
162 typedef struct scope {
163 	uint8_t scp_type;
164 	uint8_t scp_enumid;
165 	uint8_t scp_bus;
166 	uint8_t scp_dev;
167 	uint8_t scp_func;
168 	list_node_t scp_node;
169 } scope_t;
170 
171 /*
172  * interrupt source id and drhd info for ioapic
173  */
174 typedef struct ioapic_drhd {
175 	uchar_t		ioapic_ioapicid;
176 	uint16_t	ioapic_sid;	/* ioapic source id */
177 	drhd_t		*ioapic_drhd;
178 	list_node_t	ioapic_node;
179 } ioapic_drhd_t;
180 
181 typedef struct memrng {
182 	uint64_t mrng_start;
183 	uint64_t mrng_npages;
184 } memrng_t;
185 
186 typedef enum immu_flags {
187 	IMMU_FLAGS_NONE = 0x1,
188 	IMMU_FLAGS_SLEEP = 0x1,
189 	IMMU_FLAGS_NOSLEEP = 0x2,
190 	IMMU_FLAGS_READ = 0x4,
191 	IMMU_FLAGS_WRITE = 0x8,
192 	IMMU_FLAGS_DONTPASS = 0x10,
193 	IMMU_FLAGS_ALLOC = 0x20,
194 	IMMU_FLAGS_MUST_MATCH = 0x40,
195 	IMMU_FLAGS_PAGE1 = 0x80,
196 	IMMU_FLAGS_UNITY = 0x100,
197 	IMMU_FLAGS_DMAHDL = 0x200,
198 	IMMU_FLAGS_MEMRNG = 0x400
199 } immu_flags_t;
200 
201 typedef enum cont_avail {
202 	IMMU_CONT_BAD = 0x0,
203 	IMMU_CONT_UNINITED = 0x1,
204 	IMMU_CONT_INITED = 0x2
205 } cont_avail_t;
206 
207 /* Size of root and context tables and their entries */
208 #define	IMMU_ROOT_TBLSZ		(4096)
209 #define	IMMU_CONT_TBLSZ		(4096)
210 #define	IMMU_ROOT_NUM		(256)
211 #define	IMMU_CONT_NUM		(256)
212 
213 /* register offset */
214 #define	IMMU_REG_VERSION	(0x00)  /* Version Register, 32 bit */
215 #define	IMMU_REG_CAP		(0x08)  /* Capability Register, 64 bit */
216 #define	IMMU_REG_EXCAP		(0x10)  /* Extended Capability Reg, 64 bit */
217 #define	IMMU_REG_GLOBAL_CMD	(0x18)  /* Global Command Register, 32 bit */
218 #define	IMMU_REG_GLOBAL_STS	(0x1C)  /* Global Status Register, 32 bit */
219 #define	IMMU_REG_ROOTENTRY	(0x20)  /* Root-Entry Table Addr Reg, 64 bit */
220 #define	IMMU_REG_CONTEXT_CMD	(0x28)  /* Context Command Register, 64 bit */
221 #define	IMMU_REG_FAULT_STS	(0x34)  /* Fault Status Register, 32 bit */
222 #define	IMMU_REG_FEVNT_CON	(0x38)  /* Fault Event Control Reg, 32 bit */
223 #define	IMMU_REG_FEVNT_DATA	(0x3C)  /* Fault Event Data Register, 32 bit */
224 #define	IMMU_REG_FEVNT_ADDR	(0x40)  /* Fault Event Address Reg, 32 bit */
225 #define	IMMU_REG_FEVNT_UADDR	(0x44)  /* Fault Event Upper Addr Reg, 32 bit */
226 #define	IMMU_REG_AFAULT_LOG	(0x58)  /* Advanced Fault Log Reg, 64 bit */
227 #define	IMMU_REG_PMER		(0x64)  /* Protected Mem Enable Reg, 32 bit */
228 #define	IMMU_REG_PLMBR		(0x68)  /* Protected Low Mem Base Reg, 32 bit */
229 #define	IMMU_REG_PLMLR		(0x6C)  /* Protected Low Mem Lim Reg, 32 bit */
230 #define	IMMU_REG_PHMBR		(0x70)  /* Protected High Mem Base, 64 bit */
231 #define	IMMU_REG_PHMLR		(0x78)  /* Protected High Mem Lim Reg, 64 bit */
232 #define	IMMU_REG_INVAL_QH	(0x80)  /* Invalidation Queue Head, 64 bit */
233 #define	IMMU_REG_INVAL_QT	(0x88)  /* Invalidation Queue Tail, 64 bit */
234 #define	IMMU_REG_INVAL_QAR	(0x90)  /* Invalidation Queue Addr, 64 bit */
235 #define	IMMU_REG_INVAL_CSR	(0x9C)  /* Inval Compl Status Reg, 32 bit */
236 #define	IMMU_REG_INVAL_CECR	(0xA0)  /* Inval Compl Evnt Ctrl Reg, 32 bit */
237 #define	IMMU_REG_INVAL_CEDR	(0xA4)  /* Inval Compl Evnt Data Reg, 32 bit */
238 #define	IMMU_REG_INVAL_CEAR	(0xA8)  /* Inval Compl Event Addr Reg, 32 bit */
239 #define	IMMU_REG_INVAL_CEUAR	(0xAC)  /* Inval Comp Evnt Up Addr reg, 32bit */
240 #define	IMMU_REG_IRTAR		(0xB8)  /* INTR Remap Tbl Addr Reg, 64 bit */
241 
242 /* ioapic memory region */
243 #define	IOAPIC_REGION_START	(0xfee00000)
244 #define	IOAPIC_REGION_END	(0xfeefffff)
245 
246 /* fault register */
247 #define	IMMU_FAULT_STS_PPF		(2)
248 #define	IMMU_FAULT_STS_PFO		(1)
249 #define	IMMU_FAULT_STS_ITE		(1 << 6)
250 #define	IMMU_FAULT_STS_ICE		(1 << 5)
251 #define	IMMU_FAULT_STS_IQE		(1 << 4)
252 #define	IMMU_FAULT_GET_INDEX(x)		((((uint64_t)x) >> 8) & 0xff)
253 #define	IMMU_FRR_GET_F(x)		(((uint64_t)x) >> 63)
254 #define	IMMU_FRR_GET_FR(x)		((((uint64_t)x) >> 32) & 0xff)
255 #define	IMMU_FRR_GET_FT(x)		((((uint64_t)x) >> 62) & 0x1)
256 #define	IMMU_FRR_GET_SID(x)		((x) & 0xffff)
257 
258 /* (ex)capability register */
259 #define	IMMU_CAP_GET_NFR(x)		(((((uint64_t)x) >> 40) & 0xff) + 1)
260 #define	IMMU_CAP_GET_DWD(x)		((((uint64_t)x) >> 54) & 1)
261 #define	IMMU_CAP_GET_DRD(x)		((((uint64_t)x) >> 55) & 1)
262 #define	IMMU_CAP_GET_PSI(x)		((((uint64_t)x) >> 39) & 1)
263 #define	IMMU_CAP_GET_SPS(x)		((((uint64_t)x) >> 34) & 0xf)
264 #define	IMMU_CAP_GET_ISOCH(x)		((((uint64_t)x) >> 23) & 1)
265 #define	IMMU_CAP_GET_ZLR(x)		((((uint64_t)x) >> 22) & 1)
266 #define	IMMU_CAP_GET_MAMV(x)		((((uint64_t)x) >> 48) & 0x3f)
267 #define	IMMU_CAP_GET_CM(x)		((((uint64_t)x) >> 7) & 1)
268 #define	IMMU_CAP_GET_PHMR(x)		((((uint64_t)x) >> 6) & 1)
269 #define	IMMU_CAP_GET_PLMR(x)		((((uint64_t)x) >> 5) & 1)
270 #define	IMMU_CAP_GET_RWBF(x)		((((uint64_t)x) >> 4) & 1)
271 #define	IMMU_CAP_GET_AFL(x)		((((uint64_t)x) >> 3) & 1)
272 #define	IMMU_CAP_GET_FRO(x)		(((((uint64_t)x) >> 24) & 0x3ff) * 16)
273 #define	IMMU_CAP_MGAW(x)		(((((uint64_t)x) >> 16) & 0x3f) + 1)
274 #define	IMMU_CAP_SAGAW(x)		((((uint64_t)x) >> 8) & 0x1f)
275 #define	IMMU_CAP_ND(x)			((1 << ((((x) & 0x7) * 2) + 4)) - 1)
276 #define	IMMU_ECAP_GET_IRO(x)		(((((uint64_t)x) >> 8) & 0x3ff) << 4)
277 #define	IMMU_ECAP_GET_MHMV(x)		(((uint64_t)x >> 20) & 0xf)
278 #define	IMMU_ECAP_GET_SC(x)		((x) & 0x80)
279 #define	IMMU_ECAP_GET_PT(x)		((x) & 0x40)
280 #define	IMMU_ECAP_GET_CH(x)		((x) & 0x20)
281 #define	IMMU_ECAP_GET_EIM(x)		((x) & 0x10)
282 #define	IMMU_ECAP_GET_IR(x)		((x) & 0x8)
283 #define	IMMU_ECAP_GET_DI(x)		((x) & 0x4)
284 #define	IMMU_ECAP_GET_QI(x)		((x) & 0x2)
285 #define	IMMU_ECAP_GET_C(x)		((x) & 0x1)
286 
287 #define	IMMU_CAP_SET_RWBF(x)		((x) |= (1 << 4))
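/*
 * Illustrative example: decoding a few fields from capability registers
 * that were read earlier; "immu" is assumed to be an initialized immu_t.
 * See the VT-d spec for the field meanings.
 *
 *	uint64_t cap = immu->immu_regs_cap;
 *	uint64_t excap = immu->immu_regs_excap;
 *	int mgaw = IMMU_CAP_MGAW(cap);		(max guest address width)
 *	int nfr = IMMU_CAP_GET_NFR(cap);	(# of fault recording regs)
 *	int fro = IMMU_CAP_GET_FRO(cap);	(fault recording offset, bytes)
 *	boolean_t psi = (IMMU_CAP_GET_PSI(cap) != 0);	(page-selective inval)
 *	boolean_t qi = (IMMU_ECAP_GET_QI(excap) != 0);	(queued invalidation)
 */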
288 
289 
290 /* iotlb invalidation */
291 #define	TLB_INV_GLOBAL		(((uint64_t)1) << 60)
292 #define	TLB_INV_DOMAIN		(((uint64_t)2) << 60)
293 #define	TLB_INV_PAGE		(((uint64_t)3) << 60)
294 #define	TLB_INV_GET_IAIG(x)	((((uint64_t)x) >> 57) & 7)
295 #define	TLB_INV_DRAIN_READ	(((uint64_t)1) << 49)
296 #define	TLB_INV_DRAIN_WRITE	(((uint64_t)1) << 48)
297 #define	TLB_INV_DID(x)		(((uint64_t)((x) & 0xffff)) << 32)
298 #define	TLB_INV_IVT		(((uint64_t)1) << 63)
299 #define	TLB_IVA_HINT(x)		(((x) & 0x1) << 6)
300 #define	TLB_IVA_LEAF		1
301 #define	TLB_IVA_WHOLE		0
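/*
 * Illustrative example: composing a domain-selective IOTLB invalidation
 * command from the bits above, roughly as the register-based flush path
 * would before writing the IOTLB register ("did" is a hypothetical
 * domain id).
 *
 *	uint64_t cmd = TLB_INV_IVT | TLB_INV_DOMAIN | TLB_INV_DID(did) |
 *	    TLB_INV_DRAIN_READ | TLB_INV_DRAIN_WRITE;
 */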
302 
303 /* don't use value 0 for enums, so uninitialized (zeroed) values are caught */
304 typedef enum iotlb_inv {
305 	IOTLB_PSI = 1,
306 	IOTLB_DSI,
307 	IOTLB_GLOBAL
308 } immu_iotlb_inv_t;
309 
310 typedef enum context_inv {
311 	CONTEXT_FSI = 1,
312 	CONTEXT_DSI,
313 	CONTEXT_GLOBAL
314 } immu_context_inv_t;
315 
316 /* context invalidation */
317 #define	CCMD_INV_ICC		(((uint64_t)1) << 63)
318 #define	CCMD_INV_GLOBAL		(((uint64_t)1) << 61)
319 #define	CCMD_INV_DOMAIN		(((uint64_t)2) << 61)
320 #define	CCMD_INV_DEVICE		(((uint64_t)3) << 61)
321 #define	CCMD_INV_DID(x)		((uint64_t)((x) & 0xffff))
322 #define	CCMD_INV_SID(x)		(((uint64_t)((x) & 0xffff)) << 16)
323 #define	CCMD_INV_FM(x)		(((uint64_t)((x) & 0x3)) << 32)
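/*
 * Illustrative example: a device-selective context-cache invalidation
 * command built from the bits above ("did" and "sid" are hypothetical
 * domain and source ids).
 *
 *	uint64_t ccmd = CCMD_INV_ICC | CCMD_INV_DEVICE |
 *	    CCMD_INV_DID(did) | CCMD_INV_SID(sid) | CCMD_INV_FM(0);
 */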
324 
325 /* global command register */
326 #define	IMMU_GCMD_TE		(((uint32_t)1) << 31)
327 #define	IMMU_GCMD_SRTP		(((uint32_t)1) << 30)
328 #define	IMMU_GCMD_SFL		(((uint32_t)1) << 29)
329 #define	IMMU_GCMD_EAFL		(((uint32_t)1) << 28)
330 #define	IMMU_GCMD_WBF		(((uint32_t)1) << 27)
331 #define	IMMU_GCMD_QIE		(((uint32_t)1) << 26)
332 #define	IMMU_GCMD_IRE		(((uint32_t)1) << 25)
333 #define	IMMU_GCMD_SIRTP	(((uint32_t)1) << 24)
334 #define	IMMU_GCMD_CFI		(((uint32_t)1) << 23)
335 
336 /* global status register */
337 #define	IMMU_GSTS_TES		(((uint32_t)1) << 31)
338 #define	IMMU_GSTS_RTPS		(((uint32_t)1) << 30)
339 #define	IMMU_GSTS_FLS		(((uint32_t)1) << 29)
340 #define	IMMU_GSTS_AFLS		(((uint32_t)1) << 28)
341 #define	IMMU_GSTS_WBFS		(((uint32_t)1) << 27)
342 #define	IMMU_GSTS_QIES		(((uint32_t)1) << 26)
343 #define	IMMU_GSTS_IRES		(((uint32_t)1) << 25)
344 #define	IMMU_GSTS_IRTPS	(((uint32_t)1) << 24)
345 #define	IMMU_GSTS_CFIS		(((uint32_t)1) << 23)
346 
347 /* psi address mask */
348 #define	ADDR_AM_MAX(m)		(((uint_t)1) << (m))
349 #define	ADDR_AM_OFFSET(n, m)	((n) & (ADDR_AM_MAX(m) - 1))
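/*
 * Illustrative example: for a page-selective invalidation with address
 * mask value m, ADDR_AM_MAX(m) is the number of 4K pages covered and
 * ADDR_AM_OFFSET(n, m) is the offset of page n within that aligned block.
 *
 *	uint_t npages = ADDR_AM_MAX(2);		(4 pages)
 *	uint_t off = ADDR_AM_OFFSET(5, 2);	(5 & 3 == 1)
 */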
350 
351 /* dmar fault event */
352 #define	IMMU_INTR_IPL			(4)
353 #define	IMMU_REG_FEVNT_CON_IM_SHIFT	(31)
354 
355 #define	IMMU_ALLOC_RESOURCE_DELAY    (drv_usectohz(5000))
356 
357 /* max value of Size field of Interrupt Remapping Table Address Register */
358 #define	INTRMAP_MAX_IRTA_SIZE	0xf
359 
360 /* interrupt remapping table entry size */
361 #define	INTRMAP_RTE_SIZE		0x10
362 
363 /* ioapic redirection table entry bit shifts for remappable interrupts */
364 #define	INTRMAP_IOAPIC_IDX_SHIFT		17
365 #define	INTRMAP_IOAPIC_FORMAT_SHIFT	16
366 #define	INTRMAP_IOAPIC_TM_SHIFT		15
367 #define	INTRMAP_IOAPIC_POL_SHIFT		13
368 #define	INTRMAP_IOAPIC_IDX15_SHIFT	11
369 
370 /* msi interrupt entry bit shifts for remappable interrupts */
371 #define	INTRMAP_MSI_IDX_SHIFT	5
372 #define	INTRMAP_MSI_FORMAT_SHIFT	4
373 #define	INTRMAP_MSI_SHV_SHIFT	3
374 #define	INTRMAP_MSI_IDX15_SHIFT	2
375 
376 #define	INTRMAP_IDX_FULL		(uint_t)-1
377 
378 #define	RDT_DLM(rdt)	BITX((rdt), 10, 8)
379 #define	RDT_DM(rdt)	BT_TEST(&(rdt), 11)
380 #define	RDT_POL(rdt)	BT_TEST(&(rdt), 13)
381 #define	RDT_TM(rdt)	BT_TEST(&(rdt), 15)
382 
383 #define	INTRMAP_DISABLE	(void *)-1
384 
385 /*
386  * invalidation granularity
387  */
388 typedef enum {
389 	TLB_INV_G_GLOBAL = 1,
390 	TLB_INV_G_DOMAIN,
391 	TLB_INV_G_PAGE
392 } tlb_inv_g_t;
393 
394 typedef enum {
395 	CTT_INV_G_GLOBAL = 1,
396 	CTT_INV_G_DOMAIN,
397 	CTT_INV_G_DEVICE
398 } ctt_inv_g_t;
399 
400 typedef enum {
401 	IEC_INV_GLOBAL = 0,
402 	IEC_INV_INDEX
403 } iec_inv_g_t;
404 
405 
406 struct inv_queue_state;
407 struct intrmap_tbl_state;
408 
409 /* A software page table structure */
410 typedef struct pgtable {
411 	krwlock_t swpg_rwlock;
412 	caddr_t hwpg_vaddr;   /* HW pgtable VA */
413 	paddr_t hwpg_paddr;   /* HW pgtable PA */
414 	ddi_dma_handle_t hwpg_dmahdl;
415 	ddi_acc_handle_t hwpg_memhdl;
416 	struct pgtable **swpg_next_array;
417 	list_node_t swpg_domain_node;  /* domain list of pgtables */
418 } pgtable_t;
419 
420 /* interrupt remapping table state info */
421 typedef struct intrmap {
422 	kmutex_t		intrmap_lock;
423 	ddi_dma_handle_t	intrmap_dma_hdl;
424 	ddi_acc_handle_t	intrmap_acc_hdl;
425 	caddr_t			intrmap_vaddr;
426 	paddr_t			intrmap_paddr;
427 	uint_t			intrmap_size;
428 	bitset_t		intrmap_map;
429 	uint_t			intrmap_free;
430 } intrmap_t;
431 
432 typedef struct hw_rce {
433 	uint64_t lo;
434 	uint64_t hi;
435 } hw_rce_t;
436 
437 
438 #define	ROOT_GET_P(hrent) ((hrent)->lo & 0x1)
439 #define	ROOT_SET_P(hrent) ((hrent)->lo |= 0x1)
440 
441 #define	ROOT_GET_CONT(hrent) ((hrent)->lo & ~(0xFFF))
442 #define	ROOT_SET_CONT(hrent, paddr) ((hrent)->lo |= (paddr & (~0xFFF)))
443 
444 #define	TTYPE_XLATE_ONLY  (0x0)
445 #define	TTYPE_XLATE_IOTLB (0x1)
446 #define	TTYPE_PASSTHRU    (0x2)
447 #define	TTYPE_RESERVED    (0x3)
448 
449 #define	CONT_GET_DID(hcent) ((((uint64_t)(hcent)->hi) >> 8) & 0xFFFF)
450 #define	CONT_SET_DID(hcent, did) ((hcent)->hi |= ((0xFFFF & (did)) << 8))
451 
452 #define	CONT_GET_AVAIL(hcent) ((((uint64_t)((hcent)->hi)) >> 0x3) & 0xF)
453 #define	CONT_SET_AVAIL(hcent, av) ((hcent)->hi |= ((0xF & (av)) << 0x3))
454 
455 #define	CONT_GET_LO_AW(hcent) (30 + 9 *((hcent)->hi & 0x7))
456 #define	CONT_GET_AW(hcent) \
457 	((CONT_GET_LO_AW(hcent) == 66) ? 64 : CONT_GET_LO_AW(hcent))
458 #define	CONT_SET_AW(hcent, aw) \
459 	((hcent)->hi |= (((((aw) + 2) - 30) / 9) & 0x7))
460 
461 #define	CONT_GET_ASR(hcent) ((hcent)->lo & ~(0xFFF))
462 #define	CONT_SET_ASR(hcent, paddr) ((hcent)->lo |= (paddr & (~0xFFF)))
463 
464 #define	CONT_GET_TTYPE(hcent) ((((uint64_t)(hcent)->lo) >> 0x2) & 0x3)
465 #define	CONT_SET_TTYPE(hcent, ttype) ((hcent)->lo |= (((ttype) & 0x3) << 0x2))
466 
467 #define	CONT_GET_P(hcent) ((hcent)->lo & 0x1)
468 #define	CONT_SET_P(hcent) ((hcent)->lo |= 0x1)
469 
470 #define	CONT_GET_ALH(hcent) ((hcent)->lo & 0x20)
471 #define	CONT_SET_ALH(hcent) ((hcent)->lo |= 0x20)
472 
473 #define	CONT_GET_EH(hcent) ((hcent)->lo & 0x10)
474 #define	CONT_SET_EH(hcent) ((hcent)->lo |= 0x10)
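/*
 * Illustrative example: filling in a context entry with the setters above
 * ("did" and "pgtable_root_paddr" are hypothetical; a real entry also
 * needs proper allocation and cache flushing).
 *
 *	hw_rce_t ce = { 0, 0 };
 *	CONT_SET_DID(&ce, did);
 *	CONT_SET_AW(&ce, 48);			(48-bit address width)
 *	CONT_SET_ASR(&ce, pgtable_root_paddr);	(root pgtable physaddr)
 *	CONT_SET_TTYPE(&ce, TTYPE_XLATE_ONLY);
 *	CONT_SET_P(&ce);			(mark present last)
 */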
475 
476 
477 /* we use bit 63 (available to system SW) as a software present bit */
478 #define	PDTE_SW4(hw_pdte) ((hw_pdte) & ((uint64_t)1<<63))
479 #define	PDTE_CLEAR_SW4(hw_pdte) ((hw_pdte) &= ~((uint64_t)1<<63))
480 
481 #define	PDTE_P(hw_pdte) ((hw_pdte) & ((uint64_t)1<<63))
482 #define	PDTE_CLEAR_P(hw_pdte) ((hw_pdte) &= ~((uint64_t)1<<63))
483 #define	PDTE_SET_P(hw_pdte) ((hw_pdte) |= ((uint64_t)1<<63))
484 
485 #define	PDTE_TM(hw_pdte) ((hw_pdte) & ((uint64_t)1<<62))
486 #define	PDTE_CLEAR_TM(hw_pdte) ((hw_pdte) &= ~((uint64_t)1<<62))
487 
488 #define	PDTE_SW3(hw_pdte) \
489 	(((hw_pdte) & ~(((uint64_t)0x3<<62)|(((uint64_t)1<<52)-1))) >> 52)
490 #define	PDTE_SW3_OVERFLOW(hw_pdte) \
491 	(PDTE_SW3(hw_pdte) == 0x3FF)
492 #define	PDTE_CLEAR_SW3(hw_pdte) \
493 	((hw_pdte) &= (((uint64_t)0x3<<62)|(((uint64_t)1<<52)-1)))
494 #define	PDTE_SET_SW3(hw_pdte, ref) \
495 	((hw_pdte) |= ((((uint64_t)(ref)) & 0x3FF) << 52))
496 
497 #define	PDTE_PADDR(hw_pdte) ((hw_pdte) & ~(((uint64_t)0xFFF<<52)|((1<<12)-1)))
498 #define	PDTE_CLEAR_PADDR(hw_pdte) \
499 		((hw_pdte) &= (((uint64_t)0xFFF<<52)|((1<<12)-1)))
500 #define	PDTE_SET_PADDR(hw_pdte, paddr) ((hw_pdte) |= PDTE_PADDR(paddr))
501 
502 #define	PDTE_SNP(hw_pdte) ((hw_pdte) & (1<<11))
503 #define	PDTE_CLEAR_SNP(hw_pdte) ((hw_pdte) &= ~(1<<11))
504 #define	PDTE_SET_SNP(hw_pdte) ((hw_pdte) |= (1<<11))
505 
506 #define	PDTE_SW2(hw_pdte) ((hw_pdte) & (0x700))
507 #define	PDTE_CLEAR_SW2(hw_pdte) ((hw_pdte) &= ~(0x700))
508 
509 #define	PDTE_SP(hw_pdte) ((hw_pdte) & (0x80))
510 #define	PDTE_CLEAR_SP(hw_pdte) ((hw_pdte) &= ~(0x80))
511 
512 #define	PDTE_SW1(hw_pdte) ((hw_pdte) & (0x7C))
513 #define	PDTE_CLEAR_SW1(hw_pdte) ((hw_pdte) &= ~(0x7C))
514 
515 #define	PDTE_WRITE(hw_pdte) ((hw_pdte) & (0x2))
516 #define	PDTE_CLEAR_WRITE(hw_pdte) ((hw_pdte) &= ~(0x2))
517 #define	PDTE_SET_WRITE(hw_pdte) ((hw_pdte) |= (0x2))
518 
519 #define	PDTE_READ(hw_pdte) ((hw_pdte) & (0x1))
520 #define	PDTE_CLEAR_READ(hw_pdte) ((hw_pdte) &= ~(0x1))
521 #define	PDTE_SET_READ(hw_pdte) ((hw_pdte) |= (0x1))
522 
523 #define	PDTE_MASK_R	((uint64_t)1 << 0)
524 #define	PDTE_MASK_W	((uint64_t)1 << 1)
525 #define	PDTE_MASK_SNP	((uint64_t)1 << 11)
526 #define	PDTE_MASK_TM	((uint64_t)1 << 62)
527 #define	PDTE_MASK_P	((uint64_t)1 << 63)
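/*
 * Illustrative example: building a read/write leaf PDTE with the macros
 * above ("paddr" is a hypothetical page-aligned physical address; bit 63
 * here is this driver's software present bit, not a hardware field).
 *
 *	hw_pdte_t pte = 0;
 *	PDTE_SET_PADDR(pte, paddr);
 *	PDTE_SET_READ(pte);
 *	PDTE_SET_WRITE(pte);
 *	PDTE_SET_P(pte);
 */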
528 
529 struct immu_flushops;
530 
531 /*
532  * Used to wait for invalidation completion.
533  *     iwp_vstatus is the status word that will be written on completion
534  *     iwp_pstatus is its physical address
535  * If sync is true, then the operation will be waited on for
536  * completion immediately. Else, the wait interface can be called
537  * to wait for completion later.
538  */
539 
540 #define	IMMU_INV_DATA_PENDING	1
541 #define	IMMU_INV_DATA_DONE	2
542 
543 typedef struct immu_inv_wait {
544 	volatile uint32_t iwp_vstatus;
545 	uint64_t iwp_pstatus;
546 	boolean_t iwp_sync;
547 	const char *iwp_name;		/* ID for debugging/statistics */
548 } immu_inv_wait_t;
549 
550 /*
551  * Used to batch IOMMU pagetable writes.
552  */
553 typedef struct immu_dcookie {
554 	paddr_t dck_paddr;
555 	uint64_t dck_npages;
556 } immu_dcookie_t;
557 
558 typedef struct immu {
559 	kmutex_t		immu_lock;
560 	char			*immu_name;
561 
562 	/* lock grabbed by interrupt handler */
563 	kmutex_t		immu_intr_lock;
564 
565 	/* ACPI/DMAR table related */
566 	void			*immu_dmar_unit;
567 	dev_info_t		*immu_dip;
568 	struct domain		*immu_unity_domain;
569 
570 	/* IOMMU register related */
571 	kmutex_t		immu_regs_lock;
572 	kcondvar_t		immu_regs_cv;
573 	boolean_t		immu_regs_busy;
574 	boolean_t		immu_regs_setup;
575 	boolean_t		immu_regs_running;
576 	boolean_t		immu_regs_quiesced;
577 	ddi_acc_handle_t	immu_regs_handle;
578 	caddr_t			immu_regs_addr;
579 	uint64_t		immu_regs_cap;
580 	uint64_t		immu_regs_excap;
581 	uint32_t		immu_regs_cmdval;
582 	uint32_t		immu_regs_intr_msi_addr;
583 	uint32_t		immu_regs_intr_msi_data;
584 	uint32_t		immu_regs_intr_uaddr;
585 
586 	/* DVMA related */
587 	kmutex_t		immu_dvma_lock;
588 	boolean_t		immu_dvma_setup;
589 	boolean_t		immu_dvma_running;
590 	int			immu_dvma_gaw;
591 	int			immu_dvma_agaw;
592 	int			immu_dvma_nlevels;
593 	boolean_t		immu_dvma_coherent;
594 	boolean_t		immu_TM_reserved;
595 	boolean_t		immu_SNP_reserved;
596 	uint64_t		immu_ptemask;
597 
598 	/* DVMA context related */
599 	krwlock_t		immu_ctx_rwlock;
600 	pgtable_t		*immu_ctx_root;
601 	immu_inv_wait_t		immu_ctx_inv_wait;
602 
603 	/* DVMA domain related */
604 	int			immu_max_domains;
605 	vmem_t			*immu_did_arena;
606 	char			immu_did_arena_name[IMMU_MAXNAMELEN];
607 	list_t			immu_domain_list;
608 
609 	/* DVMA special devices */
610 	boolean_t		immu_dvma_gfx_only;
611 	list_t			immu_dvma_lpc_list;
612 	list_t			immu_dvma_gfx_list;
613 
614 	/* interrupt remapping related */
615 	kmutex_t		immu_intrmap_lock;
616 	boolean_t		immu_intrmap_setup;
617 	boolean_t		immu_intrmap_running;
618 	intrmap_t		*immu_intrmap;
619 	uint64_t		immu_intrmap_irta_reg;
620 	immu_inv_wait_t		immu_intrmap_inv_wait;
621 
622 	/* queued invalidation related */
623 	kmutex_t		immu_qinv_lock;
624 	boolean_t		immu_qinv_setup;
625 	boolean_t		immu_qinv_running;
626 	boolean_t		immu_qinv_enabled;
627 	void			*immu_qinv;
628 	uint64_t		immu_qinv_reg_value;
629 
630 	/* list_node for system-wide list of DMAR units */
631 	list_node_t		immu_node;
632 
633 	struct immu_flushops	*immu_flushops;
634 
635 	kmem_cache_t		*immu_hdl_cache;
636 	kmem_cache_t		*immu_pgtable_cache;
637 
638 	iommulib_handle_t	immu_iommulib_handle;
639 } immu_t;
640 
641 /*
642  * Enough space to hold the decimal number of any device instance.
643  * Used for device/cache names.
644  */
645 #define	IMMU_ISTRLEN	11	/* ceil(log10(2^31)) + 1 */
646 
647 /* properties that control DVMA */
648 #define	DDI_DVMA_MAPTYPE_ROOTNEX_PROP	"immu-dvma-mapping"
649 
650 #define	DDI_DVMA_MAPTYPE_UNITY		"unity"
651 #define	DDI_DVMA_MAPTYPE_XLATE		"xlate"
652 
653 typedef enum immu_maptype {
654 	IMMU_MAPTYPE_BAD = 0,    /* 0 is always bad */
655 	IMMU_MAPTYPE_UNITY = 1,
656 	IMMU_MAPTYPE_XLATE
657 } immu_maptype_t;
658 
659 #define	IMMU_COOKIE_HASHSZ	(512)
660 
661 /*
662  * domain_t
663  *
664  */
665 typedef struct domain {
666 	/* the basics */
667 	uint_t			dom_did;
668 	immu_t			*dom_immu;
669 
670 	/* mapping related */
671 	immu_maptype_t		dom_maptype;
672 	vmem_t			*dom_dvma_arena;
673 	char			dom_dvma_arena_name[IMMU_MAXNAMELEN];
674 
675 	/* pgtables */
676 	pgtable_t		*dom_pgtable_root;
677 	krwlock_t		dom_pgtable_rwlock;
678 
679 	/* list node for list of domains (unity or xlate) */
680 	list_node_t		dom_maptype_node;
681 	/* list node for list of domains off immu */
682 	list_node_t		dom_immu_node;
683 
684 	mod_hash_t		*dom_cookie_hash;
685 
686 	/* topmost device in domain; usually the device itself (non-shared) */
687 	dev_info_t		*dom_dip;
688 } domain_t;
689 
690 typedef enum immu_pcib {
691 	IMMU_PCIB_BAD = 0,
692 	IMMU_PCIB_NOBDF,
693 	IMMU_PCIB_PCIE_PCIE,
694 	IMMU_PCIB_PCIE_PCI,
695 	IMMU_PCIB_PCI_PCI,
696 	IMMU_PCIB_ENDPOINT
697 } immu_pcib_t;
698 
699 /*
700  *  immu_devi_t
701  *      Intel IOMMU in devinfo node
702  */
703 typedef struct immu_devi {
704 	/* pci seg, bus, dev, func */
705 	int		imd_seg;
706 	int		imd_bus;
707 	int		imd_devfunc;
708 
709 	/* ppb information */
710 	immu_pcib_t	imd_pcib_type;
711 	int		imd_sec;
712 	int		imd_sub;
713 
714 	/* identifier for special devices */
715 	boolean_t	imd_display;
716 	boolean_t	imd_lpc;
717 
718 	/* set if premapped DVMA space is used */
719 	boolean_t	imd_use_premap;
720 
721 	/* dmar unit to which this dip belongs */
722 	immu_t		*imd_immu;
723 
724 	immu_flags_t	imd_dvma_flags;
725 
726 	/* domain ptr */
727 	domain_t	*imd_domain;
728 	dev_info_t	*imd_ddip;
729 
730 	/* my devinfo */
731 	dev_info_t	*imd_dip;
732 
733 	/*
734 	 * if we are a "special" devinfo
735 	 * the node for the special linked list
736 	 * off the DMAR unit structure
737 	 */
738 	list_node_t	imd_spc_node;
739 } immu_devi_t;
740 
741 #define	IMMU_DEVI(dip)		((immu_devi_t *)(DEVI(dip)->devi_iommu))
742 #define	IMMU_DEVI_SET(dip, imd)	(DEVI(dip)->devi_iommu = (void *)imd)
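/*
 * Illustrative example: reaching the per-devinfo IOMMU state through the
 * macros above ("rdip" is a hypothetical device node).
 *
 *	immu_devi_t *imd = IMMU_DEVI(rdip);
 *	if (imd != NULL && imd->imd_domain != NULL)
 *		(use imd->imd_immu and imd->imd_domain)
 */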
743 
744 /*
745  * immu_arg_t
746  */
747 typedef struct immu_arg {
748 	int		ima_seg;
749 	int		ima_bus;
750 	int		ima_devfunc;
751 	dev_info_t	*ima_rdip;
752 	dev_info_t	*ima_ddip;
753 } immu_arg_t;
754 
755 #define	IMMU_NDVSEG	8
756 #define	IMMU_NDCK	64
757 #define	IMMU_NPREPTES	8
758 
759 typedef struct immu_hdl_private {
760 	immu_inv_wait_t ihp_inv_wait;
761 	size_t ihp_ndvseg;
762 	struct dvmaseg ihp_dvseg[IMMU_NDVSEG];
763 	immu_dcookie_t ihp_dcookies[IMMU_NDCK];
764 
765 	hw_pdte_t *ihp_preptes[IMMU_NPREPTES];
766 	uint64_t ihp_predvma;
767 	int ihp_npremapped;
768 } immu_hdl_priv_t;
769 
770 /*
771  * Invalidation operation function pointers for context and IOTLB.
772  * These will be set to either the register or the queue invalidation
773  * interface functions, since the hardware does not allow using them
774  * both at the same time.
775  */
776 struct immu_flushops {
777 	void (*imf_context_fsi)(immu_t *, uint8_t, uint16_t, uint_t,
778 	    immu_inv_wait_t *);
779 	void (*imf_context_dsi)(immu_t *, uint_t, immu_inv_wait_t *);
780 	void (*imf_context_gbl)(immu_t *, immu_inv_wait_t *);
781 
782 	void (*imf_iotlb_psi)(immu_t *, uint_t, uint64_t, uint_t, uint_t,
783 	    immu_inv_wait_t *);
784 	void (*imf_iotlb_dsi)(immu_t *, uint_t, immu_inv_wait_t *);
785 	void (*imf_iotlb_gbl)(immu_t *, immu_inv_wait_t *);
786 
787 	void (*imf_wait)(immu_inv_wait_t *);
788 };
789 
790 #define	immu_flush_context_fsi(i, f, s, d, w) \
791 	(i)->immu_flushops->imf_context_fsi(i, f, s, d, w)
792 #define	immu_flush_context_dsi(i, d, w) \
793 	(i)->immu_flushops->imf_context_dsi(i, d, w)
794 #define	immu_flush_context_gbl(i, w) \
795 	(i)->immu_flushops->imf_context_gbl(i, w)
796 
797 #define	immu_flush_iotlb_psi(i, d, v, c, h, w) \
798 	(i)->immu_flushops->imf_iotlb_psi(i, d, v, c, h, w)
799 #define	immu_flush_iotlb_dsi(i, d, w) \
800 	(i)->immu_flushops->imf_iotlb_dsi(i, d, w)
801 #define	immu_flush_iotlb_gbl(i, w) \
802 	(i)->immu_flushops->imf_iotlb_gbl(i, w)
803 
804 #define	immu_flush_wait(i, w) \
805 	(i)->immu_flushops->imf_wait(w)
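/*
 * Illustrative example: issuing a flush through the installed flush ops
 * and waiting for completion later ("immu" and "did" are assumed valid;
 * immu_init_inv_wait() is declared below).
 *
 *	immu_inv_wait_t iw;
 *
 *	immu_init_inv_wait(&iw, "example-ctx-flush", B_FALSE);
 *	immu_flush_context_dsi(immu, did, &iw);
 *	immu_flush_wait(immu, &iw);		(block until completion)
 */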
806 
807 /*
808  * Globals used by IOMMU code
809  */
810 /* shared between IOMMU files */
811 extern dev_info_t *root_devinfo;
812 extern kmutex_t immu_lock;
813 extern list_t immu_list;
814 extern boolean_t immu_setup;
815 extern boolean_t immu_running;
816 extern kmutex_t ioapic_drhd_lock;
817 extern list_t ioapic_drhd_list;
818 extern struct iommulib_ops immulib_ops;
819 
820 /* switches */
821 
822 /* Various features */
823 extern boolean_t immu_enable;
824 extern boolean_t immu_gfxdvma_enable;
825 extern boolean_t immu_intrmap_enable;
826 extern boolean_t immu_qinv_enable;
827 
828 /* various quirks that need working around */
829 extern boolean_t immu_quirk_usbpage0;
830 extern boolean_t immu_quirk_usbfullpa;
831 extern boolean_t immu_quirk_usbrmrr;
832 extern boolean_t immu_quirk_mobile4;
833 
834 /* debug messages */
835 extern boolean_t immu_dmar_print;
836 
837 /* tunables */
838 extern int64_t immu_flush_gran;
839 
840 extern immu_flags_t immu_global_dvma_flags;
841 
842 extern int immu_use_tm;
843 extern int immu_use_alh;
844 
845 /* ################### Interfaces exported outside IOMMU code ############## */
846 void immu_init(void);
847 void immu_startup(void);
848 void immu_shutdown(void);
849 void immu_destroy(void);
850 int immu_map_sgl(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
851     int prealloc_count, dev_info_t *rdip);
852 int immu_unmap_sgl(ddi_dma_impl_t *hp, dev_info_t *rdip);
853 void immu_device_tree_changed(void);
854 void immu_physmem_update(uint64_t addr, uint64_t size);
855 int immu_quiesce(void);
856 int immu_unquiesce(void);
857 /* ######################################################################### */
858 
859 /* ################# Interfaces used within IOMMU code #################### */
860 /* immu_dmar.c interfaces */
861 int immu_dmar_setup(void);
862 int immu_dmar_parse(void);
863 void immu_dmar_startup(void);
864 void immu_dmar_shutdown(void);
865 void immu_dmar_destroy(void);
866 boolean_t immu_dmar_blacklisted(char **strings_array, uint_t nstrings);
867 immu_t *immu_dmar_get_immu(dev_info_t *rdip);
868 dev_info_t *immu_dmar_unit_dip(void *dmar_unit);
869 void immu_dmar_set_immu(void *dmar_unit, immu_t *immu);
870 void *immu_dmar_walk_units(int seg, void *dmar_unit);
871 boolean_t immu_dmar_intrmap_supported(void);
872 uint16_t immu_dmar_ioapic_sid(int ioapicid);
873 immu_t *immu_dmar_ioapic_immu(int ioapicid);
874 void immu_dmar_rmrr_map(void);
875 
876 /* immu.c interfaces */
877 int immu_walk_ancestor(dev_info_t *rdip, dev_info_t *ddip,
878     int (*func)(dev_info_t *, void *arg), void *arg,
879     int *level, immu_flags_t immu_flags);
880 void immu_init_inv_wait(immu_inv_wait_t *iwp, const char *s, boolean_t sync);
881 
882 /* immu_regs.c interfaces */
883 void immu_regs_setup(list_t *immu_list);
884 void immu_regs_startup(immu_t *immu);
885 int immu_regs_resume(immu_t *immu);
886 void immu_regs_suspend(immu_t *immu);
887 void immu_regs_shutdown(immu_t *immu);
888 void immu_regs_destroy(list_t *immu_list);
889 
890 void immu_regs_intr(immu_t *immu, uint32_t msi_addr, uint32_t msi_data,
891     uint32_t uaddr);
892 
893 boolean_t immu_regs_passthru_supported(immu_t *immu);
894 boolean_t immu_regs_is_TM_reserved(immu_t *immu);
895 boolean_t immu_regs_is_SNP_reserved(immu_t *immu);
896 
897 void immu_regs_wbf_flush(immu_t *immu);
898 void immu_regs_cpu_flush(immu_t *immu, caddr_t addr, uint_t size);
899 
900 void immu_regs_context_fsi(immu_t *immu, uint8_t function_mask,
901     uint16_t source_id, uint_t domain_id, immu_inv_wait_t *iwp);
902 void immu_regs_context_dsi(immu_t *immu, uint_t domain_id,
903     immu_inv_wait_t *iwp);
904 void immu_regs_context_gbl(immu_t *immu, immu_inv_wait_t *iwp);
905 void immu_regs_iotlb_psi(immu_t *immu, uint_t domain_id,
906     uint64_t dvma, uint_t count, uint_t hint, immu_inv_wait_t *iwp);
907 void immu_regs_iotlb_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp);
908 void immu_regs_iotlb_gbl(immu_t *immu, immu_inv_wait_t *iwp);
909 
910 void immu_regs_set_root_table(immu_t *immu);
911 void immu_regs_qinv_enable(immu_t *immu, uint64_t qinv_reg_value);
912 void immu_regs_intr_enable(immu_t *immu, uint32_t msi_addr, uint32_t msi_data,
913     uint32_t uaddr);
914 void immu_regs_intrmap_enable(immu_t *immu, uint64_t irta_reg);
915 uint64_t immu_regs_get64(immu_t *immu, uint_t reg);
916 void immu_regs_put64(immu_t *immu, uint_t reg, uint64_t val);
917 uint32_t immu_regs_get32(immu_t *immu, uint_t reg);
918 void immu_regs_put32(immu_t *immu, uint_t reg, uint32_t val);
919 
920 /* immu_dvma.c interfaces */
921 void immu_dvma_setup(list_t *immu_list);
922 void immu_dvma_startup(immu_t *immu);
923 void immu_dvma_shutdown(immu_t *immu);
924 void immu_dvma_destroy(list_t *immu_list);
925 
926 void immu_dvma_physmem_update(uint64_t addr, uint64_t size);
927 int immu_map_memrange(dev_info_t *, memrng_t *);
928 int immu_dvma_map(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
929     uint_t prealloc_count, dev_info_t *rdip);
930 int immu_dvma_unmap(ddi_dma_impl_t *hp, dev_info_t *rdip);
931 int immu_devi_set(dev_info_t *dip, immu_flags_t immu_flags);
932 immu_devi_t *immu_devi_get(dev_info_t *dip);
933 immu_t *immu_dvma_get_immu(dev_info_t *dip, immu_flags_t immu_flags);
934 int pgtable_ctor(void *buf, void *arg, int kmflag);
935 void pgtable_dtor(void *buf, void *arg);
936 
937 int immu_hdl_priv_ctor(void *buf, void *arg, int kmf);
938 
939 int immu_dvma_device_setup(dev_info_t *rdip, immu_flags_t immu_flags);
940 
941 void immu_print_fault_info(uint_t sid, uint64_t dvma);
942 
943 /* immu_intrmap.c interfaces */
944 void immu_intrmap_setup(list_t *immu_list);
945 void immu_intrmap_startup(immu_t *immu);
946 void immu_intrmap_shutdown(immu_t *immu);
947 void immu_intrmap_destroy(list_t *immu_list);
948 
949 /* registers interrupt handler for IOMMU unit */
950 void immu_intr_register(immu_t *immu);
951 uint_t immu_intr_handler(caddr_t, caddr_t);
952 
953 
954 /* immu_qinv.c interfaces */
955 int immu_qinv_setup(list_t *immu_list);
956 void immu_qinv_startup(immu_t *immu);
957 void immu_qinv_shutdown(immu_t *immu);
958 void immu_qinv_destroy(list_t *immu_list);
959 
960 void immu_qinv_context_fsi(immu_t *immu, uint8_t function_mask,
961     uint16_t source_id, uint_t domain_id, immu_inv_wait_t *iwp);
962 void immu_qinv_context_dsi(immu_t *immu, uint_t domain_id,
963     immu_inv_wait_t *iwp);
964 void immu_qinv_context_gbl(immu_t *immu, immu_inv_wait_t *iwp);
965 void immu_qinv_iotlb_psi(immu_t *immu, uint_t domain_id,
966     uint64_t dvma, uint_t count, uint_t hint, immu_inv_wait_t *iwp);
967 void immu_qinv_iotlb_dsi(immu_t *immu, uint_t domain_id, immu_inv_wait_t *iwp);
968 void immu_qinv_iotlb_gbl(immu_t *immu, immu_inv_wait_t *iwp);
969 
970 void immu_qinv_intr_global(immu_t *immu, immu_inv_wait_t *iwp);
971 void immu_qinv_intr_one_cache(immu_t *immu, uint_t idx, immu_inv_wait_t *iwp);
972 void immu_qinv_intr_caches(immu_t *immu, uint_t idx, uint_t cnt,
973     immu_inv_wait_t *);
974 void immu_qinv_report_fault(immu_t *immu);
975 
976 #ifdef DEBUG
977 #define	IMMU_DPROBE1(name, type1, arg1) \
978 	DTRACE_PROBE1(name, type1, arg1)
979 #define	IMMU_DPROBE2(name, type1, arg1, type2, arg2) \
980 	DTRACE_PROBE2(name, type1, arg1, type2, arg2)
981 #define	IMMU_DPROBE3(name, type1, arg1, type2, arg2, type3, arg3) \
982 	DTRACE_PROBE3(name, type1, arg1, type2, arg2, type3, arg3)
983 #define	IMMU_DPROBE4(name, type1, arg1, type2, arg2, type3, arg3, type4, arg4) \
984 	DTRACE_PROBE4(name, type1, arg1, type2, arg2, type3, arg3, type4, arg4)
985 #else
986 #define	IMMU_DPROBE1(name, type1, arg1)
987 #define	IMMU_DPROBE2(name, type1, arg1, type2, arg2)
988 #define	IMMU_DPROBE3(name, type1, arg1, type2, arg2, type3, arg3)
989 #define	IMMU_DPROBE4(name, type1, arg1, type2, arg2, type3, arg3, type4, arg4)
990 #endif
991 
992 
993 #ifdef	__cplusplus
994 }
995 #endif
996 
997 #endif	/* _SYS_INTEL_IOMMU_H */
998