xref: /titanic_51/usr/src/uts/i86pc/sys/immu.h (revision 6142aa70a0f83a099e021c8378a46f4f8f374bb9)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License (the "License").
6  * You may not use this file except in compliance with the License.
7  *
8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9  * or http://www.opensolaris.org/os/licensing.
10  * See the License for the specific language governing permissions
11  * and limitations under the License.
12  *
13  * When distributing Covered Code, include this CDDL HEADER in each
14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15  * If applicable, add the following below this CDDL HEADER, with the
16  * fields enclosed by brackets "[]" replaced with your own identifying
17  * information: Portions Copyright [yyyy] [name of copyright owner]
18  *
19  * CDDL HEADER END
20  */
21 /*
22  * Portions Copyright 2010 Sun Microsystems, Inc.  All rights reserved.
23  * Use is subject to license terms.
24  */
25 
26 /*
27  * Copyright (c) 2008, Intel Corporation.
28  * All rights reserved.
29  */
30 
31 #ifndef	_SYS_INTEL_IOMMU_H
32 #define	_SYS_INTEL_IOMMU_H
33 
34 /*
35  * Intel IOMMU implementation specific state
36  */
37 
38 #ifdef	__cplusplus
39 extern "C" {
40 #endif
41 
42 #include <sys/types.h>
43 #include <sys/bitset.h>
44 #include <sys/kstat.h>
45 #include <sys/vmem.h>
46 #include <sys/rootnex.h>
47 
/*
 * Some ON drivers have bugs. Keep this define until all such drivers
 * have been fixed
 */
#define	BUGGY_DRIVERS 1

/* PD(T)E entries */
typedef uint64_t hw_pdte_t;

#define	IMMU_MAXNAMELEN (64)
#define	IMMU_MAXSEG	(1)
#define	IMMU_REGSZ	(1UL << 12)
#define	IMMU_PAGESIZE   (4096)
#define	IMMU_PAGESHIFT	(12)
#define	IMMU_PAGEOFFSET	(IMMU_PAGESIZE - 1)
#define	IMMU_PAGEMASK	(~IMMU_PAGEOFFSET)
/*
 * byte <-> IOMMU-page conversions; arguments are fully parenthesized
 * so the cast applies to the whole argument expression.
 */
#define	IMMU_BTOP(b)	(((uint64_t)(b)) >> IMMU_PAGESHIFT)
#define	IMMU_PTOB(p)	(((uint64_t)(p)) << IMMU_PAGESHIFT)
#define	IMMU_PGTABLE_MAX_LEVELS	(6)
/* round a size up / an address down to an IOMMU page boundary */
#define	IMMU_ROUNDUP(size) (((size) + IMMU_PAGEOFFSET) & ~IMMU_PAGEOFFSET)
#define	IMMU_ROUNDOWN(addr) ((addr) & ~IMMU_PAGEOFFSET)
#define	IMMU_PGTABLE_LEVEL_STRIDE	(9)
#define	IMMU_PGTABLE_LEVEL_MASK	((1<<IMMU_PGTABLE_LEVEL_STRIDE) - 1)
#define	IMMU_PGTABLE_OFFSHIFT  (IMMU_PAGESHIFT - IMMU_PGTABLE_LEVEL_STRIDE)
#define	IMMU_PGTABLE_MAXIDX  ((IMMU_PAGESIZE / sizeof (hw_pdte_t)) - 1)
76 
/*
 * DMAR global defines
 */
#define	DMAR_TABLE	"dmar-table"	/* rootnex property holding raw table */
#define	DMAR_INTRMAP_SUPPORT	(0x01)

/* DMAR unit types (remapping-structure types in the ACPI DMAR table) */
#define	DMAR_DRHD	0	/* DMA Remapping Hardware Unit Definition */
#define	DMAR_RMRR	1	/* Reserved Memory Region Reporting */
#define	DMAR_ATSR	2	/* Root Port ATS Capability Reporting */
#define	DMAR_RHSA	3	/* Remapping Hardware Static Affinity */

/* DRHD flag values */
#define	DMAR_INCLUDE_ALL	(0x01)	/* unit covers all devices on segment */

/* Device scope types */
#define	DMAR_ENDPOINT	1
#define	DMAR_SUBTREE	2
#define	DMAR_IOAPIC	3
#define	DMAR_HPET	4
97 
98 
99 /* Forward declarations for IOMMU state structure and DVMA domain struct */
100 struct immu;
101 struct domain;
102 
/*
 * The following structures describe the format of the DMAR ACPI table.
 * They are used to parse the DMAR ACPI table. Read the spec for the
 * meaning of each member.
 */
108 
/* lengths of various strings */
#define	DMAR_SIG_LEN    (4)	/* table signature */
#define	DMAR_OEMID_LEN  (6)	/* OEM ID */
#define	DMAR_TBLID_LEN  (8)	/* OEM table ID */
#define	DMAR_ASL_LEN    (4)	/* ASL len */

/*
 * Parsed, in-memory representation of the ACPI DMAR table.
 */
typedef struct dmar_table {
	kmutex_t	tbl_lock;	/* protects this structure */
	uint8_t		tbl_haw;	/* host address width (per DMAR table) */
	boolean_t	tbl_intrmap;	/* intr remapping supported by platform */
	list_t		tbl_drhd_list[IMMU_MAXSEG];	/* DRHDs, per segment */
	list_t		tbl_rmrr_list[IMMU_MAXSEG];	/* RMRRs, per segment */
	char		*tbl_oem_id;	/* OEM ID string from table header */
	char		*tbl_oem_tblid;	/* OEM table ID string */
	uint32_t	tbl_oem_rev;	/* OEM revision */
	caddr_t		tbl_raw;	/* raw ACPI table bytes */
	int		tbl_rawlen;	/* length of raw table */
} dmar_table_t;
127 
/* One DRHD (DMA Remapping Hardware Unit Definition) parsed from the table */
typedef struct drhd {
	kmutex_t	dr_lock;   /* protects the dmar field */
	struct immu	*dr_immu;	/* IOMMU software state for this unit */
	dev_info_t	*dr_dip;	/* devinfo node for this unit */
	uint16_t 	dr_seg;		/* PCI segment number */
	uint64_t 	dr_regs;	/* register base address */
	boolean_t	dr_include_all;	/* DMAR_INCLUDE_ALL flag set? */
	list_node_t 	dr_node;	/* linkage in tbl_drhd_list */
	list_t 		dr_scope_list;	/* device scopes covered by this unit */
} drhd_t;
138 
/* One RMRR (Reserved Memory Region Reporting) entry parsed from the table */
typedef struct rmrr {
	kmutex_t	rm_lock;	/* protects this structure */
	uint16_t	rm_seg;		/* PCI segment number */
	uint64_t	rm_base;	/* reserved region base address */
	uint64_t	rm_limit;	/* reserved region limit address */
	list_t		rm_scope_list;	/* device scopes using this region */
	list_node_t	rm_node;	/* linkage in tbl_rmrr_list */
} rmrr_t;
147 
/*
 * Macros based on PCI spec. A "devfunc" packs the 5-bit device number
 * and 3-bit function number into one byte. Arguments are fully
 * parenthesized so expressions expand correctly.
 */
#define	IMMU_PCI_DEV(devfunc)    (((uint64_t)(devfunc)) >> 3) /* dev from devfunc */
#define	IMMU_PCI_FUNC(devfunc)   ((devfunc) & 7)  /* get func from devfunc */
#define	IMMU_PCI_DEVFUNC(d, f)   (((d) << 3) | (f))  /* create devfunc */
154 
/* One device-scope entry (which device a DRHD/RMRR applies to) */
typedef struct scope {
	uint8_t scp_type;	/* DMAR_ENDPOINT/SUBTREE/IOAPIC/HPET */
	uint8_t scp_enumid;	/* enumeration id (e.g. I/O APIC id) */
	uint8_t scp_bus;	/* PCI bus */
	uint8_t scp_dev;	/* PCI device */
	uint8_t scp_func;	/* PCI function */
	list_node_t scp_node;	/* linkage in dr_scope_list/rm_scope_list */
} scope_t;
163 
/*
 * interrupt source id and drhd info for ioapic
 */
typedef struct ioapic_drhd {
	uchar_t		ioapic_ioapicid;	/* I/O APIC id */
	uint16_t	ioapic_sid;	/* ioapic source id */
	drhd_t		*ioapic_drhd;	/* DRHD unit covering this ioapic */
	list_node_t	ioapic_node;	/* linkage in ioapic_drhd_list */
} ioapic_drhd_t;
173 
/* A physical memory range expressed as start page + page count */
typedef struct memrng {
	uint64_t mrng_start;	/* first page (see IMMU_BTOP) */
	uint64_t mrng_npages;	/* number of pages in the range */
} memrng_t;
178 
/*
 * Flags passed through the IOMMU mapping/allocation interfaces.
 * NOTE(review): IMMU_FLAGS_NONE and IMMU_FLAGS_SLEEP share the value
 * 0x1, so testing for NONE is indistinguishable from testing for
 * SLEEP. Confirm whether NONE was intended to be 0x0 before relying
 * on it in flag arithmetic.
 */
typedef enum immu_flags {
	IMMU_FLAGS_NONE = 0x1,
	IMMU_FLAGS_SLEEP = 0x1,		/* may block for memory */
	IMMU_FLAGS_NOSLEEP = 0x2,	/* must not block */
	IMMU_FLAGS_READ = 0x4,		/* map for device read */
	IMMU_FLAGS_WRITE = 0x8,		/* map for device write */
	IMMU_FLAGS_DONTPASS = 0x10,
	IMMU_FLAGS_ALLOC = 0x20,
	IMMU_FLAGS_MUST_MATCH = 0x40,
	IMMU_FLAGS_PAGE1 = 0x80,
	IMMU_FLAGS_UNITY = 0x100,
	IMMU_FLAGS_DMAHDL = 0x200,
	IMMU_FLAGS_MEMRNG = 0x400
} immu_flags_t;
193 
/* Context table availability state; 0 is deliberately invalid */
typedef enum cont_avail {
	IMMU_CONT_BAD = 0x0,
	IMMU_CONT_UNINITED = 0x1,
	IMMU_CONT_INITED = 0x2
} cont_avail_t;
199 
/* Size of root and context tables and their entries */
#define	IMMU_ROOT_TBLSZ		(4096)
#define	IMMU_CONT_TBLSZ		(4096)
#define	IMMU_ROOT_NUM		(256)
#define	IMMU_CONT_NUM		(256)

/* register offsets from the unit's register base (VT-d register set) */
#define	IMMU_REG_VERSION	(0x00)  /* Version Register, 32 bit */
#define	IMMU_REG_CAP		(0x08)  /* Capability Register, 64 bit */
#define	IMMU_REG_EXCAP		(0x10)  /* Extended Capability Reg, 64 bit */
#define	IMMU_REG_GLOBAL_CMD	(0x18)  /* Global Command Register, 32 bit */
#define	IMMU_REG_GLOBAL_STS	(0x1C)  /* Global Status Register, 32 bit */
#define	IMMU_REG_ROOTENTRY	(0x20)  /* Root-Entry Table Addr Reg, 64 bit */
#define	IMMU_REG_CONTEXT_CMD	(0x28)  /* Context Command Register, 64 bit */
#define	IMMU_REG_FAULT_STS	(0x34)  /* Fault Status Register, 32 bit */
#define	IMMU_REG_FEVNT_CON	(0x38)  /* Fault Event Control Reg, 32 bit */
#define	IMMU_REG_FEVNT_DATA	(0x3C)  /* Fault Event Data Register, 32 bit */
#define	IMMU_REG_FEVNT_ADDR	(0x40)  /* Fault Event Address Reg, 32 bit */
#define	IMMU_REG_FEVNT_UADDR	(0x44)  /* Fault Event Upper Addr Reg, 32 bit */
#define	IMMU_REG_AFAULT_LOG	(0x58)  /* Advanced Fault Log Reg, 64 bit */
#define	IMMU_REG_PMER		(0x64)  /* Protected Memory Enable Reg, 32 bit */
#define	IMMU_REG_PLMBR		(0x68)  /* Protected Low Mem Base Reg, 32 bit */
#define	IMMU_REG_PLMLR		(0x6C)  /* Protected Low Mem Lim Reg, 32 bit */
#define	IMMU_REG_PHMBR		(0X70)  /* Protected High Mem Base Reg, 64 bit */
#define	IMMU_REG_PHMLR		(0x78)  /* Protected High Mem Lim Reg, 64 bit */
#define	IMMU_REG_INVAL_QH	(0x80)  /* Invalidation Queue Head, 64 bit */
#define	IMMU_REG_INVAL_QT	(0x88)  /* Invalidation Queue Tail, 64 bit */
#define	IMMU_REG_INVAL_QAR	(0x90)  /* Invalidation Queue Addr Reg, 64 bit */
#define	IMMU_REG_INVAL_CSR	(0x9C)  /* Inval Compl Status Reg, 32 bit */
#define	IMMU_REG_INVAL_CECR	(0xA0)  /* Inval Compl Evnt Ctrl Reg, 32 bit */
#define	IMMU_REG_INVAL_CEDR	(0xA4)  /* Inval Compl Evnt Data Reg, 32 bit */
#define	IMMU_REG_INVAL_CEAR	(0xA8)  /* Inval Compl Event Addr Reg, 32 bit */
#define	IMMU_REG_INVAL_CEUAR	(0xAC)  /* Inval Comp Evnt Up Addr reg, 32bit */
#define	IMMU_REG_IRTAR		(0xB8)  /* INTR Remap Tbl Addr Reg, 64 bit */
234 
/* ioapic memory region */
#define	IOAPIC_REGION_START	(0xfee00000)
#define	IOAPIC_REGION_END	(0xfeefffff)

/* fault status register bits and fault-record field extractors */
#define	IMMU_FAULT_STS_PPF		(2)	/* primary pending fault */
#define	IMMU_FAULT_STS_PFO		(1)	/* fault overflow */
#define	IMMU_FAULT_STS_ITE		(1 << 6)	/* inval timeout error */
#define	IMMU_FAULT_STS_ICE		(1 << 5)	/* inval completion error */
#define	IMMU_FAULT_STS_IQE		(1 << 4)	/* inval queue error */
#define	IMMU_FAULT_GET_INDEX(x)		((((uint64_t)x) >> 8) & 0xff)
#define	IMMU_FRR_GET_F(x)		(((uint64_t)x) >> 63)	/* fault valid */
#define	IMMU_FRR_GET_FR(x)		((((uint64_t)x) >> 32) & 0xff)	/* reason */
#define	IMMU_FRR_GET_FT(x)		((((uint64_t)x) >> 62) & 0x1)	/* type */
#define	IMMU_FRR_GET_SID(x)		((x) & 0xffff)	/* source id */
250 
/*
 * (ex)capability register field extractors. All arguments are fully
 * parenthesized so arbitrary expressions expand correctly.
 */
#define	IMMU_CAP_GET_NFR(x)		(((((uint64_t)(x)) >> 40) & 0xff) + 1)
#define	IMMU_CAP_GET_DWD(x)		((((uint64_t)(x)) >> 54) & 1)
#define	IMMU_CAP_GET_DRD(x)		((((uint64_t)(x)) >> 55) & 1)
#define	IMMU_CAP_GET_PSI(x)		((((uint64_t)(x)) >> 39) & 1)
#define	IMMU_CAP_GET_SPS(x)		((((uint64_t)(x)) >> 34) & 0xf)
#define	IMMU_CAP_GET_ISOCH(x)		((((uint64_t)(x)) >> 23) & 1)
#define	IMMU_CAP_GET_ZLR(x)		((((uint64_t)(x)) >> 22) & 1)
#define	IMMU_CAP_GET_MAMV(x)		((((uint64_t)(x)) >> 48) & 0x3f)
#define	IMMU_CAP_GET_CM(x)		((((uint64_t)(x)) >> 7) & 1)
#define	IMMU_CAP_GET_PHMR(x)		((((uint64_t)(x)) >> 6) & 1)
#define	IMMU_CAP_GET_PLMR(x)		((((uint64_t)(x)) >> 5) & 1)
#define	IMMU_CAP_GET_RWBF(x)		((((uint64_t)(x)) >> 4) & 1)
#define	IMMU_CAP_GET_AFL(x)		((((uint64_t)(x)) >> 3) & 1)
/* fault recording register offset: 10-bit field, in units of 16 bytes */
#define	IMMU_CAP_GET_FRO(x)		(((((uint64_t)(x)) >> 24) & 0x3ff) * 16)
#define	IMMU_CAP_MGAW(x)		(((((uint64_t)(x)) >> 16) & 0x3f) + 1)
#define	IMMU_CAP_SAGAW(x)		((((uint64_t)(x)) >> 8) & 0x1f)
/*
 * number of domains supported: 2^(2*ND + 4). The whole expansion is
 * parenthesized (the original left the trailing "- 1" outside the
 * parens, which broke when the macro was used inside an expression).
 */
#define	IMMU_CAP_ND(x)			((1 << (((x) & 0x7) * 2 + 4)) - 1)
/* IOTLB invalidation register offset: 10-bit field, units of 16 bytes */
#define	IMMU_ECAP_GET_IRO(x)		(((((uint64_t)(x)) >> 8) & 0x3ff) << 4)
#define	IMMU_ECAP_GET_MHMV(x)		((((uint64_t)(x)) >> 20) & 0xf)
#define	IMMU_ECAP_GET_SC(x)		((x) & 0x80)
#define	IMMU_ECAP_GET_PT(x)		((x) & 0x40)
#define	IMMU_ECAP_GET_CH(x)		((x) & 0x20)
#define	IMMU_ECAP_GET_EIM(x)		((x) & 0x10)
#define	IMMU_ECAP_GET_IR(x)		((x) & 0x8)
#define	IMMU_ECAP_GET_DI(x)		((x) & 0x4)
#define	IMMU_ECAP_GET_QI(x)		((x) & 0x2)
#define	IMMU_ECAP_GET_C(x)		((x) & 0x1)

#define	IMMU_CAP_SET_RWBF(x)		((x) |= (1 << 4))
281 
282 
/* iotlb invalidation: IOTLB register command bits and field encoders */
#define	TLB_INV_GLOBAL		(((uint64_t)1) << 60)	/* global granularity */
#define	TLB_INV_DOMAIN		(((uint64_t)2) << 60)	/* domain granularity */
#define	TLB_INV_PAGE		(((uint64_t)3) << 60)	/* page granularity */
#define	TLB_INV_GET_IAIG(x)	((((uint64_t)x) >> 57) & 7)	/* actual gran. */
#define	TLB_INV_DRAIN_READ	(((uint64_t)1) << 49)
#define	TLB_INV_DRAIN_WRITE	(((uint64_t)1) << 48)
#define	TLB_INV_DID(x)		(((uint64_t)((x) & 0xffff)) << 32)	/* domain id */
#define	TLB_INV_IVT		(((uint64_t)1) << 63)	/* invalidate trigger */
#define	TLB_IVA_HINT(x)		(((x) & 0x1) << 6)
#define	TLB_IVA_LEAF		1
#define	TLB_IVA_WHOLE		0
295 
/*
 * Invalidation granularity selectors. Values deliberately start at 1
 * so a zeroed/uninitialized variable (0) is caught as invalid.
 */
typedef enum iotlb_inv {
	IOTLB_PSI = 1,		/* page-selective invalidation */
	IOTLB_DSI,		/* domain-selective invalidation */
	IOTLB_GLOBAL		/* global invalidation */
} immu_iotlb_inv_t;

typedef enum context_inv {
	CONTEXT_FSI = 1,	/* function-selective invalidation */
	CONTEXT_DSI,		/* domain-selective invalidation */
	CONTEXT_GLOBAL		/* global invalidation */
} immu_context_inv_t;
308 
/* context invalidation: Context Command register bits and encoders */
#define	CCMD_INV_ICC		(((uint64_t)1) << 63)	/* invalidate trigger */
#define	CCMD_INV_GLOBAL		(((uint64_t)1) << 61)	/* global granularity */
#define	CCMD_INV_DOMAIN		(((uint64_t)2) << 61)	/* domain granularity */
#define	CCMD_INV_DEVICE		(((uint64_t)3) << 61)	/* device granularity */
#define	CCMD_INV_DID(x)		((uint64_t)((x) & 0xffff))	/* domain id */
#define	CCMD_INV_SID(x)		(((uint64_t)((x) & 0xffff)) << 16) /* source id */
#define	CCMD_INV_FM(x)		(((uint64_t)((x) & 0x3)) << 32)	/* func mask */
317 
/* global command register (write-only command bits) */
#define	IMMU_GCMD_TE		(((uint32_t)1) << 31)	/* translation enable */
#define	IMMU_GCMD_SRTP		(((uint32_t)1) << 30)	/* set root table ptr */
#define	IMMU_GCMD_SFL		(((uint32_t)1) << 29)	/* set fault log */
#define	IMMU_GCMD_EAFL		(((uint32_t)1) << 28)	/* enable adv fault log */
#define	IMMU_GCMD_WBF		(((uint32_t)1) << 27)	/* write-buffer flush */
#define	IMMU_GCMD_QIE		(((uint32_t)1) << 26)	/* queued inval enable */
#define	IMMU_GCMD_IRE		(((uint32_t)1) << 25)	/* intr remap enable */
#define	IMMU_GCMD_SIRTP	(((uint32_t)1) << 24)	/* set intr remap tbl ptr */
#define	IMMU_GCMD_CFI		(((uint32_t)1) << 23)	/* compat fmt intr */

/* global status register (read-only status mirroring the commands above) */
#define	IMMU_GSTS_TES		(((uint32_t)1) << 31)
#define	IMMU_GSTS_RTPS		(((uint32_t)1) << 30)
#define	IMMU_GSTS_FLS		(((uint32_t)1) << 29)
#define	IMMU_GSTS_AFLS		(((uint32_t)1) << 28)
#define	IMMU_GSTS_WBFS		(((uint32_t)1) << 27)
#define	IMMU_GSTS_QIES		(((uint32_t)1) << 26)
#define	IMMU_GSTS_IRES		(((uint32_t)1) << 25)
#define	IMMU_GSTS_IRTPS	(((uint32_t)1) << 24)
#define	IMMU_GSTS_CFIS		(((uint32_t)1) << 23)
339 
/* psi address mask: number of pages covered by address-mask value m */
#define	ADDR_AM_MAX(m)		(((uint_t)1) << (m))
#define	ADDR_AM_OFFSET(n, m)	((n) & (ADDR_AM_MAX(m) - 1))

/* dmar fault event */
#define	IMMU_INTR_IPL			(8)	/* fault intr priority level */
#define	IMMU_REG_FEVNT_CON_IM_SHIFT	(31)	/* intr mask bit position */

#define	IMMU_ALLOC_RESOURCE_DELAY    (drv_usectohz(5000))

/* max value of Size field of Interrupt Remapping Table Address Register */
#define	INTRMAP_MAX_IRTA_SIZE	0xf

/* interrupt remapping table entry size */
#define	INTRMAP_RTE_SIZE		0x10

/* ioapic redirection table entry related shift of remappable interrupt */
#define	INTRMAP_IOAPIC_IDX_SHIFT		17
#define	INTRMAP_IOAPIC_FORMAT_SHIFT	16
#define	INTRMAP_IOAPIC_TM_SHIFT		15
#define	INTRMAP_IOAPIC_POL_SHIFT		13
#define	INTRMAP_IOAPIC_IDX15_SHIFT	11

/* msi intr entry related shift of remappable interrupt */
#define	INTRMAP_MSI_IDX_SHIFT	5
#define	INTRMAP_MSI_FORMAT_SHIFT	4
#define	INTRMAP_MSI_SHV_SHIFT	3
#define	INTRMAP_MSI_IDX15_SHIFT	2

/* sentinel: no free interrupt remapping table index */
#define	INTRMAP_IDX_FULL		(uint_t)-1

/* ioapic RDT field extractors (bit positions per the shifts above) */
#define	RDT_DLM(rdt)	BITX((rdt), 10, 8)
#define	RDT_DM(rdt)	BT_TEST(&(rdt), 11)
#define	RDT_POL(rdt)	BT_TEST(&(rdt), 13)
#define	RDT_TM(rdt)	BT_TEST(&(rdt), 15)

/*
 * sentinel: interrupt remapping disabled.
 * NOTE(review): expansion is an unparenthesized cast; ((void *)-1)
 * would be safer if ever used in a larger expression.
 */
#define	INTRMAP_DISABLE	(void *)-1
377 
/*
 * invalidation granularity
 */
/* IOTLB invalidation granularity */
typedef enum {
	TLB_INV_G_GLOBAL = 1,
	TLB_INV_G_DOMAIN,
	TLB_INV_G_PAGE
} tlb_inv_g_t;

/* context-cache invalidation granularity */
typedef enum {
	CTT_INV_G_GLOBAL = 1,
	CTT_INV_G_DOMAIN,
	CTT_INV_G_DEVICE
} ctt_inv_g_t;

/* interrupt entry cache invalidation granularity */
typedef enum {
	IEC_INV_GLOBAL = 0,
	IEC_INV_INDEX
} iec_inv_g_t;
397 
398 
struct inv_queue_state;
struct intrmap_tbl_state;

/* A software page table structure */
typedef struct pgtable {
	krwlock_t swpg_rwlock;	/* protects this pgtable */
	caddr_t hwpg_vaddr;   /* HW pgtable VA */
	paddr_t hwpg_paddr;   /* HW pgtable PA */
	ddi_dma_handle_t hwpg_dmahdl;	/* DMA handle backing the HW page */
	ddi_acc_handle_t hwpg_memhdl;	/* access handle for the HW page */
	struct pgtable **swpg_next_array;	/* shadow next-level pointers */
	list_node_t swpg_domain_node;  /* domain list of pgtables */
} pgtable_t;
412 
/* interrupt remapping table state info */
typedef struct intrmap {
	kmutex_t		intrmap_lock;	/* protects this structure */
	ddi_dma_handle_t	intrmap_dma_hdl;	/* DMA handle for table */
	ddi_acc_handle_t	intrmap_acc_hdl;	/* access handle */
	caddr_t			intrmap_vaddr;	/* table VA */
	paddr_t			intrmap_paddr;	/* table PA */
	uint_t			intrmap_size;	/* table size */
	bitset_t		intrmap_map;	/* entry allocation bitmap */
	uint_t			intrmap_free;	/* presumably next-free hint; confirm in immu_intrmap.c */
} intrmap_t;
424 
/*
 * Generic 128-bit hardware root/context entry: two 64-bit halves,
 * accessed through the ROOT_* / CONT_* macros below.
 */
typedef struct hw_rce {
	uint64_t lo;
	uint64_t hi;
} hw_rce_t;
429 
430 
/*
 * Root-entry and context-entry field accessors; "hrent"/"hcent" is a
 * hw_rce_t *. Note the setters OR bits in - they do not clear any
 * previously set field. All macro arguments are fully parenthesized
 * (the original left "paddr" bare, which mis-expanded for arguments
 * containing low-precedence operators).
 */
#define	ROOT_GET_P(hrent) ((hrent)->lo & 0x1)
#define	ROOT_SET_P(hrent) ((hrent)->lo |= 0x1)

#define	ROOT_GET_CONT(hrent) ((hrent)->lo & ~(0xFFF))
#define	ROOT_SET_CONT(hrent, paddr) ((hrent)->lo |= ((paddr) & (~0xFFF)))

/* context-entry translation types */
#define	TTYPE_XLATE_ONLY  (0x0)
#define	TTYPE_XLATE_IOTLB (0x1)
#define	TTYPE_PASSTHRU    (0x2)
#define	TTYPE_RESERVED    (0x3)

#define	CONT_GET_DID(hcent) ((((uint64_t)(hcent)->hi) >> 8) & 0xFFFF)
#define	CONT_SET_DID(hcent, did) ((hcent)->hi |= ((0xFFFF & (did)) << 8))

#define	CONT_GET_AVAIL(hcent) ((((uint64_t)((hcent)->hi)) >> 0x3) & 0xF)
#define	CONT_SET_AVAIL(hcent, av) ((hcent)->hi |= ((0xF & (av)) << 0x3))

/* address width is encoded as (AW - 30) / 9; encoded 66 means 64 bits */
#define	CONT_GET_LO_AW(hcent) (30 + 9 *((hcent)->hi & 0x7))
#define	CONT_GET_AW(hcent) \
	((CONT_GET_LO_AW(hcent) == 66) ? 64 : CONT_GET_LO_AW(hcent))
#define	CONT_SET_AW(hcent, aw) \
	((hcent)->hi |= (((((aw) + 2) - 30) / 9) & 0x7))

#define	CONT_GET_ASR(hcent) ((hcent)->lo & ~(0xFFF))
#define	CONT_SET_ASR(hcent, paddr) ((hcent)->lo |= ((paddr) & (~0xFFF)))

#define	CONT_GET_TTYPE(hcent) ((((uint64_t)(hcent)->lo) >> 0x2) & 0x3)
#define	CONT_SET_TTYPE(hcent, ttype) ((hcent)->lo |= (((ttype) & 0x3) << 0x2))

#define	CONT_GET_P(hcent) ((hcent)->lo & 0x1)
#define	CONT_SET_P(hcent) ((hcent)->lo |= 0x1)
462 
463 
/* we use the bit 63 (available for system SW) as a present bit */
#define	PDTE_SW4(hw_pdte) ((hw_pdte) & ((uint64_t)1<<63))
#define	PDTE_CLEAR_SW4(hw_pdte) ((hw_pdte) &= ~((uint64_t)1<<63))

/* software "present": intentionally the same bit 63 as SW4 above */
#define	PDTE_P(hw_pdte) ((hw_pdte) & ((uint64_t)1<<63))
#define	PDTE_CLEAR_P(hw_pdte) ((hw_pdte) &= ~((uint64_t)1<<63))
#define	PDTE_SET_P(hw_pdte) ((hw_pdte) |= ((uint64_t)1<<63))

/* TM bit (bit 62); see immu_regs_is_TM_reserved() */
#define	PDTE_TM(hw_pdte) ((hw_pdte) & ((uint64_t)1<<62))
#define	PDTE_CLEAR_TM(hw_pdte) ((hw_pdte) &= ~((uint64_t)1<<62))

/* software field in bits 52-61; "ref" and the overflow check suggest a refcount */
#define	PDTE_SW3(hw_pdte) \
	(((hw_pdte) & ~(((uint64_t)0x3<<62)|(((uint64_t)1<<52)-1))) >> 52)
#define	PDTE_SW3_OVERFLOW(hw_pdte) \
	(PDTE_SW3(hw_pdte) == 0x3FF)
#define	PDTE_CLEAR_SW3(hw_pdte) \
	((hw_pdte) &= (((uint64_t)0x3<<62)|(((uint64_t)1<<52)-1)))
#define	PDTE_SET_SW3(hw_pdte, ref) \
	((hw_pdte) |= ((((uint64_t)(ref)) & 0x3FF) << 52))

/* physical address field: bits 12-51 */
#define	PDTE_PADDR(hw_pdte) ((hw_pdte) & ~(((uint64_t)0xFFF<<52)|((1<<12)-1)))
#define	PDTE_CLEAR_PADDR(hw_pdte) \
		((hw_pdte) &= (((uint64_t)0xFFF<<52)|((1<<12)-1)))
#define	PDTE_SET_PADDR(hw_pdte, paddr) ((hw_pdte) |= PDTE_PADDR(paddr))

/* SNP bit (bit 11); see immu_regs_is_SNP_reserved() */
#define	PDTE_SNP(hw_pdte) ((hw_pdte) & (1<<11))
#define	PDTE_CLEAR_SNP(hw_pdte) ((hw_pdte) &= ~(1<<11))
#define	PDTE_SET_SNP(hw_pdte) ((hw_pdte) |= (1<<11))

/* software field, bits 8-10 */
#define	PDTE_SW2(hw_pdte) ((hw_pdte) & (0x700))
#define	PDTE_CLEAR_SW2(hw_pdte) ((hw_pdte) &= ~(0x700))

/* SP (super/large page) bit, bit 7 */
#define	PDTE_SP(hw_pdte) ((hw_pdte) & (0x80))
#define	PDTE_CLEAR_SP(hw_pdte) ((hw_pdte) &= ~(0x80))

/* software field, bits 2-6 */
#define	PDTE_SW1(hw_pdte) ((hw_pdte) & (0x7C))
#define	PDTE_CLEAR_SW1(hw_pdte) ((hw_pdte) &= ~(0x7C))

/* device write permission, bit 1 */
#define	PDTE_WRITE(hw_pdte) ((hw_pdte) & (0x2))
#define	PDTE_CLEAR_WRITE(hw_pdte) ((hw_pdte) &= ~(0x2))
#define	PDTE_SET_WRITE(hw_pdte) ((hw_pdte) |= (0x2))

/* device read permission, bit 0 */
#define	PDTE_READ(hw_pdte) ((hw_pdte) & (0x1))
#define	PDTE_CLEAR_READ(hw_pdte) ((hw_pdte) &= ~(0x1))
#define	PDTE_SET_READ(hw_pdte) ((hw_pdte) |= (0x1))
509 
/*
 * immu_t
 *	Software state for one DMAR (IOMMU) unit. One is created per
 *	hardware unit found in the ACPI DMAR table; all units are linked
 *	on a system-wide list via immu_node.
 */
typedef struct immu {
	kmutex_t		immu_lock;	/* protects this structure */
	char			*immu_name;	/* unit name for diagnostics */

	/* lock grabbed by interrupt handler */
	kmutex_t		immu_intr_lock;

	/* ACPI/DMAR table related */
	void			*immu_dmar_unit;	/* opaque DMAR unit handle */
	dev_info_t		*immu_dip;
	struct domain		*immu_unity_domain;	/* domain for 1:1 mappings */

	/* IOMMU register related */
	kmutex_t		immu_regs_lock;
	boolean_t		immu_regs_setup;
	boolean_t		immu_regs_running;
	boolean_t		immu_regs_quiesced;
	ddi_acc_handle_t	immu_regs_handle;
	caddr_t			immu_regs_addr;	/* mapped register base */
	uint64_t		immu_regs_cap;	/* cached capability reg */
	uint64_t		immu_regs_excap;	/* cached extended cap reg */
	uint32_t		immu_regs_cmdval;
	uint32_t		immu_regs_intr_msi_addr;
	uint32_t		immu_regs_intr_msi_data;
	uint32_t		immu_regs_intr_uaddr;

	/* DVMA related */
	kmutex_t		immu_dvma_lock;
	boolean_t		immu_dvma_setup;
	boolean_t		immu_dvma_running;
	int			immu_dvma_gaw;	/* guest address width */
	int			immu_dvma_agaw;	/* adjusted gaw */
	int			immu_dvma_nlevels;	/* pgtable levels in use */
	boolean_t		immu_dvma_coherent;

	/* DVMA context related */
	krwlock_t		immu_ctx_rwlock;
	pgtable_t		*immu_ctx_root;	/* root-entry table */

	/* DVMA domain related */
	int			immu_max_domains;
	vmem_t			*immu_did_arena;	/* domain-id allocator */
	char			immu_did_arena_name[IMMU_MAXNAMELEN];
	list_t			immu_domain_list;

	/* DVMA special devices */
	boolean_t		immu_dvma_gfx_only;
	list_t			immu_dvma_lpc_list;
	list_t			immu_dvma_gfx_list;

	/* interrupt remapping related */
	kmutex_t		immu_intrmap_lock;
	boolean_t		immu_intrmap_setup;
	boolean_t		immu_intrmap_running;
	intrmap_t		*immu_intrmap;
	uint64_t		immu_intrmap_irta_reg;	/* IRTA register value */

	/* queued invalidation related */
	kmutex_t		immu_qinv_lock;
	boolean_t		immu_qinv_setup;
	boolean_t		immu_qinv_running;
	boolean_t		immu_qinv_enabled;
	void			*immu_qinv;	/* opaque qinv state */
	uint64_t		immu_qinv_reg_value;

	/* list_node for system-wide list of DMAR units */
	list_node_t		immu_node;
} immu_t;
578 
/* properties that control DVMA */
#define	DDI_DVMA_MAPTYPE_PROP	"ddi-dvma-mapping"

/* property values */
#define	DDI_DVMA_MAPTYPE_UNITY	"unity"

/* how a domain maps DVMA: 1:1 (unity) or translated via pgtables */
typedef enum immu_maptype {
	IMMU_MAPTYPE_BAD = 0,    /* 0 is always bad */
	IMMU_MAPTYPE_UNITY = 1,
	IMMU_MAPTYPE_XLATE
} immu_maptype_t;
590 
/*
 * domain_t
 *	A DVMA translation domain: its domain-id (DID), owning IOMMU
 *	unit, DVMA arena, and the pgtable hierarchy backing it.
 */
typedef struct domain {
	/* the basics */
	uint_t			dom_did;	/* domain id */
	immu_t			*dom_immu;	/* owning IOMMU unit */

	/* mapping related */
	immu_maptype_t		dom_maptype;	/* unity or xlate */
	vmem_t			*dom_dvma_arena;	/* DVMA address allocator */
	char			dom_dvma_arena_name[IMMU_MAXNAMELEN];

	/* pgtables */
	pgtable_t		*dom_pgtable_root;	/* top-level pgtable */
	krwlock_t		dom_pgtable_rwlock;	/* protects pgtables */

	/* list of pgtables for this domain */
	list_t			dom_pglist;

	/* list node for list of domains (unity or xlate) */
	list_node_t		dom_maptype_node;
	/* list node for list of domains off immu */
	list_node_t		dom_immu_node;
} domain_t;
617 
/* classification of a devinfo node's position in the PCI fabric */
typedef enum immu_pcib {
	IMMU_PCIB_BAD = 0,	/* 0 is always bad */
	IMMU_PCIB_NOBDF,	/* no bus/dev/func */
	IMMU_PCIB_PCIE_PCIE,	/* PCIe to PCIe bridge */
	IMMU_PCIB_PCIE_PCI,	/* PCIe to PCI bridge */
	IMMU_PCIB_PCI_PCI,	/* PCI to PCI bridge */
	IMMU_PCIB_ENDPOINT	/* leaf device */
} immu_pcib_t;
626 
/*
 *  immu_devi_t
 *      Intel IOMMU private data hung off a devinfo node
 *      (see IMMU_DEVI/IMMU_DEVI_SET)
 */
typedef struct immu_devi {
	/* pci seg, bus, dev, func */
	int		imd_seg;
	int		imd_bus;
	int		imd_devfunc;	/* packed per IMMU_PCI_DEVFUNC() */

	/* ppb information */
	immu_pcib_t	imd_pcib_type;
	int		imd_sec;	/* presumably secondary bus - confirm */
	int		imd_sub;	/* presumably subordinate bus - confirm */

	/* identifier for special devices */
	boolean_t	imd_display;	/* graphics/display device */
	boolean_t	imd_lpc;	/* LPC device */

	/* dmar unit to which this dip belongs */
	immu_t		*imd_immu;

	/* domain ptr */
	domain_t	*imd_domain;
	dev_info_t	*imd_ddip;

	/* my devinfo */
	dev_info_t	*imd_dip;

	/*
	 * if we are a "special" devinfo
	 * the node for the special linked list
	 * off the DMAR unit structure
	 */
	list_node_t	imd_spc_node;
} immu_devi_t;
663 
/* fetch / store the immu_devi_t hanging off a devinfo node */
#define	IMMU_DEVI(dip)		((immu_devi_t *)(DEVI(dip)->devi_iommu))
#define	IMMU_DEVI_SET(dip, imd)	(DEVI(dip)->devi_iommu = (void *)imd)
666 
/*
 * immu_arg_t
 *	Argument bundle identifying a device: PCI seg/bus/devfunc plus
 *	the request devinfo (rdip) and its relevant ancestor (ddip).
 */
typedef struct immu_arg {
	int		ima_seg;
	int		ima_bus;
	int		ima_devfunc;	/* packed per IMMU_PCI_DEVFUNC() */
	dev_info_t	*ima_rdip;	/* requesting devinfo */
	dev_info_t	*ima_ddip;	/* relevant ancestor devinfo */
} immu_arg_t;
677 
678 /*
679  * Globals used by IOMMU code
680  */
681 /* shared between IOMMU files */
682 extern dev_info_t *root_devinfo;
683 extern kmutex_t immu_lock;
684 extern list_t immu_list;
685 extern boolean_t immu_setup;
686 extern boolean_t immu_running;
687 extern kmutex_t ioapic_drhd_lock;
688 extern list_t ioapic_drhd_list;
689 
690 /* switches */
691 
692 /* Various features */
693 extern boolean_t immu_enable;
694 extern boolean_t immu_dvma_enable;
695 extern boolean_t immu_gfxdvma_enable;
696 extern boolean_t immu_intrmap_enable;
697 extern boolean_t immu_qinv_enable;
698 extern boolean_t immu_mmio_safe;
699 
700 /* various quirks that need working around */
701 extern boolean_t immu_quirk_usbpage0;
702 extern boolean_t immu_quirk_usbfullpa;
703 extern boolean_t immu_quirk_usbrmrr;
704 extern boolean_t immu_quirk_mobile4;
705 
706 /* debug messages */
707 extern boolean_t immu_dmar_print;
708 
709 /* ################### Interfaces exported outside IOMMU code ############## */
710 void immu_init(void);
711 void immu_startup(void);
712 void immu_shutdown(void);
713 void immu_destroy(void);
714 int immu_map_sgl(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
715     int prealloc_count, dev_info_t *rdip);
716 int immu_unmap_sgl(ddi_dma_impl_t *hp, dev_info_t *rdip);
717 void immu_device_tree_changed(void);
718 void immu_physmem_update(uint64_t addr, uint64_t size);
719 int immu_quiesce(void);
720 int immu_unquiesce(void);
721 /* ######################################################################### */
722 
723 /* ################# Interfaces used within IOMMU code #################### */
724 
725 /* functions in rootnex.c */
726 int rootnex_dvcookies_alloc(ddi_dma_impl_t *hp,
727     struct ddi_dma_req *dmareq, dev_info_t *rdip, void *arg);
728 void rootnex_dvcookies_free(dvcookie_t *dvcookies, void *arg);
729 
730 /* immu_dmar.c interfaces */
731 int immu_dmar_setup(void);
732 int immu_dmar_parse(void);
733 void immu_dmar_startup(void);
734 void immu_dmar_shutdown(void);
735 void immu_dmar_destroy(void);
736 boolean_t immu_dmar_blacklisted(char **strings_array, uint_t nstrings);
737 immu_t *immu_dmar_get_immu(dev_info_t *rdip);
738 char *immu_dmar_unit_name(void *dmar_unit);
739 dev_info_t *immu_dmar_unit_dip(void *dmar_unit);
740 void immu_dmar_set_immu(void *dmar_unit, immu_t *immu);
741 void *immu_dmar_walk_units(int seg, void *dmar_unit);
742 boolean_t immu_dmar_intrmap_supported(void);
743 uint16_t immu_dmar_ioapic_sid(int ioapicid);
744 immu_t *immu_dmar_ioapic_immu(int ioapicid);
745 void immu_dmar_rmrr_map(void);
746 
747 /* immu.c interfaces */
748 int immu_walk_ancestor(dev_info_t *rdip, dev_info_t *ddip,
749     int (*func)(dev_info_t *, void *arg), void *arg,
750     int *level, immu_flags_t immu_flags);
751 
752 /* immu_regs.c interfaces */
753 void immu_regs_setup(list_t *immu_list);
754 void immu_regs_startup(immu_t *immu);
755 int immu_regs_resume(immu_t *immu);
756 void immu_regs_suspend(immu_t *immu);
757 void immu_regs_shutdown(immu_t *immu);
758 void immu_regs_destroy(list_t *immu_list);
759 
760 void immu_regs_intr(immu_t *immu, uint32_t msi_addr, uint32_t msi_data,
761     uint32_t uaddr);
762 
763 boolean_t immu_regs_passthru_supported(immu_t *immu);
764 boolean_t immu_regs_is_TM_reserved(immu_t *immu);
765 boolean_t immu_regs_is_SNP_reserved(immu_t *immu);
766 
767 void immu_regs_wbf_flush(immu_t *immu);
768 void immu_regs_cpu_flush(immu_t *immu, caddr_t addr, uint_t size);
769 void immu_regs_iotlb_flush(immu_t *immu, uint_t domainid, uint64_t dvma,
770     uint64_t count, uint_t hint, immu_iotlb_inv_t type);
771 void immu_regs_context_flush(immu_t *immu, uint8_t function_mask,
772     uint16_t source_id, uint_t did, immu_context_inv_t type);
773 void immu_regs_set_root_table(immu_t *immu);
774 void immu_regs_qinv_enable(immu_t *immu, uint64_t qinv_reg_value);
775 void immu_regs_intr_enable(immu_t *immu, uint32_t msi_addr, uint32_t msi_data,
776     uint32_t uaddr);
777 void immu_regs_intrmap_enable(immu_t *immu, uint64_t irta_reg);
778 uint64_t immu_regs_get64(immu_t *immu, uint_t reg);
779 void immu_regs_put64(immu_t *immu, uint_t reg, uint64_t val);
780 uint32_t immu_regs_get32(immu_t *immu, uint_t reg);
781 void immu_regs_put32(immu_t *immu, uint_t reg, uint32_t val);
782 
783 /* immu_dvma.c interfaces */
784 void immu_dvma_setup(list_t *immu_list);
785 void immu_dvma_startup(immu_t *immu);
786 void immu_dvma_shutdown(immu_t *immu);
787 void immu_dvma_destroy(list_t *immu_list);
788 
789 void immu_dvma_physmem_update(uint64_t addr, uint64_t size);
790 int immu_dvma_map(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, memrng_t *,
791     uint_t prealloc_count, dev_info_t *rdip, immu_flags_t immu_flags);
792 int immu_dvma_unmap(ddi_dma_impl_t *hp, dev_info_t *rdip);
793 int immu_dvma_alloc(dvcookie_t *first_dvcookie, void *arg);
794 void immu_dvma_free(dvcookie_t *first_dvcookie, void *arg);
795 int immu_devi_set(dev_info_t *dip, immu_flags_t immu_flags);
796 immu_devi_t *immu_devi_get(dev_info_t *dip);
797 immu_t *immu_dvma_get_immu(dev_info_t *dip, immu_flags_t immu_flags);
798 
799 
800 /* immu_intrmap.c interfaces */
801 void immu_intrmap_setup(list_t *immu_list);
802 void immu_intrmap_startup(immu_t *immu);
803 void immu_intrmap_shutdown(immu_t *immu);
804 void immu_intrmap_destroy(list_t *immu_list);
805 
806 /* registers interrupt handler for IOMMU unit */
807 void immu_intr_register(immu_t *immu);
808 int immu_intr_handler(immu_t *immu);
809 
810 
811 /* immu_qinv.c interfaces */
812 void immu_qinv_setup(list_t *immu_list);
813 void immu_qinv_startup(immu_t *immu);
814 void immu_qinv_shutdown(immu_t *immu);
815 void immu_qinv_destroy(list_t *immu_list);
816 
817 void immu_qinv_context_fsi(immu_t *immu, uint8_t function_mask,
818     uint16_t source_id, uint_t domain_id);
819 void immu_qinv_context_dsi(immu_t *immu, uint_t domain_id);
820 void immu_qinv_context_gbl(immu_t *immu);
821 void immu_qinv_iotlb_psi(immu_t *immu, uint_t domain_id,
822     uint64_t dvma, uint_t count, uint_t hint);
823 void immu_qinv_iotlb_dsi(immu_t *immu, uint_t domain_id);
824 void immu_qinv_iotlb_gbl(immu_t *immu);
825 void immu_qinv_intr_global(immu_t *immu);
826 void immu_qinv_intr_one_cache(immu_t *immu, uint_t idx);
827 void immu_qinv_intr_caches(immu_t *immu, uint_t idx, uint_t cnt);
828 void immu_qinv_report_fault(immu_t *immu);
829 
830 
831 #ifdef	__cplusplus
832 }
833 #endif
834 
835 #endif	/* _SYS_INTEL_IOMMU_H */
836