/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Portions Copyright (c) 2010, Oracle and/or its affiliates.
 * All rights reserved.
 */

/*
 * Copyright (c) 2008, Intel Corporation.
 * All rights reserved.
 */

#ifndef	_SYS_INTEL_IOMMU_H
#define	_SYS_INTEL_IOMMU_H

/*
 * Intel IOMMU implementation specific state
 */

#ifdef	__cplusplus
extern "C" {
#endif

#include <sys/types.h>
#include <sys/bitset.h>
#include <sys/kstat.h>
#include <sys/vmem.h>
#include <sys/rootnex.h>

/*
 * Some ON drivers have bugs. Keep this define until all such drivers
 * have been fixed
 */
#define	BUGGY_DRIVERS	1

/* PD(T)E entries */
typedef uint64_t hw_pdte_t;

#define	IMMU_MAXNAMELEN		(64)
#define	IMMU_MAXSEG		(1)
#define	IMMU_REGSZ		(1UL << 12)
#define	IMMU_PAGESIZE		(4096)
#define	IMMU_PAGESHIFT		(12)
#define	IMMU_PAGEOFFSET		(IMMU_PAGESIZE - 1)
#define	IMMU_PAGEMASK		(~IMMU_PAGEOFFSET)
#define	IMMU_BTOP(b)		(((uint64_t)b) >> IMMU_PAGESHIFT)
#define	IMMU_PTOB(p)		(((uint64_t)p) << IMMU_PAGESHIFT)
#define	IMMU_PGTABLE_MAX_LEVELS	(6)
#define	IMMU_ROUNDUP(size)	(((size) + IMMU_PAGEOFFSET) & ~IMMU_PAGEOFFSET)
#define	IMMU_ROUNDOWN(addr)	((addr) & ~IMMU_PAGEOFFSET)
#define	IMMU_PGTABLE_LEVEL_STRIDE	(9)
#define	IMMU_PGTABLE_LEVEL_MASK	((1<<IMMU_PGTABLE_LEVEL_STRIDE) - 1)
#define	IMMU_PGTABLE_OFFSHIFT	(IMMU_PAGESHIFT - IMMU_PGTABLE_LEVEL_STRIDE)
#define	IMMU_PGTABLE_MAXIDX	((IMMU_PAGESIZE / sizeof (hw_pdte_t)) - 1)
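
/*
 * Page-table geometry notes (derived from the macros above): a 4K page
 * of 8-byte PDTEs holds 512 entries, so each page-table level decodes
 * IMMU_PGTABLE_LEVEL_STRIDE (9) bits of DVMA.  Six levels plus the
 * 12-bit page offset cover a full 64-bit address space, hence
 * IMMU_PGTABLE_MAX_LEVELS is 6.  The index for a given level can be
 * extracted roughly as (illustrative only):
 *
 *	idx = (dvma >> (IMMU_PAGESHIFT +
 *	    (level - 1) * IMMU_PGTABLE_LEVEL_STRIDE)) &
 *	    IMMU_PGTABLE_LEVEL_MASK;
 */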

/*
 * DMAR global defines
 */
#define	DMAR_TABLE		"dmar-table"
#define	DMAR_INTRMAP_SUPPORT	(0x01)

/* DMAR unit types */
#define	DMAR_DRHD	0
#define	DMAR_RMRR	1
#define	DMAR_ATSR	2
#define	DMAR_RHSA	3

/* DRHD flag values */
#define	DMAR_INCLUDE_ALL	(0x01)

/* Device scope types */
#define	DMAR_ENDPOINT	1
#define	DMAR_SUBTREE	2
#define	DMAR_IOAPIC	3
#define	DMAR_HPET	4


/* Forward declarations for IOMMU state structure and DVMA domain struct */
struct immu;
struct domain;

/*
 * The following structures describe the format of the DMAR ACPI table and
 * are used to parse it. Read the spec for the meaning of each member.
 */

/* lengths of various strings */
#define	DMAR_SIG_LEN	(4)	/* table signature */
#define	DMAR_OEMID_LEN	(6)	/* OEM ID */
#define	DMAR_TBLID_LEN	(8)	/* OEM table ID */
#define	DMAR_ASL_LEN	(4)	/* ASL len */

typedef struct dmar_table {
	kmutex_t	tbl_lock;
	uint8_t		tbl_haw;
	boolean_t	tbl_intrmap;
	list_t		tbl_drhd_list[IMMU_MAXSEG];
	list_t		tbl_rmrr_list[IMMU_MAXSEG];
	char		*tbl_oem_id;
	char		*tbl_oem_tblid;
	uint32_t	tbl_oem_rev;
	caddr_t		tbl_raw;
	int		tbl_rawlen;
} dmar_table_t;

typedef struct drhd {
	kmutex_t	dr_lock;	/* protects the dmar field */
	struct immu	*dr_immu;
	dev_info_t	*dr_dip;
	uint16_t	dr_seg;
	uint64_t	dr_regs;
	boolean_t	dr_include_all;
	list_t		dr_scope_list;
	list_node_t	dr_node;
} drhd_t;

typedef struct rmrr {
	kmutex_t	rm_lock;
	uint16_t	rm_seg;
	uint64_t	rm_base;
	uint64_t	rm_limit;
	list_t		rm_scope_list;
	list_node_t	rm_node;
} rmrr_t;

/*
 * Macros based on PCI spec
 */
#define	IMMU_PCI_DEV(devfunc)	((uint64_t)devfunc >> 3)	/* get dev from devfunc */
#define	IMMU_PCI_FUNC(devfunc)	(devfunc & 7)		/* get func from devfunc */
#define	IMMU_PCI_DEVFUNC(d, f)	(((d) << 3) | (f))	/* create devfunc */

typedef struct scope {
	uint8_t		scp_type;
	uint8_t		scp_enumid;
	uint8_t		scp_bus;
	uint8_t		scp_dev;
	uint8_t		scp_func;
	list_node_t	scp_node;
} scope_t;

/*
 * interrupt source id and drhd info for ioapic
 */
typedef struct ioapic_drhd {
	uchar_t		ioapic_ioapicid;
	uint16_t	ioapic_sid;	/* ioapic source id */
	drhd_t		*ioapic_drhd;
	list_node_t	ioapic_node;
} ioapic_drhd_t;

typedef struct memrng {
	uint64_t	mrng_start;
	uint64_t	mrng_npages;
} memrng_t;

typedef enum immu_flags {
	IMMU_FLAGS_NONE = 0x1,
	IMMU_FLAGS_SLEEP = 0x1,
	IMMU_FLAGS_NOSLEEP = 0x2,
	IMMU_FLAGS_READ = 0x4,
	IMMU_FLAGS_WRITE = 0x8,
	IMMU_FLAGS_DONTPASS = 0x10,
	IMMU_FLAGS_ALLOC = 0x20,
	IMMU_FLAGS_MUST_MATCH = 0x40,
	IMMU_FLAGS_PAGE1 = 0x80,
	IMMU_FLAGS_UNITY = 0x100,
	IMMU_FLAGS_DMAHDL = 0x200,
	IMMU_FLAGS_MEMRNG = 0x400
} immu_flags_t;

typedef enum cont_avail {
	IMMU_CONT_BAD = 0x0,
	IMMU_CONT_UNINITED = 0x1,
	IMMU_CONT_INITED = 0x2
} cont_avail_t;

/* Size of root and context tables and their entries */
#define	IMMU_ROOT_TBLSZ		(4096)
#define	IMMU_CONT_TBLSZ		(4096)
#define	IMMU_ROOT_NUM		(256)
#define	IMMU_CONT_NUM		(256)

/* register offsets */
#define	IMMU_REG_VERSION	(0x00)	/* Version Register, 32 bit */
#define	IMMU_REG_CAP		(0x08)	/* Capability Register, 64 bit */
#define	IMMU_REG_EXCAP		(0x10)	/* Extended Capability Reg, 64 bit */
#define	IMMU_REG_GLOBAL_CMD	(0x18)	/* Global Command Register, 32 bit */
#define	IMMU_REG_GLOBAL_STS	(0x1C)	/* Global Status Register, 32 bit */
#define	IMMU_REG_ROOTENTRY	(0x20)	/* Root-Entry Table Addr Reg, 64 bit */
#define	IMMU_REG_CONTEXT_CMD	(0x28)	/* Context Command Register, 64 bit */
#define	IMMU_REG_FAULT_STS	(0x34)	/* Fault Status Register, 32 bit */
#define	IMMU_REG_FEVNT_CON	(0x38)	/* Fault Event Control Reg, 32 bit */
#define	IMMU_REG_FEVNT_DATA	(0x3C)	/* Fault Event Data Register, 32 bit */
#define	IMMU_REG_FEVNT_ADDR	(0x40)	/* Fault Event Address Reg, 32 bit */
#define	IMMU_REG_FEVNT_UADDR	(0x44)	/* Fault Event Upper Addr Reg, 32 bit */
#define	IMMU_REG_AFAULT_LOG	(0x58)	/* Advanced Fault Log Reg, 64 bit */
#define	IMMU_REG_PMER		(0x64)	/* Protected Memory Enable Reg, 32 bit */
#define	IMMU_REG_PLMBR		(0x68)	/* Protected Low Mem Base Reg, 32 bit */
#define	IMMU_REG_PLMLR		(0x6C)	/* Protected Low Mem Lim Reg, 32 bit */
#define	IMMU_REG_PHMBR		(0x70)	/* Protected High Mem Base Reg, 64 bit */
#define	IMMU_REG_PHMLR		(0x78)	/* Protected High Mem Lim Reg, 64 bit */
#define	IMMU_REG_INVAL_QH	(0x80)	/* Invalidation Queue Head, 64 bit */
#define	IMMU_REG_INVAL_QT	(0x88)	/* Invalidation Queue Tail, 64 bit */
#define	IMMU_REG_INVAL_QAR	(0x90)	/* Invalidation Queue Addr Reg, 64 bit */
#define	IMMU_REG_INVAL_CSR	(0x9C)	/* Inval Compl Status Reg, 32 bit */
#define	IMMU_REG_INVAL_CECR	(0xA0)	/* Inval Compl Evnt Ctrl Reg, 32 bit */
#define	IMMU_REG_INVAL_CEDR	(0xA4)	/* Inval Compl Evnt Data Reg, 32 bit */
#define	IMMU_REG_INVAL_CEAR	(0xA8)	/* Inval Compl Event Addr Reg, 32 bit */
#define	IMMU_REG_INVAL_CEUAR	(0xAC)	/* Inval Compl Evnt Up Addr Reg, 32 bit */
#define	IMMU_REG_IRTAR		(0xB8)	/* INTR Remap Tbl Addr Reg, 64 bit */
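
/*
 * These offsets are byte offsets from an IOMMU unit's mapped register
 * base (immu_regs_addr below).  A typical (illustrative) access goes
 * through the accessors declared at the end of this file, e.g.:
 *
 *	uint64_t cap = immu_regs_get64(immu, IMMU_REG_CAP);
 *	uint64_t excap = immu_regs_get64(immu, IMMU_REG_EXCAP);
 */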

/* ioapic memory region */
#define	IOAPIC_REGION_START	(0xfee00000)
#define	IOAPIC_REGION_END	(0xfeefffff)

/* fault register */
#define	IMMU_FAULT_STS_PPF	(2)
#define	IMMU_FAULT_STS_PFO	(1)
#define	IMMU_FAULT_STS_ITE	(1 << 6)
#define	IMMU_FAULT_STS_ICE	(1 << 5)
#define	IMMU_FAULT_STS_IQE	(1 << 4)
#define	IMMU_FAULT_GET_INDEX(x)	((((uint64_t)x) >> 8) & 0xff)
#define	IMMU_FRR_GET_F(x)	(((uint64_t)x) >> 63)
#define	IMMU_FRR_GET_FR(x)	((((uint64_t)x) >> 32) & 0xff)
#define	IMMU_FRR_GET_FT(x)	((((uint64_t)x) >> 62) & 0x1)
#define	IMMU_FRR_GET_SID(x)	((x) & 0xffff)

/* (ex)capability register */
#define	IMMU_CAP_GET_NFR(x)	(((((uint64_t)x) >> 40) & 0xff) + 1)
#define	IMMU_CAP_GET_DWD(x)	((((uint64_t)x) >> 54) & 1)
#define	IMMU_CAP_GET_DRD(x)	((((uint64_t)x) >> 55) & 1)
#define	IMMU_CAP_GET_PSI(x)	((((uint64_t)x) >> 39) & 1)
#define	IMMU_CAP_GET_SPS(x)	((((uint64_t)x) >> 34) & 0xf)
#define	IMMU_CAP_GET_ISOCH(x)	((((uint64_t)x) >> 23) & 1)
#define	IMMU_CAP_GET_ZLR(x)	((((uint64_t)x) >> 22) & 1)
#define	IMMU_CAP_GET_MAMV(x)	((((uint64_t)x) >> 48) & 0x3f)
#define	IMMU_CAP_GET_CM(x)	((((uint64_t)x) >> 7) & 1)
#define	IMMU_CAP_GET_PHMR(x)	((((uint64_t)x) >> 6) & 1)
#define	IMMU_CAP_GET_PLMR(x)	((((uint64_t)x) >> 5) & 1)
#define	IMMU_CAP_GET_RWBF(x)	((((uint64_t)x) >> 4) & 1)
#define	IMMU_CAP_GET_AFL(x)	((((uint64_t)x) >> 3) & 1)
#define	IMMU_CAP_GET_FRO(x)	(((((uint64_t)x) >> 24) & 0x3ff) * 16)
#define	IMMU_CAP_MGAW(x)	(((((uint64_t)x) >> 16) & 0x3f) + 1)
#define	IMMU_CAP_SAGAW(x)	((((uint64_t)x) >> 8) & 0x1f)
#define	IMMU_CAP_ND(x)		((1 << (((x) & 0x7) * 2 + 4)) - 1)
#define	IMMU_ECAP_GET_IRO(x)	(((((uint64_t)x) >> 8) & 0x3ff) << 4)
#define	IMMU_ECAP_GET_MHMV(x)	(((uint64_t)x >> 20) & 0xf)
#define	IMMU_ECAP_GET_SC(x)	((x) & 0x80)
#define	IMMU_ECAP_GET_PT(x)	((x) & 0x40)
#define	IMMU_ECAP_GET_CH(x)	((x) & 0x20)
#define	IMMU_ECAP_GET_EIM(x)	((x) & 0x10)
#define	IMMU_ECAP_GET_IR(x)	((x) & 0x8)
#define	IMMU_ECAP_GET_DI(x)	((x) & 0x4)
#define	IMMU_ECAP_GET_QI(x)	((x) & 0x2)
#define	IMMU_ECAP_GET_C(x)	((x) & 0x1)

#define	IMMU_CAP_SET_RWBF(x)	((x) |= (1 << 4))
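
/*
 * Example of how the capability fields decode (illustration only):
 * if CAP.ND (bits 2:0) reads 2, the unit supports 2^(2*2+4) = 256
 * domain IDs, so IMMU_CAP_ND() yields 255, the largest usable domain
 * ID.  Similarly, IMMU_CAP_MGAW() adds 1 to the raw 6-bit field to
 * give the maximum guest address width in bits.
 */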

/* iotlb invalidation */
#define	TLB_INV_GLOBAL		(((uint64_t)1) << 60)
#define	TLB_INV_DOMAIN		(((uint64_t)2) << 60)
#define	TLB_INV_PAGE		(((uint64_t)3) << 60)
#define	TLB_INV_GET_IAIG(x)	((((uint64_t)x) >> 57) & 7)
#define	TLB_INV_DRAIN_READ	(((uint64_t)1) << 49)
#define	TLB_INV_DRAIN_WRITE	(((uint64_t)1) << 48)
#define	TLB_INV_DID(x)		(((uint64_t)((x) & 0xffff)) << 32)
#define	TLB_INV_IVT		(((uint64_t)1) << 63)
#define	TLB_IVA_HINT(x)		(((x) & 0x1) << 6)
#define	TLB_IVA_LEAF		1
#define	TLB_IVA_WHOLE		0
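
/*
 * Illustrative sketch of how an IOTLB invalidation command might be
 * composed from the bits above (a domain-selective flush with read and
 * write drain); the actual sequencing is handled by the register code
 * (see immu_regs_iotlb_flush() below):
 *
 *	uint64_t cmd = TLB_INV_IVT | TLB_INV_DOMAIN |
 *	    TLB_INV_DID(domainid) | TLB_INV_DRAIN_READ |
 *	    TLB_INV_DRAIN_WRITE;
 */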

/* don't use value 0 for enums - to catch uninitialized use */
typedef enum iotlb_inv {
	IOTLB_PSI = 1,
	IOTLB_DSI,
	IOTLB_GLOBAL
} immu_iotlb_inv_t;

typedef enum context_inv {
	CONTEXT_FSI = 1,
	CONTEXT_DSI,
	CONTEXT_GLOBAL
} immu_context_inv_t;

/* context invalidation */
#define	CCMD_INV_ICC		(((uint64_t)1) << 63)
#define	CCMD_INV_GLOBAL		(((uint64_t)1) << 61)
#define	CCMD_INV_DOMAIN		(((uint64_t)2) << 61)
#define	CCMD_INV_DEVICE		(((uint64_t)3) << 61)
#define	CCMD_INV_DID(x)		((uint64_t)((x) & 0xffff))
#define	CCMD_INV_SID(x)		(((uint64_t)((x) & 0xffff)) << 16)
#define	CCMD_INV_FM(x)		(((uint64_t)((x) & 0x3)) << 32)

/* global command register */
#define	IMMU_GCMD_TE		(((uint32_t)1) << 31)
#define	IMMU_GCMD_SRTP		(((uint32_t)1) << 30)
#define	IMMU_GCMD_SFL		(((uint32_t)1) << 29)
#define	IMMU_GCMD_EAFL		(((uint32_t)1) << 28)
#define	IMMU_GCMD_WBF		(((uint32_t)1) << 27)
#define	IMMU_GCMD_QIE		(((uint32_t)1) << 26)
#define	IMMU_GCMD_IRE		(((uint32_t)1) << 25)
#define	IMMU_GCMD_SIRTP		(((uint32_t)1) << 24)
#define	IMMU_GCMD_CFI		(((uint32_t)1) << 23)

/* global status register */
#define	IMMU_GSTS_TES		(((uint32_t)1) << 31)
#define	IMMU_GSTS_RTPS		(((uint32_t)1) << 30)
#define	IMMU_GSTS_FLS		(((uint32_t)1) << 29)
#define	IMMU_GSTS_AFLS		(((uint32_t)1) << 28)
#define	IMMU_GSTS_WBFS		(((uint32_t)1) << 27)
#define	IMMU_GSTS_QIES		(((uint32_t)1) << 26)
#define	IMMU_GSTS_IRES		(((uint32_t)1) << 25)
#define	IMMU_GSTS_IRTPS		(((uint32_t)1) << 24)
#define	IMMU_GSTS_CFIS		(((uint32_t)1) << 23)

/* psi address mask */
#define	ADDR_AM_MAX(m)		(((uint_t)1) << (m))
#define	ADDR_AM_OFFSET(n, m)	((n) & (ADDR_AM_MAX(m) - 1))

/* dmar fault event */
#define	IMMU_INTR_IPL			(4)
#define	IMMU_REG_FEVNT_CON_IM_SHIFT	(31)

#define	IMMU_ALLOC_RESOURCE_DELAY	(drv_usectohz(5000))

/* max value of Size field of Interrupt Remapping Table Address Register */
#define	INTRMAP_MAX_IRTA_SIZE	0xf

/* interrupt remapping table entry size */
#define	INTRMAP_RTE_SIZE	0x10

/* ioapic redirection table entry related shifts of remappable interrupt */
#define	INTRMAP_IOAPIC_IDX_SHIFT	17
#define	INTRMAP_IOAPIC_FORMAT_SHIFT	16
#define	INTRMAP_IOAPIC_TM_SHIFT		15
#define	INTRMAP_IOAPIC_POL_SHIFT	13
#define	INTRMAP_IOAPIC_IDX15_SHIFT	11

/* msi intr entry related shifts of remappable interrupt */
#define	INTRMAP_MSI_IDX_SHIFT		5
#define	INTRMAP_MSI_FORMAT_SHIFT	4
#define	INTRMAP_MSI_SHV_SHIFT		3
#define	INTRMAP_MSI_IDX15_SHIFT		2

#define	INTRMAP_IDX_FULL	(uint_t)-1

#define	RDT_DLM(rdt)	BITX((rdt), 10, 8)
#define	RDT_DM(rdt)	BT_TEST(&(rdt), 11)
#define	RDT_POL(rdt)	BT_TEST(&(rdt), 13)
#define	RDT_TM(rdt)	BT_TEST(&(rdt), 15)

#define	INTRMAP_DISABLE	(void *)-1

/*
 * invalidation granularity
 */
typedef enum {
	TLB_INV_G_GLOBAL = 1,
	TLB_INV_G_DOMAIN,
	TLB_INV_G_PAGE
} tlb_inv_g_t;

typedef enum {
	CTT_INV_G_GLOBAL = 1,
	CTT_INV_G_DOMAIN,
	CTT_INV_G_DEVICE
} ctt_inv_g_t;

typedef enum {
	IEC_INV_GLOBAL = 0,
	IEC_INV_INDEX
} iec_inv_g_t;


struct inv_queue_state;
struct intrmap_tbl_state;

/* A software page table structure */
typedef struct pgtable {
	krwlock_t	swpg_rwlock;
	caddr_t		hwpg_vaddr;	/* HW pgtable VA */
	paddr_t		hwpg_paddr;	/* HW pgtable PA */
	ddi_dma_handle_t	hwpg_dmahdl;
	ddi_acc_handle_t	hwpg_memhdl;
	struct pgtable	**swpg_next_array;
	list_node_t	swpg_domain_node;	/* domain list of pgtables */
} pgtable_t;

/* interrupt remapping table state info */
typedef struct intrmap {
	kmutex_t	intrmap_lock;
	ddi_dma_handle_t	intrmap_dma_hdl;
	ddi_acc_handle_t	intrmap_acc_hdl;
	caddr_t		intrmap_vaddr;
	paddr_t		intrmap_paddr;
	uint_t		intrmap_size;
	bitset_t	intrmap_map;
	uint_t		intrmap_free;
} intrmap_t;

typedef struct hw_rce {
	uint64_t lo;
	uint64_t hi;
} hw_rce_t;


#define	ROOT_GET_P(hrent)	((hrent)->lo & 0x1)
#define	ROOT_SET_P(hrent)	((hrent)->lo |= 0x1)

#define	ROOT_GET_CONT(hrent)	((hrent)->lo & ~(0xFFF))
#define	ROOT_SET_CONT(hrent, paddr)	((hrent)->lo |= (paddr & (~0xFFF)))

#define	TTYPE_XLATE_ONLY	(0x0)
#define	TTYPE_XLATE_IOTLB	(0x1)
#define	TTYPE_PASSTHRU		(0x2)
#define	TTYPE_RESERVED		(0x3)

#define	CONT_GET_DID(hcent)	((((uint64_t)(hcent)->hi) >> 8) & 0xFFFF)
#define	CONT_SET_DID(hcent, did)	((hcent)->hi |= ((0xFFFF & (did)) << 8))

#define	CONT_GET_AVAIL(hcent)	((((uint64_t)((hcent)->hi)) >> 0x3) & 0xF)
#define	CONT_SET_AVAIL(hcent, av)	((hcent)->hi |= ((0xF & (av)) << 0x3))

#define	CONT_GET_LO_AW(hcent)	(30 + 9 * ((hcent)->hi & 0x7))
#define	CONT_GET_AW(hcent)	\
	((CONT_GET_LO_AW(hcent) == 66) ? 64 : CONT_GET_LO_AW(hcent))
#define	CONT_SET_AW(hcent, aw)	\
	((hcent)->hi |= (((((aw) + 2) - 30) / 9) & 0x7))

#define	CONT_GET_ASR(hcent)	((hcent)->lo & ~(0xFFF))
#define	CONT_SET_ASR(hcent, paddr)	((hcent)->lo |= (paddr & (~0xFFF)))

#define	CONT_GET_TTYPE(hcent)	((((uint64_t)(hcent)->lo) >> 0x2) & 0x3)
#define	CONT_SET_TTYPE(hcent, ttype)	((hcent)->lo |= (((ttype) & 0x3) << 0x2))

#define	CONT_GET_P(hcent)	((hcent)->lo & 0x1)
#define	CONT_SET_P(hcent)	((hcent)->lo |= 0x1)
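
/*
 * Note on the context-entry address width (AW) field: the raw 3-bit
 * value encodes 30 + 9*AW bits of guest address space, so 1 means a
 * 39-bit/3-level page table and 2 means 48-bit/4-level.  CONT_GET_AW()
 * caps the 66-bit encoding at 64, and CONT_SET_AW() performs the
 * reverse conversion from an address width in bits.
 */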

/* we use bit 63 (available for system SW) as a present bit */
#define	PDTE_SW4(hw_pdte)	((hw_pdte) & ((uint64_t)1<<63))
#define	PDTE_CLEAR_SW4(hw_pdte)	((hw_pdte) &= ~((uint64_t)1<<63))

#define	PDTE_P(hw_pdte)		((hw_pdte) & ((uint64_t)1<<63))
#define	PDTE_CLEAR_P(hw_pdte)	((hw_pdte) &= ~((uint64_t)1<<63))
#define	PDTE_SET_P(hw_pdte)	((hw_pdte) |= ((uint64_t)1<<63))

#define	PDTE_TM(hw_pdte)	((hw_pdte) & ((uint64_t)1<<62))
#define	PDTE_CLEAR_TM(hw_pdte)	((hw_pdte) &= ~((uint64_t)1<<62))

#define	PDTE_SW3(hw_pdte)	\
	(((hw_pdte) & ~(((uint64_t)0x3<<62)|(((uint64_t)1<<52)-1))) >> 52)
#define	PDTE_SW3_OVERFLOW(hw_pdte)	\
	(PDTE_SW3(hw_pdte) == 0x3FF)
#define	PDTE_CLEAR_SW3(hw_pdte)	\
	((hw_pdte) &= (((uint64_t)0x3<<62)|(((uint64_t)1<<52)-1)))
#define	PDTE_SET_SW3(hw_pdte, ref)	\
	((hw_pdte) |= ((((uint64_t)(ref)) & 0x3FF) << 52))

#define	PDTE_PADDR(hw_pdte)	((hw_pdte) & ~(((uint64_t)0xFFF<<52)|((1<<12)-1)))
#define	PDTE_CLEAR_PADDR(hw_pdte)	\
	((hw_pdte) &= (((uint64_t)0xFFF<<52)|((1<<12)-1)))
#define	PDTE_SET_PADDR(hw_pdte, paddr)	((hw_pdte) |= PDTE_PADDR(paddr))

#define	PDTE_SNP(hw_pdte)	((hw_pdte) & (1<<11))
#define	PDTE_CLEAR_SNP(hw_pdte)	((hw_pdte) &= ~(1<<11))
#define	PDTE_SET_SNP(hw_pdte)	((hw_pdte) |= (1<<11))

#define	PDTE_SW2(hw_pdte)	((hw_pdte) & (0x700))
#define	PDTE_CLEAR_SW2(hw_pdte)	((hw_pdte) &= ~(0x700))

#define	PDTE_SP(hw_pdte)	((hw_pdte) & (0x80))
#define	PDTE_CLEAR_SP(hw_pdte)	((hw_pdte) &= ~(0x80))

#define	PDTE_SW1(hw_pdte)	((hw_pdte) & (0x7C))
#define	PDTE_CLEAR_SW1(hw_pdte)	((hw_pdte) &= ~(0x7C))

#define	PDTE_WRITE(hw_pdte)	((hw_pdte) & (0x2))
#define	PDTE_CLEAR_WRITE(hw_pdte)	((hw_pdte) &= ~(0x2))
#define	PDTE_SET_WRITE(hw_pdte)	((hw_pdte) |= (0x2))

#define	PDTE_READ(hw_pdte)	((hw_pdte) & (0x1))
#define	PDTE_CLEAR_READ(hw_pdte)	((hw_pdte) &= ~(0x1))
#define	PDTE_SET_READ(hw_pdte)	((hw_pdte) |= (0x1))
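
/*
 * A minimal, illustrative sketch of how a leaf PDTE is built up with
 * the accessors above (the real work happens in the DVMA mapping code
 * when a translation is established):
 *
 *	hw_pdte_t pte = 0;
 *	PDTE_SET_PADDR(pte, paddr);
 *	PDTE_SET_READ(pte);
 *	PDTE_SET_WRITE(pte);
 *	PDTE_SET_P(pte);
 */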

typedef struct immu {
	kmutex_t	immu_lock;
	char		*immu_name;

	/* lock grabbed by interrupt handler */
	kmutex_t	immu_intr_lock;

	/* ACPI/DMAR table related */
	void		*immu_dmar_unit;
	dev_info_t	*immu_dip;
	struct domain	*immu_unity_domain;

	/* IOMMU register related */
	kmutex_t	immu_regs_lock;
	kcondvar_t	immu_regs_cv;
	boolean_t	immu_regs_busy;
	boolean_t	immu_regs_setup;
	boolean_t	immu_regs_running;
	boolean_t	immu_regs_quiesced;
	ddi_acc_handle_t	immu_regs_handle;
	caddr_t		immu_regs_addr;
	uint64_t	immu_regs_cap;
	uint64_t	immu_regs_excap;
	uint32_t	immu_regs_cmdval;
	uint32_t	immu_regs_intr_msi_addr;
	uint32_t	immu_regs_intr_msi_data;
	uint32_t	immu_regs_intr_uaddr;

	/* DVMA related */
	kmutex_t	immu_dvma_lock;
	boolean_t	immu_dvma_setup;
	boolean_t	immu_dvma_running;
	int		immu_dvma_gaw;
	int		immu_dvma_agaw;
	int		immu_dvma_nlevels;
	boolean_t	immu_dvma_coherent;
	boolean_t	immu_TM_reserved;
	boolean_t	immu_SNP_reserved;

	/* DVMA context related */
	krwlock_t	immu_ctx_rwlock;
	pgtable_t	*immu_ctx_root;

	/* DVMA domain related */
	int		immu_max_domains;
	vmem_t		*immu_did_arena;
	char		immu_did_arena_name[IMMU_MAXNAMELEN];
	list_t		immu_domain_list;

	/* DVMA special devices */
	boolean_t	immu_dvma_gfx_only;
	list_t		immu_dvma_lpc_list;
	list_t		immu_dvma_gfx_list;

	/* interrupt remapping related */
	kmutex_t	immu_intrmap_lock;
	boolean_t	immu_intrmap_setup;
	boolean_t	immu_intrmap_running;
	intrmap_t	*immu_intrmap;
	uint64_t	immu_intrmap_irta_reg;

	/* queued invalidation related */
	kmutex_t	immu_qinv_lock;
	boolean_t	immu_qinv_setup;
	boolean_t	immu_qinv_running;
	boolean_t	immu_qinv_enabled;
	void		*immu_qinv;
	uint64_t	immu_qinv_reg_value;

	/* list_node for system-wide list of DMAR units */
	list_node_t	immu_node;
} immu_t;

/* properties that control DVMA */
#define	DDI_DVMA_MAPTYPE_ROOTNEX_PROP	"immu-dvma-mapping"

#define	DDI_DVMA_MAPTYPE_UNITY		"unity"
#define	DDI_DVMA_MAPTYPE_XLATE		"xlate"

typedef enum immu_maptype {
	IMMU_MAPTYPE_BAD = 0,	/* 0 is always bad */
	IMMU_MAPTYPE_UNITY = 1,
	IMMU_MAPTYPE_XLATE
} immu_maptype_t;

#define	IMMU_COOKIE_HASHSZ	(512)

/*
 * domain_t
 */
typedef struct domain {
	/* the basics */
	uint_t		dom_did;
	immu_t		*dom_immu;

	/* mapping related */
	immu_maptype_t	dom_maptype;
	vmem_t		*dom_dvma_arena;
	char		dom_dvma_arena_name[IMMU_MAXNAMELEN];

	/* pgtables */
	pgtable_t	*dom_pgtable_root;
	krwlock_t	dom_pgtable_rwlock;

	/* list node for list of domains (unity or xlate) */
	list_node_t	dom_maptype_node;
	/* list node for list of domains off immu */
	list_node_t	dom_immu_node;

	mod_hash_t	*dom_cookie_hash;
} domain_t;

typedef enum immu_pcib {
	IMMU_PCIB_BAD = 0,
	IMMU_PCIB_NOBDF,
	IMMU_PCIB_PCIE_PCIE,
	IMMU_PCIB_PCIE_PCI,
	IMMU_PCIB_PCI_PCI,
	IMMU_PCIB_ENDPOINT
} immu_pcib_t;

/*
 * immu_devi_t
 *	Intel IOMMU state in a devinfo node
 */
typedef struct immu_devi {
	/* pci seg, bus, dev, func */
	int		imd_seg;
	int		imd_bus;
	int		imd_devfunc;

	/* ppb information */
	immu_pcib_t	imd_pcib_type;
	int		imd_sec;
	int		imd_sub;

	/* identifier for special devices */
	boolean_t	imd_display;
	boolean_t	imd_lpc;

	/* dmar unit to which this dip belongs */
	immu_t		*imd_immu;

	immu_flags_t	imd_dvma_flags;

	/* domain ptr */
	domain_t	*imd_domain;
	dev_info_t	*imd_ddip;

	/* my devinfo */
	dev_info_t	*imd_dip;

	/*
	 * if we are a "special" devinfo
	 * the node for the special linked list
	 * off the DMAR unit structure
	 */
	list_node_t	imd_spc_node;
} immu_devi_t;

#define	IMMU_DEVI(dip)		((immu_devi_t *)(DEVI(dip)->devi_iommu))
#define	IMMU_DEVI_SET(dip, imd)	(DEVI(dip)->devi_iommu = (void *)imd)
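
/*
 * The immu_devi_t hangs off a devinfo node's devi_iommu field.  Code
 * typically calls immu_devi_set() (declared below) to populate it and
 * then uses IMMU_DEVI(dip) or immu_devi_get(dip) for lookups, e.g.:
 *
 *	immu_devi_t *imd = immu_devi_get(rdip);
 */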

/*
 * struct immu_arg
 */
typedef struct immu_arg {
	int		ima_seg;
	int		ima_bus;
	int		ima_devfunc;
	dev_info_t	*ima_rdip;
	dev_info_t	*ima_ddip;
} immu_arg_t;

/*
 * Globals used by IOMMU code
 */
/* shared between IOMMU files */
extern dev_info_t *root_devinfo;
extern kmutex_t immu_lock;
extern list_t immu_list;
extern void *immu_pgtable_cache;
extern boolean_t immu_setup;
extern boolean_t immu_running;
extern kmutex_t ioapic_drhd_lock;
extern list_t ioapic_drhd_list;

/* switches */

/* Various features */
extern boolean_t immu_enable;
extern boolean_t immu_gfxdvma_enable;
extern boolean_t immu_intrmap_enable;
extern boolean_t immu_qinv_enable;

/* various quirks that need working around */
extern boolean_t immu_quirk_usbpage0;
extern boolean_t immu_quirk_usbfullpa;
extern boolean_t immu_quirk_usbrmrr;
extern boolean_t immu_quirk_mobile4;

/* debug messages */
extern boolean_t immu_dmar_print;

/* tunables */
extern int64_t immu_flush_gran;

extern immu_flags_t immu_global_dvma_flags;

/* ################### Interfaces exported outside IOMMU code ############## */
void immu_init(void);
void immu_startup(void);
void immu_shutdown(void);
void immu_destroy(void);
int immu_map_sgl(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq,
    int prealloc_count, dev_info_t *rdip);
int immu_unmap_sgl(ddi_dma_impl_t *hp, dev_info_t *rdip);
void immu_device_tree_changed(void);
void immu_physmem_update(uint64_t addr, uint64_t size);
int immu_quiesce(void);
int immu_unquiesce(void);
/* ######################################################################### */
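
/*
 * Rough lifecycle, as suggested by the interface names (the details
 * live in the implementation, not in this header): immu_init() and
 * immu_startup() bring the IOMMU units up during boot,
 * immu_quiesce()/immu_unquiesce() bracket suspend/resume, and
 * immu_shutdown()/immu_destroy() tear the state down.
 * immu_map_sgl()/immu_unmap_sgl() appear to be the hooks used from the
 * rootnex DMA bind/unbind path.
 */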

/* ################# Interfaces used within IOMMU code #################### */

/* functions in rootnex.c */
int rootnex_dvcookies_alloc(ddi_dma_impl_t *hp,
    struct ddi_dma_req *dmareq, dev_info_t *rdip, void *arg);
void rootnex_dvcookies_free(dvcookie_t *dvcookies, void *arg);

/* immu_dmar.c interfaces */
int immu_dmar_setup(void);
int immu_dmar_parse(void);
void immu_dmar_startup(void);
void immu_dmar_shutdown(void);
void immu_dmar_destroy(void);
boolean_t immu_dmar_blacklisted(char **strings_array, uint_t nstrings);
immu_t *immu_dmar_get_immu(dev_info_t *rdip);
char *immu_dmar_unit_name(void *dmar_unit);
dev_info_t *immu_dmar_unit_dip(void *dmar_unit);
void immu_dmar_set_immu(void *dmar_unit, immu_t *immu);
void *immu_dmar_walk_units(int seg, void *dmar_unit);
boolean_t immu_dmar_intrmap_supported(void);
uint16_t immu_dmar_ioapic_sid(int ioapicid);
immu_t *immu_dmar_ioapic_immu(int ioapicid);
void immu_dmar_rmrr_map(void);

/* immu.c interfaces */
int immu_walk_ancestor(dev_info_t *rdip, dev_info_t *ddip,
    int (*func)(dev_info_t *, void *arg), void *arg,
    int *level, immu_flags_t immu_flags);

/* immu_regs.c interfaces */
void immu_regs_setup(list_t *immu_list);
void immu_regs_startup(immu_t *immu);
int immu_regs_resume(immu_t *immu);
void immu_regs_suspend(immu_t *immu);
void immu_regs_shutdown(immu_t *immu);
void immu_regs_destroy(list_t *immu_list);

void immu_regs_intr(immu_t *immu, uint32_t msi_addr, uint32_t msi_data,
    uint32_t uaddr);

boolean_t immu_regs_passthru_supported(immu_t *immu);
boolean_t immu_regs_is_TM_reserved(immu_t *immu);
boolean_t immu_regs_is_SNP_reserved(immu_t *immu);

void immu_regs_wbf_flush(immu_t *immu);
void immu_regs_cpu_flush(immu_t *immu, caddr_t addr, uint_t size);
void immu_regs_iotlb_flush(immu_t *immu, uint_t domainid, uint64_t dvma,
    uint64_t count, uint_t hint, immu_iotlb_inv_t type);
void immu_regs_context_flush(immu_t *immu, uint8_t function_mask,
    uint16_t source_id, uint_t did, immu_context_inv_t type);
void immu_regs_set_root_table(immu_t *immu);
void immu_regs_qinv_enable(immu_t *immu, uint64_t qinv_reg_value);
void immu_regs_intr_enable(immu_t *immu, uint32_t msi_addr, uint32_t msi_data,
    uint32_t uaddr);
void immu_regs_intrmap_enable(immu_t *immu, uint64_t irta_reg);
uint64_t immu_regs_get64(immu_t *immu, uint_t reg);
void immu_regs_put64(immu_t *immu, uint_t reg, uint64_t val);
uint32_t immu_regs_get32(immu_t *immu, uint_t reg);
void immu_regs_put32(immu_t *immu, uint_t reg, uint32_t val);

/* immu_dvma.c interfaces */
void immu_dvma_setup(list_t *immu_list);
void immu_dvma_startup(immu_t *immu);
void immu_dvma_shutdown(immu_t *immu);
void immu_dvma_destroy(list_t *immu_list);

void immu_dvma_physmem_update(uint64_t addr, uint64_t size);
int immu_dvma_map(ddi_dma_impl_t *hp, struct ddi_dma_req *dmareq, memrng_t *,
    uint_t prealloc_count, dev_info_t *rdip, immu_flags_t immu_flags);
int immu_dvma_unmap(ddi_dma_impl_t *hp, dev_info_t *rdip);
int immu_dvma_alloc(dvcookie_t *first_dvcookie, void *arg);
void immu_dvma_free(dvcookie_t *first_dvcookie, void *arg);
int immu_devi_set(dev_info_t *dip, immu_flags_t immu_flags);
immu_devi_t *immu_devi_get(dev_info_t *dip);
immu_t *immu_dvma_get_immu(dev_info_t *dip, immu_flags_t immu_flags);
int pgtable_ctor(void *buf, void *arg, int kmflag);
void pgtable_dtor(void *buf, void *arg);

/* immu_intrmap.c interfaces */
void immu_intrmap_setup(list_t *immu_list);
void immu_intrmap_startup(immu_t *immu);
void immu_intrmap_shutdown(immu_t *immu);
void immu_intrmap_destroy(list_t *immu_list);

/* registers interrupt handler for IOMMU unit */
void immu_intr_register(immu_t *immu);
int immu_intr_handler(immu_t *immu);


/* immu_qinv.c interfaces */
void immu_qinv_setup(list_t *immu_list);
void immu_qinv_startup(immu_t *immu);
void immu_qinv_shutdown(immu_t *immu);
void immu_qinv_destroy(list_t *immu_list);

void immu_qinv_context_fsi(immu_t *immu, uint8_t function_mask,
    uint16_t source_id, uint_t domain_id);
void immu_qinv_context_dsi(immu_t *immu, uint_t domain_id);
void immu_qinv_context_gbl(immu_t *immu);
void immu_qinv_iotlb_psi(immu_t *immu, uint_t domain_id,
    uint64_t dvma, uint_t count, uint_t hint);
void immu_qinv_iotlb_dsi(immu_t *immu, uint_t domain_id);
void immu_qinv_iotlb_gbl(immu_t *immu);
void immu_qinv_intr_global(immu_t *immu);
void immu_qinv_intr_one_cache(immu_t *immu, uint_t idx);
void immu_qinv_intr_caches(immu_t *immu, uint_t idx, uint_t cnt);
void immu_qinv_report_fault(immu_t *immu);


#ifdef __cplusplus
}
#endif

#endif	/* _SYS_INTEL_IOMMU_H */