/*-
 * Copyright (c) 2016 Anish Gupta (anish@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#ifndef _AMDVI_PRIV_H_
#define _AMDVI_PRIV_H_

#include <contrib/dev/acpica/include/acpi.h>

#define BIT(n)			(1ULL << (n))
/* Return the value of bits [n:m] of x, where n and m are bit positions and n >= m. */
#define REG_BITS(x, n, m)	(((x) >> (m)) & \
				((1 << (((n) - (m)) + 1)) - 1))

/*
 * IOMMU PCI capability.
 */
#define AMDVI_PCI_CAP_IOTLB	BIT(0)	/* IOTLB is supported. */
#define AMDVI_PCI_CAP_HT	BIT(1)	/* HyperTransport tunnel support. */
#define AMDVI_PCI_CAP_NPCACHE	BIT(2)	/* Not present page cached. */
#define AMDVI_PCI_CAP_EFR	BIT(3)	/* Extended features. */
#define AMDVI_PCI_CAP_EXT	BIT(4)	/* Miscellaneous information reg. */

/*
 * IOMMU extended features.
 */
#define AMDVI_EX_FEA_PREFSUP	BIT(0)	/* Prefetch command support. */
#define AMDVI_EX_FEA_PPRSUP	BIT(1)	/* PPR support. */
#define AMDVI_EX_FEA_XTSUP	BIT(2)	/* Reserved. */
#define AMDVI_EX_FEA_NXSUP	BIT(3)	/* No-execute. */
#define AMDVI_EX_FEA_GTSUP	BIT(4)	/* Guest translation support. */
#define AMDVI_EX_FEA_EFRW	BIT(5)	/* Reserved. */
#define AMDVI_EX_FEA_IASUP	BIT(6)	/* Invalidate-all command support. */
#define AMDVI_EX_FEA_GASUP	BIT(7)	/* Guest APIC or AVIC support. */
#define AMDVI_EX_FEA_HESUP	BIT(8)	/* Hardware error registers support. */
#define AMDVI_EX_FEA_PCSUP	BIT(9)	/* Performance counters support. */
/* XXX: add more EFR bits. */

/*
 * Device table entry (DTE).
 * NOTE: Must be aligned to 256 bits (32 bytes).
 */
struct amdvi_dte {
	uint32_t dt_valid:1;		/* Device Table valid. */
	uint32_t pt_valid:1;		/* Page translation valid. */
	uint16_t :7;			/* Reserved[8:2] */
	uint8_t	 pt_level:3;		/* Paging level, 0 to disable. */
	uint64_t pt_base:40;		/* Page table root pointer. */
	uint8_t	 :3;			/* Reserved[54:52] */
	uint8_t	 gv_valid:1;		/* Revision 2, GVA to SPA. */
	uint8_t	 gv_level:2;		/* Revision 2, GLX level. */
	uint8_t	 gv_cr3_lsb:3;		/* Revision 2, GCR3[14:12] */
	uint8_t	 read_allow:1;		/* I/O read enabled. */
	uint8_t	 write_allow:1;		/* I/O write enabled. */
	uint8_t	 :1;			/* Reserved[63] */
	uint16_t domain_id:16;		/* Domain ID. */
	uint16_t gv_cr3_lsb2:16;	/* Revision 2, GCR3[30:15] */
	uint8_t	 iotlb_enable:1;	/* Device supports IOTLB. */
	uint8_t	 sup_second_io_fault:1;	/* Suppress subsequent I/O faults. */
	uint8_t	 sup_all_io_fault:1;	/* Suppress all I/O page faults. */
	uint8_t	 IOctl:2;		/* Port I/O control. */
	uint8_t	 iotlb_cache_disable:1;	/* IOTLB cache hints. */
	uint8_t	 snoop_disable:1;	/* Snoop disable. */
	uint8_t	 allow_ex:1;		/* Allow exclusion. */
	uint8_t	 sysmgmt:2;		/* System management message. */
	uint8_t	 :1;			/* Reserved[106] */
	uint32_t gv_cr3_msb:21;		/* Revision 2, GCR3[51:31] */
	uint8_t	 intmap_valid:1;	/* Interrupt map valid. */
	uint8_t	 intmap_len:4;		/* Interrupt map table length. */
	uint8_t	 intmap_ign:1;		/* Ignore unmapped interrupts. */
	uint64_t intmap_base:46;	/* Interrupt map base. */
	uint8_t	 :4;			/* Reserved[183:180] */
	uint8_t	 init_pass:1;		/* INIT pass-through (PT). */
	uint8_t	 extintr_pass:1;	/* External interrupt PT. */
	uint8_t	 nmi_pass:1;		/* NMI PT. */
	uint8_t	 :1;			/* Reserved[187] */
	uint8_t	 intr_ctrl:2;		/* Interrupt control. */
	uint8_t	 lint0_pass:1;		/* LINT0 PT. */
	uint8_t	 lint1_pass:1;		/* LINT1 PT. */
	uint64_t :64;			/* Reserved[255:192] */
} __attribute__((__packed__));
CTASSERT(sizeof(struct amdvi_dte) == 32);
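
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * one way a caller might fill in a minimal DTE that enables translation for
 * a device.  The helper name and parameters are assumptions; the field
 * encodings follow the structure above (pt_base holds bits [51:12] of the
 * 4KB-aligned page-table root).
 */
static inline void
amdvi_dte_init_sketch(struct amdvi_dte *dte, uint64_t pt_root_pa,
    uint16_t domain_id, uint8_t level)
{
	*dte = (struct amdvi_dte){ 0 };		/* Clear all 256 bits. */
	dte->pt_base = pt_root_pa >> 12;	/* Page-table root pointer. */
	dte->pt_level = level;			/* 0 disables translation. */
	dte->domain_id = domain_id;
	dte->read_allow = 1;			/* Permit DMA reads. */
	dte->write_allow = 1;			/* Permit DMA writes. */
	dte->pt_valid = 1;
	dte->dt_valid = 1;			/* Entry is now valid. */
}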

/*
 * IOMMU command entry.
 */
struct amdvi_cmd {
	uint32_t word0;
	uint32_t word1:28;
	uint8_t	 opcode:4;
	uint64_t addr;
} __attribute__((__packed__));

/* Command opcodes. */
#define AMDVI_CMP_WAIT_OPCODE		0x1	/* Completion wait. */
#define AMDVI_INVD_DTE_OPCODE		0x2	/* Invalidate device table entry. */
#define AMDVI_INVD_PAGE_OPCODE		0x3	/* Invalidate pages. */
#define AMDVI_INVD_IOTLB_OPCODE		0x4	/* Invalidate IOTLB pages. */
#define AMDVI_INVD_INTR_OPCODE		0x5	/* Invalidate interrupt table. */
#define AMDVI_PREFETCH_PAGES_OPCODE	0x6	/* Prefetch IOMMU pages. */
#define AMDVI_COMP_PPR_OPCODE		0x7	/* Complete PPR request. */
#define AMDVI_INV_ALL_OPCODE		0x8	/* Invalidate all. */

/* Completion wait attributes. */
#define AMDVI_CMP_WAIT_STORE	BIT(0)	/* Write back data. */
#define AMDVI_CMP_WAIT_INTR	BIT(1)	/* Completion wait interrupt. */
#define AMDVI_CMP_WAIT_FLUSH	BIT(2)	/* Flush queue. */

/* Invalidate page. */
#define AMDVI_INVD_PAGE_S	BIT(0)	/* Invalidation size. */
#define AMDVI_INVD_PAGE_PDE	BIT(1)	/* Invalidate PDE. */
#define AMDVI_INVD_PAGE_GN_GVA	BIT(2)	/* GPA or GVA. */

#define AMDVI_INVD_PAGE_ALL_ADDR	(0x7FFFFFFFFFFFFULL << 12)

/* Invalidate IOTLB. */
#define AMDVI_INVD_IOTLB_S	BIT(0)	/* Invalidation size: 4KB or by address. */
#define AMDVI_INVD_IOTLB_GN_GVA	BIT(2)	/* GPA or GVA. */

#define AMDVI_INVD_IOTLB_ALL_ADDR	(0x7FFFFFFFFFFFFULL << 12)
/* XXX: add more command entries. */
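
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * encoding an INVALIDATE_DEVTAB_ENTRY command.  For this opcode the device
 * BDF goes in the low 16 bits of word0 and the remaining fields are unused.
 */
static inline void
amdvi_cmd_inv_dte_sketch(struct amdvi_cmd *cmd, uint16_t devid)
{
	*cmd = (struct amdvi_cmd){ 0 };
	cmd->word0 = devid;			/* DeviceID[15:0]. */
	cmd->opcode = AMDVI_INVD_DTE_OPCODE;
}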

/*
 * IOMMU event entry.
 */
struct amdvi_event {
	uint16_t devid;
	uint16_t pasid_hi;
	uint16_t pasid_domid;		/* PASID low or DomainID. */
	uint16_t flag:12;
	uint8_t	 opcode:4;
	uint64_t addr;
} __attribute__((__packed__));
CTASSERT(sizeof(struct amdvi_event) == 16);

/* Various event types. */
#define AMDVI_EVENT_INVALID_DTE		0x1
#define AMDVI_EVENT_PFAULT		0x2
#define AMDVI_EVENT_DTE_HW_ERROR	0x3
#define AMDVI_EVENT_PAGE_HW_ERROR	0x4
#define AMDVI_EVENT_ILLEGAL_CMD		0x5
#define AMDVI_EVENT_CMD_HW_ERROR	0x6
#define AMDVI_EVENT_IOTLB_TIMEOUT	0x7
#define AMDVI_EVENT_INVALID_DTE_REQ	0x8
#define AMDVI_EVENT_INVALID_PPR_REQ	0x9
#define AMDVI_EVENT_COUNTER_ZERO	0xA

#define AMDVI_EVENT_FLAG_MASK		0x1FF	/* Mask for event flags. */
#define AMDVI_EVENT_FLAG_TYPE(x)	(((x) >> 9) & 0x3)

/*
 * IOMMU control block.
 */
struct amdvi_ctrl {
	struct {
		uint16_t size:9;
		uint16_t :3;
		uint64_t base:40;	/* Device table register base. */
		uint16_t :12;
	} dte;
	struct {
		uint16_t :12;
		uint64_t base:40;
		uint8_t	 :4;
		uint8_t	 len:4;
		uint8_t	 :4;
	} cmd;
	struct {
		uint16_t :12;
		uint64_t base:40;
		uint8_t	 :4;
		uint8_t	 len:4;
		uint8_t	 :4;
	} event;
	uint16_t control:13;
	uint64_t :51;
	struct {
		uint8_t	 enable:1;
		uint8_t	 allow:1;
		uint16_t :10;
		uint64_t base:40;
		uint16_t :12;
		uint16_t :12;
		uint64_t limit:40;
		uint16_t :12;
	} excl;
	/*
	 * Revision 2 only.
	 */
	uint64_t ex_feature;
	struct {
		uint16_t :12;
		uint64_t base:40;
		uint8_t	 :4;
		uint8_t	 len:4;
		uint8_t	 :4;
	} ppr;
	uint64_t first_event;
	uint64_t second_event;
	uint64_t event_status;
	/* Revision 2 only, end. */
	uint8_t	 pad1[0x1FA8];		/* Padding. */
	uint32_t cmd_head:19;
	uint64_t :45;
	uint32_t cmd_tail:19;
	uint64_t :45;
	uint32_t evt_head:19;
	uint64_t :45;
	uint32_t evt_tail:19;
	uint64_t :45;
	uint32_t status:19;
	uint64_t :45;
	uint64_t pad2;
	uint8_t	 :4;
	uint16_t ppr_head:15;
	uint64_t :45;
	uint8_t	 :4;
	uint16_t ppr_tail:15;
	uint64_t :45;
	uint8_t	 pad3[0x1FC0];		/* Padding. */

	/* XXX: More for rev2. */
} __attribute__((__packed__));
CTASSERT(offsetof(struct amdvi_ctrl, pad1) == 0x58);
CTASSERT(offsetof(struct amdvi_ctrl, pad2) == 0x2028);
CTASSERT(offsetof(struct amdvi_ctrl, pad3) == 0x2040);

#define AMDVI_MMIO_V1_SIZE	(4 * PAGE_SIZE)	/* v1 size. */
/*
 * AMD IOMMU v2 size, including event counters.
 */
#define AMDVI_MMIO_V2_SIZE	(8 * PAGE_SIZE)

CTASSERT(sizeof(struct amdvi_ctrl) == 0x4000);
CTASSERT(sizeof(struct amdvi_ctrl) == AMDVI_MMIO_V1_SIZE);
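
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * advancing the command-buffer tail after a new entry has been written.
 * cmd_head/cmd_tail are treated here as byte offsets into the command
 * buffer, so the tail moves in sizeof(struct amdvi_cmd) steps and wraps at
 * the end of the buffer; cmd_max (the number of entries) is an assumed
 * caller-provided value.
 */
static inline void
amdvi_cmd_tail_advance_sketch(struct amdvi_ctrl *ctrl, uint32_t cmd_max)
{
	ctrl->cmd_tail = (ctrl->cmd_tail + sizeof(struct amdvi_cmd)) %
	    (cmd_max * sizeof(struct amdvi_cmd));
}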

/* IVHD flags. */
#define IVHD_FLAG_HTT		BIT(0)	/* HyperTransport tunnel. */
#define IVHD_FLAG_PPW		BIT(1)	/* Pass posted write. */
#define IVHD_FLAG_RPPW		BIT(2)	/* Response pass posted write. */
#define IVHD_FLAG_ISOC		BIT(3)	/* Isochronous support. */
#define IVHD_FLAG_IOTLB		BIT(4)	/* IOTLB support. */
#define IVHD_FLAG_COH		BIT(5)	/* Coherent control, default 1. */
#define IVHD_FLAG_PFS		BIT(6)	/* Prefetch IOMMU pages. */
#define IVHD_FLAG_PPRS		BIT(7)	/* Peripheral page request support. */

/* IVHD device entry data settings. */
#define IVHD_DEV_LINT0_PASS	BIT(6)	/* LINT0 interrupts. */
#define IVHD_DEV_LINT1_PASS	BIT(7)	/* LINT1 interrupts. */

/* Bits [5:4] are for system management; bit 3 is reserved. */
#define IVHD_DEV_INIT_PASS	BIT(0)	/* INIT */
#define IVHD_DEV_EXTINTR_PASS	BIT(1)	/* ExtInt */
#define IVHD_DEV_NMI_PASS	BIT(2)	/* NMI */

/* IVHD 8-byte extended data settings. */
#define IVHD_DEV_EXT_ATS_DISABLE	BIT(31)	/* Disable ATS. */

/* IOMMU control register. */
#define AMDVI_CTRL_EN		BIT(0)	/* IOMMU enable. */
#define AMDVI_CTRL_HTT		BIT(1)	/* HyperTransport tunnel enable. */
#define AMDVI_CTRL_ELOG		BIT(2)	/* Event log enable. */
#define AMDVI_CTRL_ELOGINT	BIT(3)	/* Event log interrupt. */
#define AMDVI_CTRL_COMINT	BIT(4)	/* Completion wait interrupt. */
#define AMDVI_CTRL_PPW		BIT(8)
#define AMDVI_CTRL_RPPW		BIT(9)
#define AMDVI_CTRL_COH		BIT(10)
#define AMDVI_CTRL_ISOC		BIT(11)
#define AMDVI_CTRL_CMD		BIT(12)	/* Command buffer enable. */
#define AMDVI_CTRL_PPRLOG	BIT(13)
#define AMDVI_CTRL_PPRINT	BIT(14)
#define AMDVI_CTRL_PPREN	BIT(15)
#define AMDVI_CTRL_GTE		BIT(16)	/* Guest translation enable. */
#define AMDVI_CTRL_GAE		BIT(17)	/* Guest APIC enable. */

/* Invalidation timeout. */
#define AMDVI_CTRL_INV_NO_TO	0	/* No timeout. */
#define AMDVI_CTRL_INV_TO_1ms	1	/* 1 ms */
#define AMDVI_CTRL_INV_TO_10ms	2	/* 10 ms */
#define AMDVI_CTRL_INV_TO_100ms	3	/* 100 ms */
#define AMDVI_CTRL_INV_TO_1S	4	/* 1 second */
#define AMDVI_CTRL_INV_TO_10S	5	/* 10 seconds */
#define AMDVI_CTRL_INV_TO_100S	6	/* 100 seconds */

/*
 * Max number of PCI devices:
 * 256 buses x 32 slots/devices x 8 functions.
 */
#define PCI_NUM_DEV_MAX		0x10000

/* Maximum number of domains supported by the IOMMU. */
#define AMDVI_MAX_DOMAIN	(BIT(16) - 1)

/*
 * IOMMU page table attributes.
 */
#define AMDVI_PT_PRESENT	BIT(0)
#define AMDVI_PT_COHERENT	BIT(60)
#define AMDVI_PT_READ		BIT(61)
#define AMDVI_PT_WRITE		BIT(62)

#define AMDVI_PT_RW		(AMDVI_PT_READ | AMDVI_PT_WRITE)
#define AMDVI_PT_MASK		0xFFFFFFFFFF000UL	/* Only bits [51:12] of the PA. */

#define AMDVI_PD_LEVEL_SHIFT	9
#define AMDVI_PD_SUPER(x)	(((x) >> AMDVI_PD_LEVEL_SHIFT) == 7)
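
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * encoding a leaf page-table entry that maps a single 4KB page read/write.
 * The physical address is confined to bits [51:12]; the permission and
 * present bits use the attribute definitions above.
 */
static inline uint64_t
amdvi_pte_rw_sketch(uint64_t pa)
{
	return ((pa & AMDVI_PT_MASK) | AMDVI_PT_RW | AMDVI_PT_PRESENT);
}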

/*
 * IOMMU status register, offset 0x2020.
 */
#define AMDVI_STATUS_EV_OF	BIT(0)	/* Event overflow. */
#define AMDVI_STATUS_EV_INTR	BIT(1)	/* Event interrupt. */
/* Completion wait command completed. */
#define AMDVI_STATUS_CMP	BIT(2)

#define IVRS_CTRL_RID		1	/* MMIO RID */

/* ACPI IVHD */
struct ivhd_dev_cfg {
	uint32_t start_id;
	uint32_t end_id;
	uint8_t	 data;			/* Device configuration. */
	bool	 enable_ats;		/* ATS enabled for the device. */
	int	 ats_qlen;		/* ATS invalidation queue depth. */
};

struct amdvi_domain {
	uint64_t *ptp;			/* Highest-level page table. */
	int	ptp_level;		/* Level of page tables. */
	u_int	id;			/* Domain ID. */
	SLIST_ENTRY (amdvi_domain) next;
};

/*
 * The I/O Virtualization Hardware Definition block (IVHD) of type 0x10
 * (legacy) uses the ACPI_IVRS_HARDWARE definition from
 * contrib/dev/acpica/include/actbl2.h.  The newer IVHD types 0x11 and 0x40,
 * defined in the AMD IOMMU spec [48882], are missing from the ACPI code.
 * These new types add an EFR (Extended Feature Register) field.
 * XXX: Use the definition from ACPI once it is available.
 */
typedef struct acpi_ivrs_hardware_efr_sup
{
	ACPI_IVRS_HEADER Header;
	UINT16 CapabilityOffset;	/* Offset for IOMMU control fields. */
	UINT64 BaseAddress;		/* IOMMU control registers. */
	UINT16 PciSegmentGroup;
	UINT16 Info;			/* MSI number and unit ID. */
	UINT32 Attr;			/* IOMMU features. */
	UINT64 ExtFR;			/* IOMMU extended features. */
	UINT64 Reserved;		/* v1 feature or v2 attribute. */
} __attribute__ ((__packed__)) ACPI_IVRS_HARDWARE_EFRSUP;
CTASSERT(sizeof(ACPI_IVRS_HARDWARE_EFRSUP) == 40);

/*
 * Different types of IVHD.
 * XXX: Use AcpiIvrsType once the new IVHD types are available there.
 */
enum IvrsType
{
	IVRS_TYPE_HARDWARE_LEGACY = 0x10,	/* Legacy, without EFR support. */
	IVRS_TYPE_HARDWARE_EFR	  = 0x11,	/* With EFR support. */
	IVRS_TYPE_HARDWARE_MIXED  = 0x40,	/* Mixed, with EFR support. */
};

/*
 * AMD IOMMU softc.
 */
struct amdvi_softc {
	struct amdvi_ctrl *ctrl;	/* Control area. */
	device_t	dev;		/* IOMMU device. */
	enum IvrsType	ivhd_type;	/* IOMMU IVHD type. */
	bool		iotlb;		/* IOTLB supported by IOMMU. */
	struct amdvi_cmd *cmd;		/* Command descriptor area. */
	int		cmd_max;	/* Max number of commands. */
	uint64_t	cmp_data;	/* Command completion write-back. */
	struct amdvi_event *event;	/* Event descriptor area. */
	struct resource *event_res;	/* Event interrupt resource. */
	void		*event_tag;	/* Event interrupt tag. */
	int		event_max;	/* Max number of events. */
	int		event_irq;
	int		event_rid;
	/* ACPI various flags. */
	uint32_t	ivhd_flag;	/* ACPI IVHD flags. */
	uint32_t	ivhd_feature;	/* ACPI v1 Reserved field or v2 attribute. */
	uint64_t	ext_feature;	/* IVHD EFR. */
	/* PCI related. */
	uint16_t	cap_off;	/* PCI capability offset. */
	uint8_t		pci_cap;	/* PCI capability. */
	uint16_t	pci_seg;	/* IOMMU PCI domain/segment. */
	uint16_t	pci_rid;	/* PCI BDF of the IOMMU. */
	/* Device range under this IOMMU. */
	uint16_t	start_dev_rid;	/* First device under this IOMMU. */
	uint16_t	end_dev_rid;	/* Last device under this IOMMU. */

	/* BIOS-provided device configuration for endpoints. */
	struct ivhd_dev_cfg dev_cfg[10];
	int		dev_cfg_cnt;

	/* Software statistics. */
	uint64_t	event_intr_cnt;	/* Total event interrupt count. */
	uint64_t	total_cmd;	/* Total number of commands. */
};

int	amdvi_setup_hw(struct amdvi_softc *softc);
int	amdvi_teardown_hw(struct amdvi_softc *softc);
#endif /* _AMDVI_PRIV_H_ */