1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2016, Anish Gupta (anish@freebsd.org) 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice unmodified, this list of conditions, and the following 12 * disclaimer. 13 * 2. Redistributions in binary form must reproduce the above copyright 14 * notice, this list of conditions and the following disclaimer in the 15 * documentation and/or other materials provided with the distribution. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR 18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES 19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, 21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT 22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF 26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/systm.h> 34 #include <sys/bus.h> 35 #include <sys/kernel.h> 36 #include <sys/module.h> 37 #include <sys/malloc.h> 38 #include <sys/pcpu.h> 39 #include <sys/rman.h> 40 #include <sys/sysctl.h> 41 42 #include <dev/pci/pcivar.h> 43 #include <dev/pci/pcireg.h> 44 45 #include <machine/resource.h> 46 #include <machine/vmm.h> 47 #include <machine/vmparam.h> 48 #include <machine/pci_cfgreg.h> 49 50 #include "ivhd_if.h" 51 #include "pcib_if.h" 52 53 #include "io/iommu.h" 54 #include "amdvi_priv.h" 55 56 SYSCTL_DECL(_hw_vmm); 57 SYSCTL_NODE(_hw_vmm, OID_AUTO, amdvi, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL, 58 NULL); 59 60 #define MOD_INC(a, s, m) (((a) + (s)) % ((m) * (s))) 61 #define MOD_DEC(a, s, m) (((a) - (s)) % ((m) * (s))) 62 63 /* Print RID or device ID in PCI string format. */ 64 #define RID2PCI_STR(d) PCI_RID2BUS(d), PCI_RID2SLOT(d), PCI_RID2FUNC(d) 65 66 static void amdvi_dump_cmds(struct amdvi_softc *softc, int count); 67 static void amdvi_print_dev_cap(struct amdvi_softc *softc); 68 69 MALLOC_DEFINE(M_AMDVI, "amdvi", "amdvi"); 70 71 extern device_t *ivhd_devs; 72 73 extern int ivhd_count; 74 SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, count, CTLFLAG_RDTUN, &ivhd_count, 75 0, NULL); 76 77 static int amdvi_enable_user = 0; 78 SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, enable, CTLFLAG_RDTUN, 79 &amdvi_enable_user, 0, NULL); 80 TUNABLE_INT("hw.vmm.amdvi_enable", &amdvi_enable_user); 81 82 #ifdef AMDVI_ATS_ENABLE 83 /* XXX: ATS is not tested. */ 84 static int amdvi_enable_iotlb = 1; 85 SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, iotlb_enabled, CTLFLAG_RDTUN, 86 &amdvi_enable_iotlb, 0, NULL); 87 TUNABLE_INT("hw.vmm.enable_iotlb", &amdvi_enable_iotlb); 88 #endif 89 90 static int amdvi_host_ptp = 1; /* Use page tables for host. */ 91 SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, host_ptp, CTLFLAG_RDTUN, 92 &amdvi_host_ptp, 0, NULL); 93 TUNABLE_INT("hw.vmm.amdvi.host_ptp", &amdvi_host_ptp); 94 95 /* Page table level used <= supported by h/w[v1=7]. */ 96 int amdvi_ptp_level = 4; 97 SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, ptp_level, CTLFLAG_RDTUN, 98 &amdvi_ptp_level, 0, NULL); 99 TUNABLE_INT("hw.vmm.amdvi.ptp_level", &amdvi_ptp_level); 100 101 /* Disable fault event reporting. */ 102 static int amdvi_disable_io_fault = 0; 103 SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, disable_io_fault, CTLFLAG_RDTUN, 104 &amdvi_disable_io_fault, 0, NULL); 105 TUNABLE_INT("hw.vmm.amdvi.disable_io_fault", &amdvi_disable_io_fault); 106 107 static uint32_t amdvi_dom_id = 0; /* 0 is reserved for host. */ 108 SYSCTL_UINT(_hw_vmm_amdvi, OID_AUTO, domain_id, CTLFLAG_RD, 109 &amdvi_dom_id, 0, NULL); 110 /* 111 * Device table entry. 112 * Bus(256) x Dev(32) x Fun(8) x DTE(256 bits or 32 bytes). 113 * = 256 * 2 * PAGE_SIZE. 114 */ 115 static struct amdvi_dte amdvi_dte[PCI_NUM_DEV_MAX] __aligned(PAGE_SIZE); 116 CTASSERT(PCI_NUM_DEV_MAX == 0x10000); 117 CTASSERT(sizeof(amdvi_dte) == 0x200000); 118 119 static SLIST_HEAD (, amdvi_domain) dom_head; 120 121 static inline uint32_t 122 amdvi_pci_read(struct amdvi_softc *softc, int off) 123 { 124 125 return (pci_cfgregread(PCI_RID2BUS(softc->pci_rid), 126 PCI_RID2SLOT(softc->pci_rid), PCI_RID2FUNC(softc->pci_rid), 127 off, 4)); 128 } 129 130 #ifdef AMDVI_ATS_ENABLE 131 /* XXX: Should be in pci.c */ 132 /* 133 * Check if device has ATS capability and its enabled. 134 * If ATS is absent or disabled, return (-1), otherwise ATS 135 * queue length. 136 */ 137 static int 138 amdvi_find_ats_qlen(uint16_t devid) 139 { 140 device_t dev; 141 uint32_t off, cap; 142 int qlen = -1; 143 144 dev = pci_find_bsf(PCI_RID2BUS(devid), PCI_RID2SLOT(devid), 145 PCI_RID2FUNC(devid)); 146 147 if (!dev) { 148 return (-1); 149 } 150 #define PCIM_ATS_EN BIT(31) 151 152 if (pci_find_extcap(dev, PCIZ_ATS, &off) == 0) { 153 cap = pci_read_config(dev, off + 4, 4); 154 qlen = (cap & 0x1F); 155 qlen = qlen ? qlen : 32; 156 printf("AMD-Vi: PCI device %d.%d.%d ATS %s qlen=%d\n", 157 RID2PCI_STR(devid), 158 (cap & PCIM_ATS_EN) ? "enabled" : "Disabled", 159 qlen); 160 qlen = (cap & PCIM_ATS_EN) ? qlen : -1; 161 } 162 163 return (qlen); 164 } 165 166 /* 167 * Check if an endpoint device support device IOTLB or ATS. 168 */ 169 static inline bool 170 amdvi_dev_support_iotlb(struct amdvi_softc *softc, uint16_t devid) 171 { 172 struct ivhd_dev_cfg *cfg; 173 int qlen, i; 174 bool pci_ats, ivhd_ats; 175 176 qlen = amdvi_find_ats_qlen(devid); 177 if (qlen < 0) 178 return (false); 179 180 KASSERT(softc, ("softc is NULL")); 181 cfg = softc->dev_cfg; 182 183 ivhd_ats = false; 184 for (i = 0; i < softc->dev_cfg_cnt; i++) { 185 if ((cfg->start_id <= devid) && (cfg->end_id >= devid)) { 186 ivhd_ats = cfg->enable_ats; 187 break; 188 } 189 cfg++; 190 } 191 192 pci_ats = (qlen < 0) ? false : true; 193 if (pci_ats != ivhd_ats) 194 device_printf(softc->dev, 195 "BIOS bug: mismatch in ATS setting for %d.%d.%d," 196 "ATS inv qlen = %d\n", RID2PCI_STR(devid), qlen); 197 198 /* Ignore IVRS setting and respect PCI setting. */ 199 return (pci_ats); 200 } 201 #endif 202 203 /* Enable IOTLB support for IOMMU if its supported. */ 204 static inline void 205 amdvi_hw_enable_iotlb(struct amdvi_softc *softc) 206 { 207 #ifndef AMDVI_ATS_ENABLE 208 softc->iotlb = false; 209 #else 210 bool supported; 211 212 supported = (softc->ivhd_flag & IVHD_FLAG_IOTLB) ? true : false; 213 214 if (softc->pci_cap & AMDVI_PCI_CAP_IOTLB) { 215 if (!supported) 216 device_printf(softc->dev, "IOTLB disabled by BIOS.\n"); 217 218 if (supported && !amdvi_enable_iotlb) { 219 device_printf(softc->dev, "IOTLB disabled by user.\n"); 220 supported = false; 221 } 222 } else 223 supported = false; 224 225 softc->iotlb = supported; 226 227 #endif 228 } 229 230 static int 231 amdvi_init_cmd(struct amdvi_softc *softc) 232 { 233 struct amdvi_ctrl *ctrl = softc->ctrl; 234 235 ctrl->cmd.len = 8; /* Use 256 command buffer entries. */ 236 softc->cmd_max = 1 << ctrl->cmd.len; 237 238 softc->cmd = malloc(sizeof(struct amdvi_cmd) * 239 softc->cmd_max, M_AMDVI, M_WAITOK | M_ZERO); 240 241 if ((uintptr_t)softc->cmd & PAGE_MASK) 242 panic("AMDVi: Command buffer not aligned on page boundary."); 243 244 ctrl->cmd.base = vtophys(softc->cmd) / PAGE_SIZE; 245 /* 246 * XXX: Reset the h/w pointers in case IOMMU is restarting, 247 * h/w doesn't clear these pointers based on empirical data. 248 */ 249 ctrl->cmd_tail = 0; 250 ctrl->cmd_head = 0; 251 252 return (0); 253 } 254 255 /* 256 * Note: Update tail pointer after we have written the command since tail 257 * pointer update cause h/w to execute new commands, see section 3.3 258 * of AMD IOMMU spec ver 2.0. 259 */ 260 /* Get the command tail pointer w/o updating it. */ 261 static struct amdvi_cmd * 262 amdvi_get_cmd_tail(struct amdvi_softc *softc) 263 { 264 struct amdvi_ctrl *ctrl; 265 struct amdvi_cmd *tail; 266 267 KASSERT(softc, ("softc is NULL")); 268 KASSERT(softc->cmd != NULL, ("cmd is NULL")); 269 270 ctrl = softc->ctrl; 271 KASSERT(ctrl != NULL, ("ctrl is NULL")); 272 273 tail = (struct amdvi_cmd *)((uint8_t *)softc->cmd + 274 ctrl->cmd_tail); 275 276 return (tail); 277 } 278 279 /* 280 * Update the command tail pointer which will start command execution. 281 */ 282 static void 283 amdvi_update_cmd_tail(struct amdvi_softc *softc) 284 { 285 struct amdvi_ctrl *ctrl; 286 int size; 287 288 size = sizeof(struct amdvi_cmd); 289 KASSERT(softc->cmd != NULL, ("cmd is NULL")); 290 291 ctrl = softc->ctrl; 292 KASSERT(ctrl != NULL, ("ctrl is NULL")); 293 294 ctrl->cmd_tail = MOD_INC(ctrl->cmd_tail, size, softc->cmd_max); 295 softc->total_cmd++; 296 297 #ifdef AMDVI_DEBUG_CMD 298 device_printf(softc->dev, "cmd_tail: %s Tail:0x%x, Head:0x%x.\n", 299 ctrl->cmd_tail, 300 ctrl->cmd_head); 301 #endif 302 303 } 304 305 /* 306 * Various commands supported by IOMMU. 307 */ 308 309 /* Completion wait command. */ 310 static void 311 amdvi_cmd_cmp(struct amdvi_softc *softc, const uint64_t data) 312 { 313 struct amdvi_cmd *cmd; 314 uint64_t pa; 315 316 cmd = amdvi_get_cmd_tail(softc); 317 KASSERT(cmd != NULL, ("Cmd is NULL")); 318 319 pa = vtophys(&softc->cmp_data); 320 cmd->opcode = AMDVI_CMP_WAIT_OPCODE; 321 cmd->word0 = (pa & 0xFFFFFFF8) | AMDVI_CMP_WAIT_STORE; 322 cmd->word1 = (pa >> 32) & 0xFFFFF; 323 cmd->addr = data; 324 325 amdvi_update_cmd_tail(softc); 326 } 327 328 /* Invalidate device table entry. */ 329 static void 330 amdvi_cmd_inv_dte(struct amdvi_softc *softc, uint16_t devid) 331 { 332 struct amdvi_cmd *cmd; 333 334 cmd = amdvi_get_cmd_tail(softc); 335 KASSERT(cmd != NULL, ("Cmd is NULL")); 336 cmd->opcode = AMDVI_INVD_DTE_OPCODE; 337 cmd->word0 = devid; 338 amdvi_update_cmd_tail(softc); 339 #ifdef AMDVI_DEBUG_CMD 340 device_printf(softc->dev, "Invalidated DTE:0x%x\n", devid); 341 #endif 342 } 343 344 /* Invalidate IOMMU page, use for invalidation of domain. */ 345 static void 346 amdvi_cmd_inv_iommu_pages(struct amdvi_softc *softc, uint16_t domain_id, 347 uint64_t addr, bool guest_nested, 348 bool pde, bool page) 349 { 350 struct amdvi_cmd *cmd; 351 352 cmd = amdvi_get_cmd_tail(softc); 353 KASSERT(cmd != NULL, ("Cmd is NULL")); 354 355 cmd->opcode = AMDVI_INVD_PAGE_OPCODE; 356 cmd->word1 = domain_id; 357 /* 358 * Invalidate all addresses for this domain. 359 */ 360 cmd->addr = addr; 361 cmd->addr |= pde ? AMDVI_INVD_PAGE_PDE : 0; 362 cmd->addr |= page ? AMDVI_INVD_PAGE_S : 0; 363 364 amdvi_update_cmd_tail(softc); 365 } 366 367 #ifdef AMDVI_ATS_ENABLE 368 /* Invalidate device IOTLB. */ 369 static void 370 amdvi_cmd_inv_iotlb(struct amdvi_softc *softc, uint16_t devid) 371 { 372 struct amdvi_cmd *cmd; 373 int qlen; 374 375 if (!softc->iotlb) 376 return; 377 378 qlen = amdvi_find_ats_qlen(devid); 379 if (qlen < 0) { 380 panic("AMDVI: Invalid ATS qlen(%d) for device %d.%d.%d\n", 381 qlen, RID2PCI_STR(devid)); 382 } 383 cmd = amdvi_get_cmd_tail(softc); 384 KASSERT(cmd != NULL, ("Cmd is NULL")); 385 386 #ifdef AMDVI_DEBUG_CMD 387 device_printf(softc->dev, "Invalidate IOTLB devID 0x%x" 388 " Qlen:%d\n", devid, qlen); 389 #endif 390 cmd->opcode = AMDVI_INVD_IOTLB_OPCODE; 391 cmd->word0 = devid; 392 cmd->word1 = qlen; 393 cmd->addr = AMDVI_INVD_IOTLB_ALL_ADDR | 394 AMDVI_INVD_IOTLB_S; 395 amdvi_update_cmd_tail(softc); 396 } 397 #endif 398 399 #ifdef notyet /* For Interrupt Remap. */ 400 static void 401 amdvi_cmd_inv_intr_map(struct amdvi_softc *softc, 402 uint16_t devid) 403 { 404 struct amdvi_cmd *cmd; 405 406 cmd = amdvi_get_cmd_tail(softc); 407 KASSERT(cmd != NULL, ("Cmd is NULL")); 408 cmd->opcode = AMDVI_INVD_INTR_OPCODE; 409 cmd->word0 = devid; 410 amdvi_update_cmd_tail(softc); 411 #ifdef AMDVI_DEBUG_CMD 412 device_printf(softc->dev, "Invalidate INTR map of devID 0x%x\n", devid); 413 #endif 414 } 415 #endif 416 417 /* Invalidate domain using INVALIDATE_IOMMU_PAGES command. */ 418 static void 419 amdvi_inv_domain(struct amdvi_softc *softc, uint16_t domain_id) 420 { 421 struct amdvi_cmd *cmd; 422 423 cmd = amdvi_get_cmd_tail(softc); 424 KASSERT(cmd != NULL, ("Cmd is NULL")); 425 426 /* 427 * See section 3.3.3 of IOMMU spec rev 2.0, software note 428 * for invalidating domain. 429 */ 430 amdvi_cmd_inv_iommu_pages(softc, domain_id, AMDVI_INVD_PAGE_ALL_ADDR, 431 false, true, true); 432 433 #ifdef AMDVI_DEBUG_CMD 434 device_printf(softc->dev, "Invalidate domain:0x%x\n", domain_id); 435 436 #endif 437 } 438 439 static bool 440 amdvi_cmp_wait(struct amdvi_softc *softc) 441 { 442 struct amdvi_ctrl *ctrl; 443 const uint64_t VERIFY = 0xA5A5; 444 volatile uint64_t *read; 445 int i; 446 bool status; 447 448 ctrl = softc->ctrl; 449 read = &softc->cmp_data; 450 *read = 0; 451 amdvi_cmd_cmp(softc, VERIFY); 452 /* Wait for h/w to update completion data. */ 453 for (i = 0; i < 100 && (*read != VERIFY); i++) { 454 DELAY(1000); /* 1 ms */ 455 } 456 status = (VERIFY == softc->cmp_data) ? true : false; 457 458 #ifdef AMDVI_DEBUG_CMD 459 if (status) 460 device_printf(softc->dev, "CMD completion DONE Tail:0x%x, " 461 "Head:0x%x, loop:%d.\n", ctrl->cmd_tail, 462 ctrl->cmd_head, loop); 463 #endif 464 return (status); 465 } 466 467 static void 468 amdvi_wait(struct amdvi_softc *softc) 469 { 470 struct amdvi_ctrl *ctrl; 471 int i; 472 473 KASSERT(softc, ("softc is NULL")); 474 475 ctrl = softc->ctrl; 476 KASSERT(ctrl != NULL, ("ctrl is NULL")); 477 /* Don't wait if h/w is not enabled. */ 478 if ((ctrl->control & AMDVI_CTRL_EN) == 0) 479 return; 480 481 for (i = 0; i < 10; i++) { 482 if (amdvi_cmp_wait(softc)) 483 return; 484 } 485 486 device_printf(softc->dev, "Error: completion failed" 487 " tail:0x%x, head:0x%x.\n", 488 ctrl->cmd_tail, ctrl->cmd_head); 489 /* Dump the last command. */ 490 amdvi_dump_cmds(softc, 1); 491 } 492 493 static void 494 amdvi_dump_cmds(struct amdvi_softc *softc, int count) 495 { 496 struct amdvi_ctrl *ctrl; 497 struct amdvi_cmd *cmd; 498 int off, i; 499 500 ctrl = softc->ctrl; 501 device_printf(softc->dev, "Dump last %d command(s):\n", count); 502 /* 503 * If h/w is stuck in completion, it is the previous command, 504 * start dumping from previous command onward. 505 */ 506 off = MOD_DEC(ctrl->cmd_head, sizeof(struct amdvi_cmd), 507 softc->cmd_max); 508 for (i = 0; off != ctrl->cmd_tail && i < count; i++) { 509 cmd = (struct amdvi_cmd *)((uint8_t *)softc->cmd + off); 510 printf(" [CMD%d, off:0x%x] opcode= 0x%x 0x%x" 511 " 0x%x 0x%lx\n", i, off, cmd->opcode, 512 cmd->word0, cmd->word1, cmd->addr); 513 off = MOD_INC(off, sizeof(struct amdvi_cmd), softc->cmd_max); 514 } 515 } 516 517 static int 518 amdvi_init_event(struct amdvi_softc *softc) 519 { 520 struct amdvi_ctrl *ctrl; 521 522 ctrl = softc->ctrl; 523 ctrl->event.len = 8; 524 softc->event_max = 1 << ctrl->event.len; 525 softc->event = malloc(sizeof(struct amdvi_event) * 526 softc->event_max, M_AMDVI, M_WAITOK | M_ZERO); 527 if ((uintptr_t)softc->event & PAGE_MASK) { 528 device_printf(softc->dev, "Event buffer not aligned on page."); 529 return (false); 530 } 531 ctrl->event.base = vtophys(softc->event) / PAGE_SIZE; 532 533 /* Reset the pointers. */ 534 ctrl->evt_head = 0; 535 ctrl->evt_tail = 0; 536 537 return (0); 538 } 539 540 static inline void 541 amdvi_decode_evt_flag(uint16_t flag) 542 { 543 544 flag &= AMDVI_EVENT_FLAG_MASK; 545 printf(" 0x%b]\n", flag, 546 "\020" 547 "\001GN" 548 "\002NX" 549 "\003US" 550 "\004I" 551 "\005PR" 552 "\006RW" 553 "\007PE" 554 "\010RZ" 555 "\011TR" 556 ); 557 } 558 559 /* See section 2.5.4 of AMD IOMMU spec ver 2.62.*/ 560 static inline void 561 amdvi_decode_evt_flag_type(uint8_t type) 562 { 563 564 switch (AMDVI_EVENT_FLAG_TYPE(type)) { 565 case 0: 566 printf("RSVD\n"); 567 break; 568 case 1: 569 printf("Master Abort\n"); 570 break; 571 case 2: 572 printf("Target Abort\n"); 573 break; 574 case 3: 575 printf("Data Err\n"); 576 break; 577 default: 578 break; 579 } 580 } 581 582 static void 583 amdvi_decode_inv_dte_evt(uint16_t devid, uint16_t domid, uint64_t addr, 584 uint16_t flag) 585 { 586 587 printf("\t[IO_PAGE_FAULT EVT: devId:0x%x DomId:0x%x" 588 " Addr:0x%lx", 589 devid, domid, addr); 590 amdvi_decode_evt_flag(flag); 591 } 592 593 static void 594 amdvi_decode_pf_evt(uint16_t devid, uint16_t domid, uint64_t addr, 595 uint16_t flag) 596 { 597 598 printf("\t[IO_PAGE_FAULT EVT: devId:0x%x DomId:0x%x" 599 " Addr:0x%lx", 600 devid, domid, addr); 601 amdvi_decode_evt_flag(flag); 602 } 603 604 static void 605 amdvi_decode_dte_hwerr_evt(uint16_t devid, uint16_t domid, 606 uint64_t addr, uint16_t flag) 607 { 608 609 printf("\t[DEV_TAB_HW_ERR EVT: devId:0x%x DomId:0x%x" 610 " Addr:0x%lx", devid, domid, addr); 611 amdvi_decode_evt_flag(flag); 612 amdvi_decode_evt_flag_type(flag); 613 } 614 615 static void 616 amdvi_decode_page_hwerr_evt(uint16_t devid, uint16_t domid, uint64_t addr, 617 uint16_t flag) 618 { 619 620 printf("\t[PAGE_TAB_HW_ERR EVT: devId:0x%x DomId:0x%x" 621 " Addr:0x%lx", devid, domid, addr); 622 amdvi_decode_evt_flag(flag); 623 amdvi_decode_evt_flag_type(AMDVI_EVENT_FLAG_TYPE(flag)); 624 } 625 626 static void 627 amdvi_decode_evt(struct amdvi_event *evt) 628 { 629 struct amdvi_cmd *cmd; 630 631 switch (evt->opcode) { 632 case AMDVI_EVENT_INVALID_DTE: 633 amdvi_decode_inv_dte_evt(evt->devid, evt->pasid_domid, 634 evt->addr, evt->flag); 635 break; 636 637 case AMDVI_EVENT_PFAULT: 638 amdvi_decode_pf_evt(evt->devid, evt->pasid_domid, 639 evt->addr, evt->flag); 640 break; 641 642 case AMDVI_EVENT_DTE_HW_ERROR: 643 amdvi_decode_dte_hwerr_evt(evt->devid, evt->pasid_domid, 644 evt->addr, evt->flag); 645 break; 646 647 case AMDVI_EVENT_PAGE_HW_ERROR: 648 amdvi_decode_page_hwerr_evt(evt->devid, evt->pasid_domid, 649 evt->addr, evt->flag); 650 break; 651 652 case AMDVI_EVENT_ILLEGAL_CMD: 653 /* FALL THROUGH */ 654 case AMDVI_EVENT_CMD_HW_ERROR: 655 printf("\t[%s EVT]\n", (evt->opcode == AMDVI_EVENT_ILLEGAL_CMD) ? 656 "ILLEGAL CMD" : "CMD HW ERR"); 657 cmd = (struct amdvi_cmd *)PHYS_TO_DMAP(evt->addr); 658 printf("\tCMD opcode= 0x%x 0x%x 0x%x 0x%lx\n", 659 cmd->opcode, cmd->word0, cmd->word1, cmd->addr); 660 break; 661 662 case AMDVI_EVENT_IOTLB_TIMEOUT: 663 printf("\t[IOTLB_INV_TIMEOUT devid:0x%x addr:0x%lx]\n", 664 evt->devid, evt->addr); 665 break; 666 667 case AMDVI_EVENT_INVALID_DTE_REQ: 668 printf("\t[INV_DTE devid:0x%x addr:0x%lx type:0x%x tr:%d]\n", 669 evt->devid, evt->addr, evt->flag >> 9, 670 (evt->flag >> 8) & 1); 671 break; 672 673 case AMDVI_EVENT_INVALID_PPR_REQ: 674 case AMDVI_EVENT_COUNTER_ZERO: 675 printf("AMD-Vi: v2 events.\n"); 676 break; 677 678 default: 679 printf("Unsupported AMD-Vi event:%d\n", evt->opcode); 680 } 681 } 682 683 static void 684 amdvi_print_events(struct amdvi_softc *softc) 685 { 686 struct amdvi_ctrl *ctrl; 687 struct amdvi_event *event; 688 int i, size; 689 690 ctrl = softc->ctrl; 691 size = sizeof(struct amdvi_event); 692 for (i = 0; i < softc->event_max; i++) { 693 event = &softc->event[ctrl->evt_head / size]; 694 if (!event->opcode) 695 break; 696 device_printf(softc->dev, "\t[Event%d: Head:0x%x Tail:0x%x]\n", 697 i, ctrl->evt_head, ctrl->evt_tail); 698 amdvi_decode_evt(event); 699 ctrl->evt_head = MOD_INC(ctrl->evt_head, size, 700 softc->event_max); 701 } 702 } 703 704 static int 705 amdvi_init_dte(struct amdvi_softc *softc) 706 { 707 struct amdvi_ctrl *ctrl; 708 709 ctrl = softc->ctrl; 710 ctrl->dte.base = vtophys(amdvi_dte) / PAGE_SIZE; 711 ctrl->dte.size = 0x1FF; /* 2MB device table. */ 712 713 return (0); 714 } 715 716 /* 717 * Not all capabilities of IOMMU are available in ACPI IVHD flag 718 * or EFR entry, read directly from device. 719 */ 720 static int 721 amdvi_print_pci_cap(device_t dev) 722 { 723 struct amdvi_softc *softc; 724 uint32_t off, cap; 725 726 softc = device_get_softc(dev); 727 off = softc->cap_off; 728 729 /* 730 * Section 3.7.1 of IOMMU sepc rev 2.0. 731 * Read capability from device. 732 */ 733 cap = amdvi_pci_read(softc, off); 734 735 /* Make sure capability type[18:16] is 3. */ 736 KASSERT((((cap >> 16) & 0x7) == 0x3), 737 ("Not a IOMMU capability 0x%x@0x%x", cap, off)); 738 739 softc->pci_cap = cap >> 24; 740 device_printf(softc->dev, "PCI cap 0x%x@0x%x feature:%b\n", 741 cap, off, softc->pci_cap, 742 "\20\1IOTLB\2HT\3NPCache\4EFR\5CapExt"); 743 744 return (0); 745 } 746 747 static void 748 amdvi_event_intr(void *arg) 749 { 750 struct amdvi_softc *softc; 751 struct amdvi_ctrl *ctrl; 752 753 softc = (struct amdvi_softc *)arg; 754 ctrl = softc->ctrl; 755 device_printf(softc->dev, "EVT INTR %ld Status:0x%x" 756 " EVT Head:0x%x Tail:0x%x]\n", softc->event_intr_cnt++, 757 ctrl->status, ctrl->evt_head, ctrl->evt_tail); 758 printf(" [CMD Total 0x%lx] Tail:0x%x, Head:0x%x.\n", 759 softc->total_cmd, ctrl->cmd_tail, ctrl->cmd_head); 760 761 amdvi_print_events(softc); 762 ctrl->status &= AMDVI_STATUS_EV_OF | AMDVI_STATUS_EV_INTR; 763 } 764 765 static void 766 amdvi_free_evt_intr_res(device_t dev) 767 { 768 769 struct amdvi_softc *softc; 770 device_t mmio_dev; 771 772 softc = device_get_softc(dev); 773 mmio_dev = softc->pci_dev; 774 775 IVHD_TEARDOWN_INTR(mmio_dev); 776 } 777 778 static bool 779 amdvi_alloc_intr_resources(struct amdvi_softc *softc) 780 { 781 struct amdvi_ctrl *ctrl; 782 device_t dev, mmio_dev; 783 int err; 784 785 dev = softc->dev; 786 mmio_dev = softc->pci_dev; 787 788 /* Clear interrupt status bits. */ 789 ctrl = softc->ctrl; 790 ctrl->status &= AMDVI_STATUS_EV_OF | AMDVI_STATUS_EV_INTR; 791 792 err = IVHD_SETUP_INTR(mmio_dev, amdvi_event_intr, softc, "fault"); 793 if (err) 794 device_printf(dev, "Interrupt setup failed on %s\n", 795 device_get_nameunit(mmio_dev)); 796 return (err); 797 } 798 799 static void 800 amdvi_print_dev_cap(struct amdvi_softc *softc) 801 { 802 struct ivhd_dev_cfg *cfg; 803 int i; 804 805 cfg = softc->dev_cfg; 806 for (i = 0; i < softc->dev_cfg_cnt; i++) { 807 device_printf(softc->dev, "device [0x%x - 0x%x] " 808 "config:%b%s\n", cfg->start_id, cfg->end_id, 809 cfg->data, 810 "\020\001INIT\002ExtInt\003NMI" 811 "\007LINT0\010LINT1", 812 cfg->enable_ats ? "ATS enabled" : ""); 813 cfg++; 814 } 815 } 816 817 static int 818 amdvi_handle_sysctl(SYSCTL_HANDLER_ARGS) 819 { 820 struct amdvi_softc *softc; 821 int result, type, error = 0; 822 823 softc = (struct amdvi_softc *)arg1; 824 type = arg2; 825 826 switch (type) { 827 case 0: 828 result = softc->ctrl->cmd_head; 829 error = sysctl_handle_int(oidp, &result, 0, 830 req); 831 break; 832 case 1: 833 result = softc->ctrl->cmd_tail; 834 error = sysctl_handle_int(oidp, &result, 0, 835 req); 836 break; 837 case 2: 838 result = softc->ctrl->evt_head; 839 error = sysctl_handle_int(oidp, &result, 0, 840 req); 841 break; 842 case 3: 843 result = softc->ctrl->evt_tail; 844 error = sysctl_handle_int(oidp, &result, 0, 845 req); 846 break; 847 848 default: 849 device_printf(softc->dev, "Unknown sysctl:%d\n", type); 850 } 851 852 return (error); 853 } 854 855 static void 856 amdvi_add_sysctl(struct amdvi_softc *softc) 857 { 858 struct sysctl_oid_list *child; 859 struct sysctl_ctx_list *ctx; 860 device_t dev; 861 862 dev = softc->dev; 863 ctx = device_get_sysctl_ctx(dev); 864 child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev)); 865 866 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "event_intr_count", CTLFLAG_RD, 867 &softc->event_intr_cnt, "Event interrupt count"); 868 SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "command_count", CTLFLAG_RD, 869 &softc->total_cmd, "Command submitted count"); 870 SYSCTL_ADD_U16(ctx, child, OID_AUTO, "pci_rid", CTLFLAG_RD, 871 &softc->pci_rid, 0, "IOMMU RID"); 872 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "command_head", 873 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, softc, 0, 874 amdvi_handle_sysctl, "IU", "Command head"); 875 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "command_tail", 876 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, softc, 1, 877 amdvi_handle_sysctl, "IU", "Command tail"); 878 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "event_head", 879 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, softc, 2, 880 amdvi_handle_sysctl, "IU", "Command head"); 881 SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "event_tail", 882 CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, softc, 3, 883 amdvi_handle_sysctl, "IU", "Command tail"); 884 } 885 886 int 887 amdvi_setup_hw(struct amdvi_softc *softc) 888 { 889 device_t dev; 890 int status; 891 892 dev = softc->dev; 893 894 amdvi_hw_enable_iotlb(softc); 895 896 amdvi_print_dev_cap(softc); 897 898 if ((status = amdvi_print_pci_cap(dev)) != 0) { 899 device_printf(dev, "PCI capability.\n"); 900 return (status); 901 } 902 if ((status = amdvi_init_cmd(softc)) != 0) { 903 device_printf(dev, "Couldn't configure command buffer.\n"); 904 return (status); 905 } 906 if ((status = amdvi_init_event(softc)) != 0) { 907 device_printf(dev, "Couldn't configure event buffer.\n"); 908 return (status); 909 } 910 if ((status = amdvi_init_dte(softc)) != 0) { 911 device_printf(dev, "Couldn't configure device table.\n"); 912 return (status); 913 } 914 if ((status = amdvi_alloc_intr_resources(softc)) != 0) { 915 return (status); 916 } 917 amdvi_add_sysctl(softc); 918 return (0); 919 } 920 921 int 922 amdvi_teardown_hw(struct amdvi_softc *softc) 923 { 924 device_t dev; 925 926 dev = softc->dev; 927 928 /* 929 * Called after disable, h/w is stopped by now, free all the resources. 930 */ 931 amdvi_free_evt_intr_res(dev); 932 933 if (softc->cmd) 934 free(softc->cmd, M_AMDVI); 935 936 if (softc->event) 937 free(softc->event, M_AMDVI); 938 939 return (0); 940 } 941 942 /*********** bhyve interfaces *********************/ 943 static int 944 amdvi_init(void) 945 { 946 if (!ivhd_count) { 947 return (EIO); 948 } 949 if (!amdvi_enable_user && ivhd_count) { 950 printf("bhyve: Found %d AMD-Vi/IOMMU device(s), " 951 "use hw.vmm.amdvi.enable=1 to enable pass-through.\n", 952 ivhd_count); 953 return (EINVAL); 954 } 955 return (0); 956 } 957 958 static void 959 amdvi_cleanup(void) 960 { 961 /* Nothing. */ 962 } 963 964 static uint16_t 965 amdvi_domainId(void) 966 { 967 968 /* 969 * If we hit maximum domain limit, rollover leaving host 970 * domain(0). 971 * XXX: make sure that this domain is not used. 972 */ 973 if (amdvi_dom_id == AMDVI_MAX_DOMAIN) 974 amdvi_dom_id = 1; 975 976 return ((uint16_t)amdvi_dom_id++); 977 } 978 979 static void 980 amdvi_do_inv_domain(uint16_t domain_id, bool create) 981 { 982 struct amdvi_softc *softc; 983 int i; 984 985 for (i = 0; i < ivhd_count; i++) { 986 softc = device_get_softc(ivhd_devs[i]); 987 KASSERT(softc, ("softc is NULL")); 988 /* 989 * If not present pages are cached, invalidate page after 990 * creating domain. 991 */ 992 #if 0 993 if (create && ((softc->pci_cap & AMDVI_PCI_CAP_NPCACHE) == 0)) 994 continue; 995 #endif 996 amdvi_inv_domain(softc, domain_id); 997 amdvi_wait(softc); 998 } 999 } 1000 1001 static void * 1002 amdvi_create_domain(vm_paddr_t maxaddr) 1003 { 1004 struct amdvi_domain *dom; 1005 1006 dom = malloc(sizeof(struct amdvi_domain), M_AMDVI, M_ZERO | M_WAITOK); 1007 dom->id = amdvi_domainId(); 1008 //dom->maxaddr = maxaddr; 1009 #ifdef AMDVI_DEBUG_CMD 1010 printf("Created domain #%d\n", dom->id); 1011 #endif 1012 /* 1013 * Host domain(#0) don't create translation table. 1014 */ 1015 if (dom->id || amdvi_host_ptp) 1016 dom->ptp = malloc(PAGE_SIZE, M_AMDVI, M_WAITOK | M_ZERO); 1017 1018 dom->ptp_level = amdvi_ptp_level; 1019 1020 amdvi_do_inv_domain(dom->id, true); 1021 SLIST_INSERT_HEAD(&dom_head, dom, next); 1022 1023 return (dom); 1024 } 1025 1026 static void 1027 amdvi_free_ptp(uint64_t *ptp, int level) 1028 { 1029 int i; 1030 1031 if (level < 1) 1032 return; 1033 1034 for (i = 0; i < NPTEPG ; i++) { 1035 if ((ptp[i] & AMDVI_PT_PRESENT) == 0) 1036 continue; 1037 /* XXX: Add super-page or PTE mapping > 4KB. */ 1038 #ifdef notyet 1039 /* Super-page mapping. */ 1040 if (AMDVI_PD_SUPER(ptp[i])) 1041 continue; 1042 #endif 1043 1044 amdvi_free_ptp((uint64_t *)PHYS_TO_DMAP(ptp[i] 1045 & AMDVI_PT_MASK), level - 1); 1046 } 1047 1048 free(ptp, M_AMDVI); 1049 } 1050 1051 static void 1052 amdvi_destroy_domain(void *arg) 1053 { 1054 struct amdvi_domain *domain; 1055 1056 domain = (struct amdvi_domain *)arg; 1057 KASSERT(domain, ("domain is NULL")); 1058 #ifdef AMDVI_DEBUG_CMD 1059 printf("Destroying domain %d\n", domain->id); 1060 #endif 1061 if (domain->ptp) 1062 amdvi_free_ptp(domain->ptp, domain->ptp_level); 1063 1064 amdvi_do_inv_domain(domain->id, false); 1065 SLIST_REMOVE(&dom_head, domain, amdvi_domain, next); 1066 free(domain, M_AMDVI); 1067 } 1068 1069 static uint64_t 1070 amdvi_set_pt(uint64_t *pt, int level, vm_paddr_t gpa, 1071 vm_paddr_t hpa, uint64_t pg_size, bool create) 1072 { 1073 uint64_t *page, pa; 1074 int shift, index; 1075 const int PT_SHIFT = 9; 1076 const int PT_INDEX_MASK = (1 << PT_SHIFT) - 1; /* Based on PT_SHIFT */ 1077 1078 if (!pg_size) 1079 return (0); 1080 1081 if (hpa & (pg_size - 1)) { 1082 printf("HPA is not size aligned.\n"); 1083 return (0); 1084 } 1085 if (gpa & (pg_size - 1)) { 1086 printf("HPA is not size aligned.\n"); 1087 return (0); 1088 } 1089 shift = PML4SHIFT; 1090 while ((shift > PAGE_SHIFT) && (pg_size < (1UL << shift))) { 1091 index = (gpa >> shift) & PT_INDEX_MASK; 1092 1093 if ((pt[index] == 0) && create) { 1094 page = malloc(PAGE_SIZE, M_AMDVI, M_WAITOK | M_ZERO); 1095 pa = vtophys(page); 1096 pt[index] = pa | AMDVI_PT_PRESENT | AMDVI_PT_RW | 1097 ((level - 1) << AMDVI_PD_LEVEL_SHIFT); 1098 } 1099 #ifdef AMDVI_DEBUG_PTE 1100 if ((gpa % 0x1000000) == 0) 1101 printf("[level%d, shift = %d]PTE:0x%lx\n", 1102 level, shift, pt[index]); 1103 #endif 1104 #define PTE2PA(x) ((uint64_t)(x) & AMDVI_PT_MASK) 1105 pa = PTE2PA(pt[index]); 1106 pt = (uint64_t *)PHYS_TO_DMAP(pa); 1107 shift -= PT_SHIFT; 1108 level--; 1109 } 1110 1111 /* Leaf entry. */ 1112 index = (gpa >> shift) & PT_INDEX_MASK; 1113 1114 if (create) { 1115 pt[index] = hpa | AMDVI_PT_RW | AMDVI_PT_PRESENT; 1116 } else 1117 pt[index] = 0; 1118 1119 #ifdef AMDVI_DEBUG_PTE 1120 if ((gpa % 0x1000000) == 0) 1121 printf("[Last level%d, shift = %d]PTE:0x%lx\n", 1122 level, shift, pt[index]); 1123 #endif 1124 return (1ULL << shift); 1125 } 1126 1127 static uint64_t 1128 amdvi_update_mapping(struct amdvi_domain *domain, vm_paddr_t gpa, 1129 vm_paddr_t hpa, uint64_t size, bool create) 1130 { 1131 uint64_t mapped, *ptp, len; 1132 int level; 1133 1134 KASSERT(domain, ("domain is NULL")); 1135 level = domain->ptp_level; 1136 KASSERT(level, ("Page table level is 0")); 1137 1138 ptp = domain->ptp; 1139 KASSERT(ptp, ("PTP is NULL")); 1140 mapped = 0; 1141 while (mapped < size) { 1142 len = amdvi_set_pt(ptp, level, gpa + mapped, hpa + mapped, 1143 PAGE_SIZE, create); 1144 if (!len) { 1145 printf("Error: Couldn't map HPA:0x%lx GPA:0x%lx\n", 1146 hpa, gpa); 1147 return (0); 1148 } 1149 mapped += len; 1150 } 1151 1152 return (mapped); 1153 } 1154 1155 static uint64_t 1156 amdvi_create_mapping(void *arg, vm_paddr_t gpa, vm_paddr_t hpa, 1157 uint64_t len) 1158 { 1159 struct amdvi_domain *domain; 1160 1161 domain = (struct amdvi_domain *)arg; 1162 1163 if (domain->id && !domain->ptp) { 1164 printf("ptp is NULL"); 1165 return (-1); 1166 } 1167 1168 /* 1169 * If host domain is created w/o page table, skip IOMMU page 1170 * table set-up. 1171 */ 1172 if (domain->ptp) 1173 return (amdvi_update_mapping(domain, gpa, hpa, len, true)); 1174 else 1175 return (len); 1176 } 1177 1178 static uint64_t 1179 amdvi_remove_mapping(void *arg, vm_paddr_t gpa, uint64_t len) 1180 { 1181 struct amdvi_domain *domain; 1182 1183 domain = (struct amdvi_domain *)arg; 1184 /* 1185 * If host domain is created w/o page table, skip IOMMU page 1186 * table set-up. 1187 */ 1188 if (domain->ptp) 1189 return (amdvi_update_mapping(domain, gpa, 0, len, false)); 1190 return 1191 (len); 1192 } 1193 1194 static struct amdvi_softc * 1195 amdvi_find_iommu(uint16_t devid) 1196 { 1197 struct amdvi_softc *softc; 1198 int i, j; 1199 1200 for (i = 0; i < ivhd_count; i++) { 1201 softc = device_get_softc(ivhd_devs[i]); 1202 for (j = 0; j < softc->dev_cfg_cnt; j++) 1203 if ((devid >= softc->dev_cfg[j].start_id) && 1204 (devid <= softc->dev_cfg[j].end_id)) 1205 return (softc); 1206 } 1207 1208 return (NULL); 1209 } 1210 1211 /* 1212 * Set-up device table entry. 1213 * IOMMU spec Rev 2.0, section 3.2.2.2, some of the fields must 1214 * be set concurrently, e.g. read and write bits. 1215 */ 1216 static void 1217 amdvi_set_dte(struct amdvi_domain *domain, struct amdvi_softc *softc, 1218 uint16_t devid, bool enable) 1219 { 1220 struct amdvi_dte* temp; 1221 1222 KASSERT(domain, ("domain is NULL for pci_rid:0x%x\n", devid)); 1223 KASSERT(softc, ("softc is NULL for pci_rid:0x%x\n", devid)); 1224 1225 temp = &amdvi_dte[devid]; 1226 1227 #ifdef AMDVI_ATS_ENABLE 1228 /* If IOMMU and device support IOTLB, enable it. */ 1229 if (amdvi_dev_support_iotlb(softc, devid) && softc->iotlb) 1230 temp->iotlb_enable = 1; 1231 #endif 1232 1233 /* Avoid duplicate I/O faults. */ 1234 temp->sup_second_io_fault = 1; 1235 temp->sup_all_io_fault = amdvi_disable_io_fault; 1236 1237 temp->dt_valid = 1; 1238 temp->domain_id = domain->id; 1239 1240 if (enable) { 1241 if (domain->ptp) { 1242 temp->pt_base = vtophys(domain->ptp) >> 12; 1243 temp->pt_level = amdvi_ptp_level; 1244 } 1245 /* 1246 * XXX: Page table valid[TV] bit must be set even if host domain 1247 * page tables are not enabled. 1248 */ 1249 temp->pt_valid = 1; 1250 temp->read_allow = 1; 1251 temp->write_allow = 1; 1252 } 1253 } 1254 1255 static void 1256 amdvi_inv_device(struct amdvi_softc *softc, uint16_t devid) 1257 { 1258 KASSERT(softc, ("softc is NULL")); 1259 1260 amdvi_cmd_inv_dte(softc, devid); 1261 #ifdef AMDVI_ATS_ENABLE 1262 if (amdvi_dev_support_iotlb(softc, devid)) 1263 amdvi_cmd_inv_iotlb(softc, devid); 1264 #endif 1265 amdvi_wait(softc); 1266 } 1267 1268 static void 1269 amdvi_add_device(void *arg, uint16_t devid) 1270 { 1271 struct amdvi_domain *domain; 1272 struct amdvi_softc *softc; 1273 1274 domain = (struct amdvi_domain *)arg; 1275 KASSERT(domain != NULL, ("domain is NULL")); 1276 #ifdef AMDVI_DEBUG_CMD 1277 printf("Assigning device(%d.%d.%d) to domain:%d\n", 1278 RID2PCI_STR(devid), domain->id); 1279 #endif 1280 softc = amdvi_find_iommu(devid); 1281 if (softc == NULL) 1282 return; 1283 amdvi_set_dte(domain, softc, devid, true); 1284 amdvi_inv_device(softc, devid); 1285 } 1286 1287 static void 1288 amdvi_remove_device(void *arg, uint16_t devid) 1289 { 1290 struct amdvi_domain *domain; 1291 struct amdvi_softc *softc; 1292 1293 domain = (struct amdvi_domain *)arg; 1294 #ifdef AMDVI_DEBUG_CMD 1295 printf("Remove device(0x%x) from domain:%d\n", 1296 devid, domain->id); 1297 #endif 1298 softc = amdvi_find_iommu(devid); 1299 if (softc == NULL) 1300 return; 1301 amdvi_set_dte(domain, softc, devid, false); 1302 amdvi_inv_device(softc, devid); 1303 } 1304 1305 static void 1306 amdvi_enable(void) 1307 { 1308 struct amdvi_ctrl *ctrl; 1309 struct amdvi_softc *softc; 1310 uint64_t val; 1311 int i; 1312 1313 for (i = 0; i < ivhd_count; i++) { 1314 softc = device_get_softc(ivhd_devs[i]); 1315 KASSERT(softc, ("softc is NULL\n")); 1316 ctrl = softc->ctrl; 1317 KASSERT(ctrl, ("ctrl is NULL\n")); 1318 1319 val = ( AMDVI_CTRL_EN | 1320 AMDVI_CTRL_CMD | 1321 AMDVI_CTRL_ELOG | 1322 AMDVI_CTRL_ELOGINT | 1323 AMDVI_CTRL_INV_TO_1S); 1324 1325 if (softc->ivhd_flag & IVHD_FLAG_COH) 1326 val |= AMDVI_CTRL_COH; 1327 if (softc->ivhd_flag & IVHD_FLAG_HTT) 1328 val |= AMDVI_CTRL_HTT; 1329 if (softc->ivhd_flag & IVHD_FLAG_RPPW) 1330 val |= AMDVI_CTRL_RPPW; 1331 if (softc->ivhd_flag & IVHD_FLAG_PPW) 1332 val |= AMDVI_CTRL_PPW; 1333 if (softc->ivhd_flag & IVHD_FLAG_ISOC) 1334 val |= AMDVI_CTRL_ISOC; 1335 1336 ctrl->control = val; 1337 } 1338 } 1339 1340 static void 1341 amdvi_disable(void) 1342 { 1343 struct amdvi_ctrl *ctrl; 1344 struct amdvi_softc *softc; 1345 int i; 1346 1347 for (i = 0; i < ivhd_count; i++) { 1348 softc = device_get_softc(ivhd_devs[i]); 1349 KASSERT(softc, ("softc is NULL\n")); 1350 ctrl = softc->ctrl; 1351 KASSERT(ctrl, ("ctrl is NULL\n")); 1352 1353 ctrl->control = 0; 1354 } 1355 } 1356 1357 static void 1358 amdvi_invalidate_tlb(void *arg) 1359 { 1360 struct amdvi_domain *domain; 1361 1362 domain = (struct amdvi_domain *)arg; 1363 KASSERT(domain, ("domain is NULL")); 1364 amdvi_do_inv_domain(domain->id, false); 1365 } 1366 1367 const struct iommu_ops iommu_ops_amd = { 1368 .init = amdvi_init, 1369 .cleanup = amdvi_cleanup, 1370 .enable = amdvi_enable, 1371 .disable = amdvi_disable, 1372 .create_domain = amdvi_create_domain, 1373 .destroy_domain = amdvi_destroy_domain, 1374 .create_mapping = amdvi_create_mapping, 1375 .remove_mapping = amdvi_remove_mapping, 1376 .add_device = amdvi_add_device, 1377 .remove_device = amdvi_remove_device, 1378 .invalidate_tlb = amdvi_invalidate_tlb 1379 }; 1380