/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016, Anish Gupta (anish@freebsd.org)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <sys/cdefs.h>

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/malloc.h>
#include <sys/pcpu.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <machine/resource.h>
#include <machine/vmm.h>
#include <machine/vmparam.h>
#include <machine/pci_cfgreg.h>

#include "ivhd_if.h"
#include "pcib_if.h"

#include "io/iommu.h"
#include "amdvi_priv.h"

SYSCTL_DECL(_hw_vmm);
SYSCTL_NODE(_hw_vmm, OID_AUTO, amdvi, CTLFLAG_RW | CTLFLAG_MPSAFE, NULL,
    NULL);

#define MOD_INC(a, s, m) (((a) + (s)) % ((m) * (s)))
#define MOD_DEC(a, s, m) (((a) - (s)) % ((m) * (s)))

/* Print RID or device ID in PCI string format. */
#define RID2PCI_STR(d) PCI_RID2BUS(d), PCI_RID2SLOT(d), PCI_RID2FUNC(d)

static void amdvi_dump_cmds(struct amdvi_softc *softc, int count);
static void amdvi_print_dev_cap(struct amdvi_softc *softc);

MALLOC_DEFINE(M_AMDVI, "amdvi", "amdvi");

extern device_t *ivhd_devs;

extern int ivhd_count;
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, count, CTLFLAG_RDTUN, &ivhd_count,
    0, NULL);

static int amdvi_enable_user = 0;
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, enable, CTLFLAG_RDTUN,
    &amdvi_enable_user, 0, NULL);
TUNABLE_INT("hw.vmm.amdvi_enable", &amdvi_enable_user);

#ifdef AMDVI_ATS_ENABLE
/* XXX: ATS is not tested. */
static int amdvi_enable_iotlb = 1;
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, iotlb_enabled, CTLFLAG_RDTUN,
    &amdvi_enable_iotlb, 0, NULL);
TUNABLE_INT("hw.vmm.enable_iotlb", &amdvi_enable_iotlb);
#endif

static int amdvi_host_ptp = 1;          /* Use page tables for host. */
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, host_ptp, CTLFLAG_RDTUN,
    &amdvi_host_ptp, 0, NULL);
TUNABLE_INT("hw.vmm.amdvi.host_ptp", &amdvi_host_ptp);

/* Page table level used <= supported by h/w[v1=7]. */
int amdvi_ptp_level = 4;
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, ptp_level, CTLFLAG_RDTUN,
    &amdvi_ptp_level, 0, NULL);
TUNABLE_INT("hw.vmm.amdvi.ptp_level", &amdvi_ptp_level);

/* Disable fault event reporting. */
static int amdvi_disable_io_fault = 0;
SYSCTL_INT(_hw_vmm_amdvi, OID_AUTO, disable_io_fault, CTLFLAG_RDTUN,
    &amdvi_disable_io_fault, 0, NULL);
TUNABLE_INT("hw.vmm.amdvi.disable_io_fault", &amdvi_disable_io_fault);

static uint32_t amdvi_dom_id = 0;       /* 0 is reserved for host. */
SYSCTL_UINT(_hw_vmm_amdvi, OID_AUTO, domain_id, CTLFLAG_RD,
    &amdvi_dom_id, 0, NULL);
/*
 * Device table entry.
 * Bus(256) x Dev(32) x Fun(8) x DTE(256 bits or 32 bytes).
 * = 256 * 2 * PAGE_SIZE.
 */
static struct amdvi_dte amdvi_dte[PCI_NUM_DEV_MAX] __aligned(PAGE_SIZE);
CTASSERT(PCI_NUM_DEV_MAX == 0x10000);
CTASSERT(sizeof(amdvi_dte) == 0x200000);

static SLIST_HEAD (, amdvi_domain) dom_head;

static inline uint32_t
amdvi_pci_read(struct amdvi_softc *softc, int off)
{

        return (pci_cfgregread(PCI_RID2BUS(softc->pci_rid),
            PCI_RID2SLOT(softc->pci_rid), PCI_RID2FUNC(softc->pci_rid),
            off, 4));
}

#ifdef AMDVI_ATS_ENABLE
/* XXX: Should be in pci.c */
/*
 * Check if a device has the ATS capability and whether it is enabled.
 * Return the ATS invalidation queue length if ATS is present and
 * enabled, otherwise return (-1).
 */
static int
amdvi_find_ats_qlen(uint16_t devid)
{
        device_t dev;
        uint32_t off, cap;
        int qlen = -1;

        dev = pci_find_bsf(PCI_RID2BUS(devid), PCI_RID2SLOT(devid),
            PCI_RID2FUNC(devid));

        if (!dev) {
                return (-1);
        }
#define PCIM_ATS_EN     BIT(31)

        if (pci_find_extcap(dev, PCIZ_ATS, &off) == 0) {
                cap = pci_read_config(dev, off + 4, 4);
                qlen = (cap & 0x1F);
                qlen = qlen ? qlen : 32;
                printf("AMD-Vi: PCI device %d.%d.%d ATS %s qlen=%d\n",
                    RID2PCI_STR(devid),
                    (cap & PCIM_ATS_EN) ? "enabled" : "disabled",
                    qlen);
                qlen = (cap & PCIM_ATS_EN) ? qlen : -1;
        }

        return (qlen);
}

/*
 * Check if an endpoint device supports device IOTLB or ATS.
 */
static inline bool
amdvi_dev_support_iotlb(struct amdvi_softc *softc, uint16_t devid)
{
        struct ivhd_dev_cfg *cfg;
        int qlen, i;
        bool pci_ats, ivhd_ats;

        qlen = amdvi_find_ats_qlen(devid);
        if (qlen < 0)
                return (false);

        KASSERT(softc, ("softc is NULL"));
        cfg = softc->dev_cfg;

        ivhd_ats = false;
        for (i = 0; i < softc->dev_cfg_cnt; i++) {
                if ((cfg->start_id <= devid) && (cfg->end_id >= devid)) {
                        ivhd_ats = cfg->enable_ats;
                        break;
                }
                cfg++;
        }

        pci_ats = (qlen < 0) ? false : true;
        if (pci_ats != ivhd_ats)
                device_printf(softc->dev,
                    "BIOS bug: mismatch in ATS setting for %d.%d.%d, "
                    "ATS inv qlen = %d\n", RID2PCI_STR(devid), qlen);

        /* Ignore the IVRS setting and respect the PCI setting. */
        return (pci_ats);
}
#endif
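
/*
 * Illustrative note (not from the original source): device IOTLB (ATS)
 * caching is used below only when three sources agree -- the IOMMU's PCI
 * capability advertises IOTLB support, the IVHD flags allow it, and the
 * amdvi_enable_iotlb tunable has not turned it off.  Endpoints are further
 * checked via their PCI ATS capability in amdvi_dev_support_iotlb().
 */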

/* Enable IOTLB support for IOMMU if it is supported. */
static inline void
amdvi_hw_enable_iotlb(struct amdvi_softc *softc)
{
#ifndef AMDVI_ATS_ENABLE
        softc->iotlb = false;
#else
        bool supported;

        supported = (softc->ivhd_flag & IVHD_FLAG_IOTLB) ? true : false;

        if (softc->pci_cap & AMDVI_PCI_CAP_IOTLB) {
                if (!supported)
                        device_printf(softc->dev, "IOTLB disabled by BIOS.\n");

                if (supported && !amdvi_enable_iotlb) {
                        device_printf(softc->dev, "IOTLB disabled by user.\n");
                        supported = false;
                }
        } else
                supported = false;

        softc->iotlb = supported;

#endif
}

static int
amdvi_init_cmd(struct amdvi_softc *softc)
{
        struct amdvi_ctrl *ctrl = softc->ctrl;

        ctrl->cmd.len = 8;      /* Use 256 command buffer entries. */
        softc->cmd_max = 1 << ctrl->cmd.len;

        softc->cmd = malloc(sizeof(struct amdvi_cmd) *
            softc->cmd_max, M_AMDVI, M_WAITOK | M_ZERO);

        if ((uintptr_t)softc->cmd & PAGE_MASK)
                panic("AMDVi: Command buffer not aligned on page boundary.");

        ctrl->cmd.base = vtophys(softc->cmd) / PAGE_SIZE;
        /*
         * XXX: Reset the h/w pointers in case the IOMMU is restarting;
         * empirically, the h/w doesn't clear these pointers on its own.
         */
        ctrl->cmd_tail = 0;
        ctrl->cmd_head = 0;

        return (0);
}

/*
 * Note: Update the tail pointer only after the command has been written,
 * since the tail pointer update causes the h/w to execute new commands;
 * see section 3.3 of the AMD IOMMU spec ver 2.0.
 */
/* Get the command at the tail pointer w/o updating the pointer. */
static struct amdvi_cmd *
amdvi_get_cmd_tail(struct amdvi_softc *softc)
{
        struct amdvi_ctrl *ctrl;
        struct amdvi_cmd *tail;

        KASSERT(softc, ("softc is NULL"));
        KASSERT(softc->cmd != NULL, ("cmd is NULL"));

        ctrl = softc->ctrl;
        KASSERT(ctrl != NULL, ("ctrl is NULL"));

        tail = (struct amdvi_cmd *)((uint8_t *)softc->cmd +
            ctrl->cmd_tail);

        return (tail);
}

/*
 * Update the command tail pointer, which will start command execution.
 */
static void
amdvi_update_cmd_tail(struct amdvi_softc *softc)
{
        struct amdvi_ctrl *ctrl;
        int size;

        size = sizeof(struct amdvi_cmd);
        KASSERT(softc->cmd != NULL, ("cmd is NULL"));

        ctrl = softc->ctrl;
        KASSERT(ctrl != NULL, ("ctrl is NULL"));

        ctrl->cmd_tail = MOD_INC(ctrl->cmd_tail, size, softc->cmd_max);
        softc->total_cmd++;

#ifdef AMDVI_DEBUG_CMD
        device_printf(softc->dev, "cmd_tail: Tail:0x%x, Head:0x%x.\n",
            ctrl->cmd_tail,
            ctrl->cmd_head);
#endif

}

/*
 * Various commands supported by IOMMU.
 */

/* Completion wait command. */
static void
amdvi_cmd_cmp(struct amdvi_softc *softc, const uint64_t data)
{
        struct amdvi_cmd *cmd;
        uint64_t pa;

        cmd = amdvi_get_cmd_tail(softc);
        KASSERT(cmd != NULL, ("Cmd is NULL"));

        pa = vtophys(&softc->cmp_data);
        cmd->opcode = AMDVI_CMP_WAIT_OPCODE;
        cmd->word0 = (pa & 0xFFFFFFF8) | AMDVI_CMP_WAIT_STORE;
        cmd->word1 = (pa >> 32) & 0xFFFFF;
        cmd->addr = data;

        amdvi_update_cmd_tail(softc);
}

/* Invalidate device table entry. */
static void
amdvi_cmd_inv_dte(struct amdvi_softc *softc, uint16_t devid)
{
        struct amdvi_cmd *cmd;

        cmd = amdvi_get_cmd_tail(softc);
        KASSERT(cmd != NULL, ("Cmd is NULL"));
        cmd->opcode = AMDVI_INVD_DTE_OPCODE;
        cmd->word0 = devid;
        amdvi_update_cmd_tail(softc);
#ifdef AMDVI_DEBUG_CMD
        device_printf(softc->dev, "Invalidated DTE:0x%x\n", devid);
#endif
}
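
/*
 * Illustrative note (not from the original source): with cmd.len = 8 the
 * ring set up in amdvi_init_cmd() above holds 1 << 8 = 256 entries;
 * assuming the 128-bit command format of the spec (sizeof(struct
 * amdvi_cmd) == 16), that is exactly one 4KB page.  MOD_INC()/MOD_DEC()
 * advance byte offsets within that ring, e.g.
 * MOD_INC(0xFF0, 16, 256) = 0x1000 % 0x1000 = 0, wrapping the tail back
 * to the start of the buffer.
 */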

/* Invalidate IOMMU page, used for invalidation of a domain. */
static void
amdvi_cmd_inv_iommu_pages(struct amdvi_softc *softc, uint16_t domain_id,
    uint64_t addr, bool guest_nested,
    bool pde, bool page)
{
        struct amdvi_cmd *cmd;

        cmd = amdvi_get_cmd_tail(softc);
        KASSERT(cmd != NULL, ("Cmd is NULL"));

        cmd->opcode = AMDVI_INVD_PAGE_OPCODE;
        cmd->word1 = domain_id;
        /*
         * Invalidate all addresses for this domain.
         */
        cmd->addr = addr;
        cmd->addr |= pde ? AMDVI_INVD_PAGE_PDE : 0;
        cmd->addr |= page ? AMDVI_INVD_PAGE_S : 0;

        amdvi_update_cmd_tail(softc);
}

#ifdef AMDVI_ATS_ENABLE
/* Invalidate device IOTLB. */
static void
amdvi_cmd_inv_iotlb(struct amdvi_softc *softc, uint16_t devid)
{
        struct amdvi_cmd *cmd;
        int qlen;

        if (!softc->iotlb)
                return;

        qlen = amdvi_find_ats_qlen(devid);
        if (qlen < 0) {
                panic("AMDVI: Invalid ATS qlen(%d) for device %d.%d.%d\n",
                    qlen, RID2PCI_STR(devid));
        }
        cmd = amdvi_get_cmd_tail(softc);
        KASSERT(cmd != NULL, ("Cmd is NULL"));

#ifdef AMDVI_DEBUG_CMD
        device_printf(softc->dev, "Invalidate IOTLB devID 0x%x"
            " Qlen:%d\n", devid, qlen);
#endif
        cmd->opcode = AMDVI_INVD_IOTLB_OPCODE;
        cmd->word0 = devid;
        cmd->word1 = qlen;
        cmd->addr = AMDVI_INVD_IOTLB_ALL_ADDR |
            AMDVI_INVD_IOTLB_S;
        amdvi_update_cmd_tail(softc);
}
#endif

#ifdef notyet                           /* For Interrupt Remap. */
static void
amdvi_cmd_inv_intr_map(struct amdvi_softc *softc,
    uint16_t devid)
{
        struct amdvi_cmd *cmd;

        cmd = amdvi_get_cmd_tail(softc);
        KASSERT(cmd != NULL, ("Cmd is NULL"));
        cmd->opcode = AMDVI_INVD_INTR_OPCODE;
        cmd->word0 = devid;
        amdvi_update_cmd_tail(softc);
#ifdef AMDVI_DEBUG_CMD
        device_printf(softc->dev, "Invalidate INTR map of devID 0x%x\n", devid);
#endif
}
#endif

/* Invalidate domain using INVALIDATE_IOMMU_PAGES command. */
static void
amdvi_inv_domain(struct amdvi_softc *softc, uint16_t domain_id)
{
        struct amdvi_cmd *cmd __diagused;

        cmd = amdvi_get_cmd_tail(softc);
        KASSERT(cmd != NULL, ("Cmd is NULL"));

        /*
         * See section 3.3.3 of IOMMU spec rev 2.0, software note
         * for invalidating domain.
         */
        amdvi_cmd_inv_iommu_pages(softc, domain_id, AMDVI_INVD_PAGE_ALL_ADDR,
            false, true, true);

#ifdef AMDVI_DEBUG_CMD
        device_printf(softc->dev, "Invalidate domain:0x%x\n", domain_id);

#endif
}

static bool
amdvi_cmp_wait(struct amdvi_softc *softc)
{
#ifdef AMDVI_DEBUG_CMD
        struct amdvi_ctrl *ctrl = softc->ctrl;
#endif
        const uint64_t VERIFY = 0xA5A5;
        volatile uint64_t *read;
        int i;
        bool status;

        read = &softc->cmp_data;
        *read = 0;
        amdvi_cmd_cmp(softc, VERIFY);
        /* Wait for h/w to update completion data. */
        for (i = 0; i < 100 && (*read != VERIFY); i++) {
                DELAY(1000);            /* 1 ms */
        }
        status = (VERIFY == softc->cmp_data) ? true : false;

#ifdef AMDVI_DEBUG_CMD
        if (status)
                device_printf(softc->dev, "CMD completion DONE Tail:0x%x, "
                    "Head:0x%x, loop:%d.\n", ctrl->cmd_tail,
                    ctrl->cmd_head, i);
#endif
        return (status);
}
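
/*
 * Illustrative note (not from the original source): COMPLETION_WAIT with
 * the store bit set makes the IOMMU write the 64-bit operand (VERIFY
 * above) to the physical address encoded in word0/word1 once every prior
 * command in the ring has completed.  amdvi_cmp_wait() therefore polls
 * softc->cmp_data for up to 100ms, acting as a fence for queued
 * invalidations.
 */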

static void
amdvi_wait(struct amdvi_softc *softc)
{
        struct amdvi_ctrl *ctrl;
        int i;

        KASSERT(softc, ("softc is NULL"));

        ctrl = softc->ctrl;
        KASSERT(ctrl != NULL, ("ctrl is NULL"));
        /* Don't wait if h/w is not enabled. */
        if ((ctrl->control & AMDVI_CTRL_EN) == 0)
                return;

        for (i = 0; i < 10; i++) {
                if (amdvi_cmp_wait(softc))
                        return;
        }

        device_printf(softc->dev, "Error: completion failed"
            " tail:0x%x, head:0x%x.\n",
            ctrl->cmd_tail, ctrl->cmd_head);
        /* Dump the last command. */
        amdvi_dump_cmds(softc, 1);
}

static void
amdvi_dump_cmds(struct amdvi_softc *softc, int count)
{
        struct amdvi_ctrl *ctrl;
        struct amdvi_cmd *cmd;
        int off, i;

        ctrl = softc->ctrl;
        device_printf(softc->dev, "Dump last %d command(s):\n", count);
        /*
         * If the h/w is stuck on completion, the culprit is the previous
         * command; start dumping from the previous command onward.
         */
        off = MOD_DEC(ctrl->cmd_head, sizeof(struct amdvi_cmd),
            softc->cmd_max);
        for (i = 0; off != ctrl->cmd_tail && i < count; i++) {
                cmd = (struct amdvi_cmd *)((uint8_t *)softc->cmd + off);
                printf(" [CMD%d, off:0x%x] opcode= 0x%x 0x%x"
                    " 0x%x 0x%lx\n", i, off, cmd->opcode,
                    cmd->word0, cmd->word1, cmd->addr);
                off = MOD_INC(off, sizeof(struct amdvi_cmd), softc->cmd_max);
        }
}

static int
amdvi_init_event(struct amdvi_softc *softc)
{
        struct amdvi_ctrl *ctrl;

        ctrl = softc->ctrl;
        ctrl->event.len = 8;
        softc->event_max = 1 << ctrl->event.len;
        softc->event = malloc(sizeof(struct amdvi_event) *
            softc->event_max, M_AMDVI, M_WAITOK | M_ZERO);
        if ((uintptr_t)softc->event & PAGE_MASK) {
                device_printf(softc->dev,
                    "Event buffer not aligned on page.\n");
                return (EINVAL);
        }
        ctrl->event.base = vtophys(softc->event) / PAGE_SIZE;

        /* Reset the pointers. */
        ctrl->evt_head = 0;
        ctrl->evt_tail = 0;

        return (0);
}

static inline void
amdvi_decode_evt_flag(uint16_t flag)
{

        flag &= AMDVI_EVENT_FLAG_MASK;
        printf(" 0x%b]\n", flag,
            "\020"
            "\001GN"
            "\002NX"
            "\003US"
            "\004I"
            "\005PR"
            "\006RW"
            "\007PE"
            "\010RZ"
            "\011TR"
            );
}
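
/*
 * Illustrative note (not from the original source): the kernel's %b
 * conversion prints an integer followed by the names of its set bits.
 * The first byte of the format string ("\020" == 16) selects hexadecimal
 * output; each subsequent "\00N name" pair names bit N (1-based from the
 * LSB), so a flag value of 0x22 above would print as "0x22<NX,RW>".
 */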

/* See section 2.5.4 of AMD IOMMU spec ver 2.62. */
static inline void
amdvi_decode_evt_flag_type(uint8_t type)
{

        switch (AMDVI_EVENT_FLAG_TYPE(type)) {
        case 0:
                printf("RSVD\n");
                break;
        case 1:
                printf("Master Abort\n");
                break;
        case 2:
                printf("Target Abort\n");
                break;
        case 3:
                printf("Data Err\n");
                break;
        default:
                break;
        }
}

static void
amdvi_decode_inv_dte_evt(uint16_t devid, uint16_t domid, uint64_t addr,
    uint16_t flag)
{

        printf("\t[INVALID_DTE EVT: devId:0x%x DomId:0x%x"
            " Addr:0x%lx",
            devid, domid, addr);
        amdvi_decode_evt_flag(flag);
}

static void
amdvi_decode_pf_evt(uint16_t devid, uint16_t domid, uint64_t addr,
    uint16_t flag)
{

        printf("\t[IO_PAGE_FAULT EVT: devId:0x%x DomId:0x%x"
            " Addr:0x%lx",
            devid, domid, addr);
        amdvi_decode_evt_flag(flag);
}

static void
amdvi_decode_dte_hwerr_evt(uint16_t devid, uint16_t domid,
    uint64_t addr, uint16_t flag)
{

        printf("\t[DEV_TAB_HW_ERR EVT: devId:0x%x DomId:0x%x"
            " Addr:0x%lx", devid, domid, addr);
        amdvi_decode_evt_flag(flag);
        amdvi_decode_evt_flag_type(flag);
}

static void
amdvi_decode_page_hwerr_evt(uint16_t devid, uint16_t domid, uint64_t addr,
    uint16_t flag)
{

        printf("\t[PAGE_TAB_HW_ERR EVT: devId:0x%x DomId:0x%x"
            " Addr:0x%lx", devid, domid, addr);
        amdvi_decode_evt_flag(flag);
        /*
         * Pass the raw flag; amdvi_decode_evt_flag_type() applies
         * AMDVI_EVENT_FLAG_TYPE() itself.
         */
        amdvi_decode_evt_flag_type(flag);
}

static void
amdvi_decode_evt(struct amdvi_event *evt)
{
        struct amdvi_cmd *cmd;

        switch (evt->opcode) {
        case AMDVI_EVENT_INVALID_DTE:
                amdvi_decode_inv_dte_evt(evt->devid, evt->pasid_domid,
                    evt->addr, evt->flag);
                break;

        case AMDVI_EVENT_PFAULT:
                amdvi_decode_pf_evt(evt->devid, evt->pasid_domid,
                    evt->addr, evt->flag);
                break;

        case AMDVI_EVENT_DTE_HW_ERROR:
                amdvi_decode_dte_hwerr_evt(evt->devid, evt->pasid_domid,
                    evt->addr, evt->flag);
                break;

        case AMDVI_EVENT_PAGE_HW_ERROR:
                amdvi_decode_page_hwerr_evt(evt->devid, evt->pasid_domid,
                    evt->addr, evt->flag);
                break;

        case AMDVI_EVENT_ILLEGAL_CMD:
                /* FALL THROUGH */
        case AMDVI_EVENT_CMD_HW_ERROR:
                printf("\t[%s EVT]\n", (evt->opcode == AMDVI_EVENT_ILLEGAL_CMD) ?
                    "ILLEGAL CMD" : "CMD HW ERR");
                cmd = (struct amdvi_cmd *)PHYS_TO_DMAP(evt->addr);
                printf("\tCMD opcode= 0x%x 0x%x 0x%x 0x%lx\n",
                    cmd->opcode, cmd->word0, cmd->word1, cmd->addr);
                break;

        case AMDVI_EVENT_IOTLB_TIMEOUT:
                printf("\t[IOTLB_INV_TIMEOUT devid:0x%x addr:0x%lx]\n",
                    evt->devid, evt->addr);
                break;

        case AMDVI_EVENT_INVALID_DTE_REQ:
                printf("\t[INV_DTE devid:0x%x addr:0x%lx type:0x%x tr:%d]\n",
                    evt->devid, evt->addr, evt->flag >> 9,
                    (evt->flag >> 8) & 1);
                break;

        case AMDVI_EVENT_INVALID_PPR_REQ:
        case AMDVI_EVENT_COUNTER_ZERO:
                printf("AMD-Vi: v2 events.\n");
                break;

        default:
                printf("Unsupported AMD-Vi event:%d\n", evt->opcode);
        }
}

static void
amdvi_print_events(struct amdvi_softc *softc)
{
        struct amdvi_ctrl *ctrl;
        struct amdvi_event *event;
        int i, size;

        ctrl = softc->ctrl;
        size = sizeof(struct amdvi_event);
        for (i = 0; i < softc->event_max; i++) {
                event = &softc->event[ctrl->evt_head / size];
                if (!event->opcode)
                        break;
                device_printf(softc->dev, "\t[Event%d: Head:0x%x Tail:0x%x]\n",
                    i, ctrl->evt_head, ctrl->evt_tail);
                amdvi_decode_evt(event);
                ctrl->evt_head = MOD_INC(ctrl->evt_head, size,
                    softc->event_max);
        }
}

static int
amdvi_init_dte(struct amdvi_softc *softc)
{
        struct amdvi_ctrl *ctrl;

        ctrl = softc->ctrl;
        ctrl->dte.base = vtophys(amdvi_dte) / PAGE_SIZE;
        ctrl->dte.size = 0x1FF;         /* 2MB device table. */

        return (0);
}
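
/*
 * Illustrative note (not from the original source): the Device Table Base
 * Address register encodes the table size as (size + 1) 4KB pages, so the
 * 0x1FF above means 512 pages = 2MB -- exactly the 0x10000 RIDs x 32-byte
 * DTEs asserted by the CTASSERTs on amdvi_dte[].
 */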

/*
 * Not all capabilities of the IOMMU are available in the ACPI IVHD flag
 * or EFR entry; read them directly from the device.
 */
static int
amdvi_print_pci_cap(device_t dev)
{
        struct amdvi_softc *softc;
        uint32_t off, cap;

        softc = device_get_softc(dev);
        off = softc->cap_off;

        /*
         * Section 3.7.1 of IOMMU spec rev 2.0.
         * Read capability from device.
         */
        cap = amdvi_pci_read(softc, off);

        /* Make sure capability type[18:16] is 3. */
        KASSERT((((cap >> 16) & 0x7) == 0x3),
            ("Not an IOMMU capability 0x%x@0x%x", cap, off));

        softc->pci_cap = cap >> 24;
        device_printf(softc->dev, "PCI cap 0x%x@0x%x feature:%b\n",
            cap, off, softc->pci_cap,
            "\20\1IOTLB\2HT\3NPCache\4EFR\5CapExt");

        return (0);
}

static void
amdvi_event_intr(void *arg)
{
        struct amdvi_softc *softc;
        struct amdvi_ctrl *ctrl;

        softc = (struct amdvi_softc *)arg;
        ctrl = softc->ctrl;
        device_printf(softc->dev, "EVT INTR %ld Status:0x%x"
            " [EVT Head:0x%x Tail:0x%x]\n", softc->event_intr_cnt++,
            ctrl->status, ctrl->evt_head, ctrl->evt_tail);
        printf(" [CMD Total 0x%lx] Tail:0x%x, Head:0x%x.\n",
            softc->total_cmd, ctrl->cmd_tail, ctrl->cmd_head);

        amdvi_print_events(softc);
        /*
         * These status bits are write-1-to-clear; and-ing keeps only the
         * set bits for the write-back, clearing them.
         */
        ctrl->status &= AMDVI_STATUS_EV_OF | AMDVI_STATUS_EV_INTR;
}

static void
amdvi_free_evt_intr_res(device_t dev)
{

        struct amdvi_softc *softc;
        device_t mmio_dev;

        softc = device_get_softc(dev);
        mmio_dev = softc->pci_dev;

        IVHD_TEARDOWN_INTR(mmio_dev);
}

static int
amdvi_alloc_intr_resources(struct amdvi_softc *softc)
{
        struct amdvi_ctrl *ctrl;
        device_t dev, mmio_dev;
        int err;

        dev = softc->dev;
        mmio_dev = softc->pci_dev;

        /* Clear interrupt status bits (write-1-to-clear). */
        ctrl = softc->ctrl;
        ctrl->status &= AMDVI_STATUS_EV_OF | AMDVI_STATUS_EV_INTR;

        err = IVHD_SETUP_INTR(mmio_dev, amdvi_event_intr, softc, "fault");
        if (err)
                device_printf(dev, "Interrupt setup failed on %s\n",
                    device_get_nameunit(mmio_dev));
        return (err);
}

static void
amdvi_print_dev_cap(struct amdvi_softc *softc)
{
        struct ivhd_dev_cfg *cfg;
        int i;

        cfg = softc->dev_cfg;
        for (i = 0; i < softc->dev_cfg_cnt; i++) {
                device_printf(softc->dev, "device [0x%x - 0x%x] "
                    "config:%b%s\n", cfg->start_id, cfg->end_id,
                    cfg->data,
                    "\020\001INIT\002ExtInt\003NMI"
                    "\007LINT0\010LINT1",
                    cfg->enable_ats ? "ATS enabled" : "");
                cfg++;
        }
}

static int
amdvi_handle_sysctl(SYSCTL_HANDLER_ARGS)
{
        struct amdvi_softc *softc;
        int result, type, error = 0;

        softc = (struct amdvi_softc *)arg1;
        type = arg2;

        switch (type) {
        case 0:
                result = softc->ctrl->cmd_head;
                error = sysctl_handle_int(oidp, &result, 0,
                    req);
                break;
        case 1:
                result = softc->ctrl->cmd_tail;
                error = sysctl_handle_int(oidp, &result, 0,
                    req);
                break;
        case 2:
                result = softc->ctrl->evt_head;
                error = sysctl_handle_int(oidp, &result, 0,
                    req);
                break;
        case 3:
                result = softc->ctrl->evt_tail;
                error = sysctl_handle_int(oidp, &result, 0,
                    req);
                break;

        default:
                device_printf(softc->dev, "Unknown sysctl:%d\n", type);
        }

        return (error);
}

static void
amdvi_add_sysctl(struct amdvi_softc *softc)
{
        struct sysctl_oid_list *child;
        struct sysctl_ctx_list *ctx;
        device_t dev;

        dev = softc->dev;
        ctx = device_get_sysctl_ctx(dev);
        child = SYSCTL_CHILDREN(device_get_sysctl_tree(dev));

        SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "event_intr_count", CTLFLAG_RD,
            &softc->event_intr_cnt, "Event interrupt count");
        SYSCTL_ADD_ULONG(ctx, child, OID_AUTO, "command_count", CTLFLAG_RD,
            &softc->total_cmd, "Command submitted count");
        SYSCTL_ADD_U16(ctx, child, OID_AUTO, "pci_rid", CTLFLAG_RD,
            &softc->pci_rid, 0, "IOMMU RID");
        SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "command_head",
            CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, softc, 0,
            amdvi_handle_sysctl, "IU", "Command head");
        SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "command_tail",
            CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, softc, 1,
            amdvi_handle_sysctl, "IU", "Command tail");
        SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "event_head",
            CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, softc, 2,
            amdvi_handle_sysctl, "IU", "Event head");
        SYSCTL_ADD_PROC(ctx, child, OID_AUTO, "event_tail",
            CTLTYPE_UINT | CTLFLAG_RD | CTLFLAG_MPSAFE, softc, 3,
            amdvi_handle_sysctl, "IU", "Event tail");
}

int
amdvi_setup_hw(struct amdvi_softc *softc)
{
        device_t dev;
        int status;

        dev = softc->dev;

        amdvi_hw_enable_iotlb(softc);

        amdvi_print_dev_cap(softc);

        if ((status = amdvi_print_pci_cap(dev)) != 0) {
                device_printf(dev, "Couldn't read PCI capability.\n");
                return (status);
        }
        if ((status = amdvi_init_cmd(softc)) != 0) {
                device_printf(dev, "Couldn't configure command buffer.\n");
                return (status);
        }
        if ((status = amdvi_init_event(softc)) != 0) {
                device_printf(dev, "Couldn't configure event buffer.\n");
                return (status);
        }
        if ((status = amdvi_init_dte(softc)) != 0) {
                device_printf(dev, "Couldn't configure device table.\n");
                return (status);
        }
        if ((status = amdvi_alloc_intr_resources(softc)) != 0) {
                return (status);
        }
        amdvi_add_sysctl(softc);
        return (0);
}

int
amdvi_teardown_hw(struct amdvi_softc *softc)
{
        device_t dev;

        dev = softc->dev;

        /*
         * Called after disable; the h/w is stopped by now, so free all
         * the resources.
         */
        amdvi_free_evt_intr_res(dev);

        if (softc->cmd)
                free(softc->cmd, M_AMDVI);

        if (softc->event)
                free(softc->event, M_AMDVI);

        return (0);
}

/*********** bhyve interfaces *********************/
static int
amdvi_init(void)
{
        if (!ivhd_count) {
                return (EIO);
        }
        if (!amdvi_enable_user && ivhd_count) {
                printf("bhyve: Found %d AMD-Vi/IOMMU device(s), "
                    "use hw.vmm.amdvi.enable=1 to enable pass-through.\n",
                    ivhd_count);
                return (EINVAL);
        }
        return (0);
}

static void
amdvi_cleanup(void)
{
        /* Nothing. */
}

static uint16_t
amdvi_domainId(void)
{

        /*
         * If we hit the maximum domain limit, roll over, leaving the
         * host domain (0) alone.
         * XXX: make sure that this domain is not used.
         */
        if (amdvi_dom_id == AMDVI_MAX_DOMAIN)
                amdvi_dom_id = 1;

        return ((uint16_t)amdvi_dom_id++);
}

static void
amdvi_do_inv_domain(uint16_t domain_id, bool create)
{
        struct amdvi_softc *softc;
        int i;

        for (i = 0; i < ivhd_count; i++) {
                softc = device_get_softc(ivhd_devs[i]);
                KASSERT(softc, ("softc is NULL"));
                /*
                 * If not-present pages are cached, invalidate pages even
                 * when creating a domain.
                 */
#if 0
                if (create && ((softc->pci_cap & AMDVI_PCI_CAP_NPCACHE) == 0))
                        continue;
#endif
                amdvi_inv_domain(softc, domain_id);
                amdvi_wait(softc);
        }
}

static void *
amdvi_create_domain(vm_paddr_t maxaddr)
{
        struct amdvi_domain *dom;

        dom = malloc(sizeof(struct amdvi_domain), M_AMDVI, M_ZERO | M_WAITOK);
        dom->id = amdvi_domainId();
        //dom->maxaddr = maxaddr;
#ifdef AMDVI_DEBUG_CMD
        printf("Created domain #%d\n", dom->id);
#endif
        /*
         * The host domain (#0) doesn't get a translation table unless
         * amdvi_host_ptp is set.
         */
        if (dom->id || amdvi_host_ptp)
                dom->ptp = malloc(PAGE_SIZE, M_AMDVI, M_WAITOK | M_ZERO);

        dom->ptp_level = amdvi_ptp_level;

        amdvi_do_inv_domain(dom->id, true);
        SLIST_INSERT_HEAD(&dom_head, dom, next);

        return (dom);
}
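
/*
 * Illustrative note (not from the original source): each page-table level
 * resolves 9 bits of guest-physical address, so the default ptp_level of 4
 * covers 4 * 9 + 12 = 48 bits, matching the x86 PML4-style walk performed
 * by amdvi_set_pt() below.
 */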

static void
amdvi_free_ptp(uint64_t *ptp, int level)
{
        int i;

        if (level < 1)
                return;

        for (i = 0; i < NPTEPG ; i++) {
                if ((ptp[i] & AMDVI_PT_PRESENT) == 0)
                        continue;
                /* XXX: Add super-page or PTE mapping > 4KB. */
#ifdef notyet
                /* Super-page mapping. */
                if (AMDVI_PD_SUPER(ptp[i]))
                        continue;
#endif

                amdvi_free_ptp((uint64_t *)PHYS_TO_DMAP(ptp[i]
                    & AMDVI_PT_MASK), level - 1);
        }

        free(ptp, M_AMDVI);
}

static void
amdvi_destroy_domain(void *arg)
{
        struct amdvi_domain *domain;

        domain = (struct amdvi_domain *)arg;
        KASSERT(domain, ("domain is NULL"));
#ifdef AMDVI_DEBUG_CMD
        printf("Destroying domain %d\n", domain->id);
#endif
        if (domain->ptp)
                amdvi_free_ptp(domain->ptp, domain->ptp_level);

        amdvi_do_inv_domain(domain->id, false);
        SLIST_REMOVE(&dom_head, domain, amdvi_domain, next);
        free(domain, M_AMDVI);
}

static uint64_t
amdvi_set_pt(uint64_t *pt, int level, vm_paddr_t gpa,
    vm_paddr_t hpa, uint64_t pg_size, bool create)
{
        uint64_t *page, pa;
        int shift, index;
        const int PT_SHIFT = 9;
        const int PT_INDEX_MASK = (1 << PT_SHIFT) - 1;  /* Based on PT_SHIFT */

        if (!pg_size)
                return (0);

        if (hpa & (pg_size - 1)) {
                printf("HPA is not size aligned.\n");
                return (0);
        }
        if (gpa & (pg_size - 1)) {
                printf("GPA is not size aligned.\n");
                return (0);
        }
        shift = PML4SHIFT;
        while ((shift > PAGE_SHIFT) && (pg_size < (1UL << shift))) {
                index = (gpa >> shift) & PT_INDEX_MASK;

                if ((pt[index] == 0) && create) {
                        page = malloc(PAGE_SIZE, M_AMDVI, M_WAITOK | M_ZERO);
                        pa = vtophys(page);
                        pt[index] = pa | AMDVI_PT_PRESENT | AMDVI_PT_RW |
                            ((level - 1) << AMDVI_PD_LEVEL_SHIFT);
                }
#ifdef AMDVI_DEBUG_PTE
                if ((gpa % 0x1000000) == 0)
                        printf("[level%d, shift = %d]PTE:0x%lx\n",
                            level, shift, pt[index]);
#endif
#define PTE2PA(x)       ((uint64_t)(x) & AMDVI_PT_MASK)
                pa = PTE2PA(pt[index]);
                pt = (uint64_t *)PHYS_TO_DMAP(pa);
                shift -= PT_SHIFT;
                level--;
        }

        /* Leaf entry. */
        index = (gpa >> shift) & PT_INDEX_MASK;

        if (create) {
                pt[index] = hpa | AMDVI_PT_RW | AMDVI_PT_PRESENT;
        } else
                pt[index] = 0;

#ifdef AMDVI_DEBUG_PTE
        if ((gpa % 0x1000000) == 0)
                printf("[Last level%d, shift = %d]PTE:0x%lx\n",
                    level, shift, pt[index]);
#endif
        return (1ULL << shift);
}

static uint64_t
amdvi_update_mapping(struct amdvi_domain *domain, vm_paddr_t gpa,
    vm_paddr_t hpa, uint64_t size, bool create)
{
        uint64_t mapped, *ptp, len;
        int level;

        KASSERT(domain, ("domain is NULL"));
        level = domain->ptp_level;
        KASSERT(level, ("Page table level is 0"));

        ptp = domain->ptp;
        KASSERT(ptp, ("PTP is NULL"));
        mapped = 0;
        while (mapped < size) {
                len = amdvi_set_pt(ptp, level, gpa + mapped, hpa + mapped,
                    PAGE_SIZE, create);
                if (!len) {
                        printf("Error: Couldn't map HPA:0x%lx GPA:0x%lx\n",
                            hpa, gpa);
                        return (0);
                }
                mapped += len;
        }

        return (mapped);
}
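
/*
 * Illustrative walk (not from the original source): mapping GPA 0x40201000
 * with 4 levels starts at shift = PML4SHIFT (39) and steps 39 -> 30 -> 21,
 * allocating any missing directory page along the way, then writes the
 * leaf PTE at shift 12 using index (0x40201000 >> 12) & 0x1FF = 1.
 */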

static uint64_t
amdvi_create_mapping(void *arg, vm_paddr_t gpa, vm_paddr_t hpa,
    uint64_t len)
{
        struct amdvi_domain *domain;

        domain = (struct amdvi_domain *)arg;

        if (domain->id && !domain->ptp) {
                printf("ptp is NULL");
                return (-1);
        }

        /*
         * If the host domain was created w/o a page table, skip the
         * IOMMU page table set-up.
         */
        if (domain->ptp)
                return (amdvi_update_mapping(domain, gpa, hpa, len, true));
        else
                return (len);
}

static uint64_t
amdvi_remove_mapping(void *arg, vm_paddr_t gpa, uint64_t len)
{
        struct amdvi_domain *domain;

        domain = (struct amdvi_domain *)arg;
        /*
         * If the host domain was created w/o a page table, skip the
         * IOMMU page table set-up.
         */
        if (domain->ptp)
                return (amdvi_update_mapping(domain, gpa, 0, len, false));
        return (len);
}

static struct amdvi_softc *
amdvi_find_iommu(uint16_t devid)
{
        struct amdvi_softc *softc;
        int i, j;

        for (i = 0; i < ivhd_count; i++) {
                softc = device_get_softc(ivhd_devs[i]);
                for (j = 0; j < softc->dev_cfg_cnt; j++)
                        if ((devid >= softc->dev_cfg[j].start_id) &&
                            (devid <= softc->dev_cfg[j].end_id))
                                return (softc);
        }

        return (NULL);
}
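
/*
 * Illustrative note (not from the original source): devid is the 16-bit
 * PCI RID (bus[15:8], slot[7:3], func[2:0]) and indexes amdvi_dte[]
 * directly; e.g. RID 0x0210 is bus 2, slot 2, function 0.  Devices not
 * covered by any IVHD range make amdvi_find_iommu() return NULL, and the
 * add/remove callbacks below simply skip them.
 */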

/*
 * Set up a device table entry.
 * IOMMU spec rev 2.0, section 3.2.2.2: some of the fields must be
 * updated together, e.g. the read and write bits.
 */
static void
amdvi_set_dte(struct amdvi_domain *domain, struct amdvi_softc *softc,
    uint16_t devid, bool enable)
{
        struct amdvi_dte *temp;

        KASSERT(domain, ("domain is NULL for pci_rid:0x%x\n", devid));
        KASSERT(softc, ("softc is NULL for pci_rid:0x%x\n", devid));

        temp = &amdvi_dte[devid];

#ifdef AMDVI_ATS_ENABLE
        /* If IOMMU and device support IOTLB, enable it. */
        if (amdvi_dev_support_iotlb(softc, devid) && softc->iotlb)
                temp->iotlb_enable = 1;
#endif

        /* Avoid duplicate I/O faults. */
        temp->sup_second_io_fault = 1;
        temp->sup_all_io_fault = amdvi_disable_io_fault;

        temp->dt_valid = 1;
        temp->domain_id = domain->id;

        if (enable) {
                if (domain->ptp) {
                        temp->pt_base = vtophys(domain->ptp) >> 12;
                        temp->pt_level = amdvi_ptp_level;
                }
                /*
                 * XXX: The page table valid [TV] bit must be set even if
                 * the host domain's page tables are not enabled.
                 */
                temp->pt_valid = 1;
                temp->read_allow = 1;
                temp->write_allow = 1;
        }
}

static void
amdvi_inv_device(struct amdvi_softc *softc, uint16_t devid)
{
        KASSERT(softc, ("softc is NULL"));

        amdvi_cmd_inv_dte(softc, devid);
#ifdef AMDVI_ATS_ENABLE
        if (amdvi_dev_support_iotlb(softc, devid))
                amdvi_cmd_inv_iotlb(softc, devid);
#endif
        amdvi_wait(softc);
}

static void
amdvi_add_device(void *arg, uint16_t devid)
{
        struct amdvi_domain *domain;
        struct amdvi_softc *softc;

        domain = (struct amdvi_domain *)arg;
        KASSERT(domain != NULL, ("domain is NULL"));
#ifdef AMDVI_DEBUG_CMD
        printf("Assigning device(%d.%d.%d) to domain:%d\n",
            RID2PCI_STR(devid), domain->id);
#endif
        softc = amdvi_find_iommu(devid);
        if (softc == NULL)
                return;
        amdvi_set_dte(domain, softc, devid, true);
        amdvi_inv_device(softc, devid);
}

static void
amdvi_remove_device(void *arg, uint16_t devid)
{
        struct amdvi_domain *domain;
        struct amdvi_softc *softc;

        domain = (struct amdvi_domain *)arg;
#ifdef AMDVI_DEBUG_CMD
        printf("Remove device(0x%x) from domain:%d\n",
            devid, domain->id);
#endif
        softc = amdvi_find_iommu(devid);
        if (softc == NULL)
                return;
        amdvi_set_dte(domain, softc, devid, false);
        amdvi_inv_device(softc, devid);
}

static void
amdvi_enable(void)
{
        struct amdvi_ctrl *ctrl;
        struct amdvi_softc *softc;
        uint64_t val;
        int i;

        for (i = 0; i < ivhd_count; i++) {
                softc = device_get_softc(ivhd_devs[i]);
                KASSERT(softc, ("softc is NULL\n"));
                ctrl = softc->ctrl;
                KASSERT(ctrl, ("ctrl is NULL\n"));

                val = (AMDVI_CTRL_EN |
                    AMDVI_CTRL_CMD |
                    AMDVI_CTRL_ELOG |
                    AMDVI_CTRL_ELOGINT |
                    AMDVI_CTRL_INV_TO_1S);

                if (softc->ivhd_flag & IVHD_FLAG_COH)
                        val |= AMDVI_CTRL_COH;
                if (softc->ivhd_flag & IVHD_FLAG_HTT)
                        val |= AMDVI_CTRL_HTT;
                if (softc->ivhd_flag & IVHD_FLAG_RPPW)
                        val |= AMDVI_CTRL_RPPW;
                if (softc->ivhd_flag & IVHD_FLAG_PPW)
                        val |= AMDVI_CTRL_PPW;
                if (softc->ivhd_flag & IVHD_FLAG_ISOC)
                        val |= AMDVI_CTRL_ISOC;

                ctrl->control = val;
        }
}

static void
amdvi_disable(void)
{
        struct amdvi_ctrl *ctrl;
        struct amdvi_softc *softc;
        int i;

        for (i = 0; i < ivhd_count; i++) {
                softc = device_get_softc(ivhd_devs[i]);
                KASSERT(softc, ("softc is NULL\n"));
                ctrl = softc->ctrl;
                KASSERT(ctrl, ("ctrl is NULL\n"));

                ctrl->control = 0;
        }
}

static void
amdvi_invalidate_tlb(void *arg)
{
        struct amdvi_domain *domain;

        domain = (struct amdvi_domain *)arg;
        KASSERT(domain, ("domain is NULL"));
        amdvi_do_inv_domain(domain->id, false);
}

const struct iommu_ops iommu_ops_amd = {
        .init = amdvi_init,
        .cleanup = amdvi_cleanup,
        .enable = amdvi_enable,
        .disable = amdvi_disable,
        .create_domain = amdvi_create_domain,
        .destroy_domain = amdvi_destroy_domain,
        .create_mapping = amdvi_create_mapping,
        .remove_mapping = amdvi_remove_mapping,
        .add_device = amdvi_add_device,
        .remove_device = amdvi_remove_device,
        .invalidate_tlb = amdvi_invalidate_tlb
};