/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/malloc.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/pci/pcireg.h>

#include <machine/pmap.h>
#include <machine/vmparam.h>
#include <contrib/dev/acpica/include/acpi.h>

#include "io/iommu.h"

/*
 * Documented in the "Intel Virtualization Technology for Directed I/O",
 * Architecture Spec, September 2008.
 */
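/*
 * Summary of the translation structures set up below: the PCI bus number
 * indexes the 256-entry root table, each entry of which points to a per-bus
 * context table.  The context table is indexed by (slot << 3 | func), and a
 * context entry points to the root of its domain's page-table hierarchy.
 */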
/* Section 10.4 "Register Descriptions" */
struct vtdmap {
	volatile uint32_t version;	/* 0x00: VER_REG */
	volatile uint32_t res0;
	volatile uint64_t cap;		/* 0x08: CAP_REG */
	volatile uint64_t ext_cap;	/* 0x10: ECAP_REG */
	volatile uint32_t gcr;		/* 0x18: GCMD_REG */
	volatile uint32_t gsr;		/* 0x1C: GSTS_REG */
	volatile uint64_t rta;		/* 0x20: RTADDR_REG */
	volatile uint64_t ccr;		/* 0x28: CCMD_REG */
};

#define	VTD_CAP_SAGAW(cap)	(((cap) >> 8) & 0x1F)
#define	VTD_CAP_ND(cap)		((cap) & 0x7)
#define	VTD_CAP_CM(cap)		(((cap) >> 7) & 0x1)
#define	VTD_CAP_SPS(cap)	(((cap) >> 34) & 0xF)
#define	VTD_CAP_RWBF(cap)	(((cap) >> 4) & 0x1)

#define	VTD_ECAP_DI(ecap)	(((ecap) >> 2) & 0x1)
#define	VTD_ECAP_COHERENCY(ecap) ((ecap) & 0x1)
#define	VTD_ECAP_IRO(ecap)	(((ecap) >> 8) & 0x3FF)

#define	VTD_GCR_WBF		(1 << 27)
#define	VTD_GCR_SRTP		(1 << 30)
#define	VTD_GCR_TE		(1 << 31)

#define	VTD_GSR_WBFS		(1 << 27)
#define	VTD_GSR_RTPS		(1 << 30)
#define	VTD_GSR_TES		(1 << 31)

#define	VTD_CCR_ICC		(1UL << 63)	/* invalidate context cache */
#define	VTD_CCR_CIRG_GLOBAL	(1UL << 61)	/* global invalidation */

#define	VTD_IIR_IVT		(1UL << 63)	/* invalidate IOTLB */
#define	VTD_IIR_IIRG_GLOBAL	(1ULL << 60)	/* global IOTLB invalidation */
#define	VTD_IIR_IIRG_DOMAIN	(2ULL << 60)	/* domain IOTLB invalidation */
#define	VTD_IIR_IIRG_PAGE	(3ULL << 60)	/* page IOTLB invalidation */
#define	VTD_IIR_DRAIN_READS	(1ULL << 49)	/* drain pending DMA reads */
#define	VTD_IIR_DRAIN_WRITES	(1ULL << 48)	/* drain pending DMA writes */
#define	VTD_IIR_DOMAIN_P	32		/* domain id bit position */

#define	VTD_ROOT_PRESENT	0x1
#define	VTD_CTX_PRESENT		0x1
#define	VTD_CTX_TT_ALL		(1UL << 2)

#define	VTD_PTE_RD		(1UL << 0)
#define	VTD_PTE_WR		(1UL << 1)
#define	VTD_PTE_SUPERPAGE	(1UL << 7)
#define	VTD_PTE_ADDR_M		(0x000FFFFFFFFFF000UL)

struct domain {
	uint64_t	*ptp;		/* first level page table page */
	int		pt_levels;	/* number of page table levels */
	int		addrwidth;	/* 'AW' field in context entry */
	int		spsmask;	/* supported super page sizes */
	u_int		id;		/* domain id */
	vm_paddr_t	maxaddr;	/* highest address to be mapped */
	SLIST_ENTRY(domain) next;
};

static SLIST_HEAD(, domain) domhead;

#define	DRHD_MAX_UNITS	8
static int drhd_num;
static struct vtdmap *vtdmaps[DRHD_MAX_UNITS];
static int max_domains;
typedef int (*drhd_ident_func_t)(void);

static uint64_t root_table[PAGE_SIZE / sizeof(uint64_t)] __aligned(4096);
static uint64_t ctx_tables[256][PAGE_SIZE / sizeof(uint64_t)] __aligned(4096);

static MALLOC_DEFINE(M_VTD, "vtd", "vtd");

static int
vtd_max_domains(struct vtdmap *vtdmap)
{
	int nd;

	nd = VTD_CAP_ND(vtdmap->cap);

	switch (nd) {
	case 0:
		return (16);
	case 1:
		return (64);
	case 2:
		return (256);
	case 3:
		return (1024);
	case 4:
		return (4 * 1024);
	case 5:
		return (16 * 1024);
	case 6:
		return (64 * 1024);
	default:
		panic("vtd_max_domains: invalid value of nd (0x%0x)", nd);
	}
}
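/*
 * vtd_max_domains() above encodes the CAP.ND field: the hardware supports
 * 2^(4 + 2 * nd) domain ids.  For example, nd = 2 yields 2^8 = 256 domains.
 */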
static u_int
domain_id(void)
{
	u_int id;
	struct domain *dom;

	/* Skip domain id 0 - it is reserved when the Caching Mode field is set */
	for (id = 1; id < max_domains; id++) {
		SLIST_FOREACH(dom, &domhead, next) {
			if (dom->id == id)
				break;
		}
		if (dom == NULL)
			break;		/* found an unused id */
	}

	if (id >= max_domains)
		panic("domain ids exhausted");

	return (id);
}

static void
vtd_wbflush(struct vtdmap *vtdmap)
{

	if (VTD_ECAP_COHERENCY(vtdmap->ext_cap) == 0)
		pmap_invalidate_cache();

	if (VTD_CAP_RWBF(vtdmap->cap)) {
		vtdmap->gcr = VTD_GCR_WBF;
		while ((vtdmap->gsr & VTD_GSR_WBFS) != 0)
			;
	}
}

static void
vtd_ctx_global_invalidate(struct vtdmap *vtdmap)
{

	vtdmap->ccr = VTD_CCR_ICC | VTD_CCR_CIRG_GLOBAL;
	while ((vtdmap->ccr & VTD_CCR_ICC) != 0)
		;
}

static void
vtd_iotlb_global_invalidate(struct vtdmap *vtdmap)
{
	int offset;
	volatile uint64_t *iotlb_reg, val;

	vtd_wbflush(vtdmap);

	offset = VTD_ECAP_IRO(vtdmap->ext_cap) * 16;
	iotlb_reg = (volatile uint64_t *)((caddr_t)vtdmap + offset + 8);

	*iotlb_reg = VTD_IIR_IVT | VTD_IIR_IIRG_GLOBAL |
	    VTD_IIR_DRAIN_READS | VTD_IIR_DRAIN_WRITES;

	while (1) {
		val = *iotlb_reg;
		if ((val & VTD_IIR_IVT) == 0)
			break;
	}
}

static void
vtd_translation_enable(struct vtdmap *vtdmap)
{

	vtdmap->gcr = VTD_GCR_TE;
	while ((vtdmap->gsr & VTD_GSR_TES) == 0)
		;
}

static void
vtd_translation_disable(struct vtdmap *vtdmap)
{

	vtdmap->gcr = 0;
	while ((vtdmap->gsr & VTD_GSR_TES) != 0)
		;
}
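/*
 * Note on the register protocol used above: writes to the global command
 * register take effect asynchronously, so each helper spins until the
 * corresponding global status bit reflects the requested state.  The IOTLB
 * invalidation registers have no fixed offset; ECAP.IRO locates them in
 * units of 16 bytes from the register base, with the IOTLB invalidate
 * register itself 8 bytes into that block.
 */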
static int
vtd_init(void)
{
	int i, units, remaining;
	struct vtdmap *vtdmap;
	vm_paddr_t ctx_paddr;
	char *end, envname[32];
	unsigned long mapaddr;
	ACPI_STATUS status;
	ACPI_TABLE_DMAR *dmar;
	ACPI_DMAR_HEADER *hdr;
	ACPI_DMAR_HARDWARE_UNIT *drhd;

	/*
	 * Allow the user to override the ACPI DMAR table by specifying the
	 * physical address of each remapping unit.
	 *
	 * The following example specifies two remapping units at
	 * physical addresses 0xfed90000 and 0xfeda0000 respectively.
	 * set vtd.regmap.0.addr=0xfed90000
	 * set vtd.regmap.1.addr=0xfeda0000
	 */
	for (units = 0; units < DRHD_MAX_UNITS; units++) {
		snprintf(envname, sizeof(envname), "vtd.regmap.%d.addr", units);
		if (getenv_ulong(envname, &mapaddr) == 0)
			break;
		vtdmaps[units] = (struct vtdmap *)PHYS_TO_DMAP(mapaddr);
	}

	if (units > 0)
		goto skip_dmar;

	/* Search for DMAR table. */
	status = AcpiGetTable(ACPI_SIG_DMAR, 0, (ACPI_TABLE_HEADER **)&dmar);
	if (ACPI_FAILURE(status))
		return (ENXIO);

	end = (char *)dmar + dmar->Header.Length;
	remaining = dmar->Header.Length - sizeof(ACPI_TABLE_DMAR);
	while (remaining > sizeof(ACPI_DMAR_HEADER)) {
		hdr = (ACPI_DMAR_HEADER *)(end - remaining);
		if (hdr->Length > remaining)
			break;
		/*
		 * From the Intel VT-d arch spec, version 1.3:
		 * BIOS implementations must report mapping structures
		 * in numerical order, i.e., all remapping structures of
		 * type 0 (DRHD) are enumerated before remapping structures
		 * of type 1 (RMRR), and so forth.
		 */
		if (hdr->Type != ACPI_DMAR_TYPE_HARDWARE_UNIT)
			break;

		drhd = (ACPI_DMAR_HARDWARE_UNIT *)hdr;
		vtdmaps[units++] = (struct vtdmap *)PHYS_TO_DMAP(drhd->Address);
		if (units >= DRHD_MAX_UNITS)
			break;
		remaining -= hdr->Length;
	}

	if (units <= 0)
		return (ENXIO);

skip_dmar:
	drhd_num = units;
	vtdmap = vtdmaps[0];

	if (VTD_CAP_CM(vtdmap->cap) != 0)
		panic("vtd_init: invalid caching mode");

	max_domains = vtd_max_domains(vtdmap);

	/*
	 * Set up the root-table to point to the context-entry tables.
	 */
	for (i = 0; i < 256; i++) {
		ctx_paddr = vtophys(ctx_tables[i]);
		if (ctx_paddr & PAGE_MASK)
			panic("ctx table (0x%0lx) not page aligned", ctx_paddr);

		root_table[i * 2] = ctx_paddr | VTD_ROOT_PRESENT;
	}

	return (0);
}

static void
vtd_cleanup(void)
{
}

static void
vtd_enable(void)
{
	int i;
	struct vtdmap *vtdmap;

	for (i = 0; i < drhd_num; i++) {
		vtdmap = vtdmaps[i];
		vtd_wbflush(vtdmap);

		/* Update the root table address */
		vtdmap->rta = vtophys(root_table);
		vtdmap->gcr = VTD_GCR_SRTP;
		while ((vtdmap->gsr & VTD_GSR_RTPS) == 0)
			;

		vtd_ctx_global_invalidate(vtdmap);
		vtd_iotlb_global_invalidate(vtdmap);

		vtd_translation_enable(vtdmap);
	}
}

static void
vtd_disable(void)
{
	int i;
	struct vtdmap *vtdmap;

	for (i = 0; i < drhd_num; i++) {
		vtdmap = vtdmaps[i];
		vtd_translation_disable(vtdmap);
	}
}

static void
vtd_add_device(void *arg, int bus, int slot, int func)
{
	int idx;
	uint64_t *ctxp;
	struct domain *dom = arg;
	vm_paddr_t pt_paddr;
	struct vtdmap *vtdmap;

	if (bus < 0 || bus > PCI_BUSMAX ||
	    slot < 0 || slot > PCI_SLOTMAX ||
	    func < 0 || func > PCI_FUNCMAX)
		panic("vtd_add_device: invalid bsf %d/%d/%d", bus, slot, func);

	vtdmap = vtdmaps[0];
	ctxp = ctx_tables[bus];
	pt_paddr = vtophys(dom->ptp);
	idx = (slot << 3 | func) * 2;

	if (ctxp[idx] & VTD_CTX_PRESENT) {
		panic("vtd_add_device: device %d/%d/%d is already owned by "
		    "domain %d", bus, slot, func,
		    (uint16_t)(ctxp[idx + 1] >> 8));
	}

	/*
	 * Order is important. The 'present' bit is set only after all fields
	 * of the context pointer are initialized.
	 */
	ctxp[idx + 1] = dom->addrwidth | (dom->id << 8);

	if (VTD_ECAP_DI(vtdmap->ext_cap))
		ctxp[idx] = VTD_CTX_TT_ALL;
	else
		ctxp[idx] = 0;

	ctxp[idx] |= pt_paddr | VTD_CTX_PRESENT;

	/*
	 * 'Not Present' entries are not cached in either the Context Cache
	 * or in the IOTLB, so there is no need to invalidate either of them.
	 */
}
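/*
 * Context-entry layout as used in vtd_add_device() above: each entry is
 * 128 bits, stored as two consecutive uint64_t words.  The low word holds
 * the page-table root address plus the translation-type and present bits;
 * the high word holds the address width in its low bits and the domain id
 * starting at bit 8.  For example, slot 3 function 0 occupies words 48 and
 * 49 of its bus's context table, since (3 << 3 | 0) * 2 == 48.
 */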
static void
vtd_remove_device(void *arg, int bus, int slot, int func)
{
	int i, idx;
	uint64_t *ctxp;
	struct vtdmap *vtdmap;

	if (bus < 0 || bus > PCI_BUSMAX ||
	    slot < 0 || slot > PCI_SLOTMAX ||
	    func < 0 || func > PCI_FUNCMAX)
		panic("vtd_remove_device: invalid bsf %d/%d/%d", bus, slot, func);

	ctxp = ctx_tables[bus];
	idx = (slot << 3 | func) * 2;

	/*
	 * Order is important. The 'present' bit must be cleared first.
	 */
	ctxp[idx] = 0;
	ctxp[idx + 1] = 0;

	/*
	 * Invalidate the Context Cache and the IOTLB.
	 *
	 * XXX use device-selective invalidation for Context Cache
	 * XXX use domain-selective invalidation for IOTLB
	 */
	for (i = 0; i < drhd_num; i++) {
		vtdmap = vtdmaps[i];
		vtd_ctx_global_invalidate(vtdmap);
		vtd_iotlb_global_invalidate(vtdmap);
	}
}

#define	CREATE_MAPPING	0
#define	REMOVE_MAPPING	1

static uint64_t
vtd_update_mapping(void *arg, vm_paddr_t gpa, vm_paddr_t hpa, uint64_t len,
    int remove)
{
	struct domain *dom;
	int i, spshift, ptpshift, ptpindex, nlevels;
	uint64_t spsize, *ptp;

	dom = arg;
	ptpindex = 0;
	ptpshift = 0;

	if (gpa & PAGE_MASK)
		panic("vtd_update_mapping: unaligned gpa 0x%0lx", gpa);

	if (hpa & PAGE_MASK)
		panic("vtd_update_mapping: unaligned hpa 0x%0lx", hpa);

	if (len & PAGE_MASK)
		panic("vtd_update_mapping: unaligned len 0x%0lx", len);

	/*
	 * Compute the size of the mapping that we can accommodate.
	 *
	 * This is based on three factors:
	 * - supported super page size
	 * - alignment of the region starting at 'gpa' and 'hpa'
	 * - length of the region 'len'
	 */
	spshift = 48;
	for (i = 3; i >= 0; i--) {
		/* i = 3..0 correspond to 1TB, 512GB, 1GB and 2MB pages */
		spsize = 1UL << spshift;
		if ((dom->spsmask & (1 << i)) != 0 &&
		    (gpa & (spsize - 1)) == 0 &&
		    (hpa & (spsize - 1)) == 0 &&
		    (len >= spsize)) {
			break;
		}
		spshift -= 9;
	}

	ptp = dom->ptp;
	nlevels = dom->pt_levels;
	while (--nlevels >= 0) {
		ptpshift = 12 + nlevels * 9;
		ptpindex = (gpa >> ptpshift) & 0x1FF;

		/* We have reached the leaf mapping */
		if (spshift >= ptpshift) {
			break;
		}

		/*
		 * We are working on a non-leaf page table page.
		 *
		 * Create a downstream page table page if necessary and point
		 * to it from the current page table.
		 */
		if (ptp[ptpindex] == 0) {
			void *nlp = malloc(PAGE_SIZE, M_VTD, M_WAITOK | M_ZERO);
			ptp[ptpindex] = vtophys(nlp) | VTD_PTE_RD | VTD_PTE_WR;
		}

		ptp = (uint64_t *)PHYS_TO_DMAP(ptp[ptpindex] & VTD_PTE_ADDR_M);
	}

	if ((gpa & ((1UL << ptpshift) - 1)) != 0)
		panic("gpa 0x%lx and ptpshift %d mismatch", gpa, ptpshift);

	/*
	 * Update the 'gpa' -> 'hpa' mapping
	 */
	if (remove) {
		ptp[ptpindex] = 0;
	} else {
		ptp[ptpindex] = hpa | VTD_PTE_RD | VTD_PTE_WR;

		if (nlevels > 0)
			ptp[ptpindex] |= VTD_PTE_SUPERPAGE;
	}

	return (1UL << ptpshift);
}

static uint64_t
vtd_create_mapping(void *arg, vm_paddr_t gpa, vm_paddr_t hpa, uint64_t len)
{

	return (vtd_update_mapping(arg, gpa, hpa, len, CREATE_MAPPING));
}

static uint64_t
vtd_remove_mapping(void *arg, vm_paddr_t gpa, uint64_t len)
{

	return (vtd_update_mapping(arg, gpa, 0, len, REMOVE_MAPPING));
}

static void
vtd_invalidate_tlb(void *dom)
{
	int i;
	struct vtdmap *vtdmap;

	/*
	 * Invalidate the IOTLB.
	 * XXX use domain-selective invalidation for IOTLB
	 */
	for (i = 0; i < drhd_num; i++) {
		vtdmap = vtdmaps[i];
		vtd_iotlb_global_invalidate(vtdmap);
	}
}

static void *
vtd_create_domain(vm_paddr_t maxaddr)
{
	struct domain *dom;
	vm_paddr_t addr;
	int tmp, i, gaw, agaw, sagaw, res, pt_levels, addrwidth;
	struct vtdmap *vtdmap;

	if (drhd_num <= 0)
		panic("vtd_create_domain: no dma remapping hardware available");

	vtdmap = vtdmaps[0];

	/*
	 * Calculate AGAW.
	 * Section 3.4.2 "Adjusted Guest Address Width", Architecture Spec.
	 */
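	/*
	 * Worked example: maxaddr = 4GB makes the loop below exit with
	 * gaw = 33, so res = (33 - 12) % 9 = 3 and agaw = 33 + 9 - 3 = 39,
	 * which the selection loop that follows matches to a 3-level page
	 * table when the hardware reports 39-bit support in SAGAW.
	 */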
	addr = 0;
	for (gaw = 0; addr < maxaddr; gaw++)
		addr = 1ULL << gaw;

	res = (gaw - 12) % 9;
	if (res == 0)
		agaw = gaw;
	else
		agaw = gaw + 9 - res;

	if (agaw > 64)
		agaw = 64;

	/*
	 * Select the smallest Supported AGAW and the corresponding number
	 * of page table levels.
	 */
	pt_levels = 2;
	sagaw = 30;
	addrwidth = 0;
	tmp = VTD_CAP_SAGAW(vtdmap->cap);
	for (i = 0; i < 5; i++) {
		if ((tmp & (1 << i)) != 0 && sagaw >= agaw)
			break;
		pt_levels++;
		addrwidth++;
		sagaw += 9;
		if (sagaw > 64)
			sagaw = 64;
	}

	if (i >= 5) {
		panic("vtd_create_domain: SAGAW 0x%lx does not support AGAW %d",
		    VTD_CAP_SAGAW(vtdmap->cap), agaw);
	}

	dom = malloc(sizeof(struct domain), M_VTD, M_ZERO | M_WAITOK);
	dom->pt_levels = pt_levels;
	dom->addrwidth = addrwidth;
	dom->id = domain_id();
	dom->maxaddr = maxaddr;
	dom->ptp = malloc(PAGE_SIZE, M_VTD, M_ZERO | M_WAITOK);
	if ((uintptr_t)dom->ptp & PAGE_MASK)
		panic("vtd_create_domain: ptp (%p) not page aligned", dom->ptp);

#ifdef notyet
	/*
	 * XXX superpage mappings for the iommu do not work correctly.
	 *
	 * By default all physical memory is mapped into the host_domain.
	 * When a VM is allocated wired memory the pages belonging to it
	 * are removed from the host_domain and added to the vm's domain.
	 *
	 * If the page being removed was mapped using a superpage mapping
	 * in the host_domain then we need to demote the mapping before
	 * removing the page.
	 *
	 * There is no code to deal with the demotion at the moment so
	 * we disable superpage mappings altogether.
	 */
	dom->spsmask = VTD_CAP_SPS(vtdmap->cap);
#endif

	SLIST_INSERT_HEAD(&domhead, dom, next);

	return (dom);
}

static void
vtd_free_ptp(uint64_t *ptp, int level)
{
	int i;
	uint64_t *nlp;

	if (level > 1) {
		for (i = 0; i < 512; i++) {
			if ((ptp[i] & (VTD_PTE_RD | VTD_PTE_WR)) == 0)
				continue;
			if ((ptp[i] & VTD_PTE_SUPERPAGE) != 0)
				continue;
			nlp = (uint64_t *)PHYS_TO_DMAP(ptp[i] & VTD_PTE_ADDR_M);
			vtd_free_ptp(nlp, level - 1);
		}
	}

	bzero(ptp, PAGE_SIZE);
	free(ptp, M_VTD);
}

static void
vtd_destroy_domain(void *arg)
{
	struct domain *dom;

	dom = arg;

	SLIST_REMOVE(&domhead, dom, domain, next);
	vtd_free_ptp(dom->ptp, dom->pt_levels);
	free(dom, M_VTD);
}

struct iommu_ops iommu_ops_intel = {
	vtd_init,
	vtd_cleanup,
	vtd_enable,
	vtd_disable,
	vtd_create_domain,
	vtd_destroy_domain,
	vtd_create_mapping,
	vtd_remove_mapping,
	vtd_add_device,
	vtd_remove_device,
	vtd_invalidate_tlb,
};
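/*
 * This ops vector is consumed by the vendor-neutral iommu shim declared in
 * io/iommu.h (see io/iommu.c), which on Intel hardware dispatches its
 * iommu_*() entry points through the function pointers above, in the order
 * they are declared in struct iommu_ops.
 */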