/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
27 */ 28 29 #include <sys/param.h> 30 #include <sys/systm.h> 31 #include <sys/kernel.h> 32 #include <sys/malloc.h> 33 #include <sys/module.h> 34 #include <sys/bus.h> 35 #include <sys/pciio.h> 36 #include <sys/rman.h> 37 #include <sys/smp.h> 38 #include <sys/sysctl.h> 39 40 #include <dev/pci/pcivar.h> 41 #include <dev/pci/pcireg.h> 42 43 #include <machine/resource.h> 44 #include <machine/vmm.h> 45 #include <machine/vmm_dev.h> 46 47 #include <dev/vmm/vmm_ktr.h> 48 49 #include "vmm_lapic.h" 50 51 #include "iommu.h" 52 #include "ppt.h" 53 54 /* XXX locking */ 55 56 #define MAX_MSIMSGS 32 57 58 /* 59 * If the MSI-X table is located in the middle of a BAR then that MMIO 60 * region gets split into two segments - one segment above the MSI-X table 61 * and the other segment below the MSI-X table - with a hole in place of 62 * the MSI-X table so accesses to it can be trapped and emulated. 63 * 64 * So, allocate a MMIO segment for each BAR register + 1 additional segment. 65 */ 66 #define MAX_MMIOSEGS ((PCIR_MAX_BAR_0 + 1) + 1) 67 68 MALLOC_DEFINE(M_PPTMSIX, "pptmsix", "Passthru MSI-X resources"); 69 70 struct pptintr_arg { /* pptintr(pptintr_arg) */ 71 struct pptdev *pptdev; 72 uint64_t addr; 73 uint64_t msg_data; 74 }; 75 76 struct pptseg { 77 vm_paddr_t gpa; 78 size_t len; 79 int wired; 80 }; 81 82 struct pptdev { 83 device_t dev; 84 struct vm *vm; /* owner of this device */ 85 TAILQ_ENTRY(pptdev) next; 86 struct pptseg mmio[MAX_MMIOSEGS]; 87 struct { 88 int num_msgs; /* guest state */ 89 90 int startrid; /* host state */ 91 struct resource *res[MAX_MSIMSGS]; 92 void *cookie[MAX_MSIMSGS]; 93 struct pptintr_arg arg[MAX_MSIMSGS]; 94 } msi; 95 96 struct { 97 int num_msgs; 98 int startrid; 99 int msix_table_rid; 100 int msix_pba_rid; 101 struct resource *msix_table_res; 102 struct resource *msix_pba_res; 103 struct resource **res; 104 void **cookie; 105 struct pptintr_arg *arg; 106 } msix; 107 }; 108 109 SYSCTL_DECL(_hw_vmm); 110 SYSCTL_NODE(_hw_vmm, OID_AUTO, ppt, CTLFLAG_RW 
| CTLFLAG_MPSAFE, 0, 111 "bhyve passthru devices"); 112 113 static int num_pptdevs; 114 SYSCTL_INT(_hw_vmm_ppt, OID_AUTO, devices, CTLFLAG_RD, &num_pptdevs, 0, 115 "number of pci passthru devices"); 116 117 static TAILQ_HEAD(, pptdev) pptdev_list = TAILQ_HEAD_INITIALIZER(pptdev_list); 118 119 static int 120 ppt_probe(device_t dev) 121 { 122 int bus, slot, func; 123 struct pci_devinfo *dinfo; 124 125 dinfo = (struct pci_devinfo *)device_get_ivars(dev); 126 127 bus = pci_get_bus(dev); 128 slot = pci_get_slot(dev); 129 func = pci_get_function(dev); 130 131 /* 132 * To qualify as a pci passthrough device a device must: 133 * - be allowed by administrator to be used in this role 134 * - be an endpoint device 135 */ 136 if ((dinfo->cfg.hdrtype & PCIM_HDRTYPE) != PCIM_HDRTYPE_NORMAL) 137 return (ENXIO); 138 else if (vmm_is_pptdev(bus, slot, func)) 139 return (0); 140 else 141 /* 142 * Returning BUS_PROBE_NOWILDCARD here matches devices that the 143 * SR-IOV infrastructure specified as "ppt" passthrough devices. 144 * All normal devices that did not have "ppt" specified as their 145 * driver will not be matched by this. 
146 */ 147 return (BUS_PROBE_NOWILDCARD); 148 } 149 150 static int 151 ppt_attach(device_t dev) 152 { 153 struct pptdev *ppt; 154 uint16_t cmd; 155 156 ppt = device_get_softc(dev); 157 158 cmd = pci_read_config(dev, PCIR_COMMAND, 2); 159 cmd &= ~(PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); 160 pci_write_config(dev, PCIR_COMMAND, cmd, 2); 161 iommu_remove_device(iommu_host_domain(), pci_get_rid(dev)); 162 num_pptdevs++; 163 TAILQ_INSERT_TAIL(&pptdev_list, ppt, next); 164 ppt->dev = dev; 165 166 if (bootverbose) 167 device_printf(dev, "attached\n"); 168 169 return (0); 170 } 171 172 static int 173 ppt_detach(device_t dev) 174 { 175 struct pptdev *ppt; 176 177 ppt = device_get_softc(dev); 178 179 if (ppt->vm != NULL) 180 return (EBUSY); 181 num_pptdevs--; 182 TAILQ_REMOVE(&pptdev_list, ppt, next); 183 184 if (iommu_host_domain() != NULL) 185 iommu_add_device(iommu_host_domain(), pci_get_rid(dev)); 186 187 return (0); 188 } 189 190 static device_method_t ppt_methods[] = { 191 /* Device interface */ 192 DEVMETHOD(device_probe, ppt_probe), 193 DEVMETHOD(device_attach, ppt_attach), 194 DEVMETHOD(device_detach, ppt_detach), 195 {0, 0} 196 }; 197 198 DEFINE_CLASS_0(ppt, ppt_driver, ppt_methods, sizeof(struct pptdev)); 199 DRIVER_MODULE(ppt, pci, ppt_driver, NULL, NULL); 200 201 static int 202 ppt_find(struct vm *vm, int bus, int slot, int func, struct pptdev **pptp) 203 { 204 device_t dev; 205 struct pptdev *ppt; 206 int b, s, f; 207 208 TAILQ_FOREACH(ppt, &pptdev_list, next) { 209 dev = ppt->dev; 210 b = pci_get_bus(dev); 211 s = pci_get_slot(dev); 212 f = pci_get_function(dev); 213 if (bus == b && slot == s && func == f) 214 break; 215 } 216 217 if (ppt == NULL) 218 return (ENOENT); 219 if (ppt->vm != vm) /* Make sure we own this device */ 220 return (EBUSY); 221 *pptp = ppt; 222 return (0); 223 } 224 225 static void 226 ppt_unmap_all_mmio(struct vm *vm, struct pptdev *ppt) 227 { 228 int i; 229 struct pptseg *seg; 230 231 for (i = 0; i < MAX_MMIOSEGS; i++) { 
232 seg = &ppt->mmio[i]; 233 if (seg->len == 0) 234 continue; 235 (void)vm_unmap_mmio(vm, seg->gpa, seg->len); 236 bzero(seg, sizeof(struct pptseg)); 237 } 238 } 239 240 static void 241 ppt_teardown_msi(struct pptdev *ppt) 242 { 243 int i, rid; 244 void *cookie; 245 struct resource *res; 246 247 if (ppt->msi.num_msgs == 0) 248 return; 249 250 for (i = 0; i < ppt->msi.num_msgs; i++) { 251 rid = ppt->msi.startrid + i; 252 res = ppt->msi.res[i]; 253 cookie = ppt->msi.cookie[i]; 254 255 if (cookie != NULL) 256 bus_teardown_intr(ppt->dev, res, cookie); 257 258 if (res != NULL) 259 bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, res); 260 261 ppt->msi.res[i] = NULL; 262 ppt->msi.cookie[i] = NULL; 263 } 264 265 if (ppt->msi.startrid == 1) 266 pci_release_msi(ppt->dev); 267 268 ppt->msi.num_msgs = 0; 269 } 270 271 static void 272 ppt_teardown_msix_intr(struct pptdev *ppt, int idx) 273 { 274 int rid; 275 struct resource *res; 276 void *cookie; 277 278 rid = ppt->msix.startrid + idx; 279 res = ppt->msix.res[idx]; 280 cookie = ppt->msix.cookie[idx]; 281 282 if (cookie != NULL) 283 bus_teardown_intr(ppt->dev, res, cookie); 284 285 if (res != NULL) 286 bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, res); 287 288 ppt->msix.res[idx] = NULL; 289 ppt->msix.cookie[idx] = NULL; 290 } 291 292 static void 293 ppt_teardown_msix(struct pptdev *ppt) 294 { 295 int i; 296 297 if (ppt->msix.num_msgs == 0) 298 return; 299 300 for (i = 0; i < ppt->msix.num_msgs; i++) 301 ppt_teardown_msix_intr(ppt, i); 302 303 free(ppt->msix.res, M_PPTMSIX); 304 free(ppt->msix.cookie, M_PPTMSIX); 305 free(ppt->msix.arg, M_PPTMSIX); 306 307 pci_release_msi(ppt->dev); 308 309 if (ppt->msix.msix_table_res) { 310 bus_release_resource(ppt->dev, SYS_RES_MEMORY, 311 ppt->msix.msix_table_rid, 312 ppt->msix.msix_table_res); 313 ppt->msix.msix_table_res = NULL; 314 ppt->msix.msix_table_rid = 0; 315 } 316 if (ppt->msix.msix_pba_res) { 317 bus_release_resource(ppt->dev, SYS_RES_MEMORY, 318 ppt->msix.msix_pba_rid, 319 
ppt->msix.msix_pba_res); 320 ppt->msix.msix_pba_res = NULL; 321 ppt->msix.msix_pba_rid = 0; 322 } 323 324 ppt->msix.num_msgs = 0; 325 } 326 327 int 328 ppt_avail_devices(void) 329 { 330 331 return (num_pptdevs); 332 } 333 334 int 335 ppt_assigned_devices(struct vm *vm) 336 { 337 struct pptdev *ppt; 338 int num; 339 340 num = 0; 341 TAILQ_FOREACH(ppt, &pptdev_list, next) { 342 if (ppt->vm == vm) 343 num++; 344 } 345 return (num); 346 } 347 348 bool 349 ppt_is_mmio(struct vm *vm, vm_paddr_t gpa) 350 { 351 int i; 352 struct pptdev *ppt; 353 struct pptseg *seg; 354 355 TAILQ_FOREACH(ppt, &pptdev_list, next) { 356 if (ppt->vm != vm) 357 continue; 358 359 for (i = 0; i < MAX_MMIOSEGS; i++) { 360 seg = &ppt->mmio[i]; 361 if (seg->len == 0) 362 continue; 363 if (gpa >= seg->gpa && gpa < seg->gpa + seg->len) 364 return (true); 365 } 366 } 367 368 return (false); 369 } 370 371 static void 372 ppt_pci_reset(device_t dev) 373 { 374 375 if (pcie_flr(dev, 376 max(pcie_get_max_completion_timeout(dev) / 1000, 10), true)) 377 return; 378 379 pci_power_reset(dev); 380 } 381 382 static uint16_t 383 ppt_bar_enables(struct pptdev *ppt) 384 { 385 struct pci_map *pm; 386 uint16_t cmd; 387 388 cmd = 0; 389 for (pm = pci_first_bar(ppt->dev); pm != NULL; pm = pci_next_bar(pm)) { 390 if (PCI_BAR_IO(pm->pm_value)) 391 cmd |= PCIM_CMD_PORTEN; 392 if (PCI_BAR_MEM(pm->pm_value)) 393 cmd |= PCIM_CMD_MEMEN; 394 } 395 return (cmd); 396 } 397 398 int 399 ppt_assign_device(struct vm *vm, int bus, int slot, int func) 400 { 401 struct pptdev *ppt; 402 int error; 403 uint16_t cmd; 404 405 /* Passing NULL requires the device to be unowned. 
*/ 406 error = ppt_find(NULL, bus, slot, func, &ppt); 407 if (error) 408 return (error); 409 410 pci_save_state(ppt->dev); 411 ppt_pci_reset(ppt->dev); 412 pci_restore_state(ppt->dev); 413 ppt->vm = vm; 414 iommu_add_device(vm_iommu_domain(vm), pci_get_rid(ppt->dev)); 415 cmd = pci_read_config(ppt->dev, PCIR_COMMAND, 2); 416 cmd |= PCIM_CMD_BUSMASTEREN | ppt_bar_enables(ppt); 417 pci_write_config(ppt->dev, PCIR_COMMAND, cmd, 2); 418 return (0); 419 } 420 421 int 422 ppt_unassign_device(struct vm *vm, int bus, int slot, int func) 423 { 424 struct pptdev *ppt; 425 int error; 426 uint16_t cmd; 427 428 error = ppt_find(vm, bus, slot, func, &ppt); 429 if (error) 430 return (error); 431 432 cmd = pci_read_config(ppt->dev, PCIR_COMMAND, 2); 433 cmd &= ~(PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); 434 pci_write_config(ppt->dev, PCIR_COMMAND, cmd, 2); 435 pci_save_state(ppt->dev); 436 ppt_pci_reset(ppt->dev); 437 pci_restore_state(ppt->dev); 438 ppt_unmap_all_mmio(vm, ppt); 439 ppt_teardown_msi(ppt); 440 ppt_teardown_msix(ppt); 441 iommu_remove_device(vm_iommu_domain(vm), pci_get_rid(ppt->dev)); 442 ppt->vm = NULL; 443 return (0); 444 } 445 446 int 447 ppt_unassign_all(struct vm *vm) 448 { 449 struct pptdev *ppt; 450 int bus, slot, func; 451 device_t dev; 452 453 TAILQ_FOREACH(ppt, &pptdev_list, next) { 454 if (ppt->vm == vm) { 455 dev = ppt->dev; 456 bus = pci_get_bus(dev); 457 slot = pci_get_slot(dev); 458 func = pci_get_function(dev); 459 vm_unassign_pptdev(vm, bus, slot, func); 460 } 461 } 462 463 return (0); 464 } 465 466 static bool 467 ppt_valid_bar_mapping(struct pptdev *ppt, vm_paddr_t hpa, size_t len) 468 { 469 struct pci_map *pm; 470 pci_addr_t base, size; 471 472 for (pm = pci_first_bar(ppt->dev); pm != NULL; pm = pci_next_bar(pm)) { 473 if (!PCI_BAR_MEM(pm->pm_value)) 474 continue; 475 base = pm->pm_value & PCIM_BAR_MEM_BASE; 476 size = (pci_addr_t)1 << pm->pm_size; 477 if (hpa >= base && hpa + len <= base + size) 478 return (true); 479 } 480 
return (false); 481 } 482 483 int 484 ppt_map_mmio(struct vm *vm, int bus, int slot, int func, 485 vm_paddr_t gpa, size_t len, vm_paddr_t hpa) 486 { 487 int i, error; 488 struct pptseg *seg; 489 struct pptdev *ppt; 490 491 if (len % PAGE_SIZE != 0 || len == 0 || gpa % PAGE_SIZE != 0 || 492 hpa % PAGE_SIZE != 0 || gpa + len < gpa || hpa + len < hpa) 493 return (EINVAL); 494 495 error = ppt_find(vm, bus, slot, func, &ppt); 496 if (error) 497 return (error); 498 499 if (!ppt_valid_bar_mapping(ppt, hpa, len)) 500 return (EINVAL); 501 502 for (i = 0; i < MAX_MMIOSEGS; i++) { 503 seg = &ppt->mmio[i]; 504 if (seg->len == 0) { 505 error = vm_map_mmio(vm, gpa, len, hpa); 506 if (error == 0) { 507 seg->gpa = gpa; 508 seg->len = len; 509 } 510 return (error); 511 } 512 } 513 return (ENOSPC); 514 } 515 516 int 517 ppt_unmap_mmio(struct vm *vm, int bus, int slot, int func, 518 vm_paddr_t gpa, size_t len) 519 { 520 int i, error; 521 struct pptseg *seg; 522 struct pptdev *ppt; 523 524 error = ppt_find(vm, bus, slot, func, &ppt); 525 if (error) 526 return (error); 527 528 for (i = 0; i < MAX_MMIOSEGS; i++) { 529 seg = &ppt->mmio[i]; 530 if (seg->gpa == gpa && seg->len == len) { 531 error = vm_unmap_mmio(vm, seg->gpa, seg->len); 532 if (error == 0) { 533 seg->gpa = 0; 534 seg->len = 0; 535 } 536 return (error); 537 } 538 } 539 return (ENOENT); 540 } 541 542 static int 543 pptintr(void *arg) 544 { 545 struct pptdev *ppt; 546 struct pptintr_arg *pptarg; 547 548 pptarg = arg; 549 ppt = pptarg->pptdev; 550 551 if (ppt->vm != NULL) 552 lapic_intr_msi(ppt->vm, pptarg->addr, pptarg->msg_data); 553 else { 554 /* 555 * XXX 556 * This is not expected to happen - panic? 557 */ 558 } 559 560 /* 561 * For legacy interrupts give other filters a chance in case 562 * the interrupt was not generated by the passthrough device. 
563 */ 564 if (ppt->msi.startrid == 0) 565 return (FILTER_STRAY); 566 else 567 return (FILTER_HANDLED); 568 } 569 570 int 571 ppt_setup_msi(struct vm *vm, int bus, int slot, int func, 572 uint64_t addr, uint64_t msg, int numvec) 573 { 574 int i, rid, flags; 575 int msi_count, startrid, error, tmp; 576 struct pptdev *ppt; 577 578 if (numvec < 0 || numvec > MAX_MSIMSGS) 579 return (EINVAL); 580 581 error = ppt_find(vm, bus, slot, func, &ppt); 582 if (error) 583 return (error); 584 585 /* Reject attempts to enable MSI while MSI-X is active. */ 586 if (ppt->msix.num_msgs != 0 && numvec != 0) 587 return (EBUSY); 588 589 /* Free any allocated resources */ 590 ppt_teardown_msi(ppt); 591 592 if (numvec == 0) /* nothing more to do */ 593 return (0); 594 595 flags = RF_ACTIVE; 596 msi_count = pci_msi_count(ppt->dev); 597 if (msi_count == 0) { 598 startrid = 0; /* legacy interrupt */ 599 msi_count = 1; 600 flags |= RF_SHAREABLE; 601 } else 602 startrid = 1; /* MSI */ 603 604 /* 605 * The device must be capable of supporting the number of vectors 606 * the guest wants to allocate. 607 */ 608 if (numvec > msi_count) 609 return (EINVAL); 610 611 /* 612 * Make sure that we can allocate all the MSI vectors that are needed 613 * by the guest. 614 */ 615 if (startrid == 1) { 616 tmp = numvec; 617 error = pci_alloc_msi(ppt->dev, &tmp); 618 if (error) 619 return (error); 620 else if (tmp != numvec) { 621 pci_release_msi(ppt->dev); 622 return (ENOSPC); 623 } else { 624 /* success */ 625 } 626 } 627 628 ppt->msi.startrid = startrid; 629 630 /* 631 * Allocate the irq resource and attach it to the interrupt handler. 
632 */ 633 for (i = 0; i < numvec; i++) { 634 ppt->msi.num_msgs = i + 1; 635 ppt->msi.cookie[i] = NULL; 636 637 rid = startrid + i; 638 ppt->msi.res[i] = bus_alloc_resource_any(ppt->dev, SYS_RES_IRQ, 639 &rid, flags); 640 if (ppt->msi.res[i] == NULL) 641 break; 642 643 ppt->msi.arg[i].pptdev = ppt; 644 ppt->msi.arg[i].addr = addr; 645 ppt->msi.arg[i].msg_data = msg + i; 646 647 error = bus_setup_intr(ppt->dev, ppt->msi.res[i], 648 INTR_TYPE_NET | INTR_MPSAFE, 649 pptintr, NULL, &ppt->msi.arg[i], 650 &ppt->msi.cookie[i]); 651 if (error != 0) 652 break; 653 } 654 655 if (i < numvec) { 656 ppt_teardown_msi(ppt); 657 return (ENXIO); 658 } 659 660 return (0); 661 } 662 663 int 664 ppt_setup_msix(struct vm *vm, int bus, int slot, int func, 665 int idx, uint64_t addr, uint64_t msg, uint32_t vector_control) 666 { 667 struct pptdev *ppt; 668 struct pci_devinfo *dinfo; 669 int numvec, alloced, rid, error; 670 size_t res_size, cookie_size, arg_size; 671 672 error = ppt_find(vm, bus, slot, func, &ppt); 673 if (error) 674 return (error); 675 676 /* Reject attempts to enable MSI-X while MSI is active. 
*/ 677 if (ppt->msi.num_msgs != 0) 678 return (EBUSY); 679 680 dinfo = device_get_ivars(ppt->dev); 681 if (!dinfo) 682 return (ENXIO); 683 684 /* 685 * First-time configuration: 686 * Allocate the MSI-X table 687 * Allocate the IRQ resources 688 * Set up some variables in ppt->msix 689 */ 690 if (ppt->msix.num_msgs == 0) { 691 numvec = pci_msix_count(ppt->dev); 692 if (numvec <= 0) 693 return (EINVAL); 694 695 ppt->msix.startrid = 1; 696 ppt->msix.num_msgs = numvec; 697 698 res_size = numvec * sizeof(ppt->msix.res[0]); 699 cookie_size = numvec * sizeof(ppt->msix.cookie[0]); 700 arg_size = numvec * sizeof(ppt->msix.arg[0]); 701 702 ppt->msix.res = malloc(res_size, M_PPTMSIX, M_WAITOK | M_ZERO); 703 ppt->msix.cookie = malloc(cookie_size, M_PPTMSIX, 704 M_WAITOK | M_ZERO); 705 ppt->msix.arg = malloc(arg_size, M_PPTMSIX, M_WAITOK | M_ZERO); 706 707 rid = dinfo->cfg.msix.msix_table_bar; 708 ppt->msix.msix_table_res = bus_alloc_resource_any(ppt->dev, 709 SYS_RES_MEMORY, &rid, RF_ACTIVE); 710 711 if (ppt->msix.msix_table_res == NULL) { 712 ppt_teardown_msix(ppt); 713 return (ENOSPC); 714 } 715 ppt->msix.msix_table_rid = rid; 716 717 if (dinfo->cfg.msix.msix_table_bar != 718 dinfo->cfg.msix.msix_pba_bar) { 719 rid = dinfo->cfg.msix.msix_pba_bar; 720 ppt->msix.msix_pba_res = bus_alloc_resource_any( 721 ppt->dev, SYS_RES_MEMORY, &rid, RF_ACTIVE); 722 723 if (ppt->msix.msix_pba_res == NULL) { 724 ppt_teardown_msix(ppt); 725 return (ENOSPC); 726 } 727 ppt->msix.msix_pba_rid = rid; 728 } 729 730 alloced = numvec; 731 error = pci_alloc_msix(ppt->dev, &alloced); 732 if (error || alloced != numvec) { 733 ppt_teardown_msix(ppt); 734 return (error == 0 ? 
ENOSPC: error); 735 } 736 } 737 738 if ((vector_control & PCIM_MSIX_VCTRL_MASK) == 0) { 739 /* Tear down the IRQ if it's already set up */ 740 ppt_teardown_msix_intr(ppt, idx); 741 742 /* Allocate the IRQ resource */ 743 ppt->msix.cookie[idx] = NULL; 744 rid = ppt->msix.startrid + idx; 745 ppt->msix.res[idx] = bus_alloc_resource_any(ppt->dev, SYS_RES_IRQ, 746 &rid, RF_ACTIVE); 747 if (ppt->msix.res[idx] == NULL) 748 return (ENXIO); 749 750 ppt->msix.arg[idx].pptdev = ppt; 751 ppt->msix.arg[idx].addr = addr; 752 ppt->msix.arg[idx].msg_data = msg; 753 754 /* Setup the MSI-X interrupt */ 755 error = bus_setup_intr(ppt->dev, ppt->msix.res[idx], 756 INTR_TYPE_NET | INTR_MPSAFE, 757 pptintr, NULL, &ppt->msix.arg[idx], 758 &ppt->msix.cookie[idx]); 759 760 if (error != 0) { 761 bus_release_resource(ppt->dev, SYS_RES_IRQ, rid, ppt->msix.res[idx]); 762 ppt->msix.cookie[idx] = NULL; 763 ppt->msix.res[idx] = NULL; 764 return (ENXIO); 765 } 766 } else { 767 /* Masked, tear it down if it's already been set up */ 768 ppt_teardown_msix_intr(ppt, idx); 769 } 770 771 return (0); 772 } 773 774 int 775 ppt_disable_msix(struct vm *vm, int bus, int slot, int func) 776 { 777 struct pptdev *ppt; 778 int error; 779 780 error = ppt_find(vm, bus, slot, func, &ppt); 781 if (error) 782 return (error); 783 784 ppt_teardown_msix(ppt); 785 return (0); 786 } 787