/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Copyright 2019 Joyent, Inc.
 * Copyright 2022 OmniOS Community Edition (OmniOSce) Association.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/pciio.h>
#include <sys/sysctl.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>

#include <machine/vmm.h>
#include <machine/vmm_dev.h>

#include <sys/conf.h>
#include <sys/ddi.h>
#include <sys/stat.h>
#include <sys/sunddi.h>
#include <sys/pci.h>
#include <sys/pci_cap.h>
#include <sys/pcie_impl.h>
#include <sys/ppt_dev.h>
#include <sys/mkdev.h>
#include <sys/sysmacros.h>

#include "vmm_lapic.h"
#include "vmm_ktr.h"

#include "iommu.h"
#include "ppt.h"

#define	MAX_MSIMSGS	32

/*
 * If the MSI-X table is located in the middle of a BAR then that MMIO
 * region gets split into two segments - one segment above the MSI-X table
 * and the other segment below the MSI-X table - with a hole in place of
 * the MSI-X table so accesses to it can be trapped and emulated.
 *
 * So, allocate a MMIO segment for each BAR register + 1 additional segment.
 */
#define	MAX_MMIOSEGS	((PCIR_MAX_BAR_0 + 1) + 1)
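
/*
 * Per-vector argument passed to pptintr().  Each allocated MSI/MSI-X vector
 * records the owning device and the guest-programmed address/data pair so
 * the handler can forward the interrupt into the guest via lapic_intr_msi().
 */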
struct pptintr_arg {
	struct pptdev	*pptdev;
	uint64_t	addr;
	uint64_t	msg_data;
};

struct pptseg {
	vm_paddr_t	gpa;
	size_t		len;
	int		wired;
};

struct pptbar {
	uint64_t		base;
	uint64_t		size;
	uint_t			type;
	ddi_acc_handle_t	io_handle;
	caddr_t			io_ptr;
	uint_t			ddireg;
};

struct pptdev {
	dev_info_t		*pptd_dip;
	list_node_t		pptd_node;
	ddi_acc_handle_t	pptd_cfg;
	struct pptbar		pptd_bars[PCI_BASE_NUM];
	struct vm		*vm;
	struct pptseg		mmio[MAX_MMIOSEGS];

	struct {
		int	num_msgs;	/* guest state */
		boolean_t is_fixed;
		size_t	inth_sz;
		ddi_intr_handle_t *inth;
		struct pptintr_arg arg[MAX_MSIMSGS];
	} msi;

	struct {
		int num_msgs;
		size_t inth_sz;
		size_t arg_sz;
		ddi_intr_handle_t *inth;
		struct pptintr_arg *arg;
	} msix;
};


static major_t		ppt_major;
static void		*ppt_state;
static kmutex_t		pptdev_mtx;
static list_t		pptdev_list;

#define	PPT_MINOR_NAME	"ppt"

static ddi_device_acc_attr_t ppt_attr = {
	DDI_DEVICE_ATTR_V0,
	DDI_NEVERSWAP_ACC,
	DDI_STORECACHING_OK_ACC,
	DDI_DEFAULT_ACC
};

static int
ppt_open(dev_t *devp, int flag, int otyp, cred_t *cr)
{
	/* XXX: require extra privs? */
	return (0);
}

#define	BAR_TO_IDX(bar)	(((bar) - PCI_CONF_BASE0) / PCI_BAR_SZ_32)
#define	BAR_VALID(b)	(			\
		(b) >= PCI_CONF_BASE0 &&	\
		(b) <= PCI_CONF_BASE5 &&	\
		((b) & (PCI_BAR_SZ_32-1)) == 0)

static int
ppt_ioctl(dev_t dev, int cmd, intptr_t arg, int md, cred_t *cr, int *rv)
{
	minor_t minor = getminor(dev);
	struct pptdev *ppt;
	void *data = (void *)arg;

	if ((ppt = ddi_get_soft_state(ppt_state, minor)) == NULL) {
		return (ENOENT);
	}

	switch (cmd) {
	case PPT_CFG_READ: {
		struct ppt_cfg_io cio;
		ddi_acc_handle_t cfg = ppt->pptd_cfg;

		if (ddi_copyin(data, &cio, sizeof (cio), md) != 0) {
			return (EFAULT);
		}
		switch (cio.pci_width) {
		case 4:
			cio.pci_data = pci_config_get32(cfg, cio.pci_off);
			break;
		case 2:
			cio.pci_data = pci_config_get16(cfg, cio.pci_off);
			break;
		case 1:
			cio.pci_data = pci_config_get8(cfg, cio.pci_off);
			break;
		default:
			return (EINVAL);
		}

		if (ddi_copyout(&cio, data, sizeof (cio), md) != 0) {
			return (EFAULT);
		}
		return (0);
	}
	case PPT_CFG_WRITE: {
		struct ppt_cfg_io cio;
		ddi_acc_handle_t cfg = ppt->pptd_cfg;

		if (ddi_copyin(data, &cio, sizeof (cio), md) != 0) {
			return (EFAULT);
		}
		switch (cio.pci_width) {
		case 4:
			pci_config_put32(cfg, cio.pci_off, cio.pci_data);
			break;
		case 2:
			pci_config_put16(cfg, cio.pci_off, cio.pci_data);
			break;
		case 1:
			pci_config_put8(cfg, cio.pci_off, cio.pci_data);
			break;
		default:
			return (EINVAL);
		}

		return (0);
	}
	case PPT_BAR_QUERY: {
		struct ppt_bar_query barg;
		struct pptbar *pbar;

		if (ddi_copyin(data, &barg, sizeof (barg), md) != 0) {
			return (EFAULT);
		}
		if (barg.pbq_baridx >= PCI_BASE_NUM) {
			return (EINVAL);
		}
		pbar = &ppt->pptd_bars[barg.pbq_baridx];

		if (pbar->base == 0 || pbar->size == 0) {
			return (ENOENT);
		}
		barg.pbq_type = pbar->type;
		barg.pbq_base = pbar->base;
		barg.pbq_size = pbar->size;

		if (ddi_copyout(&barg, data, sizeof (barg), md) != 0) {
			return (EFAULT);
		}
		return (0);
	}
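
	/*
	 * PPT_BAR_READ/PPT_BAR_WRITE emulate accesses to I/O-space BARs that
	 * were mapped during ppt_bar_crawl().  A hypothetical userspace
	 * caller (sketch only, assuming an open ppt fd):
	 *
	 *	struct ppt_bar_io bio = { .pbi_bar = 0, .pbi_off = 0x10,
	 *	    .pbi_width = 4 };
	 *	if (ioctl(pptfd, PPT_BAR_READ, &bio) == 0)
	 *		(void) printf("%x\n", bio.pbi_data);
	 *
	 * Note that pbi_off is not range-checked against the BAR size here.
	 */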
	case PPT_BAR_READ: {
		struct ppt_bar_io bio;
		struct pptbar *pbar;
		void *addr;
		uint_t rnum;
		ddi_acc_handle_t cfg;

		if (ddi_copyin(data, &bio, sizeof (bio), md) != 0) {
			return (EFAULT);
		}
		rnum = bio.pbi_bar;
		if (rnum >= PCI_BASE_NUM) {
			return (EINVAL);
		}
		pbar = &ppt->pptd_bars[rnum];
		if (pbar->type != PCI_ADDR_IO || pbar->io_handle == NULL) {
			return (EINVAL);
		}
		addr = pbar->io_ptr + bio.pbi_off;

		switch (bio.pbi_width) {
		case 4:
			bio.pbi_data = ddi_get32(pbar->io_handle, addr);
			break;
		case 2:
			bio.pbi_data = ddi_get16(pbar->io_handle, addr);
			break;
		case 1:
			bio.pbi_data = ddi_get8(pbar->io_handle, addr);
			break;
		default:
			return (EINVAL);
		}

		if (ddi_copyout(&bio, data, sizeof (bio), md) != 0) {
			return (EFAULT);
		}
		return (0);
	}
	case PPT_BAR_WRITE: {
		struct ppt_bar_io bio;
		struct pptbar *pbar;
		void *addr;
		uint_t rnum;
		ddi_acc_handle_t cfg;

		if (ddi_copyin(data, &bio, sizeof (bio), md) != 0) {
			return (EFAULT);
		}
		rnum = bio.pbi_bar;
		if (rnum >= PCI_BASE_NUM) {
			return (EINVAL);
		}
		pbar = &ppt->pptd_bars[rnum];
		if (pbar->type != PCI_ADDR_IO || pbar->io_handle == NULL) {
			return (EINVAL);
		}
		addr = pbar->io_ptr + bio.pbi_off;

		switch (bio.pbi_width) {
		case 4:
			ddi_put32(pbar->io_handle, addr, bio.pbi_data);
			break;
		case 2:
			ddi_put16(pbar->io_handle, addr, bio.pbi_data);
			break;
		case 1:
			ddi_put8(pbar->io_handle, addr, bio.pbi_data);
			break;
		default:
			return (EINVAL);
		}

		return (0);
	}

	default:
		return (ENOTTY);
	}

	return (0);
}

static int
ppt_find_msix_table_bar(struct pptdev *ppt)
{
	uint16_t base;
	uint32_t off;

	if (PCI_CAP_LOCATE(ppt->pptd_cfg, PCI_CAP_ID_MSI_X, &base) !=
	    DDI_SUCCESS)
		return (-1);

	off = pci_config_get32(ppt->pptd_cfg, base + PCI_MSIX_TBL_OFFSET);

	if (off == PCI_EINVAL32)
		return (-1);

	return (off & PCI_MSIX_TBL_BIR_MASK);
}

static int
ppt_devmap(dev_t dev, devmap_cookie_t dhp, offset_t off, size_t len,
    size_t *maplen, uint_t model)
{
	minor_t minor;
	struct pptdev *ppt;
	int err, bar;
	uint_t ddireg;

	minor = getminor(dev);

	if ((ppt = ddi_get_soft_state(ppt_state, minor)) == NULL)
		return (ENXIO);

#ifdef _MULTI_DATAMODEL
	if (ddi_model_convert_from(model) != DDI_MODEL_NONE)
		return (ENXIO);
#endif

	if (off < 0 || off != P2ALIGN(off, PAGESIZE))
		return (EINVAL);

	if ((bar = ppt_find_msix_table_bar(ppt)) == -1)
		return (EINVAL);

	ddireg = ppt->pptd_bars[bar].ddireg;

	if (ddireg == 0)
		return (EINVAL);

	err = devmap_devmem_setup(dhp, ppt->pptd_dip, NULL, ddireg, off, len,
	    PROT_USER | PROT_READ | PROT_WRITE, IOMEM_DATA_CACHED, &ppt_attr);

	if (err == DDI_SUCCESS)
		*maplen = len;

	return (err);
}

static void
ppt_bar_wipe(struct pptdev *ppt)
{
	uint_t i;

	for (i = 0; i < PCI_BASE_NUM; i++) {
		struct pptbar *pbar = &ppt->pptd_bars[i];
		if (pbar->type == PCI_ADDR_IO && pbar->io_handle != NULL) {
			ddi_regs_map_free(&pbar->io_handle);
		}
	}
	bzero(&ppt->pptd_bars, sizeof (ppt->pptd_bars));
}
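
/*
 * Walk the "assigned-addresses" property of the device and record the base,
 * size, and type of each valid BAR in pptd_bars[].  I/O-space BARs are also
 * mapped so they can be serviced by the PPT_BAR_READ/WRITE ioctls.
 */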
static int
ppt_bar_crawl(struct pptdev *ppt)
{
	pci_regspec_t *regs;
	uint_t rcount, i;
	int err = 0, rlen;

	if (ddi_getlongprop(DDI_DEV_T_ANY, ppt->pptd_dip, DDI_PROP_DONTPASS,
	    "assigned-addresses", (caddr_t)&regs, &rlen) != DDI_PROP_SUCCESS) {
		return (EIO);
	}

	VERIFY3S(rlen, >, 0);
	rcount = rlen / sizeof (pci_regspec_t);
	for (i = 0; i < rcount; i++) {
		pci_regspec_t *reg = &regs[i];
		struct pptbar *pbar;
		uint_t bar, rnum;

		DTRACE_PROBE1(ppt__crawl__reg, pci_regspec_t *, reg);
		bar = PCI_REG_REG_G(reg->pci_phys_hi);
		if (!BAR_VALID(bar)) {
			continue;
		}

		rnum = BAR_TO_IDX(bar);
		pbar = &ppt->pptd_bars[rnum];
		/* is this somehow already populated? */
		if (pbar->base != 0 || pbar->size != 0) {
			err = EEXIST;
			break;
		}

		/*
		 * Register 0 corresponds to the PCI config space.
		 * The registers which match the assigned-addresses list are
		 * offset by 1.
		 */
		pbar->ddireg = i + 1;

		pbar->type = reg->pci_phys_hi & PCI_ADDR_MASK;
		pbar->base = ((uint64_t)reg->pci_phys_mid << 32) |
		    (uint64_t)reg->pci_phys_low;
		pbar->size = ((uint64_t)reg->pci_size_hi << 32) |
		    (uint64_t)reg->pci_size_low;
		if (pbar->type == PCI_ADDR_IO) {
			err = ddi_regs_map_setup(ppt->pptd_dip, rnum,
			    &pbar->io_ptr, 0, 0, &ppt_attr, &pbar->io_handle);
			if (err != 0) {
				break;
			}
		}
	}
	kmem_free(regs, rlen);

	if (err != 0) {
		ppt_bar_wipe(ppt);
	}
	return (err);
}

static boolean_t
ppt_bar_verify_mmio(struct pptdev *ppt, uint64_t base, uint64_t size)
{
	const uint64_t map_end = base + size;

	/* Zero-length or overflow mappings are not valid */
	if (map_end <= base) {
		return (B_FALSE);
	}
	/* MMIO bounds should be page-aligned */
	if ((base & PAGEOFFSET) != 0 || (size & PAGEOFFSET) != 0) {
		return (B_FALSE);
	}

	for (uint_t i = 0; i < PCI_BASE_NUM; i++) {
		const struct pptbar *bar = &ppt->pptd_bars[i];
		const uint64_t bar_end = bar->base + bar->size;

		/* Only memory BARs can be mapped */
		if (bar->type != PCI_ADDR_MEM32 &&
		    bar->type != PCI_ADDR_MEM64) {
			continue;
		}

		/* Does the mapping fit within this BAR? */
		if (base < bar->base || base >= bar_end ||
		    map_end < bar->base || map_end > bar_end) {
			continue;
		}

		/* This BAR satisfies the provided map */
		return (B_TRUE);
	}
	return (B_FALSE);
}
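
/*
 * Attach a passthrough instance: allocate soft state, set up config-space
 * access, crawl the BARs, and publish the minor node before adding the
 * device to the global pptdev_list.
 */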
static int
ppt_ddi_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
{
	struct pptdev *ppt = NULL;
	char name[PPT_MAXNAMELEN];
	int inst;

	if (cmd != DDI_ATTACH)
		return (DDI_FAILURE);

	inst = ddi_get_instance(dip);

	if (ddi_soft_state_zalloc(ppt_state, inst) != DDI_SUCCESS) {
		goto fail;
	}
	VERIFY(ppt = ddi_get_soft_state(ppt_state, inst));
	ppt->pptd_dip = dip;
	ddi_set_driver_private(dip, ppt);

	if (pci_config_setup(dip, &ppt->pptd_cfg) != DDI_SUCCESS) {
		goto fail;
	}
	if (ppt_bar_crawl(ppt) != 0) {
		goto fail;
	}
	if (ddi_create_minor_node(dip, PPT_MINOR_NAME, S_IFCHR, inst,
	    DDI_PSEUDO, 0) != DDI_SUCCESS) {
		goto fail;
	}

	mutex_enter(&pptdev_mtx);
	list_insert_tail(&pptdev_list, ppt);
	mutex_exit(&pptdev_mtx);

	return (DDI_SUCCESS);

fail:
	if (ppt != NULL) {
		ddi_remove_minor_node(dip, NULL);
		if (ppt->pptd_cfg != NULL) {
			pci_config_teardown(&ppt->pptd_cfg);
		}
		ppt_bar_wipe(ppt);
		ddi_soft_state_free(ppt_state, inst);
	}
	return (DDI_FAILURE);
}

static int
ppt_ddi_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
{
	struct pptdev *ppt;
	int inst;

	if (cmd != DDI_DETACH)
		return (DDI_FAILURE);

	ppt = ddi_get_driver_private(dip);
	inst = ddi_get_instance(dip);

	ASSERT3P(ddi_get_soft_state(ppt_state, inst), ==, ppt);

	mutex_enter(&pptdev_mtx);
	if (ppt->vm != NULL) {
		mutex_exit(&pptdev_mtx);
		return (DDI_FAILURE);
	}
	list_remove(&pptdev_list, ppt);
	mutex_exit(&pptdev_mtx);

	ddi_remove_minor_node(dip, PPT_MINOR_NAME);
	ppt_bar_wipe(ppt);
	pci_config_teardown(&ppt->pptd_cfg);
	ddi_set_driver_private(dip, NULL);
	ddi_soft_state_free(ppt_state, inst);

	return (DDI_SUCCESS);
}

static int
ppt_ddi_info(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
{
	int error = DDI_FAILURE;
	int inst = getminor((dev_t)arg);

	switch (cmd) {
	case DDI_INFO_DEVT2DEVINFO: {
		struct pptdev *ppt = ddi_get_soft_state(ppt_state, inst);

		if (ppt != NULL) {
			*result = (void *)ppt->pptd_dip;
			error = DDI_SUCCESS;
		}
		break;
	}
	case DDI_INFO_DEVT2INSTANCE: {
		*result = (void *)(uintptr_t)inst;
		error = DDI_SUCCESS;
		break;
	}
	default:
		break;
	}
	return (error);
}

static struct cb_ops ppt_cb_ops = {
	ppt_open,
	nulldev,	/* close */
	nodev,		/* strategy */
	nodev,		/* print */
	nodev,		/* dump */
	nodev,		/* read */
	nodev,		/* write */
	ppt_ioctl,
	ppt_devmap,	/* devmap */
	NULL,		/* mmap */
	NULL,		/* segmap */
	nochpoll,	/* poll */
	ddi_prop_op,
	NULL,
	D_NEW | D_MP | D_64BIT | D_DEVMAP,
	CB_REV
};

static struct dev_ops ppt_ops = {
	DEVO_REV,
	0,
	ppt_ddi_info,
	nulldev,	/* identify */
	nulldev,	/* probe */
	ppt_ddi_attach,
	ppt_ddi_detach,
	nodev,		/* reset */
	&ppt_cb_ops,
	(struct bus_ops *)NULL
};

static struct modldrv modldrv = {
	&mod_driverops,
	"bhyve pci pass-thru",
	&ppt_ops
};

static struct modlinkage modlinkage = {
	MODREV_1,
	&modldrv,
	NULL
};
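
/*
 * Module linkage: the global mutex and device list are created before
 * mod_install() makes the driver visible, since attach can run as soon as
 * the module is installed.
 */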
int
_init(void)
{
	int error;

	mutex_init(&pptdev_mtx, NULL, MUTEX_DRIVER, NULL);
	list_create(&pptdev_list, sizeof (struct pptdev),
	    offsetof(struct pptdev, pptd_node));

	error = ddi_soft_state_init(&ppt_state, sizeof (struct pptdev), 0);
	if (error) {
		goto fail;
	}

	error = mod_install(&modlinkage);

	ppt_major = ddi_name_to_major("ppt");
fail:
	if (error) {
		ddi_soft_state_fini(&ppt_state);
	}
	return (error);
}

int
_fini(void)
{
	int error;

	error = mod_remove(&modlinkage);
	if (error)
		return (error);
	ddi_soft_state_fini(&ppt_state);

	return (0);
}

int
_info(struct modinfo *modinfop)
{
	return (mod_info(&modlinkage, modinfop));
}

static boolean_t
ppt_wait_for_pending_txn(dev_info_t *dip, uint_t max_delay_us)
{
	uint16_t cap_ptr, devsts;
	ddi_acc_handle_t hdl;

	if (pci_config_setup(dip, &hdl) != DDI_SUCCESS)
		return (B_FALSE);

	if (PCI_CAP_LOCATE(hdl, PCI_CAP_ID_PCI_E, &cap_ptr) != DDI_SUCCESS) {
		pci_config_teardown(&hdl);
		return (B_FALSE);
	}

	devsts = PCI_CAP_GET16(hdl, 0, cap_ptr, PCIE_DEVSTS);
	while ((devsts & PCIE_DEVSTS_TRANS_PENDING) != 0) {
		if (max_delay_us == 0) {
			pci_config_teardown(&hdl);
			return (B_FALSE);
		}

		/* Poll once every 100 milliseconds up to the timeout. */
		if (max_delay_us > 100000) {
			delay(drv_usectohz(100000));
			max_delay_us -= 100000;
		} else {
			delay(drv_usectohz(max_delay_us));
			max_delay_us = 0;
		}
		devsts = PCI_CAP_GET16(hdl, 0, cap_ptr, PCIE_DEVSTS);
	}

	pci_config_teardown(&hdl);
	return (B_TRUE);
}

static uint_t
ppt_max_completion_tmo_us(dev_info_t *dip)
{
	uint_t timo = 0;
	uint16_t cap_ptr;
	ddi_acc_handle_t hdl;
	uint_t timo_ranges[] = {	/* timeout ranges */
		50000,		/* 50ms */
		100,		/* 100us */
		10000,		/* 10ms */
		0,
		0,
		55000,		/* 55ms */
		210000,		/* 210ms */
		0,
		0,
		900000,		/* 900ms */
		3500000,	/* 3.5s */
		0,
		0,
		13000000,	/* 13s */
		64000000,	/* 64s */
		0
	};

	if (pci_config_setup(dip, &hdl) != DDI_SUCCESS)
		return (50000); /* default 50ms */

	if (PCI_CAP_LOCATE(hdl, PCI_CAP_ID_PCI_E, &cap_ptr) != DDI_SUCCESS)
		goto out;

	if ((PCI_CAP_GET16(hdl, 0, cap_ptr, PCIE_PCIECAP) &
	    PCIE_PCIECAP_VER_MASK) < PCIE_PCIECAP_VER_2_0)
		goto out;

	if ((PCI_CAP_GET32(hdl, 0, cap_ptr, PCIE_DEVCAP2) &
	    PCIE_DEVCTL2_COM_TO_RANGE_MASK) == 0)
		goto out;

	timo = timo_ranges[PCI_CAP_GET16(hdl, 0, cap_ptr, PCIE_DEVCTL2) &
	    PCIE_DEVCAP2_COM_TO_RANGE_MASK];

out:
	if (timo == 0)
		timo = 50000; /* default 50ms */

	pci_config_teardown(&hdl);
	return (timo);
}
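
/*
 * Perform a Function Level Reset (FLR) on the device, waiting for pending
 * transactions to drain first.  If 'force' is set, the reset proceeds even
 * when the device fails to go idle, with the post-FLR delay extended to
 * cover any completions still in flight.  Returns B_TRUE on success.
 */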
static boolean_t
ppt_flr(dev_info_t *dip, boolean_t force)
{
	uint16_t cap_ptr, ctl, cmd;
	ddi_acc_handle_t hdl;
	uint_t compl_delay = 0, max_delay_us;

	if (pci_config_setup(dip, &hdl) != DDI_SUCCESS)
		return (B_FALSE);

	if (PCI_CAP_LOCATE(hdl, PCI_CAP_ID_PCI_E, &cap_ptr) != DDI_SUCCESS)
		goto fail;

	if ((PCI_CAP_GET32(hdl, 0, cap_ptr, PCIE_DEVCAP) & PCIE_DEVCAP_FLR)
	    == 0)
		goto fail;

	max_delay_us = MAX(ppt_max_completion_tmo_us(dip), 10000);

	/*
	 * Disable busmastering to prevent generation of new transactions
	 * while waiting for the device to go idle.  If the idle timeout
	 * fails, the command register is restored, which will re-enable
	 * busmastering.
	 */
	cmd = pci_config_get16(hdl, PCI_CONF_COMM);
	pci_config_put16(hdl, PCI_CONF_COMM, cmd & ~PCI_COMM_ME);
	if (!ppt_wait_for_pending_txn(dip, max_delay_us)) {
		if (!force) {
			pci_config_put16(hdl, PCI_CONF_COMM, cmd);
			goto fail;
		}
		dev_err(dip, CE_WARN,
		    "?Resetting with transactions pending after %u us\n",
		    max_delay_us);

		/*
		 * Extend the post-FLR delay to cover the maximum Completion
		 * Timeout delay of anything in flight during the FLR delay.
		 * Enforce a minimum delay of at least 10ms.
		 */
		compl_delay = MAX(10, (ppt_max_completion_tmo_us(dip) / 1000));
	}

	/* Initiate the reset. */
	ctl = PCI_CAP_GET16(hdl, 0, cap_ptr, PCIE_DEVCTL);
	(void) PCI_CAP_PUT16(hdl, 0, cap_ptr, PCIE_DEVCTL,
	    ctl | PCIE_DEVCTL_INITIATE_FLR);

	/* Wait for at least 100ms */
	delay(drv_usectohz((100 + compl_delay) * 1000));

	pci_config_teardown(&hdl);
	return (B_TRUE);

fail:
	/*
	 * TODO: If the FLR fails for some reason, we should attempt a reset
	 * using the PCI power management facilities (if possible).
	 */
	pci_config_teardown(&hdl);
	return (B_FALSE);
}

static int
ppt_findf(struct vm *vm, int fd, struct pptdev **pptp)
{
	struct pptdev *ppt = NULL;
	file_t *fp;
	vattr_t va;
	int err = 0;

	ASSERT(MUTEX_HELD(&pptdev_mtx));

	if ((fp = getf(fd)) == NULL)
		return (EBADF);

	va.va_mask = AT_RDEV;
	if (VOP_GETATTR(fp->f_vnode, &va, NO_FOLLOW, fp->f_cred, NULL) != 0 ||
	    getmajor(va.va_rdev) != ppt_major) {
		err = EBADF;
		goto fail;
	}

	ppt = ddi_get_soft_state(ppt_state, getminor(va.va_rdev));

	if (ppt == NULL) {
		err = EBADF;
		goto fail;
	}

	if (ppt->vm != vm) {
		err = EBUSY;
		goto fail;
	}

	*pptp = ppt;
	return (0);

fail:
	releasef(fd);
	return (err);
}

static void
ppt_unmap_all_mmio(struct vm *vm, struct pptdev *ppt)
{
	int i;
	struct pptseg *seg;

	for (i = 0; i < MAX_MMIOSEGS; i++) {
		seg = &ppt->mmio[i];
		if (seg->len == 0)
			continue;
		(void) vm_unmap_mmio(vm, seg->gpa, seg->len);
		bzero(seg, sizeof (struct pptseg));
	}
}

static void
ppt_teardown_msi(struct pptdev *ppt)
{
	int i;

	if (ppt->msi.num_msgs == 0)
		return;

	for (i = 0; i < ppt->msi.num_msgs; i++) {
		int intr_cap;

		(void) ddi_intr_get_cap(ppt->msi.inth[i], &intr_cap);
		if (intr_cap & DDI_INTR_FLAG_BLOCK)
			ddi_intr_block_disable(&ppt->msi.inth[i], 1);
		else
			ddi_intr_disable(ppt->msi.inth[i]);

		ddi_intr_remove_handler(ppt->msi.inth[i]);
		ddi_intr_free(ppt->msi.inth[i]);

		ppt->msi.inth[i] = NULL;
	}

	kmem_free(ppt->msi.inth, ppt->msi.inth_sz);
	ppt->msi.inth = NULL;
	ppt->msi.inth_sz = 0;
	ppt->msi.is_fixed = B_FALSE;

	ppt->msi.num_msgs = 0;
}

static void
ppt_teardown_msix_intr(struct pptdev *ppt, int idx)
{
	if (ppt->msix.inth != NULL && ppt->msix.inth[idx] != NULL) {
		int intr_cap;

		(void) ddi_intr_get_cap(ppt->msix.inth[idx], &intr_cap);
		if (intr_cap & DDI_INTR_FLAG_BLOCK)
			ddi_intr_block_disable(&ppt->msix.inth[idx], 1);
		else
			ddi_intr_disable(ppt->msix.inth[idx]);

		ddi_intr_remove_handler(ppt->msix.inth[idx]);
	}
}
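
/*
 * Release all MSI-X resources: disable and remove each per-vector handler
 * via ppt_teardown_msix_intr(), then free the interrupt handles and the
 * pptintr_arg array allocated at first MSI-X configuration.
 */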
static void
ppt_teardown_msix(struct pptdev *ppt)
{
	uint_t i;

	if (ppt->msix.num_msgs == 0)
		return;

	for (i = 0; i < ppt->msix.num_msgs; i++)
		ppt_teardown_msix_intr(ppt, i);

	if (ppt->msix.inth) {
		for (i = 0; i < ppt->msix.num_msgs; i++)
			ddi_intr_free(ppt->msix.inth[i]);
		kmem_free(ppt->msix.inth, ppt->msix.inth_sz);
		ppt->msix.inth = NULL;
		ppt->msix.inth_sz = 0;
		kmem_free(ppt->msix.arg, ppt->msix.arg_sz);
		ppt->msix.arg = NULL;
		ppt->msix.arg_sz = 0;
	}

	ppt->msix.num_msgs = 0;
}

int
ppt_assigned_devices(struct vm *vm)
{
	struct pptdev *ppt;
	uint_t num = 0;

	mutex_enter(&pptdev_mtx);
	for (ppt = list_head(&pptdev_list); ppt != NULL;
	    ppt = list_next(&pptdev_list, ppt)) {
		if (ppt->vm == vm) {
			num++;
		}
	}
	mutex_exit(&pptdev_mtx);
	return (num);
}

boolean_t
ppt_is_mmio(struct vm *vm, vm_paddr_t gpa)
{
	struct pptdev *ppt = list_head(&pptdev_list);

	/* XXX: this should probably be restructured to avoid the lock */
	mutex_enter(&pptdev_mtx);
	for (ppt = list_head(&pptdev_list); ppt != NULL;
	    ppt = list_next(&pptdev_list, ppt)) {
		if (ppt->vm != vm) {
			continue;
		}

		for (uint_t i = 0; i < MAX_MMIOSEGS; i++) {
			struct pptseg *seg = &ppt->mmio[i];

			if (seg->len == 0)
				continue;
			if (gpa >= seg->gpa && gpa < seg->gpa + seg->len) {
				mutex_exit(&pptdev_mtx);
				return (B_TRUE);
			}
		}
	}

	mutex_exit(&pptdev_mtx);
	return (B_FALSE);
}

int
ppt_assign_device(struct vm *vm, int pptfd)
{
	struct pptdev *ppt;
	int err = 0;

	mutex_enter(&pptdev_mtx);
	/* Passing NULL requires the device to be unowned. */
	err = ppt_findf(NULL, pptfd, &ppt);
	if (err != 0) {
		mutex_exit(&pptdev_mtx);
		return (err);
	}

	if (pci_save_config_regs(ppt->pptd_dip) != DDI_SUCCESS) {
		err = EIO;
		goto done;
	}
	ppt_flr(ppt->pptd_dip, B_TRUE);

	/*
	 * Restore the device state after reset and then perform another save
	 * so the "pristine" state can be restored when the device is removed
	 * from the guest.
	 */
	if (pci_restore_config_regs(ppt->pptd_dip) != DDI_SUCCESS ||
	    pci_save_config_regs(ppt->pptd_dip) != DDI_SUCCESS) {
		err = EIO;
		goto done;
	}

	ppt->vm = vm;
	iommu_remove_device(iommu_host_domain(), pci_get_bdf(ppt->pptd_dip));
	iommu_add_device(vm_iommu_domain(vm), pci_get_bdf(ppt->pptd_dip));
	pf_set_passthru(ppt->pptd_dip, B_TRUE);

done:
	releasef(pptfd);
	mutex_exit(&pptdev_mtx);
	return (err);
}

static void
ppt_reset_pci_power_state(dev_info_t *dip)
{
	ddi_acc_handle_t cfg;
	uint16_t cap_ptr;

	if (pci_config_setup(dip, &cfg) != DDI_SUCCESS)
		return;

	if (PCI_CAP_LOCATE(cfg, PCI_CAP_ID_PM, &cap_ptr) == DDI_SUCCESS) {
		uint16_t val;

		val = PCI_CAP_GET16(cfg, 0, cap_ptr, PCI_PMCSR);
		if ((val & PCI_PMCSR_STATE_MASK) != PCI_PMCSR_D0) {
			val = (val & ~PCI_PMCSR_STATE_MASK) | PCI_PMCSR_D0;
			(void) PCI_CAP_PUT16(cfg, 0, cap_ptr, PCI_PMCSR,
			    val);
		}
	}

	pci_config_teardown(&cfg);
}
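
/*
 * Return a device to host control: reset it, restore the config state saved
 * at assignment time, tear down any guest interrupt routing and MMIO
 * mappings, and move it from the VM's IOMMU domain back to the host domain.
 */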
static void
ppt_do_unassign(struct pptdev *ppt)
{
	struct vm *vm = ppt->vm;

	ASSERT3P(vm, !=, NULL);
	ASSERT(MUTEX_HELD(&pptdev_mtx));

	ppt_flr(ppt->pptd_dip, B_TRUE);

	/*
	 * Restore from the state saved during device assignment.
	 * If the device power state has been altered, that must be remedied
	 * first, as it will reset register state during the transition.
	 */
	ppt_reset_pci_power_state(ppt->pptd_dip);
	(void) pci_restore_config_regs(ppt->pptd_dip);

	pf_set_passthru(ppt->pptd_dip, B_FALSE);

	ppt_unmap_all_mmio(vm, ppt);
	ppt_teardown_msi(ppt);
	ppt_teardown_msix(ppt);
	iommu_remove_device(vm_iommu_domain(vm), pci_get_bdf(ppt->pptd_dip));
	iommu_add_device(iommu_host_domain(), pci_get_bdf(ppt->pptd_dip));
	ppt->vm = NULL;
}

int
ppt_unassign_device(struct vm *vm, int pptfd)
{
	struct pptdev *ppt;
	int err = 0;

	mutex_enter(&pptdev_mtx);
	err = ppt_findf(vm, pptfd, &ppt);
	if (err != 0) {
		mutex_exit(&pptdev_mtx);
		return (err);
	}

	ppt_do_unassign(ppt);

	releasef(pptfd);
	mutex_exit(&pptdev_mtx);
	return (err);
}

void
ppt_unassign_all(struct vm *vm)
{
	struct pptdev *ppt;

	mutex_enter(&pptdev_mtx);
	for (ppt = list_head(&pptdev_list); ppt != NULL;
	    ppt = list_next(&pptdev_list, ppt)) {
		if (ppt->vm == vm) {
			ppt_do_unassign(ppt);
		}
	}
	mutex_exit(&pptdev_mtx);
}

int
ppt_map_mmio(struct vm *vm, int pptfd, vm_paddr_t gpa, size_t len,
    vm_paddr_t hpa)
{
	struct pptdev *ppt;
	int err = 0;

	mutex_enter(&pptdev_mtx);
	err = ppt_findf(vm, pptfd, &ppt);
	if (err != 0) {
		mutex_exit(&pptdev_mtx);
		return (err);
	}

	/*
	 * Ensure that the host-physical range of the requested mapping fits
	 * within one of the MMIO BARs of the device.
	 */
	if (!ppt_bar_verify_mmio(ppt, hpa, len)) {
		err = EINVAL;
		goto done;
	}

	for (uint_t i = 0; i < MAX_MMIOSEGS; i++) {
		struct pptseg *seg = &ppt->mmio[i];

		if (seg->len == 0) {
			err = vm_map_mmio(vm, gpa, len, hpa);
			if (err == 0) {
				seg->gpa = gpa;
				seg->len = len;
			}
			goto done;
		}
	}
	err = ENOSPC;

done:
	releasef(pptfd);
	mutex_exit(&pptdev_mtx);
	return (err);
}

int
ppt_unmap_mmio(struct vm *vm, int pptfd, vm_paddr_t gpa, size_t len)
{
	struct pptdev *ppt;
	int err = 0;
	uint_t i;

	mutex_enter(&pptdev_mtx);
	err = ppt_findf(vm, pptfd, &ppt);
	if (err != 0) {
		mutex_exit(&pptdev_mtx);
		return (err);
	}

	for (i = 0; i < MAX_MMIOSEGS; i++) {
		struct pptseg *seg = &ppt->mmio[i];

		if (seg->gpa == gpa && seg->len == len) {
			err = vm_unmap_mmio(vm, seg->gpa, seg->len);
			if (err == 0) {
				seg->gpa = 0;
				seg->len = 0;
			}
			goto out;
		}
	}
	err = ENOENT;
out:
	releasef(pptfd);
	mutex_exit(&pptdev_mtx);
	return (err);
}
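
/*
 * Common interrupt handler for both MSI and MSI-X vectors.  The vector's
 * guest-programmed address/data pair is injected into the guest via
 * lapic_intr_msi().
 */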
static uint_t
pptintr(caddr_t arg, caddr_t unused)
{
	struct pptintr_arg *pptarg = (struct pptintr_arg *)arg;
	struct pptdev *ppt = pptarg->pptdev;

	if (ppt->vm != NULL) {
		lapic_intr_msi(ppt->vm, pptarg->addr, pptarg->msg_data);
	} else {
		/*
		 * XXX
		 * This is not expected to happen - panic?
		 */
	}

	/*
	 * For legacy interrupts give other filters a chance in case
	 * the interrupt was not generated by the passthrough device.
	 */
	return (ppt->msi.is_fixed ? DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED);
}

int
ppt_setup_msi(struct vm *vm, int vcpu, int pptfd, uint64_t addr, uint64_t msg,
    int numvec)
{
	int i, msi_count, intr_type;
	struct pptdev *ppt;
	int err = 0;

	if (numvec < 0 || numvec > MAX_MSIMSGS)
		return (EINVAL);

	mutex_enter(&pptdev_mtx);
	err = ppt_findf(vm, pptfd, &ppt);
	if (err != 0) {
		mutex_exit(&pptdev_mtx);
		return (err);
	}

	/* Reject attempts to enable MSI while MSI-X is active. */
	if (ppt->msix.num_msgs != 0 && numvec != 0) {
		err = EBUSY;
		goto done;
	}

	/* Free any allocated resources */
	ppt_teardown_msi(ppt);

	if (numvec == 0) {
		/* nothing more to do */
		goto done;
	}

	if (ddi_intr_get_navail(ppt->pptd_dip, DDI_INTR_TYPE_MSI,
	    &msi_count) != DDI_SUCCESS) {
		if (ddi_intr_get_navail(ppt->pptd_dip, DDI_INTR_TYPE_FIXED,
		    &msi_count) != DDI_SUCCESS) {
			err = EINVAL;
			goto done;
		}

		intr_type = DDI_INTR_TYPE_FIXED;
		ppt->msi.is_fixed = B_TRUE;
	} else {
		intr_type = DDI_INTR_TYPE_MSI;
	}

	/*
	 * The device must be capable of supporting the number of vectors
	 * the guest wants to allocate.
	 */
	if (numvec > msi_count) {
		err = EINVAL;
		goto done;
	}

	ppt->msi.inth_sz = numvec * sizeof (ddi_intr_handle_t);
	ppt->msi.inth = kmem_zalloc(ppt->msi.inth_sz, KM_SLEEP);
	if (ddi_intr_alloc(ppt->pptd_dip, ppt->msi.inth, intr_type, 0,
	    numvec, &msi_count, 0) != DDI_SUCCESS) {
		kmem_free(ppt->msi.inth, ppt->msi.inth_sz);
		err = EINVAL;
		goto done;
	}

	/* Verify that we got as many vectors as the guest requested */
	if (numvec != msi_count) {
		ppt_teardown_msi(ppt);
		err = EINVAL;
		goto done;
	}

	/* Set up & enable interrupt handler for each vector. */
	for (i = 0; i < numvec; i++) {
		int res, intr_cap = 0;

		ppt->msi.num_msgs = i + 1;
		ppt->msi.arg[i].pptdev = ppt;
		ppt->msi.arg[i].addr = addr;
		ppt->msi.arg[i].msg_data = msg + i;

		if (ddi_intr_add_handler(ppt->msi.inth[i], pptintr,
		    &ppt->msi.arg[i], NULL) != DDI_SUCCESS)
			break;

		(void) ddi_intr_get_cap(ppt->msi.inth[i], &intr_cap);
		if (intr_cap & DDI_INTR_FLAG_BLOCK)
			res = ddi_intr_block_enable(&ppt->msi.inth[i], 1);
		else
			res = ddi_intr_enable(ppt->msi.inth[i]);

		if (res != DDI_SUCCESS)
			break;
	}
	if (i < numvec) {
		ppt_teardown_msi(ppt);
		err = ENXIO;
	}

done:
	releasef(pptfd);
	mutex_exit(&pptdev_mtx);
	return (err);
}
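
/*
 * Configure one MSI-X vector on behalf of the guest.  The full complement
 * of vectors is allocated on first use; afterwards, individual vectors are
 * set up or torn down as the guest unmasks or masks them via
 * vector_control.
 */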
int
ppt_setup_msix(struct vm *vm, int vcpu, int pptfd, int idx, uint64_t addr,
    uint64_t msg, uint32_t vector_control)
{
	struct pptdev *ppt;
	int numvec, alloced;
	int err = 0;

	mutex_enter(&pptdev_mtx);
	err = ppt_findf(vm, pptfd, &ppt);
	if (err != 0) {
		mutex_exit(&pptdev_mtx);
		return (err);
	}

	/* Reject attempts to enable MSI-X while MSI is active. */
	if (ppt->msi.num_msgs != 0) {
		err = EBUSY;
		goto done;
	}

	/*
	 * First-time configuration:
	 *	Allocate the MSI-X table
	 *	Allocate the IRQ resources
	 *	Set up some variables in ppt->msix
	 */
	if (ppt->msix.num_msgs == 0) {
		dev_info_t *dip = ppt->pptd_dip;

		if (ddi_intr_get_navail(dip, DDI_INTR_TYPE_MSIX,
		    &numvec) != DDI_SUCCESS) {
			err = EINVAL;
			goto done;
		}

		ppt->msix.num_msgs = numvec;

		ppt->msix.arg_sz = numvec * sizeof (ppt->msix.arg[0]);
		ppt->msix.arg = kmem_zalloc(ppt->msix.arg_sz, KM_SLEEP);
		ppt->msix.inth_sz = numvec * sizeof (ddi_intr_handle_t);
		ppt->msix.inth = kmem_zalloc(ppt->msix.inth_sz, KM_SLEEP);

		if (ddi_intr_alloc(dip, ppt->msix.inth, DDI_INTR_TYPE_MSIX, 0,
		    numvec, &alloced, 0) != DDI_SUCCESS) {
			kmem_free(ppt->msix.arg, ppt->msix.arg_sz);
			kmem_free(ppt->msix.inth, ppt->msix.inth_sz);
			ppt->msix.arg = NULL;
			ppt->msix.inth = NULL;
			ppt->msix.arg_sz = ppt->msix.inth_sz = 0;
			err = EINVAL;
			goto done;
		}

		if (numvec != alloced) {
			ppt_teardown_msix(ppt);
			err = EINVAL;
			goto done;
		}
	}

	if (idx >= ppt->msix.num_msgs) {
		err = EINVAL;
		goto done;
	}

	if ((vector_control & PCIM_MSIX_VCTRL_MASK) == 0) {
		int intr_cap, res;

		/* Tear down the IRQ if it's already set up */
		ppt_teardown_msix_intr(ppt, idx);

		ppt->msix.arg[idx].pptdev = ppt;
		ppt->msix.arg[idx].addr = addr;
		ppt->msix.arg[idx].msg_data = msg;

		/* Setup the MSI-X interrupt */
		if (ddi_intr_add_handler(ppt->msix.inth[idx], pptintr,
		    &ppt->msix.arg[idx], NULL) != DDI_SUCCESS) {
			err = ENXIO;
			goto done;
		}

		(void) ddi_intr_get_cap(ppt->msix.inth[idx], &intr_cap);
		if (intr_cap & DDI_INTR_FLAG_BLOCK)
			res = ddi_intr_block_enable(&ppt->msix.inth[idx], 1);
		else
			res = ddi_intr_enable(ppt->msix.inth[idx]);

		if (res != DDI_SUCCESS) {
			ddi_intr_remove_handler(ppt->msix.inth[idx]);
			err = ENXIO;
			goto done;
		}
	} else {
		/* Masked, tear it down if it's already been set up */
		ppt_teardown_msix_intr(ppt, idx);
	}

done:
	releasef(pptfd);
	mutex_exit(&pptdev_mtx);
	return (err);
}

int
ppt_get_limits(struct vm *vm, int pptfd, int *msilimit, int *msixlimit)
{
	struct pptdev *ppt;
	int err = 0;

	mutex_enter(&pptdev_mtx);
	err = ppt_findf(vm, pptfd, &ppt);
	if (err != 0) {
		mutex_exit(&pptdev_mtx);
		return (err);
	}

	if (ddi_intr_get_navail(ppt->pptd_dip, DDI_INTR_TYPE_MSI,
	    msilimit) != DDI_SUCCESS) {
		*msilimit = -1;
	}
	if (ddi_intr_get_navail(ppt->pptd_dip, DDI_INTR_TYPE_MSIX,
	    msixlimit) != DDI_SUCCESS) {
		*msixlimit = -1;
	}

	releasef(pptfd);
	mutex_exit(&pptdev_mtx);
	return (err);
}

int
ppt_disable_msix(struct vm *vm, int pptfd)
{
	struct pptdev *ppt;
	int err = 0;

	mutex_enter(&pptdev_mtx);
	err = ppt_findf(vm, pptfd, &ppt);
	if (err != 0) {
		mutex_exit(&pptdev_mtx);
		return (err);
	}

	ppt_teardown_msix(ppt);

	releasef(pptfd);
	mutex_exit(&pptdev_mtx);
	return (err);
}