1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD 3 * 4 * Copyright (c) 2011 NetApp, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 * 28 * $FreeBSD$ 29 */ 30 31 #include <sys/cdefs.h> 32 __FBSDID("$FreeBSD$"); 33 34 #include <sys/param.h> 35 #include <sys/linker_set.h> 36 #include <sys/mman.h> 37 38 #include <ctype.h> 39 #include <err.h> 40 #include <errno.h> 41 #include <pthread.h> 42 #include <stdio.h> 43 #include <stdlib.h> 44 #include <string.h> 45 #include <strings.h> 46 #include <assert.h> 47 #include <stdbool.h> 48 #include <sysexits.h> 49 50 #include <machine/vmm.h> 51 #include <machine/vmm_snapshot.h> 52 #include <vmmapi.h> 53 54 #include "acpi.h" 55 #include "bhyverun.h" 56 #include "config.h" 57 #include "debug.h" 58 #include "inout.h" 59 #include "ioapic.h" 60 #include "mem.h" 61 #include "pci_emul.h" 62 #include "pci_irq.h" 63 #include "pci_lpc.h" 64 #include "pci_passthru.h" 65 66 #define CONF1_ADDR_PORT 0x0cf8 67 #define CONF1_DATA_PORT 0x0cfc 68 69 #define CONF1_ENABLE 0x80000000ul 70 71 #define MAXBUSES (PCI_BUSMAX + 1) 72 #define MAXSLOTS (PCI_SLOTMAX + 1) 73 #define MAXFUNCS (PCI_FUNCMAX + 1) 74 75 #define GB (1024 * 1024 * 1024UL) 76 77 struct funcinfo { 78 nvlist_t *fi_config; 79 struct pci_devemu *fi_pde; 80 struct pci_devinst *fi_devi; 81 }; 82 83 struct intxinfo { 84 int ii_count; 85 int ii_pirq_pin; 86 int ii_ioapic_irq; 87 }; 88 89 struct slotinfo { 90 struct intxinfo si_intpins[4]; 91 struct funcinfo si_funcs[MAXFUNCS]; 92 }; 93 94 struct businfo { 95 uint16_t iobase, iolimit; /* I/O window */ 96 uint32_t membase32, memlimit32; /* mmio window below 4GB */ 97 uint64_t membase64, memlimit64; /* mmio window above 4GB */ 98 struct slotinfo slotinfo[MAXSLOTS]; 99 }; 100 101 static struct businfo *pci_businfo[MAXBUSES]; 102 103 SET_DECLARE(pci_devemu_set, struct pci_devemu); 104 105 static uint64_t pci_emul_iobase; 106 static uint8_t *pci_emul_rombase; 107 static uint64_t pci_emul_romoffset; 108 static uint8_t *pci_emul_romlim; 109 static uint64_t pci_emul_membase32; 110 static uint64_t pci_emul_membase64; 111 static uint64_t pci_emul_memlim64; 112 113 struct pci_bar_allocation { 114 TAILQ_ENTRY(pci_bar_allocation) chain; 115 struct pci_devinst *pdi; 116 int idx; 117 enum pcibar_type type; 118 uint64_t size; 119 }; 120 121 static TAILQ_HEAD(pci_bar_list, pci_bar_allocation) pci_bars = 122 TAILQ_HEAD_INITIALIZER(pci_bars); 123 124 #define PCI_EMUL_IOBASE 0x2000 125 #define PCI_EMUL_IOLIMIT 0x10000 126 127 #define PCI_EMUL_ROMSIZE 0x10000000 128 129 #define PCI_EMUL_ECFG_BASE 0xE0000000 /* 3.5GB */ 130 #define PCI_EMUL_ECFG_SIZE (MAXBUSES * 1024 * 1024) /* 1MB per bus */ 131 SYSRES_MEM(PCI_EMUL_ECFG_BASE, PCI_EMUL_ECFG_SIZE); 132 133 /* 134 * OVMF always uses 0xC0000000 as base address for 32 bit PCI MMIO. Don't 135 * change this address without changing it in OVMF. 136 */ 137 #define PCI_EMUL_MEMBASE32 0xC0000000 138 #define PCI_EMUL_MEMLIMIT32 PCI_EMUL_ECFG_BASE 139 #define PCI_EMUL_MEMSIZE64 (32*GB) 140 141 static struct pci_devemu *pci_emul_finddev(const char *name); 142 static void pci_lintr_route(struct pci_devinst *pi); 143 static void pci_lintr_update(struct pci_devinst *pi); 144 static void pci_cfgrw(int in, int bus, int slot, int func, int coff, 145 int bytes, uint32_t *val); 146 147 static __inline void 148 CFGWRITE(struct pci_devinst *pi, int coff, uint32_t val, int bytes) 149 { 150 151 if (bytes == 1) 152 pci_set_cfgdata8(pi, coff, val); 153 else if (bytes == 2) 154 pci_set_cfgdata16(pi, coff, val); 155 else 156 pci_set_cfgdata32(pi, coff, val); 157 } 158 159 static __inline uint32_t 160 CFGREAD(struct pci_devinst *pi, int coff, int bytes) 161 { 162 163 if (bytes == 1) 164 return (pci_get_cfgdata8(pi, coff)); 165 else if (bytes == 2) 166 return (pci_get_cfgdata16(pi, coff)); 167 else 168 return (pci_get_cfgdata32(pi, coff)); 169 } 170 171 static int 172 is_pcir_bar(int coff) 173 { 174 return (coff >= PCIR_BAR(0) && coff < PCIR_BAR(PCI_BARMAX + 1)); 175 } 176 177 static int 178 is_pcir_bios(int coff) 179 { 180 return (coff >= PCIR_BIOS && coff < PCIR_BIOS + 4); 181 } 182 183 /* 184 * I/O access 185 */ 186 187 /* 188 * Slot options are in the form: 189 * 190 * <bus>:<slot>:<func>,<emul>[,<config>] 191 * <slot>[:<func>],<emul>[,<config>] 192 * 193 * slot is 0..31 194 * func is 0..7 195 * emul is a string describing the type of PCI device e.g. virtio-net 196 * config is an optional string, depending on the device, that can be 197 * used for configuration. 198 * Examples are: 199 * 1,virtio-net,tap0 200 * 3:0,dummy 201 */ 202 static void 203 pci_parse_slot_usage(char *aopt) 204 { 205 206 EPRINTLN("Invalid PCI slot info field \"%s\"", aopt); 207 } 208 209 /* 210 * Helper function to parse a list of comma-separated options where 211 * each option is formatted as "name[=value]". If no value is 212 * provided, the option is treated as a boolean and is given a value 213 * of true. 214 */ 215 int 216 pci_parse_legacy_config(nvlist_t *nvl, const char *opt) 217 { 218 char *config, *name, *tofree, *value; 219 220 if (opt == NULL) 221 return (0); 222 223 config = tofree = strdup(opt); 224 while ((name = strsep(&config, ",")) != NULL) { 225 value = strchr(name, '='); 226 if (value != NULL) { 227 *value = '\0'; 228 value++; 229 set_config_value_node(nvl, name, value); 230 } else 231 set_config_bool_node(nvl, name, true); 232 } 233 free(tofree); 234 return (0); 235 } 236 237 /* 238 * PCI device configuration is stored in MIBs that encode the device's 239 * location: 240 * 241 * pci.<bus>.<slot>.<func> 242 * 243 * Where "bus", "slot", and "func" are all decimal values without 244 * leading zeroes. Each valid device must have a "device" node which 245 * identifies the driver model of the device. 246 * 247 * Device backends can provide a parser for the "config" string. If 248 * a custom parser is not provided, pci_parse_legacy_config() is used 249 * to parse the string. 250 */ 251 int 252 pci_parse_slot(char *opt) 253 { 254 char node_name[sizeof("pci.XXX.XX.X")]; 255 struct pci_devemu *pde; 256 char *emul, *config, *str, *cp; 257 int error, bnum, snum, fnum; 258 nvlist_t *nvl; 259 260 error = -1; 261 str = strdup(opt); 262 263 emul = config = NULL; 264 if ((cp = strchr(str, ',')) != NULL) { 265 *cp = '\0'; 266 emul = cp + 1; 267 if ((cp = strchr(emul, ',')) != NULL) { 268 *cp = '\0'; 269 config = cp + 1; 270 } 271 } else { 272 pci_parse_slot_usage(opt); 273 goto done; 274 } 275 276 /* <bus>:<slot>:<func> */ 277 if (sscanf(str, "%d:%d:%d", &bnum, &snum, &fnum) != 3) { 278 bnum = 0; 279 /* <slot>:<func> */ 280 if (sscanf(str, "%d:%d", &snum, &fnum) != 2) { 281 fnum = 0; 282 /* <slot> */ 283 if (sscanf(str, "%d", &snum) != 1) { 284 snum = -1; 285 } 286 } 287 } 288 289 if (bnum < 0 || bnum >= MAXBUSES || snum < 0 || snum >= MAXSLOTS || 290 fnum < 0 || fnum >= MAXFUNCS) { 291 pci_parse_slot_usage(opt); 292 goto done; 293 } 294 295 pde = pci_emul_finddev(emul); 296 if (pde == NULL) { 297 EPRINTLN("pci slot %d:%d:%d: unknown device \"%s\"", bnum, snum, 298 fnum, emul); 299 goto done; 300 } 301 302 snprintf(node_name, sizeof(node_name), "pci.%d.%d.%d", bnum, snum, 303 fnum); 304 nvl = find_config_node(node_name); 305 if (nvl != NULL) { 306 EPRINTLN("pci slot %d:%d:%d already occupied!", bnum, snum, 307 fnum); 308 goto done; 309 } 310 nvl = create_config_node(node_name); 311 if (pde->pe_alias != NULL) 312 set_config_value_node(nvl, "device", pde->pe_alias); 313 else 314 set_config_value_node(nvl, "device", pde->pe_emu); 315 316 if (pde->pe_legacy_config != NULL) 317 error = pde->pe_legacy_config(nvl, config); 318 else 319 error = pci_parse_legacy_config(nvl, config); 320 done: 321 free(str); 322 return (error); 323 } 324 325 void 326 pci_print_supported_devices(void) 327 { 328 struct pci_devemu **pdpp, *pdp; 329 330 SET_FOREACH(pdpp, pci_devemu_set) { 331 pdp = *pdpp; 332 printf("%s\n", pdp->pe_emu); 333 } 334 } 335 336 uint32_t 337 pci_config_read_reg(const struct pcisel *const host_sel, nvlist_t *nvl, 338 const uint32_t reg, const uint8_t size, const uint32_t def) 339 { 340 const char *config; 341 const nvlist_t *pci_regs; 342 343 assert(size == 1 || size == 2 || size == 4); 344 345 pci_regs = find_relative_config_node(nvl, "pcireg"); 346 if (pci_regs == NULL) { 347 return def; 348 } 349 350 switch (reg) { 351 case PCIR_DEVICE: 352 config = get_config_value_node(pci_regs, "device"); 353 break; 354 case PCIR_VENDOR: 355 config = get_config_value_node(pci_regs, "vendor"); 356 break; 357 case PCIR_REVID: 358 config = get_config_value_node(pci_regs, "revid"); 359 break; 360 case PCIR_SUBVEND_0: 361 config = get_config_value_node(pci_regs, "subvendor"); 362 break; 363 case PCIR_SUBDEV_0: 364 config = get_config_value_node(pci_regs, "subdevice"); 365 break; 366 default: 367 return (-1); 368 } 369 370 if (config == NULL) { 371 return def; 372 } else if (host_sel != NULL && strcmp(config, "host") == 0) { 373 return read_config(host_sel, reg, size); 374 } else { 375 return strtol(config, NULL, 16); 376 } 377 } 378 379 static int 380 pci_valid_pba_offset(struct pci_devinst *pi, uint64_t offset) 381 { 382 383 if (offset < pi->pi_msix.pba_offset) 384 return (0); 385 386 if (offset >= pi->pi_msix.pba_offset + pi->pi_msix.pba_size) { 387 return (0); 388 } 389 390 return (1); 391 } 392 393 int 394 pci_emul_msix_twrite(struct pci_devinst *pi, uint64_t offset, int size, 395 uint64_t value) 396 { 397 int msix_entry_offset; 398 int tab_index; 399 char *dest; 400 401 /* support only 4 or 8 byte writes */ 402 if (size != 4 && size != 8) 403 return (-1); 404 405 /* 406 * Return if table index is beyond what device supports 407 */ 408 tab_index = offset / MSIX_TABLE_ENTRY_SIZE; 409 if (tab_index >= pi->pi_msix.table_count) 410 return (-1); 411 412 msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE; 413 414 /* support only aligned writes */ 415 if ((msix_entry_offset % size) != 0) 416 return (-1); 417 418 dest = (char *)(pi->pi_msix.table + tab_index); 419 dest += msix_entry_offset; 420 421 if (size == 4) 422 *((uint32_t *)dest) = value; 423 else 424 *((uint64_t *)dest) = value; 425 426 return (0); 427 } 428 429 uint64_t 430 pci_emul_msix_tread(struct pci_devinst *pi, uint64_t offset, int size) 431 { 432 char *dest; 433 int msix_entry_offset; 434 int tab_index; 435 uint64_t retval = ~0; 436 437 /* 438 * The PCI standard only allows 4 and 8 byte accesses to the MSI-X 439 * table but we also allow 1 byte access to accommodate reads from 440 * ddb. 441 */ 442 if (size != 1 && size != 4 && size != 8) 443 return (retval); 444 445 msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE; 446 447 /* support only aligned reads */ 448 if ((msix_entry_offset % size) != 0) { 449 return (retval); 450 } 451 452 tab_index = offset / MSIX_TABLE_ENTRY_SIZE; 453 454 if (tab_index < pi->pi_msix.table_count) { 455 /* valid MSI-X Table access */ 456 dest = (char *)(pi->pi_msix.table + tab_index); 457 dest += msix_entry_offset; 458 459 if (size == 1) 460 retval = *((uint8_t *)dest); 461 else if (size == 4) 462 retval = *((uint32_t *)dest); 463 else 464 retval = *((uint64_t *)dest); 465 } else if (pci_valid_pba_offset(pi, offset)) { 466 /* return 0 for PBA access */ 467 retval = 0; 468 } 469 470 return (retval); 471 } 472 473 int 474 pci_msix_table_bar(struct pci_devinst *pi) 475 { 476 477 if (pi->pi_msix.table != NULL) 478 return (pi->pi_msix.table_bar); 479 else 480 return (-1); 481 } 482 483 int 484 pci_msix_pba_bar(struct pci_devinst *pi) 485 { 486 487 if (pi->pi_msix.table != NULL) 488 return (pi->pi_msix.pba_bar); 489 else 490 return (-1); 491 } 492 493 static int 494 pci_emul_io_handler(struct vmctx *ctx __unused, int in, int port, 495 int bytes, uint32_t *eax, void *arg) 496 { 497 struct pci_devinst *pdi = arg; 498 struct pci_devemu *pe = pdi->pi_d; 499 uint64_t offset; 500 int i; 501 502 assert(port >= 0); 503 504 for (i = 0; i <= PCI_BARMAX; i++) { 505 if (pdi->pi_bar[i].type == PCIBAR_IO && 506 (uint64_t)port >= pdi->pi_bar[i].addr && 507 (uint64_t)port + bytes <= 508 pdi->pi_bar[i].addr + pdi->pi_bar[i].size) { 509 offset = port - pdi->pi_bar[i].addr; 510 if (in) 511 *eax = (*pe->pe_barread)(pdi, i, 512 offset, bytes); 513 else 514 (*pe->pe_barwrite)(pdi, i, offset, 515 bytes, *eax); 516 return (0); 517 } 518 } 519 return (-1); 520 } 521 522 static int 523 pci_emul_mem_handler(struct vcpu *vcpu __unused, int dir, 524 uint64_t addr, int size, uint64_t *val, void *arg1, long arg2) 525 { 526 struct pci_devinst *pdi = arg1; 527 struct pci_devemu *pe = pdi->pi_d; 528 uint64_t offset; 529 int bidx = (int) arg2; 530 531 assert(bidx <= PCI_BARMAX); 532 assert(pdi->pi_bar[bidx].type == PCIBAR_MEM32 || 533 pdi->pi_bar[bidx].type == PCIBAR_MEM64); 534 assert(addr >= pdi->pi_bar[bidx].addr && 535 addr + size <= pdi->pi_bar[bidx].addr + pdi->pi_bar[bidx].size); 536 537 offset = addr - pdi->pi_bar[bidx].addr; 538 539 if (dir == MEM_F_WRITE) { 540 if (size == 8) { 541 (*pe->pe_barwrite)(pdi, bidx, offset, 542 4, *val & 0xffffffff); 543 (*pe->pe_barwrite)(pdi, bidx, offset + 4, 544 4, *val >> 32); 545 } else { 546 (*pe->pe_barwrite)(pdi, bidx, offset, 547 size, *val); 548 } 549 } else { 550 if (size == 8) { 551 *val = (*pe->pe_barread)(pdi, bidx, 552 offset, 4); 553 *val |= (*pe->pe_barread)(pdi, bidx, 554 offset + 4, 4) << 32; 555 } else { 556 *val = (*pe->pe_barread)(pdi, bidx, 557 offset, size); 558 } 559 } 560 561 return (0); 562 } 563 564 565 static int 566 pci_emul_alloc_resource(uint64_t *baseptr, uint64_t limit, uint64_t size, 567 uint64_t *addr) 568 { 569 uint64_t base; 570 571 assert((size & (size - 1)) == 0); /* must be a power of 2 */ 572 573 base = roundup2(*baseptr, size); 574 575 if (base + size <= limit) { 576 *addr = base; 577 *baseptr = base + size; 578 return (0); 579 } else 580 return (-1); 581 } 582 583 /* 584 * Register (or unregister) the MMIO or I/O region associated with the BAR 585 * register 'idx' of an emulated pci device. 586 */ 587 static void 588 modify_bar_registration(struct pci_devinst *pi, int idx, int registration) 589 { 590 struct pci_devemu *pe; 591 int error; 592 struct inout_port iop; 593 struct mem_range mr; 594 595 pe = pi->pi_d; 596 switch (pi->pi_bar[idx].type) { 597 case PCIBAR_IO: 598 bzero(&iop, sizeof(struct inout_port)); 599 iop.name = pi->pi_name; 600 iop.port = pi->pi_bar[idx].addr; 601 iop.size = pi->pi_bar[idx].size; 602 if (registration) { 603 iop.flags = IOPORT_F_INOUT; 604 iop.handler = pci_emul_io_handler; 605 iop.arg = pi; 606 error = register_inout(&iop); 607 } else 608 error = unregister_inout(&iop); 609 if (pe->pe_baraddr != NULL) 610 (*pe->pe_baraddr)(pi, idx, registration, 611 pi->pi_bar[idx].addr); 612 break; 613 case PCIBAR_MEM32: 614 case PCIBAR_MEM64: 615 bzero(&mr, sizeof(struct mem_range)); 616 mr.name = pi->pi_name; 617 mr.base = pi->pi_bar[idx].addr; 618 mr.size = pi->pi_bar[idx].size; 619 if (registration) { 620 mr.flags = MEM_F_RW; 621 mr.handler = pci_emul_mem_handler; 622 mr.arg1 = pi; 623 mr.arg2 = idx; 624 error = register_mem(&mr); 625 } else 626 error = unregister_mem(&mr); 627 if (pe->pe_baraddr != NULL) 628 (*pe->pe_baraddr)(pi, idx, registration, 629 pi->pi_bar[idx].addr); 630 break; 631 case PCIBAR_ROM: 632 error = 0; 633 if (pe->pe_baraddr != NULL) 634 (*pe->pe_baraddr)(pi, idx, registration, 635 pi->pi_bar[idx].addr); 636 break; 637 default: 638 error = EINVAL; 639 break; 640 } 641 assert(error == 0); 642 } 643 644 static void 645 unregister_bar(struct pci_devinst *pi, int idx) 646 { 647 648 modify_bar_registration(pi, idx, 0); 649 } 650 651 static void 652 register_bar(struct pci_devinst *pi, int idx) 653 { 654 655 modify_bar_registration(pi, idx, 1); 656 } 657 658 /* Is the ROM enabled for the emulated pci device? */ 659 static int 660 romen(struct pci_devinst *pi) 661 { 662 return (pi->pi_bar[PCI_ROM_IDX].lobits & PCIM_BIOS_ENABLE) == 663 PCIM_BIOS_ENABLE; 664 } 665 666 /* Are we decoding i/o port accesses for the emulated pci device? */ 667 static int 668 porten(struct pci_devinst *pi) 669 { 670 uint16_t cmd; 671 672 cmd = pci_get_cfgdata16(pi, PCIR_COMMAND); 673 674 return (cmd & PCIM_CMD_PORTEN); 675 } 676 677 /* Are we decoding memory accesses for the emulated pci device? */ 678 static int 679 memen(struct pci_devinst *pi) 680 { 681 uint16_t cmd; 682 683 cmd = pci_get_cfgdata16(pi, PCIR_COMMAND); 684 685 return (cmd & PCIM_CMD_MEMEN); 686 } 687 688 /* 689 * Update the MMIO or I/O address that is decoded by the BAR register. 690 * 691 * If the pci device has enabled the address space decoding then intercept 692 * the address range decoded by the BAR register. 693 */ 694 static void 695 update_bar_address(struct pci_devinst *pi, uint64_t addr, int idx, int type) 696 { 697 int decode; 698 699 if (pi->pi_bar[idx].type == PCIBAR_IO) 700 decode = porten(pi); 701 else 702 decode = memen(pi); 703 704 if (decode) 705 unregister_bar(pi, idx); 706 707 switch (type) { 708 case PCIBAR_IO: 709 case PCIBAR_MEM32: 710 pi->pi_bar[idx].addr = addr; 711 break; 712 case PCIBAR_MEM64: 713 pi->pi_bar[idx].addr &= ~0xffffffffUL; 714 pi->pi_bar[idx].addr |= addr; 715 break; 716 case PCIBAR_MEMHI64: 717 pi->pi_bar[idx].addr &= 0xffffffff; 718 pi->pi_bar[idx].addr |= addr; 719 break; 720 default: 721 assert(0); 722 } 723 724 if (decode) 725 register_bar(pi, idx); 726 } 727 728 int 729 pci_emul_alloc_bar(struct pci_devinst *pdi, int idx, enum pcibar_type type, 730 uint64_t size) 731 { 732 assert((type == PCIBAR_ROM) || (idx >= 0 && idx <= PCI_BARMAX)); 733 assert((type != PCIBAR_ROM) || (idx == PCI_ROM_IDX)); 734 735 if ((size & (size - 1)) != 0) 736 size = 1UL << flsl(size); /* round up to a power of 2 */ 737 738 /* Enforce minimum BAR sizes required by the PCI standard */ 739 if (type == PCIBAR_IO) { 740 if (size < 4) 741 size = 4; 742 } else if (type == PCIBAR_ROM) { 743 if (size < ~PCIM_BIOS_ADDR_MASK + 1) 744 size = ~PCIM_BIOS_ADDR_MASK + 1; 745 } else { 746 if (size < 16) 747 size = 16; 748 } 749 750 /* 751 * To reduce fragmentation of the MMIO space, we allocate the BARs by 752 * size. Therefore, don't allocate the BAR yet. We create a list of all 753 * BAR allocation which is sorted by BAR size. When all PCI devices are 754 * initialized, we will assign an address to the BARs. 755 */ 756 757 /* create a new list entry */ 758 struct pci_bar_allocation *const new_bar = malloc(sizeof(*new_bar)); 759 memset(new_bar, 0, sizeof(*new_bar)); 760 new_bar->pdi = pdi; 761 new_bar->idx = idx; 762 new_bar->type = type; 763 new_bar->size = size; 764 765 /* 766 * Search for a BAR which size is lower than the size of our newly 767 * allocated BAR. 768 */ 769 struct pci_bar_allocation *bar = NULL; 770 TAILQ_FOREACH(bar, &pci_bars, chain) { 771 if (bar->size < size) { 772 break; 773 } 774 } 775 776 if (bar == NULL) { 777 /* 778 * Either the list is empty or new BAR is the smallest BAR of 779 * the list. Append it to the end of our list. 780 */ 781 TAILQ_INSERT_TAIL(&pci_bars, new_bar, chain); 782 } else { 783 /* 784 * The found BAR is smaller than our new BAR. For that reason, 785 * insert our new BAR before the found BAR. 786 */ 787 TAILQ_INSERT_BEFORE(bar, new_bar, chain); 788 } 789 790 /* 791 * pci_passthru devices synchronize their physical and virtual command 792 * register on init. For that reason, the virtual cmd reg should be 793 * updated as early as possible. 794 */ 795 uint16_t enbit = 0; 796 switch (type) { 797 case PCIBAR_IO: 798 enbit = PCIM_CMD_PORTEN; 799 break; 800 case PCIBAR_MEM64: 801 case PCIBAR_MEM32: 802 enbit = PCIM_CMD_MEMEN; 803 break; 804 default: 805 enbit = 0; 806 break; 807 } 808 809 const uint16_t cmd = pci_get_cfgdata16(pdi, PCIR_COMMAND); 810 pci_set_cfgdata16(pdi, PCIR_COMMAND, cmd | enbit); 811 812 return (0); 813 } 814 815 static int 816 pci_emul_assign_bar(struct pci_devinst *const pdi, const int idx, 817 const enum pcibar_type type, const uint64_t size) 818 { 819 int error; 820 uint64_t *baseptr, limit, addr, mask, lobits, bar; 821 822 switch (type) { 823 case PCIBAR_NONE: 824 baseptr = NULL; 825 addr = mask = lobits = 0; 826 break; 827 case PCIBAR_IO: 828 baseptr = &pci_emul_iobase; 829 limit = PCI_EMUL_IOLIMIT; 830 mask = PCIM_BAR_IO_BASE; 831 lobits = PCIM_BAR_IO_SPACE; 832 break; 833 case PCIBAR_MEM64: 834 /* 835 * XXX 836 * Some drivers do not work well if the 64-bit BAR is allocated 837 * above 4GB. Allow for this by allocating small requests under 838 * 4GB unless then allocation size is larger than some arbitrary 839 * number (128MB currently). 840 */ 841 if (size > 128 * 1024 * 1024) { 842 baseptr = &pci_emul_membase64; 843 limit = pci_emul_memlim64; 844 mask = PCIM_BAR_MEM_BASE; 845 lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 | 846 PCIM_BAR_MEM_PREFETCH; 847 } else { 848 baseptr = &pci_emul_membase32; 849 limit = PCI_EMUL_MEMLIMIT32; 850 mask = PCIM_BAR_MEM_BASE; 851 lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64; 852 } 853 break; 854 case PCIBAR_MEM32: 855 baseptr = &pci_emul_membase32; 856 limit = PCI_EMUL_MEMLIMIT32; 857 mask = PCIM_BAR_MEM_BASE; 858 lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32; 859 break; 860 case PCIBAR_ROM: 861 /* do not claim memory for ROM. OVMF will do it for us. */ 862 baseptr = NULL; 863 limit = 0; 864 mask = PCIM_BIOS_ADDR_MASK; 865 lobits = 0; 866 break; 867 default: 868 printf("pci_emul_alloc_base: invalid bar type %d\n", type); 869 assert(0); 870 } 871 872 if (baseptr != NULL) { 873 error = pci_emul_alloc_resource(baseptr, limit, size, &addr); 874 if (error != 0) 875 return (error); 876 } else { 877 addr = 0; 878 } 879 880 pdi->pi_bar[idx].type = type; 881 pdi->pi_bar[idx].addr = addr; 882 pdi->pi_bar[idx].size = size; 883 /* 884 * passthru devices are using same lobits as physical device they set 885 * this property 886 */ 887 if (pdi->pi_bar[idx].lobits != 0) { 888 lobits = pdi->pi_bar[idx].lobits; 889 } else { 890 pdi->pi_bar[idx].lobits = lobits; 891 } 892 893 /* Initialize the BAR register in config space */ 894 bar = (addr & mask) | lobits; 895 pci_set_cfgdata32(pdi, PCIR_BAR(idx), bar); 896 897 if (type == PCIBAR_MEM64) { 898 assert(idx + 1 <= PCI_BARMAX); 899 pdi->pi_bar[idx + 1].type = PCIBAR_MEMHI64; 900 pci_set_cfgdata32(pdi, PCIR_BAR(idx + 1), bar >> 32); 901 } 902 903 if (type != PCIBAR_ROM) { 904 register_bar(pdi, idx); 905 } 906 907 return (0); 908 } 909 910 int 911 pci_emul_alloc_rom(struct pci_devinst *const pdi, const uint64_t size, 912 void **const addr) 913 { 914 /* allocate ROM space once on first call */ 915 if (pci_emul_rombase == 0) { 916 pci_emul_rombase = vm_create_devmem(pdi->pi_vmctx, VM_PCIROM, 917 "pcirom", PCI_EMUL_ROMSIZE); 918 if (pci_emul_rombase == MAP_FAILED) { 919 warnx("%s: failed to create rom segment", __func__); 920 return (-1); 921 } 922 pci_emul_romlim = pci_emul_rombase + PCI_EMUL_ROMSIZE; 923 pci_emul_romoffset = 0; 924 } 925 926 /* ROM size should be a power of 2 and greater than 2 KB */ 927 const uint64_t rom_size = MAX(1UL << flsl(size), 928 ~PCIM_BIOS_ADDR_MASK + 1); 929 930 /* check if ROM fits into ROM space */ 931 if (pci_emul_romoffset + rom_size > PCI_EMUL_ROMSIZE) { 932 warnx("%s: no space left in rom segment:", __func__); 933 warnx("%16lu bytes left", 934 PCI_EMUL_ROMSIZE - pci_emul_romoffset); 935 warnx("%16lu bytes required by %d/%d/%d", rom_size, pdi->pi_bus, 936 pdi->pi_slot, pdi->pi_func); 937 return (-1); 938 } 939 940 /* allocate ROM BAR */ 941 const int error = pci_emul_alloc_bar(pdi, PCI_ROM_IDX, PCIBAR_ROM, 942 rom_size); 943 if (error) 944 return error; 945 946 /* return address */ 947 *addr = pci_emul_rombase + pci_emul_romoffset; 948 949 /* save offset into ROM Space */ 950 pdi->pi_romoffset = pci_emul_romoffset; 951 952 /* increase offset for next ROM */ 953 pci_emul_romoffset += rom_size; 954 955 return (0); 956 } 957 958 #define CAP_START_OFFSET 0x40 959 static int 960 pci_emul_add_capability(struct pci_devinst *pi, u_char *capdata, int caplen) 961 { 962 int i, capoff, reallen; 963 uint16_t sts; 964 965 assert(caplen > 0); 966 967 reallen = roundup2(caplen, 4); /* dword aligned */ 968 969 sts = pci_get_cfgdata16(pi, PCIR_STATUS); 970 if ((sts & PCIM_STATUS_CAPPRESENT) == 0) 971 capoff = CAP_START_OFFSET; 972 else 973 capoff = pi->pi_capend + 1; 974 975 /* Check if we have enough space */ 976 if (capoff + reallen > PCI_REGMAX + 1) 977 return (-1); 978 979 /* Set the previous capability pointer */ 980 if ((sts & PCIM_STATUS_CAPPRESENT) == 0) { 981 pci_set_cfgdata8(pi, PCIR_CAP_PTR, capoff); 982 pci_set_cfgdata16(pi, PCIR_STATUS, sts|PCIM_STATUS_CAPPRESENT); 983 } else 984 pci_set_cfgdata8(pi, pi->pi_prevcap + 1, capoff); 985 986 /* Copy the capability */ 987 for (i = 0; i < caplen; i++) 988 pci_set_cfgdata8(pi, capoff + i, capdata[i]); 989 990 /* Set the next capability pointer */ 991 pci_set_cfgdata8(pi, capoff + 1, 0); 992 993 pi->pi_prevcap = capoff; 994 pi->pi_capend = capoff + reallen - 1; 995 return (0); 996 } 997 998 static struct pci_devemu * 999 pci_emul_finddev(const char *name) 1000 { 1001 struct pci_devemu **pdpp, *pdp; 1002 1003 SET_FOREACH(pdpp, pci_devemu_set) { 1004 pdp = *pdpp; 1005 if (!strcmp(pdp->pe_emu, name)) { 1006 return (pdp); 1007 } 1008 } 1009 1010 return (NULL); 1011 } 1012 1013 static int 1014 pci_emul_init(struct vmctx *ctx, struct pci_devemu *pde, int bus, int slot, 1015 int func, struct funcinfo *fi) 1016 { 1017 struct pci_devinst *pdi; 1018 int err; 1019 1020 pdi = calloc(1, sizeof(struct pci_devinst)); 1021 1022 pdi->pi_vmctx = ctx; 1023 pdi->pi_bus = bus; 1024 pdi->pi_slot = slot; 1025 pdi->pi_func = func; 1026 pthread_mutex_init(&pdi->pi_lintr.lock, NULL); 1027 pdi->pi_lintr.pin = 0; 1028 pdi->pi_lintr.state = IDLE; 1029 pdi->pi_lintr.pirq_pin = 0; 1030 pdi->pi_lintr.ioapic_irq = 0; 1031 pdi->pi_d = pde; 1032 snprintf(pdi->pi_name, PI_NAMESZ, "%s-pci-%d", pde->pe_emu, slot); 1033 1034 /* Disable legacy interrupts */ 1035 pci_set_cfgdata8(pdi, PCIR_INTLINE, 255); 1036 pci_set_cfgdata8(pdi, PCIR_INTPIN, 0); 1037 1038 pci_set_cfgdata8(pdi, PCIR_COMMAND, PCIM_CMD_BUSMASTEREN); 1039 1040 err = (*pde->pe_init)(pdi, fi->fi_config); 1041 if (err == 0) 1042 fi->fi_devi = pdi; 1043 else 1044 free(pdi); 1045 1046 return (err); 1047 } 1048 1049 void 1050 pci_populate_msicap(struct msicap *msicap, int msgnum, int nextptr) 1051 { 1052 int mmc; 1053 1054 /* Number of msi messages must be a power of 2 between 1 and 32 */ 1055 assert((msgnum & (msgnum - 1)) == 0 && msgnum >= 1 && msgnum <= 32); 1056 mmc = ffs(msgnum) - 1; 1057 1058 bzero(msicap, sizeof(struct msicap)); 1059 msicap->capid = PCIY_MSI; 1060 msicap->nextptr = nextptr; 1061 msicap->msgctrl = PCIM_MSICTRL_64BIT | (mmc << 1); 1062 } 1063 1064 int 1065 pci_emul_add_msicap(struct pci_devinst *pi, int msgnum) 1066 { 1067 struct msicap msicap; 1068 1069 pci_populate_msicap(&msicap, msgnum, 0); 1070 1071 return (pci_emul_add_capability(pi, (u_char *)&msicap, sizeof(msicap))); 1072 } 1073 1074 static void 1075 pci_populate_msixcap(struct msixcap *msixcap, int msgnum, int barnum, 1076 uint32_t msix_tab_size) 1077 { 1078 1079 assert(msix_tab_size % 4096 == 0); 1080 1081 bzero(msixcap, sizeof(struct msixcap)); 1082 msixcap->capid = PCIY_MSIX; 1083 1084 /* 1085 * Message Control Register, all fields set to 1086 * zero except for the Table Size. 1087 * Note: Table size N is encoded as N-1 1088 */ 1089 msixcap->msgctrl = msgnum - 1; 1090 1091 /* 1092 * MSI-X BAR setup: 1093 * - MSI-X table start at offset 0 1094 * - PBA table starts at a 4K aligned offset after the MSI-X table 1095 */ 1096 msixcap->table_info = barnum & PCIM_MSIX_BIR_MASK; 1097 msixcap->pba_info = msix_tab_size | (barnum & PCIM_MSIX_BIR_MASK); 1098 } 1099 1100 static void 1101 pci_msix_table_init(struct pci_devinst *pi, int table_entries) 1102 { 1103 int i, table_size; 1104 1105 assert(table_entries > 0); 1106 assert(table_entries <= MAX_MSIX_TABLE_ENTRIES); 1107 1108 table_size = table_entries * MSIX_TABLE_ENTRY_SIZE; 1109 pi->pi_msix.table = calloc(1, table_size); 1110 1111 /* set mask bit of vector control register */ 1112 for (i = 0; i < table_entries; i++) 1113 pi->pi_msix.table[i].vector_control |= PCIM_MSIX_VCTRL_MASK; 1114 } 1115 1116 int 1117 pci_emul_add_msixcap(struct pci_devinst *pi, int msgnum, int barnum) 1118 { 1119 uint32_t tab_size; 1120 struct msixcap msixcap; 1121 1122 assert(msgnum >= 1 && msgnum <= MAX_MSIX_TABLE_ENTRIES); 1123 assert(barnum >= 0 && barnum <= PCIR_MAX_BAR_0); 1124 1125 tab_size = msgnum * MSIX_TABLE_ENTRY_SIZE; 1126 1127 /* Align table size to nearest 4K */ 1128 tab_size = roundup2(tab_size, 4096); 1129 1130 pi->pi_msix.table_bar = barnum; 1131 pi->pi_msix.pba_bar = barnum; 1132 pi->pi_msix.table_offset = 0; 1133 pi->pi_msix.table_count = msgnum; 1134 pi->pi_msix.pba_offset = tab_size; 1135 pi->pi_msix.pba_size = PBA_SIZE(msgnum); 1136 1137 pci_msix_table_init(pi, msgnum); 1138 1139 pci_populate_msixcap(&msixcap, msgnum, barnum, tab_size); 1140 1141 /* allocate memory for MSI-X Table and PBA */ 1142 pci_emul_alloc_bar(pi, barnum, PCIBAR_MEM32, 1143 tab_size + pi->pi_msix.pba_size); 1144 1145 return (pci_emul_add_capability(pi, (u_char *)&msixcap, 1146 sizeof(msixcap))); 1147 } 1148 1149 static void 1150 msixcap_cfgwrite(struct pci_devinst *pi, int capoff, int offset, 1151 int bytes, uint32_t val) 1152 { 1153 uint16_t msgctrl, rwmask; 1154 int off; 1155 1156 off = offset - capoff; 1157 /* Message Control Register */ 1158 if (off == 2 && bytes == 2) { 1159 rwmask = PCIM_MSIXCTRL_MSIX_ENABLE | PCIM_MSIXCTRL_FUNCTION_MASK; 1160 msgctrl = pci_get_cfgdata16(pi, offset); 1161 msgctrl &= ~rwmask; 1162 msgctrl |= val & rwmask; 1163 val = msgctrl; 1164 1165 pi->pi_msix.enabled = val & PCIM_MSIXCTRL_MSIX_ENABLE; 1166 pi->pi_msix.function_mask = val & PCIM_MSIXCTRL_FUNCTION_MASK; 1167 pci_lintr_update(pi); 1168 } 1169 1170 CFGWRITE(pi, offset, val, bytes); 1171 } 1172 1173 static void 1174 msicap_cfgwrite(struct pci_devinst *pi, int capoff, int offset, 1175 int bytes, uint32_t val) 1176 { 1177 uint16_t msgctrl, rwmask, msgdata, mme; 1178 uint32_t addrlo; 1179 1180 /* 1181 * If guest is writing to the message control register make sure 1182 * we do not overwrite read-only fields. 1183 */ 1184 if ((offset - capoff) == 2 && bytes == 2) { 1185 rwmask = PCIM_MSICTRL_MME_MASK | PCIM_MSICTRL_MSI_ENABLE; 1186 msgctrl = pci_get_cfgdata16(pi, offset); 1187 msgctrl &= ~rwmask; 1188 msgctrl |= val & rwmask; 1189 val = msgctrl; 1190 } 1191 CFGWRITE(pi, offset, val, bytes); 1192 1193 msgctrl = pci_get_cfgdata16(pi, capoff + 2); 1194 addrlo = pci_get_cfgdata32(pi, capoff + 4); 1195 if (msgctrl & PCIM_MSICTRL_64BIT) 1196 msgdata = pci_get_cfgdata16(pi, capoff + 12); 1197 else 1198 msgdata = pci_get_cfgdata16(pi, capoff + 8); 1199 1200 mme = msgctrl & PCIM_MSICTRL_MME_MASK; 1201 pi->pi_msi.enabled = msgctrl & PCIM_MSICTRL_MSI_ENABLE ? 1 : 0; 1202 if (pi->pi_msi.enabled) { 1203 pi->pi_msi.addr = addrlo; 1204 pi->pi_msi.msg_data = msgdata; 1205 pi->pi_msi.maxmsgnum = 1 << (mme >> 4); 1206 } else { 1207 pi->pi_msi.maxmsgnum = 0; 1208 } 1209 pci_lintr_update(pi); 1210 } 1211 1212 static void 1213 pciecap_cfgwrite(struct pci_devinst *pi, int capoff __unused, int offset, 1214 int bytes, uint32_t val) 1215 { 1216 1217 /* XXX don't write to the readonly parts */ 1218 CFGWRITE(pi, offset, val, bytes); 1219 } 1220 1221 #define PCIECAP_VERSION 0x2 1222 int 1223 pci_emul_add_pciecap(struct pci_devinst *pi, int type) 1224 { 1225 int err; 1226 struct pciecap pciecap; 1227 1228 bzero(&pciecap, sizeof(pciecap)); 1229 1230 /* 1231 * Use the integrated endpoint type for endpoints on a root complex bus. 1232 * 1233 * NB: bhyve currently only supports a single PCI bus that is the root 1234 * complex bus, so all endpoints are integrated. 1235 */ 1236 if ((type == PCIEM_TYPE_ENDPOINT) && (pi->pi_bus == 0)) 1237 type = PCIEM_TYPE_ROOT_INT_EP; 1238 1239 pciecap.capid = PCIY_EXPRESS; 1240 pciecap.pcie_capabilities = PCIECAP_VERSION | type; 1241 if (type != PCIEM_TYPE_ROOT_INT_EP) { 1242 pciecap.link_capabilities = 0x411; /* gen1, x1 */ 1243 pciecap.link_status = 0x11; /* gen1, x1 */ 1244 } 1245 1246 err = pci_emul_add_capability(pi, (u_char *)&pciecap, sizeof(pciecap)); 1247 return (err); 1248 } 1249 1250 /* 1251 * This function assumes that 'coff' is in the capabilities region of the 1252 * config space. A capoff parameter of zero will force a search for the 1253 * offset and type. 1254 */ 1255 void 1256 pci_emul_capwrite(struct pci_devinst *pi, int offset, int bytes, uint32_t val, 1257 uint8_t capoff, int capid) 1258 { 1259 uint8_t nextoff; 1260 1261 /* Do not allow un-aligned writes */ 1262 if ((offset & (bytes - 1)) != 0) 1263 return; 1264 1265 if (capoff == 0) { 1266 /* Find the capability that we want to update */ 1267 capoff = CAP_START_OFFSET; 1268 while (1) { 1269 nextoff = pci_get_cfgdata8(pi, capoff + 1); 1270 if (nextoff == 0) 1271 break; 1272 if (offset >= capoff && offset < nextoff) 1273 break; 1274 1275 capoff = nextoff; 1276 } 1277 assert(offset >= capoff); 1278 capid = pci_get_cfgdata8(pi, capoff); 1279 } 1280 1281 /* 1282 * Capability ID and Next Capability Pointer are readonly. 1283 * However, some o/s's do 4-byte writes that include these. 1284 * For this case, trim the write back to 2 bytes and adjust 1285 * the data. 1286 */ 1287 if (offset == capoff || offset == capoff + 1) { 1288 if (offset == capoff && bytes == 4) { 1289 bytes = 2; 1290 offset += 2; 1291 val >>= 16; 1292 } else 1293 return; 1294 } 1295 1296 switch (capid) { 1297 case PCIY_MSI: 1298 msicap_cfgwrite(pi, capoff, offset, bytes, val); 1299 break; 1300 case PCIY_MSIX: 1301 msixcap_cfgwrite(pi, capoff, offset, bytes, val); 1302 break; 1303 case PCIY_EXPRESS: 1304 pciecap_cfgwrite(pi, capoff, offset, bytes, val); 1305 break; 1306 default: 1307 break; 1308 } 1309 } 1310 1311 static int 1312 pci_emul_iscap(struct pci_devinst *pi, int offset) 1313 { 1314 uint16_t sts; 1315 1316 sts = pci_get_cfgdata16(pi, PCIR_STATUS); 1317 if ((sts & PCIM_STATUS_CAPPRESENT) != 0) { 1318 if (offset >= CAP_START_OFFSET && offset <= pi->pi_capend) 1319 return (1); 1320 } 1321 return (0); 1322 } 1323 1324 static int 1325 pci_emul_fallback_handler(struct vcpu *vcpu __unused, int dir, 1326 uint64_t addr __unused, int size __unused, uint64_t *val, 1327 void *arg1 __unused, long arg2 __unused) 1328 { 1329 /* 1330 * Ignore writes; return 0xff's for reads. The mem read code 1331 * will take care of truncating to the correct size. 1332 */ 1333 if (dir == MEM_F_READ) { 1334 *val = 0xffffffffffffffff; 1335 } 1336 1337 return (0); 1338 } 1339 1340 static int 1341 pci_emul_ecfg_handler(struct vcpu *vcpu __unused, int dir, uint64_t addr, 1342 int bytes, uint64_t *val, void *arg1 __unused, long arg2 __unused) 1343 { 1344 int bus, slot, func, coff, in; 1345 1346 coff = addr & 0xfff; 1347 func = (addr >> 12) & 0x7; 1348 slot = (addr >> 15) & 0x1f; 1349 bus = (addr >> 20) & 0xff; 1350 in = (dir == MEM_F_READ); 1351 if (in) 1352 *val = ~0UL; 1353 pci_cfgrw(in, bus, slot, func, coff, bytes, (uint32_t *)val); 1354 return (0); 1355 } 1356 1357 uint64_t 1358 pci_ecfg_base(void) 1359 { 1360 1361 return (PCI_EMUL_ECFG_BASE); 1362 } 1363 1364 #define BUSIO_ROUNDUP 32 1365 #define BUSMEM32_ROUNDUP (1024 * 1024) 1366 #define BUSMEM64_ROUNDUP (512 * 1024 * 1024) 1367 1368 int 1369 init_pci(struct vmctx *ctx) 1370 { 1371 char node_name[sizeof("pci.XXX.XX.X")]; 1372 struct mem_range mr; 1373 struct pci_devemu *pde; 1374 struct businfo *bi; 1375 struct slotinfo *si; 1376 struct funcinfo *fi; 1377 nvlist_t *nvl; 1378 const char *emul; 1379 size_t lowmem; 1380 int bus, slot, func; 1381 int error; 1382 1383 if (vm_get_lowmem_limit(ctx) > PCI_EMUL_MEMBASE32) 1384 errx(EX_OSERR, "Invalid lowmem limit"); 1385 1386 pci_emul_iobase = PCI_EMUL_IOBASE; 1387 pci_emul_membase32 = PCI_EMUL_MEMBASE32; 1388 1389 pci_emul_membase64 = 4*GB + vm_get_highmem_size(ctx); 1390 pci_emul_membase64 = roundup2(pci_emul_membase64, PCI_EMUL_MEMSIZE64); 1391 pci_emul_memlim64 = pci_emul_membase64 + PCI_EMUL_MEMSIZE64; 1392 1393 for (bus = 0; bus < MAXBUSES; bus++) { 1394 snprintf(node_name, sizeof(node_name), "pci.%d", bus); 1395 nvl = find_config_node(node_name); 1396 if (nvl == NULL) 1397 continue; 1398 pci_businfo[bus] = calloc(1, sizeof(struct businfo)); 1399 bi = pci_businfo[bus]; 1400 1401 /* 1402 * Keep track of the i/o and memory resources allocated to 1403 * this bus. 1404 */ 1405 bi->iobase = pci_emul_iobase; 1406 bi->membase32 = pci_emul_membase32; 1407 bi->membase64 = pci_emul_membase64; 1408 1409 /* first run: init devices */ 1410 for (slot = 0; slot < MAXSLOTS; slot++) { 1411 si = &bi->slotinfo[slot]; 1412 for (func = 0; func < MAXFUNCS; func++) { 1413 fi = &si->si_funcs[func]; 1414 snprintf(node_name, sizeof(node_name), 1415 "pci.%d.%d.%d", bus, slot, func); 1416 nvl = find_config_node(node_name); 1417 if (nvl == NULL) 1418 continue; 1419 1420 fi->fi_config = nvl; 1421 emul = get_config_value_node(nvl, "device"); 1422 if (emul == NULL) { 1423 EPRINTLN("pci slot %d:%d:%d: missing " 1424 "\"device\" value", bus, slot, func); 1425 return (EINVAL); 1426 } 1427 pde = pci_emul_finddev(emul); 1428 if (pde == NULL) { 1429 EPRINTLN("pci slot %d:%d:%d: unknown " 1430 "device \"%s\"", bus, slot, func, 1431 emul); 1432 return (EINVAL); 1433 } 1434 if (pde->pe_alias != NULL) { 1435 EPRINTLN("pci slot %d:%d:%d: legacy " 1436 "device \"%s\", use \"%s\" instead", 1437 bus, slot, func, emul, 1438 pde->pe_alias); 1439 return (EINVAL); 1440 } 1441 fi->fi_pde = pde; 1442 error = pci_emul_init(ctx, pde, bus, slot, 1443 func, fi); 1444 if (error) 1445 return (error); 1446 } 1447 } 1448 1449 /* second run: assign BARs and free list */ 1450 struct pci_bar_allocation *bar; 1451 struct pci_bar_allocation *bar_tmp; 1452 TAILQ_FOREACH_SAFE(bar, &pci_bars, chain, bar_tmp) { 1453 pci_emul_assign_bar(bar->pdi, bar->idx, bar->type, 1454 bar->size); 1455 free(bar); 1456 } 1457 TAILQ_INIT(&pci_bars); 1458 1459 /* 1460 * Add some slop to the I/O and memory resources decoded by 1461 * this bus to give a guest some flexibility if it wants to 1462 * reprogram the BARs. 1463 */ 1464 pci_emul_iobase += BUSIO_ROUNDUP; 1465 pci_emul_iobase = roundup2(pci_emul_iobase, BUSIO_ROUNDUP); 1466 bi->iolimit = pci_emul_iobase; 1467 1468 pci_emul_membase32 += BUSMEM32_ROUNDUP; 1469 pci_emul_membase32 = roundup2(pci_emul_membase32, 1470 BUSMEM32_ROUNDUP); 1471 bi->memlimit32 = pci_emul_membase32; 1472 1473 pci_emul_membase64 += BUSMEM64_ROUNDUP; 1474 pci_emul_membase64 = roundup2(pci_emul_membase64, 1475 BUSMEM64_ROUNDUP); 1476 bi->memlimit64 = pci_emul_membase64; 1477 } 1478 1479 /* 1480 * PCI backends are initialized before routing INTx interrupts 1481 * so that LPC devices are able to reserve ISA IRQs before 1482 * routing PIRQ pins. 1483 */ 1484 for (bus = 0; bus < MAXBUSES; bus++) { 1485 if ((bi = pci_businfo[bus]) == NULL) 1486 continue; 1487 1488 for (slot = 0; slot < MAXSLOTS; slot++) { 1489 si = &bi->slotinfo[slot]; 1490 for (func = 0; func < MAXFUNCS; func++) { 1491 fi = &si->si_funcs[func]; 1492 if (fi->fi_devi == NULL) 1493 continue; 1494 pci_lintr_route(fi->fi_devi); 1495 } 1496 } 1497 } 1498 lpc_pirq_routed(); 1499 1500 /* 1501 * The guest physical memory map looks like the following: 1502 * [0, lowmem) guest system memory 1503 * [lowmem, 0xC0000000) memory hole (may be absent) 1504 * [0xC0000000, 0xE0000000) PCI hole (32-bit BAR allocation) 1505 * [0xE0000000, 0xF0000000) PCI extended config window 1506 * [0xF0000000, 4GB) LAPIC, IOAPIC, HPET, firmware 1507 * [4GB, 4GB + highmem) 1508 */ 1509 1510 /* 1511 * Accesses to memory addresses that are not allocated to system 1512 * memory or PCI devices return 0xff's. 1513 */ 1514 lowmem = vm_get_lowmem_size(ctx); 1515 bzero(&mr, sizeof(struct mem_range)); 1516 mr.name = "PCI hole"; 1517 mr.flags = MEM_F_RW | MEM_F_IMMUTABLE; 1518 mr.base = lowmem; 1519 mr.size = (4ULL * 1024 * 1024 * 1024) - lowmem; 1520 mr.handler = pci_emul_fallback_handler; 1521 error = register_mem_fallback(&mr); 1522 assert(error == 0); 1523 1524 /* PCI extended config space */ 1525 bzero(&mr, sizeof(struct mem_range)); 1526 mr.name = "PCI ECFG"; 1527 mr.flags = MEM_F_RW | MEM_F_IMMUTABLE; 1528 mr.base = PCI_EMUL_ECFG_BASE; 1529 mr.size = PCI_EMUL_ECFG_SIZE; 1530 mr.handler = pci_emul_ecfg_handler; 1531 error = register_mem(&mr); 1532 assert(error == 0); 1533 1534 return (0); 1535 } 1536 1537 static void 1538 pci_apic_prt_entry(int bus __unused, int slot, int pin, int pirq_pin __unused, 1539 int ioapic_irq, void *arg __unused) 1540 { 1541 1542 dsdt_line(" Package ()"); 1543 dsdt_line(" {"); 1544 dsdt_line(" 0x%X,", slot << 16 | 0xffff); 1545 dsdt_line(" 0x%02X,", pin - 1); 1546 dsdt_line(" Zero,"); 1547 dsdt_line(" 0x%X", ioapic_irq); 1548 dsdt_line(" },"); 1549 } 1550 1551 static void 1552 pci_pirq_prt_entry(int bus __unused, int slot, int pin, int pirq_pin, 1553 int ioapic_irq __unused, void *arg __unused) 1554 { 1555 char *name; 1556 1557 name = lpc_pirq_name(pirq_pin); 1558 if (name == NULL) 1559 return; 1560 dsdt_line(" Package ()"); 1561 dsdt_line(" {"); 1562 dsdt_line(" 0x%X,", slot << 16 | 0xffff); 1563 dsdt_line(" 0x%02X,", pin - 1); 1564 dsdt_line(" %s,", name); 1565 dsdt_line(" 0x00"); 1566 dsdt_line(" },"); 1567 free(name); 1568 } 1569 1570 /* 1571 * A bhyve virtual machine has a flat PCI hierarchy with a root port 1572 * corresponding to each PCI bus. 1573 */ 1574 static void 1575 pci_bus_write_dsdt(int bus) 1576 { 1577 struct businfo *bi; 1578 struct slotinfo *si; 1579 struct pci_devinst *pi; 1580 int count, func, slot; 1581 1582 /* 1583 * If there are no devices on this 'bus' then just return. 1584 */ 1585 if ((bi = pci_businfo[bus]) == NULL) { 1586 /* 1587 * Bus 0 is special because it decodes the I/O ports used 1588 * for PCI config space access even if there are no devices 1589 * on it. 1590 */ 1591 if (bus != 0) 1592 return; 1593 } 1594 1595 dsdt_line(" Device (PC%02X)", bus); 1596 dsdt_line(" {"); 1597 dsdt_line(" Name (_HID, EisaId (\"PNP0A03\"))"); 1598 1599 dsdt_line(" Method (_BBN, 0, NotSerialized)"); 1600 dsdt_line(" {"); 1601 dsdt_line(" Return (0x%08X)", bus); 1602 dsdt_line(" }"); 1603 dsdt_line(" Name (_CRS, ResourceTemplate ()"); 1604 dsdt_line(" {"); 1605 dsdt_line(" WordBusNumber (ResourceProducer, MinFixed, " 1606 "MaxFixed, PosDecode,"); 1607 dsdt_line(" 0x0000, // Granularity"); 1608 dsdt_line(" 0x%04X, // Range Minimum", bus); 1609 dsdt_line(" 0x%04X, // Range Maximum", bus); 1610 dsdt_line(" 0x0000, // Translation Offset"); 1611 dsdt_line(" 0x0001, // Length"); 1612 dsdt_line(" ,, )"); 1613 1614 if (bus == 0) { 1615 dsdt_indent(3); 1616 dsdt_fixed_ioport(0xCF8, 8); 1617 dsdt_unindent(3); 1618 1619 dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, " 1620 "PosDecode, EntireRange,"); 1621 dsdt_line(" 0x0000, // Granularity"); 1622 dsdt_line(" 0x0000, // Range Minimum"); 1623 dsdt_line(" 0x0CF7, // Range Maximum"); 1624 dsdt_line(" 0x0000, // Translation Offset"); 1625 dsdt_line(" 0x0CF8, // Length"); 1626 dsdt_line(" ,, , TypeStatic)"); 1627 1628 dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, " 1629 "PosDecode, EntireRange,"); 1630 dsdt_line(" 0x0000, // Granularity"); 1631 dsdt_line(" 0x0D00, // Range Minimum"); 1632 dsdt_line(" 0x%04X, // Range Maximum", 1633 PCI_EMUL_IOBASE - 1); 1634 dsdt_line(" 0x0000, // Translation Offset"); 1635 dsdt_line(" 0x%04X, // Length", 1636 PCI_EMUL_IOBASE - 0x0D00); 1637 dsdt_line(" ,, , TypeStatic)"); 1638 1639 if (bi == NULL) { 1640 dsdt_line(" })"); 1641 goto done; 1642 } 1643 } 1644 assert(bi != NULL); 1645 1646 /* i/o window */ 1647 dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, " 1648 "PosDecode, EntireRange,"); 1649 dsdt_line(" 0x0000, // Granularity"); 1650 dsdt_line(" 0x%04X, // Range Minimum", bi->iobase); 1651 dsdt_line(" 0x%04X, // Range Maximum", 1652 bi->iolimit - 1); 1653 dsdt_line(" 0x0000, // Translation Offset"); 1654 dsdt_line(" 0x%04X, // Length", 1655 bi->iolimit - bi->iobase); 1656 dsdt_line(" ,, , TypeStatic)"); 1657 1658 /* mmio window (32-bit) */ 1659 dsdt_line(" DWordMemory (ResourceProducer, PosDecode, " 1660 "MinFixed, MaxFixed, NonCacheable, ReadWrite,"); 1661 dsdt_line(" 0x00000000, // Granularity"); 1662 dsdt_line(" 0x%08X, // Range Minimum\n", bi->membase32); 1663 dsdt_line(" 0x%08X, // Range Maximum\n", 1664 bi->memlimit32 - 1); 1665 dsdt_line(" 0x00000000, // Translation Offset"); 1666 dsdt_line(" 0x%08X, // Length\n", 1667 bi->memlimit32 - bi->membase32); 1668 dsdt_line(" ,, , AddressRangeMemory, TypeStatic)"); 1669 1670 /* mmio window (64-bit) */ 1671 dsdt_line(" QWordMemory (ResourceProducer, PosDecode, " 1672 "MinFixed, MaxFixed, NonCacheable, ReadWrite,"); 1673 dsdt_line(" 0x0000000000000000, // Granularity"); 1674 dsdt_line(" 0x%016lX, // Range Minimum\n", bi->membase64); 1675 dsdt_line(" 0x%016lX, // Range Maximum\n", 1676 bi->memlimit64 - 1); 1677 dsdt_line(" 0x0000000000000000, // Translation Offset"); 1678 dsdt_line(" 0x%016lX, // Length\n", 1679 bi->memlimit64 - bi->membase64); 1680 dsdt_line(" ,, , AddressRangeMemory, TypeStatic)"); 1681 dsdt_line(" })"); 1682 1683 count = pci_count_lintr(bus); 1684 if (count != 0) { 1685 dsdt_indent(2); 1686 dsdt_line("Name (PPRT, Package ()"); 1687 dsdt_line("{"); 1688 pci_walk_lintr(bus, pci_pirq_prt_entry, NULL); 1689 dsdt_line("})"); 1690 dsdt_line("Name (APRT, Package ()"); 1691 dsdt_line("{"); 1692 pci_walk_lintr(bus, pci_apic_prt_entry, NULL); 1693 dsdt_line("})"); 1694 dsdt_line("Method (_PRT, 0, NotSerialized)"); 1695 dsdt_line("{"); 1696 dsdt_line(" If (PICM)"); 1697 dsdt_line(" {"); 1698 dsdt_line(" Return (APRT)"); 1699 dsdt_line(" }"); 1700 dsdt_line(" Else"); 1701 dsdt_line(" {"); 1702 dsdt_line(" Return (PPRT)"); 1703 dsdt_line(" }"); 1704 dsdt_line("}"); 1705 dsdt_unindent(2); 1706 } 1707 1708 dsdt_indent(2); 1709 for (slot = 0; slot < MAXSLOTS; slot++) { 1710 si = &bi->slotinfo[slot]; 1711 for (func = 0; func < MAXFUNCS; func++) { 1712 pi = si->si_funcs[func].fi_devi; 1713 if (pi != NULL && pi->pi_d->pe_write_dsdt != NULL) 1714 pi->pi_d->pe_write_dsdt(pi); 1715 } 1716 } 1717 dsdt_unindent(2); 1718 done: 1719 dsdt_line(" }"); 1720 } 1721 1722 void 1723 pci_write_dsdt(void) 1724 { 1725 int bus; 1726 1727 dsdt_indent(1); 1728 dsdt_line("Name (PICM, 0x00)"); 1729 dsdt_line("Method (_PIC, 1, NotSerialized)"); 1730 dsdt_line("{"); 1731 dsdt_line(" Store (Arg0, PICM)"); 1732 dsdt_line("}"); 1733 dsdt_line(""); 1734 dsdt_line("Scope (_SB)"); 1735 dsdt_line("{"); 1736 for (bus = 0; bus < MAXBUSES; bus++) 1737 pci_bus_write_dsdt(bus); 1738 dsdt_line("}"); 1739 dsdt_unindent(1); 1740 } 1741 1742 int 1743 pci_bus_configured(int bus) 1744 { 1745 assert(bus >= 0 && bus < MAXBUSES); 1746 return (pci_businfo[bus] != NULL); 1747 } 1748 1749 int 1750 pci_msi_enabled(struct pci_devinst *pi) 1751 { 1752 return (pi->pi_msi.enabled); 1753 } 1754 1755 int 1756 pci_msi_maxmsgnum(struct pci_devinst *pi) 1757 { 1758 if (pi->pi_msi.enabled) 1759 return (pi->pi_msi.maxmsgnum); 1760 else 1761 return (0); 1762 } 1763 1764 int 1765 pci_msix_enabled(struct pci_devinst *pi) 1766 { 1767 1768 return (pi->pi_msix.enabled && !pi->pi_msi.enabled); 1769 } 1770 1771 void 1772 pci_generate_msix(struct pci_devinst *pi, int index) 1773 { 1774 struct msix_table_entry *mte; 1775 1776 if (!pci_msix_enabled(pi)) 1777 return; 1778 1779 if (pi->pi_msix.function_mask) 1780 return; 1781 1782 if (index >= pi->pi_msix.table_count) 1783 return; 1784 1785 mte = &pi->pi_msix.table[index]; 1786 if ((mte->vector_control & PCIM_MSIX_VCTRL_MASK) == 0) { 1787 /* XXX Set PBA bit if interrupt is disabled */ 1788 vm_lapic_msi(pi->pi_vmctx, mte->addr, mte->msg_data); 1789 } 1790 } 1791 1792 void 1793 pci_generate_msi(struct pci_devinst *pi, int index) 1794 { 1795 1796 if (pci_msi_enabled(pi) && index < pci_msi_maxmsgnum(pi)) { 1797 vm_lapic_msi(pi->pi_vmctx, pi->pi_msi.addr, 1798 pi->pi_msi.msg_data + index); 1799 } 1800 } 1801 1802 static bool 1803 pci_lintr_permitted(struct pci_devinst *pi) 1804 { 1805 uint16_t cmd; 1806 1807 cmd = pci_get_cfgdata16(pi, PCIR_COMMAND); 1808 return (!(pi->pi_msi.enabled || pi->pi_msix.enabled || 1809 (cmd & PCIM_CMD_INTxDIS))); 1810 } 1811 1812 void 1813 pci_lintr_request(struct pci_devinst *pi) 1814 { 1815 struct businfo *bi; 1816 struct slotinfo *si; 1817 int bestpin, bestcount, pin; 1818 1819 bi = pci_businfo[pi->pi_bus]; 1820 assert(bi != NULL); 1821 1822 /* 1823 * Just allocate a pin from our slot. The pin will be 1824 * assigned IRQs later when interrupts are routed. 1825 */ 1826 si = &bi->slotinfo[pi->pi_slot]; 1827 bestpin = 0; 1828 bestcount = si->si_intpins[0].ii_count; 1829 for (pin = 1; pin < 4; pin++) { 1830 if (si->si_intpins[pin].ii_count < bestcount) { 1831 bestpin = pin; 1832 bestcount = si->si_intpins[pin].ii_count; 1833 } 1834 } 1835 1836 si->si_intpins[bestpin].ii_count++; 1837 pi->pi_lintr.pin = bestpin + 1; 1838 pci_set_cfgdata8(pi, PCIR_INTPIN, bestpin + 1); 1839 } 1840 1841 static void 1842 pci_lintr_route(struct pci_devinst *pi) 1843 { 1844 struct businfo *bi; 1845 struct intxinfo *ii; 1846 1847 if (pi->pi_lintr.pin == 0) 1848 return; 1849 1850 bi = pci_businfo[pi->pi_bus]; 1851 assert(bi != NULL); 1852 ii = &bi->slotinfo[pi->pi_slot].si_intpins[pi->pi_lintr.pin - 1]; 1853 1854 /* 1855 * Attempt to allocate an I/O APIC pin for this intpin if one 1856 * is not yet assigned. 1857 */ 1858 if (ii->ii_ioapic_irq == 0) 1859 ii->ii_ioapic_irq = ioapic_pci_alloc_irq(pi); 1860 assert(ii->ii_ioapic_irq > 0); 1861 1862 /* 1863 * Attempt to allocate a PIRQ pin for this intpin if one is 1864 * not yet assigned. 1865 */ 1866 if (ii->ii_pirq_pin == 0) 1867 ii->ii_pirq_pin = pirq_alloc_pin(pi); 1868 assert(ii->ii_pirq_pin > 0); 1869 1870 pi->pi_lintr.ioapic_irq = ii->ii_ioapic_irq; 1871 pi->pi_lintr.pirq_pin = ii->ii_pirq_pin; 1872 pci_set_cfgdata8(pi, PCIR_INTLINE, pirq_irq(ii->ii_pirq_pin)); 1873 } 1874 1875 void 1876 pci_lintr_assert(struct pci_devinst *pi) 1877 { 1878 1879 assert(pi->pi_lintr.pin > 0); 1880 1881 pthread_mutex_lock(&pi->pi_lintr.lock); 1882 if (pi->pi_lintr.state == IDLE) { 1883 if (pci_lintr_permitted(pi)) { 1884 pi->pi_lintr.state = ASSERTED; 1885 pci_irq_assert(pi); 1886 } else 1887 pi->pi_lintr.state = PENDING; 1888 } 1889 pthread_mutex_unlock(&pi->pi_lintr.lock); 1890 } 1891 1892 void 1893 pci_lintr_deassert(struct pci_devinst *pi) 1894 { 1895 1896 assert(pi->pi_lintr.pin > 0); 1897 1898 pthread_mutex_lock(&pi->pi_lintr.lock); 1899 if (pi->pi_lintr.state == ASSERTED) { 1900 pi->pi_lintr.state = IDLE; 1901 pci_irq_deassert(pi); 1902 } else if (pi->pi_lintr.state == PENDING) 1903 pi->pi_lintr.state = IDLE; 1904 pthread_mutex_unlock(&pi->pi_lintr.lock); 1905 } 1906 1907 static void 1908 pci_lintr_update(struct pci_devinst *pi) 1909 { 1910 1911 pthread_mutex_lock(&pi->pi_lintr.lock); 1912 if (pi->pi_lintr.state == ASSERTED && !pci_lintr_permitted(pi)) { 1913 pci_irq_deassert(pi); 1914 pi->pi_lintr.state = PENDING; 1915 } else if (pi->pi_lintr.state == PENDING && pci_lintr_permitted(pi)) { 1916 pi->pi_lintr.state = ASSERTED; 1917 pci_irq_assert(pi); 1918 } 1919 pthread_mutex_unlock(&pi->pi_lintr.lock); 1920 } 1921 1922 int 1923 pci_count_lintr(int bus) 1924 { 1925 int count, slot, pin; 1926 struct slotinfo *slotinfo; 1927 1928 count = 0; 1929 if (pci_businfo[bus] != NULL) { 1930 for (slot = 0; slot < MAXSLOTS; slot++) { 1931 slotinfo = &pci_businfo[bus]->slotinfo[slot]; 1932 for (pin = 0; pin < 4; pin++) { 1933 if (slotinfo->si_intpins[pin].ii_count != 0) 1934 count++; 1935 } 1936 } 1937 } 1938 return (count); 1939 } 1940 1941 void 1942 pci_walk_lintr(int bus, pci_lintr_cb cb, void *arg) 1943 { 1944 struct businfo *bi; 1945 struct slotinfo *si; 1946 struct intxinfo *ii; 1947 int slot, pin; 1948 1949 if ((bi = pci_businfo[bus]) == NULL) 1950 return; 1951 1952 for (slot = 0; slot < MAXSLOTS; slot++) { 1953 si = &bi->slotinfo[slot]; 1954 for (pin = 0; pin < 4; pin++) { 1955 ii = &si->si_intpins[pin]; 1956 if (ii->ii_count != 0) 1957 cb(bus, slot, pin + 1, ii->ii_pirq_pin, 1958 ii->ii_ioapic_irq, arg); 1959 } 1960 } 1961 } 1962 1963 /* 1964 * Return 1 if the emulated device in 'slot' is a multi-function device. 1965 * Return 0 otherwise. 1966 */ 1967 static int 1968 pci_emul_is_mfdev(int bus, int slot) 1969 { 1970 struct businfo *bi; 1971 struct slotinfo *si; 1972 int f, numfuncs; 1973 1974 numfuncs = 0; 1975 if ((bi = pci_businfo[bus]) != NULL) { 1976 si = &bi->slotinfo[slot]; 1977 for (f = 0; f < MAXFUNCS; f++) { 1978 if (si->si_funcs[f].fi_devi != NULL) { 1979 numfuncs++; 1980 } 1981 } 1982 } 1983 return (numfuncs > 1); 1984 } 1985 1986 /* 1987 * Ensure that the PCIM_MFDEV bit is properly set (or unset) depending on 1988 * whether or not is a multi-function being emulated in the pci 'slot'. 1989 */ 1990 static void 1991 pci_emul_hdrtype_fixup(int bus, int slot, int off, int bytes, uint32_t *rv) 1992 { 1993 int mfdev; 1994 1995 if (off <= PCIR_HDRTYPE && off + bytes > PCIR_HDRTYPE) { 1996 mfdev = pci_emul_is_mfdev(bus, slot); 1997 switch (bytes) { 1998 case 1: 1999 case 2: 2000 *rv &= ~PCIM_MFDEV; 2001 if (mfdev) { 2002 *rv |= PCIM_MFDEV; 2003 } 2004 break; 2005 case 4: 2006 *rv &= ~(PCIM_MFDEV << 16); 2007 if (mfdev) { 2008 *rv |= (PCIM_MFDEV << 16); 2009 } 2010 break; 2011 } 2012 } 2013 } 2014 2015 /* 2016 * Update device state in response to changes to the PCI command 2017 * register. 2018 */ 2019 void 2020 pci_emul_cmd_changed(struct pci_devinst *pi, uint16_t old) 2021 { 2022 int i; 2023 uint16_t changed, new; 2024 2025 new = pci_get_cfgdata16(pi, PCIR_COMMAND); 2026 changed = old ^ new; 2027 2028 /* 2029 * If the MMIO or I/O address space decoding has changed then 2030 * register/unregister all BARs that decode that address space. 2031 */ 2032 for (i = 0; i <= PCI_BARMAX_WITH_ROM; i++) { 2033 switch (pi->pi_bar[i].type) { 2034 case PCIBAR_NONE: 2035 case PCIBAR_MEMHI64: 2036 break; 2037 case PCIBAR_IO: 2038 /* I/O address space decoding changed? */ 2039 if (changed & PCIM_CMD_PORTEN) { 2040 if (new & PCIM_CMD_PORTEN) 2041 register_bar(pi, i); 2042 else 2043 unregister_bar(pi, i); 2044 } 2045 break; 2046 case PCIBAR_ROM: 2047 /* skip (un-)register of ROM if it disabled */ 2048 if (!romen(pi)) 2049 break; 2050 /* fallthrough */ 2051 case PCIBAR_MEM32: 2052 case PCIBAR_MEM64: 2053 /* MMIO address space decoding changed? */ 2054 if (changed & PCIM_CMD_MEMEN) { 2055 if (new & PCIM_CMD_MEMEN) 2056 register_bar(pi, i); 2057 else 2058 unregister_bar(pi, i); 2059 } 2060 break; 2061 default: 2062 assert(0); 2063 } 2064 } 2065 2066 /* 2067 * If INTx has been unmasked and is pending, assert the 2068 * interrupt. 2069 */ 2070 pci_lintr_update(pi); 2071 } 2072 2073 static void 2074 pci_emul_cmdsts_write(struct pci_devinst *pi, int coff, uint32_t new, int bytes) 2075 { 2076 int rshift; 2077 uint32_t cmd, old, readonly; 2078 2079 cmd = pci_get_cfgdata16(pi, PCIR_COMMAND); /* stash old value */ 2080 2081 /* 2082 * From PCI Local Bus Specification 3.0 sections 6.2.2 and 6.2.3. 2083 * 2084 * XXX Bits 8, 11, 12, 13, 14 and 15 in the status register are 2085 * 'write 1 to clear'. However these bits are not set to '1' by 2086 * any device emulation so it is simpler to treat them as readonly. 2087 */ 2088 rshift = (coff & 0x3) * 8; 2089 readonly = 0xFFFFF880 >> rshift; 2090 2091 old = CFGREAD(pi, coff, bytes); 2092 new &= ~readonly; 2093 new |= (old & readonly); 2094 CFGWRITE(pi, coff, new, bytes); /* update config */ 2095 2096 pci_emul_cmd_changed(pi, cmd); 2097 } 2098 2099 static void 2100 pci_cfgrw(int in, int bus, int slot, int func, int coff, int bytes, 2101 uint32_t *eax) 2102 { 2103 struct businfo *bi; 2104 struct slotinfo *si; 2105 struct pci_devinst *pi; 2106 struct pci_devemu *pe; 2107 int idx, needcfg; 2108 uint64_t addr, bar, mask; 2109 2110 if ((bi = pci_businfo[bus]) != NULL) { 2111 si = &bi->slotinfo[slot]; 2112 pi = si->si_funcs[func].fi_devi; 2113 } else 2114 pi = NULL; 2115 2116 /* 2117 * Just return if there is no device at this slot:func or if the 2118 * the guest is doing an un-aligned access. 2119 */ 2120 if (pi == NULL || (bytes != 1 && bytes != 2 && bytes != 4) || 2121 (coff & (bytes - 1)) != 0) { 2122 if (in) 2123 *eax = 0xffffffff; 2124 return; 2125 } 2126 2127 /* 2128 * Ignore all writes beyond the standard config space and return all 2129 * ones on reads. 2130 */ 2131 if (coff >= PCI_REGMAX + 1) { 2132 if (in) { 2133 *eax = 0xffffffff; 2134 /* 2135 * Extended capabilities begin at offset 256 in config 2136 * space. Absence of extended capabilities is signaled 2137 * with all 0s in the extended capability header at 2138 * offset 256. 2139 */ 2140 if (coff <= PCI_REGMAX + 4) 2141 *eax = 0x00000000; 2142 } 2143 return; 2144 } 2145 2146 pe = pi->pi_d; 2147 2148 /* 2149 * Config read 2150 */ 2151 if (in) { 2152 /* Let the device emulation override the default handler */ 2153 if (pe->pe_cfgread != NULL) { 2154 needcfg = pe->pe_cfgread(pi, coff, bytes, eax); 2155 } else { 2156 needcfg = 1; 2157 } 2158 2159 if (needcfg) 2160 *eax = CFGREAD(pi, coff, bytes); 2161 2162 pci_emul_hdrtype_fixup(bus, slot, coff, bytes, eax); 2163 } else { 2164 /* Let the device emulation override the default handler */ 2165 if (pe->pe_cfgwrite != NULL && 2166 (*pe->pe_cfgwrite)(pi, coff, bytes, *eax) == 0) 2167 return; 2168 2169 /* 2170 * Special handling for write to BAR and ROM registers 2171 */ 2172 if (is_pcir_bar(coff) || is_pcir_bios(coff)) { 2173 /* 2174 * Ignore writes to BAR registers that are not 2175 * 4-byte aligned. 2176 */ 2177 if (bytes != 4 || (coff & 0x3) != 0) 2178 return; 2179 2180 if (is_pcir_bar(coff)) { 2181 idx = (coff - PCIR_BAR(0)) / 4; 2182 } else if (is_pcir_bios(coff)) { 2183 idx = PCI_ROM_IDX; 2184 } else { 2185 errx(4, "%s: invalid BAR offset %d", __func__, 2186 coff); 2187 } 2188 2189 mask = ~(pi->pi_bar[idx].size - 1); 2190 switch (pi->pi_bar[idx].type) { 2191 case PCIBAR_NONE: 2192 pi->pi_bar[idx].addr = bar = 0; 2193 break; 2194 case PCIBAR_IO: 2195 addr = *eax & mask; 2196 addr &= 0xffff; 2197 bar = addr | pi->pi_bar[idx].lobits; 2198 /* 2199 * Register the new BAR value for interception 2200 */ 2201 if (addr != pi->pi_bar[idx].addr) { 2202 update_bar_address(pi, addr, idx, 2203 PCIBAR_IO); 2204 } 2205 break; 2206 case PCIBAR_MEM32: 2207 addr = bar = *eax & mask; 2208 bar |= pi->pi_bar[idx].lobits; 2209 if (addr != pi->pi_bar[idx].addr) { 2210 update_bar_address(pi, addr, idx, 2211 PCIBAR_MEM32); 2212 } 2213 break; 2214 case PCIBAR_MEM64: 2215 addr = bar = *eax & mask; 2216 bar |= pi->pi_bar[idx].lobits; 2217 if (addr != (uint32_t)pi->pi_bar[idx].addr) { 2218 update_bar_address(pi, addr, idx, 2219 PCIBAR_MEM64); 2220 } 2221 break; 2222 case PCIBAR_MEMHI64: 2223 mask = ~(pi->pi_bar[idx - 1].size - 1); 2224 addr = ((uint64_t)*eax << 32) & mask; 2225 bar = addr >> 32; 2226 if (bar != pi->pi_bar[idx - 1].addr >> 32) { 2227 update_bar_address(pi, addr, idx - 1, 2228 PCIBAR_MEMHI64); 2229 } 2230 break; 2231 case PCIBAR_ROM: 2232 addr = bar = *eax & mask; 2233 if (memen(pi) && romen(pi)) { 2234 unregister_bar(pi, idx); 2235 } 2236 pi->pi_bar[idx].addr = addr; 2237 pi->pi_bar[idx].lobits = *eax & 2238 PCIM_BIOS_ENABLE; 2239 /* romen could have changed it value */ 2240 if (memen(pi) && romen(pi)) { 2241 register_bar(pi, idx); 2242 } 2243 bar |= pi->pi_bar[idx].lobits; 2244 break; 2245 default: 2246 assert(0); 2247 } 2248 pci_set_cfgdata32(pi, coff, bar); 2249 2250 } else if (pci_emul_iscap(pi, coff)) { 2251 pci_emul_capwrite(pi, coff, bytes, *eax, 0, 0); 2252 } else if (coff >= PCIR_COMMAND && coff < PCIR_REVID) { 2253 pci_emul_cmdsts_write(pi, coff, *eax, bytes); 2254 } else { 2255 CFGWRITE(pi, coff, *eax, bytes); 2256 } 2257 } 2258 } 2259 2260 static int cfgenable, cfgbus, cfgslot, cfgfunc, cfgoff; 2261 2262 static int 2263 pci_emul_cfgaddr(struct vmctx *ctx __unused, int in, 2264 int port __unused, int bytes, uint32_t *eax, void *arg __unused) 2265 { 2266 uint32_t x; 2267 2268 if (bytes != 4) { 2269 if (in) 2270 *eax = (bytes == 2) ? 0xffff : 0xff; 2271 return (0); 2272 } 2273 2274 if (in) { 2275 x = (cfgbus << 16) | (cfgslot << 11) | (cfgfunc << 8) | cfgoff; 2276 if (cfgenable) 2277 x |= CONF1_ENABLE; 2278 *eax = x; 2279 } else { 2280 x = *eax; 2281 cfgenable = (x & CONF1_ENABLE) == CONF1_ENABLE; 2282 cfgoff = (x & PCI_REGMAX) & ~0x03; 2283 cfgfunc = (x >> 8) & PCI_FUNCMAX; 2284 cfgslot = (x >> 11) & PCI_SLOTMAX; 2285 cfgbus = (x >> 16) & PCI_BUSMAX; 2286 } 2287 2288 return (0); 2289 } 2290 INOUT_PORT(pci_cfgaddr, CONF1_ADDR_PORT, IOPORT_F_INOUT, pci_emul_cfgaddr); 2291 2292 static int 2293 pci_emul_cfgdata(struct vmctx *ctx __unused, int in, int port, 2294 int bytes, uint32_t *eax, void *arg __unused) 2295 { 2296 int coff; 2297 2298 assert(bytes == 1 || bytes == 2 || bytes == 4); 2299 2300 coff = cfgoff + (port - CONF1_DATA_PORT); 2301 if (cfgenable) { 2302 pci_cfgrw(in, cfgbus, cfgslot, cfgfunc, coff, bytes, eax); 2303 } else { 2304 /* Ignore accesses to cfgdata if not enabled by cfgaddr */ 2305 if (in) 2306 *eax = 0xffffffff; 2307 } 2308 return (0); 2309 } 2310 2311 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+0, IOPORT_F_INOUT, pci_emul_cfgdata); 2312 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+1, IOPORT_F_INOUT, pci_emul_cfgdata); 2313 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+2, IOPORT_F_INOUT, pci_emul_cfgdata); 2314 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+3, IOPORT_F_INOUT, pci_emul_cfgdata); 2315 2316 #ifdef BHYVE_SNAPSHOT 2317 /* 2318 * Saves/restores PCI device emulated state. Returns 0 on success. 2319 */ 2320 static int 2321 pci_snapshot_pci_dev(struct vm_snapshot_meta *meta) 2322 { 2323 struct pci_devinst *pi; 2324 int i; 2325 int ret; 2326 2327 pi = meta->dev_data; 2328 2329 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msi.enabled, meta, ret, done); 2330 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msi.addr, meta, ret, done); 2331 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msi.msg_data, meta, ret, done); 2332 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msi.maxmsgnum, meta, ret, done); 2333 2334 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.enabled, meta, ret, done); 2335 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.table_bar, meta, ret, done); 2336 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.pba_bar, meta, ret, done); 2337 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.table_offset, meta, ret, done); 2338 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.table_count, meta, ret, done); 2339 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.pba_offset, meta, ret, done); 2340 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.pba_size, meta, ret, done); 2341 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.function_mask, meta, ret, done); 2342 2343 SNAPSHOT_BUF_OR_LEAVE(pi->pi_cfgdata, sizeof(pi->pi_cfgdata), 2344 meta, ret, done); 2345 2346 for (i = 0; i < (int)nitems(pi->pi_bar); i++) { 2347 SNAPSHOT_VAR_OR_LEAVE(pi->pi_bar[i].type, meta, ret, done); 2348 SNAPSHOT_VAR_OR_LEAVE(pi->pi_bar[i].size, meta, ret, done); 2349 SNAPSHOT_VAR_OR_LEAVE(pi->pi_bar[i].addr, meta, ret, done); 2350 } 2351 2352 /* Restore MSI-X table. */ 2353 for (i = 0; i < pi->pi_msix.table_count; i++) { 2354 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.table[i].addr, 2355 meta, ret, done); 2356 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.table[i].msg_data, 2357 meta, ret, done); 2358 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.table[i].vector_control, 2359 meta, ret, done); 2360 } 2361 2362 done: 2363 return (ret); 2364 } 2365 2366 static int 2367 pci_find_slotted_dev(const char *dev_name, struct pci_devemu **pde, 2368 struct pci_devinst **pdi) 2369 { 2370 struct businfo *bi; 2371 struct slotinfo *si; 2372 struct funcinfo *fi; 2373 int bus, slot, func; 2374 2375 assert(dev_name != NULL); 2376 assert(pde != NULL); 2377 assert(pdi != NULL); 2378 2379 for (bus = 0; bus < MAXBUSES; bus++) { 2380 if ((bi = pci_businfo[bus]) == NULL) 2381 continue; 2382 2383 for (slot = 0; slot < MAXSLOTS; slot++) { 2384 si = &bi->slotinfo[slot]; 2385 for (func = 0; func < MAXFUNCS; func++) { 2386 fi = &si->si_funcs[func]; 2387 if (fi->fi_pde == NULL) 2388 continue; 2389 if (strcmp(dev_name, fi->fi_pde->pe_emu) != 0) 2390 continue; 2391 2392 *pde = fi->fi_pde; 2393 *pdi = fi->fi_devi; 2394 return (0); 2395 } 2396 } 2397 } 2398 2399 return (EINVAL); 2400 } 2401 2402 int 2403 pci_snapshot(struct vm_snapshot_meta *meta) 2404 { 2405 struct pci_devemu *pde; 2406 struct pci_devinst *pdi; 2407 int ret; 2408 2409 assert(meta->dev_name != NULL); 2410 2411 ret = pci_find_slotted_dev(meta->dev_name, &pde, &pdi); 2412 if (ret != 0) { 2413 fprintf(stderr, "%s: no such name: %s\r\n", 2414 __func__, meta->dev_name); 2415 memset(meta->buffer.buf_start, 0, meta->buffer.buf_size); 2416 return (0); 2417 } 2418 2419 meta->dev_data = pdi; 2420 2421 if (pde->pe_snapshot == NULL) { 2422 fprintf(stderr, "%s: not implemented yet for: %s\r\n", 2423 __func__, meta->dev_name); 2424 return (-1); 2425 } 2426 2427 ret = pci_snapshot_pci_dev(meta); 2428 if (ret != 0) { 2429 fprintf(stderr, "%s: failed to snapshot pci dev\r\n", 2430 __func__); 2431 return (-1); 2432 } 2433 2434 ret = (*pde->pe_snapshot)(meta); 2435 2436 return (ret); 2437 } 2438 2439 int 2440 pci_pause(const char *dev_name) 2441 { 2442 struct pci_devemu *pde; 2443 struct pci_devinst *pdi; 2444 int ret; 2445 2446 assert(dev_name != NULL); 2447 2448 ret = pci_find_slotted_dev(dev_name, &pde, &pdi); 2449 if (ret != 0) { 2450 /* 2451 * It is possible to call this function without 2452 * checking that the device is inserted first. 2453 */ 2454 fprintf(stderr, "%s: no such name: %s\n", __func__, dev_name); 2455 return (0); 2456 } 2457 2458 if (pde->pe_pause == NULL) { 2459 /* The pause/resume functionality is optional. */ 2460 fprintf(stderr, "%s: not implemented for: %s\n", 2461 __func__, dev_name); 2462 return (0); 2463 } 2464 2465 return (*pde->pe_pause)(pdi); 2466 } 2467 2468 int 2469 pci_resume(const char *dev_name) 2470 { 2471 struct pci_devemu *pde; 2472 struct pci_devinst *pdi; 2473 int ret; 2474 2475 assert(dev_name != NULL); 2476 2477 ret = pci_find_slotted_dev(dev_name, &pde, &pdi); 2478 if (ret != 0) { 2479 /* 2480 * It is possible to call this function without 2481 * checking that the device is inserted first. 2482 */ 2483 fprintf(stderr, "%s: no such name: %s\n", __func__, dev_name); 2484 return (0); 2485 } 2486 2487 if (pde->pe_resume == NULL) { 2488 /* The pause/resume functionality is optional. */ 2489 fprintf(stderr, "%s: not implemented for: %s\n", 2490 __func__, dev_name); 2491 return (0); 2492 } 2493 2494 return (*pde->pe_resume)(pdi); 2495 } 2496 #endif 2497 2498 #define PCI_EMUL_TEST 2499 #ifdef PCI_EMUL_TEST 2500 /* 2501 * Define a dummy test device 2502 */ 2503 #define DIOSZ 8 2504 #define DMEMSZ 4096 2505 struct pci_emul_dsoftc { 2506 uint8_t ioregs[DIOSZ]; 2507 uint8_t memregs[2][DMEMSZ]; 2508 }; 2509 2510 #define PCI_EMUL_MSI_MSGS 4 2511 #define PCI_EMUL_MSIX_MSGS 16 2512 2513 static int 2514 pci_emul_dinit(struct pci_devinst *pi, nvlist_t *nvl __unused) 2515 { 2516 int error; 2517 struct pci_emul_dsoftc *sc; 2518 2519 sc = calloc(1, sizeof(struct pci_emul_dsoftc)); 2520 2521 pi->pi_arg = sc; 2522 2523 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x0001); 2524 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x10DD); 2525 pci_set_cfgdata8(pi, PCIR_CLASS, 0x02); 2526 2527 error = pci_emul_add_msicap(pi, PCI_EMUL_MSI_MSGS); 2528 assert(error == 0); 2529 2530 error = pci_emul_alloc_bar(pi, 0, PCIBAR_IO, DIOSZ); 2531 assert(error == 0); 2532 2533 error = pci_emul_alloc_bar(pi, 1, PCIBAR_MEM32, DMEMSZ); 2534 assert(error == 0); 2535 2536 error = pci_emul_alloc_bar(pi, 2, PCIBAR_MEM32, DMEMSZ); 2537 assert(error == 0); 2538 2539 return (0); 2540 } 2541 2542 static void 2543 pci_emul_diow(struct pci_devinst *pi, int baridx, uint64_t offset, int size, 2544 uint64_t value) 2545 { 2546 int i; 2547 struct pci_emul_dsoftc *sc = pi->pi_arg; 2548 2549 if (baridx == 0) { 2550 if (offset + size > DIOSZ) { 2551 printf("diow: iow too large, offset %ld size %d\n", 2552 offset, size); 2553 return; 2554 } 2555 2556 if (size == 1) { 2557 sc->ioregs[offset] = value & 0xff; 2558 } else if (size == 2) { 2559 *(uint16_t *)&sc->ioregs[offset] = value & 0xffff; 2560 } else if (size == 4) { 2561 *(uint32_t *)&sc->ioregs[offset] = value; 2562 } else { 2563 printf("diow: iow unknown size %d\n", size); 2564 } 2565 2566 /* 2567 * Special magic value to generate an interrupt 2568 */ 2569 if (offset == 4 && size == 4 && pci_msi_enabled(pi)) 2570 pci_generate_msi(pi, value % pci_msi_maxmsgnum(pi)); 2571 2572 if (value == 0xabcdef) { 2573 for (i = 0; i < pci_msi_maxmsgnum(pi); i++) 2574 pci_generate_msi(pi, i); 2575 } 2576 } 2577 2578 if (baridx == 1 || baridx == 2) { 2579 if (offset + size > DMEMSZ) { 2580 printf("diow: memw too large, offset %ld size %d\n", 2581 offset, size); 2582 return; 2583 } 2584 2585 i = baridx - 1; /* 'memregs' index */ 2586 2587 if (size == 1) { 2588 sc->memregs[i][offset] = value; 2589 } else if (size == 2) { 2590 *(uint16_t *)&sc->memregs[i][offset] = value; 2591 } else if (size == 4) { 2592 *(uint32_t *)&sc->memregs[i][offset] = value; 2593 } else if (size == 8) { 2594 *(uint64_t *)&sc->memregs[i][offset] = value; 2595 } else { 2596 printf("diow: memw unknown size %d\n", size); 2597 } 2598 2599 /* 2600 * magic interrupt ?? 2601 */ 2602 } 2603 2604 if (baridx > 2 || baridx < 0) { 2605 printf("diow: unknown bar idx %d\n", baridx); 2606 } 2607 } 2608 2609 static uint64_t 2610 pci_emul_dior(struct pci_devinst *pi, int baridx, uint64_t offset, int size) 2611 { 2612 struct pci_emul_dsoftc *sc = pi->pi_arg; 2613 uint32_t value; 2614 int i; 2615 2616 if (baridx == 0) { 2617 if (offset + size > DIOSZ) { 2618 printf("dior: ior too large, offset %ld size %d\n", 2619 offset, size); 2620 return (0); 2621 } 2622 2623 value = 0; 2624 if (size == 1) { 2625 value = sc->ioregs[offset]; 2626 } else if (size == 2) { 2627 value = *(uint16_t *) &sc->ioregs[offset]; 2628 } else if (size == 4) { 2629 value = *(uint32_t *) &sc->ioregs[offset]; 2630 } else { 2631 printf("dior: ior unknown size %d\n", size); 2632 } 2633 } 2634 2635 if (baridx == 1 || baridx == 2) { 2636 if (offset + size > DMEMSZ) { 2637 printf("dior: memr too large, offset %ld size %d\n", 2638 offset, size); 2639 return (0); 2640 } 2641 2642 i = baridx - 1; /* 'memregs' index */ 2643 2644 if (size == 1) { 2645 value = sc->memregs[i][offset]; 2646 } else if (size == 2) { 2647 value = *(uint16_t *) &sc->memregs[i][offset]; 2648 } else if (size == 4) { 2649 value = *(uint32_t *) &sc->memregs[i][offset]; 2650 } else if (size == 8) { 2651 value = *(uint64_t *) &sc->memregs[i][offset]; 2652 } else { 2653 printf("dior: ior unknown size %d\n", size); 2654 } 2655 } 2656 2657 2658 if (baridx > 2 || baridx < 0) { 2659 printf("dior: unknown bar idx %d\n", baridx); 2660 return (0); 2661 } 2662 2663 return (value); 2664 } 2665 2666 #ifdef BHYVE_SNAPSHOT 2667 static int 2668 pci_emul_snapshot(struct vm_snapshot_meta *meta __unused) 2669 { 2670 return (0); 2671 } 2672 #endif 2673 2674 static const struct pci_devemu pci_dummy = { 2675 .pe_emu = "dummy", 2676 .pe_init = pci_emul_dinit, 2677 .pe_barwrite = pci_emul_diow, 2678 .pe_barread = pci_emul_dior, 2679 #ifdef BHYVE_SNAPSHOT 2680 .pe_snapshot = pci_emul_snapshot, 2681 #endif 2682 }; 2683 PCI_EMUL_SET(pci_dummy); 2684 2685 #endif /* PCI_EMUL_TEST */ 2686