1 /*- 2 * SPDX-License-Identifier: BSD-2-Clause 3 * 4 * Copyright (c) 2011 NetApp, Inc. 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 16 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 19 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 26 * SUCH DAMAGE. 27 */ 28 29 #include <sys/param.h> 30 #include <sys/linker_set.h> 31 #include <sys/mman.h> 32 33 #include <ctype.h> 34 #include <err.h> 35 #include <errno.h> 36 #include <pthread.h> 37 #include <stdio.h> 38 #include <stdlib.h> 39 #include <string.h> 40 #include <strings.h> 41 #include <assert.h> 42 #include <stdbool.h> 43 #include <sysexits.h> 44 45 #include <machine/vmm.h> 46 #include <machine/vmm_snapshot.h> 47 #include <vmmapi.h> 48 49 #include "acpi.h" 50 #include "bhyverun.h" 51 #include "bootrom.h" 52 #include "config.h" 53 #include "debug.h" 54 #ifdef __amd64__ 55 #include "amd64/inout.h" 56 #endif 57 #include "mem.h" 58 #include "pci_emul.h" 59 #ifdef __amd64__ 60 #include "amd64/pci_lpc.h" 61 #include "pci_passthru.h" 62 #endif 63 #include "qemu_fwcfg.h" 64 65 #define CONF1_ADDR_PORT 0x0cf8 66 #define CONF1_DATA_PORT 0x0cfc 67 68 #define CONF1_ENABLE 0x80000000ul 69 70 #define MAXBUSES (PCI_BUSMAX + 1) 71 #define MAXSLOTS (PCI_SLOTMAX + 1) 72 #define MAXFUNCS (PCI_FUNCMAX + 1) 73 74 #define GB (1024 * 1024 * 1024UL) 75 76 struct funcinfo { 77 nvlist_t *fi_config; 78 struct pci_devemu *fi_pde; 79 struct pci_devinst *fi_devi; 80 }; 81 82 struct intxinfo { 83 int ii_count; 84 struct pci_irq ii_irq; 85 }; 86 87 struct slotinfo { 88 struct intxinfo si_intpins[4]; 89 struct funcinfo si_funcs[MAXFUNCS]; 90 }; 91 92 struct businfo { 93 uint16_t iobase, iolimit; /* I/O window */ 94 uint32_t membase32, memlimit32; /* mmio window below 4GB */ 95 uint64_t membase64, memlimit64; /* mmio window above 4GB */ 96 struct slotinfo slotinfo[MAXSLOTS]; 97 }; 98 99 static struct businfo *pci_businfo[MAXBUSES]; 100 101 SET_DECLARE(pci_devemu_set, struct pci_devemu); 102 103 static uint64_t pci_emul_iobase; 104 static uint8_t *pci_emul_rombase; 105 static uint64_t pci_emul_romoffset; 106 static uint8_t *pci_emul_romlim; 107 static uint64_t pci_emul_membase32; 108 static uint64_t pci_emul_membase64; 109 static uint64_t pci_emul_memlim64; 110 111 struct pci_bar_allocation { 112 TAILQ_ENTRY(pci_bar_allocation) chain; 113 struct pci_devinst *pdi; 114 int idx; 115 enum pcibar_type type; 116 uint64_t size; 117 }; 118 119 static TAILQ_HEAD(pci_bar_list, pci_bar_allocation) pci_bars = 120 TAILQ_HEAD_INITIALIZER(pci_bars); 121 122 struct boot_device { 123 TAILQ_ENTRY(boot_device) boot_device_chain; 124 struct pci_devinst *pdi; 125 int bootindex; 126 }; 127 static TAILQ_HEAD(boot_list, boot_device) boot_devices = TAILQ_HEAD_INITIALIZER( 128 boot_devices); 129 130 #if defined(__amd64__) 131 #define PCI_EMUL_IOBASE 0x2000 132 #define PCI_EMUL_IOLIMIT 0x10000 133 #define PCI_EMUL_IOMASK 0xffff 134 /* 135 * OVMF always uses 0xc0000000 as base address for 32 bit PCI MMIO. Don't 136 * change this address without changing it in OVMF. 137 */ 138 #define PCI_EMUL_MEMBASE32 0xc0000000 139 #elif defined(__aarch64__) 140 #define PCI_EMUL_IOBASE 0xdf000000UL 141 #define PCI_EMUL_IOLIMIT 0xe0000000UL 142 #define PCI_EMUL_MEMBASE32 0xa0000000UL 143 #else 144 #error Unsupported platform 145 #endif 146 147 #define PCI_EMUL_ROMSIZE 0x10000000 148 149 #define PCI_EMUL_ECFG_BASE 0xE0000000 /* 3.5GB */ 150 #define PCI_EMUL_ECFG_SIZE (MAXBUSES * 1024 * 1024) /* 1MB per bus */ 151 #ifdef __amd64__ 152 SYSRES_MEM(PCI_EMUL_ECFG_BASE, PCI_EMUL_ECFG_SIZE); 153 #endif 154 155 #define PCI_EMUL_MEMLIMIT32 PCI_EMUL_ECFG_BASE 156 #define PCI_EMUL_MEMSIZE64 (32*GB) 157 158 static void pci_lintr_route(struct pci_devinst *pi); 159 static void pci_lintr_update(struct pci_devinst *pi); 160 161 static struct pci_devemu *pci_emul_finddev(const char *name); 162 static void pci_cfgrw(int in, int bus, int slot, int func, int coff, 163 int bytes, uint32_t *val); 164 165 static __inline void 166 CFGWRITE(struct pci_devinst *pi, int coff, uint32_t val, int bytes) 167 { 168 169 if (bytes == 1) 170 pci_set_cfgdata8(pi, coff, val); 171 else if (bytes == 2) 172 pci_set_cfgdata16(pi, coff, val); 173 else 174 pci_set_cfgdata32(pi, coff, val); 175 } 176 177 static __inline uint32_t 178 CFGREAD(struct pci_devinst *pi, int coff, int bytes) 179 { 180 181 if (bytes == 1) 182 return (pci_get_cfgdata8(pi, coff)); 183 else if (bytes == 2) 184 return (pci_get_cfgdata16(pi, coff)); 185 else 186 return (pci_get_cfgdata32(pi, coff)); 187 } 188 189 static int 190 is_pcir_bar(int coff) 191 { 192 return (coff >= PCIR_BAR(0) && coff < PCIR_BAR(PCI_BARMAX + 1)); 193 } 194 195 static int 196 is_pcir_bios(int coff) 197 { 198 return (coff >= PCIR_BIOS && coff < PCIR_BIOS + 4); 199 } 200 201 /* 202 * I/O access 203 */ 204 205 /* 206 * Slot options are in the form: 207 * 208 * <bus>:<slot>:<func>,<emul>[,<config>] 209 * <slot>[:<func>],<emul>[,<config>] 210 * 211 * slot is 0..31 212 * func is 0..7 213 * emul is a string describing the type of PCI device e.g. virtio-net 214 * config is an optional string, depending on the device, that can be 215 * used for configuration. 216 * Examples are: 217 * 1,virtio-net,tap0 218 * 3:0,dummy 219 */ 220 static void 221 pci_parse_slot_usage(char *aopt) 222 { 223 224 EPRINTLN("Invalid PCI slot info field \"%s\"", aopt); 225 } 226 227 /* 228 * Helper function to parse a list of comma-separated options where 229 * each option is formatted as "name[=value]". If no value is 230 * provided, the option is treated as a boolean and is given a value 231 * of true. 232 */ 233 int 234 pci_parse_legacy_config(nvlist_t *nvl, const char *opt) 235 { 236 char *config, *name, *tofree, *value; 237 238 if (opt == NULL) 239 return (0); 240 241 config = tofree = strdup(opt); 242 while ((name = strsep(&config, ",")) != NULL) { 243 value = strchr(name, '='); 244 if (value != NULL) { 245 *value = '\0'; 246 value++; 247 set_config_value_node(nvl, name, value); 248 } else 249 set_config_bool_node(nvl, name, true); 250 } 251 free(tofree); 252 return (0); 253 } 254 255 /* 256 * PCI device configuration is stored in MIBs that encode the device's 257 * location: 258 * 259 * pci.<bus>.<slot>.<func> 260 * 261 * Where "bus", "slot", and "func" are all decimal values without 262 * leading zeroes. Each valid device must have a "device" node which 263 * identifies the driver model of the device. 264 * 265 * Device backends can provide a parser for the "config" string. If 266 * a custom parser is not provided, pci_parse_legacy_config() is used 267 * to parse the string. 268 */ 269 int 270 pci_parse_slot(char *opt) 271 { 272 char node_name[sizeof("pci.XXX.XX.X")]; 273 struct pci_devemu *pde; 274 char *emul, *config, *str, *cp; 275 int error, bnum, snum, fnum; 276 nvlist_t *nvl; 277 278 error = -1; 279 str = strdup(opt); 280 281 emul = config = NULL; 282 if ((cp = strchr(str, ',')) != NULL) { 283 *cp = '\0'; 284 emul = cp + 1; 285 if ((cp = strchr(emul, ',')) != NULL) { 286 *cp = '\0'; 287 config = cp + 1; 288 } 289 } else { 290 pci_parse_slot_usage(opt); 291 goto done; 292 } 293 294 /* <bus>:<slot>:<func> */ 295 if (sscanf(str, "%d:%d:%d", &bnum, &snum, &fnum) != 3) { 296 bnum = 0; 297 /* <slot>:<func> */ 298 if (sscanf(str, "%d:%d", &snum, &fnum) != 2) { 299 fnum = 0; 300 /* <slot> */ 301 if (sscanf(str, "%d", &snum) != 1) { 302 snum = -1; 303 } 304 } 305 } 306 307 if (bnum < 0 || bnum >= MAXBUSES || snum < 0 || snum >= MAXSLOTS || 308 fnum < 0 || fnum >= MAXFUNCS) { 309 pci_parse_slot_usage(opt); 310 goto done; 311 } 312 313 pde = pci_emul_finddev(emul); 314 if (pde == NULL) { 315 EPRINTLN("pci slot %d:%d:%d: unknown device \"%s\"", bnum, snum, 316 fnum, emul); 317 goto done; 318 } 319 320 snprintf(node_name, sizeof(node_name), "pci.%d.%d.%d", bnum, snum, 321 fnum); 322 nvl = find_config_node(node_name); 323 if (nvl != NULL) { 324 EPRINTLN("pci slot %d:%d:%d already occupied!", bnum, snum, 325 fnum); 326 goto done; 327 } 328 nvl = create_config_node(node_name); 329 if (pde->pe_alias != NULL) 330 set_config_value_node(nvl, "device", pde->pe_alias); 331 else 332 set_config_value_node(nvl, "device", pde->pe_emu); 333 334 if (pde->pe_legacy_config != NULL) 335 error = pde->pe_legacy_config(nvl, config); 336 else 337 error = pci_parse_legacy_config(nvl, config); 338 done: 339 free(str); 340 return (error); 341 } 342 343 void 344 pci_print_supported_devices(void) 345 { 346 struct pci_devemu **pdpp, *pdp; 347 348 SET_FOREACH(pdpp, pci_devemu_set) { 349 pdp = *pdpp; 350 printf("%s\n", pdp->pe_emu); 351 } 352 } 353 354 uint32_t 355 pci_config_read_reg(const struct pcisel *const host_sel, nvlist_t *nvl, 356 const uint32_t reg, const uint8_t size, const uint32_t def) 357 { 358 const char *config; 359 const nvlist_t *pci_regs; 360 361 assert(size == 1 || size == 2 || size == 4); 362 363 pci_regs = find_relative_config_node(nvl, "pcireg"); 364 if (pci_regs == NULL) { 365 return def; 366 } 367 368 switch (reg) { 369 case PCIR_DEVICE: 370 config = get_config_value_node(pci_regs, "device"); 371 break; 372 case PCIR_VENDOR: 373 config = get_config_value_node(pci_regs, "vendor"); 374 break; 375 case PCIR_REVID: 376 config = get_config_value_node(pci_regs, "revid"); 377 break; 378 case PCIR_SUBVEND_0: 379 config = get_config_value_node(pci_regs, "subvendor"); 380 break; 381 case PCIR_SUBDEV_0: 382 config = get_config_value_node(pci_regs, "subdevice"); 383 break; 384 default: 385 return (-1); 386 } 387 388 if (config == NULL) { 389 return def; 390 } else if (host_sel != NULL && strcmp(config, "host") == 0) { 391 #ifdef __amd64__ 392 return pci_host_read_config(host_sel, reg, size); 393 #else 394 errx(1, "cannot fetch host PCI configuration"); 395 #endif 396 } else { 397 return strtol(config, NULL, 16); 398 } 399 } 400 401 static int 402 pci_valid_pba_offset(struct pci_devinst *pi, uint64_t offset) 403 { 404 405 if (offset < pi->pi_msix.pba_offset) 406 return (0); 407 408 if (offset >= pi->pi_msix.pba_offset + pi->pi_msix.pba_size) { 409 return (0); 410 } 411 412 return (1); 413 } 414 415 int 416 pci_emul_msix_twrite(struct pci_devinst *pi, uint64_t offset, int size, 417 uint64_t value) 418 { 419 int msix_entry_offset; 420 int tab_index; 421 char *dest; 422 423 /* support only 4 or 8 byte writes */ 424 if (size != 4 && size != 8) 425 return (-1); 426 427 /* 428 * Return if table index is beyond what device supports 429 */ 430 tab_index = offset / MSIX_TABLE_ENTRY_SIZE; 431 if (tab_index >= pi->pi_msix.table_count) 432 return (-1); 433 434 msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE; 435 436 /* support only aligned writes */ 437 if ((msix_entry_offset % size) != 0) 438 return (-1); 439 440 dest = (char *)(pi->pi_msix.table + tab_index); 441 dest += msix_entry_offset; 442 443 if (size == 4) 444 *((uint32_t *)dest) = value; 445 else 446 *((uint64_t *)dest) = value; 447 448 return (0); 449 } 450 451 uint64_t 452 pci_emul_msix_tread(struct pci_devinst *pi, uint64_t offset, int size) 453 { 454 char *dest; 455 int msix_entry_offset; 456 int tab_index; 457 uint64_t retval = ~0; 458 459 /* 460 * The PCI standard only allows 4 and 8 byte accesses to the MSI-X 461 * table but we also allow 1 byte access to accommodate reads from 462 * ddb. 463 */ 464 if (size != 1 && size != 4 && size != 8) 465 return (retval); 466 467 msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE; 468 469 /* support only aligned reads */ 470 if ((msix_entry_offset % size) != 0) { 471 return (retval); 472 } 473 474 tab_index = offset / MSIX_TABLE_ENTRY_SIZE; 475 476 if (tab_index < pi->pi_msix.table_count) { 477 /* valid MSI-X Table access */ 478 dest = (char *)(pi->pi_msix.table + tab_index); 479 dest += msix_entry_offset; 480 481 if (size == 1) 482 retval = *((uint8_t *)dest); 483 else if (size == 4) 484 retval = *((uint32_t *)dest); 485 else 486 retval = *((uint64_t *)dest); 487 } else if (pci_valid_pba_offset(pi, offset)) { 488 /* return 0 for PBA access */ 489 retval = 0; 490 } 491 492 return (retval); 493 } 494 495 int 496 pci_msix_table_bar(struct pci_devinst *pi) 497 { 498 499 if (pi->pi_msix.table != NULL) 500 return (pi->pi_msix.table_bar); 501 else 502 return (-1); 503 } 504 505 int 506 pci_msix_pba_bar(struct pci_devinst *pi) 507 { 508 509 if (pi->pi_msix.table != NULL) 510 return (pi->pi_msix.pba_bar); 511 else 512 return (-1); 513 } 514 515 #ifdef __amd64__ 516 static int 517 pci_emul_io_handler(struct vmctx *ctx __unused, int in, int port, 518 int bytes, uint32_t *eax, void *arg) 519 { 520 struct pci_devinst *pdi = arg; 521 struct pci_devemu *pe = pdi->pi_d; 522 uint64_t offset; 523 int i; 524 525 assert(port >= 0); 526 527 for (i = 0; i <= PCI_BARMAX; i++) { 528 if (pdi->pi_bar[i].type == PCIBAR_IO && 529 (uint64_t)port >= pdi->pi_bar[i].addr && 530 (uint64_t)port + bytes <= 531 pdi->pi_bar[i].addr + pdi->pi_bar[i].size) { 532 offset = port - pdi->pi_bar[i].addr; 533 if (in) 534 *eax = (*pe->pe_barread)(pdi, i, 535 offset, bytes); 536 else 537 (*pe->pe_barwrite)(pdi, i, offset, 538 bytes, *eax); 539 return (0); 540 } 541 } 542 return (-1); 543 } 544 #else 545 static int 546 pci_emul_iomem_handler(struct vcpu *vcpu __unused, int dir, 547 uint64_t addr, int size, uint64_t *val, void *arg1, long arg2) 548 { 549 struct pci_devinst *pdi = arg1; 550 struct pci_devemu *pe = pdi->pi_d; 551 uint64_t offset; 552 int bidx = (int)arg2; 553 554 assert(bidx <= PCI_BARMAX); 555 assert(pdi->pi_bar[bidx].type == PCIBAR_IO); 556 assert(addr >= pdi->pi_bar[bidx].addr && 557 addr + size <= pdi->pi_bar[bidx].addr + pdi->pi_bar[bidx].size); 558 assert(size == 1 || size == 2 || size == 4); 559 560 offset = addr - pdi->pi_bar[bidx].addr; 561 if (dir == MEM_F_READ) 562 *val = (*pe->pe_barread)(pdi, bidx, offset, size); 563 else 564 (*pe->pe_barwrite)(pdi, bidx, offset, size, *val); 565 566 return (0); 567 } 568 #endif /* !__amd64__ */ 569 570 static int 571 pci_emul_mem_handler(struct vcpu *vcpu __unused, int dir, 572 uint64_t addr, int size, uint64_t *val, void *arg1, long arg2) 573 { 574 struct pci_devinst *pdi = arg1; 575 struct pci_devemu *pe = pdi->pi_d; 576 uint64_t offset; 577 int bidx = (int)arg2; 578 579 assert(bidx <= PCI_BARMAX); 580 assert(pdi->pi_bar[bidx].type == PCIBAR_MEM32 || 581 pdi->pi_bar[bidx].type == PCIBAR_MEM64); 582 assert(addr >= pdi->pi_bar[bidx].addr && 583 addr + size <= pdi->pi_bar[bidx].addr + pdi->pi_bar[bidx].size); 584 585 offset = addr - pdi->pi_bar[bidx].addr; 586 587 if (dir == MEM_F_WRITE) { 588 if (size == 8) { 589 (*pe->pe_barwrite)(pdi, bidx, offset, 590 4, *val & 0xffffffff); 591 (*pe->pe_barwrite)(pdi, bidx, offset + 4, 592 4, *val >> 32); 593 } else { 594 (*pe->pe_barwrite)(pdi, bidx, offset, 595 size, *val); 596 } 597 } else { 598 if (size == 8) { 599 *val = (*pe->pe_barread)(pdi, bidx, 600 offset, 4); 601 *val |= (*pe->pe_barread)(pdi, bidx, 602 offset + 4, 4) << 32; 603 } else { 604 *val = (*pe->pe_barread)(pdi, bidx, 605 offset, size); 606 } 607 } 608 609 return (0); 610 } 611 612 613 static int 614 pci_emul_alloc_resource(uint64_t *baseptr, uint64_t limit, uint64_t size, 615 uint64_t *addr) 616 { 617 uint64_t base; 618 619 assert((size & (size - 1)) == 0); /* must be a power of 2 */ 620 621 base = roundup2(*baseptr, size); 622 623 if (base + size <= limit) { 624 *addr = base; 625 *baseptr = base + size; 626 return (0); 627 } else 628 return (-1); 629 } 630 631 /* 632 * Register (or unregister) the MMIO or I/O region associated with the BAR 633 * register 'idx' of an emulated pci device. 634 */ 635 static void 636 modify_bar_registration(struct pci_devinst *pi, int idx, int registration) 637 { 638 struct pci_devemu *pe; 639 int error; 640 enum pcibar_type type; 641 642 pe = pi->pi_d; 643 type = pi->pi_bar[idx].type; 644 switch (type) { 645 case PCIBAR_IO: 646 { 647 #ifdef __amd64__ 648 struct inout_port iop; 649 650 bzero(&iop, sizeof(struct inout_port)); 651 iop.name = pi->pi_name; 652 iop.port = pi->pi_bar[idx].addr; 653 iop.size = pi->pi_bar[idx].size; 654 if (registration) { 655 iop.flags = IOPORT_F_INOUT; 656 iop.handler = pci_emul_io_handler; 657 iop.arg = pi; 658 error = register_inout(&iop); 659 } else 660 error = unregister_inout(&iop); 661 #else 662 struct mem_range mr; 663 664 bzero(&mr, sizeof(struct mem_range)); 665 mr.name = pi->pi_name; 666 mr.base = pi->pi_bar[idx].addr; 667 mr.size = pi->pi_bar[idx].size; 668 if (registration) { 669 mr.flags = MEM_F_RW; 670 mr.handler = pci_emul_iomem_handler; 671 mr.arg1 = pi; 672 mr.arg2 = idx; 673 error = register_mem(&mr); 674 } else 675 error = unregister_mem(&mr); 676 #endif 677 break; 678 } 679 case PCIBAR_MEM32: 680 case PCIBAR_MEM64: 681 { 682 struct mem_range mr; 683 684 bzero(&mr, sizeof(struct mem_range)); 685 mr.name = pi->pi_name; 686 mr.base = pi->pi_bar[idx].addr; 687 mr.size = pi->pi_bar[idx].size; 688 if (registration) { 689 mr.flags = MEM_F_RW; 690 mr.handler = pci_emul_mem_handler; 691 mr.arg1 = pi; 692 mr.arg2 = idx; 693 error = register_mem(&mr); 694 } else 695 error = unregister_mem(&mr); 696 break; 697 } 698 case PCIBAR_ROM: 699 error = 0; 700 break; 701 default: 702 error = EINVAL; 703 break; 704 } 705 assert(error == 0); 706 707 if (pe->pe_baraddr != NULL) 708 (*pe->pe_baraddr)(pi, idx, registration, pi->pi_bar[idx].addr); 709 } 710 711 static void 712 unregister_bar(struct pci_devinst *pi, int idx) 713 { 714 715 modify_bar_registration(pi, idx, 0); 716 } 717 718 static void 719 register_bar(struct pci_devinst *pi, int idx) 720 { 721 722 modify_bar_registration(pi, idx, 1); 723 } 724 725 /* Is the ROM enabled for the emulated pci device? */ 726 static int 727 romen(struct pci_devinst *pi) 728 { 729 return (pi->pi_bar[PCI_ROM_IDX].lobits & PCIM_BIOS_ENABLE) == 730 PCIM_BIOS_ENABLE; 731 } 732 733 /* Are we decoding i/o port accesses for the emulated pci device? */ 734 static int 735 porten(struct pci_devinst *pi) 736 { 737 uint16_t cmd; 738 739 cmd = pci_get_cfgdata16(pi, PCIR_COMMAND); 740 741 return (cmd & PCIM_CMD_PORTEN); 742 } 743 744 /* Are we decoding memory accesses for the emulated pci device? */ 745 static int 746 memen(struct pci_devinst *pi) 747 { 748 uint16_t cmd; 749 750 cmd = pci_get_cfgdata16(pi, PCIR_COMMAND); 751 752 return (cmd & PCIM_CMD_MEMEN); 753 } 754 755 /* 756 * Update the MMIO or I/O address that is decoded by the BAR register. 757 * 758 * If the pci device has enabled the address space decoding then intercept 759 * the address range decoded by the BAR register. 760 */ 761 static void 762 update_bar_address(struct pci_devinst *pi, uint64_t addr, int idx, int type) 763 { 764 int decode; 765 766 if (pi->pi_bar[idx].type == PCIBAR_IO) 767 decode = porten(pi); 768 else 769 decode = memen(pi); 770 771 if (decode) 772 unregister_bar(pi, idx); 773 774 switch (type) { 775 case PCIBAR_IO: 776 case PCIBAR_MEM32: 777 pi->pi_bar[idx].addr = addr; 778 break; 779 case PCIBAR_MEM64: 780 pi->pi_bar[idx].addr &= ~0xffffffffUL; 781 pi->pi_bar[idx].addr |= addr; 782 break; 783 case PCIBAR_MEMHI64: 784 pi->pi_bar[idx].addr &= 0xffffffff; 785 pi->pi_bar[idx].addr |= addr; 786 break; 787 default: 788 assert(0); 789 } 790 791 if (decode) 792 register_bar(pi, idx); 793 } 794 795 int 796 pci_emul_alloc_bar(struct pci_devinst *pdi, int idx, enum pcibar_type type, 797 uint64_t size) 798 { 799 assert((type == PCIBAR_ROM) || (idx >= 0 && idx <= PCI_BARMAX)); 800 assert((type != PCIBAR_ROM) || (idx == PCI_ROM_IDX)); 801 802 if ((size & (size - 1)) != 0) 803 size = 1UL << flsl(size); /* round up to a power of 2 */ 804 805 /* Enforce minimum BAR sizes required by the PCI standard */ 806 if (type == PCIBAR_IO) { 807 if (size < 4) 808 size = 4; 809 } else if (type == PCIBAR_ROM) { 810 if (size < ~PCIM_BIOS_ADDR_MASK + 1) 811 size = ~PCIM_BIOS_ADDR_MASK + 1; 812 } else { 813 if (size < 16) 814 size = 16; 815 } 816 817 /* 818 * To reduce fragmentation of the MMIO space, we allocate the BARs by 819 * size. Therefore, don't allocate the BAR yet. We create a list of all 820 * BAR allocation which is sorted by BAR size. When all PCI devices are 821 * initialized, we will assign an address to the BARs. 822 */ 823 824 /* create a new list entry */ 825 struct pci_bar_allocation *const new_bar = malloc(sizeof(*new_bar)); 826 memset(new_bar, 0, sizeof(*new_bar)); 827 new_bar->pdi = pdi; 828 new_bar->idx = idx; 829 new_bar->type = type; 830 new_bar->size = size; 831 832 /* 833 * Search for a BAR which size is lower than the size of our newly 834 * allocated BAR. 835 */ 836 struct pci_bar_allocation *bar = NULL; 837 TAILQ_FOREACH(bar, &pci_bars, chain) { 838 if (bar->size < size) { 839 break; 840 } 841 } 842 843 if (bar == NULL) { 844 /* 845 * Either the list is empty or new BAR is the smallest BAR of 846 * the list. Append it to the end of our list. 847 */ 848 TAILQ_INSERT_TAIL(&pci_bars, new_bar, chain); 849 } else { 850 /* 851 * The found BAR is smaller than our new BAR. For that reason, 852 * insert our new BAR before the found BAR. 853 */ 854 TAILQ_INSERT_BEFORE(bar, new_bar, chain); 855 } 856 857 /* 858 * Enable PCI BARs only if we don't have a boot ROM, i.e., bhyveload was 859 * used to load the initial guest image. Otherwise, we rely on the boot 860 * ROM to handle this. 861 */ 862 if (!get_config_bool_default("pci.enable_bars", !bootrom_boot())) 863 return (0); 864 865 /* 866 * pci_passthru devices synchronize their physical and virtual command 867 * register on init. For that reason, the virtual cmd reg should be 868 * updated as early as possible. 869 */ 870 uint16_t enbit = 0; 871 switch (type) { 872 case PCIBAR_IO: 873 enbit = PCIM_CMD_PORTEN; 874 break; 875 case PCIBAR_MEM64: 876 case PCIBAR_MEM32: 877 enbit = PCIM_CMD_MEMEN; 878 break; 879 default: 880 enbit = 0; 881 break; 882 } 883 884 const uint16_t cmd = pci_get_cfgdata16(pdi, PCIR_COMMAND); 885 pci_set_cfgdata16(pdi, PCIR_COMMAND, cmd | enbit); 886 887 return (0); 888 } 889 890 static int 891 pci_emul_assign_bar(struct pci_devinst *const pdi, const int idx, 892 const enum pcibar_type type, const uint64_t size) 893 { 894 int error; 895 uint64_t *baseptr, limit, addr, mask, lobits, bar; 896 897 switch (type) { 898 case PCIBAR_NONE: 899 baseptr = NULL; 900 addr = mask = lobits = 0; 901 break; 902 case PCIBAR_IO: 903 baseptr = &pci_emul_iobase; 904 limit = PCI_EMUL_IOLIMIT; 905 mask = PCIM_BAR_IO_BASE; 906 lobits = PCIM_BAR_IO_SPACE; 907 break; 908 case PCIBAR_MEM64: 909 /* 910 * XXX 911 * Some drivers do not work well if the 64-bit BAR is allocated 912 * above 4GB. Allow for this by allocating small requests under 913 * 4GB unless then allocation size is larger than some arbitrary 914 * number (128MB currently). 915 */ 916 if (size > 128 * 1024 * 1024) { 917 baseptr = &pci_emul_membase64; 918 limit = pci_emul_memlim64; 919 mask = PCIM_BAR_MEM_BASE; 920 lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 | 921 PCIM_BAR_MEM_PREFETCH; 922 } else { 923 baseptr = &pci_emul_membase32; 924 limit = PCI_EMUL_MEMLIMIT32; 925 mask = PCIM_BAR_MEM_BASE; 926 lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64; 927 } 928 break; 929 case PCIBAR_MEM32: 930 baseptr = &pci_emul_membase32; 931 limit = PCI_EMUL_MEMLIMIT32; 932 mask = PCIM_BAR_MEM_BASE; 933 lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32; 934 break; 935 case PCIBAR_ROM: 936 /* do not claim memory for ROM. OVMF will do it for us. */ 937 baseptr = NULL; 938 limit = 0; 939 mask = PCIM_BIOS_ADDR_MASK; 940 lobits = 0; 941 break; 942 default: 943 printf("pci_emul_alloc_base: invalid bar type %d\n", type); 944 assert(0); 945 } 946 947 if (baseptr != NULL) { 948 error = pci_emul_alloc_resource(baseptr, limit, size, &addr); 949 if (error != 0) 950 return (error); 951 } else { 952 addr = 0; 953 } 954 955 pdi->pi_bar[idx].type = type; 956 pdi->pi_bar[idx].addr = addr; 957 pdi->pi_bar[idx].size = size; 958 /* 959 * passthru devices are using same lobits as physical device they set 960 * this property 961 */ 962 if (pdi->pi_bar[idx].lobits != 0) { 963 lobits = pdi->pi_bar[idx].lobits; 964 } else { 965 pdi->pi_bar[idx].lobits = lobits; 966 } 967 968 /* Initialize the BAR register in config space */ 969 bar = (addr & mask) | lobits; 970 pci_set_cfgdata32(pdi, PCIR_BAR(idx), bar); 971 972 if (type == PCIBAR_MEM64) { 973 assert(idx + 1 <= PCI_BARMAX); 974 pdi->pi_bar[idx + 1].type = PCIBAR_MEMHI64; 975 pci_set_cfgdata32(pdi, PCIR_BAR(idx + 1), bar >> 32); 976 } 977 978 switch (type) { 979 case PCIBAR_IO: 980 if (porten(pdi)) 981 register_bar(pdi, idx); 982 break; 983 case PCIBAR_MEM32: 984 case PCIBAR_MEM64: 985 case PCIBAR_MEMHI64: 986 if (memen(pdi)) 987 register_bar(pdi, idx); 988 break; 989 default: 990 break; 991 } 992 993 return (0); 994 } 995 996 int 997 pci_emul_alloc_rom(struct pci_devinst *const pdi, const uint64_t size, 998 void **const addr) 999 { 1000 /* allocate ROM space once on first call */ 1001 if (pci_emul_rombase == 0) { 1002 pci_emul_rombase = vm_create_devmem(pdi->pi_vmctx, VM_PCIROM, 1003 "pcirom", PCI_EMUL_ROMSIZE); 1004 if (pci_emul_rombase == MAP_FAILED) { 1005 warnx("%s: failed to create rom segment", __func__); 1006 return (-1); 1007 } 1008 pci_emul_romlim = pci_emul_rombase + PCI_EMUL_ROMSIZE; 1009 pci_emul_romoffset = 0; 1010 } 1011 1012 /* ROM size should be a power of 2 and greater than 2 KB */ 1013 const uint64_t rom_size = MAX(1UL << flsl(size), 1014 ~PCIM_BIOS_ADDR_MASK + 1); 1015 1016 /* check if ROM fits into ROM space */ 1017 if (pci_emul_romoffset + rom_size > PCI_EMUL_ROMSIZE) { 1018 warnx("%s: no space left in rom segment:", __func__); 1019 warnx("%16lu bytes left", 1020 PCI_EMUL_ROMSIZE - pci_emul_romoffset); 1021 warnx("%16lu bytes required by %d/%d/%d", rom_size, pdi->pi_bus, 1022 pdi->pi_slot, pdi->pi_func); 1023 return (-1); 1024 } 1025 1026 /* allocate ROM BAR */ 1027 const int error = pci_emul_alloc_bar(pdi, PCI_ROM_IDX, PCIBAR_ROM, 1028 rom_size); 1029 if (error) 1030 return error; 1031 1032 /* return address */ 1033 *addr = pci_emul_rombase + pci_emul_romoffset; 1034 1035 /* save offset into ROM Space */ 1036 pdi->pi_romoffset = pci_emul_romoffset; 1037 1038 /* increase offset for next ROM */ 1039 pci_emul_romoffset += rom_size; 1040 1041 return (0); 1042 } 1043 1044 int 1045 pci_emul_add_boot_device(struct pci_devinst *pi, int bootindex) 1046 { 1047 struct boot_device *new_device, *device; 1048 1049 /* don't permit a negative bootindex */ 1050 if (bootindex < 0) { 1051 errx(4, "Invalid bootindex %d for %s", bootindex, pi->pi_name); 1052 } 1053 1054 /* alloc new boot device */ 1055 new_device = calloc(1, sizeof(struct boot_device)); 1056 if (new_device == NULL) { 1057 return (ENOMEM); 1058 } 1059 new_device->pdi = pi; 1060 new_device->bootindex = bootindex; 1061 1062 /* search for boot device with higher boot index */ 1063 TAILQ_FOREACH(device, &boot_devices, boot_device_chain) { 1064 if (device->bootindex == bootindex) { 1065 errx(4, 1066 "Could not set bootindex %d for %s. Bootindex already occupied by %s", 1067 bootindex, pi->pi_name, device->pdi->pi_name); 1068 } else if (device->bootindex > bootindex) { 1069 break; 1070 } 1071 } 1072 1073 /* add boot device to queue */ 1074 if (device == NULL) { 1075 TAILQ_INSERT_TAIL(&boot_devices, new_device, boot_device_chain); 1076 } else { 1077 TAILQ_INSERT_BEFORE(device, new_device, boot_device_chain); 1078 } 1079 1080 return (0); 1081 } 1082 1083 #define CAP_START_OFFSET 0x40 1084 static int 1085 pci_emul_add_capability(struct pci_devinst *pi, u_char *capdata, int caplen) 1086 { 1087 int i, capoff, reallen; 1088 uint16_t sts; 1089 1090 assert(caplen > 0); 1091 1092 reallen = roundup2(caplen, 4); /* dword aligned */ 1093 1094 sts = pci_get_cfgdata16(pi, PCIR_STATUS); 1095 if ((sts & PCIM_STATUS_CAPPRESENT) == 0) 1096 capoff = CAP_START_OFFSET; 1097 else 1098 capoff = pi->pi_capend + 1; 1099 1100 /* Check if we have enough space */ 1101 if (capoff + reallen > PCI_REGMAX + 1) 1102 return (-1); 1103 1104 /* Set the previous capability pointer */ 1105 if ((sts & PCIM_STATUS_CAPPRESENT) == 0) { 1106 pci_set_cfgdata8(pi, PCIR_CAP_PTR, capoff); 1107 pci_set_cfgdata16(pi, PCIR_STATUS, sts|PCIM_STATUS_CAPPRESENT); 1108 } else 1109 pci_set_cfgdata8(pi, pi->pi_prevcap + 1, capoff); 1110 1111 /* Copy the capability */ 1112 for (i = 0; i < caplen; i++) 1113 pci_set_cfgdata8(pi, capoff + i, capdata[i]); 1114 1115 /* Set the next capability pointer */ 1116 pci_set_cfgdata8(pi, capoff + 1, 0); 1117 1118 pi->pi_prevcap = capoff; 1119 pi->pi_capend = capoff + reallen - 1; 1120 return (0); 1121 } 1122 1123 static struct pci_devemu * 1124 pci_emul_finddev(const char *name) 1125 { 1126 struct pci_devemu **pdpp, *pdp; 1127 1128 SET_FOREACH(pdpp, pci_devemu_set) { 1129 pdp = *pdpp; 1130 if (!strcmp(pdp->pe_emu, name)) { 1131 return (pdp); 1132 } 1133 } 1134 1135 return (NULL); 1136 } 1137 1138 static int 1139 pci_emul_init(struct vmctx *ctx, struct pci_devemu *pde, int bus, int slot, 1140 int func, struct funcinfo *fi) 1141 { 1142 struct pci_devinst *pdi; 1143 int err; 1144 1145 pdi = calloc(1, sizeof(struct pci_devinst)); 1146 1147 pdi->pi_vmctx = ctx; 1148 pdi->pi_bus = bus; 1149 pdi->pi_slot = slot; 1150 pdi->pi_func = func; 1151 pthread_mutex_init(&pdi->pi_lintr.lock, NULL); 1152 pdi->pi_lintr.pin = 0; 1153 pdi->pi_lintr.state = IDLE; 1154 pci_irq_init_irq(&pdi->pi_lintr.irq); 1155 pdi->pi_d = pde; 1156 snprintf(pdi->pi_name, PI_NAMESZ, "%s@pci.%d.%d.%d", pde->pe_emu, bus, 1157 slot, func); 1158 1159 /* Disable legacy interrupts */ 1160 pci_set_cfgdata8(pdi, PCIR_INTLINE, 255); 1161 pci_set_cfgdata8(pdi, PCIR_INTPIN, 0); 1162 1163 if (get_config_bool_default("pci.enable_bars", !bootrom_boot())) 1164 pci_set_cfgdata8(pdi, PCIR_COMMAND, PCIM_CMD_BUSMASTEREN); 1165 1166 err = (*pde->pe_init)(pdi, fi->fi_config); 1167 if (err == 0) 1168 fi->fi_devi = pdi; 1169 else 1170 free(pdi); 1171 1172 return (err); 1173 } 1174 1175 void 1176 pci_populate_msicap(struct msicap *msicap, int msgnum, int nextptr) 1177 { 1178 int mmc; 1179 1180 /* Number of msi messages must be a power of 2 between 1 and 32 */ 1181 assert((msgnum & (msgnum - 1)) == 0 && msgnum >= 1 && msgnum <= 32); 1182 mmc = ffs(msgnum) - 1; 1183 1184 bzero(msicap, sizeof(struct msicap)); 1185 msicap->capid = PCIY_MSI; 1186 msicap->nextptr = nextptr; 1187 msicap->msgctrl = PCIM_MSICTRL_64BIT | (mmc << 1); 1188 } 1189 1190 int 1191 pci_emul_add_msicap(struct pci_devinst *pi, int msgnum) 1192 { 1193 struct msicap msicap; 1194 1195 pci_populate_msicap(&msicap, msgnum, 0); 1196 1197 return (pci_emul_add_capability(pi, (u_char *)&msicap, sizeof(msicap))); 1198 } 1199 1200 static void 1201 pci_populate_msixcap(struct msixcap *msixcap, int msgnum, int barnum, 1202 uint32_t msix_tab_size) 1203 { 1204 1205 assert(msix_tab_size % 4096 == 0); 1206 1207 bzero(msixcap, sizeof(struct msixcap)); 1208 msixcap->capid = PCIY_MSIX; 1209 1210 /* 1211 * Message Control Register, all fields set to 1212 * zero except for the Table Size. 1213 * Note: Table size N is encoded as N-1 1214 */ 1215 msixcap->msgctrl = msgnum - 1; 1216 1217 /* 1218 * MSI-X BAR setup: 1219 * - MSI-X table start at offset 0 1220 * - PBA table starts at a 4K aligned offset after the MSI-X table 1221 */ 1222 msixcap->table_info = barnum & PCIM_MSIX_BIR_MASK; 1223 msixcap->pba_info = msix_tab_size | (barnum & PCIM_MSIX_BIR_MASK); 1224 } 1225 1226 static void 1227 pci_msix_table_init(struct pci_devinst *pi, int table_entries) 1228 { 1229 int i, table_size; 1230 1231 assert(table_entries > 0); 1232 assert(table_entries <= MAX_MSIX_TABLE_ENTRIES); 1233 1234 table_size = table_entries * MSIX_TABLE_ENTRY_SIZE; 1235 pi->pi_msix.table = calloc(1, table_size); 1236 1237 /* set mask bit of vector control register */ 1238 for (i = 0; i < table_entries; i++) 1239 pi->pi_msix.table[i].vector_control |= PCIM_MSIX_VCTRL_MASK; 1240 } 1241 1242 int 1243 pci_emul_add_msixcap(struct pci_devinst *pi, int msgnum, int barnum) 1244 { 1245 uint32_t tab_size; 1246 struct msixcap msixcap; 1247 1248 assert(msgnum >= 1 && msgnum <= MAX_MSIX_TABLE_ENTRIES); 1249 assert(barnum >= 0 && barnum <= PCIR_MAX_BAR_0); 1250 1251 tab_size = msgnum * MSIX_TABLE_ENTRY_SIZE; 1252 1253 /* Align table size to nearest 4K */ 1254 tab_size = roundup2(tab_size, 4096); 1255 1256 pi->pi_msix.table_bar = barnum; 1257 pi->pi_msix.pba_bar = barnum; 1258 pi->pi_msix.table_offset = 0; 1259 pi->pi_msix.table_count = msgnum; 1260 pi->pi_msix.pba_offset = tab_size; 1261 pi->pi_msix.pba_size = PBA_SIZE(msgnum); 1262 1263 pci_msix_table_init(pi, msgnum); 1264 1265 pci_populate_msixcap(&msixcap, msgnum, barnum, tab_size); 1266 1267 /* allocate memory for MSI-X Table and PBA */ 1268 pci_emul_alloc_bar(pi, barnum, PCIBAR_MEM32, 1269 tab_size + pi->pi_msix.pba_size); 1270 1271 return (pci_emul_add_capability(pi, (u_char *)&msixcap, 1272 sizeof(msixcap))); 1273 } 1274 1275 static void 1276 msixcap_cfgwrite(struct pci_devinst *pi, int capoff, int offset, 1277 int bytes, uint32_t val) 1278 { 1279 uint16_t msgctrl, rwmask; 1280 int off; 1281 1282 off = offset - capoff; 1283 /* Message Control Register */ 1284 if (off == 2 && bytes == 2) { 1285 rwmask = PCIM_MSIXCTRL_MSIX_ENABLE | PCIM_MSIXCTRL_FUNCTION_MASK; 1286 msgctrl = pci_get_cfgdata16(pi, offset); 1287 msgctrl &= ~rwmask; 1288 msgctrl |= val & rwmask; 1289 val = msgctrl; 1290 1291 pi->pi_msix.enabled = val & PCIM_MSIXCTRL_MSIX_ENABLE; 1292 pi->pi_msix.function_mask = val & PCIM_MSIXCTRL_FUNCTION_MASK; 1293 pci_lintr_update(pi); 1294 } 1295 1296 CFGWRITE(pi, offset, val, bytes); 1297 } 1298 1299 static void 1300 msicap_cfgwrite(struct pci_devinst *pi, int capoff, int offset, 1301 int bytes, uint32_t val) 1302 { 1303 uint16_t msgctrl, rwmask, msgdata, mme; 1304 uint32_t addrlo; 1305 1306 /* 1307 * If guest is writing to the message control register make sure 1308 * we do not overwrite read-only fields. 1309 */ 1310 if ((offset - capoff) == 2 && bytes == 2) { 1311 rwmask = PCIM_MSICTRL_MME_MASK | PCIM_MSICTRL_MSI_ENABLE; 1312 msgctrl = pci_get_cfgdata16(pi, offset); 1313 msgctrl &= ~rwmask; 1314 msgctrl |= val & rwmask; 1315 val = msgctrl; 1316 } 1317 CFGWRITE(pi, offset, val, bytes); 1318 1319 msgctrl = pci_get_cfgdata16(pi, capoff + 2); 1320 addrlo = pci_get_cfgdata32(pi, capoff + 4); 1321 if (msgctrl & PCIM_MSICTRL_64BIT) 1322 msgdata = pci_get_cfgdata16(pi, capoff + 12); 1323 else 1324 msgdata = pci_get_cfgdata16(pi, capoff + 8); 1325 1326 mme = msgctrl & PCIM_MSICTRL_MME_MASK; 1327 pi->pi_msi.enabled = msgctrl & PCIM_MSICTRL_MSI_ENABLE ? 1 : 0; 1328 if (pi->pi_msi.enabled) { 1329 pi->pi_msi.addr = addrlo; 1330 pi->pi_msi.msg_data = msgdata; 1331 pi->pi_msi.maxmsgnum = 1 << (mme >> 4); 1332 } else { 1333 pi->pi_msi.maxmsgnum = 0; 1334 } 1335 pci_lintr_update(pi); 1336 } 1337 1338 static void 1339 pciecap_cfgwrite(struct pci_devinst *pi, int capoff __unused, int offset, 1340 int bytes, uint32_t val) 1341 { 1342 1343 /* XXX don't write to the readonly parts */ 1344 CFGWRITE(pi, offset, val, bytes); 1345 } 1346 1347 #define PCIECAP_VERSION 0x2 1348 int 1349 pci_emul_add_pciecap(struct pci_devinst *pi, int type) 1350 { 1351 int err; 1352 struct pciecap pciecap; 1353 1354 bzero(&pciecap, sizeof(pciecap)); 1355 1356 /* 1357 * Use the integrated endpoint type for endpoints on a root complex bus. 1358 * 1359 * NB: bhyve currently only supports a single PCI bus that is the root 1360 * complex bus, so all endpoints are integrated. 1361 */ 1362 if ((type == PCIEM_TYPE_ENDPOINT) && (pi->pi_bus == 0)) 1363 type = PCIEM_TYPE_ROOT_INT_EP; 1364 1365 pciecap.capid = PCIY_EXPRESS; 1366 pciecap.pcie_capabilities = PCIECAP_VERSION | type; 1367 if (type != PCIEM_TYPE_ROOT_INT_EP) { 1368 pciecap.link_capabilities = 0x411; /* gen1, x1 */ 1369 pciecap.link_status = 0x11; /* gen1, x1 */ 1370 } 1371 1372 err = pci_emul_add_capability(pi, (u_char *)&pciecap, sizeof(pciecap)); 1373 return (err); 1374 } 1375 1376 /* 1377 * This function assumes that 'coff' is in the capabilities region of the 1378 * config space. A capoff parameter of zero will force a search for the 1379 * offset and type. 1380 */ 1381 void 1382 pci_emul_capwrite(struct pci_devinst *pi, int offset, int bytes, uint32_t val, 1383 uint8_t capoff, int capid) 1384 { 1385 uint8_t nextoff; 1386 1387 /* Do not allow un-aligned writes */ 1388 if ((offset & (bytes - 1)) != 0) 1389 return; 1390 1391 if (capoff == 0) { 1392 /* Find the capability that we want to update */ 1393 capoff = CAP_START_OFFSET; 1394 while (1) { 1395 nextoff = pci_get_cfgdata8(pi, capoff + 1); 1396 if (nextoff == 0) 1397 break; 1398 if (offset >= capoff && offset < nextoff) 1399 break; 1400 1401 capoff = nextoff; 1402 } 1403 assert(offset >= capoff); 1404 capid = pci_get_cfgdata8(pi, capoff); 1405 } 1406 1407 /* 1408 * Capability ID and Next Capability Pointer are readonly. 1409 * However, some o/s's do 4-byte writes that include these. 1410 * For this case, trim the write back to 2 bytes and adjust 1411 * the data. 1412 */ 1413 if (offset == capoff || offset == capoff + 1) { 1414 if (offset == capoff && bytes == 4) { 1415 bytes = 2; 1416 offset += 2; 1417 val >>= 16; 1418 } else 1419 return; 1420 } 1421 1422 switch (capid) { 1423 case PCIY_MSI: 1424 msicap_cfgwrite(pi, capoff, offset, bytes, val); 1425 break; 1426 case PCIY_MSIX: 1427 msixcap_cfgwrite(pi, capoff, offset, bytes, val); 1428 break; 1429 case PCIY_EXPRESS: 1430 pciecap_cfgwrite(pi, capoff, offset, bytes, val); 1431 break; 1432 default: 1433 break; 1434 } 1435 } 1436 1437 static int 1438 pci_emul_iscap(struct pci_devinst *pi, int offset) 1439 { 1440 uint16_t sts; 1441 1442 sts = pci_get_cfgdata16(pi, PCIR_STATUS); 1443 if ((sts & PCIM_STATUS_CAPPRESENT) != 0) { 1444 if (offset >= CAP_START_OFFSET && offset <= pi->pi_capend) 1445 return (1); 1446 } 1447 return (0); 1448 } 1449 1450 static int 1451 pci_emul_fallback_handler(struct vcpu *vcpu __unused, int dir, 1452 uint64_t addr __unused, int size __unused, uint64_t *val, 1453 void *arg1 __unused, long arg2 __unused) 1454 { 1455 /* 1456 * Ignore writes; return 0xff's for reads. The mem read code 1457 * will take care of truncating to the correct size. 1458 */ 1459 if (dir == MEM_F_READ) { 1460 *val = 0xffffffffffffffff; 1461 } 1462 1463 return (0); 1464 } 1465 1466 static int 1467 pci_emul_ecfg_handler(struct vcpu *vcpu __unused, int dir, uint64_t addr, 1468 int bytes, uint64_t *val, void *arg1 __unused, long arg2 __unused) 1469 { 1470 int bus, slot, func, coff, in; 1471 1472 coff = addr & 0xfff; 1473 func = (addr >> 12) & 0x7; 1474 slot = (addr >> 15) & 0x1f; 1475 bus = (addr >> 20) & 0xff; 1476 in = (dir == MEM_F_READ); 1477 if (in) 1478 *val = ~0UL; 1479 pci_cfgrw(in, bus, slot, func, coff, bytes, (uint32_t *)val); 1480 return (0); 1481 } 1482 1483 uint64_t 1484 pci_ecfg_base(void) 1485 { 1486 1487 return (PCI_EMUL_ECFG_BASE); 1488 } 1489 1490 static int 1491 init_bootorder(void) 1492 { 1493 struct boot_device *device; 1494 FILE *fp; 1495 char *bootorder; 1496 size_t bootorder_len; 1497 1498 if (TAILQ_EMPTY(&boot_devices)) 1499 return (0); 1500 1501 fp = open_memstream(&bootorder, &bootorder_len); 1502 TAILQ_FOREACH(device, &boot_devices, boot_device_chain) { 1503 fprintf(fp, "/pci@i0cf8/pci@%d,%d\n", 1504 device->pdi->pi_slot, device->pdi->pi_func); 1505 } 1506 fclose(fp); 1507 1508 return (qemu_fwcfg_add_file("bootorder", bootorder_len, bootorder)); 1509 } 1510 1511 #define BUSIO_ROUNDUP 32 1512 #define BUSMEM32_ROUNDUP (1024 * 1024) 1513 #define BUSMEM64_ROUNDUP (512 * 1024 * 1024) 1514 1515 int 1516 init_pci(struct vmctx *ctx) 1517 { 1518 char node_name[sizeof("pci.XXX.XX.X")]; 1519 struct mem_range mr; 1520 struct pci_devemu *pde; 1521 struct businfo *bi; 1522 struct slotinfo *si; 1523 struct funcinfo *fi; 1524 nvlist_t *nvl; 1525 const char *emul; 1526 size_t lowmem; 1527 int bus, slot, func; 1528 int error; 1529 1530 if (vm_get_lowmem_limit(ctx) > PCI_EMUL_MEMBASE32) 1531 errx(EX_OSERR, "Invalid lowmem limit"); 1532 1533 pci_emul_iobase = PCI_EMUL_IOBASE; 1534 pci_emul_membase32 = PCI_EMUL_MEMBASE32; 1535 1536 pci_emul_membase64 = vm_get_highmem_base(ctx) + 1537 vm_get_highmem_size(ctx); 1538 pci_emul_membase64 = roundup2(pci_emul_membase64, PCI_EMUL_MEMSIZE64); 1539 pci_emul_memlim64 = pci_emul_membase64 + PCI_EMUL_MEMSIZE64; 1540 1541 TAILQ_INIT(&boot_devices); 1542 1543 for (bus = 0; bus < MAXBUSES; bus++) { 1544 snprintf(node_name, sizeof(node_name), "pci.%d", bus); 1545 nvl = find_config_node(node_name); 1546 if (nvl == NULL) 1547 continue; 1548 pci_businfo[bus] = calloc(1, sizeof(struct businfo)); 1549 bi = pci_businfo[bus]; 1550 1551 /* 1552 * Keep track of the i/o and memory resources allocated to 1553 * this bus. 1554 */ 1555 bi->iobase = pci_emul_iobase; 1556 bi->membase32 = pci_emul_membase32; 1557 bi->membase64 = pci_emul_membase64; 1558 1559 /* first run: init devices */ 1560 for (slot = 0; slot < MAXSLOTS; slot++) { 1561 si = &bi->slotinfo[slot]; 1562 for (func = 0; func < MAXFUNCS; func++) { 1563 fi = &si->si_funcs[func]; 1564 snprintf(node_name, sizeof(node_name), 1565 "pci.%d.%d.%d", bus, slot, func); 1566 nvl = find_config_node(node_name); 1567 if (nvl == NULL) 1568 continue; 1569 1570 fi->fi_config = nvl; 1571 emul = get_config_value_node(nvl, "device"); 1572 if (emul == NULL) { 1573 EPRINTLN("pci slot %d:%d:%d: missing " 1574 "\"device\" value", bus, slot, func); 1575 return (EINVAL); 1576 } 1577 pde = pci_emul_finddev(emul); 1578 if (pde == NULL) { 1579 EPRINTLN("pci slot %d:%d:%d: unknown " 1580 "device \"%s\"", bus, slot, func, 1581 emul); 1582 return (EINVAL); 1583 } 1584 if (pde->pe_alias != NULL) { 1585 EPRINTLN("pci slot %d:%d:%d: legacy " 1586 "device \"%s\", use \"%s\" instead", 1587 bus, slot, func, emul, 1588 pde->pe_alias); 1589 return (EINVAL); 1590 } 1591 fi->fi_pde = pde; 1592 error = pci_emul_init(ctx, pde, bus, slot, 1593 func, fi); 1594 if (error) 1595 return (error); 1596 } 1597 } 1598 1599 /* second run: assign BARs and free list */ 1600 struct pci_bar_allocation *bar; 1601 struct pci_bar_allocation *bar_tmp; 1602 TAILQ_FOREACH_SAFE(bar, &pci_bars, chain, bar_tmp) { 1603 pci_emul_assign_bar(bar->pdi, bar->idx, bar->type, 1604 bar->size); 1605 free(bar); 1606 } 1607 TAILQ_INIT(&pci_bars); 1608 1609 /* 1610 * Add some slop to the I/O and memory resources decoded by 1611 * this bus to give a guest some flexibility if it wants to 1612 * reprogram the BARs. 1613 */ 1614 pci_emul_iobase += BUSIO_ROUNDUP; 1615 pci_emul_iobase = roundup2(pci_emul_iobase, BUSIO_ROUNDUP); 1616 bi->iolimit = pci_emul_iobase; 1617 1618 pci_emul_membase32 += BUSMEM32_ROUNDUP; 1619 pci_emul_membase32 = roundup2(pci_emul_membase32, 1620 BUSMEM32_ROUNDUP); 1621 bi->memlimit32 = pci_emul_membase32; 1622 1623 pci_emul_membase64 += BUSMEM64_ROUNDUP; 1624 pci_emul_membase64 = roundup2(pci_emul_membase64, 1625 BUSMEM64_ROUNDUP); 1626 bi->memlimit64 = pci_emul_membase64; 1627 } 1628 1629 /* 1630 * PCI backends are initialized before routing INTx interrupts 1631 * so that LPC devices are able to reserve ISA IRQs before 1632 * routing PIRQ pins. 1633 */ 1634 for (bus = 0; bus < MAXBUSES; bus++) { 1635 if ((bi = pci_businfo[bus]) == NULL) 1636 continue; 1637 1638 for (slot = 0; slot < MAXSLOTS; slot++) { 1639 si = &bi->slotinfo[slot]; 1640 for (func = 0; func < MAXFUNCS; func++) { 1641 fi = &si->si_funcs[func]; 1642 if (fi->fi_devi == NULL) 1643 continue; 1644 pci_lintr_route(fi->fi_devi); 1645 } 1646 } 1647 } 1648 #ifdef __amd64__ 1649 lpc_pirq_routed(); 1650 #endif 1651 1652 if ((error = init_bootorder()) != 0) { 1653 warnx("%s: Unable to init bootorder", __func__); 1654 return (error); 1655 } 1656 1657 /* 1658 * The guest physical memory map looks like the following on amd64: 1659 * [0, lowmem) guest system memory 1660 * [lowmem, 0xC0000000) memory hole (may be absent) 1661 * [0xC0000000, 0xE0000000) PCI hole (32-bit BAR allocation) 1662 * [0xE0000000, 0xF0000000) PCI extended config window 1663 * [0xF0000000, 4GB) LAPIC, IOAPIC, HPET, firmware 1664 * [4GB, 4GB + highmem) guest system memory 1665 * [roundup(4GB + highmem, 32GB), ...) PCI 64-bit BAR allocation 1666 * 1667 * On arm64 the guest physical memory map looks like this: 1668 * [0x0DF00000, 0x10000000) PCI I/O memory 1669 * [0xA0000000, 0xE0000000) PCI 32-bit BAR allocation 1670 * [0xE0000000, 0xF0000000) PCI extended config window 1671 * [4GB, 4GB + highmem) guest system memory 1672 * [roundup(4GB + highmem, 32GB), ...) PCI 64-bit BAR allocation 1673 * 1674 * "lowmem" is guest memory below 0xC0000000. amd64 guests provisioned 1675 * with less than 3GB of RAM will have no memory above the 4GB boundary. 1676 * System memory for arm64 guests is all above the 4GB boundary. 1677 */ 1678 1679 /* 1680 * Accesses to memory addresses that are not allocated to system 1681 * memory or PCI devices return 0xff's. 1682 */ 1683 lowmem = vm_get_lowmem_size(ctx); 1684 bzero(&mr, sizeof(struct mem_range)); 1685 mr.name = "PCI hole"; 1686 mr.flags = MEM_F_RW | MEM_F_IMMUTABLE; 1687 mr.base = lowmem; 1688 mr.size = (4ULL * 1024 * 1024 * 1024) - lowmem; 1689 mr.handler = pci_emul_fallback_handler; 1690 error = register_mem_fallback(&mr); 1691 assert(error == 0); 1692 1693 /* PCI extended config space */ 1694 bzero(&mr, sizeof(struct mem_range)); 1695 mr.name = "PCI ECFG"; 1696 mr.flags = MEM_F_RW | MEM_F_IMMUTABLE; 1697 mr.base = PCI_EMUL_ECFG_BASE; 1698 mr.size = PCI_EMUL_ECFG_SIZE; 1699 mr.handler = pci_emul_ecfg_handler; 1700 error = register_mem(&mr); 1701 assert(error == 0); 1702 1703 return (0); 1704 } 1705 1706 #ifdef __amd64__ 1707 static void 1708 pci_apic_prt_entry(int bus __unused, int slot, int pin, struct pci_irq *irq, 1709 void *arg __unused) 1710 { 1711 1712 dsdt_line(" Package ()"); 1713 dsdt_line(" {"); 1714 dsdt_line(" 0x%X,", slot << 16 | 0xffff); 1715 dsdt_line(" 0x%02X,", pin - 1); 1716 dsdt_line(" Zero,"); 1717 dsdt_line(" 0x%X", irq->ioapic_irq); 1718 dsdt_line(" },"); 1719 } 1720 1721 static void 1722 pci_pirq_prt_entry(int bus __unused, int slot, int pin, struct pci_irq *irq, 1723 void *arg __unused) 1724 { 1725 char *name; 1726 1727 name = lpc_pirq_name(irq->pirq_pin); 1728 if (name == NULL) 1729 return; 1730 dsdt_line(" Package ()"); 1731 dsdt_line(" {"); 1732 dsdt_line(" 0x%X,", slot << 16 | 0xffff); 1733 dsdt_line(" 0x%02X,", pin - 1); 1734 dsdt_line(" %s,", name); 1735 dsdt_line(" 0x00"); 1736 dsdt_line(" },"); 1737 free(name); 1738 } 1739 #endif 1740 1741 /* 1742 * A bhyve virtual machine has a flat PCI hierarchy with a root port 1743 * corresponding to each PCI bus. 1744 */ 1745 static void 1746 pci_bus_write_dsdt(int bus) 1747 { 1748 struct businfo *bi; 1749 struct slotinfo *si; 1750 struct pci_devinst *pi; 1751 int func, slot; 1752 1753 /* 1754 * If there are no devices on this 'bus' then just return. 1755 */ 1756 if ((bi = pci_businfo[bus]) == NULL) { 1757 /* 1758 * Bus 0 is special because it decodes the I/O ports used 1759 * for PCI config space access even if there are no devices 1760 * on it. 1761 */ 1762 if (bus != 0) 1763 return; 1764 } 1765 1766 dsdt_line(" Device (PC%02X)", bus); 1767 dsdt_line(" {"); 1768 dsdt_line(" Name (_HID, EisaId (\"PNP0A03\"))"); 1769 1770 dsdt_line(" Method (_BBN, 0, NotSerialized)"); 1771 dsdt_line(" {"); 1772 dsdt_line(" Return (0x%08X)", bus); 1773 dsdt_line(" }"); 1774 dsdt_line(" Name (_CRS, ResourceTemplate ()"); 1775 dsdt_line(" {"); 1776 dsdt_line(" WordBusNumber (ResourceProducer, MinFixed, " 1777 "MaxFixed, PosDecode,"); 1778 dsdt_line(" 0x0000, // Granularity"); 1779 dsdt_line(" 0x%04X, // Range Minimum", bus); 1780 dsdt_line(" 0x%04X, // Range Maximum", bus); 1781 dsdt_line(" 0x0000, // Translation Offset"); 1782 dsdt_line(" 0x0001, // Length"); 1783 dsdt_line(" ,, )"); 1784 1785 #ifdef __amd64__ 1786 if (bus == 0) { 1787 dsdt_indent(3); 1788 dsdt_fixed_ioport(0xCF8, 8); 1789 dsdt_unindent(3); 1790 1791 dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, " 1792 "PosDecode, EntireRange,"); 1793 dsdt_line(" 0x0000, // Granularity"); 1794 dsdt_line(" 0x0000, // Range Minimum"); 1795 dsdt_line(" 0x0CF7, // Range Maximum"); 1796 dsdt_line(" 0x0000, // Translation Offset"); 1797 dsdt_line(" 0x0CF8, // Length"); 1798 dsdt_line(" ,, , TypeStatic)"); 1799 1800 dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, " 1801 "PosDecode, EntireRange,"); 1802 dsdt_line(" 0x0000, // Granularity"); 1803 dsdt_line(" 0x0D00, // Range Minimum"); 1804 dsdt_line(" 0x%04X, // Range Maximum", 1805 PCI_EMUL_IOBASE - 1); 1806 dsdt_line(" 0x0000, // Translation Offset"); 1807 dsdt_line(" 0x%04X, // Length", 1808 PCI_EMUL_IOBASE - 0x0D00); 1809 dsdt_line(" ,, , TypeStatic)"); 1810 1811 if (bi == NULL) { 1812 dsdt_line(" })"); 1813 goto done; 1814 } 1815 } 1816 #endif 1817 assert(bi != NULL); 1818 1819 /* i/o window */ 1820 dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, " 1821 "PosDecode, EntireRange,"); 1822 dsdt_line(" 0x0000, // Granularity"); 1823 dsdt_line(" 0x%04X, // Range Minimum", bi->iobase); 1824 dsdt_line(" 0x%04X, // Range Maximum", 1825 bi->iolimit - 1); 1826 dsdt_line(" 0x0000, // Translation Offset"); 1827 dsdt_line(" 0x%04X, // Length", 1828 bi->iolimit - bi->iobase); 1829 dsdt_line(" ,, , TypeStatic)"); 1830 1831 /* mmio window (32-bit) */ 1832 dsdt_line(" DWordMemory (ResourceProducer, PosDecode, " 1833 "MinFixed, MaxFixed, NonCacheable, ReadWrite,"); 1834 dsdt_line(" 0x00000000, // Granularity"); 1835 dsdt_line(" 0x%08X, // Range Minimum\n", bi->membase32); 1836 dsdt_line(" 0x%08X, // Range Maximum\n", 1837 bi->memlimit32 - 1); 1838 dsdt_line(" 0x00000000, // Translation Offset"); 1839 dsdt_line(" 0x%08X, // Length\n", 1840 bi->memlimit32 - bi->membase32); 1841 dsdt_line(" ,, , AddressRangeMemory, TypeStatic)"); 1842 1843 /* mmio window (64-bit) */ 1844 dsdt_line(" QWordMemory (ResourceProducer, PosDecode, " 1845 "MinFixed, MaxFixed, NonCacheable, ReadWrite,"); 1846 dsdt_line(" 0x0000000000000000, // Granularity"); 1847 dsdt_line(" 0x%016lX, // Range Minimum\n", bi->membase64); 1848 dsdt_line(" 0x%016lX, // Range Maximum\n", 1849 bi->memlimit64 - 1); 1850 dsdt_line(" 0x0000000000000000, // Translation Offset"); 1851 dsdt_line(" 0x%016lX, // Length\n", 1852 bi->memlimit64 - bi->membase64); 1853 dsdt_line(" ,, , AddressRangeMemory, TypeStatic)"); 1854 dsdt_line(" })"); 1855 1856 #ifdef __amd64__ 1857 if (pci_count_lintr(bus) != 0) { 1858 dsdt_indent(2); 1859 dsdt_line("Name (PPRT, Package ()"); 1860 dsdt_line("{"); 1861 pci_walk_lintr(bus, pci_pirq_prt_entry, NULL); 1862 dsdt_line("})"); 1863 dsdt_line("Name (APRT, Package ()"); 1864 dsdt_line("{"); 1865 pci_walk_lintr(bus, pci_apic_prt_entry, NULL); 1866 dsdt_line("})"); 1867 dsdt_line("Method (_PRT, 0, NotSerialized)"); 1868 dsdt_line("{"); 1869 dsdt_line(" If (PICM)"); 1870 dsdt_line(" {"); 1871 dsdt_line(" Return (APRT)"); 1872 dsdt_line(" }"); 1873 dsdt_line(" Else"); 1874 dsdt_line(" {"); 1875 dsdt_line(" Return (PPRT)"); 1876 dsdt_line(" }"); 1877 dsdt_line("}"); 1878 dsdt_unindent(2); 1879 } 1880 #endif 1881 1882 dsdt_indent(2); 1883 for (slot = 0; slot < MAXSLOTS; slot++) { 1884 si = &bi->slotinfo[slot]; 1885 for (func = 0; func < MAXFUNCS; func++) { 1886 pi = si->si_funcs[func].fi_devi; 1887 if (pi != NULL && pi->pi_d->pe_write_dsdt != NULL) 1888 pi->pi_d->pe_write_dsdt(pi); 1889 } 1890 } 1891 dsdt_unindent(2); 1892 #ifdef __amd64__ 1893 done: 1894 #endif 1895 dsdt_line(" }"); 1896 } 1897 1898 void 1899 pci_write_dsdt(void) 1900 { 1901 int bus; 1902 1903 dsdt_indent(1); 1904 dsdt_line("Name (PICM, 0x00)"); 1905 dsdt_line("Method (_PIC, 1, NotSerialized)"); 1906 dsdt_line("{"); 1907 dsdt_line(" Store (Arg0, PICM)"); 1908 dsdt_line("}"); 1909 dsdt_line(""); 1910 dsdt_line("Scope (_SB)"); 1911 dsdt_line("{"); 1912 for (bus = 0; bus < MAXBUSES; bus++) 1913 pci_bus_write_dsdt(bus); 1914 dsdt_line("}"); 1915 dsdt_unindent(1); 1916 } 1917 1918 int 1919 pci_bus_configured(int bus) 1920 { 1921 assert(bus >= 0 && bus < MAXBUSES); 1922 return (pci_businfo[bus] != NULL); 1923 } 1924 1925 int 1926 pci_msi_enabled(struct pci_devinst *pi) 1927 { 1928 return (pi->pi_msi.enabled); 1929 } 1930 1931 int 1932 pci_msi_maxmsgnum(struct pci_devinst *pi) 1933 { 1934 if (pi->pi_msi.enabled) 1935 return (pi->pi_msi.maxmsgnum); 1936 else 1937 return (0); 1938 } 1939 1940 int 1941 pci_msix_enabled(struct pci_devinst *pi) 1942 { 1943 1944 return (pi->pi_msix.enabled && !pi->pi_msi.enabled); 1945 } 1946 1947 void 1948 pci_generate_msix(struct pci_devinst *pi, int index) 1949 { 1950 struct msix_table_entry *mte; 1951 1952 if (!pci_msix_enabled(pi)) 1953 return; 1954 1955 if (pi->pi_msix.function_mask) 1956 return; 1957 1958 if (index >= pi->pi_msix.table_count) 1959 return; 1960 1961 mte = &pi->pi_msix.table[index]; 1962 if ((mte->vector_control & PCIM_MSIX_VCTRL_MASK) == 0) { 1963 /* XXX Set PBA bit if interrupt is disabled */ 1964 vm_raise_msi(pi->pi_vmctx, mte->addr, mte->msg_data, 1965 pi->pi_bus, pi->pi_slot, pi->pi_func); 1966 } 1967 } 1968 1969 void 1970 pci_generate_msi(struct pci_devinst *pi, int index) 1971 { 1972 1973 if (pci_msi_enabled(pi) && index < pci_msi_maxmsgnum(pi)) { 1974 vm_raise_msi(pi->pi_vmctx, pi->pi_msi.addr, 1975 pi->pi_msi.msg_data + index, 1976 pi->pi_bus, pi->pi_slot, pi->pi_func); 1977 } 1978 } 1979 1980 static bool 1981 pci_lintr_permitted(struct pci_devinst *pi) 1982 { 1983 uint16_t cmd; 1984 1985 cmd = pci_get_cfgdata16(pi, PCIR_COMMAND); 1986 return (!(pi->pi_msi.enabled || pi->pi_msix.enabled || 1987 (cmd & PCIM_CMD_INTxDIS))); 1988 } 1989 1990 void 1991 pci_lintr_request(struct pci_devinst *pi) 1992 { 1993 struct businfo *bi; 1994 struct slotinfo *si; 1995 int bestpin, bestcount, pin; 1996 1997 bi = pci_businfo[pi->pi_bus]; 1998 assert(bi != NULL); 1999 2000 /* 2001 * Just allocate a pin from our slot. The pin will be 2002 * assigned IRQs later when interrupts are routed. 2003 */ 2004 si = &bi->slotinfo[pi->pi_slot]; 2005 bestpin = 0; 2006 bestcount = si->si_intpins[0].ii_count; 2007 for (pin = 1; pin < 4; pin++) { 2008 if (si->si_intpins[pin].ii_count < bestcount) { 2009 bestpin = pin; 2010 bestcount = si->si_intpins[pin].ii_count; 2011 } 2012 } 2013 2014 si->si_intpins[bestpin].ii_count++; 2015 pi->pi_lintr.pin = bestpin + 1; 2016 pci_set_cfgdata8(pi, PCIR_INTPIN, bestpin + 1); 2017 } 2018 2019 static void 2020 pci_lintr_route(struct pci_devinst *pi) 2021 { 2022 struct businfo *bi; 2023 struct intxinfo *ii; 2024 struct pci_irq *irq; 2025 2026 if (pi->pi_lintr.pin == 0) 2027 return; 2028 2029 bi = pci_businfo[pi->pi_bus]; 2030 assert(bi != NULL); 2031 ii = &bi->slotinfo[pi->pi_slot].si_intpins[pi->pi_lintr.pin - 1]; 2032 irq = &ii->ii_irq; 2033 pci_irq_route(pi, irq); 2034 pi->pi_lintr.irq = *irq; 2035 pci_set_cfgdata8(pi, PCIR_INTLINE, pci_irq_intline(irq)); 2036 } 2037 2038 void 2039 pci_lintr_assert(struct pci_devinst *pi) 2040 { 2041 2042 assert(pi->pi_lintr.pin > 0); 2043 2044 pthread_mutex_lock(&pi->pi_lintr.lock); 2045 if (pi->pi_lintr.state == IDLE) { 2046 if (pci_lintr_permitted(pi)) { 2047 pi->pi_lintr.state = ASSERTED; 2048 pci_irq_assert(pi); 2049 } else 2050 pi->pi_lintr.state = PENDING; 2051 } 2052 pthread_mutex_unlock(&pi->pi_lintr.lock); 2053 } 2054 2055 void 2056 pci_lintr_deassert(struct pci_devinst *pi) 2057 { 2058 2059 assert(pi->pi_lintr.pin > 0); 2060 2061 pthread_mutex_lock(&pi->pi_lintr.lock); 2062 if (pi->pi_lintr.state == ASSERTED) { 2063 pi->pi_lintr.state = IDLE; 2064 pci_irq_deassert(pi); 2065 } else if (pi->pi_lintr.state == PENDING) 2066 pi->pi_lintr.state = IDLE; 2067 pthread_mutex_unlock(&pi->pi_lintr.lock); 2068 } 2069 2070 static void 2071 pci_lintr_update(struct pci_devinst *pi) 2072 { 2073 2074 pthread_mutex_lock(&pi->pi_lintr.lock); 2075 if (pi->pi_lintr.state == ASSERTED && !pci_lintr_permitted(pi)) { 2076 pci_irq_deassert(pi); 2077 pi->pi_lintr.state = PENDING; 2078 } else if (pi->pi_lintr.state == PENDING && pci_lintr_permitted(pi)) { 2079 pi->pi_lintr.state = ASSERTED; 2080 pci_irq_assert(pi); 2081 } 2082 pthread_mutex_unlock(&pi->pi_lintr.lock); 2083 } 2084 2085 int 2086 pci_count_lintr(int bus) 2087 { 2088 int count, slot, pin; 2089 struct slotinfo *slotinfo; 2090 2091 count = 0; 2092 if (pci_businfo[bus] != NULL) { 2093 for (slot = 0; slot < MAXSLOTS; slot++) { 2094 slotinfo = &pci_businfo[bus]->slotinfo[slot]; 2095 for (pin = 0; pin < 4; pin++) { 2096 if (slotinfo->si_intpins[pin].ii_count != 0) 2097 count++; 2098 } 2099 } 2100 } 2101 return (count); 2102 } 2103 2104 void 2105 pci_walk_lintr(int bus, pci_lintr_cb cb, void *arg) 2106 { 2107 struct businfo *bi; 2108 struct slotinfo *si; 2109 struct intxinfo *ii; 2110 int slot, pin; 2111 2112 if ((bi = pci_businfo[bus]) == NULL) 2113 return; 2114 2115 for (slot = 0; slot < MAXSLOTS; slot++) { 2116 si = &bi->slotinfo[slot]; 2117 for (pin = 0; pin < 4; pin++) { 2118 ii = &si->si_intpins[pin]; 2119 if (ii->ii_count != 0) 2120 cb(bus, slot, pin + 1, &ii->ii_irq, arg); 2121 } 2122 } 2123 } 2124 2125 /* 2126 * Return 1 if the emulated device in 'slot' is a multi-function device. 2127 * Return 0 otherwise. 2128 */ 2129 static int 2130 pci_emul_is_mfdev(int bus, int slot) 2131 { 2132 struct businfo *bi; 2133 struct slotinfo *si; 2134 int f, numfuncs; 2135 2136 numfuncs = 0; 2137 if ((bi = pci_businfo[bus]) != NULL) { 2138 si = &bi->slotinfo[slot]; 2139 for (f = 0; f < MAXFUNCS; f++) { 2140 if (si->si_funcs[f].fi_devi != NULL) { 2141 numfuncs++; 2142 } 2143 } 2144 } 2145 return (numfuncs > 1); 2146 } 2147 2148 /* 2149 * Ensure that the PCIM_MFDEV bit is properly set (or unset) depending on 2150 * whether or not is a multi-function being emulated in the pci 'slot'. 2151 */ 2152 static void 2153 pci_emul_hdrtype_fixup(int bus, int slot, int off, int bytes, uint32_t *rv) 2154 { 2155 int mfdev; 2156 2157 if (off <= PCIR_HDRTYPE && off + bytes > PCIR_HDRTYPE) { 2158 mfdev = pci_emul_is_mfdev(bus, slot); 2159 switch (bytes) { 2160 case 1: 2161 case 2: 2162 *rv &= ~PCIM_MFDEV; 2163 if (mfdev) { 2164 *rv |= PCIM_MFDEV; 2165 } 2166 break; 2167 case 4: 2168 *rv &= ~(PCIM_MFDEV << 16); 2169 if (mfdev) { 2170 *rv |= (PCIM_MFDEV << 16); 2171 } 2172 break; 2173 } 2174 } 2175 } 2176 2177 /* 2178 * Update device state in response to changes to the PCI command 2179 * register. 2180 */ 2181 void 2182 pci_emul_cmd_changed(struct pci_devinst *pi, uint16_t old) 2183 { 2184 int i; 2185 uint16_t changed, new; 2186 2187 new = pci_get_cfgdata16(pi, PCIR_COMMAND); 2188 changed = old ^ new; 2189 2190 /* 2191 * If the MMIO or I/O address space decoding has changed then 2192 * register/unregister all BARs that decode that address space. 2193 */ 2194 for (i = 0; i <= PCI_BARMAX_WITH_ROM; i++) { 2195 switch (pi->pi_bar[i].type) { 2196 case PCIBAR_NONE: 2197 case PCIBAR_MEMHI64: 2198 break; 2199 case PCIBAR_IO: 2200 /* I/O address space decoding changed? */ 2201 if (changed & PCIM_CMD_PORTEN) { 2202 if (new & PCIM_CMD_PORTEN) 2203 register_bar(pi, i); 2204 else 2205 unregister_bar(pi, i); 2206 } 2207 break; 2208 case PCIBAR_ROM: 2209 /* skip (un-)register of ROM if it disabled */ 2210 if (!romen(pi)) 2211 break; 2212 /* fallthrough */ 2213 case PCIBAR_MEM32: 2214 case PCIBAR_MEM64: 2215 /* MMIO address space decoding changed? */ 2216 if (changed & PCIM_CMD_MEMEN) { 2217 if (new & PCIM_CMD_MEMEN) 2218 register_bar(pi, i); 2219 else 2220 unregister_bar(pi, i); 2221 } 2222 break; 2223 default: 2224 assert(0); 2225 } 2226 } 2227 2228 /* 2229 * If INTx has been unmasked and is pending, assert the 2230 * interrupt. 2231 */ 2232 pci_lintr_update(pi); 2233 } 2234 2235 static void 2236 pci_emul_cmdsts_write(struct pci_devinst *pi, int coff, uint32_t new, int bytes) 2237 { 2238 int rshift; 2239 uint32_t cmd, old, readonly; 2240 2241 cmd = pci_get_cfgdata16(pi, PCIR_COMMAND); /* stash old value */ 2242 2243 /* 2244 * From PCI Local Bus Specification 3.0 sections 6.2.2 and 6.2.3. 2245 * 2246 * XXX Bits 8, 11, 12, 13, 14 and 15 in the status register are 2247 * 'write 1 to clear'. However these bits are not set to '1' by 2248 * any device emulation so it is simpler to treat them as readonly. 2249 */ 2250 rshift = (coff & 0x3) * 8; 2251 readonly = 0xFFFFF880 >> rshift; 2252 2253 old = CFGREAD(pi, coff, bytes); 2254 new &= ~readonly; 2255 new |= (old & readonly); 2256 CFGWRITE(pi, coff, new, bytes); /* update config */ 2257 2258 pci_emul_cmd_changed(pi, cmd); 2259 } 2260 2261 static void 2262 pci_cfgrw(int in, int bus, int slot, int func, int coff, int bytes, 2263 uint32_t *valp) 2264 { 2265 struct businfo *bi; 2266 struct slotinfo *si; 2267 struct pci_devinst *pi; 2268 struct pci_devemu *pe; 2269 int idx, needcfg; 2270 uint64_t addr, bar, mask; 2271 2272 if ((bi = pci_businfo[bus]) != NULL) { 2273 si = &bi->slotinfo[slot]; 2274 pi = si->si_funcs[func].fi_devi; 2275 } else 2276 pi = NULL; 2277 2278 /* 2279 * Just return if there is no device at this slot:func or if the 2280 * guest is doing an un-aligned access. 2281 */ 2282 if (pi == NULL || (bytes != 1 && bytes != 2 && bytes != 4) || 2283 (coff & (bytes - 1)) != 0) { 2284 if (in) 2285 *valp = 0xffffffff; 2286 return; 2287 } 2288 2289 /* 2290 * Ignore all writes beyond the standard config space and return all 2291 * ones on reads. 2292 */ 2293 if (coff >= PCI_REGMAX + 1) { 2294 if (in) { 2295 *valp = 0xffffffff; 2296 /* 2297 * Extended capabilities begin at offset 256 in config 2298 * space. Absence of extended capabilities is signaled 2299 * with all 0s in the extended capability header at 2300 * offset 256. 2301 */ 2302 if (coff <= PCI_REGMAX + 4) 2303 *valp = 0x00000000; 2304 } 2305 return; 2306 } 2307 2308 pe = pi->pi_d; 2309 2310 /* 2311 * Config read 2312 */ 2313 if (in) { 2314 /* Let the device emulation override the default handler */ 2315 if (pe->pe_cfgread != NULL) { 2316 needcfg = pe->pe_cfgread(pi, coff, bytes, valp); 2317 } else { 2318 needcfg = 1; 2319 } 2320 2321 if (needcfg) 2322 *valp = CFGREAD(pi, coff, bytes); 2323 2324 pci_emul_hdrtype_fixup(bus, slot, coff, bytes, valp); 2325 } else { 2326 /* Let the device emulation override the default handler */ 2327 if (pe->pe_cfgwrite != NULL && 2328 (*pe->pe_cfgwrite)(pi, coff, bytes, *valp) == 0) 2329 return; 2330 2331 /* 2332 * Special handling for write to BAR and ROM registers 2333 */ 2334 if (is_pcir_bar(coff) || is_pcir_bios(coff)) { 2335 /* 2336 * Ignore writes to BAR registers that are not 2337 * 4-byte aligned. 2338 */ 2339 if (bytes != 4 || (coff & 0x3) != 0) 2340 return; 2341 2342 if (is_pcir_bar(coff)) { 2343 idx = (coff - PCIR_BAR(0)) / 4; 2344 } else if (is_pcir_bios(coff)) { 2345 idx = PCI_ROM_IDX; 2346 } else { 2347 errx(4, "%s: invalid BAR offset %d", __func__, 2348 coff); 2349 } 2350 2351 mask = ~(pi->pi_bar[idx].size - 1); 2352 switch (pi->pi_bar[idx].type) { 2353 case PCIBAR_NONE: 2354 pi->pi_bar[idx].addr = bar = 0; 2355 break; 2356 case PCIBAR_IO: 2357 addr = *valp & mask; 2358 #if defined(PCI_EMUL_IOMASK) 2359 addr &= PCI_EMUL_IOMASK; 2360 #endif 2361 bar = addr | pi->pi_bar[idx].lobits; 2362 /* 2363 * Register the new BAR value for interception 2364 */ 2365 if (addr != pi->pi_bar[idx].addr) { 2366 update_bar_address(pi, addr, idx, 2367 PCIBAR_IO); 2368 } 2369 break; 2370 case PCIBAR_MEM32: 2371 addr = bar = *valp & mask; 2372 bar |= pi->pi_bar[idx].lobits; 2373 if (addr != pi->pi_bar[idx].addr) { 2374 update_bar_address(pi, addr, idx, 2375 PCIBAR_MEM32); 2376 } 2377 break; 2378 case PCIBAR_MEM64: 2379 addr = bar = *valp & mask; 2380 bar |= pi->pi_bar[idx].lobits; 2381 if (addr != (uint32_t)pi->pi_bar[idx].addr) { 2382 update_bar_address(pi, addr, idx, 2383 PCIBAR_MEM64); 2384 } 2385 break; 2386 case PCIBAR_MEMHI64: 2387 mask = ~(pi->pi_bar[idx - 1].size - 1); 2388 addr = ((uint64_t)*valp << 32) & mask; 2389 bar = addr >> 32; 2390 if (bar != pi->pi_bar[idx - 1].addr >> 32) { 2391 update_bar_address(pi, addr, idx - 1, 2392 PCIBAR_MEMHI64); 2393 } 2394 break; 2395 case PCIBAR_ROM: 2396 addr = bar = *valp & mask; 2397 if (memen(pi) && romen(pi)) { 2398 unregister_bar(pi, idx); 2399 } 2400 pi->pi_bar[idx].addr = addr; 2401 pi->pi_bar[idx].lobits = *valp & 2402 PCIM_BIOS_ENABLE; 2403 /* romen could have changed it value */ 2404 if (memen(pi) && romen(pi)) { 2405 register_bar(pi, idx); 2406 } 2407 bar |= pi->pi_bar[idx].lobits; 2408 break; 2409 default: 2410 assert(0); 2411 } 2412 pci_set_cfgdata32(pi, coff, bar); 2413 2414 } else if (pci_emul_iscap(pi, coff)) { 2415 pci_emul_capwrite(pi, coff, bytes, *valp, 0, 0); 2416 } else if (coff >= PCIR_COMMAND && coff < PCIR_REVID) { 2417 pci_emul_cmdsts_write(pi, coff, *valp, bytes); 2418 } else { 2419 CFGWRITE(pi, coff, *valp, bytes); 2420 } 2421 } 2422 } 2423 2424 #ifdef __amd64__ 2425 static int cfgenable, cfgbus, cfgslot, cfgfunc, cfgoff; 2426 2427 static int 2428 pci_emul_cfgaddr(struct vmctx *ctx __unused, int in, 2429 int port __unused, int bytes, uint32_t *eax, void *arg __unused) 2430 { 2431 uint32_t x; 2432 2433 if (bytes != 4) { 2434 if (in) 2435 *eax = (bytes == 2) ? 0xffff : 0xff; 2436 return (0); 2437 } 2438 2439 if (in) { 2440 x = (cfgbus << 16) | (cfgslot << 11) | (cfgfunc << 8) | cfgoff; 2441 if (cfgenable) 2442 x |= CONF1_ENABLE; 2443 *eax = x; 2444 } else { 2445 x = *eax; 2446 cfgenable = (x & CONF1_ENABLE) == CONF1_ENABLE; 2447 cfgoff = (x & PCI_REGMAX) & ~0x03; 2448 cfgfunc = (x >> 8) & PCI_FUNCMAX; 2449 cfgslot = (x >> 11) & PCI_SLOTMAX; 2450 cfgbus = (x >> 16) & PCI_BUSMAX; 2451 } 2452 2453 return (0); 2454 } 2455 INOUT_PORT(pci_cfgaddr, CONF1_ADDR_PORT, IOPORT_F_INOUT, pci_emul_cfgaddr); 2456 2457 static int 2458 pci_emul_cfgdata(struct vmctx *ctx __unused, int in, int port, 2459 int bytes, uint32_t *eax, void *arg __unused) 2460 { 2461 int coff; 2462 2463 assert(bytes == 1 || bytes == 2 || bytes == 4); 2464 2465 coff = cfgoff + (port - CONF1_DATA_PORT); 2466 if (cfgenable) { 2467 pci_cfgrw(in, cfgbus, cfgslot, cfgfunc, coff, bytes, eax); 2468 } else { 2469 /* Ignore accesses to cfgdata if not enabled by cfgaddr */ 2470 if (in) 2471 *eax = 0xffffffff; 2472 } 2473 return (0); 2474 } 2475 2476 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+0, IOPORT_F_INOUT, pci_emul_cfgdata); 2477 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+1, IOPORT_F_INOUT, pci_emul_cfgdata); 2478 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+2, IOPORT_F_INOUT, pci_emul_cfgdata); 2479 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+3, IOPORT_F_INOUT, pci_emul_cfgdata); 2480 #endif 2481 2482 #ifdef BHYVE_SNAPSHOT 2483 /* 2484 * Saves/restores PCI device emulated state. Returns 0 on success. 2485 */ 2486 static int 2487 pci_snapshot_pci_dev(struct vm_snapshot_meta *meta) 2488 { 2489 struct pci_devinst *pi; 2490 int i; 2491 int ret; 2492 2493 pi = meta->dev_data; 2494 2495 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msi.enabled, meta, ret, done); 2496 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msi.addr, meta, ret, done); 2497 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msi.msg_data, meta, ret, done); 2498 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msi.maxmsgnum, meta, ret, done); 2499 2500 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.enabled, meta, ret, done); 2501 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.table_bar, meta, ret, done); 2502 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.pba_bar, meta, ret, done); 2503 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.table_offset, meta, ret, done); 2504 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.table_count, meta, ret, done); 2505 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.pba_offset, meta, ret, done); 2506 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.pba_size, meta, ret, done); 2507 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.function_mask, meta, ret, done); 2508 2509 SNAPSHOT_BUF_OR_LEAVE(pi->pi_cfgdata, sizeof(pi->pi_cfgdata), 2510 meta, ret, done); 2511 2512 for (i = 0; i < (int)nitems(pi->pi_bar); i++) { 2513 SNAPSHOT_VAR_OR_LEAVE(pi->pi_bar[i].type, meta, ret, done); 2514 SNAPSHOT_VAR_OR_LEAVE(pi->pi_bar[i].size, meta, ret, done); 2515 SNAPSHOT_VAR_OR_LEAVE(pi->pi_bar[i].addr, meta, ret, done); 2516 } 2517 2518 /* Restore MSI-X table. */ 2519 for (i = 0; i < pi->pi_msix.table_count; i++) { 2520 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.table[i].addr, 2521 meta, ret, done); 2522 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.table[i].msg_data, 2523 meta, ret, done); 2524 SNAPSHOT_VAR_OR_LEAVE(pi->pi_msix.table[i].vector_control, 2525 meta, ret, done); 2526 } 2527 2528 done: 2529 return (ret); 2530 } 2531 2532 int 2533 pci_snapshot(struct vm_snapshot_meta *meta) 2534 { 2535 struct pci_devemu *pde; 2536 struct pci_devinst *pdi; 2537 int ret; 2538 2539 assert(meta->dev_name != NULL); 2540 2541 pdi = meta->dev_data; 2542 pde = pdi->pi_d; 2543 2544 if (pde->pe_snapshot == NULL) 2545 return (ENOTSUP); 2546 2547 ret = pci_snapshot_pci_dev(meta); 2548 if (ret == 0) 2549 ret = (*pde->pe_snapshot)(meta); 2550 2551 return (ret); 2552 } 2553 2554 int 2555 pci_pause(struct pci_devinst *pdi) 2556 { 2557 struct pci_devemu *pde = pdi->pi_d; 2558 2559 if (pde->pe_pause == NULL) { 2560 /* The pause/resume functionality is optional. */ 2561 return (0); 2562 } 2563 2564 return (*pde->pe_pause)(pdi); 2565 } 2566 2567 int 2568 pci_resume(struct pci_devinst *pdi) 2569 { 2570 struct pci_devemu *pde = pdi->pi_d; 2571 2572 if (pde->pe_resume == NULL) { 2573 /* The pause/resume functionality is optional. */ 2574 return (0); 2575 } 2576 2577 return (*pde->pe_resume)(pdi); 2578 } 2579 #endif 2580 2581 #define PCI_EMUL_TEST 2582 #ifdef PCI_EMUL_TEST 2583 /* 2584 * Define a dummy test device 2585 */ 2586 #define DIOSZ 8 2587 #define DMEMSZ 4096 2588 struct pci_emul_dsoftc { 2589 uint8_t ioregs[DIOSZ]; 2590 uint8_t memregs[2][DMEMSZ]; 2591 }; 2592 2593 #define PCI_EMUL_MSI_MSGS 4 2594 #define PCI_EMUL_MSIX_MSGS 16 2595 2596 static int 2597 pci_emul_dinit(struct pci_devinst *pi, nvlist_t *nvl __unused) 2598 { 2599 int error; 2600 struct pci_emul_dsoftc *sc; 2601 2602 sc = calloc(1, sizeof(struct pci_emul_dsoftc)); 2603 2604 pi->pi_arg = sc; 2605 2606 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x0001); 2607 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x10DD); 2608 pci_set_cfgdata8(pi, PCIR_CLASS, 0x02); 2609 2610 error = pci_emul_add_msicap(pi, PCI_EMUL_MSI_MSGS); 2611 assert(error == 0); 2612 2613 error = pci_emul_alloc_bar(pi, 0, PCIBAR_IO, DIOSZ); 2614 assert(error == 0); 2615 2616 error = pci_emul_alloc_bar(pi, 1, PCIBAR_MEM32, DMEMSZ); 2617 assert(error == 0); 2618 2619 error = pci_emul_alloc_bar(pi, 2, PCIBAR_MEM32, DMEMSZ); 2620 assert(error == 0); 2621 2622 return (0); 2623 } 2624 2625 static void 2626 pci_emul_diow(struct pci_devinst *pi, int baridx, uint64_t offset, int size, 2627 uint64_t value) 2628 { 2629 int i; 2630 struct pci_emul_dsoftc *sc = pi->pi_arg; 2631 2632 if (baridx == 0) { 2633 if (offset + size > DIOSZ) { 2634 printf("diow: iow too large, offset %ld size %d\n", 2635 offset, size); 2636 return; 2637 } 2638 2639 if (size == 1) { 2640 sc->ioregs[offset] = value & 0xff; 2641 } else if (size == 2) { 2642 *(uint16_t *)&sc->ioregs[offset] = value & 0xffff; 2643 } else if (size == 4) { 2644 *(uint32_t *)&sc->ioregs[offset] = value; 2645 } else { 2646 printf("diow: iow unknown size %d\n", size); 2647 } 2648 2649 /* 2650 * Special magic value to generate an interrupt 2651 */ 2652 if (offset == 4 && size == 4 && pci_msi_enabled(pi)) 2653 pci_generate_msi(pi, value % pci_msi_maxmsgnum(pi)); 2654 2655 if (value == 0xabcdef) { 2656 for (i = 0; i < pci_msi_maxmsgnum(pi); i++) 2657 pci_generate_msi(pi, i); 2658 } 2659 } 2660 2661 if (baridx == 1 || baridx == 2) { 2662 if (offset + size > DMEMSZ) { 2663 printf("diow: memw too large, offset %ld size %d\n", 2664 offset, size); 2665 return; 2666 } 2667 2668 i = baridx - 1; /* 'memregs' index */ 2669 2670 if (size == 1) { 2671 sc->memregs[i][offset] = value; 2672 } else if (size == 2) { 2673 *(uint16_t *)&sc->memregs[i][offset] = value; 2674 } else if (size == 4) { 2675 *(uint32_t *)&sc->memregs[i][offset] = value; 2676 } else if (size == 8) { 2677 *(uint64_t *)&sc->memregs[i][offset] = value; 2678 } else { 2679 printf("diow: memw unknown size %d\n", size); 2680 } 2681 2682 /* 2683 * magic interrupt ?? 2684 */ 2685 } 2686 2687 if (baridx > 2 || baridx < 0) { 2688 printf("diow: unknown bar idx %d\n", baridx); 2689 } 2690 } 2691 2692 static uint64_t 2693 pci_emul_dior(struct pci_devinst *pi, int baridx, uint64_t offset, int size) 2694 { 2695 struct pci_emul_dsoftc *sc = pi->pi_arg; 2696 uint32_t value; 2697 int i; 2698 2699 if (baridx == 0) { 2700 if (offset + size > DIOSZ) { 2701 printf("dior: ior too large, offset %ld size %d\n", 2702 offset, size); 2703 return (0); 2704 } 2705 2706 value = 0; 2707 if (size == 1) { 2708 value = sc->ioregs[offset]; 2709 } else if (size == 2) { 2710 value = *(uint16_t *) &sc->ioregs[offset]; 2711 } else if (size == 4) { 2712 value = *(uint32_t *) &sc->ioregs[offset]; 2713 } else { 2714 printf("dior: ior unknown size %d\n", size); 2715 } 2716 } 2717 2718 if (baridx == 1 || baridx == 2) { 2719 if (offset + size > DMEMSZ) { 2720 printf("dior: memr too large, offset %ld size %d\n", 2721 offset, size); 2722 return (0); 2723 } 2724 2725 i = baridx - 1; /* 'memregs' index */ 2726 2727 if (size == 1) { 2728 value = sc->memregs[i][offset]; 2729 } else if (size == 2) { 2730 value = *(uint16_t *) &sc->memregs[i][offset]; 2731 } else if (size == 4) { 2732 value = *(uint32_t *) &sc->memregs[i][offset]; 2733 } else if (size == 8) { 2734 value = *(uint64_t *) &sc->memregs[i][offset]; 2735 } else { 2736 printf("dior: ior unknown size %d\n", size); 2737 } 2738 } 2739 2740 2741 if (baridx > 2 || baridx < 0) { 2742 printf("dior: unknown bar idx %d\n", baridx); 2743 return (0); 2744 } 2745 2746 return (value); 2747 } 2748 2749 #ifdef BHYVE_SNAPSHOT 2750 struct pci_devinst * 2751 pci_next(const struct pci_devinst *cursor) 2752 { 2753 unsigned bus = 0, slot = 0, func = 0; 2754 struct businfo *bi; 2755 struct slotinfo *si; 2756 struct funcinfo *fi; 2757 2758 bus = cursor ? cursor->pi_bus : 0; 2759 slot = cursor ? cursor->pi_slot : 0; 2760 func = cursor ? (cursor->pi_func + 1) : 0; 2761 2762 for (; bus < MAXBUSES; bus++) { 2763 if ((bi = pci_businfo[bus]) == NULL) 2764 continue; 2765 2766 if (slot >= MAXSLOTS) 2767 slot = 0; 2768 2769 for (; slot < MAXSLOTS; slot++) { 2770 si = &bi->slotinfo[slot]; 2771 if (func >= MAXFUNCS) 2772 func = 0; 2773 for (; func < MAXFUNCS; func++) { 2774 fi = &si->si_funcs[func]; 2775 if (fi->fi_devi == NULL) 2776 continue; 2777 2778 return (fi->fi_devi); 2779 } 2780 } 2781 } 2782 2783 return (NULL); 2784 } 2785 2786 static int 2787 pci_emul_snapshot(struct vm_snapshot_meta *meta __unused) 2788 { 2789 return (0); 2790 } 2791 #endif 2792 2793 static const struct pci_devemu pci_dummy = { 2794 .pe_emu = "dummy", 2795 .pe_init = pci_emul_dinit, 2796 .pe_barwrite = pci_emul_diow, 2797 .pe_barread = pci_emul_dior, 2798 #ifdef BHYVE_SNAPSHOT 2799 .pe_snapshot = pci_emul_snapshot, 2800 #endif 2801 }; 2802 PCI_EMUL_SET(pci_dummy); 2803 2804 #endif /* PCI_EMUL_TEST */ 2805