1 /*- 2 * Copyright (c) 2011 NetApp, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/linker_set.h> 34 #include <sys/errno.h> 35 36 #include <ctype.h> 37 #include <pthread.h> 38 #include <stdio.h> 39 #include <stdlib.h> 40 #include <string.h> 41 #include <strings.h> 42 #include <assert.h> 43 #include <stdbool.h> 44 45 #include <machine/vmm.h> 46 #include <vmmapi.h> 47 48 #include "acpi.h" 49 #include "bhyverun.h" 50 #include "inout.h" 51 #include "ioapic.h" 52 #include "mem.h" 53 #include "pci_emul.h" 54 #include "pci_irq.h" 55 #include "pci_lpc.h" 56 57 #define CONF1_ADDR_PORT 0x0cf8 58 #define CONF1_DATA_PORT 0x0cfc 59 60 #define CONF1_ENABLE 0x80000000ul 61 62 #define CFGWRITE(pi,off,val,b) \ 63 do { \ 64 if ((b) == 1) { \ 65 pci_set_cfgdata8((pi),(off),(val)); \ 66 } else if ((b) == 2) { \ 67 pci_set_cfgdata16((pi),(off),(val)); \ 68 } else { \ 69 pci_set_cfgdata32((pi),(off),(val)); \ 70 } \ 71 } while (0) 72 73 #define MAXBUSES (PCI_BUSMAX + 1) 74 #define MAXSLOTS (PCI_SLOTMAX + 1) 75 #define MAXFUNCS (PCI_FUNCMAX + 1) 76 77 struct funcinfo { 78 char *fi_name; 79 char *fi_param; 80 struct pci_devinst *fi_devi; 81 }; 82 83 struct intxinfo { 84 int ii_count; 85 int ii_pirq_pin; 86 int ii_ioapic_irq; 87 }; 88 89 struct slotinfo { 90 struct intxinfo si_intpins[4]; 91 struct funcinfo si_funcs[MAXFUNCS]; 92 }; 93 94 struct businfo { 95 uint16_t iobase, iolimit; /* I/O window */ 96 uint32_t membase32, memlimit32; /* mmio window below 4GB */ 97 uint64_t membase64, memlimit64; /* mmio window above 4GB */ 98 struct slotinfo slotinfo[MAXSLOTS]; 99 }; 100 101 static struct businfo *pci_businfo[MAXBUSES]; 102 103 SET_DECLARE(pci_devemu_set, struct pci_devemu); 104 105 static uint64_t pci_emul_iobase; 106 static uint64_t pci_emul_membase32; 107 static uint64_t pci_emul_membase64; 108 109 #define PCI_EMUL_IOBASE 0x2000 110 #define PCI_EMUL_IOLIMIT 0x10000 111 112 #define PCI_EMUL_ECFG_BASE 0xE0000000 /* 3.5GB */ 113 #define PCI_EMUL_ECFG_SIZE (MAXBUSES * 1024 * 1024) /* 1MB per bus */ 114 SYSRES_MEM(PCI_EMUL_ECFG_BASE, PCI_EMUL_ECFG_SIZE); 115 116 #define PCI_EMUL_MEMLIMIT32 PCI_EMUL_ECFG_BASE 117 118 #define PCI_EMUL_MEMBASE64 0xD000000000UL 119 #define PCI_EMUL_MEMLIMIT64 0xFD00000000UL 120 121 static struct pci_devemu *pci_emul_finddev(char *name); 122 static void pci_lintr_route(struct pci_devinst *pi); 123 static void pci_lintr_update(struct pci_devinst *pi); 124 static void pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus, int slot, 125 int func, int coff, int bytes, uint32_t *val); 126 127 /* 128 * I/O access 129 */ 130 131 /* 132 * Slot options are in the form: 133 * 134 * <bus>:<slot>:<func>,<emul>[,<config>] 135 * <slot>[:<func>],<emul>[,<config>] 136 * 137 * slot is 0..31 138 * func is 0..7 139 * emul is a string describing the type of PCI device e.g. virtio-net 140 * config is an optional string, depending on the device, that can be 141 * used for configuration. 142 * Examples are: 143 * 1,virtio-net,tap0 144 * 3:0,dummy 145 */ 146 static void 147 pci_parse_slot_usage(char *aopt) 148 { 149 150 fprintf(stderr, "Invalid PCI slot info field \"%s\"\n", aopt); 151 } 152 153 int 154 pci_parse_slot(char *opt) 155 { 156 struct businfo *bi; 157 struct slotinfo *si; 158 char *emul, *config, *str, *cp; 159 int error, bnum, snum, fnum; 160 161 error = -1; 162 str = strdup(opt); 163 164 emul = config = NULL; 165 if ((cp = strchr(str, ',')) != NULL) { 166 *cp = '\0'; 167 emul = cp + 1; 168 if ((cp = strchr(emul, ',')) != NULL) { 169 *cp = '\0'; 170 config = cp + 1; 171 } 172 } else { 173 pci_parse_slot_usage(opt); 174 goto done; 175 } 176 177 /* <bus>:<slot>:<func> */ 178 if (sscanf(str, "%d:%d:%d", &bnum, &snum, &fnum) != 3) { 179 bnum = 0; 180 /* <slot>:<func> */ 181 if (sscanf(str, "%d:%d", &snum, &fnum) != 2) { 182 fnum = 0; 183 /* <slot> */ 184 if (sscanf(str, "%d", &snum) != 1) { 185 snum = -1; 186 } 187 } 188 } 189 190 if (bnum < 0 || bnum >= MAXBUSES || snum < 0 || snum >= MAXSLOTS || 191 fnum < 0 || fnum >= MAXFUNCS) { 192 pci_parse_slot_usage(opt); 193 goto done; 194 } 195 196 if (pci_businfo[bnum] == NULL) 197 pci_businfo[bnum] = calloc(1, sizeof(struct businfo)); 198 199 bi = pci_businfo[bnum]; 200 si = &bi->slotinfo[snum]; 201 202 if (si->si_funcs[fnum].fi_name != NULL) { 203 fprintf(stderr, "pci slot %d:%d already occupied!\n", 204 snum, fnum); 205 goto done; 206 } 207 208 if (pci_emul_finddev(emul) == NULL) { 209 fprintf(stderr, "pci slot %d:%d: unknown device \"%s\"\n", 210 snum, fnum, emul); 211 goto done; 212 } 213 214 error = 0; 215 si->si_funcs[fnum].fi_name = emul; 216 si->si_funcs[fnum].fi_param = config; 217 218 done: 219 if (error) 220 free(str); 221 222 return (error); 223 } 224 225 static int 226 pci_valid_pba_offset(struct pci_devinst *pi, uint64_t offset) 227 { 228 229 if (offset < pi->pi_msix.pba_offset) 230 return (0); 231 232 if (offset >= pi->pi_msix.pba_offset + pi->pi_msix.pba_size) { 233 return (0); 234 } 235 236 return (1); 237 } 238 239 int 240 pci_emul_msix_twrite(struct pci_devinst *pi, uint64_t offset, int size, 241 uint64_t value) 242 { 243 int msix_entry_offset; 244 int tab_index; 245 char *dest; 246 247 /* support only 4 or 8 byte writes */ 248 if (size != 4 && size != 8) 249 return (-1); 250 251 /* 252 * Return if table index is beyond what device supports 253 */ 254 tab_index = offset / MSIX_TABLE_ENTRY_SIZE; 255 if (tab_index >= pi->pi_msix.table_count) 256 return (-1); 257 258 msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE; 259 260 /* support only aligned writes */ 261 if ((msix_entry_offset % size) != 0) 262 return (-1); 263 264 dest = (char *)(pi->pi_msix.table + tab_index); 265 dest += msix_entry_offset; 266 267 if (size == 4) 268 *((uint32_t *)dest) = value; 269 else 270 *((uint64_t *)dest) = value; 271 272 return (0); 273 } 274 275 uint64_t 276 pci_emul_msix_tread(struct pci_devinst *pi, uint64_t offset, int size) 277 { 278 char *dest; 279 int msix_entry_offset; 280 int tab_index; 281 uint64_t retval = ~0; 282 283 /* 284 * The PCI standard only allows 4 and 8 byte accesses to the MSI-X 285 * table but we also allow 1 byte access to accomodate reads from 286 * ddb. 287 */ 288 if (size != 1 && size != 4 && size != 8) 289 return (retval); 290 291 msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE; 292 293 /* support only aligned reads */ 294 if ((msix_entry_offset % size) != 0) { 295 return (retval); 296 } 297 298 tab_index = offset / MSIX_TABLE_ENTRY_SIZE; 299 300 if (tab_index < pi->pi_msix.table_count) { 301 /* valid MSI-X Table access */ 302 dest = (char *)(pi->pi_msix.table + tab_index); 303 dest += msix_entry_offset; 304 305 if (size == 1) 306 retval = *((uint8_t *)dest); 307 else if (size == 4) 308 retval = *((uint32_t *)dest); 309 else 310 retval = *((uint64_t *)dest); 311 } else if (pci_valid_pba_offset(pi, offset)) { 312 /* return 0 for PBA access */ 313 retval = 0; 314 } 315 316 return (retval); 317 } 318 319 int 320 pci_msix_table_bar(struct pci_devinst *pi) 321 { 322 323 if (pi->pi_msix.table != NULL) 324 return (pi->pi_msix.table_bar); 325 else 326 return (-1); 327 } 328 329 int 330 pci_msix_pba_bar(struct pci_devinst *pi) 331 { 332 333 if (pi->pi_msix.table != NULL) 334 return (pi->pi_msix.pba_bar); 335 else 336 return (-1); 337 } 338 339 static int 340 pci_emul_io_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes, 341 uint32_t *eax, void *arg) 342 { 343 struct pci_devinst *pdi = arg; 344 struct pci_devemu *pe = pdi->pi_d; 345 uint64_t offset; 346 int i; 347 348 for (i = 0; i <= PCI_BARMAX; i++) { 349 if (pdi->pi_bar[i].type == PCIBAR_IO && 350 port >= pdi->pi_bar[i].addr && 351 port + bytes <= pdi->pi_bar[i].addr + pdi->pi_bar[i].size) { 352 offset = port - pdi->pi_bar[i].addr; 353 if (in) 354 *eax = (*pe->pe_barread)(ctx, vcpu, pdi, i, 355 offset, bytes); 356 else 357 (*pe->pe_barwrite)(ctx, vcpu, pdi, i, offset, 358 bytes, *eax); 359 return (0); 360 } 361 } 362 return (-1); 363 } 364 365 static int 366 pci_emul_mem_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, 367 int size, uint64_t *val, void *arg1, long arg2) 368 { 369 struct pci_devinst *pdi = arg1; 370 struct pci_devemu *pe = pdi->pi_d; 371 uint64_t offset; 372 int bidx = (int) arg2; 373 374 assert(bidx <= PCI_BARMAX); 375 assert(pdi->pi_bar[bidx].type == PCIBAR_MEM32 || 376 pdi->pi_bar[bidx].type == PCIBAR_MEM64); 377 assert(addr >= pdi->pi_bar[bidx].addr && 378 addr + size <= pdi->pi_bar[bidx].addr + pdi->pi_bar[bidx].size); 379 380 offset = addr - pdi->pi_bar[bidx].addr; 381 382 if (dir == MEM_F_WRITE) { 383 if (size == 8) { 384 (*pe->pe_barwrite)(ctx, vcpu, pdi, bidx, offset, 385 4, *val & 0xffffffff); 386 (*pe->pe_barwrite)(ctx, vcpu, pdi, bidx, offset + 4, 387 4, *val >> 32); 388 } else { 389 (*pe->pe_barwrite)(ctx, vcpu, pdi, bidx, offset, 390 size, *val); 391 } 392 } else { 393 if (size == 8) { 394 *val = (*pe->pe_barread)(ctx, vcpu, pdi, bidx, 395 offset, 4); 396 *val |= (*pe->pe_barread)(ctx, vcpu, pdi, bidx, 397 offset + 4, 4) << 32; 398 } else { 399 *val = (*pe->pe_barread)(ctx, vcpu, pdi, bidx, 400 offset, size); 401 } 402 } 403 404 return (0); 405 } 406 407 408 static int 409 pci_emul_alloc_resource(uint64_t *baseptr, uint64_t limit, uint64_t size, 410 uint64_t *addr) 411 { 412 uint64_t base; 413 414 assert((size & (size - 1)) == 0); /* must be a power of 2 */ 415 416 base = roundup2(*baseptr, size); 417 418 if (base + size <= limit) { 419 *addr = base; 420 *baseptr = base + size; 421 return (0); 422 } else 423 return (-1); 424 } 425 426 int 427 pci_emul_alloc_bar(struct pci_devinst *pdi, int idx, enum pcibar_type type, 428 uint64_t size) 429 { 430 431 return (pci_emul_alloc_pbar(pdi, idx, 0, type, size)); 432 } 433 434 /* 435 * Register (or unregister) the MMIO or I/O region associated with the BAR 436 * register 'idx' of an emulated pci device. 437 */ 438 static void 439 modify_bar_registration(struct pci_devinst *pi, int idx, int registration) 440 { 441 int error; 442 struct inout_port iop; 443 struct mem_range mr; 444 445 switch (pi->pi_bar[idx].type) { 446 case PCIBAR_IO: 447 bzero(&iop, sizeof(struct inout_port)); 448 iop.name = pi->pi_name; 449 iop.port = pi->pi_bar[idx].addr; 450 iop.size = pi->pi_bar[idx].size; 451 if (registration) { 452 iop.flags = IOPORT_F_INOUT; 453 iop.handler = pci_emul_io_handler; 454 iop.arg = pi; 455 error = register_inout(&iop); 456 } else 457 error = unregister_inout(&iop); 458 break; 459 case PCIBAR_MEM32: 460 case PCIBAR_MEM64: 461 bzero(&mr, sizeof(struct mem_range)); 462 mr.name = pi->pi_name; 463 mr.base = pi->pi_bar[idx].addr; 464 mr.size = pi->pi_bar[idx].size; 465 if (registration) { 466 mr.flags = MEM_F_RW; 467 mr.handler = pci_emul_mem_handler; 468 mr.arg1 = pi; 469 mr.arg2 = idx; 470 error = register_mem(&mr); 471 } else 472 error = unregister_mem(&mr); 473 break; 474 default: 475 error = EINVAL; 476 break; 477 } 478 assert(error == 0); 479 } 480 481 static void 482 unregister_bar(struct pci_devinst *pi, int idx) 483 { 484 485 modify_bar_registration(pi, idx, 0); 486 } 487 488 static void 489 register_bar(struct pci_devinst *pi, int idx) 490 { 491 492 modify_bar_registration(pi, idx, 1); 493 } 494 495 /* Are we decoding i/o port accesses for the emulated pci device? */ 496 static int 497 porten(struct pci_devinst *pi) 498 { 499 uint16_t cmd; 500 501 cmd = pci_get_cfgdata16(pi, PCIR_COMMAND); 502 503 return (cmd & PCIM_CMD_PORTEN); 504 } 505 506 /* Are we decoding memory accesses for the emulated pci device? */ 507 static int 508 memen(struct pci_devinst *pi) 509 { 510 uint16_t cmd; 511 512 cmd = pci_get_cfgdata16(pi, PCIR_COMMAND); 513 514 return (cmd & PCIM_CMD_MEMEN); 515 } 516 517 /* 518 * Update the MMIO or I/O address that is decoded by the BAR register. 519 * 520 * If the pci device has enabled the address space decoding then intercept 521 * the address range decoded by the BAR register. 522 */ 523 static void 524 update_bar_address(struct pci_devinst *pi, uint64_t addr, int idx, int type) 525 { 526 int decode; 527 528 if (pi->pi_bar[idx].type == PCIBAR_IO) 529 decode = porten(pi); 530 else 531 decode = memen(pi); 532 533 if (decode) 534 unregister_bar(pi, idx); 535 536 switch (type) { 537 case PCIBAR_IO: 538 case PCIBAR_MEM32: 539 pi->pi_bar[idx].addr = addr; 540 break; 541 case PCIBAR_MEM64: 542 pi->pi_bar[idx].addr &= ~0xffffffffUL; 543 pi->pi_bar[idx].addr |= addr; 544 break; 545 case PCIBAR_MEMHI64: 546 pi->pi_bar[idx].addr &= 0xffffffff; 547 pi->pi_bar[idx].addr |= addr; 548 break; 549 default: 550 assert(0); 551 } 552 553 if (decode) 554 register_bar(pi, idx); 555 } 556 557 int 558 pci_emul_alloc_pbar(struct pci_devinst *pdi, int idx, uint64_t hostbase, 559 enum pcibar_type type, uint64_t size) 560 { 561 int error; 562 uint64_t *baseptr, limit, addr, mask, lobits, bar; 563 564 assert(idx >= 0 && idx <= PCI_BARMAX); 565 566 if ((size & (size - 1)) != 0) 567 size = 1UL << flsl(size); /* round up to a power of 2 */ 568 569 /* Enforce minimum BAR sizes required by the PCI standard */ 570 if (type == PCIBAR_IO) { 571 if (size < 4) 572 size = 4; 573 } else { 574 if (size < 16) 575 size = 16; 576 } 577 578 switch (type) { 579 case PCIBAR_NONE: 580 baseptr = NULL; 581 addr = mask = lobits = 0; 582 break; 583 case PCIBAR_IO: 584 baseptr = &pci_emul_iobase; 585 limit = PCI_EMUL_IOLIMIT; 586 mask = PCIM_BAR_IO_BASE; 587 lobits = PCIM_BAR_IO_SPACE; 588 break; 589 case PCIBAR_MEM64: 590 /* 591 * XXX 592 * Some drivers do not work well if the 64-bit BAR is allocated 593 * above 4GB. Allow for this by allocating small requests under 594 * 4GB unless then allocation size is larger than some arbitrary 595 * number (32MB currently). 596 */ 597 if (size > 32 * 1024 * 1024) { 598 /* 599 * XXX special case for device requiring peer-peer DMA 600 */ 601 if (size == 0x100000000UL) 602 baseptr = &hostbase; 603 else 604 baseptr = &pci_emul_membase64; 605 limit = PCI_EMUL_MEMLIMIT64; 606 mask = PCIM_BAR_MEM_BASE; 607 lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 | 608 PCIM_BAR_MEM_PREFETCH; 609 break; 610 } else { 611 baseptr = &pci_emul_membase32; 612 limit = PCI_EMUL_MEMLIMIT32; 613 mask = PCIM_BAR_MEM_BASE; 614 lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64; 615 } 616 break; 617 case PCIBAR_MEM32: 618 baseptr = &pci_emul_membase32; 619 limit = PCI_EMUL_MEMLIMIT32; 620 mask = PCIM_BAR_MEM_BASE; 621 lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32; 622 break; 623 default: 624 printf("pci_emul_alloc_base: invalid bar type %d\n", type); 625 assert(0); 626 } 627 628 if (baseptr != NULL) { 629 error = pci_emul_alloc_resource(baseptr, limit, size, &addr); 630 if (error != 0) 631 return (error); 632 } 633 634 pdi->pi_bar[idx].type = type; 635 pdi->pi_bar[idx].addr = addr; 636 pdi->pi_bar[idx].size = size; 637 638 /* Initialize the BAR register in config space */ 639 bar = (addr & mask) | lobits; 640 pci_set_cfgdata32(pdi, PCIR_BAR(idx), bar); 641 642 if (type == PCIBAR_MEM64) { 643 assert(idx + 1 <= PCI_BARMAX); 644 pdi->pi_bar[idx + 1].type = PCIBAR_MEMHI64; 645 pci_set_cfgdata32(pdi, PCIR_BAR(idx + 1), bar >> 32); 646 } 647 648 register_bar(pdi, idx); 649 650 return (0); 651 } 652 653 #define CAP_START_OFFSET 0x40 654 static int 655 pci_emul_add_capability(struct pci_devinst *pi, u_char *capdata, int caplen) 656 { 657 int i, capoff, reallen; 658 uint16_t sts; 659 660 assert(caplen > 0); 661 662 reallen = roundup2(caplen, 4); /* dword aligned */ 663 664 sts = pci_get_cfgdata16(pi, PCIR_STATUS); 665 if ((sts & PCIM_STATUS_CAPPRESENT) == 0) 666 capoff = CAP_START_OFFSET; 667 else 668 capoff = pi->pi_capend + 1; 669 670 /* Check if we have enough space */ 671 if (capoff + reallen > PCI_REGMAX + 1) 672 return (-1); 673 674 /* Set the previous capability pointer */ 675 if ((sts & PCIM_STATUS_CAPPRESENT) == 0) { 676 pci_set_cfgdata8(pi, PCIR_CAP_PTR, capoff); 677 pci_set_cfgdata16(pi, PCIR_STATUS, sts|PCIM_STATUS_CAPPRESENT); 678 } else 679 pci_set_cfgdata8(pi, pi->pi_prevcap + 1, capoff); 680 681 /* Copy the capability */ 682 for (i = 0; i < caplen; i++) 683 pci_set_cfgdata8(pi, capoff + i, capdata[i]); 684 685 /* Set the next capability pointer */ 686 pci_set_cfgdata8(pi, capoff + 1, 0); 687 688 pi->pi_prevcap = capoff; 689 pi->pi_capend = capoff + reallen - 1; 690 return (0); 691 } 692 693 static struct pci_devemu * 694 pci_emul_finddev(char *name) 695 { 696 struct pci_devemu **pdpp, *pdp; 697 698 SET_FOREACH(pdpp, pci_devemu_set) { 699 pdp = *pdpp; 700 if (!strcmp(pdp->pe_emu, name)) { 701 return (pdp); 702 } 703 } 704 705 return (NULL); 706 } 707 708 static int 709 pci_emul_init(struct vmctx *ctx, struct pci_devemu *pde, int bus, int slot, 710 int func, struct funcinfo *fi) 711 { 712 struct pci_devinst *pdi; 713 int err; 714 715 pdi = calloc(1, sizeof(struct pci_devinst)); 716 717 pdi->pi_vmctx = ctx; 718 pdi->pi_bus = bus; 719 pdi->pi_slot = slot; 720 pdi->pi_func = func; 721 pthread_mutex_init(&pdi->pi_lintr.lock, NULL); 722 pdi->pi_lintr.pin = 0; 723 pdi->pi_lintr.state = IDLE; 724 pdi->pi_lintr.pirq_pin = 0; 725 pdi->pi_lintr.ioapic_irq = 0; 726 pdi->pi_d = pde; 727 snprintf(pdi->pi_name, PI_NAMESZ, "%s-pci-%d", pde->pe_emu, slot); 728 729 /* Disable legacy interrupts */ 730 pci_set_cfgdata8(pdi, PCIR_INTLINE, 255); 731 pci_set_cfgdata8(pdi, PCIR_INTPIN, 0); 732 733 pci_set_cfgdata8(pdi, PCIR_COMMAND, 734 PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); 735 736 err = (*pde->pe_init)(ctx, pdi, fi->fi_param); 737 if (err == 0) 738 fi->fi_devi = pdi; 739 else 740 free(pdi); 741 742 return (err); 743 } 744 745 void 746 pci_populate_msicap(struct msicap *msicap, int msgnum, int nextptr) 747 { 748 int mmc; 749 750 CTASSERT(sizeof(struct msicap) == 14); 751 752 /* Number of msi messages must be a power of 2 between 1 and 32 */ 753 assert((msgnum & (msgnum - 1)) == 0 && msgnum >= 1 && msgnum <= 32); 754 mmc = ffs(msgnum) - 1; 755 756 bzero(msicap, sizeof(struct msicap)); 757 msicap->capid = PCIY_MSI; 758 msicap->nextptr = nextptr; 759 msicap->msgctrl = PCIM_MSICTRL_64BIT | (mmc << 1); 760 } 761 762 int 763 pci_emul_add_msicap(struct pci_devinst *pi, int msgnum) 764 { 765 struct msicap msicap; 766 767 pci_populate_msicap(&msicap, msgnum, 0); 768 769 return (pci_emul_add_capability(pi, (u_char *)&msicap, sizeof(msicap))); 770 } 771 772 static void 773 pci_populate_msixcap(struct msixcap *msixcap, int msgnum, int barnum, 774 uint32_t msix_tab_size) 775 { 776 CTASSERT(sizeof(struct msixcap) == 12); 777 778 assert(msix_tab_size % 4096 == 0); 779 780 bzero(msixcap, sizeof(struct msixcap)); 781 msixcap->capid = PCIY_MSIX; 782 783 /* 784 * Message Control Register, all fields set to 785 * zero except for the Table Size. 786 * Note: Table size N is encoded as N-1 787 */ 788 msixcap->msgctrl = msgnum - 1; 789 790 /* 791 * MSI-X BAR setup: 792 * - MSI-X table start at offset 0 793 * - PBA table starts at a 4K aligned offset after the MSI-X table 794 */ 795 msixcap->table_info = barnum & PCIM_MSIX_BIR_MASK; 796 msixcap->pba_info = msix_tab_size | (barnum & PCIM_MSIX_BIR_MASK); 797 } 798 799 static void 800 pci_msix_table_init(struct pci_devinst *pi, int table_entries) 801 { 802 int i, table_size; 803 804 assert(table_entries > 0); 805 assert(table_entries <= MAX_MSIX_TABLE_ENTRIES); 806 807 table_size = table_entries * MSIX_TABLE_ENTRY_SIZE; 808 pi->pi_msix.table = calloc(1, table_size); 809 810 /* set mask bit of vector control register */ 811 for (i = 0; i < table_entries; i++) 812 pi->pi_msix.table[i].vector_control |= PCIM_MSIX_VCTRL_MASK; 813 } 814 815 int 816 pci_emul_add_msixcap(struct pci_devinst *pi, int msgnum, int barnum) 817 { 818 uint32_t tab_size; 819 struct msixcap msixcap; 820 821 assert(msgnum >= 1 && msgnum <= MAX_MSIX_TABLE_ENTRIES); 822 assert(barnum >= 0 && barnum <= PCIR_MAX_BAR_0); 823 824 tab_size = msgnum * MSIX_TABLE_ENTRY_SIZE; 825 826 /* Align table size to nearest 4K */ 827 tab_size = roundup2(tab_size, 4096); 828 829 pi->pi_msix.table_bar = barnum; 830 pi->pi_msix.pba_bar = barnum; 831 pi->pi_msix.table_offset = 0; 832 pi->pi_msix.table_count = msgnum; 833 pi->pi_msix.pba_offset = tab_size; 834 pi->pi_msix.pba_size = PBA_SIZE(msgnum); 835 836 pci_msix_table_init(pi, msgnum); 837 838 pci_populate_msixcap(&msixcap, msgnum, barnum, tab_size); 839 840 /* allocate memory for MSI-X Table and PBA */ 841 pci_emul_alloc_bar(pi, barnum, PCIBAR_MEM32, 842 tab_size + pi->pi_msix.pba_size); 843 844 return (pci_emul_add_capability(pi, (u_char *)&msixcap, 845 sizeof(msixcap))); 846 } 847 848 void 849 msixcap_cfgwrite(struct pci_devinst *pi, int capoff, int offset, 850 int bytes, uint32_t val) 851 { 852 uint16_t msgctrl, rwmask; 853 int off, table_bar; 854 855 off = offset - capoff; 856 table_bar = pi->pi_msix.table_bar; 857 /* Message Control Register */ 858 if (off == 2 && bytes == 2) { 859 rwmask = PCIM_MSIXCTRL_MSIX_ENABLE | PCIM_MSIXCTRL_FUNCTION_MASK; 860 msgctrl = pci_get_cfgdata16(pi, offset); 861 msgctrl &= ~rwmask; 862 msgctrl |= val & rwmask; 863 val = msgctrl; 864 865 pi->pi_msix.enabled = val & PCIM_MSIXCTRL_MSIX_ENABLE; 866 pi->pi_msix.function_mask = val & PCIM_MSIXCTRL_FUNCTION_MASK; 867 pci_lintr_update(pi); 868 } 869 870 CFGWRITE(pi, offset, val, bytes); 871 } 872 873 void 874 msicap_cfgwrite(struct pci_devinst *pi, int capoff, int offset, 875 int bytes, uint32_t val) 876 { 877 uint16_t msgctrl, rwmask, msgdata, mme; 878 uint32_t addrlo; 879 880 /* 881 * If guest is writing to the message control register make sure 882 * we do not overwrite read-only fields. 883 */ 884 if ((offset - capoff) == 2 && bytes == 2) { 885 rwmask = PCIM_MSICTRL_MME_MASK | PCIM_MSICTRL_MSI_ENABLE; 886 msgctrl = pci_get_cfgdata16(pi, offset); 887 msgctrl &= ~rwmask; 888 msgctrl |= val & rwmask; 889 val = msgctrl; 890 891 addrlo = pci_get_cfgdata32(pi, capoff + 4); 892 if (msgctrl & PCIM_MSICTRL_64BIT) 893 msgdata = pci_get_cfgdata16(pi, capoff + 12); 894 else 895 msgdata = pci_get_cfgdata16(pi, capoff + 8); 896 897 mme = msgctrl & PCIM_MSICTRL_MME_MASK; 898 pi->pi_msi.enabled = msgctrl & PCIM_MSICTRL_MSI_ENABLE ? 1 : 0; 899 if (pi->pi_msi.enabled) { 900 pi->pi_msi.addr = addrlo; 901 pi->pi_msi.msg_data = msgdata; 902 pi->pi_msi.maxmsgnum = 1 << (mme >> 4); 903 } else { 904 pi->pi_msi.maxmsgnum = 0; 905 } 906 pci_lintr_update(pi); 907 } 908 909 CFGWRITE(pi, offset, val, bytes); 910 } 911 912 void 913 pciecap_cfgwrite(struct pci_devinst *pi, int capoff, int offset, 914 int bytes, uint32_t val) 915 { 916 917 /* XXX don't write to the readonly parts */ 918 CFGWRITE(pi, offset, val, bytes); 919 } 920 921 #define PCIECAP_VERSION 0x2 922 int 923 pci_emul_add_pciecap(struct pci_devinst *pi, int type) 924 { 925 int err; 926 struct pciecap pciecap; 927 928 CTASSERT(sizeof(struct pciecap) == 60); 929 930 if (type != PCIEM_TYPE_ROOT_PORT) 931 return (-1); 932 933 bzero(&pciecap, sizeof(pciecap)); 934 935 pciecap.capid = PCIY_EXPRESS; 936 pciecap.pcie_capabilities = PCIECAP_VERSION | PCIEM_TYPE_ROOT_PORT; 937 pciecap.link_capabilities = 0x411; /* gen1, x1 */ 938 pciecap.link_status = 0x11; /* gen1, x1 */ 939 940 err = pci_emul_add_capability(pi, (u_char *)&pciecap, sizeof(pciecap)); 941 return (err); 942 } 943 944 /* 945 * This function assumes that 'coff' is in the capabilities region of the 946 * config space. 947 */ 948 static void 949 pci_emul_capwrite(struct pci_devinst *pi, int offset, int bytes, uint32_t val) 950 { 951 int capid; 952 uint8_t capoff, nextoff; 953 954 /* Do not allow un-aligned writes */ 955 if ((offset & (bytes - 1)) != 0) 956 return; 957 958 /* Find the capability that we want to update */ 959 capoff = CAP_START_OFFSET; 960 while (1) { 961 nextoff = pci_get_cfgdata8(pi, capoff + 1); 962 if (nextoff == 0) 963 break; 964 if (offset >= capoff && offset < nextoff) 965 break; 966 967 capoff = nextoff; 968 } 969 assert(offset >= capoff); 970 971 /* 972 * Capability ID and Next Capability Pointer are readonly. 973 * However, some o/s's do 4-byte writes that include these. 974 * For this case, trim the write back to 2 bytes and adjust 975 * the data. 976 */ 977 if (offset == capoff || offset == capoff + 1) { 978 if (offset == capoff && bytes == 4) { 979 bytes = 2; 980 offset += 2; 981 val >>= 16; 982 } else 983 return; 984 } 985 986 capid = pci_get_cfgdata8(pi, capoff); 987 switch (capid) { 988 case PCIY_MSI: 989 msicap_cfgwrite(pi, capoff, offset, bytes, val); 990 break; 991 case PCIY_MSIX: 992 msixcap_cfgwrite(pi, capoff, offset, bytes, val); 993 break; 994 case PCIY_EXPRESS: 995 pciecap_cfgwrite(pi, capoff, offset, bytes, val); 996 break; 997 default: 998 break; 999 } 1000 } 1001 1002 static int 1003 pci_emul_iscap(struct pci_devinst *pi, int offset) 1004 { 1005 uint16_t sts; 1006 1007 sts = pci_get_cfgdata16(pi, PCIR_STATUS); 1008 if ((sts & PCIM_STATUS_CAPPRESENT) != 0) { 1009 if (offset >= CAP_START_OFFSET && offset <= pi->pi_capend) 1010 return (1); 1011 } 1012 return (0); 1013 } 1014 1015 static int 1016 pci_emul_fallback_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, 1017 int size, uint64_t *val, void *arg1, long arg2) 1018 { 1019 /* 1020 * Ignore writes; return 0xff's for reads. The mem read code 1021 * will take care of truncating to the correct size. 1022 */ 1023 if (dir == MEM_F_READ) { 1024 *val = 0xffffffffffffffff; 1025 } 1026 1027 return (0); 1028 } 1029 1030 static int 1031 pci_emul_ecfg_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, 1032 int bytes, uint64_t *val, void *arg1, long arg2) 1033 { 1034 int bus, slot, func, coff, in; 1035 1036 coff = addr & 0xfff; 1037 func = (addr >> 12) & 0x7; 1038 slot = (addr >> 15) & 0x1f; 1039 bus = (addr >> 20) & 0xff; 1040 in = (dir == MEM_F_READ); 1041 if (in) 1042 *val = ~0UL; 1043 pci_cfgrw(ctx, vcpu, in, bus, slot, func, coff, bytes, (uint32_t *)val); 1044 return (0); 1045 } 1046 1047 uint64_t 1048 pci_ecfg_base(void) 1049 { 1050 1051 return (PCI_EMUL_ECFG_BASE); 1052 } 1053 1054 #define BUSIO_ROUNDUP 32 1055 #define BUSMEM_ROUNDUP (1024 * 1024) 1056 1057 int 1058 init_pci(struct vmctx *ctx) 1059 { 1060 struct mem_range mr; 1061 struct pci_devemu *pde; 1062 struct businfo *bi; 1063 struct slotinfo *si; 1064 struct funcinfo *fi; 1065 size_t lowmem; 1066 int bus, slot, func; 1067 int error; 1068 1069 pci_emul_iobase = PCI_EMUL_IOBASE; 1070 pci_emul_membase32 = vm_get_lowmem_limit(ctx); 1071 pci_emul_membase64 = PCI_EMUL_MEMBASE64; 1072 1073 for (bus = 0; bus < MAXBUSES; bus++) { 1074 if ((bi = pci_businfo[bus]) == NULL) 1075 continue; 1076 /* 1077 * Keep track of the i/o and memory resources allocated to 1078 * this bus. 1079 */ 1080 bi->iobase = pci_emul_iobase; 1081 bi->membase32 = pci_emul_membase32; 1082 bi->membase64 = pci_emul_membase64; 1083 1084 for (slot = 0; slot < MAXSLOTS; slot++) { 1085 si = &bi->slotinfo[slot]; 1086 for (func = 0; func < MAXFUNCS; func++) { 1087 fi = &si->si_funcs[func]; 1088 if (fi->fi_name == NULL) 1089 continue; 1090 pde = pci_emul_finddev(fi->fi_name); 1091 assert(pde != NULL); 1092 error = pci_emul_init(ctx, pde, bus, slot, 1093 func, fi); 1094 if (error) 1095 return (error); 1096 } 1097 } 1098 1099 /* 1100 * Add some slop to the I/O and memory resources decoded by 1101 * this bus to give a guest some flexibility if it wants to 1102 * reprogram the BARs. 1103 */ 1104 pci_emul_iobase += BUSIO_ROUNDUP; 1105 pci_emul_iobase = roundup2(pci_emul_iobase, BUSIO_ROUNDUP); 1106 bi->iolimit = pci_emul_iobase; 1107 1108 pci_emul_membase32 += BUSMEM_ROUNDUP; 1109 pci_emul_membase32 = roundup2(pci_emul_membase32, 1110 BUSMEM_ROUNDUP); 1111 bi->memlimit32 = pci_emul_membase32; 1112 1113 pci_emul_membase64 += BUSMEM_ROUNDUP; 1114 pci_emul_membase64 = roundup2(pci_emul_membase64, 1115 BUSMEM_ROUNDUP); 1116 bi->memlimit64 = pci_emul_membase64; 1117 } 1118 1119 /* 1120 * PCI backends are initialized before routing INTx interrupts 1121 * so that LPC devices are able to reserve ISA IRQs before 1122 * routing PIRQ pins. 1123 */ 1124 for (bus = 0; bus < MAXBUSES; bus++) { 1125 if ((bi = pci_businfo[bus]) == NULL) 1126 continue; 1127 1128 for (slot = 0; slot < MAXSLOTS; slot++) { 1129 si = &bi->slotinfo[slot]; 1130 for (func = 0; func < MAXFUNCS; func++) { 1131 fi = &si->si_funcs[func]; 1132 if (fi->fi_devi == NULL) 1133 continue; 1134 pci_lintr_route(fi->fi_devi); 1135 } 1136 } 1137 } 1138 lpc_pirq_routed(); 1139 1140 /* 1141 * The guest physical memory map looks like the following: 1142 * [0, lowmem) guest system memory 1143 * [lowmem, lowmem_limit) memory hole (may be absent) 1144 * [lowmem_limit, 0xE0000000) PCI hole (32-bit BAR allocation) 1145 * [0xE0000000, 0xF0000000) PCI extended config window 1146 * [0xF0000000, 4GB) LAPIC, IOAPIC, HPET, firmware 1147 * [4GB, 4GB + highmem) 1148 */ 1149 1150 /* 1151 * Accesses to memory addresses that are not allocated to system 1152 * memory or PCI devices return 0xff's. 1153 */ 1154 lowmem = vm_get_lowmem_size(ctx); 1155 bzero(&mr, sizeof(struct mem_range)); 1156 mr.name = "PCI hole"; 1157 mr.flags = MEM_F_RW | MEM_F_IMMUTABLE; 1158 mr.base = lowmem; 1159 mr.size = (4ULL * 1024 * 1024 * 1024) - lowmem; 1160 mr.handler = pci_emul_fallback_handler; 1161 error = register_mem_fallback(&mr); 1162 assert(error == 0); 1163 1164 /* PCI extended config space */ 1165 bzero(&mr, sizeof(struct mem_range)); 1166 mr.name = "PCI ECFG"; 1167 mr.flags = MEM_F_RW | MEM_F_IMMUTABLE; 1168 mr.base = PCI_EMUL_ECFG_BASE; 1169 mr.size = PCI_EMUL_ECFG_SIZE; 1170 mr.handler = pci_emul_ecfg_handler; 1171 error = register_mem(&mr); 1172 assert(error == 0); 1173 1174 return (0); 1175 } 1176 1177 static void 1178 pci_apic_prt_entry(int bus, int slot, int pin, int pirq_pin, int ioapic_irq, 1179 void *arg) 1180 { 1181 1182 dsdt_line(" Package ()"); 1183 dsdt_line(" {"); 1184 dsdt_line(" 0x%X,", slot << 16 | 0xffff); 1185 dsdt_line(" 0x%02X,", pin - 1); 1186 dsdt_line(" Zero,"); 1187 dsdt_line(" 0x%X", ioapic_irq); 1188 dsdt_line(" },"); 1189 } 1190 1191 static void 1192 pci_pirq_prt_entry(int bus, int slot, int pin, int pirq_pin, int ioapic_irq, 1193 void *arg) 1194 { 1195 char *name; 1196 1197 name = lpc_pirq_name(pirq_pin); 1198 if (name == NULL) 1199 return; 1200 dsdt_line(" Package ()"); 1201 dsdt_line(" {"); 1202 dsdt_line(" 0x%X,", slot << 16 | 0xffff); 1203 dsdt_line(" 0x%02X,", pin - 1); 1204 dsdt_line(" %s,", name); 1205 dsdt_line(" 0x00"); 1206 dsdt_line(" },"); 1207 free(name); 1208 } 1209 1210 /* 1211 * A bhyve virtual machine has a flat PCI hierarchy with a root port 1212 * corresponding to each PCI bus. 1213 */ 1214 static void 1215 pci_bus_write_dsdt(int bus) 1216 { 1217 struct businfo *bi; 1218 struct slotinfo *si; 1219 struct pci_devinst *pi; 1220 int count, func, slot; 1221 1222 /* 1223 * If there are no devices on this 'bus' then just return. 1224 */ 1225 if ((bi = pci_businfo[bus]) == NULL) { 1226 /* 1227 * Bus 0 is special because it decodes the I/O ports used 1228 * for PCI config space access even if there are no devices 1229 * on it. 1230 */ 1231 if (bus != 0) 1232 return; 1233 } 1234 1235 dsdt_line(" Device (PC%02X)", bus); 1236 dsdt_line(" {"); 1237 dsdt_line(" Name (_HID, EisaId (\"PNP0A03\"))"); 1238 dsdt_line(" Name (_ADR, Zero)"); 1239 1240 dsdt_line(" Method (_BBN, 0, NotSerialized)"); 1241 dsdt_line(" {"); 1242 dsdt_line(" Return (0x%08X)", bus); 1243 dsdt_line(" }"); 1244 dsdt_line(" Name (_CRS, ResourceTemplate ()"); 1245 dsdt_line(" {"); 1246 dsdt_line(" WordBusNumber (ResourceProducer, MinFixed, " 1247 "MaxFixed, PosDecode,"); 1248 dsdt_line(" 0x0000, // Granularity"); 1249 dsdt_line(" 0x%04X, // Range Minimum", bus); 1250 dsdt_line(" 0x%04X, // Range Maximum", bus); 1251 dsdt_line(" 0x0000, // Translation Offset"); 1252 dsdt_line(" 0x0001, // Length"); 1253 dsdt_line(" ,, )"); 1254 1255 if (bus == 0) { 1256 dsdt_indent(3); 1257 dsdt_fixed_ioport(0xCF8, 8); 1258 dsdt_unindent(3); 1259 1260 dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, " 1261 "PosDecode, EntireRange,"); 1262 dsdt_line(" 0x0000, // Granularity"); 1263 dsdt_line(" 0x0000, // Range Minimum"); 1264 dsdt_line(" 0x0CF7, // Range Maximum"); 1265 dsdt_line(" 0x0000, // Translation Offset"); 1266 dsdt_line(" 0x0CF8, // Length"); 1267 dsdt_line(" ,, , TypeStatic)"); 1268 1269 dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, " 1270 "PosDecode, EntireRange,"); 1271 dsdt_line(" 0x0000, // Granularity"); 1272 dsdt_line(" 0x0D00, // Range Minimum"); 1273 dsdt_line(" 0x%04X, // Range Maximum", 1274 PCI_EMUL_IOBASE - 1); 1275 dsdt_line(" 0x0000, // Translation Offset"); 1276 dsdt_line(" 0x%04X, // Length", 1277 PCI_EMUL_IOBASE - 0x0D00); 1278 dsdt_line(" ,, , TypeStatic)"); 1279 1280 if (bi == NULL) { 1281 dsdt_line(" })"); 1282 goto done; 1283 } 1284 } 1285 assert(bi != NULL); 1286 1287 /* i/o window */ 1288 dsdt_line(" WordIO (ResourceProducer, MinFixed, MaxFixed, " 1289 "PosDecode, EntireRange,"); 1290 dsdt_line(" 0x0000, // Granularity"); 1291 dsdt_line(" 0x%04X, // Range Minimum", bi->iobase); 1292 dsdt_line(" 0x%04X, // Range Maximum", 1293 bi->iolimit - 1); 1294 dsdt_line(" 0x0000, // Translation Offset"); 1295 dsdt_line(" 0x%04X, // Length", 1296 bi->iolimit - bi->iobase); 1297 dsdt_line(" ,, , TypeStatic)"); 1298 1299 /* mmio window (32-bit) */ 1300 dsdt_line(" DWordMemory (ResourceProducer, PosDecode, " 1301 "MinFixed, MaxFixed, NonCacheable, ReadWrite,"); 1302 dsdt_line(" 0x00000000, // Granularity"); 1303 dsdt_line(" 0x%08X, // Range Minimum\n", bi->membase32); 1304 dsdt_line(" 0x%08X, // Range Maximum\n", 1305 bi->memlimit32 - 1); 1306 dsdt_line(" 0x00000000, // Translation Offset"); 1307 dsdt_line(" 0x%08X, // Length\n", 1308 bi->memlimit32 - bi->membase32); 1309 dsdt_line(" ,, , AddressRangeMemory, TypeStatic)"); 1310 1311 /* mmio window (64-bit) */ 1312 dsdt_line(" QWordMemory (ResourceProducer, PosDecode, " 1313 "MinFixed, MaxFixed, NonCacheable, ReadWrite,"); 1314 dsdt_line(" 0x0000000000000000, // Granularity"); 1315 dsdt_line(" 0x%016lX, // Range Minimum\n", bi->membase64); 1316 dsdt_line(" 0x%016lX, // Range Maximum\n", 1317 bi->memlimit64 - 1); 1318 dsdt_line(" 0x0000000000000000, // Translation Offset"); 1319 dsdt_line(" 0x%016lX, // Length\n", 1320 bi->memlimit64 - bi->membase64); 1321 dsdt_line(" ,, , AddressRangeMemory, TypeStatic)"); 1322 dsdt_line(" })"); 1323 1324 count = pci_count_lintr(bus); 1325 if (count != 0) { 1326 dsdt_indent(2); 1327 dsdt_line("Name (PPRT, Package ()"); 1328 dsdt_line("{"); 1329 pci_walk_lintr(bus, pci_pirq_prt_entry, NULL); 1330 dsdt_line("})"); 1331 dsdt_line("Name (APRT, Package ()"); 1332 dsdt_line("{"); 1333 pci_walk_lintr(bus, pci_apic_prt_entry, NULL); 1334 dsdt_line("})"); 1335 dsdt_line("Method (_PRT, 0, NotSerialized)"); 1336 dsdt_line("{"); 1337 dsdt_line(" If (PICM)"); 1338 dsdt_line(" {"); 1339 dsdt_line(" Return (APRT)"); 1340 dsdt_line(" }"); 1341 dsdt_line(" Else"); 1342 dsdt_line(" {"); 1343 dsdt_line(" Return (PPRT)"); 1344 dsdt_line(" }"); 1345 dsdt_line("}"); 1346 dsdt_unindent(2); 1347 } 1348 1349 dsdt_indent(2); 1350 for (slot = 0; slot < MAXSLOTS; slot++) { 1351 si = &bi->slotinfo[slot]; 1352 for (func = 0; func < MAXFUNCS; func++) { 1353 pi = si->si_funcs[func].fi_devi; 1354 if (pi != NULL && pi->pi_d->pe_write_dsdt != NULL) 1355 pi->pi_d->pe_write_dsdt(pi); 1356 } 1357 } 1358 dsdt_unindent(2); 1359 done: 1360 dsdt_line(" }"); 1361 } 1362 1363 void 1364 pci_write_dsdt(void) 1365 { 1366 int bus; 1367 1368 dsdt_indent(1); 1369 dsdt_line("Name (PICM, 0x00)"); 1370 dsdt_line("Method (_PIC, 1, NotSerialized)"); 1371 dsdt_line("{"); 1372 dsdt_line(" Store (Arg0, PICM)"); 1373 dsdt_line("}"); 1374 dsdt_line(""); 1375 dsdt_line("Scope (_SB)"); 1376 dsdt_line("{"); 1377 for (bus = 0; bus < MAXBUSES; bus++) 1378 pci_bus_write_dsdt(bus); 1379 dsdt_line("}"); 1380 dsdt_unindent(1); 1381 } 1382 1383 int 1384 pci_bus_configured(int bus) 1385 { 1386 assert(bus >= 0 && bus < MAXBUSES); 1387 return (pci_businfo[bus] != NULL); 1388 } 1389 1390 int 1391 pci_msi_enabled(struct pci_devinst *pi) 1392 { 1393 return (pi->pi_msi.enabled); 1394 } 1395 1396 int 1397 pci_msi_maxmsgnum(struct pci_devinst *pi) 1398 { 1399 if (pi->pi_msi.enabled) 1400 return (pi->pi_msi.maxmsgnum); 1401 else 1402 return (0); 1403 } 1404 1405 int 1406 pci_msix_enabled(struct pci_devinst *pi) 1407 { 1408 1409 return (pi->pi_msix.enabled && !pi->pi_msi.enabled); 1410 } 1411 1412 void 1413 pci_generate_msix(struct pci_devinst *pi, int index) 1414 { 1415 struct msix_table_entry *mte; 1416 1417 if (!pci_msix_enabled(pi)) 1418 return; 1419 1420 if (pi->pi_msix.function_mask) 1421 return; 1422 1423 if (index >= pi->pi_msix.table_count) 1424 return; 1425 1426 mte = &pi->pi_msix.table[index]; 1427 if ((mte->vector_control & PCIM_MSIX_VCTRL_MASK) == 0) { 1428 /* XXX Set PBA bit if interrupt is disabled */ 1429 vm_lapic_msi(pi->pi_vmctx, mte->addr, mte->msg_data); 1430 } 1431 } 1432 1433 void 1434 pci_generate_msi(struct pci_devinst *pi, int index) 1435 { 1436 1437 if (pci_msi_enabled(pi) && index < pci_msi_maxmsgnum(pi)) { 1438 vm_lapic_msi(pi->pi_vmctx, pi->pi_msi.addr, 1439 pi->pi_msi.msg_data + index); 1440 } 1441 } 1442 1443 static bool 1444 pci_lintr_permitted(struct pci_devinst *pi) 1445 { 1446 uint16_t cmd; 1447 1448 cmd = pci_get_cfgdata16(pi, PCIR_COMMAND); 1449 return (!(pi->pi_msi.enabled || pi->pi_msix.enabled || 1450 (cmd & PCIM_CMD_INTxDIS))); 1451 } 1452 1453 void 1454 pci_lintr_request(struct pci_devinst *pi) 1455 { 1456 struct businfo *bi; 1457 struct slotinfo *si; 1458 int bestpin, bestcount, pin; 1459 1460 bi = pci_businfo[pi->pi_bus]; 1461 assert(bi != NULL); 1462 1463 /* 1464 * Just allocate a pin from our slot. The pin will be 1465 * assigned IRQs later when interrupts are routed. 1466 */ 1467 si = &bi->slotinfo[pi->pi_slot]; 1468 bestpin = 0; 1469 bestcount = si->si_intpins[0].ii_count; 1470 for (pin = 1; pin < 4; pin++) { 1471 if (si->si_intpins[pin].ii_count < bestcount) { 1472 bestpin = pin; 1473 bestcount = si->si_intpins[pin].ii_count; 1474 } 1475 } 1476 1477 si->si_intpins[bestpin].ii_count++; 1478 pi->pi_lintr.pin = bestpin + 1; 1479 pci_set_cfgdata8(pi, PCIR_INTPIN, bestpin + 1); 1480 } 1481 1482 static void 1483 pci_lintr_route(struct pci_devinst *pi) 1484 { 1485 struct businfo *bi; 1486 struct intxinfo *ii; 1487 1488 if (pi->pi_lintr.pin == 0) 1489 return; 1490 1491 bi = pci_businfo[pi->pi_bus]; 1492 assert(bi != NULL); 1493 ii = &bi->slotinfo[pi->pi_slot].si_intpins[pi->pi_lintr.pin - 1]; 1494 1495 /* 1496 * Attempt to allocate an I/O APIC pin for this intpin if one 1497 * is not yet assigned. 1498 */ 1499 if (ii->ii_ioapic_irq == 0) 1500 ii->ii_ioapic_irq = ioapic_pci_alloc_irq(); 1501 assert(ii->ii_ioapic_irq > 0); 1502 1503 /* 1504 * Attempt to allocate a PIRQ pin for this intpin if one is 1505 * not yet assigned. 1506 */ 1507 if (ii->ii_pirq_pin == 0) 1508 ii->ii_pirq_pin = pirq_alloc_pin(pi->pi_vmctx); 1509 assert(ii->ii_pirq_pin > 0); 1510 1511 pi->pi_lintr.ioapic_irq = ii->ii_ioapic_irq; 1512 pi->pi_lintr.pirq_pin = ii->ii_pirq_pin; 1513 pci_set_cfgdata8(pi, PCIR_INTLINE, pirq_irq(ii->ii_pirq_pin)); 1514 } 1515 1516 void 1517 pci_lintr_assert(struct pci_devinst *pi) 1518 { 1519 1520 assert(pi->pi_lintr.pin > 0); 1521 1522 pthread_mutex_lock(&pi->pi_lintr.lock); 1523 if (pi->pi_lintr.state == IDLE) { 1524 if (pci_lintr_permitted(pi)) { 1525 pi->pi_lintr.state = ASSERTED; 1526 pci_irq_assert(pi); 1527 } else 1528 pi->pi_lintr.state = PENDING; 1529 } 1530 pthread_mutex_unlock(&pi->pi_lintr.lock); 1531 } 1532 1533 void 1534 pci_lintr_deassert(struct pci_devinst *pi) 1535 { 1536 1537 assert(pi->pi_lintr.pin > 0); 1538 1539 pthread_mutex_lock(&pi->pi_lintr.lock); 1540 if (pi->pi_lintr.state == ASSERTED) { 1541 pi->pi_lintr.state = IDLE; 1542 pci_irq_deassert(pi); 1543 } else if (pi->pi_lintr.state == PENDING) 1544 pi->pi_lintr.state = IDLE; 1545 pthread_mutex_unlock(&pi->pi_lintr.lock); 1546 } 1547 1548 static void 1549 pci_lintr_update(struct pci_devinst *pi) 1550 { 1551 1552 pthread_mutex_lock(&pi->pi_lintr.lock); 1553 if (pi->pi_lintr.state == ASSERTED && !pci_lintr_permitted(pi)) { 1554 pci_irq_deassert(pi); 1555 pi->pi_lintr.state = PENDING; 1556 } else if (pi->pi_lintr.state == PENDING && pci_lintr_permitted(pi)) { 1557 pi->pi_lintr.state = ASSERTED; 1558 pci_irq_assert(pi); 1559 } 1560 pthread_mutex_unlock(&pi->pi_lintr.lock); 1561 } 1562 1563 int 1564 pci_count_lintr(int bus) 1565 { 1566 int count, slot, pin; 1567 struct slotinfo *slotinfo; 1568 1569 count = 0; 1570 if (pci_businfo[bus] != NULL) { 1571 for (slot = 0; slot < MAXSLOTS; slot++) { 1572 slotinfo = &pci_businfo[bus]->slotinfo[slot]; 1573 for (pin = 0; pin < 4; pin++) { 1574 if (slotinfo->si_intpins[pin].ii_count != 0) 1575 count++; 1576 } 1577 } 1578 } 1579 return (count); 1580 } 1581 1582 void 1583 pci_walk_lintr(int bus, pci_lintr_cb cb, void *arg) 1584 { 1585 struct businfo *bi; 1586 struct slotinfo *si; 1587 struct intxinfo *ii; 1588 int slot, pin; 1589 1590 if ((bi = pci_businfo[bus]) == NULL) 1591 return; 1592 1593 for (slot = 0; slot < MAXSLOTS; slot++) { 1594 si = &bi->slotinfo[slot]; 1595 for (pin = 0; pin < 4; pin++) { 1596 ii = &si->si_intpins[pin]; 1597 if (ii->ii_count != 0) 1598 cb(bus, slot, pin + 1, ii->ii_pirq_pin, 1599 ii->ii_ioapic_irq, arg); 1600 } 1601 } 1602 } 1603 1604 /* 1605 * Return 1 if the emulated device in 'slot' is a multi-function device. 1606 * Return 0 otherwise. 1607 */ 1608 static int 1609 pci_emul_is_mfdev(int bus, int slot) 1610 { 1611 struct businfo *bi; 1612 struct slotinfo *si; 1613 int f, numfuncs; 1614 1615 numfuncs = 0; 1616 if ((bi = pci_businfo[bus]) != NULL) { 1617 si = &bi->slotinfo[slot]; 1618 for (f = 0; f < MAXFUNCS; f++) { 1619 if (si->si_funcs[f].fi_devi != NULL) { 1620 numfuncs++; 1621 } 1622 } 1623 } 1624 return (numfuncs > 1); 1625 } 1626 1627 /* 1628 * Ensure that the PCIM_MFDEV bit is properly set (or unset) depending on 1629 * whether or not is a multi-function being emulated in the pci 'slot'. 1630 */ 1631 static void 1632 pci_emul_hdrtype_fixup(int bus, int slot, int off, int bytes, uint32_t *rv) 1633 { 1634 int mfdev; 1635 1636 if (off <= PCIR_HDRTYPE && off + bytes > PCIR_HDRTYPE) { 1637 mfdev = pci_emul_is_mfdev(bus, slot); 1638 switch (bytes) { 1639 case 1: 1640 case 2: 1641 *rv &= ~PCIM_MFDEV; 1642 if (mfdev) { 1643 *rv |= PCIM_MFDEV; 1644 } 1645 break; 1646 case 4: 1647 *rv &= ~(PCIM_MFDEV << 16); 1648 if (mfdev) { 1649 *rv |= (PCIM_MFDEV << 16); 1650 } 1651 break; 1652 } 1653 } 1654 } 1655 1656 static uint32_t 1657 bits_changed(uint32_t old, uint32_t new, uint32_t mask) 1658 { 1659 1660 return ((old ^ new) & mask); 1661 } 1662 1663 static void 1664 pci_emul_cmdwrite(struct pci_devinst *pi, uint32_t new, int bytes) 1665 { 1666 int i; 1667 uint16_t old; 1668 1669 /* 1670 * The command register is at an offset of 4 bytes and thus the 1671 * guest could write 1, 2 or 4 bytes starting at this offset. 1672 */ 1673 1674 old = pci_get_cfgdata16(pi, PCIR_COMMAND); /* stash old value */ 1675 CFGWRITE(pi, PCIR_COMMAND, new, bytes); /* update config */ 1676 new = pci_get_cfgdata16(pi, PCIR_COMMAND); /* get updated value */ 1677 1678 /* 1679 * If the MMIO or I/O address space decoding has changed then 1680 * register/unregister all BARs that decode that address space. 1681 */ 1682 for (i = 0; i <= PCI_BARMAX; i++) { 1683 switch (pi->pi_bar[i].type) { 1684 case PCIBAR_NONE: 1685 case PCIBAR_MEMHI64: 1686 break; 1687 case PCIBAR_IO: 1688 /* I/O address space decoding changed? */ 1689 if (bits_changed(old, new, PCIM_CMD_PORTEN)) { 1690 if (porten(pi)) 1691 register_bar(pi, i); 1692 else 1693 unregister_bar(pi, i); 1694 } 1695 break; 1696 case PCIBAR_MEM32: 1697 case PCIBAR_MEM64: 1698 /* MMIO address space decoding changed? */ 1699 if (bits_changed(old, new, PCIM_CMD_MEMEN)) { 1700 if (memen(pi)) 1701 register_bar(pi, i); 1702 else 1703 unregister_bar(pi, i); 1704 } 1705 break; 1706 default: 1707 assert(0); 1708 } 1709 } 1710 1711 /* 1712 * If INTx has been unmasked and is pending, assert the 1713 * interrupt. 1714 */ 1715 pci_lintr_update(pi); 1716 } 1717 1718 static void 1719 pci_cfgrw(struct vmctx *ctx, int vcpu, int in, int bus, int slot, int func, 1720 int coff, int bytes, uint32_t *eax) 1721 { 1722 struct businfo *bi; 1723 struct slotinfo *si; 1724 struct pci_devinst *pi; 1725 struct pci_devemu *pe; 1726 int idx, needcfg; 1727 uint64_t addr, bar, mask; 1728 1729 if ((bi = pci_businfo[bus]) != NULL) { 1730 si = &bi->slotinfo[slot]; 1731 pi = si->si_funcs[func].fi_devi; 1732 } else 1733 pi = NULL; 1734 1735 /* 1736 * Just return if there is no device at this slot:func or if the 1737 * the guest is doing an un-aligned access. 1738 */ 1739 if (pi == NULL || (bytes != 1 && bytes != 2 && bytes != 4) || 1740 (coff & (bytes - 1)) != 0) { 1741 if (in) 1742 *eax = 0xffffffff; 1743 return; 1744 } 1745 1746 /* 1747 * Ignore all writes beyond the standard config space and return all 1748 * ones on reads. 1749 */ 1750 if (coff >= PCI_REGMAX + 1) { 1751 if (in) { 1752 *eax = 0xffffffff; 1753 /* 1754 * Extended capabilities begin at offset 256 in config 1755 * space. Absence of extended capabilities is signaled 1756 * with all 0s in the extended capability header at 1757 * offset 256. 1758 */ 1759 if (coff <= PCI_REGMAX + 4) 1760 *eax = 0x00000000; 1761 } 1762 return; 1763 } 1764 1765 pe = pi->pi_d; 1766 1767 /* 1768 * Config read 1769 */ 1770 if (in) { 1771 /* Let the device emulation override the default handler */ 1772 if (pe->pe_cfgread != NULL) { 1773 needcfg = pe->pe_cfgread(ctx, vcpu, pi, coff, bytes, 1774 eax); 1775 } else { 1776 needcfg = 1; 1777 } 1778 1779 if (needcfg) { 1780 if (bytes == 1) 1781 *eax = pci_get_cfgdata8(pi, coff); 1782 else if (bytes == 2) 1783 *eax = pci_get_cfgdata16(pi, coff); 1784 else 1785 *eax = pci_get_cfgdata32(pi, coff); 1786 } 1787 1788 pci_emul_hdrtype_fixup(bus, slot, coff, bytes, eax); 1789 } else { 1790 /* Let the device emulation override the default handler */ 1791 if (pe->pe_cfgwrite != NULL && 1792 (*pe->pe_cfgwrite)(ctx, vcpu, pi, coff, bytes, *eax) == 0) 1793 return; 1794 1795 /* 1796 * Special handling for write to BAR registers 1797 */ 1798 if (coff >= PCIR_BAR(0) && coff < PCIR_BAR(PCI_BARMAX + 1)) { 1799 /* 1800 * Ignore writes to BAR registers that are not 1801 * 4-byte aligned. 1802 */ 1803 if (bytes != 4 || (coff & 0x3) != 0) 1804 return; 1805 idx = (coff - PCIR_BAR(0)) / 4; 1806 mask = ~(pi->pi_bar[idx].size - 1); 1807 switch (pi->pi_bar[idx].type) { 1808 case PCIBAR_NONE: 1809 pi->pi_bar[idx].addr = bar = 0; 1810 break; 1811 case PCIBAR_IO: 1812 addr = *eax & mask; 1813 addr &= 0xffff; 1814 bar = addr | PCIM_BAR_IO_SPACE; 1815 /* 1816 * Register the new BAR value for interception 1817 */ 1818 if (addr != pi->pi_bar[idx].addr) { 1819 update_bar_address(pi, addr, idx, 1820 PCIBAR_IO); 1821 } 1822 break; 1823 case PCIBAR_MEM32: 1824 addr = bar = *eax & mask; 1825 bar |= PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32; 1826 if (addr != pi->pi_bar[idx].addr) { 1827 update_bar_address(pi, addr, idx, 1828 PCIBAR_MEM32); 1829 } 1830 break; 1831 case PCIBAR_MEM64: 1832 addr = bar = *eax & mask; 1833 bar |= PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 | 1834 PCIM_BAR_MEM_PREFETCH; 1835 if (addr != (uint32_t)pi->pi_bar[idx].addr) { 1836 update_bar_address(pi, addr, idx, 1837 PCIBAR_MEM64); 1838 } 1839 break; 1840 case PCIBAR_MEMHI64: 1841 mask = ~(pi->pi_bar[idx - 1].size - 1); 1842 addr = ((uint64_t)*eax << 32) & mask; 1843 bar = addr >> 32; 1844 if (bar != pi->pi_bar[idx - 1].addr >> 32) { 1845 update_bar_address(pi, addr, idx - 1, 1846 PCIBAR_MEMHI64); 1847 } 1848 break; 1849 default: 1850 assert(0); 1851 } 1852 pci_set_cfgdata32(pi, coff, bar); 1853 1854 } else if (pci_emul_iscap(pi, coff)) { 1855 pci_emul_capwrite(pi, coff, bytes, *eax); 1856 } else if (coff == PCIR_COMMAND) { 1857 pci_emul_cmdwrite(pi, *eax, bytes); 1858 } else { 1859 CFGWRITE(pi, coff, *eax, bytes); 1860 } 1861 } 1862 } 1863 1864 static int cfgenable, cfgbus, cfgslot, cfgfunc, cfgoff; 1865 1866 static int 1867 pci_emul_cfgaddr(struct vmctx *ctx, int vcpu, int in, int port, int bytes, 1868 uint32_t *eax, void *arg) 1869 { 1870 uint32_t x; 1871 1872 if (bytes != 4) { 1873 if (in) 1874 *eax = (bytes == 2) ? 0xffff : 0xff; 1875 return (0); 1876 } 1877 1878 if (in) { 1879 x = (cfgbus << 16) | (cfgslot << 11) | (cfgfunc << 8) | cfgoff; 1880 if (cfgenable) 1881 x |= CONF1_ENABLE; 1882 *eax = x; 1883 } else { 1884 x = *eax; 1885 cfgenable = (x & CONF1_ENABLE) == CONF1_ENABLE; 1886 cfgoff = x & PCI_REGMAX; 1887 cfgfunc = (x >> 8) & PCI_FUNCMAX; 1888 cfgslot = (x >> 11) & PCI_SLOTMAX; 1889 cfgbus = (x >> 16) & PCI_BUSMAX; 1890 } 1891 1892 return (0); 1893 } 1894 INOUT_PORT(pci_cfgaddr, CONF1_ADDR_PORT, IOPORT_F_INOUT, pci_emul_cfgaddr); 1895 1896 static int 1897 pci_emul_cfgdata(struct vmctx *ctx, int vcpu, int in, int port, int bytes, 1898 uint32_t *eax, void *arg) 1899 { 1900 int coff; 1901 1902 assert(bytes == 1 || bytes == 2 || bytes == 4); 1903 1904 coff = cfgoff + (port - CONF1_DATA_PORT); 1905 if (cfgenable) { 1906 pci_cfgrw(ctx, vcpu, in, cfgbus, cfgslot, cfgfunc, coff, bytes, 1907 eax); 1908 } else { 1909 /* Ignore accesses to cfgdata if not enabled by cfgaddr */ 1910 if (in) 1911 *eax = 0xffffffff; 1912 } 1913 return (0); 1914 } 1915 1916 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+0, IOPORT_F_INOUT, pci_emul_cfgdata); 1917 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+1, IOPORT_F_INOUT, pci_emul_cfgdata); 1918 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+2, IOPORT_F_INOUT, pci_emul_cfgdata); 1919 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+3, IOPORT_F_INOUT, pci_emul_cfgdata); 1920 1921 #define PCI_EMUL_TEST 1922 #ifdef PCI_EMUL_TEST 1923 /* 1924 * Define a dummy test device 1925 */ 1926 #define DIOSZ 8 1927 #define DMEMSZ 4096 1928 struct pci_emul_dsoftc { 1929 uint8_t ioregs[DIOSZ]; 1930 uint8_t memregs[DMEMSZ]; 1931 }; 1932 1933 #define PCI_EMUL_MSI_MSGS 4 1934 #define PCI_EMUL_MSIX_MSGS 16 1935 1936 static int 1937 pci_emul_dinit(struct vmctx *ctx, struct pci_devinst *pi, char *opts) 1938 { 1939 int error; 1940 struct pci_emul_dsoftc *sc; 1941 1942 sc = calloc(1, sizeof(struct pci_emul_dsoftc)); 1943 1944 pi->pi_arg = sc; 1945 1946 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x0001); 1947 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x10DD); 1948 pci_set_cfgdata8(pi, PCIR_CLASS, 0x02); 1949 1950 error = pci_emul_add_msicap(pi, PCI_EMUL_MSI_MSGS); 1951 assert(error == 0); 1952 1953 error = pci_emul_alloc_bar(pi, 0, PCIBAR_IO, DIOSZ); 1954 assert(error == 0); 1955 1956 error = pci_emul_alloc_bar(pi, 1, PCIBAR_MEM32, DMEMSZ); 1957 assert(error == 0); 1958 1959 return (0); 1960 } 1961 1962 static void 1963 pci_emul_diow(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, 1964 uint64_t offset, int size, uint64_t value) 1965 { 1966 int i; 1967 struct pci_emul_dsoftc *sc = pi->pi_arg; 1968 1969 if (baridx == 0) { 1970 if (offset + size > DIOSZ) { 1971 printf("diow: iow too large, offset %ld size %d\n", 1972 offset, size); 1973 return; 1974 } 1975 1976 if (size == 1) { 1977 sc->ioregs[offset] = value & 0xff; 1978 } else if (size == 2) { 1979 *(uint16_t *)&sc->ioregs[offset] = value & 0xffff; 1980 } else if (size == 4) { 1981 *(uint32_t *)&sc->ioregs[offset] = value; 1982 } else { 1983 printf("diow: iow unknown size %d\n", size); 1984 } 1985 1986 /* 1987 * Special magic value to generate an interrupt 1988 */ 1989 if (offset == 4 && size == 4 && pci_msi_enabled(pi)) 1990 pci_generate_msi(pi, value % pci_msi_maxmsgnum(pi)); 1991 1992 if (value == 0xabcdef) { 1993 for (i = 0; i < pci_msi_maxmsgnum(pi); i++) 1994 pci_generate_msi(pi, i); 1995 } 1996 } 1997 1998 if (baridx == 1) { 1999 if (offset + size > DMEMSZ) { 2000 printf("diow: memw too large, offset %ld size %d\n", 2001 offset, size); 2002 return; 2003 } 2004 2005 if (size == 1) { 2006 sc->memregs[offset] = value; 2007 } else if (size == 2) { 2008 *(uint16_t *)&sc->memregs[offset] = value; 2009 } else if (size == 4) { 2010 *(uint32_t *)&sc->memregs[offset] = value; 2011 } else if (size == 8) { 2012 *(uint64_t *)&sc->memregs[offset] = value; 2013 } else { 2014 printf("diow: memw unknown size %d\n", size); 2015 } 2016 2017 /* 2018 * magic interrupt ?? 2019 */ 2020 } 2021 2022 if (baridx > 1) { 2023 printf("diow: unknown bar idx %d\n", baridx); 2024 } 2025 } 2026 2027 static uint64_t 2028 pci_emul_dior(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, 2029 uint64_t offset, int size) 2030 { 2031 struct pci_emul_dsoftc *sc = pi->pi_arg; 2032 uint32_t value; 2033 2034 if (baridx == 0) { 2035 if (offset + size > DIOSZ) { 2036 printf("dior: ior too large, offset %ld size %d\n", 2037 offset, size); 2038 return (0); 2039 } 2040 2041 if (size == 1) { 2042 value = sc->ioregs[offset]; 2043 } else if (size == 2) { 2044 value = *(uint16_t *) &sc->ioregs[offset]; 2045 } else if (size == 4) { 2046 value = *(uint32_t *) &sc->ioregs[offset]; 2047 } else { 2048 printf("dior: ior unknown size %d\n", size); 2049 } 2050 } 2051 2052 if (baridx == 1) { 2053 if (offset + size > DMEMSZ) { 2054 printf("dior: memr too large, offset %ld size %d\n", 2055 offset, size); 2056 return (0); 2057 } 2058 2059 if (size == 1) { 2060 value = sc->memregs[offset]; 2061 } else if (size == 2) { 2062 value = *(uint16_t *) &sc->memregs[offset]; 2063 } else if (size == 4) { 2064 value = *(uint32_t *) &sc->memregs[offset]; 2065 } else if (size == 8) { 2066 value = *(uint64_t *) &sc->memregs[offset]; 2067 } else { 2068 printf("dior: ior unknown size %d\n", size); 2069 } 2070 } 2071 2072 2073 if (baridx > 1) { 2074 printf("dior: unknown bar idx %d\n", baridx); 2075 return (0); 2076 } 2077 2078 return (value); 2079 } 2080 2081 struct pci_devemu pci_dummy = { 2082 .pe_emu = "dummy", 2083 .pe_init = pci_emul_dinit, 2084 .pe_barwrite = pci_emul_diow, 2085 .pe_barread = pci_emul_dior 2086 }; 2087 PCI_EMUL_SET(pci_dummy); 2088 2089 #endif /* PCI_EMUL_TEST */ 2090