1 /*- 2 * Copyright (c) 2011 NetApp, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/linker_set.h> 34 #include <sys/errno.h> 35 36 #include <ctype.h> 37 #include <stdio.h> 38 #include <stdlib.h> 39 #include <string.h> 40 #include <strings.h> 41 #include <assert.h> 42 #include <stdbool.h> 43 44 #include <machine/vmm.h> 45 #include <vmmapi.h> 46 47 #include "bhyverun.h" 48 #include "inout.h" 49 #include "mem.h" 50 #include "mptbl.h" 51 #include "pci_emul.h" 52 #include "ioapic.h" 53 54 #define CONF1_ADDR_PORT 0x0cf8 55 #define CONF1_DATA_PORT 0x0cfc 56 57 #define CFGWRITE(pi,off,val,b) \ 58 do { \ 59 if ((b) == 1) { \ 60 pci_set_cfgdata8((pi),(off),(val)); \ 61 } else if ((b) == 2) { \ 62 pci_set_cfgdata16((pi),(off),(val)); \ 63 } else { \ 64 pci_set_cfgdata32((pi),(off),(val)); \ 65 } \ 66 } while (0) 67 68 #define MAXSLOTS (PCI_SLOTMAX + 1) 69 #define MAXFUNCS (PCI_FUNCMAX + 1) 70 71 static struct slotinfo { 72 char *si_name; 73 char *si_param; 74 struct pci_devinst *si_devi; 75 int si_legacy; 76 } pci_slotinfo[MAXSLOTS][MAXFUNCS]; 77 78 /* 79 * Used to keep track of legacy interrupt owners/requestors 80 */ 81 #define NLIRQ 16 82 83 static struct lirqinfo { 84 int li_generic; 85 int li_acount; 86 struct pci_devinst *li_owner; /* XXX should be a list */ 87 } lirq[NLIRQ]; 88 89 SET_DECLARE(pci_devemu_set, struct pci_devemu); 90 91 static uint64_t pci_emul_iobase; 92 static uint64_t pci_emul_membase32; 93 static uint64_t pci_emul_membase64; 94 95 #define PCI_EMUL_IOBASE 0x2000 96 #define PCI_EMUL_IOLIMIT 0x10000 97 98 #define PCI_EMUL_MEMLIMIT32 0xE0000000 /* 3.5GB */ 99 100 #define PCI_EMUL_MEMBASE64 0xD000000000UL 101 #define PCI_EMUL_MEMLIMIT64 0xFD00000000UL 102 103 static int pci_emul_devices; 104 105 /* 106 * I/O access 107 */ 108 109 /* 110 * Slot options are in the form: 111 * 112 * <slot>[:<func>],<emul>[,<config>] 113 * 114 * slot is 0..31 115 * func is 0..7 116 * emul is a string describing the type of PCI device e.g. virtio-net 117 * config is an optional string, depending on the device, that can be 118 * used for configuration. 119 * Examples are: 120 * 1,virtio-net,tap0 121 * 3:0,dummy 122 */ 123 static void 124 pci_parse_slot_usage(char *aopt) 125 { 126 printf("Invalid PCI slot info field \"%s\"\n", aopt); 127 free(aopt); 128 } 129 130 void 131 pci_parse_slot(char *opt, int legacy) 132 { 133 char *slot, *func, *emul, *config; 134 char *str, *cpy; 135 int snum, fnum; 136 137 str = cpy = strdup(opt); 138 139 config = NULL; 140 141 if (strchr(str, ':') != NULL) { 142 slot = strsep(&str, ":"); 143 func = strsep(&str, ","); 144 } else { 145 slot = strsep(&str, ","); 146 func = NULL; 147 } 148 149 emul = strsep(&str, ","); 150 if (str != NULL) { 151 config = strsep(&str, ","); 152 } 153 154 if (emul == NULL) { 155 pci_parse_slot_usage(cpy); 156 return; 157 } 158 159 snum = atoi(slot); 160 fnum = func ? atoi(func) : 0; 161 if (snum < 0 || snum >= MAXSLOTS || fnum < 0 || fnum >= MAXFUNCS) { 162 pci_parse_slot_usage(cpy); 163 } else { 164 pci_slotinfo[snum][fnum].si_name = emul; 165 pci_slotinfo[snum][fnum].si_param = config; 166 pci_slotinfo[snum][fnum].si_legacy = legacy; 167 } 168 } 169 170 static int 171 pci_valid_pba_offset(struct pci_devinst *pi, uint64_t offset) 172 { 173 174 if (offset < pi->pi_msix.pba_offset) 175 return (0); 176 177 if (offset >= pi->pi_msix.pba_offset + pi->pi_msix.pba_size) { 178 return (0); 179 } 180 181 return (1); 182 } 183 184 int 185 pci_emul_msix_twrite(struct pci_devinst *pi, uint64_t offset, int size, 186 uint64_t value) 187 { 188 int msix_entry_offset; 189 int tab_index; 190 char *dest; 191 192 /* support only 4 or 8 byte writes */ 193 if (size != 4 && size != 8) 194 return (-1); 195 196 /* 197 * Return if table index is beyond what device supports 198 */ 199 tab_index = offset / MSIX_TABLE_ENTRY_SIZE; 200 if (tab_index >= pi->pi_msix.table_count) 201 return (-1); 202 203 msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE; 204 205 /* support only aligned writes */ 206 if ((msix_entry_offset % size) != 0) 207 return (-1); 208 209 dest = (char *)(pi->pi_msix.table + tab_index); 210 dest += msix_entry_offset; 211 212 if (size == 4) 213 *((uint32_t *)dest) = value; 214 else 215 *((uint64_t *)dest) = value; 216 217 return (0); 218 } 219 220 uint64_t 221 pci_emul_msix_tread(struct pci_devinst *pi, uint64_t offset, int size) 222 { 223 char *dest; 224 int msix_entry_offset; 225 int tab_index; 226 uint64_t retval = ~0; 227 228 /* support only 4 or 8 byte reads */ 229 if (size != 4 && size != 8) 230 return (retval); 231 232 msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE; 233 234 /* support only aligned reads */ 235 if ((msix_entry_offset % size) != 0) { 236 return (retval); 237 } 238 239 tab_index = offset / MSIX_TABLE_ENTRY_SIZE; 240 241 if (tab_index < pi->pi_msix.table_count) { 242 /* valid MSI-X Table access */ 243 dest = (char *)(pi->pi_msix.table + tab_index); 244 dest += msix_entry_offset; 245 246 if (size == 4) 247 retval = *((uint32_t *)dest); 248 else 249 retval = *((uint64_t *)dest); 250 } else if (pci_valid_pba_offset(pi, offset)) { 251 /* return 0 for PBA access */ 252 retval = 0; 253 } 254 255 return (retval); 256 } 257 258 int 259 pci_msix_table_bar(struct pci_devinst *pi) 260 { 261 262 if (pi->pi_msix.table != NULL) 263 return (pi->pi_msix.table_bar); 264 else 265 return (-1); 266 } 267 268 int 269 pci_msix_pba_bar(struct pci_devinst *pi) 270 { 271 272 if (pi->pi_msix.table != NULL) 273 return (pi->pi_msix.pba_bar); 274 else 275 return (-1); 276 } 277 278 static int 279 pci_emul_io_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes, 280 uint32_t *eax, void *arg) 281 { 282 struct pci_devinst *pdi = arg; 283 struct pci_devemu *pe = pdi->pi_d; 284 uint64_t offset; 285 int i; 286 287 for (i = 0; i <= PCI_BARMAX; i++) { 288 if (pdi->pi_bar[i].type == PCIBAR_IO && 289 port >= pdi->pi_bar[i].addr && 290 port + bytes <= pdi->pi_bar[i].addr + pdi->pi_bar[i].size) { 291 offset = port - pdi->pi_bar[i].addr; 292 if (in) 293 *eax = (*pe->pe_barread)(ctx, vcpu, pdi, i, 294 offset, bytes); 295 else 296 (*pe->pe_barwrite)(ctx, vcpu, pdi, i, offset, 297 bytes, *eax); 298 return (0); 299 } 300 } 301 return (-1); 302 } 303 304 static int 305 pci_emul_mem_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, 306 int size, uint64_t *val, void *arg1, long arg2) 307 { 308 struct pci_devinst *pdi = arg1; 309 struct pci_devemu *pe = pdi->pi_d; 310 uint64_t offset; 311 int bidx = (int) arg2; 312 313 assert(bidx <= PCI_BARMAX); 314 assert(pdi->pi_bar[bidx].type == PCIBAR_MEM32 || 315 pdi->pi_bar[bidx].type == PCIBAR_MEM64); 316 assert(addr >= pdi->pi_bar[bidx].addr && 317 addr + size <= pdi->pi_bar[bidx].addr + pdi->pi_bar[bidx].size); 318 319 offset = addr - pdi->pi_bar[bidx].addr; 320 321 if (dir == MEM_F_WRITE) 322 (*pe->pe_barwrite)(ctx, vcpu, pdi, bidx, offset, size, *val); 323 else 324 *val = (*pe->pe_barread)(ctx, vcpu, pdi, bidx, offset, size); 325 326 return (0); 327 } 328 329 330 static int 331 pci_emul_alloc_resource(uint64_t *baseptr, uint64_t limit, uint64_t size, 332 uint64_t *addr) 333 { 334 uint64_t base; 335 336 assert((size & (size - 1)) == 0); /* must be a power of 2 */ 337 338 base = roundup2(*baseptr, size); 339 340 if (base + size <= limit) { 341 *addr = base; 342 *baseptr = base + size; 343 return (0); 344 } else 345 return (-1); 346 } 347 348 int 349 pci_emul_alloc_bar(struct pci_devinst *pdi, int idx, enum pcibar_type type, 350 uint64_t size) 351 { 352 353 return (pci_emul_alloc_pbar(pdi, idx, 0, type, size)); 354 } 355 356 /* 357 * Register (or unregister) the MMIO or I/O region associated with the BAR 358 * register 'idx' of an emulated pci device. 359 */ 360 static void 361 modify_bar_registration(struct pci_devinst *pi, int idx, int registration) 362 { 363 int error; 364 struct inout_port iop; 365 struct mem_range mr; 366 367 switch (pi->pi_bar[idx].type) { 368 case PCIBAR_IO: 369 bzero(&iop, sizeof(struct inout_port)); 370 iop.name = pi->pi_name; 371 iop.port = pi->pi_bar[idx].addr; 372 iop.size = pi->pi_bar[idx].size; 373 if (registration) { 374 iop.flags = IOPORT_F_INOUT; 375 iop.handler = pci_emul_io_handler; 376 iop.arg = pi; 377 error = register_inout(&iop); 378 } else 379 error = unregister_inout(&iop); 380 break; 381 case PCIBAR_MEM32: 382 case PCIBAR_MEM64: 383 bzero(&mr, sizeof(struct mem_range)); 384 mr.name = pi->pi_name; 385 mr.base = pi->pi_bar[idx].addr; 386 mr.size = pi->pi_bar[idx].size; 387 if (registration) { 388 mr.flags = MEM_F_RW; 389 mr.handler = pci_emul_mem_handler; 390 mr.arg1 = pi; 391 mr.arg2 = idx; 392 error = register_mem(&mr); 393 } else 394 error = unregister_mem(&mr); 395 break; 396 default: 397 error = EINVAL; 398 break; 399 } 400 assert(error == 0); 401 } 402 403 static void 404 unregister_bar(struct pci_devinst *pi, int idx) 405 { 406 407 modify_bar_registration(pi, idx, 0); 408 } 409 410 static void 411 register_bar(struct pci_devinst *pi, int idx) 412 { 413 414 modify_bar_registration(pi, idx, 1); 415 } 416 417 /* Are we decoding i/o port accesses for the emulated pci device? */ 418 static int 419 porten(struct pci_devinst *pi) 420 { 421 uint16_t cmd; 422 423 cmd = pci_get_cfgdata16(pi, PCIR_COMMAND); 424 425 return (cmd & PCIM_CMD_PORTEN); 426 } 427 428 /* Are we decoding memory accesses for the emulated pci device? */ 429 static int 430 memen(struct pci_devinst *pi) 431 { 432 uint16_t cmd; 433 434 cmd = pci_get_cfgdata16(pi, PCIR_COMMAND); 435 436 return (cmd & PCIM_CMD_MEMEN); 437 } 438 439 /* 440 * Update the MMIO or I/O address that is decoded by the BAR register. 441 * 442 * If the pci device has enabled the address space decoding then intercept 443 * the address range decoded by the BAR register. 444 */ 445 static void 446 update_bar_address(struct pci_devinst *pi, uint64_t addr, int idx, int type) 447 { 448 int decode; 449 450 if (pi->pi_bar[idx].type == PCIBAR_IO) 451 decode = porten(pi); 452 else 453 decode = memen(pi); 454 455 if (decode) 456 unregister_bar(pi, idx); 457 458 switch (type) { 459 case PCIBAR_IO: 460 case PCIBAR_MEM32: 461 pi->pi_bar[idx].addr = addr; 462 break; 463 case PCIBAR_MEM64: 464 pi->pi_bar[idx].addr &= ~0xffffffffUL; 465 pi->pi_bar[idx].addr |= addr; 466 break; 467 case PCIBAR_MEMHI64: 468 pi->pi_bar[idx].addr &= 0xffffffff; 469 pi->pi_bar[idx].addr |= addr; 470 break; 471 default: 472 assert(0); 473 } 474 475 if (decode) 476 register_bar(pi, idx); 477 } 478 479 int 480 pci_emul_alloc_pbar(struct pci_devinst *pdi, int idx, uint64_t hostbase, 481 enum pcibar_type type, uint64_t size) 482 { 483 int error; 484 uint64_t *baseptr, limit, addr, mask, lobits, bar; 485 486 assert(idx >= 0 && idx <= PCI_BARMAX); 487 488 if ((size & (size - 1)) != 0) 489 size = 1UL << flsl(size); /* round up to a power of 2 */ 490 491 /* Enforce minimum BAR sizes required by the PCI standard */ 492 if (type == PCIBAR_IO) { 493 if (size < 4) 494 size = 4; 495 } else { 496 if (size < 16) 497 size = 16; 498 } 499 500 switch (type) { 501 case PCIBAR_NONE: 502 baseptr = NULL; 503 addr = mask = lobits = 0; 504 break; 505 case PCIBAR_IO: 506 if (hostbase && 507 pci_slotinfo[pdi->pi_slot][pdi->pi_func].si_legacy) { 508 assert(hostbase < PCI_EMUL_IOBASE); 509 baseptr = &hostbase; 510 } else { 511 baseptr = &pci_emul_iobase; 512 } 513 limit = PCI_EMUL_IOLIMIT; 514 mask = PCIM_BAR_IO_BASE; 515 lobits = PCIM_BAR_IO_SPACE; 516 break; 517 case PCIBAR_MEM64: 518 /* 519 * XXX 520 * Some drivers do not work well if the 64-bit BAR is allocated 521 * above 4GB. Allow for this by allocating small requests under 522 * 4GB unless then allocation size is larger than some arbitrary 523 * number (32MB currently). 524 */ 525 if (size > 32 * 1024 * 1024) { 526 /* 527 * XXX special case for device requiring peer-peer DMA 528 */ 529 if (size == 0x100000000UL) 530 baseptr = &hostbase; 531 else 532 baseptr = &pci_emul_membase64; 533 limit = PCI_EMUL_MEMLIMIT64; 534 mask = PCIM_BAR_MEM_BASE; 535 lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 | 536 PCIM_BAR_MEM_PREFETCH; 537 break; 538 } else { 539 baseptr = &pci_emul_membase32; 540 limit = PCI_EMUL_MEMLIMIT32; 541 mask = PCIM_BAR_MEM_BASE; 542 lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64; 543 } 544 break; 545 case PCIBAR_MEM32: 546 baseptr = &pci_emul_membase32; 547 limit = PCI_EMUL_MEMLIMIT32; 548 mask = PCIM_BAR_MEM_BASE; 549 lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32; 550 break; 551 default: 552 printf("pci_emul_alloc_base: invalid bar type %d\n", type); 553 assert(0); 554 } 555 556 if (baseptr != NULL) { 557 error = pci_emul_alloc_resource(baseptr, limit, size, &addr); 558 if (error != 0) 559 return (error); 560 } 561 562 pdi->pi_bar[idx].type = type; 563 pdi->pi_bar[idx].addr = addr; 564 pdi->pi_bar[idx].size = size; 565 566 /* Initialize the BAR register in config space */ 567 bar = (addr & mask) | lobits; 568 pci_set_cfgdata32(pdi, PCIR_BAR(idx), bar); 569 570 if (type == PCIBAR_MEM64) { 571 assert(idx + 1 <= PCI_BARMAX); 572 pdi->pi_bar[idx + 1].type = PCIBAR_MEMHI64; 573 pci_set_cfgdata32(pdi, PCIR_BAR(idx + 1), bar >> 32); 574 } 575 576 register_bar(pdi, idx); 577 578 return (0); 579 } 580 581 #define CAP_START_OFFSET 0x40 582 static int 583 pci_emul_add_capability(struct pci_devinst *pi, u_char *capdata, int caplen) 584 { 585 int i, capoff, capid, reallen; 586 uint16_t sts; 587 588 static u_char endofcap[4] = { 589 PCIY_RESERVED, 0, 0, 0 590 }; 591 592 assert(caplen > 0 && capdata[0] != PCIY_RESERVED); 593 594 reallen = roundup2(caplen, 4); /* dword aligned */ 595 596 sts = pci_get_cfgdata16(pi, PCIR_STATUS); 597 if ((sts & PCIM_STATUS_CAPPRESENT) == 0) { 598 capoff = CAP_START_OFFSET; 599 pci_set_cfgdata8(pi, PCIR_CAP_PTR, capoff); 600 pci_set_cfgdata16(pi, PCIR_STATUS, sts|PCIM_STATUS_CAPPRESENT); 601 } else { 602 capoff = pci_get_cfgdata8(pi, PCIR_CAP_PTR); 603 while (1) { 604 assert((capoff & 0x3) == 0); 605 capid = pci_get_cfgdata8(pi, capoff); 606 if (capid == PCIY_RESERVED) 607 break; 608 capoff = pci_get_cfgdata8(pi, capoff + 1); 609 } 610 } 611 612 /* Check if we have enough space */ 613 if (capoff + reallen + sizeof(endofcap) > PCI_REGMAX + 1) 614 return (-1); 615 616 /* Copy the capability */ 617 for (i = 0; i < caplen; i++) 618 pci_set_cfgdata8(pi, capoff + i, capdata[i]); 619 620 /* Set the next capability pointer */ 621 pci_set_cfgdata8(pi, capoff + 1, capoff + reallen); 622 623 /* Copy of the reserved capability which serves as the end marker */ 624 for (i = 0; i < sizeof(endofcap); i++) 625 pci_set_cfgdata8(pi, capoff + reallen + i, endofcap[i]); 626 627 return (0); 628 } 629 630 static struct pci_devemu * 631 pci_emul_finddev(char *name) 632 { 633 struct pci_devemu **pdpp, *pdp; 634 635 SET_FOREACH(pdpp, pci_devemu_set) { 636 pdp = *pdpp; 637 if (!strcmp(pdp->pe_emu, name)) { 638 return (pdp); 639 } 640 } 641 642 return (NULL); 643 } 644 645 static void 646 pci_emul_init(struct vmctx *ctx, struct pci_devemu *pde, int slot, int func, 647 char *params) 648 { 649 struct pci_devinst *pdi; 650 pdi = malloc(sizeof(struct pci_devinst)); 651 bzero(pdi, sizeof(*pdi)); 652 653 pdi->pi_vmctx = ctx; 654 pdi->pi_bus = 0; 655 pdi->pi_slot = slot; 656 pdi->pi_func = func; 657 pdi->pi_d = pde; 658 snprintf(pdi->pi_name, PI_NAMESZ, "%s-pci-%d", pde->pe_emu, slot); 659 660 /* Disable legacy interrupts */ 661 pci_set_cfgdata8(pdi, PCIR_INTLINE, 255); 662 pci_set_cfgdata8(pdi, PCIR_INTPIN, 0); 663 664 pci_set_cfgdata8(pdi, PCIR_COMMAND, 665 PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); 666 667 if ((*pde->pe_init)(ctx, pdi, params) != 0) { 668 free(pdi); 669 } else { 670 pci_emul_devices++; 671 pci_slotinfo[slot][func].si_devi = pdi; 672 } 673 } 674 675 void 676 pci_populate_msicap(struct msicap *msicap, int msgnum, int nextptr) 677 { 678 int mmc; 679 680 CTASSERT(sizeof(struct msicap) == 14); 681 682 /* Number of msi messages must be a power of 2 between 1 and 32 */ 683 assert((msgnum & (msgnum - 1)) == 0 && msgnum >= 1 && msgnum <= 32); 684 mmc = ffs(msgnum) - 1; 685 686 bzero(msicap, sizeof(struct msicap)); 687 msicap->capid = PCIY_MSI; 688 msicap->nextptr = nextptr; 689 msicap->msgctrl = PCIM_MSICTRL_64BIT | (mmc << 1); 690 } 691 692 int 693 pci_emul_add_msicap(struct pci_devinst *pi, int msgnum) 694 { 695 struct msicap msicap; 696 697 pci_populate_msicap(&msicap, msgnum, 0); 698 699 return (pci_emul_add_capability(pi, (u_char *)&msicap, sizeof(msicap))); 700 } 701 702 static void 703 pci_populate_msixcap(struct msixcap *msixcap, int msgnum, int barnum, 704 uint32_t msix_tab_size, int nextptr) 705 { 706 CTASSERT(sizeof(struct msixcap) == 12); 707 708 assert(msix_tab_size % 4096 == 0); 709 710 bzero(msixcap, sizeof(struct msixcap)); 711 msixcap->capid = PCIY_MSIX; 712 msixcap->nextptr = nextptr; 713 714 /* 715 * Message Control Register, all fields set to 716 * zero except for the Table Size. 717 * Note: Table size N is encoded as N-1 718 */ 719 msixcap->msgctrl = msgnum - 1; 720 721 /* 722 * MSI-X BAR setup: 723 * - MSI-X table start at offset 0 724 * - PBA table starts at a 4K aligned offset after the MSI-X table 725 */ 726 msixcap->table_info = barnum & PCIM_MSIX_BIR_MASK; 727 msixcap->pba_info = msix_tab_size | (barnum & PCIM_MSIX_BIR_MASK); 728 } 729 730 static void 731 pci_msix_table_init(struct pci_devinst *pi, int table_entries) 732 { 733 int i, table_size; 734 735 assert(table_entries > 0); 736 assert(table_entries <= MAX_MSIX_TABLE_ENTRIES); 737 738 table_size = table_entries * MSIX_TABLE_ENTRY_SIZE; 739 pi->pi_msix.table = malloc(table_size); 740 bzero(pi->pi_msix.table, table_size); 741 742 /* set mask bit of vector control register */ 743 for (i = 0; i < table_entries; i++) 744 pi->pi_msix.table[i].vector_control |= PCIM_MSIX_VCTRL_MASK; 745 } 746 747 int 748 pci_emul_add_msixcap(struct pci_devinst *pi, int msgnum, int barnum) 749 { 750 uint16_t pba_index; 751 uint32_t tab_size; 752 struct msixcap msixcap; 753 754 assert(msgnum >= 1 && msgnum <= MAX_MSIX_TABLE_ENTRIES); 755 assert(barnum >= 0 && barnum <= PCIR_MAX_BAR_0); 756 757 tab_size = msgnum * MSIX_TABLE_ENTRY_SIZE; 758 759 /* Align table size to nearest 4K */ 760 tab_size = roundup2(tab_size, 4096); 761 762 pi->pi_msix.table_bar = barnum; 763 pi->pi_msix.pba_bar = barnum; 764 pi->pi_msix.table_offset = 0; 765 pi->pi_msix.table_count = msgnum; 766 pi->pi_msix.pba_offset = tab_size; 767 768 /* calculate the MMIO size required for MSI-X PBA */ 769 pba_index = (msgnum - 1) / (PBA_TABLE_ENTRY_SIZE * 8); 770 pi->pi_msix.pba_size = (pba_index + 1) * PBA_TABLE_ENTRY_SIZE; 771 772 pci_msix_table_init(pi, msgnum); 773 774 pci_populate_msixcap(&msixcap, msgnum, barnum, tab_size, 0); 775 776 /* allocate memory for MSI-X Table and PBA */ 777 pci_emul_alloc_bar(pi, barnum, PCIBAR_MEM32, 778 tab_size + pi->pi_msix.pba_size); 779 780 return (pci_emul_add_capability(pi, (u_char *)&msixcap, 781 sizeof(msixcap))); 782 } 783 784 void 785 msixcap_cfgwrite(struct pci_devinst *pi, int capoff, int offset, 786 int bytes, uint32_t val) 787 { 788 uint16_t msgctrl, rwmask; 789 int off, table_bar; 790 791 off = offset - capoff; 792 table_bar = pi->pi_msix.table_bar; 793 /* Message Control Register */ 794 if (off == 2 && bytes == 2) { 795 rwmask = PCIM_MSIXCTRL_MSIX_ENABLE | PCIM_MSIXCTRL_FUNCTION_MASK; 796 msgctrl = pci_get_cfgdata16(pi, offset); 797 msgctrl &= ~rwmask; 798 msgctrl |= val & rwmask; 799 val = msgctrl; 800 801 pi->pi_msix.enabled = val & PCIM_MSIXCTRL_MSIX_ENABLE; 802 pi->pi_msix.function_mask = val & PCIM_MSIXCTRL_FUNCTION_MASK; 803 } 804 805 CFGWRITE(pi, offset, val, bytes); 806 } 807 808 void 809 msicap_cfgwrite(struct pci_devinst *pi, int capoff, int offset, 810 int bytes, uint32_t val) 811 { 812 uint16_t msgctrl, rwmask, msgdata, mme; 813 uint32_t addrlo; 814 815 /* 816 * If guest is writing to the message control register make sure 817 * we do not overwrite read-only fields. 818 */ 819 if ((offset - capoff) == 2 && bytes == 2) { 820 rwmask = PCIM_MSICTRL_MME_MASK | PCIM_MSICTRL_MSI_ENABLE; 821 msgctrl = pci_get_cfgdata16(pi, offset); 822 msgctrl &= ~rwmask; 823 msgctrl |= val & rwmask; 824 val = msgctrl; 825 826 addrlo = pci_get_cfgdata32(pi, capoff + 4); 827 if (msgctrl & PCIM_MSICTRL_64BIT) 828 msgdata = pci_get_cfgdata16(pi, capoff + 12); 829 else 830 msgdata = pci_get_cfgdata16(pi, capoff + 8); 831 832 /* 833 * XXX check delivery mode, destination mode etc 834 */ 835 mme = msgctrl & PCIM_MSICTRL_MME_MASK; 836 pi->pi_msi.enabled = msgctrl & PCIM_MSICTRL_MSI_ENABLE ? 1 : 0; 837 if (pi->pi_msi.enabled) { 838 pi->pi_msi.cpu = (addrlo >> 12) & 0xff; 839 pi->pi_msi.vector = msgdata & 0xff; 840 pi->pi_msi.msgnum = 1 << (mme >> 4); 841 } else { 842 pi->pi_msi.cpu = 0; 843 pi->pi_msi.vector = 0; 844 pi->pi_msi.msgnum = 0; 845 } 846 } 847 848 CFGWRITE(pi, offset, val, bytes); 849 } 850 851 void 852 pciecap_cfgwrite(struct pci_devinst *pi, int capoff, int offset, 853 int bytes, uint32_t val) 854 { 855 856 /* XXX don't write to the readonly parts */ 857 CFGWRITE(pi, offset, val, bytes); 858 } 859 860 #define PCIECAP_VERSION 0x2 861 int 862 pci_emul_add_pciecap(struct pci_devinst *pi, int type) 863 { 864 int err; 865 struct pciecap pciecap; 866 867 CTASSERT(sizeof(struct pciecap) == 60); 868 869 if (type != PCIEM_TYPE_ROOT_PORT) 870 return (-1); 871 872 bzero(&pciecap, sizeof(pciecap)); 873 874 pciecap.capid = PCIY_EXPRESS; 875 pciecap.pcie_capabilities = PCIECAP_VERSION | PCIEM_TYPE_ROOT_PORT; 876 pciecap.link_capabilities = 0x411; /* gen1, x1 */ 877 pciecap.link_status = 0x11; /* gen1, x1 */ 878 879 err = pci_emul_add_capability(pi, (u_char *)&pciecap, sizeof(pciecap)); 880 return (err); 881 } 882 883 /* 884 * This function assumes that 'coff' is in the capabilities region of the 885 * config space. 886 */ 887 static void 888 pci_emul_capwrite(struct pci_devinst *pi, int offset, int bytes, uint32_t val) 889 { 890 int capid; 891 uint8_t capoff, nextoff; 892 893 /* Do not allow un-aligned writes */ 894 if ((offset & (bytes - 1)) != 0) 895 return; 896 897 /* Find the capability that we want to update */ 898 capoff = CAP_START_OFFSET; 899 while (1) { 900 capid = pci_get_cfgdata8(pi, capoff); 901 if (capid == PCIY_RESERVED) 902 break; 903 904 nextoff = pci_get_cfgdata8(pi, capoff + 1); 905 if (offset >= capoff && offset < nextoff) 906 break; 907 908 capoff = nextoff; 909 } 910 assert(offset >= capoff); 911 912 /* 913 * Capability ID and Next Capability Pointer are readonly 914 */ 915 if (offset == capoff || offset == capoff + 1) 916 return; 917 918 switch (capid) { 919 case PCIY_MSI: 920 msicap_cfgwrite(pi, capoff, offset, bytes, val); 921 break; 922 case PCIY_MSIX: 923 msixcap_cfgwrite(pi, capoff, offset, bytes, val); 924 break; 925 case PCIY_EXPRESS: 926 pciecap_cfgwrite(pi, capoff, offset, bytes, val); 927 break; 928 default: 929 break; 930 } 931 } 932 933 static int 934 pci_emul_iscap(struct pci_devinst *pi, int offset) 935 { 936 int found; 937 uint16_t sts; 938 uint8_t capid, lastoff; 939 940 found = 0; 941 sts = pci_get_cfgdata16(pi, PCIR_STATUS); 942 if ((sts & PCIM_STATUS_CAPPRESENT) != 0) { 943 lastoff = pci_get_cfgdata8(pi, PCIR_CAP_PTR); 944 while (1) { 945 assert((lastoff & 0x3) == 0); 946 capid = pci_get_cfgdata8(pi, lastoff); 947 if (capid == PCIY_RESERVED) 948 break; 949 lastoff = pci_get_cfgdata8(pi, lastoff + 1); 950 } 951 if (offset >= CAP_START_OFFSET && offset <= lastoff) 952 found = 1; 953 } 954 return (found); 955 } 956 957 static int 958 pci_emul_fallback_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, 959 int size, uint64_t *val, void *arg1, long arg2) 960 { 961 /* 962 * Ignore writes; return 0xff's for reads. The mem read code 963 * will take care of truncating to the correct size. 964 */ 965 if (dir == MEM_F_READ) { 966 *val = 0xffffffffffffffff; 967 } 968 969 return (0); 970 } 971 972 void 973 init_pci(struct vmctx *ctx) 974 { 975 struct mem_range memp; 976 struct pci_devemu *pde; 977 struct slotinfo *si; 978 size_t lowmem; 979 int slot, func; 980 int error; 981 982 pci_emul_iobase = PCI_EMUL_IOBASE; 983 pci_emul_membase32 = vm_get_lowmem_limit(ctx); 984 pci_emul_membase64 = PCI_EMUL_MEMBASE64; 985 986 for (slot = 0; slot < MAXSLOTS; slot++) { 987 for (func = 0; func < MAXFUNCS; func++) { 988 si = &pci_slotinfo[slot][func]; 989 if (si->si_name != NULL) { 990 pde = pci_emul_finddev(si->si_name); 991 if (pde != NULL) { 992 pci_emul_init(ctx, pde, slot, func, 993 si->si_param); 994 } 995 } 996 } 997 } 998 999 /* 1000 * Allow ISA IRQs 5,10,11,12, and 15 to be available for 1001 * generic use 1002 */ 1003 lirq[5].li_generic = 1; 1004 lirq[10].li_generic = 1; 1005 lirq[11].li_generic = 1; 1006 lirq[12].li_generic = 1; 1007 lirq[15].li_generic = 1; 1008 1009 /* 1010 * The guest physical memory map looks like the following: 1011 * [0, lowmem) guest system memory 1012 * [lowmem, lowmem_limit) memory hole (may be absent) 1013 * [lowmem_limit, 4GB) PCI hole (32-bit BAR allocation) 1014 * [4GB, 4GB + highmem) 1015 * 1016 * Accesses to memory addresses that are not allocated to system 1017 * memory or PCI devices return 0xff's. 1018 */ 1019 error = vm_get_memory_seg(ctx, 0, &lowmem); 1020 assert(error == 0); 1021 1022 memset(&memp, 0, sizeof(struct mem_range)); 1023 memp.name = "PCI hole"; 1024 memp.flags = MEM_F_RW; 1025 memp.base = lowmem; 1026 memp.size = (4ULL * 1024 * 1024 * 1024) - lowmem; 1027 memp.handler = pci_emul_fallback_handler; 1028 1029 error = register_mem_fallback(&memp); 1030 assert(error == 0); 1031 } 1032 1033 int 1034 pci_msi_enabled(struct pci_devinst *pi) 1035 { 1036 return (pi->pi_msi.enabled); 1037 } 1038 1039 int 1040 pci_msi_msgnum(struct pci_devinst *pi) 1041 { 1042 if (pi->pi_msi.enabled) 1043 return (pi->pi_msi.msgnum); 1044 else 1045 return (0); 1046 } 1047 1048 int 1049 pci_msix_enabled(struct pci_devinst *pi) 1050 { 1051 1052 return (pi->pi_msix.enabled && !pi->pi_msi.enabled); 1053 } 1054 1055 void 1056 pci_generate_msix(struct pci_devinst *pi, int index) 1057 { 1058 struct msix_table_entry *mte; 1059 1060 if (!pci_msix_enabled(pi)) 1061 return; 1062 1063 if (pi->pi_msix.function_mask) 1064 return; 1065 1066 if (index >= pi->pi_msix.table_count) 1067 return; 1068 1069 mte = &pi->pi_msix.table[index]; 1070 if ((mte->vector_control & PCIM_MSIX_VCTRL_MASK) == 0) { 1071 /* XXX Set PBA bit if interrupt is disabled */ 1072 vm_lapic_irq(pi->pi_vmctx, 1073 (mte->addr >> 12) & 0xff, mte->msg_data & 0xff); 1074 } 1075 } 1076 1077 void 1078 pci_generate_msi(struct pci_devinst *pi, int msg) 1079 { 1080 1081 if (pci_msi_enabled(pi) && msg < pci_msi_msgnum(pi)) { 1082 vm_lapic_irq(pi->pi_vmctx, 1083 pi->pi_msi.cpu, 1084 pi->pi_msi.vector + msg); 1085 } 1086 } 1087 1088 int 1089 pci_is_legacy(struct pci_devinst *pi) 1090 { 1091 1092 return (pci_slotinfo[pi->pi_slot][pi->pi_func].si_legacy); 1093 } 1094 1095 static int 1096 pci_lintr_alloc(struct pci_devinst *pi, int vec) 1097 { 1098 int i; 1099 1100 assert(vec < NLIRQ); 1101 1102 if (vec == -1) { 1103 for (i = 0; i < NLIRQ; i++) { 1104 if (lirq[i].li_generic && 1105 lirq[i].li_owner == NULL) { 1106 vec = i; 1107 break; 1108 } 1109 } 1110 } else { 1111 if (lirq[vec].li_owner != NULL) { 1112 vec = -1; 1113 } 1114 } 1115 assert(vec != -1); 1116 1117 lirq[vec].li_owner = pi; 1118 pi->pi_lintr_pin = vec; 1119 1120 return (vec); 1121 } 1122 1123 int 1124 pci_lintr_request(struct pci_devinst *pi, int vec) 1125 { 1126 1127 vec = pci_lintr_alloc(pi, vec); 1128 pci_set_cfgdata8(pi, PCIR_INTLINE, vec); 1129 pci_set_cfgdata8(pi, PCIR_INTPIN, 1); 1130 return (0); 1131 } 1132 1133 void 1134 pci_lintr_assert(struct pci_devinst *pi) 1135 { 1136 1137 assert(pi->pi_lintr_pin); 1138 ioapic_assert_pin(pi->pi_vmctx, pi->pi_lintr_pin); 1139 } 1140 1141 void 1142 pci_lintr_deassert(struct pci_devinst *pi) 1143 { 1144 1145 assert(pi->pi_lintr_pin); 1146 ioapic_deassert_pin(pi->pi_vmctx, pi->pi_lintr_pin); 1147 } 1148 1149 /* 1150 * Return 1 if the emulated device in 'slot' is a multi-function device. 1151 * Return 0 otherwise. 1152 */ 1153 static int 1154 pci_emul_is_mfdev(int slot) 1155 { 1156 int f, numfuncs; 1157 1158 numfuncs = 0; 1159 for (f = 0; f < MAXFUNCS; f++) { 1160 if (pci_slotinfo[slot][f].si_devi != NULL) { 1161 numfuncs++; 1162 } 1163 } 1164 return (numfuncs > 1); 1165 } 1166 1167 /* 1168 * Ensure that the PCIM_MFDEV bit is properly set (or unset) depending on 1169 * whether or not is a multi-function being emulated in the pci 'slot'. 1170 */ 1171 static void 1172 pci_emul_hdrtype_fixup(int slot, int off, int bytes, uint32_t *rv) 1173 { 1174 int mfdev; 1175 1176 if (off <= PCIR_HDRTYPE && off + bytes > PCIR_HDRTYPE) { 1177 mfdev = pci_emul_is_mfdev(slot); 1178 switch (bytes) { 1179 case 1: 1180 case 2: 1181 *rv &= ~PCIM_MFDEV; 1182 if (mfdev) { 1183 *rv |= PCIM_MFDEV; 1184 } 1185 break; 1186 case 4: 1187 *rv &= ~(PCIM_MFDEV << 16); 1188 if (mfdev) { 1189 *rv |= (PCIM_MFDEV << 16); 1190 } 1191 break; 1192 } 1193 } 1194 } 1195 1196 static int cfgbus, cfgslot, cfgfunc, cfgoff; 1197 1198 static int 1199 pci_emul_cfgaddr(struct vmctx *ctx, int vcpu, int in, int port, int bytes, 1200 uint32_t *eax, void *arg) 1201 { 1202 uint32_t x; 1203 1204 assert(!in); 1205 1206 if (bytes != 4) 1207 return (-1); 1208 1209 x = *eax; 1210 cfgoff = x & PCI_REGMAX; 1211 cfgfunc = (x >> 8) & PCI_FUNCMAX; 1212 cfgslot = (x >> 11) & PCI_SLOTMAX; 1213 cfgbus = (x >> 16) & PCI_BUSMAX; 1214 1215 return (0); 1216 } 1217 INOUT_PORT(pci_cfgaddr, CONF1_ADDR_PORT, IOPORT_F_OUT, pci_emul_cfgaddr); 1218 1219 static uint32_t 1220 bits_changed(uint32_t old, uint32_t new, uint32_t mask) 1221 { 1222 1223 return ((old ^ new) & mask); 1224 } 1225 1226 static void 1227 pci_emul_cmdwrite(struct pci_devinst *pi, uint32_t new, int bytes) 1228 { 1229 int i; 1230 uint16_t old; 1231 1232 /* 1233 * The command register is at an offset of 4 bytes and thus the 1234 * guest could write 1, 2 or 4 bytes starting at this offset. 1235 */ 1236 1237 old = pci_get_cfgdata16(pi, PCIR_COMMAND); /* stash old value */ 1238 CFGWRITE(pi, PCIR_COMMAND, new, bytes); /* update config */ 1239 new = pci_get_cfgdata16(pi, PCIR_COMMAND); /* get updated value */ 1240 1241 /* 1242 * If the MMIO or I/O address space decoding has changed then 1243 * register/unregister all BARs that decode that address space. 1244 */ 1245 for (i = 0; i < PCI_BARMAX; i++) { 1246 switch (pi->pi_bar[i].type) { 1247 case PCIBAR_NONE: 1248 case PCIBAR_MEMHI64: 1249 break; 1250 case PCIBAR_IO: 1251 /* I/O address space decoding changed? */ 1252 if (bits_changed(old, new, PCIM_CMD_PORTEN)) { 1253 if (porten(pi)) 1254 register_bar(pi, i); 1255 else 1256 unregister_bar(pi, i); 1257 } 1258 break; 1259 case PCIBAR_MEM32: 1260 case PCIBAR_MEM64: 1261 /* MMIO address space decoding changed? */ 1262 if (bits_changed(old, new, PCIM_CMD_MEMEN)) { 1263 if (memen(pi)) 1264 register_bar(pi, i); 1265 else 1266 unregister_bar(pi, i); 1267 } 1268 break; 1269 default: 1270 assert(0); 1271 } 1272 } 1273 } 1274 1275 static int 1276 pci_emul_cfgdata(struct vmctx *ctx, int vcpu, int in, int port, int bytes, 1277 uint32_t *eax, void *arg) 1278 { 1279 struct pci_devinst *pi; 1280 struct pci_devemu *pe; 1281 int coff, idx, needcfg; 1282 uint64_t addr, bar, mask; 1283 1284 assert(bytes == 1 || bytes == 2 || bytes == 4); 1285 1286 if (cfgbus == 0) 1287 pi = pci_slotinfo[cfgslot][cfgfunc].si_devi; 1288 else 1289 pi = NULL; 1290 1291 coff = cfgoff + (port - CONF1_DATA_PORT); 1292 1293 #if 0 1294 printf("pcicfg-%s from 0x%0x of %d bytes (%d/%d/%d)\n\r", 1295 in ? "read" : "write", coff, bytes, cfgbus, cfgslot, cfgfunc); 1296 #endif 1297 1298 /* 1299 * Just return if there is no device at this cfgslot:cfgfunc or 1300 * if the guest is doing an un-aligned access 1301 */ 1302 if (pi == NULL || (coff & (bytes - 1)) != 0) { 1303 if (in) 1304 *eax = 0xffffffff; 1305 return (0); 1306 } 1307 1308 pe = pi->pi_d; 1309 1310 /* 1311 * Config read 1312 */ 1313 if (in) { 1314 /* Let the device emulation override the default handler */ 1315 if (pe->pe_cfgread != NULL) { 1316 needcfg = pe->pe_cfgread(ctx, vcpu, pi, 1317 coff, bytes, eax); 1318 } else { 1319 needcfg = 1; 1320 } 1321 1322 if (needcfg) { 1323 if (bytes == 1) 1324 *eax = pci_get_cfgdata8(pi, coff); 1325 else if (bytes == 2) 1326 *eax = pci_get_cfgdata16(pi, coff); 1327 else 1328 *eax = pci_get_cfgdata32(pi, coff); 1329 } 1330 1331 pci_emul_hdrtype_fixup(cfgslot, coff, bytes, eax); 1332 } else { 1333 /* Let the device emulation override the default handler */ 1334 if (pe->pe_cfgwrite != NULL && 1335 (*pe->pe_cfgwrite)(ctx, vcpu, pi, coff, bytes, *eax) == 0) 1336 return (0); 1337 1338 /* 1339 * Special handling for write to BAR registers 1340 */ 1341 if (coff >= PCIR_BAR(0) && coff < PCIR_BAR(PCI_BARMAX + 1)) { 1342 /* 1343 * Ignore writes to BAR registers that are not 1344 * 4-byte aligned. 1345 */ 1346 if (bytes != 4 || (coff & 0x3) != 0) 1347 return (0); 1348 idx = (coff - PCIR_BAR(0)) / 4; 1349 mask = ~(pi->pi_bar[idx].size - 1); 1350 switch (pi->pi_bar[idx].type) { 1351 case PCIBAR_NONE: 1352 pi->pi_bar[idx].addr = bar = 0; 1353 break; 1354 case PCIBAR_IO: 1355 addr = *eax & mask; 1356 addr &= 0xffff; 1357 bar = addr | PCIM_BAR_IO_SPACE; 1358 /* 1359 * Register the new BAR value for interception 1360 */ 1361 if (addr != pi->pi_bar[idx].addr) { 1362 update_bar_address(pi, addr, idx, 1363 PCIBAR_IO); 1364 } 1365 break; 1366 case PCIBAR_MEM32: 1367 addr = bar = *eax & mask; 1368 bar |= PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32; 1369 if (addr != pi->pi_bar[idx].addr) { 1370 update_bar_address(pi, addr, idx, 1371 PCIBAR_MEM32); 1372 } 1373 break; 1374 case PCIBAR_MEM64: 1375 addr = bar = *eax & mask; 1376 bar |= PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 | 1377 PCIM_BAR_MEM_PREFETCH; 1378 if (addr != (uint32_t)pi->pi_bar[idx].addr) { 1379 update_bar_address(pi, addr, idx, 1380 PCIBAR_MEM64); 1381 } 1382 break; 1383 case PCIBAR_MEMHI64: 1384 mask = ~(pi->pi_bar[idx - 1].size - 1); 1385 addr = ((uint64_t)*eax << 32) & mask; 1386 bar = addr >> 32; 1387 if (bar != pi->pi_bar[idx - 1].addr >> 32) { 1388 update_bar_address(pi, addr, idx - 1, 1389 PCIBAR_MEMHI64); 1390 } 1391 break; 1392 default: 1393 assert(0); 1394 } 1395 pci_set_cfgdata32(pi, coff, bar); 1396 1397 } else if (pci_emul_iscap(pi, coff)) { 1398 pci_emul_capwrite(pi, coff, bytes, *eax); 1399 } else if (coff == PCIR_COMMAND) { 1400 pci_emul_cmdwrite(pi, *eax, bytes); 1401 } else { 1402 CFGWRITE(pi, coff, *eax, bytes); 1403 } 1404 } 1405 1406 return (0); 1407 } 1408 1409 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+0, IOPORT_F_INOUT, pci_emul_cfgdata); 1410 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+1, IOPORT_F_INOUT, pci_emul_cfgdata); 1411 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+2, IOPORT_F_INOUT, pci_emul_cfgdata); 1412 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+3, IOPORT_F_INOUT, pci_emul_cfgdata); 1413 1414 /* 1415 * I/O ports to configure PCI IRQ routing. We ignore all writes to it. 1416 */ 1417 static int 1418 pci_irq_port_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes, 1419 uint32_t *eax, void *arg) 1420 { 1421 assert(in == 0); 1422 return (0); 1423 } 1424 INOUT_PORT(pci_irq, 0xC00, IOPORT_F_OUT, pci_irq_port_handler); 1425 INOUT_PORT(pci_irq, 0xC01, IOPORT_F_OUT, pci_irq_port_handler); 1426 1427 #define PCI_EMUL_TEST 1428 #ifdef PCI_EMUL_TEST 1429 /* 1430 * Define a dummy test device 1431 */ 1432 #define DIOSZ 20 1433 #define DMEMSZ 4096 1434 struct pci_emul_dsoftc { 1435 uint8_t ioregs[DIOSZ]; 1436 uint8_t memregs[DMEMSZ]; 1437 }; 1438 1439 #define PCI_EMUL_MSI_MSGS 4 1440 #define PCI_EMUL_MSIX_MSGS 16 1441 1442 static int 1443 pci_emul_dinit(struct vmctx *ctx, struct pci_devinst *pi, char *opts) 1444 { 1445 int error; 1446 struct pci_emul_dsoftc *sc; 1447 1448 sc = malloc(sizeof(struct pci_emul_dsoftc)); 1449 memset(sc, 0, sizeof(struct pci_emul_dsoftc)); 1450 1451 pi->pi_arg = sc; 1452 1453 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x0001); 1454 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x10DD); 1455 pci_set_cfgdata8(pi, PCIR_CLASS, 0x02); 1456 1457 error = pci_emul_add_msicap(pi, PCI_EMUL_MSI_MSGS); 1458 assert(error == 0); 1459 1460 error = pci_emul_alloc_bar(pi, 0, PCIBAR_IO, DIOSZ); 1461 assert(error == 0); 1462 1463 error = pci_emul_alloc_bar(pi, 1, PCIBAR_MEM32, DMEMSZ); 1464 assert(error == 0); 1465 1466 return (0); 1467 } 1468 1469 static void 1470 pci_emul_diow(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, 1471 uint64_t offset, int size, uint64_t value) 1472 { 1473 int i; 1474 struct pci_emul_dsoftc *sc = pi->pi_arg; 1475 1476 if (baridx == 0) { 1477 if (offset + size > DIOSZ) { 1478 printf("diow: iow too large, offset %ld size %d\n", 1479 offset, size); 1480 return; 1481 } 1482 1483 if (size == 1) { 1484 sc->ioregs[offset] = value & 0xff; 1485 } else if (size == 2) { 1486 *(uint16_t *)&sc->ioregs[offset] = value & 0xffff; 1487 } else if (size == 4) { 1488 *(uint32_t *)&sc->ioregs[offset] = value; 1489 } else { 1490 printf("diow: iow unknown size %d\n", size); 1491 } 1492 1493 /* 1494 * Special magic value to generate an interrupt 1495 */ 1496 if (offset == 4 && size == 4 && pci_msi_enabled(pi)) 1497 pci_generate_msi(pi, value % pci_msi_msgnum(pi)); 1498 1499 if (value == 0xabcdef) { 1500 for (i = 0; i < pci_msi_msgnum(pi); i++) 1501 pci_generate_msi(pi, i); 1502 } 1503 } 1504 1505 if (baridx == 1) { 1506 if (offset + size > DMEMSZ) { 1507 printf("diow: memw too large, offset %ld size %d\n", 1508 offset, size); 1509 return; 1510 } 1511 1512 if (size == 1) { 1513 sc->memregs[offset] = value; 1514 } else if (size == 2) { 1515 *(uint16_t *)&sc->memregs[offset] = value; 1516 } else if (size == 4) { 1517 *(uint32_t *)&sc->memregs[offset] = value; 1518 } else if (size == 8) { 1519 *(uint64_t *)&sc->memregs[offset] = value; 1520 } else { 1521 printf("diow: memw unknown size %d\n", size); 1522 } 1523 1524 /* 1525 * magic interrupt ?? 1526 */ 1527 } 1528 1529 if (baridx > 1) { 1530 printf("diow: unknown bar idx %d\n", baridx); 1531 } 1532 } 1533 1534 static uint64_t 1535 pci_emul_dior(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, 1536 uint64_t offset, int size) 1537 { 1538 struct pci_emul_dsoftc *sc = pi->pi_arg; 1539 uint32_t value; 1540 1541 if (baridx == 0) { 1542 if (offset + size > DIOSZ) { 1543 printf("dior: ior too large, offset %ld size %d\n", 1544 offset, size); 1545 return (0); 1546 } 1547 1548 if (size == 1) { 1549 value = sc->ioregs[offset]; 1550 } else if (size == 2) { 1551 value = *(uint16_t *) &sc->ioregs[offset]; 1552 } else if (size == 4) { 1553 value = *(uint32_t *) &sc->ioregs[offset]; 1554 } else { 1555 printf("dior: ior unknown size %d\n", size); 1556 } 1557 } 1558 1559 if (baridx == 1) { 1560 if (offset + size > DMEMSZ) { 1561 printf("dior: memr too large, offset %ld size %d\n", 1562 offset, size); 1563 return (0); 1564 } 1565 1566 if (size == 1) { 1567 value = sc->memregs[offset]; 1568 } else if (size == 2) { 1569 value = *(uint16_t *) &sc->memregs[offset]; 1570 } else if (size == 4) { 1571 value = *(uint32_t *) &sc->memregs[offset]; 1572 } else if (size == 8) { 1573 value = *(uint64_t *) &sc->memregs[offset]; 1574 } else { 1575 printf("dior: ior unknown size %d\n", size); 1576 } 1577 } 1578 1579 1580 if (baridx > 1) { 1581 printf("dior: unknown bar idx %d\n", baridx); 1582 return (0); 1583 } 1584 1585 return (value); 1586 } 1587 1588 struct pci_devemu pci_dummy = { 1589 .pe_emu = "dummy", 1590 .pe_init = pci_emul_dinit, 1591 .pe_barwrite = pci_emul_diow, 1592 .pe_barread = pci_emul_dior 1593 }; 1594 PCI_EMUL_SET(pci_dummy); 1595 1596 #endif /* PCI_EMUL_TEST */ 1597