/*-
 * Copyright (c) 2011 NetApp, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/linker_set.h>

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <assert.h>

#include <machine/vmm.h>
#include <vmmapi.h>

#include "bhyverun.h"
#include "inout.h"
#include "mem.h"
#include "mptbl.h"
#include "pci_emul.h"
#include "ioapic.h"

#define CONF1_ADDR_PORT	0x0cf8
#define CONF1_DATA_PORT	0x0cfc

#define CFGWRITE(pi, off, val, b)					\
do {									\
	if ((b) == 1) {							\
		pci_set_cfgdata8((pi), (off), (val));			\
	} else if ((b) == 2) {						\
		pci_set_cfgdata16((pi), (off), (val));			\
	} else {							\
		pci_set_cfgdata32((pi), (off), (val));			\
	}								\
} while (0)

#define MAXSLOTS	(PCI_SLOTMAX + 1)
#define MAXFUNCS	(PCI_FUNCMAX + 1)

static struct slotinfo {
	char	*si_name;
	char	*si_param;
	struct pci_devinst *si_devi;
	int	si_legacy;
} pci_slotinfo[MAXSLOTS][MAXFUNCS];

/*
 * Used to keep track of legacy interrupt owners/requestors
 */
#define NLIRQ	16

static struct lirqinfo {
	int	li_generic;
	int	li_acount;
	struct pci_devinst *li_owner;	/* XXX should be a list */
} lirq[NLIRQ];

SET_DECLARE(pci_devemu_set, struct pci_devemu);

static uint64_t pci_emul_iobase;
static uint64_t pci_emul_membase32;
static uint64_t pci_emul_membase64;

#define PCI_EMUL_IOBASE		0x2000
#define PCI_EMUL_IOLIMIT	0x10000

#define PCI_EMUL_MEMBASE32	(lomem_sz)
#define PCI_EMUL_MEMLIMIT32	0xE0000000	/* 3.5GB */

#define PCI_EMUL_MEMBASE64	0xD000000000UL
#define PCI_EMUL_MEMLIMIT64	0xFD00000000UL

static int pci_emul_devices;

/*
 * I/O access
 */

/*
 * Slot options are in the form:
 *
 *  <slot>[:<func>],<emul>[,<config>]
 *
 *  slot is 0..31
 *  func is 0..7
 *  emul is a string describing the type of PCI device e.g. virtio-net
 *  config is an optional string, depending on the device, that can be
 *  used for configuration.
 *  Examples are:
 *   1,virtio-net,tap0
 *   3:0,dummy
 */
static void
pci_parse_slot_usage(char *aopt)
{
	printf("Invalid PCI slot info field \"%s\"\n", aopt);
	free(aopt);
}

void
pci_parse_slot(char *opt, int legacy)
{
	char *slot, *func, *emul, *config;
	char *str, *cpy;
	int snum, fnum;

	str = cpy = strdup(opt);

	config = NULL;

	if (strchr(str, ':') != NULL) {
		slot = strsep(&str, ":");
		func = strsep(&str, ",");
	} else {
		slot = strsep(&str, ",");
		func = NULL;
	}

	emul = strsep(&str, ",");
	if (str != NULL) {
		config = strsep(&str, ",");
	}

	if (emul == NULL) {
		pci_parse_slot_usage(cpy);
		return;
	}

	snum = atoi(slot);
	fnum = func ? atoi(func) : 0;
	if (snum < 0 || snum >= MAXSLOTS || fnum < 0 || fnum >= MAXFUNCS) {
		pci_parse_slot_usage(cpy);
	} else {
		pci_slotinfo[snum][fnum].si_name = emul;
		pci_slotinfo[snum][fnum].si_param = config;
		pci_slotinfo[snum][fnum].si_legacy = legacy;
	}
}

static int
pci_valid_pba_offset(struct pci_devinst *pi, uint64_t offset)
{

	if (offset < pi->pi_msix.pba_offset)
		return (0);

	if (offset >= pi->pi_msix.pba_offset + pi->pi_msix.pba_size) {
		return (0);
	}

	return (1);
}

int
pci_emul_msix_twrite(struct pci_devinst *pi, uint64_t offset, int size,
    uint64_t value)
{
	int msix_entry_offset;
	int tab_index;
	char *dest;

	/* support only 4 or 8 byte writes */
	if (size != 4 && size != 8)
		return (-1);

	/*
	 * Return if table index is beyond what device supports
	 */
	tab_index = offset / MSIX_TABLE_ENTRY_SIZE;
	if (tab_index >= pi->pi_msix.table_count)
		return (-1);

	msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE;

	/* support only aligned writes */
	if ((msix_entry_offset % size) != 0)
		return (-1);

	dest = (char *)(pi->pi_msix.table + tab_index);
	dest += msix_entry_offset;

	if (size == 4)
		*((uint32_t *)dest) = value;
	else
		*((uint64_t *)dest) = value;

	return (0);
}

uint64_t
pci_emul_msix_tread(struct pci_devinst *pi, uint64_t offset, int size)
{
	char *dest;
	int msix_entry_offset;
	int tab_index;
	uint64_t retval = ~0;

	/* support only 4 or 8 byte reads */
	if (size != 4 && size != 8)
		return (retval);

	msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE;

	/* support only aligned reads */
	if ((msix_entry_offset % size) != 0) {
		return (retval);
	}

	tab_index = offset / MSIX_TABLE_ENTRY_SIZE;

	if (tab_index < pi->pi_msix.table_count) {
		/* valid MSI-X Table access */
		dest = (char *)(pi->pi_msix.table + tab_index);
		dest += msix_entry_offset;

		if (size == 4)
			retval = *((uint32_t *)dest);
		else
			retval = *((uint64_t *)dest);
	} else if (pci_valid_pba_offset(pi, offset)) {
		/* return 0 for PBA access */
		retval = 0;
	}

	return (retval);
}

int
pci_msix_table_bar(struct pci_devinst *pi)
{

	if (pi->pi_msix.table != NULL)
		return (pi->pi_msix.table_bar);
	else
		return (-1);
}

int
pci_msix_pba_bar(struct pci_devinst *pi)
{

	if (pi->pi_msix.table != NULL)
		return (pi->pi_msix.pba_bar);
	else
		return (-1);
}
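/*
 * Each MSI-X table entry occupies MSIX_TABLE_ENTRY_SIZE (16) bytes,
 * laid out per the PCI spec as four 32-bit fields:
 *
 *   +0   Message Address (low 32 bits)
 *   +4   Message Address (high 32 bits)
 *   +8   Message Data
 *   +12  Vector Control (bit 0 is the per-vector mask)
 *
 * This is why the table accessors above split a BAR offset into a table
 * index (offset / MSIX_TABLE_ENTRY_SIZE) and a field offset within the
 * entry (offset % MSIX_TABLE_ENTRY_SIZE): a 4-byte write to offset 40,
 * for example, targets the Message Data field of table entry 2.
 */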
static int
pci_emul_io_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
    uint32_t *eax, void *arg)
{
	struct pci_devinst *pdi = arg;
	struct pci_devemu *pe = pdi->pi_d;
	uint64_t offset;
	int i;

	for (i = 0; i <= PCI_BARMAX; i++) {
		if (pdi->pi_bar[i].type == PCIBAR_IO &&
		    port >= pdi->pi_bar[i].addr &&
		    port + bytes <= pdi->pi_bar[i].addr + pdi->pi_bar[i].size) {
			offset = port - pdi->pi_bar[i].addr;
			if (in)
				*eax = (*pe->pe_barread)(ctx, vcpu, pdi, i,
				    offset, bytes);
			else
				(*pe->pe_barwrite)(ctx, vcpu, pdi, i, offset,
				    bytes, *eax);
			return (0);
		}
	}
	return (-1);
}

static int
pci_emul_mem_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr,
    int size, uint64_t *val, void *arg1, long arg2)
{
	struct pci_devinst *pdi = arg1;
	struct pci_devemu *pe = pdi->pi_d;
	uint64_t offset;
	int bidx = (int) arg2;

	assert(bidx <= PCI_BARMAX);
	assert(pdi->pi_bar[bidx].type == PCIBAR_MEM32 ||
	    pdi->pi_bar[bidx].type == PCIBAR_MEM64);
	assert(addr >= pdi->pi_bar[bidx].addr &&
	    addr + size <= pdi->pi_bar[bidx].addr + pdi->pi_bar[bidx].size);

	offset = addr - pdi->pi_bar[bidx].addr;

	if (dir == MEM_F_WRITE)
		(*pe->pe_barwrite)(ctx, vcpu, pdi, bidx, offset, size, *val);
	else
		*val = (*pe->pe_barread)(ctx, vcpu, pdi, bidx, offset, size);

	return (0);
}

static int
pci_emul_alloc_resource(uint64_t *baseptr, uint64_t limit, uint64_t size,
    uint64_t *addr)
{
	uint64_t base;

	assert((size & (size - 1)) == 0);	/* must be a power of 2 */

	base = roundup2(*baseptr, size);

	if (base + size <= limit) {
		*addr = base;
		*baseptr = base + size;
		return (0);
	} else
		return (-1);
}

int
pci_emul_alloc_bar(struct pci_devinst *pdi, int idx, enum pcibar_type type,
    uint64_t size)
{

	return (pci_emul_alloc_pbar(pdi, idx, 0, type, size));
}
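/*
 * A worked example of pci_emul_alloc_resource() above: with
 * pci_emul_iobase sitting at 0x2010, a request for a 0x20-byte I/O BAR
 * is aligned up by roundup2() to base 0x2020, the BAR is placed there,
 * and the allocation cursor advances to 0x2040.  A request whose
 * aligned end would exceed 'limit' (PCI_EMUL_IOLIMIT for I/O space)
 * fails with -1 instead.
 */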
int
pci_emul_alloc_pbar(struct pci_devinst *pdi, int idx, uint64_t hostbase,
    enum pcibar_type type, uint64_t size)
{
	int i, error;
	uint64_t *baseptr, limit, addr, mask, lobits, bar;
	struct inout_port iop;
	struct mem_range memp;

	assert(idx >= 0 && idx <= PCI_BARMAX);

	if ((size & (size - 1)) != 0)
		size = 1UL << flsl(size);	/* round up to a power of 2 */

	switch (type) {
	case PCIBAR_NONE:
		baseptr = NULL;
		addr = mask = lobits = 0;
		break;
	case PCIBAR_IO:
		if (hostbase &&
		    pci_slotinfo[pdi->pi_slot][pdi->pi_func].si_legacy) {
			assert(hostbase < PCI_EMUL_IOBASE);
			baseptr = &hostbase;
		} else {
			baseptr = &pci_emul_iobase;
		}
		limit = PCI_EMUL_IOLIMIT;
		mask = PCIM_BAR_IO_BASE;
		lobits = PCIM_BAR_IO_SPACE;
		break;
	case PCIBAR_MEM64:
		/*
		 * XXX
		 * Some drivers do not work well if the 64-bit BAR is allocated
		 * above 4GB. Allow for this by allocating small requests under
		 * 4GB unless the allocation size is larger than some arbitrary
		 * number (32MB currently).
		 */
		if (size > 32 * 1024 * 1024) {
			/*
			 * XXX special case for device requiring peer-peer DMA
			 */
			if (size == 0x100000000UL)
				baseptr = &hostbase;
			else
				baseptr = &pci_emul_membase64;
			limit = PCI_EMUL_MEMLIMIT64;
			mask = PCIM_BAR_MEM_BASE;
			lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 |
			    PCIM_BAR_MEM_PREFETCH;
			break;
		} else {
			baseptr = &pci_emul_membase32;
			limit = PCI_EMUL_MEMLIMIT32;
			mask = PCIM_BAR_MEM_BASE;
			lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64;
		}
		break;
	case PCIBAR_MEM32:
		baseptr = &pci_emul_membase32;
		limit = PCI_EMUL_MEMLIMIT32;
		mask = PCIM_BAR_MEM_BASE;
		lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32;
		break;
	default:
		printf("pci_emul_alloc_pbar: invalid bar type %d\n", type);
		assert(0);
	}

	if (baseptr != NULL) {
		error = pci_emul_alloc_resource(baseptr, limit, size, &addr);
		if (error != 0)
			return (error);
	}

	pdi->pi_bar[idx].type = type;
	pdi->pi_bar[idx].addr = addr;
	pdi->pi_bar[idx].size = size;

	/* Initialize the BAR register in config space */
	bar = (addr & mask) | lobits;
	pci_set_cfgdata32(pdi, PCIR_BAR(idx), bar);

	if (type == PCIBAR_MEM64) {
		assert(idx + 1 <= PCI_BARMAX);
		pdi->pi_bar[idx + 1].type = PCIBAR_MEMHI64;
		pci_set_cfgdata32(pdi, PCIR_BAR(idx + 1), bar >> 32);
	}

	/* add a handler to intercept accesses to the I/O bar */
	if (type == PCIBAR_IO) {
		iop.name = pdi->pi_name;
		iop.flags = IOPORT_F_INOUT;
		iop.handler = pci_emul_io_handler;
		iop.arg = pdi;

		for (i = 0; i < size; i++) {
			iop.port = addr + i;
			register_inout(&iop);
		}
	} else if (type == PCIBAR_MEM32 || type == PCIBAR_MEM64) {
		/* add memory bar intercept handler */
		memp.name = pdi->pi_name;
		memp.flags = MEM_F_RW;
		memp.base = addr;
		memp.size = size;
		memp.handler = pci_emul_mem_handler;
		memp.arg1 = pdi;
		memp.arg2 = idx;

		error = register_mem(&memp);
		assert(error == 0);
	}

	return (0);
}

#define CAP_START_OFFSET	0x40
static int
pci_emul_add_capability(struct pci_devinst *pi, u_char *capdata, int caplen)
{
	int i, capoff, capid, reallen;
	uint16_t sts;

	static u_char endofcap[4] = {
		PCIY_RESERVED, 0, 0, 0
	};

	assert(caplen > 0 && capdata[0] != PCIY_RESERVED);

	reallen = roundup2(caplen, 4);		/* dword aligned */

	sts = pci_get_cfgdata16(pi, PCIR_STATUS);
	if ((sts & PCIM_STATUS_CAPPRESENT) == 0) {
		capoff = CAP_START_OFFSET;
		pci_set_cfgdata8(pi, PCIR_CAP_PTR, capoff);
		pci_set_cfgdata16(pi, PCIR_STATUS, sts|PCIM_STATUS_CAPPRESENT);
	} else {
		capoff = pci_get_cfgdata8(pi, PCIR_CAP_PTR);
		while (1) {
			assert((capoff & 0x3) == 0);
			capid = pci_get_cfgdata8(pi, capoff);
			if (capid == PCIY_RESERVED)
				break;
			capoff = pci_get_cfgdata8(pi, capoff + 1);
		}
	}

	/* Check if we have enough space */
	if (capoff + reallen + sizeof(endofcap) > PCI_REGMAX + 1)
		return (-1);

	/* Copy the capability */
	for (i = 0; i < caplen; i++)
		pci_set_cfgdata8(pi, capoff + i, capdata[i]);

	/* Set the next capability pointer */
	pci_set_cfgdata8(pi, capoff + 1, capoff + reallen);

	/* Copy the reserved capability which serves as the end marker */
	for (i = 0; i < sizeof(endofcap); i++)
		pci_set_cfgdata8(pi, capoff + reallen + i, endofcap[i]);

	return (0);
}
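/*
 * For illustration, after adding a 14-byte MSI capability followed by a
 * 12-byte MSI-X capability, the chain in config space looks like:
 *
 *   PCIR_CAP_PTR (0x34) -> 0x40: [PCIY_MSI ][next 0x50][12 more bytes]
 *                          0x50: [PCIY_MSIX][next 0x5c][10 more bytes]
 *                          0x5c: [PCIY_RESERVED][0][0][0]  <- end marker
 *
 * Each capability is dword-aligned (14 rounds up to 16), and the chain
 * is terminated by the reserved-id marker appended above, which is what
 * the list traversals elsewhere in this file key off.
 */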
static struct pci_devemu *
pci_emul_finddev(char *name)
{
	struct pci_devemu **pdpp, *pdp;

	SET_FOREACH(pdpp, pci_devemu_set) {
		pdp = *pdpp;
		if (!strcmp(pdp->pe_emu, name)) {
			return (pdp);
		}
	}

	return (NULL);
}

static void
pci_emul_init(struct vmctx *ctx, struct pci_devemu *pde, int slot, int func,
    char *params)
{
	struct pci_devinst *pdi;

	pdi = malloc(sizeof(struct pci_devinst));
	bzero(pdi, sizeof(*pdi));

	pdi->pi_vmctx = ctx;
	pdi->pi_bus = 0;
	pdi->pi_slot = slot;
	pdi->pi_func = func;
	pdi->pi_d = pde;
	snprintf(pdi->pi_name, PI_NAMESZ, "%s-pci-%d", pde->pe_emu, slot);

	/* Disable legacy interrupts */
	pci_set_cfgdata8(pdi, PCIR_INTLINE, 255);
	pci_set_cfgdata8(pdi, PCIR_INTPIN, 0);

	pci_set_cfgdata8(pdi, PCIR_COMMAND,
	    PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN);

	if ((*pde->pe_init)(ctx, pdi, params) != 0) {
		free(pdi);
	} else {
		pci_emul_devices++;
		pci_slotinfo[slot][func].si_devi = pdi;
	}
}

void
pci_populate_msicap(struct msicap *msicap, int msgnum, int nextptr)
{
	int mmc;

	CTASSERT(sizeof(struct msicap) == 14);

	/* Number of msi messages must be a power of 2 between 1 and 32 */
	assert((msgnum & (msgnum - 1)) == 0 && msgnum >= 1 && msgnum <= 32);
	mmc = ffs(msgnum) - 1;

	bzero(msicap, sizeof(struct msicap));
	msicap->capid = PCIY_MSI;
	msicap->nextptr = nextptr;
	msicap->msgctrl = PCIM_MSICTRL_64BIT | (mmc << 1);
}

int
pci_emul_add_msicap(struct pci_devinst *pi, int msgnum)
{
	struct msicap msicap;

	pci_populate_msicap(&msicap, msgnum, 0);

	return (pci_emul_add_capability(pi, (u_char *)&msicap, sizeof(msicap)));
}
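/*
 * Worked example for pci_populate_msicap() above: msgnum = 4 yields
 * mmc = ffs(4) - 1 = 2, so the Multiple Message Capable field (bits 3:1
 * of the message control register) advertises 2^2 = 4 vectors, and
 * PCIM_MSICTRL_64BIT advertises a 64-bit message address.
 */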
static void
pci_populate_msixcap(struct msixcap *msixcap, int msgnum, int barnum,
    uint32_t msix_tab_size, int nextptr)
{
	CTASSERT(sizeof(struct msixcap) == 12);

	assert(msix_tab_size % 4096 == 0);

	bzero(msixcap, sizeof(struct msixcap));
	msixcap->capid = PCIY_MSIX;
	msixcap->nextptr = nextptr;

	/*
	 * Message Control Register, all fields set to
	 * zero except for the Table Size.
	 * Note: Table size N is encoded as N-1
	 */
	msixcap->msgctrl = msgnum - 1;

	/*
	 * MSI-X BAR setup:
	 * - MSI-X table starts at offset 0
	 * - PBA table starts at a 4K aligned offset after the MSI-X table
	 */
	msixcap->table_info = barnum & PCIM_MSIX_BIR_MASK;
	msixcap->pba_info = msix_tab_size | (barnum & PCIM_MSIX_BIR_MASK);
}

static void
pci_msix_table_init(struct pci_devinst *pi, int table_entries)
{
	int i, table_size;

	assert(table_entries > 0);
	assert(table_entries <= MAX_MSIX_TABLE_ENTRIES);

	table_size = table_entries * MSIX_TABLE_ENTRY_SIZE;
	pi->pi_msix.table = malloc(table_size);
	bzero(pi->pi_msix.table, table_size);

	/* set mask bit of vector control register */
	for (i = 0; i < table_entries; i++)
		pi->pi_msix.table[i].vector_control |= PCIM_MSIX_VCTRL_MASK;
}

int
pci_emul_add_msixcap(struct pci_devinst *pi, int msgnum, int barnum)
{
	uint16_t pba_index;
	uint32_t tab_size;
	struct msixcap msixcap;

	assert(msgnum >= 1 && msgnum <= MAX_MSIX_TABLE_ENTRIES);
	assert(barnum >= 0 && barnum <= PCIR_MAX_BAR_0);

	tab_size = msgnum * MSIX_TABLE_ENTRY_SIZE;

	/* Align table size to nearest 4K */
	tab_size = roundup2(tab_size, 4096);

	pi->pi_msix.table_bar = barnum;
	pi->pi_msix.pba_bar = barnum;
	pi->pi_msix.table_offset = 0;
	pi->pi_msix.table_count = msgnum;
	pi->pi_msix.pba_offset = tab_size;

	/* calculate the MMIO size required for MSI-X PBA */
	pba_index = (msgnum - 1) / (PBA_TABLE_ENTRY_SIZE * 8);
	pi->pi_msix.pba_size = (pba_index + 1) * PBA_TABLE_ENTRY_SIZE;

	pci_msix_table_init(pi, msgnum);

	pci_populate_msixcap(&msixcap, msgnum, barnum, tab_size, 0);

	/* allocate memory for MSI-X Table and PBA */
	pci_emul_alloc_bar(pi, barnum, PCIBAR_MEM32,
	    tab_size + pi->pi_msix.pba_size);

	return (pci_emul_add_capability(pi, (u_char *)&msixcap,
	    sizeof(msixcap)));
}

void
msixcap_cfgwrite(struct pci_devinst *pi, int capoff, int offset,
    int bytes, uint32_t val)
{
	uint16_t msgctrl, rwmask;
	int off, table_bar;

	off = offset - capoff;
	table_bar = pi->pi_msix.table_bar;
	/* Message Control Register */
	if (off == 2 && bytes == 2) {
		rwmask = PCIM_MSIXCTRL_MSIX_ENABLE | PCIM_MSIXCTRL_FUNCTION_MASK;
		msgctrl = pci_get_cfgdata16(pi, offset);
		msgctrl &= ~rwmask;
		msgctrl |= val & rwmask;
		val = msgctrl;

		pi->pi_msix.enabled = val & PCIM_MSIXCTRL_MSIX_ENABLE;
		pi->pi_msix.function_mask = val & PCIM_MSIXCTRL_FUNCTION_MASK;
	}

	CFGWRITE(pi, offset, val, bytes);
}
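/*
 * The decoding in msicap_cfgwrite() below follows the local APIC MSI
 * message format: for example, a guest that programs address 0xfee01000
 * and data 0x0041 is requesting delivery to the APIC with ID 1
 * ((addrlo >> 12) & 0xff) using vector 0x41 (msgdata & 0xff).
 */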
void
msicap_cfgwrite(struct pci_devinst *pi, int capoff, int offset,
    int bytes, uint32_t val)
{
	uint16_t msgctrl, rwmask, msgdata, mme;
	uint32_t addrlo;

	/*
	 * If the guest is writing to the message control register, make
	 * sure we do not overwrite read-only fields.
	 */
	if ((offset - capoff) == 2 && bytes == 2) {
		rwmask = PCIM_MSICTRL_MME_MASK | PCIM_MSICTRL_MSI_ENABLE;
		msgctrl = pci_get_cfgdata16(pi, offset);
		msgctrl &= ~rwmask;
		msgctrl |= val & rwmask;
		val = msgctrl;

		addrlo = pci_get_cfgdata32(pi, capoff + 4);
		if (msgctrl & PCIM_MSICTRL_64BIT)
			msgdata = pci_get_cfgdata16(pi, capoff + 12);
		else
			msgdata = pci_get_cfgdata16(pi, capoff + 8);

		/*
		 * XXX check delivery mode, destination mode etc
		 */
		mme = msgctrl & PCIM_MSICTRL_MME_MASK;
		pi->pi_msi.enabled = msgctrl & PCIM_MSICTRL_MSI_ENABLE ? 1 : 0;
		if (pi->pi_msi.enabled) {
			pi->pi_msi.cpu = (addrlo >> 12) & 0xff;
			pi->pi_msi.vector = msgdata & 0xff;
			pi->pi_msi.msgnum = 1 << (mme >> 4);
		} else {
			pi->pi_msi.cpu = 0;
			pi->pi_msi.vector = 0;
			pi->pi_msi.msgnum = 0;
		}
	}

	CFGWRITE(pi, offset, val, bytes);
}

/*
 * This function assumes that 'coff' is in the capabilities region of the
 * config space.
 */
static void
pci_emul_capwrite(struct pci_devinst *pi, int offset, int bytes, uint32_t val)
{
	int capid;
	uint8_t capoff, nextoff;

	/* Do not allow un-aligned writes */
	if ((offset & (bytes - 1)) != 0)
		return;

	/* Find the capability that we want to update */
	capoff = CAP_START_OFFSET;
	while (1) {
		capid = pci_get_cfgdata8(pi, capoff);
		if (capid == PCIY_RESERVED)
			break;

		nextoff = pci_get_cfgdata8(pi, capoff + 1);
		if (offset >= capoff && offset < nextoff)
			break;

		capoff = nextoff;
	}
	assert(offset >= capoff);

	/*
	 * Capability ID and Next Capability Pointer are read-only
	 */
	if (offset == capoff || offset == capoff + 1)
		return;

	switch (capid) {
	case PCIY_MSI:
		msicap_cfgwrite(pi, capoff, offset, bytes, val);
		break;
	case PCIY_MSIX:
		msixcap_cfgwrite(pi, capoff, offset, bytes, val);
		break;
	default:
		break;
	}
}

static int
pci_emul_iscap(struct pci_devinst *pi, int offset)
{
	int found;
	uint16_t sts;
	uint8_t capid, lastoff;

	found = 0;
	sts = pci_get_cfgdata16(pi, PCIR_STATUS);
	if ((sts & PCIM_STATUS_CAPPRESENT) != 0) {
		lastoff = pci_get_cfgdata8(pi, PCIR_CAP_PTR);
		while (1) {
			assert((lastoff & 0x3) == 0);
			capid = pci_get_cfgdata8(pi, lastoff);
			if (capid == PCIY_RESERVED)
				break;
			lastoff = pci_get_cfgdata8(pi, lastoff + 1);
		}
		if (offset >= CAP_START_OFFSET && offset <= lastoff)
			found = 1;
	}
	return (found);
}

void
init_pci(struct vmctx *ctx)
{
	struct pci_devemu *pde;
	struct slotinfo *si;
	int slot, func;

	pci_emul_iobase = PCI_EMUL_IOBASE;
	pci_emul_membase32 = PCI_EMUL_MEMBASE32;
	pci_emul_membase64 = PCI_EMUL_MEMBASE64;

	for (slot = 0; slot < MAXSLOTS; slot++) {
		for (func = 0; func < MAXFUNCS; func++) {
			si = &pci_slotinfo[slot][func];
			if (si->si_name != NULL) {
				pde = pci_emul_finddev(si->si_name);
				if (pde != NULL) {
					pci_emul_init(ctx, pde, slot, func,
					    si->si_param);
				}
			}
		}
	}

	/*
	 * Allow ISA IRQs 5, 10, 11, 12, and 15 to be available for
	 * generic use
	 */
	lirq[5].li_generic = 1;
	lirq[10].li_generic = 1;
	lirq[11].li_generic = 1;
	lirq[12].li_generic = 1;
	lirq[15].li_generic = 1;
}

int
pci_msi_enabled(struct pci_devinst *pi)
{
	return (pi->pi_msi.enabled);
}
int
pci_msi_msgnum(struct pci_devinst *pi)
{
	if (pi->pi_msi.enabled)
		return (pi->pi_msi.msgnum);
	else
		return (0);
}

int
pci_msix_enabled(struct pci_devinst *pi)
{

	return (pi->pi_msix.enabled && !pi->pi_msi.enabled);
}

void
pci_generate_msix(struct pci_devinst *pi, int index)
{
	struct msix_table_entry *mte;

	if (!pci_msix_enabled(pi))
		return;

	if (pi->pi_msix.function_mask)
		return;

	if (index >= pi->pi_msix.table_count)
		return;

	mte = &pi->pi_msix.table[index];
	if ((mte->vector_control & PCIM_MSIX_VCTRL_MASK) == 0) {
		/* XXX Set PBA bit if interrupt is disabled */
		vm_lapic_irq(pi->pi_vmctx,
		    (mte->addr >> 12) & 0xff, mte->msg_data & 0xff);
	}
}

void
pci_generate_msi(struct pci_devinst *pi, int msg)
{

	if (pci_msi_enabled(pi) && msg < pci_msi_msgnum(pi)) {
		vm_lapic_irq(pi->pi_vmctx,
		    pi->pi_msi.cpu,
		    pi->pi_msi.vector + msg);
	}
}

int
pci_is_legacy(struct pci_devinst *pi)
{

	return (pci_slotinfo[pi->pi_slot][pi->pi_func].si_legacy);
}

static int
pci_lintr_alloc(struct pci_devinst *pi, int vec)
{
	int i;

	assert(vec < NLIRQ);

	if (vec == -1) {
		for (i = 0; i < NLIRQ; i++) {
			if (lirq[i].li_generic &&
			    lirq[i].li_owner == NULL) {
				vec = i;
				break;
			}
		}
	} else {
		if (lirq[vec].li_owner != NULL) {
			vec = -1;
		}
	}
	assert(vec != -1);

	lirq[vec].li_owner = pi;
	pi->pi_lintr_pin = vec;

	return (vec);
}

int
pci_lintr_request(struct pci_devinst *pi, int vec)
{

	vec = pci_lintr_alloc(pi, vec);
	pci_set_cfgdata8(pi, PCIR_INTLINE, vec);
	pci_set_cfgdata8(pi, PCIR_INTPIN, 1);
	return (0);
}

void
pci_lintr_assert(struct pci_devinst *pi)
{

	assert(pi->pi_lintr_pin);
	ioapic_assert_pin(pi->pi_vmctx, pi->pi_lintr_pin);
}

void
pci_lintr_deassert(struct pci_devinst *pi)
{

	assert(pi->pi_lintr_pin);
	ioapic_deassert_pin(pi->pi_vmctx, pi->pi_lintr_pin);
}

/*
 * Return 1 if the emulated device in 'slot' is a multi-function device.
 * Return 0 otherwise.
 */
static int
pci_emul_is_mfdev(int slot)
{
	int f, numfuncs;

	numfuncs = 0;
	for (f = 0; f < MAXFUNCS; f++) {
		if (pci_slotinfo[slot][f].si_devi != NULL) {
			numfuncs++;
		}
	}
	return (numfuncs > 1);
}

/*
 * Ensure that the PCIM_MFDEV bit is properly set (or unset) depending on
 * whether or not a multi-function device is being emulated in the pci
 * 'slot'.
 */
static void
pci_emul_hdrtype_fixup(int slot, int off, int bytes, uint32_t *rv)
{
	int mfdev;

	if (off <= PCIR_HDRTYPE && off + bytes > PCIR_HDRTYPE) {
		mfdev = pci_emul_is_mfdev(slot);
		switch (bytes) {
		case 1:
		case 2:
			*rv &= ~PCIM_MFDEV;
			if (mfdev) {
				*rv |= PCIM_MFDEV;
			}
			break;
		case 4:
			*rv &= ~(PCIM_MFDEV << 16);
			if (mfdev) {
				*rv |= (PCIM_MFDEV << 16);
			}
			break;
		}
	}
}
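/*
 * PCI config mechanism #1: the guest writes a dword to CONFIG_ADDRESS
 * (0xcf8) encoded as
 *
 *   bit  31    enable
 *   bits 23:16 bus
 *   bits 15:11 slot (device)
 *   bits 10:8  function
 *   bits  7:2  dword register offset
 *
 * and then reads or writes CONFIG_DATA (0xcfc-0xcff); the byte lane of
 * the data port supplies the low two offset bits.  The handlers below
 * latch the decoded address and service the data port accesses against
 * the selected function's config space.
 */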
static int cfgbus, cfgslot, cfgfunc, cfgoff;

static int
pci_emul_cfgaddr(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
    uint32_t *eax, void *arg)
{
	uint32_t x;

	assert(!in);

	if (bytes != 4)
		return (-1);

	x = *eax;
	cfgoff = x & PCI_REGMAX;
	cfgfunc = (x >> 8) & PCI_FUNCMAX;
	cfgslot = (x >> 11) & PCI_SLOTMAX;
	cfgbus = (x >> 16) & PCI_BUSMAX;

	return (0);
}
INOUT_PORT(pci_cfgaddr, CONF1_ADDR_PORT, IOPORT_F_OUT, pci_emul_cfgaddr);

static int
pci_emul_cfgdata(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
    uint32_t *eax, void *arg)
{
	struct pci_devinst *pi;
	struct pci_devemu *pe;
	int coff, idx, needcfg;
	uint64_t mask, bar;

	assert(bytes == 1 || bytes == 2 || bytes == 4);

	if (cfgbus == 0)
		pi = pci_slotinfo[cfgslot][cfgfunc].si_devi;
	else
		pi = NULL;

	coff = cfgoff + (port - CONF1_DATA_PORT);

#if 0
	printf("pcicfg-%s from 0x%0x of %d bytes (%d/%d/%d)\n\r",
	    in ? "read" : "write", coff, bytes, cfgbus, cfgslot, cfgfunc);
#endif

	/*
	 * Just return if there is no device at this cfgslot:cfgfunc or
	 * if the guest is doing an un-aligned access
	 */
	if (pi == NULL || (coff & (bytes - 1)) != 0) {
		if (in)
			*eax = 0xffffffff;
		return (0);
	}

	pe = pi->pi_d;

	/*
	 * Config read
	 */
	if (in) {
		/* Let the device emulation override the default handler */
		if (pe->pe_cfgread != NULL) {
			needcfg = pe->pe_cfgread(ctx, vcpu, pi,
			    coff, bytes, eax);
		} else {
			needcfg = 1;
		}

		if (needcfg) {
			if (bytes == 1)
				*eax = pci_get_cfgdata8(pi, coff);
			else if (bytes == 2)
				*eax = pci_get_cfgdata16(pi, coff);
			else
				*eax = pci_get_cfgdata32(pi, coff);
		}

		pci_emul_hdrtype_fixup(cfgslot, coff, bytes, eax);
	} else {
		/* Let the device emulation override the default handler */
		if (pe->pe_cfgwrite != NULL &&
		    (*pe->pe_cfgwrite)(ctx, vcpu, pi, coff, bytes, *eax) == 0)
			return (0);

		/*
		 * Special handling for write to BAR registers
		 */
		if (coff >= PCIR_BAR(0) && coff < PCIR_BAR(PCI_BARMAX + 1)) {
			/*
			 * Ignore writes to BAR registers that are not
			 * 4-byte aligned.
			 */
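			/*
			 * A 4-byte write of all 1s is the standard BAR
			 * sizing probe: the mask computed from
			 * pi_bar[idx].size below clears the low bits, so
			 * the guest reads back ~(size - 1) plus the type
			 * bits and can infer the region size.
			 */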
			if (bytes != 4 || (coff & 0x3) != 0)
				return (0);
			idx = (coff - PCIR_BAR(0)) / 4;
			switch (pi->pi_bar[idx].type) {
			case PCIBAR_NONE:
				bar = 0;
				break;
			case PCIBAR_IO:
				mask = ~(pi->pi_bar[idx].size - 1);
				mask &= PCIM_BAR_IO_BASE;
				bar = (*eax & mask) | PCIM_BAR_IO_SPACE;
				break;
			case PCIBAR_MEM32:
				mask = ~(pi->pi_bar[idx].size - 1);
				mask &= PCIM_BAR_MEM_BASE;
				bar = *eax & mask;
				bar |= PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32;
				break;
			case PCIBAR_MEM64:
				mask = ~(pi->pi_bar[idx].size - 1);
				mask &= PCIM_BAR_MEM_BASE;
				bar = *eax & mask;
				bar |= PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 |
				    PCIM_BAR_MEM_PREFETCH;
				break;
			case PCIBAR_MEMHI64:
				mask = ~(pi->pi_bar[idx - 1].size - 1);
				mask &= PCIM_BAR_MEM_BASE;
				bar = ((uint64_t)*eax << 32) & mask;
				bar = bar >> 32;
				break;
			default:
				assert(0);
			}
			pci_set_cfgdata32(pi, coff, bar);

		} else if (pci_emul_iscap(pi, coff)) {
			pci_emul_capwrite(pi, coff, bytes, *eax);
		} else {
			CFGWRITE(pi, coff, *eax, bytes);
		}
	}

	return (0);
}

INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+0, IOPORT_F_INOUT, pci_emul_cfgdata);
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+1, IOPORT_F_INOUT, pci_emul_cfgdata);
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+2, IOPORT_F_INOUT, pci_emul_cfgdata);
INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+3, IOPORT_F_INOUT, pci_emul_cfgdata);

/*
 * I/O ports to configure PCI IRQ routing. We ignore all writes to them.
 */
static int
pci_irq_port_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes,
    uint32_t *eax, void *arg)
{
	assert(in == 0);
	return (0);
}
INOUT_PORT(pci_irq, 0xC00, IOPORT_F_OUT, pci_irq_port_handler);
INOUT_PORT(pci_irq, 0xC01, IOPORT_F_OUT, pci_irq_port_handler);

#define PCI_EMUL_TEST
#ifdef PCI_EMUL_TEST
/*
 * Define a dummy test device
 */
#define DIOSZ	20
#define DMEMSZ	4096
struct pci_emul_dsoftc {
	uint8_t	ioregs[DIOSZ];
	uint8_t	memregs[DMEMSZ];
};

#define PCI_EMUL_MSI_MSGS	 4
#define PCI_EMUL_MSIX_MSGS	16

static int
pci_emul_dinit(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
{
	int error;
	struct pci_emul_dsoftc *sc;

	sc = malloc(sizeof(struct pci_emul_dsoftc));
	memset(sc, 0, sizeof(struct pci_emul_dsoftc));

	pi->pi_arg = sc;

	pci_set_cfgdata16(pi, PCIR_DEVICE, 0x0001);
	pci_set_cfgdata16(pi, PCIR_VENDOR, 0x10DD);
	pci_set_cfgdata8(pi, PCIR_CLASS, 0x02);

	error = pci_emul_add_msicap(pi, PCI_EMUL_MSI_MSGS);
	assert(error == 0);

	error = pci_emul_alloc_bar(pi, 0, PCIBAR_IO, DIOSZ);
	assert(error == 0);

	error = pci_emul_alloc_bar(pi, 1, PCIBAR_MEM32, DMEMSZ);
	assert(error == 0);

	return (0);
}

static void
pci_emul_diow(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
    uint64_t offset, int size, uint64_t value)
{
	int i;
	struct pci_emul_dsoftc *sc = pi->pi_arg;

	if (baridx == 0) {
		if (offset + size > DIOSZ) {
			printf("diow: iow too large, offset %ld size %d\n",
			    offset, size);
			return;
		}

		if (size == 1) {
			sc->ioregs[offset] = value & 0xff;
		} else if (size == 2) {
			*(uint16_t *)&sc->ioregs[offset] = value & 0xffff;
		} else if (size == 4) {
			*(uint32_t *)&sc->ioregs[offset] = value;
		} else {
			printf("diow: iow unknown size %d\n", size);
		}

		/*
		 * Special magic value to generate an interrupt
		 */
		if (offset == 4 && size == 4 && pci_msi_enabled(pi))
			pci_generate_msi(pi, value % pci_msi_msgnum(pi));

		if (value == 0xabcdef) {
			for (i = 0; i < pci_msi_msgnum(pi); i++)
				pci_generate_msi(pi, i);
		}
	}

	if (baridx == 1) {
		if (offset + size > DMEMSZ) {
			printf("diow: memw too large, offset %ld size %d\n",
			    offset, size);
			return;
		}

		if (size == 1) {
			sc->memregs[offset] = value;
		} else if (size == 2) {
			*(uint16_t *)&sc->memregs[offset] = value;
		} else if (size == 4) {
			*(uint32_t *)&sc->memregs[offset] = value;
		} else if (size == 8) {
			*(uint64_t *)&sc->memregs[offset] = value;
		} else {
			printf("diow: memw unknown size %d\n", size);
		}

		/*
		 * magic interrupt ??
		 */
	}

	if (baridx > 1) {
		printf("diow: unknown bar idx %d\n", baridx);
	}
}
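/*
 * Example guest interaction with the dummy device: an aligned 4-byte
 * write of, say, 3 to offset 4 of I/O BAR 0 lands in pci_emul_diow()
 * above and, if MSI is enabled, injects message (3 % msgnum) via
 * pci_generate_msi(); writing the magic value 0xabcdef fires every
 * configured MSI message in turn.
 */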
static uint64_t
pci_emul_dior(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
    uint64_t offset, int size)
{
	struct pci_emul_dsoftc *sc = pi->pi_arg;
	uint32_t value = 0;	/* returned as-is for unhandled sizes */

	if (baridx == 0) {
		if (offset + size > DIOSZ) {
			printf("dior: ior too large, offset %ld size %d\n",
			    offset, size);
			return (0);
		}

		if (size == 1) {
			value = sc->ioregs[offset];
		} else if (size == 2) {
			value = *(uint16_t *) &sc->ioregs[offset];
		} else if (size == 4) {
			value = *(uint32_t *) &sc->ioregs[offset];
		} else {
			printf("dior: ior unknown size %d\n", size);
		}
	}

	if (baridx == 1) {
		if (offset + size > DMEMSZ) {
			printf("dior: memr too large, offset %ld size %d\n",
			    offset, size);
			return (0);
		}

		if (size == 1) {
			value = sc->memregs[offset];
		} else if (size == 2) {
			value = *(uint16_t *) &sc->memregs[offset];
		} else if (size == 4) {
			value = *(uint32_t *) &sc->memregs[offset];
		} else if (size == 8) {
			value = *(uint64_t *) &sc->memregs[offset];
		} else {
			printf("dior: memr unknown size %d\n", size);
		}
	}

	if (baridx > 1) {
		printf("dior: unknown bar idx %d\n", baridx);
		return (0);
	}

	return (value);
}

struct pci_devemu pci_dummy = {
	.pe_emu = "dummy",
	.pe_init = pci_emul_dinit,
	.pe_barwrite = pci_emul_diow,
	.pe_barread = pci_emul_dior
};
PCI_EMUL_SET(pci_dummy);

#endif /* PCI_EMUL_TEST */