1 /*- 2 * Copyright (c) 2011 NetApp, Inc. 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY NETAPP, INC ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL NETAPP, INC OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 */ 28 29 #include <sys/cdefs.h> 30 __FBSDID("$FreeBSD$"); 31 32 #include <sys/param.h> 33 #include <sys/linker_set.h> 34 35 #include <ctype.h> 36 #include <stdio.h> 37 #include <stdlib.h> 38 #include <string.h> 39 #include <strings.h> 40 #include <assert.h> 41 42 #include <machine/vmm.h> 43 #include <vmmapi.h> 44 45 #include "bhyverun.h" 46 #include "inout.h" 47 #include "mem.h" 48 #include "mptbl.h" 49 #include "pci_emul.h" 50 #include "ioapic.h" 51 52 #define CONF1_ADDR_PORT 0x0cf8 53 #define CONF1_DATA_PORT 0x0cfc 54 55 #define CFGWRITE(pi,off,val,b) \ 56 do { \ 57 if ((b) == 1) { \ 58 pci_set_cfgdata8((pi),(off),(val)); \ 59 } else if ((b) == 2) { \ 60 pci_set_cfgdata16((pi),(off),(val)); \ 61 } else { \ 62 pci_set_cfgdata32((pi),(off),(val)); \ 63 } \ 64 } while (0) 65 66 #define MAXSLOTS (PCI_SLOTMAX + 1) 67 #define MAXFUNCS (PCI_FUNCMAX + 1) 68 69 static struct slotinfo { 70 char *si_name; 71 char *si_param; 72 struct pci_devinst *si_devi; 73 int si_legacy; 74 } pci_slotinfo[MAXSLOTS][MAXFUNCS]; 75 76 /* 77 * Used to keep track of legacy interrupt owners/requestors 78 */ 79 #define NLIRQ 16 80 81 static struct lirqinfo { 82 int li_generic; 83 int li_acount; 84 struct pci_devinst *li_owner; /* XXX should be a list */ 85 } lirq[NLIRQ]; 86 87 SET_DECLARE(pci_devemu_set, struct pci_devemu); 88 89 static uint32_t pci_hole_startaddr; 90 91 static uint64_t pci_emul_iobase; 92 static uint64_t pci_emul_membase32; 93 static uint64_t pci_emul_membase64; 94 95 #define PCI_EMUL_IOBASE 0x2000 96 #define PCI_EMUL_IOLIMIT 0x10000 97 98 #define PCI_EMUL_MEMLIMIT32 0xE0000000 /* 3.5GB */ 99 100 #define PCI_EMUL_MEMBASE64 0xD000000000UL 101 #define PCI_EMUL_MEMLIMIT64 0xFD00000000UL 102 103 static int pci_emul_devices; 104 105 /* 106 * I/O access 107 */ 108 109 /* 110 * Slot options are in the form: 111 * 112 * <slot>[:<func>],<emul>[,<config>] 113 * 114 * slot is 0..31 115 * func is 0..7 116 * emul is a string describing the type of PCI device e.g. virtio-net 117 * config is an optional string, depending on the device, that can be 118 * used for configuration. 119 * Examples are: 120 * 1,virtio-net,tap0 121 * 3:0,dummy 122 */ 123 static void 124 pci_parse_slot_usage(char *aopt) 125 { 126 printf("Invalid PCI slot info field \"%s\"\n", aopt); 127 free(aopt); 128 } 129 130 void 131 pci_parse_slot(char *opt, int legacy) 132 { 133 char *slot, *func, *emul, *config; 134 char *str, *cpy; 135 int snum, fnum; 136 137 str = cpy = strdup(opt); 138 139 config = NULL; 140 141 if (strchr(str, ':') != NULL) { 142 slot = strsep(&str, ":"); 143 func = strsep(&str, ","); 144 } else { 145 slot = strsep(&str, ","); 146 func = NULL; 147 } 148 149 emul = strsep(&str, ","); 150 if (str != NULL) { 151 config = strsep(&str, ","); 152 } 153 154 if (emul == NULL) { 155 pci_parse_slot_usage(cpy); 156 return; 157 } 158 159 snum = atoi(slot); 160 fnum = func ? atoi(func) : 0; 161 if (snum < 0 || snum >= MAXSLOTS || fnum < 0 || fnum >= MAXFUNCS) { 162 pci_parse_slot_usage(cpy); 163 } else { 164 pci_slotinfo[snum][fnum].si_name = emul; 165 pci_slotinfo[snum][fnum].si_param = config; 166 pci_slotinfo[snum][fnum].si_legacy = legacy; 167 } 168 } 169 170 static int 171 pci_valid_pba_offset(struct pci_devinst *pi, uint64_t offset) 172 { 173 174 if (offset < pi->pi_msix.pba_offset) 175 return (0); 176 177 if (offset >= pi->pi_msix.pba_offset + pi->pi_msix.pba_size) { 178 return (0); 179 } 180 181 return (1); 182 } 183 184 int 185 pci_emul_msix_twrite(struct pci_devinst *pi, uint64_t offset, int size, 186 uint64_t value) 187 { 188 int msix_entry_offset; 189 int tab_index; 190 char *dest; 191 192 /* support only 4 or 8 byte writes */ 193 if (size != 4 && size != 8) 194 return (-1); 195 196 /* 197 * Return if table index is beyond what device supports 198 */ 199 tab_index = offset / MSIX_TABLE_ENTRY_SIZE; 200 if (tab_index >= pi->pi_msix.table_count) 201 return (-1); 202 203 msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE; 204 205 /* support only aligned writes */ 206 if ((msix_entry_offset % size) != 0) 207 return (-1); 208 209 dest = (char *)(pi->pi_msix.table + tab_index); 210 dest += msix_entry_offset; 211 212 if (size == 4) 213 *((uint32_t *)dest) = value; 214 else 215 *((uint64_t *)dest) = value; 216 217 return (0); 218 } 219 220 uint64_t 221 pci_emul_msix_tread(struct pci_devinst *pi, uint64_t offset, int size) 222 { 223 char *dest; 224 int msix_entry_offset; 225 int tab_index; 226 uint64_t retval = ~0; 227 228 /* support only 4 or 8 byte reads */ 229 if (size != 4 && size != 8) 230 return (retval); 231 232 msix_entry_offset = offset % MSIX_TABLE_ENTRY_SIZE; 233 234 /* support only aligned reads */ 235 if ((msix_entry_offset % size) != 0) { 236 return (retval); 237 } 238 239 tab_index = offset / MSIX_TABLE_ENTRY_SIZE; 240 241 if (tab_index < pi->pi_msix.table_count) { 242 /* valid MSI-X Table access */ 243 dest = (char *)(pi->pi_msix.table + tab_index); 244 dest += msix_entry_offset; 245 246 if (size == 4) 247 retval = *((uint32_t *)dest); 248 else 249 retval = *((uint64_t *)dest); 250 } else if (pci_valid_pba_offset(pi, offset)) { 251 /* return 0 for PBA access */ 252 retval = 0; 253 } 254 255 return (retval); 256 } 257 258 int 259 pci_msix_table_bar(struct pci_devinst *pi) 260 { 261 262 if (pi->pi_msix.table != NULL) 263 return (pi->pi_msix.table_bar); 264 else 265 return (-1); 266 } 267 268 int 269 pci_msix_pba_bar(struct pci_devinst *pi) 270 { 271 272 if (pi->pi_msix.table != NULL) 273 return (pi->pi_msix.pba_bar); 274 else 275 return (-1); 276 } 277 278 static int 279 pci_emul_io_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes, 280 uint32_t *eax, void *arg) 281 { 282 struct pci_devinst *pdi = arg; 283 struct pci_devemu *pe = pdi->pi_d; 284 uint64_t offset; 285 int i; 286 287 for (i = 0; i <= PCI_BARMAX; i++) { 288 if (pdi->pi_bar[i].type == PCIBAR_IO && 289 port >= pdi->pi_bar[i].addr && 290 port + bytes <= pdi->pi_bar[i].addr + pdi->pi_bar[i].size) { 291 offset = port - pdi->pi_bar[i].addr; 292 if (in) 293 *eax = (*pe->pe_barread)(ctx, vcpu, pdi, i, 294 offset, bytes); 295 else 296 (*pe->pe_barwrite)(ctx, vcpu, pdi, i, offset, 297 bytes, *eax); 298 return (0); 299 } 300 } 301 return (-1); 302 } 303 304 static int 305 pci_emul_mem_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, 306 int size, uint64_t *val, void *arg1, long arg2) 307 { 308 struct pci_devinst *pdi = arg1; 309 struct pci_devemu *pe = pdi->pi_d; 310 uint64_t offset; 311 int bidx = (int) arg2; 312 313 assert(bidx <= PCI_BARMAX); 314 assert(pdi->pi_bar[bidx].type == PCIBAR_MEM32 || 315 pdi->pi_bar[bidx].type == PCIBAR_MEM64); 316 assert(addr >= pdi->pi_bar[bidx].addr && 317 addr + size <= pdi->pi_bar[bidx].addr + pdi->pi_bar[bidx].size); 318 319 offset = addr - pdi->pi_bar[bidx].addr; 320 321 if (dir == MEM_F_WRITE) 322 (*pe->pe_barwrite)(ctx, vcpu, pdi, bidx, offset, size, *val); 323 else 324 *val = (*pe->pe_barread)(ctx, vcpu, pdi, bidx, offset, size); 325 326 return (0); 327 } 328 329 330 static int 331 pci_emul_alloc_resource(uint64_t *baseptr, uint64_t limit, uint64_t size, 332 uint64_t *addr) 333 { 334 uint64_t base; 335 336 assert((size & (size - 1)) == 0); /* must be a power of 2 */ 337 338 base = roundup2(*baseptr, size); 339 340 if (base + size <= limit) { 341 *addr = base; 342 *baseptr = base + size; 343 return (0); 344 } else 345 return (-1); 346 } 347 348 int 349 pci_emul_alloc_bar(struct pci_devinst *pdi, int idx, enum pcibar_type type, 350 uint64_t size) 351 { 352 353 return (pci_emul_alloc_pbar(pdi, idx, 0, type, size)); 354 } 355 356 int 357 pci_emul_alloc_pbar(struct pci_devinst *pdi, int idx, uint64_t hostbase, 358 enum pcibar_type type, uint64_t size) 359 { 360 int i, error; 361 uint64_t *baseptr, limit, addr, mask, lobits, bar; 362 struct inout_port iop; 363 struct mem_range memp; 364 365 assert(idx >= 0 && idx <= PCI_BARMAX); 366 367 if ((size & (size - 1)) != 0) 368 size = 1UL << flsl(size); /* round up to a power of 2 */ 369 370 switch (type) { 371 case PCIBAR_NONE: 372 baseptr = NULL; 373 addr = mask = lobits = 0; 374 break; 375 case PCIBAR_IO: 376 if (hostbase && 377 pci_slotinfo[pdi->pi_slot][pdi->pi_func].si_legacy) { 378 assert(hostbase < PCI_EMUL_IOBASE); 379 baseptr = &hostbase; 380 } else { 381 baseptr = &pci_emul_iobase; 382 } 383 limit = PCI_EMUL_IOLIMIT; 384 mask = PCIM_BAR_IO_BASE; 385 lobits = PCIM_BAR_IO_SPACE; 386 break; 387 case PCIBAR_MEM64: 388 /* 389 * XXX 390 * Some drivers do not work well if the 64-bit BAR is allocated 391 * above 4GB. Allow for this by allocating small requests under 392 * 4GB unless then allocation size is larger than some arbitrary 393 * number (32MB currently). 394 */ 395 if (size > 32 * 1024 * 1024) { 396 /* 397 * XXX special case for device requiring peer-peer DMA 398 */ 399 if (size == 0x100000000UL) 400 baseptr = &hostbase; 401 else 402 baseptr = &pci_emul_membase64; 403 limit = PCI_EMUL_MEMLIMIT64; 404 mask = PCIM_BAR_MEM_BASE; 405 lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 | 406 PCIM_BAR_MEM_PREFETCH; 407 break; 408 } else { 409 baseptr = &pci_emul_membase32; 410 limit = PCI_EMUL_MEMLIMIT32; 411 mask = PCIM_BAR_MEM_BASE; 412 lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64; 413 } 414 break; 415 case PCIBAR_MEM32: 416 baseptr = &pci_emul_membase32; 417 limit = PCI_EMUL_MEMLIMIT32; 418 mask = PCIM_BAR_MEM_BASE; 419 lobits = PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32; 420 break; 421 default: 422 printf("pci_emul_alloc_base: invalid bar type %d\n", type); 423 assert(0); 424 } 425 426 if (baseptr != NULL) { 427 error = pci_emul_alloc_resource(baseptr, limit, size, &addr); 428 if (error != 0) 429 return (error); 430 } 431 432 pdi->pi_bar[idx].type = type; 433 pdi->pi_bar[idx].addr = addr; 434 pdi->pi_bar[idx].size = size; 435 436 /* Initialize the BAR register in config space */ 437 bar = (addr & mask) | lobits; 438 pci_set_cfgdata32(pdi, PCIR_BAR(idx), bar); 439 440 if (type == PCIBAR_MEM64) { 441 assert(idx + 1 <= PCI_BARMAX); 442 pdi->pi_bar[idx + 1].type = PCIBAR_MEMHI64; 443 pci_set_cfgdata32(pdi, PCIR_BAR(idx + 1), bar >> 32); 444 } 445 446 /* add a handler to intercept accesses to the I/O bar */ 447 if (type == PCIBAR_IO) { 448 iop.name = pdi->pi_name; 449 iop.flags = IOPORT_F_INOUT; 450 iop.handler = pci_emul_io_handler; 451 iop.arg = pdi; 452 453 for (i = 0; i < size; i++) { 454 iop.port = addr + i; 455 register_inout(&iop); 456 } 457 } else if (type == PCIBAR_MEM32 || type == PCIBAR_MEM64) { 458 /* add memory bar intercept handler */ 459 memp.name = pdi->pi_name; 460 memp.flags = MEM_F_RW; 461 memp.base = addr; 462 memp.size = size; 463 memp.handler = pci_emul_mem_handler; 464 memp.arg1 = pdi; 465 memp.arg2 = idx; 466 467 error = register_mem(&memp); 468 assert(error == 0); 469 } 470 471 return (0); 472 } 473 474 #define CAP_START_OFFSET 0x40 475 static int 476 pci_emul_add_capability(struct pci_devinst *pi, u_char *capdata, int caplen) 477 { 478 int i, capoff, capid, reallen; 479 uint16_t sts; 480 481 static u_char endofcap[4] = { 482 PCIY_RESERVED, 0, 0, 0 483 }; 484 485 assert(caplen > 0 && capdata[0] != PCIY_RESERVED); 486 487 reallen = roundup2(caplen, 4); /* dword aligned */ 488 489 sts = pci_get_cfgdata16(pi, PCIR_STATUS); 490 if ((sts & PCIM_STATUS_CAPPRESENT) == 0) { 491 capoff = CAP_START_OFFSET; 492 pci_set_cfgdata8(pi, PCIR_CAP_PTR, capoff); 493 pci_set_cfgdata16(pi, PCIR_STATUS, sts|PCIM_STATUS_CAPPRESENT); 494 } else { 495 capoff = pci_get_cfgdata8(pi, PCIR_CAP_PTR); 496 while (1) { 497 assert((capoff & 0x3) == 0); 498 capid = pci_get_cfgdata8(pi, capoff); 499 if (capid == PCIY_RESERVED) 500 break; 501 capoff = pci_get_cfgdata8(pi, capoff + 1); 502 } 503 } 504 505 /* Check if we have enough space */ 506 if (capoff + reallen + sizeof(endofcap) > PCI_REGMAX + 1) 507 return (-1); 508 509 /* Copy the capability */ 510 for (i = 0; i < caplen; i++) 511 pci_set_cfgdata8(pi, capoff + i, capdata[i]); 512 513 /* Set the next capability pointer */ 514 pci_set_cfgdata8(pi, capoff + 1, capoff + reallen); 515 516 /* Copy of the reserved capability which serves as the end marker */ 517 for (i = 0; i < sizeof(endofcap); i++) 518 pci_set_cfgdata8(pi, capoff + reallen + i, endofcap[i]); 519 520 return (0); 521 } 522 523 static struct pci_devemu * 524 pci_emul_finddev(char *name) 525 { 526 struct pci_devemu **pdpp, *pdp; 527 528 SET_FOREACH(pdpp, pci_devemu_set) { 529 pdp = *pdpp; 530 if (!strcmp(pdp->pe_emu, name)) { 531 return (pdp); 532 } 533 } 534 535 return (NULL); 536 } 537 538 static void 539 pci_emul_init(struct vmctx *ctx, struct pci_devemu *pde, int slot, int func, 540 char *params) 541 { 542 struct pci_devinst *pdi; 543 pdi = malloc(sizeof(struct pci_devinst)); 544 bzero(pdi, sizeof(*pdi)); 545 546 pdi->pi_vmctx = ctx; 547 pdi->pi_bus = 0; 548 pdi->pi_slot = slot; 549 pdi->pi_func = func; 550 pdi->pi_d = pde; 551 snprintf(pdi->pi_name, PI_NAMESZ, "%s-pci-%d", pde->pe_emu, slot); 552 553 /* Disable legacy interrupts */ 554 pci_set_cfgdata8(pdi, PCIR_INTLINE, 255); 555 pci_set_cfgdata8(pdi, PCIR_INTPIN, 0); 556 557 pci_set_cfgdata8(pdi, PCIR_COMMAND, 558 PCIM_CMD_PORTEN | PCIM_CMD_MEMEN | PCIM_CMD_BUSMASTEREN); 559 560 if ((*pde->pe_init)(ctx, pdi, params) != 0) { 561 free(pdi); 562 } else { 563 pci_emul_devices++; 564 pci_slotinfo[slot][func].si_devi = pdi; 565 } 566 } 567 568 void 569 pci_populate_msicap(struct msicap *msicap, int msgnum, int nextptr) 570 { 571 int mmc; 572 573 CTASSERT(sizeof(struct msicap) == 14); 574 575 /* Number of msi messages must be a power of 2 between 1 and 32 */ 576 assert((msgnum & (msgnum - 1)) == 0 && msgnum >= 1 && msgnum <= 32); 577 mmc = ffs(msgnum) - 1; 578 579 bzero(msicap, sizeof(struct msicap)); 580 msicap->capid = PCIY_MSI; 581 msicap->nextptr = nextptr; 582 msicap->msgctrl = PCIM_MSICTRL_64BIT | (mmc << 1); 583 } 584 585 int 586 pci_emul_add_msicap(struct pci_devinst *pi, int msgnum) 587 { 588 struct msicap msicap; 589 590 pci_populate_msicap(&msicap, msgnum, 0); 591 592 return (pci_emul_add_capability(pi, (u_char *)&msicap, sizeof(msicap))); 593 } 594 595 static void 596 pci_populate_msixcap(struct msixcap *msixcap, int msgnum, int barnum, 597 uint32_t msix_tab_size, int nextptr) 598 { 599 CTASSERT(sizeof(struct msixcap) == 12); 600 601 assert(msix_tab_size % 4096 == 0); 602 603 bzero(msixcap, sizeof(struct msixcap)); 604 msixcap->capid = PCIY_MSIX; 605 msixcap->nextptr = nextptr; 606 607 /* 608 * Message Control Register, all fields set to 609 * zero except for the Table Size. 610 * Note: Table size N is encoded as N-1 611 */ 612 msixcap->msgctrl = msgnum - 1; 613 614 /* 615 * MSI-X BAR setup: 616 * - MSI-X table start at offset 0 617 * - PBA table starts at a 4K aligned offset after the MSI-X table 618 */ 619 msixcap->table_info = barnum & PCIM_MSIX_BIR_MASK; 620 msixcap->pba_info = msix_tab_size | (barnum & PCIM_MSIX_BIR_MASK); 621 } 622 623 static void 624 pci_msix_table_init(struct pci_devinst *pi, int table_entries) 625 { 626 int i, table_size; 627 628 assert(table_entries > 0); 629 assert(table_entries <= MAX_MSIX_TABLE_ENTRIES); 630 631 table_size = table_entries * MSIX_TABLE_ENTRY_SIZE; 632 pi->pi_msix.table = malloc(table_size); 633 bzero(pi->pi_msix.table, table_size); 634 635 /* set mask bit of vector control register */ 636 for (i = 0; i < table_entries; i++) 637 pi->pi_msix.table[i].vector_control |= PCIM_MSIX_VCTRL_MASK; 638 } 639 640 int 641 pci_emul_add_msixcap(struct pci_devinst *pi, int msgnum, int barnum) 642 { 643 uint16_t pba_index; 644 uint32_t tab_size; 645 struct msixcap msixcap; 646 647 assert(msgnum >= 1 && msgnum <= MAX_MSIX_TABLE_ENTRIES); 648 assert(barnum >= 0 && barnum <= PCIR_MAX_BAR_0); 649 650 tab_size = msgnum * MSIX_TABLE_ENTRY_SIZE; 651 652 /* Align table size to nearest 4K */ 653 tab_size = roundup2(tab_size, 4096); 654 655 pi->pi_msix.table_bar = barnum; 656 pi->pi_msix.pba_bar = barnum; 657 pi->pi_msix.table_offset = 0; 658 pi->pi_msix.table_count = msgnum; 659 pi->pi_msix.pba_offset = tab_size; 660 661 /* calculate the MMIO size required for MSI-X PBA */ 662 pba_index = (msgnum - 1) / (PBA_TABLE_ENTRY_SIZE * 8); 663 pi->pi_msix.pba_size = (pba_index + 1) * PBA_TABLE_ENTRY_SIZE; 664 665 pci_msix_table_init(pi, msgnum); 666 667 pci_populate_msixcap(&msixcap, msgnum, barnum, tab_size, 0); 668 669 /* allocate memory for MSI-X Table and PBA */ 670 pci_emul_alloc_bar(pi, barnum, PCIBAR_MEM32, 671 tab_size + pi->pi_msix.pba_size); 672 673 return (pci_emul_add_capability(pi, (u_char *)&msixcap, 674 sizeof(msixcap))); 675 } 676 677 void 678 msixcap_cfgwrite(struct pci_devinst *pi, int capoff, int offset, 679 int bytes, uint32_t val) 680 { 681 uint16_t msgctrl, rwmask; 682 int off, table_bar; 683 684 off = offset - capoff; 685 table_bar = pi->pi_msix.table_bar; 686 /* Message Control Register */ 687 if (off == 2 && bytes == 2) { 688 rwmask = PCIM_MSIXCTRL_MSIX_ENABLE | PCIM_MSIXCTRL_FUNCTION_MASK; 689 msgctrl = pci_get_cfgdata16(pi, offset); 690 msgctrl &= ~rwmask; 691 msgctrl |= val & rwmask; 692 val = msgctrl; 693 694 pi->pi_msix.enabled = val & PCIM_MSIXCTRL_MSIX_ENABLE; 695 pi->pi_msix.function_mask = val & PCIM_MSIXCTRL_FUNCTION_MASK; 696 } 697 698 CFGWRITE(pi, offset, val, bytes); 699 } 700 701 void 702 msicap_cfgwrite(struct pci_devinst *pi, int capoff, int offset, 703 int bytes, uint32_t val) 704 { 705 uint16_t msgctrl, rwmask, msgdata, mme; 706 uint32_t addrlo; 707 708 /* 709 * If guest is writing to the message control register make sure 710 * we do not overwrite read-only fields. 711 */ 712 if ((offset - capoff) == 2 && bytes == 2) { 713 rwmask = PCIM_MSICTRL_MME_MASK | PCIM_MSICTRL_MSI_ENABLE; 714 msgctrl = pci_get_cfgdata16(pi, offset); 715 msgctrl &= ~rwmask; 716 msgctrl |= val & rwmask; 717 val = msgctrl; 718 719 addrlo = pci_get_cfgdata32(pi, capoff + 4); 720 if (msgctrl & PCIM_MSICTRL_64BIT) 721 msgdata = pci_get_cfgdata16(pi, capoff + 12); 722 else 723 msgdata = pci_get_cfgdata16(pi, capoff + 8); 724 725 /* 726 * XXX check delivery mode, destination mode etc 727 */ 728 mme = msgctrl & PCIM_MSICTRL_MME_MASK; 729 pi->pi_msi.enabled = msgctrl & PCIM_MSICTRL_MSI_ENABLE ? 1 : 0; 730 if (pi->pi_msi.enabled) { 731 pi->pi_msi.cpu = (addrlo >> 12) & 0xff; 732 pi->pi_msi.vector = msgdata & 0xff; 733 pi->pi_msi.msgnum = 1 << (mme >> 4); 734 } else { 735 pi->pi_msi.cpu = 0; 736 pi->pi_msi.vector = 0; 737 pi->pi_msi.msgnum = 0; 738 } 739 } 740 741 CFGWRITE(pi, offset, val, bytes); 742 } 743 744 void 745 pciecap_cfgwrite(struct pci_devinst *pi, int capoff, int offset, 746 int bytes, uint32_t val) 747 { 748 749 /* XXX don't write to the readonly parts */ 750 CFGWRITE(pi, offset, val, bytes); 751 } 752 753 #define PCIECAP_VERSION 0x2 754 int 755 pci_emul_add_pciecap(struct pci_devinst *pi, int type) 756 { 757 int err; 758 struct pciecap pciecap; 759 760 CTASSERT(sizeof(struct pciecap) == 60); 761 762 if (type != PCIEM_TYPE_ROOT_PORT) 763 return (-1); 764 765 bzero(&pciecap, sizeof(pciecap)); 766 767 pciecap.capid = PCIY_EXPRESS; 768 pciecap.pcie_capabilities = PCIECAP_VERSION | PCIEM_TYPE_ROOT_PORT; 769 pciecap.link_capabilities = 0x411; /* gen1, x1 */ 770 pciecap.link_status = 0x11; /* gen1, x1 */ 771 772 err = pci_emul_add_capability(pi, (u_char *)&pciecap, sizeof(pciecap)); 773 return (err); 774 } 775 776 /* 777 * This function assumes that 'coff' is in the capabilities region of the 778 * config space. 779 */ 780 static void 781 pci_emul_capwrite(struct pci_devinst *pi, int offset, int bytes, uint32_t val) 782 { 783 int capid; 784 uint8_t capoff, nextoff; 785 786 /* Do not allow un-aligned writes */ 787 if ((offset & (bytes - 1)) != 0) 788 return; 789 790 /* Find the capability that we want to update */ 791 capoff = CAP_START_OFFSET; 792 while (1) { 793 capid = pci_get_cfgdata8(pi, capoff); 794 if (capid == PCIY_RESERVED) 795 break; 796 797 nextoff = pci_get_cfgdata8(pi, capoff + 1); 798 if (offset >= capoff && offset < nextoff) 799 break; 800 801 capoff = nextoff; 802 } 803 assert(offset >= capoff); 804 805 /* 806 * Capability ID and Next Capability Pointer are readonly 807 */ 808 if (offset == capoff || offset == capoff + 1) 809 return; 810 811 switch (capid) { 812 case PCIY_MSI: 813 msicap_cfgwrite(pi, capoff, offset, bytes, val); 814 break; 815 case PCIY_MSIX: 816 msixcap_cfgwrite(pi, capoff, offset, bytes, val); 817 break; 818 case PCIY_EXPRESS: 819 pciecap_cfgwrite(pi, capoff, offset, bytes, val); 820 break; 821 default: 822 break; 823 } 824 } 825 826 static int 827 pci_emul_iscap(struct pci_devinst *pi, int offset) 828 { 829 int found; 830 uint16_t sts; 831 uint8_t capid, lastoff; 832 833 found = 0; 834 sts = pci_get_cfgdata16(pi, PCIR_STATUS); 835 if ((sts & PCIM_STATUS_CAPPRESENT) != 0) { 836 lastoff = pci_get_cfgdata8(pi, PCIR_CAP_PTR); 837 while (1) { 838 assert((lastoff & 0x3) == 0); 839 capid = pci_get_cfgdata8(pi, lastoff); 840 if (capid == PCIY_RESERVED) 841 break; 842 lastoff = pci_get_cfgdata8(pi, lastoff + 1); 843 } 844 if (offset >= CAP_START_OFFSET && offset <= lastoff) 845 found = 1; 846 } 847 return (found); 848 } 849 850 static int 851 pci_emul_fallback_handler(struct vmctx *ctx, int vcpu, int dir, uint64_t addr, 852 int size, uint64_t *val, void *arg1, long arg2) 853 { 854 /* 855 * Ignore writes; return 0xff's for reads. The mem read code 856 * will take care of truncating to the correct size. 857 */ 858 if (dir == MEM_F_READ) { 859 *val = 0xffffffffffffffff; 860 } 861 862 return (0); 863 } 864 865 void 866 init_pci(struct vmctx *ctx) 867 { 868 struct mem_range memp; 869 struct pci_devemu *pde; 870 struct slotinfo *si; 871 int slot, func; 872 int error; 873 874 pci_hole_startaddr = vm_get_lowmem_limit(ctx); 875 876 pci_emul_iobase = PCI_EMUL_IOBASE; 877 pci_emul_membase32 = pci_hole_startaddr; 878 pci_emul_membase64 = PCI_EMUL_MEMBASE64; 879 880 for (slot = 0; slot < MAXSLOTS; slot++) { 881 for (func = 0; func < MAXFUNCS; func++) { 882 si = &pci_slotinfo[slot][func]; 883 if (si->si_name != NULL) { 884 pde = pci_emul_finddev(si->si_name); 885 if (pde != NULL) { 886 pci_emul_init(ctx, pde, slot, func, 887 si->si_param); 888 } 889 } 890 } 891 } 892 893 /* 894 * Allow ISA IRQs 5,10,11,12, and 15 to be available for 895 * generic use 896 */ 897 lirq[5].li_generic = 1; 898 lirq[10].li_generic = 1; 899 lirq[11].li_generic = 1; 900 lirq[12].li_generic = 1; 901 lirq[15].li_generic = 1; 902 903 /* 904 * Setup the PCI hole to return 0xff's when accessed in a region 905 * with no devices 906 */ 907 memset(&memp, 0, sizeof(struct mem_range)); 908 memp.name = "PCI hole"; 909 memp.flags = MEM_F_RW; 910 memp.base = pci_hole_startaddr; 911 memp.size = (4ULL * 1024 * 1024 * 1024) - pci_hole_startaddr; 912 memp.handler = pci_emul_fallback_handler; 913 914 error = register_mem_fallback(&memp); 915 assert(error == 0); 916 } 917 918 int 919 pci_msi_enabled(struct pci_devinst *pi) 920 { 921 return (pi->pi_msi.enabled); 922 } 923 924 int 925 pci_msi_msgnum(struct pci_devinst *pi) 926 { 927 if (pi->pi_msi.enabled) 928 return (pi->pi_msi.msgnum); 929 else 930 return (0); 931 } 932 933 int 934 pci_msix_enabled(struct pci_devinst *pi) 935 { 936 937 return (pi->pi_msix.enabled && !pi->pi_msi.enabled); 938 } 939 940 void 941 pci_generate_msix(struct pci_devinst *pi, int index) 942 { 943 struct msix_table_entry *mte; 944 945 if (!pci_msix_enabled(pi)) 946 return; 947 948 if (pi->pi_msix.function_mask) 949 return; 950 951 if (index >= pi->pi_msix.table_count) 952 return; 953 954 mte = &pi->pi_msix.table[index]; 955 if ((mte->vector_control & PCIM_MSIX_VCTRL_MASK) == 0) { 956 /* XXX Set PBA bit if interrupt is disabled */ 957 vm_lapic_irq(pi->pi_vmctx, 958 (mte->addr >> 12) & 0xff, mte->msg_data & 0xff); 959 } 960 } 961 962 void 963 pci_generate_msi(struct pci_devinst *pi, int msg) 964 { 965 966 if (pci_msi_enabled(pi) && msg < pci_msi_msgnum(pi)) { 967 vm_lapic_irq(pi->pi_vmctx, 968 pi->pi_msi.cpu, 969 pi->pi_msi.vector + msg); 970 } 971 } 972 973 int 974 pci_is_legacy(struct pci_devinst *pi) 975 { 976 977 return (pci_slotinfo[pi->pi_slot][pi->pi_func].si_legacy); 978 } 979 980 static int 981 pci_lintr_alloc(struct pci_devinst *pi, int vec) 982 { 983 int i; 984 985 assert(vec < NLIRQ); 986 987 if (vec == -1) { 988 for (i = 0; i < NLIRQ; i++) { 989 if (lirq[i].li_generic && 990 lirq[i].li_owner == NULL) { 991 vec = i; 992 break; 993 } 994 } 995 } else { 996 if (lirq[vec].li_owner != NULL) { 997 vec = -1; 998 } 999 } 1000 assert(vec != -1); 1001 1002 lirq[vec].li_owner = pi; 1003 pi->pi_lintr_pin = vec; 1004 1005 return (vec); 1006 } 1007 1008 int 1009 pci_lintr_request(struct pci_devinst *pi, int vec) 1010 { 1011 1012 vec = pci_lintr_alloc(pi, vec); 1013 pci_set_cfgdata8(pi, PCIR_INTLINE, vec); 1014 pci_set_cfgdata8(pi, PCIR_INTPIN, 1); 1015 return (0); 1016 } 1017 1018 void 1019 pci_lintr_assert(struct pci_devinst *pi) 1020 { 1021 1022 assert(pi->pi_lintr_pin); 1023 ioapic_assert_pin(pi->pi_vmctx, pi->pi_lintr_pin); 1024 } 1025 1026 void 1027 pci_lintr_deassert(struct pci_devinst *pi) 1028 { 1029 1030 assert(pi->pi_lintr_pin); 1031 ioapic_deassert_pin(pi->pi_vmctx, pi->pi_lintr_pin); 1032 } 1033 1034 /* 1035 * Return 1 if the emulated device in 'slot' is a multi-function device. 1036 * Return 0 otherwise. 1037 */ 1038 static int 1039 pci_emul_is_mfdev(int slot) 1040 { 1041 int f, numfuncs; 1042 1043 numfuncs = 0; 1044 for (f = 0; f < MAXFUNCS; f++) { 1045 if (pci_slotinfo[slot][f].si_devi != NULL) { 1046 numfuncs++; 1047 } 1048 } 1049 return (numfuncs > 1); 1050 } 1051 1052 /* 1053 * Ensure that the PCIM_MFDEV bit is properly set (or unset) depending on 1054 * whether or not is a multi-function being emulated in the pci 'slot'. 1055 */ 1056 static void 1057 pci_emul_hdrtype_fixup(int slot, int off, int bytes, uint32_t *rv) 1058 { 1059 int mfdev; 1060 1061 if (off <= PCIR_HDRTYPE && off + bytes > PCIR_HDRTYPE) { 1062 mfdev = pci_emul_is_mfdev(slot); 1063 switch (bytes) { 1064 case 1: 1065 case 2: 1066 *rv &= ~PCIM_MFDEV; 1067 if (mfdev) { 1068 *rv |= PCIM_MFDEV; 1069 } 1070 break; 1071 case 4: 1072 *rv &= ~(PCIM_MFDEV << 16); 1073 if (mfdev) { 1074 *rv |= (PCIM_MFDEV << 16); 1075 } 1076 break; 1077 } 1078 } 1079 } 1080 1081 static int cfgbus, cfgslot, cfgfunc, cfgoff; 1082 1083 static int 1084 pci_emul_cfgaddr(struct vmctx *ctx, int vcpu, int in, int port, int bytes, 1085 uint32_t *eax, void *arg) 1086 { 1087 uint32_t x; 1088 1089 assert(!in); 1090 1091 if (bytes != 4) 1092 return (-1); 1093 1094 x = *eax; 1095 cfgoff = x & PCI_REGMAX; 1096 cfgfunc = (x >> 8) & PCI_FUNCMAX; 1097 cfgslot = (x >> 11) & PCI_SLOTMAX; 1098 cfgbus = (x >> 16) & PCI_BUSMAX; 1099 1100 return (0); 1101 } 1102 INOUT_PORT(pci_cfgaddr, CONF1_ADDR_PORT, IOPORT_F_OUT, pci_emul_cfgaddr); 1103 1104 static int 1105 pci_emul_cfgdata(struct vmctx *ctx, int vcpu, int in, int port, int bytes, 1106 uint32_t *eax, void *arg) 1107 { 1108 struct pci_devinst *pi; 1109 struct pci_devemu *pe; 1110 int coff, idx, needcfg; 1111 uint64_t mask, bar; 1112 1113 assert(bytes == 1 || bytes == 2 || bytes == 4); 1114 1115 if (cfgbus == 0) 1116 pi = pci_slotinfo[cfgslot][cfgfunc].si_devi; 1117 else 1118 pi = NULL; 1119 1120 coff = cfgoff + (port - CONF1_DATA_PORT); 1121 1122 #if 0 1123 printf("pcicfg-%s from 0x%0x of %d bytes (%d/%d/%d)\n\r", 1124 in ? "read" : "write", coff, bytes, cfgbus, cfgslot, cfgfunc); 1125 #endif 1126 1127 /* 1128 * Just return if there is no device at this cfgslot:cfgfunc or 1129 * if the guest is doing an un-aligned access 1130 */ 1131 if (pi == NULL || (coff & (bytes - 1)) != 0) { 1132 if (in) 1133 *eax = 0xffffffff; 1134 return (0); 1135 } 1136 1137 pe = pi->pi_d; 1138 1139 /* 1140 * Config read 1141 */ 1142 if (in) { 1143 /* Let the device emulation override the default handler */ 1144 if (pe->pe_cfgread != NULL) { 1145 needcfg = pe->pe_cfgread(ctx, vcpu, pi, 1146 coff, bytes, eax); 1147 } else { 1148 needcfg = 1; 1149 } 1150 1151 if (needcfg) { 1152 if (bytes == 1) 1153 *eax = pci_get_cfgdata8(pi, coff); 1154 else if (bytes == 2) 1155 *eax = pci_get_cfgdata16(pi, coff); 1156 else 1157 *eax = pci_get_cfgdata32(pi, coff); 1158 } 1159 1160 pci_emul_hdrtype_fixup(cfgslot, coff, bytes, eax); 1161 } else { 1162 /* Let the device emulation override the default handler */ 1163 if (pe->pe_cfgwrite != NULL && 1164 (*pe->pe_cfgwrite)(ctx, vcpu, pi, coff, bytes, *eax) == 0) 1165 return (0); 1166 1167 /* 1168 * Special handling for write to BAR registers 1169 */ 1170 if (coff >= PCIR_BAR(0) && coff < PCIR_BAR(PCI_BARMAX + 1)) { 1171 /* 1172 * Ignore writes to BAR registers that are not 1173 * 4-byte aligned. 1174 */ 1175 if (bytes != 4 || (coff & 0x3) != 0) 1176 return (0); 1177 idx = (coff - PCIR_BAR(0)) / 4; 1178 switch (pi->pi_bar[idx].type) { 1179 case PCIBAR_NONE: 1180 bar = 0; 1181 break; 1182 case PCIBAR_IO: 1183 mask = ~(pi->pi_bar[idx].size - 1); 1184 mask &= PCIM_BAR_IO_BASE; 1185 bar = (*eax & mask) | PCIM_BAR_IO_SPACE; 1186 break; 1187 case PCIBAR_MEM32: 1188 mask = ~(pi->pi_bar[idx].size - 1); 1189 mask &= PCIM_BAR_MEM_BASE; 1190 bar = *eax & mask; 1191 bar |= PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_32; 1192 break; 1193 case PCIBAR_MEM64: 1194 mask = ~(pi->pi_bar[idx].size - 1); 1195 mask &= PCIM_BAR_MEM_BASE; 1196 bar = *eax & mask; 1197 bar |= PCIM_BAR_MEM_SPACE | PCIM_BAR_MEM_64 | 1198 PCIM_BAR_MEM_PREFETCH; 1199 break; 1200 case PCIBAR_MEMHI64: 1201 mask = ~(pi->pi_bar[idx - 1].size - 1); 1202 mask &= PCIM_BAR_MEM_BASE; 1203 bar = ((uint64_t)*eax << 32) & mask; 1204 bar = bar >> 32; 1205 break; 1206 default: 1207 assert(0); 1208 } 1209 pci_set_cfgdata32(pi, coff, bar); 1210 1211 } else if (pci_emul_iscap(pi, coff)) { 1212 pci_emul_capwrite(pi, coff, bytes, *eax); 1213 } else { 1214 CFGWRITE(pi, coff, *eax, bytes); 1215 } 1216 } 1217 1218 return (0); 1219 } 1220 1221 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+0, IOPORT_F_INOUT, pci_emul_cfgdata); 1222 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+1, IOPORT_F_INOUT, pci_emul_cfgdata); 1223 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+2, IOPORT_F_INOUT, pci_emul_cfgdata); 1224 INOUT_PORT(pci_cfgdata, CONF1_DATA_PORT+3, IOPORT_F_INOUT, pci_emul_cfgdata); 1225 1226 /* 1227 * I/O ports to configure PCI IRQ routing. We ignore all writes to it. 1228 */ 1229 static int 1230 pci_irq_port_handler(struct vmctx *ctx, int vcpu, int in, int port, int bytes, 1231 uint32_t *eax, void *arg) 1232 { 1233 assert(in == 0); 1234 return (0); 1235 } 1236 INOUT_PORT(pci_irq, 0xC00, IOPORT_F_OUT, pci_irq_port_handler); 1237 INOUT_PORT(pci_irq, 0xC01, IOPORT_F_OUT, pci_irq_port_handler); 1238 1239 #define PCI_EMUL_TEST 1240 #ifdef PCI_EMUL_TEST 1241 /* 1242 * Define a dummy test device 1243 */ 1244 #define DIOSZ 20 1245 #define DMEMSZ 4096 1246 struct pci_emul_dsoftc { 1247 uint8_t ioregs[DIOSZ]; 1248 uint8_t memregs[DMEMSZ]; 1249 }; 1250 1251 #define PCI_EMUL_MSI_MSGS 4 1252 #define PCI_EMUL_MSIX_MSGS 16 1253 1254 static int 1255 pci_emul_dinit(struct vmctx *ctx, struct pci_devinst *pi, char *opts) 1256 { 1257 int error; 1258 struct pci_emul_dsoftc *sc; 1259 1260 sc = malloc(sizeof(struct pci_emul_dsoftc)); 1261 memset(sc, 0, sizeof(struct pci_emul_dsoftc)); 1262 1263 pi->pi_arg = sc; 1264 1265 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x0001); 1266 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x10DD); 1267 pci_set_cfgdata8(pi, PCIR_CLASS, 0x02); 1268 1269 error = pci_emul_add_msicap(pi, PCI_EMUL_MSI_MSGS); 1270 assert(error == 0); 1271 1272 error = pci_emul_alloc_bar(pi, 0, PCIBAR_IO, DIOSZ); 1273 assert(error == 0); 1274 1275 error = pci_emul_alloc_bar(pi, 1, PCIBAR_MEM32, DMEMSZ); 1276 assert(error == 0); 1277 1278 return (0); 1279 } 1280 1281 static void 1282 pci_emul_diow(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, 1283 uint64_t offset, int size, uint64_t value) 1284 { 1285 int i; 1286 struct pci_emul_dsoftc *sc = pi->pi_arg; 1287 1288 if (baridx == 0) { 1289 if (offset + size > DIOSZ) { 1290 printf("diow: iow too large, offset %ld size %d\n", 1291 offset, size); 1292 return; 1293 } 1294 1295 if (size == 1) { 1296 sc->ioregs[offset] = value & 0xff; 1297 } else if (size == 2) { 1298 *(uint16_t *)&sc->ioregs[offset] = value & 0xffff; 1299 } else if (size == 4) { 1300 *(uint32_t *)&sc->ioregs[offset] = value; 1301 } else { 1302 printf("diow: iow unknown size %d\n", size); 1303 } 1304 1305 /* 1306 * Special magic value to generate an interrupt 1307 */ 1308 if (offset == 4 && size == 4 && pci_msi_enabled(pi)) 1309 pci_generate_msi(pi, value % pci_msi_msgnum(pi)); 1310 1311 if (value == 0xabcdef) { 1312 for (i = 0; i < pci_msi_msgnum(pi); i++) 1313 pci_generate_msi(pi, i); 1314 } 1315 } 1316 1317 if (baridx == 1) { 1318 if (offset + size > DMEMSZ) { 1319 printf("diow: memw too large, offset %ld size %d\n", 1320 offset, size); 1321 return; 1322 } 1323 1324 if (size == 1) { 1325 sc->memregs[offset] = value; 1326 } else if (size == 2) { 1327 *(uint16_t *)&sc->memregs[offset] = value; 1328 } else if (size == 4) { 1329 *(uint32_t *)&sc->memregs[offset] = value; 1330 } else if (size == 8) { 1331 *(uint64_t *)&sc->memregs[offset] = value; 1332 } else { 1333 printf("diow: memw unknown size %d\n", size); 1334 } 1335 1336 /* 1337 * magic interrupt ?? 1338 */ 1339 } 1340 1341 if (baridx > 1) { 1342 printf("diow: unknown bar idx %d\n", baridx); 1343 } 1344 } 1345 1346 static uint64_t 1347 pci_emul_dior(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx, 1348 uint64_t offset, int size) 1349 { 1350 struct pci_emul_dsoftc *sc = pi->pi_arg; 1351 uint32_t value; 1352 1353 if (baridx == 0) { 1354 if (offset + size > DIOSZ) { 1355 printf("dior: ior too large, offset %ld size %d\n", 1356 offset, size); 1357 return (0); 1358 } 1359 1360 if (size == 1) { 1361 value = sc->ioregs[offset]; 1362 } else if (size == 2) { 1363 value = *(uint16_t *) &sc->ioregs[offset]; 1364 } else if (size == 4) { 1365 value = *(uint32_t *) &sc->ioregs[offset]; 1366 } else { 1367 printf("dior: ior unknown size %d\n", size); 1368 } 1369 } 1370 1371 if (baridx == 1) { 1372 if (offset + size > DMEMSZ) { 1373 printf("dior: memr too large, offset %ld size %d\n", 1374 offset, size); 1375 return (0); 1376 } 1377 1378 if (size == 1) { 1379 value = sc->memregs[offset]; 1380 } else if (size == 2) { 1381 value = *(uint16_t *) &sc->memregs[offset]; 1382 } else if (size == 4) { 1383 value = *(uint32_t *) &sc->memregs[offset]; 1384 } else if (size == 8) { 1385 value = *(uint64_t *) &sc->memregs[offset]; 1386 } else { 1387 printf("dior: ior unknown size %d\n", size); 1388 } 1389 } 1390 1391 1392 if (baridx > 1) { 1393 printf("dior: unknown bar idx %d\n", baridx); 1394 return (0); 1395 } 1396 1397 return (value); 1398 } 1399 1400 struct pci_devemu pci_dummy = { 1401 .pe_emu = "dummy", 1402 .pe_init = pci_emul_dinit, 1403 .pe_barwrite = pci_emul_diow, 1404 .pe_barread = pci_emul_dior 1405 }; 1406 PCI_EMUL_SET(pci_dummy); 1407 1408 #endif /* PCI_EMUL_TEST */ 1409