1 /*- 2 * Copyright (c) 2015, 2020 Ruslan Bukin <br@bsdpad.com> 3 * Copyright (c) 2014 The FreeBSD Foundation 4 * All rights reserved. 5 * 6 * This software was developed by Semihalf under 7 * the sponsorship of the FreeBSD Foundation. 8 * 9 * Redistribution and use in source and binary forms, with or without 10 * modification, are permitted provided that the following conditions 11 * are met: 12 * 1. Redistributions of source code must retain the above copyright 13 * notice, this list of conditions and the following disclaimer. 14 * 2. Redistributions in binary form must reproduce the above copyright 15 * notice, this list of conditions and the following disclaimer in the 16 * documentation and/or other materials provided with the distribution. 17 * 18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 28 * SUCH DAMAGE. 29 */ 30 31 /* Generic ECAM PCIe driver */ 32 33 #include <sys/cdefs.h> 34 #include "opt_platform.h" 35 36 #include <sys/param.h> 37 #include <sys/systm.h> 38 #include <sys/malloc.h> 39 #include <sys/kernel.h> 40 #include <sys/rman.h> 41 #include <sys/module.h> 42 #include <sys/bus.h> 43 #include <sys/endian.h> 44 45 #include <dev/pci/pcivar.h> 46 #include <dev/pci/pcireg.h> 47 #include <dev/pci/pcib_private.h> 48 #include <dev/pci/pci_host_generic.h> 49 50 #include <machine/bus.h> 51 #include <machine/intr.h> 52 53 #include "pcib_if.h" 54 55 #if defined(VM_MEMATTR_DEVICE_NP) 56 #define PCI_UNMAPPED 57 #define PCI_RF_FLAGS RF_UNMAPPED 58 #else 59 #define PCI_RF_FLAGS 0 60 #endif 61 62 63 /* Forward prototypes */ 64 65 static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot, 66 u_int func, u_int reg, int bytes); 67 static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot, 68 u_int func, u_int reg, uint32_t val, int bytes); 69 static int generic_pcie_maxslots(device_t dev); 70 static int generic_pcie_read_ivar(device_t dev, device_t child, int index, 71 uintptr_t *result); 72 static int generic_pcie_write_ivar(device_t dev, device_t child, int index, 73 uintptr_t value); 74 75 int 76 pci_host_generic_core_attach(device_t dev) 77 { 78 #ifdef PCI_UNMAPPED 79 struct resource_map_request req; 80 struct resource_map map; 81 #endif 82 struct generic_pcie_core_softc *sc; 83 struct rman *rm; 84 uint64_t phys_base; 85 uint64_t pci_base; 86 uint64_t size; 87 const char *range_descr; 88 char buf[64]; 89 int domain, error; 90 int flags, rid, tuple, type; 91 92 sc = device_get_softc(dev); 93 sc->dev = dev; 94 95 /* Create the parent DMA tag to pass down the coherent flag */ 96 error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */ 97 1, 0, /* alignment, bounds */ 98 BUS_SPACE_MAXADDR, /* lowaddr */ 99 BUS_SPACE_MAXADDR, /* highaddr */ 100 NULL, NULL, /* filter, filterarg */ 101 BUS_SPACE_MAXSIZE, /* maxsize */ 102 BUS_SPACE_UNRESTRICTED, /* nsegments */ 103 BUS_SPACE_MAXSIZE, /* maxsegsize */ 104 sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */ 105 NULL, NULL, /* lockfunc, lockarg */ 106 &sc->dmat); 107 if (error != 0) 108 return (error); 109 110 /* 111 * Attempt to set the domain. If it's missing, or we are unable to 112 * set it then memory allocations may be placed in the wrong domain. 113 */ 114 if (bus_get_domain(dev, &domain) == 0) 115 (void)bus_dma_tag_set_domain(sc->dmat, domain); 116 117 if ((sc->quirks & PCIE_CUSTOM_CONFIG_SPACE_QUIRK) == 0) { 118 rid = 0; 119 sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, 120 PCI_RF_FLAGS | RF_ACTIVE); 121 if (sc->res == NULL) { 122 device_printf(dev, "could not allocate memory.\n"); 123 error = ENXIO; 124 goto err_resource; 125 } 126 #ifdef PCI_UNMAPPED 127 resource_init_map_request(&req); 128 req.memattr = VM_MEMATTR_DEVICE_NP; 129 error = bus_map_resource(dev, SYS_RES_MEMORY, sc->res, &req, 130 &map); 131 if (error != 0) { 132 device_printf(dev, "could not map memory.\n"); 133 return (error); 134 } 135 rman_set_mapping(sc->res, &map); 136 #endif 137 } 138 139 sc->has_pmem = false; 140 sc->pmem_rman.rm_type = RMAN_ARRAY; 141 snprintf(buf, sizeof(buf), "%s prefetch window", 142 device_get_nameunit(dev)); 143 sc->pmem_rman.rm_descr = strdup(buf, M_DEVBUF); 144 145 sc->mem_rman.rm_type = RMAN_ARRAY; 146 snprintf(buf, sizeof(buf), "%s memory window", 147 device_get_nameunit(dev)); 148 sc->mem_rman.rm_descr = strdup(buf, M_DEVBUF); 149 150 sc->io_rman.rm_type = RMAN_ARRAY; 151 snprintf(buf, sizeof(buf), "%s I/O port window", 152 device_get_nameunit(dev)); 153 sc->io_rman.rm_descr = strdup(buf, M_DEVBUF); 154 155 /* Initialize rman and allocate memory regions */ 156 error = rman_init(&sc->pmem_rman); 157 if (error) { 158 device_printf(dev, "rman_init() failed. error = %d\n", error); 159 goto err_pmem_rman; 160 } 161 162 error = rman_init(&sc->mem_rman); 163 if (error) { 164 device_printf(dev, "rman_init() failed. error = %d\n", error); 165 goto err_mem_rman; 166 } 167 168 error = rman_init(&sc->io_rman); 169 if (error) { 170 device_printf(dev, "rman_init() failed. error = %d\n", error); 171 goto err_io_rman; 172 } 173 174 for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { 175 phys_base = sc->ranges[tuple].phys_base; 176 pci_base = sc->ranges[tuple].pci_base; 177 size = sc->ranges[tuple].size; 178 rid = tuple + 1; 179 if (size == 0) 180 continue; /* empty range element */ 181 switch (FLAG_TYPE(sc->ranges[tuple].flags)) { 182 case FLAG_TYPE_PMEM: 183 sc->has_pmem = true; 184 range_descr = "prefetch"; 185 flags = RF_PREFETCHABLE; 186 type = SYS_RES_MEMORY; 187 rm = &sc->pmem_rman; 188 break; 189 case FLAG_TYPE_MEM: 190 range_descr = "memory"; 191 flags = 0; 192 type = SYS_RES_MEMORY; 193 rm = &sc->mem_rman; 194 break; 195 case FLAG_TYPE_IO: 196 range_descr = "I/O port"; 197 flags = 0; 198 type = SYS_RES_IOPORT; 199 rm = &sc->io_rman; 200 break; 201 default: 202 continue; 203 } 204 if (bootverbose) 205 device_printf(dev, 206 "PCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx, Type: %s\n", 207 pci_base, phys_base, size, range_descr); 208 error = bus_set_resource(dev, type, rid, phys_base, size); 209 if (error != 0) { 210 device_printf(dev, 211 "failed to set resource for range %d: %d\n", tuple, 212 error); 213 continue; 214 } 215 sc->ranges[tuple].res = bus_alloc_resource_any(dev, type, &rid, 216 RF_ACTIVE | RF_UNMAPPED | flags); 217 if (sc->ranges[tuple].res == NULL) { 218 device_printf(dev, 219 "failed to allocate resource for range %d\n", tuple); 220 continue; 221 } 222 error = rman_manage_region(rm, pci_base, pci_base + size - 1); 223 if (error) { 224 device_printf(dev, "rman_manage_region() failed." 225 "error = %d\n", error); 226 continue; 227 } 228 } 229 230 return (0); 231 232 err_io_rman: 233 rman_fini(&sc->mem_rman); 234 err_mem_rman: 235 rman_fini(&sc->pmem_rman); 236 err_pmem_rman: 237 free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF); 238 free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF); 239 free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF); 240 if (sc->res != NULL) 241 bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res); 242 err_resource: 243 bus_dma_tag_destroy(sc->dmat); 244 return (error); 245 } 246 247 int 248 pci_host_generic_core_detach(device_t dev) 249 { 250 struct generic_pcie_core_softc *sc; 251 int error, tuple, type; 252 253 sc = device_get_softc(dev); 254 255 error = bus_generic_detach(dev); 256 if (error != 0) 257 return (error); 258 259 for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) { 260 if (sc->ranges[tuple].size == 0) 261 continue; /* empty range element */ 262 switch (FLAG_TYPE(sc->ranges[tuple].flags)) { 263 case FLAG_TYPE_PMEM: 264 case FLAG_TYPE_MEM: 265 type = SYS_RES_MEMORY; 266 break; 267 case FLAG_TYPE_IO: 268 type = SYS_RES_IOPORT; 269 break; 270 default: 271 continue; 272 } 273 if (sc->ranges[tuple].res != NULL) 274 bus_release_resource(dev, type, tuple + 1, 275 sc->ranges[tuple].res); 276 bus_delete_resource(dev, type, tuple + 1); 277 } 278 rman_fini(&sc->io_rman); 279 rman_fini(&sc->mem_rman); 280 rman_fini(&sc->pmem_rman); 281 free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF); 282 free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF); 283 free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF); 284 if (sc->res != NULL) 285 bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res); 286 bus_dma_tag_destroy(sc->dmat); 287 288 return (0); 289 } 290 291 static uint32_t 292 generic_pcie_read_config(device_t dev, u_int bus, u_int slot, 293 u_int func, u_int reg, int bytes) 294 { 295 struct generic_pcie_core_softc *sc; 296 uint64_t offset; 297 uint32_t data; 298 299 sc = device_get_softc(dev); 300 if ((bus < sc->bus_start) || (bus > sc->bus_end)) 301 return (~0U); 302 if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || 303 (reg > PCIE_REGMAX)) 304 return (~0U); 305 if ((sc->quirks & PCIE_ECAM_DESIGNWARE_QUIRK) && bus == 0 && slot > 0) 306 return (~0U); 307 308 offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg); 309 310 switch (bytes) { 311 case 1: 312 data = bus_read_1(sc->res, offset); 313 break; 314 case 2: 315 data = le16toh(bus_read_2(sc->res, offset)); 316 break; 317 case 4: 318 data = le32toh(bus_read_4(sc->res, offset)); 319 break; 320 default: 321 return (~0U); 322 } 323 324 return (data); 325 } 326 327 static void 328 generic_pcie_write_config(device_t dev, u_int bus, u_int slot, 329 u_int func, u_int reg, uint32_t val, int bytes) 330 { 331 struct generic_pcie_core_softc *sc; 332 uint64_t offset; 333 334 sc = device_get_softc(dev); 335 if ((bus < sc->bus_start) || (bus > sc->bus_end)) 336 return; 337 if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) || 338 (reg > PCIE_REGMAX)) 339 return; 340 341 offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg); 342 343 switch (bytes) { 344 case 1: 345 bus_write_1(sc->res, offset, val); 346 break; 347 case 2: 348 bus_write_2(sc->res, offset, htole16(val)); 349 break; 350 case 4: 351 bus_write_4(sc->res, offset, htole32(val)); 352 break; 353 default: 354 return; 355 } 356 } 357 358 static int 359 generic_pcie_maxslots(device_t dev) 360 { 361 362 return (31); /* max slots per bus acc. to standard */ 363 } 364 365 static int 366 generic_pcie_read_ivar(device_t dev, device_t child, int index, 367 uintptr_t *result) 368 { 369 struct generic_pcie_core_softc *sc; 370 371 sc = device_get_softc(dev); 372 switch (index) { 373 case PCIB_IVAR_BUS: 374 *result = sc->bus_start; 375 return (0); 376 case PCIB_IVAR_DOMAIN: 377 *result = sc->ecam; 378 return (0); 379 } 380 381 if (bootverbose) 382 device_printf(dev, "ERROR: Unknown index %d.\n", index); 383 return (ENOENT); 384 } 385 386 static int 387 generic_pcie_write_ivar(device_t dev, device_t child, int index, 388 uintptr_t value) 389 { 390 391 return (ENOENT); 392 } 393 394 static struct rman * 395 generic_pcie_get_rman(device_t dev, int type, u_int flags) 396 { 397 struct generic_pcie_core_softc *sc = device_get_softc(dev); 398 399 switch (type) { 400 case SYS_RES_IOPORT: 401 return (&sc->io_rman); 402 case SYS_RES_MEMORY: 403 if (sc->has_pmem && (flags & RF_PREFETCHABLE) != 0) 404 return (&sc->pmem_rman); 405 return (&sc->mem_rman); 406 default: 407 break; 408 } 409 410 return (NULL); 411 } 412 413 int 414 pci_host_generic_core_release_resource(device_t dev, device_t child, 415 struct resource *res) 416 { 417 struct generic_pcie_core_softc *sc; 418 419 sc = device_get_softc(dev); 420 switch (rman_get_type(res)) { 421 case PCI_RES_BUS: 422 return (pci_domain_release_bus(sc->ecam, child, res)); 423 case SYS_RES_IOPORT: 424 case SYS_RES_MEMORY: 425 return (bus_generic_rman_release_resource(dev, child, res)); 426 default: 427 return (bus_generic_release_resource(dev, child, res)); 428 } 429 } 430 431 static struct pcie_range * 432 generic_pcie_containing_range(device_t dev, int type, rman_res_t start, 433 rman_res_t end) 434 { 435 struct generic_pcie_core_softc *sc = device_get_softc(dev); 436 uint64_t pci_base; 437 uint64_t size; 438 int i, space; 439 440 switch (type) { 441 case SYS_RES_IOPORT: 442 case SYS_RES_MEMORY: 443 break; 444 default: 445 return (NULL); 446 } 447 448 for (i = 0; i < MAX_RANGES_TUPLES; i++) { 449 pci_base = sc->ranges[i].pci_base; 450 size = sc->ranges[i].size; 451 if (size == 0) 452 continue; /* empty range element */ 453 454 if (start < pci_base || end >= pci_base + size) 455 continue; 456 457 switch (FLAG_TYPE(sc->ranges[i].flags)) { 458 case FLAG_TYPE_MEM: 459 case FLAG_TYPE_PMEM: 460 space = SYS_RES_MEMORY; 461 break; 462 case FLAG_TYPE_IO: 463 space = SYS_RES_IOPORT; 464 break; 465 default: 466 continue; 467 } 468 469 if (type == space) 470 return (&sc->ranges[i]); 471 } 472 return (NULL); 473 } 474 475 static int 476 generic_pcie_translate_resource(device_t dev, int type, rman_res_t start, 477 rman_res_t *new_start) 478 { 479 struct pcie_range *range; 480 481 /* Translate the address from a PCI address to a physical address */ 482 switch (type) { 483 case SYS_RES_IOPORT: 484 case SYS_RES_MEMORY: 485 range = generic_pcie_containing_range(dev, type, start, start); 486 if (range == NULL) 487 return (ENOENT); 488 *new_start = start - range->pci_base + range->phys_base; 489 break; 490 default: 491 /* No translation for non-memory types */ 492 *new_start = start; 493 break; 494 } 495 496 return (0); 497 } 498 499 struct resource * 500 pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type, 501 int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags) 502 { 503 struct generic_pcie_core_softc *sc; 504 struct resource *res; 505 506 sc = device_get_softc(dev); 507 508 switch (type) { 509 case PCI_RES_BUS: 510 res = pci_domain_alloc_bus(sc->ecam, child, rid, start, end, 511 count, flags); 512 break; 513 case SYS_RES_IOPORT: 514 case SYS_RES_MEMORY: 515 res = bus_generic_rman_alloc_resource(dev, child, type, rid, 516 start, end, count, flags); 517 break; 518 default: 519 res = bus_generic_alloc_resource(dev, child, type, rid, start, 520 end, count, flags); 521 break; 522 } 523 if (res == NULL) { 524 device_printf(dev, "%s FAIL: type=%d, rid=%d, " 525 "start=%016jx, end=%016jx, count=%016jx, flags=%x\n", 526 __func__, type, *rid, start, end, count, flags); 527 } 528 return (res); 529 } 530 531 static int 532 generic_pcie_activate_resource(device_t dev, device_t child, struct resource *r) 533 { 534 struct generic_pcie_core_softc *sc; 535 536 sc = device_get_softc(dev); 537 switch (rman_get_type(r)) { 538 case PCI_RES_BUS: 539 return (pci_domain_activate_bus(sc->ecam, child, r)); 540 case SYS_RES_IOPORT: 541 case SYS_RES_MEMORY: 542 return (bus_generic_rman_activate_resource(dev, child, r)); 543 default: 544 return (bus_generic_activate_resource(dev, child, r)); 545 } 546 } 547 548 static int 549 generic_pcie_deactivate_resource(device_t dev, device_t child, 550 struct resource *r) 551 { 552 struct generic_pcie_core_softc *sc; 553 554 sc = device_get_softc(dev); 555 switch (rman_get_type(r)) { 556 case PCI_RES_BUS: 557 return (pci_domain_deactivate_bus(sc->ecam, child, r)); 558 case SYS_RES_IOPORT: 559 case SYS_RES_MEMORY: 560 return (bus_generic_rman_deactivate_resource(dev, child, r)); 561 default: 562 return (bus_generic_deactivate_resource(dev, child, r)); 563 } 564 } 565 566 static int 567 generic_pcie_adjust_resource(device_t dev, device_t child, 568 struct resource *res, rman_res_t start, rman_res_t end) 569 { 570 struct generic_pcie_core_softc *sc; 571 572 sc = device_get_softc(dev); 573 switch (rman_get_type(res)) { 574 case PCI_RES_BUS: 575 return (pci_domain_adjust_bus(sc->ecam, child, res, start, 576 end)); 577 case SYS_RES_IOPORT: 578 case SYS_RES_MEMORY: 579 return (bus_generic_rman_adjust_resource(dev, child, res, 580 start, end)); 581 default: 582 return (bus_generic_adjust_resource(dev, child, res, start, 583 end)); 584 } 585 } 586 587 static int 588 generic_pcie_map_resource(device_t dev, device_t child, struct resource *r, 589 struct resource_map_request *argsp, struct resource_map *map) 590 { 591 struct resource_map_request args; 592 struct pcie_range *range; 593 rman_res_t length, start; 594 int error, type; 595 596 type = rman_get_type(r); 597 switch (type) { 598 case PCI_RES_BUS: 599 return (EINVAL); 600 case SYS_RES_IOPORT: 601 case SYS_RES_MEMORY: 602 break; 603 default: 604 return (bus_generic_map_resource(dev, child, r, argsp, map)); 605 } 606 607 /* Resources must be active to be mapped. */ 608 if (!(rman_get_flags(r) & RF_ACTIVE)) 609 return (ENXIO); 610 611 resource_init_map_request(&args); 612 error = resource_validate_map_request(r, argsp, &args, &start, &length); 613 if (error) 614 return (error); 615 616 range = generic_pcie_containing_range(dev, type, rman_get_start(r), 617 rman_get_end(r)); 618 if (range == NULL || range->res == NULL) 619 return (ENOENT); 620 621 args.offset = start - range->pci_base; 622 args.length = length; 623 return (bus_map_resource(dev, range->res, &args, map)); 624 } 625 626 static int 627 generic_pcie_unmap_resource(device_t dev, device_t child, struct resource *r, 628 struct resource_map *map) 629 { 630 struct pcie_range *range; 631 int type; 632 633 type = rman_get_type(r); 634 switch (type) { 635 case PCI_RES_BUS: 636 return (EINVAL); 637 case SYS_RES_IOPORT: 638 case SYS_RES_MEMORY: 639 break; 640 default: 641 return (bus_generic_unmap_resource(dev, child, r, map)); 642 } 643 644 range = generic_pcie_containing_range(dev, type, rman_get_start(r), 645 rman_get_end(r)); 646 if (range == NULL || range->res == NULL) 647 return (ENOENT); 648 return (bus_unmap_resource(dev, range->res, map)); 649 } 650 651 static bus_dma_tag_t 652 generic_pcie_get_dma_tag(device_t dev, device_t child) 653 { 654 struct generic_pcie_core_softc *sc; 655 656 sc = device_get_softc(dev); 657 return (sc->dmat); 658 } 659 660 static device_method_t generic_pcie_methods[] = { 661 DEVMETHOD(device_attach, pci_host_generic_core_attach), 662 DEVMETHOD(device_detach, pci_host_generic_core_detach), 663 664 DEVMETHOD(bus_get_rman, generic_pcie_get_rman), 665 DEVMETHOD(bus_read_ivar, generic_pcie_read_ivar), 666 DEVMETHOD(bus_write_ivar, generic_pcie_write_ivar), 667 DEVMETHOD(bus_alloc_resource, pci_host_generic_core_alloc_resource), 668 DEVMETHOD(bus_adjust_resource, generic_pcie_adjust_resource), 669 DEVMETHOD(bus_activate_resource, generic_pcie_activate_resource), 670 DEVMETHOD(bus_deactivate_resource, generic_pcie_deactivate_resource), 671 DEVMETHOD(bus_release_resource, pci_host_generic_core_release_resource), 672 DEVMETHOD(bus_translate_resource, generic_pcie_translate_resource), 673 DEVMETHOD(bus_map_resource, generic_pcie_map_resource), 674 DEVMETHOD(bus_unmap_resource, generic_pcie_unmap_resource), 675 DEVMETHOD(bus_setup_intr, bus_generic_setup_intr), 676 DEVMETHOD(bus_teardown_intr, bus_generic_teardown_intr), 677 678 DEVMETHOD(bus_get_dma_tag, generic_pcie_get_dma_tag), 679 680 /* pcib interface */ 681 DEVMETHOD(pcib_maxslots, generic_pcie_maxslots), 682 DEVMETHOD(pcib_read_config, generic_pcie_read_config), 683 DEVMETHOD(pcib_write_config, generic_pcie_write_config), 684 685 DEVMETHOD_END 686 }; 687 688 DEFINE_CLASS_0(pcib, generic_pcie_core_driver, 689 generic_pcie_methods, sizeof(struct generic_pcie_core_softc)); 690