/*-
 * Copyright (c) 2015, 2020 Ruslan Bukin <br@bsdpad.com>
 * Copyright (c) 2014 The FreeBSD Foundation
 * All rights reserved.
 *
 * This software was developed by Semihalf under
 * the sponsorship of the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/* Generic ECAM PCIe driver */

#include <sys/cdefs.h>
#include "opt_platform.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/rman.h>
#include <sys/module.h>
#include <sys/bus.h>
#include <sys/endian.h>

#include <dev/pci/pcivar.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcib_private.h>
#include <dev/pci/pci_host_generic.h>

#include <machine/bus.h>
#include <machine/intr.h>

#include "pcib_if.h"

#if defined(VM_MEMATTR_DEVICE_NP)
#define	PCI_UNMAPPED
#define	PCI_RF_FLAGS	RF_UNMAPPED
#else
#define	PCI_RF_FLAGS	0
#endif

/* Forward prototypes */

static uint32_t generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes);
static void generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes);
static int generic_pcie_maxslots(device_t dev);
static int generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value);

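/*
 * Core attach: create the parent DMA tag (propagating the coherency
 * flag to children), map the ECAM configuration window, and register
 * each decoded "ranges" window (prefetchable memory, memory, I/O port)
 * with its own rman so child resource requests can be routed below.
 */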
int
pci_host_generic_core_attach(device_t dev)
{
#ifdef PCI_UNMAPPED
	struct resource_map_request req;
	struct resource_map map;
#endif
	struct generic_pcie_core_softc *sc;
	struct rman *rm;
	uint64_t phys_base;
	uint64_t pci_base;
	uint64_t size;
	const char *range_descr;
	char buf[64];
	int domain, error;
	int flags, rid, tuple, type;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Create the parent DMA tag to pass down the coherent flag */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
	    1, 0,				/* alignment, bounds */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,			/* maxsize */
	    BUS_SPACE_UNRESTRICTED,		/* nsegments */
	    BUS_SPACE_MAXSIZE,			/* maxsegsize */
	    sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->dmat);
	if (error != 0)
		return (error);

	/*
	 * Attempt to set the domain. If it's missing, or we are unable to
	 * set it then memory allocations may be placed in the wrong domain.
	 */
	if (bus_get_domain(dev, &domain) == 0)
		(void)bus_dma_tag_set_domain(sc->dmat, domain);

	if ((sc->quirks & PCIE_CUSTOM_CONFIG_SPACE_QUIRK) == 0) {
		rid = 0;
		sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
		    PCI_RF_FLAGS | RF_ACTIVE);
		if (sc->res == NULL) {
			device_printf(dev, "could not allocate memory.\n");
			error = ENXIO;
			goto err_resource;
		}
#ifdef PCI_UNMAPPED
		resource_init_map_request(&req);
		req.memattr = VM_MEMATTR_DEVICE_NP;
		error = bus_map_resource(dev, SYS_RES_MEMORY, sc->res, &req,
		    &map);
		if (error != 0) {
			device_printf(dev, "could not map memory.\n");
			bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
			goto err_resource;
		}
		rman_set_mapping(sc->res, &map);
#endif
	}

	sc->has_pmem = false;
	sc->pmem_rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s prefetch window",
	    device_get_nameunit(dev));
	sc->pmem_rman.rm_descr = strdup(buf, M_DEVBUF);

	sc->mem_rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s memory window",
	    device_get_nameunit(dev));
	sc->mem_rman.rm_descr = strdup(buf, M_DEVBUF);

	sc->io_rman.rm_type = RMAN_ARRAY;
	snprintf(buf, sizeof(buf), "%s I/O port window",
	    device_get_nameunit(dev));
	sc->io_rman.rm_descr = strdup(buf, M_DEVBUF);

	/* Initialize rman and allocate memory regions */
	error = rman_init(&sc->pmem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_pmem_rman;
	}

	error = rman_init(&sc->mem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_mem_rman;
	}

	error = rman_init(&sc->io_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n", error);
		goto err_io_rman;
	}

	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		phys_base = sc->ranges[tuple].phys_base;
		pci_base = sc->ranges[tuple].pci_base;
		size = sc->ranges[tuple].size;
		rid = tuple + 1;
		if (size == 0)
			continue; /* empty range element */
		switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
		case FLAG_TYPE_PMEM:
			sc->has_pmem = true;
			range_descr = "prefetch";
			flags = RF_PREFETCHABLE;
			type = SYS_RES_MEMORY;
			rm = &sc->pmem_rman;
			break;
		case FLAG_TYPE_MEM:
			range_descr = "memory";
			flags = 0;
			type = SYS_RES_MEMORY;
			rm = &sc->mem_rman;
			break;
		case FLAG_TYPE_IO:
			range_descr = "I/O port";
			flags = 0;
			type = SYS_RES_IOPORT;
			rm = &sc->io_rman;
			break;
		default:
			continue;
		}
		if (bootverbose)
			device_printf(dev,
			    "PCI addr: 0x%jx, CPU addr: 0x%jx, Size: 0x%jx, Type: %s\n",
			    pci_base, phys_base, size, range_descr);
		error = bus_set_resource(dev, type, rid, phys_base, size);
		if (error != 0) {
			device_printf(dev,
			    "failed to set resource for range %d: %d\n", tuple,
			    error);
			continue;
		}
		sc->ranges[tuple].res = bus_alloc_resource_any(dev, type, &rid,
		    RF_ACTIVE | RF_UNMAPPED | flags);
		if (sc->ranges[tuple].res == NULL) {
			device_printf(dev,
			    "failed to allocate resource for range %d\n",
			    tuple);
			continue;
		}
		error = rman_manage_region(rm, pci_base, pci_base + size - 1);
		if (error) {
			device_printf(dev, "rman_manage_region() failed. "
			    "error = %d\n", error);
			continue;
		}
	}

	return (0);

err_io_rman:
	rman_fini(&sc->mem_rman);
err_mem_rman:
	rman_fini(&sc->pmem_rman);
err_pmem_rman:
	free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF);
	if (sc->res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
err_resource:
	bus_dma_tag_destroy(sc->dmat);
	return (error);
}

int
pci_host_generic_core_detach(device_t dev)
{
	struct generic_pcie_core_softc *sc;
	int error, tuple, type;

	sc = device_get_softc(dev);

	error = bus_generic_detach(dev);
	if (error != 0)
		return (error);

	for (tuple = 0; tuple < MAX_RANGES_TUPLES; tuple++) {
		if (sc->ranges[tuple].size == 0)
			continue; /* empty range element */
		switch (FLAG_TYPE(sc->ranges[tuple].flags)) {
		case FLAG_TYPE_PMEM:
		case FLAG_TYPE_MEM:
			type = SYS_RES_MEMORY;
			break;
		case FLAG_TYPE_IO:
			type = SYS_RES_IOPORT;
			break;
		default:
			continue;
		}
		if (sc->ranges[tuple].res != NULL)
			bus_release_resource(dev, type, tuple + 1,
			    sc->ranges[tuple].res);
		bus_delete_resource(dev, type, tuple + 1);
	}
	rman_fini(&sc->io_rman);
	rman_fini(&sc->mem_rman);
	rman_fini(&sc->pmem_rman);
	free(__DECONST(char *, sc->io_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->mem_rman.rm_descr), M_DEVBUF);
	free(__DECONST(char *, sc->pmem_rman.rm_descr), M_DEVBUF);
	if (sc->res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, 0, sc->res);
	bus_dma_tag_destroy(sc->dmat);

	return (0);
}

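/*
 * ECAM configuration accessors.  Every bus/slot/function gets a 4KB
 * register window, so PCIE_ADDR_OFFSET() encodes the config offset as
 * (bus << 20) | (slot << 15) | (func << 12) | reg.  Config space is
 * little-endian by definition, hence the le*toh()/htole*() swaps.
 * DesignWare-based controllers implement only the root port at bus 0,
 * slot 0; other slot numbers on bus 0 alias it, so those accesses are
 * rejected to keep the root port from being enumerated multiple times.
 */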
static uint32_t
generic_pcie_read_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, int bytes)
{
	struct generic_pcie_core_softc *sc;
	uint64_t offset;
	uint32_t data;

	sc = device_get_softc(dev);
	if ((bus < sc->bus_start) || (bus > sc->bus_end))
		return (~0U);
	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
	    (reg > PCIE_REGMAX))
		return (~0U);
	if ((sc->quirks & PCIE_ECAM_DESIGNWARE_QUIRK) && bus == 0 && slot > 0)
		return (~0U);

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);

	switch (bytes) {
	case 1:
		data = bus_read_1(sc->res, offset);
		break;
	case 2:
		data = le16toh(bus_read_2(sc->res, offset));
		break;
	case 4:
		data = le32toh(bus_read_4(sc->res, offset));
		break;
	default:
		return (~0U);
	}

	return (data);
}

static void
generic_pcie_write_config(device_t dev, u_int bus, u_int slot,
    u_int func, u_int reg, uint32_t val, int bytes)
{
	struct generic_pcie_core_softc *sc;
	uint64_t offset;

	sc = device_get_softc(dev);
	if ((bus < sc->bus_start) || (bus > sc->bus_end))
		return;
	if ((slot > PCI_SLOTMAX) || (func > PCI_FUNCMAX) ||
	    (reg > PCIE_REGMAX))
		return;

	offset = PCIE_ADDR_OFFSET(bus - sc->bus_start, slot, func, reg);

	switch (bytes) {
	case 1:
		bus_write_1(sc->res, offset, val);
		break;
	case 2:
		bus_write_2(sc->res, offset, htole16(val));
		break;
	case 4:
		bus_write_4(sc->res, offset, htole32(val));
		break;
	default:
		return;
	}
}

static int
generic_pcie_maxslots(device_t dev)
{

	return (31); /* max slots per bus acc. to standard */
}

int
generic_pcie_read_ivar(device_t dev, device_t child, int index,
    uintptr_t *result)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (index) {
	case PCIB_IVAR_BUS:
		*result = sc->bus_start;
		return (0);
	case PCIB_IVAR_DOMAIN:
		*result = sc->ecam;
		return (0);
	}

	if (bootverbose)
		device_printf(dev, "ERROR: Unknown index %d.\n", index);
	return (ENOENT);
}

static int
generic_pcie_write_ivar(device_t dev, device_t child, int index,
    uintptr_t value)
{

	return (ENOENT);
}

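/*
 * Pick the rman backing a child allocation: I/O ports, prefetchable
 * memory when a prefetch window exists, or regular memory otherwise.
 */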
static struct rman *
generic_pcie_get_rman(device_t dev, int type, u_int flags)
{
	struct generic_pcie_core_softc *sc = device_get_softc(dev);

	switch (type) {
	case SYS_RES_IOPORT:
		return (&sc->io_rman);
	case SYS_RES_MEMORY:
		if (sc->has_pmem && (flags & RF_PREFETCHABLE) != 0)
			return (&sc->pmem_rman);
		return (&sc->mem_rman);
	default:
		break;
	}

	return (NULL);
}

int
pci_host_generic_core_release_resource(device_t dev, device_t child,
    struct resource *res)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (rman_get_type(res)) {
	case PCI_RES_BUS:
		return (pci_domain_release_bus(sc->ecam, child, res));
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_release_resource(dev, child, res));
	default:
		return (bus_generic_release_resource(dev, child, res));
	}
}

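/*
 * Find the ranges[] entry of the requested type whose PCI window
 * contains [start, end]; used below to translate and map child
 * resources into the matching CPU window.
 */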
static struct pcie_range *
generic_pcie_containing_range(device_t dev, int type, rman_res_t start,
    rman_res_t end)
{
	struct generic_pcie_core_softc *sc = device_get_softc(dev);
	uint64_t pci_base;
	uint64_t size;
	int i, space;

	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (NULL);
	}

	for (i = 0; i < MAX_RANGES_TUPLES; i++) {
		pci_base = sc->ranges[i].pci_base;
		size = sc->ranges[i].size;
		if (size == 0)
			continue; /* empty range element */

		if (start < pci_base || end >= pci_base + size)
			continue;

		switch (FLAG_TYPE(sc->ranges[i].flags)) {
		case FLAG_TYPE_MEM:
		case FLAG_TYPE_PMEM:
			space = SYS_RES_MEMORY;
			break;
		case FLAG_TYPE_IO:
			space = SYS_RES_IOPORT;
			break;
		default:
			continue;
		}

		if (type == space)
			return (&sc->ranges[i]);
	}
	return (NULL);
}

static int
generic_pcie_translate_resource(device_t dev, int type, rman_res_t start,
    rman_res_t *new_start)
{
	struct pcie_range *range;

	/* Translate the address from a PCI address to a physical address */
	switch (type) {
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		range = generic_pcie_containing_range(dev, type, start, start);
		if (range == NULL)
			return (ENOENT);
		*new_start = start - range->pci_base + range->phys_base;
		break;
	default:
		/* No translation for non-memory types */
		*new_start = start;
		break;
	}

	return (0);
}

struct resource *
pci_host_generic_core_alloc_resource(device_t dev, device_t child, int type,
    int *rid, rman_res_t start, rman_res_t end, rman_res_t count, u_int flags)
{
	struct generic_pcie_core_softc *sc;
	struct resource *res;

	sc = device_get_softc(dev);

	switch (type) {
	case PCI_RES_BUS:
		res = pci_domain_alloc_bus(sc->ecam, child, rid, start, end,
		    count, flags);
		break;
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		res = bus_generic_rman_alloc_resource(dev, child, type, rid,
		    start, end, count, flags);
		break;
	default:
		res = bus_generic_alloc_resource(dev, child, type, rid, start,
		    end, count, flags);
		break;
	}
	if (res == NULL) {
		device_printf(dev, "%s FAIL: type=%d, rid=%d, "
		    "start=%016jx, end=%016jx, count=%016jx, flags=%x\n",
		    __func__, type, *rid, start, end, count, flags);
	}
	return (res);
}

static int
generic_pcie_activate_resource(device_t dev, device_t child,
    struct resource *r)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (rman_get_type(r)) {
	case PCI_RES_BUS:
		return (pci_domain_activate_bus(sc->ecam, child, r));
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_activate_resource(dev, child, r));
	default:
		return (bus_generic_activate_resource(dev, child, r));
	}
}

static int
generic_pcie_deactivate_resource(device_t dev, device_t child,
    struct resource *r)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (rman_get_type(r)) {
	case PCI_RES_BUS:
		return (pci_domain_deactivate_bus(sc->ecam, child, r));
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_deactivate_resource(dev, child, r));
	default:
		return (bus_generic_deactivate_resource(dev, child, r));
	}
}

static int
generic_pcie_adjust_resource(device_t dev, device_t child,
    struct resource *res, rman_res_t start, rman_res_t end)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	switch (rman_get_type(res)) {
	case PCI_RES_BUS:
		return (pci_domain_adjust_bus(sc->ecam, child, res, start,
		    end));
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		return (bus_generic_rman_adjust_resource(dev, child, res,
		    start, end));
	default:
		return (bus_generic_adjust_resource(dev, child, res, start,
		    end));
	}
}

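/*
 * Map and unmap requests for child memory and I/O resources are passed
 * through to the containing range's own resource, offset by the child's
 * position within the PCI window.
 */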
static int
generic_pcie_map_resource(device_t dev, device_t child, struct resource *r,
    struct resource_map_request *argsp, struct resource_map *map)
{
	struct resource_map_request args;
	struct pcie_range *range;
	rman_res_t length, start;
	int error, type;

	type = rman_get_type(r);
	switch (type) {
	case PCI_RES_BUS:
		return (EINVAL);
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (bus_generic_map_resource(dev, child, r, argsp, map));
	}

	/* Resources must be active to be mapped. */
	if (!(rman_get_flags(r) & RF_ACTIVE))
		return (ENXIO);

	resource_init_map_request(&args);
	error = resource_validate_map_request(r, argsp, &args, &start,
	    &length);
	if (error)
		return (error);

	range = generic_pcie_containing_range(dev, type, rman_get_start(r),
	    rman_get_end(r));
	if (range == NULL || range->res == NULL)
		return (ENOENT);

	args.offset = start - range->pci_base;
	args.length = length;
	return (bus_map_resource(dev, range->res, &args, map));
}

static int
generic_pcie_unmap_resource(device_t dev, device_t child, struct resource *r,
    struct resource_map *map)
{
	struct pcie_range *range;
	int type;

	type = rman_get_type(r);
	switch (type) {
	case PCI_RES_BUS:
		return (EINVAL);
	case SYS_RES_IOPORT:
	case SYS_RES_MEMORY:
		break;
	default:
		return (bus_generic_unmap_resource(dev, child, r, map));
	}

	range = generic_pcie_containing_range(dev, type, rman_get_start(r),
	    rman_get_end(r));
	if (range == NULL || range->res == NULL)
		return (ENOENT);
	return (bus_unmap_resource(dev, range->res, map));
}

static bus_dma_tag_t
generic_pcie_get_dma_tag(device_t dev, device_t child)
{
	struct generic_pcie_core_softc *sc;

	sc = device_get_softc(dev);
	return (sc->dmat);
}

static device_method_t generic_pcie_methods[] = {
	DEVMETHOD(device_attach,		pci_host_generic_core_attach),
	DEVMETHOD(device_detach,		pci_host_generic_core_detach),

	DEVMETHOD(bus_get_rman,			generic_pcie_get_rman),
	DEVMETHOD(bus_read_ivar,		generic_pcie_read_ivar),
	DEVMETHOD(bus_write_ivar,		generic_pcie_write_ivar),
	DEVMETHOD(bus_alloc_resource,		pci_host_generic_core_alloc_resource),
	DEVMETHOD(bus_adjust_resource,		generic_pcie_adjust_resource),
	DEVMETHOD(bus_activate_resource,	generic_pcie_activate_resource),
	DEVMETHOD(bus_deactivate_resource,	generic_pcie_deactivate_resource),
	DEVMETHOD(bus_release_resource,		pci_host_generic_core_release_resource),
	DEVMETHOD(bus_translate_resource,	generic_pcie_translate_resource),
	DEVMETHOD(bus_map_resource,		generic_pcie_map_resource),
	DEVMETHOD(bus_unmap_resource,		generic_pcie_unmap_resource),
	DEVMETHOD(bus_setup_intr,		bus_generic_setup_intr),
	DEVMETHOD(bus_teardown_intr,		bus_generic_teardown_intr),

	DEVMETHOD(bus_get_dma_tag,		generic_pcie_get_dma_tag),

	/* pcib interface */
	DEVMETHOD(pcib_maxslots,		generic_pcie_maxslots),
	DEVMETHOD(pcib_read_config,		generic_pcie_read_config),
	DEVMETHOD(pcib_write_config,		generic_pcie_write_config),

	DEVMETHOD_END
};

DEFINE_CLASS_0(pcib, generic_pcie_core_driver,
    generic_pcie_methods, sizeof(struct generic_pcie_core_softc));