/*-
 * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Landon Fuller
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
/*
 * PCI-specific implementation for the BHNDB bridge driver.
 *
 * Provides support for bridging from a PCI parent bus to a BHND-compatible
 * bus (e.g. bcma or siba) via a Broadcom PCI core configured in end-point
 * mode.
 *
 * This driver handles all initial generic host-level PCI interactions with a
 * PCI/PCIe bridge core operating in endpoint mode. Once the bridged bhnd(4)
 * bus has been enumerated, this driver works in tandem with a core-specific
 * bhnd_pci_hostb driver to manage the PCI core.
 */

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/systm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/bhnd/bhnd.h>
#include <dev/bhnd/bhndreg.h>

#include <dev/bhnd/bhnd_erom.h>
#include <dev/bhnd/bhnd_eromvar.h>

#include <dev/bhnd/siba/sibareg.h>

#include <dev/bhnd/cores/pci/bhnd_pcireg.h>

#include "bhnd_pwrctl_hostb_if.h"

#include "bhndb_pcireg.h"
#include "bhndb_pcivar.h"
#include "bhndb_private.h"

struct bhndb_pci_eio;
struct bhndb_pci_probe;

static int		bhndb_pci_alloc_msi(struct bhndb_pci_softc *sc,
			    int *msi_count);

static int		bhndb_pci_add_children(struct bhndb_pci_softc *sc);

static bhnd_devclass_t	bhndb_expected_pci_devclass(device_t dev);
static bool		bhndb_is_pcie_attached(device_t dev);

static int		bhndb_enable_pci_clocks(device_t dev);
static int		bhndb_disable_pci_clocks(device_t dev);

static int		bhndb_pci_compat_setregwin(device_t dev,
			    device_t pci_dev, const struct bhndb_regwin *,
			    bhnd_addr_t);
static int		bhndb_pci_fast_setregwin(device_t dev, device_t pci_dev,
			    const struct bhndb_regwin *, bhnd_addr_t);

static void		bhndb_pci_write_core(struct bhndb_pci_softc *sc,
			    bus_size_t offset, uint32_t value, u_int width);
static uint32_t		bhndb_pci_read_core(struct bhndb_pci_softc *sc,
			    bus_size_t offset, u_int width);

static int		bhndb_pci_srsh_pi_war(struct bhndb_pci_softc *sc,
			    struct bhndb_pci_probe *probe);

static bus_addr_t	bhndb_pci_sprom_addr(struct bhndb_pci_softc *sc);
static bus_size_t	bhndb_pci_sprom_size(struct bhndb_pci_softc *sc);

static int		bhndb_pci_probe_alloc(struct bhndb_pci_probe **probe,
			    device_t dev, bhnd_devclass_t pci_devclass);
static void		bhndb_pci_probe_free(struct bhndb_pci_probe *probe);

static int		bhndb_pci_probe_copy_core_table(
			    struct bhndb_pci_probe *probe,
			    struct bhnd_core_info **cores, u_int *ncores);
static void		bhndb_pci_probe_free_core_table(
			    struct bhnd_core_info *cores);

static void		bhndb_pci_probe_write(struct bhndb_pci_probe *sc,
			    bhnd_addr_t addr, bhnd_size_t offset,
			    uint32_t value, u_int width);
static uint32_t		bhndb_pci_probe_read(struct bhndb_pci_probe *sc,
			    bhnd_addr_t addr, bhnd_size_t offset, u_int width);

static void		bhndb_pci_eio_init(struct bhndb_pci_eio *eio,
			    struct bhndb_pci_probe *probe);
static int		bhndb_pci_eio_map(struct bhnd_erom_io *eio,
			    bhnd_addr_t addr, bhnd_size_t size);
static int		bhndb_pci_eio_tell(struct bhnd_erom_io *eio,
			    bhnd_addr_t *addr, bhnd_size_t *size);
static uint32_t		bhndb_pci_eio_read(struct bhnd_erom_io *eio,
			    bhnd_size_t offset, u_int width);

#define	BHNDB_PCI_MSI_COUNT	1

static struct bhndb_pci_quirk	bhndb_pci_quirks[];
static struct bhndb_pci_quirk	bhndb_pcie_quirks[];
static struct bhndb_pci_quirk	bhndb_pcie2_quirks[];

static struct bhndb_pci_core bhndb_pci_cores[] = {
	BHNDB_PCI_CORE(PCI,	bhndb_pci_quirks),
	BHNDB_PCI_CORE(PCIE,	bhndb_pcie_quirks),
	BHNDB_PCI_CORE(PCIE2,	bhndb_pcie2_quirks),
	BHNDB_PCI_CORE_END
};

/* bhndb_pci erom I/O instance state */
struct bhndb_pci_eio {
	struct bhnd_erom_io	 eio;
	bool			 mapped;	/**< true if a valid mapping exists */
	bhnd_addr_t		 addr;		/**< mapped address */
	bhnd_size_t		 size;		/**< mapped size */
	struct bhndb_pci_probe	*probe;		/**< borrowed probe reference */
};

/**
 * Provides early bus access to the bridged device's cores and core enumeration
 * table.
 *
 * May be safely used during probe or early device attach, prior to calling
 * bhndb_attach().
 */
struct bhndb_pci_probe {
	device_t			 dev;		/**< bridge device */
	device_t			 pci_dev;	/**< parent PCI device */
	struct bhnd_chipid		 cid;		/**< chip identification */
	struct bhnd_core_info		 hostb_core;	/**< PCI bridge core info */

	struct bhndb_pci_eio		 erom_io;	/**< erom I/O instance */
	bhnd_erom_class_t		*erom_class;	/**< probed erom class */
	bhnd_erom_t			*erom;		/**< erom parser */
	struct bhnd_core_info		*cores;		/**< erom-owned core table */
	u_int				 ncores;	/**< number of cores */

	const struct bhndb_regwin	*m_win;		/**< mapped register window, or NULL if no mapping */
	struct resource			*m_res;		/**< resource containing the register window, or NULL if no window mapped */
	bhnd_addr_t			 m_target;	/**< base address mapped by m_win */
	bhnd_addr_t			 m_addr;	/**< mapped address */
	bhnd_size_t			 m_size;	/**< mapped size */
	bool				 m_valid;	/**< true if a valid mapping exists, false otherwise */

	struct bhndb_host_resources	*hr;		/**< backing host resources */
};

static struct bhndb_pci_quirk bhndb_pci_quirks[] = {
	/* Backplane interrupt flags must be routed via siba-specific
	 * SIBA_CFG0_INTVEC configuration register; the BHNDB_PCI_INT_MASK
	 * PCI configuration register is unsupported. */
	{{ BHND_MATCH_CHIP_TYPE		(SIBA) },
	 { BHND_MATCH_CORE_REV		(HWREV_LTE(5)) },
	    BHNDB_PCI_QUIRK_SIBA_INTVEC },

	/* All PCI core revisions require the SRSH work-around */
	BHNDB_PCI_QUIRK(HWREV_ANY,	BHNDB_PCI_QUIRK_SRSH_WAR),
	BHNDB_PCI_QUIRK_END
};

static struct bhndb_pci_quirk bhndb_pcie_quirks[] = {
	/* All PCIe-G1 core revisions require the SRSH work-around */
	BHNDB_PCI_QUIRK(HWREV_ANY,	BHNDB_PCI_QUIRK_SRSH_WAR),
	BHNDB_PCI_QUIRK_END
};

static struct bhndb_pci_quirk bhndb_pcie2_quirks[] = {
	BHNDB_PCI_QUIRK_END
};

/**
 * Return the device table entry for @p ci, or NULL if none.
 */
static struct bhndb_pci_core *
bhndb_pci_find_core(struct bhnd_core_info *ci)
{
	for (size_t i = 0; !BHNDB_PCI_IS_CORE_END(&bhndb_pci_cores[i]); i++) {
		struct bhndb_pci_core *entry = &bhndb_pci_cores[i];

		if (bhnd_core_matches(ci, &entry->match))
			return (entry);
	}

	return (NULL);
}

/**
 * Return all quirk flags for the given @p cid and @p ci.
 */
static uint32_t
bhndb_pci_get_core_quirks(struct bhnd_chipid *cid, struct bhnd_core_info *ci)
{
	struct bhndb_pci_core	*entry;
	struct bhndb_pci_quirk	*qtable;
	uint32_t		 quirks;

	quirks = 0;

	/* No core entry? */
	if ((entry = bhndb_pci_find_core(ci)) == NULL)
		return (quirks);

	/* No quirks? */
	if ((qtable = entry->quirks) == NULL)
		return (quirks);

	for (size_t i = 0; !BHNDB_PCI_IS_QUIRK_END(&qtable[i]); i++) {
		struct bhndb_pci_quirk *q = &qtable[i];

		if (!bhnd_chip_matches(cid, &q->chip_desc))
			continue;

		if (!bhnd_core_matches(ci, &q->core_desc))
			continue;

		quirks |= q->quirks;
	}

	return (quirks);
}

/**
 * Default bhndb_pci implementation of device_probe().
 *
 * Verifies that the parent is a PCI/PCIe device.
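 *
 * As part of probing, PCI clocks are enabled and the bridged bhnd(4) bus is
 * enumerated to locate the host bridge core; all probe state is released
 * (and the clocks are disabled again) before returning.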
 */
static int
bhndb_pci_probe(device_t dev)
{
	struct bhndb_pci_probe	*probe;
	struct bhndb_pci_core	*entry;
	bhnd_devclass_t		 hostb_devclass;
	device_t		 parent, parent_bus;
	devclass_t		 pci, bus_devclass;
	int			 error;

	probe = NULL;

	/* Our parent must be a PCI/PCIe device. */
	pci = devclass_find("pci");
	parent = device_get_parent(dev);
	parent_bus = device_get_parent(parent);
	if (parent_bus == NULL)
		return (ENXIO);

	/* The bus device class may inherit from 'pci' */
	for (bus_devclass = device_get_devclass(parent_bus);
	    bus_devclass != NULL;
	    bus_devclass = devclass_get_parent(bus_devclass))
	{
		if (bus_devclass == pci)
			break;
	}

	if (bus_devclass != pci)
		return (ENXIO);

	/* Enable clocks */
	if ((error = bhndb_enable_pci_clocks(dev)))
		return (error);

	/* Identify the chip and enumerate the bridged cores */
	hostb_devclass = bhndb_expected_pci_devclass(dev);
	if ((error = bhndb_pci_probe_alloc(&probe, dev, hostb_devclass)))
		goto cleanup;

	/* Look for a matching core table entry */
	if ((entry = bhndb_pci_find_core(&probe->hostb_core)) == NULL) {
		error = ENXIO;
		goto cleanup;
	}

	device_set_desc(dev, "PCI-BHND bridge");

	/* fall-through */
	error = BUS_PROBE_DEFAULT;

cleanup:
	if (probe != NULL)
		bhndb_pci_probe_free(probe);

	bhndb_disable_pci_clocks(dev);

	return (error);
}

/**
 * Attempt to allocate MSI interrupts, returning the count in @p msi_count
 * on success.
 */
static int
bhndb_pci_alloc_msi(struct bhndb_pci_softc *sc, int *msi_count)
{
	int error, count;

	/* Is MSI available? */
	if (pci_msi_count(sc->parent) < BHNDB_PCI_MSI_COUNT)
		return (ENXIO);

	/* Allocate expected message count */
	count = BHNDB_PCI_MSI_COUNT;
	if ((error = pci_alloc_msi(sc->parent, &count))) {
		device_printf(sc->dev, "failed to allocate MSI interrupts: "
		    "%d\n", error);

		return (error);
	}

	if (count < BHNDB_PCI_MSI_COUNT) {
		pci_release_msi(sc->parent);
		return (ENXIO);
	}

	*msi_count = count;
	return (0);
}

static int
bhndb_pci_attach(device_t dev)
{
	struct bhndb_pci_softc	*sc;
	struct bhnd_chipid	 cid;
	struct bhnd_core_info	*cores, hostb_core;
	bhnd_erom_class_t	*erom_class;
	struct bhndb_pci_probe	*probe;
	u_int			 ncores;
	int			 irq_rid;
	int			 error;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->parent = device_get_parent(dev);
	sc->pci_devclass = bhndb_expected_pci_devclass(dev);
	sc->pci_quirks = 0;
	sc->set_regwin = NULL;

	BHNDB_PCI_LOCK_INIT(sc);

	probe = NULL;
	cores = NULL;

	/* Enable PCI bus mastering */
	pci_enable_busmaster(sc->parent);

	/* Enable clocks (if required by this hardware) */
	if ((error = bhndb_enable_pci_clocks(sc->dev)))
		goto cleanup;

	/* Identify the chip and enumerate the bridged cores */
	error = bhndb_pci_probe_alloc(&probe, dev, sc->pci_devclass);
	if (error)
		goto cleanup;

	sc->pci_quirks = bhndb_pci_get_core_quirks(&probe->cid,
	    &probe->hostb_core);

	/* Select the appropriate register window handler */
	if (probe->cid.chip_type == BHND_CHIPTYPE_SIBA) {
		sc->set_regwin = bhndb_pci_compat_setregwin;
	} else {
		sc->set_regwin = bhndb_pci_fast_setregwin;
	}

	/*
	 * Fix up our PCI base address in the SPROM shadow, if necessary.
	 *
	 * This must be done prior to accessing any static register windows
	 * that map the PCI core.
	 */
	if ((error = bhndb_pci_srsh_pi_war(sc, probe)))
		goto cleanup;

	/* Set up PCI interrupt handling */
	if (bhndb_pci_alloc_msi(sc, &sc->msi_count) == 0) {
		/* MSI uses resource IDs starting at 1 */
		irq_rid = 1;

		device_printf(dev, "Using MSI interrupts on %s\n",
		    device_get_nameunit(sc->parent));
	} else {
		sc->msi_count = 0;
		irq_rid = 0;

		device_printf(dev, "Using INTx interrupts on %s\n",
		    device_get_nameunit(sc->parent));
	}

	sc->isrc = bhndb_alloc_intr_isrc(sc->parent, irq_rid, 0, RM_MAX_END, 1,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->isrc == NULL) {
		device_printf(sc->dev, "failed to allocate interrupt "
		    "resource\n");
		error = ENXIO;
		goto cleanup;
	}

	/*
	 * Copy out the probe results and then free our probe state, releasing
	 * its exclusive ownership of host bridge resources.
	 *
	 * This must be done prior to full configuration of the bridge via
	 * bhndb_attach().
	 */
	cid = probe->cid;
	erom_class = probe->erom_class;
	hostb_core = probe->hostb_core;

	error = bhndb_pci_probe_copy_core_table(probe, &cores, &ncores);
	if (error) {
		cores = NULL;
		goto cleanup;
	}

	bhndb_pci_probe_free(probe);
	probe = NULL;

	/* Perform bridge attach */
	error = bhndb_attach(dev, &cid, cores, ncores, &hostb_core, erom_class);
	if (error)
		goto cleanup;

	/* Add any additional child devices */
	if ((error = bhndb_pci_add_children(sc)))
		goto cleanup;

	/* Probe and attach our children */
	bus_attach_children(dev);

	bhndb_pci_probe_free_core_table(cores);

	return (0);

cleanup:
	device_delete_children(dev);

	if (sc->isrc != NULL)
		bhndb_free_intr_isrc(sc->isrc);

	if (sc->msi_count > 0)
		pci_release_msi(sc->parent);

	if (cores != NULL)
		bhndb_pci_probe_free_core_table(cores);

	if (probe != NULL)
		bhndb_pci_probe_free(probe);

	bhndb_disable_pci_clocks(sc->dev);

	pci_disable_busmaster(sc->parent);

	BHNDB_PCI_LOCK_DESTROY(sc);

	return (error);
}

static int
bhndb_pci_detach(device_t dev)
{
	struct bhndb_pci_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);

	/* Attempt to detach our children */
	if ((error = bus_generic_detach(dev)))
		return (error);

	/* Perform generic bridge detach */
	if ((error = bhndb_generic_detach(dev)))
		return (error);

	/* Disable clocks (if required by this hardware) */
	if ((error = bhndb_disable_pci_clocks(sc->dev)))
		return (error);

	/* Free our interrupt resources */
	bhndb_free_intr_isrc(sc->isrc);

	/* Release MSI interrupts */
	if (sc->msi_count > 0)
		pci_release_msi(sc->parent);

	/* Disable PCI bus mastering */
	pci_disable_busmaster(sc->parent);

	BHNDB_PCI_LOCK_DESTROY(sc);

	return (0);
}

static int
bhndb_pci_add_children(struct bhndb_pci_softc *sc)
{
	bus_size_t	 nv_sz;
	int		 error;

	/**
	 * If SPROM is mapped directly into BAR0, add child NVRAM
	 * device.
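	 *
	 * (On PCI_V2 and later devices, SPROM/OTP is instead accessed via
	 * ChipCommon; bhndb_pci_sprom_size() returns 0 and no child device
	 * is added.)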
	 */
	nv_sz = bhndb_pci_sprom_size(sc);
	if (nv_sz > 0) {
		struct bhndb_devinfo	*dinfo;
		device_t		 child;

		if (bootverbose) {
			device_printf(sc->dev, "found SPROM (%ju bytes)\n",
			    (uintmax_t)nv_sz);
		}

		/* Add sprom device, ordered early enough to be available
		 * before the bridged bhnd(4) bus is attached. */
		child = BUS_ADD_CHILD(sc->dev,
		    BHND_PROBE_ROOT + BHND_PROBE_ORDER_EARLY, "bhnd_nvram", -1);
		if (child == NULL) {
			device_printf(sc->dev, "failed to add sprom device\n");
			return (ENXIO);
		}

		/* Initialize device address space and resource covering the
		 * BAR0 SPROM shadow. */
		dinfo = device_get_ivars(child);
		dinfo->addrspace = BHNDB_ADDRSPACE_NATIVE;

		error = bus_set_resource(child, SYS_RES_MEMORY, 0,
		    bhndb_pci_sprom_addr(sc), nv_sz);
		if (error) {
			device_printf(sc->dev,
			    "failed to register sprom resources\n");
			return (error);
		}
	}

	return (0);
}

static const struct bhndb_regwin *
bhndb_pci_sprom_regwin(struct bhndb_pci_softc *sc)
{
	struct bhndb_resources		*bres;
	const struct bhndb_hwcfg	*cfg;
	const struct bhndb_regwin	*sprom_win;

	bres = sc->bhndb.bus_res;
	cfg = bres->cfg;

	sprom_win = bhndb_regwin_find_type(cfg->register_windows,
	    BHNDB_REGWIN_T_SPROM, BHNDB_PCI_V0_BAR0_SPROM_SIZE);

	return (sprom_win);
}

static bus_addr_t
bhndb_pci_sprom_addr(struct bhndb_pci_softc *sc)
{
	const struct bhndb_regwin	*sprom_win;
	struct resource			*r;

	/* Fetch the SPROM register window */
	sprom_win = bhndb_pci_sprom_regwin(sc);
	KASSERT(sprom_win != NULL, ("requested sprom address on PCI_V2+"));

	/* Fetch the associated resource */
	r = bhndb_host_resource_for_regwin(sc->bhndb.bus_res->res, sprom_win);
	KASSERT(r != NULL, ("missing resource for sprom window\n"));

	return (rman_get_start(r) + sprom_win->win_offset);
}

static bus_size_t
bhndb_pci_sprom_size(struct bhndb_pci_softc *sc)
{
	const struct bhndb_regwin	*sprom_win;
	uint32_t			 sctl;
	bus_size_t			 sprom_sz;

	sprom_win = bhndb_pci_sprom_regwin(sc);

	/* PCI_V2 and later devices map SPROM/OTP via ChipCommon */
	if (sprom_win == NULL)
		return (0);

	/* Determine SPROM size */
	sctl = pci_read_config(sc->parent, BHNDB_PCI_SPROM_CONTROL, 4);
	if (sctl & BHNDB_PCI_SPROM_BLANK)
		return (0);

	switch (sctl & BHNDB_PCI_SPROM_SZ_MASK) {
	case BHNDB_PCI_SPROM_SZ_1KB:
		sprom_sz = (1 * 1024);
		break;

	case BHNDB_PCI_SPROM_SZ_4KB:
		sprom_sz = (4 * 1024);
		break;

	case BHNDB_PCI_SPROM_SZ_16KB:
		sprom_sz = (16 * 1024);
		break;

	case BHNDB_PCI_SPROM_SZ_RESERVED:
	default:
		device_printf(sc->dev, "invalid PCI sprom size 0x%x\n", sctl);
		return (0);
	}

	/* If the device has a larger SPROM than can be addressed via our SPROM
	 * register window, the SPROM image data will still be located within
	 * the window's addressable range */
	sprom_sz = MIN(sprom_sz, sprom_win->win_size);

	return (sprom_sz);
}

/**
 * Return the host resource providing a static mapping of the PCI core's
 * registers.
 *
 * @param sc		bhndb PCI driver state.
 * @param offset	The required readable offset within the PCI core
 *			register block.
 * @param size		The required readable size at @p offset.
 * @param[out] res	On success, the host resource containing our PCI
 *			core's register window.
 * @param[out] res_offset	On success, the @p offset relative to @p res.
 *
 * @retval 0		success
 * @retval ENXIO	if a valid static register window mapping the PCI core
 *			registers is not available.
 */
static int
bhndb_pci_get_core_regs(struct bhndb_pci_softc *sc, bus_size_t offset,
    bus_size_t size, struct resource **res, bus_size_t *res_offset)
{
	const struct bhndb_regwin	*win;
	struct resource			*r;

	/* Locate the static register window mapping the requested offset */
	win = bhndb_regwin_find_core(sc->bhndb.bus_res->cfg->register_windows,
	    sc->pci_devclass, 0, BHND_PORT_DEVICE, 0, 0, offset, size);
	if (win == NULL) {
		device_printf(sc->dev, "missing PCI core register window\n");
		return (ENXIO);
	}

	/* Fetch the resource containing the register window */
	r = bhndb_host_resource_for_regwin(sc->bhndb.bus_res->res, win);
	if (r == NULL) {
		device_printf(sc->dev, "missing PCI core register resource\n");
		return (ENXIO);
	}

	KASSERT(offset >= win->d.core.offset, ("offset %#jx outside of "
	    "register window", (uintmax_t)offset));

	*res = r;
	*res_offset = win->win_offset + (offset - win->d.core.offset);

	return (0);
}

/**
 * Write a 1, 2, or 4 byte data item to the PCI core's registers at @p offset.
 *
 * @param sc		bhndb PCI driver state.
 * @param offset	register write offset.
 * @param value		value to be written.
 * @param width		item width (1, 2, or 4 bytes).
 */
static void
bhndb_pci_write_core(struct bhndb_pci_softc *sc, bus_size_t offset,
    uint32_t value, u_int width)
{
	struct resource	*r;
	bus_size_t	 r_offset;
	int		 error;

	error = bhndb_pci_get_core_regs(sc, offset, width, &r, &r_offset);
	if (error) {
		panic("no PCI register window mapping %#jx+%#x: %d",
		    (uintmax_t)offset, width, error);
	}

	switch (width) {
	case 1:
		bus_write_1(r, r_offset, value);
		break;
	case 2:
		bus_write_2(r, r_offset, value);
		break;
	case 4:
		bus_write_4(r, r_offset, value);
		break;
	default:
		panic("invalid width: %u", width);
	}
}

/**
 * Read a 1, 2, or 4 byte data item from the PCI core's registers
 * at @p offset.
 *
 * @param sc		bhndb PCI driver state.
 * @param offset	register read offset.
 * @param width		item width (1, 2, or 4 bytes).
 */
static uint32_t
bhndb_pci_read_core(struct bhndb_pci_softc *sc, bus_size_t offset, u_int width)
{
	struct resource	*r;
	bus_size_t	 r_offset;
	int		 error;

	error = bhndb_pci_get_core_regs(sc, offset, width, &r, &r_offset);
	if (error) {
		panic("no PCI register window mapping %#jx+%#x: %d",
		    (uintmax_t)offset, width, error);
	}

	switch (width) {
	case 1:
		return (bus_read_1(r, r_offset));
	case 2:
		return (bus_read_2(r, r_offset));
	case 4:
		return (bus_read_4(r, r_offset));
	default:
		panic("invalid width: %u", width);
	}
}

/**
 * Fix-up power on defaults for SPROM-less devices.
 *
 * On SPROM-less devices, the PCI(e) cores will be initialized with their
 * Power-on-Reset defaults; this can leave the BHND_PCI_SRSH_PI value pointing
 * to the wrong backplane address. This value is used by the PCI core when
 * performing address translation between static register windows in BAR0 that
 * map the PCI core's register block, and backplane address space.
 *
 * When translating accesses via these BAR0 regions, the PCI bridge determines
 * the base address of the PCI core by concatenating:
 *
 *	[bits]	[source]
 *	31:16	bits [31:16] of the enumeration space address (e.g. 0x18000000)
 *	15:12	value of BHND_PCI_SRSH_PI from the PCI core's SPROM shadow
 *	11:0	bits [11:0] of the PCI bus address
 *
 * For example, on a PCI_V0 device, the following PCI core register offsets are
 * mapped into BAR0:
 *
 *	[BAR0 offset]		[description]		[PCI core offset]
 *	0x1000-0x17FF		sprom shadow		0x800-0xFFF
 *	0x1800-0x1DFF		device registers	0x000-0x5FF
 *	0x1E00-0x1FFF		siba config registers	0xE00-0xFFF
 *
 * This function checks -- and if necessary, corrects -- the BHND_PCI_SRSH_PI
 * value in the SPROM shadow.
 *
 * This workaround must be applied prior to accessing any static register
 * windows that map the PCI core.
 *
 * Applies to all PCI and PCIe-G1 core revisions.
 */
static int
bhndb_pci_srsh_pi_war(struct bhndb_pci_softc *sc,
    struct bhndb_pci_probe *probe)
{
	struct bhnd_core_match	md;
	bhnd_addr_t		pci_addr;
	bhnd_size_t		pci_size;
	bus_size_t		srsh_offset;
	uint16_t		srsh_val, pci_val;
	uint16_t		val;
	int			error;

	if ((sc->pci_quirks & BHNDB_PCI_QUIRK_SRSH_WAR) == 0)
		return (0);

	/* Use an equality match descriptor to look up our PCI core's base
	 * address in the EROM */
	md = bhnd_core_get_match_desc(&probe->hostb_core);
	error = bhnd_erom_lookup_core_addr(probe->erom, &md, BHND_PORT_DEVICE,
	    0, 0, NULL, &pci_addr, &pci_size);
	if (error) {
		device_printf(sc->dev, "no base address found for the PCI host "
		    "bridge core: %d\n", error);
		return (error);
	}

	/* Fetch the SPROM SRSH_PI value */
	srsh_offset = BHND_PCI_SPROM_SHADOW + BHND_PCI_SRSH_PI_OFFSET;
	val = bhndb_pci_probe_read(probe, pci_addr, srsh_offset, sizeof(val));
	srsh_val = (val & BHND_PCI_SRSH_PI_MASK) >> BHND_PCI_SRSH_PI_SHIFT;

	/* If it doesn't match PCI core's base address, update the SPROM
	 * shadow */
	pci_val = (pci_addr & BHND_PCI_SRSH_PI_ADDR_MASK) >>
	    BHND_PCI_SRSH_PI_ADDR_SHIFT;
	if (srsh_val != pci_val) {
		val &= ~BHND_PCI_SRSH_PI_MASK;
		val |= (pci_val << BHND_PCI_SRSH_PI_SHIFT);
		bhndb_pci_probe_write(probe, pci_addr, srsh_offset, val,
		    sizeof(val));
	}

	return (0);
}

static int
bhndb_pci_resume(device_t dev)
{
	struct bhndb_pci_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);

	/* Enable clocks (if supported by this hardware) */
	if ((error = bhndb_enable_pci_clocks(sc->dev)))
		return (error);

	/* Perform resume */
	return (bhndb_generic_resume(dev));
}

static int
bhndb_pci_suspend(device_t dev)
{
	struct bhndb_pci_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);

	/* Disable clocks (if supported by this hardware) */
	if ((error = bhndb_disable_pci_clocks(sc->dev)))
		return (error);

	/* Perform suspend */
	return (bhndb_generic_suspend(dev));
}

static int
bhndb_pci_set_window_addr(device_t dev, const struct bhndb_regwin *rw,
    bhnd_addr_t addr)
{
	struct bhndb_pci_softc *sc = device_get_softc(dev);
	return (sc->set_regwin(sc->dev, sc->parent, rw, addr));
}

/**
 * A siba(4) and bcma(4)-compatible bhndb_set_window_addr implementation.
 *
 * On siba(4) devices, it's possible that writing a PCI window register may
 * not succeed; it's necessary to immediately read the configuration register
 * and retry if not set to the desired value.
 *
 * This is not necessary on bcma(4) devices, but other than the overhead of
 * validating the register, there's no harm in performing the verification.
 */
static int
bhndb_pci_compat_setregwin(device_t dev, device_t pci_dev,
    const struct bhndb_regwin *rw, bhnd_addr_t addr)
{
	int	error;
	int	reg;

	if (rw->win_type != BHNDB_REGWIN_T_DYN)
		return (ENODEV);

	reg = rw->d.dyn.cfg_offset;
	for (u_int i = 0; i < BHNDB_PCI_BARCTRL_WRITE_RETRY; i++) {
		if ((error = bhndb_pci_fast_setregwin(dev, pci_dev, rw, addr)))
			return (error);

		if (pci_read_config(pci_dev, reg, 4) == addr)
			return (0);

		DELAY(10);
	}

	/* Unable to set window */
	return (ENODEV);
}

/**
 * A bcma(4)-only bhndb_set_window_addr implementation.
 */
static int
bhndb_pci_fast_setregwin(device_t dev, device_t pci_dev,
    const struct bhndb_regwin *rw, bhnd_addr_t addr)
{
	/* The PCI bridge core only supports 32-bit addressing, regardless
	 * of the bus' support for 64-bit addressing */
	if (addr > UINT32_MAX)
		return (ERANGE);

	switch (rw->win_type) {
	case BHNDB_REGWIN_T_DYN:
		/* Addresses must be page aligned */
		if (addr % rw->win_size != 0)
			return (EINVAL);

		pci_write_config(pci_dev, rw->d.dyn.cfg_offset, addr, 4);
		break;
	default:
		return (ENODEV);
	}

	return (0);
}

static int
bhndb_pci_populate_board_info(device_t dev, device_t child,
    struct bhnd_board_info *info)
{
	struct bhndb_pci_softc	*sc;

	sc = device_get_softc(dev);

	/*
	 * On a subset of Apple BCM4360 modules, always prefer the
	 * PCI subdevice to the SPROM-supplied boardtype.
	 *
	 * TODO:
	 *
	 * Broadcom's own drivers implement this override, and then later use
	 * the remapped BCM4360 board type to determine the required
	 * board-specific workarounds.
	 *
	 * Without access to this hardware, it's unclear why this mapping
	 * is done, and we must do the same. If we can survey the hardware
	 * in question, it may be possible to replace this behavior with
	 * explicit references to the SPROM-supplied boardtype(s) in our
	 * quirk definitions.
	 */
	if (pci_get_subvendor(sc->parent) == PCI_VENDOR_APPLE) {
		switch (info->board_type) {
		case BHND_BOARD_BCM94360X29C:
		case BHND_BOARD_BCM94360X29CP2:
		case BHND_BOARD_BCM94360X51:
		case BHND_BOARD_BCM94360X51P2:
			info->board_type = 0;	/* allow override below */
			break;
		default:
			break;
		}
	}

	/* If NVRAM did not supply vendor/type/devid info, provide the PCI
	 * subvendor/subdevice/device values. */
	if (info->board_vendor == 0)
		info->board_vendor = pci_get_subvendor(sc->parent);

	if (info->board_type == 0)
		info->board_type = pci_get_subdevice(sc->parent);

	if (info->board_devid == 0)
		info->board_devid = pci_get_device(sc->parent);

	return (0);
}

/**
 * Examine the bridge device @p dev and return the expected host bridge
 * device class.
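 *
 * Returns BHND_DEVCLASS_PCIE if @p dev is attached via PCIe, or
 * BHND_DEVCLASS_PCI otherwise.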
 *
 * @param dev	The bhndb bridge device
 */
static bhnd_devclass_t
bhndb_expected_pci_devclass(device_t dev)
{
	if (bhndb_is_pcie_attached(dev))
		return (BHND_DEVCLASS_PCIE);
	else
		return (BHND_DEVCLASS_PCI);
}

/**
 * Return true if the bridge device @p dev is attached via PCIe,
 * false otherwise.
 *
 * @param dev	The bhndb bridge device
 */
static bool
bhndb_is_pcie_attached(device_t dev)
{
	int reg;

	if (pci_find_cap(device_get_parent(dev), PCIY_EXPRESS, &reg) == 0)
		return (true);

	return (false);
}

/**
 * Enable externally managed clocks, if required.
 *
 * Some PCI chipsets (BCM4306, possibly others) do not support
 * the idle low-power clock. Clocking must be bootstrapped at
 * attach/resume by directly adjusting GPIO registers exposed in the
 * PCI config space, and correspondingly, explicitly shut down at
 * detach/suspend.
 *
 * @note This function may be safely called prior to device attach (e.g.
 * from DEVICE_PROBE).
 *
 * @param dev	The bhndb bridge device
 */
static int
bhndb_enable_pci_clocks(device_t dev)
{
	device_t	pci_dev;
	uint32_t	gpio_in, gpio_out, gpio_en;
	uint32_t	gpio_flags;
	uint16_t	pci_status;

	pci_dev = device_get_parent(dev);

	/* Only supported and required on PCI devices */
	if (bhndb_is_pcie_attached(dev))
		return (0);

	/* Read state of XTAL pin */
	gpio_in = pci_read_config(pci_dev, BHNDB_PCI_GPIO_IN, 4);
	if (gpio_in & BHNDB_PCI_GPIO_XTAL_ON)
		return (0); /* already enabled */

	/* Fetch current config */
	gpio_out = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUT, 4);
	gpio_en = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, 4);

	/* Set PLL_OFF/XTAL_ON pins to HIGH and enable both pins */
	gpio_flags = (BHNDB_PCI_GPIO_PLL_OFF|BHNDB_PCI_GPIO_XTAL_ON);
	gpio_out |= gpio_flags;
	gpio_en |= gpio_flags;

	pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4);
	pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4);
	DELAY(1000);

	/* Reset PLL_OFF */
	gpio_out &= ~BHNDB_PCI_GPIO_PLL_OFF;
	pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4);
	DELAY(5000);

	/* Clear any PCI 'sent target-abort' flag. */
	pci_status = pci_read_config(pci_dev, PCIR_STATUS, 2);
	pci_status &= ~PCIM_STATUS_STABORT;
	pci_write_config(pci_dev, PCIR_STATUS, pci_status, 2);

	return (0);
}

/**
 * Disable externally managed clocks, if required.
 *
 * This function may be safely called prior to device attach (e.g.
 * from DEVICE_PROBE).
 *
 * @param dev	The bhndb bridge device
 */
static int
bhndb_disable_pci_clocks(device_t dev)
{
	device_t	pci_dev;
	uint32_t	gpio_out, gpio_en;

	pci_dev = device_get_parent(dev);

	/* Only supported and required on PCI devices */
	if (bhndb_is_pcie_attached(dev))
		return (0);

	/* Fetch current config */
	gpio_out = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUT, 4);
	gpio_en = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, 4);

	/* Set PLL_OFF to HIGH, XTAL_ON to LOW. */
	gpio_out &= ~BHNDB_PCI_GPIO_XTAL_ON;
	gpio_out |= BHNDB_PCI_GPIO_PLL_OFF;
	pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4);

	/* Enable both output pins */
	gpio_en |= (BHNDB_PCI_GPIO_PLL_OFF|BHNDB_PCI_GPIO_XTAL_ON);
	pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4);

	return (0);
}

static bhnd_clksrc
bhndb_pci_pwrctl_get_clksrc(device_t dev, device_t child,
    bhnd_clock clock)
{
	struct bhndb_pci_softc	*sc;
	uint32_t		 gpio_out;

	sc = device_get_softc(dev);

	/* Only supported on PCI devices */
	if (bhndb_is_pcie_attached(sc->dev))
		return (BHND_CLKSRC_UNKNOWN);

	/* Only ILP is supported */
	if (clock != BHND_CLOCK_ILP)
		return (BHND_CLKSRC_UNKNOWN);

	gpio_out = pci_read_config(sc->parent, BHNDB_PCI_GPIO_OUT, 4);
	if (gpio_out & BHNDB_PCI_GPIO_SCS)
		return (BHND_CLKSRC_PCI);
	else
		return (BHND_CLKSRC_XTAL);
}

static int
bhndb_pci_pwrctl_gate_clock(device_t dev, device_t child,
    bhnd_clock clock)
{
	struct bhndb_pci_softc *sc = device_get_softc(dev);

	/* Only supported on PCI devices */
	if (bhndb_is_pcie_attached(sc->dev))
		return (ENODEV);

	/* Only HT is supported */
	if (clock != BHND_CLOCK_HT)
		return (ENXIO);

	return (bhndb_disable_pci_clocks(sc->dev));
}

static int
bhndb_pci_pwrctl_ungate_clock(device_t dev, device_t child,
    bhnd_clock clock)
{
	struct bhndb_pci_softc *sc = device_get_softc(dev);

	/* Only supported on PCI devices */
	if (bhndb_is_pcie_attached(sc->dev))
		return (ENODEV);

	/* Only HT is supported */
	if (clock != BHND_CLOCK_HT)
		return (ENXIO);

	return (bhndb_enable_pci_clocks(sc->dev));
}

/**
 * BHNDB_MAP_INTR_ISRC()
 */
static int
bhndb_pci_map_intr_isrc(device_t dev, struct resource *irq,
    struct bhndb_intr_isrc **isrc)
{
	struct bhndb_pci_softc *sc = device_get_softc(dev);

	/* There's only one bridged interrupt to choose from */
	*isrc = sc->isrc;
	return (0);
}

/* siba-specific implementation of BHNDB_ROUTE_INTERRUPTS() */
static int
bhndb_pci_route_siba_interrupts(struct bhndb_pci_softc *sc, device_t child)
{
	uint32_t	sbintvec;
	u_int		ivec;
	int		error;

	KASSERT(sc->pci_quirks & BHNDB_PCI_QUIRK_SIBA_INTVEC,
	    ("route_siba_interrupts not supported by this hardware"));

	/* Fetch the sbflag# for the child */
	if ((error = bhnd_get_intr_ivec(child, 0, &ivec)))
		return (error);

	if (ivec > (sizeof(sbintvec)*8) - 1 /* aka '31' */) {
		/* This should never be an issue in practice */
		device_printf(sc->dev, "cannot route interrupts to high "
		    "sbflag# %u\n", ivec);
		return (ENXIO);
	}

	BHNDB_PCI_LOCK(sc);

	sbintvec = bhndb_pci_read_core(sc, SB0_REG_ABS(SIBA_CFG0_INTVEC), 4);
	sbintvec |= (1 << ivec);
	bhndb_pci_write_core(sc, SB0_REG_ABS(SIBA_CFG0_INTVEC), sbintvec, 4);

	BHNDB_PCI_UNLOCK(sc);

	return (0);
}

/* BHNDB_ROUTE_INTERRUPTS() */
static int
bhndb_pci_route_interrupts(device_t dev, device_t child)
{
	struct bhndb_pci_softc	*sc;
	struct bhnd_core_info	 core;
	uint32_t		 core_bit;
	uint32_t		 intmask;

	sc = device_get_softc(dev);

	if (sc->pci_quirks & BHNDB_PCI_QUIRK_SIBA_INTVEC)
		return (bhndb_pci_route_siba_interrupts(sc, child));

	core = bhnd_get_core_info(child);
	if (core.core_idx > BHNDB_PCI_SBIM_COREIDX_MAX) {
		/* This should never be an issue in practice */
		device_printf(dev, "cannot route interrupts to high core "
		    "index %u\n", core.core_idx);
		return (ENXIO);
	}

	BHNDB_PCI_LOCK(sc);

	core_bit = (1<<core.core_idx) << BHNDB_PCI_SBIM_SHIFT;
	intmask = pci_read_config(sc->parent, BHNDB_PCI_INT_MASK, 4);
	intmask |= core_bit;
	pci_write_config(sc->parent, BHNDB_PCI_INT_MASK, intmask, 4);

	BHNDB_PCI_UNLOCK(sc);

	return (0);
}

/**
 * Using the generic PCI bridge hardware configuration, allocate, initialize
 * and return a new bhndb_pci probe state instance.
 *
 * On success, the caller assumes ownership of the returned probe instance, and
 * is responsible for releasing this reference using bhndb_pci_probe_free().
 *
 * @param[out] probe	On success, the newly allocated probe instance.
 * @param dev		The bhndb_pci bridge device.
 * @param hostb_devclass	The expected device class of the bridge core.
 *
 * @retval 0		success
 * @retval non-zero	if allocating the probe state fails, a regular
 *			unix error code will be returned.
 *
 * @note This function requires exclusive ownership over allocating and
 * configuring host bridge resources, and should only be called prior to
 * completion of device attach and full configuration of the bridge.
 */
static int
bhndb_pci_probe_alloc(struct bhndb_pci_probe **probe, device_t dev,
    bhnd_devclass_t hostb_devclass)
{
	struct bhndb_pci_probe		*p;
	struct bhnd_erom_io		*eio;
	const struct bhndb_hwcfg	*hwcfg;
	const struct bhnd_chipid	*hint;
	device_t			 parent_dev;
	int				 error;

	parent_dev = device_get_parent(dev);
	eio = NULL;

	p = malloc(sizeof(*p), M_BHND, M_ZERO|M_WAITOK);
	p->dev = dev;
	p->pci_dev = parent_dev;

	/* Our register window mapping state must be initialized at this point,
	 * as bhndb_pci_eio will begin making calls into
	 * bhndb_pci_probe_(read|write|get_mapping) */
	p->m_win = NULL;
	p->m_res = NULL;
	p->m_valid = false;

	bhndb_pci_eio_init(&p->erom_io, p);
	eio = &p->erom_io.eio;

	/* Fetch our chipid hint (if any) and generic hardware configuration */
	hwcfg = BHNDB_BUS_GET_GENERIC_HWCFG(parent_dev, dev);
	hint = BHNDB_BUS_GET_CHIPID(parent_dev, dev);

	/* Allocate our host resources */
	error = bhndb_alloc_host_resources(&p->hr, dev, parent_dev, hwcfg);
	if (error) {
		p->hr = NULL;
		goto failed;
	}

	/* Map the first bus core from our bridged bhnd(4) bus */
	error = bhnd_erom_io_map(eio, BHND_DEFAULT_CHIPC_ADDR,
	    BHND_DEFAULT_CORE_SIZE);
	if (error)
		goto failed;

	/* Probe for a usable EROM class, and read the chip identifier */
	p->erom_class = bhnd_erom_probe_driver_classes(
	    device_get_devclass(dev), eio, hint, &p->cid);
	if (p->erom_class == NULL) {
		device_printf(dev, "device enumeration unsupported; no "
		    "compatible driver found\n");

		error = ENXIO;
		goto failed;
	}

	/* Allocate EROM parser */
	p->erom = bhnd_erom_alloc(p->erom_class, &p->cid, eio);
	if (p->erom == NULL) {
		device_printf(dev, "failed to allocate device enumeration "
		    "table parser\n");
		error = ENXIO;
		goto failed;
	}

	/* The EROM I/O instance is now owned by our EROM parser */
	eio = NULL;

	/* Read the full core table */
	error = bhnd_erom_get_core_table(p->erom, &p->cores, &p->ncores);
	if (error) {
		device_printf(p->dev, "error fetching core table: %d\n",
		    error);

		p->cores = NULL;
		goto failed;
	}

	/* Identify the host bridge core */
	error = bhndb_find_hostb_core(p->cores, p->ncores, hostb_devclass,
	    &p->hostb_core);
	if (error) {
		device_printf(dev, "failed to identify the host bridge "
		    "core: %d\n", error);

		goto failed;
	}

	*probe = p;
	return (0);

failed:
	if (eio != NULL) {
		KASSERT(p->erom == NULL, ("I/O instance will be freed by "
		    "its owning parser"));

		bhnd_erom_io_fini(eio);
	}

	if (p->erom != NULL) {
		if (p->cores != NULL)
			bhnd_erom_free_core_table(p->erom, p->cores);

		bhnd_erom_free(p->erom);
	} else {
		KASSERT(p->cores == NULL, ("cannot free erom-owned core table "
		    "without erom reference"));
	}

	if (p->hr != NULL)
		bhndb_release_host_resources(p->hr);

	free(p, M_BHND);

	return (error);
}

/**
 * Free the given @p probe instance and any associated host bridge resources.
 */
static void
bhndb_pci_probe_free(struct bhndb_pci_probe *probe)
{
	bhnd_erom_free_core_table(probe->erom, probe->cores);
	bhnd_erom_free(probe->erom);
	bhndb_release_host_resources(probe->hr);
	free(probe, M_BHND);
}

/**
 * Return a copy of the probed core table from @p probe.
 *
 * @param probe		The probe instance.
 * @param[out] cores	On success, a copy of the probed core table. The
 *			caller is responsible for freeing this table via
 *			bhndb_pci_probe_free_core_table().
 * @param[out] ncores	On success, the number of cores found in
 *			@p cores.
 *
 * @retval 0		success
 * @retval non-zero	if enumerating the bridged bhnd(4) bus fails, a regular
 *			unix error code will be returned.
 */
static int
bhndb_pci_probe_copy_core_table(struct bhndb_pci_probe *probe,
    struct bhnd_core_info **cores, u_int *ncores)
{
	size_t len = sizeof(**cores) * probe->ncores;

	*cores = malloc(len, M_BHND, M_WAITOK);
	memcpy(*cores, probe->cores, len);

	*ncores = probe->ncores;

	return (0);
}

/**
 * Free a core table previously returned by bhndb_pci_probe_copy_core_table().
 *
 * @param cores	The core table to be freed.
 */
static void
bhndb_pci_probe_free_core_table(struct bhnd_core_info *cores)
{
	free(cores, M_BHND);
}

/**
 * Return true if @p addr and @p size are mapped by the dynamic register window
 * backing @p probe.
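 *
 * The existing mapping is usable only if both @p addr and @p addr + @p size
 * fall within the range currently mapped by the dynamic register window.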
 */
static bool
bhndb_pci_probe_has_mapping(struct bhndb_pci_probe *probe, bhnd_addr_t addr,
    bhnd_size_t size)
{
	if (!probe->m_valid)
		return (false);

	KASSERT(probe->m_win != NULL, ("missing register window"));
	KASSERT(probe->m_res != NULL, ("missing regwin resource"));
	KASSERT(probe->m_win->win_type == BHNDB_REGWIN_T_DYN,
	    ("unexpected window type %d", probe->m_win->win_type));

	if (addr < probe->m_target)
		return (false);

	if (addr >= probe->m_target + probe->m_win->win_size)
		return (false);

	if ((probe->m_target + probe->m_win->win_size) - addr < size)
		return (false);

	return (true);
}

/**
 * Attempt to adjust the dynamic register window backing @p probe to permit
 * accessing @p size bytes at @p addr.
 *
 * @param probe		The bhndb_pci probe state to be modified.
 * @param addr		The address at which @p size bytes will be mapped.
 * @param size		The number of bytes to be mapped.
 * @param[out] res	On success, will be set to the host resource
 *			mapping @p size bytes at @p addr.
 * @param[out] res_offset	On success, will be set to the offset of @p addr
 *			within @p res.
 *
 * @retval 0		success
 * @retval non-zero	if an error occurs adjusting the backing dynamic
 *			register window.
 */
static int
bhndb_pci_probe_map(struct bhndb_pci_probe *probe, bhnd_addr_t addr,
    bhnd_size_t offset, bhnd_size_t size, struct resource **res,
    bus_size_t *res_offset)
{
	const struct bhndb_regwin	*regwin, *regwin_table;
	struct resource			*regwin_res;
	bhnd_addr_t			 target;
	int				 error;

	/* Determine the absolute address */
	if (BHND_SIZE_MAX - offset < addr) {
		device_printf(probe->dev, "invalid offset %#jx+%#jx\n", addr,
		    offset);
		return (ENXIO);
	}

	addr += offset;

	/* Can we use the existing mapping? */
	if (bhndb_pci_probe_has_mapping(probe, addr, size)) {
		*res = probe->m_res;
		*res_offset = (addr - probe->m_target) +
		    probe->m_win->win_offset;

		return (0);
	}

	/* Locate a useable dynamic register window */
	regwin_table = probe->hr->cfg->register_windows;
	regwin = bhndb_regwin_find_type(regwin_table,
	    BHNDB_REGWIN_T_DYN, size);
	if (regwin == NULL) {
		device_printf(probe->dev, "unable to map %#jx+%#jx; no "
		    "usable dynamic register window found\n", addr,
		    size);
		return (ENXIO);
	}

	/* Locate the host resource mapping our register window */
	regwin_res = bhndb_host_resource_for_regwin(probe->hr, regwin);
	if (regwin_res == NULL) {
		device_printf(probe->dev, "unable to map %#jx+%#jx; no "
		    "usable register resource found\n", addr, size);
		return (ENXIO);
	}

	/* Page-align the target address */
	target = addr - (addr % regwin->win_size);

	/* Configure the register window */
	error = bhndb_pci_compat_setregwin(probe->dev, probe->pci_dev,
	    regwin, target);
	if (error) {
		device_printf(probe->dev, "failed to configure dynamic "
		    "register window: %d\n", error);
		return (error);
	}

	/* Update our mapping state */
	probe->m_win = regwin;
	probe->m_res = regwin_res;
	probe->m_addr = addr;
	probe->m_size = size;
	probe->m_target = target;
	probe->m_valid = true;

	*res = regwin_res;
	*res_offset = (addr - target) + regwin->win_offset;

	return (0);
}

/**
 * Write a data item to the bridged address space at the given @p offset from
 * @p addr.
 *
 * A dynamic register window will be used to map @p addr.
 *
 * @param probe		The bhndb_pci probe state to be used to perform the
 *			write.
 * @param addr		The base address.
 * @param offset	The offset from @p addr at which @p value will be
 *			written.
 * @param value		The data item to be written.
 * @param width		The data item width (1, 2, or 4 bytes).
 */
static void
bhndb_pci_probe_write(struct bhndb_pci_probe *probe, bhnd_addr_t addr,
    bhnd_size_t offset, uint32_t value, u_int width)
{
	struct resource	*r;
	bus_size_t	 res_offset;
	int		 error;

	/* Map the target address */
	error = bhndb_pci_probe_map(probe, addr, offset, width, &r,
	    &res_offset);
	if (error) {
		device_printf(probe->dev, "error mapping %#jx+%#jx for "
		    "writing: %d\n", addr, offset, error);
		return;
	}

	/* Perform write */
	switch (width) {
	case 1:
		return (bus_write_1(r, res_offset, value));
	case 2:
		return (bus_write_2(r, res_offset, value));
	case 4:
		return (bus_write_4(r, res_offset, value));
	default:
		panic("unsupported width: %u", width);
	}
}

/**
 * Read a data item from the bridged address space at the given @p offset
 * from @p addr.
 *
 * A dynamic register window will be used to map @p addr.
 *
 * @param probe		The bhndb_pci probe state to be used to perform the
 *			read.
 * @param addr		The base address.
 * @param offset	The offset from @p addr at which to read a data item of
 *			@p width bytes.
 * @param width		Item width (1, 2, or 4 bytes).
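 *
 * @return Returns the value read, or UINT32_MAX if the target address
 * could not be mapped.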
 */
static uint32_t
bhndb_pci_probe_read(struct bhndb_pci_probe *probe, bhnd_addr_t addr,
    bhnd_size_t offset, u_int width)
{
	struct resource	*r;
	bus_size_t	 res_offset;
	int		 error;

	/* Map the target address */
	error = bhndb_pci_probe_map(probe, addr, offset, width, &r,
	    &res_offset);
	if (error) {
		device_printf(probe->dev, "error mapping %#jx+%#jx for "
		    "reading: %d\n", addr, offset, error);
		return (UINT32_MAX);
	}

	/* Perform read */
	switch (width) {
	case 1:
		return (bus_read_1(r, res_offset));
	case 2:
		return (bus_read_2(r, res_offset));
	case 4:
		return (bus_read_4(r, res_offset));
	default:
		panic("unsupported width: %u", width);
	}
}

/**
 * Initialize a new bhndb PCI bridge EROM I/O instance. All I/O will be
 * performed using @p probe.
 *
 * @param pio		The instance to be initialized.
 * @param probe		The bhndb_pci probe state to be used to perform all
 *			I/O.
 */
static void
bhndb_pci_eio_init(struct bhndb_pci_eio *pio, struct bhndb_pci_probe *probe)
{
	memset(pio, 0, sizeof(*pio));

	pio->eio.map = bhndb_pci_eio_map;
	pio->eio.tell = bhndb_pci_eio_tell;
	pio->eio.read = bhndb_pci_eio_read;
	pio->eio.fini = NULL;

	pio->mapped = false;
	pio->addr = 0;
	pio->size = 0;
	pio->probe = probe;
}

/* bhnd_erom_io_map() implementation */
static int
bhndb_pci_eio_map(struct bhnd_erom_io *eio, bhnd_addr_t addr,
    bhnd_size_t size)
{
	struct bhndb_pci_eio *pio = (struct bhndb_pci_eio *)eio;

	if (BHND_ADDR_MAX - addr < size)
		return (EINVAL); /* addr+size would overflow */

	pio->addr = addr;
	pio->size = size;
	pio->mapped = true;

	return (0);
}

/* bhnd_erom_io_tell() implementation */
static int
bhndb_pci_eio_tell(struct bhnd_erom_io *eio, bhnd_addr_t *addr,
    bhnd_size_t *size)
{
	struct bhndb_pci_eio *pio = (struct bhndb_pci_eio *)eio;

	if (!pio->mapped)
		return (ENXIO);

	*addr = pio->addr;
	*size = pio->size;

	return (0);
}

/* bhnd_erom_io_read() implementation */
static uint32_t
bhndb_pci_eio_read(struct bhnd_erom_io *eio, bhnd_size_t offset, u_int width)
{
	struct bhndb_pci_eio *pio = (struct bhndb_pci_eio *)eio;

	/* Must have a valid mapping */
	if (!pio->mapped)
		panic("no active mapping");

	/* The requested subrange must fall within the existing mapped range */
	if (offset > pio->size ||
	    width > pio->size ||
	    pio->size - offset < width)
	{
		panic("invalid offset %#jx", offset);
	}

	return (bhndb_pci_probe_read(pio->probe, pio->addr, offset, width));
}

static device_method_t bhndb_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			bhndb_pci_probe),
	DEVMETHOD(device_attach,		bhndb_pci_attach),
	DEVMETHOD(device_resume,		bhndb_pci_resume),
	DEVMETHOD(device_suspend,		bhndb_pci_suspend),
	DEVMETHOD(device_detach,		bhndb_pci_detach),

	/* BHNDB interface */
	DEVMETHOD(bhndb_set_window_addr,	bhndb_pci_set_window_addr),
	DEVMETHOD(bhndb_populate_board_info,	bhndb_pci_populate_board_info),
	DEVMETHOD(bhndb_map_intr_isrc,		bhndb_pci_map_intr_isrc),
	DEVMETHOD(bhndb_route_interrupts,	bhndb_pci_route_interrupts),

	/* BHND PWRCTL hostb interface */
	DEVMETHOD(bhnd_pwrctl_hostb_get_clksrc,	bhndb_pci_pwrctl_get_clksrc),
	DEVMETHOD(bhnd_pwrctl_hostb_gate_clock,	bhndb_pci_pwrctl_gate_clock),
	DEVMETHOD(bhnd_pwrctl_hostb_ungate_clock, bhndb_pci_pwrctl_ungate_clock),

	DEVMETHOD_END
};

DEFINE_CLASS_1(bhndb, bhndb_pci_driver, bhndb_pci_methods,
    sizeof(struct bhndb_pci_softc), bhndb_driver);

MODULE_VERSION(bhndb_pci, 1);
MODULE_DEPEND(bhndb_pci, bhnd_pci_hostb, 1, 1, 1);
MODULE_DEPEND(bhndb_pci, pci, 1, 1, 1);
MODULE_DEPEND(bhndb_pci, bhndb, 1, 1, 1);
MODULE_DEPEND(bhndb_pci, bhnd, 1, 1, 1);