/*-
 * Copyright (c) 2015 Landon Fuller <landon@landonf.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

/*
 * PCI-specific implementation for the BHNDB bridge driver.
 *
 * Provides support for bridging from a PCI parent bus to a BHND-compatible
 * bus (e.g. bcma or siba) via a Broadcom PCI core configured in end-point
 * mode.
 *
 * This driver handles all interactions with the PCI bridge core. On the
 * bridged bhnd bus, the PCI core device will be claimed by a simple
 * bhnd_hostb driver.
 */

// Quirk TODO
// WARs for the following are not yet implemented:
//   - BHND_PCI_QUIRK_SBINTVEC
//   - BHND_PCIE_QUIRK_ASPM_OVR
//   - BHND_PCIE_QUIRK_SERDES_NOPLLDOWN
// Quirks (and WARs) for the following are not yet defined:
//   - Power savings via MDIO BLK1/PWR_MGMT3 on PCIe hwrev 15-20, 21-22
//   - WOWL PME enable/disable
//   - 4360 PCIe SerDes Tx amplitude/deemphasis (vendor Apple, boards
//     BCM94360X51P2, BCM94360X51A).
//   - PCI latency timer (boards CB2_4321_BOARD, CB2_4321_AG_BOARD)
//   - Max SerDes TX drive strength (vendor Apple, pcie >= rev10,
//     board BCM94322X9)
//   - 700mV SerDes TX drive strength (chipid BCM4331, boards BCM94331X19,
//     BCM94331X28, BCM94331X29B, BCM94331X19C)

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/bus.h>
#include <sys/limits.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/systm.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <dev/bhnd/bhnd.h>

#include <dev/bhnd/cores/pci/bhnd_pcireg.h>
#include <dev/bhnd/cores/pci/mdio_pcievar.h>

#include "bhndb_pcireg.h"
#include "bhndb_pcivar.h"
#include "bhndb_private.h"

static int      bhndb_enable_pci_clocks(struct bhndb_pci_softc *sc);
static int      bhndb_disable_pci_clocks(struct bhndb_pci_softc *sc);

static int      bhndb_pci_compat_setregwin(struct bhndb_pci_softc *,
                    const struct bhndb_regwin *, bhnd_addr_t);
static int      bhndb_pci_fast_setregwin(struct bhndb_pci_softc *,
                    const struct bhndb_regwin *, bhnd_addr_t);

static uint32_t bhndb_pcie_read_proto_reg(struct bhndb_pci_softc *sc,
                    uint32_t addr);
static void     bhndb_pcie_write_proto_reg(struct bhndb_pci_softc *sc,
                    uint32_t addr, uint32_t val);

static void     bhndb_init_sromless_pci_config(struct bhndb_pci_softc *sc);

static int      bhndb_pci_wars_register_access(struct bhndb_pci_softc *sc);
static int      bhndb_pci_wars_early_once(struct bhndb_pci_softc *sc);
static int      bhndb_pci_wars_hwup(struct bhndb_pci_softc *sc);
static int      bhndb_pci_wars_hwdown(struct bhndb_pci_softc *sc);

static uint32_t bhndb_pci_discover_quirks(struct bhndb_pci_softc *,
                    const struct bhndb_pci_id *);

static const struct bhndb_pci_id *bhndb_pci_find_core_id(
                    struct bhnd_core_info *core);

/*
 * Supported PCI bridge cores.
 *
 * This table defines quirks specific to core hwrev ranges; see also
 * bhndb_pci_discover_quirks() for additional quirk detection.
 */
static const struct bhndb_pci_id bhndb_pci_ids[] = {
        /* PCI */
        BHNDB_PCI_ID(PCI,
            BHND_QUIRK_HWREV_GTE        (0,
                BHNDB_PCI_QUIRK_EXT_CLOCK_GATING |
                BHNDB_PCI_QUIRK_SBTOPCI2_PREF_BURST),

            BHND_QUIRK_HWREV_RANGE      (0, 5,
                BHNDB_PCI_QUIRK_SBINTVEC),

            BHND_QUIRK_HWREV_GTE        (11,
                BHNDB_PCI_QUIRK_SBTOPCI2_READMULTI |
                BHNDB_PCI_QUIRK_CLKRUN_DSBL),

            BHND_QUIRK_HWREV_END
        ),

        /* PCIe Gen 1 */
        BHNDB_PCI_ID(PCIE,
            BHND_QUIRK_HWREV_EQ         (0,
                BHNDB_PCIE_QUIRK_SDR9_L0s_HANG),

            BHND_QUIRK_HWREV_RANGE      (0, 1,
                BHNDB_PCIE_QUIRK_UR_STATUS_FIX),

            BHND_QUIRK_HWREV_EQ         (1,
                BHNDB_PCIE_QUIRK_PCIPM_REQEN),

            BHND_QUIRK_HWREV_RANGE      (3, 5,
                BHNDB_PCIE_QUIRK_ASPM_OVR |
                BHNDB_PCIE_QUIRK_SDR9_POLARITY |
                BHNDB_PCIE_QUIRK_SDR9_NO_FREQRETRY),

            BHND_QUIRK_HWREV_LTE        (6,
                BHNDB_PCIE_QUIRK_L1_IDLE_THRESH),

            BHND_QUIRK_HWREV_GTE        (6,
                BHNDB_PCIE_QUIRK_SPROM_L23_PCI_RESET),

            BHND_QUIRK_HWREV_EQ         (7,
                BHNDB_PCIE_QUIRK_SERDES_NOPLLDOWN),

            BHND_QUIRK_HWREV_GTE        (8,
                BHNDB_PCIE_QUIRK_L1_TIMER_PERF),

            BHND_QUIRK_HWREV_GTE        (10,
                BHNDB_PCIE_QUIRK_SD_C22_EXTADDR),

            BHND_QUIRK_HWREV_END
        ),

        { BHND_COREID_INVALID, BHND_PCI_REGFMT_PCI, NULL }
};

/* quirk flag convenience macros */
#define BHNDB_PCI_QUIRK(_sc, _name)     \
    ((_sc)->quirks & BHNDB_PCI_QUIRK_ ## _name)
#define BHNDB_PCIE_QUIRK(_sc, _name)    \
    ((_sc)->quirks & BHNDB_PCIE_QUIRK_ ## _name)

#define BHNDB_PCI_ASSERT_QUIRK(_sc, _name)      \
    KASSERT(BHNDB_PCI_QUIRK((_sc), _name), ("quirk " __STRING(_name) " not set"))
#define BHNDB_PCIE_ASSERT_QUIRK(_sc, _name)     \
    KASSERT(BHNDB_PCIE_QUIRK((_sc), _name), ("quirk " __STRING(_name) " not set"))

/* bus_(read|write)_* convenience macros */
#define BHNDB_PCI_READ_2(_sc, _reg)     \
    bus_read_2((_sc)->mem_res, (_sc)->mem_off + (_reg))
#define BHNDB_PCI_READ_4(_sc, _reg)     \
    bus_read_4((_sc)->mem_res, (_sc)->mem_off + (_reg))

#define BHNDB_PCI_WRITE_2(_sc, _reg, _val)      \
    bus_write_2((_sc)->mem_res, (_sc)->mem_off + (_reg), (_val))
#define BHNDB_PCI_WRITE_4(_sc, _reg, _val)      \
    bus_write_4((_sc)->mem_res, (_sc)->mem_off + (_reg), (_val))

/* BHND_PCI_REG_* convenience macros */
#define BPCI_REG_EXTRACT(_rv, _a)       BHND_PCI_REG_EXTRACT(_rv, BHND_ ## _a)
#define BPCI_REG_INSERT(_rv, _a, _v)    BHND_PCI_REG_INSERT(_rv, BHND_ ## _a, _v)

#define BPCI_COMMON_REG_EXTRACT(_r, _a) \
    BHND_PCI_COMMON_REG_EXTRACT(sc->regfmt, _r, _a)

#define BPCI_COMMON_REG_INSERT(_r, _a, _v)      \
    BHND_PCI_COMMON_REG_INSERT(sc->regfmt, _r, _a, _v)

#define BPCI_COMMON_REG(_name)  \
    BHND_PCI_COMMON_REG(sc->regfmt, _name)

#define BPCI_COMMON_REG_OFFSET(_base, _offset)  \
    (BPCI_COMMON_REG(_base) + BPCI_COMMON_REG(_offset))
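
/*
 * For example, BHNDB_PCI_QUIRK(sc, CLKRUN_DSBL) expands to
 * ((sc)->quirks & BHNDB_PCI_QUIRK_CLKRUN_DSBL), and
 * BPCI_COMMON_REG_OFFSET(SPROM_SHADOW, SRSH_PI_OFFSET) resolves both
 * register names against the register format in sc->regfmt, yielding the
 * offset appropriate to the host bridge's PCI or PCIe register layout.
 */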

/**
 * Default bhndb_pci implementation of device_probe().
 *
 * Verifies that the parent is a PCI/PCIe device.
 */
static int
bhndb_pci_probe(device_t dev)
{
        device_t parent;
        devclass_t parent_bus;
        devclass_t pci;

        /* Our parent must be a PCI/PCIe device. */
        pci = devclass_find("pci");
        parent = device_get_parent(dev);
        parent_bus = device_get_devclass(device_get_parent(parent));

        if (parent_bus != pci)
                return (ENXIO);

        device_set_desc(dev, "PCI-BHND bridge");

        return (BUS_PROBE_DEFAULT);
}

static int
bhndb_pci_attach(device_t dev)
{
        struct bhndb_pci_softc *sc;
        int error, reg;

        sc = device_get_softc(dev);
        sc->dev = dev;

        /* Enable PCI bus mastering */
        pci_enable_busmaster(device_get_parent(dev));

        /* Determine our bridge device class */
        sc->pci_devclass = BHND_DEVCLASS_PCI;
        if (pci_find_cap(device_get_parent(dev), PCIY_EXPRESS, &reg) == 0)
                sc->pci_devclass = BHND_DEVCLASS_PCIE;

        /* Determine the basic set of applicable quirks. This will be updated
         * in bhndb_pci_init_full_config() once the PCI device core has
         * been enumerated. */
        sc->quirks = bhndb_pci_discover_quirks(sc, NULL);

        /* Using the discovered quirks, apply any WARs required for basic
         * register access. */
        if ((error = bhndb_pci_wars_register_access(sc)))
                return (error);

        /* Use siba(4)-compatible regwin handling until we know
         * what kind of bus is attached */
        sc->set_regwin = bhndb_pci_compat_setregwin;

        /* Perform full bridge attach. This should call back into our
         * bhndb_pci_init_full_config() implementation once the bridged
         * bhnd(4) bus has been enumerated, but before any devices have been
         * probed or attached. */
        if ((error = bhndb_attach(dev, sc->pci_devclass)))
                return (error);

        /* If supported, switch to the faster regwin handling */
        if (sc->bhndb.chipid.chip_type != BHND_CHIPTYPE_SIBA) {
                atomic_store_rel_ptr((volatile void *) &sc->set_regwin,
                    (uintptr_t) &bhndb_pci_fast_setregwin);
        }

        return (0);
}

/**
 * Initialize the full bridge configuration.
 *
 * This is called during the DEVICE_ATTACH() process by the bridged bhndb(4)
 * bus, prior to probe/attachment of child cores.
 *
 * At this point, we can introspect the enumerated cores, find our host
 * bridge device, and apply any bridge-level hardware workarounds required
 * for proper operation of the bridged device cores.
 */
static int
bhndb_pci_init_full_config(device_t dev, device_t child,
    const struct bhndb_hw_priority *prio_table)
{
        struct bhnd_core_info core;
        const struct bhndb_pci_id *id;
        struct bhndb_pci_softc *sc;
        struct bhndb_region *pcir;
        bhnd_addr_t pcir_addr;
        bhnd_size_t pcir_size;
        int error;

        sc = device_get_softc(dev);

        /* Let bhndb perform full discovery and initialization of the
         * available register windows and bridge resources. */
        if ((error = bhndb_generic_init_full_config(dev, child, prio_table)))
                return (error);

        /*
         * Identify our PCI bridge core, its register family, and any
         * applicable hardware quirks.
         */
        KASSERT(sc->bhndb.hostb_dev,
            ("missing hostb device\n"));

        core = bhnd_get_core_info(sc->bhndb.hostb_dev);
        id = bhndb_pci_find_core_id(&core);
        if (id == NULL) {
                device_printf(dev, "%s %s hostb core is not recognized\n",
                    bhnd_vendor_name(core.vendor), bhnd_core_name(&core));
                return (ENXIO);
        }

        sc->regfmt = id->regfmt;

        /* Now that we've identified the PCI bridge core, we can determine the
         * full set of device quirks */
        sc->quirks = bhndb_pci_discover_quirks(sc, id);

        /*
         * Determine and save a reference to the bhndb resource and offset
         * at which the bridge core's device registers are mapped.
         *
         * All known bhnd(4) hardware provides a fixed static mapping of
         * the PCI core's registers. If this changes in the future -- which
         * is unlikely -- this driver will need to be adjusted to use
         * dynamic register windows.
         */

        /* Find the base address and size of the PCI core's register block. */
        error = bhnd_get_region_addr(sc->bhndb.hostb_dev, BHND_PORT_DEVICE, 0,
            0, &pcir_addr, &pcir_size);
        if (error) {
                device_printf(dev,
                    "failed to locate PCI core registers\n");
                return (error);
        }

        /* Find the bhndb_region that statically maps this block */
        pcir = bhndb_find_resource_region(sc->bhndb.bus_res, pcir_addr,
            pcir_size);
        if (pcir == NULL || pcir->static_regwin == NULL) {
                device_printf(dev,
                    "missing static PCI core register window\n");
                return (ENXIO);
        }

        /* Save a borrowed reference to the mapped PCI core registers */
        sc->mem_off = pcir->static_regwin->win_offset;
        sc->mem_res = bhndb_find_regwin_resource(sc->bhndb.bus_res,
            pcir->static_regwin);
        if (sc->mem_res == NULL || !(rman_get_flags(sc->mem_res) & RF_ACTIVE)) {
                device_printf(dev,
                    "no active resource maps the PCI core register window\n");
                return (ENXIO);
        }

        /* Configure a direct bhnd_resource wrapper that we can pass to
         * bhnd_resource APIs */
        sc->bhnd_mem_res = (struct bhnd_resource) {
                .res = sc->mem_res,
                .direct = true
        };

        /*
         * Attach the MDIO device (if this is a PCIe bridge), which is used
         * for access to the PCIe SerDes required by the quirk workarounds.
         */
        if (sc->pci_devclass == BHND_DEVCLASS_PCIE) {
                sc->mdio = device_add_child(dev,
                    devclass_get_name(bhnd_mdio_pci_devclass), 0);
                if (sc->mdio == NULL)
                        return (ENXIO);

                if ((error = device_probe_and_attach(sc->mdio))) {
                        device_printf(dev, "failed to attach MDIO device\n");
                        return (error);
                }
        }

        /* Apply any early one-time quirk workarounds */
        if ((error = bhndb_pci_wars_early_once(sc)))
                return (error);

        /* Apply attach-time quirk workarounds, required before the bridged
         * bhnd(4) bus itself performs a full attach(). */
        if ((error = bhndb_pci_wars_hwup(sc)))
                return (error);

        return (0);
}
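
/*
 * Hardware workaround (WAR) entry points, in the order in which they are
 * invoked:
 *
 *   bhndb_pci_attach()            -> bhndb_pci_wars_register_access()
 *   bhndb_pci_init_full_config()  -> bhndb_pci_wars_early_once(),
 *                                    bhndb_pci_wars_hwup()
 *   bhndb_pci_resume()            -> bhndb_pci_wars_hwresume()
 *   bhndb_pci_suspend()/detach()  -> bhndb_pci_wars_hwdown()
 */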

/**
 * Apply any hardware workarounds that must be executed prior to attempting
 * register access on the bridged chipset.
 *
 * This must be called very early in attach() or resume(), after the basic
 * set of applicable device quirks has been determined.
 */
static int
bhndb_pci_wars_register_access(struct bhndb_pci_softc *sc)
{
        int error;

        if (BHNDB_PCI_QUIRK(sc, EXT_CLOCK_GATING)) {
                if ((error = bhndb_enable_pci_clocks(sc))) {
                        device_printf(sc->dev, "failed to enable clocks\n");
                        return (error);
                }
        }

        return (0);
}

/**
 * Apply any hardware work-arounds that must be executed exactly once, early in
 * the attach process.
 *
 * This must be called after core enumeration and discovery of all applicable
 * quirks, but prior to probe/attach of any cores, parsing of
 * SPROM, etc.
 */
static int
bhndb_pci_wars_early_once(struct bhndb_pci_softc *sc)
{
        /* Determine correct polarity by observing the attach-time PCIe PHY
         * link status. This is used later to reset/force the SerDes
         * polarity */
        if (BHNDB_PCIE_QUIRK(sc, SDR9_POLARITY)) {
                uint32_t st;
                bool inv;

                st = bhndb_pcie_read_proto_reg(sc, BHND_PCIE_PLP_STATUSREG);
                inv = ((st & BHND_PCIE_PLP_POLARITY_INV) != 0);
                sc->sdr9_quirk_polarity.inv = inv;
        }

        return (0);
}

/**
 * Apply any hardware workarounds that are required upon attach or resume
 * of the bridge device.
 */
static int
bhndb_pci_wars_hwup(struct bhndb_pci_softc *sc)
{
        /* Note that the order here matters; these work-arounds
         * should not be re-ordered without careful review of their
         * interdependencies */

        /* Fix up any PoR defaults on SROMless devices */
        bhndb_init_sromless_pci_config(sc);

        /* Enable PCI prefetch/burst/readmulti flags */
        if (BHNDB_PCI_QUIRK(sc, SBTOPCI2_PREF_BURST) ||
            BHNDB_PCI_QUIRK(sc, SBTOPCI2_READMULTI))
        {
                uint32_t sbp2;
                sbp2 = BHNDB_PCI_READ_4(sc, BHND_PCI_SBTOPCI2);

                if (BHNDB_PCI_QUIRK(sc, SBTOPCI2_PREF_BURST))
                        sbp2 |= (BHND_PCI_SBTOPCI_PREF|BHND_PCI_SBTOPCI_BURST);

                if (BHNDB_PCI_QUIRK(sc, SBTOPCI2_READMULTI))
                        sbp2 |= BHND_PCI_SBTOPCI_RC_READMULTI;

                BHNDB_PCI_WRITE_4(sc, BHND_PCI_SBTOPCI2, sbp2);
        }

        /* Disable PCI CLKRUN# */
        if (BHNDB_PCI_QUIRK(sc, CLKRUN_DSBL)) {
                uint32_t ctl;

                ctl = BHNDB_PCI_READ_4(sc, BHND_PCI_CLKRUN_CTL);
                ctl |= BHND_PCI_CLKRUN_DSBL;
                BHNDB_PCI_WRITE_4(sc, BHND_PCI_CLKRUN_CTL, ctl);
        }

        /* Enable TLP unmatched address handling work-around */
        if (BHNDB_PCIE_QUIRK(sc, UR_STATUS_FIX)) {
                uint32_t wrs;
                wrs = bhndb_pcie_read_proto_reg(sc, BHND_PCIE_TLP_WORKAROUNDSREG);
                wrs |= BHND_PCIE_TLP_WORKAROUND_URBIT;
                bhndb_pcie_write_proto_reg(sc, BHND_PCIE_TLP_WORKAROUNDSREG, wrs);
        }
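
        /*
         * The SDR9 SerDes workarounds below are applied through the PCIe
         * core's MDIO interface (sc->mdio, attached in
         * bhndb_pci_init_full_config()), rather than through the bridge's
         * memory-mapped core registers used above.
         */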

        /* Adjust SerDes CDR tuning to ensure that CDR is stable before sending
         * data during L0s to L0 exit transitions. */
        if (BHNDB_PCIE_QUIRK(sc, SDR9_L0s_HANG)) {
                uint16_t sdv;

                /* Set RX track/acquire timers to 2.064us/40.96us */
                sdv = BPCI_REG_INSERT(0, PCIE_SDR9_RX_TIMER1_LKTRK, (2064/16));
                sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_TIMER1_LKACQ,
                    (40960/1024));
                MDIO_WRITEREG(sc->mdio, BHND_PCIE_PHY_SDR9_TXRX,
                    BHND_PCIE_SDR9_RX_TIMER1, sdv);

                /* Apply CDR frequency workaround */
                sdv = BHND_PCIE_SDR9_RX_CDR_FREQ_OVR_EN;
                sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_CDR_FREQ_OVR, 0x0);
                MDIO_WRITEREG(sc->mdio, BHND_PCIE_PHY_SDR9_TXRX,
                    BHND_PCIE_SDR9_RX_CDR, sdv);

                /* Apply CDR BW tunings */
                sdv = 0;
                sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_CDRBW_INTGTRK, 0x2);
                sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_CDRBW_INTGACQ, 0x4);
                sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_CDRBW_PROPTRK, 0x6);
                sdv = BPCI_REG_INSERT(sdv, PCIE_SDR9_RX_CDRBW_PROPACQ, 0x6);
                MDIO_WRITEREG(sc->mdio, BHND_PCIE_PHY_SDR9_TXRX,
                    BHND_PCIE_SDR9_RX_CDRBW, sdv);
        }

        /* Force correct SerDes polarity */
        if (BHNDB_PCIE_QUIRK(sc, SDR9_POLARITY)) {
                uint16_t rxctl;

                rxctl = MDIO_READREG(sc->mdio, BHND_PCIE_PHY_SDR9_TXRX,
                    BHND_PCIE_SDR9_RX_CTRL);

                rxctl |= BHND_PCIE_SDR9_RX_CTRL_FORCE;
                if (sc->sdr9_quirk_polarity.inv)
                        rxctl |= BHND_PCIE_SDR9_RX_CTRL_POLARITY_INV;
                else
                        rxctl &= ~BHND_PCIE_SDR9_RX_CTRL_POLARITY_INV;

                MDIO_WRITEREG(sc->mdio, BHND_PCIE_PHY_SDR9_TXRX,
                    BHND_PCIE_SDR9_RX_CTRL, rxctl);
        }

        /* Disable startup retry on PLL frequency detection failure */
        if (BHNDB_PCIE_QUIRK(sc, SDR9_NO_FREQRETRY)) {
                uint16_t pctl;

                pctl = MDIO_READREG(sc->mdio, BHND_PCIE_PHY_SDR9_PLL,
                    BHND_PCIE_SDR9_PLL_CTRL);

                pctl &= ~BHND_PCIE_SDR9_PLL_CTRL_FREQDET_EN;
                MDIO_WRITEREG(sc->mdio, BHND_PCIE_PHY_SDR9_PLL,
                    BHND_PCIE_SDR9_PLL_CTRL, pctl);
        }

        /* Explicitly enable PCI-PM */
        if (BHNDB_PCIE_QUIRK(sc, PCIPM_REQEN)) {
                uint32_t lcreg;
                lcreg = bhndb_pcie_read_proto_reg(sc, BHND_PCIE_DLLP_LCREG);
                lcreg |= BHND_PCIE_DLLP_LCREG_PCIPM_EN;
                bhndb_pcie_write_proto_reg(sc, BHND_PCIE_DLLP_LCREG, lcreg);
        }

        /* Adjust L1 timer to fix slow L1->L0 transitions */
        if (BHNDB_PCIE_QUIRK(sc, L1_IDLE_THRESH)) {
                uint32_t pmt;
                pmt = bhndb_pcie_read_proto_reg(sc, BHND_PCIE_DLLP_PMTHRESHREG);
                pmt = BPCI_REG_INSERT(pmt, PCIE_L1THRESHOLDTIME,
                    BHND_PCIE_L1THRESHOLD_WARVAL);
                bhndb_pcie_write_proto_reg(sc, BHND_PCIE_DLLP_PMTHRESHREG, pmt);
        }

        /* Extend L1 timer for better performance.
         * TODO: We could enable/disable this on demand for better power
         * savings if we tie this to HT clock request handling */
        if (BHNDB_PCIE_QUIRK(sc, L1_TIMER_PERF)) {
                uint32_t pmt;
                pmt = bhndb_pcie_read_proto_reg(sc, BHND_PCIE_DLLP_PMTHRESHREG);
                pmt |= BHND_PCIE_ASPMTIMER_EXTEND;
                bhndb_pcie_write_proto_reg(sc, BHND_PCIE_DLLP_PMTHRESHREG, pmt);
        }

        /* Enable L23READY_EXIT_NOPRST if not already set in SPROM. */
        if (BHNDB_PCIE_QUIRK(sc, SPROM_L23_PCI_RESET)) {
                bus_size_t reg;
                uint16_t cfg;

                /* Fetch the misc cfg flags from SPROM */
                reg = BHND_PCIE_SPROM_SHADOW + BHND_PCIE_SRSH_PCIE_MISC_CONFIG;
                cfg = BHNDB_PCI_READ_2(sc, reg);

                /* Write the EXIT_NOPRST flag if not already set in SPROM */
                if (!(cfg & BHND_PCIE_SRSH_L23READY_EXIT_NOPRST)) {
                        cfg |= BHND_PCIE_SRSH_L23READY_EXIT_NOPRST;
                        BHNDB_PCI_WRITE_2(sc, reg, cfg);
                }
        }

        return (0);
}

/**
 * Apply any hardware workarounds that are required upon resume of the
 * bridge device.
 *
 * This must be called before any bridged bhnd(4) cores have been resumed.
 */
static int
bhndb_pci_wars_hwresume(struct bhndb_pci_softc *sc)
{
        int error;

        /* Nothing is possible without register access */
        if ((error = bhndb_pci_wars_register_access(sc)))
                return (error);

        /* Apply the general hwup workarounds */
        return (bhndb_pci_wars_hwup(sc));
}

/**
 * Apply any hardware workarounds that are required upon detach or suspend
 * of the bridge device.
 */
static int
bhndb_pci_wars_hwdown(struct bhndb_pci_softc *sc)
{
        int error;

        /* Reduce the L1 timer for better power savings.
         * TODO: We could enable/disable this on demand for better power
         * savings if we tie this to HT clock request handling */
        if (BHNDB_PCIE_QUIRK(sc, L1_TIMER_PERF)) {
                uint32_t pmt;
                pmt = bhndb_pcie_read_proto_reg(sc, BHND_PCIE_DLLP_PMTHRESHREG);
                pmt &= ~BHND_PCIE_ASPMTIMER_EXTEND;
                bhndb_pcie_write_proto_reg(sc, BHND_PCIE_DLLP_PMTHRESHREG, pmt);
        }

        /* Disable clocks */
        if (BHNDB_PCI_QUIRK(sc, EXT_CLOCK_GATING)) {
                if ((error = bhndb_disable_pci_clocks(sc))) {
                        device_printf(sc->dev, "failed to disable clocks\n");
                        return (error);
                }
        }

        return (0);
}

/*
 * On devices without a SROM, the PCI(e) cores will be initialized with
 * their Power-on-Reset defaults; this can leave the BAR0 PCI windows
 * potentially mapped to the wrong core index.
 *
 * This function updates the PCI core's BAR0 PCI configuration to point at the
 * current PCI core.
 *
 * Applies to all PCI/PCIe revisions. Must be applied before bus devices
 * are probed/attached or the SPROM is parsed.
 */
static void
bhndb_init_sromless_pci_config(struct bhndb_pci_softc *sc)
{
        bus_size_t sprom_addr;
        u_int sprom_core_idx;
        u_int pci_core_idx;
        uint16_t val;

        /* Fetch the SPROM's configured core index */
        sprom_addr = BPCI_COMMON_REG_OFFSET(SPROM_SHADOW, SRSH_PI_OFFSET);
        val = BHNDB_PCI_READ_2(sc, sprom_addr);

        /* If it doesn't match the host bridge's core index, update the index
         * value */
        sprom_core_idx = BPCI_COMMON_REG_EXTRACT(val, SRSH_PI);
        pci_core_idx = bhnd_get_core_index(sc->bhndb.hostb_dev);

        if (sprom_core_idx != pci_core_idx) {
                val = BPCI_COMMON_REG_INSERT(val, SRSH_PI, pci_core_idx);
                BHNDB_PCI_WRITE_2(sc, sprom_addr, val);
        }
}

static int
bhndb_pci_detach(device_t dev)
{
        struct bhndb_pci_softc *sc;
        int error;

        sc = device_get_softc(dev);

        if ((error = bhndb_generic_detach(dev)))
                return (error);

        /* Apply any hardware workarounds. This may disable the clock, and
         * thus must be called *after* any children have been detached. */
        if ((error = bhndb_pci_wars_hwdown(sc)))
                return (error);

        /* Disable PCI bus mastering */
        pci_disable_busmaster(device_get_parent(dev));

        return (0);
}

static int
bhndb_pci_suspend(device_t dev)
{
        struct bhndb_pci_softc *sc;
        int error;

        sc = device_get_softc(dev);

        if ((error = bhndb_generic_suspend(dev)))
                return (error);

        /* Apply any hardware workarounds. This may disable the clock, and
         * thus must be called *after* any children have been suspended. */
        if ((error = bhndb_pci_wars_hwdown(sc)))
                return (error);

        return (0);
}

static int
bhndb_pci_resume(device_t dev)
{
        struct bhndb_pci_softc *sc;
        int error;

        sc = device_get_softc(dev);

        /* Apply any resume workarounds; these may be required for bridged
         * device access, and thus must be called *before* any children are
         * resumed. */
        if ((error = bhndb_pci_wars_hwresume(sc)))
                return (error);

        if ((error = bhndb_generic_resume(dev)))
                return (error);

        return (0);
}

static int
bhndb_pci_set_window_addr(device_t dev, const struct bhndb_regwin *rw,
    bhnd_addr_t addr)
{
        struct bhndb_pci_softc *sc = device_get_softc(dev);
        return (sc->set_regwin(sc, rw, addr));
}

/**
 * A siba(4) and bcma(4)-compatible bhndb_set_window_addr implementation.
 *
 * On siba(4) devices, it's possible that writing a PCI window register may
 * not succeed; it's necessary to immediately read the configuration register
 * and retry if not set to the desired value.
 *
 * This is not necessary on bcma(4) devices, but other than the overhead of
 * validating the register, there's no harm in performing the verification.
 */
static int
bhndb_pci_compat_setregwin(struct bhndb_pci_softc *sc,
    const struct bhndb_regwin *rw, bhnd_addr_t addr)
{
        device_t parent;
        int error;

        parent = sc->bhndb.parent_dev;

        if (rw->win_type != BHNDB_REGWIN_T_DYN)
                return (ENODEV);

        for (u_int i = 0; i < BHNDB_PCI_BARCTRL_WRITE_RETRY; i++) {
                if ((error = bhndb_pci_fast_setregwin(sc, rw, addr)))
                        return (error);

                if (pci_read_config(parent, rw->dyn.cfg_offset, 4) == addr)
                        return (0);

                DELAY(10);
        }

        /* Unable to set window */
        return (ENODEV);
}

/**
 * A bcma(4)-only bhndb_set_window_addr implementation.
 */
static int
bhndb_pci_fast_setregwin(struct bhndb_pci_softc *sc,
    const struct bhndb_regwin *rw, bhnd_addr_t addr)
{
        device_t parent = sc->bhndb.parent_dev;

        /* The PCI bridge core only supports 32-bit addressing, regardless
         * of the bus' support for 64-bit addressing */
        if (addr > UINT32_MAX)
                return (ERANGE);

        switch (rw->win_type) {
        case BHNDB_REGWIN_T_DYN:
                /* Addresses must be page aligned */
                if (addr % rw->win_size != 0)
                        return (EINVAL);

                pci_write_config(parent, rw->dyn.cfg_offset, addr, 4);
                break;
        default:
                return (ENODEV);
        }

        return (0);
}
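
/*
 * PCIe protocol register access:
 *
 * The PCIe core's TLP/DLLP/PLP protocol registers are reached indirectly;
 * the register offset is written to BHND_PCIE_IND_ADDR, and the value is
 * then read or written via BHND_PCIE_IND_DATA, with the bridge lock held
 * across the address/data pair.
 */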

/**
 * Read a 32-bit PCIe TLP/DLLP/PLP protocol register.
 *
 * @param sc The bhndb_pci driver state.
 * @param addr The protocol register offset.
 */
static uint32_t
bhndb_pcie_read_proto_reg(struct bhndb_pci_softc *sc, uint32_t addr)
{
        uint32_t val;

        KASSERT(bhnd_get_class(sc->bhndb.hostb_dev) == BHND_DEVCLASS_PCIE,
            ("not a pcie device!"));

        BHNDB_LOCK(&sc->bhndb);
        BHNDB_PCI_WRITE_4(sc, BHND_PCIE_IND_ADDR, addr);
        val = BHNDB_PCI_READ_4(sc, BHND_PCIE_IND_DATA);
        BHNDB_UNLOCK(&sc->bhndb);

        return (val);
}

/**
 * Write a 32-bit PCIe TLP/DLLP/PLP protocol register value.
 *
 * @param sc The bhndb_pci driver state.
 * @param addr The protocol register offset.
 * @param val The value to write to @p addr.
 */
static void
bhndb_pcie_write_proto_reg(struct bhndb_pci_softc *sc, uint32_t addr,
    uint32_t val)
{
        KASSERT(bhnd_get_class(sc->bhndb.hostb_dev) == BHND_DEVCLASS_PCIE,
            ("not a pcie device!"));

        BHNDB_LOCK(&sc->bhndb);
        BHNDB_PCI_WRITE_4(sc, BHND_PCIE_IND_ADDR, addr);
        BHNDB_PCI_WRITE_4(sc, BHND_PCIE_IND_DATA, val);
        BHNDB_UNLOCK(&sc->bhndb);
}

/**
 * Enable externally managed clocks.
 *
 * Quirk Required: EXT_CLOCK_GATING
 *
 * @param sc Bridge driver state.
 */
static int
bhndb_enable_pci_clocks(struct bhndb_pci_softc *sc)
{
        device_t pci_parent;
        uint32_t gpio_in, gpio_out, gpio_en;
        uint32_t gpio_flags;
        uint16_t pci_status;

        BHNDB_PCI_ASSERT_QUIRK(sc, EXT_CLOCK_GATING);

        pci_parent = device_get_parent(sc->dev);

        /* Read state of XTAL pin */
        gpio_in = pci_read_config(pci_parent, BHNDB_PCI_GPIO_IN, 4);
        if (gpio_in & BHNDB_PCI_GPIO_XTAL_ON)
                return (0); /* already enabled */

        /* Fetch current config */
        gpio_out = pci_read_config(pci_parent, BHNDB_PCI_GPIO_OUT, 4);
        gpio_en = pci_read_config(pci_parent, BHNDB_PCI_GPIO_OUTEN, 4);

        /* Set PLL_OFF/XTAL_ON pins to HIGH and enable both pins */
        gpio_flags = (BHNDB_PCI_GPIO_PLL_OFF|BHNDB_PCI_GPIO_XTAL_ON);
        gpio_out |= gpio_flags;
        gpio_en |= gpio_flags;

        pci_write_config(pci_parent, BHNDB_PCI_GPIO_OUT, gpio_out, 4);
        pci_write_config(pci_parent, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4);
        DELAY(1000);

        /* Reset PLL_OFF */
        gpio_out &= ~BHNDB_PCI_GPIO_PLL_OFF;
        pci_write_config(pci_parent, BHNDB_PCI_GPIO_OUT, gpio_out, 4);
        DELAY(5000);

        /* Clear any PCI 'sent target-abort' flag. */
        pci_status = pci_read_config(pci_parent, PCIR_STATUS, 2);
        pci_status &= ~PCIM_STATUS_STABORT;
        pci_write_config(pci_parent, PCIR_STATUS, pci_status, 2);

        return (0);
}
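
/*
 * Both bhndb_enable_pci_clocks() and bhndb_disable_pci_clocks() drive the
 * XTAL_ON/PLL_OFF pins through the host bridge's PCI configuration space
 * (the BHNDB_PCI_GPIO_* registers), allowing the clocks to be gated on or
 * off without touching the (potentially unclocked) bridged bhnd(4) bus.
 */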

/**
 * Disable externally managed clocks.
 *
 * Quirk Required: EXT_CLOCK_GATING
 *
 * @param sc Bridge driver state.
 */
static int
bhndb_disable_pci_clocks(struct bhndb_pci_softc *sc)
{
        device_t parent_dev;
        uint32_t gpio_out, gpio_en;

        BHNDB_PCI_ASSERT_QUIRK(sc, EXT_CLOCK_GATING);

        parent_dev = device_get_parent(sc->dev);

        // TODO: Check board flags for BFL2_XTALBUFOUTEN?
        // TODO: Check PCI core revision?
        // TODO: Switch to 'slow' clock?

        /* Fetch current config */
        gpio_out = pci_read_config(parent_dev, BHNDB_PCI_GPIO_OUT, 4);
        gpio_en = pci_read_config(parent_dev, BHNDB_PCI_GPIO_OUTEN, 4);

        /* Set PLL_OFF to HIGH, XTAL_ON to LOW. */
        gpio_out &= ~BHNDB_PCI_GPIO_XTAL_ON;
        gpio_out |= BHNDB_PCI_GPIO_PLL_OFF;
        pci_write_config(parent_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4);

        /* Enable both output pins */
        gpio_en |= (BHNDB_PCI_GPIO_PLL_OFF|BHNDB_PCI_GPIO_XTAL_ON);
        pci_write_config(parent_dev, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4);

        return (0);
}

/**
 * Find the identification table entry for a core descriptor.
 *
 * @param core The core descriptor to match against the identification table.
 */
static const struct bhndb_pci_id *
bhndb_pci_find_core_id(struct bhnd_core_info *core)
{
        const struct bhndb_pci_id *id;

        for (id = bhndb_pci_ids; id->device != BHND_COREID_INVALID; id++) {
                if (core->vendor == BHND_MFGID_BCM &&
                    core->device == id->device)
                        return (id);
        }

        return (NULL);
}

/**
 * Return all quirks known to be applicable to the host bridge.
 *
 * If the PCI bridge core has not yet been identified, no core-specific
 * quirk flags will be returned. This function may be called again to
 * rediscover applicable quirks after the host bridge core has been
 * identified.
 *
 * @param sc bhndb PCI driver state.
 * @param id The host bridge core's identification table entry, or NULL
 * if the host bridge core has not yet been identified.
 *
 * @return Returns the set of quirks applicable to the current hardware.
 */
static uint32_t
bhndb_pci_discover_quirks(struct bhndb_pci_softc *sc,
    const struct bhndb_pci_id *id)
{
        struct bhnd_device_quirk *qt;
        uint32_t quirks;
        uint8_t hwrev;

        quirks = BHNDB_PCI_QUIRK_NONE;

        /* Determine any device class-specific quirks */
        switch (sc->pci_devclass) {
        case BHND_DEVCLASS_PCI:
                /* All PCI devices require external clock gating */
                quirks |= BHNDB_PCI_QUIRK_EXT_CLOCK_GATING;
                break;
        default:
                break;
        }

        // TODO: Additional quirk matching

        /* Determine any PCI core hwrev-specific device quirks */
        if (id != NULL) {
                hwrev = bhnd_get_hwrev(sc->bhndb.hostb_dev);
                for (qt = id->quirks; qt->quirks != 0; qt++) {
                        if (bhnd_hwrev_matches(hwrev, &qt->hwrev))
                                quirks |= qt->quirks;
                }
        }

        return (quirks);
}

/*
 * Support for attaching the PCIe-Gen1 MDIO driver to a parent bhndb PCIe
 * bridge device.
 */
static int
bhndb_mdio_pcie_probe(device_t dev)
{
        struct bhndb_softc *psc;
        device_t parent;

        /* Parent must be a bhndb_pci instance */
        parent = device_get_parent(dev);
        if (device_get_driver(parent) != &bhndb_pci_driver)
                return (ENXIO);

        /* Parent must have a PCIe-Gen1 hostb device */
        psc = device_get_softc(parent);
        if (psc->hostb_dev == NULL)
                return (ENXIO);

        if (bhnd_get_vendor(psc->hostb_dev) != BHND_MFGID_BCM ||
            bhnd_get_device(psc->hostb_dev) != BHND_COREID_PCIE)
        {
                return (ENXIO);
        }

        device_quiet(dev);
        return (BUS_PROBE_NOWILDCARD);
}

static int
bhndb_mdio_pcie_attach(device_t dev)
{
        struct bhndb_pci_softc *psc;

        psc = device_get_softc(device_get_parent(dev));

        return (bhnd_mdio_pcie_attach(dev,
            &psc->bhnd_mem_res, -1,
            psc->mem_off + BHND_PCIE_MDIO_CTL,
            (psc->quirks & BHNDB_PCIE_QUIRK_SD_C22_EXTADDR) != 0));
}

static device_method_t bhnd_mdio_pcie_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,         bhndb_mdio_pcie_probe),
        DEVMETHOD(device_attach,        bhndb_mdio_pcie_attach),
        DEVMETHOD_END
};

static device_method_t bhndb_pci_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,                 bhndb_pci_probe),
        DEVMETHOD(device_attach,                bhndb_pci_attach),
        DEVMETHOD(device_detach,                bhndb_pci_detach),
        DEVMETHOD(device_suspend,               bhndb_pci_suspend),
        DEVMETHOD(device_resume,                bhndb_pci_resume),

        /* BHNDB interface */
        DEVMETHOD(bhndb_init_full_config,       bhndb_pci_init_full_config),
        DEVMETHOD(bhndb_set_window_addr,        bhndb_pci_set_window_addr),

        DEVMETHOD_END
};

DEFINE_CLASS_1(bhndb, bhndb_pci_driver, bhndb_pci_methods,
    sizeof(struct bhndb_pci_softc), bhndb_driver);

DEFINE_CLASS_1(bhnd_mdio_pci, bhndb_mdio_pcie_driver, bhnd_mdio_pcie_methods,
    sizeof(struct bhnd_mdio_pcie_softc), bhnd_mdio_pcie_driver);

DRIVER_MODULE(bhnd_mdio_pcie, bhndb, bhndb_mdio_pcie_driver,
    bhnd_mdio_pci_devclass, NULL, NULL);

MODULE_VERSION(bhndb_pci, 1);
MODULE_DEPEND(bhndb_pci, bhnd_pci, 1, 1, 1);
MODULE_DEPEND(bhndb_pci, pci, 1, 1, 1);
MODULE_DEPEND(bhndb_pci, bhndb, 1, 1, 1);