1 /*-
2 * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
3 * Copyright (c) 2017 The FreeBSD Foundation
4 * All rights reserved.
5 *
6 * Portions of this software were developed by Landon Fuller
7 * under sponsorship from the FreeBSD Foundation.
8 *
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
11 * are met:
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer,
14 * without modification.
15 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16 * similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
17 * redistribution must be conditioned upon including a substantially
18 * similar Disclaimer requirement for further binary redistribution.
19 *
20 * NO WARRANTY
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
24 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
25 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
26 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
29 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31 * THE POSSIBILITY OF SUCH DAMAGES.
32 */
33
34 #include <sys/cdefs.h>
35 /*
36 * PCI-specific implementation for the BHNDB bridge driver.
37 *
38 * Provides support for bridging from a PCI parent bus to a BHND-compatible
39 * bus (e.g. bcma or siba) via a Broadcom PCI core configured in end-point
40 * mode.
41 *
42 * This driver handles all initial generic host-level PCI interactions with a
43 * PCI/PCIe bridge core operating in endpoint mode. Once the bridged bhnd(4)
44 * bus has been enumerated, this driver works in tandem with a core-specific
45 * bhnd_pci_hostb driver to manage the PCI core.
46 */
47
48 #include <sys/param.h>
49 #include <sys/kernel.h>
50 #include <sys/bus.h>
51 #include <sys/limits.h>
52 #include <sys/malloc.h>
53 #include <sys/module.h>
54 #include <sys/systm.h>
55
56 #include <dev/pci/pcireg.h>
57 #include <dev/pci/pcivar.h>
58
59 #include <dev/bhnd/bhnd.h>
60 #include <dev/bhnd/bhndreg.h>
61
62 #include <dev/bhnd/bhnd_erom.h>
63 #include <dev/bhnd/bhnd_eromvar.h>
64
65 #include <dev/bhnd/siba/sibareg.h>
66
67 #include <dev/bhnd/cores/pci/bhnd_pcireg.h>
68
69 #include "bhnd_pwrctl_hostb_if.h"
70
71 #include "bhndb_pcireg.h"
72 #include "bhndb_pcivar.h"
73 #include "bhndb_private.h"
74
75 struct bhndb_pci_eio;
76 struct bhndb_pci_probe;
77
78 static int bhndb_pci_alloc_msi(struct bhndb_pci_softc *sc,
79 int *msi_count);
80
81 static int bhndb_pci_add_children(struct bhndb_pci_softc *sc);
82
83 static bhnd_devclass_t bhndb_expected_pci_devclass(device_t dev);
84 static bool bhndb_is_pcie_attached(device_t dev);
85
86 static int bhndb_enable_pci_clocks(device_t dev);
87 static int bhndb_disable_pci_clocks(device_t dev);
88
89 static int bhndb_pci_compat_setregwin(device_t dev,
90 device_t pci_dev, const struct bhndb_regwin *,
91 bhnd_addr_t);
92 static int bhndb_pci_fast_setregwin(device_t dev, device_t pci_dev,
93 const struct bhndb_regwin *, bhnd_addr_t);
94
95 static void bhndb_pci_write_core(struct bhndb_pci_softc *sc,
96 bus_size_t offset, uint32_t value, u_int width);
97 static uint32_t bhndb_pci_read_core(struct bhndb_pci_softc *sc,
98 bus_size_t offset, u_int width);
99
100 static int bhndb_pci_srsh_pi_war(struct bhndb_pci_softc *sc,
101 struct bhndb_pci_probe *probe);
102
103 static bus_addr_t bhndb_pci_sprom_addr(struct bhndb_pci_softc *sc);
104 static bus_size_t bhndb_pci_sprom_size(struct bhndb_pci_softc *sc);
105
106 static int bhndb_pci_probe_alloc(struct bhndb_pci_probe **probe,
107 device_t dev, bhnd_devclass_t pci_devclass);
108 static void bhndb_pci_probe_free(struct bhndb_pci_probe *probe);
109
110 static int bhndb_pci_probe_copy_core_table(
111 struct bhndb_pci_probe *probe,
112 struct bhnd_core_info **cores, u_int *ncores);
113 static void bhndb_pci_probe_free_core_table(
114 struct bhnd_core_info *cores);
115
116 static void bhndb_pci_probe_write(struct bhndb_pci_probe *sc,
117 bhnd_addr_t addr, bhnd_size_t offset,
118 uint32_t value, u_int width);
119 static uint32_t bhndb_pci_probe_read(struct bhndb_pci_probe *sc,
120 bhnd_addr_t addr, bhnd_size_t offset, u_int width);
121
122 static void bhndb_pci_eio_init(struct bhndb_pci_eio *eio,
123 struct bhndb_pci_probe *probe);
124 static int bhndb_pci_eio_map(struct bhnd_erom_io *eio,
125 bhnd_addr_t addr, bhnd_size_t size);
126 static int bhndb_pci_eio_tell(struct bhnd_erom_io *eio,
127 bhnd_addr_t *addr, bhnd_size_t *size);
128 static uint32_t bhndb_pci_eio_read(struct bhnd_erom_io *eio,
129 bhnd_size_t offset, u_int width);
130
131 #define BHNDB_PCI_MSI_COUNT 1
132
133 static struct bhndb_pci_quirk bhndb_pci_quirks[];
134 static struct bhndb_pci_quirk bhndb_pcie_quirks[];
135 static struct bhndb_pci_quirk bhndb_pcie2_quirks[];
136
137 static struct bhndb_pci_core bhndb_pci_cores[] = {
138 BHNDB_PCI_CORE(PCI, bhndb_pci_quirks),
139 BHNDB_PCI_CORE(PCIE, bhndb_pcie_quirks),
140 BHNDB_PCI_CORE(PCIE2, bhndb_pcie2_quirks),
141 BHNDB_PCI_CORE_END
142 };
143
144 /* bhndb_pci erom I/O instance state */
145 struct bhndb_pci_eio {
146 struct bhnd_erom_io eio;
147 bool mapped; /**< true if a valid mapping exists */
148 bhnd_addr_t addr; /**< mapped address */
149 bhnd_size_t size; /**< mapped size */
150 struct bhndb_pci_probe *probe; /**< borrowed probe reference */
151 };
152
153 /**
154 * Provides early bus access to the bridged device's cores and core enumeration
155 * table.
156 *
157 * May be safely used during probe or early device attach, prior to calling
158 * bhndb_attach().
159 */
160 struct bhndb_pci_probe {
161 device_t dev; /**< bridge device */
162 device_t pci_dev; /**< parent PCI device */
163 struct bhnd_chipid cid; /**< chip identification */
164 struct bhnd_core_info hostb_core; /**< PCI bridge core info */
165
166 struct bhndb_pci_eio erom_io; /**< erom I/O instance */
167 bhnd_erom_class_t *erom_class; /**< probed erom class */
168 bhnd_erom_t *erom; /**< erom parser */
169 struct bhnd_core_info *cores; /**< erom-owned core table */
170 u_int ncores; /**< number of cores */
171
172 const struct bhndb_regwin *m_win; /**< mapped register window, or NULL if no mapping */
173 struct resource *m_res; /**< resource containing the register window, or NULL if no window mapped */
174 bhnd_addr_t m_target; /**< base address mapped by m_win */
175 bhnd_addr_t m_addr; /**< mapped address */
176 bhnd_size_t m_size; /**< mapped size */
177 bool m_valid; /**< true if a valid mapping exists, false otherwise */
178
179 struct bhndb_host_resources *hr; /**< backing host resources */
180 };
181
182 static struct bhndb_pci_quirk bhndb_pci_quirks[] = {
183 /* Backplane interrupt flags must be routed via siba-specific
184 * SIBA_CFG0_INTVEC configuration register; the BHNDB_PCI_INT_MASK
185 * PCI configuration register is unsupported. */
186 {{ BHND_MATCH_CHIP_TYPE (SIBA) },
187 { BHND_MATCH_CORE_REV (HWREV_LTE(5)) },
188 BHNDB_PCI_QUIRK_SIBA_INTVEC },
189
190 /* All PCI core revisions require the SRSH work-around */
191 BHNDB_PCI_QUIRK(HWREV_ANY, BHNDB_PCI_QUIRK_SRSH_WAR),
192 BHNDB_PCI_QUIRK_END
193 };
194
195 static struct bhndb_pci_quirk bhndb_pcie_quirks[] = {
196 /* All PCIe-G1 core revisions require the SRSH work-around */
197 BHNDB_PCI_QUIRK(HWREV_ANY, BHNDB_PCI_QUIRK_SRSH_WAR),
198 BHNDB_PCI_QUIRK_END
199 };
200
201 static struct bhndb_pci_quirk bhndb_pcie2_quirks[] = {
202 BHNDB_PCI_QUIRK_END
203 };
204
205 /**
206 * Return the device table entry for @p ci, or NULL if none.
207 */
208 static struct bhndb_pci_core *
bhndb_pci_find_core(struct bhnd_core_info *ci)
210 {
211 for (size_t i = 0; !BHNDB_PCI_IS_CORE_END(&bhndb_pci_cores[i]); i++) {
212 struct bhndb_pci_core *entry = &bhndb_pci_cores[i];
213
214 if (bhnd_core_matches(ci, &entry->match))
215 return (entry);
216 }
217
218 return (NULL);
219 }
220
221 /**
222 * Return all quirk flags for the given @p cid and @p ci.
223 */
224 static uint32_t
bhndb_pci_get_core_quirks(struct bhnd_chipid *cid, struct bhnd_core_info *ci)
226 {
227 struct bhndb_pci_core *entry;
228 struct bhndb_pci_quirk *qtable;
229 uint32_t quirks;
230
231 quirks = 0;
232
233 /* No core entry? */
234 if ((entry = bhndb_pci_find_core(ci)) == NULL)
235 return (quirks);
236
237 /* No quirks? */
238 if ((qtable = entry->quirks) == NULL)
239 return (quirks);
240
241 for (size_t i = 0; !BHNDB_PCI_IS_QUIRK_END(&qtable[i]); i++) {
242 struct bhndb_pci_quirk *q = &qtable[i];
243
244 if (!bhnd_chip_matches(cid, &q->chip_desc))
245 continue;
246
247 if (!bhnd_core_matches(ci, &q->core_desc))
248 continue;
249
250 quirks |= q->quirks;
251 }
252
253 return (quirks);
254 }
255
256 /**
257 * Default bhndb_pci implementation of device_probe().
258 *
259 * Verifies that the parent is a PCI/PCIe device.
260 */
261 static int
bhndb_pci_probe(device_t dev)
263 {
264 struct bhndb_pci_probe *probe;
265 struct bhndb_pci_core *entry;
266 bhnd_devclass_t hostb_devclass;
267 device_t parent, parent_bus;
268 devclass_t pci, bus_devclass;
269 int error;
270
271 probe = NULL;
272
273 /* Our parent must be a PCI/PCIe device. */
274 pci = devclass_find("pci");
275 parent = device_get_parent(dev);
276 parent_bus = device_get_parent(parent);
277 if (parent_bus == NULL)
278 return (ENXIO);
279
280 /* The bus device class may inherit from 'pci' */
281 for (bus_devclass = device_get_devclass(parent_bus);
282 bus_devclass != NULL;
283 bus_devclass = devclass_get_parent(bus_devclass))
284 {
285 if (bus_devclass == pci)
286 break;
287 }
288
289 if (bus_devclass != pci)
290 return (ENXIO);
291
292 /* Enable clocks */
293 if ((error = bhndb_enable_pci_clocks(dev)))
294 return (error);
295
296 /* Identify the chip and enumerate the bridged cores */
297 hostb_devclass = bhndb_expected_pci_devclass(dev);
298 if ((error = bhndb_pci_probe_alloc(&probe, dev, hostb_devclass)))
299 goto cleanup;
300
301 /* Look for a matching core table entry */
302 if ((entry = bhndb_pci_find_core(&probe->hostb_core)) == NULL) {
303 error = ENXIO;
304 goto cleanup;
305 }
306
307 device_set_desc(dev, "PCI-BHND bridge");
308
309 /* fall-through */
310 error = BUS_PROBE_DEFAULT;
311
312 cleanup:
313 if (probe != NULL)
314 bhndb_pci_probe_free(probe);
315
316 bhndb_disable_pci_clocks(dev);
317
318 return (error);
319 }
320
321 /**
322 * Attempt to allocate MSI interrupts, returning the count in @p msi_count
323 * on success.
324 */
325 static int
bhndb_pci_alloc_msi(struct bhndb_pci_softc *sc, int *msi_count)
327 {
328 int error, count;
329
330 /* Is MSI available? */
331 if (pci_msi_count(sc->parent) < BHNDB_PCI_MSI_COUNT)
332 return (ENXIO);
333
334 /* Allocate expected message count */
335 count = BHNDB_PCI_MSI_COUNT;
336 if ((error = pci_alloc_msi(sc->parent, &count))) {
337 device_printf(sc->dev, "failed to allocate MSI interrupts: "
338 "%d\n", error);
339
340 return (error);
341 }
342
343 if (count < BHNDB_PCI_MSI_COUNT) {
344 pci_release_msi(sc->parent);
345 return (ENXIO);
346 }
347
348 *msi_count = count;
349 return (0);
350 }
351
352 static int
bhndb_pci_attach(device_t dev)
354 {
355 struct bhndb_pci_softc *sc;
356 struct bhnd_chipid cid;
357 struct bhnd_core_info *cores, hostb_core;
358 bhnd_erom_class_t *erom_class;
359 struct bhndb_pci_probe *probe;
360 u_int ncores;
361 int irq_rid;
362 int error;
363
364 sc = device_get_softc(dev);
365 sc->dev = dev;
366 sc->parent = device_get_parent(dev);
367 sc->pci_devclass = bhndb_expected_pci_devclass(dev);
368 sc->pci_quirks = 0;
369 sc->set_regwin = NULL;
370
371 BHNDB_PCI_LOCK_INIT(sc);
372
373 probe = NULL;
374 cores = NULL;
375
376 /* Enable PCI bus mastering */
377 pci_enable_busmaster(sc->parent);
378
379 /* Enable clocks (if required by this hardware) */
380 if ((error = bhndb_enable_pci_clocks(sc->dev)))
381 goto cleanup;
382
383 /* Identify the chip and enumerate the bridged cores */
384 error = bhndb_pci_probe_alloc(&probe, dev, sc->pci_devclass);
385 if (error)
386 goto cleanup;
387
388 sc->pci_quirks = bhndb_pci_get_core_quirks(&probe->cid,
389 &probe->hostb_core);
390
391 /* Select the appropriate register window handler */
392 if (probe->cid.chip_type == BHND_CHIPTYPE_SIBA) {
393 sc->set_regwin = bhndb_pci_compat_setregwin;
394 } else {
395 sc->set_regwin = bhndb_pci_fast_setregwin;
396 }
397
398 /*
399 * Fix up our PCI base address in the SPROM shadow, if necessary.
400 *
401 * This must be done prior to accessing any static register windows
402 * that map the PCI core.
403 */
404 if ((error = bhndb_pci_srsh_pi_war(sc, probe)))
405 goto cleanup;
406
407 /* Set up PCI interrupt handling */
408 if (bhndb_pci_alloc_msi(sc, &sc->msi_count) == 0) {
409 /* MSI uses resource IDs starting at 1 */
410 irq_rid = 1;
411
412 device_printf(dev, "Using MSI interrupts on %s\n",
413 device_get_nameunit(sc->parent));
414 } else {
415 sc->msi_count = 0;
416 irq_rid = 0;
417
418 device_printf(dev, "Using INTx interrupts on %s\n",
419 device_get_nameunit(sc->parent));
420 }
421
422 sc->isrc = bhndb_alloc_intr_isrc(sc->parent, irq_rid, 0, RM_MAX_END, 1,
423 RF_SHAREABLE | RF_ACTIVE);
424 if (sc->isrc == NULL) {
425 device_printf(sc->dev, "failed to allocate interrupt "
426 "resource\n");
427 error = ENXIO;
428 goto cleanup;
429 }
430
431 /*
432 * Copy out the probe results and then free our probe state, releasing
433 * its exclusive ownership of host bridge resources.
434 *
435 * This must be done prior to full configuration of the bridge via
436 * bhndb_attach().
437 */
438 cid = probe->cid;
439 erom_class = probe->erom_class;
440 hostb_core = probe->hostb_core;
441
442 error = bhndb_pci_probe_copy_core_table(probe, &cores, &ncores);
443 if (error) {
444 cores = NULL;
445 goto cleanup;
446 }
447
448 bhndb_pci_probe_free(probe);
449 probe = NULL;
450
451 /* Perform bridge attach */
452 error = bhndb_attach(dev, &cid, cores, ncores, &hostb_core, erom_class);
453 if (error)
454 goto cleanup;
455
456 /* Add any additional child devices */
457 if ((error = bhndb_pci_add_children(sc)))
458 goto cleanup;
459
460 /* Probe and attach our children */
461 if ((error = bus_generic_attach(dev)))
462 goto cleanup;
463
464 bhndb_pci_probe_free_core_table(cores);
465
466 return (0);
467
468 cleanup:
469 device_delete_children(dev);
470
471 if (sc->isrc != NULL)
472 bhndb_free_intr_isrc(sc->isrc);
473
474 if (sc->msi_count > 0)
475 pci_release_msi(sc->parent);
476
477 if (cores != NULL)
478 bhndb_pci_probe_free_core_table(cores);
479
480 if (probe != NULL)
481 bhndb_pci_probe_free(probe);
482
483 bhndb_disable_pci_clocks(sc->dev);
484
485 pci_disable_busmaster(sc->parent);
486
487 BHNDB_PCI_LOCK_DESTROY(sc);
488
489 return (error);
490 }
491
492 static int
bhndb_pci_detach(device_t dev)
494 {
495 struct bhndb_pci_softc *sc;
496 int error;
497
498 sc = device_get_softc(dev);
499
500 /* Attempt to detach our children */
501 if ((error = bus_generic_detach(dev)))
502 return (error);
503
504 /* Perform generic bridge detach */
505 if ((error = bhndb_generic_detach(dev)))
506 return (error);
507
508 /* Disable clocks (if required by this hardware) */
509 if ((error = bhndb_disable_pci_clocks(sc->dev)))
510 return (error);
511
512 /* Free our interrupt resources */
513 bhndb_free_intr_isrc(sc->isrc);
514
515 /* Release MSI interrupts */
516 if (sc->msi_count > 0)
517 pci_release_msi(sc->parent);
518
519 /* Disable PCI bus mastering */
520 pci_disable_busmaster(sc->parent);
521
522 BHNDB_PCI_LOCK_DESTROY(sc);
523
524 return (0);
525 }
526
527 static int
bhndb_pci_add_children(struct bhndb_pci_softc *sc)
529 {
530 bus_size_t nv_sz;
531 int error;
532
533 /**
534 * If SPROM is mapped directly into BAR0, add child NVRAM
535 * device.
536 */
537 nv_sz = bhndb_pci_sprom_size(sc);
538 if (nv_sz > 0) {
539 struct bhndb_devinfo *dinfo;
540 device_t child;
541
542 if (bootverbose) {
543 device_printf(sc->dev, "found SPROM (%ju bytes)\n",
544 (uintmax_t)nv_sz);
545 }
546
547 /* Add sprom device, ordered early enough to be available
548 * before the bridged bhnd(4) bus is attached. */
549 child = BUS_ADD_CHILD(sc->dev,
550 BHND_PROBE_ROOT + BHND_PROBE_ORDER_EARLY, "bhnd_nvram", -1);
551 if (child == NULL) {
552 device_printf(sc->dev, "failed to add sprom device\n");
553 return (ENXIO);
554 }
555
556 /* Initialize device address space and resource covering the
557 * BAR0 SPROM shadow. */
558 dinfo = device_get_ivars(child);
559 dinfo->addrspace = BHNDB_ADDRSPACE_NATIVE;
560
561 error = bus_set_resource(child, SYS_RES_MEMORY, 0,
562 bhndb_pci_sprom_addr(sc), nv_sz);
563 if (error) {
564 device_printf(sc->dev,
565 "failed to register sprom resources\n");
566 return (error);
567 }
568 }
569
570 return (0);
571 }
572
573 static const struct bhndb_regwin *
bhndb_pci_sprom_regwin(struct bhndb_pci_softc *sc)
575 {
576 struct bhndb_resources *bres;
577 const struct bhndb_hwcfg *cfg;
578 const struct bhndb_regwin *sprom_win;
579
580 bres = sc->bhndb.bus_res;
581 cfg = bres->cfg;
582
583 sprom_win = bhndb_regwin_find_type(cfg->register_windows,
584 BHNDB_REGWIN_T_SPROM, BHNDB_PCI_V0_BAR0_SPROM_SIZE);
585
586 return (sprom_win);
587 }
588
589 static bus_addr_t
bhndb_pci_sprom_addr(struct bhndb_pci_softc *sc)
591 {
592 const struct bhndb_regwin *sprom_win;
593 struct resource *r;
594
595 /* Fetch the SPROM register window */
596 sprom_win = bhndb_pci_sprom_regwin(sc);
597 KASSERT(sprom_win != NULL, ("requested sprom address on PCI_V2+"));
598
599 /* Fetch the associated resource */
600 r = bhndb_host_resource_for_regwin(sc->bhndb.bus_res->res, sprom_win);
601 KASSERT(r != NULL, ("missing resource for sprom window\n"));
602
603 return (rman_get_start(r) + sprom_win->win_offset);
604 }
605
606 static bus_size_t
bhndb_pci_sprom_size(struct bhndb_pci_softc *sc)
608 {
609 const struct bhndb_regwin *sprom_win;
610 uint32_t sctl;
611 bus_size_t sprom_sz;
612
613 sprom_win = bhndb_pci_sprom_regwin(sc);
614
615 /* PCI_V2 and later devices map SPROM/OTP via ChipCommon */
616 if (sprom_win == NULL)
617 return (0);
618
619 /* Determine SPROM size */
620 sctl = pci_read_config(sc->parent, BHNDB_PCI_SPROM_CONTROL, 4);
621 if (sctl & BHNDB_PCI_SPROM_BLANK)
622 return (0);
623
624 switch (sctl & BHNDB_PCI_SPROM_SZ_MASK) {
625 case BHNDB_PCI_SPROM_SZ_1KB:
626 sprom_sz = (1 * 1024);
627 break;
628
629 case BHNDB_PCI_SPROM_SZ_4KB:
630 sprom_sz = (4 * 1024);
631 break;
632
633 case BHNDB_PCI_SPROM_SZ_16KB:
634 sprom_sz = (16 * 1024);
635 break;
636
637 case BHNDB_PCI_SPROM_SZ_RESERVED:
638 default:
639 device_printf(sc->dev, "invalid PCI sprom size 0x%x\n", sctl);
640 return (0);
641 }
642
643 /* If the device has a larger SPROM than can be addressed via our SPROM
644 * register window, the SPROM image data will still be located within
645 * the window's addressable range */
646 sprom_sz = MIN(sprom_sz, sprom_win->win_size);
647
648 return (sprom_sz);
649 }
650
651 /**
652 * Return the host resource providing a static mapping of the PCI core's
653 * registers.
654 *
655 * @param sc bhndb PCI driver state.
656 * @param offset The required readable offset within the PCI core
657 * register block.
658 * @param size The required readable size at @p offset.
659 * @param[out] res On success, the host resource containing our PCI
660 * core's register window.
661 * @param[out] res_offset On success, the @p offset relative to @p res.
662 *
663 * @retval 0 success
664 * @retval ENXIO if a valid static register window mapping the PCI core
665 * registers is not available.
666 */
667 static int
bhndb_pci_get_core_regs(struct bhndb_pci_softc *sc, bus_size_t offset,
669 bus_size_t size, struct resource **res, bus_size_t *res_offset)
670 {
671 const struct bhndb_regwin *win;
672 struct resource *r;
673
674 /* Locate the static register window mapping the requested offset */
675 win = bhndb_regwin_find_core(sc->bhndb.bus_res->cfg->register_windows,
676 sc->pci_devclass, 0, BHND_PORT_DEVICE, 0, 0, offset, size);
677 if (win == NULL) {
678 device_printf(sc->dev, "missing PCI core register window\n");
679 return (ENXIO);
680 }
681
682 /* Fetch the resource containing the register window */
683 r = bhndb_host_resource_for_regwin(sc->bhndb.bus_res->res, win);
684 if (r == NULL) {
685 device_printf(sc->dev, "missing PCI core register resource\n");
686 return (ENXIO);
687 }
688
689 KASSERT(offset >= win->d.core.offset, ("offset %#jx outside of "
690 "register window", (uintmax_t)offset));
691
692 *res = r;
693 *res_offset = win->win_offset + (offset - win->d.core.offset);
694
695 return (0);
696 }
697
698 /**
699 * Write a 1, 2, or 4 byte data item to the PCI core's registers at @p offset.
700 *
701 * @param sc bhndb PCI driver state.
702 * @param offset register write offset.
703 * @param value value to be written.
704 * @param width item width (1, 2, or 4 bytes).
705 */
706 static void
bhndb_pci_write_core(struct bhndb_pci_softc *sc, bus_size_t offset,
708 uint32_t value, u_int width)
709 {
710 struct resource *r;
711 bus_size_t r_offset;
712 int error;
713
714 error = bhndb_pci_get_core_regs(sc, offset, width, &r, &r_offset);
715 if (error) {
716 panic("no PCI register window mapping %#jx+%#x: %d",
717 (uintmax_t)offset, width, error);
718 }
719
720 switch (width) {
721 case 1:
722 bus_write_1(r, r_offset, value);
723 break;
724 case 2:
725 bus_write_2(r, r_offset, value);
726 break;
727 case 4:
728 bus_write_4(r, r_offset, value);
729 break;
730 default:
731 panic("invalid width: %u", width);
732 }
733 }
734
735 /**
736 * Read a 1, 2, or 4 byte data item from the PCI core's registers
737 * at @p offset.
738 *
739 * @param sc bhndb PCI driver state.
740 * @param offset register read offset.
741 * @param width item width (1, 2, or 4 bytes).
742 */
743 static uint32_t
bhndb_pci_read_core(struct bhndb_pci_softc *sc, bus_size_t offset, u_int width)
745 {
746 struct resource *r;
747 bus_size_t r_offset;
748 int error;
749
750 error = bhndb_pci_get_core_regs(sc, offset, width, &r, &r_offset);
751 if (error) {
752 panic("no PCI register window mapping %#jx+%#x: %d",
753 (uintmax_t)offset, width, error);
754 }
755
756 switch (width) {
757 case 1:
758 return (bus_read_1(r, r_offset));
759 case 2:
760 return (bus_read_2(r, r_offset));
761 case 4:
762 return (bus_read_4(r, r_offset));
763 default:
764 panic("invalid width: %u", width);
765 }
766 }
767
768 /**
769 * Fix-up power on defaults for SPROM-less devices.
770 *
 * On SPROM-less devices, the PCI(e) cores will be initialized with their
772 * Power-on-Reset defaults; this can leave the BHND_PCI_SRSH_PI value pointing
773 * to the wrong backplane address. This value is used by the PCI core when
774 * performing address translation between static register windows in BAR0 that
775 * map the PCI core's register block, and backplane address space.
776 *
777 * When translating accesses via these BAR0 regions, the PCI bridge determines
778 * the base address of the PCI core by concatenating:
779 *
780 * [bits] [source]
781 * 31:16 bits [31:16] of the enumeration space address (e.g. 0x18000000)
782 * 15:12 value of BHND_PCI_SRSH_PI from the PCI core's SPROM shadow
783 * 11:0 bits [11:0] of the PCI bus address
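 *
 * For instance, with an enumeration space base of 0x18000000 and a
 * BHND_PCI_SRSH_PI value of 0x2, the resulting PCI core base address
 * would be 0x18002000.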
784 *
785 * For example, on a PCI_V0 device, the following PCI core register offsets are
786 * mapped into BAR0:
787 *
788 * [BAR0 offset] [description] [PCI core offset]
789 * 0x1000-0x17FF sprom shadow 0x800-0xFFF
790 * 0x1800-0x1DFF device registers 0x000-0x5FF
 * 0x1E00-0x1FFF siba config registers   0xE00-0xFFF
792 *
793 * This function checks -- and if necessary, corrects -- the BHND_PCI_SRSH_PI
794 * value in the SPROM shadow.
795 *
 * This workaround must be applied prior to accessing any static register
 * windows that map the PCI core.
798 *
799 * Applies to all PCI and PCIe-G1 core revisions.
800 */
801 static int
bhndb_pci_srsh_pi_war(struct bhndb_pci_softc *sc,
803 struct bhndb_pci_probe *probe)
804 {
805 struct bhnd_core_match md;
806 bhnd_addr_t pci_addr;
807 bhnd_size_t pci_size;
808 bus_size_t srsh_offset;
809 uint16_t srsh_val, pci_val;
810 uint16_t val;
811 int error;
812
813 if ((sc->pci_quirks & BHNDB_PCI_QUIRK_SRSH_WAR) == 0)
814 return (0);
815
816 /* Use an equality match descriptor to look up our PCI core's base
817 * address in the EROM */
818 md = bhnd_core_get_match_desc(&probe->hostb_core);
819 error = bhnd_erom_lookup_core_addr(probe->erom, &md, BHND_PORT_DEVICE,
820 0, 0, NULL, &pci_addr, &pci_size);
821 if (error) {
822 device_printf(sc->dev, "no base address found for the PCI host "
823 "bridge core: %d\n", error);
824 return (error);
825 }
826
827 /* Fetch the SPROM SRSH_PI value */
828 srsh_offset = BHND_PCI_SPROM_SHADOW + BHND_PCI_SRSH_PI_OFFSET;
829 val = bhndb_pci_probe_read(probe, pci_addr, srsh_offset, sizeof(val));
830 srsh_val = (val & BHND_PCI_SRSH_PI_MASK) >> BHND_PCI_SRSH_PI_SHIFT;
831
832 /* If it doesn't match PCI core's base address, update the SPROM
833 * shadow */
834 pci_val = (pci_addr & BHND_PCI_SRSH_PI_ADDR_MASK) >>
835 BHND_PCI_SRSH_PI_ADDR_SHIFT;
836 if (srsh_val != pci_val) {
837 val &= ~BHND_PCI_SRSH_PI_MASK;
838 val |= (pci_val << BHND_PCI_SRSH_PI_SHIFT);
839 bhndb_pci_probe_write(probe, pci_addr, srsh_offset, val,
840 sizeof(val));
841 }
842
843 return (0);
844 }
845
846 static int
bhndb_pci_resume(device_t dev)
848 {
849 struct bhndb_pci_softc *sc;
850 int error;
851
852 sc = device_get_softc(dev);
853
854 /* Enable clocks (if supported by this hardware) */
855 if ((error = bhndb_enable_pci_clocks(sc->dev)))
856 return (error);
857
858 /* Perform resume */
859 return (bhndb_generic_resume(dev));
860 }
861
862 static int
bhndb_pci_suspend(device_t dev)
864 {
865 struct bhndb_pci_softc *sc;
866 int error;
867
868 sc = device_get_softc(dev);
869
870 /* Disable clocks (if supported by this hardware) */
871 if ((error = bhndb_disable_pci_clocks(sc->dev)))
872 return (error);
873
874 /* Perform suspend */
875 return (bhndb_generic_suspend(dev));
876 }
877
878 static int
bhndb_pci_set_window_addr(device_t dev, const struct bhndb_regwin *rw,
880 bhnd_addr_t addr)
881 {
882 struct bhndb_pci_softc *sc = device_get_softc(dev);
883 return (sc->set_regwin(sc->dev, sc->parent, rw, addr));
884 }
885
886 /**
887 * A siba(4) and bcma(4)-compatible bhndb_set_window_addr implementation.
888 *
889 * On siba(4) devices, it's possible that writing a PCI window register may
890 * not succeed; it's necessary to immediately read the configuration register
891 * and retry if not set to the desired value.
892 *
893 * This is not necessary on bcma(4) devices, but other than the overhead of
894 * validating the register, there's no harm in performing the verification.
895 */
896 static int
bhndb_pci_compat_setregwin(device_t dev, device_t pci_dev,
898 const struct bhndb_regwin *rw, bhnd_addr_t addr)
899 {
900 int error;
901 int reg;
902
903 if (rw->win_type != BHNDB_REGWIN_T_DYN)
904 return (ENODEV);
905
906 reg = rw->d.dyn.cfg_offset;
907 for (u_int i = 0; i < BHNDB_PCI_BARCTRL_WRITE_RETRY; i++) {
908 if ((error = bhndb_pci_fast_setregwin(dev, pci_dev, rw, addr)))
909 return (error);
910
911 if (pci_read_config(pci_dev, reg, 4) == addr)
912 return (0);
913
914 DELAY(10);
915 }
916
917 /* Unable to set window */
918 return (ENODEV);
919 }
920
921 /**
922 * A bcma(4)-only bhndb_set_window_addr implementation.
923 */
924 static int
bhndb_pci_fast_setregwin(device_t dev, device_t pci_dev,
926 const struct bhndb_regwin *rw, bhnd_addr_t addr)
927 {
928 /* The PCI bridge core only supports 32-bit addressing, regardless
929 * of the bus' support for 64-bit addressing */
930 if (addr > UINT32_MAX)
931 return (ERANGE);
932
933 switch (rw->win_type) {
934 case BHNDB_REGWIN_T_DYN:
935 /* Addresses must be page aligned */
936 if (addr % rw->win_size != 0)
937 return (EINVAL);
938
939 pci_write_config(pci_dev, rw->d.dyn.cfg_offset, addr, 4);
940 break;
941 default:
942 return (ENODEV);
943 }
944
945 return (0);
946 }
947
948 static int
bhndb_pci_populate_board_info(device_t dev, device_t child,
950 struct bhnd_board_info *info)
951 {
952 struct bhndb_pci_softc *sc;
953
954 sc = device_get_softc(dev);
955
956 /*
957 * On a subset of Apple BCM4360 modules, always prefer the
958 * PCI subdevice to the SPROM-supplied boardtype.
959 *
960 * TODO:
961 *
962 * Broadcom's own drivers implement this override, and then later use
963 * the remapped BCM4360 board type to determine the required
964 * board-specific workarounds.
965 *
966 * Without access to this hardware, it's unclear why this mapping
967 * is done, and we must do the same. If we can survey the hardware
968 * in question, it may be possible to replace this behavior with
969 * explicit references to the SPROM-supplied boardtype(s) in our
970 * quirk definitions.
971 */
972 if (pci_get_subvendor(sc->parent) == PCI_VENDOR_APPLE) {
973 switch (info->board_type) {
974 case BHND_BOARD_BCM94360X29C:
975 case BHND_BOARD_BCM94360X29CP2:
976 case BHND_BOARD_BCM94360X51:
977 case BHND_BOARD_BCM94360X51P2:
978 info->board_type = 0; /* allow override below */
979 break;
980 default:
981 break;
982 }
983 }
984
985 /* If NVRAM did not supply vendor/type/devid info, provide the PCI
986 * subvendor/subdevice/device values. */
987 if (info->board_vendor == 0)
988 info->board_vendor = pci_get_subvendor(sc->parent);
989
990 if (info->board_type == 0)
991 info->board_type = pci_get_subdevice(sc->parent);
992
993 if (info->board_devid == 0)
994 info->board_devid = pci_get_device(sc->parent);
995
996 return (0);
997 }
998
999 /**
1000 * Examine the bridge device @p dev and return the expected host bridge
1001 * device class.
1002 *
1003 * @param dev The bhndb bridge device
1004 */
1005 static bhnd_devclass_t
bhndb_expected_pci_devclass(device_t dev)
1007 {
1008 if (bhndb_is_pcie_attached(dev))
1009 return (BHND_DEVCLASS_PCIE);
1010 else
1011 return (BHND_DEVCLASS_PCI);
1012 }
1013
1014 /**
1015 * Return true if the bridge device @p dev is attached via PCIe,
1016 * false otherwise.
1017 *
1018 * @param dev The bhndb bridge device
1019 */
1020 static bool
bhndb_is_pcie_attached(device_t dev)
1022 {
1023 int reg;
1024
	if (pci_find_cap(device_get_parent(dev), PCIY_EXPRESS, &reg) == 0)
1026 return (true);
1027
1028 return (false);
1029 }
1030
1031 /**
1032 * Enable externally managed clocks, if required.
1033 *
 * Some PCI chipsets (BCM4306, possibly others) do not support the idle
 * low-power clock. Clocking must be bootstrapped at attach/resume by
 * directly adjusting GPIO registers exposed in the PCI config space, and
 * correspondingly, explicitly shut down at detach/suspend.
1039 *
 * @note This function may be safely called prior to device attach (e.g.
1041 * from DEVICE_PROBE).
1042 *
1043 * @param dev The bhndb bridge device
1044 */
1045 static int
bhndb_enable_pci_clocks(device_t dev)
1047 {
1048 device_t pci_dev;
1049 uint32_t gpio_in, gpio_out, gpio_en;
1050 uint32_t gpio_flags;
1051 uint16_t pci_status;
1052
1053 pci_dev = device_get_parent(dev);
1054
1055 /* Only supported and required on PCI devices */
1056 if (bhndb_is_pcie_attached(dev))
1057 return (0);
1058
1059 /* Read state of XTAL pin */
1060 gpio_in = pci_read_config(pci_dev, BHNDB_PCI_GPIO_IN, 4);
1061 if (gpio_in & BHNDB_PCI_GPIO_XTAL_ON)
1062 return (0); /* already enabled */
1063
1064 /* Fetch current config */
1065 gpio_out = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUT, 4);
1066 gpio_en = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, 4);
1067
1068 /* Set PLL_OFF/XTAL_ON pins to HIGH and enable both pins */
1069 gpio_flags = (BHNDB_PCI_GPIO_PLL_OFF|BHNDB_PCI_GPIO_XTAL_ON);
1070 gpio_out |= gpio_flags;
1071 gpio_en |= gpio_flags;
1072
1073 pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4);
1074 pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4);
1075 DELAY(1000);
1076
1077 /* Reset PLL_OFF */
1078 gpio_out &= ~BHNDB_PCI_GPIO_PLL_OFF;
1079 pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4);
1080 DELAY(5000);
1081
1082 /* Clear any PCI 'sent target-abort' flag. */
1083 pci_status = pci_read_config(pci_dev, PCIR_STATUS, 2);
1084 pci_status &= ~PCIM_STATUS_STABORT;
1085 pci_write_config(pci_dev, PCIR_STATUS, pci_status, 2);
1086
1087 return (0);
1088 }
1089
1090 /**
1091 * Disable externally managed clocks, if required.
1092 *
 * This function may be safely called prior to device attach (e.g.
1094 * from DEVICE_PROBE).
1095 *
1096 * @param dev The bhndb bridge device
1097 */
1098 static int
bhndb_disable_pci_clocks(device_t dev)
1100 {
1101 device_t pci_dev;
1102 uint32_t gpio_out, gpio_en;
1103
1104 pci_dev = device_get_parent(dev);
1105
1106 /* Only supported and required on PCI devices */
1107 if (bhndb_is_pcie_attached(dev))
1108 return (0);
1109
1110 /* Fetch current config */
1111 gpio_out = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUT, 4);
1112 gpio_en = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, 4);
1113
1114 /* Set PLL_OFF to HIGH, XTAL_ON to LOW. */
1115 gpio_out &= ~BHNDB_PCI_GPIO_XTAL_ON;
1116 gpio_out |= BHNDB_PCI_GPIO_PLL_OFF;
1117 pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4);
1118
1119 /* Enable both output pins */
1120 gpio_en |= (BHNDB_PCI_GPIO_PLL_OFF|BHNDB_PCI_GPIO_XTAL_ON);
1121 pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4);
1122
1123 return (0);
1124 }
1125
1126 static bhnd_clksrc
bhndb_pci_pwrctl_get_clksrc(device_t dev, device_t child,
1128 bhnd_clock clock)
1129 {
1130 struct bhndb_pci_softc *sc;
1131 uint32_t gpio_out;
1132
1133 sc = device_get_softc(dev);
1134
1135 /* Only supported on PCI devices */
1136 if (bhndb_is_pcie_attached(sc->dev))
1137 return (BHND_CLKSRC_UNKNOWN);
1138
1139 /* Only ILP is supported */
1140 if (clock != BHND_CLOCK_ILP)
1141 return (BHND_CLKSRC_UNKNOWN);
1142
1143 gpio_out = pci_read_config(sc->parent, BHNDB_PCI_GPIO_OUT, 4);
1144 if (gpio_out & BHNDB_PCI_GPIO_SCS)
1145 return (BHND_CLKSRC_PCI);
1146 else
1147 return (BHND_CLKSRC_XTAL);
1148 }
1149
1150 static int
bhndb_pci_pwrctl_gate_clock(device_t dev, device_t child,
1152 bhnd_clock clock)
1153 {
1154 struct bhndb_pci_softc *sc = device_get_softc(dev);
1155
1156 /* Only supported on PCI devices */
1157 if (bhndb_is_pcie_attached(sc->dev))
1158 return (ENODEV);
1159
1160 /* Only HT is supported */
1161 if (clock != BHND_CLOCK_HT)
1162 return (ENXIO);
1163
1164 return (bhndb_disable_pci_clocks(sc->dev));
1165 }
1166
1167 static int
bhndb_pci_pwrctl_ungate_clock(device_t dev, device_t child,
1169 bhnd_clock clock)
1170 {
1171 struct bhndb_pci_softc *sc = device_get_softc(dev);
1172
1173 /* Only supported on PCI devices */
1174 if (bhndb_is_pcie_attached(sc->dev))
1175 return (ENODEV);
1176
1177 /* Only HT is supported */
1178 if (clock != BHND_CLOCK_HT)
1179 return (ENXIO);
1180
1181 return (bhndb_enable_pci_clocks(sc->dev));
1182 }
1183
1184 /**
1185 * BHNDB_MAP_INTR_ISRC()
1186 */
1187 static int
bhndb_pci_map_intr_isrc(device_t dev, struct resource *irq,
1189 struct bhndb_intr_isrc **isrc)
1190 {
1191 struct bhndb_pci_softc *sc = device_get_softc(dev);
1192
1193 /* There's only one bridged interrupt to choose from */
1194 *isrc = sc->isrc;
1195 return (0);
1196 }
1197
1198 /* siba-specific implementation of BHNDB_ROUTE_INTERRUPTS() */
1199 static int
bhndb_pci_route_siba_interrupts(struct bhndb_pci_softc *sc, device_t child)
1201 {
1202 uint32_t sbintvec;
1203 u_int ivec;
1204 int error;
1205
1206 KASSERT(sc->pci_quirks & BHNDB_PCI_QUIRK_SIBA_INTVEC,
1207 ("route_siba_interrupts not supported by this hardware"));
1208
1209 /* Fetch the sbflag# for the child */
1210 if ((error = bhnd_get_intr_ivec(child, 0, &ivec)))
1211 return (error);
1212
1213 if (ivec > (sizeof(sbintvec)*8) - 1 /* aka '31' */) {
1214 /* This should never be an issue in practice */
1215 device_printf(sc->dev, "cannot route interrupts to high "
1216 "sbflag# %u\n", ivec);
1217 return (ENXIO);
1218 }
1219
1220 BHNDB_PCI_LOCK(sc);
1221
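	/* Route the child's backplane interrupt flag (sbflag#) by setting
	 * the corresponding bit in the PCI core's SIBA_CFG0_INTVEC
	 * register */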
1222 sbintvec = bhndb_pci_read_core(sc, SB0_REG_ABS(SIBA_CFG0_INTVEC), 4);
1223 sbintvec |= (1 << ivec);
1224 bhndb_pci_write_core(sc, SB0_REG_ABS(SIBA_CFG0_INTVEC), sbintvec, 4);
1225
1226 BHNDB_PCI_UNLOCK(sc);
1227
1228 return (0);
1229 }
1230
1231 /* BHNDB_ROUTE_INTERRUPTS() */
1232 static int
bhndb_pci_route_interrupts(device_t dev, device_t child)
1234 {
1235 struct bhndb_pci_softc *sc;
1236 struct bhnd_core_info core;
1237 uint32_t core_bit;
1238 uint32_t intmask;
1239
1240 sc = device_get_softc(dev);
1241
1242 if (sc->pci_quirks & BHNDB_PCI_QUIRK_SIBA_INTVEC)
1243 return (bhndb_pci_route_siba_interrupts(sc, child));
1244
1245 core = bhnd_get_core_info(child);
1246 if (core.core_idx > BHNDB_PCI_SBIM_COREIDX_MAX) {
1247 /* This should never be an issue in practice */
1248 device_printf(dev, "cannot route interrupts to high core "
1249 "index %u\n", core.core_idx);
1250 return (ENXIO);
1251 }
1252
1253 BHNDB_PCI_LOCK(sc);
1254
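	/* Forward the child core's interrupt by setting the core's bit
	 * within the SBIM field of the PCI interrupt mask register */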
1255 core_bit = (1<<core.core_idx) << BHNDB_PCI_SBIM_SHIFT;
1256 intmask = pci_read_config(sc->parent, BHNDB_PCI_INT_MASK, 4);
1257 intmask |= core_bit;
1258 pci_write_config(sc->parent, BHNDB_PCI_INT_MASK, intmask, 4);
1259
1260 BHNDB_PCI_UNLOCK(sc);
1261
1262 return (0);
1263 }
1264
1265 /**
1266 * Using the generic PCI bridge hardware configuration, allocate, initialize
1267 * and return a new bhndb_pci probe state instance.
1268 *
1269 * On success, the caller assumes ownership of the returned probe instance, and
1270 * is responsible for releasing this reference using bhndb_pci_probe_free().
1271 *
1272 * @param[out] probe On success, the newly allocated probe instance.
1273 * @param dev The bhndb_pci bridge device.
1274 * @param hostb_devclass The expected device class of the bridge core.
1275 *
1276 * @retval 0 success
1277 * @retval non-zero if allocating the probe state fails, a regular
1278 * unix error code will be returned.
1279 *
1280 * @note This function requires exclusive ownership over allocating and
1281 * configuring host bridge resources, and should only be called prior to
1282 * completion of device attach and full configuration of the bridge.
1283 */
1284 static int
bhndb_pci_probe_alloc(struct bhndb_pci_probe **probe, device_t dev,
1286 bhnd_devclass_t hostb_devclass)
1287 {
1288 struct bhndb_pci_probe *p;
1289 struct bhnd_erom_io *eio;
1290 const struct bhndb_hwcfg *hwcfg;
1291 const struct bhnd_chipid *hint;
1292 device_t parent_dev;
1293 int error;
1294
1295 parent_dev = device_get_parent(dev);
1296 eio = NULL;
1297
1298 p = malloc(sizeof(*p), M_BHND, M_ZERO|M_WAITOK);
1299 p->dev = dev;
1300 p->pci_dev = parent_dev;
1301
1302 /* Our register window mapping state must be initialized at this point,
1303 * as bhndb_pci_eio will begin making calls into
1304 * bhndb_pci_probe_(read|write|get_mapping) */
1305 p->m_win = NULL;
1306 p->m_res = NULL;
1307 p->m_valid = false;
1308
1309 bhndb_pci_eio_init(&p->erom_io, p);
1310 eio = &p->erom_io.eio;
1311
1312 /* Fetch our chipid hint (if any) and generic hardware configuration */
1313 hwcfg = BHNDB_BUS_GET_GENERIC_HWCFG(parent_dev, dev);
1314 hint = BHNDB_BUS_GET_CHIPID(parent_dev, dev);
1315
1316 /* Allocate our host resources */
1317 error = bhndb_alloc_host_resources(&p->hr, dev, parent_dev, hwcfg);
1318 if (error) {
1319 p->hr = NULL;
1320 goto failed;
1321 }
1322
1323 /* Map the first bus core from our bridged bhnd(4) bus */
1324 error = bhnd_erom_io_map(eio, BHND_DEFAULT_CHIPC_ADDR,
1325 BHND_DEFAULT_CORE_SIZE);
1326 if (error)
1327 goto failed;
1328
1329 /* Probe for a usable EROM class, and read the chip identifier */
1330 p->erom_class = bhnd_erom_probe_driver_classes(
1331 device_get_devclass(dev), eio, hint, &p->cid);
1332 if (p->erom_class == NULL) {
1333 device_printf(dev, "device enumeration unsupported; no "
1334 "compatible driver found\n");
1335
1336 error = ENXIO;
1337 goto failed;
1338 }
1339
1340 /* Allocate EROM parser */
1341 p->erom = bhnd_erom_alloc(p->erom_class, &p->cid, eio);
1342 if (p->erom == NULL) {
1343 device_printf(dev, "failed to allocate device enumeration "
1344 "table parser\n");
1345 error = ENXIO;
1346 goto failed;
1347 }
1348
1349 /* The EROM I/O instance is now owned by our EROM parser */
1350 eio = NULL;
1351
1352 /* Read the full core table */
1353 error = bhnd_erom_get_core_table(p->erom, &p->cores, &p->ncores);
1354 if (error) {
1355 device_printf(p->dev, "error fetching core table: %d\n",
1356 error);
1357
1358 p->cores = NULL;
1359 goto failed;
1360 }
1361
1362 /* Identify the host bridge core */
1363 error = bhndb_find_hostb_core(p->cores, p->ncores, hostb_devclass,
1364 &p->hostb_core);
1365 if (error) {
1366 device_printf(dev, "failed to identify the host bridge "
1367 "core: %d\n", error);
1368
1369 goto failed;
1370 }
1371
1372 *probe = p;
1373 return (0);
1374
1375 failed:
1376 if (eio != NULL) {
1377 KASSERT(p->erom == NULL, ("I/O instance will be freed by "
1378 "its owning parser"));
1379
1380 bhnd_erom_io_fini(eio);
1381 }
1382
1383 if (p->erom != NULL) {
1384 if (p->cores != NULL)
1385 bhnd_erom_free_core_table(p->erom, p->cores);
1386
1387 bhnd_erom_free(p->erom);
1388 } else {
1389 KASSERT(p->cores == NULL, ("cannot free erom-owned core table "
1390 "without erom reference"));
1391 }
1392
1393 if (p->hr != NULL)
1394 bhndb_release_host_resources(p->hr);
1395
1396 free(p, M_BHND);
1397
1398 return (error);
1399 }
1400
1401 /**
1402 * Free the given @p probe instance and any associated host bridge resources.
1403 */
1404 static void
bhndb_pci_probe_free(struct bhndb_pci_probe *probe)
1406 {
1407 bhnd_erom_free_core_table(probe->erom, probe->cores);
1408 bhnd_erom_free(probe->erom);
1409 bhndb_release_host_resources(probe->hr);
1410 free(probe, M_BHND);
1411 }
1412
1413 /**
1414 * Return a copy of probed core table from @p probe.
1415 *
1416 * @param probe The probe instance.
1417 * @param[out] cores On success, a copy of the probed core table. The
 * caller is responsible for freeing this table via
1419 * bhndb_pci_probe_free_core_table().
1420 * @param[out] ncores On success, the number of cores found in
1421 * @p cores.
1422 *
1423 * @retval 0 success
1424 * @retval non-zero if enumerating the bridged bhnd(4) bus fails, a regular
1425 * unix error code will be returned.
1426 */
1427 static int
bhndb_pci_probe_copy_core_table(struct bhndb_pci_probe *probe,
1429 struct bhnd_core_info **cores, u_int *ncores)
1430 {
1431 size_t len = sizeof(**cores) * probe->ncores;
1432
1433 *cores = malloc(len, M_BHND, M_WAITOK);
1434 memcpy(*cores, probe->cores, len);
1435
1436 *ncores = probe->ncores;
1437
1438 return (0);
1439 }
1440
1441 /**
1442 * Free a core table previously returned by bhndb_pci_probe_copy_core_table().
1443 *
1444 * @param cores The core table to be freed.
1445 */
1446 static void
bhndb_pci_probe_free_core_table(struct bhnd_core_info *cores)
1448 {
1449 free(cores, M_BHND);
1450 }
1451
1452 /**
1453 * Return true if @p addr and @p size are mapped by the dynamic register window
1454 * backing @p probe.
1455 */
1456 static bool
bhndb_pci_probe_has_mapping(struct bhndb_pci_probe *probe, bhnd_addr_t addr,
1458 bhnd_size_t size)
1459 {
1460 if (!probe->m_valid)
1461 return (false);
1462
1463 KASSERT(probe->m_win != NULL, ("missing register window"));
1464 KASSERT(probe->m_res != NULL, ("missing regwin resource"));
1465 KASSERT(probe->m_win->win_type == BHNDB_REGWIN_T_DYN,
1466 ("unexpected window type %d", probe->m_win->win_type));
1467
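	/* The requested range must fall entirely within the window's
	 * currently mapped [m_target, m_target + win_size) region */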
1468 if (addr < probe->m_target)
1469 return (false);
1470
1471 if (addr >= probe->m_target + probe->m_win->win_size)
1472 return (false);
1473
1474 if ((probe->m_target + probe->m_win->win_size) - addr < size)
1475 return (false);
1476
1477 return (true);
1478 }
1479
1480 /**
1481 * Attempt to adjust the dynamic register window backing @p probe to permit
1482 * accessing @p size bytes at @p addr.
1483 *
1484 * @param probe The bhndb_pci probe state to be modified.
 * @param addr The address at which @p size bytes will be mapped.
1486 * @param size The number of bytes to be mapped.
1487 * @param[out] res On success, will be set to the host resource
1488 * mapping @p size bytes at @p addr.
 * @param[out] res_offset On success, will be set to the offset of @p addr
1490 * within @p res.
1491 *
1492 * @retval 0 success
1493 * @retval non-zero if an error occurs adjusting the backing dynamic
1494 * register window.
1495 */
1496 static int
bhndb_pci_probe_map(struct bhndb_pci_probe *probe, bhnd_addr_t addr,
1498 bhnd_size_t offset, bhnd_size_t size, struct resource **res,
1499 bus_size_t *res_offset)
1500 {
1501 const struct bhndb_regwin *regwin, *regwin_table;
1502 struct resource *regwin_res;
1503 bhnd_addr_t target;
1504 int error;
1505
1506 /* Determine the absolute address */
1507 if (BHND_SIZE_MAX - offset < addr) {
1508 device_printf(probe->dev, "invalid offset %#jx+%#jx\n", addr,
1509 offset);
1510 return (ENXIO);
1511 }
1512
1513 addr += offset;
1514
1515 /* Can we use the existing mapping? */
1516 if (bhndb_pci_probe_has_mapping(probe, addr, size)) {
1517 *res = probe->m_res;
1518 *res_offset = (addr - probe->m_target) +
1519 probe->m_win->win_offset;
1520
1521 return (0);
1522 }
1523
1524 /* Locate a useable dynamic register window */
1525 regwin_table = probe->hr->cfg->register_windows;
1526 regwin = bhndb_regwin_find_type(regwin_table,
1527 BHNDB_REGWIN_T_DYN, size);
1528 if (regwin == NULL) {
1529 device_printf(probe->dev, "unable to map %#jx+%#jx; no "
1530 "usable dynamic register window found\n", addr,
1531 size);
1532 return (ENXIO);
1533 }
1534
1535 /* Locate the host resource mapping our register window */
1536 regwin_res = bhndb_host_resource_for_regwin(probe->hr, regwin);
1537 if (regwin_res == NULL) {
1538 device_printf(probe->dev, "unable to map %#jx+%#jx; no "
1539 "usable register resource found\n", addr, size);
1540 return (ENXIO);
1541 }
1542
1543 /* Page-align the target address */
1544 target = addr - (addr % regwin->win_size);
1545
1546 /* Configure the register window */
1547 error = bhndb_pci_compat_setregwin(probe->dev, probe->pci_dev,
1548 regwin, target);
1549 if (error) {
1550 device_printf(probe->dev, "failed to configure dynamic "
1551 "register window: %d\n", error);
1552 return (error);
1553 }
1554
1555 /* Update our mapping state */
1556 probe->m_win = regwin;
1557 probe->m_res = regwin_res;
1558 probe->m_addr = addr;
1559 probe->m_size = size;
1560 probe->m_target = target;
1561 probe->m_valid = true;
1562
1563 *res = regwin_res;
1564 *res_offset = (addr - target) + regwin->win_offset;
1565
1566 return (0);
1567 }
1568
1569 /**
1570 * Write a data item to the bridged address space at the given @p offset from
1571 * @p addr.
1572 *
1573 * A dynamic register window will be used to map @p addr.
1574 *
1575 * @param probe The bhndb_pci probe state to be used to perform the
1576 * write.
1577 * @param addr The base address.
1578 * @param offset The offset from @p addr at which @p value will be
1579 * written.
1580 * @param value The data item to be written.
1581 * @param width The data item width (1, 2, or 4 bytes).
1582 */
1583 static void
bhndb_pci_probe_write(struct bhndb_pci_probe *probe, bhnd_addr_t addr,
1585 bhnd_size_t offset, uint32_t value, u_int width)
1586 {
1587 struct resource *r;
1588 bus_size_t res_offset;
1589 int error;
1590
1591 /* Map the target address */
1592 error = bhndb_pci_probe_map(probe, addr, offset, width, &r,
1593 &res_offset);
1594 if (error) {
1595 device_printf(probe->dev, "error mapping %#jx+%#jx for "
1596 "writing: %d\n", addr, offset, error);
1597 return;
1598 }
1599
1600 /* Perform write */
1601 switch (width) {
1602 case 1:
1603 return (bus_write_1(r, res_offset, value));
1604 case 2:
1605 return (bus_write_2(r, res_offset, value));
1606 case 4:
1607 return (bus_write_4(r, res_offset, value));
1608 default:
1609 panic("unsupported width: %u", width);
1610 }
1611 }
1612
1613 /**
1614 * Read a data item from the bridged address space at the given @p offset
1615 * from @p addr.
1616 *
1617 * A dynamic register window will be used to map @p addr.
1618 *
1619 * @param probe The bhndb_pci probe state to be used to perform the
1620 * read.
1621 * @param addr The base address.
1622 * @param offset The offset from @p addr at which to read a data item of
1623 * @p width bytes.
1624 * @param width Item width (1, 2, or 4 bytes).
1625 */
1626 static uint32_t
bhndb_pci_probe_read(struct bhndb_pci_probe *probe, bhnd_addr_t addr,
1628 bhnd_size_t offset, u_int width)
1629 {
1630 struct resource *r;
1631 bus_size_t res_offset;
1632 int error;
1633
1634 /* Map the target address */
1635 error = bhndb_pci_probe_map(probe, addr, offset, width, &r,
1636 &res_offset);
1637 if (error) {
1638 device_printf(probe->dev, "error mapping %#jx+%#jx for "
1639 "reading: %d\n", addr, offset, error);
1640 return (UINT32_MAX);
1641 }
1642
1643 /* Perform read */
1644 switch (width) {
1645 case 1:
1646 return (bus_read_1(r, res_offset));
1647 case 2:
1648 return (bus_read_2(r, res_offset));
1649 case 4:
1650 return (bus_read_4(r, res_offset));
1651 default:
1652 panic("unsupported width: %u", width);
1653 }
1654 }
1655
1656 /**
1657 * Initialize a new bhndb PCI bridge EROM I/O instance. All I/O will be
1658 * performed using @p probe.
1659 *
1660 * @param pio The instance to be initialized.
1661 * @param probe The bhndb_pci probe state to be used to perform all
1662 * I/O.
1663 */
1664 static void
bhndb_pci_eio_init(struct bhndb_pci_eio *pio, struct bhndb_pci_probe *probe)
1666 {
1667 memset(pio, 0, sizeof(*pio));
1668
1669 pio->eio.map = bhndb_pci_eio_map;
1670 pio->eio.tell = bhndb_pci_eio_tell;
1671 pio->eio.read = bhndb_pci_eio_read;
1672 pio->eio.fini = NULL;
1673
1674 pio->mapped = false;
1675 pio->addr = 0;
1676 pio->size = 0;
1677 pio->probe = probe;
1678 }
1679
1680 /* bhnd_erom_io_map() implementation */
1681 static int
bhndb_pci_eio_map(struct bhnd_erom_io *eio, bhnd_addr_t addr,
1683 bhnd_size_t size)
1684 {
1685 struct bhndb_pci_eio *pio = (struct bhndb_pci_eio *)eio;
1686
1687 if (BHND_ADDR_MAX - addr < size)
1688 return (EINVAL); /* addr+size would overflow */
1689
1690 pio->addr = addr;
1691 pio->size = size;
1692 pio->mapped = true;
1693
1694 return (0);
1695 }
1696
1697 /* bhnd_erom_io_tell() implementation */
1698 static int
bhndb_pci_eio_tell(struct bhnd_erom_io *eio, bhnd_addr_t *addr,
1700 bhnd_size_t *size)
1701 {
1702 struct bhndb_pci_eio *pio = (struct bhndb_pci_eio *)eio;
1703
1704 if (!pio->mapped)
1705 return (ENXIO);
1706
1707 *addr = pio->addr;
1708 *size = pio->size;
1709
1710 return (0);
1711 }
1712
1713 /* bhnd_erom_io_read() implementation */
1714 static uint32_t
bhndb_pci_eio_read(struct bhnd_erom_io *eio, bhnd_size_t offset, u_int width)
1716 {
1717 struct bhndb_pci_eio *pio = (struct bhndb_pci_eio *)eio;
1718
1719 /* Must have a valid mapping */
1720 if (!pio->mapped)
1721 panic("no active mapping");
1722
1723 /* The requested subrange must fall within the existing mapped range */
1724 if (offset > pio->size ||
1725 width > pio->size ||
1726 pio->size - offset < width)
1727 {
1728 panic("invalid offset %#jx", offset);
1729 }
1730
1731 return (bhndb_pci_probe_read(pio->probe, pio->addr, offset, width));
1732 }
1733
1734 static device_method_t bhndb_pci_methods[] = {
1735 /* Device interface */
1736 DEVMETHOD(device_probe, bhndb_pci_probe),
1737 DEVMETHOD(device_attach, bhndb_pci_attach),
1738 DEVMETHOD(device_resume, bhndb_pci_resume),
1739 DEVMETHOD(device_suspend, bhndb_pci_suspend),
1740 DEVMETHOD(device_detach, bhndb_pci_detach),
1741
1742 /* BHNDB interface */
1743 DEVMETHOD(bhndb_set_window_addr, bhndb_pci_set_window_addr),
1744 DEVMETHOD(bhndb_populate_board_info, bhndb_pci_populate_board_info),
1745 DEVMETHOD(bhndb_map_intr_isrc, bhndb_pci_map_intr_isrc),
1746 DEVMETHOD(bhndb_route_interrupts, bhndb_pci_route_interrupts),
1747
1748 /* BHND PWRCTL hostb interface */
1749 DEVMETHOD(bhnd_pwrctl_hostb_get_clksrc, bhndb_pci_pwrctl_get_clksrc),
1750 DEVMETHOD(bhnd_pwrctl_hostb_gate_clock, bhndb_pci_pwrctl_gate_clock),
1751 DEVMETHOD(bhnd_pwrctl_hostb_ungate_clock, bhndb_pci_pwrctl_ungate_clock),
1752
1753 DEVMETHOD_END
1754 };
1755
1756 DEFINE_CLASS_1(bhndb, bhndb_pci_driver, bhndb_pci_methods,
1757 sizeof(struct bhndb_pci_softc), bhndb_driver);
1758
1759 MODULE_VERSION(bhndb_pci, 1);
1760 MODULE_DEPEND(bhndb_pci, bhnd_pci_hostb, 1, 1, 1);
1761 MODULE_DEPEND(bhndb_pci, pci, 1, 1, 1);
1762 MODULE_DEPEND(bhndb_pci, bhndb, 1, 1, 1);
1763 MODULE_DEPEND(bhndb_pci, bhnd, 1, 1, 1);
1764