xref: /freebsd/sys/dev/bhnd/bhndb/bhndb_pci.c (revision d8a0fe102c0cfdfcd5b818f850eff09d8536c9bc)
1 /*-
2  * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
3  * Copyright (c) 2017 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Landon Fuller
7  * under sponsorship from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer,
14  *    without modification.
15  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
17  *    redistribution must be conditioned upon including a substantially
18  *    similar Disclaimer requirement for further binary redistribution.
19  *
20  * NO WARRANTY
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
24  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
25  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
26  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
29  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGES.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 /*
38  * PCI-specific implementation for the BHNDB bridge driver.
39  *
40  * Provides support for bridging from a PCI parent bus to a BHND-compatible
41  * bus (e.g. bcma or siba) via a Broadcom PCI core configured in endpoint
42  * mode.
43  *
44  * This driver handles all initial generic host-level PCI interactions with a
45  * PCI/PCIe bridge core operating in endpoint mode. Once the bridged bhnd(4)
46  * bus has been enumerated, this driver works in tandem with a core-specific
47  * bhnd_pci_hostb driver to manage the PCI core.
48  */
49 
50 #include <sys/param.h>
51 #include <sys/kernel.h>
52 #include <sys/bus.h>
53 #include <sys/limits.h>
54 #include <sys/malloc.h>
55 #include <sys/module.h>
56 #include <sys/systm.h>
57 
58 #include <dev/pci/pcireg.h>
59 #include <dev/pci/pcivar.h>
60 
61 #include <dev/bhnd/bhnd.h>
62 #include <dev/bhnd/bhndreg.h>
63 
64 #include <dev/bhnd/bhnd_erom.h>
65 #include <dev/bhnd/bhnd_eromvar.h>
66 
67 #include <dev/bhnd/siba/sibareg.h>
68 
69 #include <dev/bhnd/cores/pci/bhnd_pcireg.h>
70 
71 #include "bhnd_pwrctl_hostb_if.h"
72 
73 #include "bhndb_pcireg.h"
74 #include "bhndb_pcivar.h"
75 #include "bhndb_private.h"
76 
77 struct bhndb_pci_eio;
78 
79 static int		bhndb_pci_alloc_msi(struct bhndb_pci_softc *sc,
80 			    int *msi_count);
81 static int		bhndb_pci_read_core_table(device_t dev,
82 			    struct bhnd_chipid *chipid,
83 			    struct bhnd_core_info **cores, u_int *ncores,
84 			    bhnd_erom_class_t **eromcls);
85 static int		bhndb_pci_add_children(struct bhndb_pci_softc *sc);
86 
87 static bhnd_devclass_t	bhndb_expected_pci_devclass(device_t dev);
88 static bool		bhndb_is_pcie_attached(device_t dev);
89 
90 static int		bhndb_enable_pci_clocks(device_t dev);
91 static int		bhndb_disable_pci_clocks(device_t dev);
92 
93 static int		bhndb_pci_compat_setregwin(device_t dev,
94 			    device_t pci_dev, const struct bhndb_regwin *,
95 			    bhnd_addr_t);
96 static int		bhndb_pci_fast_setregwin(device_t dev, device_t pci_dev,
97 			    const struct bhndb_regwin *, bhnd_addr_t);
98 
99 static void		bhndb_pci_write_core(struct bhndb_pci_softc *sc,
100 			    bus_size_t offset, uint32_t value, u_int width);
101 static uint32_t		bhndb_pci_read_core(struct bhndb_pci_softc *sc,
102 			    bus_size_t offset, u_int width);
103 
104 static void		bhndb_init_sromless_pci_config(
105 			    struct bhndb_pci_softc *sc);
106 
107 static bus_addr_t	bhndb_pci_sprom_addr(struct bhndb_pci_softc *sc);
108 static bus_size_t	bhndb_pci_sprom_size(struct bhndb_pci_softc *sc);
109 
110 static int		bhndb_pci_eio_init(struct bhndb_pci_eio *pio,
111 			    device_t dev, device_t pci_dev,
112 			    struct bhndb_host_resources *hr);
113 static int		bhndb_pci_eio_map(struct bhnd_erom_io *eio,
114 			    bhnd_addr_t addr, bhnd_size_t size);
115 static uint32_t		bhndb_pci_eio_read(struct bhnd_erom_io *eio,
116 			    bhnd_size_t offset, u_int width);
117 
118 #define	BHNDB_PCI_MSI_COUNT	1
119 
120 static struct bhndb_pci_quirk	bhndb_pci_quirks[];
121 static struct bhndb_pci_quirk	bhndb_pcie_quirks[];
122 static struct bhndb_pci_quirk	bhndb_pcie2_quirks[];
123 
124 static struct bhndb_pci_core bhndb_pci_cores[] = {
125 	BHNDB_PCI_CORE(PCI,	BHND_PCI_SRSH_PI_OFFSET,	bhndb_pci_quirks),
126 	BHNDB_PCI_CORE(PCIE,	BHND_PCIE_SRSH_PI_OFFSET,	bhndb_pcie_quirks),
127 	BHNDB_PCI_CORE(PCIE2,	BHND_PCIE_SRSH_PI_OFFSET,	bhndb_pcie2_quirks),
128 	BHNDB_PCI_CORE_END
129 };
130 
131 /* bhndb_pci erom I/O instance state */
132 struct bhndb_pci_eio {
133 	struct bhnd_erom_io		 eio;
134 	device_t			 dev;		/**< bridge device */
135 	device_t			 pci_dev;	/**< parent PCI device */
136 	struct bhndb_host_resources	*hr;		/**< borrowed reference to host resources */
137 	const struct bhndb_regwin	*win;		/**< mapped register window, or NULL */
138 	struct resource			*res;		/**< resource containing the register window, or NULL if no window mapped */
139 	bhnd_addr_t			 res_target;	/**< current target address (if mapped) */
140 	bool				 mapped;	/**< true if a valid mapping exists, false otherwise */
141 	bhnd_addr_t			 addr;		/**< mapped address */
142 	bhnd_size_t			 size;		/**< mapped size */
143 };
144 
145 static struct bhndb_pci_quirk bhndb_pci_quirks[] = {
146 	/* Backplane interrupt flags must be routed via siba-specific
147 	 * SIBA_CFG0_INTVEC configuration register; the BHNDB_PCI_INT_MASK
148 	 * PCI configuration register is unsupported. */
149 	{{ BHND_MATCH_CHIP_TYPE		(SIBA) },
150 	 { BHND_MATCH_CORE_REV		(HWREV_LTE(5)) },
151 		BHNDB_PCI_QUIRK_SIBA_INTVEC },
152 
153 	/* All PCI core revisions require the SRSH work-around */
154 	BHNDB_PCI_QUIRK(HWREV_ANY,	BHNDB_PCI_QUIRK_SRSH_WAR),
155 	BHNDB_PCI_QUIRK_END
156 };
157 
158 static struct bhndb_pci_quirk bhndb_pcie_quirks[] = {
159 	/* All PCIe-G1 core revisions require the SRSH work-around */
160 	BHNDB_PCI_QUIRK(HWREV_ANY,	BHNDB_PCI_QUIRK_SRSH_WAR),
161 	BHNDB_PCI_QUIRK_END
162 };
163 
164 static struct bhndb_pci_quirk bhndb_pcie2_quirks[] = {
165 	/* All PCIe-G2 core revisions require the SRSH work-around */
166 	BHNDB_PCI_QUIRK(HWREV_ANY,	BHNDB_PCI_QUIRK_SRSH_WAR),
167 	BHNDB_PCI_QUIRK_END
168 };
169 
170 
171 /**
172  * Return the device table entry for @p ci, or NULL if none.
173  */
174 static struct bhndb_pci_core *
175 bhndb_pci_find_core(struct bhnd_core_info *ci)
176 {
177 	for (size_t i = 0; !BHNDB_PCI_IS_CORE_END(&bhndb_pci_cores[i]); i++) {
178 		struct bhndb_pci_core *entry = &bhndb_pci_cores[i];
179 
180 		if (bhnd_core_matches(ci, &entry->match))
181 			return (entry);
182 	}
183 
184 	return (NULL);
185 }
186 
187 /**
188  * Return all quirk flags for the given @p cid and @p ci.
189  */
190 static uint32_t
191 bhndb_pci_get_core_quirks(struct bhnd_chipid *cid, struct bhnd_core_info *ci)
192 {
193 	struct bhndb_pci_core	*entry;
194 	struct bhndb_pci_quirk	*qtable;
195 	uint32_t		 quirks;
196 
197 	quirks = 0;
198 
199 	/* No core entry? */
200 	if ((entry = bhndb_pci_find_core(ci)) == NULL)
201 		return (quirks);
202 
203 	/* No quirks? */
204 	if ((qtable = entry->quirks) == NULL)
205 		return (quirks);
206 
207 	for (size_t i = 0; !BHNDB_PCI_IS_QUIRK_END(&qtable[i]); i++) {
208 		struct bhndb_pci_quirk *q = &qtable[i];
209 
210 		if (!bhnd_chip_matches(cid, &q->chip_desc))
211 			continue;
212 
213 		if (!bhnd_core_matches(ci, &q->core_desc))
214 			continue;
215 
216 		quirks |= q->quirks;
217 	}
218 
219 	return (quirks);
220 }
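
/*
 * For example (mirroring bhndb_pci_attach() and the interrupt routing code
 * below), the accumulated quirk flags are cached in the softc and then
 * consulted with simple bitwise tests:
 *
 *	sc->pci_quirks = bhndb_pci_get_core_quirks(&cid, &hostb_core);
 *	...
 *	if (sc->pci_quirks & BHNDB_PCI_QUIRK_SIBA_INTVEC)
 *		return (bhndb_pci_route_siba_interrupts(sc, child));
 */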
221 
222 /**
223  * Default bhndb_pci implementation of device_probe().
224  *
225  * Verifies that the parent is a PCI/PCIe device.
226  */
227 static int
228 bhndb_pci_probe(device_t dev)
229 {
230 	struct bhnd_chipid	 cid;
231 	struct bhnd_core_info	*cores, hostb_core;
232 	struct bhndb_pci_core	*entry;
233 	bhnd_devclass_t		 hostb_devclass;
234 	u_int			 ncores;
235 	device_t		 parent;
236 	devclass_t		 parent_bus, pci;
237 	int			 error;
238 
239 	cores = NULL;
240 
241 	/* Our parent must be a PCI/PCIe device. */
242 	pci = devclass_find("pci");
243 	parent = device_get_parent(dev);
244 	parent_bus = device_get_devclass(device_get_parent(parent));
245 
246 	if (parent_bus != pci)
247 		return (ENXIO);
248 
249 	/* Enable clocks */
250 	if ((error = bhndb_enable_pci_clocks(dev)))
251 		return (error);
252 
253 	/* Identify the chip and enumerate the bridged cores */
254 	error = bhndb_pci_read_core_table(dev, &cid, &cores, &ncores, NULL);
255 	if (error)
256 		goto cleanup;
257 
258 	/* Search our core table for the host bridge core */
259 	hostb_devclass = bhndb_expected_pci_devclass(dev);
260 	error = bhndb_find_hostb_core(cores, ncores, hostb_devclass,
261 	    &hostb_core);
262 	if (error)
263 		goto cleanup;
264 
265 	/* Look for a matching core table entry */
266 	if ((entry = bhndb_pci_find_core(&hostb_core)) == NULL) {
267 		error = ENXIO;
268 		goto cleanup;
269 	}
270 
271 	device_set_desc(dev, "PCI-BHND bridge");
272 
273 	/* fall-through */
274 	error = BUS_PROBE_DEFAULT;
275 
276 cleanup:
277 	bhndb_disable_pci_clocks(dev);
278 	if (cores != NULL)
279 		free(cores, M_BHND);
280 
281 	return (error);
282 }
283 
284 /**
285  * Attempt to allocate MSI interrupts, returning the count in @p msi_count
286  * on success.
287  */
288 static int
289 bhndb_pci_alloc_msi(struct bhndb_pci_softc *sc, int *msi_count)
290 {
291 	int error, count;
292 
293 	/* Is MSI available? */
294 	if (pci_msi_count(sc->parent) < BHNDB_PCI_MSI_COUNT)
295 		return (ENXIO);
296 
297 	/* Allocate expected message count */
298 	count = BHNDB_PCI_MSI_COUNT;
299 	if ((error = pci_alloc_msi(sc->parent, &count))) {
300 		device_printf(sc->dev, "failed to allocate MSI interrupts: "
301 		    "%d\n", error);
302 
303 		return (error);
304 	}
305 
306 	if (count < BHNDB_PCI_MSI_COUNT) {
307 		pci_release_msi(sc->parent);
308 		return (ENXIO);
309 	}
310 
311 	*msi_count = count;
312 	return (0);
313 }
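
/*
 * If MSI allocation fails, bhndb_pci_attach() falls back to a shared INTx
 * interrupt (SYS_RES_IRQ rid 0); when it succeeds, the single MSI message
 * uses rid 1, matching the resource IDs assigned by the PCI bus driver.
 */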
314 
315 static int
316 bhndb_pci_attach(device_t dev)
317 {
318 	struct bhndb_pci_softc	*sc;
319 	struct bhnd_chipid	 cid;
320 	struct bhnd_core_info	*cores, hostb_core;
321 	bhnd_erom_class_t	*erom_class;
322 	u_int			 ncores;
323 	int			 irq_rid;
324 	int			 error;
325 
326 	sc = device_get_softc(dev);
327 	sc->dev = dev;
328 	sc->parent = device_get_parent(dev);
329 	sc->pci_devclass = bhndb_expected_pci_devclass(dev);
330 	sc->pci_quirks = 0;
331 	sc->set_regwin = NULL;
332 
333 	BHNDB_PCI_LOCK_INIT(sc);
334 
335 	cores = NULL;
336 
337 	/* Enable PCI bus mastering */
338 	pci_enable_busmaster(sc->parent);
339 
340 	/* Set up PCI interrupt handling */
341 	if (bhndb_pci_alloc_msi(sc, &sc->msi_count) == 0) {
342 		/* MSI uses resource IDs starting at 1 */
343 		irq_rid = 1;
344 
345 		device_printf(dev, "Using MSI interrupts on %s\n",
346 		    device_get_nameunit(sc->parent));
347 	} else {
348 		sc->msi_count = 0;
349 		irq_rid = 0;
350 
351 		device_printf(dev, "Using INTx interrupts on %s\n",
352 		    device_get_nameunit(sc->parent));
353 	}
354 
355 	sc->isrc = bhndb_alloc_intr_isrc(sc->parent, irq_rid, 0, RM_MAX_END, 1,
356 	    RF_SHAREABLE | RF_ACTIVE);
357 	if (sc->isrc == NULL) {
358 		device_printf(sc->dev, "failed to allocate interrupt "
359 		    "resource\n");
360 		error = ENXIO;
361 		goto cleanup;
362 	}
363 
364 	/* Enable clocks (if required by this hardware) */
365 	if ((error = bhndb_enable_pci_clocks(sc->dev)))
366 		goto cleanup;
367 
368 	/* Identify the chip and enumerate the bridged cores */
369 	error = bhndb_pci_read_core_table(dev, &cid, &cores, &ncores,
370 	    &erom_class);
371 	if (error)
372 		goto cleanup;
373 
374 	/* Select the appropriate register window handler */
375 	if (cid.chip_type == BHND_CHIPTYPE_SIBA) {
376 		sc->set_regwin = bhndb_pci_compat_setregwin;
377 	} else {
378 		sc->set_regwin = bhndb_pci_fast_setregwin;
379 	}
380 
381 	/* Determine our host bridge core and populate our quirk flags */
382 	error = bhndb_find_hostb_core(cores, ncores, sc->pci_devclass,
383 	    &hostb_core);
384 	if (error)
385 		goto cleanup;
386 
387 	sc->pci_quirks = bhndb_pci_get_core_quirks(&cid, &hostb_core);
388 
389 	/* Perform bridge attach */
390 	error = bhndb_attach(dev, &cid, cores, ncores, &hostb_core, erom_class);
391 	if (error)
392 		goto cleanup;
393 
394 	/* Fix-up power on defaults for SROM-less devices. */
395 	bhndb_init_sromless_pci_config(sc);
396 
397 	/* Add any additional child devices */
398 	if ((error = bhndb_pci_add_children(sc)))
399 		goto cleanup;
400 
401 	/* Probe and attach our children */
402 	if ((error = bus_generic_attach(dev)))
403 		goto cleanup;
404 
405 	free(cores, M_BHND);
406 
407 	return (0);
408 
409 cleanup:
410 	device_delete_children(dev);
411 	bhndb_disable_pci_clocks(sc->dev);
412 
413 	if (sc->isrc != NULL)
414 		bhndb_free_intr_isrc(sc->isrc);
415 
416 	if (sc->msi_count > 0)
417 		pci_release_msi(sc->parent);
418 
419 	if (cores != NULL)
420 		free(cores, M_BHND);
421 
422 	pci_disable_busmaster(sc->parent);
423 
424 	BHNDB_PCI_LOCK_DESTROY(sc);
425 
426 	return (error);
427 }
428 
429 static int
430 bhndb_pci_detach(device_t dev)
431 {
432 	struct bhndb_pci_softc	*sc;
433 	int			 error;
434 
435 	sc = device_get_softc(dev);
436 
437 	/* Attempt to detach our children */
438 	if ((error = bus_generic_detach(dev)))
439 		return (error);
440 
441 	/* Perform generic bridge detach */
442 	if ((error = bhndb_generic_detach(dev)))
443 		return (error);
444 
445 	/* Disable clocks (if required by this hardware) */
446 	if ((error = bhndb_disable_pci_clocks(sc->dev)))
447 		return (error);
448 
449 	/* Free our interrupt resources */
450 	bhndb_free_intr_isrc(sc->isrc);
451 
452 	/* Release MSI interrupts */
453 	if (sc->msi_count > 0)
454 		pci_release_msi(sc->parent);
455 
456 	/* Disable PCI bus mastering */
457 	pci_disable_busmaster(sc->parent);
458 
459 	BHNDB_PCI_LOCK_DESTROY(sc);
460 
461 	return (0);
462 }
463 
464 /**
465  * Use the generic PCI bridge hardware configuration to enumerate the bridged
466  * bhnd(4) bus' core table.
467  *
468  * @note This function may be safely called prior to device attach (e.g.
469  * from DEVICE_PROBE).
470  * @note This function requires exclusive ownership over allocating and
471  * configuring host bridge resources, and should only be called prior to
472  * completion of device attach and full configuration of the bridge.
473  *
474  * @param	dev		The bhndb_pci bridge device.
475  * @param[out]	chipid		On success, the parsed chip identification.
476  * @param[out]	cores		On success, the enumerated core table. The
477  *				caller is responsible for freeing this table via
478  *				free(9), using the M_BHND malloc type.
479  * @param[out]	ncores		On success, the number of cores found in
480  *				@p cores.
481  * @param[out]	eromcls		On success, a pointer to the erom class used to
482  *				parse the device enumeration table. This
483  *				argument may be NULL if the class is not
484  *				desired.
485  *
486  * @retval 0		success
487  * @retval non-zero	if enumerating the bridged bhnd(4) bus fails, a regular
488  * 			unix error code will be returned.
489  */
490 static int
491 bhndb_pci_read_core_table(device_t dev, struct bhnd_chipid *chipid,
492     struct bhnd_core_info **cores, u_int *ncores,
493     bhnd_erom_class_t **eromcls)
494 {
495 	const struct bhndb_hwcfg	*cfg;
496 	struct bhndb_host_resources	*hr;
497 	struct bhndb_pci_eio		 pio;
498 	struct bhnd_core_info		*erom_cores;
499 	const struct bhnd_chipid	*hint;
500 	struct bhnd_chipid		 cid;
501 	bhnd_erom_class_t		*erom_class;
502 	bhnd_erom_t			*erom;
503 	device_t			 parent_dev;
504 	u_int				 erom_ncores;
505 	int				 error;
506 
507 	parent_dev = device_get_parent(dev);
508 	erom = NULL;
509 	erom_cores = NULL;
510 
511 	/* Fetch our chipid hint (if any) and generic hardware configuration */
512 	cfg = BHNDB_BUS_GET_GENERIC_HWCFG(parent_dev, dev);
513 	hint = BHNDB_BUS_GET_CHIPID(parent_dev, dev);
514 
515 	/* Allocate our host resources */
516 	if ((error = bhndb_alloc_host_resources(&hr, dev, parent_dev, cfg)))
517 		return (error);
518 
519 	/* Initialize our erom I/O state */
520 	if ((error = bhndb_pci_eio_init(&pio, dev, parent_dev, hr)))
521 		goto failed;
522 
523 	/* Map the first bus core from our bridged bhnd(4) bus */
524 	error = bhndb_pci_eio_map(&pio.eio, BHND_DEFAULT_CHIPC_ADDR,
525 	    BHND_DEFAULT_CORE_SIZE);
526 	if (error)
527 		goto failed;
528 
529 	/* Probe for a usable EROM class, and read the chip identifier */
530 	erom_class = bhnd_erom_probe_driver_classes(device_get_devclass(dev),
531 	    &pio.eio, hint, &cid);
532 	if (erom_class == NULL) {
533 		device_printf(dev, "device enumeration unsupported; no "
534 		    "compatible driver found\n");
535 
536 		error = ENXIO;
537 		goto failed;
538 	}
539 
540 	/* Allocate EROM parser */
541 	if ((erom = bhnd_erom_alloc(erom_class, &cid, &pio.eio)) == NULL) {
542 		device_printf(dev, "failed to allocate device enumeration "
543 		    "table parser\n");
544 		error = ENXIO;
545 		goto failed;
546 	}
547 
548 	/* Read the full core table */
549 	error = bhnd_erom_get_core_table(erom, &erom_cores, &erom_ncores);
550 	if (error) {
551 		device_printf(dev, "error fetching core table: %d\n", error);
552 		goto failed;
553 	}
554 
555 	/* Provide the results to our caller */
556 	*cores = malloc(sizeof(erom_cores[0]) * erom_ncores, M_BHND, M_WAITOK);
557 	memcpy(*cores, erom_cores, sizeof(erom_cores[0]) * erom_ncores);
558 	*ncores = erom_ncores;
559 
560 	*chipid = cid;
561 	if (eromcls != NULL)
562 		*eromcls = erom_class;
563 
564 	/* Clean up */
565 	bhnd_erom_free_core_table(erom, erom_cores);
566 	bhnd_erom_free(erom);
567 	bhndb_release_host_resources(hr);
568 
569 	return (0);
570 
571 failed:
572 	if (erom_cores != NULL)
573 		bhnd_erom_free_core_table(erom, erom_cores);
574 
575 	if (erom != NULL)
576 		bhnd_erom_free(erom);
577 
578 	bhndb_release_host_resources(hr);
579 	return (error);
580 }
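
/*
 * Typical usage, as in bhndb_pci_probe() and bhndb_pci_attach() above; the
 * caller owns the returned core table and releases it with free(9) and the
 * M_BHND malloc type:
 *
 *	error = bhndb_pci_read_core_table(dev, &cid, &cores, &ncores, NULL);
 *	if (error)
 *		return (error);
 *	...
 *	free(cores, M_BHND);
 */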
581 
582 static int
583 bhndb_pci_add_children(struct bhndb_pci_softc *sc)
584 {
585 	bus_size_t		 nv_sz;
586 	int			 error;
587 
588 	/**
589 	 * If SPROM is mapped directly into BAR0, add child NVRAM
590 	 * device.
591 	 */
592 	nv_sz = bhndb_pci_sprom_size(sc);
593 	if (nv_sz > 0) {
594 		struct bhndb_devinfo	*dinfo;
595 		device_t		 child;
596 
597 		if (bootverbose) {
598 			device_printf(sc->dev, "found SPROM (%ju bytes)\n",
599 			    (uintmax_t)nv_sz);
600 		}
601 
602 		/* Add sprom device, ordered early enough to be available
603 		 * before the bridged bhnd(4) bus is attached. */
604 		child = BUS_ADD_CHILD(sc->dev,
605 		    BHND_PROBE_ROOT + BHND_PROBE_ORDER_EARLY, "bhnd_nvram", -1);
606 		if (child == NULL) {
607 			device_printf(sc->dev, "failed to add sprom device\n");
608 			return (ENXIO);
609 		}
610 
611 		/* Initialize device address space and resource covering the
612 		 * BAR0 SPROM shadow. */
613 		dinfo = device_get_ivars(child);
614 		dinfo->addrspace = BHNDB_ADDRSPACE_NATIVE;
615 
616 		error = bus_set_resource(child, SYS_RES_MEMORY, 0,
617 		    bhndb_pci_sprom_addr(sc), nv_sz);
618 		if (error) {
619 			device_printf(sc->dev,
620 			    "failed to register sprom resources\n");
621 			return (error);
622 		}
623 	}
624 
625 	return (0);
626 }
627 
628 static const struct bhndb_regwin *
629 bhndb_pci_sprom_regwin(struct bhndb_pci_softc *sc)
630 {
631 	struct bhndb_resources		*bres;
632 	const struct bhndb_hwcfg	*cfg;
633 	const struct bhndb_regwin	*sprom_win;
634 
635 	bres = sc->bhndb.bus_res;
636 	cfg = bres->cfg;
637 
638 	sprom_win = bhndb_regwin_find_type(cfg->register_windows,
639 	    BHNDB_REGWIN_T_SPROM, BHNDB_PCI_V0_BAR0_SPROM_SIZE);
640 
641 	return (sprom_win);
642 }
643 
644 static bus_addr_t
645 bhndb_pci_sprom_addr(struct bhndb_pci_softc *sc)
646 {
647 	const struct bhndb_regwin	*sprom_win;
648 	struct resource			*r;
649 
650 	/* Fetch the SPROM register window */
651 	sprom_win = bhndb_pci_sprom_regwin(sc);
652 	KASSERT(sprom_win != NULL, ("requested sprom address on PCI_V2+"));
653 
654 	/* Fetch the associated resource */
655 	r = bhndb_host_resource_for_regwin(sc->bhndb.bus_res->res, sprom_win);
656 	KASSERT(r != NULL, ("missing resource for sprom window\n"));
657 
658 	return (rman_get_start(r) + sprom_win->win_offset);
659 }
660 
661 static bus_size_t
662 bhndb_pci_sprom_size(struct bhndb_pci_softc *sc)
663 {
664 	const struct bhndb_regwin	*sprom_win;
665 	uint32_t			 sctl;
666 	bus_size_t			 sprom_sz;
667 
668 	sprom_win = bhndb_pci_sprom_regwin(sc);
669 
670 	/* PCI_V2 and later devices map SPROM/OTP via ChipCommon */
671 	if (sprom_win == NULL)
672 		return (0);
673 
674 	/* Determine SPROM size */
675 	sctl = pci_read_config(sc->parent, BHNDB_PCI_SPROM_CONTROL, 4);
676 	if (sctl & BHNDB_PCI_SPROM_BLANK)
677 		return (0);
678 
679 	switch (sctl & BHNDB_PCI_SPROM_SZ_MASK) {
680 	case BHNDB_PCI_SPROM_SZ_1KB:
681 		sprom_sz = (1 * 1024);
682 		break;
683 
684 	case BHNDB_PCI_SPROM_SZ_4KB:
685 		sprom_sz = (4 * 1024);
686 		break;
687 
688 	case BHNDB_PCI_SPROM_SZ_16KB:
689 		sprom_sz = (16 * 1024);
690 		break;
691 
692 	case BHNDB_PCI_SPROM_SZ_RESERVED:
693 	default:
694 		device_printf(sc->dev, "invalid PCI sprom size 0x%x\n", sctl);
695 		return (0);
696 	}
697 
698 	if (sprom_sz > sprom_win->win_size) {
699 		device_printf(sc->dev,
700 		    "PCI sprom size (0x%x) overruns defined register window\n",
701 		    sctl);
702 		return (0);
703 	}
704 
705 	return (sprom_sz);
706 }
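
/*
 * A zero return value doubles as the "no directly-mapped SPROM" indicator;
 * bhndb_pci_add_children() only attaches the bhnd_nvram child device when a
 * non-zero size is reported here, so blank, reserved-size, or oversized
 * SPROM images are treated as absent.
 */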
707 
708 /**
709  * Return the host resource providing a static mapping of the PCI core's
710  * registers.
711  *
712  * @param	sc		bhndb PCI driver state.
713  * @param	offset		The required readable offset within the PCI core
714  *				register block.
715  * @param	size		The required readable size at @p offset.
716  * @param[out]	res		On success, the host resource containing our PCI
717  *				core's register window.
718  * @param[out]	res_offset	On success, the @p offset relative to @p res.
719  *
720  * @retval 0		success
721  * @retval ENXIO	if a valid static register window mapping the PCI core
722  *			registers is not available.
723  */
724 static int
725 bhndb_pci_get_core_regs(struct bhndb_pci_softc *sc, bus_size_t offset,
726     bus_size_t size, struct resource **res, bus_size_t *res_offset)
727 {
728 	const struct bhndb_regwin	*win;
729 	struct resource			*r;
730 
731 	/* Locate the static register window mapping the requested offset */
732 	win = bhndb_regwin_find_core(sc->bhndb.bus_res->cfg->register_windows,
733 	    sc->pci_devclass, 0, BHND_PORT_DEVICE, 0, 0, offset, size);
734 	if (win == NULL) {
735 		device_printf(sc->dev, "missing PCI core register window\n");
736 		return (ENXIO);
737 	}
738 
739 	/* Fetch the resource containing the register window */
740 	r = bhndb_host_resource_for_regwin(sc->bhndb.bus_res->res, win);
741 	if (r == NULL) {
742 		device_printf(sc->dev, "missing PCI core register resource\n");
743 		return (ENXIO);
744 	}
745 
746 	KASSERT(offset >= win->d.core.offset, ("offset %#jx outside of "
747 	    "register window", (uintmax_t)offset));
748 
749 	*res = r;
750 	*res_offset = win->win_offset + (offset - win->d.core.offset);
751 
752 	return (0);
753 }
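
/*
 * Illustrative example (offsets are hypothetical): if the static core window
 * maps the PCI core's registers starting at d.core.offset 0x0, and the window
 * itself begins at win_offset 0x2000 within its host resource, a request for
 * core register offset 0xC resolves to res_offset 0x200C.
 */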
754 
755 /**
756  * Write a 1, 2, or 4 byte data item to the PCI core's registers at @p offset.
757  *
758  * @param sc		bhndb PCI driver state.
759  * @param offset	register write offset.
760  * @param value		value to be written.
761  * @param width		item width (1, 2, or 4 bytes).
762  */
763 static void
764 bhndb_pci_write_core(struct bhndb_pci_softc *sc, bus_size_t offset,
765     uint32_t value, u_int width)
766 {
767 	struct resource	*r;
768 	bus_size_t	 r_offset;
769 	int		 error;
770 
771 	error = bhndb_pci_get_core_regs(sc, offset, width, &r, &r_offset);
772 	if (error) {
773 		panic("no PCI register window mapping %#jx+%#x: %d",
774 		    (uintmax_t)offset, width, error);
775 	}
776 
777 	switch (width) {
778 	case 1:
779 		bus_write_1(r, r_offset, value);
780 		break;
781 	case 2:
782 		bus_write_2(r, r_offset, value);
783 		break;
784 	case 4:
785 		bus_write_4(r, r_offset, value);
786 		break;
787 	default:
788 		panic("invalid width: %u", width);
789 	}
790 }
791 
792 /**
793  * Read a 1, 2, or 4 byte data item from the PCI core's registers
794  * at @p offset.
795  *
796  * @param sc		bhndb PCI driver state.
797  * @param offset	register read offset.
798  * @param width		item width (1, 2, or 4 bytes).
799  */
800 static uint32_t
801 bhndb_pci_read_core(struct bhndb_pci_softc *sc, bus_size_t offset, u_int width)
802 {
803 	struct resource	*r;
804 	bus_size_t	 r_offset;
805 	int		 error;
806 
807 	error = bhndb_pci_get_core_regs(sc, offset, width, &r, &r_offset);
808 	if (error) {
809 		panic("no PCI register window mapping %#jx+%#x: %d",
810 		    (uintmax_t)offset, width, error);
811 	}
812 
813 	switch (width) {
814 	case 1:
815 		return (bus_read_1(r, r_offset));
816 	case 2:
817 		return (bus_read_2(r, r_offset));
818 	case 4:
819 		return (bus_read_4(r, r_offset));
820 	default:
821 		panic("invalid width: %u", width);
822 	}
823 }
824 
825 /*
826  * On devices without a SROM, the PCI(e) cores will be initialized with
827  * their Power-on-Reset defaults; this can leave two of the BAR0 PCI windows
828  * mapped to the wrong core.
829  *
830  * This function updates the SROM shadow to point the BAR0 windows at the
831  * current PCI core.
832  *
833  * Applies to all PCI/PCIe revisions.
834  */
835 static void
836 bhndb_init_sromless_pci_config(struct bhndb_pci_softc *sc)
837 {
838 	const struct bhndb_pci_core	*pci_core;
839 	bus_size_t			 srsh_offset;
840 	u_int				 pci_cidx, sprom_cidx;
841 	uint16_t			 val;
842 
843 	if ((sc->pci_quirks & BHNDB_PCI_QUIRK_SRSH_WAR) == 0)
844 		return;
845 
846 	/* Determine the correct register offset for our PCI core */
847 	pci_core = bhndb_pci_find_core(&sc->bhndb.bridge_core);
848 	KASSERT(pci_core != NULL, ("missing core table entry"));
849 
850 	srsh_offset = pci_core->srsh_offset;
851 
852 	/* Fetch the SPROM's configured core index */
853 	val = bhndb_pci_read_core(sc, srsh_offset, sizeof(val));
854 	sprom_cidx = (val & BHND_PCI_SRSH_PI_MASK) >> BHND_PCI_SRSH_PI_SHIFT;
855 
856 	/* If it doesn't match host bridge's core index, update the index
857 	 * value */
858 	pci_cidx = sc->bhndb.bridge_core.core_idx;
859 	if (sprom_cidx != pci_cidx) {
860 		val &= ~BHND_PCI_SRSH_PI_MASK;
861 		val |= (pci_cidx << BHND_PCI_SRSH_PI_SHIFT);
862 		bhndb_pci_write_core(sc, srsh_offset, val, sizeof(val));
863 	}
864 }
865 
866 static int
867 bhndb_pci_resume(device_t dev)
868 {
869 	struct bhndb_pci_softc	*sc;
870 	int			 error;
871 
872 	sc = device_get_softc(dev);
873 
874 	/* Enable clocks (if supported by this hardware) */
875 	if ((error = bhndb_enable_pci_clocks(sc->dev)))
876 		return (error);
877 
878 	/* Perform resume */
879 	return (bhndb_generic_resume(dev));
880 }
881 
882 static int
883 bhndb_pci_suspend(device_t dev)
884 {
885 	struct bhndb_pci_softc	*sc;
886 	int			 error;
887 
888 	sc = device_get_softc(dev);
889 
890 	/* Disable clocks (if supported by this hardware) */
891 	if ((error = bhndb_disable_pci_clocks(sc->dev)))
892 		return (error);
893 
894 	/* Perform suspend */
895 	return (bhndb_generic_suspend(dev));
896 }
897 
898 static int
899 bhndb_pci_set_window_addr(device_t dev, const struct bhndb_regwin *rw,
900     bhnd_addr_t addr)
901 {
902 	struct bhndb_pci_softc *sc = device_get_softc(dev);
903 	return (sc->set_regwin(sc->dev, sc->parent, rw, addr));
904 }
905 
906 /**
907  * A siba(4) and bcma(4)-compatible bhndb_set_window_addr implementation.
908  *
909  * On siba(4) devices, writing a PCI window register may not succeed; it's
910  * necessary to immediately read back the configuration register and retry
911  * if it was not set to the desired value.
912  *
913  * This is not necessary on bcma(4) devices, but other than the overhead of
914  * validating the register, there's no harm in performing the verification.
915  */
916 static int
917 bhndb_pci_compat_setregwin(device_t dev, device_t pci_dev,
918     const struct bhndb_regwin *rw, bhnd_addr_t addr)
919 {
920 	int		error;
921 	int		reg;
922 
923 	if (rw->win_type != BHNDB_REGWIN_T_DYN)
924 		return (ENODEV);
925 
926 	reg = rw->d.dyn.cfg_offset;
927 	for (u_int i = 0; i < BHNDB_PCI_BARCTRL_WRITE_RETRY; i++) {
928 		if ((error = bhndb_pci_fast_setregwin(dev, pci_dev, rw, addr)))
929 			return (error);
930 
931 		if (pci_read_config(pci_dev, reg, 4) == addr)
932 			return (0);
933 
934 		DELAY(10);
935 	}
936 
937 	/* Unable to set window */
938 	return (ENODEV);
939 }
940 
941 /**
942  * A bcma(4)-only bhndb_set_window_addr implementation.
943  */
944 static int
945 bhndb_pci_fast_setregwin(device_t dev, device_t pci_dev,
946     const struct bhndb_regwin *rw, bhnd_addr_t addr)
947 {
948 	/* The PCI bridge core only supports 32-bit addressing, regardless
949 	 * of the bus' support for 64-bit addressing */
950 	if (addr > UINT32_MAX)
951 		return (ERANGE);
952 
953 	switch (rw->win_type) {
954 	case BHNDB_REGWIN_T_DYN:
955 		/* Addresses must be page aligned */
956 		if (addr % rw->win_size != 0)
957 			return (EINVAL);
958 
959 		pci_write_config(pci_dev, rw->d.dyn.cfg_offset, addr, 4);
960 		break;
961 	default:
962 		return (ENODEV);
963 	}
964 
965 	return (0);
966 }
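
/*
 * For example, assuming a (hypothetical) 4 KiB dynamic window: a target
 * address of 0x18004000 is accepted and written to the window's PCI
 * configuration register, 0x18004004 fails the alignment check with EINVAL,
 * and any address above UINT32_MAX fails with ERANGE.
 */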
967 
968 static int
969 bhndb_pci_populate_board_info(device_t dev, device_t child,
970     struct bhnd_board_info *info)
971 {
972 	struct bhndb_pci_softc	*sc;
973 
974 	sc = device_get_softc(dev);
975 
976 	/*
977 	 * On a subset of Apple BCM4360 modules, always prefer the
978 	 * PCI subdevice to the SPROM-supplied boardtype.
979 	 *
980 	 * TODO:
981 	 *
982 	 * Broadcom's own drivers implement this override, and then later use
983 	 * the remapped BCM4360 board type to determine the required
984 	 * board-specific workarounds.
985 	 *
986 	 * Without access to this hardware, it's unclear why this mapping
987 	 * is done, and we must do the same. If we can survey the hardware
988 	 * in question, it may be possible to replace this behavior with
989 	 * explicit references to the SPROM-supplied boardtype(s) in our
990 	 * quirk definitions.
991 	 */
992 	if (pci_get_subvendor(sc->parent) == PCI_VENDOR_APPLE) {
993 		switch (info->board_type) {
994 		case BHND_BOARD_BCM94360X29C:
995 		case BHND_BOARD_BCM94360X29CP2:
996 		case BHND_BOARD_BCM94360X51:
997 		case BHND_BOARD_BCM94360X51P2:
998 			info->board_type = 0;	/* allow override below */
999 			break;
1000 		default:
1001 			break;
1002 		}
1003 	}
1004 
1005 	/* If NVRAM did not supply vendor/type/devid info, provide the PCI
1006 	 * subvendor/subdevice/device values. */
1007 	if (info->board_vendor == 0)
1008 		info->board_vendor = pci_get_subvendor(sc->parent);
1009 
1010 	if (info->board_type == 0)
1011 		info->board_type = pci_get_subdevice(sc->parent);
1012 
1013 	if (info->board_devid == 0)
1014 		info->board_devid = pci_get_device(sc->parent);
1015 
1016 	return (0);
1017 }
1018 
1019 /**
1020  * Examine the bridge device @p dev and return the expected host bridge
1021  * device class.
1022  *
1023  * @param dev The bhndb bridge device
1024  */
1025 static bhnd_devclass_t
1026 bhndb_expected_pci_devclass(device_t dev)
1027 {
1028 	if (bhndb_is_pcie_attached(dev))
1029 		return (BHND_DEVCLASS_PCIE);
1030 	else
1031 		return (BHND_DEVCLASS_PCI);
1032 }
1033 
1034 /**
1035  * Return true if the bridge device @p dev is attached via PCIe,
1036  * false otherwise.
1037  *
1038  * @param dev The bhndb bridge device
1039  */
1040 static bool
1041 bhndb_is_pcie_attached(device_t dev)
1042 {
1043 	int reg;
1044 
1045 	if (pci_find_cap(device_get_parent(dev), PCIY_EXPRESS, &reg) == 0)
1046 		return (true);
1047 
1048 	return (false);
1049 }
1050 
1051 /**
1052  * Enable externally managed clocks, if required.
1053  *
1054  * Some PCI chipsets (BCM4306, possibly others) do not support
1055  * the idle low-power clock. Clocking must be bootstrapped at
1056  * attach/resume by directly adjusting GPIO registers exposed in the
1057  * PCI config space, and correspondingly, explicitly shutdown at
1058  * detach/suspend.
1059  *
1060  * @note This function may be safely called prior to device attach (e.g.
1061  * from DEVICE_PROBE).
1062  *
1063  * @param dev The bhndb bridge device
1064  */
1065 static int
1066 bhndb_enable_pci_clocks(device_t dev)
1067 {
1068 	device_t		pci_dev;
1069 	uint32_t		gpio_in, gpio_out, gpio_en;
1070 	uint32_t		gpio_flags;
1071 	uint16_t		pci_status;
1072 
1073 	pci_dev = device_get_parent(dev);
1074 
1075 	/* Only supported and required on PCI devices */
1076 	if (bhndb_is_pcie_attached(dev))
1077 		return (0);
1078 
1079 	/* Read state of XTAL pin */
1080 	gpio_in = pci_read_config(pci_dev, BHNDB_PCI_GPIO_IN, 4);
1081 	if (gpio_in & BHNDB_PCI_GPIO_XTAL_ON)
1082 		return (0); /* already enabled */
1083 
1084 	/* Fetch current config */
1085 	gpio_out = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUT, 4);
1086 	gpio_en = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, 4);
1087 
1088 	/* Set PLL_OFF/XTAL_ON pins to HIGH and enable both pins */
1089 	gpio_flags = (BHNDB_PCI_GPIO_PLL_OFF|BHNDB_PCI_GPIO_XTAL_ON);
1090 	gpio_out |= gpio_flags;
1091 	gpio_en |= gpio_flags;
1092 
1093 	pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4);
1094 	pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4);
1095 	DELAY(1000);
1096 
1097 	/* Reset PLL_OFF */
1098 	gpio_out &= ~BHNDB_PCI_GPIO_PLL_OFF;
1099 	pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4);
1100 	DELAY(5000);
1101 
1102 	/* Clear any PCI 'sent target-abort' flag. */
1103 	pci_status = pci_read_config(pci_dev, PCIR_STATUS, 2);
1104 	pci_status &= ~PCIM_STATUS_STABORT;
1105 	pci_write_config(pci_dev, PCIR_STATUS, pci_status, 2);
1106 
1107 	return (0);
1108 }
1109 
1110 /**
1111  * Disable externally managed clocks, if required.
1112  *
1113  * This function may be safely called prior to device attach (e.g.
1114  * from DEVICE_PROBE).
1115  *
1116  * @param dev The bhndb bridge device
1117  */
1118 static int
1119 bhndb_disable_pci_clocks(device_t dev)
1120 {
1121 	device_t	pci_dev;
1122 	uint32_t	gpio_out, gpio_en;
1123 
1124 	pci_dev = device_get_parent(dev);
1125 
1126 	/* Only supported and required on PCI devices */
1127 	if (bhndb_is_pcie_attached(dev))
1128 		return (0);
1129 
1130 	/* Fetch current config */
1131 	gpio_out = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUT, 4);
1132 	gpio_en = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, 4);
1133 
1134 	/* Set PLL_OFF to HIGH, XTAL_ON to LOW. */
1135 	gpio_out &= ~BHNDB_PCI_GPIO_XTAL_ON;
1136 	gpio_out |= BHNDB_PCI_GPIO_PLL_OFF;
1137 	pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4);
1138 
1139 	/* Enable both output pins */
1140 	gpio_en |= (BHNDB_PCI_GPIO_PLL_OFF|BHNDB_PCI_GPIO_XTAL_ON);
1141 	pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4);
1142 
1143 	return (0);
1144 }
1145 
1146 static bhnd_clksrc
1147 bhndb_pci_pwrctl_get_clksrc(device_t dev, device_t child,
1148 	bhnd_clock clock)
1149 {
1150 	struct bhndb_pci_softc	*sc;
1151 	uint32_t		 gpio_out;
1152 
1153 	sc = device_get_softc(dev);
1154 
1155 	/* Only supported on PCI devices */
1156 	if (bhndb_is_pcie_attached(sc->dev))
1157 		return (BHND_CLKSRC_UNKNOWN);
1158 
1159 	/* Only ILP is supported */
1160 	if (clock != BHND_CLOCK_ILP)
1161 		return (BHND_CLKSRC_UNKNOWN);
1162 
1163 	gpio_out = pci_read_config(sc->parent, BHNDB_PCI_GPIO_OUT, 4);
1164 	if (gpio_out & BHNDB_PCI_GPIO_SCS)
1165 		return (BHND_CLKSRC_PCI);
1166 	else
1167 		return (BHND_CLKSRC_XTAL);
1168 }
1169 
1170 static int
1171 bhndb_pci_pwrctl_gate_clock(device_t dev, device_t child,
1172 	bhnd_clock clock)
1173 {
1174 	struct bhndb_pci_softc *sc = device_get_softc(dev);
1175 
1176 	/* Only supported on PCI devices */
1177 	if (bhndb_is_pcie_attached(sc->dev))
1178 		return (ENODEV);
1179 
1180 	/* Only HT is supported */
1181 	if (clock != BHND_CLOCK_HT)
1182 		return (ENXIO);
1183 
1184 	return (bhndb_disable_pci_clocks(sc->dev));
1185 }
1186 
1187 static int
1188 bhndb_pci_pwrctl_ungate_clock(device_t dev, device_t child,
1189 	bhnd_clock clock)
1190 {
1191 	struct bhndb_pci_softc *sc = device_get_softc(dev);
1192 
1193 	/* Only supported on PCI devices */
1194 	if (bhndb_is_pcie_attached(sc->dev))
1195 		return (ENODEV);
1196 
1197 	/* Only HT is supported */
1198 	if (clock != BHND_CLOCK_HT)
1199 		return (ENXIO);
1200 
1201 	return (bhndb_enable_pci_clocks(sc->dev));
1202 }
1203 
1204 /**
1205  * BHNDB_MAP_INTR_ISRC()
1206  */
1207 static int
1208 bhndb_pci_map_intr_isrc(device_t dev, struct resource *irq,
1209     struct bhndb_intr_isrc **isrc)
1210 {
1211 	struct bhndb_pci_softc *sc = device_get_softc(dev);
1212 
1213 	/* There's only one bridged interrupt to choose from */
1214 	*isrc = sc->isrc;
1215 	return (0);
1216 }
1217 
1218 /* siba-specific implementation of BHNDB_ROUTE_INTERRUPTS() */
1219 static int
1220 bhndb_pci_route_siba_interrupts(struct bhndb_pci_softc *sc, device_t child)
1221 {
1222 	uint32_t	sbintvec;
1223 	u_int		ivec;
1224 	int		error;
1225 
1226 	KASSERT(sc->pci_quirks & BHNDB_PCI_QUIRK_SIBA_INTVEC,
1227 	    ("route_siba_interrupts not supported by this hardware"));
1228 
1229 	/* Fetch the sbflag# for the child */
1230 	if ((error = bhnd_get_intr_ivec(child, 0, &ivec)))
1231 		return (error);
1232 
1233 	if (ivec > (sizeof(sbintvec)*8) - 1 /* aka '31' */) {
1234 		/* This should never be an issue in practice */
1235 		device_printf(sc->dev, "cannot route interrupts to high "
1236 		    "sbflag# %u\n", ivec);
1237 		return (ENXIO);
1238 	}
1239 
1240 	BHNDB_PCI_LOCK(sc);
1241 
1242 	sbintvec = bhndb_pci_read_core(sc, SB0_REG_ABS(SIBA_CFG0_INTVEC), 4);
1243 	sbintvec |= (1 << ivec);
1244 	bhndb_pci_write_core(sc, SB0_REG_ABS(SIBA_CFG0_INTVEC), sbintvec, 4);
1245 
1246 	BHNDB_PCI_UNLOCK(sc);
1247 
1248 	return (0);
1249 }
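
/*
 * For example, a child core with sbflag# (ivec) 3 results in bit 3 being set
 * in SIBA_CFG0_INTVEC by the read-modify-write above, leaving any previously
 * routed interrupt vectors intact.
 */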
1250 
1251 /* BHNDB_ROUTE_INTERRUPTS() */
1252 static int
1253 bhndb_pci_route_interrupts(device_t dev, device_t child)
1254 {
1255 	struct bhndb_pci_softc	*sc;
1256 	struct bhnd_core_info	 core;
1257 	uint32_t		 core_bit;
1258 	uint32_t		 intmask;
1259 
1260 	sc = device_get_softc(dev);
1261 
1262 	if (sc->pci_quirks & BHNDB_PCI_QUIRK_SIBA_INTVEC)
1263 		return (bhndb_pci_route_siba_interrupts(sc, child));
1264 
1265 	core = bhnd_get_core_info(child);
1266 	if (core.core_idx > BHNDB_PCI_SBIM_COREIDX_MAX) {
1267 		/* This should never be an issue in practice */
1268 		device_printf(dev, "cannot route interrupts to high core "
1269 		    "index %u\n", core.core_idx);
1270 		return (ENXIO);
1271 	}
1272 
1273 	BHNDB_PCI_LOCK(sc);
1274 
1275 	core_bit = (1<<core.core_idx) << BHNDB_PCI_SBIM_SHIFT;
1276 	intmask = pci_read_config(sc->parent, BHNDB_PCI_INT_MASK, 4);
1277 	intmask |= core_bit;
1278 	pci_write_config(sc->parent, BHNDB_PCI_INT_MASK, intmask, 4);
1279 
1280 	BHNDB_PCI_UNLOCK(sc);
1281 
1282 	return (0);
1283 }
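
/*
 * For example, a child core with core_idx 2 contributes the bit
 * (1 << 2) << BHNDB_PCI_SBIM_SHIFT to BHNDB_PCI_INT_MASK; as with the siba
 * path above, the mask is read-modify-written so that previously routed
 * cores remain enabled.
 */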
1284 
1285 /**
1286  * Initialize a new bhndb PCI bridge EROM I/O instance. This EROM I/O
1287  * implementation supports mapping of the device enumeration table via the
1288  * @p hr host resources.
1289  *
1290  * @param pio		The instance to be initialized.
1291  * @param dev		The bridge device.
1292  * @param pci_dev	The bridge's parent PCI device.
1293  * @param hr		The host resources to be used to map the device
1294  *			enumeration table.
1295  */
1296 static int
1297 bhndb_pci_eio_init(struct bhndb_pci_eio *pio, device_t dev, device_t pci_dev,
1298     struct bhndb_host_resources *hr)
1299 {
1300 	memset(&pio->eio, 0, sizeof(pio->eio));
1301 	pio->eio.map = bhndb_pci_eio_map;
1302 	pio->eio.read = bhndb_pci_eio_read;
1303 	pio->eio.fini = NULL;
1304 
1305 	pio->dev = dev;
1306 	pio->pci_dev = pci_dev;
1307 	pio->hr = hr;
1308 	pio->win = NULL;
1309 	pio->res = NULL;
1310 
1311 	return (0);
1312 }
1313 
1314 /**
1315  * Attempt to adjust the dynamic register window backing @p pio to permit
1316  * reading @p size bytes at @p addr.
1317  *
1318  * If @p addr or @p size fall outside the existing mapped range, or if
1319  * @p pio is not backed by a dynamic register window, ENXIO will be returned.
1320  *
1321  * @param pio	The bhndb PCI erom I/O state to be modified.
1322  * @param addr	The address to be included in the mapped range.
1323  */
1324 static int
1325 bhndb_pci_eio_adjust_mapping(struct bhndb_pci_eio *pio, bhnd_addr_t addr,
1326     bhnd_size_t size)
1327 {
1328 	bhnd_addr_t	 target;
1329 	bhnd_size_t	 offset;
1330 	int		 error;
1331 
1332 
1333 	KASSERT(pio->win != NULL, ("missing register window"));
1334 	KASSERT(pio->res != NULL, ("missing regwin resource"));
1335 	KASSERT(pio->win->win_type == BHNDB_REGWIN_T_DYN,
1336 	    ("unexpected window type %d", pio->win->win_type));
1337 
1338 	/* The requested subrange must fall within the total mapped range */
1339 	if (addr < pio->addr || (addr - pio->addr) > pio->size ||
1340 	    size > pio->size || pio->size - (addr - pio->addr) < size)
1341 	{
1342 		return (ENXIO);
1343 	}
1344 
1345 	/* Do we already have a useable mapping? */
1346 	if (addr >= pio->res_target &&
1347 	    addr <= pio->res_target + pio->win->win_size &&
1348 	    (pio->res_target + pio->win->win_size) - addr >= size)
1349 	{
1350 		return (0);
1351 	}
1352 
1353 	/* Page-align the target address */
1354 	offset = addr % pio->win->win_size;
1355 	target = addr - offset;
1356 
1357 	/* Configure the register window */
1358 	error = bhndb_pci_compat_setregwin(pio->dev, pio->pci_dev, pio->win,
1359 	    target);
1360 	if (error) {
1361 		device_printf(pio->dev, "failed to configure dynamic register "
1362 		    "window: %d\n", error);
1363 		return (error);
1364 	}
1365 
1366 	pio->res_target = target;
1367 	return (0);
1368 }
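
/*
 * Illustrative example (window size is hypothetical): with a 4 KiB dynamic
 * window, a 4-byte read at address 0x18004010 retargets the window to the
 * page-aligned base 0x18004000; bhndb_pci_eio_read() then accesses the value
 * at resource offset (0x18004010 - 0x18004000) + win_offset.
 */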
1369 
1370 /* bhnd_erom_io_map() implementation */
1371 static int
1372 bhndb_pci_eio_map(struct bhnd_erom_io *eio, bhnd_addr_t addr,
1373     bhnd_size_t size)
1374 {
1375 	struct bhndb_pci_eio		*pio;
1376 	const struct bhndb_regwin	*regwin;
1377 	struct resource			*r;
1378 	bhnd_addr_t			 target;
1379 	bhnd_size_t			 offset;
1380 	int				 error;
1381 
1382 	pio = (struct bhndb_pci_eio *)eio;
1383 
1384 	/* Locate a useable dynamic register window */
1385 	regwin = bhndb_regwin_find_type(pio->hr->cfg->register_windows,
1386 	    BHNDB_REGWIN_T_DYN, MIN(size, BHND_DEFAULT_CORE_SIZE));
1387 	if (regwin == NULL) {
1388 		device_printf(pio->dev, "unable to map %#jx+%#jx; no "
1389 		    "usable dynamic register window found\n", addr, size);
1390 		return (ENXIO);
1391 	}
1392 
1393 	/* Locate the host resource mapping our register window */
1394 	if ((r = bhndb_host_resource_for_regwin(pio->hr, regwin)) == NULL) {
1395 		device_printf(pio->dev, "unable to map %#jx+%#jx; no "
1396 		    "usable register resource found\n", addr, size);
1397 		return (ENXIO);
1398 	}
1399 
1400 	/* Page-align the target address */
1401 	offset = addr % regwin->win_size;
1402 	target = addr - offset;
1403 
1404 	/* Configure the register window */
1405 	error = bhndb_pci_compat_setregwin(pio->dev, pio->pci_dev, regwin,
1406 	    target);
1407 	if (error) {
1408 		device_printf(pio->dev, "failed to configure dynamic register "
1409 		    "window: %d\n", error);
1410 		return (error);
1411 	}
1412 
1413 	/* Update our mapping state */
1414 	pio->win = regwin;
1415 	pio->res = r;
1416 	pio->addr = addr;
1417 	pio->size = size;
1418 	pio->res_target = target;
1419 
1420 	return (0);
1421 }
1422 
1423 /* bhnd_erom_io_read() implementation */
1424 static uint32_t
1425 bhndb_pci_eio_read(struct bhnd_erom_io *eio, bhnd_size_t offset, u_int width)
1426 {
1427 	struct bhndb_pci_eio		*pio;
1428 	bhnd_addr_t			 addr;
1429 	bus_size_t			 res_offset;
1430 	int				 error;
1431 
1432 	pio = (struct bhndb_pci_eio *)eio;
1433 
1434 	/* Calculate absolute address */
1435 	if (BHND_SIZE_MAX - offset < pio->addr) {
1436 		device_printf(pio->dev, "invalid offset %#jx+%#jx\n", pio->addr,
1437 		    offset);
1438 		return (UINT32_MAX);
1439 	}
1440 
1441 	addr = pio->addr + offset;
1442 
1443 	/* Adjust the mapping for our read */
1444 	if ((error = bhndb_pci_eio_adjust_mapping(pio, addr, width))) {
1445 		device_printf(pio->dev, "failed to adjust register mapping: "
1446 		    "%d\n", error);
1447 		return (UINT32_MAX);
1448 	}
1449 
1450 	KASSERT(pio->res_target <= addr, ("invalid mapping (%#jx vs. %#jx)",
1451 	    pio->res_target, addr));
1452 
1453 	/* Determine the actual read offset within our register window
1454 	 * resource */
1455 	res_offset = (addr - pio->res_target) + pio->win->win_offset;
1456 
1457 	/* Perform our read */
1458 	switch (width) {
1459 	case 1:
1460 		return (bus_read_1(pio->res, res_offset));
1461 	case 2:
1462 		return (bus_read_2(pio->res, res_offset));
1463 	case 4:
1464 		return (bus_read_4(pio->res, res_offset));
1465 	default:
1466 		panic("unsupported width: %u", width);
1467 	}
1468 }
1469 
1470 static device_method_t bhndb_pci_methods[] = {
1471 	/* Device interface */
1472 	DEVMETHOD(device_probe,				bhndb_pci_probe),
1473 	DEVMETHOD(device_attach,			bhndb_pci_attach),
1474 	DEVMETHOD(device_resume,			bhndb_pci_resume),
1475 	DEVMETHOD(device_suspend,			bhndb_pci_suspend),
1476 	DEVMETHOD(device_detach,			bhndb_pci_detach),
1477 
1478 	/* BHNDB interface */
1479 	DEVMETHOD(bhndb_set_window_addr,		bhndb_pci_set_window_addr),
1480 	DEVMETHOD(bhndb_populate_board_info,		bhndb_pci_populate_board_info),
1481 	DEVMETHOD(bhndb_map_intr_isrc,			bhndb_pci_map_intr_isrc),
1482 	DEVMETHOD(bhndb_route_interrupts,		bhndb_pci_route_interrupts),
1483 
1484 	/* BHND PWRCTL hostb interface */
1485 	DEVMETHOD(bhnd_pwrctl_hostb_get_clksrc,		bhndb_pci_pwrctl_get_clksrc),
1486 	DEVMETHOD(bhnd_pwrctl_hostb_gate_clock,		bhndb_pci_pwrctl_gate_clock),
1487 	DEVMETHOD(bhnd_pwrctl_hostb_ungate_clock,	bhndb_pci_pwrctl_ungate_clock),
1488 
1489 	DEVMETHOD_END
1490 };
1491 
1492 DEFINE_CLASS_1(bhndb, bhndb_pci_driver, bhndb_pci_methods,
1493     sizeof(struct bhndb_pci_softc), bhndb_driver);
1494 
1495 MODULE_VERSION(bhndb_pci, 1);
1496 MODULE_DEPEND(bhndb_pci, bhnd_pci_hostb, 1, 1, 1);
1497 MODULE_DEPEND(bhndb_pci, pci, 1, 1, 1);
1498 MODULE_DEPEND(bhndb_pci, bhndb, 1, 1, 1);
1499 MODULE_DEPEND(bhndb_pci, bhnd, 1, 1, 1);
1500