xref: /freebsd/sys/dev/bhnd/bhndb/bhndb_pci.c (revision 718cf2ccb9956613756ab15d7a0e28f2c8e91cab)
1 /*-
2  * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
3  * Copyright (c) 2017 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Landon Fuller
7  * under sponsorship from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer,
14  *    without modification.
15  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
17  *    redistribution must be conditioned upon including a substantially
18  *    similar Disclaimer requirement for further binary redistribution.
19  *
20  * NO WARRANTY
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
24  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
25  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
26  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
29  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGES.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 /*
38  * PCI-specific implementation for the BHNDB bridge driver.
39  *
40  * Provides support for bridging from a PCI parent bus to a BHND-compatible
41  * bus (e.g. bcma or siba) via a Broadcom PCI core configured in endpoint
42  * mode.
43  *
44  * This driver handles all initial generic host-level PCI interactions with a
45  * PCI/PCIe bridge core operating in endpoint mode. Once the bridged bhnd(4)
46  * bus has been enumerated, this driver works in tandem with a core-specific
47  * bhnd_pci_hostb driver to manage the PCI core.
48  */
49 
50 #include <sys/param.h>
51 #include <sys/kernel.h>
52 #include <sys/bus.h>
53 #include <sys/limits.h>
54 #include <sys/malloc.h>
55 #include <sys/module.h>
56 #include <sys/systm.h>
57 
58 #include <dev/pci/pcireg.h>
59 #include <dev/pci/pcivar.h>
60 
61 #include <dev/bhnd/bhnd.h>
62 #include <dev/bhnd/bhndreg.h>
63 
64 #include <dev/bhnd/bhnd_erom.h>
65 #include <dev/bhnd/bhnd_eromvar.h>
66 
67 #include <dev/bhnd/siba/sibareg.h>
68 
69 #include <dev/bhnd/cores/pci/bhnd_pcireg.h>
70 
71 #include "bhnd_pwrctl_hostb_if.h"
72 
73 #include "bhndb_pcireg.h"
74 #include "bhndb_pcivar.h"
75 #include "bhndb_private.h"
76 
77 struct bhndb_pci_eio;
78 
79 static int		bhndb_pci_alloc_msi(struct bhndb_pci_softc *sc,
80 			    int *msi_count);
81 static int		bhndb_pci_read_core_table(device_t dev,
82 			    struct bhnd_chipid *chipid,
83 			    struct bhnd_core_info **cores, u_int *ncores,
84 			    bhnd_erom_class_t **eromcls);
85 static int		bhndb_pci_add_children(struct bhndb_pci_softc *sc);
86 
87 static bhnd_devclass_t	bhndb_expected_pci_devclass(device_t dev);
88 static bool		bhndb_is_pcie_attached(device_t dev);
89 
90 static int		bhndb_enable_pci_clocks(device_t dev);
91 static int		bhndb_disable_pci_clocks(device_t dev);
92 
93 static int		bhndb_pci_compat_setregwin(device_t dev,
94 			    device_t pci_dev, const struct bhndb_regwin *,
95 			    bhnd_addr_t);
96 static int		bhndb_pci_fast_setregwin(device_t dev, device_t pci_dev,
97 			    const struct bhndb_regwin *, bhnd_addr_t);
98 
99 static void		bhndb_pci_write_core(struct bhndb_pci_softc *sc,
100 			    bus_size_t offset, uint32_t value, u_int width);
101 static uint32_t		bhndb_pci_read_core(struct bhndb_pci_softc *sc,
102 			    bus_size_t offset, u_int width);
103 
104 static void		bhndb_init_sromless_pci_config(
105 			    struct bhndb_pci_softc *sc);
106 
107 static bus_addr_t	bhndb_pci_sprom_addr(struct bhndb_pci_softc *sc);
108 static bus_size_t	bhndb_pci_sprom_size(struct bhndb_pci_softc *sc);
109 
110 static int		bhndb_pci_eio_init(struct bhndb_pci_eio *pio,
111 			    device_t dev, device_t pci_dev,
112 			    struct bhndb_host_resources *hr);
113 static int		bhndb_pci_eio_map(struct bhnd_erom_io *eio,
114 			    bhnd_addr_t addr, bhnd_size_t size);
115 static uint32_t		bhndb_pci_eio_read(struct bhnd_erom_io *eio,
116 			    bhnd_size_t offset, u_int width);
117 
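/* Number of MSI messages to request from the parent PCI device; all bridged
 * interrupts are multiplexed over a single host interrupt (see
 * bhndb_pci_map_intr_isrc()) */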
118 #define	BHNDB_PCI_MSI_COUNT	1
119 
120 static struct bhndb_pci_quirk	bhndb_pci_quirks[];
121 static struct bhndb_pci_quirk	bhndb_pcie_quirks[];
122 static struct bhndb_pci_quirk	bhndb_pcie2_quirks[];
123 
124 static struct bhndb_pci_core bhndb_pci_cores[] = {
125 	BHNDB_PCI_CORE(PCI,	BHND_PCI_SRSH_PI_OFFSET,	bhndb_pci_quirks),
126 	BHNDB_PCI_CORE(PCIE,	BHND_PCIE_SRSH_PI_OFFSET,	bhndb_pcie_quirks),
127 	BHNDB_PCI_CORE(PCIE2,	BHND_PCIE_SRSH_PI_OFFSET,	bhndb_pcie2_quirks),
128 	BHNDB_PCI_CORE_END
129 };
130 
131 /* bhndb_pci erom I/O instance state */
132 struct bhndb_pci_eio {
133 	struct bhnd_erom_io		 eio;
134 	device_t			 dev;		/**< bridge device */
135 	device_t			 pci_dev;	/**< parent PCI device */
136 	struct bhndb_host_resources	*hr;		/**< borrowed reference to host resources */
137 	const struct bhndb_regwin	*win;		/**< mapped register window, or NULL */
138 	struct resource			*res;		/**< resource containing the register window, or NULL if no window mapped */
139 	bhnd_addr_t			 res_target;	/**< current target address (if mapped) */
140 	bool				 mapped;	/**< true if a valid mapping exists, false otherwise */
141 	bhnd_addr_t			 addr;		/**< mapped address */
142 	bhnd_size_t			 size;		/**< mapped size */
143 };
144 
145 static struct bhndb_pci_quirk bhndb_pci_quirks[] = {
146 	/* Backplane interrupt flags must be routed via siba-specific
147 	 * SIBA_CFG0_INTVEC configuration register; the BHNDB_PCI_INT_MASK
148 	 * PCI configuration register is unsupported. */
149 	{{ BHND_MATCH_CHIP_TYPE		(SIBA) },
150 	 { BHND_MATCH_CORE_REV		(HWREV_LTE(5)) },
151 		BHNDB_PCI_QUIRK_SIBA_INTVEC },
152 
153 	/* All PCI core revisions require the SRSH work-around */
154 	BHNDB_PCI_QUIRK(HWREV_ANY,	BHNDB_PCI_QUIRK_SRSH_WAR),
155 	BHNDB_PCI_QUIRK_END
156 };
157 
158 static struct bhndb_pci_quirk bhndb_pcie_quirks[] = {
159 	/* All PCIe-G1 core revisions require the SRSH work-around */
160 	BHNDB_PCI_QUIRK(HWREV_ANY,	BHNDB_PCI_QUIRK_SRSH_WAR),
161 	BHNDB_PCI_QUIRK_END
162 };
163 
164 static struct bhndb_pci_quirk bhndb_pcie2_quirks[] = {
165 	/* All PCIe-G2 core revisions require the SRSH work-around */
166 	BHNDB_PCI_QUIRK(HWREV_ANY,	BHNDB_PCI_QUIRK_SRSH_WAR),
167 	BHNDB_PCI_QUIRK_END
168 };
169 
170 
171 /**
172  * Return the device table entry for @p ci, or NULL if none.
173  */
174 static struct bhndb_pci_core *
175 bhndb_pci_find_core(struct bhnd_core_info *ci)
176 {
177 	for (size_t i = 0; !BHNDB_PCI_IS_CORE_END(&bhndb_pci_cores[i]); i++) {
178 		struct bhndb_pci_core *entry = &bhndb_pci_cores[i];
179 
180 		if (bhnd_core_matches(ci, &entry->match))
181 			return (entry);
182 	}
183 
184 	return (NULL);
185 }
186 
187 /**
188  * Return all quirk flags for the given @p cid and @p ci.
189  */
190 static uint32_t
191 bhndb_pci_get_core_quirks(struct bhnd_chipid *cid, struct bhnd_core_info *ci)
192 {
193 	struct bhndb_pci_core	*entry;
194 	struct bhndb_pci_quirk	*qtable;
195 	uint32_t		 quirks;
196 
197 	quirks = 0;
198 
199 	/* No core entry? */
200 	if ((entry = bhndb_pci_find_core(ci)) == NULL)
201 		return (quirks);
202 
203 	/* No quirks? */
204 	if ((qtable = entry->quirks) == NULL)
205 		return (quirks);
206 
207 	for (size_t i = 0; !BHNDB_PCI_IS_QUIRK_END(&qtable[i]); i++) {
208 		struct bhndb_pci_quirk *q = &qtable[i];
209 
210 		if (!bhnd_chip_matches(cid, &q->chip_desc))
211 			continue;
212 
213 		if (!bhnd_core_matches(ci, &q->core_desc))
214 			continue;
215 
216 		quirks |= q->quirks;
217 	}
218 
219 	return (quirks);
220 }
221 
222 /**
223  * Default bhndb_pci implementation of device_probe().
224  *
225  * Verifies that the parent is a PCI/PCIe device.
226  */
227 static int
228 bhndb_pci_probe(device_t dev)
229 {
230 	struct bhnd_chipid	 cid;
231 	struct bhnd_core_info	*cores, hostb_core;
232 	struct bhndb_pci_core	*entry;
233 	bhnd_devclass_t		 hostb_devclass;
234 	u_int			 ncores;
235 	device_t		 parent;
236 	devclass_t		 parent_bus, pci;
237 	int			 error;
238 
239 	cores = NULL;
240 
241 	/* Our parent must be a PCI/PCIe device. */
242 	pci = devclass_find("pci");
243 	parent = device_get_parent(dev);
244 	parent_bus = device_get_devclass(device_get_parent(parent));
245 
246 	if (parent_bus != pci)
247 		return (ENXIO);
248 
249 	/* Enable clocks */
250 	if ((error = bhndb_enable_pci_clocks(dev)))
251 		return (error);
252 
253 	/* Identify the chip and enumerate the bridged cores */
254 	error = bhndb_pci_read_core_table(dev, &cid, &cores, &ncores, NULL);
255 	if (error)
256 		goto cleanup;
257 
258 	/* Search our core table for the host bridge core */
259 	hostb_devclass = bhndb_expected_pci_devclass(dev);
260 	error = bhndb_find_hostb_core(cores, ncores, hostb_devclass,
261 	    &hostb_core);
262 	if (error)
263 		goto cleanup;
264 
265 	/* Look for a matching core table entry */
266 	if ((entry = bhndb_pci_find_core(&hostb_core)) == NULL) {
267 		error = ENXIO;
268 		goto cleanup;
269 	}
270 
271 	device_set_desc(dev, "PCI-BHND bridge");
272 
273 	/* fall-through */
274 	error = BUS_PROBE_DEFAULT;
275 
276 cleanup:
277 	bhndb_disable_pci_clocks(dev);
278 	if (cores != NULL)
279 		free(cores, M_BHND);
280 
281 	return (error);
282 }
283 
284 /**
285  * Attempt to allocate MSI interrupts, returning the count in @p msi_count
286  * on success.
287  */
288 static int
289 bhndb_pci_alloc_msi(struct bhndb_pci_softc *sc, int *msi_count)
290 {
291 	int error, count;
292 
293 	/* Is MSI available? */
294 	if (pci_msi_count(sc->parent) < BHNDB_PCI_MSI_COUNT)
295 		return (ENXIO);
296 
297 	/* Allocate expected message count */
298 	count = BHNDB_PCI_MSI_COUNT;
299 	if ((error = pci_alloc_msi(sc->parent, &count))) {
300 		device_printf(sc->dev, "failed to allocate MSI interrupts: "
301 		    "%d\n", error);
302 
303 		return (error);
304 	}
305 
306 	if (count < BHNDB_PCI_MSI_COUNT)
307 		return (ENXIO);
308 
309 	*msi_count = count;
310 	return (0);
311 }
312 
313 static int
314 bhndb_pci_attach(device_t dev)
315 {
316 	struct bhndb_pci_softc	*sc;
317 	struct bhnd_chipid	 cid;
318 	struct bhnd_core_info	*cores, hostb_core;
319 	bhnd_erom_class_t	*erom_class;
320 	u_int			 ncores;
321 	int			 irq_rid;
322 	int			 error;
323 
324 	sc = device_get_softc(dev);
325 	sc->dev = dev;
326 	sc->parent = device_get_parent(dev);
327 	sc->pci_devclass = bhndb_expected_pci_devclass(dev);
328 	sc->pci_quirks = 0;
329 	sc->set_regwin = NULL;
330 
331 	BHNDB_PCI_LOCK_INIT(sc);
332 
333 	cores = NULL;
334 
335 	/* Enable PCI bus mastering */
336 	pci_enable_busmaster(sc->parent);
337 
338 	/* Set up PCI interrupt handling */
339 	if (bhndb_pci_alloc_msi(sc, &sc->msi_count) == 0) {
340 		/* MSI uses resource IDs starting at 1 */
341 		irq_rid = 1;
342 
343 		device_printf(dev, "Using MSI interrupts on %s\n",
344 		    device_get_nameunit(sc->parent));
345 	} else {
346 		sc->msi_count = 0;
347 		irq_rid = 0;
348 
349 		device_printf(dev, "Using INTx interrupts on %s\n",
350 		    device_get_nameunit(sc->parent));
351 	}
352 
353 	sc->isrc = bhndb_alloc_intr_isrc(sc->parent, irq_rid, 0, RM_MAX_END, 1,
354 	    RF_SHAREABLE | RF_ACTIVE);
355 	if (sc->isrc == NULL) {
356 		device_printf(sc->dev, "failed to allocate interrupt "
357 		    "resource\n");
358 		error = ENXIO;
359 		goto cleanup;
360 	}
361 
362 	/* Enable clocks (if required by this hardware) */
363 	if ((error = bhndb_enable_pci_clocks(sc->dev)))
364 		goto cleanup;
365 
366 	/* Identify the chip and enumerate the bridged cores */
367 	error = bhndb_pci_read_core_table(dev, &cid, &cores, &ncores,
368 	    &erom_class);
369 	if (error)
370 		goto cleanup;
371 
372 	/* Select the appropriate register window handler */
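	/* (siba(4) requires the verified-write path implemented by
	 * bhndb_pci_compat_setregwin(); see its comment below) */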
373 	if (cid.chip_type == BHND_CHIPTYPE_SIBA) {
374 		sc->set_regwin = bhndb_pci_compat_setregwin;
375 	} else {
376 		sc->set_regwin = bhndb_pci_fast_setregwin;
377 	}
378 
379 	/* Determine our host bridge core and populate our quirk flags */
380 	error = bhndb_find_hostb_core(cores, ncores, sc->pci_devclass,
381 	    &hostb_core);
382 	if (error)
383 		goto cleanup;
384 
385 	sc->pci_quirks = bhndb_pci_get_core_quirks(&cid, &hostb_core);
386 
387 	/* Perform bridge attach */
388 	error = bhndb_attach(dev, &cid, cores, ncores, &hostb_core, erom_class);
389 	if (error)
390 		goto cleanup;
391 
392 	/* Fix up power-on defaults for SROM-less devices. */
393 	bhndb_init_sromless_pci_config(sc);
394 
395 	/* Add any additional child devices */
396 	if ((error = bhndb_pci_add_children(sc)))
397 		goto cleanup;
398 
399 	/* Probe and attach our children */
400 	if ((error = bus_generic_attach(dev)))
401 		goto cleanup;
402 
403 	free(cores, M_BHND);
404 
405 	return (0);
406 
407 cleanup:
408 	device_delete_children(dev);
409 	bhndb_disable_pci_clocks(sc->dev);
410 
411 	if (sc->isrc != NULL)
412 		bhndb_free_intr_isrc(sc->isrc);
413 
414 	if (sc->msi_count > 0)
415 		pci_release_msi(sc->parent);
416 
417 	if (cores != NULL)
418 		free(cores, M_BHND);
419 
420 	pci_disable_busmaster(sc->parent);
421 
422 	BHNDB_PCI_LOCK_DESTROY(sc);
423 
424 	return (error);
425 }
426 
427 static int
428 bhndb_pci_detach(device_t dev)
429 {
430 	struct bhndb_pci_softc	*sc;
431 	int			 error;
432 
433 	sc = device_get_softc(dev);
434 
435 	/* Attempt to detach our children */
436 	if ((error = bus_generic_detach(dev)))
437 		return (error);
438 
439 	/* Perform generic bridge detach */
440 	if ((error = bhndb_generic_detach(dev)))
441 		return (error);
442 
443 	/* Disable clocks (if required by this hardware) */
444 	if ((error = bhndb_disable_pci_clocks(sc->dev)))
445 		return (error);
446 
447 	/* Free our interrupt resources */
448 	bhndb_free_intr_isrc(sc->isrc);
449 
450 	/* Release MSI interrupts */
451 	if (sc->msi_count > 0)
452 		pci_release_msi(sc->parent);
453 
454 	/* Disable PCI bus mastering */
455 	pci_disable_busmaster(sc->parent);
456 
457 	BHNDB_PCI_LOCK_DESTROY(sc);
458 
459 	return (0);
460 }
461 
462 /**
463  * Use the generic PCI bridge hardware configuration to enumerate the bridged
464  * bhnd(4) bus' core table.
465  *
466  * @note This function may be safely called prior to device attach (e.g.
467  * from DEVICE_PROBE).
468  * @note This function requires exclusive ownership over allocating and
469  * configuring host bridge resources, and should only be called prior to
470  * completion of device attach and full configuration of the bridge.
471  *
472  * @param	dev		The bhndb_pci bridge device.
473  * @param[out]	chipid		On success, the parsed chip identification.
474  * @param[out]	cores		On success, the enumerated core table. The
475  *				caller is responsible for freeing this table
476  *				via free(9), using the M_BHND malloc type.
477  * @param[out]	ncores		On success, the number of cores found in
478  *				@p cores.
479  * @param[out]	eromcls		On success, a pointer to the erom class used to
480  *				parse the device enumeration table. This
481  *				argument may be NULL if the class is not
482  *				desired.
483  *
484  * @retval 0		success
485  * @retval non-zero	if enumerating the bridged bhnd(4) bus fails, a regular
486  * 			unix error code will be returned.
487  */
488 static int
489 bhndb_pci_read_core_table(device_t dev, struct bhnd_chipid *chipid,
490     struct bhnd_core_info **cores, u_int *ncores,
491     bhnd_erom_class_t **eromcls)
492 {
493 	const struct bhndb_hwcfg	*cfg;
494 	struct bhndb_host_resources	*hr;
495 	struct bhndb_pci_eio		 pio;
496 	struct bhnd_core_info		*erom_cores;
497 	const struct bhnd_chipid	*hint;
498 	struct bhnd_chipid		 cid;
499 	bhnd_erom_class_t		*erom_class;
500 	bhnd_erom_t			*erom;
501 	device_t			 parent_dev;
502 	u_int				 erom_ncores;
503 	int				 error;
504 
505 	parent_dev = device_get_parent(dev);
506 	erom = NULL;
507 	erom_cores = NULL;
508 
509 	/* Fetch our chipid hint (if any) and generic hardware configuration */
510 	cfg = BHNDB_BUS_GET_GENERIC_HWCFG(parent_dev, dev);
511 	hint = BHNDB_BUS_GET_CHIPID(parent_dev, dev);
512 
513 	/* Allocate our host resources */
514 	if ((error = bhndb_alloc_host_resources(&hr, dev, parent_dev, cfg)))
515 		return (error);
516 
517 	/* Initialize our erom I/O state */
518 	if ((error = bhndb_pci_eio_init(&pio, dev, parent_dev, hr)))
519 		goto failed;
520 
521 	/* Map the first bus core from our bridged bhnd(4) bus */
522 	error = bhndb_pci_eio_map(&pio.eio, BHND_DEFAULT_CHIPC_ADDR,
523 	    BHND_DEFAULT_CORE_SIZE);
524 	if (error)
525 		goto failed;
526 
527 	/* Probe for a usable EROM class, and read the chip identifier */
528 	erom_class = bhnd_erom_probe_driver_classes(device_get_devclass(dev),
529 	    &pio.eio, hint, &cid);
530 	if (erom_class == NULL) {
531 		device_printf(dev, "device enumeration unsupported; no "
532 		    "compatible driver found\n");
533 
534 		error = ENXIO;
535 		goto failed;
536 	}
537 
538 	/* Allocate EROM parser */
539 	if ((erom = bhnd_erom_alloc(erom_class, &cid, &pio.eio)) == NULL) {
540 		device_printf(dev, "failed to allocate device enumeration "
541 		    "table parser\n");
542 		error = ENXIO;
543 		goto failed;
544 	}
545 
546 	/* Read the full core table */
547 	error = bhnd_erom_get_core_table(erom, &erom_cores, &erom_ncores);
548 	if (error) {
549 		device_printf(dev, "error fetching core table: %d\n", error);
550 		goto failed;
551 	}
552 
553 	/* Provide the results to our caller */
554 	*cores = malloc(sizeof(erom_cores[0]) * erom_ncores, M_BHND, M_WAITOK);
555 	memcpy(*cores, erom_cores, sizeof(erom_cores[0]) * erom_ncores);
556 	*ncores = erom_ncores;
557 
558 	*chipid = cid;
559 	if (eromcls != NULL)
560 		*eromcls = erom_class;
561 
562 	/* Clean up */
563 	bhnd_erom_free_core_table(erom, erom_cores);
564 	bhnd_erom_free(erom);
565 	bhndb_release_host_resources(hr);
566 
567 	return (0);
568 
569 failed:
570 	if (erom_cores != NULL)
571 		bhnd_erom_free_core_table(erom, erom_cores);
572 
573 	if (erom != NULL)
574 		bhnd_erom_free(erom);
575 
576 	bhndb_release_host_resources(hr);
577 	return (error);
578 }
579 
580 static int
581 bhndb_pci_add_children(struct bhndb_pci_softc *sc)
582 {
583 	bus_size_t		 nv_sz;
584 	int			 error;
585 
586 	/**
587 	 * If SPROM is mapped directly into BAR0, add child NVRAM
588 	 * device.
589 	 */
590 	nv_sz = bhndb_pci_sprom_size(sc);
591 	if (nv_sz > 0) {
592 		struct bhndb_devinfo	*dinfo;
593 		device_t		 child;
594 
595 		if (bootverbose) {
596 			device_printf(sc->dev, "found SPROM (%ju bytes)\n",
597 			    (uintmax_t)nv_sz);
598 		}
599 
600 		/* Add sprom device, ordered early enough to be available
601 		 * before the bridged bhnd(4) bus is attached. */
602 		child = BUS_ADD_CHILD(sc->dev,
603 		    BHND_PROBE_ROOT + BHND_PROBE_ORDER_EARLY, "bhnd_nvram", -1);
604 		if (child == NULL) {
605 			device_printf(sc->dev, "failed to add sprom device\n");
606 			return (ENXIO);
607 		}
608 
609 		/* Initialize device address space and resource covering the
610 		 * BAR0 SPROM shadow. */
611 		dinfo = device_get_ivars(child);
612 		dinfo->addrspace = BHNDB_ADDRSPACE_NATIVE;
613 
614 		error = bus_set_resource(child, SYS_RES_MEMORY, 0,
615 		    bhndb_pci_sprom_addr(sc), nv_sz);
616 		if (error) {
617 			device_printf(sc->dev,
618 			    "failed to register sprom resources\n");
619 			return (error);
620 		}
621 	}
622 
623 	return (0);
624 }
625 
626 static const struct bhndb_regwin *
627 bhndb_pci_sprom_regwin(struct bhndb_pci_softc *sc)
628 {
629 	struct bhndb_resources		*bres;
630 	const struct bhndb_hwcfg	*cfg;
631 	const struct bhndb_regwin	*sprom_win;
632 
633 	bres = sc->bhndb.bus_res;
634 	cfg = bres->cfg;
635 
636 	sprom_win = bhndb_regwin_find_type(cfg->register_windows,
637 	    BHNDB_REGWIN_T_SPROM, BHNDB_PCI_V0_BAR0_SPROM_SIZE);
638 
639 	return (sprom_win);
640 }
641 
642 static bus_addr_t
643 bhndb_pci_sprom_addr(struct bhndb_pci_softc *sc)
644 {
645 	const struct bhndb_regwin	*sprom_win;
646 	struct resource			*r;
647 
648 	/* Fetch the SPROM register window */
649 	sprom_win = bhndb_pci_sprom_regwin(sc);
650 	KASSERT(sprom_win != NULL, ("requested sprom address on PCI_V2+"));
651 
652 	/* Fetch the associated resource */
653 	r = bhndb_host_resource_for_regwin(sc->bhndb.bus_res->res, sprom_win);
654 	KASSERT(r != NULL, ("missing resource for sprom window\n"));
655 
656 	return (rman_get_start(r) + sprom_win->win_offset);
657 }
658 
659 static bus_size_t
660 bhndb_pci_sprom_size(struct bhndb_pci_softc *sc)
661 {
662 	const struct bhndb_regwin	*sprom_win;
663 	uint32_t			 sctl;
664 	bus_size_t			 sprom_sz;
665 
666 	sprom_win = bhndb_pci_sprom_regwin(sc);
667 
668 	/* PCI_V2 and later devices map SPROM/OTP via ChipCommon */
669 	if (sprom_win == NULL)
670 		return (0);
671 
672 	/* Determine SPROM size */
673 	sctl = pci_read_config(sc->parent, BHNDB_PCI_SPROM_CONTROL, 4);
674 	if (sctl & BHNDB_PCI_SPROM_BLANK)
675 		return (0);
676 
677 	switch (sctl & BHNDB_PCI_SPROM_SZ_MASK) {
678 	case BHNDB_PCI_SPROM_SZ_1KB:
679 		sprom_sz = (1 * 1024);
680 		break;
681 
682 	case BHNDB_PCI_SPROM_SZ_4KB:
683 		sprom_sz = (4 * 1024);
684 		break;
685 
686 	case BHNDB_PCI_SPROM_SZ_16KB:
687 		sprom_sz = (16 * 1024);
688 		break;
689 
690 	case BHNDB_PCI_SPROM_SZ_RESERVED:
691 	default:
692 		device_printf(sc->dev, "invalid PCI sprom size 0x%x\n", sctl);
693 		return (0);
694 	}
695 
696 	if (sprom_sz > sprom_win->win_size) {
697 		device_printf(sc->dev,
698 		    "PCI sprom size (0x%x) overruns defined register window\n",
699 		    sctl);
700 		return (0);
701 	}
702 
703 	return (sprom_sz);
704 }
705 
706 /**
707  * Return the host resource providing a static mapping of the PCI core's
708  * registers.
709  *
710  * @param	sc	bhndb PCI driver state.
711  * @param[out]	res	On success, the host resource containing our PCI
712  *			core's register window.
713  * @param[out]	offset	On success, the offset of the PCI core registers within
714  * 			@p res.
715  *
716  * @retval 0		success
717  * @retval ENXIO	if a valid static register window mapping the PCI core
718  *			registers is not available.
719  */
720 static int
721 bhndb_pci_get_core_regs(struct bhndb_pci_softc *sc, struct resource **res,
722     bus_size_t *offset)
723 {
724 	const struct bhndb_regwin	*win;
725 	struct resource			*r;
726 
727 	/* Locate the static register window mapping the PCI core */
728 	win = bhndb_regwin_find_core(sc->bhndb.bus_res->cfg->register_windows,
729 	    sc->pci_devclass, 0, BHND_PORT_DEVICE, 0, 0);
730 	if (win == NULL) {
731 		device_printf(sc->dev, "missing PCI core register window\n");
732 		return (ENXIO);
733 	}
734 
735 	/* Fetch the resource containing the register window */
736 	r = bhndb_host_resource_for_regwin(sc->bhndb.bus_res->res, win);
737 	if (r == NULL) {
738 		device_printf(sc->dev, "missing PCI core register resource\n");
739 		return (ENXIO);
740 	}
741 
742 	*res = r;
743 	*offset = win->win_offset;
744 
745 	return (0);
746 }
747 
748 /**
749  * Write a 1, 2, or 4 byte data item to the PCI core's registers at @p offset.
750  *
751  * @param sc		bhndb PCI driver state.
752  * @param offset	register write offset.
753  * @param value		value to be written.
754  * @param width		item width (1, 2, or 4 bytes).
755  */
756 static void
757 bhndb_pci_write_core(struct bhndb_pci_softc *sc, bus_size_t offset,
758     uint32_t value, u_int width)
759 {
760 	struct resource	*r;
761 	bus_size_t	 r_offset;
762 	int		 error;
763 
764 	if ((error = bhndb_pci_get_core_regs(sc, &r, &r_offset)))
765 		panic("no PCI core registers: %d", error);
766 
767 	switch (width) {
768 	case 1:
769 		bus_write_1(r, r_offset + offset, value);
770 		break;
771 	case 2:
772 		bus_write_2(r, r_offset + offset, value);
773 		break;
774 	case 4:
775 		bus_write_4(r, r_offset + offset, value);
776 		break;
777 	default:
778 		panic("invalid width: %u", width);
779 	}
780 }
781 
782 /**
783  * Read a 1, 2, or 4 byte data item from the PCI core's registers
784  * at @p offset.
785  *
786  * @param sc		bhndb PCI driver state.
787  * @param offset	register read offset.
788  * @param width		item width (1, 2, or 4 bytes).
789  */
790 static uint32_t
791 bhndb_pci_read_core(struct bhndb_pci_softc *sc, bus_size_t offset, u_int width)
792 {
793 	struct resource	*r;
794 	bus_size_t	 r_offset;
795 	int		 error;
796 
797 	if ((error = bhndb_pci_get_core_regs(sc, &r, &r_offset)))
798 		panic("no PCI core registers: %d", error);
799 
800 	switch (width) {
801 	case 1:
802 		return (bus_read_1(r, r_offset + offset));
803 	case 2:
804 		return (bus_read_2(r, r_offset + offset));
805 	case 4:
806 		return (bus_read_4(r, r_offset + offset));
807 	default:
808 		panic("invalid width: %u", width);
809 	}
810 }
811 
812 /*
813  * On devices without a SROM, the PCI(e) cores will be initialized with
814  * their Power-on-Reset defaults; this can leave two of the BAR0 PCI windows
815  * mapped to the wrong core.
816  *
817  * This function updates the SROM shadow to point the BAR0 windows at the
818  * current PCI core.
819  *
820  * Applies to all PCI/PCIe revisions.
821  */
822 static void
823 bhndb_init_sromless_pci_config(struct bhndb_pci_softc *sc)
824 {
825 	const struct bhndb_pci_core	*pci_core;
826 	bus_size_t			 srsh_offset;
827 	u_int				 pci_cidx, sprom_cidx;
828 	uint16_t			 val;
829 
830 	if ((sc->pci_quirks & BHNDB_PCI_QUIRK_SRSH_WAR) == 0)
831 		return;
832 
833 	/* Determine the correct register offset for our PCI core */
834 	pci_core = bhndb_pci_find_core(&sc->bhndb.bridge_core);
835 	KASSERT(pci_core != NULL, ("missing core table entry"));
836 
837 	srsh_offset = pci_core->srsh_offset;
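	/* The SRSH PI field selects the core at which the BAR0 register
	 * windows are pointed; see the function comment above */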
838 
839 	/* Fetch the SPROM's configured core index */
840 	val = bhndb_pci_read_core(sc, srsh_offset, sizeof(val));
841 	sprom_cidx = (val & BHND_PCI_SRSH_PI_MASK) >> BHND_PCI_SRSH_PI_SHIFT;
842 
843 	/* If it doesn't match the host bridge's core index, update the index
844 	 * value */
845 	pci_cidx = sc->bhndb.bridge_core.core_idx;
846 	if (sprom_cidx != pci_cidx) {
847 		val &= ~BHND_PCI_SRSH_PI_MASK;
848 		val |= (pci_cidx << BHND_PCI_SRSH_PI_SHIFT);
849 		bhndb_pci_write_core(sc, srsh_offset, val, sizeof(val));
850 	}
851 }
852 
853 static int
854 bhndb_pci_resume(device_t dev)
855 {
856 	struct bhndb_pci_softc	*sc;
857 	int			 error;
858 
859 	sc = device_get_softc(dev);
860 
861 	/* Enable clocks (if supported by this hardware) */
862 	if ((error = bhndb_enable_pci_clocks(sc->dev)))
863 		return (error);
864 
865 	/* Perform resume */
866 	return (bhndb_generic_resume(dev));
867 }
868 
869 static int
870 bhndb_pci_suspend(device_t dev)
871 {
872 	struct bhndb_pci_softc	*sc;
873 	int			 error;
874 
875 	sc = device_get_softc(dev);
876 
877 	/* Disable clocks (if supported by this hardware) */
878 	if ((error = bhndb_disable_pci_clocks(sc->dev)))
879 		return (error);
880 
881 	/* Perform suspend */
882 	return (bhndb_generic_suspend(dev));
883 }
884 
885 static int
886 bhndb_pci_set_window_addr(device_t dev, const struct bhndb_regwin *rw,
887     bhnd_addr_t addr)
888 {
889 	struct bhndb_pci_softc *sc = device_get_softc(dev);
890 	return (sc->set_regwin(sc->dev, sc->parent, rw, addr));
891 }
892 
893 /**
894  * A siba(4) and bcma(4)-compatible bhndb_set_window_addr implementation.
895  *
896  * On siba(4) devices, it's possible that writing a PCI window register may
897  * not succeed; it's necessary to immediately read the configuration register
898  * and retry if not set to the desired value.
899  *
900  * This is not necessary on bcma(4) devices, but other than the overhead of
901  * validating the register, there's no harm in performing the verification.
902  */
903 static int
904 bhndb_pci_compat_setregwin(device_t dev, device_t pci_dev,
905     const struct bhndb_regwin *rw, bhnd_addr_t addr)
906 {
907 	int		error;
908 	int		reg;
909 
910 	if (rw->win_type != BHNDB_REGWIN_T_DYN)
911 		return (ENODEV);
912 
913 	reg = rw->d.dyn.cfg_offset;
914 	for (u_int i = 0; i < BHNDB_PCI_BARCTRL_WRITE_RETRY; i++) {
915 		if ((error = bhndb_pci_fast_setregwin(dev, pci_dev, rw, addr)))
916 			return (error);
917 
918 		if (pci_read_config(pci_dev, reg, 4) == addr)
919 			return (0);
920 
921 		DELAY(10);
922 	}
923 
924 	/* Unable to set window */
925 	return (ENODEV);
926 }
927 
928 /**
929  * A bcma(4)-only bhndb_set_window_addr implementation.
930  */
931 static int
932 bhndb_pci_fast_setregwin(device_t dev, device_t pci_dev,
933     const struct bhndb_regwin *rw, bhnd_addr_t addr)
934 {
935 	/* The PCI bridge core only supports 32-bit addressing, regardless
936 	 * of the bus' support for 64-bit addressing */
937 	if (addr > UINT32_MAX)
938 		return (ERANGE);
939 
940 	switch (rw->win_type) {
941 	case BHNDB_REGWIN_T_DYN:
942 		/* Addresses must be page aligned */
943 		if (addr % rw->win_size != 0)
944 			return (EINVAL);
945 
946 		pci_write_config(pci_dev, rw->d.dyn.cfg_offset, addr, 4);
947 		break;
948 	default:
949 		return (ENODEV);
950 	}
951 
952 	return (0);
953 }
954 
955 static int
956 bhndb_pci_populate_board_info(device_t dev, device_t child,
957     struct bhnd_board_info *info)
958 {
959 	struct bhndb_pci_softc	*sc;
960 
961 	sc = device_get_softc(dev);
962 
963 	/*
964 	 * On a subset of Apple BCM4360 modules, always prefer the
965 	 * PCI subdevice to the SPROM-supplied boardtype.
966 	 *
967 	 * TODO:
968 	 *
969 	 * Broadcom's own drivers implement this override, and then later use
970 	 * the remapped BCM4360 board type to determine the required
971 	 * board-specific workarounds.
972 	 *
973 	 * Without access to this hardware, it's unclear why this mapping
974 	 * is done, and we must do the same. If we can survey the hardware
975 	 * in question, it may be possible to replace this behavior with
976 	 * explicit references to the SPROM-supplied boardtype(s) in our
977 	 * quirk definitions.
978 	 */
979 	if (pci_get_subvendor(sc->parent) == PCI_VENDOR_APPLE) {
980 		switch (info->board_type) {
981 		case BHND_BOARD_BCM94360X29C:
982 		case BHND_BOARD_BCM94360X29CP2:
983 		case BHND_BOARD_BCM94360X51:
984 		case BHND_BOARD_BCM94360X51P2:
985 			info->board_type = 0;	/* allow override below */
986 			break;
987 		default:
988 			break;
989 		}
990 	}
991 
992 	/* If NVRAM did not supply vendor/type info, provide the PCI
993 	 * subvendor/subdevice values. */
994 	if (info->board_vendor == 0)
995 		info->board_vendor = pci_get_subvendor(sc->parent);
996 
997 	if (info->board_type == 0)
998 		info->board_type = pci_get_subdevice(sc->parent);
999 
1000 	return (0);
1001 }
1002 
1003 /**
1004  * Examine the bridge device @p dev and return the expected host bridge
1005  * device class.
1006  *
1007  * @param dev The bhndb bridge device
1008  */
1009 static bhnd_devclass_t
1010 bhndb_expected_pci_devclass(device_t dev)
1011 {
1012 	if (bhndb_is_pcie_attached(dev))
1013 		return (BHND_DEVCLASS_PCIE);
1014 	else
1015 		return (BHND_DEVCLASS_PCI);
1016 }
1017 
1018 /**
1019  * Return true if the bridge device @p dev is attached via PCIe,
1020  * false otherwise.
1021  *
1022  * @param dev The bhndb bridge device
1023  */
1024 static bool
1025 bhndb_is_pcie_attached(device_t dev)
1026 {
1027 	int reg;
1028 
1029 	if (pci_find_cap(device_get_parent(dev), PCIY_EXPRESS, &reg) == 0)
1030 		return (true);
1031 
1032 	return (false);
1033 }
1034 
1035 /**
1036  * Enable externally managed clocks, if required.
1037  *
1038  * Some PCI chipsets (BCM4306, possibly others) do not support
1039  * the idle low-power clock. Clocking must be bootstrapped at
1040  * attach/resume by directly adjusting GPIO registers exposed in the
1041  * PCI config space, and correspondingly, explicitly shut down at
1042  * detach/suspend.
1043  *
1044  * @note This function may be safely called prior to device attach (e.g.
1045  * from DEVICE_PROBE).
1046  *
1047  * @param dev The bhndb bridge device
1048  */
1049 static int
1050 bhndb_enable_pci_clocks(device_t dev)
1051 {
1052 	device_t		pci_dev;
1053 	uint32_t		gpio_in, gpio_out, gpio_en;
1054 	uint32_t		gpio_flags;
1055 	uint16_t		pci_status;
1056 
1057 	pci_dev = device_get_parent(dev);
1058 
1059 	/* Only supported and required on PCI devices */
1060 	if (bhndb_is_pcie_attached(dev))
1061 		return (0);
1062 
1063 	/* Read state of XTAL pin */
1064 	gpio_in = pci_read_config(pci_dev, BHNDB_PCI_GPIO_IN, 4);
1065 	if (gpio_in & BHNDB_PCI_GPIO_XTAL_ON)
1066 		return (0); /* already enabled */
1067 
1068 	/* Fetch current config */
1069 	gpio_out = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUT, 4);
1070 	gpio_en = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, 4);
1071 
1072 	/* Set PLL_OFF/XTAL_ON pins to HIGH and enable both pins */
1073 	gpio_flags = (BHNDB_PCI_GPIO_PLL_OFF|BHNDB_PCI_GPIO_XTAL_ON);
1074 	gpio_out |= gpio_flags;
1075 	gpio_en |= gpio_flags;
1076 
1077 	pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4);
1078 	pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4);
1079 	DELAY(1000);
1080 
1081 	/* Reset PLL_OFF */
1082 	gpio_out &= ~BHNDB_PCI_GPIO_PLL_OFF;
1083 	pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4);
1084 	DELAY(5000);
1085 
1086 	/* Clear any PCI 'sent target-abort' flag. */
1087 	pci_status = pci_read_config(pci_dev, PCIR_STATUS, 2);
1088 	pci_status &= ~PCIM_STATUS_STABORT;
1089 	pci_write_config(pci_dev, PCIR_STATUS, pci_status, 2);
1090 
1091 	return (0);
1092 }
1093 
1094 /**
1095  * Disable externally managed clocks, if required.
1096  *
1097  * This function may be safely called prior to device attach (e.g.
1098  * from DEVICE_PROBE).
1099  *
1100  * @param dev The bhndb bridge device
1101  */
1102 static int
1103 bhndb_disable_pci_clocks(device_t dev)
1104 {
1105 	device_t	pci_dev;
1106 	uint32_t	gpio_out, gpio_en;
1107 
1108 	pci_dev = device_get_parent(dev);
1109 
1110 	/* Only supported and required on PCI devices */
1111 	if (bhndb_is_pcie_attached(dev))
1112 		return (0);
1113 
1114 	/* Fetch current config */
1115 	gpio_out = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUT, 4);
1116 	gpio_en = pci_read_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, 4);
1117 
1118 	/* Set PLL_OFF to HIGH, XTAL_ON to LOW. */
1119 	gpio_out &= ~BHNDB_PCI_GPIO_XTAL_ON;
1120 	gpio_out |= BHNDB_PCI_GPIO_PLL_OFF;
1121 	pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUT, gpio_out, 4);
1122 
1123 	/* Enable both output pins */
1124 	gpio_en |= (BHNDB_PCI_GPIO_PLL_OFF|BHNDB_PCI_GPIO_XTAL_ON);
1125 	pci_write_config(pci_dev, BHNDB_PCI_GPIO_OUTEN, gpio_en, 4);
1126 
1127 	return (0);
1128 }
1129 
1130 static bhnd_clksrc
1131 bhndb_pci_pwrctl_get_clksrc(device_t dev, device_t child,
1132 	bhnd_clock clock)
1133 {
1134 	struct bhndb_pci_softc	*sc;
1135 	uint32_t		 gpio_out;
1136 
1137 	sc = device_get_softc(dev);
1138 
1139 	/* Only supported on PCI devices */
1140 	if (bhndb_is_pcie_attached(sc->dev))
1141 		return (BHND_CLKSRC_UNKNOWN);
1142 
1143 	/* Only ILP is supported */
1144 	if (clock != BHND_CLOCK_ILP)
1145 		return (BHND_CLKSRC_UNKNOWN);
1146 
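	/* The SCS GPIO output bit indicates the ILP clock source: the PCI
	 * clock when set, the external crystal (XTAL) when clear */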
1147 	gpio_out = pci_read_config(sc->parent, BHNDB_PCI_GPIO_OUT, 4);
1148 	if (gpio_out & BHNDB_PCI_GPIO_SCS)
1149 		return (BHND_CLKSRC_PCI);
1150 	else
1151 		return (BHND_CLKSRC_XTAL);
1152 }
1153 
1154 static int
1155 bhndb_pci_pwrctl_gate_clock(device_t dev, device_t child,
1156 	bhnd_clock clock)
1157 {
1158 	struct bhndb_pci_softc *sc = device_get_softc(dev);
1159 
1160 	/* Only supported on PCI devices */
1161 	if (bhndb_is_pcie_attached(sc->dev))
1162 		return (ENODEV);
1163 
1164 	/* Only HT is supported */
1165 	if (clock != BHND_CLOCK_HT)
1166 		return (ENXIO);
1167 
1168 	return (bhndb_disable_pci_clocks(sc->dev));
1169 }
1170 
1171 static int
1172 bhndb_pci_pwrctl_ungate_clock(device_t dev, device_t child,
1173 	bhnd_clock clock)
1174 {
1175 	struct bhndb_pci_softc *sc = device_get_softc(dev);
1176 
1177 	/* Only supported on PCI devices */
1178 	if (bhndb_is_pcie_attached(sc->dev))
1179 		return (ENODEV);
1180 
1181 	/* Only HT is supported */
1182 	if (clock != BHND_CLOCK_HT)
1183 		return (ENXIO);
1184 
1185 	return (bhndb_enable_pci_clocks(sc->dev));
1186 }
1187 
1188 /**
1189  * BHNDB_MAP_INTR_ISRC()
1190  */
1191 static int
1192 bhndb_pci_map_intr_isrc(device_t dev, struct resource *irq,
1193     struct bhndb_intr_isrc **isrc)
1194 {
1195 	struct bhndb_pci_softc *sc = device_get_softc(dev);
1196 
1197 	/* There's only one bridged interrupt to choose from */
1198 	*isrc = sc->isrc;
1199 	return (0);
1200 }
1201 
1202 /* siba-specific implementation of BHNDB_ROUTE_INTERRUPTS() */
1203 static int
1204 bhndb_pci_route_siba_interrupts(struct bhndb_pci_softc *sc, device_t child)
1205 {
1206 	uint32_t	sbintvec;
1207 	u_int		ivec;
1208 	int		error;
1209 
1210 	KASSERT(sc->pci_quirks & BHNDB_PCI_QUIRK_SIBA_INTVEC,
1211 	    ("route_siba_interrupts not supported by this hardware"));
1212 
1213 	/* Fetch the sbflag# for the child */
1214 	if ((error = bhnd_get_intr_ivec(child, 0, &ivec)))
1215 		return (error);
1216 
1217 	if (ivec > (sizeof(sbintvec)*8) - 1 /* aka '31' */) {
1218 		/* This should never be an issue in practice */
1219 		device_printf(sc->dev, "cannot route interrupts to high "
1220 		    "sbflag# %u\n", ivec);
1221 		return (ENXIO);
1222 	}
1223 
1224 	BHNDB_PCI_LOCK(sc);
1225 
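	/* Route the child's backplane flag to the PCI core by setting the
	 * corresponding bit in the siba interrupt vector register */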
1226 	sbintvec = bhndb_pci_read_core(sc, SB0_REG_ABS(SIBA_CFG0_INTVEC), 4);
1227 	sbintvec |= (1 << ivec);
1228 	bhndb_pci_write_core(sc, SB0_REG_ABS(SIBA_CFG0_INTVEC), sbintvec, 4);
1229 
1230 	BHNDB_PCI_UNLOCK(sc);
1231 
1232 	return (0);
1233 }
1234 
1235 /* BHNDB_ROUTE_INTERRUPTS() */
1236 static int
1237 bhndb_pci_route_interrupts(device_t dev, device_t child)
1238 {
1239 	struct bhndb_pci_softc	*sc;
1240 	struct bhnd_core_info	 core;
1241 	uint32_t		 core_bit;
1242 	uint32_t		 intmask;
1243 
1244 	sc = device_get_softc(dev);
1245 
1246 	if (sc->pci_quirks & BHNDB_PCI_QUIRK_SIBA_INTVEC)
1247 		return (bhndb_pci_route_siba_interrupts(sc, child));
1248 
1249 	core = bhnd_get_core_info(child);
1250 	if (core.core_idx > BHNDB_PCI_SBIM_COREIDX_MAX) {
1251 		/* This should never be an issue in practice */
1252 		device_printf(dev, "cannot route interrupts to high core "
1253 		    "index %u\n", core.core_idx);
1254 		return (ENXIO);
1255 	}
1256 
1257 	BHNDB_PCI_LOCK(sc);
1258 
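	/* Enable delivery of the child core's interrupt by setting the
	 * core's bit within the SBIM field of the PCI interrupt mask */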
1259 	core_bit = (1<<core.core_idx) << BHNDB_PCI_SBIM_SHIFT;
1260 	intmask = pci_read_config(sc->parent, BHNDB_PCI_INT_MASK, 4);
1261 	intmask |= core_bit;
1262 	pci_write_config(sc->parent, BHNDB_PCI_INT_MASK, intmask, 4);
1263 
1264 	BHNDB_PCI_UNLOCK(sc);
1265 
1266 	return (0);
1267 }
1268 
1269 /**
1270  * Initialize a new bhndb PCI bridge EROM I/O instance. This EROM I/O
1271  * implementation supports mapping of the device enumeration table via the
1272  * @p hr host resources.
1273  *
1274  * @param pio		The instance to be initialized.
1275  * @param dev		The bridge device.
1276  * @param pci_dev	The bridge's parent PCI device.
1277  * @param hr		The host resources to be used to map the device
1278  *			enumeration table.
1279  */
1280 static int
1281 bhndb_pci_eio_init(struct bhndb_pci_eio *pio, device_t dev, device_t pci_dev,
1282     struct bhndb_host_resources *hr)
1283 {
1284 	memset(&pio->eio, 0, sizeof(pio->eio));
1285 	pio->eio.map = bhndb_pci_eio_map;
1286 	pio->eio.read = bhndb_pci_eio_read;
1287 	pio->eio.fini = NULL;
1288 
1289 	pio->dev = dev;
1290 	pio->pci_dev = pci_dev;
1291 	pio->hr = hr;
1292 	pio->win = NULL;
1293 	pio->res = NULL;
1294 
1295 	return (0);
1296 }
1297 
1298 /**
1299  * Attempt to adjust the dynamic register window backing @p pio to permit
1300  * reading @p size bytes at @p addr.
1301  *
1302  * If @p addr or @p size fall outside the existing mapped range, or if
1303  * @p pio is not backed by a dynamic register window, ENXIO will be returned.
1304  *
1305  * @param pio	The bhndb PCI erom I/O state to be modified.
1306  * @param addr	The address to be included in @p pio's mapped range.
 * @param size	The size of the range at @p addr to be included in the mapping.
1307  */
1308 static int
1309 bhndb_pci_eio_adjust_mapping(struct bhndb_pci_eio *pio, bhnd_addr_t addr,
1310     bhnd_size_t size)
1311 {
1312 	bhnd_addr_t	 target;
1313 	bhnd_size_t	 offset;
1314 	int		 error;
1315 
1317 	KASSERT(pio->win != NULL, ("missing register window"));
1318 	KASSERT(pio->res != NULL, ("missing regwin resource"));
1319 	KASSERT(pio->win->win_type == BHNDB_REGWIN_T_DYN,
1320 	    ("unexpected window type %d", pio->win->win_type));
1321 
1322 	/* The requested subrange must fall within the total mapped range */
1323 	if (addr < pio->addr || (addr - pio->addr) > pio->size ||
1324 	    size > pio->size || pio->size - (addr - pio->addr) < size)
1325 	{
1326 		return (ENXIO);
1327 	}
1328 
1329 	/* Do we already have a useable mapping? */
1330 	if (addr >= pio->res_target &&
1331 	    addr < pio->res_target + pio->win->win_size &&
1332 	    (pio->res_target + pio->win->win_size) - addr >= size)
1333 	{
1334 		return (0);
1335 	}
1336 
1337 	/* Page-align the target address */
1338 	offset = addr % pio->win->win_size;
1339 	target = addr - offset;
1340 
1341 	/* Configure the register window */
1342 	error = bhndb_pci_compat_setregwin(pio->dev, pio->pci_dev, pio->win,
1343 	    target);
1344 	if (error) {
1345 		device_printf(pio->dev, "failed to configure dynamic register "
1346 		    "window: %d\n", error);
1347 		return (error);
1348 	}
1349 
1350 	pio->res_target = target;
1351 	return (0);
1352 }
1353 
1354 /* bhnd_erom_io_map() implementation */
1355 static int
1356 bhndb_pci_eio_map(struct bhnd_erom_io *eio, bhnd_addr_t addr,
1357     bhnd_size_t size)
1358 {
1359 	struct bhndb_pci_eio		*pio;
1360 	const struct bhndb_regwin	*regwin;
1361 	struct resource			*r;
1362 	bhnd_addr_t			 target;
1363 	bhnd_size_t			 offset;
1364 	int				 error;
1365 
1366 	pio = (struct bhndb_pci_eio *)eio;
1367 
1368 	/* Locate a useable dynamic register window */
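	/* (the window need not cover the full mapping; it is repositioned on
	 * demand by bhndb_pci_eio_adjust_mapping()) */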
1369 	regwin = bhndb_regwin_find_type(pio->hr->cfg->register_windows,
1370 	    BHNDB_REGWIN_T_DYN, MIN(size, BHND_DEFAULT_CORE_SIZE));
1371 	if (regwin == NULL) {
1372 		device_printf(pio->dev, "unable to map %#jx+%#jx; no "
1373 		    "usable dynamic register window found\n", addr, size);
1374 		return (ENXIO);
1375 	}
1376 
1377 	/* Locate the host resource mapping our register window */
1378 	if ((r = bhndb_host_resource_for_regwin(pio->hr, regwin)) == NULL) {
1379 		device_printf(pio->dev, "unable to map %#jx+%#jx; no "
1380 		    "usable register resource found\n", addr, size);
1381 		return (ENXIO);
1382 	}
1383 
1384 	/* Page-align the target address */
1385 	offset = addr % regwin->win_size;
1386 	target = addr - offset;
1387 
1388 	/* Configure the register window */
1389 	error = bhndb_pci_compat_setregwin(pio->dev, pio->pci_dev, regwin,
1390 	    target);
1391 	if (error) {
1392 		device_printf(pio->dev, "failed to configure dynamic register "
1393 		    "window: %d\n", error);
1394 		return (error);
1395 	}
1396 
1397 	/* Update our mapping state */
1398 	pio->win = regwin;
1399 	pio->res = r;
1400 	pio->addr = addr;
1401 	pio->size = size;
1402 	pio->res_target = target;
1403 
1404 	return (0);
1405 }
1406 
1407 /* bhnd_erom_io_read() implementation */
1408 static uint32_t
1409 bhndb_pci_eio_read(struct bhnd_erom_io *eio, bhnd_size_t offset, u_int width)
1410 {
1411 	struct bhndb_pci_eio		*pio;
1412 	bhnd_addr_t			 addr;
1413 	bus_size_t			 res_offset;
1414 	int				 error;
1415 
1416 	pio = (struct bhndb_pci_eio *)eio;
1417 
1418 	/* Calculate absolute address */
1419 	if (BHND_SIZE_MAX - offset < pio->addr) {
1420 		device_printf(pio->dev, "invalid offset %#jx+%#jx\n", pio->addr,
1421 		    offset);
1422 		return (UINT32_MAX);
1423 	}
1424 
1425 	addr = pio->addr + offset;
1426 
1427 	/* Adjust the mapping for our read */
1428 	if ((error = bhndb_pci_eio_adjust_mapping(pio, addr, width))) {
1429 		device_printf(pio->dev, "failed to adjust register mapping: "
1430 		    "%d\n", error);
1431 		return (UINT32_MAX);
1432 	}
1433 
1434 	KASSERT(pio->res_target <= addr, ("invalid mapping (%#jx vs. %#jx)",
1435 	    pio->res_target, addr));
1436 
1437 	/* Determine the actual read offset within our register window
1438 	 * resource */
1439 	res_offset = (addr - pio->res_target) + pio->win->win_offset;
1440 
1441 	/* Perform our read */
1442 	switch (width) {
1443 	case 1:
1444 		return (bus_read_1(pio->res, res_offset));
1445 	case 2:
1446 		return (bus_read_2(pio->res, res_offset));
1447 	case 4:
1448 		return (bus_read_4(pio->res, res_offset));
1449 	default:
1450 		panic("unsupported width: %u", width);
1451 	}
1452 }
1453 
1454 static device_method_t bhndb_pci_methods[] = {
1455 	/* Device interface */
1456 	DEVMETHOD(device_probe,				bhndb_pci_probe),
1457 	DEVMETHOD(device_attach,			bhndb_pci_attach),
1458 	DEVMETHOD(device_resume,			bhndb_pci_resume),
1459 	DEVMETHOD(device_suspend,			bhndb_pci_suspend),
1460 	DEVMETHOD(device_detach,			bhndb_pci_detach),
1461 
1462 	/* BHNDB interface */
1463 	DEVMETHOD(bhndb_set_window_addr,		bhndb_pci_set_window_addr),
1464 	DEVMETHOD(bhndb_populate_board_info,		bhndb_pci_populate_board_info),
1465 	DEVMETHOD(bhndb_map_intr_isrc,			bhndb_pci_map_intr_isrc),
1466 	DEVMETHOD(bhndb_route_interrupts,		bhndb_pci_route_interrupts),
1467 
1468 	/* BHND PWRCTL hostb interface */
1469 	DEVMETHOD(bhnd_pwrctl_hostb_get_clksrc,		bhndb_pci_pwrctl_get_clksrc),
1470 	DEVMETHOD(bhnd_pwrctl_hostb_gate_clock,		bhndb_pci_pwrctl_gate_clock),
1471 	DEVMETHOD(bhnd_pwrctl_hostb_ungate_clock,	bhndb_pci_pwrctl_ungate_clock),
1472 
1473 	DEVMETHOD_END
1474 };
1475 
1476 DEFINE_CLASS_1(bhndb, bhndb_pci_driver, bhndb_pci_methods,
1477     sizeof(struct bhndb_pci_softc), bhndb_driver);
1478 
1479 MODULE_VERSION(bhndb_pci, 1);
1480 MODULE_DEPEND(bhndb_pci, bhnd_pci_hostb, 1, 1, 1);
1481 MODULE_DEPEND(bhndb_pci, pci, 1, 1, 1);
1482 MODULE_DEPEND(bhndb_pci, bhndb, 1, 1, 1);
1483 MODULE_DEPEND(bhndb_pci, bhnd, 1, 1, 1);
1484