/*-
 * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Landon Fuller
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/refcount.h>
#include <sys/systm.h>

#include <machine/bus.h>

#include <dev/bhnd/cores/chipc/chipc.h>
#include <dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl.h>

#include "siba_eromvar.h"

#include "sibareg.h"
#include "sibavar.h"

/* RID used when allocating EROM resources */
#define	SIBA_EROM_RID	0

static bhnd_erom_class_t *
siba_get_erom_class(driver_t *driver)
{
	return (&siba_erom_parser);
}

int
siba_probe(device_t dev)
{
	device_set_desc(dev, "SIBA BHND bus");
	return (BUS_PROBE_DEFAULT);
}

/**
 * Default siba(4) bus driver implementation of DEVICE_ATTACH().
 *
 * This implementation initializes internal siba(4) state and performs
 * bus enumeration; it must be called by subclassing drivers in
 * DEVICE_ATTACH() before any other bus methods are used.
 */
int
siba_attach(device_t dev)
{
	struct siba_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	SIBA_LOCK_INIT(sc);

	/* Enumerate children */
	if ((error = siba_add_children(dev))) {
		device_delete_children(dev);
		SIBA_LOCK_DESTROY(sc);
		return (error);
	}

	return (0);
}

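/*
 * Illustrative sketch only: a hypothetical bus attachment driver
 * subclassing siba(4) (e.g. a "siba_foo" driver; the name is invented
 * here) would chain to siba_attach() before doing anything else:
 *
 *	static int
 *	siba_foo_attach(device_t dev)
 *	{
 *		int error;
 *
 *		// Enumerate cores and initialize bus state first
 *		if ((error = siba_attach(dev)))
 *			return (error);
 *
 *		// ... attachment-specific setup would follow here ...
 *
 *		return (bus_generic_attach(dev));
 *	}
 */
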
int
siba_detach(device_t dev)
{
	struct siba_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);

	if ((error = bhnd_generic_detach(dev)))
		return (error);

	SIBA_LOCK_DESTROY(sc);

	return (0);
}

int
siba_resume(device_t dev)
{
	return (bhnd_generic_resume(dev));
}

int
siba_suspend(device_t dev)
{
	return (bhnd_generic_suspend(dev));
}

static int
siba_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct siba_softc		*sc;
	const struct siba_devinfo	*dinfo;
	const struct bhnd_core_info	*cfg;

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);
	cfg = &dinfo->core_id.core_info;

	switch (index) {
	case BHND_IVAR_VENDOR:
		*result = cfg->vendor;
		return (0);
	case BHND_IVAR_DEVICE:
		*result = cfg->device;
		return (0);
	case BHND_IVAR_HWREV:
		*result = cfg->hwrev;
		return (0);
	case BHND_IVAR_DEVICE_CLASS:
		*result = bhnd_core_class(cfg);
		return (0);
	case BHND_IVAR_VENDOR_NAME:
		*result = (uintptr_t) bhnd_vendor_name(cfg->vendor);
		return (0);
	case BHND_IVAR_DEVICE_NAME:
		*result = (uintptr_t) bhnd_core_name(cfg);
		return (0);
	case BHND_IVAR_CORE_INDEX:
		*result = cfg->core_idx;
		return (0);
	case BHND_IVAR_CORE_UNIT:
		*result = cfg->unit;
		return (0);
	case BHND_IVAR_PMU_INFO:
		SIBA_LOCK(sc);
		switch (dinfo->pmu_state) {
		case SIBA_PMU_NONE:
			*result = (uintptr_t)NULL;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_BHND:
			*result = (uintptr_t)dinfo->pmu.bhnd_info;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_PWRCTL:
		case SIBA_PMU_FIXED:
			*result = (uintptr_t)NULL;
			SIBA_UNLOCK(sc);
			return (0);
		}

		panic("invalid PMU state: %d", dinfo->pmu_state);
		return (ENXIO);

	default:
		return (ENOENT);
	}
}

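/*
 * Illustrative note: child drivers normally read these instance variables
 * through the generic bhnd(4) accessor wrappers rather than calling
 * BUS_READ_IVAR() directly; assuming the standard bhnd.h accessors, e.g.:
 *
 *	uint16_t vendor = bhnd_get_vendor(dev);
 *	uint16_t device = bhnd_get_device(dev);
 *	uint8_t  hwrev  = bhnd_get_hwrev(dev);
 */
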
static int
siba_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	switch (index) {
	case BHND_IVAR_VENDOR:
	case BHND_IVAR_DEVICE:
	case BHND_IVAR_HWREV:
	case BHND_IVAR_DEVICE_CLASS:
	case BHND_IVAR_VENDOR_NAME:
	case BHND_IVAR_DEVICE_NAME:
	case BHND_IVAR_CORE_INDEX:
	case BHND_IVAR_CORE_UNIT:
		return (EINVAL);
	case BHND_IVAR_PMU_INFO:
		SIBA_LOCK(sc);
		switch (dinfo->pmu_state) {
		case SIBA_PMU_NONE:
		case SIBA_PMU_BHND:
			dinfo->pmu.bhnd_info = (void *)value;
			dinfo->pmu_state = SIBA_PMU_BHND;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_PWRCTL:
		case SIBA_PMU_FIXED:
			panic("bhnd_set_pmu_info() called with siba PMU state "
			    "%d", dinfo->pmu_state);
			return (ENXIO);
		}

		panic("invalid PMU state: %d", dinfo->pmu_state);
		return (ENXIO);

	default:
		return (ENOENT);
	}
}

static struct resource_list *
siba_get_resource_list(device_t dev, device_t child)
{
	struct siba_devinfo *dinfo = device_get_ivars(child);
	return (&dinfo->resources);
}

/* BHND_BUS_ALLOC_PMU() */
static int
siba_alloc_pmu(device_t dev, device_t child)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	device_t		 chipc;
	device_t		 pwrctl;
	struct chipc_caps	 ccaps;
	siba_pmu_state		 pmu_state;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);
	pwrctl = NULL;

	/* Fetch ChipCommon capability flags */
	chipc = bhnd_retain_provider(child, BHND_SERVICE_CHIPC);
	if (chipc != NULL) {
		ccaps = *BHND_CHIPC_GET_CAPS(chipc);
		bhnd_release_provider(child, chipc, BHND_SERVICE_CHIPC);
	} else {
		memset(&ccaps, 0, sizeof(ccaps));
	}

	/* Defer to bhnd(4)'s PMU implementation if ChipCommon exists and
	 * advertises PMU support */
	if (ccaps.pmu) {
		if ((error = bhnd_generic_alloc_pmu(dev, child)))
			return (error);

		KASSERT(dinfo->pmu_state == SIBA_PMU_BHND,
		    ("unexpected PMU state: %d", dinfo->pmu_state));

		return (0);
	}

	/*
	 * This is either a legacy PWRCTL chipset, or the device does not
	 * support dynamic clock control.
	 *
	 * We need to map all bhnd(4) bus PMU requests to PWRCTL or no-op
	 * operations.
	 */
	if (ccaps.pwr_ctrl) {
		pmu_state = SIBA_PMU_PWRCTL;
		pwrctl = bhnd_retain_provider(child, BHND_SERVICE_PWRCTL);
		if (pwrctl == NULL) {
			device_printf(dev, "PWRCTL not found\n");
			return (ENODEV);
		}
	} else {
		pmu_state = SIBA_PMU_FIXED;
		pwrctl = NULL;
	}

	SIBA_LOCK(sc);

	/* Per-core PMU state already allocated? */
	if (dinfo->pmu_state != SIBA_PMU_NONE) {
		panic("duplicate PMU allocation for %s",
		    device_get_nameunit(child));
	}

	/* Update the child's PMU allocation state, and transfer ownership of
	 * the PWRCTL provider reference (if any) */
	dinfo->pmu_state = pmu_state;
	dinfo->pmu.pwrctl = pwrctl;

	SIBA_UNLOCK(sc);

	return (0);
}

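/*
 * Summary of the per-core PMU allocation states managed above:
 *
 *	SIBA_PMU_NONE	no PMU state allocated (the initial state)
 *	SIBA_PMU_BHND	delegated to the generic bhnd(4) PMU implementation
 *	SIBA_PMU_PWRCTL	backed by a legacy PWRCTL provider reference
 *	SIBA_PMU_FIXED	fixed clocks; PMU requests are mapped to no-ops
 */
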
/* BHND_BUS_RELEASE_PMU() */
static int
siba_release_pmu(device_t dev, device_t child)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	device_t		 pwrctl;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("pmu over-release for %s", device_get_nameunit(child));
		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_release_pmu(dev, child));

	case SIBA_PMU_PWRCTL:
		/* Requesting BHND_CLOCK_DYN releases any outstanding clock
		 * reservations */
		pwrctl = dinfo->pmu.pwrctl;
		error = bhnd_pwrctl_request_clock(pwrctl, child,
		    BHND_CLOCK_DYN);
		if (error) {
			SIBA_UNLOCK(sc);
			return (error);
		}

		/* Clean up the child's PMU state */
		dinfo->pmu_state = SIBA_PMU_NONE;
		dinfo->pmu.pwrctl = NULL;
		SIBA_UNLOCK(sc);

		/* Release the provider reference */
		bhnd_release_provider(child, pwrctl, BHND_SERVICE_PWRCTL);
		return (0);

	case SIBA_PMU_FIXED:
		/* Clean up the child's PMU state */
		KASSERT(dinfo->pmu.pwrctl == NULL,
		    ("PWRCTL reference with FIXED state"));

		dinfo->pmu_state = SIBA_PMU_NONE;
		dinfo->pmu.pwrctl = NULL;
		SIBA_UNLOCK(sc);

		/* All clocks are fixed; nothing further to release */
		return (0);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_GET_CLOCK_LATENCY() */
static int
siba_get_clock_latency(device_t dev, device_t child, bhnd_clock clock,
    u_int *latency)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_get_clock_latency(dev, child, clock,
		    latency));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_get_clock_latency(dinfo->pmu.pwrctl, clock,
		    latency);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* HT clock is always available, and incurs no transition
		 * delay. */
		switch (clock) {
		case BHND_CLOCK_HT:
			*latency = 0;
			return (0);

		default:
			return (ENODEV);
		}
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_GET_CLOCK_FREQ() */
static int
siba_get_clock_freq(device_t dev, device_t child, bhnd_clock clock,
    u_int *freq)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_get_clock_freq(dev, child, clock, freq));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_get_clock_freq(dinfo->pmu.pwrctl, clock,
		    freq);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_REQUEST_EXT_RSRC() */
static int
siba_request_ext_rsrc(device_t dev, device_t child, u_int rsrc)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_request_ext_rsrc(dev, child, rsrc));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		/* HW does not support per-core external resources */
		SIBA_UNLOCK(sc);
		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_RELEASE_EXT_RSRC() */
static int
siba_release_ext_rsrc(device_t dev, device_t child, u_int rsrc)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_release_ext_rsrc(dev, child, rsrc));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		/* HW does not support per-core external resources */
		SIBA_UNLOCK(sc);
		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_REQUEST_CLOCK() */
static int
siba_request_clock(device_t dev, device_t child, bhnd_clock clock)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_request_clock(dev, child, clock));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child,
		    clock);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* HT clock is always available, and fulfills any of the
		 * following clock requests */
		switch (clock) {
		case BHND_CLOCK_DYN:
		case BHND_CLOCK_ILP:
		case BHND_CLOCK_ALP:
		case BHND_CLOCK_HT:
			return (0);

		default:
			return (ENODEV);
		}
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

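/*
 * Illustrative sketch: a child core driver would request a clock by first
 * allocating its PMU state and then issuing the request; assuming the
 * standard bhnd(4) wrappers:
 *
 *	if ((error = bhnd_alloc_pmu(dev)))
 *		return (error);
 *	error = bhnd_request_clock(dev, BHND_CLOCK_HT);
 */
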
/* BHND_BUS_ENABLE_CLOCKS() */
static int
siba_enable_clocks(device_t dev, device_t child, uint32_t clocks)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_enable_clocks(dev, child, clocks));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* All (supported) clocks are already enabled by default */
		clocks &= ~(BHND_CLOCK_DYN |
			    BHND_CLOCK_ILP |
			    BHND_CLOCK_ALP |
			    BHND_CLOCK_HT);

		if (clocks != 0) {
			device_printf(dev, "%s requested unknown clocks: %#x\n",
			    device_get_nameunit(child), clocks);
			return (ENODEV);
		}

		return (0);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

static int
siba_read_iost(device_t dev, device_t child, uint16_t *iost)
{
	uint32_t	tmhigh;
	int		error;

	error = bhnd_read_config(child, SIBA_CFG0_TMSTATEHIGH, &tmhigh, 4);
	if (error)
		return (error);

	*iost = (SIBA_REG_GET(tmhigh, TMH_SISF));
	return (0);
}

static int
siba_read_ioctl(device_t dev, device_t child, uint16_t *ioctl)
{
	uint32_t	ts_low;
	int		error;

	if ((error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4)))
		return (error);

	*ioctl = (SIBA_REG_GET(ts_low, TML_SICF));
	return (0);
}

static int
siba_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	uint32_t		 ts_low, ts_mask;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* Fetch CFG0 mapping */
	dinfo = device_get_ivars(child);
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* Mask and set TMSTATELOW core flag bits */
	ts_mask = (mask << SIBA_TML_SICF_SHIFT) & SIBA_TML_SICF_MASK;
	ts_low = (value << SIBA_TML_SICF_SHIFT) & ts_mask;

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, ts_mask);
	return (0);
}

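/*
 * Worked example: the BHND_IOCTL flags occupy the SICF field of
 * TMSTATELOW, and are shifted into place by SIBA_TML_SICF_SHIFT. Writing
 * value = mask = BHND_IOCTL_CLK_EN above therefore computes:
 *
 *	ts_mask = (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT)
 *	ts_low  = ts_mask
 *
 * updating only the clock-enable bit and leaving all other TMSTATELOW
 * bits untouched.
 */
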
static bool
siba_is_hw_suspended(device_t dev, device_t child)
{
	uint32_t		ts_low;
	uint16_t		ioctl;
	int			error;

	/* Fetch target state */
	error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4);
	if (error) {
		device_printf(child, "error reading HW reset state: %d\n",
		    error);
		return (true);
	}

	/* Is core held in RESET? */
	if (ts_low & SIBA_TML_RESET)
		return (true);

	/* Is target reject enabled? */
	if (ts_low & SIBA_TML_REJ_MASK)
		return (true);

	/* Is core clocked? */
	ioctl = SIBA_REG_GET(ts_low, TML_SICF);
	if (!(ioctl & BHND_IOCTL_CLK_EN))
		return (true);

	return (false);
}

static int
siba_reset_hw(device_t dev, device_t child, uint16_t ioctl,
    uint16_t reset_ioctl)
{
	struct siba_devinfo		*dinfo;
	struct bhnd_resource		*r;
	uint32_t			 ts_low, imstate;
	uint16_t			 clkflags;
	int				 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	dinfo = device_get_ivars(child);

	/* Can't suspend the core without access to the CFG0 registers */
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
	clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
	if (ioctl & clkflags)
		return (EINVAL);

	/* Place core into known RESET state */
	if ((error = bhnd_suspend_hw(child, reset_ioctl)))
		return (error);

	/* Set RESET, clear REJ, set the caller's IOCTL flags, and
	 * force clocks to ensure the signal propagates throughout the
	 * core. */
	ts_low = SIBA_TML_RESET |
		 (ioctl << SIBA_TML_SICF_SHIFT) |
		 (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) |
		 (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT);

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, UINT32_MAX);

	/* Clear any target errors */
	if (bhnd_bus_read_4(r, SIBA_CFG0_TMSTATEHIGH) & SIBA_TMH_SERR) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH,
		    0x0, SIBA_TMH_SERR);
	}

	/* Clear any initiator errors */
	imstate = bhnd_bus_read_4(r, SIBA_CFG0_IMSTATE);
	if (imstate & (SIBA_IM_IBE|SIBA_IM_TO)) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0,
		    SIBA_IM_IBE|SIBA_IM_TO);
	}

	/* Release from RESET while leaving clocks forced, ensuring the
	 * signal propagates throughout the core */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    SIBA_TML_RESET);

	/* The core should now be active; we can clear the BHND_IOCTL_CLK_FORCE
	 * bit and allow the core to manage clock gating. */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT));

	return (0);
}

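/*
 * Illustrative sketch: a child driver would typically bring its core out
 * of reset with no additional I/O control flags set; assuming the
 * standard bhnd_reset_hw() wrapper:
 *
 *	if ((error = bhnd_reset_hw(dev, 0, 0)))
 *		return (error);
 */
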
static int
siba_suspend_hw(device_t dev, device_t child, uint16_t ioctl)
{
	struct siba_softc		*sc;
	struct siba_devinfo		*dinfo;
	struct bhnd_resource		*r;
	uint32_t			 idl, ts_low, ts_mask;
	uint16_t			 cflags, clkflags;
	int				 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	/* Can't suspend the core without access to the CFG0 registers */
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
	clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
	if (ioctl & clkflags)
		return (EINVAL);

	/* Already in RESET? */
	ts_low = bhnd_bus_read_4(r, SIBA_CFG0_TMSTATELOW);
	if (ts_low & SIBA_TML_RESET)
		return (0);

	/* If clocks are already disabled, we can place the core directly
	 * into RESET|REJ while setting the caller's IOCTL flags. */
	cflags = SIBA_REG_GET(ts_low, TML_SICF);
	if (!(cflags & BHND_IOCTL_CLK_EN)) {
		ts_low = SIBA_TML_RESET | SIBA_TML_REJ |
			 (ioctl << SIBA_TML_SICF_SHIFT);
		ts_mask = SIBA_TML_RESET | SIBA_TML_REJ | SIBA_TML_SICF_MASK;

		siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
		    ts_low, ts_mask);
		return (0);
	}

	/* Reject further transactions reaching this core */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    SIBA_TML_REJ, SIBA_TML_REJ);

	/* Wait for transaction busy flag to clear for all transactions
	 * initiated by this core */
	error = siba_wait_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH,
	    0x0, SIBA_TMH_BUSY, 100000);
	if (error)
		return (error);

	/* If this is an initiator core, we need to reject initiator
	 * transactions too. */
	idl = bhnd_bus_read_4(r, SIBA_CFG0_IDLOW);
	if (idl & SIBA_IDL_INIT) {
		/* Reject further initiator transactions */
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
		    SIBA_IM_RJ, SIBA_IM_RJ);

		/* Wait for initiator busy flag to clear */
		error = siba_wait_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
		    0x0, SIBA_IM_BY, 100000);
		if (error)
			return (error);
	}

	/* Put the core into RESET, set the caller's IOCTL flags, and
	 * force clocks to ensure the RESET signal propagates throughout the
	 * core. */
	ts_low = SIBA_TML_RESET |
		 (ioctl << SIBA_TML_SICF_SHIFT) |
		 (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) |
		 (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT);
	ts_mask = SIBA_TML_RESET |
		  SIBA_TML_SICF_MASK;

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low,
	    ts_mask);

	/* Give RESET ample time */
	DELAY(10);

	/* Clear previously asserted initiator reject */
	if (idl & SIBA_IDL_INIT) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0,
		    SIBA_IM_RJ);
	}

	/* Disable all clocks, leaving RESET and REJ asserted */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) << SIBA_TML_SICF_SHIFT);

	/*
	 * Core is now in RESET.
	 *
	 * If the core holds any PWRCTL clock reservations, we need to release
	 * those now. This emulates the standard bhnd(4) PMU behavior of RESET
	 * automatically clearing clkctl.
	 */
	SIBA_LOCK(sc);
	if (dinfo->pmu_state == SIBA_PMU_PWRCTL) {
		error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child,
		    BHND_CLOCK_DYN);
		SIBA_UNLOCK(sc);

		if (error) {
			device_printf(child, "failed to release clock request: "
			    "%d\n", error);
			return (error);
		}

		return (0);
	} else {
		SIBA_UNLOCK(sc);
		return (0);
	}
}

static int
siba_read_config(device_t dev, device_t child, bus_size_t offset, void *value,
    u_int width)
{
	struct siba_devinfo	*dinfo;
	rman_res_t		 r_size;

	/* Must be directly attached */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* CFG0 registers must be available */
	dinfo = device_get_ivars(child);
	if (dinfo->cfg_res[0] == NULL)
		return (ENODEV);

	/* Offset must fall within CFG0 */
	r_size = rman_get_size(dinfo->cfg_res[0]->res);
	if (r_size < offset || r_size - offset < width)
		return (EFAULT);

	switch (width) {
	case 1:
		*((uint8_t *)value) = bhnd_bus_read_1(dinfo->cfg_res[0],
		    offset);
		return (0);
	case 2:
		*((uint16_t *)value) = bhnd_bus_read_2(dinfo->cfg_res[0],
		    offset);
		return (0);
	case 4:
		*((uint32_t *)value) = bhnd_bus_read_4(dinfo->cfg_res[0],
		    offset);
		return (0);
	default:
		return (EINVAL);
	}
}

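/*
 * Example usage (as in siba_read_iost() above): a directly attached child
 * reads its 32-bit TMSTATEHIGH register via the CFG0 block:
 *
 *	uint32_t tmhigh;
 *	error = bhnd_read_config(child, SIBA_CFG0_TMSTATEHIGH, &tmhigh, 4);
 */
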
static int
siba_write_config(device_t dev, device_t child, bus_size_t offset,
    const void *value, u_int width)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	rman_res_t		 r_size;

	/* Must be directly attached */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* CFG0 registers must be available */
	dinfo = device_get_ivars(child);
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* Offset must fall within CFG0 */
	r_size = rman_get_size(r->res);
	if (r_size < offset || r_size - offset < width)
		return (EFAULT);

	switch (width) {
	case 1:
		bhnd_bus_write_1(r, offset, *(const uint8_t *)value);
		return (0);
	case 2:
		bhnd_bus_write_2(r, offset, *(const uint16_t *)value);
		return (0);
	case 4:
		bhnd_bus_write_4(r, offset, *(const uint32_t *)value);
		return (0);
	default:
		return (EINVAL);
	}
}

static u_int
siba_get_port_count(device_t dev, device_t child, bhnd_port_type type)
{
	struct siba_devinfo *dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child,
		    type));

	dinfo = device_get_ivars(child);
	return (siba_port_count(&dinfo->core_id, type));
}

static u_int
siba_get_region_count(device_t dev, device_t child, bhnd_port_type type,
    u_int port)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev), child,
		    type, port));

	dinfo = device_get_ivars(child);
	return (siba_port_region_count(&dinfo->core_id, type, port));
}

static int
siba_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;
	struct siba_cfg_block	*cfg;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_PORT_RID(device_get_parent(dev), child,
		    port_type, port_num, region_num));

	dinfo = device_get_ivars(child);

	/* Look for a matching addrspace entry */
	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
	if (addrspace != NULL)
		return (addrspace->sa_rid);

	/* Try the config blocks */
	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
	if (cfg != NULL)
		return (cfg->cb_rid);

	/* Not found */
	return (-1);
}

static int
siba_decode_port_rid(device_t dev, device_t child, int type, int rid,
    bhnd_port_type *port_type, u_int *port_num, u_int *region_num)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_DECODE_PORT_RID(device_get_parent(dev), child,
		    type, rid, port_type, port_num, region_num));

	dinfo = device_get_ivars(child);

	/* Ports are always memory mapped */
	if (type != SYS_RES_MEMORY)
		return (EINVAL);

	/* Look for a matching addrspace entry */
	for (u_int i = 0; i < dinfo->core_id.num_admatch; i++) {
		if (dinfo->addrspace[i].sa_rid != rid)
			continue;

		*port_type = BHND_PORT_DEVICE;
		*port_num = siba_addrspace_device_port(i);
		*region_num = siba_addrspace_device_region(i);
		return (0);
	}

	/* Try the config blocks */
	for (u_int i = 0; i < dinfo->core_id.num_cfg_blocks; i++) {
		if (dinfo->cfg[i].cb_rid != rid)
			continue;

		*port_type = BHND_PORT_AGENT;
		*port_num = siba_cfg_agent_port(i);
		*region_num = siba_cfg_agent_region(i);
		return (0);
	}

	/* Not found */
	return (ENOENT);
}

static int
siba_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;
	struct siba_cfg_block	*cfg;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev) {
		return (BHND_BUS_GET_REGION_ADDR(device_get_parent(dev), child,
		    port_type, port_num, region_num, addr, size));
	}

	dinfo = device_get_ivars(child);

	/* Look for a matching addrspace */
	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
	if (addrspace != NULL) {
		*addr = addrspace->sa_base;
		*size = addrspace->sa_size - addrspace->sa_bus_reserved;
		return (0);
	}

	/* Look for a matching cfg block */
	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
	if (cfg != NULL) {
		*addr = cfg->cb_base;
		*size = cfg->cb_size;
		return (0);
	}

	/* Not found */
	return (ENOENT);
}

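/*
 * Illustrative sketch: a child driver would resolve the bus address and
 * size of its first device-port register region as follows; assuming the
 * standard bhnd_get_region_addr() wrapper:
 *
 *	bhnd_addr_t	addr;
 *	bhnd_size_t	size;
 *
 *	error = bhnd_get_region_addr(dev, BHND_PORT_DEVICE, 0, 0, &addr,
 *	    &size);
 */
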
/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT().
 */
u_int
siba_get_intr_count(device_t dev, device_t child)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_INTR_COUNT(device_get_parent(dev), child));

	dinfo = device_get_ivars(child);
	if (!dinfo->core_id.intr_en) {
		/* No interrupts */
		return (0);
	} else {
		/* One assigned interrupt */
		return (1);
	}
}

/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_IVEC().
 */
int
siba_get_intr_ivec(device_t dev, device_t child, u_int intr, u_int *ivec)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_INTR_IVEC(device_get_parent(dev), child,
		    intr, ivec));

	/* Must be a valid interrupt ID */
	if (intr >= siba_get_intr_count(dev, child))
		return (ENXIO);

	KASSERT(intr == 0, ("invalid ivec %u", intr));

	dinfo = device_get_ivars(child);

	KASSERT(dinfo->core_id.intr_en,
	    ("core does not have an interrupt assigned"));

	*ivec = dinfo->core_id.intr_flag;
	return (0);
}

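/*
 * Note: the vector returned above is the core's backplane interrupt flag
 * number (intr_flag), not a host IRQ number; the platform's interrupt
 * controller (e.g. an embedded MIPS core) is responsible for mapping it
 * to a host interrupt.
 */
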
/**
 * Map per-core configuration blocks for @p dinfo.
 *
 * @param dev The siba bus device.
 * @param dinfo The device info instance on which to map all per-core
 * configuration blocks.
 */
static int
siba_map_cfg_resources(device_t dev, struct siba_devinfo *dinfo)
{
	struct siba_addrspace	*addrspace;
	rman_res_t		 r_start, r_count, r_end;
	uint8_t			 num_cfg;
	int			 rid;

	num_cfg = dinfo->core_id.num_cfg_blocks;
	if (num_cfg > SIBA_MAX_CFG) {
		device_printf(dev, "config block count %hhu out of range\n",
		    num_cfg);
		return (ENXIO);
	}

	/* Fetch the core register address space */
	addrspace = siba_find_addrspace(dinfo, BHND_PORT_DEVICE, 0, 0);
	if (addrspace == NULL) {
		device_printf(dev, "missing device registers\n");
		return (ENXIO);
	}

	/*
	 * Map the per-core configuration blocks
	 */
	for (uint8_t i = 0; i < num_cfg; i++) {
		/* Add to child's resource list */
		r_start = addrspace->sa_base + SIBA_CFG_OFFSET(i);
		r_count = SIBA_CFG_SIZE;
		r_end = r_start + r_count - 1;

		rid = resource_list_add_next(&dinfo->resources, SYS_RES_MEMORY,
		    r_start, r_end, r_count);

		/* Initialize config block descriptor */
		dinfo->cfg[i] = ((struct siba_cfg_block) {
			.cb_base = r_start,
			.cb_size = SIBA_CFG_SIZE,
			.cb_rid = rid
		});

		/* Map the config resource for bus-level access */
		dinfo->cfg_rid[i] = SIBA_CFG_RID(dinfo, i);
		dinfo->cfg_res[i] = BHND_BUS_ALLOC_RESOURCE(dev, dev,
		    SYS_RES_MEMORY, &dinfo->cfg_rid[i], r_start, r_end,
		    r_count, RF_ACTIVE|RF_SHAREABLE);

		if (dinfo->cfg_res[i] == NULL) {
			device_printf(dev, "failed to allocate SIBA_CFG%hhu\n",
			    i);
			return (ENXIO);
		}
	}

	return (0);
}

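/*
 * Worked example of the mapping above, using a hypothetical core register
 * space based at sa_base = 0x18000000: config block 0 spans
 *
 *	r_start = 0x18000000 + SIBA_CFG_OFFSET(0)
 *	r_end   = r_start + SIBA_CFG_SIZE - 1
 *
 * and is both added to the child's resource list and mapped for bus-level
 * access under its own RID.
 */
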
static device_t
siba_add_child(device_t dev, u_int order, const char *name, int unit)
{
	struct siba_devinfo	*dinfo;
	device_t		 child;

	child = device_add_child_ordered(dev, order, name, unit);
	if (child == NULL)
		return (NULL);

	if ((dinfo = siba_alloc_dinfo(dev)) == NULL) {
		device_delete_child(dev, child);
		return (NULL);
	}

	device_set_ivars(child, dinfo);

	return (child);
}

static void
siba_child_deleted(device_t dev, device_t child)
{
	struct siba_devinfo	*dinfo;

	/* Call required bhnd(4) implementation */
	bhnd_generic_child_deleted(dev, child);

	/* Free siba device info */
	if ((dinfo = device_get_ivars(child)) != NULL)
		siba_free_dinfo(dev, child, dinfo);

	device_set_ivars(child, NULL);
}

/**
 * Scan the core table and add all valid discovered cores to
 * the bus.
 *
 * @param dev The siba bus device.
 */
int
siba_add_children(device_t dev)
{
	bhnd_erom_t			*erom;
	struct siba_erom		*siba_erom;
	struct bhnd_erom_io		*eio;
	const struct bhnd_chipid	*cid;
	struct siba_core_id		*cores;
	device_t			*children;
	int				 error;

	cid = BHND_BUS_GET_CHIPID(dev, dev);

	/* Allocate our EROM parser */
	eio = bhnd_erom_iores_new(dev, SIBA_EROM_RID);
	erom = bhnd_erom_alloc(&siba_erom_parser, cid, eio);
	if (erom == NULL) {
		bhnd_erom_io_fini(eio);
		return (ENODEV);
	}

	/* Allocate our temporary core and device table */
	cores = malloc(sizeof(*cores) * cid->ncores, M_BHND, M_WAITOK);
	children = malloc(sizeof(*children) * cid->ncores, M_BHND,
	    M_WAITOK | M_ZERO);

	/*
	 * Add child devices for all discovered cores.
	 *
	 * On bridged devices, we'll exhaust our available register windows if
	 * we map config blocks on unpopulated/disabled cores. To avoid this, we
	 * defer mapping of the per-core siba(4) config blocks until all cores
	 * have been enumerated and otherwise configured.
	 */
	siba_erom = (struct siba_erom *)erom;
	for (u_int i = 0; i < cid->ncores; i++) {
		struct siba_devinfo	*dinfo;
		device_t		 child;

		if ((error = siba_erom_get_core_id(siba_erom, i, &cores[i])))
			goto failed;

		/* Add the child device */
		child = BUS_ADD_CHILD(dev, 0, NULL, -1);
		if (child == NULL) {
			error = ENXIO;
			goto failed;
		}

		children[i] = child;

		/* Initialize per-device bus info */
		if ((dinfo = device_get_ivars(child)) == NULL) {
			error = ENXIO;
			goto failed;
		}

		if ((error = siba_init_dinfo(dev, child, dinfo, &cores[i])))
			goto failed;

		/* If pins are floating or the hardware is otherwise
		 * unpopulated, the device shouldn't be used. */
		if (bhnd_is_hw_disabled(child))
			device_disable(child);
	}

	/* Free EROM (and any bridge register windows it might hold) */
	bhnd_erom_free(erom);
	erom = NULL;

	/* Map all valid cores' config register blocks and perform interrupt
	 * assignment */
	for (u_int i = 0; i < cid->ncores; i++) {
		struct siba_devinfo	*dinfo;
		device_t		 child;

		child = children[i];

		/* Skip if core is disabled */
		if (bhnd_is_hw_disabled(child))
			continue;

		dinfo = device_get_ivars(child);

		/* Map the core's config blocks */
		if ((error = siba_map_cfg_resources(dev, dinfo)))
			goto failed;

		/* Issue bus callback for fully initialized child. */
		BHND_BUS_CHILD_ADDED(dev, child);
	}

	free(cores, M_BHND);
	free(children, M_BHND);

	return (0);

failed:
	for (u_int i = 0; i < cid->ncores; i++) {
		if (children[i] == NULL)
			continue;

		device_delete_child(dev, children[i]);
	}

	free(cores, M_BHND);
	free(children, M_BHND);
	if (erom != NULL)
		bhnd_erom_free(erom);

	return (error);
}

static device_method_t siba_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			siba_probe),
	DEVMETHOD(device_attach,		siba_attach),
	DEVMETHOD(device_detach,		siba_detach),
	DEVMETHOD(device_resume,		siba_resume),
	DEVMETHOD(device_suspend,		siba_suspend),

	/* Bus interface */
	DEVMETHOD(bus_add_child,		siba_add_child),
	DEVMETHOD(bus_child_deleted,		siba_child_deleted),
	DEVMETHOD(bus_read_ivar,		siba_read_ivar),
	DEVMETHOD(bus_write_ivar,		siba_write_ivar),
	DEVMETHOD(bus_get_resource_list,	siba_get_resource_list),

	/* BHND interface */
	DEVMETHOD(bhnd_bus_get_erom_class,	siba_get_erom_class),
	DEVMETHOD(bhnd_bus_alloc_pmu,		siba_alloc_pmu),
	DEVMETHOD(bhnd_bus_release_pmu,		siba_release_pmu),
	DEVMETHOD(bhnd_bus_request_clock,	siba_request_clock),
	DEVMETHOD(bhnd_bus_enable_clocks,	siba_enable_clocks),
	DEVMETHOD(bhnd_bus_request_ext_rsrc,	siba_request_ext_rsrc),
	DEVMETHOD(bhnd_bus_release_ext_rsrc,	siba_release_ext_rsrc),
	DEVMETHOD(bhnd_bus_get_clock_freq,	siba_get_clock_freq),
	DEVMETHOD(bhnd_bus_get_clock_latency,	siba_get_clock_latency),
	DEVMETHOD(bhnd_bus_read_ioctl,		siba_read_ioctl),
	DEVMETHOD(bhnd_bus_write_ioctl,		siba_write_ioctl),
	DEVMETHOD(bhnd_bus_read_iost,		siba_read_iost),
	DEVMETHOD(bhnd_bus_is_hw_suspended,	siba_is_hw_suspended),
	DEVMETHOD(bhnd_bus_reset_hw,		siba_reset_hw),
	DEVMETHOD(bhnd_bus_suspend_hw,		siba_suspend_hw),
	DEVMETHOD(bhnd_bus_read_config,		siba_read_config),
	DEVMETHOD(bhnd_bus_write_config,	siba_write_config),
	DEVMETHOD(bhnd_bus_get_port_count,	siba_get_port_count),
	DEVMETHOD(bhnd_bus_get_region_count,	siba_get_region_count),
	DEVMETHOD(bhnd_bus_get_port_rid,	siba_get_port_rid),
	DEVMETHOD(bhnd_bus_decode_port_rid,	siba_decode_port_rid),
	DEVMETHOD(bhnd_bus_get_region_addr,	siba_get_region_addr),
	DEVMETHOD(bhnd_bus_get_intr_count,	siba_get_intr_count),
	DEVMETHOD(bhnd_bus_get_intr_ivec,	siba_get_intr_ivec),

	DEVMETHOD_END
};

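/*
 * DEFINE_CLASS_1() registers siba_driver as a subclass of the generic
 * bhnd(4) driver (bhnd_driver); any bus or bhnd methods not overridden in
 * siba_methods above are inherited from the parent class.
 */
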
DEFINE_CLASS_1(bhnd, siba_driver, siba_methods, sizeof(struct siba_softc), bhnd_driver);

MODULE_VERSION(siba, 1);
MODULE_DEPEND(siba, bhnd, 1, 1, 1);