xref: /freebsd/sys/dev/bhnd/siba/siba.c (revision 718cf2ccb9956613756ab15d7a0e28f2c8e91cab)
1 /*-
2  * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
3  * Copyright (c) 2017 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Landon Fuller
7  * under sponsorship from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer,
14  *    without modification.
15  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
17  *    redistribution must be conditioned upon including a substantially
18  *    similar Disclaimer requirement for further binary redistribution.
19  *
20  * NO WARRANTY
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
24  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
25  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
26  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
29  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGES.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include <sys/param.h>
38 #include <sys/bus.h>
39 #include <sys/kernel.h>
40 #include <sys/malloc.h>
41 #include <sys/module.h>
42 #include <sys/refcount.h>
43 #include <sys/systm.h>
44 
45 #include <machine/bus.h>
46 
47 #include <dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl.h>
48 
49 #include "sibareg.h"
50 #include "sibavar.h"
51 
/* BHND_BUS_GET_EROM_CLASS(): all siba(4) buses are enumerated via the
 * siba EROM parser. */
static bhnd_erom_class_t *
siba_get_erom_class(driver_t *driver)
{
	return (&siba_erom_parser);
}
57 
/**
 * Default siba(4) bus driver implementation of DEVICE_PROBE().
 *
 * Claims the device at default priority; subclassing drivers may probe
 * at a higher priority to override this driver.
 */
int
siba_probe(device_t dev)
{
	device_set_desc(dev, "SIBA BHND bus");
	return (BUS_PROBE_DEFAULT);
}
64 
65 /**
66  * Default siba(4) bus driver implementation of DEVICE_ATTACH().
67  *
68  * This implementation initializes internal siba(4) state and performs
69  * bus enumeration, and must be called by subclassing drivers in
70  * DEVICE_ATTACH() before any other bus methods.
71  */
72 int
73 siba_attach(device_t dev)
74 {
75 	struct siba_softc	*sc;
76 	int			 error;
77 
78 	sc = device_get_softc(dev);
79 	sc->dev = dev;
80 
81 	SIBA_LOCK_INIT(sc);
82 
83 	/* Enumerate children */
84 	if ((error = siba_add_children(dev))) {
85 		device_delete_children(dev);
86 		SIBA_LOCK_DESTROY(sc);
87 		return (error);
88 	}
89 
90 	return (0);
91 }
92 
/**
 * Default siba(4) bus driver implementation of DEVICE_DETACH().
 *
 * Delegates detach of all children to the generic bhnd(4) implementation
 * before releasing bus-level state.
 */
int
siba_detach(device_t dev)
{
	struct siba_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);

	/* Detach all children first; abort on failure, leaving our own
	 * state intact */
	if ((error = bhnd_generic_detach(dev)))
		return (error);

	SIBA_LOCK_DESTROY(sc);

	return (0);
}
108 
/**
 * Default siba(4) bus driver implementation of DEVICE_RESUME(); delegates
 * to the generic bhnd(4) implementation.
 */
int
siba_resume(device_t dev)
{
	return (bhnd_generic_resume(dev));
}
114 
/**
 * Default siba(4) bus driver implementation of DEVICE_SUSPEND(); delegates
 * to the generic bhnd(4) implementation.
 */
int
siba_suspend(device_t dev)
{
	return (bhnd_generic_suspend(dev));
}
120 
/**
 * BUS_READ_IVAR(): return bhnd(4) instance variable values for @p child.
 *
 * Most values are read directly from the child's cached core
 * identification; BHND_IVAR_PMU_INFO additionally requires the bus lock,
 * as the per-core PMU state is mutable.
 */
static int
siba_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct siba_softc		*sc;
	const struct siba_devinfo	*dinfo;
	const struct bhnd_core_info	*cfg;

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);
	cfg = &dinfo->core_id.core_info;

	switch (index) {
	case BHND_IVAR_VENDOR:
		*result = cfg->vendor;
		return (0);
	case BHND_IVAR_DEVICE:
		*result = cfg->device;
		return (0);
	case BHND_IVAR_HWREV:
		*result = cfg->hwrev;
		return (0);
	case BHND_IVAR_DEVICE_CLASS:
		*result = bhnd_core_class(cfg);
		return (0);
	case BHND_IVAR_VENDOR_NAME:
		*result = (uintptr_t) bhnd_vendor_name(cfg->vendor);
		return (0);
	case BHND_IVAR_DEVICE_NAME:
		*result = (uintptr_t) bhnd_core_name(cfg);
		return (0);
	case BHND_IVAR_CORE_INDEX:
		*result = cfg->core_idx;
		return (0);
	case BHND_IVAR_CORE_UNIT:
		*result = cfg->unit;
		return (0);
	case BHND_IVAR_PMU_INFO:
		/* PMU state is protected by the bus lock */
		SIBA_LOCK(sc);
		switch (dinfo->pmu_state) {
		case SIBA_PMU_NONE:
			*result = (uintptr_t)NULL;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_BHND:
			*result = (uintptr_t)dinfo->pmu.bhnd_info;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_PWRCTL:
			/* PWRCTL-managed cores have no bhnd PMU info */
			panic("bhnd_get_pmu_info() called with "
			    "SIBA_PMU_PWRCTL");
			/* NOTREACHED */
			return (ENXIO);
		}

		panic("invalid PMU state: %d", dinfo->pmu_state);
		/* NOTREACHED */
		return (ENXIO);

	default:
		return (ENOENT);
	}
}
183 
/**
 * BUS_WRITE_IVAR(): set bhnd(4) instance variable values for @p child.
 *
 * All core identification variables are read-only (EINVAL); only
 * BHND_IVAR_PMU_INFO may be written, and only while the core is not
 * managed by a legacy PWRCTL provider.
 */
static int
siba_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	switch (index) {
	case BHND_IVAR_VENDOR:
	case BHND_IVAR_DEVICE:
	case BHND_IVAR_HWREV:
	case BHND_IVAR_DEVICE_CLASS:
	case BHND_IVAR_VENDOR_NAME:
	case BHND_IVAR_DEVICE_NAME:
	case BHND_IVAR_CORE_INDEX:
	case BHND_IVAR_CORE_UNIT:
		/* Core identification is immutable */
		return (EINVAL);
	case BHND_IVAR_PMU_INFO:
		/* PMU state is protected by the bus lock */
		SIBA_LOCK(sc);
		switch (dinfo->pmu_state) {
		case SIBA_PMU_NONE:
		case SIBA_PMU_BHND:
			dinfo->pmu.bhnd_info = (void *)value;
			dinfo->pmu_state = SIBA_PMU_BHND;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_PWRCTL:
			panic("bhnd_set_pmu_info() called with "
			    "SIBA_PMU_PWRCTL");
			/* NOTREACHED */
			return (ENXIO);
		}

		panic("invalid PMU state: %d", dinfo->pmu_state);
		/* NOTREACHED */
		return (ENXIO);

	default:
		return (ENOENT);
	}
}
226 
227 static struct resource_list *
228 siba_get_resource_list(device_t dev, device_t child)
229 {
230 	struct siba_devinfo *dinfo = device_get_ivars(child);
231 	return (&dinfo->resources);
232 }
233 
/**
 * BHND_BUS_ALLOC_PMU()
 *
 * On legacy PWRCTL chipsets, records the PWRCTL provider (taking
 * ownership of the provider reference) as the child's per-core PMU
 * state; otherwise defers to the generic bhnd(4) PMU implementation.
 */
static int
siba_alloc_pmu(device_t dev, device_t child)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	device_t		 pwrctl;
	int			 error;

	/* Only directly-attached children are supported */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);
	pwrctl = bhnd_retain_provider(child, BHND_SERVICE_PWRCTL);

	/* Unless this is a legacy PWRCTL chipset, defer to bhnd(4)'s PMU
	 * implementation */
	if (pwrctl == NULL) {
		/* No provider reference to release on this path */
		if ((error = bhnd_generic_alloc_pmu(dev, child)))
			return (error);

		/* bhnd_generic_alloc_pmu() sets the PMU state via our
		 * BHND_IVAR_PMU_INFO ivar */
		KASSERT(dinfo->pmu_state == SIBA_PMU_BHND,
		    ("unexpected PMU state: %d", dinfo->pmu_state));

		return (0);
	}

	/* This is a legacy PWRCTL chipset; we need to map all bhnd(4) bus PMU
	 * to PWRCTL operations ourselves.*/
	SIBA_LOCK(sc);

	/* Per-core PMU state already allocated? */
	if (dinfo->pmu_state != SIBA_PMU_NONE) {
		panic("duplicate PMU allocation for %s",
		    device_get_nameunit(child));
	}

	/* Update the child's PMU allocation state, and transfer ownership of
	 * the PWRCTL provider reference */
	dinfo->pmu_state = SIBA_PMU_PWRCTL;
	dinfo->pmu.pwrctl = pwrctl;

	SIBA_UNLOCK(sc);

	return (0);
}
281 
/**
 * BHND_BUS_RELEASE_PMU()
 *
 * Releases the child's per-core PMU state allocated via
 * siba_alloc_pmu(), dispatching on whether the core is managed by the
 * generic bhnd(4) PMU or a legacy PWRCTL provider.
 */
static int
siba_release_pmu(device_t dev, device_t child)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	device_t		 pwrctl;
	int			 error;

	/* Only directly-attached children are supported */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch(dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("pmu over-release for %s", device_get_nameunit(child));
		/* NOTREACHED */
		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		/* Generic implementation manages its own locking */
		SIBA_UNLOCK(sc);
		return (bhnd_generic_release_pmu(dev, child));

	case SIBA_PMU_PWRCTL:
		/* Requesting BHND_CLOCK_DYN releases any outstanding clock
		 * reservations */
		pwrctl = dinfo->pmu.pwrctl;
		error = bhnd_pwrctl_request_clock(pwrctl, child,
		    BHND_CLOCK_DYN);
		if (error) {
			SIBA_UNLOCK(sc);
			return (error);
		}

		/* Clean up the child's PMU state */
		dinfo->pmu_state = SIBA_PMU_NONE;
		dinfo->pmu.pwrctl = NULL;
		SIBA_UNLOCK(sc);

		/* Release the provider reference */
		bhnd_release_provider(child, pwrctl, BHND_SERVICE_PWRCTL);
		return (0);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}
331 
/**
 * BHND_BUS_GET_CLOCK_LATENCY()
 *
 * Dispatches to the generic bhnd(4) PMU implementation or the legacy
 * PWRCTL provider, based on the child's PMU allocation state. A PMU
 * must have been allocated via BHND_BUS_ALLOC_PMU() first.
 */
static int
siba_get_clock_latency(device_t dev, device_t child, bhnd_clock clock,
    u_int *latency)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch(dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		/* NOTREACHED */
		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_get_clock_latency(dev, child, clock,
		    latency));

	case SIBA_PMU_PWRCTL:
		 error = bhnd_pwrctl_get_clock_latency(dinfo->pmu.pwrctl, clock,
		    latency);
		 SIBA_UNLOCK(sc);

		 return (error);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}
370 
/**
 * BHND_BUS_GET_CLOCK_FREQ()
 *
 * Dispatches to the generic bhnd(4) PMU implementation or the legacy
 * PWRCTL provider, based on the child's PMU allocation state. A PMU
 * must have been allocated via BHND_BUS_ALLOC_PMU() first.
 */
static int
siba_get_clock_freq(device_t dev, device_t child, bhnd_clock clock,
    u_int *freq)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch(dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		/* NOTREACHED */
		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_get_clock_freq(dev, child, clock, freq));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_get_clock_freq(dinfo->pmu.pwrctl, clock,
		    freq);
		SIBA_UNLOCK(sc);

		return (error);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}
408 
/**
 * BHND_BUS_REQUEST_EXT_RSRC()
 *
 * Defers to the generic bhnd(4) PMU implementation; legacy PWRCTL
 * hardware does not support per-core external resources (ENODEV).
 */
static int
siba_request_ext_rsrc(device_t dev, device_t child, u_int rsrc)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch(dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		/* NOTREACHED */
		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_request_ext_rsrc(dev, child, rsrc));

	case SIBA_PMU_PWRCTL:
		/* HW does not support per-core external resources */
		SIBA_UNLOCK(sc);
		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}
442 
/**
 * BHND_BUS_RELEASE_EXT_RSRC()
 *
 * Defers to the generic bhnd(4) PMU implementation; legacy PWRCTL
 * hardware does not support per-core external resources (ENODEV).
 */
static int
siba_release_ext_rsrc(device_t dev, device_t child, u_int rsrc)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch(dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		/* NOTREACHED */
		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_release_ext_rsrc(dev, child, rsrc));

	case SIBA_PMU_PWRCTL:
		/* HW does not support per-core external resources */
		SIBA_UNLOCK(sc);
		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}
476 
/**
 * BHND_BUS_REQUEST_CLOCK()
 *
 * Dispatches the clock request to the generic bhnd(4) PMU implementation
 * or the legacy PWRCTL provider, based on the child's PMU allocation
 * state.
 */
static int
siba_request_clock(device_t dev, device_t child, bhnd_clock clock)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch(dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		/* NOTREACHED */
		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_request_clock(dev, child, clock));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child,
		    clock);
		SIBA_UNLOCK(sc);

		return (error);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}
513 
/**
 * BHND_BUS_ENABLE_CLOCKS()
 *
 * Dispatches to the generic bhnd(4) PMU implementation, or — on legacy
 * PWRCTL hardware — treats all supported clocks as always-enabled,
 * rejecting any unrecognized clock flags.
 */
static int
siba_enable_clocks(device_t dev, device_t child, uint32_t clocks)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch(dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		/* NOTREACHED */
		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_enable_clocks(dev, child, clocks));

	case SIBA_PMU_PWRCTL:
		SIBA_UNLOCK(sc);

		/* All (supported) clocks are already enabled by default */
		clocks &= ~(BHND_CLOCK_DYN |
			    BHND_CLOCK_ILP |
			    BHND_CLOCK_ALP |
			    BHND_CLOCK_HT);

		/* Any remaining bits are clocks we don't know about */
		if (clocks != 0) {
			device_printf(dev, "%s requested unknown clocks: %#x\n",
			    device_get_nameunit(child), clocks);
			return (ENODEV);
		}

		return (0);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}
559 
560 static int
561 siba_read_iost(device_t dev, device_t child, uint16_t *iost)
562 {
563 	uint32_t	tmhigh;
564 	int		error;
565 
566 	error = bhnd_read_config(child, SIBA_CFG0_TMSTATEHIGH, &tmhigh, 4);
567 	if (error)
568 		return (error);
569 
570 	*iost = (SIBA_REG_GET(tmhigh, TMH_SISF));
571 	return (0);
572 }
573 
574 static int
575 siba_read_ioctl(device_t dev, device_t child, uint16_t *ioctl)
576 {
577 	uint32_t	ts_low;
578 	int		error;
579 
580 	if ((error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4)))
581 		return (error);
582 
583 	*ioctl = (SIBA_REG_GET(ts_low, TML_SICF));
584 	return (0);
585 }
586 
587 static int
588 siba_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask)
589 {
590 	struct siba_devinfo	*dinfo;
591 	struct bhnd_resource	*r;
592 	uint32_t		 ts_low, ts_mask;
593 
594 	if (device_get_parent(child) != dev)
595 		return (EINVAL);
596 
597 	/* Fetch CFG0 mapping */
598 	dinfo = device_get_ivars(child);
599 	if ((r = dinfo->cfg_res[0]) == NULL)
600 		return (ENODEV);
601 
602 	/* Mask and set TMSTATELOW core flag bits */
603 	ts_mask = (mask << SIBA_TML_SICF_SHIFT) & SIBA_TML_SICF_MASK;
604 	ts_low = (value << SIBA_TML_SICF_SHIFT) & ts_mask;
605 
606 	return (siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
607 	    ts_low, ts_mask));
608 }
609 
610 static bool
611 siba_is_hw_suspended(device_t dev, device_t child)
612 {
613 	uint32_t		ts_low;
614 	uint16_t		ioctl;
615 	int			error;
616 
617 	/* Fetch target state */
618 	error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4);
619 	if (error) {
620 		device_printf(child, "error reading HW reset state: %d\n",
621 		    error);
622 		return (true);
623 	}
624 
625 	/* Is core held in RESET? */
626 	if (ts_low & SIBA_TML_RESET)
627 		return (true);
628 
629 	/* Is core clocked? */
630 	ioctl = SIBA_REG_GET(ts_low, TML_SICF);
631 	if (!(ioctl & BHND_IOCTL_CLK_EN))
632 		return (true);
633 
634 	return (false);
635 }
636 
/**
 * BHND_BUS_RESET_HW(): place @p child in RESET, apply the caller's IOCTL
 * flags with clocks forced, clear any pending target/initiator errors,
 * and then release the core from RESET.
 *
 * @param dev The siba bus device.
 * @param child The core to be reset.
 * @param ioctl Additional I/O control flags to set; must not include
 * BHND_IOCTL_CLK_EN or BHND_IOCTL_CLK_FORCE, which are managed here.
 */
static int
siba_reset_hw(device_t dev, device_t child, uint16_t ioctl)
{
	struct siba_devinfo		*dinfo;
	struct bhnd_resource		*r;
	uint32_t			 ts_low, imstate;
	int				 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	dinfo = device_get_ivars(child);

	/* Can't suspend the core without access to the CFG0 registers */
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* We require exclusive control over BHND_IOCTL_CLK_EN and
	 * BHND_IOCTL_CLK_FORCE. */
	if (ioctl & (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE))
		return (EINVAL);

	/* Place core into known RESET state */
	if ((error = BHND_BUS_SUSPEND_HW(dev, child)))
		return (error);

	/* Leaving the core in reset, set the caller's IOCTL flags and
	 * enable the core's clocks. */
	ts_low = (ioctl | BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) <<
	    SIBA_TML_SICF_SHIFT;
	error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, SIBA_TML_SICF_MASK);
	if (error)
		return (error);

	/* Clear any target errors */
	if (bhnd_bus_read_4(r, SIBA_CFG0_TMSTATEHIGH) & SIBA_TMH_SERR) {
		error = siba_write_target_state(child, dinfo,
		    SIBA_CFG0_TMSTATEHIGH, 0, SIBA_TMH_SERR);
		if (error)
			return (error);
	}

	/* Clear any initiator errors */
	imstate = bhnd_bus_read_4(r, SIBA_CFG0_IMSTATE);
	if (imstate & (SIBA_IM_IBE|SIBA_IM_TO)) {
		error = siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
		    0, SIBA_IM_IBE|SIBA_IM_TO);
		if (error)
			return (error);
	}

	/* Release from RESET while leaving clocks forced, ensuring the
	 * signal propagates throughout the core */
	error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    0x0, SIBA_TML_RESET);
	if (error)
		return (error);

	/* The core should now be active; we can clear the BHND_IOCTL_CLK_FORCE
	 * bit and allow the core to manage clock gating. */
	error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    0x0, (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT));
	if (error)
		return (error);

	return (0);
}
705 
706 static int
707 siba_suspend_hw(device_t dev, device_t child)
708 {
709 	struct siba_softc		*sc;
710 	struct siba_devinfo		*dinfo;
711 	struct bhnd_resource		*r;
712 	uint32_t			 idl, ts_low;
713 	uint16_t			 ioctl;
714 	int				 error;
715 
716 	if (device_get_parent(child) != dev)
717 		return (EINVAL);
718 
719 	sc = device_get_softc(dev);
720 	dinfo = device_get_ivars(child);
721 
722 	/* Can't suspend the core without access to the CFG0 registers */
723 	if ((r = dinfo->cfg_res[0]) == NULL)
724 		return (ENODEV);
725 
726 	/* Already in RESET? */
727 	ts_low = bhnd_bus_read_4(r, SIBA_CFG0_TMSTATELOW);
728 	if (ts_low & SIBA_TML_RESET) {
729 		/* Clear IOCTL flags, ensuring the clock is disabled */
730 		return (siba_write_target_state(child, dinfo,
731 		    SIBA_CFG0_TMSTATELOW, 0x0, SIBA_TML_SICF_MASK));
732 
733 		return (0);
734 	}
735 
736 	/* If clocks are already disabled, we can put the core directly
737 	 * into RESET */
738 	ioctl = SIBA_REG_GET(ts_low, TML_SICF);
739 	if (!(ioctl & BHND_IOCTL_CLK_EN)) {
740 		/* Set RESET and clear IOCTL flags */
741 		return (siba_write_target_state(child, dinfo,
742 		    SIBA_CFG0_TMSTATELOW,
743 		    SIBA_TML_RESET,
744 		    SIBA_TML_RESET | SIBA_TML_SICF_MASK));
745 	}
746 
747 	/* Reject any further target backplane transactions */
748 	error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
749 	    SIBA_TML_REJ, SIBA_TML_REJ);
750 	if (error)
751 		return (error);
752 
753 	/* If this is an initiator core, we need to reject initiator
754 	 * transactions too. */
755 	idl = bhnd_bus_read_4(r, SIBA_CFG0_IDLOW);
756 	if (idl & SIBA_IDL_INIT) {
757 		error = siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
758 		    SIBA_IM_RJ, SIBA_IM_RJ);
759 		if (error)
760 			return (error);
761 	}
762 
763 	/* Put the core into RESET|REJECT, forcing clocks to ensure the RESET
764 	 * signal propagates throughout the core, leaving REJECT asserted. */
765 	ts_low = SIBA_TML_RESET;
766 	ts_low |= (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) <<
767 	    SIBA_TML_SICF_SHIFT;
768 
769 	error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
770 		ts_low, ts_low);
771 	if (error)
772 		return (error);
773 
774 	/* Give RESET ample time */
775 	DELAY(10);
776 
777 	/* Leaving core in reset, disable all clocks, clear REJ flags and
778 	 * IOCTL state */
779 	error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
780 		SIBA_TML_RESET,
781 		SIBA_TML_RESET | SIBA_TML_REJ | SIBA_TML_SICF_MASK);
782 	if (error)
783 		return (error);
784 
785 	/* Clear previously asserted initiator reject */
786 	if (idl & SIBA_IDL_INIT) {
787 		error = siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
788 		    0, SIBA_IM_RJ);
789 		if (error)
790 			return (error);
791 	}
792 
793 	/*
794 	 * Core is now in RESET, with clocks disabled and REJ not asserted.
795 	 *
796 	 * If the core holds any PWRCTL clock reservations, we need to release
797 	 * those now. This emulates the standard bhnd(4) PMU behavior of RESET
798 	 * automatically clearing clkctl
799 	 */
800 	SIBA_LOCK(sc);
801 	if (dinfo->pmu_state == SIBA_PMU_PWRCTL) {
802 		error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child,
803 		    BHND_CLOCK_DYN);
804 		SIBA_UNLOCK(sc);
805 
806 		if (error) {
807 			device_printf(child, "failed to release clock request: "
808 			    "%d", error);
809 			return (error);
810 		}
811 
812 		return (0);
813 	} else {
814 		SIBA_UNLOCK(sc);
815 		return (0);
816 	}
817 }
818 
819 static int
820 siba_read_config(device_t dev, device_t child, bus_size_t offset, void *value,
821     u_int width)
822 {
823 	struct siba_devinfo	*dinfo;
824 	rman_res_t		 r_size;
825 
826 	/* Must be directly attached */
827 	if (device_get_parent(child) != dev)
828 		return (EINVAL);
829 
830 	/* CFG0 registers must be available */
831 	dinfo = device_get_ivars(child);
832 	if (dinfo->cfg_res[0] == NULL)
833 		return (ENODEV);
834 
835 	/* Offset must fall within CFG0 */
836 	r_size = rman_get_size(dinfo->cfg_res[0]->res);
837 	if (r_size < offset || r_size - offset < width)
838 		return (EFAULT);
839 
840 	switch (width) {
841 	case 1:
842 		*((uint8_t *)value) = bhnd_bus_read_1(dinfo->cfg_res[0],
843 		    offset);
844 		return (0);
845 	case 2:
846 		*((uint16_t *)value) = bhnd_bus_read_2(dinfo->cfg_res[0],
847 		    offset);
848 		return (0);
849 	case 4:
850 		*((uint32_t *)value) = bhnd_bus_read_4(dinfo->cfg_res[0],
851 		    offset);
852 		return (0);
853 	default:
854 		return (EINVAL);
855 	}
856 }
857 
858 static int
859 siba_write_config(device_t dev, device_t child, bus_size_t offset,
860     const void *value, u_int width)
861 {
862 	struct siba_devinfo	*dinfo;
863 	struct bhnd_resource	*r;
864 	rman_res_t		 r_size;
865 
866 	/* Must be directly attached */
867 	if (device_get_parent(child) != dev)
868 		return (EINVAL);
869 
870 	/* CFG0 registers must be available */
871 	dinfo = device_get_ivars(child);
872 	if ((r = dinfo->cfg_res[0]) == NULL)
873 		return (ENODEV);
874 
875 	/* Offset must fall within CFG0 */
876 	r_size = rman_get_size(r->res);
877 	if (r_size < offset || r_size - offset < width)
878 		return (EFAULT);
879 
880 	switch (width) {
881 	case 1:
882 		bhnd_bus_write_1(r, offset, *(const uint8_t *)value);
883 		return (0);
884 	case 2:
885 		bhnd_bus_write_2(r, offset, *(const uint8_t *)value);
886 		return (0);
887 	case 4:
888 		bhnd_bus_write_4(r, offset, *(const uint8_t *)value);
889 		return (0);
890 	default:
891 		return (EINVAL);
892 	}
893 }
894 
895 static u_int
896 siba_get_port_count(device_t dev, device_t child, bhnd_port_type type)
897 {
898 	struct siba_devinfo *dinfo;
899 
900 	/* delegate non-bus-attached devices to our parent */
901 	if (device_get_parent(child) != dev)
902 		return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child,
903 		    type));
904 
905 	dinfo = device_get_ivars(child);
906 	return (siba_port_count(&dinfo->core_id, type));
907 }
908 
909 static u_int
910 siba_get_region_count(device_t dev, device_t child, bhnd_port_type type,
911     u_int port)
912 {
913 	struct siba_devinfo	*dinfo;
914 
915 	/* delegate non-bus-attached devices to our parent */
916 	if (device_get_parent(child) != dev)
917 		return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev), child,
918 		    type, port));
919 
920 	dinfo = device_get_ivars(child);
921 	return (siba_port_region_count(&dinfo->core_id, type, port));
922 }
923 
924 static int
925 siba_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type,
926     u_int port_num, u_int region_num)
927 {
928 	struct siba_devinfo	*dinfo;
929 	struct siba_addrspace	*addrspace;
930 	struct siba_cfg_block	*cfg;
931 
932 	/* delegate non-bus-attached devices to our parent */
933 	if (device_get_parent(child) != dev)
934 		return (BHND_BUS_GET_PORT_RID(device_get_parent(dev), child,
935 		    port_type, port_num, region_num));
936 
937 	dinfo = device_get_ivars(child);
938 
939 	/* Look for a matching addrspace entry */
940 	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
941 	if (addrspace != NULL)
942 		return (addrspace->sa_rid);
943 
944 	/* Try the config blocks */
945 	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
946 	if (cfg != NULL)
947 		return (cfg->cb_rid);
948 
949 	/* Not found */
950 	return (-1);
951 }
952 
/* BHND_BUS_DECODE_PORT_RID(): map a SYS_RES_MEMORY resource ID back to
 * the corresponding (port type, port, region) triplet on @p child. */
static int
siba_decode_port_rid(device_t dev, device_t child, int type, int rid,
    bhnd_port_type *port_type, u_int *port_num, u_int *region_num)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_DECODE_PORT_RID(device_get_parent(dev), child,
		    type, rid, port_type, port_num, region_num));

	dinfo = device_get_ivars(child);

	/* Ports are always memory mapped */
	if (type != SYS_RES_MEMORY)
		return (EINVAL);

	/* Look for a matching addrspace entry */
	for (u_int i = 0; i < dinfo->core_id.num_addrspace; i++) {
		if (dinfo->addrspace[i].sa_rid != rid)
			continue;

		/* Address space entries map to device ports */
		*port_type = BHND_PORT_DEVICE;
		*port_num = siba_addrspace_device_port(i);
		*region_num = siba_addrspace_device_region(i);
		return (0);
	}

	/* Try the config blocks */
	for (u_int i = 0; i < dinfo->core_id.num_cfg_blocks; i++) {
		if (dinfo->cfg[i].cb_rid != rid)
			continue;

		/* Config blocks map to agent ports */
		*port_type = BHND_PORT_AGENT;
		*port_num = siba_cfg_agent_port(i);
		*region_num = siba_cfg_agent_region(i);
		return (0);
	}

	/* Not found */
	return (ENOENT);
}
995 
/* BHND_BUS_GET_REGION_ADDR(): return the base address and size of the
 * given port/region on @p child. For the device address space, the size
 * excludes any trailing range reserved for the bus's own use. */
static int
siba_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;
	struct siba_cfg_block	*cfg;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev) {
		return (BHND_BUS_GET_REGION_ADDR(device_get_parent(dev), child,
		    port_type, port_num, region_num, addr, size));
	}

	dinfo = device_get_ivars(child);

	/* Look for a matching addrspace */
	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
	if (addrspace != NULL) {
		*addr = addrspace->sa_base;
		/* Exclude the bus-reserved tail of the region */
		*size = addrspace->sa_size - addrspace->sa_bus_reserved;
		return (0);
	}

	/* Look for a matching cfg block */
	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
	if (cfg != NULL) {
		*addr = cfg->cb_base;
		*size = cfg->cb_size;
		return (0);
	}

	/* Not found */
	return (ENOENT);
}
1031 
1032 /**
1033  * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT().
1034  */
1035 u_int
1036 siba_get_intr_count(device_t dev, device_t child)
1037 {
1038 	struct siba_devinfo	*dinfo;
1039 
1040 	/* delegate non-bus-attached devices to our parent */
1041 	if (device_get_parent(child) != dev)
1042 		return (BHND_BUS_GET_INTR_COUNT(device_get_parent(dev), child));
1043 
1044 	dinfo = device_get_ivars(child);
1045 	if (!dinfo->intr_en) {
1046 		/* No interrupts */
1047 		return (0);
1048 	} else {
1049 		/* One assigned interrupt */
1050 		return (1);
1051 	}
1052 }
1053 
1054 /**
1055  * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_IVEC().
1056  */
1057 int
1058 siba_get_intr_ivec(device_t dev, device_t child, u_int intr, u_int *ivec)
1059 {
1060 	struct siba_devinfo	*dinfo;
1061 
1062 	/* delegate non-bus-attached devices to our parent */
1063 	if (device_get_parent(child) != dev)
1064 		return (BHND_BUS_GET_INTR_IVEC(device_get_parent(dev), child,
1065 		    intr, ivec));
1066 
1067 	/* Must be a valid interrupt ID */
1068 	if (intr >= siba_get_intr_count(dev, child))
1069 		return (ENXIO);
1070 
1071 	KASSERT(intr == 0, ("invalid ivec %u", intr));
1072 
1073 	dinfo = device_get_ivars(child);
1074 
1075 	KASSERT(dinfo->intr_en, ("core does not have an interrupt assigned"));
1076 	*ivec = dinfo->intr.flag;
1077 	return (0);
1078 }
1079 
1080 /**
1081  * Register all address space mappings for @p di.
1082  *
1083  * @param dev The siba bus device.
1084  * @param di The device info instance on which to register all address
1085  * space entries.
1086  * @param r A resource mapping the enumeration table block for @p di.
1087  */
1088 static int
1089 siba_register_addrspaces(device_t dev, struct siba_devinfo *di,
1090     struct bhnd_resource *r)
1091 {
1092 	struct siba_core_id	*cid;
1093 	uint32_t		 addr;
1094 	uint32_t		 size;
1095 	int			 error;
1096 
1097 	cid = &di->core_id;
1098 
1099 
1100 	/* Register the device address space entries */
1101 	for (uint8_t i = 0; i < di->core_id.num_addrspace; i++) {
1102 		uint32_t	adm;
1103 		u_int		adm_offset;
1104 		uint32_t	bus_reserved;
1105 
1106 		/* Determine the register offset */
1107 		adm_offset = siba_admatch_offset(i);
1108 		if (adm_offset == 0) {
1109 		    device_printf(dev, "addrspace %hhu is unsupported", i);
1110 		    return (ENODEV);
1111 		}
1112 
1113 		/* Fetch the address match register value */
1114 		adm = bhnd_bus_read_4(r, adm_offset);
1115 
1116 		/* Parse the value */
1117 		if ((error = siba_parse_admatch(adm, &addr, &size))) {
1118 			device_printf(dev, "failed to decode address "
1119 			    " match register value 0x%x\n", adm);
1120 			return (error);
1121 		}
1122 
1123 		/* If this is the device's core/enumeration addrespace,
1124 		 * reserve the Sonics configuration register blocks for the
1125 		 * use of our bus. */
1126 		bus_reserved = 0;
1127 		if (i == SIBA_CORE_ADDRSPACE)
1128 			bus_reserved = cid->num_cfg_blocks * SIBA_CFG_SIZE;
1129 
1130 		/* Append the region info */
1131 		error = siba_append_dinfo_region(di, i, addr, size,
1132 		    bus_reserved);
1133 		if (error)
1134 			return (error);
1135 	}
1136 
1137 	return (0);
1138 }
1139 
1140 
1141 /**
1142  * Register all interrupt descriptors for @p dinfo. Must be called after
1143  * configuration blocks have been mapped.
1144  *
1145  * @param dev The siba bus device.
1146  * @param child The siba child device.
1147  * @param dinfo The device info instance on which to register all interrupt
1148  * descriptor entries.
1149  * @param r A resource mapping the enumeration table block for @p di.
1150  */
1151 static int
1152 siba_register_interrupts(device_t dev, device_t child,
1153     struct siba_devinfo *dinfo, struct bhnd_resource *r)
1154 {
1155 	uint32_t	tpsflag;
1156 	int		error;
1157 
1158 	/* Is backplane interrupt distribution enabled for this core? */
1159 	tpsflag = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_TPSFLAG));
1160 	if ((tpsflag & SIBA_TPS_F0EN0) == 0) {
1161 		dinfo->intr_en = false;
1162 		return (0);
1163 	}
1164 
1165 	/* Have one interrupt */
1166 	dinfo->intr_en = true;
1167 	dinfo->intr.flag = SIBA_REG_GET(tpsflag, TPS_NUM0);
1168 	dinfo->intr.mapped = false;
1169 	dinfo->intr.irq = 0;
1170 	dinfo->intr.rid = -1;
1171 
1172 	/* Map the interrupt */
1173 	error = BHND_BUS_MAP_INTR(dev, child, 0 /* single intr is always 0 */,
1174 	    &dinfo->intr.irq);
1175 	if (error) {
1176 		device_printf(dev, "failed mapping interrupt line for core %u: "
1177 		    "%d\n", dinfo->core_id.core_info.core_idx, error);
1178 		return (error);
1179 	}
1180 	dinfo->intr.mapped = true;
1181 
1182 	/* Update the resource list */
1183 	dinfo->intr.rid = resource_list_add_next(&dinfo->resources, SYS_RES_IRQ,
1184 	    dinfo->intr.irq, dinfo->intr.irq, 1);
1185 
1186 	return (0);
1187 }
1188 
1189 /**
1190  * Map per-core configuration blocks for @p dinfo.
1191  *
1192  * @param dev The siba bus device.
1193  * @param dinfo The device info instance on which to map all per-core
1194  * configuration blocks.
1195  */
1196 static int
1197 siba_map_cfg_resources(device_t dev, struct siba_devinfo *dinfo)
1198 {
1199 	struct siba_addrspace	*addrspace;
1200 	rman_res_t		 r_start, r_count, r_end;
1201 	uint8_t			 num_cfg;
1202 	int			 rid;
1203 
1204 	num_cfg = dinfo->core_id.num_cfg_blocks;
1205 	if (num_cfg > SIBA_MAX_CFG) {
1206 		device_printf(dev, "config block count %hhu out of range\n",
1207 		    num_cfg);
1208 		return (ENXIO);
1209 	}
1210 
1211 	/* Fetch the core register address space */
1212 	addrspace = siba_find_addrspace(dinfo, BHND_PORT_DEVICE, 0, 0);
1213 	if (addrspace == NULL) {
1214 		device_printf(dev, "missing device registers\n");
1215 		return (ENXIO);
1216 	}
1217 
1218 	/*
1219 	 * Map the per-core configuration blocks
1220 	 */
1221 	for (uint8_t i = 0; i < num_cfg; i++) {
1222 		/* Add to child's resource list */
1223 		r_start = addrspace->sa_base + SIBA_CFG_OFFSET(i);
1224 		r_count = SIBA_CFG_SIZE;
1225 		r_end = r_start + r_count - 1;
1226 
1227 		rid = resource_list_add_next(&dinfo->resources, SYS_RES_MEMORY,
1228 		    r_start, r_end, r_count);
1229 
1230 		/* Initialize config block descriptor */
1231 		dinfo->cfg[i] = ((struct siba_cfg_block) {
1232 			.cb_base = r_start,
1233 			.cb_size = SIBA_CFG_SIZE,
1234 			.cb_rid = rid
1235 		});
1236 
1237 		/* Map the config resource for bus-level access */
1238 		dinfo->cfg_rid[i] = SIBA_CFG_RID(dinfo, i);
1239 		dinfo->cfg_res[i] = BHND_BUS_ALLOC_RESOURCE(dev, dev,
1240 		    SYS_RES_MEMORY, &dinfo->cfg_rid[i], r_start, r_end,
1241 		    r_count, RF_ACTIVE|RF_SHAREABLE);
1242 
1243 		if (dinfo->cfg_res[i] == NULL) {
1244 			device_printf(dev, "failed to allocate SIBA_CFG%hhu\n",
1245 			    i);
1246 			return (ENXIO);
1247 		}
1248 	}
1249 
1250 	return (0);
1251 }
1252 
1253 static device_t
1254 siba_add_child(device_t dev, u_int order, const char *name, int unit)
1255 {
1256 	struct siba_devinfo	*dinfo;
1257 	device_t		 child;
1258 
1259 	child = device_add_child_ordered(dev, order, name, unit);
1260 	if (child == NULL)
1261 		return (NULL);
1262 
1263 	if ((dinfo = siba_alloc_dinfo(dev)) == NULL) {
1264 		device_delete_child(dev, child);
1265 		return (NULL);
1266 	}
1267 
1268 	device_set_ivars(child, dinfo);
1269 
1270 	return (child);
1271 }
1272 
1273 static void
1274 siba_child_deleted(device_t dev, device_t child)
1275 {
1276 	struct bhnd_softc	*sc;
1277 	struct siba_devinfo	*dinfo;
1278 
1279 	sc = device_get_softc(dev);
1280 
1281 	/* Call required bhnd(4) implementation */
1282 	bhnd_generic_child_deleted(dev, child);
1283 
1284 	/* Free siba device info */
1285 	if ((dinfo = device_get_ivars(child)) != NULL)
1286 		siba_free_dinfo(dev, child, dinfo);
1287 
1288 	device_set_ivars(child, NULL);
1289 }
1290 
1291 /**
1292  * Scan the core table and add all valid discovered cores to
1293  * the bus.
1294  *
1295  * @param dev The siba bus device.
1296  */
1297 int
1298 siba_add_children(device_t dev)
1299 {
1300 	const struct bhnd_chipid	*chipid;
1301 	struct siba_core_id		*cores;
1302 	struct bhnd_resource		*r;
1303 	device_t			*children;
1304 	int				 rid;
1305 	int				 error;
1306 
1307 	cores = NULL;
1308 	r = NULL;
1309 
1310 	chipid = BHND_BUS_GET_CHIPID(dev, dev);
1311 
1312 	/* Allocate our temporary core and device table */
1313 	cores = malloc(sizeof(*cores) * chipid->ncores, M_BHND, M_WAITOK);
1314 	children = malloc(sizeof(*children) * chipid->ncores, M_BHND,
1315 	    M_WAITOK | M_ZERO);
1316 
1317 	/*
1318 	 * Add child devices for all discovered cores.
1319 	 *
1320 	 * On bridged devices, we'll exhaust our available register windows if
1321 	 * we map config blocks on unpopulated/disabled cores. To avoid this, we
1322 	 * defer mapping of the per-core siba(4) config blocks until all cores
1323 	 * have been enumerated and otherwise configured.
1324 	 */
1325 	for (u_int i = 0; i < chipid->ncores; i++) {
1326 		struct siba_devinfo	*dinfo;
1327 		device_t		 child;
1328 		uint32_t		 idhigh, idlow;
1329 		rman_res_t		 r_count, r_end, r_start;
1330 
1331 		/* Map the core's register block */
1332 		rid = 0;
1333 		r_start = SIBA_CORE_ADDR(i);
1334 		r_count = SIBA_CORE_SIZE;
1335 		r_end = r_start + SIBA_CORE_SIZE - 1;
1336 		r = bhnd_alloc_resource(dev, SYS_RES_MEMORY, &rid, r_start,
1337 		    r_end, r_count, RF_ACTIVE);
1338 		if (r == NULL) {
1339 			error = ENXIO;
1340 			goto failed;
1341 		}
1342 
1343 		/* Read the core info */
1344 		idhigh = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_IDHIGH));
1345 		idlow = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_IDLOW));
1346 
1347 		cores[i] = siba_parse_core_id(idhigh, idlow, i, 0);
1348 
1349 		/* Determine and set unit number */
1350 		for (u_int j = 0; j < i; j++) {
1351 			struct bhnd_core_info *cur = &cores[i].core_info;
1352 			struct bhnd_core_info *prev = &cores[j].core_info;
1353 
1354 			if (prev->vendor == cur->vendor &&
1355 			    prev->device == cur->device)
1356 				cur->unit++;
1357 		}
1358 
1359 		/* Add the child device */
1360 		child = BUS_ADD_CHILD(dev, 0, NULL, -1);
1361 		if (child == NULL) {
1362 			error = ENXIO;
1363 			goto failed;
1364 		}
1365 
1366 		children[i] = child;
1367 
1368 		/* Initialize per-device bus info */
1369 		if ((dinfo = device_get_ivars(child)) == NULL) {
1370 			error = ENXIO;
1371 			goto failed;
1372 		}
1373 
1374 		if ((error = siba_init_dinfo(dev, dinfo, &cores[i])))
1375 			goto failed;
1376 
1377 		/* Register the core's address space(s). */
1378 		if ((error = siba_register_addrspaces(dev, dinfo, r)))
1379 			goto failed;
1380 
1381 		/* Register the core's interrupts */
1382 		if ((error = siba_register_interrupts(dev, child, dinfo, r)))
1383 			goto failed;
1384 
1385 		/* Unmap the core's register block */
1386 		bhnd_release_resource(dev, SYS_RES_MEMORY, rid, r);
1387 		r = NULL;
1388 
1389 		/* If pins are floating or the hardware is otherwise
1390 		 * unpopulated, the device shouldn't be used. */
1391 		if (bhnd_is_hw_disabled(child))
1392 			device_disable(child);
1393 	}
1394 
1395 	/* Map all valid core's config register blocks and perform interrupt
1396 	 * assignment */
1397 	for (u_int i = 0; i < chipid->ncores; i++) {
1398 		struct siba_devinfo	*dinfo;
1399 		device_t		 child;
1400 
1401 		child = children[i];
1402 
1403 		/* Skip if core is disabled */
1404 		if (bhnd_is_hw_disabled(child))
1405 			continue;
1406 
1407 		dinfo = device_get_ivars(child);
1408 
1409 		/* Map the core's config blocks */
1410 		if ((error = siba_map_cfg_resources(dev, dinfo)))
1411 			goto failed;
1412 
1413 		/* Issue bus callback for fully initialized child. */
1414 		BHND_BUS_CHILD_ADDED(dev, child);
1415 	}
1416 
1417 	free(cores, M_BHND);
1418 	free(children, M_BHND);
1419 
1420 	return (0);
1421 
1422 failed:
1423 	for (u_int i = 0; i < chipid->ncores; i++) {
1424 		if (children[i] == NULL)
1425 			continue;
1426 
1427 		device_delete_child(dev, children[i]);
1428 	}
1429 
1430 	free(cores, M_BHND);
1431 	free(children, M_BHND);
1432 
1433 	if (r != NULL)
1434 		bhnd_release_resource(dev, SYS_RES_MEMORY, rid, r);
1435 
1436 	return (error);
1437 }
1438 
/* newbus/bhnd(4) method dispatch table for the siba(4) bus driver */
static device_method_t siba_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			siba_probe),
	DEVMETHOD(device_attach,		siba_attach),
	DEVMETHOD(device_detach,		siba_detach),
	DEVMETHOD(device_resume,		siba_resume),
	DEVMETHOD(device_suspend,		siba_suspend),

	/* Bus interface */
	DEVMETHOD(bus_add_child,		siba_add_child),
	DEVMETHOD(bus_child_deleted,		siba_child_deleted),
	DEVMETHOD(bus_read_ivar,		siba_read_ivar),
	DEVMETHOD(bus_write_ivar,		siba_write_ivar),
	DEVMETHOD(bus_get_resource_list,	siba_get_resource_list),

	/* BHND interface */
	DEVMETHOD(bhnd_bus_get_erom_class,	siba_get_erom_class),
	DEVMETHOD(bhnd_bus_alloc_pmu,		siba_alloc_pmu),
	DEVMETHOD(bhnd_bus_release_pmu,		siba_release_pmu),
	DEVMETHOD(bhnd_bus_request_clock,	siba_request_clock),
	DEVMETHOD(bhnd_bus_enable_clocks,	siba_enable_clocks),
	DEVMETHOD(bhnd_bus_request_ext_rsrc,	siba_request_ext_rsrc),
	DEVMETHOD(bhnd_bus_release_ext_rsrc,	siba_release_ext_rsrc),
	DEVMETHOD(bhnd_bus_get_clock_freq,	siba_get_clock_freq),
	DEVMETHOD(bhnd_bus_get_clock_latency,	siba_get_clock_latency),
	DEVMETHOD(bhnd_bus_read_ioctl,		siba_read_ioctl),
	DEVMETHOD(bhnd_bus_write_ioctl,		siba_write_ioctl),
	DEVMETHOD(bhnd_bus_read_iost,		siba_read_iost),
	DEVMETHOD(bhnd_bus_is_hw_suspended,	siba_is_hw_suspended),
	DEVMETHOD(bhnd_bus_reset_hw,		siba_reset_hw),
	DEVMETHOD(bhnd_bus_suspend_hw,		siba_suspend_hw),
	DEVMETHOD(bhnd_bus_read_config,		siba_read_config),
	DEVMETHOD(bhnd_bus_write_config,	siba_write_config),
	DEVMETHOD(bhnd_bus_get_port_count,	siba_get_port_count),
	DEVMETHOD(bhnd_bus_get_region_count,	siba_get_region_count),
	DEVMETHOD(bhnd_bus_get_port_rid,	siba_get_port_rid),
	DEVMETHOD(bhnd_bus_decode_port_rid,	siba_decode_port_rid),
	DEVMETHOD(bhnd_bus_get_region_addr,	siba_get_region_addr),
	DEVMETHOD(bhnd_bus_get_intr_count,	siba_get_intr_count),
	DEVMETHOD(bhnd_bus_get_intr_ivec,	siba_get_intr_ivec),

	DEVMETHOD_END
};

/* Declare the siba driver class as a subclass of the generic bhnd driver */
DEFINE_CLASS_1(bhnd, siba_driver, siba_methods, sizeof(struct siba_softc), bhnd_driver);

MODULE_VERSION(siba, 1);
MODULE_DEPEND(siba, bhnd, 1, 1, 1);
1487