/*-
 * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Landon Fuller
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/refcount.h>
#include <sys/systm.h>

#include <machine/bus.h>

#include <dev/bhnd/cores/chipc/chipc.h>
#include <dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl.h>

#include "sibareg.h"
#include "sibavar.h"

static bhnd_erom_class_t *
siba_get_erom_class(driver_t *driver)
{
	return (&siba_erom_parser);
}

int
siba_probe(device_t dev)
{
	device_set_desc(dev, "SIBA BHND bus");
	return (BUS_PROBE_DEFAULT);
}

/**
 * Default siba(4) bus driver implementation of DEVICE_ATTACH().
 *
 * This implementation initializes internal siba(4) state and performs
 * bus enumeration; it must be called by subclassing drivers in
 * DEVICE_ATTACH() before using any other bus driver methods.
 */
int
siba_attach(device_t dev)
{
	struct siba_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	SIBA_LOCK_INIT(sc);

	/* Enumerate children */
	if ((error = siba_add_children(dev))) {
		device_delete_children(dev);
		SIBA_LOCK_DESTROY(sc);
		return (error);
	}

	return (0);
}
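
/*
 * Example: a minimal DEVICE_ATTACH() implementation for a subclassing
 * driver (a sketch; "mybus_attach" is a hypothetical name, and attaching
 * the enumerated children via bhnd_generic_attach() is an assumption,
 * mirroring siba_detach()'s use of bhnd_generic_detach() below):
 *
 *	static int
 *	mybus_attach(device_t dev)
 *	{
 *		int error;
 *
 *		if ((error = siba_attach(dev)))
 *			return (error);
 *
 *		return (bhnd_generic_attach(dev));
 *	}
 */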

int
siba_detach(device_t dev)
{
	struct siba_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);

	if ((error = bhnd_generic_detach(dev)))
		return (error);

	SIBA_LOCK_DESTROY(sc);

	return (0);
}

int
siba_resume(device_t dev)
{
	return (bhnd_generic_resume(dev));
}

int
siba_suspend(device_t dev)
{
	return (bhnd_generic_suspend(dev));
}

static int
siba_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct siba_softc		*sc;
	const struct siba_devinfo	*dinfo;
	const struct bhnd_core_info	*cfg;

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);
	cfg = &dinfo->core_id.core_info;

	switch (index) {
	case BHND_IVAR_VENDOR:
		*result = cfg->vendor;
		return (0);
	case BHND_IVAR_DEVICE:
		*result = cfg->device;
		return (0);
	case BHND_IVAR_HWREV:
		*result = cfg->hwrev;
		return (0);
	case BHND_IVAR_DEVICE_CLASS:
		*result = bhnd_core_class(cfg);
		return (0);
	case BHND_IVAR_VENDOR_NAME:
		*result = (uintptr_t) bhnd_vendor_name(cfg->vendor);
		return (0);
	case BHND_IVAR_DEVICE_NAME:
		*result = (uintptr_t) bhnd_core_name(cfg);
		return (0);
	case BHND_IVAR_CORE_INDEX:
		*result = cfg->core_idx;
		return (0);
	case BHND_IVAR_CORE_UNIT:
		*result = cfg->unit;
		return (0);
	case BHND_IVAR_PMU_INFO:
		SIBA_LOCK(sc);
		switch (dinfo->pmu_state) {
		case SIBA_PMU_NONE:
			*result = (uintptr_t)NULL;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_BHND:
			*result = (uintptr_t)dinfo->pmu.bhnd_info;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_PWRCTL:
		case SIBA_PMU_FIXED:
			panic("bhnd_get_pmu_info() called with siba PMU state "
			    "%d", dinfo->pmu_state);
			return (ENXIO);
		}

		panic("invalid PMU state: %d", dinfo->pmu_state);
		return (ENXIO);

	default:
		return (ENOENT);
	}
}

static int
siba_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	switch (index) {
	case BHND_IVAR_VENDOR:
	case BHND_IVAR_DEVICE:
	case BHND_IVAR_HWREV:
	case BHND_IVAR_DEVICE_CLASS:
	case BHND_IVAR_VENDOR_NAME:
	case BHND_IVAR_DEVICE_NAME:
	case BHND_IVAR_CORE_INDEX:
	case BHND_IVAR_CORE_UNIT:
		return (EINVAL);
	case BHND_IVAR_PMU_INFO:
		SIBA_LOCK(sc);
		switch (dinfo->pmu_state) {
		case SIBA_PMU_NONE:
		case SIBA_PMU_BHND:
			dinfo->pmu.bhnd_info = (void *)value;
			dinfo->pmu_state = SIBA_PMU_BHND;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_PWRCTL:
		case SIBA_PMU_FIXED:
			panic("bhnd_set_pmu_info() called with siba PMU state "
			    "%d", dinfo->pmu_state);
			return (ENXIO);
		}

		panic("invalid PMU state: %d", dinfo->pmu_state);
		return (ENXIO);

	default:
		return (ENOENT);
	}
}

static struct resource_list *
siba_get_resource_list(device_t dev, device_t child)
{
	struct siba_devinfo *dinfo = device_get_ivars(child);
	return (&dinfo->resources);
}

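/*
 * Per-core PMU allocation states, as managed by siba_alloc_pmu() and
 * siba_release_pmu() below:
 *
 *	SIBA_PMU_NONE	No allocation; the initial (and released) state.
 *	SIBA_PMU_BHND	ChipCommon advertises PMU support; all requests
 *			are delegated to the generic bhnd(4) PMU code.
 *	SIBA_PMU_PWRCTL	Legacy PWRCTL chipset; clock requests are mapped
 *			onto the PWRCTL provider.
 *	SIBA_PMU_FIXED	No dynamic clock control; requests are satisfied
 *			as no-ops against the always-available HT clock.
 */
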
/* BHND_BUS_ALLOC_PMU() */
static int
siba_alloc_pmu(device_t dev, device_t child)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	device_t		 chipc;
	device_t		 pwrctl;
	struct chipc_caps	 ccaps;
	siba_pmu_state		 pmu_state;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);
	pwrctl = NULL;

	/* Fetch ChipCommon capability flags */
	chipc = bhnd_retain_provider(child, BHND_SERVICE_CHIPC);
	if (chipc != NULL) {
		ccaps = *BHND_CHIPC_GET_CAPS(chipc);
		bhnd_release_provider(child, chipc, BHND_SERVICE_CHIPC);
	} else {
		memset(&ccaps, 0, sizeof(ccaps));
	}

	/* Defer to bhnd(4)'s PMU implementation if ChipCommon exists and
	 * advertises PMU support */
	if (ccaps.pmu) {
		if ((error = bhnd_generic_alloc_pmu(dev, child)))
			return (error);

		KASSERT(dinfo->pmu_state == SIBA_PMU_BHND,
		    ("unexpected PMU state: %d", dinfo->pmu_state));

		return (0);
	}

	/*
	 * This is either a legacy PWRCTL chipset, or the device does not
	 * support dynamic clock control.
	 *
	 * We need to map all bhnd(4) bus PMU operations onto PWRCTL or
	 * no-op equivalents.
	 */
	if (ccaps.pwr_ctrl) {
		pmu_state = SIBA_PMU_PWRCTL;
		pwrctl = bhnd_retain_provider(child, BHND_SERVICE_PWRCTL);
		if (pwrctl == NULL) {
			device_printf(dev, "PWRCTL not found\n");
			return (ENODEV);
		}
	} else {
		pmu_state = SIBA_PMU_FIXED;
		pwrctl = NULL;
	}

	SIBA_LOCK(sc);

	/* Per-core PMU state already allocated? */
	if (dinfo->pmu_state != SIBA_PMU_NONE) {
		panic("duplicate PMU allocation for %s",
		    device_get_nameunit(child));
	}

	/* Update the child's PMU allocation state, and transfer ownership of
	 * the PWRCTL provider reference (if any) */
	dinfo->pmu_state = pmu_state;
	dinfo->pmu.pwrctl = pwrctl;

	SIBA_UNLOCK(sc);

	return (0);
}

/* BHND_BUS_RELEASE_PMU() */
static int
siba_release_pmu(device_t dev, device_t child)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	device_t		 pwrctl;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("pmu over-release for %s", device_get_nameunit(child));
		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_release_pmu(dev, child));

	case SIBA_PMU_PWRCTL:
		/* Requesting BHND_CLOCK_DYN releases any outstanding clock
		 * reservations */
		pwrctl = dinfo->pmu.pwrctl;
		error = bhnd_pwrctl_request_clock(pwrctl, child,
		    BHND_CLOCK_DYN);
		if (error) {
			SIBA_UNLOCK(sc);
			return (error);
		}

		/* Clean up the child's PMU state */
		dinfo->pmu_state = SIBA_PMU_NONE;
		dinfo->pmu.pwrctl = NULL;
		SIBA_UNLOCK(sc);

		/* Release the provider reference */
		bhnd_release_provider(child, pwrctl, BHND_SERVICE_PWRCTL);
		return (0);

	case SIBA_PMU_FIXED:
		/* Clean up the child's PMU state */
		KASSERT(dinfo->pmu.pwrctl == NULL,
		    ("PWRCTL reference with FIXED state"));

		dinfo->pmu_state = SIBA_PMU_NONE;
		dinfo->pmu.pwrctl = NULL;
		SIBA_UNLOCK(sc);

		return (0);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_GET_CLOCK_LATENCY() */
static int
siba_get_clock_latency(device_t dev, device_t child, bhnd_clock clock,
    u_int *latency)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_get_clock_latency(dev, child, clock,
		    latency));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_get_clock_latency(dinfo->pmu.pwrctl, clock,
		    latency);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* HT clock is always available, and incurs no transition
		 * delay. */
		switch (clock) {
		case BHND_CLOCK_HT:
			*latency = 0;
			return (0);

		default:
			return (ENODEV);
		}
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_GET_CLOCK_FREQ() */
static int
siba_get_clock_freq(device_t dev, device_t child, bhnd_clock clock,
    u_int *freq)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_get_clock_freq(dev, child, clock, freq));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_get_clock_freq(dinfo->pmu.pwrctl, clock,
		    freq);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_REQUEST_EXT_RSRC() */
static int
siba_request_ext_rsrc(device_t dev, device_t child, u_int rsrc)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_request_ext_rsrc(dev, child, rsrc));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		/* HW does not support per-core external resources */
		SIBA_UNLOCK(sc);
		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_RELEASE_EXT_RSRC() */
static int
siba_release_ext_rsrc(device_t dev, device_t child, u_int rsrc)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_release_ext_rsrc(dev, child, rsrc));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		/* HW does not support per-core external resources */
		SIBA_UNLOCK(sc);
		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_REQUEST_CLOCK() */
static int
siba_request_clock(device_t dev, device_t child, bhnd_clock clock)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_request_clock(dev, child, clock));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child,
		    clock);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* HT clock is always available, and fulfills any of the
		 * following clock requests */
		switch (clock) {
		case BHND_CLOCK_DYN:
		case BHND_CLOCK_ILP:
		case BHND_CLOCK_ALP:
		case BHND_CLOCK_HT:
			return (0);

		default:
			return (ENODEV);
		}
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}
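
/*
 * Example: a child core allocating PMU state and requesting the HT clock
 * (a sketch using the bhnd(4) convenience wrappers; error handling
 * abbreviated):
 *
 *	if ((error = bhnd_alloc_pmu(dev)))
 *		return (error);
 *	if ((error = bhnd_request_clock(dev, BHND_CLOCK_HT)))
 *		return (error);
 *
 * On SIBA_PMU_FIXED hardware the request succeeds as a no-op; on
 * SIBA_PMU_PWRCTL chipsets it is mapped onto the PWRCTL provider.
 */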

/* BHND_BUS_ENABLE_CLOCKS() */
static int
siba_enable_clocks(device_t dev, device_t child, uint32_t clocks)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_enable_clocks(dev, child, clocks));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* All (supported) clocks are already enabled by default */
		clocks &= ~(BHND_CLOCK_DYN |
			    BHND_CLOCK_ILP |
			    BHND_CLOCK_ALP |
			    BHND_CLOCK_HT);

		if (clocks != 0) {
			device_printf(dev, "%s requested unknown clocks: %#x\n",
			    device_get_nameunit(child), clocks);
			return (ENODEV);
		}

		return (0);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

static int
siba_read_iost(device_t dev, device_t child, uint16_t *iost)
{
	uint32_t	tmhigh;
	int		error;

	error = bhnd_read_config(child, SIBA_CFG0_TMSTATEHIGH, &tmhigh, 4);
	if (error)
		return (error);

	*iost = (SIBA_REG_GET(tmhigh, TMH_SISF));
	return (0);
}

static int
siba_read_ioctl(device_t dev, device_t child, uint16_t *ioctl)
{
	uint32_t	ts_low;
	int		error;

	if ((error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4)))
		return (error);

	*ioctl = (SIBA_REG_GET(ts_low, TML_SICF));
	return (0);
}

static int
siba_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	uint32_t		 ts_low, ts_mask;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* Fetch CFG0 mapping */
	dinfo = device_get_ivars(child);
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* Mask and set TMSTATELOW core flag bits */
	ts_mask = (mask << SIBA_TML_SICF_SHIFT) & SIBA_TML_SICF_MASK;
	ts_low = (value << SIBA_TML_SICF_SHIFT) & ts_mask;

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, ts_mask);
	return (0);
}
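
/*
 * Example: setting a single I/O control flag while leaving all others
 * untouched (a sketch; whether a given flag applies is core-specific,
 * and BHND_IOCTL_PME is assumed here purely for illustration):
 *
 *	error = bhnd_write_ioctl(child, BHND_IOCTL_PME, BHND_IOCTL_PME);
 *
 * Only the bits set in the mask argument are modified; siba_write_ioctl()
 * shifts both value and mask into the TMSTATELOW SICF field before
 * updating the register.
 */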

static bool
siba_is_hw_suspended(device_t dev, device_t child)
{
	uint32_t		ts_low;
	uint16_t		ioctl;
	int			error;

	/* Fetch target state */
	error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4);
	if (error) {
		device_printf(child, "error reading HW reset state: %d\n",
		    error);
		return (true);
	}

	/* Is core held in RESET? */
	if (ts_low & SIBA_TML_RESET)
		return (true);

	/* Is target reject enabled? */
	if (ts_low & SIBA_TML_REJ_MASK)
		return (true);

	/* Is core clocked? */
	ioctl = SIBA_REG_GET(ts_low, TML_SICF);
	if (!(ioctl & BHND_IOCTL_CLK_EN))
		return (true);

	return (false);
}

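/**
 * Default siba(4) bus driver implementation of BHND_BUS_RESET_HW().
 *
 * The core is first placed into a known RESET state via bhnd_suspend_hw(),
 * then held in RESET with clocks forced while any latched target and
 * initiator errors are cleared; finally RESET and BHND_IOCTL_CLK_FORCE are
 * released, returning clock gating control to the core.
 */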
static int
siba_reset_hw(device_t dev, device_t child, uint16_t ioctl,
    uint16_t reset_ioctl)
{
	struct siba_devinfo		*dinfo;
	struct bhnd_resource		*r;
	uint32_t			 ts_low, imstate;
	uint16_t			 clkflags;
	int				 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	dinfo = device_get_ivars(child);

	/* Can't suspend the core without access to the CFG0 registers */
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
	clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
	if (ioctl & clkflags)
		return (EINVAL);

	/* Place core into known RESET state */
	if ((error = bhnd_suspend_hw(child, reset_ioctl)))
		return (error);

	/* Set RESET, clear REJ, set the caller's IOCTL flags, and
	 * force clocks to ensure the signal propagates throughout the
	 * core. */
	ts_low = SIBA_TML_RESET |
		 (ioctl << SIBA_TML_SICF_SHIFT) |
		 (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) |
		 (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT);

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, UINT32_MAX);

	/* Clear any target errors */
	if (bhnd_bus_read_4(r, SIBA_CFG0_TMSTATEHIGH) & SIBA_TMH_SERR) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH,
		    0x0, SIBA_TMH_SERR);
	}

	/* Clear any initiator errors */
	imstate = bhnd_bus_read_4(r, SIBA_CFG0_IMSTATE);
	if (imstate & (SIBA_IM_IBE|SIBA_IM_TO)) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0,
		    SIBA_IM_IBE|SIBA_IM_TO);
	}

	/* Release from RESET while leaving clocks forced, ensuring the
	 * signal propagates throughout the core */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    SIBA_TML_RESET);

	/* The core should now be active; we can clear the BHND_IOCTL_CLK_FORCE
	 * bit and allow the core to manage clock gating. */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT));

	return (0);
}

static int
siba_suspend_hw(device_t dev, device_t child, uint16_t ioctl)
{
	struct siba_softc		*sc;
	struct siba_devinfo		*dinfo;
	struct bhnd_resource		*r;
	uint32_t			 idl, ts_low, ts_mask;
	uint16_t			 cflags, clkflags;
	int				 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	/* Can't suspend the core without access to the CFG0 registers */
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
	clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
	if (ioctl & clkflags)
		return (EINVAL);

	/* Already in RESET? */
	ts_low = bhnd_bus_read_4(r, SIBA_CFG0_TMSTATELOW);
	if (ts_low & SIBA_TML_RESET)
		return (0);

	/* If clocks are already disabled, we can place the core directly
	 * into RESET|REJ while setting the caller's IOCTL flags. */
	cflags = SIBA_REG_GET(ts_low, TML_SICF);
	if (!(cflags & BHND_IOCTL_CLK_EN)) {
		ts_low = SIBA_TML_RESET | SIBA_TML_REJ |
			 (ioctl << SIBA_TML_SICF_SHIFT);
		ts_mask = SIBA_TML_RESET | SIBA_TML_REJ | SIBA_TML_SICF_MASK;

		siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
		    ts_low, ts_mask);
		return (0);
	}

	/* Reject further transactions reaching this core */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    SIBA_TML_REJ, SIBA_TML_REJ);

	/* Wait for transaction busy flag to clear for all transactions
	 * initiated by this core */
	error = siba_wait_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH,
	    0x0, SIBA_TMH_BUSY, 100000);
	if (error)
		return (error);

	/* If this is an initiator core, we need to reject initiator
	 * transactions too. */
	idl = bhnd_bus_read_4(r, SIBA_CFG0_IDLOW);
	if (idl & SIBA_IDL_INIT) {
		/* Reject further initiator transactions */
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
		    SIBA_IM_RJ, SIBA_IM_RJ);

		/* Wait for initiator busy flag to clear */
		error = siba_wait_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
		    0x0, SIBA_IM_BY, 100000);
		if (error)
			return (error);
	}

	/* Put the core into RESET, set the caller's IOCTL flags, and
	 * force clocks to ensure the RESET signal propagates throughout the
	 * core. */
	ts_low = SIBA_TML_RESET |
		 (ioctl << SIBA_TML_SICF_SHIFT) |
		 (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) |
		 (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT);
	ts_mask = SIBA_TML_RESET |
		  SIBA_TML_SICF_MASK;

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low,
	    ts_mask);

	/* Give RESET ample time */
	DELAY(10);

	/* Clear previously asserted initiator reject */
	if (idl & SIBA_IDL_INIT) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0,
		    SIBA_IM_RJ);
	}

	/* Disable all clocks, leaving RESET and REJ asserted */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) << SIBA_TML_SICF_SHIFT);

	/*
	 * Core is now in RESET.
	 *
	 * If the core holds any PWRCTL clock reservations, we need to release
	 * those now. This emulates the standard bhnd(4) PMU behavior of RESET
	 * automatically clearing clkctl.
	 */
	SIBA_LOCK(sc);
	if (dinfo->pmu_state == SIBA_PMU_PWRCTL) {
		error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child,
		    BHND_CLOCK_DYN);
		SIBA_UNLOCK(sc);

		if (error) {
			device_printf(child, "failed to release clock request: "
			    "%d\n", error);
			return (error);
		}

		return (0);
	} else {
		SIBA_UNLOCK(sc);
		return (0);
	}
}

static int
siba_read_config(device_t dev, device_t child, bus_size_t offset, void *value,
    u_int width)
{
	struct siba_devinfo	*dinfo;
	rman_res_t		 r_size;

	/* Must be directly attached */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* CFG0 registers must be available */
	dinfo = device_get_ivars(child);
	if (dinfo->cfg_res[0] == NULL)
		return (ENODEV);

	/* Offset must fall within CFG0 */
	r_size = rman_get_size(dinfo->cfg_res[0]->res);
	if (r_size < offset || r_size - offset < width)
		return (EFAULT);

	switch (width) {
	case 1:
		*((uint8_t *)value) = bhnd_bus_read_1(dinfo->cfg_res[0],
		    offset);
		return (0);
	case 2:
		*((uint16_t *)value) = bhnd_bus_read_2(dinfo->cfg_res[0],
		    offset);
		return (0);
	case 4:
		*((uint32_t *)value) = bhnd_bus_read_4(dinfo->cfg_res[0],
		    offset);
		return (0);
	default:
		return (EINVAL);
	}
}

static int
siba_write_config(device_t dev, device_t child, bus_size_t offset,
    const void *value, u_int width)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	rman_res_t		 r_size;

	/* Must be directly attached */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* CFG0 registers must be available */
	dinfo = device_get_ivars(child);
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* Offset must fall within CFG0 */
	r_size = rman_get_size(r->res);
	if (r_size < offset || r_size - offset < width)
		return (EFAULT);

	switch (width) {
	case 1:
		bhnd_bus_write_1(r, offset, *(const uint8_t *)value);
		return (0);
	case 2:
		bhnd_bus_write_2(r, offset, *(const uint16_t *)value);
		return (0);
	case 4:
		bhnd_bus_write_4(r, offset, *(const uint32_t *)value);
		return (0);
	default:
		return (EINVAL);
	}
}
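
/*
 * Example: reading a 32-bit CFG0 register through the bus method, as
 * siba_read_iost() does above (a sketch):
 *
 *	uint32_t tmhigh;
 *	error = bhnd_read_config(child, SIBA_CFG0_TMSTATEHIGH, &tmhigh, 4);
 *
 * The offset and width are validated against the mapped CFG0 resource;
 * out-of-range accesses return EFAULT, and unsupported widths EINVAL.
 */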

static u_int
siba_get_port_count(device_t dev, device_t child, bhnd_port_type type)
{
	struct siba_devinfo *dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child,
		    type));

	dinfo = device_get_ivars(child);
	return (siba_port_count(&dinfo->core_id, type));
}

static u_int
siba_get_region_count(device_t dev, device_t child, bhnd_port_type type,
    u_int port)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev), child,
		    type, port));

	dinfo = device_get_ivars(child);
	return (siba_port_region_count(&dinfo->core_id, type, port));
}

static int
siba_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;
	struct siba_cfg_block	*cfg;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_PORT_RID(device_get_parent(dev), child,
		    port_type, port_num, region_num));

	dinfo = device_get_ivars(child);

	/* Look for a matching addrspace entry */
	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
	if (addrspace != NULL)
		return (addrspace->sa_rid);

	/* Try the config blocks */
	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
	if (cfg != NULL)
		return (cfg->cb_rid);

	/* Not found */
	return (-1);
}

static int
siba_decode_port_rid(device_t dev, device_t child, int type, int rid,
    bhnd_port_type *port_type, u_int *port_num, u_int *region_num)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_DECODE_PORT_RID(device_get_parent(dev), child,
		    type, rid, port_type, port_num, region_num));

	dinfo = device_get_ivars(child);

	/* Ports are always memory mapped */
	if (type != SYS_RES_MEMORY)
		return (EINVAL);

	/* Look for a matching addrspace entry */
	for (u_int i = 0; i < dinfo->core_id.num_addrspace; i++) {
		if (dinfo->addrspace[i].sa_rid != rid)
			continue;

		*port_type = BHND_PORT_DEVICE;
		*port_num = siba_addrspace_device_port(i);
		*region_num = siba_addrspace_device_region(i);
		return (0);
	}

	/* Try the config blocks */
	for (u_int i = 0; i < dinfo->core_id.num_cfg_blocks; i++) {
		if (dinfo->cfg[i].cb_rid != rid)
			continue;

		*port_type = BHND_PORT_AGENT;
		*port_num = siba_cfg_agent_port(i);
		*region_num = siba_cfg_agent_region(i);
		return (0);
	}

	/* Not found */
	return (ENOENT);
}

static int
siba_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;
	struct siba_cfg_block	*cfg;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev) {
		return (BHND_BUS_GET_REGION_ADDR(device_get_parent(dev), child,
		    port_type, port_num, region_num, addr, size));
	}

	dinfo = device_get_ivars(child);

	/* Look for a matching addrspace */
	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
	if (addrspace != NULL) {
		*addr = addrspace->sa_base;
		*size = addrspace->sa_size - addrspace->sa_bus_reserved;
		return (0);
	}

	/* Look for a matching cfg block */
	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
	if (cfg != NULL) {
		*addr = cfg->cb_base;
		*size = cfg->cb_size;
		return (0);
	}

	/* Not found */
	return (ENOENT);
}
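
/*
 * Note: a core's primary register block is exposed as device port 0,
 * region 0; siba_map_cfg_resources() below fetches it via
 * siba_find_addrspace(dinfo, BHND_PORT_DEVICE, 0, 0), while the Sonics
 * configuration blocks are exposed as agent port regions (see
 * siba_decode_port_rid() above).
 */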

/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT().
 */
u_int
siba_get_intr_count(device_t dev, device_t child)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_INTR_COUNT(device_get_parent(dev), child));

	dinfo = device_get_ivars(child);
	if (!dinfo->intr_en) {
		/* No interrupts */
		return (0);
	} else {
		/* One assigned interrupt */
		return (1);
	}
}

/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_IVEC().
 */
int
siba_get_intr_ivec(device_t dev, device_t child, u_int intr, u_int *ivec)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_INTR_IVEC(device_get_parent(dev), child,
		    intr, ivec));

	/* Must be a valid interrupt ID */
	if (intr >= siba_get_intr_count(dev, child))
		return (ENXIO);

	KASSERT(intr == 0, ("invalid ivec %u", intr));

	dinfo = device_get_ivars(child);

	KASSERT(dinfo->intr_en, ("core does not have an interrupt assigned"));
	*ivec = dinfo->intr.flag;
	return (0);
}

/**
 * Register all address space mappings for @p di.
 *
 * @param dev The siba bus device.
 * @param di The device info instance on which to register all address
 * space entries.
 * @param r A resource mapping the core's register block for @p di.
 */
static int
siba_register_addrspaces(device_t dev, struct siba_devinfo *di,
    struct bhnd_resource *r)
{
	struct siba_core_id	*cid;
	uint32_t		 addr;
	uint32_t		 size;
	int			 error;

	cid = &di->core_id;

	/* Register the device address space entries */
	for (uint8_t i = 0; i < di->core_id.num_addrspace; i++) {
		uint32_t	adm;
		u_int		adm_offset;
		uint32_t	bus_reserved;

		/* Determine the register offset */
		adm_offset = siba_admatch_offset(i);
		if (adm_offset == 0) {
			device_printf(dev, "addrspace %hhu is unsupported\n",
			    i);
			return (ENODEV);
		}

		/* Fetch the address match register value */
		adm = bhnd_bus_read_4(r, adm_offset);

		/* Parse the value */
		if ((error = siba_parse_admatch(adm, &addr, &size))) {
			device_printf(dev, "failed to decode address match "
			    "register value 0x%x\n", adm);
			return (error);
		}

		/* If this is the device's core/enumeration addrspace,
		 * reserve the Sonics configuration register blocks for the
		 * use of our bus. */
		bus_reserved = 0;
		if (i == SIBA_CORE_ADDRSPACE)
			bus_reserved = cid->num_cfg_blocks * SIBA_CFG_SIZE;

		/* Append the region info */
		error = siba_append_dinfo_region(di, i, addr, size,
		    bus_reserved);
		if (error)
			return (error);
	}

	return (0);
}

/**
 * Register all interrupt descriptors for @p dinfo. Must be called after
 * configuration blocks have been mapped.
 *
 * @param dev The siba bus device.
 * @param child The siba child device.
 * @param dinfo The device info instance on which to register all interrupt
 * descriptor entries.
 * @param r A resource mapping the core's register block for @p dinfo.
 */
static int
siba_register_interrupts(device_t dev, device_t child,
    struct siba_devinfo *dinfo, struct bhnd_resource *r)
{
	uint32_t	tpsflag;
	int		error;

	/* Is backplane interrupt distribution enabled for this core? */
	tpsflag = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_TPSFLAG));
	if ((tpsflag & SIBA_TPS_F0EN0) == 0) {
		dinfo->intr_en = false;
		return (0);
	}

	/* Have one interrupt */
	dinfo->intr_en = true;
	dinfo->intr.flag = SIBA_REG_GET(tpsflag, TPS_NUM0);
	dinfo->intr.mapped = false;
	dinfo->intr.irq = 0;
	dinfo->intr.rid = -1;

	/* Map the interrupt */
	error = BHND_BUS_MAP_INTR(dev, child, 0 /* single intr is always 0 */,
	    &dinfo->intr.irq);
	if (error) {
		device_printf(dev, "failed mapping interrupt line for core %u: "
		    "%d\n", dinfo->core_id.core_info.core_idx, error);
		return (error);
	}
	dinfo->intr.mapped = true;

	/* Update the resource list */
	dinfo->intr.rid = resource_list_add_next(&dinfo->resources, SYS_RES_IRQ,
	    dinfo->intr.irq, dinfo->intr.irq, 1);

	return (0);
}

/**
 * Map per-core configuration blocks for @p dinfo.
 *
 * @param dev The siba bus device.
 * @param dinfo The device info instance on which to map all per-core
 * configuration blocks.
 */
static int
siba_map_cfg_resources(device_t dev, struct siba_devinfo *dinfo)
{
	struct siba_addrspace	*addrspace;
	rman_res_t		 r_start, r_count, r_end;
	uint8_t			 num_cfg;
	int			 rid;

	num_cfg = dinfo->core_id.num_cfg_blocks;
	if (num_cfg > SIBA_MAX_CFG) {
		device_printf(dev, "config block count %hhu out of range\n",
		    num_cfg);
		return (ENXIO);
	}

	/* Fetch the core register address space */
	addrspace = siba_find_addrspace(dinfo, BHND_PORT_DEVICE, 0, 0);
	if (addrspace == NULL) {
		device_printf(dev, "missing device registers\n");
		return (ENXIO);
	}

	/*
	 * Map the per-core configuration blocks
	 */
	for (uint8_t i = 0; i < num_cfg; i++) {
		/* Add to child's resource list */
		r_start = addrspace->sa_base + SIBA_CFG_OFFSET(i);
		r_count = SIBA_CFG_SIZE;
		r_end = r_start + r_count - 1;

		rid = resource_list_add_next(&dinfo->resources, SYS_RES_MEMORY,
		    r_start, r_end, r_count);

		/* Initialize config block descriptor */
		dinfo->cfg[i] = ((struct siba_cfg_block) {
			.cb_base = r_start,
			.cb_size = SIBA_CFG_SIZE,
			.cb_rid = rid
		});

		/* Map the config resource for bus-level access */
		dinfo->cfg_rid[i] = SIBA_CFG_RID(dinfo, i);
		dinfo->cfg_res[i] = BHND_BUS_ALLOC_RESOURCE(dev, dev,
		    SYS_RES_MEMORY, &dinfo->cfg_rid[i], r_start, r_end,
		    r_count, RF_ACTIVE|RF_SHAREABLE);

		if (dinfo->cfg_res[i] == NULL) {
			device_printf(dev, "failed to allocate SIBA_CFG%hhu\n",
			    i);
			return (ENXIO);
		}
	}

	return (0);
}

static device_t
siba_add_child(device_t dev, u_int order, const char *name, int unit)
{
	struct siba_devinfo	*dinfo;
	device_t		 child;

	child = device_add_child_ordered(dev, order, name, unit);
	if (child == NULL)
		return (NULL);

	if ((dinfo = siba_alloc_dinfo(dev)) == NULL) {
		device_delete_child(dev, child);
		return (NULL);
	}

	device_set_ivars(child, dinfo);

	return (child);
}

static void
siba_child_deleted(device_t dev, device_t child)
{
	struct siba_devinfo	*dinfo;

	/* Call required bhnd(4) implementation */
	bhnd_generic_child_deleted(dev, child);

	/* Free siba device info */
	if ((dinfo = device_get_ivars(child)) != NULL)
		siba_free_dinfo(dev, child, dinfo);

	device_set_ivars(child, NULL);
}

/**
 * Scan the core table and add all valid discovered cores to
 * the bus.
 *
 * @param dev The siba bus device.
 */
int
siba_add_children(device_t dev)
{
	const struct bhnd_chipid	*chipid;
	struct siba_core_id		*cores;
	struct bhnd_resource		*r;
	device_t			*children;
	int				 rid;
	int				 error;

	cores = NULL;
	r = NULL;

	chipid = BHND_BUS_GET_CHIPID(dev, dev);

	/* Allocate our temporary core and device table */
	cores = malloc(sizeof(*cores) * chipid->ncores, M_BHND, M_WAITOK);
	children = malloc(sizeof(*children) * chipid->ncores, M_BHND,
	    M_WAITOK | M_ZERO);

	/*
	 * Add child devices for all discovered cores.
	 *
	 * On bridged devices, we'll exhaust our available register windows if
	 * we map config blocks on unpopulated/disabled cores. To avoid this,
	 * we defer mapping of the per-core siba(4) config blocks until all
	 * cores have been enumerated and otherwise configured.
	 */
	for (u_int i = 0; i < chipid->ncores; i++) {
		struct siba_devinfo	*dinfo;
		device_t		 child;
		uint32_t		 idhigh, idlow;
		rman_res_t		 r_count, r_end, r_start;

		/* Map the core's register block */
		rid = 0;
		r_start = SIBA_CORE_ADDR(i);
		r_count = SIBA_CORE_SIZE;
		r_end = r_start + SIBA_CORE_SIZE - 1;
		r = bhnd_alloc_resource(dev, SYS_RES_MEMORY, &rid, r_start,
		    r_end, r_count, RF_ACTIVE);
		if (r == NULL) {
			error = ENXIO;
			goto failed;
		}

		/* Read the core info */
		idhigh = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_IDHIGH));
		idlow = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_IDLOW));

		cores[i] = siba_parse_core_id(idhigh, idlow, i, 0);

		/* Determine and set unit number */
		for (u_int j = 0; j < i; j++) {
			struct bhnd_core_info *cur = &cores[i].core_info;
			struct bhnd_core_info *prev = &cores[j].core_info;

			if (prev->vendor == cur->vendor &&
			    prev->device == cur->device)
				cur->unit++;
		}

		/* Add the child device */
		child = BUS_ADD_CHILD(dev, 0, NULL, -1);
		if (child == NULL) {
			error = ENXIO;
			goto failed;
		}

		children[i] = child;

		/* Initialize per-device bus info */
		if ((dinfo = device_get_ivars(child)) == NULL) {
			error = ENXIO;
			goto failed;
		}

		if ((error = siba_init_dinfo(dev, dinfo, &cores[i])))
			goto failed;

		/* Register the core's address space(s). */
		if ((error = siba_register_addrspaces(dev, dinfo, r)))
			goto failed;

		/* Register the core's interrupts */
		if ((error = siba_register_interrupts(dev, child, dinfo, r)))
			goto failed;

		/* Unmap the core's register block */
		bhnd_release_resource(dev, SYS_RES_MEMORY, rid, r);
		r = NULL;

		/* If pins are floating or the hardware is otherwise
		 * unpopulated, the device shouldn't be used. */
		if (bhnd_is_hw_disabled(child))
			device_disable(child);
	}

	/* Map all valid cores' config register blocks and perform interrupt
	 * assignment */
	for (u_int i = 0; i < chipid->ncores; i++) {
		struct siba_devinfo	*dinfo;
		device_t		 child;

		child = children[i];

		/* Skip if core is disabled */
		if (bhnd_is_hw_disabled(child))
			continue;

		dinfo = device_get_ivars(child);

		/* Map the core's config blocks */
		if ((error = siba_map_cfg_resources(dev, dinfo)))
			goto failed;

		/* Issue bus callback for fully initialized child. */
		BHND_BUS_CHILD_ADDED(dev, child);
	}

	free(cores, M_BHND);
	free(children, M_BHND);

	return (0);

failed:
	for (u_int i = 0; i < chipid->ncores; i++) {
		if (children[i] == NULL)
			continue;

		device_delete_child(dev, children[i]);
	}

	free(cores, M_BHND);
	free(children, M_BHND);

	if (r != NULL)
		bhnd_release_resource(dev, SYS_RES_MEMORY, rid, r);

	return (error);
}

static device_method_t siba_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			siba_probe),
	DEVMETHOD(device_attach,		siba_attach),
	DEVMETHOD(device_detach,		siba_detach),
	DEVMETHOD(device_resume,		siba_resume),
	DEVMETHOD(device_suspend,		siba_suspend),

	/* Bus interface */
	DEVMETHOD(bus_add_child,		siba_add_child),
	DEVMETHOD(bus_child_deleted,		siba_child_deleted),
	DEVMETHOD(bus_read_ivar,		siba_read_ivar),
	DEVMETHOD(bus_write_ivar,		siba_write_ivar),
	DEVMETHOD(bus_get_resource_list,	siba_get_resource_list),

	/* BHND interface */
	DEVMETHOD(bhnd_bus_get_erom_class,	siba_get_erom_class),
	DEVMETHOD(bhnd_bus_alloc_pmu,		siba_alloc_pmu),
	DEVMETHOD(bhnd_bus_release_pmu,		siba_release_pmu),
	DEVMETHOD(bhnd_bus_request_clock,	siba_request_clock),
	DEVMETHOD(bhnd_bus_enable_clocks,	siba_enable_clocks),
	DEVMETHOD(bhnd_bus_request_ext_rsrc,	siba_request_ext_rsrc),
	DEVMETHOD(bhnd_bus_release_ext_rsrc,	siba_release_ext_rsrc),
	DEVMETHOD(bhnd_bus_get_clock_freq,	siba_get_clock_freq),
	DEVMETHOD(bhnd_bus_get_clock_latency,	siba_get_clock_latency),
	DEVMETHOD(bhnd_bus_read_ioctl,		siba_read_ioctl),
	DEVMETHOD(bhnd_bus_write_ioctl,		siba_write_ioctl),
	DEVMETHOD(bhnd_bus_read_iost,		siba_read_iost),
	DEVMETHOD(bhnd_bus_is_hw_suspended,	siba_is_hw_suspended),
	DEVMETHOD(bhnd_bus_reset_hw,		siba_reset_hw),
	DEVMETHOD(bhnd_bus_suspend_hw,		siba_suspend_hw),
	DEVMETHOD(bhnd_bus_read_config,		siba_read_config),
	DEVMETHOD(bhnd_bus_write_config,	siba_write_config),
	DEVMETHOD(bhnd_bus_get_port_count,	siba_get_port_count),
	DEVMETHOD(bhnd_bus_get_region_count,	siba_get_region_count),
	DEVMETHOD(bhnd_bus_get_port_rid,	siba_get_port_rid),
	DEVMETHOD(bhnd_bus_decode_port_rid,	siba_decode_port_rid),
	DEVMETHOD(bhnd_bus_get_region_addr,	siba_get_region_addr),
	DEVMETHOD(bhnd_bus_get_intr_count,	siba_get_intr_count),
	DEVMETHOD(bhnd_bus_get_intr_ivec,	siba_get_intr_ivec),

	DEVMETHOD_END
};

DEFINE_CLASS_1(bhnd, siba_driver, siba_methods, sizeof(struct siba_softc),
    bhnd_driver);

MODULE_VERSION(siba, 1);
MODULE_DEPEND(siba, bhnd, 1, 1, 1);