xref: /freebsd/sys/dev/bhnd/siba/siba.c (revision dd21556857e8d40f66bf5ad54754d9d52669ebf7)
/*-
 * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
 * Copyright (c) 2017 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by Landon Fuller
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTABILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/refcount.h>
#include <sys/systm.h>

#include <machine/bus.h>

#include <dev/bhnd/cores/chipc/chipc.h>
#include <dev/bhnd/cores/chipc/pwrctl/bhnd_pwrctl.h>

#include "siba_eromvar.h"

#include "sibareg.h"
#include "sibavar.h"

/* RID used when allocating EROM resources */
#define	SIBA_EROM_RID	0

static bhnd_erom_class_t *
siba_get_erom_class(driver_t *driver)
{
	return (&siba_erom_parser);
}

int
siba_probe(device_t dev)
{
	device_set_desc(dev, "SIBA BHND bus");
	return (BUS_PROBE_DEFAULT);
}

/**
 * Default siba(4) bus driver implementation of DEVICE_ATTACH().
 *
 * This implementation initializes internal siba(4) state and performs
 * bus enumeration, and must be called by subclassing drivers in
 * DEVICE_ATTACH() before any other bus methods.
 */
int
siba_attach(device_t dev)
{
	struct siba_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	SIBA_LOCK_INIT(sc);

	/* Enumerate children */
	if ((error = siba_add_children(dev))) {
		SIBA_LOCK_DESTROY(sc);
		return (error);
	}

	return (0);
}
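
/*
 * Example: a subclassing bus driver calls siba_attach() from its own
 * DEVICE_ATTACH() before using any other bus methods, then hands off to
 * the generic bhnd(4) attach logic. This is an illustrative sketch only;
 * the driver name below is hypothetical:
 *
 *	static int
 *	mybus_attach(device_t dev)
 *	{
 *		int error;
 *
 *		if ((error = siba_attach(dev)))
 *			return (error);
 *
 *		return (bhnd_generic_attach(dev));
 *	}
 */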

int
siba_detach(device_t dev)
{
	struct siba_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);

	if ((error = bhnd_generic_detach(dev)))
		return (error);

	SIBA_LOCK_DESTROY(sc);

	return (0);
}

int
siba_resume(device_t dev)
{
	return (bhnd_generic_resume(dev));
}

int
siba_suspend(device_t dev)
{
	return (bhnd_generic_suspend(dev));
}

static int
siba_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct siba_softc		*sc;
	const struct siba_devinfo	*dinfo;
	const struct bhnd_core_info	*cfg;

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);
	cfg = &dinfo->core_id.core_info;

	switch (index) {
	case BHND_IVAR_VENDOR:
		*result = cfg->vendor;
		return (0);
	case BHND_IVAR_DEVICE:
		*result = cfg->device;
		return (0);
	case BHND_IVAR_HWREV:
		*result = cfg->hwrev;
		return (0);
	case BHND_IVAR_DEVICE_CLASS:
		*result = bhnd_core_class(cfg);
		return (0);
	case BHND_IVAR_VENDOR_NAME:
		*result = (uintptr_t) bhnd_vendor_name(cfg->vendor);
		return (0);
	case BHND_IVAR_DEVICE_NAME:
		*result = (uintptr_t) bhnd_core_name(cfg);
		return (0);
	case BHND_IVAR_CORE_INDEX:
		*result = cfg->core_idx;
		return (0);
	case BHND_IVAR_CORE_UNIT:
		*result = cfg->unit;
		return (0);
	case BHND_IVAR_PMU_INFO:
		SIBA_LOCK(sc);
		switch (dinfo->pmu_state) {
		case SIBA_PMU_NONE:
			*result = (uintptr_t)NULL;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_BHND:
			*result = (uintptr_t)dinfo->pmu.bhnd_info;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_PWRCTL:
		case SIBA_PMU_FIXED:
			*result = (uintptr_t)NULL;
			SIBA_UNLOCK(sc);
			return (0);
		}

		panic("invalid PMU state: %d", dinfo->pmu_state);
		return (ENXIO);

	default:
		return (ENOENT);
	}
}
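
/*
 * Example: child core drivers normally read these instance variables via
 * the bhnd(4) accessors generated by BHND_ACCESSOR() in bhnd.h (e.g.
 * bhnd_get_vendor(), bhnd_get_device_name()) rather than calling
 * BUS_READ_IVAR() directly. An illustrative (hypothetical) probe sketch:
 *
 *	static int
 *	mycore_probe(device_t dev)
 *	{
 *		if (bhnd_get_vendor(dev) != BHND_MFGID_BCM)
 *			return (ENXIO);
 *
 *		device_set_desc(dev, bhnd_get_device_name(dev));
 *		return (BUS_PROBE_DEFAULT);
 *	}
 */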

static int
siba_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	switch (index) {
	case BHND_IVAR_VENDOR:
	case BHND_IVAR_DEVICE:
	case BHND_IVAR_HWREV:
	case BHND_IVAR_DEVICE_CLASS:
	case BHND_IVAR_VENDOR_NAME:
	case BHND_IVAR_DEVICE_NAME:
	case BHND_IVAR_CORE_INDEX:
	case BHND_IVAR_CORE_UNIT:
		return (EINVAL);
	case BHND_IVAR_PMU_INFO:
		SIBA_LOCK(sc);
		switch (dinfo->pmu_state) {
		case SIBA_PMU_NONE:
		case SIBA_PMU_BHND:
			dinfo->pmu.bhnd_info = (void *)value;
			dinfo->pmu_state = SIBA_PMU_BHND;
			SIBA_UNLOCK(sc);
			return (0);

		case SIBA_PMU_PWRCTL:
		case SIBA_PMU_FIXED:
			panic("bhnd_set_pmu_info() called with siba PMU state "
			    "%d", dinfo->pmu_state);
			return (ENXIO);
		}

		panic("invalid PMU state: %d", dinfo->pmu_state);
		return (ENXIO);

	default:
		return (ENOENT);
	}
}

static struct resource_list *
siba_get_resource_list(device_t dev, device_t child)
{
	struct siba_devinfo *dinfo = device_get_ivars(child);
	return (&dinfo->resources);
}

/* BHND_BUS_ALLOC_PMU() */
static int
siba_alloc_pmu(device_t dev, device_t child)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	device_t		 chipc;
	device_t		 pwrctl;
	struct chipc_caps	 ccaps;
	siba_pmu_state		 pmu_state;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);
	pwrctl = NULL;

	/* Fetch ChipCommon capability flags */
	chipc = bhnd_retain_provider(child, BHND_SERVICE_CHIPC);
	if (chipc != NULL) {
		ccaps = *BHND_CHIPC_GET_CAPS(chipc);
		bhnd_release_provider(child, chipc, BHND_SERVICE_CHIPC);
	} else {
		memset(&ccaps, 0, sizeof(ccaps));
	}

	/* Defer to bhnd(4)'s PMU implementation if ChipCommon exists and
	 * advertises PMU support */
	if (ccaps.pmu) {
		if ((error = bhnd_generic_alloc_pmu(dev, child)))
			return (error);

		KASSERT(dinfo->pmu_state == SIBA_PMU_BHND,
		    ("unexpected PMU state: %d", dinfo->pmu_state));

		return (0);
	}

279 	 * This is either a legacy PWRCTL chipset, or the device does not
280 	 * support dynamic clock control.
281 	 *
282 	 * We need to map all bhnd(4) bus PMU to PWRCTL or no-op operations.
283 	 */
284 	if (ccaps.pwr_ctrl) {
285 		pmu_state = SIBA_PMU_PWRCTL;
286 		pwrctl = bhnd_retain_provider(child, BHND_SERVICE_PWRCTL);
287 		if (pwrctl == NULL) {
288 			device_printf(dev, "PWRCTL not found\n");
289 			return (ENODEV);
290 		}
291 	} else {
292 		pmu_state = SIBA_PMU_FIXED;
293 		pwrctl = NULL;
294 	}
295 
296 	SIBA_LOCK(sc);
297 
298 	/* Per-core PMU state already allocated? */
299 	if (dinfo->pmu_state != SIBA_PMU_NONE) {
300 		panic("duplicate PMU allocation for %s",
301 		    device_get_nameunit(child));
302 	}
303 
304 	/* Update the child's PMU allocation state, and transfer ownership of
305 	 * the PWRCTL provider reference (if any) */
306 	dinfo->pmu_state = pmu_state;
307 	dinfo->pmu.pwrctl = pwrctl;
308 
309 	SIBA_UNLOCK(sc);
310 
311 	return (0);
312 }
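
/*
 * Example: a child core driver allocates its PMU state through the
 * bhnd(4) wrappers; on PWRCTL and fixed-clock hardware the requests are
 * transparently routed through the mapping established above. A sketch
 * with abbreviated error handling (hypothetical calling context):
 *
 *	int error;
 *
 *	if ((error = bhnd_alloc_pmu(dev)))
 *		return (error);
 *
 *	error = bhnd_request_clock(dev, BHND_CLOCK_HT);
 *	if (error)
 *		device_printf(dev, "HT clock request failed: %d\n", error);
 *
 *	bhnd_release_pmu(dev);
 */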

/* BHND_BUS_RELEASE_PMU() */
static int
siba_release_pmu(device_t dev, device_t child)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	device_t		 pwrctl;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("pmu over-release for %s", device_get_nameunit(child));
		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_release_pmu(dev, child));

	case SIBA_PMU_PWRCTL:
		/* Requesting BHND_CLOCK_DYN releases any outstanding clock
		 * reservations */
		pwrctl = dinfo->pmu.pwrctl;
		error = bhnd_pwrctl_request_clock(pwrctl, child,
		    BHND_CLOCK_DYN);
		if (error) {
			SIBA_UNLOCK(sc);
			return (error);
		}

		/* Clean up the child's PMU state */
		dinfo->pmu_state = SIBA_PMU_NONE;
		dinfo->pmu.pwrctl = NULL;
		SIBA_UNLOCK(sc);

		/* Release the provider reference */
		bhnd_release_provider(child, pwrctl, BHND_SERVICE_PWRCTL);
		return (0);

	case SIBA_PMU_FIXED:
		/* Clean up the child's PMU state */
		KASSERT(dinfo->pmu.pwrctl == NULL,
		    ("PWRCTL reference with FIXED state"));

		dinfo->pmu_state = SIBA_PMU_NONE;
		dinfo->pmu.pwrctl = NULL;
		SIBA_UNLOCK(sc);

		return (0);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_GET_CLOCK_LATENCY() */
static int
siba_get_clock_latency(device_t dev, device_t child, bhnd_clock clock,
    u_int *latency)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_get_clock_latency(dev, child, clock,
		    latency));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_get_clock_latency(dinfo->pmu.pwrctl, clock,
		    latency);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* HT clock is always available, and incurs no transition
		 * delay. */
		switch (clock) {
		case BHND_CLOCK_HT:
			*latency = 0;
			return (0);

		default:
			return (ENODEV);
		}
		}
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}
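
/*
 * Example: a caller that must wait for its requested clock to become
 * usable can pair the request with the latency query; a sketch, assuming
 * PMU state was previously allocated via bhnd_alloc_pmu():
 *
 *	u_int latency;
 *	int error;
 *
 *	if ((error = bhnd_request_clock(dev, BHND_CLOCK_HT)))
 *		return (error);
 *	if ((error = bhnd_get_clock_latency(dev, BHND_CLOCK_HT, &latency)))
 *		return (error);
 *
 *	DELAY(latency);
 */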

/* BHND_BUS_GET_CLOCK_FREQ() */
static int
siba_get_clock_freq(device_t dev, device_t child, bhnd_clock clock,
    u_int *freq)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_get_clock_freq(dev, child, clock, freq));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_get_clock_freq(dinfo->pmu.pwrctl, clock,
		    freq);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_REQUEST_EXT_RSRC() */
static int
siba_request_ext_rsrc(device_t dev, device_t child, u_int rsrc)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_request_ext_rsrc(dev, child, rsrc));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		/* HW does not support per-core external resources */
		SIBA_UNLOCK(sc);
		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_RELEASE_EXT_RSRC() */
static int
siba_release_ext_rsrc(device_t dev, device_t child, u_int rsrc)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_release_ext_rsrc(dev, child, rsrc));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		/* HW does not support per-core external resources */
		SIBA_UNLOCK(sc);
		return (ENODEV);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

/* BHND_BUS_REQUEST_CLOCK() */
static int
siba_request_clock(device_t dev, device_t child, bhnd_clock clock)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;
	int			 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_request_clock(dev, child, clock));

	case SIBA_PMU_PWRCTL:
		error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child,
		    clock);
		SIBA_UNLOCK(sc);

		return (error);

	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* HT clock is always available, and fulfills any of the
		 * following clock requests */
		switch (clock) {
		case BHND_CLOCK_DYN:
		case BHND_CLOCK_ILP:
		case BHND_CLOCK_ALP:
		case BHND_CLOCK_HT:
			return (0);

		default:
			return (ENODEV);
		}
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}
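
/*
 * Example: because fixed-clock hardware trivially satisfies all standard
 * clock requests, callers need not special-case the chipset type; a
 * hypothetical sketch:
 *
 *	int error;
 *
 *	if ((error = bhnd_request_clock(dev, BHND_CLOCK_ALP)))
 *		device_printf(dev, "ALP clock unavailable: %d\n", error);
 */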

/* BHND_BUS_ENABLE_CLOCKS() */
static int
siba_enable_clocks(device_t dev, device_t child, uint32_t clocks)
{
	struct siba_softc	*sc;
	struct siba_devinfo	*dinfo;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	SIBA_LOCK(sc);
	switch (dinfo->pmu_state) {
	case SIBA_PMU_NONE:
		panic("no active PMU request state");

		SIBA_UNLOCK(sc);
		return (ENXIO);

	case SIBA_PMU_BHND:
		SIBA_UNLOCK(sc);
		return (bhnd_generic_enable_clocks(dev, child, clocks));

	case SIBA_PMU_PWRCTL:
	case SIBA_PMU_FIXED:
		SIBA_UNLOCK(sc);

		/* All (supported) clocks are already enabled by default */
		clocks &= ~(BHND_CLOCK_DYN |
			    BHND_CLOCK_ILP |
			    BHND_CLOCK_ALP |
			    BHND_CLOCK_HT);

		if (clocks != 0) {
			device_printf(dev, "%s requested unknown clocks: %#x\n",
			    device_get_nameunit(child), clocks);
			return (ENODEV);
		}

		return (0);
	}

	panic("invalid PMU state: %d", dinfo->pmu_state);
}

static int
siba_read_iost(device_t dev, device_t child, uint16_t *iost)
{
	uint32_t	tmhigh;
	int		error;

	error = bhnd_read_config(child, SIBA_CFG0_TMSTATEHIGH, &tmhigh, 4);
	if (error)
		return (error);

	*iost = (SIBA_REG_GET(tmhigh, TMH_SISF));
	return (0);
}

static int
siba_read_ioctl(device_t dev, device_t child, uint16_t *ioctl)
{
	uint32_t	ts_low;
	int		error;

	if ((error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4)))
		return (error);

	*ioctl = (SIBA_REG_GET(ts_low, TML_SICF));
	return (0);
}

static int
siba_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	uint32_t		 ts_low, ts_mask;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* Fetch CFG0 mapping */
	dinfo = device_get_ivars(child);
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* Mask and set TMSTATELOW core flag bits */
	ts_mask = (mask << SIBA_TML_SICF_SHIFT) & SIBA_TML_SICF_MASK;
	ts_low = (value << SIBA_TML_SICF_SHIFT) & ts_mask;

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, ts_mask);
	return (0);
}
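
/*
 * Example: bhnd_write_ioctl() performs a masked read-modify-write of the
 * core's I/O control flags; only bits set in the mask are modified. A
 * sketch that initiates built-in self test, assuming BHND_IOCTL_BIST is
 * meaningful for the core in question:
 *
 *	int error;
 *
 *	error = bhnd_write_ioctl(dev, BHND_IOCTL_BIST, BHND_IOCTL_BIST);
 */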

static bool
siba_is_hw_suspended(device_t dev, device_t child)
{
	uint32_t		ts_low;
	uint16_t		ioctl;
	int			error;

	/* Fetch target state */
	error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4);
	if (error) {
		device_printf(child, "error reading HW reset state: %d\n",
		    error);
		return (true);
	}

	/* Is core held in RESET? */
	if (ts_low & SIBA_TML_RESET)
		return (true);

	/* Is target reject enabled? */
	if (ts_low & SIBA_TML_REJ_MASK)
		return (true);

	/* Is core clocked? */
	ioctl = SIBA_REG_GET(ts_low, TML_SICF);
	if (!(ioctl & BHND_IOCTL_CLK_EN))
		return (true);

	return (false);
}

static int
siba_reset_hw(device_t dev, device_t child, uint16_t ioctl,
    uint16_t reset_ioctl)
{
	struct siba_devinfo		*dinfo;
	struct bhnd_resource		*r;
	uint32_t			 ts_low, imstate;
	uint16_t			 clkflags;
	int				 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	dinfo = device_get_ivars(child);

	/* Can't suspend the core without access to the CFG0 registers */
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
	clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
	if (ioctl & clkflags)
		return (EINVAL);

	/* Place core into known RESET state */
	if ((error = bhnd_suspend_hw(child, reset_ioctl)))
		return (error);

	/* Set RESET, clear REJ, set the caller's IOCTL flags, and
	 * force clocks to ensure the signal propagates throughout the
	 * core. */
	ts_low = SIBA_TML_RESET |
		 (ioctl << SIBA_TML_SICF_SHIFT) |
		 (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) |
		 (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT);

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, UINT32_MAX);

	/* Clear any target errors */
	if (bhnd_bus_read_4(r, SIBA_CFG0_TMSTATEHIGH) & SIBA_TMH_SERR) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH,
		    0x0, SIBA_TMH_SERR);
	}

	/* Clear any initiator errors */
	imstate = bhnd_bus_read_4(r, SIBA_CFG0_IMSTATE);
	if (imstate & (SIBA_IM_IBE|SIBA_IM_TO)) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0,
		    SIBA_IM_IBE|SIBA_IM_TO);
	}

	/* Release from RESET while leaving clocks forced, ensuring the
	 * signal propagates throughout the core */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    SIBA_TML_RESET);

	/* The core should now be active; we can clear the BHND_IOCTL_CLK_FORCE
	 * bit and allow the core to manage clock gating. */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT));

	return (0);
}
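
/*
 * Example: a child core driver brings its core out of RESET through the
 * bhnd(4) wrapper, which dispatches to the implementation above; a sketch
 * passing no core-specific I/O control flags:
 *
 *	int error;
 *
 *	if ((error = bhnd_reset_hw(dev, 0x0, 0x0)))
 *		return (error);
 */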

static int
siba_suspend_hw(device_t dev, device_t child, uint16_t ioctl)
{
	struct siba_softc		*sc;
	struct siba_devinfo		*dinfo;
	struct bhnd_resource		*r;
	uint32_t			 idl, ts_low, ts_mask;
	uint16_t			 cflags, clkflags;
	int				 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	sc = device_get_softc(dev);
	dinfo = device_get_ivars(child);

	/* Can't suspend the core without access to the CFG0 registers */
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* We require exclusive control over BHND_IOCTL_CLK_(EN|FORCE) */
	clkflags = BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE;
	if (ioctl & clkflags)
		return (EINVAL);

	/* Already in RESET? */
	ts_low = bhnd_bus_read_4(r, SIBA_CFG0_TMSTATELOW);
	if (ts_low & SIBA_TML_RESET)
		return (0);

	/* If clocks are already disabled, we can place the core directly
	 * into RESET|REJ while setting the caller's IOCTL flags. */
	cflags = SIBA_REG_GET(ts_low, TML_SICF);
	if (!(cflags & BHND_IOCTL_CLK_EN)) {
		ts_low = SIBA_TML_RESET | SIBA_TML_REJ |
			 (ioctl << SIBA_TML_SICF_SHIFT);
		ts_mask = SIBA_TML_RESET | SIBA_TML_REJ | SIBA_TML_SICF_MASK;

		siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
		    ts_low, ts_mask);
		return (0);
	}

	/* Reject further transactions reaching this core */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    SIBA_TML_REJ, SIBA_TML_REJ);

	/* Wait for transaction busy flag to clear for all transactions
	 * initiated by this core */
	error = siba_wait_target_state(child, dinfo, SIBA_CFG0_TMSTATEHIGH,
	    0x0, SIBA_TMH_BUSY, 100000);
	if (error)
		return (error);

	/* If this is an initiator core, we need to reject initiator
	 * transactions too. */
	idl = bhnd_bus_read_4(r, SIBA_CFG0_IDLOW);
	if (idl & SIBA_IDL_INIT) {
		/* Reject further initiator transactions */
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
		    SIBA_IM_RJ, SIBA_IM_RJ);

		/* Wait for initiator busy flag to clear */
		error = siba_wait_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
		    0x0, SIBA_IM_BY, 100000);
		if (error)
			return (error);
	}

	/* Put the core into RESET, set the caller's IOCTL flags, and
	 * force clocks to ensure the RESET signal propagates throughout the
	 * core. */
	ts_low = SIBA_TML_RESET |
		 (ioctl << SIBA_TML_SICF_SHIFT) |
		 (BHND_IOCTL_CLK_EN << SIBA_TML_SICF_SHIFT) |
		 (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT);
	ts_mask = SIBA_TML_RESET |
		  SIBA_TML_SICF_MASK;

	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, ts_low,
	    ts_mask);

	/* Give RESET ample time */
	DELAY(10);

	/* Clear previously asserted initiator reject */
	if (idl & SIBA_IDL_INIT) {
		siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE, 0x0,
		    SIBA_IM_RJ);
	}

	/* Disable all clocks, leaving RESET and REJ asserted */
	siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW, 0x0,
	    (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) << SIBA_TML_SICF_SHIFT);

	/*
	 * Core is now in RESET.
	 *
	 * If the core holds any PWRCTL clock reservations, we need to release
	 * those now. This emulates the standard bhnd(4) PMU behavior of RESET
	 * automatically clearing clkctl
	 */
	SIBA_LOCK(sc);
	if (dinfo->pmu_state == SIBA_PMU_PWRCTL) {
		error = bhnd_pwrctl_request_clock(dinfo->pmu.pwrctl, child,
		    BHND_CLOCK_DYN);
		SIBA_UNLOCK(sc);

		if (error) {
			device_printf(child, "failed to release clock request: "
			    "%d\n", error);
			return (error);
		}

		return (0);
	} else {
		SIBA_UNLOCK(sc);
		return (0);
	}
}

static int
siba_read_config(device_t dev, device_t child, bus_size_t offset, void *value,
    u_int width)
{
	struct siba_devinfo	*dinfo;
	rman_res_t		 r_size;

	/* Must be directly attached */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* CFG0 registers must be available */
	dinfo = device_get_ivars(child);
	if (dinfo->cfg_res[0] == NULL)
		return (ENODEV);

	/* Offset must fall within CFG0 */
	r_size = rman_get_size(dinfo->cfg_res[0]->res);
	if (r_size < offset || r_size - offset < width)
		return (EFAULT);

	switch (width) {
	case 1:
		*((uint8_t *)value) = bhnd_bus_read_1(dinfo->cfg_res[0],
		    offset);
		return (0);
	case 2:
		*((uint16_t *)value) = bhnd_bus_read_2(dinfo->cfg_res[0],
		    offset);
		return (0);
	case 4:
		*((uint32_t *)value) = bhnd_bus_read_4(dinfo->cfg_res[0],
		    offset);
		return (0);
	default:
		return (EINVAL);
	}
}
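
/*
 * Example: the same accessor is used by siba_read_iost() above; a child
 * core driver may read any register within its CFG0 block, subject to
 * the bounds check performed here:
 *
 *	uint32_t idl;
 *	int error;
 *
 *	error = bhnd_read_config(dev, SIBA_CFG0_IDLOW, &idl, sizeof(idl));
 *	if (error)
 *		return (error);
 */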

static int
siba_write_config(device_t dev, device_t child, bus_size_t offset,
    const void *value, u_int width)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	rman_res_t		 r_size;

	/* Must be directly attached */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* CFG0 registers must be available */
	dinfo = device_get_ivars(child);
	if ((r = dinfo->cfg_res[0]) == NULL)
		return (ENODEV);

	/* Offset must fall within CFG0 */
	r_size = rman_get_size(r->res);
	if (r_size < offset || r_size - offset < width)
		return (EFAULT);

	switch (width) {
	case 1:
		bhnd_bus_write_1(r, offset, *(const uint8_t *)value);
		return (0);
	case 2:
		bhnd_bus_write_2(r, offset, *(const uint16_t *)value);
		return (0);
	case 4:
		bhnd_bus_write_4(r, offset, *(const uint32_t *)value);
		return (0);
	default:
		return (EINVAL);
	}
}

static u_int
siba_get_port_count(device_t dev, device_t child, bhnd_port_type type)
{
	struct siba_devinfo *dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child,
		    type));

	dinfo = device_get_ivars(child);
	return (siba_port_count(&dinfo->core_id, type));
}

static u_int
siba_get_region_count(device_t dev, device_t child, bhnd_port_type type,
    u_int port)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev), child,
		    type, port));

	dinfo = device_get_ivars(child);
	return (siba_port_region_count(&dinfo->core_id, type, port));
}

static int
siba_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;
	struct siba_cfg_block	*cfg;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_PORT_RID(device_get_parent(dev), child,
		    port_type, port_num, region_num));

	dinfo = device_get_ivars(child);

	/* Look for a matching addrspace entry */
	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
	if (addrspace != NULL)
		return (addrspace->sa_rid);

	/* Try the config blocks */
	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
	if (cfg != NULL)
		return (cfg->cb_rid);

	/* Not found */
	return (-1);
}

static int
siba_decode_port_rid(device_t dev, device_t child, int type, int rid,
    bhnd_port_type *port_type, u_int *port_num, u_int *region_num)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_DECODE_PORT_RID(device_get_parent(dev), child,
		    type, rid, port_type, port_num, region_num));

	dinfo = device_get_ivars(child);

	/* Ports are always memory mapped */
	if (type != SYS_RES_MEMORY)
		return (EINVAL);

	/* Look for a matching addrspace entry */
	for (u_int i = 0; i < dinfo->core_id.num_admatch; i++) {
		if (dinfo->addrspace[i].sa_rid != rid)
			continue;

		*port_type = BHND_PORT_DEVICE;
		*port_num = siba_addrspace_device_port(i);
		*region_num = siba_addrspace_device_region(i);
		return (0);
	}

	/* Try the config blocks */
	for (u_int i = 0; i < dinfo->core_id.num_cfg_blocks; i++) {
		if (dinfo->cfg[i].cb_rid != rid)
			continue;

		*port_type = BHND_PORT_AGENT;
		*port_num = siba_cfg_agent_port(i);
		*region_num = siba_cfg_agent_region(i);
		return (0);
	}

	/* Not found */
	return (ENOENT);
}

static int
siba_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;
	struct siba_cfg_block	*cfg;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev) {
		return (BHND_BUS_GET_REGION_ADDR(device_get_parent(dev), child,
		    port_type, port_num, region_num, addr, size));
	}

	dinfo = device_get_ivars(child);

	/* Look for a matching addrspace */
	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
	if (addrspace != NULL) {
		*addr = addrspace->sa_base;
		*size = addrspace->sa_size - addrspace->sa_bus_reserved;
		return (0);
	}

	/* Look for a matching cfg block */
	cfg = siba_find_cfg_block(dinfo, port_type, port_num, region_num);
	if (cfg != NULL) {
		*addr = cfg->cb_base;
		*size = cfg->cb_size;
		return (0);
	}

	/* Not found */
	return (ENOENT);
}
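
/*
 * Example: a driver needing the physical address and size of its primary
 * register block (device port 0, region 0) can query it through the
 * bhnd(4) wrapper; a hypothetical sketch:
 *
 *	bhnd_addr_t	addr;
 *	bhnd_size_t	size;
 *	int		error;
 *
 *	error = bhnd_get_region_addr(dev, BHND_PORT_DEVICE, 0, 0, &addr,
 *	    &size);
 *	if (error)
 *		return (error);
 */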

/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT().
 */
u_int
siba_get_intr_count(device_t dev, device_t child)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_INTR_COUNT(device_get_parent(dev), child));

	dinfo = device_get_ivars(child);
	if (!dinfo->core_id.intr_en) {
		/* No interrupts */
		return (0);
	} else {
		/* One assigned interrupt */
		return (1);
	}
}

/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_IVEC().
 */
int
siba_get_intr_ivec(device_t dev, device_t child, u_int intr, u_int *ivec)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_INTR_IVEC(device_get_parent(dev), child,
		    intr, ivec));

	/* Must be a valid interrupt ID */
	if (intr >= siba_get_intr_count(dev, child))
		return (ENXIO);

	KASSERT(intr == 0, ("invalid ivec %u", intr));

	dinfo = device_get_ivars(child);

	KASSERT(dinfo->core_id.intr_en,
	    ("core does not have an interrupt assigned"));

	*ivec = dinfo->core_id.intr_flag;
	return (0);
}
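
/*
 * Example: the backplane interrupt vector assigned to a core (on siba(4),
 * its interrupt flag number) can be queried through the bhnd(4) wrappers;
 * a hypothetical sketch:
 *
 *	u_int ivec;
 *	int error;
 *
 *	if (bhnd_get_intr_count(dev) > 0) {
 *		if ((error = bhnd_get_intr_ivec(dev, 0, &ivec)))
 *			return (error);
 *	}
 */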

/**
 * Map per-core configuration blocks for @p dinfo.
 *
 * @param dev The siba bus device.
 * @param dinfo The device info instance on which to map all per-core
 * configuration blocks.
 */
static int
siba_map_cfg_resources(device_t dev, struct siba_devinfo *dinfo)
{
	struct siba_addrspace	*addrspace;
	rman_res_t		 r_start, r_count, r_end;
	uint8_t			 num_cfg;
	int			 rid;

	num_cfg = dinfo->core_id.num_cfg_blocks;
	if (num_cfg > SIBA_MAX_CFG) {
		device_printf(dev, "config block count %hhu out of range\n",
		    num_cfg);
		return (ENXIO);
	}

	/* Fetch the core register address space */
	addrspace = siba_find_addrspace(dinfo, BHND_PORT_DEVICE, 0, 0);
	if (addrspace == NULL) {
		device_printf(dev, "missing device registers\n");
		return (ENXIO);
	}

	/*
	 * Map the per-core configuration blocks
	 */
	for (uint8_t i = 0; i < num_cfg; i++) {
		/* Add to child's resource list */
		r_start = addrspace->sa_base + SIBA_CFG_OFFSET(i);
		r_count = SIBA_CFG_SIZE;
		r_end = r_start + r_count - 1;

		rid = resource_list_add_next(&dinfo->resources, SYS_RES_MEMORY,
		    r_start, r_end, r_count);

		/* Initialize config block descriptor */
		dinfo->cfg[i] = ((struct siba_cfg_block) {
			.cb_base = r_start,
			.cb_size = SIBA_CFG_SIZE,
			.cb_rid = rid
		});

		/* Map the config resource for bus-level access */
		dinfo->cfg_rid[i] = SIBA_CFG_RID(dinfo, i);
		dinfo->cfg_res[i] = BHND_BUS_ALLOC_RESOURCE(dev, dev,
		    SYS_RES_MEMORY, &dinfo->cfg_rid[i], r_start, r_end,
		    r_count, RF_ACTIVE|RF_SHAREABLE);

		if (dinfo->cfg_res[i] == NULL) {
			device_printf(dev, "failed to allocate SIBA_CFG%hhu\n",
			    i);
			return (ENXIO);
		}
	}

	return (0);
}

static device_t
siba_add_child(device_t dev, u_int order, const char *name, int unit)
{
	struct siba_devinfo	*dinfo;
	device_t		 child;

	child = device_add_child_ordered(dev, order, name, unit);
	if (child == NULL)
		return (NULL);

	if ((dinfo = siba_alloc_dinfo(dev)) == NULL) {
		device_delete_child(dev, child);
		return (NULL);
	}

	device_set_ivars(child, dinfo);

	return (child);
}

static void
siba_child_deleted(device_t dev, device_t child)
{
	struct siba_devinfo	*dinfo;

	/* Call required bhnd(4) implementation */
	bhnd_generic_child_deleted(dev, child);

	/* Free siba device info */
	if ((dinfo = device_get_ivars(child)) != NULL)
		siba_free_dinfo(dev, child, dinfo);

	device_set_ivars(child, NULL);
}

/**
 * Scan the core table and add all valid discovered cores to
 * the bus.
 *
 * @param dev The siba bus device.
 */
int
siba_add_children(device_t dev)
{
	bhnd_erom_t			*erom;
	struct siba_erom		*siba_erom;
	struct bhnd_erom_io		*eio;
	const struct bhnd_chipid	*cid;
	struct siba_core_id		*cores;
	device_t			*children;
	int				 error;

	cid = BHND_BUS_GET_CHIPID(dev, dev);

	/* Allocate our EROM parser */
	eio = bhnd_erom_iores_new(dev, SIBA_EROM_RID);
	erom = bhnd_erom_alloc(&siba_erom_parser, cid, eio);
	if (erom == NULL) {
		bhnd_erom_io_fini(eio);
		return (ENODEV);
	}

	/* Allocate our temporary core and device table */
	cores = malloc(sizeof(*cores) * cid->ncores, M_BHND, M_WAITOK);
	children = malloc(sizeof(*children) * cid->ncores, M_BHND,
	    M_WAITOK | M_ZERO);

	/*
	 * Add child devices for all discovered cores.
	 *
	 * On bridged devices, we'll exhaust our available register windows if
	 * we map config blocks on unpopulated/disabled cores. To avoid this, we
	 * defer mapping of the per-core siba(4) config blocks until all cores
	 * have been enumerated and otherwise configured.
	 */
	siba_erom = (struct siba_erom *)erom;
	for (u_int i = 0; i < cid->ncores; i++) {
		struct siba_devinfo	*dinfo;
		device_t		 child;

		if ((error = siba_erom_get_core_id(siba_erom, i, &cores[i])))
			goto failed;

		/* Add the child device */
		child = BUS_ADD_CHILD(dev, 0, NULL, DEVICE_UNIT_ANY);
		if (child == NULL) {
			error = ENXIO;
			goto failed;
		}

		children[i] = child;

		/* Initialize per-device bus info */
		if ((dinfo = device_get_ivars(child)) == NULL) {
			error = ENXIO;
			goto failed;
		}

		if ((error = siba_init_dinfo(dev, child, dinfo, &cores[i])))
			goto failed;

		/* If pins are floating or the hardware is otherwise
		 * unpopulated, the device shouldn't be used. */
		if (bhnd_is_hw_disabled(child))
			device_disable(child);
	}

	/* Free EROM (and any bridge register windows it might hold) */
	bhnd_erom_free(erom);
	erom = NULL;

	/* Map all valid cores' config register blocks and perform interrupt
	 * assignment */
	for (u_int i = 0; i < cid->ncores; i++) {
		struct siba_devinfo	*dinfo;
		device_t		 child;

		child = children[i];

		/* Skip if core is disabled */
		if (bhnd_is_hw_disabled(child))
			continue;

		dinfo = device_get_ivars(child);

		/* Map the core's config blocks */
		if ((error = siba_map_cfg_resources(dev, dinfo)))
			goto failed;

		/* Issue bus callback for fully initialized child. */
		BHND_BUS_CHILD_ADDED(dev, child);
	}

	free(cores, M_BHND);
	free(children, M_BHND);

	return (0);

failed:
	device_delete_children(dev);

	free(cores, M_BHND);
	free(children, M_BHND);
	if (erom != NULL)
		bhnd_erom_free(erom);

	return (error);
}

static device_method_t siba_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			siba_probe),
	DEVMETHOD(device_attach,		siba_attach),
	DEVMETHOD(device_detach,		siba_detach),
	DEVMETHOD(device_resume,		siba_resume),
	DEVMETHOD(device_suspend,		siba_suspend),

	/* Bus interface */
	DEVMETHOD(bus_add_child,		siba_add_child),
	DEVMETHOD(bus_child_deleted,		siba_child_deleted),
	DEVMETHOD(bus_read_ivar,		siba_read_ivar),
	DEVMETHOD(bus_write_ivar,		siba_write_ivar),
	DEVMETHOD(bus_get_resource_list,	siba_get_resource_list),

	/* BHND interface */
	DEVMETHOD(bhnd_bus_get_erom_class,	siba_get_erom_class),
	DEVMETHOD(bhnd_bus_alloc_pmu,		siba_alloc_pmu),
	DEVMETHOD(bhnd_bus_release_pmu,		siba_release_pmu),
	DEVMETHOD(bhnd_bus_request_clock,	siba_request_clock),
	DEVMETHOD(bhnd_bus_enable_clocks,	siba_enable_clocks),
	DEVMETHOD(bhnd_bus_request_ext_rsrc,	siba_request_ext_rsrc),
	DEVMETHOD(bhnd_bus_release_ext_rsrc,	siba_release_ext_rsrc),
	DEVMETHOD(bhnd_bus_get_clock_freq,	siba_get_clock_freq),
	DEVMETHOD(bhnd_bus_get_clock_latency,	siba_get_clock_latency),
	DEVMETHOD(bhnd_bus_read_ioctl,		siba_read_ioctl),
	DEVMETHOD(bhnd_bus_write_ioctl,		siba_write_ioctl),
	DEVMETHOD(bhnd_bus_read_iost,		siba_read_iost),
	DEVMETHOD(bhnd_bus_is_hw_suspended,	siba_is_hw_suspended),
	DEVMETHOD(bhnd_bus_reset_hw,		siba_reset_hw),
	DEVMETHOD(bhnd_bus_suspend_hw,		siba_suspend_hw),
	DEVMETHOD(bhnd_bus_read_config,		siba_read_config),
	DEVMETHOD(bhnd_bus_write_config,	siba_write_config),
	DEVMETHOD(bhnd_bus_get_port_count,	siba_get_port_count),
	DEVMETHOD(bhnd_bus_get_region_count,	siba_get_region_count),
	DEVMETHOD(bhnd_bus_get_port_rid,	siba_get_port_rid),
	DEVMETHOD(bhnd_bus_decode_port_rid,	siba_decode_port_rid),
	DEVMETHOD(bhnd_bus_get_region_addr,	siba_get_region_addr),
	DEVMETHOD(bhnd_bus_get_intr_count,	siba_get_intr_count),
	DEVMETHOD(bhnd_bus_get_intr_ivec,	siba_get_intr_ivec),

	DEVMETHOD_END
};

DEFINE_CLASS_1(bhnd, siba_driver, siba_methods, sizeof(struct siba_softc),
    bhnd_driver);

MODULE_VERSION(siba, 1);
MODULE_DEPEND(siba, bhnd, 1, 1, 1);