xref: /freebsd/sys/dev/bhnd/siba/siba.c (revision 1c05a6ea6b849ff95e539c31adea887c644a6a01)
/*-
 * Copyright (c) 2015 Landon Fuller <landon@landonf.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
 *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
 *    redistribution must be conditioned upon including a substantially
 *    similar Disclaimer requirement for further binary redistribution.
 *
 * NO WARRANTY
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
 * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
 * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 * THE POSSIBILITY OF SUCH DAMAGES.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/bus.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/systm.h>

#include <machine/bus.h>

#include <dev/bhnd/cores/chipc/chipcreg.h>
#include <dev/bhnd/cores/pmu/bhnd_pmu.h>

#include "sibareg.h"
#include "sibavar.h"

static bhnd_erom_class_t *
siba_get_erom_class(driver_t *driver)
{
	return (&siba_erom_parser);
}

int
siba_probe(device_t dev)
{
	device_set_desc(dev, "SIBA BHND bus");
	return (BUS_PROBE_DEFAULT);
}

/**
 * Default siba(4) bus driver implementation of DEVICE_ATTACH().
 *
 * This implementation initializes internal siba(4) state and performs
 * bus enumeration, and must be called by subclassing drivers in
 * DEVICE_ATTACH() before any other bus methods.
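 *
 * As an illustrative sketch only (the subclass driver name is hypothetical),
 * a subclassing driver's DEVICE_ATTACH() implementation might enumerate
 * first and then hand off to the generic bhnd(4) attach logic:
 *
 *	static int
 *	siba_foo_attach(device_t dev)
 *	{
 *		int error;
 *
 *		if ((error = siba_attach(dev)))
 *			return (error);
 *
 *		return (bhnd_generic_attach(dev));
 *	}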
 */
int
siba_attach(device_t dev)
{
	struct siba_softc	*sc;
	int			 error;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Enumerate children */
	if ((error = siba_add_children(dev))) {
		device_delete_children(dev);
		return (error);
	}

	return (0);
}

int
siba_detach(device_t dev)
{
	return (bhnd_generic_detach(dev));
}

int
siba_resume(device_t dev)
{
	return (bhnd_generic_resume(dev));
}

int
siba_suspend(device_t dev)
{
	return (bhnd_generic_suspend(dev));
}

static int
siba_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	const struct siba_devinfo *dinfo;
	const struct bhnd_core_info *cfg;

	dinfo = device_get_ivars(child);
	cfg = &dinfo->core_id.core_info;

	switch (index) {
	case BHND_IVAR_VENDOR:
		*result = cfg->vendor;
		return (0);
	case BHND_IVAR_DEVICE:
		*result = cfg->device;
		return (0);
	case BHND_IVAR_HWREV:
		*result = cfg->hwrev;
		return (0);
	case BHND_IVAR_DEVICE_CLASS:
		*result = bhnd_core_class(cfg);
		return (0);
	case BHND_IVAR_VENDOR_NAME:
		*result = (uintptr_t) bhnd_vendor_name(cfg->vendor);
		return (0);
	case BHND_IVAR_DEVICE_NAME:
		*result = (uintptr_t) bhnd_core_name(cfg);
		return (0);
	case BHND_IVAR_CORE_INDEX:
		*result = cfg->core_idx;
		return (0);
	case BHND_IVAR_CORE_UNIT:
		*result = cfg->unit;
		return (0);
	case BHND_IVAR_PMU_INFO:
		*result = (uintptr_t) dinfo->pmu_info;
		return (0);
	default:
		return (ENOENT);
	}
}

static int
siba_write_ivar(device_t dev, device_t child, int index, uintptr_t value)
{
	struct siba_devinfo *dinfo;

	dinfo = device_get_ivars(child);

	switch (index) {
	case BHND_IVAR_VENDOR:
	case BHND_IVAR_DEVICE:
	case BHND_IVAR_HWREV:
	case BHND_IVAR_DEVICE_CLASS:
	case BHND_IVAR_VENDOR_NAME:
	case BHND_IVAR_DEVICE_NAME:
	case BHND_IVAR_CORE_INDEX:
	case BHND_IVAR_CORE_UNIT:
		return (EINVAL);
	case BHND_IVAR_PMU_INFO:
		dinfo->pmu_info = (struct bhnd_core_pmu_info *) value;
		return (0);
	default:
		return (ENOENT);
	}
}

static struct resource_list *
siba_get_resource_list(device_t dev, device_t child)
{
	struct siba_devinfo *dinfo = device_get_ivars(child);
	return (&dinfo->resources);
}

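/**
 * Default siba(4) bus driver implementation of BHND_BUS_READ_IOST().
 *
 * This implementation reads the core's I/O status flags from the SISF
 * field of @p child's SIBA_CFG0_TMSTATEHIGH register.
 */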
static int
siba_read_iost(device_t dev, device_t child, uint16_t *iost)
{
	uint32_t	tmhigh;
	int		error;

	error = bhnd_read_config(child, SIBA_CFG0_TMSTATEHIGH, &tmhigh, 4);
	if (error)
		return (error);

	*iost = (SIBA_REG_GET(tmhigh, TMH_SISF));
	return (0);
}

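/**
 * Default siba(4) bus driver implementation of BHND_BUS_READ_IOCTL().
 *
 * This implementation reads the core's I/O control flags from the SICF
 * field of @p child's SIBA_CFG0_TMSTATELOW register.
 */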
static int
siba_read_ioctl(device_t dev, device_t child, uint16_t *ioctl)
{
	uint32_t	ts_low;
	int		error;

	if ((error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4)))
		return (error);

	*ioctl = (SIBA_REG_GET(ts_low, TML_SICF));
	return (0);
}

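/**
 * Default siba(4) bus driver implementation of BHND_BUS_WRITE_IOCTL().
 *
 * Only the I/O control flags set in @p mask are updated; @p value is
 * shifted into the SICF field of @p child's SIBA_CFG0_TMSTATELOW register.
 */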
static int
siba_write_ioctl(device_t dev, device_t child, uint16_t value, uint16_t mask)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	uint32_t		 ts_low, ts_mask;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* Fetch CFG0 mapping */
	dinfo = device_get_ivars(child);
	if ((r = dinfo->cfg[0]) == NULL)
		return (ENODEV);

	/* Mask and set TMSTATELOW core flag bits */
	ts_mask = (mask << SIBA_TML_SICF_SHIFT) & SIBA_TML_SICF_MASK;
	ts_low = (value << SIBA_TML_SICF_SHIFT) & ts_mask;

	return (siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, ts_mask));
}

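/**
 * Default siba(4) bus driver implementation of BHND_BUS_IS_HW_SUSPENDED().
 *
 * A core is reported as suspended if it is held in RESET or its clock is
 * not enabled; if the target state cannot be read, the core is
 * conservatively reported as suspended.
 */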
static bool
siba_is_hw_suspended(device_t dev, device_t child)
{
	uint32_t		ts_low;
	uint16_t		ioctl;
	int			error;

	/* Fetch target state */
	error = bhnd_read_config(child, SIBA_CFG0_TMSTATELOW, &ts_low, 4);
	if (error) {
		device_printf(child, "error reading HW reset state: %d\n",
		    error);
		return (true);
	}

	/* Is core held in RESET? */
	if (ts_low & SIBA_TML_RESET)
		return (true);

	/* Is core clocked? */
	ioctl = SIBA_REG_GET(ts_low, TML_SICF);
	if (!(ioctl & BHND_IOCTL_CLK_EN))
		return (true);

	return (false);
}

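/**
 * Default siba(4) bus driver implementation of BHND_BUS_RESET_HW().
 *
 * The core is brought up in the standard siba(4) sequence: place the core
 * in RESET, set the caller's I/O control flags with clocks forced, clear
 * any pending target/initiator errors, release RESET, and finally drop
 * BHND_IOCTL_CLK_FORCE so the core may manage its own clock gating.
 */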
static int
siba_reset_hw(device_t dev, device_t child, uint16_t ioctl)
{
	struct siba_devinfo		*dinfo;
	struct bhnd_resource		*r;
	uint32_t			 ts_low, imstate;
	int				 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	dinfo = device_get_ivars(child);

	/* Can't suspend the core without access to the CFG0 registers */
	if ((r = dinfo->cfg[0]) == NULL)
		return (ENODEV);

	/* We require exclusive control over BHND_IOCTL_CLK_EN and
	 * BHND_IOCTL_CLK_FORCE. */
	if (ioctl & (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE))
		return (EINVAL);

	/* Place core into known RESET state */
	if ((error = BHND_BUS_SUSPEND_HW(dev, child)))
		return (error);

	/* Leaving the core in reset, set the caller's IOCTL flags and
	 * enable the core's clocks. */
	ts_low = (ioctl | BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) <<
	    SIBA_TML_SICF_SHIFT;
	error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    ts_low, SIBA_TML_SICF_MASK);
	if (error)
		return (error);

	/* Clear any target errors */
	if (bhnd_bus_read_4(r, SIBA_CFG0_TMSTATEHIGH) & SIBA_TMH_SERR) {
		error = siba_write_target_state(child, dinfo,
		    SIBA_CFG0_TMSTATEHIGH, 0, SIBA_TMH_SERR);
		if (error)
			return (error);
	}

	/* Clear any initiator errors */
	imstate = bhnd_bus_read_4(r, SIBA_CFG0_IMSTATE);
	if (imstate & (SIBA_IM_IBE|SIBA_IM_TO)) {
		error = siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
		    0, SIBA_IM_IBE|SIBA_IM_TO);
		if (error)
			return (error);
	}

	/* Release from RESET while leaving clocks forced, ensuring the
	 * signal propagates throughout the core */
	error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    0x0, SIBA_TML_RESET);
	if (error)
		return (error);

	/* The core should now be active; we can clear the BHND_IOCTL_CLK_FORCE
	 * bit and allow the core to manage clock gating. */
	error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    0x0, (BHND_IOCTL_CLK_FORCE << SIBA_TML_SICF_SHIFT));
	if (error)
		return (error);

	return (0);
}

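/**
 * Default siba(4) bus driver implementation of BHND_BUS_SUSPEND_HW().
 *
 * The core is placed into RESET with its clocks disabled; target (and, for
 * initiator cores, initiator) backplane transactions are rejected while the
 * RESET signal propagates, and any outstanding per-core PMU requests are
 * released.
 */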
static int
siba_suspend_hw(device_t dev, device_t child)
{
	struct siba_devinfo		*dinfo;
	struct bhnd_core_pmu_info	*pm;
	struct bhnd_resource		*r;
	uint32_t			 idl, ts_low;
	uint16_t			 ioctl;
	int				 error;

	if (device_get_parent(child) != dev)
		return (EINVAL);

	dinfo = device_get_ivars(child);
	pm = dinfo->pmu_info;

	/* Can't suspend the core without access to the CFG0 registers */
	if ((r = dinfo->cfg[0]) == NULL)
		return (ENODEV);

	/* Already in RESET? */
	ts_low = bhnd_bus_read_4(r, SIBA_CFG0_TMSTATELOW);
	if (ts_low & SIBA_TML_RESET) {
		/* Clear IOCTL flags, ensuring the clock is disabled */
		return (siba_write_target_state(child, dinfo,
		    SIBA_CFG0_TMSTATELOW, 0x0, SIBA_TML_SICF_MASK));
	}

	/* If clocks are already disabled, we can put the core directly
	 * into RESET */
	ioctl = SIBA_REG_GET(ts_low, TML_SICF);
	if (!(ioctl & BHND_IOCTL_CLK_EN)) {
		/* Set RESET and clear IOCTL flags */
		return (siba_write_target_state(child, dinfo,
		    SIBA_CFG0_TMSTATELOW,
		    SIBA_TML_RESET,
		    SIBA_TML_RESET | SIBA_TML_SICF_MASK));
	}

	/* Reject any further target backplane transactions */
	error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
	    SIBA_TML_REJ, SIBA_TML_REJ);
	if (error)
		return (error);

	/* If this is an initiator core, we need to reject initiator
	 * transactions too. */
	idl = bhnd_bus_read_4(r, SIBA_CFG0_IDLOW);
	if (idl & SIBA_IDL_INIT) {
		error = siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
		    SIBA_IM_RJ, SIBA_IM_RJ);
		if (error)
			return (error);
	}

	/* Put the core into RESET|REJECT, forcing clocks to ensure the RESET
	 * signal propagates throughout the core, leaving REJECT asserted. */
	ts_low = SIBA_TML_RESET;
	ts_low |= (BHND_IOCTL_CLK_EN | BHND_IOCTL_CLK_FORCE) <<
	    SIBA_TML_SICF_SHIFT;

	error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
		ts_low, ts_low);
	if (error)
		return (error);

	/* Give RESET ample time */
	DELAY(10);

	/* Leaving core in reset, disable all clocks, clear REJ flags and
	 * IOCTL state */
	error = siba_write_target_state(child, dinfo, SIBA_CFG0_TMSTATELOW,
		SIBA_TML_RESET,
		SIBA_TML_RESET | SIBA_TML_REJ | SIBA_TML_SICF_MASK);
	if (error)
		return (error);

	/* Clear previously asserted initiator reject */
	if (idl & SIBA_IDL_INIT) {
		error = siba_write_target_state(child, dinfo, SIBA_CFG0_IMSTATE,
		    0, SIBA_IM_RJ);
		if (error)
			return (error);
	}

	/* Core is now in RESET, with clocks disabled and REJ not asserted.
	 *
	 * We lastly need to inform the PMU, releasing any outstanding per-core
	 * PMU requests */
	if (pm != NULL) {
		if ((error = BHND_PMU_CORE_RELEASE(pm->pm_pmu, pm)))
			return (error);
	}

	return (0);
}

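/**
 * Default siba(4) bus driver implementation of BHND_BUS_READ_CONFIG().
 *
 * @p offset is relative to @p child's SIBA_CFG0 register block, and
 * @p width must be 1, 2, or 4 bytes.
 */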
static int
siba_read_config(device_t dev, device_t child, bus_size_t offset, void *value,
    u_int width)
{
	struct siba_devinfo	*dinfo;
	rman_res_t		 r_size;

	/* Must be directly attached */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* CFG0 registers must be available */
	dinfo = device_get_ivars(child);
	if (dinfo->cfg[0] == NULL)
		return (ENODEV);

	/* Offset must fall within CFG0 */
	r_size = rman_get_size(dinfo->cfg[0]->res);
	if (r_size < offset || r_size - offset < width)
		return (EFAULT);

	switch (width) {
	case 1:
		*((uint8_t *)value) = bhnd_bus_read_1(dinfo->cfg[0], offset);
		return (0);
	case 2:
		*((uint16_t *)value) = bhnd_bus_read_2(dinfo->cfg[0], offset);
		return (0);
	case 4:
		*((uint32_t *)value) = bhnd_bus_read_4(dinfo->cfg[0], offset);
		return (0);
	default:
		return (EINVAL);
	}
}

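/**
 * Default siba(4) bus driver implementation of BHND_BUS_WRITE_CONFIG().
 *
 * @p offset is relative to @p child's SIBA_CFG0 register block, and
 * @p width must be 1, 2, or 4 bytes.
 */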
static int
siba_write_config(device_t dev, device_t child, bus_size_t offset,
    const void *value, u_int width)
{
	struct siba_devinfo	*dinfo;
	struct bhnd_resource	*r;
	rman_res_t		 r_size;

	/* Must be directly attached */
	if (device_get_parent(child) != dev)
		return (EINVAL);

	/* CFG0 registers must be available */
	dinfo = device_get_ivars(child);
	if ((r = dinfo->cfg[0]) == NULL)
		return (ENODEV);

	/* Offset must fall within CFG0 */
	r_size = rman_get_size(r->res);
	if (r_size < offset || r_size - offset < width)
		return (EFAULT);

	switch (width) {
	case 1:
		bhnd_bus_write_1(r, offset, *(const uint8_t *)value);
		return (0);
	case 2:
		bhnd_bus_write_2(r, offset, *(const uint16_t *)value);
		return (0);
	case 4:
		bhnd_bus_write_4(r, offset, *(const uint32_t *)value);
		return (0);
	default:
		return (EINVAL);
	}
}

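/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_PORT_COUNT().
 *
 * The port count is derived from the number of address spaces enumerated
 * for @p child's core.
 */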
static u_int
siba_get_port_count(device_t dev, device_t child, bhnd_port_type type)
{
	struct siba_devinfo *dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_PORT_COUNT(device_get_parent(dev), child,
		    type));

	dinfo = device_get_ivars(child);
	return (siba_addrspace_port_count(dinfo->core_id.num_addrspace));
}

static u_int
siba_get_region_count(device_t dev, device_t child, bhnd_port_type type,
    u_int port)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_REGION_COUNT(device_get_parent(dev), child,
		    type, port));

	dinfo = device_get_ivars(child);
	if (!siba_is_port_valid(dinfo->core_id.num_addrspace, type, port))
		return (0);

	return (siba_addrspace_region_count(dinfo->core_id.num_addrspace,
	    port));
}

static int
siba_get_port_rid(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_PORT_RID(device_get_parent(dev), child,
		    port_type, port_num, region_num));

	dinfo = device_get_ivars(child);
	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
	if (addrspace == NULL)
		return (-1);

	return (addrspace->sa_rid);
}

static int
siba_decode_port_rid(device_t dev, device_t child, int type, int rid,
    bhnd_port_type *port_type, u_int *port_num, u_int *region_num)
{
	struct siba_devinfo	*dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_DECODE_PORT_RID(device_get_parent(dev), child,
		    type, rid, port_type, port_num, region_num));

	dinfo = device_get_ivars(child);

	/* Ports are always memory mapped */
	if (type != SYS_RES_MEMORY)
		return (EINVAL);

	for (int i = 0; i < dinfo->core_id.num_addrspace; i++) {
		if (dinfo->addrspace[i].sa_rid != rid)
			continue;

		*port_type = BHND_PORT_DEVICE;
		*port_num = siba_addrspace_port(i);
		*region_num = siba_addrspace_region(i);
		return (0);
	}

	/* Not found */
	return (ENOENT);
}

static int
siba_get_region_addr(device_t dev, device_t child, bhnd_port_type port_type,
    u_int port_num, u_int region_num, bhnd_addr_t *addr, bhnd_size_t *size)
{
	struct siba_devinfo	*dinfo;
	struct siba_addrspace	*addrspace;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev) {
		return (BHND_BUS_GET_REGION_ADDR(device_get_parent(dev), child,
		    port_type, port_num, region_num, addr, size));
	}

	dinfo = device_get_ivars(child);
	addrspace = siba_find_addrspace(dinfo, port_type, port_num, region_num);
	if (addrspace == NULL)
		return (ENOENT);

	*addr = addrspace->sa_base;
	*size = addrspace->sa_size - addrspace->sa_bus_reserved;
	return (0);
}

/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_INTR_COUNT().
 *
 * This implementation consults @p child's configuration block mapping,
 * returning SIBA_CORE_NUM_INTR if a valid CFG0 block is mapped.
 */
int
siba_get_intr_count(device_t dev, device_t child)
{
	struct siba_devinfo *dinfo;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_INTR_COUNT(device_get_parent(dev), child));

	dinfo = device_get_ivars(child);

	/* We can get/set interrupt sbflags on any core with a valid cfg0
	 * block; whether the core actually makes use of it is another matter
	 * entirely */
	if (dinfo->cfg[0] == NULL)
		return (0);

	return (SIBA_CORE_NUM_INTR);
}

/**
 * Default siba(4) bus driver implementation of BHND_BUS_GET_CORE_IVEC().
 *
 * This implementation consults @p child's CFG0 register block,
 * returning the interrupt flag assigned to @p child.
 */
int
siba_get_core_ivec(device_t dev, device_t child, u_int intr, uint32_t *ivec)
{
	struct siba_devinfo	*dinfo;
	uint32_t		 tpsflag;

	/* delegate non-bus-attached devices to our parent */
	if (device_get_parent(child) != dev)
		return (BHND_BUS_GET_CORE_IVEC(device_get_parent(dev), child,
		    intr, ivec));

	/* Must be a valid interrupt ID */
	if (intr >= siba_get_intr_count(dev, child))
		return (ENXIO);

	/* Fetch sbflag number */
	dinfo = device_get_ivars(child);
	tpsflag = bhnd_bus_read_4(dinfo->cfg[0], SIBA_CFG0_TPSFLAG);
	*ivec = SIBA_REG_GET(tpsflag, TPS_NUM0);

	return (0);
}

/**
 * Register all address space mappings for @p di.
 *
 * @param dev The siba bus device.
 * @param di The device info instance on which to register all address
 * space entries.
 * @param r A resource mapping the enumeration table block for @p di.
 */
static int
siba_register_addrspaces(device_t dev, struct siba_devinfo *di,
    struct bhnd_resource *r)
{
	struct siba_core_id	*cid;
	uint32_t		 addr;
	uint32_t		 size;
	int			 error;

	cid = &di->core_id;

	/* Register the device address space entries */
	for (uint8_t i = 0; i < di->core_id.num_addrspace; i++) {
		uint32_t	adm;
		u_int		adm_offset;
		uint32_t	bus_reserved;

		/* Determine the register offset */
		adm_offset = siba_admatch_offset(i);
		if (adm_offset == 0) {
			device_printf(dev, "addrspace %hhu is unsupported\n",
			    i);
			return (ENODEV);
		}

		/* Fetch the address match register value */
		adm = bhnd_bus_read_4(r, adm_offset);

		/* Parse the value */
		if ((error = siba_parse_admatch(adm, &addr, &size))) {
			device_printf(dev, "failed to decode address "
			    "match register value 0x%x\n", adm);
			return (error);
		}

		/* If this is the device's core/enumeration addrspace,
		 * reserve the Sonics configuration register blocks for the
		 * use of our bus. */
		bus_reserved = 0;
		if (i == SIBA_CORE_ADDRSPACE)
			bus_reserved = cid->num_cfg_blocks * SIBA_CFG_SIZE;

		/* Append the region info */
		error = siba_append_dinfo_region(di, i, addr, size,
		    bus_reserved);
		if (error)
			return (error);
	}

	return (0);
}

/**
 * Map per-core configuration blocks for @p dinfo.
 *
 * @param dev The siba bus device.
 * @param dinfo The device info instance on which to map all per-core
 * configuration blocks.
 */
static int
siba_map_cfg_resources(device_t dev, struct siba_devinfo *dinfo)
{
	struct siba_addrspace	*addrspace;
	rman_res_t		 r_start, r_count, r_end;
	uint8_t			 num_cfg;

	num_cfg = dinfo->core_id.num_cfg_blocks;
	if (num_cfg > SIBA_MAX_CFG) {
		device_printf(dev, "config block count %hhu out of range\n",
		    num_cfg);
		return (ENXIO);
	}

	/* Fetch the core register address space */
	addrspace = siba_find_addrspace(dinfo, BHND_PORT_DEVICE, 0, 0);
	if (addrspace == NULL) {
		device_printf(dev, "missing device registers\n");
		return (ENXIO);
	}

	/*
	 * Map the per-core configuration blocks
	 */
	for (uint8_t i = 0; i < num_cfg; i++) {
		/* Determine the config block's address range; configuration
		 * blocks are allocated starting at SIBA_CFG0_OFFSET,
		 * growing downwards. */
		r_start = addrspace->sa_base + SIBA_CFG0_OFFSET;
		r_start -= i * SIBA_CFG_SIZE;

		r_count = SIBA_CFG_SIZE;
		r_end = r_start + r_count - 1;

		/* Allocate the config resource */
		dinfo->cfg_rid[i] = SIBA_CFG_RID(dinfo, i);
		dinfo->cfg[i] = BHND_BUS_ALLOC_RESOURCE(dev, dev,
		    SYS_RES_MEMORY, &dinfo->cfg_rid[i], r_start, r_end,
		    r_count, RF_ACTIVE);

		if (dinfo->cfg[i] == NULL) {
			device_printf(dev, "failed to allocate SIBA_CFG%hhu\n",
			    i);
			return (ENXIO);
		}
	}

	return (0);
}

static device_t
siba_add_child(device_t dev, u_int order, const char *name, int unit)
{
	struct siba_devinfo	*dinfo;
	device_t		 child;

	child = device_add_child_ordered(dev, order, name, unit);
	if (child == NULL)
		return (NULL);

	if ((dinfo = siba_alloc_dinfo(dev)) == NULL) {
		device_delete_child(dev, child);
		return (NULL);
	}

	device_set_ivars(child, dinfo);

	return (child);
}

static void
siba_child_deleted(device_t dev, device_t child)
{
	struct siba_devinfo	*dinfo;

	/* Call required bhnd(4) implementation */
	bhnd_generic_child_deleted(dev, child);

	/* Free siba device info */
	if ((dinfo = device_get_ivars(child)) != NULL)
		siba_free_dinfo(dev, dinfo);

	device_set_ivars(child, NULL);
}

/**
 * Scan the core table and add all valid discovered cores to
 * the bus.
 *
 * @param dev The siba bus device.
 */
int
siba_add_children(device_t dev)
{
	const struct bhnd_chipid	*chipid;
	struct siba_core_id		*cores;
	struct bhnd_resource		*r;
	device_t			*children;
	int				 rid;
	int				 error;

	cores = NULL;
	r = NULL;

	chipid = BHND_BUS_GET_CHIPID(dev, dev);

	/* Allocate our temporary core and device table */
	cores = malloc(sizeof(*cores) * chipid->ncores, M_BHND, M_WAITOK);
	children = malloc(sizeof(*children) * chipid->ncores, M_BHND,
	    M_WAITOK | M_ZERO);

	/*
	 * Add child devices for all discovered cores.
	 *
	 * On bridged devices, we'll exhaust our available register windows if
	 * we map config blocks on unpopulated/disabled cores. To avoid this, we
	 * defer mapping of the per-core siba(4) config blocks until all cores
	 * have been enumerated and otherwise configured.
	 */
	for (u_int i = 0; i < chipid->ncores; i++) {
		struct siba_devinfo	*dinfo;
		uint32_t		 idhigh, idlow;
		rman_res_t		 r_count, r_end, r_start;

		/* Map the core's register block */
		rid = 0;
		r_start = SIBA_CORE_ADDR(i);
		r_count = SIBA_CORE_SIZE;
		r_end = r_start + SIBA_CORE_SIZE - 1;
		r = bhnd_alloc_resource(dev, SYS_RES_MEMORY, &rid, r_start,
		    r_end, r_count, RF_ACTIVE);
		if (r == NULL) {
			error = ENXIO;
			goto failed;
		}

		/* Read the core info */
		idhigh = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_IDHIGH));
		idlow = bhnd_bus_read_4(r, SB0_REG_ABS(SIBA_CFG0_IDLOW));

		cores[i] = siba_parse_core_id(idhigh, idlow, i, 0);

		/* Determine and set unit number */
		for (u_int j = 0; j < i; j++) {
			struct bhnd_core_info *cur = &cores[i].core_info;
			struct bhnd_core_info *prev = &cores[j].core_info;

			if (prev->vendor == cur->vendor &&
			    prev->device == cur->device)
				cur->unit++;
		}

		/* Add the child device */
		children[i] = BUS_ADD_CHILD(dev, 0, NULL, -1);
		if (children[i] == NULL) {
			error = ENXIO;
			goto failed;
		}

		/* Initialize per-device bus info */
		if ((dinfo = device_get_ivars(children[i])) == NULL) {
			error = ENXIO;
			goto failed;
		}

		if ((error = siba_init_dinfo(dev, dinfo, &cores[i])))
			goto failed;

		/* Register the core's address space(s). */
		if ((error = siba_register_addrspaces(dev, dinfo, r)))
			goto failed;

		/* Unmap the core's register block */
		bhnd_release_resource(dev, SYS_RES_MEMORY, rid, r);
		r = NULL;

		/* If pins are floating or the hardware is otherwise
		 * unpopulated, the device shouldn't be used. */
		if (bhnd_is_hw_disabled(children[i]))
			device_disable(children[i]);
	}

	/* Map all valid cores' config register blocks and perform interrupt
	 * assignment */
	for (u_int i = 0; i < chipid->ncores; i++) {
		struct siba_devinfo	*dinfo;
		device_t		 child;
		int			 nintr;

		child = children[i];

		/* Skip if core is disabled */
		if (bhnd_is_hw_disabled(child))
			continue;

		dinfo = device_get_ivars(child);

		/* Map the core's config blocks */
		if ((error = siba_map_cfg_resources(dev, dinfo)))
			goto failed;

		/* Assign interrupts */
		nintr = bhnd_get_intr_count(child);
		for (int rid = 0; rid < nintr; rid++) {
			error = BHND_BUS_ASSIGN_INTR(dev, child, rid);
			if (error) {
				device_printf(dev, "failed to assign interrupt "
				    "%d to core %u: %d\n", rid, i, error);
			}
		}

		/* Issue bus callback for fully initialized child. */
		BHND_BUS_CHILD_ADDED(dev, child);
	}

	free(cores, M_BHND);
	free(children, M_BHND);

	return (0);

failed:
	for (u_int i = 0; i < chipid->ncores; i++) {
		if (children[i] == NULL)
			continue;

		device_delete_child(dev, children[i]);
	}

	free(cores, M_BHND);
	free(children, M_BHND);

	if (r != NULL)
		bhnd_release_resource(dev, SYS_RES_MEMORY, rid, r);

	return (error);
}

static device_method_t siba_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,			siba_probe),
	DEVMETHOD(device_attach,		siba_attach),
	DEVMETHOD(device_detach,		siba_detach),
	DEVMETHOD(device_resume,		siba_resume),
	DEVMETHOD(device_suspend,		siba_suspend),

	/* Bus interface */
	DEVMETHOD(bus_add_child,		siba_add_child),
	DEVMETHOD(bus_child_deleted,		siba_child_deleted),
	DEVMETHOD(bus_read_ivar,		siba_read_ivar),
	DEVMETHOD(bus_write_ivar,		siba_write_ivar),
	DEVMETHOD(bus_get_resource_list,	siba_get_resource_list),

	/* BHND interface */
	DEVMETHOD(bhnd_bus_get_erom_class,	siba_get_erom_class),
	DEVMETHOD(bhnd_bus_read_ioctl,		siba_read_ioctl),
	DEVMETHOD(bhnd_bus_write_ioctl,		siba_write_ioctl),
	DEVMETHOD(bhnd_bus_read_iost,		siba_read_iost),
	DEVMETHOD(bhnd_bus_is_hw_suspended,	siba_is_hw_suspended),
	DEVMETHOD(bhnd_bus_reset_hw,		siba_reset_hw),
	DEVMETHOD(bhnd_bus_suspend_hw,		siba_suspend_hw),
	DEVMETHOD(bhnd_bus_read_config,		siba_read_config),
	DEVMETHOD(bhnd_bus_write_config,	siba_write_config),
	DEVMETHOD(bhnd_bus_get_port_count,	siba_get_port_count),
	DEVMETHOD(bhnd_bus_get_region_count,	siba_get_region_count),
	DEVMETHOD(bhnd_bus_get_port_rid,	siba_get_port_rid),
	DEVMETHOD(bhnd_bus_decode_port_rid,	siba_decode_port_rid),
	DEVMETHOD(bhnd_bus_get_region_addr,	siba_get_region_addr),
	DEVMETHOD(bhnd_bus_get_intr_count,	siba_get_intr_count),
	DEVMETHOD(bhnd_bus_get_core_ivec,	siba_get_core_ivec),

	DEVMETHOD_END
};

DEFINE_CLASS_1(bhnd, siba_driver, siba_methods, sizeof(struct siba_softc),
    bhnd_driver);

MODULE_VERSION(siba, 1);
MODULE_DEPEND(siba, bhnd, 1, 1, 1);