xref: /freebsd/sys/dev/bhnd/bhndb/bhndb_subr.c (revision ecf4106237505fa9459ae871793b754334989c17)
1 /*-
2  * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
3  * Copyright (c) 2017 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Landon Fuller
7  * under sponsorship from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer,
14  *    without modification.
15  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
17  *    redistribution must be conditioned upon including a substantially
18  *    similar Disclaimer requirement for further binary redistribution.
19  *
20  * NO WARRANTY
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
24  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
25  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
26  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
29  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGES.
32  */
33 
34 #include <sys/param.h>
35 #include <sys/kernel.h>
36 #include <sys/limits.h>
37 
38 #include "bhndb_private.h"
39 #include "bhndbvar.h"
40 
41 static int	bhndb_dma_tag_create(device_t dev, bus_dma_tag_t parent_dmat,
42 		    const struct bhnd_dma_translation *translation,
43 		    bus_dma_tag_t *dmat);
44 
45 /**
46  * Attach a BHND bridge device to @p parent.
47  *
48  * @param parent A parent PCI device.
49  * @param[out] bhndb On success, the probed and attached bhndb bridge device.
50  * @param unit The device unit number, or -1 to select the next available unit
51  * number.
52  *
53  * @retval 0 success
54  * @retval non-zero Failed to attach the bhndb device.
55  */
56 int
57 bhndb_attach_bridge(device_t parent, device_t *bhndb, int unit)
58 {
59 	int error;
60 
61 	*bhndb = device_add_child(parent, "bhndb", unit);
62 	if (*bhndb == NULL)
63 		return (ENXIO);
64 
65 	if (!(error = device_probe_and_attach(*bhndb)))
66 		return (0);
67 
68 	if ((device_delete_child(parent, *bhndb)))
69 		device_printf(parent, "failed to detach bhndb child\n");
70 
71 	return (error);
72 }
73 
74 /*
75  * Call BHNDB_SUSPEND_RESOURCE() for all resources in @p rl.
76  */
77 static void
78 bhndb_do_suspend_resources(device_t dev, struct resource_list *rl)
79 {
80 	struct resource_list_entry *rle;
81 
82 	/* Suspend all child resources. */
83 	STAILQ_FOREACH(rle, rl, link) {
84 		/* Skip non-allocated resources */
85 		if (rle->res == NULL)
86 			continue;
87 
88 		BHNDB_SUSPEND_RESOURCE(device_get_parent(dev), dev, rle->type,
89 		    rle->res);
90 	}
91 }
92 
/**
 * Helper function for implementing BUS_SUSPEND_CHILD() on bridged
 * bhnd(4) buses.
 *
 * This implementation of BUS_SUSPEND_CHILD() uses BUS_GET_RESOURCE_LIST()
 * to find the child's resources and call BHNDB_SUSPEND_RESOURCE() for all
 * child resources, ensuring that the device's allocated bridge resources
 * will be available to other devices during bus resumption.
 *
 * Before suspending any resources, @p child is suspended by
 * calling bhnd_generic_suspend_child().
 *
 * If @p child is not a direct child of @p dev, suspension is delegated to
 * the @p dev parent.
 */
108 int
109 bhnd_generic_br_suspend_child(device_t dev, device_t child)
110 {
111 	struct resource_list		*rl;
112 	int				 error;
113 
114 	if (device_get_parent(child) != dev)
115 		BUS_SUSPEND_CHILD(device_get_parent(dev), child);
116 
117 	if (device_is_suspended(child))
118 		return (EBUSY);
119 
120 	/* Suspend the child device */
121 	if ((error = bhnd_generic_suspend_child(dev, child)))
122 		return (error);
123 
124 	/* Fetch the resource list. If none, there's nothing else to do */
125 	rl = BUS_GET_RESOURCE_LIST(device_get_parent(child), child);
126 	if (rl == NULL)
127 		return (0);
128 
129 	/* Suspend all child resources. */
130 	bhndb_do_suspend_resources(dev, rl);
131 
132 	return (0);
133 }
134 
135 /**
136  * Helper function for implementing BUS_RESUME_CHILD() on bridged
137  * bhnd(4) bus devices.
138  *
139  * This implementation of BUS_RESUME_CHILD() uses BUS_GET_RESOURCE_LIST()
140  * to find the child's resources and call BHNDB_RESUME_RESOURCE() for all
141  * child resources, before delegating to bhnd_generic_resume_child().
142  *
143  * If resource resumption fails, @p child will not be resumed.
144  *
145  * If @p child is not a direct child of @p dev, suspension is delegated to
146  * the @p dev parent.
147  */
148 int
149 bhnd_generic_br_resume_child(device_t dev, device_t child)
150 {
151 	struct resource_list		*rl;
152 	struct resource_list_entry	*rle;
153 	int				 error;
154 
155 	if (device_get_parent(child) != dev)
156 		BUS_RESUME_CHILD(device_get_parent(dev), child);
157 
158 	if (!device_is_suspended(child))
159 		return (EBUSY);
160 
161 	/* Fetch the resource list. If none, there's nothing else to do */
162 	rl = BUS_GET_RESOURCE_LIST(device_get_parent(child), child);
163 	if (rl == NULL)
164 		return (bhnd_generic_resume_child(dev, child));
165 
166 	/* Resume all resources */
167 	STAILQ_FOREACH(rle, rl, link) {
168 		/* Skip non-allocated resources */
169 		if (rle->res == NULL)
170 			continue;
171 
172 		error = BHNDB_RESUME_RESOURCE(device_get_parent(dev), dev,
173 		    rle->type, rle->res);
174 		if (error) {
175 			/* Put all resources back into a suspend state */
176 			bhndb_do_suspend_resources(dev, rl);
177 			return (error);
178 		}
179 	}
180 
181 	/* Now that all resources are resumed, resume child */
182 	if ((error = bhnd_generic_resume_child(dev, child))) {
183 		/* Put all resources back into a suspend state */
184 		bhndb_do_suspend_resources(dev, rl);
185 	}
186 
187 	return (error);
188 }
189 
190 /**
191  * Find a host resource of @p type that maps the given range.
192  *
193  * @param hr The resource state to search.
194  * @param type The resource type to search for (see SYS_RES_*).
195  * @param start The start address of the range to search for.
196  * @param count The size of the range to search for.
197  *
198  * @retval resource the host resource containing the requested range.
199  * @retval NULL if no resource containing the requested range can be found.
200  */
201 struct resource *
202 bhndb_host_resource_for_range(struct bhndb_host_resources *hr, int type,
203     rman_res_t start, rman_res_t count)
204 {
205 	for (u_int i = 0; hr->resource_specs[i].type != -1; i++) {
206 		struct resource *r = hr->resources[i];
207 
208 		if (hr->resource_specs[i].type != type)
209 			continue;
210 
211 		/* Verify range */
212 		if (rman_get_start(r) > start)
213 			continue;
214 
215 		if (rman_get_end(r) < (start + count - 1))
216 			continue;
217 
218 		return (r);
219 	}
220 
221 	return (NULL);
222 }
223 
224 /**
 * Find a host resource that matches the given register window definition.
226  *
227  * @param hr The resource state to search.
228  * @param win A register window definition.
229  *
230  * @retval resource the host resource corresponding to @p win.
231  * @retval NULL if no resource corresponding to @p win can be found.
232  */
233 struct resource *
234 bhndb_host_resource_for_regwin(struct bhndb_host_resources *hr,
235     const struct bhndb_regwin *win)
236 {
237 	const struct resource_spec *rspecs;
238 
239 	rspecs = hr->resource_specs;
240 	for (u_int i = 0; rspecs[i].type != -1; i++) {
241 		if (win->res.type != rspecs[i].type)
242 			continue;
243 
244 		if (win->res.rid != rspecs[i].rid)
245 			continue;
246 
247 		/* Found declared resource */
248 		return (hr->resources[i]);
249 	}
250 
251 	device_printf(hr->owner, "missing regwin resource spec "
252 	    "(type=%d, rid=%d)\n", win->res.type, win->res.rid);
253 
254 	return (NULL);
255 }
256 
257 /**
258  * Allocate and initialize a new resource state structure.
259  *
260  * @param dev The bridge device.
261  * @param parent_dev The parent device from which host resources should be
262  * allocated.
263  * @param cfg The hardware configuration to be used.
264  */
struct bhndb_resources *
bhndb_alloc_resources(device_t dev, device_t parent_dev,
    const struct bhndb_hwcfg *cfg)
{
	struct bhndb_resources		*r;
	const struct bhndb_regwin	*win;
	bus_size_t			 last_window_size;
	int				 rnid;
	int				 error;
	bool				 free_ht_mem, free_br_mem, free_br_irq;

	/* Track which resource managers have been successfully
	 * initialized, so the failure path only finalizes those. */
	free_ht_mem = false;
	free_br_mem = false;
	free_br_irq = false;

	r = malloc(sizeof(*r), M_BHND, M_NOWAIT|M_ZERO);
	if (r == NULL)
		return (NULL);

	/* Basic initialization */
	r->dev = dev;
	r->cfg = cfg;
	r->res = NULL;
	r->min_prio = BHNDB_PRIORITY_NONE;
	STAILQ_INIT(&r->bus_regions);
	STAILQ_INIT(&r->bus_intrs);

	/* Spin mutex; presumably serializes dynamic register window
	 * stealing (per the "dwa_steal" name) — confirm against users
	 * of dw_steal_mtx */
	mtx_init(&r->dw_steal_mtx, device_get_nameunit(dev),
	    "bhndb dwa_steal lock", MTX_SPIN);

	/* Initialize host address space resource manager. */
	r->ht_mem_rman.rm_start = 0;
	r->ht_mem_rman.rm_end = ~0;
	r->ht_mem_rman.rm_type = RMAN_ARRAY;
	r->ht_mem_rman.rm_descr = "BHNDB host memory";
	if ((error = rman_init(&r->ht_mem_rman))) {
		device_printf(r->dev, "could not initialize ht_mem_rman\n");
		goto failed;
	}
	free_ht_mem = true;

	/* Initialize resource manager for the bridged address space. */
	r->br_mem_rman.rm_start = 0;
	r->br_mem_rman.rm_end = BUS_SPACE_MAXADDR_32BIT;
	r->br_mem_rman.rm_type = RMAN_ARRAY;
	r->br_mem_rman.rm_descr = "BHNDB bridged memory";

	if ((error = rman_init(&r->br_mem_rman))) {
		device_printf(r->dev, "could not initialize br_mem_rman\n");
		goto failed;
	}
	free_br_mem = true;

	error = rman_manage_region(&r->br_mem_rman, 0, BUS_SPACE_MAXADDR_32BIT);
	if (error) {
		device_printf(r->dev, "could not configure br_mem_rman\n");
		goto failed;
	}

	/* Initialize resource manager for the bridged interrupt controller. */
	r->br_irq_rman.rm_start = 0;
	r->br_irq_rman.rm_end = RM_MAX_END;
	r->br_irq_rman.rm_type = RMAN_ARRAY;
	r->br_irq_rman.rm_descr = "BHNDB bridged interrupts";

	if ((error = rman_init(&r->br_irq_rman))) {
		device_printf(r->dev, "could not initialize br_irq_rman\n");
		goto failed;
	}
	free_br_irq = true;

	error = rman_manage_region(&r->br_irq_rman, 0, RM_MAX_END);
	if (error) {
		device_printf(r->dev, "could not configure br_irq_rman\n");
		goto failed;
	}

	/* Fetch the dynamic regwin count and verify that it does not exceed
	 * what is representable via our freelist bitstring. */
	r->dwa_count = bhndb_regwin_count(cfg->register_windows,
	    BHNDB_REGWIN_T_DYN);
	if (r->dwa_count >= INT_MAX) {
		device_printf(r->dev, "max dynamic regwin count exceeded\n");
		goto failed;
	}

	/* Allocate the dynamic window allocation table. */
	r->dw_alloc = malloc(sizeof(r->dw_alloc[0]) * r->dwa_count, M_BHND,
	    M_NOWAIT);
	if (r->dw_alloc == NULL)
		goto failed;

	/* Allocate the dynamic window allocation freelist */
	r->dwa_freelist = bit_alloc(r->dwa_count, M_BHND, M_NOWAIT);
	if (r->dwa_freelist == NULL)
		goto failed;

	/* Initialize the dynamic window table */
	rnid = 0;
	last_window_size = 0;
	for (win = cfg->register_windows;
	    win->win_type != BHNDB_REGWIN_T_INVALID; win++)
	{
		struct bhndb_dw_alloc *dwa;

		/* Skip non-DYN windows */
		if (win->win_type != BHNDB_REGWIN_T_DYN)
			continue;

		/* Validate the window size; all dynamic windows must share
		 * a single common size. */
		if (win->win_size == 0) {
			device_printf(r->dev, "ignoring zero-length dynamic "
			    "register window\n");
			continue;
		} else if (last_window_size == 0) {
			last_window_size = win->win_size;
		} else if (last_window_size != win->win_size) {
			/*
			 * No existing hardware should trigger this.
			 *
			 * If you run into this in the future, the dynamic
			 * window allocator and the resource priority system
			 * will need to be extended to support multiple register
			 * window allocation pools.
			 */
			device_printf(r->dev, "devices that vend multiple "
			    "dynamic register window sizes are not currently "
			    "supported\n");
			goto failed;
		}

		dwa = &r->dw_alloc[rnid];
		dwa->win = win;
		dwa->parent_res = NULL;
		dwa->rnid = rnid;
		dwa->target = 0x0;

		LIST_INIT(&dwa->refs);
		rnid++;
	}

	/* Allocate host resources */
	error = bhndb_alloc_host_resources(&r->res, dev, parent_dev, r->cfg);
	if (error) {
		device_printf(r->dev,
		    "could not allocate host resources on %s: %d\n",
		    device_get_nameunit(parent_dev), error);
		goto failed;
	}

	/* Populate (and validate) parent resource references for all
	 * dynamic windows */
	for (size_t i = 0; i < r->dwa_count; i++) {
		struct bhndb_dw_alloc		*dwa;
		const struct bhndb_regwin	*win;

		dwa = &r->dw_alloc[i];
		win = dwa->win;

		/* Find and validate corresponding resource. */
		dwa->parent_res = bhndb_host_resource_for_regwin(r->res, win);
		if (dwa->parent_res == NULL) {
			device_printf(r->dev, "no host resource found for %u "
			    "register window with offset %#jx and "
			    "size %#jx\n",
			    win->win_type,
			    (uintmax_t)win->win_offset,
			    (uintmax_t)win->win_size);

			error = ENXIO;
			goto failed;
		}

		/* The host resource must be large enough to cover the full
		 * register window. */
		if (rman_get_size(dwa->parent_res) < win->win_offset +
		    win->win_size)
		{
			device_printf(r->dev, "resource %d too small for "
			    "register window with offset %llx and size %llx\n",
			    rman_get_rid(dwa->parent_res),
			    (unsigned long long) win->win_offset,
			    (unsigned long long) win->win_size);

			error = EINVAL;
			goto failed;
		}
	}

	/* Add allocated memory resources to our host memory resource manager */
	for (u_int i = 0; r->res->resource_specs[i].type != -1; i++) {
		struct resource *res;

		/* skip non-memory resources */
		if (r->res->resource_specs[i].type != SYS_RES_MEMORY)
			continue;

		/* add host resource to set of managed regions */
		res = r->res->resources[i];
		error = rman_manage_region(&r->ht_mem_rman,
		    rman_get_start(res), rman_get_end(res));
		if (error) {
			device_printf(r->dev,
			    "could not register host memory region with "
			    "ht_mem_rman: %d\n", error);
			goto failed;
		}
	}

	return (r);

failed:
	/* Finalize only the state that was successfully initialized
	 * above; the free_* flags and NULL checks keep this safe at any
	 * point of failure. */
	if (free_ht_mem)
		rman_fini(&r->ht_mem_rman);

	if (free_br_mem)
		rman_fini(&r->br_mem_rman);

	if (free_br_irq)
		rman_fini(&r->br_irq_rman);

	if (r->dw_alloc != NULL)
		free(r->dw_alloc, M_BHND);

	if (r->dwa_freelist != NULL)
		free(r->dwa_freelist, M_BHND);

	if (r->res != NULL)
		bhndb_release_host_resources(r->res);

	mtx_destroy(&r->dw_steal_mtx);

	free(r, M_BHND);

	return (NULL);
}
499 
500 /**
501  * Create a new DMA tag for the given @p translation.
502  *
503  * @param	dev		The bridge device.
504  * @param	parent_dmat	The parent DMA tag, or NULL if none.
505  * @param	translation	The DMA translation for which a DMA tag will
506  *				be created.
507  * @param[out]	dmat		On success, the newly created DMA tag.
508  *
509  * @retval 0		success
510  * @retval non-zero	if creating the new DMA tag otherwise fails, a regular
511  *			unix error code will be returned.
512  */
513 static int
514 bhndb_dma_tag_create(device_t dev, bus_dma_tag_t parent_dmat,
515     const struct bhnd_dma_translation *translation, bus_dma_tag_t *dmat)
516 {
517 	bus_dma_tag_t	translation_tag;
518 	bhnd_addr_t	dt_mask;
519 	bus_addr_t	lowaddr, highaddr;
520 	bus_size_t	maxsegsz;
521 	int		error;
522 
523 	highaddr = BUS_SPACE_MAXADDR;
524 	maxsegsz = BUS_SPACE_MAXSIZE;
525 
526 	/* Determine full addressable mask */
527 	dt_mask = (translation->addr_mask | translation->addrext_mask);
528 	KASSERT(dt_mask != 0, ("DMA addr_mask invalid: %#jx",
529 		(uintmax_t)dt_mask));
530 
531 	/* (addr_mask|addrext_mask) is our maximum supported address */
532 	lowaddr = MIN(dt_mask, BUS_SPACE_MAXADDR);
533 
534 	/* Constrain to translation window size */
535 	if (translation->addr_mask < maxsegsz)
536 		maxsegsz = translation->addr_mask;
537 
538 	/* Create our DMA tag */
539 	error = bus_dma_tag_create(parent_dmat,
540 	    1, 0,			/* alignment, boundary */
541 	    lowaddr, highaddr,
542 	    NULL, NULL,			/* filter, filterarg */
543 	    BUS_SPACE_MAXSIZE, 0,	/* maxsize, nsegments */
544 	    maxsegsz, 0,		/* maxsegsize, flags */
545 	    NULL, NULL,			/* lockfunc, lockarg */
546 	    &translation_tag);
547 	if (error) {
548 		device_printf(dev, "failed to create bridge DMA tag: %d\n",
549 		    error);
550 		return (error);
551 	}
552 
553 	*dmat = translation_tag;
554 	return (0);
555 }
556 
557 /**
558  * Deallocate the given bridge resource structure and any associated resources.
559  *
560  * @param br Resource state to be deallocated.
561  */
562 void
563 bhndb_free_resources(struct bhndb_resources *br)
564 {
565 	struct bhndb_region		*region, *r_next;
566 	struct bhndb_dw_alloc		*dwa;
567 	struct bhndb_dw_rentry		*dwr, *dwr_next;
568 	struct bhndb_intr_handler	*ih;
569 	bool				 leaked_regions, leaked_intrs;
570 
571 	leaked_regions = false;
572 	leaked_intrs = false;
573 
574 	/* No window regions may still be held */
575 	if (!bhndb_dw_all_free(br)) {
576 		for (int i = 0; i < br->dwa_count; i++) {
577 			dwa = &br->dw_alloc[i];
578 
579 			/* Skip free dynamic windows */
580 			if (bhndb_dw_is_free(br, dwa))
581 				continue;
582 
583 			device_printf(br->dev,
584 			    "leaked dynamic register window %d\n", dwa->rnid);
585 			leaked_regions = true;
586 		}
587 	}
588 
589 	/* There should be no interrupt handlers still registered */
590 	STAILQ_FOREACH(ih, &br->bus_intrs, ih_link) {
591 		device_printf(br->dev, "interrupt handler leaked %p\n",
592 		    ih->ih_cookiep);
593 	}
594 
595 	if (leaked_intrs || leaked_regions) {
596 		panic("leaked%s%s", leaked_intrs ? " active interrupts" : "",
597 		    leaked_regions ? " active register windows" : "");
598 	}
599 
600 	/* Release host resources allocated through our parent. */
601 	if (br->res != NULL)
602 		bhndb_release_host_resources(br->res);
603 
604 	/* Clean up resource reservations */
605 	for (size_t i = 0; i < br->dwa_count; i++) {
606 		dwa = &br->dw_alloc[i];
607 
608 		LIST_FOREACH_SAFE(dwr, &dwa->refs, dw_link, dwr_next) {
609 			LIST_REMOVE(dwr, dw_link);
610 			free(dwr, M_BHND);
611 		}
612 	}
613 
614 	/* Release bus regions */
615 	STAILQ_FOREACH_SAFE(region, &br->bus_regions, link, r_next) {
616 		STAILQ_REMOVE(&br->bus_regions, region, bhndb_region, link);
617 		free(region, M_BHND);
618 	}
619 
620 	/* Release our resource managers */
621 	rman_fini(&br->ht_mem_rman);
622 	rman_fini(&br->br_mem_rman);
623 	rman_fini(&br->br_irq_rman);
624 
625 	free(br->dw_alloc, M_BHND);
626 	free(br->dwa_freelist, M_BHND);
627 
628 	mtx_destroy(&br->dw_steal_mtx);
629 
630 	free(br, M_BHND);
631 }
632 
633 /**
634  * Allocate host bus resources defined by @p hwcfg.
635  *
636  * On success, the caller assumes ownership of the allocated host resources,
637  * which must be freed via bhndb_release_host_resources().
638  *
639  * @param[out]	resources	On success, the allocated host resources.
640  * @param	dev		The bridge device.
641  * @param	parent_dev	The parent device from which host resources
642  *				should be allocated (e.g. via
643  *				bus_alloc_resources()).
644  * @param	hwcfg		The hardware configuration defining the host
645  *				resources to be allocated
646  */
int
bhndb_alloc_host_resources(struct bhndb_host_resources **resources,
    device_t dev, device_t parent_dev, const struct bhndb_hwcfg *hwcfg)
{
	struct bhndb_host_resources		*hr;
	const struct bhnd_dma_translation	*dt;
	bus_dma_tag_t				 parent_dmat;
	size_t					 nres, ndt;
	int					 error;

	parent_dmat = bus_get_dma_tag(parent_dev);

	/* M_WAITOK allocation cannot fail; initialize all pointer fields
	 * to NULL so the failure path below can free unconditionally. */
	hr = malloc(sizeof(*hr), M_BHND, M_WAITOK);
	hr->owner = parent_dev;
	hr->cfg = hwcfg;
	hr->resource_specs = NULL;
	hr->resources = NULL;
	hr->dma_tags = NULL;
	hr->num_dma_tags = 0;

	/* Determine our bridge resource count from the hardware config. */
	nres = 0;
	for (size_t i = 0; hwcfg->resource_specs[i].type != -1; i++)
		nres++;

	/* Determine the total count and validate our DMA translation table. */
	ndt = 0;
	for (dt = hwcfg->dma_translations; dt != NULL &&
	    !BHND_DMA_IS_TRANSLATION_TABLE_END(dt); dt++)
	{
		/* Validate the defined translation; the base address and
		 * addrext bits must not overlap the address mask. */
		if ((dt->base_addr & dt->addr_mask) != 0) {
			device_printf(dev, "invalid DMA translation; base "
			    "address %#jx overlaps address mask %#jx",
			    (uintmax_t)dt->base_addr, (uintmax_t)dt->addr_mask);

			error = EINVAL;
			goto failed;
		}

		if ((dt->addrext_mask & dt->addr_mask) != 0) {
			device_printf(dev, "invalid DMA translation; addrext "
			    "mask %#jx overlaps address mask %#jx",
			    (uintmax_t)dt->addrext_mask,
			    (uintmax_t)dt->addr_mask);

			error = EINVAL;
			goto failed;
		}

		/* Increment our entry count */
		ndt++;
	}

	/* Allocate our DMA tags; num_dma_tags tracks how many were
	 * successfully created, bounding cleanup on failure. */
	hr->dma_tags = malloc(sizeof(*hr->dma_tags) * ndt, M_BHND,
	    M_WAITOK|M_ZERO);
	for (size_t i = 0; i < ndt; i++) {
		error = bhndb_dma_tag_create(dev, parent_dmat,
		    &hwcfg->dma_translations[i], &hr->dma_tags[i]);
		if (error)
			goto failed;

		hr->num_dma_tags++;
	}

	/* Allocate space for a non-const copy of our resource_spec
	 * table; this will be updated with the RIDs assigned by
	 * bus_alloc_resources. */
	hr->resource_specs = malloc(sizeof(hr->resource_specs[0]) * (nres + 1),
	    M_BHND, M_WAITOK);

	/* Initialize and terminate the table */
	for (size_t i = 0; i < nres; i++)
		hr->resource_specs[i] = hwcfg->resource_specs[i];

	hr->resource_specs[nres].type = -1;

	/* Allocate space for our resource references */
	hr->resources = malloc(sizeof(hr->resources[0]) * nres, M_BHND,
	    M_WAITOK);

	/* Allocate host resources */
	error = bus_alloc_resources(hr->owner, hr->resource_specs,
	    hr->resources);
	if (error) {
		device_printf(dev, "could not allocate bridge resources via "
		    "%s: %d\n", device_get_nameunit(parent_dev), error);
		goto failed;
	}

	/* Transfer ownership of hr to the caller */
	*resources = hr;
	return (0);

failed:
	/* Free whatever was allocated before the failure point */
	if (hr->resource_specs != NULL)
		free(hr->resource_specs, M_BHND);

	if (hr->resources != NULL)
		free(hr->resources, M_BHND);

	for (size_t i = 0; i < hr->num_dma_tags; i++)
		bus_dma_tag_destroy(hr->dma_tags[i]);

	if (hr->dma_tags != NULL)
		free(hr->dma_tags, M_BHND);

	free(hr, M_BHND);

	return (error);
}
758 
759 /**
760  * Deallocate a set of bridge host resources.
761  *
762  * @param hr The resources to be freed.
763  */
void
bhndb_release_host_resources(struct bhndb_host_resources *hr)
{
	/* Return all host resources to the owning bus before freeing the
	 * spec and reference tables they were allocated against. */
	bus_release_resources(hr->owner, hr->resource_specs, hr->resources);

	/* Destroy the bridge DMA tags */
	for (size_t i = 0; i < hr->num_dma_tags; i++)
		bus_dma_tag_destroy(hr->dma_tags[i]);

	free(hr->resources, M_BHND);
	free(hr->resource_specs, M_BHND);
	free(hr->dma_tags, M_BHND);
	free(hr, M_BHND);
}
777 
778 /**
779  * Search @p cores for the core serving as the bhnd host bridge.
780  *
781  * This function uses a heuristic valid on all known PCI/PCIe/PCMCIA-bridged
782  * bhnd(4) devices to determine the hostb core:
783  *
784  * - The core must have a Broadcom vendor ID.
785  * - The core devclass must match the bridge type.
786  * - The core must be the first device on the bus with the bridged device
787  *   class.
788  *
789  * @param	cores		The core table to search.
790  * @param	ncores		The number of cores in @p cores.
791  * @param	bridge_devclass	The expected device class of the bridge core.
792  * @param[out]	core		If found, the matching host bridge core info.
793  *
794  * @retval 0		success
795  * @retval ENOENT	not found
796  */
797 int
798 bhndb_find_hostb_core(struct bhnd_core_info *cores, u_int ncores,
799     bhnd_devclass_t bridge_devclass, struct bhnd_core_info *core)
800 {
801 	struct bhnd_core_match	 md;
802 	struct bhnd_core_info	*match;
803 	u_int			 match_core_idx;
804 
805 	/* Set up a match descriptor for the required device class. */
806 	md = (struct bhnd_core_match) {
807 		BHND_MATCH_CORE_CLASS(bridge_devclass),
808 		BHND_MATCH_CORE_UNIT(0)
809 	};
810 
811 	/* Find the matching core with the lowest core index */
812 	match = NULL;
813 	match_core_idx = UINT_MAX;
814 
815 	for (u_int i = 0; i < ncores; i++) {
816 		if (!bhnd_core_matches(&cores[i], &md))
817 			continue;
818 
819 		/* Lower core indices take precedence */
820 		if (match != NULL && match_core_idx < match->core_idx)
821 			continue;
822 
823 		match = &cores[i];
824 		match_core_idx = match->core_idx;
825 	}
826 
827 	if (match == NULL)
828 		return (ENOENT);
829 
830 	*core = *match;
831 	return (0);
832 }
833 
834 /**
835  * Allocate a host interrupt source and its backing SYS_RES_IRQ host resource.
836  *
837  * @param owner	The device to be used to allocate a SYS_RES_IRQ
838  *		resource with @p rid.
839  * @param rid	The resource ID of the IRQ to be allocated.
840  * @param start	The start value to be passed to bus_alloc_resource().
841  * @param end	The end value to be passed to bus_alloc_resource().
842  * @param count	The count to be passed to bus_alloc_resource().
843  * @param flags	The flags to be passed to bus_alloc_resource().
844  *
845  * @retval non-NULL	success
846  * @retval NULL		if allocation fails.
847  */
848 struct bhndb_intr_isrc *
849 bhndb_alloc_intr_isrc(device_t owner, int rid, rman_res_t start, rman_res_t end,
850     rman_res_t count, u_int flags)
851 {
852 	struct bhndb_intr_isrc *isrc;
853 
854 	isrc = malloc(sizeof(*isrc), M_BHND, M_NOWAIT);
855 	if (isrc == NULL)
856 		return (NULL);
857 
858 	isrc->is_owner = owner;
859 	isrc->is_rid = rid;
860 	isrc->is_res = bus_alloc_resource(owner, SYS_RES_IRQ, &isrc->is_rid,
861 	    start, end, count, flags);
862 	if (isrc->is_res == NULL) {
863 		free(isrc, M_BHND);
864 		return (NULL);
865 	}
866 
867 	return (isrc);
868 }
869 
870 /**
871  * Free a host interrupt source and its backing host resource.
872  *
873  * @param isrc	The interrupt source to be freed.
874  */
void
bhndb_free_intr_isrc(struct bhndb_intr_isrc *isrc)
{
	/* Release the backing SYS_RES_IRQ resource before freeing the
	 * descriptor itself */
	bus_release_resource(isrc->is_owner, SYS_RES_IRQ, isrc->is_rid,
	    isrc->is_res);
	free(isrc, M_BHND);
}
882 
883 /**
884  * Allocate and initialize a new interrupt handler entry.
885  *
886  * @param owner	The child device that owns this entry.
887  * @param r	The child's interrupt resource.
888  * @param isrc	The isrc mapped for this entry.
889  *
890  * @retval non-NULL	success
891  * @retval NULL		if allocation fails.
892  */
893 struct bhndb_intr_handler *
894 bhndb_alloc_intr_handler(device_t owner, struct resource *r,
895     struct bhndb_intr_isrc *isrc)
896 {
897 	struct bhndb_intr_handler *ih;
898 
899 	ih = malloc(sizeof(*ih), M_BHND, M_NOWAIT | M_ZERO);
900 	ih->ih_owner = owner;
901 	ih->ih_res = r;
902 	ih->ih_isrc = isrc;
903 	ih->ih_cookiep = NULL;
904 	ih->ih_active = false;
905 
906 	return (ih);
907 }
908 
909 /**
910  * Free an interrupt handler entry.
911  *
912  * @param br The resource state owning @p ih.
913  * @param ih The interrupt handler entry to be removed.
914  */
void
bhndb_free_intr_handler(struct bhndb_intr_handler *ih)
{
	/* Handlers must be deregistered (ih_active cleared) before they
	 * may be freed */
	KASSERT(!ih->ih_active, ("free of active interrupt handler %p",
	    ih->ih_cookiep));

	free(ih, M_BHND);
}
923 
924 /**
925  * Add an active interrupt handler to the given resource state.
 *
927  * @param br The resource state to be modified.
928  * @param ih The interrupt handler entry to be added.
929  */
void
bhndb_register_intr_handler(struct bhndb_resources *br,
    struct bhndb_intr_handler *ih)
{
	KASSERT(!ih->ih_active, ("duplicate registration of interrupt "
	    "handler %p", ih->ih_cookiep));
	/* The bus cookie must be assigned before registration */
	KASSERT(ih->ih_cookiep != NULL, ("missing cookiep"));

	/* Mark active and publish on the bridge's handler list */
	ih->ih_active = true;
	STAILQ_INSERT_HEAD(&br->bus_intrs, ih, ih_link);
}
941 
942 /**
943  * Remove an interrupt handler from the given resource state.
944  *
945  * @param br The resource state containing @p ih.
946  * @param ih The interrupt handler entry to be removed.
947  */
void
bhndb_deregister_intr_handler(struct bhndb_resources *br,
    struct bhndb_intr_handler *ih)
{
	KASSERT(ih->ih_active, ("duplicate deregistration of interrupt "
	    "handler %p", ih->ih_cookiep));

	/* The handler must actually be registered with @p br */
	KASSERT(bhndb_find_intr_handler(br, ih) == ih,
	    ("unknown interrupt handler %p", ih));

	/* Unlink and mark inactive */
	STAILQ_REMOVE(&br->bus_intrs, ih, bhndb_intr_handler, ih_link);
	ih->ih_active = false;
}
961 
962 /**
963  * Return the interrupt handler entry corresponding to @p cookiep, or NULL
964  * if no entry is found.
965  *
966  * @param br The resource state to search for the given @p cookiep.
967  * @param cookiep The interrupt handler's bus-assigned cookiep value.
968  */
969 struct bhndb_intr_handler *
970 bhndb_find_intr_handler(struct bhndb_resources *br, void *cookiep)
971 {
972 	struct bhndb_intr_handler *ih;
973 
974 	STAILQ_FOREACH(ih, &br->bus_intrs, ih_link) {
975 		if (ih == cookiep)
976 			return (ih);
977 	}
978 
979 	/* Not found */
980 	return (NULL);
981 }
982 
983 /**
984  * Find the maximum start and end limits of the bridged resource @p r.
985  *
986  * If the resource is not currently mapped by the bridge, ENOENT will be
987  * returned.
988  *
989  * @param	br		The resource state to search.
990  * @param	type The resource type (see SYS_RES_*).
991  * @param	r The resource to search for in @p br.
992  * @param[out]	start	On success, the minimum supported start address.
993  * @param[out]	end	On success, the maximum supported end address.
994  *
995  * @retval 0		success
996  * @retval ENOENT	no active mapping found for @p r of @p type
997  */
998 int
999 bhndb_find_resource_limits(struct bhndb_resources *br, int type,
1000     struct resource *r, rman_res_t *start, rman_res_t *end)
1001 {
1002 	struct bhndb_dw_alloc		*dynamic;
1003 	struct bhndb_region		*sregion;
1004 	struct bhndb_intr_handler	*ih;
1005 
1006 	switch (type) {
1007 	case SYS_RES_IRQ:
1008 		/* Is this one of ours? */
1009 		STAILQ_FOREACH(ih, &br->bus_intrs, ih_link) {
1010 			if (ih->ih_res == r)
1011 				continue;
1012 
1013 			/* We don't support adjusting IRQ resource limits */
1014 			*start = rman_get_start(r);
1015 			*end = rman_get_end(r);
1016 			return (0);
1017 		}
1018 
1019 		/* Not found */
1020 		return (ENOENT);
1021 
1022 	case SYS_RES_MEMORY: {
1023 		/* Check for an enclosing dynamic register window */
1024 		if ((dynamic = bhndb_dw_find_resource(br, r))) {
1025 			*start = dynamic->target;
1026 			*end = dynamic->target + dynamic->win->win_size - 1;
1027 			return (0);
1028 		}
1029 
1030 		/* Check for a static region */
1031 		sregion = bhndb_find_resource_region(br, rman_get_start(r),
1032 		rman_get_size(r));
1033 		if (sregion != NULL && sregion->static_regwin != NULL) {
1034 			*start = sregion->addr;
1035 			*end = sregion->addr + sregion->size - 1;
1036 
1037 			return (0);
1038 		}
1039 
1040 		/* Not found */
1041 		return (ENOENT);
1042 	}
1043 
1044 	default:
1045 		device_printf(br->dev, "unknown resource type: %d\n", type);
1046 		return (ENOENT);
1047 	}
1048 }
1049 
1050 /**
1051  * Add a bus region entry to @p r for the given base @p addr and @p size.
1052  *
1053  * @param br The resource state to which the bus region entry will be added.
1054  * @param addr The base address of this region.
1055  * @param size The size of this region.
1056  * @param priority The resource priority to be assigned to allocations
1057  * made within this bus region.
1058  * @param alloc_flags resource allocation flags (@see bhndb_alloc_flags)
1059  * @param static_regwin If available, a static register window mapping this
1060  * bus region entry. If not available, NULL.
1061  *
1062  * @retval 0 success
1063  * @retval non-zero if adding the bus region fails.
1064  */
1065 int
1066 bhndb_add_resource_region(struct bhndb_resources *br, bhnd_addr_t addr,
1067     bhnd_size_t size, bhndb_priority_t priority, uint32_t alloc_flags,
1068     const struct bhndb_regwin *static_regwin)
1069 {
1070 	struct bhndb_region	*reg;
1071 
1072 	/* Insert in the bus resource list */
1073 	reg = malloc(sizeof(*reg), M_BHND, M_NOWAIT);
1074 	if (reg == NULL)
1075 		return (ENOMEM);
1076 
1077 	*reg = (struct bhndb_region) {
1078 		.addr = addr,
1079 		.size = size,
1080 		.priority = priority,
1081 		.alloc_flags = alloc_flags,
1082 		.static_regwin = static_regwin
1083 	};
1084 
1085 	STAILQ_INSERT_HEAD(&br->bus_regions, reg, link);
1086 
1087 	return (0);
1088 }
1089 
1090 /**
1091  * Return true if a mapping of @p size bytes at @p addr is provided by either
1092  * one contiguous bus region, or by multiple discontiguous regions.
1093  *
1094  * @param br The resource state to query.
1095  * @param addr The requested starting address.
1096  * @param size The requested size.
1097  */
1098 bool
1099 bhndb_has_static_region_mapping(struct bhndb_resources *br,
1100     bhnd_addr_t addr, bhnd_size_t size)
1101 {
1102 	struct bhndb_region	*region;
1103 	bhnd_addr_t		 r_addr;
1104 
1105 	r_addr = addr;
1106 	while ((region = bhndb_find_resource_region(br, r_addr, 1)) != NULL) {
1107 		/* Must be backed by a static register window */
1108 		if (region->static_regwin == NULL)
1109 			return (false);
1110 
1111 		/* Adjust the search offset */
1112 		r_addr += region->size;
1113 
1114 		/* Have we traversed a complete (if discontiguous) mapping? */
1115 		if (r_addr == addr + size)
1116 			return (true);
1117 	}
1118 
1119 	/* No complete mapping found */
1120 	return (false);
1121 }
1122 
1123 /**
1124  * Find the bus region that maps @p size bytes at @p addr.
1125  *
1126  * @param br The resource state to search.
1127  * @param addr The requested starting address.
1128  * @param size The requested size.
1129  *
1130  * @retval bhndb_region A region that fully contains the requested range.
1131  * @retval NULL If no mapping region can be found.
1132  */
1133 struct bhndb_region *
1134 bhndb_find_resource_region(struct bhndb_resources *br, bhnd_addr_t addr,
1135     bhnd_size_t size)
1136 {
1137 	struct bhndb_region *region;
1138 
1139 	STAILQ_FOREACH(region, &br->bus_regions, link) {
1140 		/* Request must fit within the region's mapping  */
1141 		if (addr < region->addr)
1142 			continue;
1143 
1144 		if (addr + size > region->addr + region->size)
1145 			continue;
1146 
1147 		return (region);
1148 	}
1149 
1150 	/* Not found */
1151 	return (NULL);
1152 }
1153 
1154 /**
1155  * Find the entry matching @p r in @p dwa's references, if any.
1156  *
1157  * @param dwa The dynamic window allocation to search
1158  * @param r The resource to search for in @p dwa.
1159  */
1160 static struct bhndb_dw_rentry *
1161 bhndb_dw_find_resource_entry(struct bhndb_dw_alloc *dwa, struct resource *r)
1162 {
1163 	struct bhndb_dw_rentry	*rentry;
1164 
1165 	LIST_FOREACH(rentry, &dwa->refs, dw_link) {
1166 		struct resource *dw_res = rentry->dw_res;
1167 
1168 		/* Match dev/rid/addr/size */
1169 		if (rman_get_device(dw_res)	!= rman_get_device(r) ||
1170 			rman_get_rid(dw_res)	!= rman_get_rid(r) ||
1171 			rman_get_start(dw_res)	!= rman_get_start(r) ||
1172 			rman_get_size(dw_res)	!= rman_get_size(r))
1173 		{
1174 			continue;
1175 		}
1176 
1177 		/* Matching allocation found */
1178 		return (rentry);
1179 	}
1180 
1181 	return (NULL);
1182 }
1183 
1184 /**
1185  * Find the dynamic region allocated for @p r, if any.
1186  *
1187  * @param br The resource state to search.
1188  * @param r The resource to search for.
1189  *
1190  * @retval bhndb_dw_alloc The allocation record for @p r.
1191  * @retval NULL if no dynamic window is allocated for @p r.
1192  */
1193 struct bhndb_dw_alloc *
1194 bhndb_dw_find_resource(struct bhndb_resources *br, struct resource *r)
1195 {
1196 	struct bhndb_dw_alloc	*dwa;
1197 
1198 	for (size_t i = 0; i < br->dwa_count; i++) {
1199 		dwa = &br->dw_alloc[i];
1200 
1201 		/* Skip free dynamic windows */
1202 		if (bhndb_dw_is_free(br, dwa))
1203 			continue;
1204 
1205 		/* Matching allocation found? */
1206 		if (bhndb_dw_find_resource_entry(dwa, r) != NULL)
1207 			return (dwa);
1208 	}
1209 
1210 	return (NULL);
1211 }
1212 
1213 /**
1214  * Find an existing dynamic window mapping @p size bytes
1215  * at @p addr. The window may or may not be free.
1216  *
1217  * @param br The resource state to search.
1218  * @param addr The requested starting address.
1219  * @param size The requested size.
1220  *
1221  * @retval bhndb_dw_alloc A window allocation that fully contains the requested
1222  * range.
1223  * @retval NULL If no mapping region can be found.
1224  */
1225 struct bhndb_dw_alloc *
1226 bhndb_dw_find_mapping(struct bhndb_resources *br, bhnd_addr_t addr,
1227     bhnd_size_t size)
1228 {
1229 	struct bhndb_dw_alloc		*dwr;
1230 	const struct bhndb_regwin	*win;
1231 
1232 	/* Search for an existing dynamic mapping of this address range. */
1233 	for (size_t i = 0; i < br->dwa_count; i++) {
1234 		dwr = &br->dw_alloc[i];
1235 		win = dwr->win;
1236 
1237 		/* Verify the range */
1238 		if (addr < dwr->target)
1239 			continue;
1240 
1241 		if (addr + size > dwr->target + win->win_size)
1242 			continue;
1243 
1244 		/* Found a usable mapping */
1245 		return (dwr);
1246 	}
1247 
1248 	/* not found */
1249 	return (NULL);
1250 }
1251 
1252 /**
1253  * Retain a reference to @p dwa for use by @p res.
1254  *
1255  * @param br The resource state owning @p dwa.
1256  * @param dwa The allocation record to be retained.
1257  * @param res The resource that will own a reference to @p dwa.
1258  *
1259  * @retval 0 success
1260  * @retval ENOMEM Failed to allocate a new reference structure.
1261  */
1262 int
1263 bhndb_dw_retain(struct bhndb_resources *br, struct bhndb_dw_alloc *dwa,
1264     struct resource *res)
1265 {
1266 	struct bhndb_dw_rentry *rentry;
1267 
1268 	KASSERT(bhndb_dw_find_resource_entry(dwa, res) == NULL,
1269 	    ("double-retain of dynamic window for same resource"));
1270 
1271 	/* Insert a reference entry; we use M_NOWAIT to allow use from
1272 	 * within a non-sleepable lock */
1273 	rentry = malloc(sizeof(*rentry), M_BHND, M_NOWAIT);
1274 	if (rentry == NULL)
1275 		return (ENOMEM);
1276 
1277 	rentry->dw_res = res;
1278 	LIST_INSERT_HEAD(&dwa->refs, rentry, dw_link);
1279 
1280 	/* Update the free list */
1281 	bit_set(br->dwa_freelist, dwa->rnid);
1282 
1283 	return (0);
1284 }
1285 
1286 /**
1287  * Release a reference to @p dwa previously retained by @p res. If the
1288  * reference count of @p dwa reaches zero, it will be added to the
1289  * free list.
1290  *
1291  * @param br The resource state owning @p dwa.
1292  * @param dwa The allocation record to be released.
1293  * @param res The resource that currently owns a reference to @p dwa.
1294  */
1295 void
1296 bhndb_dw_release(struct bhndb_resources *br, struct bhndb_dw_alloc *dwa,
1297     struct resource *r)
1298 {
1299 	struct bhndb_dw_rentry	*rentry;
1300 
1301 	/* Find the rentry */
1302 	rentry = bhndb_dw_find_resource_entry(dwa, r);
1303 	KASSERT(rentry != NULL, ("over release of resource entry"));
1304 
1305 	LIST_REMOVE(rentry, dw_link);
1306 	free(rentry, M_BHND);
1307 
1308 	/* If this was the last reference, update the free list */
1309 	if (LIST_EMPTY(&dwa->refs))
1310 		bit_clear(br->dwa_freelist, dwa->rnid);
1311 }
1312 
1313 /**
1314  * Attempt to set (or reset) the target address of @p dwa to map @p size bytes
1315  * at @p addr.
1316  *
1317  * This will apply any necessary window alignment and verify that
1318  * the window is capable of mapping the requested range prior to modifying
1319  * therecord.
1320  *
1321  * @param dev The device on which to issue the BHNDB_SET_WINDOW_ADDR() request.
1322  * @param br The resource state owning @p dwa.
1323  * @param dwa The allocation record to be configured.
1324  * @param addr The address to be mapped via @p dwa.
1325  * @param size The number of bytes to be mapped at @p addr.
1326  *
1327  * @retval 0 success
1328  * @retval non-zero no usable register window available.
1329  */
1330 int
1331 bhndb_dw_set_addr(device_t dev, struct bhndb_resources *br,
1332     struct bhndb_dw_alloc *dwa, bus_addr_t addr, bus_size_t size)
1333 {
1334 	const struct bhndb_regwin	*rw;
1335 	bus_addr_t			 offset;
1336 	int				 error;
1337 
1338 	rw = dwa->win;
1339 
1340 	KASSERT(bhndb_dw_is_free(br, dwa) || mtx_owned(&br->dw_steal_mtx),
1341 	    ("attempting to set the target address on an in-use window"));
1342 
1343 	/* Page-align the target address */
1344 	offset = addr % rw->win_size;
1345 	dwa->target = addr - offset;
1346 
1347 	/* Verify that the window is large enough for the full target */
1348 	if (rw->win_size - offset < size)
1349 		return (ENOMEM);
1350 
1351 	/* Update the window target */
1352 	error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win, dwa->target);
1353 	if (error) {
1354 		dwa->target = 0x0;
1355 		return (error);
1356 	}
1357 
1358 	return (0);
1359 }
1360 
1361 /**
1362  * Steal an in-use allocation record from @p br, returning the record's current
1363  * target in @p saved on success.
1364  *
1365  * This function acquires a mutex and disables interrupts; callers should
1366  * avoid holding a stolen window longer than required to issue an I/O
1367  * request.
1368  *
1369  * A successful call to bhndb_dw_steal() must be balanced with a call to
1370  * bhndb_dw_return_stolen().
1371  *
1372  * @param br The resource state from which a window should be stolen.
1373  * @param saved The stolen window's saved target address.
1374  *
1375  * @retval non-NULL success
1376  * @retval NULL no dynamic window regions are defined.
1377  */
1378 struct bhndb_dw_alloc *
1379 bhndb_dw_steal(struct bhndb_resources *br, bus_addr_t *saved)
1380 {
1381 	struct bhndb_dw_alloc *dw_stolen;
1382 
1383 	KASSERT(bhndb_dw_next_free(br) == NULL,
1384 	    ("attempting to steal an in-use window while free windows remain"));
1385 
1386 	/* Nothing to steal from? */
1387 	if (br->dwa_count == 0)
1388 		return (NULL);
1389 
1390 	/*
1391 	 * Acquire our steal spinlock; this will be released in
1392 	 * bhndb_dw_return_stolen().
1393 	 *
1394 	 * Acquiring also disables interrupts, which is required when one is
1395 	 * stealing an in-use existing register window.
1396 	 */
1397 	mtx_lock_spin(&br->dw_steal_mtx);
1398 
1399 	dw_stolen = &br->dw_alloc[0];
1400 	*saved = dw_stolen->target;
1401 	return (dw_stolen);
1402 }
1403 
1404 /**
1405  * Return an allocation record previously stolen using bhndb_dw_steal().
1406  *
1407  * @param dev The device on which to issue a BHNDB_SET_WINDOW_ADDR() request.
1408  * @param br The resource state owning @p dwa.
1409  * @param dwa The allocation record to be returned.
1410  * @param saved The original target address provided by bhndb_dw_steal().
1411  */
1412 void
1413 bhndb_dw_return_stolen(device_t dev, struct bhndb_resources *br,
1414     struct bhndb_dw_alloc *dwa, bus_addr_t saved)
1415 {
1416 	int error;
1417 
1418 	mtx_assert(&br->dw_steal_mtx, MA_OWNED);
1419 
1420 	error = bhndb_dw_set_addr(dev, br, dwa, saved, 0);
1421 	if (error) {
1422 		panic("failed to restore register window target %#jx: %d\n",
1423 		    (uintmax_t)saved, error);
1424 	}
1425 
1426 	mtx_unlock_spin(&br->dw_steal_mtx);
1427 }
1428 
1429 /**
1430  * Return the count of @p type register windows in @p table.
1431  *
1432  * @param table The table to search.
1433  * @param type The required window type, or BHNDB_REGWIN_T_INVALID to
1434  * count all register window types.
1435  */
1436 size_t
1437 bhndb_regwin_count(const struct bhndb_regwin *table,
1438     bhndb_regwin_type_t type)
1439 {
1440 	const struct bhndb_regwin	*rw;
1441 	size_t				 count;
1442 
1443 	count = 0;
1444 	for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++) {
1445 		if (type == BHNDB_REGWIN_T_INVALID || rw->win_type == type)
1446 			count++;
1447 	}
1448 
1449 	return (count);
1450 }
1451 
1452 /**
1453  * Search @p table for the first window with the given @p type.
1454  *
1455  * @param table The table to search.
1456  * @param type The required window type.
1457  * @param min_size The minimum window size.
1458  *
1459  * @retval bhndb_regwin The first matching window.
1460  * @retval NULL If no window of the requested type could be found.
1461  */
1462 const struct bhndb_regwin *
1463 bhndb_regwin_find_type(const struct bhndb_regwin *table,
1464     bhndb_regwin_type_t type, bus_size_t min_size)
1465 {
1466 	const struct bhndb_regwin *rw;
1467 
1468 	for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++)
1469 	{
1470 		if (rw->win_type == type && rw->win_size >= min_size)
1471 			return (rw);
1472 	}
1473 
1474 	return (NULL);
1475 }
1476 
1477 /**
1478  * Search @p windows for the first matching core window.
1479  *
1480  * @param table The table to search.
1481  * @param class The required core class.
1482  * @param unit The required core unit, or -1.
1483  * @param port_type The required port type.
1484  * @param port The required port.
1485  * @param region The required region.
1486  * @param offset The required readable core register block offset.
1487  * @param min_size The required minimum readable size at @p offset.
1488  *
1489  * @retval bhndb_regwin The first matching window.
1490  * @retval NULL If no matching window was found.
1491  */
1492 const struct bhndb_regwin *
1493 bhndb_regwin_find_core(const struct bhndb_regwin *table, bhnd_devclass_t class,
1494     int unit, bhnd_port_type port_type, u_int port, u_int region,
1495     bus_size_t offset, bus_size_t min_size)
1496 {
1497 	const struct bhndb_regwin *rw;
1498 
1499 	for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++)
1500 	{
1501 		bus_size_t rw_offset;
1502 
1503 		/* Match on core, port, and region attributes */
1504 		if (rw->win_type != BHNDB_REGWIN_T_CORE)
1505 			continue;
1506 
1507 		if (rw->d.core.class != class)
1508 			continue;
1509 
1510 		if (unit != -1 && rw->d.core.unit != unit)
1511 			continue;
1512 
1513 		if (rw->d.core.port_type != port_type)
1514 			continue;
1515 
1516 		if (rw->d.core.port != port)
1517 			continue;
1518 
1519 		if (rw->d.core.region != region)
1520 			continue;
1521 
1522 		/* Verify that the requested range is mapped within
1523 		 * this register window */
1524 		if (rw->d.core.offset > offset)
1525 			continue;
1526 
1527 		rw_offset = offset - rw->d.core.offset;
1528 
1529 		if (rw->win_size < rw_offset)
1530 			continue;
1531 
1532 		if (rw->win_size - rw_offset < min_size)
1533 			continue;
1534 
1535 		return (rw);
1536 	}
1537 
1538 	return (NULL);
1539 }
1540 
1541 /**
1542  * Search @p windows for the best available window of at least @p min_size.
1543  *
1544  * Search order:
1545  * - BHND_REGWIN_T_CORE
1546  * - BHND_REGWIN_T_DYN
1547  *
1548  * @param table The table to search.
1549  * @param class The required core class.
1550  * @param unit The required core unit, or -1.
1551  * @param port_type The required port type.
1552  * @param port The required port.
1553  * @param region The required region.
1554  * @param offset The required readable core register block offset.
1555  * @param min_size The required minimum readable size at @p offset.
1556  *
1557  * @retval bhndb_regwin The first matching window.
1558  * @retval NULL If no matching window was found.
1559  */
1560 const struct bhndb_regwin *
1561 bhndb_regwin_find_best(const struct bhndb_regwin *table,
1562     bhnd_devclass_t class, int unit, bhnd_port_type port_type, u_int port,
1563     u_int region, bus_size_t offset, bus_size_t min_size)
1564 {
1565 	const struct bhndb_regwin *rw;
1566 
1567 	/* Prefer a fixed core mapping */
1568 	rw = bhndb_regwin_find_core(table, class, unit, port_type,
1569 	    port, region, offset, min_size);
1570 	if (rw != NULL)
1571 		return (rw);
1572 
1573 	/* Fall back on a generic dynamic window */
1574 	return (bhndb_regwin_find_type(table, BHNDB_REGWIN_T_DYN, min_size));
1575 }
1576 
1577 /**
1578  * Return true if @p regw defines a BHNDB_REGWIN_T_CORE register window
1579  * that matches against @p core.
1580  *
1581  * @param regw A register window to match against.
1582  * @param core The bhnd(4) core info to match against @p regw.
1583  */
1584 bool
1585 bhndb_regwin_match_core(const struct bhndb_regwin *regw,
1586     struct bhnd_core_info *core)
1587 {
1588 	/* Only core windows are supported */
1589 	if (regw->win_type != BHNDB_REGWIN_T_CORE)
1590 		return (false);
1591 
1592 	/* Device class must match */
1593 	if (bhnd_core_class(core) != regw->d.core.class)
1594 		return (false);
1595 
1596 	/* Device unit must match */
1597 	if (core->unit != regw->d.core.unit)
1598 		return (false);
1599 
1600 	/* Matches */
1601 	return (true);
1602 }
1603 
1604 /**
1605  * Search for a core resource priority descriptor in @p table that matches
1606  * @p core.
1607  *
1608  * @param table The table to search.
1609  * @param core The core to match against @p table.
1610  */
1611 const struct bhndb_hw_priority *
1612 bhndb_hw_priority_find_core(const struct bhndb_hw_priority *table,
1613     struct bhnd_core_info *core)
1614 {
1615 	const struct bhndb_hw_priority	*hp;
1616 
1617 	for (hp = table; hp->ports != NULL; hp++) {
1618 		if (bhnd_core_matches(core, &hp->match))
1619 			return (hp);
1620 	}
1621 
1622 	/* not found */
1623 	return (NULL);
1624 }
1625 
1626 /**
1627  * Search for a port resource priority descriptor in @p table.
1628  *
1629  * @param table The table to search.
1630  * @param core The core to match against @p table.
1631  * @param port_type The required port type.
1632  * @param port The required port.
1633  * @param region The required region.
1634  */
1635 const struct bhndb_port_priority *
1636 bhndb_hw_priorty_find_port(const struct bhndb_hw_priority *table,
1637     struct bhnd_core_info *core, bhnd_port_type port_type, u_int port,
1638     u_int region)
1639 {
1640 	const struct bhndb_hw_priority		*hp;
1641 
1642 	if ((hp = bhndb_hw_priority_find_core(table, core)) == NULL)
1643 		return (NULL);
1644 
1645 	for (u_int i = 0; i < hp->num_ports; i++) {
1646 		const struct bhndb_port_priority *pp = &hp->ports[i];
1647 
1648 		if (pp->type != port_type)
1649 			continue;
1650 
1651 		if (pp->port != port)
1652 			continue;
1653 
1654 		if (pp->region != region)
1655 			continue;
1656 
1657 		return (pp);
1658 	}
1659 
1660 	/* not found */
1661 	return (NULL);
1662 }
1663