xref: /freebsd/sys/dev/bhnd/bhndb/bhndb_subr.c (revision 6e778a7efdc0e804471750157f6bacd1ef7d1580)
1 /*-
2  * Copyright (c) 2015-2016 Landon Fuller <landon@landonf.org>
3  * Copyright (c) 2017 The FreeBSD Foundation
4  * All rights reserved.
5  *
6  * Portions of this software were developed by Landon Fuller
7  * under sponsorship from the FreeBSD Foundation.
8  *
9  * Redistribution and use in source and binary forms, with or without
10  * modification, are permitted provided that the following conditions
11  * are met:
12  * 1. Redistributions of source code must retain the above copyright
13  *    notice, this list of conditions and the following disclaimer,
14  *    without modification.
15  * 2. Redistributions in binary form must reproduce at minimum a disclaimer
16  *    similar to the "NO WARRANTY" disclaimer below ("Disclaimer") and any
17  *    redistribution must be conditioned upon including a substantially
18  *    similar Disclaimer requirement for further binary redistribution.
19  *
20  * NO WARRANTY
21  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22  * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23  * LIMITED TO, THE IMPLIED WARRANTIES OF NONINFRINGEMENT, MERCHANTIBILITY
24  * AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL
25  * THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY,
26  * OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
27  * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
28  * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
29  * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
30  * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
31  * THE POSSIBILITY OF SUCH DAMAGES.
32  */
33 
34 #include <sys/cdefs.h>
35 __FBSDID("$FreeBSD$");
36 
37 #include <sys/param.h>
38 #include <sys/kernel.h>
39 #include <sys/limits.h>
40 
41 #include "bhndb_private.h"
42 #include "bhndbvar.h"
43 
44 static int	bhndb_dma_tag_create(device_t dev, bus_dma_tag_t parent_dmat,
45 		    const struct bhnd_dma_translation *translation,
46 		    bus_dma_tag_t *dmat);
47 
48 /**
49  * Attach a BHND bridge device to @p parent.
50  *
51  * @param parent A parent PCI device.
52  * @param[out] bhndb On success, the probed and attached bhndb bridge device.
53  * @param unit The device unit number, or -1 to select the next available unit
54  * number.
55  *
56  * @retval 0 success
57  * @retval non-zero Failed to attach the bhndb device.
58  */
59 int
60 bhndb_attach_bridge(device_t parent, device_t *bhndb, int unit)
61 {
62 	int error;
63 
64 	*bhndb = device_add_child(parent, "bhndb", unit);
65 	if (*bhndb == NULL)
66 		return (ENXIO);
67 
68 	if (!(error = device_probe_and_attach(*bhndb)))
69 		return (0);
70 
71 	if ((device_delete_child(parent, *bhndb)))
72 		device_printf(parent, "failed to detach bhndb child\n");
73 
74 	return (error);
75 }
76 
77 /*
78  * Call BHNDB_SUSPEND_RESOURCE() for all resources in @p rl.
79  */
static void
bhndb_do_suspend_resources(device_t dev, struct resource_list *rl)
{
	struct resource_list_entry *rle;

	/* Suspend all child resources; the actual suspension is performed
	 * by the bridge (our parent device). */
	STAILQ_FOREACH(rle, rl, link) {
		/* Skip non-allocated resources */
		if (rle->res == NULL)
			continue;

		BHNDB_SUSPEND_RESOURCE(device_get_parent(dev), dev, rle->type,
		    rle->res);
	}
}
95 
96 /**
 * Helper function for implementing BUS_SUSPEND_CHILD() on bridged
 * bhnd(4) buses.
 *
 * This implementation of BUS_SUSPEND_CHILD() uses BUS_GET_RESOURCE_LIST()
101  * to find the child's resources and call BHNDB_SUSPEND_RESOURCE() for all
102  * child resources, ensuring that the device's allocated bridge resources
103  * will be available to other devices during bus resumption.
104  *
105  * Before suspending any resources, @p child is suspended by
106  * calling bhnd_generic_suspend_child().
107  *
108  * If @p child is not a direct child of @p dev, suspension is delegated to
109  * the @p dev parent.
110  */
int
bhnd_generic_br_suspend_child(device_t dev, device_t child)
{
	struct resource_list		*rl;
	int				 error;

	/* If @p child is not our direct child, delegate suspension to its
	 * actual parent.
	 * NOTE(review): the BUS_SUSPEND_CHILD() result is discarded and
	 * execution falls through to the checks below — confirm this is
	 * intentional rather than a missing return. */
	if (device_get_parent(child) != dev)
		BUS_SUSPEND_CHILD(device_get_parent(dev), child);

	/* An already-suspended child may not be suspended again */
	if (device_is_suspended(child))
		return (EBUSY);

	/* Suspend the child device */
	if ((error = bhnd_generic_suspend_child(dev, child)))
		return (error);

	/* Fetch the resource list. If none, there's nothing else to do */
	rl = BUS_GET_RESOURCE_LIST(device_get_parent(child), child);
	if (rl == NULL)
		return (0);

	/* Suspend all child resources. */
	bhndb_do_suspend_resources(dev, rl);

	return (0);
}
137 
138 /**
139  * Helper function for implementing BUS_RESUME_CHILD() on bridged
140  * bhnd(4) bus devices.
141  *
142  * This implementation of BUS_RESUME_CHILD() uses BUS_GET_RESOURCE_LIST()
143  * to find the child's resources and call BHNDB_RESUME_RESOURCE() for all
144  * child resources, before delegating to bhnd_generic_resume_child().
145  *
146  * If resource resumption fails, @p child will not be resumed.
147  *
 * If @p child is not a direct child of @p dev, resumption is delegated to
149  * the @p dev parent.
150  */
int
bhnd_generic_br_resume_child(device_t dev, device_t child)
{
	struct resource_list		*rl;
	struct resource_list_entry	*rle;
	int				 error;

	/* If @p child is not our direct child, delegate resumption to its
	 * actual parent.
	 * NOTE(review): the BUS_RESUME_CHILD() result is discarded and
	 * execution falls through to the checks below — confirm this is
	 * intentional rather than a missing return. */
	if (device_get_parent(child) != dev)
		BUS_RESUME_CHILD(device_get_parent(dev), child);

	/* Only a suspended child may be resumed */
	if (!device_is_suspended(child))
		return (EBUSY);

	/* Fetch the resource list. If none, there's nothing else to do */
	rl = BUS_GET_RESOURCE_LIST(device_get_parent(child), child);
	if (rl == NULL)
		return (bhnd_generic_resume_child(dev, child));

	/* Resume all resources */
	STAILQ_FOREACH(rle, rl, link) {
		/* Skip non-allocated resources */
		if (rle->res == NULL)
			continue;

		error = BHNDB_RESUME_RESOURCE(device_get_parent(dev), dev,
		    rle->type, rle->res);
		if (error) {
			/* Put all resources back into a suspend state */
			bhndb_do_suspend_resources(dev, rl);
			return (error);
		}
	}

	/* Now that all resources are resumed, resume child */
	if ((error = bhnd_generic_resume_child(dev, child))) {
		/* Put all resources back into a suspend state */
		bhndb_do_suspend_resources(dev, rl);
	}

	return (error);
}
192 
193 /**
194  * Find a host resource of @p type that maps the given range.
195  *
196  * @param hr The resource state to search.
197  * @param type The resource type to search for (see SYS_RES_*).
198  * @param start The start address of the range to search for.
199  * @param count The size of the range to search for.
200  *
201  * @retval resource the host resource containing the requested range.
202  * @retval NULL if no resource containing the requested range can be found.
203  */
204 struct resource *
205 bhndb_host_resource_for_range(struct bhndb_host_resources *hr, int type,
206     rman_res_t start, rman_res_t count)
207 {
208 	for (u_int i = 0; hr->resource_specs[i].type != -1; i++) {
209 		struct resource *r = hr->resources[i];
210 
211 		if (hr->resource_specs[i].type != type)
212 			continue;
213 
214 		/* Verify range */
215 		if (rman_get_start(r) > start)
216 			continue;
217 
218 		if (rman_get_end(r) < (start + count - 1))
219 			continue;
220 
221 		return (r);
222 	}
223 
224 	return (NULL);
225 }
226 
227 /**
 * Find a host resource that matches the given register window definition.
229  *
230  * @param hr The resource state to search.
231  * @param win A register window definition.
232  *
233  * @retval resource the host resource corresponding to @p win.
234  * @retval NULL if no resource corresponding to @p win can be found.
235  */
236 struct resource *
237 bhndb_host_resource_for_regwin(struct bhndb_host_resources *hr,
238     const struct bhndb_regwin *win)
239 {
240 	const struct resource_spec *rspecs;
241 
242 	rspecs = hr->resource_specs;
243 	for (u_int i = 0; rspecs[i].type != -1; i++) {
244 		if (win->res.type != rspecs[i].type)
245 			continue;
246 
247 		if (win->res.rid != rspecs[i].rid)
248 			continue;
249 
250 		/* Found declared resource */
251 		return (hr->resources[i]);
252 	}
253 
254 	device_printf(hr->owner, "missing regwin resource spec "
255 	    "(type=%d, rid=%d)\n", win->res.type, win->res.rid);
256 
257 	return (NULL);
258 }
259 
260 /**
261  * Allocate and initialize a new resource state structure.
262  *
263  * @param dev The bridge device.
264  * @param parent_dev The parent device from which host resources should be
265  * allocated.
266  * @param cfg The hardware configuration to be used.
267  */
struct bhndb_resources *
bhndb_alloc_resources(device_t dev, device_t parent_dev,
    const struct bhndb_hwcfg *cfg)
{
	struct bhndb_resources		*r;
	const struct bhndb_regwin	*win;
	bus_size_t			 last_window_size;
	int				 rnid;
	int				 error;
	bool				 free_ht_mem, free_br_mem, free_br_irq;

	/* Track which resource managers have been initialized, so the
	 * 'failed' path only finalizes what was actually set up. */
	free_ht_mem = false;
	free_br_mem = false;
	free_br_irq = false;

	r = malloc(sizeof(*r), M_BHND, M_NOWAIT|M_ZERO);
	if (r == NULL)
		return (NULL);

	/* Basic initialization */
	r->dev = dev;
	r->cfg = cfg;
	r->res = NULL;
	r->min_prio = BHNDB_PRIORITY_NONE;
	STAILQ_INIT(&r->bus_regions);
	STAILQ_INIT(&r->bus_intrs);

	mtx_init(&r->dw_steal_mtx, device_get_nameunit(dev),
	    "bhndb dwa_steal lock", MTX_SPIN);

	/* Initialize host address space resource manager. */
	r->ht_mem_rman.rm_start = 0;
	r->ht_mem_rman.rm_end = ~0;
	r->ht_mem_rman.rm_type = RMAN_ARRAY;
	r->ht_mem_rman.rm_descr = "BHNDB host memory";
	if ((error = rman_init(&r->ht_mem_rman))) {
		device_printf(r->dev, "could not initialize ht_mem_rman\n");
		goto failed;
	}
	free_ht_mem = true;


	/* Initialize resource manager for the bridged address space. */
	r->br_mem_rman.rm_start = 0;
	r->br_mem_rman.rm_end = BUS_SPACE_MAXADDR_32BIT;
	r->br_mem_rman.rm_type = RMAN_ARRAY;
	r->br_mem_rman.rm_descr = "BHNDB bridged memory";

	if ((error = rman_init(&r->br_mem_rman))) {
		device_printf(r->dev, "could not initialize br_mem_rman\n");
		goto failed;
	}
	free_br_mem = true;

	error = rman_manage_region(&r->br_mem_rman, 0, BUS_SPACE_MAXADDR_32BIT);
	if (error) {
		device_printf(r->dev, "could not configure br_mem_rman\n");
		goto failed;
	}


	/* Initialize resource manager for the bridged interrupt controller. */
	r->br_irq_rman.rm_start = 0;
	r->br_irq_rman.rm_end = RM_MAX_END;
	r->br_irq_rman.rm_type = RMAN_ARRAY;
	r->br_irq_rman.rm_descr = "BHNDB bridged interrupts";

	if ((error = rman_init(&r->br_irq_rman))) {
		device_printf(r->dev, "could not initialize br_irq_rman\n");
		goto failed;
	}
	free_br_irq = true;

	error = rman_manage_region(&r->br_irq_rman, 0, RM_MAX_END);
	if (error) {
		device_printf(r->dev, "could not configure br_irq_rman\n");
		goto failed;
	}

	/* Fetch the dynamic regwin count and verify that it does not exceed
	 * what is representable via our freelist bitstring. */
	r->dwa_count = bhndb_regwin_count(cfg->register_windows,
	    BHNDB_REGWIN_T_DYN);
	if (r->dwa_count >= INT_MAX) {
		device_printf(r->dev, "max dynamic regwin count exceeded\n");
		goto failed;
	}

	/* Allocate the dynamic window allocation table. */
	r->dw_alloc = malloc(sizeof(r->dw_alloc[0]) * r->dwa_count, M_BHND,
	    M_NOWAIT);
	if (r->dw_alloc == NULL)
		goto failed;

	/* Allocate the dynamic window allocation freelist */
	r->dwa_freelist = bit_alloc(r->dwa_count, M_BHND, M_NOWAIT);
	if (r->dwa_freelist == NULL)
		goto failed;

	/* Initialize the dynamic window table */
	rnid = 0;
	last_window_size = 0;
	for (win = cfg->register_windows;
	    win->win_type != BHNDB_REGWIN_T_INVALID; win++)
	{
		struct bhndb_dw_alloc *dwa;

		/* Skip non-DYN windows */
		if (win->win_type != BHNDB_REGWIN_T_DYN)
			continue;

		/* Validate the window size; all dynamic windows must share
		 * a single common size. */
		if (win->win_size == 0) {
			device_printf(r->dev, "ignoring zero-length dynamic "
			    "register window\n");
			continue;
		} else if (last_window_size == 0) {
			last_window_size = win->win_size;
		} else if (last_window_size != win->win_size) {
			/*
			 * No existing hardware should trigger this.
			 *
			 * If you run into this in the future, the dynamic
			 * window allocator and the resource priority system
			 * will need to be extended to support multiple register
			 * window allocation pools.
			 */
			device_printf(r->dev, "devices that vend multiple "
			    "dynamic register window sizes are not currently "
			    "supported\n");
			goto failed;
		}

		dwa = &r->dw_alloc[rnid];
		dwa->win = win;
		dwa->parent_res = NULL;
		dwa->rnid = rnid;
		dwa->target = 0x0;

		LIST_INIT(&dwa->refs);
		rnid++;
	}

	/* Allocate host resources */
	error = bhndb_alloc_host_resources(&r->res, dev, parent_dev, r->cfg);
	if (error) {
		device_printf(r->dev,
		    "could not allocate host resources on %s: %d\n",
		    device_get_nameunit(parent_dev), error);
		goto failed;
	}

	/* Populate (and validate) parent resource references for all
	 * dynamic windows */
	for (size_t i = 0; i < r->dwa_count; i++) {
		struct bhndb_dw_alloc		*dwa;
		const struct bhndb_regwin	*win;

		dwa = &r->dw_alloc[i];
		win = dwa->win;

		/* Find and validate corresponding resource. */
		dwa->parent_res = bhndb_host_resource_for_regwin(r->res, win);
		if (dwa->parent_res == NULL) {
			device_printf(r->dev, "no host resource found for %u "
			    "register window with offset %#jx and "
			    "size %#jx\n",
			    win->win_type,
			    (uintmax_t)win->win_offset,
			    (uintmax_t)win->win_size);

			error = ENXIO;
			goto failed;
		}

		/* The register window must fit entirely within the backing
		 * host resource. */
		if (rman_get_size(dwa->parent_res) < win->win_offset +
		    win->win_size)
		{
			device_printf(r->dev, "resource %d too small for "
			    "register window with offset %llx and size %llx\n",
			    rman_get_rid(dwa->parent_res),
			    (unsigned long long) win->win_offset,
			    (unsigned long long) win->win_size);

			error = EINVAL;
			goto failed;
		}
	}

	/* Add allocated memory resources to our host memory resource manager */
	for (u_int i = 0; r->res->resource_specs[i].type != -1; i++) {
		struct resource *res;

		/* skip non-memory resources */
		if (r->res->resource_specs[i].type != SYS_RES_MEMORY)
			continue;

		/* add host resource to set of managed regions */
		res = r->res->resources[i];
		error = rman_manage_region(&r->ht_mem_rman,
		    rman_get_start(res), rman_get_end(res));
		if (error) {
			device_printf(r->dev,
			    "could not register host memory region with "
			    "ht_mem_rman: %d\n", error);
			goto failed;
		}
	}

	return (r);

failed:
	/* Tear down only the state initialized before the failure */
	if (free_ht_mem)
		rman_fini(&r->ht_mem_rman);

	if (free_br_mem)
		rman_fini(&r->br_mem_rman);

	if (free_br_irq)
		rman_fini(&r->br_irq_rman);

	if (r->dw_alloc != NULL)
		free(r->dw_alloc, M_BHND);

	if (r->dwa_freelist != NULL)
		free(r->dwa_freelist, M_BHND);

	if (r->res != NULL)
		bhndb_release_host_resources(r->res);

	mtx_destroy(&r->dw_steal_mtx);

	free(r, M_BHND);

	return (NULL);
}
504 
505 /**
506  * Create a new DMA tag for the given @p translation.
507  *
508  * @param	dev		The bridge device.
509  * @param	parent_dmat	The parent DMA tag, or NULL if none.
510  * @param	translation	The DMA translation for which a DMA tag will
511  *				be created.
512  * @param[out]	dmat		On success, the newly created DMA tag.
513  *
514  * @retval 0		success
515  * @retval non-zero	if creating the new DMA tag otherwise fails, a regular
516  *			unix error code will be returned.
517  */
static int
bhndb_dma_tag_create(device_t dev, bus_dma_tag_t parent_dmat,
    const struct bhnd_dma_translation *translation, bus_dma_tag_t *dmat)
{
	bus_dma_tag_t	translation_tag;
	bhnd_addr_t	dt_mask;
	bus_addr_t	boundary;
	bus_addr_t	lowaddr, highaddr;
	int		error;

	highaddr = BUS_SPACE_MAXADDR;
	boundary = 0;

	/* Determine full addressable mask */
	dt_mask = (translation->addr_mask | translation->addrext_mask);
	KASSERT(dt_mask != 0, ("DMA addr_mask invalid: %#jx",
		(uintmax_t)dt_mask));

	/* (addr_mask|addrext_mask) is our maximum supported address */
	lowaddr = MIN(dt_mask, BUS_SPACE_MAXADDR);

	/* Do we need to avoid crossing a DMA translation window boundary? */
	if (translation->addr_mask < BUS_SPACE_MAXADDR) {
		/* NOTE(review): intended as "round down to nearest power of
		 * two", but (addr_mask & ~1ULL) only clears the low bit —
		 * confirm the boundary computation is what was intended. */
		boundary = translation->addr_mask & (~1ULL);
	}

	/* Create our DMA tag */
	error = bus_dma_tag_create(parent_dmat,
	    1,				/* alignment */
	    boundary, lowaddr, highaddr,
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE, 0,	/* maxsize, nsegments */
	    BUS_SPACE_MAXSIZE, 0,	/* maxsegsize, flags */
	    NULL, NULL,			/* lockfunc, lockarg */
	    &translation_tag);
	if (error) {
		device_printf(dev, "failed to create bridge DMA tag: %d\n",
		    error);
		return (error);
	}

	*dmat = translation_tag;
	return (0);
}
563 
564 /**
565  * Deallocate the given bridge resource structure and any associated resources.
566  *
567  * @param br Resource state to be deallocated.
568  */
569 void
570 bhndb_free_resources(struct bhndb_resources *br)
571 {
572 	struct bhndb_region		*region, *r_next;
573 	struct bhndb_dw_alloc		*dwa;
574 	struct bhndb_dw_rentry		*dwr, *dwr_next;
575 	struct bhndb_intr_handler	*ih;
576 	bool				 leaked_regions, leaked_intrs;
577 
578 	leaked_regions = false;
579 	leaked_intrs = false;
580 
581 	/* No window regions may still be held */
582 	if (!bhndb_dw_all_free(br)) {
583 		for (int i = 0; i < br->dwa_count; i++) {
584 			dwa = &br->dw_alloc[i];
585 
586 			/* Skip free dynamic windows */
587 			if (bhndb_dw_is_free(br, dwa))
588 				continue;
589 
590 			device_printf(br->dev,
591 			    "leaked dynamic register window %d\n", dwa->rnid);
592 			leaked_regions = true;
593 		}
594 	}
595 
596 	/* There should be no interrupt handlers still registered */
597 	STAILQ_FOREACH(ih, &br->bus_intrs, ih_link) {
598 		device_printf(br->dev, "interrupt handler leaked %p\n",
599 		    ih->ih_cookiep);
600 	}
601 
602 	if (leaked_intrs || leaked_regions) {
603 		panic("leaked%s%s", leaked_intrs ? " active interrupts" : "",
604 		    leaked_regions ? " active register windows" : "");
605 	}
606 
607 	/* Release host resources allocated through our parent. */
608 	if (br->res != NULL)
609 		bhndb_release_host_resources(br->res);
610 
611 	/* Clean up resource reservations */
612 	for (size_t i = 0; i < br->dwa_count; i++) {
613 		dwa = &br->dw_alloc[i];
614 
615 		LIST_FOREACH_SAFE(dwr, &dwa->refs, dw_link, dwr_next) {
616 			LIST_REMOVE(dwr, dw_link);
617 			free(dwr, M_BHND);
618 		}
619 	}
620 
621 	/* Release bus regions */
622 	STAILQ_FOREACH_SAFE(region, &br->bus_regions, link, r_next) {
623 		STAILQ_REMOVE(&br->bus_regions, region, bhndb_region, link);
624 		free(region, M_BHND);
625 	}
626 
627 	/* Release our resource managers */
628 	rman_fini(&br->ht_mem_rman);
629 	rman_fini(&br->br_mem_rman);
630 	rman_fini(&br->br_irq_rman);
631 
632 	free(br->dw_alloc, M_BHND);
633 	free(br->dwa_freelist, M_BHND);
634 
635 	mtx_destroy(&br->dw_steal_mtx);
636 
637 	free(br, M_BHND);
638 }
639 
640 /**
641  * Allocate host bus resources defined by @p hwcfg.
642  *
643  * On success, the caller assumes ownership of the allocated host resources,
644  * which must be freed via bhndb_release_host_resources().
645  *
646  * @param[out]	resources	On success, the allocated host resources.
647  * @param	dev		The bridge device.
648  * @param	parent_dev	The parent device from which host resources
649  *				should be allocated (e.g. via
650  *				bus_alloc_resources()).
651  * @param	hwcfg		The hardware configuration defining the host
652  *				resources to be allocated
653  */
int
bhndb_alloc_host_resources(struct bhndb_host_resources **resources,
    device_t dev, device_t parent_dev, const struct bhndb_hwcfg *hwcfg)
{
	struct bhndb_host_resources		*hr;
	const struct bhnd_dma_translation	*dt;
	bus_dma_tag_t				 parent_dmat;
	size_t					 nres, ndt;
	int					 error;

	parent_dmat = bus_get_dma_tag(parent_dev);

	/* M_WAITOK cannot fail; pointer fields are NULL'd so the shared
	 * 'failed' path below can free them unconditionally. */
	hr = malloc(sizeof(*hr), M_BHND, M_WAITOK);
	hr->owner = parent_dev;
	hr->cfg = hwcfg;
	hr->resource_specs = NULL;
	hr->resources = NULL;
	hr->dma_tags = NULL;
	hr->num_dma_tags = 0;

	/* Determine our bridge resource count from the hardware config. */
	nres = 0;
	for (size_t i = 0; hwcfg->resource_specs[i].type != -1; i++)
		nres++;

	/* Determine the total count and validate our DMA translation table. */
	ndt = 0;
	for (dt = hwcfg->dma_translations; dt != NULL &&
	    !BHND_DMA_IS_TRANSLATION_TABLE_END(dt); dt++)
	{
		/* Validate the defined translation */
		if ((dt->base_addr & dt->addr_mask) != 0) {
			device_printf(dev, "invalid DMA translation; base "
			    "address %#jx overlaps address mask %#jx",
			    (uintmax_t)dt->base_addr, (uintmax_t)dt->addr_mask);

			error = EINVAL;
			goto failed;
		}

		if ((dt->addrext_mask & dt->addr_mask) != 0) {
			device_printf(dev, "invalid DMA translation; addrext "
			    "mask %#jx overlaps address mask %#jx",
			    (uintmax_t)dt->addrext_mask,
			    (uintmax_t)dt->addr_mask);

			error = EINVAL;
			goto failed;
		}

		/* Increment our entry count */
		ndt++;
	}

	/* Allocate our DMA tags */
	hr->dma_tags = malloc(sizeof(*hr->dma_tags) * ndt, M_BHND,
	    M_WAITOK|M_ZERO);
	for (size_t i = 0; i < ndt; i++) {
		error = bhndb_dma_tag_create(dev, parent_dmat,
		    &hwcfg->dma_translations[i], &hr->dma_tags[i]);
		if (error)
			goto failed;

		/* num_dma_tags tracks how many tags must be destroyed on
		 * failure */
		hr->num_dma_tags++;
	}

	/* Allocate space for a non-const copy of our resource_spec
	 * table; this will be updated with the RIDs assigned by
	 * bus_alloc_resources. */
	hr->resource_specs = malloc(sizeof(hr->resource_specs[0]) * (nres + 1),
	    M_BHND, M_WAITOK);

	/* Initialize and terminate the table */
	for (size_t i = 0; i < nres; i++)
		hr->resource_specs[i] = hwcfg->resource_specs[i];

	hr->resource_specs[nres].type = -1;

	/* Allocate space for our resource references */
	hr->resources = malloc(sizeof(hr->resources[0]) * nres, M_BHND,
	    M_WAITOK);

	/* Allocate host resources */
	error = bus_alloc_resources(hr->owner, hr->resource_specs,
	    hr->resources);
	if (error) {
		device_printf(dev, "could not allocate bridge resources via "
		    "%s: %d\n", device_get_nameunit(parent_dev), error);
		goto failed;
	}

	*resources = hr;
	return (0);

failed:
	/* Free whatever was allocated before the failure */
	if (hr->resource_specs != NULL)
		free(hr->resource_specs, M_BHND);

	if (hr->resources != NULL)
		free(hr->resources, M_BHND);

	for (size_t i = 0; i < hr->num_dma_tags; i++)
		bus_dma_tag_destroy(hr->dma_tags[i]);

	if (hr->dma_tags != NULL)
		free(hr->dma_tags, M_BHND);

	free(hr, M_BHND);

	return (error);
}
765 
766 /**
767  * Deallocate a set of bridge host resources.
768  *
769  * @param hr The resources to be freed.
770  */
771 void
772 bhndb_release_host_resources(struct bhndb_host_resources *hr)
773 {
774 	bus_release_resources(hr->owner, hr->resource_specs, hr->resources);
775 
776 	for (size_t i = 0; i < hr->num_dma_tags; i++)
777 		bus_dma_tag_destroy(hr->dma_tags[i]);
778 
779 	free(hr->resources, M_BHND);
780 	free(hr->resource_specs, M_BHND);
781 	free(hr->dma_tags, M_BHND);
782 	free(hr, M_BHND);
783 }
784 
785 
786 /**
787  * Search @p cores for the core serving as the bhnd host bridge.
788  *
789  * This function uses a heuristic valid on all known PCI/PCIe/PCMCIA-bridged
790  * bhnd(4) devices to determine the hostb core:
791  *
792  * - The core must have a Broadcom vendor ID.
793  * - The core devclass must match the bridge type.
794  * - The core must be the first device on the bus with the bridged device
795  *   class.
796  *
797  * @param	cores		The core table to search.
798  * @param	ncores		The number of cores in @p cores.
799  * @param	bridge_devclass	The expected device class of the bridge core.
800  * @param[out]	core		If found, the matching host bridge core info.
801  *
802  * @retval 0		success
803  * @retval ENOENT	not found
804  */
805 int
806 bhndb_find_hostb_core(struct bhnd_core_info *cores, u_int ncores,
807     bhnd_devclass_t bridge_devclass, struct bhnd_core_info *core)
808 {
809 	struct bhnd_core_match	 md;
810 	struct bhnd_core_info	*match;
811 	u_int			 match_core_idx;
812 
813 	/* Set up a match descriptor for the required device class. */
814 	md = (struct bhnd_core_match) {
815 		BHND_MATCH_CORE_CLASS(bridge_devclass),
816 		BHND_MATCH_CORE_UNIT(0)
817 	};
818 
819 	/* Find the matching core with the lowest core index */
820 	match = NULL;
821 	match_core_idx = UINT_MAX;
822 
823 	for (u_int i = 0; i < ncores; i++) {
824 		if (!bhnd_core_matches(&cores[i], &md))
825 			continue;
826 
827 		/* Lower core indices take precedence */
828 		if (match != NULL && match_core_idx < match->core_idx)
829 			continue;
830 
831 		match = &cores[i];
832 		match_core_idx = match->core_idx;
833 	}
834 
835 	if (match == NULL)
836 		return (ENOENT);
837 
838 	*core = *match;
839 	return (0);
840 }
841 
842 /**
843  * Allocate a host interrupt source and its backing SYS_RES_IRQ host resource.
844  *
845  * @param owner	The device to be used to allocate a SYS_RES_IRQ
846  *		resource with @p rid.
847  * @param rid	The resource ID of the IRQ to be allocated.
848  * @param start	The start value to be passed to bus_alloc_resource().
849  * @param end	The end value to be passed to bus_alloc_resource().
850  * @param count	The count to be passed to bus_alloc_resource().
851  * @param flags	The flags to be passed to bus_alloc_resource().
852  *
853  * @retval non-NULL	success
854  * @retval NULL		if allocation fails.
855  */
856 struct bhndb_intr_isrc *
857 bhndb_alloc_intr_isrc(device_t owner, int rid, rman_res_t start, rman_res_t end,
858     rman_res_t count, u_int flags)
859 {
860 	struct bhndb_intr_isrc *isrc;
861 
862 	isrc = malloc(sizeof(*isrc), M_BHND, M_NOWAIT);
863 	if (isrc == NULL)
864 		return (NULL);
865 
866 	isrc->is_owner = owner;
867 	isrc->is_rid = rid;
868 	isrc->is_res = bus_alloc_resource(owner, SYS_RES_IRQ, &isrc->is_rid,
869 	    start, end, count, flags);
870 	if (isrc->is_res == NULL) {
871 		free(isrc, M_BHND);
872 		return (NULL);
873 	}
874 
875 	return (isrc);
876 }
877 
878 /**
879  * Free a host interrupt source and its backing host resource.
880  *
881  * @param isrc	The interrupt source to be freed.
882  */
void
bhndb_free_intr_isrc(struct bhndb_intr_isrc *isrc)
{
	/* Release the backing SYS_RES_IRQ resource before freeing the
	 * container itself */
	bus_release_resource(isrc->is_owner, SYS_RES_IRQ, isrc->is_rid,
	    isrc->is_res);
	free(isrc, M_BHND);
}
890 
891 /**
892  * Allocate and initialize a new interrupt handler entry.
893  *
894  * @param owner	The child device that owns this entry.
895  * @param r	The child's interrupt resource.
896  * @param isrc	The isrc mapped for this entry.
897  *
898  * @retval non-NULL	success
899  * @retval NULL		if allocation fails.
900  */
901 struct bhndb_intr_handler *
902 bhndb_alloc_intr_handler(device_t owner, struct resource *r,
903     struct bhndb_intr_isrc *isrc)
904 {
905 	struct bhndb_intr_handler *ih;
906 
907 	ih = malloc(sizeof(*ih), M_BHND, M_NOWAIT | M_ZERO);
908 	ih->ih_owner = owner;
909 	ih->ih_res = r;
910 	ih->ih_isrc = isrc;
911 	ih->ih_cookiep = NULL;
912 	ih->ih_active = false;
913 
914 	return (ih);
915 }
916 
917 /**
918  * Free an interrupt handler entry.
919  *
920  * @param br The resource state owning @p ih.
921  * @param ih The interrupt handler entry to be removed.
922  */
void
bhndb_free_intr_handler(struct bhndb_intr_handler *ih)
{
	/* The handler must have been deregistered (ih_active cleared via
	 * bhndb_deregister_intr_handler()) before it may be freed */
	KASSERT(!ih->ih_active, ("free of active interrupt handler %p",
	    ih->ih_cookiep));

	free(ih, M_BHND);
}
931 
932 /**
933  * Add an active interrupt handler to the given resource state.
934   *
935  * @param br The resource state to be modified.
936  * @param ih The interrupt handler entry to be added.
937  */
void
bhndb_register_intr_handler(struct bhndb_resources *br,
    struct bhndb_intr_handler *ih)
{
	KASSERT(!ih->ih_active, ("duplicate registration of interrupt "
	    "handler %p", ih->ih_cookiep));
	/* The bus cookie must be assigned before registration; it is the
	 * key used by bhndb_find_intr_handler() */
	KASSERT(ih->ih_cookiep != NULL, ("missing cookiep"));

	ih->ih_active = true;
	STAILQ_INSERT_HEAD(&br->bus_intrs, ih, ih_link);
}
949 
950 /**
951  * Remove an interrupt handler from the given resource state.
952  *
953  * @param br The resource state containing @p ih.
954  * @param ih The interrupt handler entry to be removed.
955  */
void
bhndb_deregister_intr_handler(struct bhndb_resources *br,
    struct bhndb_intr_handler *ih)
{
	KASSERT(ih->ih_active, ("duplicate deregistration of interrupt "
	    "handler %p", ih->ih_cookiep));

	/* The entry must actually be registered on this resource state */
	KASSERT(bhndb_find_intr_handler(br, ih) == ih,
	    ("unknown interrupt handler %p", ih));

	STAILQ_REMOVE(&br->bus_intrs, ih, bhndb_intr_handler, ih_link);
	ih->ih_active = false;
}
969 
970 /**
971  * Return the interrupt handler entry corresponding to @p cookiep, or NULL
972  * if no entry is found.
973  *
974  * @param br The resource state to search for the given @p cookiep.
975  * @param cookiep The interrupt handler's bus-assigned cookiep value.
976  */
977 struct bhndb_intr_handler *
978 bhndb_find_intr_handler(struct bhndb_resources *br, void *cookiep)
979 {
980 	struct bhndb_intr_handler *ih;
981 
982 	STAILQ_FOREACH(ih, &br->bus_intrs, ih_link) {
983 		if (ih == cookiep)
984 			return (ih);
985 	}
986 
987 	/* Not found */
988 	return (NULL);
989 }
990 
991 /**
992  * Find the maximum start and end limits of the bridged resource @p r.
993  *
994  * If the resource is not currently mapped by the bridge, ENOENT will be
995  * returned.
996  *
997  * @param	br		The resource state to search.
998  * @param	type The resource type (see SYS_RES_*).
999  * @param	r The resource to search for in @p br.
1000  * @param[out]	start	On success, the minimum supported start address.
1001  * @param[out]	end	On success, the maximum supported end address.
1002  *
1003  * @retval 0		success
1004  * @retval ENOENT	no active mapping found for @p r of @p type
1005  */
1006 int
1007 bhndb_find_resource_limits(struct bhndb_resources *br, int type,
1008     struct resource *r, rman_res_t *start, rman_res_t *end)
1009 {
1010 	struct bhndb_dw_alloc		*dynamic;
1011 	struct bhndb_region		*sregion;
1012 	struct bhndb_intr_handler	*ih;
1013 
1014 	switch (type) {
1015 	case SYS_RES_IRQ:
1016 		/* Is this one of ours? */
1017 		STAILQ_FOREACH(ih, &br->bus_intrs, ih_link) {
1018 			if (ih->ih_res == r)
1019 				continue;
1020 
1021 			/* We don't support adjusting IRQ resource limits */
1022 			*start = rman_get_start(r);
1023 			*end = rman_get_end(r);
1024 			return (0);
1025 		}
1026 
1027 		/* Not found */
1028 		return (ENOENT);
1029 
1030 	case SYS_RES_MEMORY: {
1031 		/* Check for an enclosing dynamic register window */
1032 		if ((dynamic = bhndb_dw_find_resource(br, r))) {
1033 			*start = dynamic->target;
1034 			*end = dynamic->target + dynamic->win->win_size - 1;
1035 			return (0);
1036 		}
1037 
1038 		/* Check for a static region */
1039 		sregion = bhndb_find_resource_region(br, rman_get_start(r),
1040 		rman_get_size(r));
1041 		if (sregion != NULL && sregion->static_regwin != NULL) {
1042 			*start = sregion->addr;
1043 			*end = sregion->addr + sregion->size - 1;
1044 
1045 			return (0);
1046 		}
1047 
1048 		/* Not found */
1049 		return (ENOENT);
1050 	}
1051 
1052 	default:
1053 		device_printf(br->dev, "unknown resource type: %d\n", type);
1054 		return (ENOENT);
1055 	}
1056 }
1057 
1058 /**
1059  * Add a bus region entry to @p r for the given base @p addr and @p size.
1060  *
1061  * @param br The resource state to which the bus region entry will be added.
1062  * @param addr The base address of this region.
1063  * @param size The size of this region.
1064  * @param priority The resource priority to be assigned to allocations
1065  * made within this bus region.
1066  * @param alloc_flags resource allocation flags (@see bhndb_alloc_flags)
1067  * @param static_regwin If available, a static register window mapping this
1068  * bus region entry. If not available, NULL.
1069  *
1070  * @retval 0 success
1071  * @retval non-zero if adding the bus region fails.
1072  */
1073 int
1074 bhndb_add_resource_region(struct bhndb_resources *br, bhnd_addr_t addr,
1075     bhnd_size_t size, bhndb_priority_t priority, uint32_t alloc_flags,
1076     const struct bhndb_regwin *static_regwin)
1077 {
1078 	struct bhndb_region	*reg;
1079 
1080 	/* Insert in the bus resource list */
1081 	reg = malloc(sizeof(*reg), M_BHND, M_NOWAIT);
1082 	if (reg == NULL)
1083 		return (ENOMEM);
1084 
1085 	*reg = (struct bhndb_region) {
1086 		.addr = addr,
1087 		.size = size,
1088 		.priority = priority,
1089 		.alloc_flags = alloc_flags,
1090 		.static_regwin = static_regwin
1091 	};
1092 
1093 	STAILQ_INSERT_HEAD(&br->bus_regions, reg, link);
1094 
1095 	return (0);
1096 }
1097 
1098 /**
1099  * Return true if a mapping of @p size bytes at @p addr is provided by either
1100  * one contiguous bus region, or by multiple discontiguous regions.
1101  *
1102  * @param br The resource state to query.
1103  * @param addr The requested starting address.
1104  * @param size The requested size.
1105  */
bool
bhndb_has_static_region_mapping(struct bhndb_resources *br,
    bhnd_addr_t addr, bhnd_size_t size)
{
	struct bhndb_region	*region;
	bhnd_addr_t		 r_addr;

	/* Walk the requested range region-by-region; every region found
	 * must be statically mapped for the whole range to qualify */
	r_addr = addr;
	while ((region = bhndb_find_resource_region(br, r_addr, 1)) != NULL) {
		/* Must be backed by a static register window */
		if (region->static_regwin == NULL)
			return (false);

		/* Adjust the search offset */
		/* NOTE(review): advances by the full region size, not by the
		 * remainder from r_addr; assumes each region starts exactly
		 * at the current search address — confirm against callers. */
		r_addr += region->size;

		/* Have we traversed a complete (if discontiguous) mapping? */
		if (r_addr == addr + size)
			return (true);

	}

	/* No complete mapping found */
	return (false);
}
1131 
1132 /**
1133  * Find the bus region that maps @p size bytes at @p addr.
1134  *
1135  * @param br The resource state to search.
1136  * @param addr The requested starting address.
1137  * @param size The requested size.
1138  *
1139  * @retval bhndb_region A region that fully contains the requested range.
1140  * @retval NULL If no mapping region can be found.
1141  */
1142 struct bhndb_region *
1143 bhndb_find_resource_region(struct bhndb_resources *br, bhnd_addr_t addr,
1144     bhnd_size_t size)
1145 {
1146 	struct bhndb_region *region;
1147 
1148 	STAILQ_FOREACH(region, &br->bus_regions, link) {
1149 		/* Request must fit within the region's mapping  */
1150 		if (addr < region->addr)
1151 			continue;
1152 
1153 		if (addr + size > region->addr + region->size)
1154 			continue;
1155 
1156 		return (region);
1157 	}
1158 
1159 	/* Not found */
1160 	return (NULL);
1161 }
1162 
1163 /**
1164  * Find the entry matching @p r in @p dwa's references, if any.
1165  *
1166  * @param dwa The dynamic window allocation to search
1167  * @param r The resource to search for in @p dwa.
1168  */
1169 static struct bhndb_dw_rentry *
1170 bhndb_dw_find_resource_entry(struct bhndb_dw_alloc *dwa, struct resource *r)
1171 {
1172 	struct bhndb_dw_rentry	*rentry;
1173 
1174 	LIST_FOREACH(rentry, &dwa->refs, dw_link) {
1175 		struct resource *dw_res = rentry->dw_res;
1176 
1177 		/* Match dev/rid/addr/size */
1178 		if (rman_get_device(dw_res)	!= rman_get_device(r) ||
1179 			rman_get_rid(dw_res)	!= rman_get_rid(r) ||
1180 			rman_get_start(dw_res)	!= rman_get_start(r) ||
1181 			rman_get_size(dw_res)	!= rman_get_size(r))
1182 		{
1183 			continue;
1184 		}
1185 
1186 		/* Matching allocation found */
1187 		return (rentry);
1188 	}
1189 
1190 	return (NULL);
1191 }
1192 
1193 /**
1194  * Find the dynamic region allocated for @p r, if any.
1195  *
1196  * @param br The resource state to search.
1197  * @param r The resource to search for.
1198  *
1199  * @retval bhndb_dw_alloc The allocation record for @p r.
1200  * @retval NULL if no dynamic window is allocated for @p r.
1201  */
1202 struct bhndb_dw_alloc *
1203 bhndb_dw_find_resource(struct bhndb_resources *br, struct resource *r)
1204 {
1205 	struct bhndb_dw_alloc	*dwa;
1206 
1207 	for (size_t i = 0; i < br->dwa_count; i++) {
1208 		dwa = &br->dw_alloc[i];
1209 
1210 		/* Skip free dynamic windows */
1211 		if (bhndb_dw_is_free(br, dwa))
1212 			continue;
1213 
1214 		/* Matching allocation found? */
1215 		if (bhndb_dw_find_resource_entry(dwa, r) != NULL)
1216 			return (dwa);
1217 	}
1218 
1219 	return (NULL);
1220 }
1221 
1222 /**
1223  * Find an existing dynamic window mapping @p size bytes
1224  * at @p addr. The window may or may not be free.
1225  *
1226  * @param br The resource state to search.
1227  * @param addr The requested starting address.
1228  * @param size The requested size.
1229  *
1230  * @retval bhndb_dw_alloc A window allocation that fully contains the requested
1231  * range.
1232  * @retval NULL If no mapping region can be found.
1233  */
1234 struct bhndb_dw_alloc *
1235 bhndb_dw_find_mapping(struct bhndb_resources *br, bhnd_addr_t addr,
1236     bhnd_size_t size)
1237 {
1238 	struct bhndb_dw_alloc		*dwr;
1239 	const struct bhndb_regwin	*win;
1240 
1241 	/* Search for an existing dynamic mapping of this address range. */
1242 	for (size_t i = 0; i < br->dwa_count; i++) {
1243 		dwr = &br->dw_alloc[i];
1244 		win = dwr->win;
1245 
1246 		/* Verify the range */
1247 		if (addr < dwr->target)
1248 			continue;
1249 
1250 		if (addr + size > dwr->target + win->win_size)
1251 			continue;
1252 
1253 		/* Found a usable mapping */
1254 		return (dwr);
1255 	}
1256 
1257 	/* not found */
1258 	return (NULL);
1259 }
1260 
1261 /**
1262  * Retain a reference to @p dwa for use by @p res.
1263  *
1264  * @param br The resource state owning @p dwa.
1265  * @param dwa The allocation record to be retained.
1266  * @param res The resource that will own a reference to @p dwa.
1267  *
1268  * @retval 0 success
1269  * @retval ENOMEM Failed to allocate a new reference structure.
1270  */
1271 int
1272 bhndb_dw_retain(struct bhndb_resources *br, struct bhndb_dw_alloc *dwa,
1273     struct resource *res)
1274 {
1275 	struct bhndb_dw_rentry *rentry;
1276 
1277 	KASSERT(bhndb_dw_find_resource_entry(dwa, res) == NULL,
1278 	    ("double-retain of dynamic window for same resource"));
1279 
1280 	/* Insert a reference entry; we use M_NOWAIT to allow use from
1281 	 * within a non-sleepable lock */
1282 	rentry = malloc(sizeof(*rentry), M_BHND, M_NOWAIT);
1283 	if (rentry == NULL)
1284 		return (ENOMEM);
1285 
1286 	rentry->dw_res = res;
1287 	LIST_INSERT_HEAD(&dwa->refs, rentry, dw_link);
1288 
1289 	/* Update the free list */
1290 	bit_set(br->dwa_freelist, dwa->rnid);
1291 
1292 	return (0);
1293 }
1294 
/**
 * Release a reference to @p dwa previously retained by @p r. If the
 * reference count of @p dwa reaches zero, it will be added to the
 * free list.
 *
 * @param br The resource state owning @p dwa.
 * @param dwa The allocation record to be released.
 * @param r The resource that currently owns a reference to @p dwa.
 */
void
bhndb_dw_release(struct bhndb_resources *br, struct bhndb_dw_alloc *dwa,
    struct resource *r)
{
	struct bhndb_dw_rentry	*rentry;

	/* Find the rentry; releasing an unreferenced window is a fatal
	 * programming error */
	rentry = bhndb_dw_find_resource_entry(dwa, r);
	KASSERT(rentry != NULL, ("over release of resource entry"));

	LIST_REMOVE(rentry, dw_link);
	free(rentry, M_BHND);

	/* If this was the last reference, update the free list */
	if (LIST_EMPTY(&dwa->refs))
		bit_clear(br->dwa_freelist, dwa->rnid);
}
1321 
/**
 * Attempt to set (or reset) the target address of @p dwa to map @p size bytes
 * at @p addr.
 *
 * This will apply any necessary window alignment and verify that
 * the window is capable of mapping the requested range prior to modifying
 * the record.
 *
 * @param dev The device on which to issue the BHNDB_SET_WINDOW_ADDR() request.
 * @param br The resource state owning @p dwa.
 * @param dwa The allocation record to be configured.
 * @param addr The address to be mapped via @p dwa.
 * @param size The number of bytes to be mapped at @p addr.
 *
 * @retval 0 success
 * @retval non-zero no usable register window available.
 */
int
bhndb_dw_set_addr(device_t dev, struct bhndb_resources *br,
    struct bhndb_dw_alloc *dwa, bus_addr_t addr, bus_size_t size)
{
	const struct bhndb_regwin	*rw;
	bus_addr_t			 offset;
	int				 error;

	rw = dwa->win;

	/* The window must either be free, or the caller must hold the
	 * steal lock (reprogramming a temporarily stolen window) */
	KASSERT(bhndb_dw_is_free(br, dwa) || mtx_owned(&br->dw_steal_mtx),
	    ("attempting to set the target address on an in-use window"));

	/* Page-align the target address */
	offset = addr % rw->win_size;
	dwa->target = addr - offset;

	/* Verify that the window is large enough for the full target */
	/* NOTE(review): dwa->target was already updated above; on ENOMEM
	 * the record's target no longer matches the hardware — confirm
	 * callers treat the record as invalid on error. */
	if (rw->win_size - offset < size)
		return (ENOMEM);

	/* Update the window target */
	error = BHNDB_SET_WINDOW_ADDR(dev, dwa->win, dwa->target);
	if (error) {
		/* Reset the record's target on hardware update failure */
		dwa->target = 0x0;
		return (error);
	}

	return (0);
}
1369 
1370 /**
1371  * Steal an in-use allocation record from @p br, returning the record's current
1372  * target in @p saved on success.
1373  *
1374  * This function acquires a mutex and disables interrupts; callers should
1375  * avoid holding a stolen window longer than required to issue an I/O
1376  * request.
1377  *
1378  * A successful call to bhndb_dw_steal() must be balanced with a call to
1379  * bhndb_dw_return_stolen().
1380  *
1381  * @param br The resource state from which a window should be stolen.
1382  * @param saved The stolen window's saved target address.
1383  *
1384  * @retval non-NULL success
1385  * @retval NULL no dynamic window regions are defined.
1386  */
struct bhndb_dw_alloc *
bhndb_dw_steal(struct bhndb_resources *br, bus_addr_t *saved)
{
	struct bhndb_dw_alloc *dw_stolen;

	/* Stealing is a last resort; the caller must have exhausted the
	 * free windows first */
	KASSERT(bhndb_dw_next_free(br) == NULL,
	    ("attempting to steal an in-use window while free windows remain"));

	/* Nothing to steal from? */
	if (br->dwa_count == 0)
		return (NULL);

	/*
	 * Acquire our steal spinlock; this will be released in
	 * bhndb_dw_return_stolen().
	 *
	 * Acquiring also disables interrupts, which is required when one is
	 * stealing an in-use existing register window.
	 */
	mtx_lock_spin(&br->dw_steal_mtx);

	/* Always steal the first window, saving its current target so
	 * that bhndb_dw_return_stolen() can restore it */
	dw_stolen = &br->dw_alloc[0];
	*saved = dw_stolen->target;
	return (dw_stolen);
}
1412 
1413 /**
1414  * Return an allocation record previously stolen using bhndb_dw_steal().
1415  *
1416  * @param dev The device on which to issue a BHNDB_SET_WINDOW_ADDR() request.
1417  * @param br The resource state owning @p dwa.
1418  * @param dwa The allocation record to be returned.
1419  * @param saved The original target address provided by bhndb_dw_steal().
1420  */
1421 void
1422 bhndb_dw_return_stolen(device_t dev, struct bhndb_resources *br,
1423     struct bhndb_dw_alloc *dwa, bus_addr_t saved)
1424 {
1425 	int error;
1426 
1427 	mtx_assert(&br->dw_steal_mtx, MA_OWNED);
1428 
1429 	error = bhndb_dw_set_addr(dev, br, dwa, saved, 0);
1430 	if (error) {
1431 		panic("failed to restore register window target %#jx: %d\n",
1432 		    (uintmax_t)saved, error);
1433 	}
1434 
1435 	mtx_unlock_spin(&br->dw_steal_mtx);
1436 }
1437 
1438 /**
1439  * Return the count of @p type register windows in @p table.
1440  *
1441  * @param table The table to search.
1442  * @param type The required window type, or BHNDB_REGWIN_T_INVALID to
1443  * count all register window types.
1444  */
1445 size_t
1446 bhndb_regwin_count(const struct bhndb_regwin *table,
1447     bhndb_regwin_type_t type)
1448 {
1449 	const struct bhndb_regwin	*rw;
1450 	size_t				 count;
1451 
1452 	count = 0;
1453 	for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++) {
1454 		if (type == BHNDB_REGWIN_T_INVALID || rw->win_type == type)
1455 			count++;
1456 	}
1457 
1458 	return (count);
1459 }
1460 
1461 /**
1462  * Search @p table for the first window with the given @p type.
1463  *
1464  * @param table The table to search.
1465  * @param type The required window type.
1466  * @param min_size The minimum window size.
1467  *
1468  * @retval bhndb_regwin The first matching window.
1469  * @retval NULL If no window of the requested type could be found.
1470  */
1471 const struct bhndb_regwin *
1472 bhndb_regwin_find_type(const struct bhndb_regwin *table,
1473     bhndb_regwin_type_t type, bus_size_t min_size)
1474 {
1475 	const struct bhndb_regwin *rw;
1476 
1477 	for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++)
1478 	{
1479 		if (rw->win_type == type && rw->win_size >= min_size)
1480 			return (rw);
1481 	}
1482 
1483 	return (NULL);
1484 }
1485 
1486 /**
1487  * Search @p windows for the first matching core window.
1488  *
1489  * @param table The table to search.
1490  * @param class The required core class.
1491  * @param unit The required core unit, or -1.
1492  * @param port_type The required port type.
1493  * @param port The required port.
1494  * @param region The required region.
1495  * @param offset The required readable core register block offset.
1496  * @param min_size The required minimum readable size at @p offset.
1497  *
1498  * @retval bhndb_regwin The first matching window.
1499  * @retval NULL If no matching window was found.
1500  */
const struct bhndb_regwin *
bhndb_regwin_find_core(const struct bhndb_regwin *table, bhnd_devclass_t class,
    int unit, bhnd_port_type port_type, u_int port, u_int region,
    bus_size_t offset, bus_size_t min_size)
{
	const struct bhndb_regwin *rw;

	/* Scan until the BHNDB_REGWIN_T_INVALID table terminator */
	for (rw = table; rw->win_type != BHNDB_REGWIN_T_INVALID; rw++)
	{
		bus_size_t rw_offset;

		/* Match on core, port, and region attributes */
		if (rw->win_type != BHNDB_REGWIN_T_CORE)
			continue;

		if (rw->d.core.class != class)
			continue;

		/* A unit of -1 acts as a wildcard */
		if (unit != -1 && rw->d.core.unit != unit)
			continue;

		if (rw->d.core.port_type != port_type)
			continue;

		if (rw->d.core.port != port)
			continue;

		if (rw->d.core.region != region)
			continue;

		/* Verify that the requested range is mapped within
		 * this register window */
		if (rw->d.core.offset > offset)
			continue;

		/* Offset of the request relative to the window's base */
		rw_offset = offset - rw->d.core.offset;

		/* Checked separately to avoid unsigned underflow in the
		 * min_size comparison below */
		if (rw->win_size < rw_offset)
			continue;

		if (rw->win_size - rw_offset < min_size)
			continue;

		return (rw);
	}

	return (NULL);
}
1549 
1550 /**
1551  * Search @p windows for the best available window of at least @p min_size.
1552  *
1553  * Search order:
1554  * - BHND_REGWIN_T_CORE
1555  * - BHND_REGWIN_T_DYN
1556  *
1557  * @param table The table to search.
1558  * @param class The required core class.
1559  * @param unit The required core unit, or -1.
1560  * @param port_type The required port type.
1561  * @param port The required port.
1562  * @param region The required region.
1563  * @param offset The required readable core register block offset.
1564  * @param min_size The required minimum readable size at @p offset.
1565  *
1566  * @retval bhndb_regwin The first matching window.
1567  * @retval NULL If no matching window was found.
1568  */
1569 const struct bhndb_regwin *
1570 bhndb_regwin_find_best(const struct bhndb_regwin *table,
1571     bhnd_devclass_t class, int unit, bhnd_port_type port_type, u_int port,
1572     u_int region, bus_size_t offset, bus_size_t min_size)
1573 {
1574 	const struct bhndb_regwin *rw;
1575 
1576 	/* Prefer a fixed core mapping */
1577 	rw = bhndb_regwin_find_core(table, class, unit, port_type,
1578 	    port, region, offset, min_size);
1579 	if (rw != NULL)
1580 		return (rw);
1581 
1582 	/* Fall back on a generic dynamic window */
1583 	return (bhndb_regwin_find_type(table, BHNDB_REGWIN_T_DYN, min_size));
1584 }
1585 
1586 /**
1587  * Return true if @p regw defines a BHNDB_REGWIN_T_CORE register window
1588  * that matches against @p core.
1589  *
1590  * @param regw A register window to match against.
1591  * @param core The bhnd(4) core info to match against @p regw.
1592  */
1593 bool
1594 bhndb_regwin_match_core(const struct bhndb_regwin *regw,
1595     struct bhnd_core_info *core)
1596 {
1597 	/* Only core windows are supported */
1598 	if (regw->win_type != BHNDB_REGWIN_T_CORE)
1599 		return (false);
1600 
1601 	/* Device class must match */
1602 	if (bhnd_core_class(core) != regw->d.core.class)
1603 		return (false);
1604 
1605 	/* Device unit must match */
1606 	if (core->unit != regw->d.core.unit)
1607 		return (false);
1608 
1609 	/* Matches */
1610 	return (true);
1611 }
1612 
1613 /**
1614  * Search for a core resource priority descriptor in @p table that matches
1615  * @p core.
1616  *
1617  * @param table The table to search.
1618  * @param core The core to match against @p table.
1619  */
1620 const struct bhndb_hw_priority *
1621 bhndb_hw_priority_find_core(const struct bhndb_hw_priority *table,
1622     struct bhnd_core_info *core)
1623 {
1624 	const struct bhndb_hw_priority	*hp;
1625 
1626 	for (hp = table; hp->ports != NULL; hp++) {
1627 		if (bhnd_core_matches(core, &hp->match))
1628 			return (hp);
1629 	}
1630 
1631 	/* not found */
1632 	return (NULL);
1633 }
1634 
1635 
1636 /**
1637  * Search for a port resource priority descriptor in @p table.
1638  *
1639  * @param table The table to search.
1640  * @param core The core to match against @p table.
1641  * @param port_type The required port type.
1642  * @param port The required port.
1643  * @param region The required region.
1644  */
1645 const struct bhndb_port_priority *
1646 bhndb_hw_priorty_find_port(const struct bhndb_hw_priority *table,
1647     struct bhnd_core_info *core, bhnd_port_type port_type, u_int port,
1648     u_int region)
1649 {
1650 	const struct bhndb_hw_priority		*hp;
1651 
1652 	if ((hp = bhndb_hw_priority_find_core(table, core)) == NULL)
1653 		return (NULL);
1654 
1655 	for (u_int i = 0; i < hp->num_ports; i++) {
1656 		const struct bhndb_port_priority *pp = &hp->ports[i];
1657 
1658 		if (pp->type != port_type)
1659 			continue;
1660 
1661 		if (pp->port != port)
1662 			continue;
1663 
1664 		if (pp->region != region)
1665 			continue;
1666 
1667 		return (pp);
1668 	}
1669 
1670 	/* not found */
1671 	return (NULL);
1672 }
1673