/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
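
/*
 * A minimal usage sketch (hypothetical; every identifier here other than
 * the rman API itself is invented for illustration): a bus driver
 * typically creates an rman at attach time, hands it the regions it
 * owns, and later carves allocations out of those regions on behalf of
 * its children.
 *
 *	static struct rman irq_rman;	// zeroed, so rman_init() defaults
 *					// the range to [0, ~0]
 *
 *	irq_rman.rm_type = RMAN_ARRAY;
 *	irq_rman.rm_descr = "example IRQ lines";
 *	if (rman_init(&irq_rman) != 0 ||
 *	    rman_manage_region(&irq_rman, 0, 15) != 0)
 *		panic("example rman setup failed");
 */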

#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	rman_res_t	r_start;	/* index of the first entry in this resource */
	rman_res_t	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	void	*r_irq_cookie;	/* interrupt cookie for this (interrupt) resource */
	device_t r_dev;	/* device which has allocated this resource */
	struct rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
	int	r_type;		/* optional type for this resource. */
};
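
/*
 * Note on the layout above: the public struct resource handed out to
 * consumers is embedded here as r_r, and its __r_i member points back at
 * the containing resource_i (see int_alloc_resource() below).  The
 * accessors near the bottom of this file simply chase that back-pointer,
 * e.g. rman_get_start() returns r->__r_i->r_start.
 */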

static int rman_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RWTUN,
    &rman_debug, 0, "rman debug");

#define DPRINTF(...) do { if (rman_debug) printf(__VA_ARGS__); } while (0)

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct mtx rman_mtx; /* mutex to protect rman_head */
static int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

int
rman_manage_region(struct rman *rm, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF("%s: <%s> request: start %#jx, end %#jx\n", __func__,
	    rm->rm_descr, start, end);
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ~0)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return rv;
}
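
/*
 * An illustration of the merge logic above, with hypothetical values: if
 * a manager already tracks the idle region [0x000, 0x0ff] and a caller
 * adds [0x100, 0x1ff], the two regions are exactly adjacent and both
 * idle, so they are coalesced into a single [0x000, 0x1ff] entry.
 * Adding an overlapping region such as [0x080, 0x180] instead fails
 * with EBUSY.
 */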

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

int
rman_first_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

/* Shrink or extend one or both ends of an allocated resource. */
int
rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t, *new;
	struct rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & RF_SHAREABLE)
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}
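
/*
 * Example of the above, with hypothetical values: given a free region
 * [0x0, 0xfff] from which [0x100, 0x1ff] was reserved,
 *
 *	error = rman_adjust_resource(res, 0x100, 0x2ff);
 *
 * grows the allocation's end into the idle neighbor, leaving free
 * regions [0x0, 0xff] and [0x300, 0xfff].  A new range that does not
 * overlap the existing one at all fails with EINVAL, and growing into
 * an allocated neighbor fails with EBUSY.
 */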

#define	SHARE_TYPE(f)	((f) & (RF_SHAREABLE | RF_PREFETCHABLE))

struct resource *
rman_reserve_resource(struct rman *rm, rman_res_t start, rman_res_t end,
			    rman_res_t count, u_int flags, device_t dev)
{
	u_int new_rflags;
	struct resource_i *r, *s, *rv;
	rman_res_t rstart, rend, amask;

	rv = NULL;

	DPRINTF("%s: <%s> request: [%#jx, %#jx], length %#jx, flags %x, "
	    "device %s\n", __func__, rm->rm_descr, start, end, count, flags,
	    dev == NULL ? "<null>" : device_get_nameunit(dev));
	KASSERT(count != 0, ("%s: attempted to allocate an empty range",
	    __func__));
	KASSERT((flags & RF_FIRSTSHARE) == 0,
	    ("invalid flags %#x", flags));
	new_rflags = (flags & ~RF_FIRSTSHARE) | RF_ALLOCATED;

	mtx_lock(rm->rm_mtx);

	r = TAILQ_FIRST(&rm->rm_list);
	if (r == NULL)
		DPRINTF("NULL list head\n");
	else
		DPRINTF("%s: trying %#jx <%#jx,%#jx>\n", __func__, r->r_end,
		    start, count-1);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link))
		DPRINTF("%s: tried %#jx <%#jx,%#jx>\n", __func__, r->r_end,
		    start, count-1);

	if (r == NULL) {
		DPRINTF("could not find a region\n");
		goto out;
	}

	amask = (1ull << RF_ALIGNMENT(flags)) - 1;
	KASSERT(start <= RM_MAX_END - amask,
	    ("start (%#jx) + amask (%#jx) would wrap around", start, amask));

	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF("considering [%#jx, %#jx]\n", s->r_start, s->r_end);
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF("s->r_start (%#jx) + count - 1 > end (%#jx)\n",
			    s->r_start, end);
			break;
		}
		if (s->r_start > RM_MAX_END - amask) {
			DPRINTF("s->r_start (%#jx) + amask (%#jx) too large\n",
			    s->r_start, amask);
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF("region is allocated\n");
			continue;
		}
		rstart = ummax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ummin(s->r_end, ummax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF("adjusted start exceeds end\n");
			continue;
		}
		DPRINTF("truncated region: [%#jx, %#jx]; size %#jx (requested %#jx)\n",
		       rstart, rend, (rend - rstart + 1), count);

		if ((rend - rstart) >= (count - 1)) {
			DPRINTF("candidate region: [%#jx, %#jx], size %#jx\n",
			       rstart, rend, (rend - rstart + 1));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF("candidate region is entire chunk\n");
				rv = s;
				rv->r_flags = new_rflags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF("splitting region in three parts: "
				       "[%#jx, %#jx]; [%#jx, %#jx]; [%#jx, %#jx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end);
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF("allocating from the beginning\n");
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF("allocating at the end\n");
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF("no unshared regions found\n");
	if ((flags & RF_SHAREABLE) == 0)
		goto out;

	for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
		if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
		    s->r_start >= start &&
		    (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}
	/*
	 * We couldn't find anything.
	 */

out:
	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}
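
/*
 * Reservation sketch (hypothetical; irq_rman and dev are stand-ins for a
 * real manager and device): ask for any 0x100-index range within
 * [0x0, 0xfff], aligned on a 0x100 boundary:
 *
 *	struct resource *res;
 *
 *	res = rman_reserve_resource(&irq_rman, 0x0, 0xfff, 0x100,
 *	    rman_make_alignment_flags(0x100), dev);
 *	if (res == NULL)
 *		return (ENXIO);
 *
 * Adding RF_SHAREABLE to the flags additionally allows the second pass
 * above to hand back an existing, identically-sized allocation to a new
 * sharer.
 */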

int
rman_activate_resource(struct resource *re)
{
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	r->r_flags |= RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	r->__r_i->r_flags &= ~RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		r->r_flags &= ~RF_ACTIVE;

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}
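
/*
 * For example, with hypothetical values: releasing an allocated
 * [0x100, 0x1ff] that sits between idle regions [0x0, 0xff] and
 * [0x200, 0x2ff] collapses all three entries into a single free
 * [0x0, 0x2ff]; if both neighbors are still allocated, the entry is
 * simply marked unallocated in place.
 */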

int
rman_release_resource(struct resource *re)
{
	int rv;
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	if (__predict_false(size == 0))
		return (0);
	return (RF_ALIGNMENT_LOG2(flsl(size - 1)));
}
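
/*
 * Worked example: for size 0x1000, flsl(0xfff) == 12, so the returned
 * flags encode a 2^12 (4 KB) alignment; for a non-power-of-two size such
 * as 0x1800, flsl(0x17ff) == 13, rounding the alignment up to 2^13.
 */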

rman_res_t
rman_get_start(const struct resource *r)
{

	return (r->__r_i->r_start);
}

rman_res_t
rman_get_end(const struct resource *r)
{

	return (r->__r_i->r_end);
}

rman_res_t
rman_get_size(const struct resource *r)
{

	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(const struct resource *r)
{

	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{

	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(const struct resource *r)
{

	return (r->__r_i->r_virtual);
}

void
rman_set_irq_cookie(struct resource *r, void *c)
{

	r->__r_i->r_irq_cookie = c;
}

void *
rman_get_irq_cookie(const struct resource *r)
{

	return (r->__r_i->r_irq_cookie);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{

	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(const struct resource *r)
{

	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{

	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(const struct resource *r)
{

	return (r->r_bushandle);
}

void
rman_set_mapping(struct resource *r, struct resource_map *map)
{

	KASSERT(rman_get_size(r) == map->r_size,
	    ("rman_set_mapping: size mismatch"));
	rman_set_bustag(r, map->r_bustag);
	rman_set_bushandle(r, map->r_bushandle);
	rman_set_virtual(r, map->r_vaddr);
}

void
rman_get_mapping(const struct resource *r, struct resource_map *map)
{

	map->r_bustag = rman_get_bustag(r);
	map->r_bushandle = rman_get_bushandle(r);
	map->r_size = rman_get_size(r);
	map->r_vaddr = rman_get_virtual(r);
}

void
rman_set_rid(struct resource *r, int rid)
{

	r->__r_i->r_rid = rid;
}

int
rman_get_rid(const struct resource *r)
{

	return (r->__r_i->r_rid);
}

void
rman_set_type(struct resource *r, int type)
{
	r->__r_i->r_type = type;
}

int
rman_get_type(const struct resource *r)
{
	return (r->__r_i->r_type);
}

void
rman_set_device(struct resource *r, device_t dev)
{

	r->__r_i->r_dev = dev;
}

device_t
rman_get_device(const struct resource *r)
{

	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(const struct resource *r, const struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take three input parameters: the bus data generation, the index
 * into the list of resource managers, and the resource offset into
 * that manager's list.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		} else if (res_idx-- == 0)
			goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD | CTLFLAG_MPSAFE,
    sysctl_rman,
    "kernel resource manager");
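
/*
 * The OID suffix supplied by a caller is { generation, rman-index,
 * resource-index }: a resource-index of -1 returns the struct u_rman
 * describing the manager itself, while non-negative indices walk the
 * (possibly shared) resource entries in order.  Userland tools such as
 * devinfo(8) consume this node through libdevinfo.
 */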

#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%jx-0x%jx full range)\n",
	    rm, rm->rm_descr, (rman_res_t)rm->rm_start, (rman_res_t)rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%jx-0x%jx (RID=%d) ",
		    r->r_start, r->r_end, r->r_rid);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND_FLAGS(rmans, db_show_rmans, DB_CMD_MEMSAFE)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS_FLAGS(allrman, db_show_all_rman, DB_CMD_MEMSAFE);
#endif