xref: /freebsd/sys/kern/subr_rman.c (revision 9336e0699bda8a301cd2bfa37106b6ec5e32012e)
1 /*-
2  * Copyright 1998 Massachusetts Institute of Technology
3  *
4  * Permission to use, copy, modify, and distribute this software and
5  * its documentation for any purpose and without fee is hereby
6  * granted, provided that both the above copyright notice and this
7  * permission notice appear in all copies, that both the above
8  * copyright notice and this permission notice appear in all
9  * supporting documentation, and that the name of M.I.T. not be used
10  * in advertising or publicity pertaining to distribution of the
11  * software without specific, written prior permission.  M.I.T. makes
12  * no representations about the suitability of this software for any
13  * purpose.  It is provided "as is" without express or implied
14  * warranty.
15  *
16  * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
17  * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
18  * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
19  * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
20  * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
21  * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22  * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
23  * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
24  * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
25  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
26  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  */
29 
30 /*
31  * The kernel resource manager.  This code is responsible for keeping track
32  * of hardware resources which are apportioned out to various drivers.
33  * It does not actually assign those resources, and it is not expected
34  * that end-device drivers will call into this code directly.  Rather,
35  * the code which implements the buses that those devices are attached to,
36  * and the code which manages CPU resources, will call this code, and the
37  * end-device drivers will make upcalls to that code to actually perform
38  * the allocation.
39  *
40  * There are two sorts of resources managed by this code.  The first is
41  * the more familiar array (RMAN_ARRAY) type; resources in this class
42  * consist of a sequence of individually-allocatable objects which have
43  * been numbered in some well-defined order.  Most of the resources
44  * are of this type, as it is the most familiar.  The second type is
45  * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
46  * resources in which each instance is indistinguishable from every
47  * other instance).  The principal anticipated application of gauges
48  * is in the context of power consumption, where a bus may have a specific
49  * power budget which all attached devices share.  RMAN_GAUGE is not
50  * implemented yet.
51  *
52  * For array resources, we make one simplifying assumption: two clients
53  * sharing the same resource must use the same range of indices.  That
54  * is to say, sharing of overlapping-but-not-identical regions is not
55  * permitted.
56  */
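/*
 * A minimal usage sketch (not compiled into the kernel; the softc layout,
 * device names, and address range below are hypothetical): a bus driver
 * typically initializes an rman at attach time, tells it which address
 * ranges the bus decodes, and later carves allocations out of those
 * ranges on behalf of its children.
 */
#if 0
static int
examplebus_setup_rman(struct examplebus_softc *sc)
{
	struct resource *res;
	int error;

	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_mem_rman.rm_descr = "examplebus memory window";
	error = rman_init(&sc->sc_mem_rman);
	if (error != 0)
		return (error);
	error = rman_manage_region(&sc->sc_mem_rman, 0xd0000000ul,
	    0xdffffffful);
	if (error != 0)
		return (error);

	/* Reserve and activate a 4 KB chunk anywhere in the window. */
	res = rman_reserve_resource(&sc->sc_mem_rman, 0xd0000000ul,
	    0xdffffffful, 0x1000, RF_ACTIVE, sc->sc_dev);
	if (res == NULL)
		return (ENXIO);
	return (0);
}
#endif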
57 
58 #include "opt_ddb.h"
59 
60 #include <sys/cdefs.h>
61 __FBSDID("$FreeBSD$");
62 
63 #include <sys/param.h>
64 #include <sys/systm.h>
65 #include <sys/kernel.h>
66 #include <sys/limits.h>
67 #include <sys/lock.h>
68 #include <sys/malloc.h>
69 #include <sys/mutex.h>
70 #include <sys/bus.h>		/* XXX debugging */
71 #include <machine/bus.h>
72 #include <sys/rman.h>
73 #include <sys/sysctl.h>
74 
75 #ifdef DDB
76 #include <ddb/ddb.h>
77 #endif
78 
79 /*
80  * We use a linked list rather than a bitmap because we need to be able to
81  * represent potentially huge objects (like all of a processor's physical
82  * address space).  That is also why the indices are defined to have type
83  * `unsigned long' -- that being the largest integral type in ISO C (1990).
84  * The 1999 version of C allows `long long'; we may need to switch to that
85  * at some point in the future, particularly if we want to support 36-bit
86  * addresses on IA32 hardware.
87  */
88 struct resource_i {
89 	struct resource		r_r;
90 	TAILQ_ENTRY(resource_i)	r_link;
91 	LIST_ENTRY(resource_i)	r_sharelink;
92 	LIST_HEAD(, resource_i)	*r_sharehead;
93 	u_long	r_start;	/* index of the first entry in this resource */
94 	u_long	r_end;		/* index of the last entry (inclusive) */
95 	u_int	r_flags;
96 	void	*r_virtual;	/* virtual address of this resource */
97 	struct	device *r_dev;	/* device which has allocated this resource */
98 	struct	rman *r_rm;	/* resource manager from whence this came */
99 	int	r_rid;		/* optional rid for this resource. */
100 };
101 
102 int     rman_debug = 0;
103 TUNABLE_INT("debug.rman_debug", &rman_debug);
104 SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
105     &rman_debug, 0, "rman debug");
106 
107 #define DPRINTF(params) if (rman_debug) printf params
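/*
 * The DPRINTF() tracing above can be enabled by setting the
 * debug.rman_debug loader tunable or, at run time, with
 * "sysctl debug.rman_debug=1".
 */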
108 
109 static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");
110 
111 struct	rman_head rman_head;
112 static	struct mtx rman_mtx; /* mutex to protect rman_head */
113 static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
114 				       struct resource_i **whohas);
115 static	int int_rman_deactivate_resource(struct resource_i *r);
116 static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);
117 
118 static __inline struct resource_i *
119 int_alloc_resource(int malloc_flag)
120 {
121 	struct resource_i *r;
122 
123 	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
124 	if (r != NULL) {
125 		r->r_r.__r_i = r;
126 	}
127 	return (r);
128 }
129 
130 int
131 rman_init(struct rman *rm)
132 {
133 	static int once = 0;
134 
135 	if (once == 0) {
136 		once = 1;
137 		TAILQ_INIT(&rman_head);
138 		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
139 	}
140 
141 	if (rm->rm_type == RMAN_UNINIT)
142 		panic("rman_init");
143 	if (rm->rm_type == RMAN_GAUGE)
144 		panic("implement RMAN_GAUGE");
145 
146 	TAILQ_INIT(&rm->rm_list);
147 	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
148 	if (rm->rm_mtx == NULL)
149 		return ENOMEM;
150 	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);
151 
152 	mtx_lock(&rman_mtx);
153 	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
154 	mtx_unlock(&rman_mtx);
155 	return 0;
156 }
157 
158 int
159 rman_manage_region(struct rman *rm, u_long start, u_long end)
160 {
161 	struct resource_i *r, *s, *t;
162 
163 	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
164 	    rm->rm_descr, start, end));
165 	r = int_alloc_resource(M_NOWAIT);
166 	if (r == NULL)
167 		return ENOMEM;
168 	r->r_start = start;
169 	r->r_end = end;
170 	r->r_rm = rm;
171 
172 	mtx_lock(rm->rm_mtx);
173 
174 	/* Skip entries before us. */
175 	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
176 		if (s->r_end == ULONG_MAX)
177 			break;
178 		if (s->r_end + 1 >= r->r_start)
179 			break;
180 	}
181 
182 	/* If we ran off the end of the list, insert at the tail. */
183 	if (s == NULL) {
184 		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
185 	} else {
186 		/* Check for any overlap with the current region. */
187 		if (r->r_start <= s->r_end && r->r_end >= s->r_start)
188 			return EBUSY;
189 
190 		/* Check for any overlap with the next region. */
191 		t = TAILQ_NEXT(s, r_link);
192 		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start)
193 			return EBUSY;
194 
195 		/*
196 		 * See if this region can be merged with the next region.  If
197 		 * not, clear the pointer.
198 		 */
199 		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
200 			t = NULL;
201 
202 		/* See if we can merge with the current region. */
203 		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
204 			/* Can we merge all 3 regions? */
205 			if (t != NULL) {
206 				s->r_end = t->r_end;
207 				TAILQ_REMOVE(&rm->rm_list, t, r_link);
208 				free(r, M_RMAN);
209 				free(t, M_RMAN);
210 			} else {
211 				s->r_end = r->r_end;
212 				free(r, M_RMAN);
213 			}
214 		} else if (t != NULL) {
215 			/* Can we merge with just the next region? */
216 			t->r_start = r->r_start;
217 			free(r, M_RMAN);
218 		} else if (s->r_end < r->r_start) {
219 			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
220 		} else {
221 			TAILQ_INSERT_BEFORE(s, r, r_link);
222 		}
223 	}
224 
225 	mtx_unlock(rm->rm_mtx);
226 	return 0;
227 }
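/*
 * Illustrative example (the values are hypothetical): calling
 * rman_manage_region(rm, 0x0, 0xff) and then
 * rman_manage_region(rm, 0x100, 0x1ff) leaves a single free region
 * [0x0, 0x1ff] on the list, because adjacent unallocated regions are
 * coalesced by the merge logic above.
 */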
228 
229 int
230 rman_init_from_resource(struct rman *rm, struct resource *r)
231 {
232 	int rv;
233 
234 	if ((rv = rman_init(rm)) != 0)
235 		return (rv);
236 	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
237 }
238 
239 int
240 rman_fini(struct rman *rm)
241 {
242 	struct resource_i *r;
243 
244 	mtx_lock(rm->rm_mtx);
245 	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
246 		if (r->r_flags & RF_ALLOCATED) {
247 			mtx_unlock(rm->rm_mtx);
248 			return EBUSY;
249 		}
250 	}
251 
252 	/*
253 	 * There really should only be one of these if we are in this
254 	 * state and the code is working properly, but it can't hurt.
255 	 */
256 	while (!TAILQ_EMPTY(&rm->rm_list)) {
257 		r = TAILQ_FIRST(&rm->rm_list);
258 		TAILQ_REMOVE(&rm->rm_list, r, r_link);
259 		free(r, M_RMAN);
260 	}
261 	mtx_unlock(rm->rm_mtx);
262 	mtx_lock(&rman_mtx);
263 	TAILQ_REMOVE(&rman_head, rm, rm_link);
264 	mtx_unlock(&rman_mtx);
265 	mtx_destroy(rm->rm_mtx);
266 	free(rm->rm_mtx, M_RMAN);
267 
268 	return 0;
269 }
270 
271 struct resource *
272 rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
273 		      u_long count, u_long bound,  u_int flags,
274 		      struct device *dev)
275 {
276 	u_int	want_activate;
277 	struct	resource_i *r, *s, *rv;
278 	u_long	rstart, rend, amask, bmask;
279 
280 	rv = NULL;
281 
282 	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
283 	       "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
284 	       count, flags,
285 	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
286 	want_activate = (flags & RF_ACTIVE);
287 	flags &= ~RF_ACTIVE;
288 
289 	mtx_lock(rm->rm_mtx);
290 
291 	for (r = TAILQ_FIRST(&rm->rm_list);
292 	     r && r->r_end < start;
293 	     r = TAILQ_NEXT(r, r_link))
294 		;
295 
296 	if (r == NULL) {
297 		DPRINTF(("could not find a region\n"));
298 		goto out;
299 	}
300 
301 	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
302 	/* If bound is 0, bmask will also be 0 */
303 	bmask = ~(bound - 1);
304 	/*
305 	 * First try to find an acceptable totally-unshared region.
306 	 */
307 	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
308 		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
309 		if (s->r_start + count - 1 > end) {
310 			DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
311 			    s->r_start, end));
312 			break;
313 		}
314 		if (s->r_flags & RF_ALLOCATED) {
315 			DPRINTF(("region is allocated\n"));
316 			continue;
317 		}
318 		rstart = ulmax(s->r_start, start);
319 		/*
320 		 * Try to find a region by adjusting to boundary and alignment
321 		 * until both conditions are satisfied. This is not an optimal
322 		 * algorithm, but in most cases it isn't really bad, either.
323 		 */
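		/*
		 * Worked example (hypothetical numbers): with 2^4 alignment
		 * (amask = 0xf), bound = 0x100 (bmask = ~0xfful) and
		 * count = 0x20, a candidate rstart of 0x1f0 would span the
		 * 0x200 boundary, so the loop below bumps it to 0x200, which
		 * also satisfies the alignment.
		 */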
324 		do {
325 			rstart = (rstart + amask) & ~amask;
326 			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
327 				rstart += bound - (rstart & ~bmask);
328 		} while ((rstart & amask) != 0 && rstart < end &&
329 		    rstart < s->r_end);
330 		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
331 		if (rstart > rend) {
332 			DPRINTF(("adjusted start exceeds end\n"));
333 			continue;
334 		}
335 		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
336 		       rstart, rend, (rend - rstart + 1), count));
337 
338 		if ((rend - rstart + 1) >= count) {
339 			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
340 			       rstart, rend, (rend - rstart + 1)));
341 			if ((s->r_end - s->r_start + 1) == count) {
342 				DPRINTF(("candidate region is entire chunk\n"));
343 				rv = s;
344 				rv->r_flags |= RF_ALLOCATED | flags;
345 				rv->r_dev = dev;
346 				goto out;
347 			}
348 
349 			/*
350 			 * If s->r_start < rstart and
351 			 *    s->r_end > rstart + count - 1, then
352 			 * we need to split the region into three pieces
353 			 * (the middle one will get returned to the user).
354 			 * Otherwise, we are allocating at either the
355 			 * beginning or the end of s, so we only need to
356 			 * split it in two.  The first case requires
357 			 * two new allocations; the second requires but one.
358 			 */
359 			rv = int_alloc_resource(M_NOWAIT);
360 			if (rv == NULL)
361 				goto out;
362 			rv->r_start = rstart;
363 			rv->r_end = rstart + count - 1;
364 			rv->r_flags = flags | RF_ALLOCATED;
365 			rv->r_dev = dev;
366 			rv->r_rm = rm;
367 
368 			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
369 				DPRINTF(("splitting region in three parts: "
370 				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
371 				       s->r_start, rv->r_start - 1,
372 				       rv->r_start, rv->r_end,
373 				       rv->r_end + 1, s->r_end));
374 				/*
375 				 * We are allocating in the middle.
376 				 */
377 				r = int_alloc_resource(M_NOWAIT);
378 				if (r == NULL) {
379 					free(rv, M_RMAN);
380 					rv = NULL;
381 					goto out;
382 				}
383 				r->r_start = rv->r_end + 1;
384 				r->r_end = s->r_end;
385 				r->r_flags = s->r_flags;
386 				r->r_rm = rm;
387 				s->r_end = rv->r_start - 1;
388 				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
389 						     r_link);
390 				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
391 						     r_link);
392 			} else if (s->r_start == rv->r_start) {
393 				DPRINTF(("allocating from the beginning\n"));
394 				/*
395 				 * We are allocating at the beginning.
396 				 */
397 				s->r_start = rv->r_end + 1;
398 				TAILQ_INSERT_BEFORE(s, rv, r_link);
399 			} else {
400 				DPRINTF(("allocating at the end\n"));
401 				/*
402 				 * We are allocating at the end.
403 				 */
404 				s->r_end = rv->r_start - 1;
405 				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
406 						     r_link);
407 			}
408 			goto out;
409 		}
410 	}
411 
412 	/*
413 	 * Now find an acceptable shared region, if the client's requirements
414 	 * allow sharing.  By our implementation restriction, a candidate
415 	 * region must match exactly by both size and sharing type in order
416 	 * to be considered compatible with the client's request.  (The
417 	 * former restriction could probably be lifted without too much
418 	 * additional work, but this does not seem warranted.)
419 	 */
420 	DPRINTF(("no unshared regions found\n"));
421 	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
422 		goto out;
423 
424 	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
425 		if (s->r_start > end)
426 			break;
427 		if ((s->r_flags & flags) != flags)
428 			continue;
429 		rstart = ulmax(s->r_start, start);
430 		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
431 		if (s->r_start >= start && s->r_end <= end
432 		    && (s->r_end - s->r_start + 1) == count &&
433 		    (s->r_start & amask) == 0 &&
434 		    ((s->r_start ^ s->r_end) & bmask) == 0) {
435 			rv = int_alloc_resource(M_NOWAIT);
436 			if (rv == NULL)
437 				goto out;
438 			rv->r_start = s->r_start;
439 			rv->r_end = s->r_end;
440 			rv->r_flags = s->r_flags &
441 				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
442 			rv->r_dev = dev;
443 			rv->r_rm = rm;
444 			if (s->r_sharehead == NULL) {
445 				s->r_sharehead = malloc(sizeof *s->r_sharehead,
446 						M_RMAN, M_NOWAIT | M_ZERO);
447 				if (s->r_sharehead == NULL) {
448 					free(rv, M_RMAN);
449 					rv = NULL;
450 					goto out;
451 				}
452 				LIST_INIT(s->r_sharehead);
453 				LIST_INSERT_HEAD(s->r_sharehead, s,
454 						 r_sharelink);
455 				s->r_flags |= RF_FIRSTSHARE;
456 			}
457 			rv->r_sharehead = s->r_sharehead;
458 			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
459 			goto out;
460 		}
461 	}
462 
463 	/*
464 	 * We couldn't find anything.
465 	 */
466 out:
467 	/*
468 	 * If the user specified RF_ACTIVE in the initial flags,
469 	 * which is reflected in `want_activate', we attempt to atomically
470 	 * activate the resource.  If this fails, we release the resource
471 	 * and indicate overall failure.  (This behavior probably doesn't
472 	 * make sense for RF_TIMESHARE-type resources.)
473 	 */
474 	if (rv && want_activate) {
475 		struct resource_i *whohas;
476 		if (int_rman_activate_resource(rm, rv, &whohas)) {
477 			int_rman_release_resource(rm, rv);
478 			rv = NULL;
479 		}
480 	}
481 
482 	mtx_unlock(rm->rm_mtx);
483 	return (rv == NULL ? NULL : &rv->r_r);
484 }
485 
486 struct resource *
487 rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
488 		      u_int flags, struct device *dev)
489 {
490 
491 	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
492 	    dev));
493 }
494 
495 static int
496 int_rman_activate_resource(struct rman *rm, struct resource_i *r,
497 			   struct resource_i **whohas)
498 {
499 	struct resource_i *s;
500 	int ok;
501 
502 	/*
503 	 * If we are not timesharing, then there is nothing much to do.
504 	 * If we already have the resource, then there is nothing at all to do.
505 	 * If we are not on a sharing list with anybody else, then there is
506 	 * little to do.
507 	 */
508 	if ((r->r_flags & RF_TIMESHARE) == 0
509 	    || (r->r_flags & RF_ACTIVE) != 0
510 	    || r->r_sharehead == NULL) {
511 		r->r_flags |= RF_ACTIVE;
512 		return 0;
513 	}
514 
515 	ok = 1;
516 	for (s = LIST_FIRST(r->r_sharehead); s && ok;
517 	     s = LIST_NEXT(s, r_sharelink)) {
518 		if ((s->r_flags & RF_ACTIVE) != 0) {
519 			ok = 0;
520 			*whohas = s;
521 		}
522 	}
523 	if (ok) {
524 		r->r_flags |= RF_ACTIVE;
525 		return 0;
526 	}
527 	return EBUSY;
528 }
529 
530 int
531 rman_activate_resource(struct resource *re)
532 {
533 	int rv;
534 	struct resource_i *r, *whohas;
535 	struct rman *rm;
536 
537 	r = re->__r_i;
538 	rm = r->r_rm;
539 	mtx_lock(rm->rm_mtx);
540 	rv = int_rman_activate_resource(rm, r, &whohas);
541 	mtx_unlock(rm->rm_mtx);
542 	return rv;
543 }
544 
545 int
546 rman_await_resource(struct resource *re, int pri, int timo)
547 {
548 	int	rv;
549 	struct	resource_i *r, *whohas;
550 	struct	rman *rm;
551 
552 	r = re->__r_i;
553 	rm = r->r_rm;
554 	mtx_lock(rm->rm_mtx);
555 	for (;;) {
556 		rv = int_rman_activate_resource(rm, r, &whohas);
557 		if (rv != EBUSY)
558 			return (rv);	/* returns with mutex held */
559 
560 		if (r->r_sharehead == NULL)
561 			panic("rman_await_resource");
562 		whohas->r_flags |= RF_WANTED;
563 		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
564 		if (rv) {
565 			mtx_unlock(rm->rm_mtx);
566 			return (rv);
567 		}
568 	}
569 }
570 
571 static int
572 int_rman_deactivate_resource(struct resource_i *r)
573 {
574 
575 	r->r_flags &= ~RF_ACTIVE;
576 	if (r->r_flags & RF_WANTED) {
577 		r->r_flags &= ~RF_WANTED;
578 		wakeup(r->r_sharehead);
579 	}
580 	return 0;
581 }
582 
583 int
584 rman_deactivate_resource(struct resource *r)
585 {
586 	struct	rman *rm;
587 
588 	rm = r->__r_i->r_rm;
589 	mtx_lock(rm->rm_mtx);
590 	int_rman_deactivate_resource(r->__r_i);
591 	mtx_unlock(rm->rm_mtx);
592 	return 0;
593 }
594 
595 static int
596 int_rman_release_resource(struct rman *rm, struct resource_i *r)
597 {
598 	struct	resource_i *s, *t;
599 
600 	if (r->r_flags & RF_ACTIVE)
601 		int_rman_deactivate_resource(r);
602 
603 	/*
604 	 * Check for a sharing list first.  If there is one, then we don't
605 	 * have to think as hard.
606 	 */
607 	if (r->r_sharehead) {
608 		/*
609 		 * If a sharing list exists, then we know there are at
610 		 * least two sharers.
611 		 *
612 		 * If we are in the main resource list, appoint someone else.
613 		 */
614 		LIST_REMOVE(r, r_sharelink);
615 		s = LIST_FIRST(r->r_sharehead);
616 		if (r->r_flags & RF_FIRSTSHARE) {
617 			s->r_flags |= RF_FIRSTSHARE;
618 			TAILQ_INSERT_BEFORE(r, s, r_link);
619 			TAILQ_REMOVE(&rm->rm_list, r, r_link);
620 		}
621 
622 		/*
623 		 * Make sure that the sharing list goes away completely
624 		 * if the resource is no longer being shared at all.
625 		 */
626 		if (LIST_NEXT(s, r_sharelink) == NULL) {
627 			free(s->r_sharehead, M_RMAN);
628 			s->r_sharehead = NULL;
629 			s->r_flags &= ~RF_FIRSTSHARE;
630 		}
631 		goto out;
632 	}
633 
634 	/*
635 	 * Look at the adjacent resources in the list and see if our
636 	 * segment can be merged with any of them.  If either of the
637 	 * resources is allocated or is not exactly adjacent, it cannot
638 	 * be merged with our segment.
639 	 */
640 	s = TAILQ_PREV(r, resource_head, r_link);
641 	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
642 	    s->r_end + 1 != r->r_start))
643 		s = NULL;
644 	t = TAILQ_NEXT(r, r_link);
645 	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
646 	    r->r_end + 1 != t->r_start))
647 		t = NULL;
648 
649 	if (s != NULL && t != NULL) {
650 		/*
651 		 * Merge all three segments.
652 		 */
653 		s->r_end = t->r_end;
654 		TAILQ_REMOVE(&rm->rm_list, r, r_link);
655 		TAILQ_REMOVE(&rm->rm_list, t, r_link);
656 		free(t, M_RMAN);
657 	} else if (s != NULL) {
658 		/*
659 		 * Merge previous segment with ours.
660 		 */
661 		s->r_end = r->r_end;
662 		TAILQ_REMOVE(&rm->rm_list, r, r_link);
663 	} else if (t != NULL) {
664 		/*
665 		 * Merge next segment with ours.
666 		 */
667 		t->r_start = r->r_start;
668 		TAILQ_REMOVE(&rm->rm_list, r, r_link);
669 	} else {
670 		/*
671 		 * At this point, we know there is nothing we
672 		 * can potentially merge with, because on each
673 		 * side, there is either nothing there or what is
674 		 * there is still allocated.  In that case, we don't
675 		 * want to remove r from the list; we simply want to
676 		 * change it to an unallocated region and return
677 		 * without freeing anything.
678 		 */
679 		r->r_flags &= ~RF_ALLOCATED;
680 		return 0;
681 	}
682 
683 out:
684 	free(r, M_RMAN);
685 	return 0;
686 }
687 
688 int
689 rman_release_resource(struct resource *re)
690 {
691 	int	rv;
692 	struct	resource_i *r;
693 	struct	rman *rm;
694 
695 	r = re->__r_i;
696 	rm = r->r_rm;
697 	mtx_lock(rm->rm_mtx);
698 	rv = int_rman_release_resource(rm, r);
699 	mtx_unlock(rm->rm_mtx);
700 	return (rv);
701 }
702 
703 uint32_t
704 rman_make_alignment_flags(uint32_t size)
705 {
706 	int	i;
707 
708 	/*
709 	 * Find the highest bit set, and add one if more than one bit is
710 	 * set.  We're effectively computing ceil(log2(size)) here.
711 	 */
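	/*
	 * For example, a size of 0x1000 yields RF_ALIGNMENT_LOG2(12), while
	 * 0x1800 (not a power of two) is rounded up to RF_ALIGNMENT_LOG2(13).
	 */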
712 	for (i = 31; i > 0; i--)
713 		if ((1 << i) & size)
714 			break;
715 	if (~(1 << i) & size)
716 		i++;
717 
718 	return(RF_ALIGNMENT_LOG2(i));
719 }
720 
721 u_long
722 rman_get_start(struct resource *r)
723 {
724 	return (r->__r_i->r_start);
725 }
726 
727 u_long
728 rman_get_end(struct resource *r)
729 {
730 	return (r->__r_i->r_end);
731 }
732 
733 u_long
734 rman_get_size(struct resource *r)
735 {
736 	return (r->__r_i->r_end - r->__r_i->r_start + 1);
737 }
738 
739 u_int
740 rman_get_flags(struct resource *r)
741 {
742 	return (r->__r_i->r_flags);
743 }
744 
745 void
746 rman_set_virtual(struct resource *r, void *v)
747 {
748 	r->__r_i->r_virtual = v;
749 }
750 
751 void *
752 rman_get_virtual(struct resource *r)
753 {
754 	return (r->__r_i->r_virtual);
755 }
756 
757 void
758 rman_set_bustag(struct resource *r, bus_space_tag_t t)
759 {
760 	r->r_bustag = t;
761 }
762 
763 bus_space_tag_t
764 rman_get_bustag(struct resource *r)
765 {
766 	return (r->r_bustag);
767 }
768 
769 void
770 rman_set_bushandle(struct resource *r, bus_space_handle_t h)
771 {
772 	r->r_bushandle = h;
773 }
774 
775 bus_space_handle_t
776 rman_get_bushandle(struct resource *r)
777 {
778 	return (r->r_bushandle);
779 }
780 
781 void
782 rman_set_rid(struct resource *r, int rid)
783 {
784 	r->__r_i->r_rid = rid;
785 }
786 
787 void
788 rman_set_start(struct resource *r, u_long start)
789 {
790 	r->__r_i->r_start = start;
791 }
792 
793 void
794 rman_set_end(struct resource *r, u_long end)
795 {
796 	r->__r_i->r_end = end;
797 }
798 
799 int
800 rman_get_rid(struct resource *r)
801 {
802 	return (r->__r_i->r_rid);
803 }
804 
805 struct device *
806 rman_get_device(struct resource *r)
807 {
808 	return (r->__r_i->r_dev);
809 }
810 
811 void
812 rman_set_device(struct resource *r, struct device *dev)
813 {
814 	r->__r_i->r_dev = dev;
815 }
816 
817 int
818 rman_is_region_manager(struct resource *r, struct rman *rm)
819 {
820 
821 	return (r->__r_i->r_rm == rm);
822 }
823 
824 /*
825  * Sysctl interface for scanning the resource lists.
826  *
827  * We take three name components: the bus data generation count, the index
828  * into the list of resource managers, and the resource offset into the list.
829  */
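/*
 * For example, a userland consumer would walk this interface with MIB
 * names of the form hw.bus.rman.<generation>.<rman-index>.<resource-index>
 * (a sketch assuming the standard hw.bus sysctl layout): a resource index
 * of -1 returns the struct u_rman describing the manager itself, and
 * non-negative indices return the corresponding struct u_resource entries.
 */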
830 static int
831 sysctl_rman(SYSCTL_HANDLER_ARGS)
832 {
833 	int			*name = (int *)arg1;
834 	u_int			namelen = arg2;
835 	int			rman_idx, res_idx;
836 	struct rman		*rm;
837 	struct resource_i	*res;
838 	struct u_rman		urm;
839 	struct u_resource	ures;
840 	int			error;
841 
842 	if (namelen != 3)
843 		return (EINVAL);
844 
845 	if (bus_data_generation_check(name[0]))
846 		return (EINVAL);
847 	rman_idx = name[1];
848 	res_idx = name[2];
849 
850 	/*
851 	 * Find the indexed resource manager
852 	 */
853 	mtx_lock(&rman_mtx);
854 	TAILQ_FOREACH(rm, &rman_head, rm_link) {
855 		if (rman_idx-- == 0)
856 			break;
857 	}
858 	mtx_unlock(&rman_mtx);
859 	if (rm == NULL)
860 		return (ENOENT);
861 
862 	/*
863 	 * If the resource index is -1, we want details on the
864 	 * resource manager.
865 	 */
866 	if (res_idx == -1) {
867 		bzero(&urm, sizeof(urm));
868 		urm.rm_handle = (uintptr_t)rm;
869 		strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
870 		urm.rm_start = rm->rm_start;
871 		urm.rm_size = rm->rm_end - rm->rm_start + 1;
872 		urm.rm_type = rm->rm_type;
873 
874 		error = SYSCTL_OUT(req, &urm, sizeof(urm));
875 		return (error);
876 	}
877 
878 	/*
879 	 * Find the indexed resource and return it.
880 	 */
881 	mtx_lock(rm->rm_mtx);
882 	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
883 		if (res_idx-- == 0) {
884 			bzero(&ures, sizeof(ures));
885 			ures.r_handle = (uintptr_t)res;
886 			ures.r_parent = (uintptr_t)res->r_rm;
887 			ures.r_device = (uintptr_t)res->r_dev;
888 			if (res->r_dev != NULL) {
889 				if (device_get_name(res->r_dev) != NULL) {
890 					snprintf(ures.r_devname, RM_TEXTLEN,
891 					    "%s%d",
892 					    device_get_name(res->r_dev),
893 					    device_get_unit(res->r_dev));
894 				} else {
895 					strlcpy(ures.r_devname, "nomatch",
896 					    RM_TEXTLEN);
897 				}
898 			} else {
899 				ures.r_devname[0] = '\0';
900 			}
901 			ures.r_start = res->r_start;
902 			ures.r_size = res->r_end - res->r_start + 1;
903 			ures.r_flags = res->r_flags;
904 
905 			mtx_unlock(rm->rm_mtx);
906 			error = SYSCTL_OUT(req, &ures, sizeof(ures));
907 			return (error);
908 		}
909 	}
910 	mtx_unlock(rm->rm_mtx);
911 	return (ENOENT);
912 }
913 
914 SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
915     "kernel resource manager");
916 
917 #ifdef DDB
918 static void
919 dump_rman(struct rman *rm)
920 {
921 	struct resource_i *r;
922 	const char *devname;
923 
924 	if (db_pager_quit)
925 		return;
926 	db_printf("rman: %s\n", rm->rm_descr);
927 	db_printf("    0x%lx-0x%lx (full range)\n", rm->rm_start, rm->rm_end);
928 	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
929 		if (r->r_dev != NULL) {
930 			devname = device_get_nameunit(r->r_dev);
931 			if (devname == NULL)
932 				devname = "nomatch";
933 		} else
934 			devname = NULL;
935 		db_printf("    0x%lx-0x%lx ", r->r_start, r->r_end);
936 		if (devname != NULL)
937 			db_printf("(%s)\n", devname);
938 		else
939 			db_printf("----\n");
940 		if (db_pager_quit)
941 			return;
942 	}
943 }
944 
945 DB_SHOW_COMMAND(rman, db_show_rman)
946 {
947 
948 	if (have_addr)
949 		dump_rman((struct rman *)addr);
950 }
951 
952 DB_SHOW_COMMAND(allrman, db_show_all_rman)
953 {
954 	struct rman *rm;
955 
956 	TAILQ_FOREACH(rm, &rman_head, rm_link)
957 		dump_rman(rm);
958 }
959 #endif
960