xref: /freebsd/sys/kern/subr_rman.c (revision f856af0466c076beef4ea9b15d088e1119a945b8)
/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
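
/*
 * A minimal usage sketch, for a hypothetical bus driver (the names and
 * numeric ranges below are illustrative only, not taken from any in-tree
 * driver):
 *
 *	static struct rman irq_rman;
 *
 *	irq_rman.rm_type = RMAN_ARRAY;
 *	irq_rman.rm_descr = "Interrupt request lines";
 *	if (rman_init(&irq_rman) != 0 ||
 *	    rman_manage_region(&irq_rman, 0, 15) != 0)
 *		panic("irq_rman setup failed");
 *
 *	...
 *
 *	struct resource *res;
 *	res = rman_reserve_resource(&irq_rman, 5, 5, 1,
 *	    RF_ACTIVE | RF_SHAREABLE, child);
 *	if (res != NULL)
 *		printf("allocated IRQ %lu\n", rman_get_start(res));
 */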

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	struct	device *r_dev;	/* device which has allocated this resource */
	struct	rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};
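
/*
 * The public `struct resource' handed out to clients is embedded in the
 * resource_i above; int_alloc_resource() points its __r_i member back at
 * the containing structure so that the accessors below (rman_get_start()
 * and friends) can recover the internal state from a client's handle.
 */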

int     rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

#define DPRINTF(params) do { if (rman_debug) printf params; } while (0)

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
static	struct mtx rman_mtx; /* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
				       struct resource_i **whohas);
static	int int_rman_deactivate_resource(struct resource_i *r);
static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

/*
 * NB: this interface is not robust against programming errors which
 * add multiple copies of the same region.
 */
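/*
 * Adjacent unallocated regions are coalesced: for example, managing
 * [0x0, 0x7] and then [0x8, 0xf] in the same rman leaves a single
 * [0x0, 0xf] entry on the list.
 */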
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s, *t;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ULONG_MAX)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			mtx_unlock(rm->rm_mtx);
			free(r, M_RMAN);
			return EBUSY;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			mtx_unlock(rm->rm_mtx);
			free(r, M_RMAN);
			return EBUSY;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else {
			/* Can we merge with just the next region? */
			if (t != NULL) {
				t->r_start = r->r_start;
				free(r, M_RMAN);
			} else
				TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}

	mtx_unlock(rm->rm_mtx);
	return 0;
}

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
		      u_long count, u_long bound,  u_int flags,
		      struct device *dev)
{
	u_int	want_activate;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	       "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	       count, flags,
	       dev == NULL ? "<null>" : device_get_nameunit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		if (s->r_start + count - 1 > end) {
			DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied. This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
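		/*
		 * For example, with 4KB alignment (amask 0xfff), a 64KB
		 * boundary (bmask ~0xffff) and count 0x2000, a candidate
		 * rstart of 0x1f000 would cross the boundary at 0x20000, so
		 * the loop below bumps it by bound - (rstart & ~bmask) =
		 * 0x1000, giving 0x20000, which satisfies both constraints.
		 */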
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		       rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			       rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *    s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				       "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				       s->r_start, rv->r_start - 1,
				       rv->r_start, rv->r_end,
				       rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
						     r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
						     r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
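	/*
	 * For example, two devices may share the same interrupt line by each
	 * reserving the identical range [10, 10] with RF_SHAREABLE set; a
	 * request for [10, 11] would not be an exact match and would fail.
	 */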
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
				(RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
						M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
						 r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
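
/*
 * As an illustration, a caller that wants a 0x100-byte range, aligned to
 * 0x100 and not crossing a 0x1000 boundary, might request (the manager and
 * device arguments are hypothetical):
 *
 *	rman_reserve_resource_bound(rm, 0ul, ~0ul, 0x100, 0x1000,
 *	    rman_make_alignment_flags(0x100), dev);
 */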

static int
int_rman_activate_resource(struct rman *rm, struct resource_i *r,
			   struct resource_i **whohas)
{
	struct resource_i *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == NULL) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *re)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_activate_resource(rm, r, &whohas);
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int	rv;
	struct	resource_i *r, *whohas;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}

static int
int_rman_deactivate_resource(struct resource_i *r)
{

	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct	rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	int_rman_deactivate_resource(r->__r_i);
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct	resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are linked into the main resource list, appoint
		 * someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int	rv;
	struct	resource_i *r;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int	i;

	/*
	 * Find the highest bit set, and add one if more than one bit is
	 * set.  We're effectively computing ceil(log2(size)) here.
	 */
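	/*
	 * For example, a size of 0x1000 (a single bit, 1 << 12) yields
	 * RF_ALIGNMENT_LOG2(12), while a size of 0x1800 (more than one bit
	 * set) rounds up to RF_ALIGNMENT_LOG2(13).
	 */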
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return(RF_ALIGNMENT_LOG2(i));
}

u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}

void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}

void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}

int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}

struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take three input parameters: the bus data generation, the index
 * into the list of resource managers, and the resource offset into that
 * manager's list (or -1 to describe the resource manager itself).
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res_idx-- == 0) {
			bzero(&ures, sizeof(ures));
			ures.r_handle = (uintptr_t)res;
			ures.r_parent = (uintptr_t)res->r_rm;
			ures.r_device = (uintptr_t)res->r_dev;
			if (res->r_dev != NULL) {
				if (device_get_name(res->r_dev) != NULL) {
					snprintf(ures.r_devname, RM_TEXTLEN,
					    "%s%d",
					    device_get_name(res->r_dev),
					    device_get_unit(res->r_dev));
				} else {
					strlcpy(ures.r_devname, "nomatch",
					    RM_TEXTLEN);
				}
			} else {
				ures.r_devname[0] = '\0';
			}
			ures.r_start = res->r_start;
			ures.r_size = res->r_end - res->r_start + 1;
			ures.r_flags = res->r_flags;

			mtx_unlock(rm->rm_mtx);
			error = SYSCTL_OUT(req, &ures, sizeof(ures));
			return (error);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");