/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
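 *
 * A minimal usage sketch (illustrative only; the type, description, and
 * address range below are hypothetical and not taken from any real bus
 * driver):
 *
 *	struct rman rm;
 *
 *	rm.rm_type = RMAN_ARRAY;
 *	rm.rm_descr = "hypothetical I/O port space";
 *	if (rman_init(&rm) != 0 ||
 *	    rman_manage_region(&rm, 0x0, 0xffff) != 0)
 *		return (ENXIO);
 *
 * The bus code would then satisfy driver requests out of this range with
 * rman_reserve_resource() and undo them with rman_release_resource().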
 */

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	u_long	r_start;	/* index of the first entry in this resource */
	u_long	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	struct	device *r_dev;	/* device which has allocated this resource */
	struct	rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};

int	rman_debug = 0;
TUNABLE_INT("debug.rman_debug", &rman_debug);
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RW,
    &rman_debug, 0, "rman debug");

#define DPRINTF(params) if (rman_debug) printf params

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct	rman_head rman_head;
static	struct mtx rman_mtx;	/* mutex to protect rman_head */
static	int int_rman_activate_resource(struct rman *rm, struct resource_i *r,
				       struct resource_i **whohas);
static	int int_rman_deactivate_resource(struct resource_i *r);
static	int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}

/*
 * NB: this interface is not robust against programming errors which
 * add multiple copies of the same region.
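 *
 * For illustration only (the ranges below are hypothetical, not taken
 * from any real driver):
 *
 *	rman_manage_region(&rm, 0x0000, 0x00ff);
 *	rman_manage_region(&rm, 0x0100, 0x01ff);
 *
 * leaves a single free region [0x0000, 0x01ff], because adjacent,
 * unallocated regions are coalesced by the insertion code below.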
 */
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
	struct resource_i *r, *s, *t;

	DPRINTF(("rman_manage_region: <%s> request: start %#lx, end %#lx\n",
	    rm->rm_descr, start, end));
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ULONG_MAX)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			mtx_unlock(rm->rm_mtx);
			free(r, M_RMAN);
			return EBUSY;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			mtx_unlock(rm->rm_mtx);
			free(r, M_RMAN);
			return EBUSY;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}

	mtx_unlock(rm->rm_mtx);
	return 0;
}

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

struct resource *
rman_reserve_resource_bound(struct rman *rm, u_long start, u_long end,
			    u_long count, u_long bound, u_int flags,
			    struct device *dev)
{
	u_int	want_activate;
	struct	resource_i *r, *s, *rv;
	u_long	rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#lx, %#lx], "
	    "length %#lx, flags %u, device %s\n", rm->rm_descr, start, end,
	    count, flags,
	    dev == NULL ? "<null>" : device_get_nameunit(dev)));
	want_activate = (flags & RF_ACTIVE);
	flags &= ~RF_ACTIVE;

	mtx_lock(rm->rm_mtx);

	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start;
	     r = TAILQ_NEXT(r, r_link))
		;

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ul << RF_ALIGNMENT(flags)) - 1;
	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#lx, %#lx]\n", s->r_start, s->r_end));
		if (s->r_start + count - 1 > end) {
			DPRINTF(("s->r_start (%#lx) + count - 1 > end (%#lx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ulmax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied.  This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ulmin(s->r_end, ulmax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#lx, %#lx]; size %#lx (requested %#lx)\n",
		    rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#lx, %#lx], size %#lx\n",
			    rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags |= RF_ALLOCATED | flags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 *     s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = flags | RF_ALLOCATED;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				    "[%#lx, %#lx]; [%#lx, %#lx]; [%#lx, %#lx]\n",
				    s->r_start, rv->r_start - 1,
				    rv->r_start, rv->r_end,
				    rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
				    r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
		goto out;

	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		if (s->r_start > end)
			break;
		if ((s->r_flags & flags) != flags)
			continue;
		rstart = ulmax(s->r_start, start);
		rend = ulmin(s->r_end, ulmax(start + count - 1, end));
		if (s->r_start >= start && s->r_end <= end
		    && (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = s->r_flags &
			    (RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
				    M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
				    r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}

	/*
	 * We couldn't find anything.
	 */
out:
	/*
	 * If the user specified RF_ACTIVE in the initial flags,
	 * which is reflected in `want_activate', we attempt to atomically
	 * activate the resource.  If this fails, we release the resource
	 * and indicate overall failure.  (This behavior probably doesn't
	 * make sense for RF_TIMESHARE-type resources.)
	 */
	if (rv && want_activate) {
		struct resource_i *whohas;
		if (int_rman_activate_resource(rm, rv, &whohas)) {
			int_rman_release_resource(rm, rv);
			rv = NULL;
		}
	}

	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
		      u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}

static int
int_rman_activate_resource(struct rman *rm, struct resource_i *r,
			   struct resource_i **whohas)
{
	struct resource_i *s;
	int ok;

	/*
	 * If we are not timesharing, then there is nothing much to do.
	 * If we already have the resource, then there is nothing at all to do.
	 * If we are not on a sharing list with anybody else, then there is
	 * little to do.
	 */
	if ((r->r_flags & RF_TIMESHARE) == 0
	    || (r->r_flags & RF_ACTIVE) != 0
	    || r->r_sharehead == NULL) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}

	ok = 1;
	for (s = LIST_FIRST(r->r_sharehead); s && ok;
	     s = LIST_NEXT(s, r_sharelink)) {
		if ((s->r_flags & RF_ACTIVE) != 0) {
			ok = 0;
			*whohas = s;
		}
	}
	if (ok) {
		r->r_flags |= RF_ACTIVE;
		return 0;
	}
	return EBUSY;
}

int
rman_activate_resource(struct resource *re)
{
	int rv;
	struct resource_i *r, *whohas;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_activate_resource(rm, r, &whohas);
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_await_resource(struct resource *re, int pri, int timo)
{
	int	rv;
	struct	resource_i *r, *whohas;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	for (;;) {
		rv = int_rman_activate_resource(rm, r, &whohas);
		if (rv != EBUSY)
			return (rv);	/* returns with mutex held */

		if (r->r_sharehead == NULL)
			panic("rman_await_resource");
		whohas->r_flags |= RF_WANTED;
		rv = msleep(r->r_sharehead, rm->rm_mtx, pri, "rmwait", timo);
		if (rv) {
			mtx_unlock(rm->rm_mtx);
			return (rv);
		}
	}
}

static int
int_rman_deactivate_resource(struct resource_i *r)
{

	r->r_flags &= ~RF_ACTIVE;
	if (r->r_flags & RF_WANTED) {
		r->r_flags &= ~RF_WANTED;
		wakeup(r->r_sharehead);
	}
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	int_rman_deactivate_resource(r->__r_i);
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		int_rman_deactivate_resource(r);

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int	rv;
	struct	resource_i *r;
	struct	rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int	i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return(RF_ALIGNMENT_LOG2(i));
}

u_long
rman_get_start(struct resource *r)
{
	return (r->__r_i->r_start);
}

u_long
rman_get_end(struct resource *r)
{
	return (r->__r_i->r_end);
}

u_long
rman_get_size(struct resource *r)
{
	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{
	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{
	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{
	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{
	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{
	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{
	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{
	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{
	r->__r_i->r_rid = rid;
}

void
rman_set_start(struct resource *r, u_long start)
{
	r->__r_i->r_start = start;
}

void
rman_set_end(struct resource *r, u_long end)
{
	r->__r_i->r_end = end;
}

int
rman_get_rid(struct resource *r)
{
	return (r->__r_i->r_rid);
}

struct device *
rman_get_device(struct resource *r)
{
	return (r->__r_i->r_dev);
}

void
rman_set_device(struct resource *r, struct device *dev)
{
	r->__r_i->r_dev = dev;
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}

/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take two input parameters; the index into the list of resource
 * managers, and the resource offset into the list.
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res_idx-- == 0) {
			bzero(&ures, sizeof(ures));
			ures.r_handle = (uintptr_t)res;
			ures.r_parent = (uintptr_t)res->r_rm;
			ures.r_device = (uintptr_t)res->r_dev;
			if (res->r_dev != NULL) {
				if (device_get_name(res->r_dev) != NULL) {
					snprintf(ures.r_devname, RM_TEXTLEN,
					    "%s%d",
					    device_get_name(res->r_dev),
					    device_get_unit(res->r_dev));
				} else {
					strlcpy(ures.r_devname, "nomatch",
					    RM_TEXTLEN);
				}
			} else {
				ures.r_devname[0] = '\0';
			}
			ures.r_start = res->r_start;
			ures.r_size = res->r_end - res->r_start + 1;
			ures.r_flags = res->r_flags;

			mtx_unlock(rm->rm_mtx);
			error = SYSCTL_OUT(req, &ures, sizeof(ures));
			return (error);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");

#ifdef DDB
static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	db_printf("rman: %s\n", rm->rm_descr);
	db_printf("    0x%lx-0x%lx (full range)\n", rm->rm_start, rm->rm_end);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%lx-0x%lx ", r->r_start, r->r_end);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr)
		dump_rman((struct rman *)addr);
}

DB_SHOW_COMMAND(allrman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link)
		dump_rman(rm);
}
#endif