/*-
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a specific
 * power budget which all attached devices share.  RMAN_GAUGE is not
 * implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
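
/*
 * Illustrative usage sketch (not from the original source): roughly how
 * a bus driver might set up an rman and draw an allocation from it.
 * The names here (ex_rman, ex_dev, "example I/O ports") are hypothetical
 * and error handling is abbreviated.
 *
 *	static struct rman ex_rman;
 *
 *	ex_rman.rm_type = RMAN_ARRAY;
 *	ex_rman.rm_descr = "example I/O ports";
 *	if (rman_init(&ex_rman) != 0 ||
 *	    rman_manage_region(&ex_rman, 0x100, 0x2ff) != 0)
 *		panic("ex_rman setup failed");
 *
 *	(Reserve 0x20 contiguous ports anywhere in [0x100, 0x2ff].)
 *	struct resource *res = rman_reserve_resource(&ex_rman, 0x100,
 *	    0x2ff, 0x20, RF_ACTIVE, ex_dev);
 *	if (res != NULL) {
 *		... use rman_get_start(res) / rman_get_end(res) ...
 *		rman_release_resource(res);
 *	}
 */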

#include "opt_ddb.h"

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/bus.h>		/* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>
#include <sys/sysctl.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

/*
 * We use a linked list rather than a bitmap because we need to be able to
 * represent potentially huge objects (like all of a processor's physical
 * address space).  That is also why the indices are defined to have type
 * `unsigned long' -- that being the largest integral type in ISO C (1990).
 * The 1999 version of C allows `long long'; we may need to switch to that
 * at some point in the future, particularly if we want to support 36-bit
 * addresses on IA32 hardware.
 */
struct resource_i {
	struct resource		r_r;
	TAILQ_ENTRY(resource_i)	r_link;
	LIST_ENTRY(resource_i)	r_sharelink;
	LIST_HEAD(, resource_i)	*r_sharehead;
	rman_res_t	r_start;	/* index of the first entry in this resource */
	rman_res_t	r_end;		/* index of the last entry (inclusive) */
	u_int	r_flags;
	void	*r_virtual;	/* virtual address of this resource */
	struct device *r_dev;	/* device which has allocated this resource */
	struct rman *r_rm;	/* resource manager from whence this came */
	int	r_rid;		/* optional rid for this resource. */
};

static int rman_debug = 0;
SYSCTL_INT(_debug, OID_AUTO, rman_debug, CTLFLAG_RWTUN,
    &rman_debug, 0, "rman debug");

/* Wrapped in do/while so the macro expands safely in if/else bodies. */
#define	DPRINTF(params)	do { if (rman_debug) printf params; } while (0)

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
static struct mtx rman_mtx;	/* mutex to protect rman_head */
static int int_rman_release_resource(struct rman *rm, struct resource_i *r);

static __inline struct resource_i *
int_alloc_resource(int malloc_flag)
{
	struct resource_i *r;

	r = malloc(sizeof *r, M_RMAN, malloc_flag | M_ZERO);
	if (r != NULL) {
		r->r_r.__r_i = r;
	}
	return (r);
}

int
rman_init(struct rman *rm)
{
	static int once = 0;

	if (once == 0) {
		once = 1;
		TAILQ_INIT(&rman_head);
		mtx_init(&rman_mtx, "rman head", NULL, MTX_DEF);
	}

	if (rm->rm_start == 0 && rm->rm_end == 0)
		rm->rm_end = ~0;
	if (rm->rm_type == RMAN_UNINIT)
		panic("rman_init");
	if (rm->rm_type == RMAN_GAUGE)
		panic("implement RMAN_GAUGE");

	TAILQ_INIT(&rm->rm_list);
	rm->rm_mtx = malloc(sizeof *rm->rm_mtx, M_RMAN, M_NOWAIT | M_ZERO);
	if (rm->rm_mtx == NULL)
		return ENOMEM;
	mtx_init(rm->rm_mtx, "rman", NULL, MTX_DEF);

	mtx_lock(&rman_mtx);
	TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	return 0;
}
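
/*
 * Illustrative note on rman_manage_region() (example values are
 * hypothetical): unallocated regions that become exactly adjacent are
 * coalesced.  If an rman already manages the free regions
 * [0x100, 0x1ff] and [0x300, 0x3ff], managing [0x200, 0x2ff] collapses
 * all three into a single [0x100, 0x3ff] entry, while managing a range
 * that overlaps an existing region fails with EBUSY.
 */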
int
rman_manage_region(struct rman *rm, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t;
	int rv = 0;

	DPRINTF(("rman_manage_region: <%s> request: start %#jx, end %#jx\n",
	    rm->rm_descr, start, end));
	if (start < rm->rm_start || end > rm->rm_end)
		return EINVAL;
	r = int_alloc_resource(M_NOWAIT);
	if (r == NULL)
		return ENOMEM;
	r->r_start = start;
	r->r_end = end;
	r->r_rm = rm;

	mtx_lock(rm->rm_mtx);

	/* Skip entries before us. */
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s->r_end == ~0)
			break;
		if (s->r_end + 1 >= r->r_start)
			break;
	}

	/* If we ran off the end of the list, insert at the tail. */
	if (s == NULL) {
		TAILQ_INSERT_TAIL(&rm->rm_list, r, r_link);
	} else {
		/* Check for any overlap with the current region. */
		if (r->r_start <= s->r_end && r->r_end >= s->r_start) {
			rv = EBUSY;
			goto out;
		}

		/* Check for any overlap with the next region. */
		t = TAILQ_NEXT(s, r_link);
		if (t && r->r_start <= t->r_end && r->r_end >= t->r_start) {
			rv = EBUSY;
			goto out;
		}

		/*
		 * See if this region can be merged with the next region.  If
		 * not, clear the pointer.
		 */
		if (t && (r->r_end + 1 != t->r_start || t->r_flags != 0))
			t = NULL;

		/* See if we can merge with the current region. */
		if (s->r_end + 1 == r->r_start && s->r_flags == 0) {
			/* Can we merge all 3 regions? */
			if (t != NULL) {
				s->r_end = t->r_end;
				TAILQ_REMOVE(&rm->rm_list, t, r_link);
				free(r, M_RMAN);
				free(t, M_RMAN);
			} else {
				s->r_end = r->r_end;
				free(r, M_RMAN);
			}
		} else if (t != NULL) {
			/* Can we merge with just the next region? */
			t->r_start = r->r_start;
			free(r, M_RMAN);
		} else if (s->r_end < r->r_start) {
			TAILQ_INSERT_AFTER(&rm->rm_list, s, r, r_link);
		} else {
			TAILQ_INSERT_BEFORE(s, r, r_link);
		}
	}
out:
	mtx_unlock(rm->rm_mtx);
	return rv;
}

int
rman_init_from_resource(struct rman *rm, struct resource *r)
{
	int rv;

	if ((rv = rman_init(rm)) != 0)
		return (rv);
	return (rman_manage_region(rm, r->__r_i->r_start, r->__r_i->r_end));
}

int
rman_fini(struct rman *rm)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_flags & RF_ALLOCATED) {
			mtx_unlock(rm->rm_mtx);
			return EBUSY;
		}
	}

	/*
	 * There really should only be one of these if we are in this
	 * state and the code is working properly, but it can't hurt.
	 */
	while (!TAILQ_EMPTY(&rm->rm_list)) {
		r = TAILQ_FIRST(&rm->rm_list);
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		free(r, M_RMAN);
	}
	mtx_unlock(rm->rm_mtx);
	mtx_lock(&rman_mtx);
	TAILQ_REMOVE(&rman_head, rm, rm_link);
	mtx_unlock(&rman_mtx);
	mtx_destroy(rm->rm_mtx);
	free(rm->rm_mtx, M_RMAN);

	return 0;
}

int
rman_first_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}

int
rman_last_free_region(struct rman *rm, rman_res_t *start, rman_res_t *end)
{
	struct resource_i *r;

	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH_REVERSE(r, &rm->rm_list, resource_head, r_link) {
		if (!(r->r_flags & RF_ALLOCATED)) {
			*start = r->r_start;
			*end = r->r_end;
			mtx_unlock(rm->rm_mtx);
			return (0);
		}
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);
}
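
/*
 * Illustrative note on rman_adjust_resource() (example values are
 * hypothetical): an allocation may be resized in place but not moved.
 * A resource occupying [0x1000, 0x1fff] can grow to [0x1000, 0x2fff]
 * only if [0x2000, 0x2fff] lies within the same managed region and is
 * unallocated; shrinking to [0x1000, 0x17ff] always succeeds and
 * returns [0x1800, 0x1fff] to the free list.
 */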
/*
 * Shrink or extend one or both ends of an allocated resource.
 */
int
rman_adjust_resource(struct resource *rr, rman_res_t start, rman_res_t end)
{
	struct resource_i *r, *s, *t, *new;
	struct rman *rm;

	/* Not supported for shared resources. */
	r = rr->__r_i;
	if (r->r_flags & RF_SHAREABLE)
		return (EINVAL);

	/*
	 * This does not support wholesale moving of a resource.  At
	 * least part of the desired new range must overlap with the
	 * existing resource.
	 */
	if (end < r->r_start || r->r_end < start)
		return (EINVAL);

	/*
	 * Find the two resource regions immediately adjacent to the
	 * allocated resource.
	 */
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
#ifdef INVARIANTS
	TAILQ_FOREACH(s, &rm->rm_list, r_link) {
		if (s == r)
			break;
	}
	if (s == NULL)
		panic("resource not in list");
#endif
	s = TAILQ_PREV(r, resource_head, r_link);
	t = TAILQ_NEXT(r, r_link);
	KASSERT(s == NULL || s->r_end + 1 == r->r_start,
	    ("prev resource mismatch"));
	KASSERT(t == NULL || r->r_end + 1 == t->r_start,
	    ("next resource mismatch"));

	/*
	 * See if the changes are permitted.  Shrinking is always allowed,
	 * but growing requires sufficient room in the adjacent region.
	 */
	if (start < r->r_start && (s == NULL || (s->r_flags & RF_ALLOCATED) ||
	    s->r_start > start)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}
	if (end > r->r_end && (t == NULL || (t->r_flags & RF_ALLOCATED) ||
	    t->r_end < end)) {
		mtx_unlock(rm->rm_mtx);
		return (EBUSY);
	}

	/*
	 * While holding the lock, grow either end of the resource as
	 * needed and shrink either end if the shrinking does not require
	 * allocating a new resource.  We can safely drop the lock and then
	 * insert a new range to handle the shrinking case afterwards.
	 */
	if (start < r->r_start ||
	    (start > r->r_start && s != NULL && !(s->r_flags & RF_ALLOCATED))) {
		KASSERT(s->r_flags == 0, ("prev is busy"));
		r->r_start = start;
		if (s->r_start == start) {
			TAILQ_REMOVE(&rm->rm_list, s, r_link);
			free(s, M_RMAN);
		} else
			s->r_end = start - 1;
	}
	if (end > r->r_end ||
	    (end < r->r_end && t != NULL && !(t->r_flags & RF_ALLOCATED))) {
		KASSERT(t->r_flags == 0, ("next is busy"));
		r->r_end = end;
		if (t->r_end == end) {
			TAILQ_REMOVE(&rm->rm_list, t, r_link);
			free(t, M_RMAN);
		} else
			t->r_start = end + 1;
	}
	mtx_unlock(rm->rm_mtx);

	/*
	 * Handle the shrinking cases that require allocating a new
	 * resource to hold the newly-free region.  We have to recheck
	 * if we still need this new region after acquiring the lock.
	 */
	if (start > r->r_start) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = r->r_start;
		new->r_end = start - 1;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_start = start;
		s = TAILQ_PREV(r, resource_head, r_link);
		if (s != NULL && !(s->r_flags & RF_ALLOCATED)) {
			s->r_end = start - 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_BEFORE(r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	if (end < r->r_end) {
		new = int_alloc_resource(M_WAITOK);
		new->r_start = end + 1;
		new->r_end = r->r_end;
		new->r_rm = rm;
		mtx_lock(rm->rm_mtx);
		r->r_end = end;
		t = TAILQ_NEXT(r, r_link);
		if (t != NULL && !(t->r_flags & RF_ALLOCATED)) {
			t->r_start = end + 1;
			free(new, M_RMAN);
		} else
			TAILQ_INSERT_AFTER(&rm->rm_list, r, new, r_link);
		mtx_unlock(rm->rm_mtx);
	}
	return (0);
}

#define	SHARE_TYPE(f)	(f & (RF_SHAREABLE | RF_PREFETCHABLE))

struct resource *
rman_reserve_resource_bound(struct rman *rm, rman_res_t start, rman_res_t end,
    rman_res_t count, rman_res_t bound, u_int flags,
    struct device *dev)
{
	u_int new_rflags;
	struct resource_i *r, *s, *rv;
	rman_res_t rstart, rend, amask, bmask;

	rv = NULL;

	DPRINTF(("rman_reserve_resource_bound: <%s> request: [%#jx, %#jx], "
	    "length %#jx, flags %x, device %s\n", rm->rm_descr, start, end,
	    count, flags,
	    dev == NULL ? "<null>" : device_get_nameunit(dev)));
	KASSERT((flags & RF_FIRSTSHARE) == 0,
	    ("invalid flags %#x", flags));
	new_rflags = (flags & ~RF_FIRSTSHARE) | RF_ALLOCATED;

	mtx_lock(rm->rm_mtx);

	r = TAILQ_FIRST(&rm->rm_list);
	if (r == NULL) {
		DPRINTF(("NULL list head\n"));
	} else {
		DPRINTF(("rman_reserve_resource_bound: trying %#jx <%#jx,%#jx>\n",
		    r->r_end, start, count - 1));
	}
	for (r = TAILQ_FIRST(&rm->rm_list);
	     r && r->r_end < start + count - 1;
	     r = TAILQ_NEXT(r, r_link)) {
		DPRINTF(("rman_reserve_resource_bound: tried %#jx <%#jx,%#jx>\n",
		    r->r_end, start, count - 1));
	}

	if (r == NULL) {
		DPRINTF(("could not find a region\n"));
		goto out;
	}

	amask = (1ull << RF_ALIGNMENT(flags)) - 1;
	KASSERT(start <= RM_MAX_END - amask,
	    ("start (%#jx) + amask (%#jx) would wrap around", start, amask));

	/* If bound is 0, bmask will also be 0 */
	bmask = ~(bound - 1);
	/*
	 * First try to find an acceptable totally-unshared region.
	 */
	for (s = r; s; s = TAILQ_NEXT(s, r_link)) {
		DPRINTF(("considering [%#jx, %#jx]\n", s->r_start, s->r_end));
		/*
		 * The resource list is sorted, so there is no point in
		 * searching further once r_start is too large.
		 */
		if (s->r_start > end - (count - 1)) {
			DPRINTF(("s->r_start (%#jx) + count - 1 > end (%#jx)\n",
			    s->r_start, end));
			break;
		}
		if (s->r_start > RM_MAX_END - amask) {
			DPRINTF(("s->r_start (%#jx) + amask (%#jx) too large\n",
			    s->r_start, amask));
			break;
		}
		if (s->r_flags & RF_ALLOCATED) {
			DPRINTF(("region is allocated\n"));
			continue;
		}
		rstart = ummax(s->r_start, start);
		/*
		 * Try to find a region by adjusting to boundary and alignment
		 * until both conditions are satisfied.  This is not an optimal
		 * algorithm, but in most cases it isn't really bad, either.
		 */
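		/*
		 * Worked example (illustrative values): with a 0x10
		 * alignment (amask = 0xf), bound = 0x100 (bmask =
		 * ~0xff), and count = 0x10, a candidate rstart = 0xf8
		 * is first rounded up to 0x100; the span [0x100, 0x10f]
		 * is then aligned and no longer crosses a 0x100
		 * boundary, so the loop below terminates immediately.
		 */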
		do {
			rstart = (rstart + amask) & ~amask;
			if (((rstart ^ (rstart + count - 1)) & bmask) != 0)
				rstart += bound - (rstart & ~bmask);
		} while ((rstart & amask) != 0 && rstart < end &&
		    rstart < s->r_end);
		rend = ummin(s->r_end, ummax(rstart + count - 1, end));
		if (rstart > rend) {
			DPRINTF(("adjusted start exceeds end\n"));
			continue;
		}
		DPRINTF(("truncated region: [%#jx, %#jx]; size %#jx (requested %#jx)\n",
		    rstart, rend, (rend - rstart + 1), count));

		if ((rend - rstart + 1) >= count) {
			DPRINTF(("candidate region: [%#jx, %#jx], size %#jx\n",
			    rstart, rend, (rend - rstart + 1)));
			if ((s->r_end - s->r_start + 1) == count) {
				DPRINTF(("candidate region is entire chunk\n"));
				rv = s;
				rv->r_flags = new_rflags;
				rv->r_dev = dev;
				goto out;
			}

			/*
			 * If s->r_start < rstart and
			 * s->r_end > rstart + count - 1, then
			 * we need to split the region into three pieces
			 * (the middle one will get returned to the user).
			 * Otherwise, we are allocating at either the
			 * beginning or the end of s, so we only need to
			 * split it in two.  The first case requires
			 * two new allocations; the second requires but one.
			 */
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = rstart;
			rv->r_end = rstart + count - 1;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;

			if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
				DPRINTF(("splitting region in three parts: "
				    "[%#jx, %#jx]; [%#jx, %#jx]; [%#jx, %#jx]\n",
				    s->r_start, rv->r_start - 1,
				    rv->r_start, rv->r_end,
				    rv->r_end + 1, s->r_end));
				/*
				 * We are allocating in the middle.
				 */
				r = int_alloc_resource(M_NOWAIT);
				if (r == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				r->r_start = rv->r_end + 1;
				r->r_end = s->r_end;
				r->r_flags = s->r_flags;
				r->r_rm = rm;
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
				TAILQ_INSERT_AFTER(&rm->rm_list, rv, r,
				    r_link);
			} else if (s->r_start == rv->r_start) {
				DPRINTF(("allocating from the beginning\n"));
				/*
				 * We are allocating at the beginning.
				 */
				s->r_start = rv->r_end + 1;
				TAILQ_INSERT_BEFORE(s, rv, r_link);
			} else {
				DPRINTF(("allocating at the end\n"));
				/*
				 * We are allocating at the end.
				 */
				s->r_end = rv->r_start - 1;
				TAILQ_INSERT_AFTER(&rm->rm_list, s, rv,
				    r_link);
			}
			goto out;
		}
	}

	/*
	 * Now find an acceptable shared region, if the client's requirements
	 * allow sharing.  By our implementation restriction, a candidate
	 * region must match exactly by both size and sharing type in order
	 * to be considered compatible with the client's request.  (The
	 * former restriction could probably be lifted without too much
	 * additional work, but this does not seem warranted.)
	 */
	DPRINTF(("no unshared regions found\n"));
	if ((flags & RF_SHAREABLE) == 0)
		goto out;

	for (s = r; s && s->r_end <= end; s = TAILQ_NEXT(s, r_link)) {
		if (SHARE_TYPE(s->r_flags) == SHARE_TYPE(flags) &&
		    s->r_start >= start &&
		    (s->r_end - s->r_start + 1) == count &&
		    (s->r_start & amask) == 0 &&
		    ((s->r_start ^ s->r_end) & bmask) == 0) {
			rv = int_alloc_resource(M_NOWAIT);
			if (rv == NULL)
				goto out;
			rv->r_start = s->r_start;
			rv->r_end = s->r_end;
			rv->r_flags = new_rflags;
			rv->r_dev = dev;
			rv->r_rm = rm;
			if (s->r_sharehead == NULL) {
				s->r_sharehead = malloc(sizeof *s->r_sharehead,
				    M_RMAN, M_NOWAIT | M_ZERO);
				if (s->r_sharehead == NULL) {
					free(rv, M_RMAN);
					rv = NULL;
					goto out;
				}
				LIST_INIT(s->r_sharehead);
				LIST_INSERT_HEAD(s->r_sharehead, s,
				    r_sharelink);
				s->r_flags |= RF_FIRSTSHARE;
			}
			rv->r_sharehead = s->r_sharehead;
			LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
			goto out;
		}
	}
	/*
	 * We couldn't find anything.
	 */

out:
	mtx_unlock(rm->rm_mtx);
	return (rv == NULL ? NULL : &rv->r_r);
}

struct resource *
rman_reserve_resource(struct rman *rm, rman_res_t start, rman_res_t end,
    rman_res_t count, u_int flags, struct device *dev)
{

	return (rman_reserve_resource_bound(rm, start, end, count, 0, flags,
	    dev));
}
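
/*
 * Illustrative sketch of sharing (hypothetical callers): under the
 * "identical ranges only" rule stated at the top of this file, two
 * drivers can share, e.g., the same interrupt line:
 *
 *	r1 = rman_reserve_resource(rm, 7, 7, 1, RF_SHAREABLE, dev1);
 *	r2 = rman_reserve_resource(rm, 7, 7, 1, RF_SHAREABLE, dev2);
 *
 * Each caller receives its own struct resource, linked to the others
 * through r_sharehead.  A request for an overlapping-but-different
 * range, or with a different share type, fails instead of sharing.
 */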
int
rman_activate_resource(struct resource *re)
{
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	r->r_flags |= RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
	struct rman *rm;

	rm = r->__r_i->r_rm;
	mtx_lock(rm->rm_mtx);
	r->__r_i->r_flags &= ~RF_ACTIVE;
	mtx_unlock(rm->rm_mtx);
	return 0;
}

static int
int_rman_release_resource(struct rman *rm, struct resource_i *r)
{
	struct resource_i *s, *t;

	if (r->r_flags & RF_ACTIVE)
		r->r_flags &= ~RF_ACTIVE;

	/*
	 * Check for a sharing list first.  If there is one, then we don't
	 * have to think as hard.
	 */
	if (r->r_sharehead) {
		/*
		 * If a sharing list exists, then we know there are at
		 * least two sharers.
		 *
		 * If we are in the main resource list, appoint someone else.
		 */
		LIST_REMOVE(r, r_sharelink);
		s = LIST_FIRST(r->r_sharehead);
		if (r->r_flags & RF_FIRSTSHARE) {
			s->r_flags |= RF_FIRSTSHARE;
			TAILQ_INSERT_BEFORE(r, s, r_link);
			TAILQ_REMOVE(&rm->rm_list, r, r_link);
		}

		/*
		 * Make sure that the sharing list goes away completely
		 * if the resource is no longer being shared at all.
		 */
		if (LIST_NEXT(s, r_sharelink) == NULL) {
			free(s->r_sharehead, M_RMAN);
			s->r_sharehead = NULL;
			s->r_flags &= ~RF_FIRSTSHARE;
		}
		goto out;
	}

	/*
	 * Look at the adjacent resources in the list and see if our
	 * segment can be merged with any of them.  If either of the
	 * resources is allocated or is not exactly adjacent then they
	 * cannot be merged with our segment.
	 */
	s = TAILQ_PREV(r, resource_head, r_link);
	if (s != NULL && ((s->r_flags & RF_ALLOCATED) != 0 ||
	    s->r_end + 1 != r->r_start))
		s = NULL;
	t = TAILQ_NEXT(r, r_link);
	if (t != NULL && ((t->r_flags & RF_ALLOCATED) != 0 ||
	    r->r_end + 1 != t->r_start))
		t = NULL;

	if (s != NULL && t != NULL) {
		/*
		 * Merge all three segments.
		 */
		s->r_end = t->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
		TAILQ_REMOVE(&rm->rm_list, t, r_link);
		free(t, M_RMAN);
	} else if (s != NULL) {
		/*
		 * Merge previous segment with ours.
		 */
		s->r_end = r->r_end;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else if (t != NULL) {
		/*
		 * Merge next segment with ours.
		 */
		t->r_start = r->r_start;
		TAILQ_REMOVE(&rm->rm_list, r, r_link);
	} else {
		/*
		 * At this point, we know there is nothing we
		 * can potentially merge with, because on each
		 * side, there is either nothing there or what is
		 * there is still allocated.  In that case, we don't
		 * want to remove r from the list; we simply want to
		 * change it to an unallocated region and return
		 * without freeing anything.
		 */
		r->r_flags &= ~RF_ALLOCATED;
		r->r_dev = NULL;
		return 0;
	}

out:
	free(r, M_RMAN);
	return 0;
}

int
rman_release_resource(struct resource *re)
{
	int rv;
	struct resource_i *r;
	struct rman *rm;

	r = re->__r_i;
	rm = r->r_rm;
	mtx_lock(rm->rm_mtx);
	rv = int_rman_release_resource(rm, r);
	mtx_unlock(rm->rm_mtx);
	return (rv);
}

uint32_t
rman_make_alignment_flags(uint32_t size)
{
	int i;

	/*
	 * Find the highest bit set, and add one if more than one bit
	 * set.  We're effectively computing the ceil(log2(size)) here.
	 */
	for (i = 31; i > 0; i--)
		if ((1 << i) & size)
			break;
	if (~(1 << i) & size)
		i++;

	return (RF_ALIGNMENT_LOG2(i));
}

void
rman_set_start(struct resource *r, rman_res_t start)
{

	r->__r_i->r_start = start;
}

rman_res_t
rman_get_start(struct resource *r)
{

	return (r->__r_i->r_start);
}

void
rman_set_end(struct resource *r, rman_res_t end)
{

	r->__r_i->r_end = end;
}

rman_res_t
rman_get_end(struct resource *r)
{

	return (r->__r_i->r_end);
}

rman_res_t
rman_get_size(struct resource *r)
{

	return (r->__r_i->r_end - r->__r_i->r_start + 1);
}

u_int
rman_get_flags(struct resource *r)
{

	return (r->__r_i->r_flags);
}

void
rman_set_virtual(struct resource *r, void *v)
{

	r->__r_i->r_virtual = v;
}

void *
rman_get_virtual(struct resource *r)
{

	return (r->__r_i->r_virtual);
}

void
rman_set_bustag(struct resource *r, bus_space_tag_t t)
{

	r->r_bustag = t;
}

bus_space_tag_t
rman_get_bustag(struct resource *r)
{

	return (r->r_bustag);
}

void
rman_set_bushandle(struct resource *r, bus_space_handle_t h)
{

	r->r_bushandle = h;
}

bus_space_handle_t
rman_get_bushandle(struct resource *r)
{

	return (r->r_bushandle);
}

void
rman_set_rid(struct resource *r, int rid)
{

	r->__r_i->r_rid = rid;
}

int
rman_get_rid(struct resource *r)
{

	return (r->__r_i->r_rid);
}

void
rman_set_device(struct resource *r, struct device *dev)
{

	r->__r_i->r_dev = dev;
}

struct device *
rman_get_device(struct resource *r)
{

	return (r->__r_i->r_dev);
}

int
rman_is_region_manager(struct resource *r, struct rman *rm)
{

	return (r->__r_i->r_rm == rm);
}
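
/*
 * Worked example (illustrative) for rman_make_alignment_flags() above:
 * size 0x1000 has a single bit set, giving RF_ALIGNMENT_LOG2(12);
 * size 0x1800 has two bits set, so the result rounds up to
 * RF_ALIGNMENT_LOG2(13) -- i.e., ceil(log2(size)).
 */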
/*
 * Sysctl interface for scanning the resource lists.
 *
 * We take three input parameters: the bus data generation count, the
 * index into the list of resource managers, and the resource offset
 * into that manager's list (with -1 selecting the manager itself).
 */
static int
sysctl_rman(SYSCTL_HANDLER_ARGS)
{
	int			*name = (int *)arg1;
	u_int			namelen = arg2;
	int			rman_idx, res_idx;
	struct rman		*rm;
	struct resource_i	*res;
	struct resource_i	*sres;
	struct u_rman		urm;
	struct u_resource	ures;
	int			error;

	if (namelen != 3)
		return (EINVAL);

	if (bus_data_generation_check(name[0]))
		return (EINVAL);
	rman_idx = name[1];
	res_idx = name[2];

	/*
	 * Find the indexed resource manager.
	 */
	mtx_lock(&rman_mtx);
	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		if (rman_idx-- == 0)
			break;
	}
	mtx_unlock(&rman_mtx);
	if (rm == NULL)
		return (ENOENT);

	/*
	 * If the resource index is -1, we want details on the
	 * resource manager.
	 */
	if (res_idx == -1) {
		bzero(&urm, sizeof(urm));
		urm.rm_handle = (uintptr_t)rm;
		if (rm->rm_descr != NULL)
			strlcpy(urm.rm_descr, rm->rm_descr, RM_TEXTLEN);
		urm.rm_start = rm->rm_start;
		urm.rm_size = rm->rm_end - rm->rm_start + 1;
		urm.rm_type = rm->rm_type;

		error = SYSCTL_OUT(req, &urm, sizeof(urm));
		return (error);
	}

	/*
	 * Find the indexed resource and return it.
	 */
	mtx_lock(rm->rm_mtx);
	TAILQ_FOREACH(res, &rm->rm_list, r_link) {
		if (res->r_sharehead != NULL) {
			LIST_FOREACH(sres, res->r_sharehead, r_sharelink)
				if (res_idx-- == 0) {
					res = sres;
					goto found;
				}
		} else if (res_idx-- == 0)
			goto found;
	}
	mtx_unlock(rm->rm_mtx);
	return (ENOENT);

found:
	bzero(&ures, sizeof(ures));
	ures.r_handle = (uintptr_t)res;
	ures.r_parent = (uintptr_t)res->r_rm;
	ures.r_device = (uintptr_t)res->r_dev;
	if (res->r_dev != NULL) {
		if (device_get_name(res->r_dev) != NULL) {
			snprintf(ures.r_devname, RM_TEXTLEN,
			    "%s%d",
			    device_get_name(res->r_dev),
			    device_get_unit(res->r_dev));
		} else {
			strlcpy(ures.r_devname, "nomatch",
			    RM_TEXTLEN);
		}
	} else {
		ures.r_devname[0] = '\0';
	}
	ures.r_start = res->r_start;
	ures.r_size = res->r_end - res->r_start + 1;
	ures.r_flags = res->r_flags;

	mtx_unlock(rm->rm_mtx);
	error = SYSCTL_OUT(req, &ures, sizeof(ures));
	return (error);
}

static SYSCTL_NODE(_hw_bus, OID_AUTO, rman, CTLFLAG_RD, sysctl_rman,
    "kernel resource manager");
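
/*
 * Illustrative sketch of a hypothetical userland consumer of the node
 * above (devinfo(8) is the expected real one); the MIB layout follows
 * the handler's { generation, rman index, resource index } arguments:
 *
 *	int mib[6];
 *	size_t len = nitems(mib);
 *	struct u_rman urm;
 *	size_t urmlen = sizeof(urm);
 *
 *	sysctlnametomib("hw.bus.rman", mib, &len);
 *	mib[len + 0] = generation;	(obtained separately)
 *	mib[len + 1] = 0;		(first resource manager)
 *	mib[len + 2] = -1;		(-1: describe the rman itself)
 *	sysctl(mib, len + 3, &urm, &urmlen, NULL, 0);
 */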
#ifdef DDB
static void
dump_rman_header(struct rman *rm)
{

	if (db_pager_quit)
		return;
	db_printf("rman %p: %s (0x%jx-0x%jx full range)\n",
	    rm, rm->rm_descr, (rman_res_t)rm->rm_start,
	    (rman_res_t)rm->rm_end);
}

static void
dump_rman(struct rman *rm)
{
	struct resource_i *r;
	const char *devname;

	if (db_pager_quit)
		return;
	TAILQ_FOREACH(r, &rm->rm_list, r_link) {
		if (r->r_dev != NULL) {
			devname = device_get_nameunit(r->r_dev);
			if (devname == NULL)
				devname = "nomatch";
		} else
			devname = NULL;
		db_printf("    0x%jx-0x%jx (RID=%d) ",
		    r->r_start, r->r_end, r->r_rid);
		if (devname != NULL)
			db_printf("(%s)\n", devname);
		else
			db_printf("----\n");
		if (db_pager_quit)
			return;
	}
}

DB_SHOW_COMMAND(rman, db_show_rman)
{

	if (have_addr) {
		dump_rman_header((struct rman *)addr);
		dump_rman((struct rman *)addr);
	}
}

DB_SHOW_COMMAND(rmans, db_show_rmans)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
	}
}

DB_SHOW_ALL_COMMAND(rman, db_show_all_rman)
{
	struct rman *rm;

	TAILQ_FOREACH(rm, &rman_head, rm_link) {
		dump_rman_header(rm);
		dump_rman(rm);
	}
}
DB_SHOW_ALIAS(allrman, db_show_all_rman);
#endif
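
/*
 * Illustrative note on the DDB commands above: "show rman <addr>" dumps
 * the resource manager at <addr>, "show rmans" lists the header of
 * every manager, and "show all rman" (alias "show allrman") dumps every
 * manager in full.
 */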