/*
 * Copyright 1998 Massachusetts Institute of Technology
 *
 * Permission to use, copy, modify, and distribute this software and
 * its documentation for any purpose and without fee is hereby
 * granted, provided that both the above copyright notice and this
 * permission notice appear in all copies, that both the above
 * copyright notice and this permission notice appear in all
 * supporting documentation, and that the name of M.I.T. not be used
 * in advertising or publicity pertaining to distribution of the
 * software without specific, written prior permission.  M.I.T. makes
 * no representations about the suitability of this software for any
 * purpose.  It is provided "as is" without express or implied
 * warranty.
 *
 * THIS SOFTWARE IS PROVIDED BY M.I.T. ``AS IS''.  M.I.T. DISCLAIMS
 * ALL EXPRESS OR IMPLIED WARRANTIES WITH REGARD TO THIS SOFTWARE,
 * INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE. IN NO EVENT
 * SHALL M.I.T. BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
 * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * The kernel resource manager.  This code is responsible for keeping track
 * of hardware resources which are apportioned out to various drivers.
 * It does not actually assign those resources, and it is not expected
 * that end-device drivers will call into this code directly.  Rather,
 * the code which implements the buses that those devices are attached to,
 * and the code which manages CPU resources, will call this code, and the
 * end-device drivers will make upcalls to that code to actually perform
 * the allocation.
 *
 * There are two sorts of resources managed by this code.  The first is
 * the more familiar array (RMAN_ARRAY) type; resources in this class
 * consist of a sequence of individually-allocatable objects which have
 * been numbered in some well-defined order.  Most of the resources
 * are of this type, as it is the most familiar.  The second type is
 * called a gauge (RMAN_GAUGE), and models fungible resources (i.e.,
 * resources in which each instance is indistinguishable from every
 * other instance).  The principal anticipated application of gauges
 * is in the context of power consumption, where a bus may have a
 * specific power budget which all attached devices share.  RMAN_GAUGE
 * is not implemented yet.
 *
 * For array resources, we make one simplifying assumption: two clients
 * sharing the same resource must use the same range of indices.  That
 * is to say, sharing of overlapping-but-not-identical regions is not
 * permitted.
 */
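/*
 * Illustrative sketch only (kept under #if 0): how a hypothetical bus
 * driver might drive this interface.  The "foo" names and the interrupt
 * line range are invented for the example; rm_type, rm_descr, and the
 * entry points are the ones defined in this file and in <sys/rman.h>.
 */
#if 0
static struct rman foo_irq_rman;

static int
foo_bus_example(struct device *dev)
{
        struct resource *res;

        foo_irq_rman.rm_type = RMAN_ARRAY;
        foo_irq_rman.rm_descr = "foo interrupt lines";
        if (rman_init(&foo_irq_rman))
                return ENOMEM;
        /* This bus arbitrates interrupt lines 0 through 15. */
        if (rman_manage_region(&foo_irq_rman, 0, 15))
                return ENOMEM;

        /* Reserve any one line in [3, 7], activating it atomically. */
        res = rman_reserve_resource(&foo_irq_rman, 3, 7, 1, RF_ACTIVE, dev);
        if (res == 0)
                return EBUSY;

        /* ... program the hardware using res->r_start ... */

        return rman_release_resource(res);
}
#endif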
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/bus.h>            /* XXX debugging */
#include <machine/bus.h>
#include <sys/rman.h>

static MALLOC_DEFINE(M_RMAN, "rman", "Resource manager");

struct rman_head rman_head;
#ifndef NULL_SIMPLELOCKS
static struct simplelock rman_lock; /* mutex to protect rman_head */
#endif
static int int_rman_activate_resource(struct rman *rm, struct resource *r,
                                      struct resource **whohas);
static int int_rman_deactivate_resource(struct resource *r);
static int int_rman_release_resource(struct rman *rm, struct resource *r);

/* True when a CIRCLEQ iteration has run off the end of the list. */
#define CIRCLEQ_TERMCOND(var, head)     (var == (void *)&(head))

int
rman_init(struct rman *rm)
{
        static int once;

        if (once == 0) {
                once = 1;
                TAILQ_INIT(&rman_head);
                simple_lock_init(&rman_lock);
        }

        if (rm->rm_type == RMAN_UNINIT)
                panic("rman_init");
        if (rm->rm_type == RMAN_GAUGE)
                panic("implement RMAN_GAUGE");

        CIRCLEQ_INIT(&rm->rm_list);
        rm->rm_slock = malloc(sizeof *rm->rm_slock, M_RMAN, M_NOWAIT);
        if (rm->rm_slock == 0)
                return ENOMEM;
        simple_lock_init(rm->rm_slock);

        simple_lock(&rman_lock);
        TAILQ_INSERT_TAIL(&rman_head, rm, rm_link);
        simple_unlock(&rman_lock);
        return 0;
}

/*
 * NB: this interface is not robust against programming errors which
 * add multiple copies of the same region.
 */
int
rman_manage_region(struct rman *rm, u_long start, u_long end)
{
        struct resource *r, *s;

        r = malloc(sizeof *r, M_RMAN, M_NOWAIT);
        if (r == 0)
                return ENOMEM;
        bzero(r, sizeof *r);
        r->r_sharehead = 0;
        r->r_start = start;
        r->r_end = end;
        r->r_flags = 0;
        r->r_dev = 0;
        r->r_rm = rm;

        simple_lock(rm->rm_slock);
        /* Skip entries which lie entirely before the new region. */
        for (s = rm->rm_list.cqh_first;
             !CIRCLEQ_TERMCOND(s, rm->rm_list) && s->r_end < r->r_start;
             s = s->r_link.cqe_next)
                ;

        if (CIRCLEQ_TERMCOND(s, rm->rm_list)) {
                CIRCLEQ_INSERT_TAIL(&rm->rm_list, r, r_link);
        } else {
                CIRCLEQ_INSERT_BEFORE(&rm->rm_list, s, r, r_link);
        }

        simple_unlock(rm->rm_slock);
        return 0;
}
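/*
 * Worked illustration of the insertion above (addresses invented):
 * after
 *
 *      rman_manage_region(rm, 0x3f8, 0x3ff);
 *      rman_manage_region(rm, 0x2f8, 0x2ff);
 *
 * the CIRCLEQ reads [0x2f8, 0x2ff], [0x3f8, 0x3ff], i.e. sorted by
 * address.  Per the NB above, a third call overlapping either range
 * would be inserted anyway and silently break this invariant, so
 * callers must never register overlapping regions.
 */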
int
rman_fini(struct rman *rm)
{
        struct resource *r;

        simple_lock(rm->rm_slock);
        for (r = rm->rm_list.cqh_first; !CIRCLEQ_TERMCOND(r, rm->rm_list);
             r = r->r_link.cqe_next) {
                if (r->r_flags & RF_ALLOCATED) {
                        simple_unlock(rm->rm_slock);
                        return EBUSY;
                }
        }

        /*
         * There really should only be one of these if we are in this
         * state and the code is working properly, but it can't hurt.
         */
        for (r = rm->rm_list.cqh_first; !CIRCLEQ_TERMCOND(r, rm->rm_list);
             r = rm->rm_list.cqh_first) {
                CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
                free(r, M_RMAN);
        }
        simple_unlock(rm->rm_slock);
        simple_lock(&rman_lock);
        TAILQ_REMOVE(&rman_head, rm, rm_link);
        simple_unlock(&rman_lock);
        free(rm->rm_slock, M_RMAN);

        return 0;
}
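/*
 * Teardown sketch (hypothetical detach path, reusing foo_irq_rman from
 * the example above): rman_fini() refuses to destroy a manager while
 * any region is still allocated, so all resources must be released
 * first.
 */
#if 0
static void
foo_bus_detach_example(struct resource *res)
{
        rman_release_resource(res);
        if (rman_fini(&foo_irq_rman) == EBUSY)
                printf("foo: manager still has allocated regions\n");
}
#endif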
struct resource *
rman_reserve_resource(struct rman *rm, u_long start, u_long end, u_long count,
                      u_int flags, struct device *dev)
{
        u_int want_activate;
        struct resource *r, *s, *rv;
        u_long rstart, rend;

        rv = 0;

#ifdef RMAN_DEBUG
        printf("rman_reserve_resource: <%s> request: [%#lx, %#lx], length "
               "%#lx, flags %u, device %s%d\n", rm->rm_descr, start, end,
               count, flags, device_get_name(dev), device_get_unit(dev));
#endif /* RMAN_DEBUG */
        want_activate = (flags & RF_ACTIVE);
        flags &= ~RF_ACTIVE;

        simple_lock(rm->rm_slock);

        for (r = rm->rm_list.cqh_first;
             !CIRCLEQ_TERMCOND(r, rm->rm_list) && r->r_end < start;
             r = r->r_link.cqe_next)
                ;

        if (CIRCLEQ_TERMCOND(r, rm->rm_list)) {
#ifdef RMAN_DEBUG
                printf("could not find a region\n");
#endif /* RMAN_DEBUG */
                goto out;
        }

        /*
         * First try to find an acceptable totally-unshared region.
         */
        for (s = r; !CIRCLEQ_TERMCOND(s, rm->rm_list);
             s = s->r_link.cqe_next) {
#ifdef RMAN_DEBUG
                printf("considering [%#lx, %#lx]\n", s->r_start, s->r_end);
#endif /* RMAN_DEBUG */
                if (s->r_start > end) {
#ifdef RMAN_DEBUG
                        printf("s->r_start (%#lx) > end (%#lx)\n",
                               s->r_start, end);
#endif /* RMAN_DEBUG */
                        break;
                }
                if (s->r_flags & RF_ALLOCATED) {
#ifdef RMAN_DEBUG
                        printf("region is allocated\n");
#endif /* RMAN_DEBUG */
                        continue;
                }
                rstart = max(s->r_start, start);
                rend = min(s->r_end, max(start + count, end));
#ifdef RMAN_DEBUG
                printf("truncated region: [%#lx, %#lx]; size %#lx "
                       "(requested %#lx)\n",
                       rstart, rend, (rend - rstart + 1), count);
#endif /* RMAN_DEBUG */

                if ((rend - rstart + 1) >= count) {
#ifdef RMAN_DEBUG
                        printf("candidate region: [%#lx, %#lx], size %#lx\n",
                               rstart, rend, (rend - rstart + 1));
#endif /* RMAN_DEBUG */
                        if ((s->r_end - s->r_start + 1) == count) {
#ifdef RMAN_DEBUG
                                printf("candidate region is entire chunk\n");
#endif /* RMAN_DEBUG */
                                rv = s;
                                rv->r_flags |= RF_ALLOCATED | flags;
                                rv->r_dev = dev;
                                goto out;
                        }

                        /*
                         * If s->r_start < rstart and
                         * s->r_end > rstart + count - 1, then
                         * we need to split the region into three pieces
                         * (the middle one will get returned to the user).
                         * Otherwise, we are allocating at either the
                         * beginning or the end of s, so we only need to
                         * split it in two.  The first case requires
                         * two new allocations; the second requires but one.
                         */
                        rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT);
                        if (rv == 0)
                                goto out;
                        bzero(rv, sizeof *rv);
                        rv->r_start = rstart;
                        rv->r_end = rstart + count - 1;
                        rv->r_flags = flags | RF_ALLOCATED;
                        rv->r_dev = dev;
                        rv->r_sharehead = 0;
                        rv->r_rm = rm;

                        if (s->r_start < rv->r_start && s->r_end > rv->r_end) {
#ifdef RMAN_DEBUG
                                printf("splitting region in three parts: "
                                       "[%#lx, %#lx]; [%#lx, %#lx]; "
                                       "[%#lx, %#lx]\n",
                                       s->r_start, rv->r_start - 1,
                                       rv->r_start, rv->r_end,
                                       rv->r_end + 1, s->r_end);
#endif /* RMAN_DEBUG */
                                /*
                                 * We are allocating in the middle.
                                 */
                                r = malloc(sizeof *r, M_RMAN, M_NOWAIT);
                                if (r == 0) {
                                        free(rv, M_RMAN);
                                        rv = 0;
                                        goto out;
                                }
                                bzero(r, sizeof *r);
                                r->r_start = rv->r_end + 1;
                                r->r_end = s->r_end;
                                r->r_flags = s->r_flags;
                                r->r_dev = 0;
                                r->r_sharehead = 0;
                                r->r_rm = rm;
                                s->r_end = rv->r_start - 1;
                                CIRCLEQ_INSERT_AFTER(&rm->rm_list, s, rv,
                                                     r_link);
                                CIRCLEQ_INSERT_AFTER(&rm->rm_list, rv, r,
                                                     r_link);
                        } else if (s->r_start == rv->r_start) {
#ifdef RMAN_DEBUG
                                printf("allocating from the beginning\n");
#endif /* RMAN_DEBUG */
                                /*
                                 * We are allocating at the beginning.
                                 */
                                s->r_start = rv->r_end + 1;
                                CIRCLEQ_INSERT_BEFORE(&rm->rm_list, s, rv,
                                                      r_link);
                        } else {
#ifdef RMAN_DEBUG
                                printf("allocating at the end\n");
#endif /* RMAN_DEBUG */
                                /*
                                 * We are allocating at the end.
                                 */
                                s->r_end = rv->r_start - 1;
                                CIRCLEQ_INSERT_AFTER(&rm->rm_list, s, rv,
                                                     r_link);
                        }
                        goto out;
                }
        }

        /*
         * Now find an acceptable shared region, if the client's requirements
         * allow sharing.  By our implementation restriction, a candidate
         * region must match exactly by both size and sharing type in order
         * to be considered compatible with the client's request.  (The
         * former restriction could probably be lifted without too much
         * additional work, but this does not seem warranted.)
         */
#ifdef RMAN_DEBUG
        printf("no unshared regions found\n");
#endif /* RMAN_DEBUG */
        if ((flags & (RF_SHAREABLE | RF_TIMESHARE)) == 0)
                goto out;

        for (s = r; !CIRCLEQ_TERMCOND(s, rm->rm_list);
             s = s->r_link.cqe_next) {
                if (s->r_start > end)
                        break;
                if ((s->r_flags & flags) != flags)
                        continue;
                rstart = max(s->r_start, start);
                rend = min(s->r_end, max(start + count, end));
                if (s->r_start >= start && s->r_end <= end
                    && (s->r_end - s->r_start + 1) == count) {
                        rv = malloc(sizeof *rv, M_RMAN, M_NOWAIT);
                        if (rv == 0)
                                goto out;
                        bzero(rv, sizeof *rv);
                        rv->r_start = s->r_start;
                        rv->r_end = s->r_end;
                        rv->r_flags = s->r_flags &
                                (RF_ALLOCATED | RF_SHAREABLE | RF_TIMESHARE);
                        rv->r_dev = dev;
                        rv->r_rm = rm;
                        if (s->r_sharehead == 0) {
                                s->r_sharehead = malloc(sizeof *s->r_sharehead,
                                                        M_RMAN, M_NOWAIT);
                                if (s->r_sharehead == 0) {
                                        free(rv, M_RMAN);
                                        rv = 0;
                                        goto out;
                                }
                                bzero(s->r_sharehead, sizeof *s->r_sharehead);
                                LIST_INIT(s->r_sharehead);
                                LIST_INSERT_HEAD(s->r_sharehead, s,
                                                 r_sharelink);
                                s->r_flags |= RF_FIRSTSHARE;
                        }
                        rv->r_sharehead = s->r_sharehead;
                        LIST_INSERT_HEAD(s->r_sharehead, rv, r_sharelink);
                        goto out;
                }
        }

        /*
         * We couldn't find anything.
         */
out:
        /*
         * If the user specified RF_ACTIVE in the initial flags,
         * which is reflected in `want_activate', we attempt to atomically
         * activate the resource.  If this fails, we release the resource
         * and indicate overall failure.  (This behavior probably doesn't
         * make sense for RF_TIMESHARE-type resources.)
         */
        if (rv && want_activate) {
                struct resource *whohas;
                if (int_rman_activate_resource(rm, rv, &whohas)) {
                        int_rman_release_resource(rm, rv);
                        rv = 0;
                }
        }

        simple_unlock(rm->rm_slock);
        return (rv);
}
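/*
 * Worked example of the splitting logic above (addresses invented):
 * reserving [0x110, 0x11f] (count 0x10) out of a free region
 * [0x100, 0x1ff] yields three list entries,
 *
 *      [0x100, 0x10f] free; [0x110, 0x11f] allocated; [0x120, 0x1ff] free,
 *
 * at the cost of one new allocation plus the returned resource.  A
 * request starting at 0x100 or ending at 0x1ff instead splits the
 * region in two, matching the "beginning" and "end" cases above.
 */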
static int
int_rman_activate_resource(struct rman *rm, struct resource *r,
                           struct resource **whohas)
{
        struct resource *s;
        int ok;

        /*
         * If we are not timesharing, then there is nothing much to do.
         * If we already have the resource, then there is nothing at all to do.
         * If we are not on a sharing list with anybody else, then there is
         * little to do.
         */
        if ((r->r_flags & RF_TIMESHARE) == 0
            || (r->r_flags & RF_ACTIVE) != 0
            || r->r_sharehead == 0) {
                r->r_flags |= RF_ACTIVE;
                return 0;
        }

        ok = 1;
        for (s = r->r_sharehead->lh_first; s && ok;
             s = s->r_sharelink.le_next) {
                if ((s->r_flags & RF_ACTIVE) != 0) {
                        ok = 0;
                        *whohas = s;
                }
        }
        if (ok) {
                r->r_flags |= RF_ACTIVE;
                return 0;
        }
        return EBUSY;
}

int
rman_activate_resource(struct resource *r)
{
        int rv;
        struct resource *whohas;
        struct rman *rm;

        rm = r->r_rm;
        simple_lock(rm->rm_slock);
        rv = int_rman_activate_resource(rm, r, &whohas);
        simple_unlock(rm->rm_slock);
        return rv;
}

int
rman_await_resource(struct resource *r, int pri, int timo)
{
        int rv, s;
        struct resource *whohas;
        struct rman *rm;

        rm = r->r_rm;
        for (;;) {
                simple_lock(rm->rm_slock);
                rv = int_rman_activate_resource(rm, r, &whohas);
                if (rv != EBUSY)
                        return (rv);    /* returns with simplelock */

                if (r->r_sharehead == 0)
                        panic("rman_await_resource");
                /*
                 * splhigh hopefully will prevent a race between
                 * simple_unlock and tsleep where a process
                 * could conceivably get in and release the resource
                 * before we have a chance to sleep on it.
                 */
                s = splhigh();
                whohas->r_flags |= RF_WANTED;
                simple_unlock(rm->rm_slock);
                rv = tsleep(r->r_sharehead, pri, "rmwait", timo);
                if (rv) {
                        splx(s);
                        return rv;
                }
                simple_lock(rm->rm_slock);
                splx(s);
        }
}

static int
int_rman_deactivate_resource(struct resource *r)
{
        struct rman *rm;

        rm = r->r_rm;
        r->r_flags &= ~RF_ACTIVE;
        if (r->r_flags & RF_WANTED) {
                r->r_flags &= ~RF_WANTED;
                wakeup(r->r_sharehead);
        }
        return 0;
}

int
rman_deactivate_resource(struct resource *r)
{
        struct rman *rm;

        rm = r->r_rm;
        simple_lock(rm->rm_slock);
        int_rman_deactivate_resource(r);
        simple_unlock(rm->rm_slock);
        return 0;
}
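/*
 * Usage sketch for a timeshared resource (hypothetical caller, error
 * handling elided).  Note that rman_await_resource() returns holding
 * rm_slock on success, per the comment above, so the caller must drop
 * the lock before touching the hardware.
 */
#if 0
static int
foo_use_timeshared_example(struct resource *res)
{
        int error;

        error = rman_await_resource(res, PZERO, 0);
        if (error)
                return error;
        simple_unlock(res->r_rm->rm_slock);

        /* ... exclusive access to the shared hardware here ... */

        rman_deactivate_resource(res);
        return 0;
}
#endif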
static int
int_rman_release_resource(struct rman *rm, struct resource *r)
{
        struct resource *s, *t;

        if (r->r_flags & RF_ACTIVE)
                int_rman_deactivate_resource(r);

        /*
         * Check for a sharing list first.  If there is one, then we don't
         * have to think as hard.
         */
        if (r->r_sharehead) {
                /*
                 * If a sharing list exists, then we know there are at
                 * least two sharers.
                 *
                 * If we are in the main circleq, appoint someone else.
                 */
                LIST_REMOVE(r, r_sharelink);
                s = r->r_sharehead->lh_first;
                if (r->r_flags & RF_FIRSTSHARE) {
                        s->r_flags |= RF_FIRSTSHARE;
                        CIRCLEQ_INSERT_BEFORE(&rm->rm_list, r, s, r_link);
                        CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
                }

                /*
                 * Make sure that the sharing list goes away completely
                 * if the resource is no longer being shared at all.
                 */
                if (s->r_sharelink.le_next == 0) {
                        free(s->r_sharehead, M_RMAN);
                        s->r_sharehead = 0;
                        s->r_flags &= ~RF_FIRSTSHARE;
                }
                goto out;
        }

        /*
         * Look at the adjacent resources in the list and see if our
         * segment can be merged with any of them.
         */
        s = r->r_link.cqe_prev;
        t = r->r_link.cqe_next;

        if (s != (void *)&rm->rm_list && (s->r_flags & RF_ALLOCATED) == 0
            && t != (void *)&rm->rm_list && (t->r_flags & RF_ALLOCATED) == 0) {
                /*
                 * Merge all three segments.
                 */
                s->r_end = t->r_end;
                CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
                CIRCLEQ_REMOVE(&rm->rm_list, t, r_link);
                free(t, M_RMAN);
        } else if (s != (void *)&rm->rm_list
                   && (s->r_flags & RF_ALLOCATED) == 0) {
                /*
                 * Merge previous segment with ours.
                 */
                s->r_end = r->r_end;
                CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
        } else if (t != (void *)&rm->rm_list
                   && (t->r_flags & RF_ALLOCATED) == 0) {
                /*
                 * Merge next segment with ours.
                 */
                t->r_start = r->r_start;
                CIRCLEQ_REMOVE(&rm->rm_list, r, r_link);
        } else {
                /*
                 * At this point, we know there is nothing we
                 * can potentially merge with, because on each
                 * side, there is either nothing there or what is
                 * there is still allocated.  In that case, we don't
                 * want to remove r from the list; we simply want to
                 * change it to an unallocated region and return
                 * without freeing anything.
                 */
                r->r_flags &= ~RF_ALLOCATED;
                return 0;
        }

out:
        free(r, M_RMAN);
        return 0;
}

int
rman_release_resource(struct resource *r)
{
        int rv;
        struct rman *rm = r->r_rm;

        simple_lock(rm->rm_slock);
        rv = int_rman_release_resource(rm, r);
        simple_unlock(rm->rm_slock);
        return (rv);
}