/*-
 * Copyright (c) 2004 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

/*
 * Unit number allocation functions.
 *
 * These functions implement a mixed run-length/bitmap management of unit
 * number spaces.
 *
 * Allocation is always lowest free number first.
 *
 * Worst case memory usage (disregarding boundary effects in the low end)
 * is two bits for each slot in the unit number space.  (For a full
 * [0 ... UINT_MAX] space that is still a lot, of course.)
 *
 * The typical case, where no unit numbers are freed, is managed in a
 * constant-sized memory footprint of:
 *	sizeof(struct unrhdr) + 2 * sizeof (struct unr) == 56 bytes on i386
 *
 * The caller must provide locking.
 *
 * A userland test program is included.
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/bitstring.h>

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/systm.h>

/*
 * In theory it would be smarter to allocate the individual blocks
 * with the zone allocator, but at this time the expectation is that
 * there will typically not even be enough allocations to fill a single
 * page, so we stick with malloc for now.
 */
static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");

#define	Malloc(foo)	malloc(foo, M_UNIT, M_WAITOK | M_ZERO)
#define	Free(foo)	free(foo, M_UNIT)

#else /* ...USERLAND */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>	/* memset() in the test driver below */

#define KASSERT(cond, arg) \
	do { \
		if (!(cond)) { \
			printf arg; \
			exit (1); \
		} \
	} while (0)

#define	Malloc(foo)	calloc(foo, 1)
#define	Free(foo)	free(foo)

#endif
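
/*
 * Sketch of typical use, shown as a comment rather than as compiled
 * code; the range 0..999 is only an example.  The caller provides all
 * locking, and in the kernel these functions may sleep because memory
 * is allocated with M_WAITOK.
 *
 *	struct unrhdr *uh;
 *	u_int unit;
 *
 *	uh = new_unrhdr(0, 999);	// manage unit numbers 0..999
 *	unit = alloc_unr(uh);		// always the lowest free number
 *	...
 *	free_unr(uh, unit);		// return the number to the space
 *	delete_unrhdr(uh);		// only after every unit has been freed
 */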

/*
 * This is our basic building block.
 *
 * It can be used in three different ways depending on the value of the ptr
 * element:
 *	If ptr is NULL, it represents a run of free items.
 *	If ptr points to the unrhdr it represents a run of allocated items.
 *	Otherwise it points to a bitstring of allocated items.
 *
 * For runs the len field is the length of the run.
 * For bitmaps the len field represents the number of allocated items.
 *
 * The bitmap is the same size as struct unr to optimize memory management.
 */
struct unr {
	TAILQ_ENTRY(unr)	list;
	u_int			len;
	void			*ptr;
};

/* Number of bits in the bitmap */
#define	NBITS	(sizeof(struct unr) * 8)

/* Header element for a unr number space. */
struct unrhdr {
	TAILQ_HEAD(unrhd,unr)	head;
	u_int			low;	/* Lowest item */
	u_int			high;	/* Highest item */
	u_int			busy;	/* Count of allocated items */
	u_int			alloc;	/* Count of memory allocations */
};

#if defined(DIAGNOSTIC) || !defined(_KERNEL)
/*
 * Consistency check function.
 *
 * Checks the internal consistency as well as we can.
 *
 * Called at all boundaries of this API.
 */
static void
check_unrhdr(struct unrhdr *uh, int line)
{
	struct unr *up;
	u_int x, y, z, w;

	y = 0;
	z = 0;
	TAILQ_FOREACH(up, &uh->head, list) {
		z++;
		if (up->ptr != uh && up->ptr != NULL) {
			z++;
			w = 0;
			for (x = 0; x < NBITS; x++)
				if (bit_test((bitstr_t *)up->ptr, x))
					w++;
			KASSERT (w == up->len,
			    ("UNR inconsistency: bits %u found %u\n",
			    up->len, w));
		}
		if (up->ptr != NULL)
			y += up->len;
	}
	KASSERT (y == uh->busy,
	    ("UNR inconsistency: items %u found %u (line %d)\n",
	    uh->busy, y, line));
	KASSERT (z == uh->alloc,
	    ("UNR inconsistency: chunks %u found %u (line %d)\n",
	    uh->alloc, z, line));
}

#else

static __inline void
check_unrhdr(struct unrhdr *uh, int line)
{

}

#endif

/*
 * Userland memory management.  Just use calloc and keep track of how
 * many elements we have allocated for check_unrhdr().
 */
static __inline void *
new_unr(struct unrhdr *uh)
{
	uh->alloc++;
	return (Malloc(sizeof (struct unr)));
}

static __inline void
delete_unr(struct unrhdr *uh, void *ptr)
{
	uh->alloc--;
	Free(ptr);
}

/*
 * Allocate a new unrheader set.
 *
 * Highest and lowest valid values given as parameters.
 */
struct unrhdr *
new_unrhdr(u_int low, u_int high)
{
	struct unrhdr *uh;
	struct unr *up;

	KASSERT(low <= high,
	    ("UNR: use error: new_unrhdr(%u, %u)", low, high));
	uh = Malloc(sizeof *uh);
	TAILQ_INIT(&uh->head);
	uh->low = low;
	uh->high = high;
	up = new_unr(uh);
	up->len = 1 + (high - low);
	up->ptr = NULL;
	TAILQ_INSERT_HEAD(&uh->head, up, list);
	check_unrhdr(uh, __LINE__);
	return (uh);
}

void
delete_unrhdr(struct unrhdr *uh)
{

	KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));

	/* We should have a single unr only */
	delete_unr(uh, TAILQ_FIRST(&uh->head));
	KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
	Free(uh);
}

/*
 * See if a given unr should be collapsed with a neighbor.
 */
static void
collapse_unr(struct unrhdr *uh, struct unr *up)
{
	struct unr *upp;

	upp = TAILQ_PREV(up, unrhd, list);
	if (upp != NULL && up->ptr == upp->ptr) {
		up->len += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
	}
	upp = TAILQ_NEXT(up, list);
	if (upp != NULL && up->ptr == upp->ptr) {
		up->len += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
	}
}
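
/*
 * A short worked example (illustrative only) of how a space is encoded
 * as a list of the elements above, written here as "[ptr, len]" pairs.
 * Assume new_unrhdr(0, 999):
 *
 *	initially:			[NULL, 1000]
 *	after alloc_unr() twice (0, 1):	[uh, 2] [NULL, 998]
 *	after free_unr(uh, 1):		[uh, 1] [NULL, 999]
 *	after free_unr(uh, 0):		[NULL, 1000]
 *
 * The last step is collapse_unr() merging the newly freed element with
 * its free neighbor back into a single run.
 */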

/*
 * Allocate a free unr.
 */
u_int
alloc_unr(struct unrhdr *uh)
{
	struct unr *up, *upp;
	u_int x;
	int y;

	check_unrhdr(uh, __LINE__);
	x = uh->low;
	/*
	 * We can always allocate from one of the first two unrs on the list.
	 * The first one is likely an allocated run, but the second has to
	 * be a free run or a bitmap.
	 */
	up = TAILQ_FIRST(&uh->head);
	KASSERT(up != NULL, ("UNR empty list"));
	if (up->ptr == uh) {
		x += up->len;
		up = TAILQ_NEXT(up, list);
	}
	KASSERT(up != NULL, ("UNR Ran out of numbers")); /* XXX */
	KASSERT(up->ptr != uh, ("UNR second element allocated"));

	if (up->ptr != NULL) {
		/* Bitmap unr */
		KASSERT(up->len < NBITS, ("UNR bitmap confusion"));
		bit_ffc((bitstr_t *)up->ptr, NBITS, &y);
		KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
		bit_set((bitstr_t *)up->ptr, y);
		up->len++;
		uh->busy++;
		if (up->len == NBITS) {
			/* The unr is all allocated, drop bitmap */
			delete_unr(uh, up->ptr);
			up->ptr = uh;
			collapse_unr(uh, up);
		}
		check_unrhdr(uh, __LINE__);
		return (x + y);
	}

	if (up->len == 1) {
		/* Run of one free item, grab it */
		up->ptr = uh;
		uh->busy++;
		collapse_unr(uh, up);
		check_unrhdr(uh, __LINE__);
		return (x);
	}

	/*
	 * Slice the first item off into a preceding allocated run, even if
	 * we have to create it.  Because allocation is always lowest free
	 * number first, we know the preceding element (if any) to be
	 * an allocated run.
	 */
	upp = TAILQ_PREV(up, unrhd, list);
	if (upp == NULL) {
		upp = new_unr(uh);
		upp->len = 0;
		upp->ptr = uh;
		TAILQ_INSERT_BEFORE(up, upp, list);
	}
	KASSERT(upp->ptr == uh, ("UNR list corruption"));
	upp->len++;
	up->len--;
	uh->busy++;
	check_unrhdr(uh, __LINE__);
	return (x);
}
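
/*
 * Because allocation is lowest free number first, a freed number is
 * handed out again as soon as it is the smallest free one.  A minimal
 * sketch, assuming a header created with new_unrhdr(0, 999):
 *
 *	a = alloc_unr(uh);	// 0
 *	b = alloc_unr(uh);	// 1
 *	c = alloc_unr(uh);	// 2
 *	free_unr(uh, b);	// 1 is now the lowest free number
 *	d = alloc_unr(uh);	// returns 1 again, not 3
 */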

/*
 * Free a unr.
 *
 * If we can save unrs by using a bitmap, do so.
 */
void
free_unr(struct unrhdr *uh, u_int item)
{
	struct unr *up, *upp, *upn, *ul;
	u_int x, l, xl, n, pl;

	KASSERT(item >= uh->low && item <= uh->high,
	    ("UNR: free_unr(%u) out of range [%u...%u]",
	    item, uh->low, uh->high));
	check_unrhdr(uh, __LINE__);
	item -= uh->low;
	xl = x = 0;
	/* Find the start of the potential bitmap */
	l = item - item % NBITS;
	ul = NULL;
	TAILQ_FOREACH(up, &uh->head, list) {

		/* Keep track of which unr we'll split if we do */
		if (x <= l) {
			ul = up;
			xl = x;
		}

		/* Handle bitmap items */
		if (up->ptr != NULL && up->ptr != uh) {
			if (x + NBITS <= item) {	/* not yet */
				x += NBITS;
				continue;
			}
			KASSERT(bit_test((bitstr_t *)up->ptr, item - x) != 0,
			    ("UNR: Freeing free item %u (%u) (bitmap)\n",
			    item, item - x));
			bit_clear((bitstr_t *)up->ptr, item - x);
			uh->busy--;
			up->len--;
			/*
			 * XXX: up->len == 1 could possibly be collapsed to
			 * XXX: neighboring runs.
			 */
			if (up->len > 0)
				return;
			/* We have freed all items in bitmap, drop it */
			delete_unr(uh, up->ptr);
			up->ptr = NULL;
			up->len = NBITS;
			collapse_unr(uh, up);
			check_unrhdr(uh, __LINE__);
			return;
		}

		/* Run-length unrs */

		if (x + up->len <= item) {	/* not yet */
			x += up->len;
			continue;
		}

		/* We now have our run-length unr */
		KASSERT(up->ptr == uh,
		    ("UNR Freeing free item %u (run)\n", item));

		/* Just this one left, reap it */
		if (up->len == 1) {
			up->ptr = NULL;
			uh->busy--;
			collapse_unr(uh, up);
			check_unrhdr(uh, __LINE__);
			return;
		}

		/* Check if we can shift the item to the previous run */
		upp = TAILQ_PREV(up, unrhd, list);
		if (item == x && upp != NULL && upp->ptr == NULL) {
			upp->len++;
			up->len--;
			uh->busy--;
			check_unrhdr(uh, __LINE__);
			return;
		}

		/* Check if we can shift the item to the next run */
		upn = TAILQ_NEXT(up, list);
		if (item == x + up->len - 1 &&
		    upn != NULL && upn->ptr == NULL) {
			upn->len++;
			up->len--;
			uh->busy--;
			check_unrhdr(uh, __LINE__);
			return;
		}

		/* Split off the tail end, if any. */
		pl = up->len - (1 + (item - x));
		if (pl > 0) {
			upp = new_unr(uh);
			upp->ptr = uh;
			upp->len = pl;
			TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
		}

		if (item == x) {
			/* We are done splitting */
			up->len = 1;
			up->ptr = NULL;
		} else {
			/* The freed item */
			upp = new_unr(uh);
			upp->len = 1;
			upp->ptr = NULL;
			TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
			/* Adjust current unr */
			up->len = item - x;
		}

		uh->busy--;
		check_unrhdr(uh, __LINE__);

		/* Our ul marker element may have shifted one position later */
		if (ul->len + xl <= l) {
			xl += ul->len;
			ul = TAILQ_NEXT(ul, list);
		}
		KASSERT(ul != NULL, ("UNR lost bitmap pointer"));

		/* Count unrs entirely inside the potential bitmap */
		n = 0;
		pl = xl;
		item = l + NBITS;
		for (up = ul;
		    up != NULL && pl + up->len <= item;
		    up = TAILQ_NEXT(up, list)) {
			if (pl >= l)
				n++;
			pl += up->len;
		}

		/* If fewer than three, a bitmap does not pay off */
		if (n < 3)
			return;

		/* Allocate bitmap */
		upp = new_unr(uh);
		upp->ptr = new_unr(uh);

		/* Insert bitmap after ul element */
		TAILQ_INSERT_AFTER(&uh->head, ul, upp, list);

		/* Slice off the tail from the ul element */
		pl = ul->len - (l - xl);
		if (ul->ptr != NULL) {
			bit_nset((bitstr_t *)upp->ptr, 0, pl - 1);
			upp->len = pl;
		}
		ul->len -= pl;

		/* Ditch ul if it got reduced to zero size */
		if (ul->len == 0) {
			TAILQ_REMOVE(&uh->head, ul, list);
			delete_unr(uh, ul);
		}

		/* Soak up run-length unrs until we have absorbed NBITS */
		while (pl != NBITS) {

			/* Grab first one in line */
			upn = TAILQ_NEXT(upp, list);

			/* We may not have a multiple of NBITS totally */
			if (upn == NULL)
				break;

			/* Run may extend past our new bitmap */
			n = NBITS - pl;
			if (n > upn->len)
				n = upn->len;

			if (upn->ptr != NULL) {
				bit_nset((bitstr_t *)upp->ptr, pl, pl + n - 1);
				upp->len += n;
			}
			pl += n;

			if (n != upn->len) {
				/* We did not absorb the entire run */
				upn->len -= n;
				break;
			}
			TAILQ_REMOVE(&uh->head, upn, list);
			delete_unr(uh, upn);
		}
		check_unrhdr(uh, __LINE__);
		return;
	}
	KASSERT(0 != 1, ("UNR: Fell off the end in free_unr()"));
}
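
/*
 * A note on the bitmap heuristic above: a bitmap element costs two
 * allocations (the struct unr plus a bitmap block of the same size) and
 * covers one aligned window of NBITS unit numbers, so it only pays off
 * once it replaces at least three run elements; hence the "fewer than
 * three" test in free_unr().  Illustrative example, assuming a space
 * much larger than NBITS: after alloc_unr() has handed out 0..9,
 * freeing 3 would otherwise leave three whole elements inside the first
 * window ("[uh, 3] [NULL, 1] [uh, 6]"), so free_unr() folds that window
 * into a single bitmap element instead.
 */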
free_unr()")); 527 } 528 529 #ifndef _KERNEL /* USERLAND test driver */ 530 531 /* 532 * Simple stochastic test driver for the above functions 533 */ 534 535 static void 536 print_unr(struct unrhdr *uh, struct unr *up) 537 { 538 u_int x; 539 540 printf(" %p len = %5u ", up, up->len); 541 if (up->ptr == NULL) 542 printf("free\n"); 543 else if (up->ptr == uh) 544 printf("alloc\n"); 545 else { 546 printf(" ["); 547 for (x = 0; x < NBITS; x++) { 548 if (bit_test((bitstr_t *)up->ptr, x)) 549 putchar('#'); 550 else 551 putchar(' '); 552 } 553 printf("]\n"); 554 } 555 } 556 557 static void 558 print_unrhdr(struct unrhdr *uh) 559 { 560 struct unr *up; 561 u_int x; 562 563 printf("%p low = %u high = %u busy %u\n", 564 uh, uh->low, uh->high, uh->busy); 565 x = uh->low; 566 TAILQ_FOREACH(up, &uh->head, list) { 567 printf(" from = %5u", x); 568 print_unr(uh, up); 569 if (up->ptr == NULL || up->ptr == uh) 570 x += up->len; 571 else 572 x += NBITS; 573 } 574 } 575 576 /* Number of unrs to test */ 577 #define NN 10000 578 579 int 580 main(int argc __unused, const char **argv __unused) 581 { 582 struct unrhdr *uh; 583 int i, x, m; 584 char a[NN]; 585 586 uh = new_unrhdr(0, NN - 1); 587 588 memset(a, 0, sizeof a); 589 590 fprintf(stderr, "sizeof(struct unr) %d\n", sizeof (struct unr)); 591 fprintf(stderr, "sizeof(struct unrhdr) %d\n", sizeof (struct unrhdr)); 592 x = 1; 593 for (m = 0; m < NN; m++) { 594 i = random() % NN; 595 if (a[i]) { 596 printf("F %u\n", i); 597 free_unr(uh, i); 598 a[i] = 0; 599 } else { 600 i = alloc_unr(uh); 601 a[i] = 1; 602 printf("A %u\n", i); 603 } 604 if (1) /* XXX: change this for detailed debug printout */ 605 print_unrhdr(uh); 606 check_unrhdr(uh, __LINE__); 607 } 608 for (i = 0; i < NN; i++) 609 if (a[i]) 610 free_unr(uh, i); 611 print_unrhdr(uh); 612 delete_unrhdr(uh); 613 return (0); 614 } 615 #endif 616