/*-
 * Copyright (c) 2004 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 * Unit number allocation functions.
 *
 * These functions implement a mixed run-length/bitmap management of unit
 * number spaces.
 *
 * Allocation is always lowest free number first.
 *
 * Worst case memory usage (disregarding boundary effects in the low end)
 * is two bits for each slot in the unit number space.  (For a full
 * [0 ... UINT_MAX] space that is still a lot, of course.)
 *
 * The typical case, where no unit numbers are freed, is managed in a
 * constant sized memory footprint of:
 *	sizeof(struct unrhdr) + 2 * sizeof(struct unr) == 56 bytes on i386
 *
 * The caller must provide locking.
 *
 * A userland test program is included.
 *
 */

#include <sys/types.h>
#include <sys/queue.h>
#include <sys/bitstring.h>

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/systm.h>

/*
 * In theory it would be smarter to allocate the individual blocks
 * with the zone allocator, but at this time the expectation is that
 * there will typically not even be enough allocations to fill a single
 * page, so we stick with malloc for now.
 */
static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");

#define	Malloc(foo)	malloc(foo, M_UNIT, M_WAITOK | M_ZERO)
#define	Free(foo)	free(foo, M_UNIT)

#else /* ...USERLAND */

#include <stdio.h>
#include <stdlib.h>
#include <string.h>		/* memset() in the test driver */

#define	KASSERT(cond, arg) \
	do { \
		if (!(cond)) { \
			printf arg; \
			exit(1); \
		} \
	} while (0)

#define	Malloc(foo)	calloc(foo, 1)
#define	Free(foo)	free(foo)

#endif

/*
 * This is our basic building block.
 *
 * It can be used in three different ways depending on the value of the ptr
 * element:
 *	If ptr is NULL, it represents a run of free items.
 *	If ptr points to the unrhdr it represents a run of allocated items.
 *	Otherwise it points to a bitstring of allocated items.
 *
 * For runs the len field is the length of the run.
 * For bitmaps the len field represents the number of allocated items.
 *
 * The bitmap is the same size as struct unr to optimize memory management.
 */
struct unr {
	TAILQ_ENTRY(unr)	list;
	u_int			len;
	void			*ptr;
};

/* Number of bits in the bitmap */
#define	NBITS	(sizeof(struct unr) * 8)

/* Header element for a unr number space. */
struct unrhdr {
	TAILQ_HEAD(unrhd, unr)	head;
	u_int			low;	/* Lowest item */
	u_int			high;	/* Highest item */
	u_int			busy;	/* Count of allocated items */
	u_int			alloc;	/* Count of memory allocations */
};

#if defined(DIAGNOSTIC) || !defined(_KERNEL)
/*
 * Consistency check function.
 *
 * Checks the internal consistency as well as we can.
 *
 * Called at all boundaries of this API.
 */
static void
check_unrhdr(struct unrhdr *uh, int line)
{
	struct unr *up;
	u_int x, y, z, w;

	y = 0;
	z = 0;
	TAILQ_FOREACH(up, &uh->head, list) {
		z++;
		if (up->ptr != uh && up->ptr != NULL) {
			z++;
			w = 0;
			for (x = 0; x < NBITS; x++)
				if (bit_test((bitstr_t *)up->ptr, x))
					w++;
			KASSERT(w == up->len,
			    ("UNR inconsistency: bits %u found %u\n",
			    up->len, w));
		}
		if (up->ptr != NULL)
			y += up->len;
	}
	KASSERT(y == uh->busy,
	    ("UNR inconsistency: items %u found %u (line %d)\n",
	    uh->busy, y, line));
	KASSERT(z == uh->alloc,
	    ("UNR inconsistency: chunks %u found %u (line %d)\n",
	    uh->alloc, z, line));
}

#else

static __inline void
check_unrhdr(struct unrhdr *uh, int line)
{

}

#endif

/*
 * Memory management wrappers.  In userland these just use calloc; in
 * either case we keep track of how many elements we have allocated for
 * check_unrhdr().
 */
static __inline void *
new_unr(struct unrhdr *uh)
{
	uh->alloc++;
	return (Malloc(sizeof(struct unr)));
}

static __inline void
delete_unr(struct unrhdr *uh, void *ptr)
{
	uh->alloc--;
	Free(ptr);
}

/*
 * Allocate a new unrheader set.
 *
 * Highest and lowest valid values given as parameters.
 */
struct unrhdr *
new_unrhdr(u_int low, u_int high)
{
	struct unrhdr *uh;
	struct unr *up;

	KASSERT(low <= high,
	    ("UNR: use error: new_unrhdr(%u, %u)", low, high));
	uh = Malloc(sizeof *uh);
	TAILQ_INIT(&uh->head);
	uh->low = low;
	uh->high = high;
	up = new_unr(uh);
	up->len = 1 + (high - low);
	up->ptr = NULL;
	TAILQ_INSERT_HEAD(&uh->head, up, list);
	check_unrhdr(uh, __LINE__);
	return (uh);
}

void
delete_unrhdr(struct unrhdr *uh)
{

	KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));

	/* We should have a single unr only */
	delete_unr(uh, TAILQ_FIRST(&uh->head));
	KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
	Free(uh);
}

/*
 * See if a given unr should be collapsed with a neighbor.
 */
static void
collapse_unr(struct unrhdr *uh, struct unr *up)
{
	struct unr *upp;

	upp = TAILQ_PREV(up, unrhd, list);
	if (upp != NULL && up->ptr == upp->ptr) {
		up->len += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
	}
	upp = TAILQ_NEXT(up, list);
	if (upp != NULL && up->ptr == upp->ptr) {
		up->len += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
	}
}

/*
 * Allocate a free unr.
 */
u_int
alloc_unr(struct unrhdr *uh)
{
	struct unr *up, *upp;
	u_int x;
	int y;

	check_unrhdr(uh, __LINE__);
	x = uh->low;
	/*
	 * We can always allocate from one of the first two unrs on the list.
	 * The first one is likely an allocated run, but the second has to
	 * be a free run or a bitmap.
	 */
	up = TAILQ_FIRST(&uh->head);
	KASSERT(up != NULL, ("UNR empty list"));
	if (up->ptr == uh) {
		x += up->len;
		up = TAILQ_NEXT(up, list);
	}
	KASSERT(up != NULL, ("UNR Ran out of numbers"));	/* XXX */
	KASSERT(up->ptr != uh, ("UNR second element allocated"));

	if (up->ptr != NULL) {
		/* Bitmap unr */
		KASSERT(up->len < NBITS, ("UNR bitmap confusion"));
		bit_ffc((bitstr_t *)up->ptr, NBITS, &y);
		KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
		bit_set((bitstr_t *)up->ptr, y);
		up->len++;
		uh->busy++;
		if (up->len == NBITS) {
			/* The unr is all allocated, drop bitmap */
			delete_unr(uh, up->ptr);
			up->ptr = uh;
			collapse_unr(uh, up);
		}
		check_unrhdr(uh, __LINE__);
		return (x + y);
	}

	if (up->len == 1) {
		/* Run of one free item, grab it */
		up->ptr = uh;
		uh->busy++;
		collapse_unr(uh, up);
		check_unrhdr(uh, __LINE__);
		return (x);
	}

	/*
	 * Slice the first item into a preceding allocated run, even if we
	 * have to create it.  Because allocation is always lowest free
	 * number first, we know the preceding element (if any) to be
	 * an allocated run.
	 */
	upp = TAILQ_PREV(up, unrhd, list);
	if (upp == NULL) {
		upp = new_unr(uh);
		upp->len = 0;
		upp->ptr = uh;
		TAILQ_INSERT_BEFORE(up, upp, list);
	}
	KASSERT(upp->ptr == uh, ("UNR list corruption"));
	upp->len++;
	up->len--;
	uh->busy++;
	check_unrhdr(uh, __LINE__);
	return (x);
}

/*
 * Free a unr.
 *
 * If we can save unrs by using a bitmap, do so.
 */
void
free_unr(struct unrhdr *uh, u_int item)
{
	struct unr *up, *upp, *upn, *ul;
	u_int x, l, xl, n, pl;

	KASSERT(item >= uh->low && item <= uh->high,
	    ("UNR: free_unr(%u) out of range [%u...%u]",
	    item, uh->low, uh->high));
	check_unrhdr(uh, __LINE__);
	item -= uh->low;
	xl = x = 0;
	/* Find the start of the potential bitmap */
	l = item - item % NBITS;
	ul = NULL;
	TAILQ_FOREACH(up, &uh->head, list) {

		/* Keep track of which unr we'll split if we do */
		if (x <= l) {
			ul = up;
			xl = x;
		}

		/* Handle bitmap items */
		if (up->ptr != NULL && up->ptr != uh) {
			if (x + NBITS <= item) {	/* not yet */
				x += NBITS;
				continue;
			}
			KASSERT(bit_test((bitstr_t *)up->ptr, item - x) != 0,
			    ("UNR: Freeing free item %d (%d) (bitmap)\n",
			    item, item - x));
			bit_clear((bitstr_t *)up->ptr, item - x);
			uh->busy--;
			up->len--;
			/*
			 * XXX: up->len == 1 could possibly be collapsed to
			 * XXX: neighboring runs.
			 */
			if (up->len > 0)
				return;
			/* We have freed all items in bitmap, drop it */
			delete_unr(uh, up->ptr);
			up->ptr = NULL;
			up->len = NBITS;
			collapse_unr(uh, up);
			check_unrhdr(uh, __LINE__);
			return;
		}

		/* Run length unrs */

		if (x + up->len <= item) {	/* not yet */
			x += up->len;
			continue;
		}

		/* We now have our run length unr */
		KASSERT(up->ptr == uh,
		    ("UNR Freeing free item %d (run)\n", item));

		/* Just this one left, reap it */
		if (up->len == 1) {
			up->ptr = NULL;
			uh->busy--;
			collapse_unr(uh, up);
			check_unrhdr(uh, __LINE__);
			return;
		}

		/* Check if we can shift the item to the previous run */
		upp = TAILQ_PREV(up, unrhd, list);
		if (item == x && upp != NULL && upp->ptr == NULL) {
			upp->len++;
			up->len--;
			uh->busy--;
			check_unrhdr(uh, __LINE__);
			return;
		}

		/* Check if we can shift the item to the next run */
		upn = TAILQ_NEXT(up, list);
		if (item == x + up->len - 1 &&
		    upn != NULL && upn->ptr == NULL) {
			upn->len++;
			up->len--;
			uh->busy--;
			check_unrhdr(uh, __LINE__);
			return;
		}

		/* Split off the tail end, if any. */
		pl = up->len - (1 + (item - x));
		if (pl > 0) {
			upp = new_unr(uh);
			upp->ptr = uh;
			upp->len = pl;
			TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
		}

		if (item == x) {
			/* We are done splitting */
			up->len = 1;
			up->ptr = NULL;
		} else {
			/* The freed item */
			upp = new_unr(uh);
			upp->len = 1;
			upp->ptr = NULL;
			TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
			/* Adjust current unr */
			up->len = item - x;
		}

		uh->busy--;
		check_unrhdr(uh, __LINE__);

		/* Our ul marker element may have shifted one later */
		if (ul->len + xl <= l) {
			xl += ul->len;
			ul = TAILQ_NEXT(ul, list);
		}
		KASSERT(ul != NULL, ("UNR lost bitmap pointer"));

		/* Count unrs entirely inside potential bitmap */
		n = 0;
		pl = xl;
		item = l + NBITS;
		for (up = ul;
		    up != NULL && pl + up->len <= item;
		    up = TAILQ_NEXT(up, list)) {
			if (pl >= l)
				n++;
			pl += up->len;
		}

		/* If less than three, a bitmap does not pay off */
		if (n < 3)
			return;

		/* Allocate bitmap */
		upp = new_unr(uh);
		upp->ptr = new_unr(uh);

		/* Insert bitmap after ul element */
		TAILQ_INSERT_AFTER(&uh->head, ul, upp, list);

		/* Slice off the tail from the ul element */
		pl = ul->len - (l - xl);
		if (ul->ptr != NULL) {
			bit_nset(upp->ptr, 0, pl - 1);
			upp->len = pl;
		}
		ul->len -= pl;

		/* Ditch ul if it got reduced to zero size */
		if (ul->len == 0) {
			TAILQ_REMOVE(&uh->head, ul, list);
			delete_unr(uh, ul);
		}

		/* Soak up run length unrs until we have absorbed NBITS */
		while (pl != NBITS) {

			/* Grab first one in line */
			upn = TAILQ_NEXT(upp, list);

			/* We may not have a multiple of NBITS totally */
			if (upn == NULL)
				break;

			/* Run may extend past our new bitmap */
			n = NBITS - pl;
			if (n > upn->len)
				n = upn->len;

			if (upn->ptr != NULL) {
				bit_nset(upp->ptr, pl, pl + n - 1);
				upp->len += n;
			}
			pl += n;

			if (n != upn->len) {
				/* We did not absorb the entire run */
				upn->len -= n;
				break;
			}
			TAILQ_REMOVE(&uh->head, upn, list);
			delete_unr(uh, upn);
		}
		check_unrhdr(uh, __LINE__);
		return;
	}
	KASSERT(0 != 1, ("UNR: Fell off the end in free_unr()"));
}

#ifndef _KERNEL		/* USERLAND test driver */

/*
 * Simple stochastic test driver for the above functions.
 */

static void
print_unr(struct unrhdr *uh, struct unr *up)
{
	u_int x;

	printf(" %p len = %5u ", up, up->len);
	if (up->ptr == NULL)
		printf("free\n");
	else if (up->ptr == uh)
		printf("alloc\n");
	else {
		printf(" [");
		for (x = 0; x < NBITS; x++) {
			if (bit_test((bitstr_t *)up->ptr, x))
				putchar('#');
			else
				putchar(' ');
		}
		printf("]\n");
	}
}

static void
print_unrhdr(struct unrhdr *uh)
{
	struct unr *up;
	u_int x;

	printf("%p low = %u high = %u busy %u\n",
	    uh, uh->low, uh->high, uh->busy);
	x = uh->low;
	TAILQ_FOREACH(up, &uh->head, list) {
		printf(" from = %5u", x);
		print_unr(uh, up);
		if (up->ptr == NULL || up->ptr == uh)
			x += up->len;
		else
			x += NBITS;
	}
}

/* Number of unrs to test */
#define	NN	10000

int
main(int argc __unused, const char **argv __unused)
{
	struct unrhdr *uh;
	int i, x, m;
	char a[NN];

	uh = new_unrhdr(0, NN - 1);

	memset(a, 0, sizeof a);

	fprintf(stderr, "sizeof(struct unr) %zu\n", sizeof(struct unr));
	fprintf(stderr, "sizeof(struct unrhdr) %zu\n", sizeof(struct unrhdr));
	x = 1;
	for (m = 0; m < NN; m++) {
		i = random() % NN;
		if (a[i]) {
			printf("F %u\n", i);
			free_unr(uh, i);
			a[i] = 0;
		} else {
			i = alloc_unr(uh);
			a[i] = 1;
			printf("A %u\n", i);
		}
		if (1)	/* XXX: change this for detailed debug printout */
			print_unrhdr(uh);
		check_unrhdr(uh, __LINE__);
	}
	for (i = 0; i < NN; i++)
		if (a[i])
			free_unr(uh, i);
	print_unrhdr(uh);
	delete_unrhdr(uh);
	return (0);
}
#endif
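
/*
 * The block below is a minimal usage sketch and not part of the allocator
 * itself: it shows how a kernel consumer might drive new_unrhdr(),
 * alloc_unr(), free_unr() and delete_unrhdr() while providing the locking
 * these functions require from their caller.  An sx lock is used because
 * the allocation path may Malloc() with M_WAITOK and hence sleep.  The
 * names foo_units, foo_lock, foo_init(), foo_alloc_unit(), foo_free_unit()
 * and foo_fini() are hypothetical and exist only for illustration; the
 * #if 0 guard keeps the sketch out of any real build.
 */
#if 0	/* Example only, never compiled. */
#include <sys/param.h>
#include <sys/lock.h>
#include <sys/sx.h>

static struct unrhdr *foo_units;	/* unit number space for "foo" devices */
static struct sx foo_lock;		/* caller-provided serialization */

static void
foo_init(void)
{

	sx_init(&foo_lock, "foo unit numbers");
	foo_units = new_unrhdr(0, 255);		/* manage unit numbers 0..255 */
}

static u_int
foo_alloc_unit(void)
{
	u_int u;

	sx_xlock(&foo_lock);
	u = alloc_unr(foo_units);	/* always returns the lowest free number */
	sx_xunlock(&foo_lock);
	return (u);
}

static void
foo_free_unit(u_int u)
{

	sx_xlock(&foo_lock);
	free_unr(foo_units, u);		/* may merge runs or build a bitmap */
	sx_xunlock(&foo_lock);
}

static void
foo_fini(void)
{

	delete_unrhdr(foo_units);	/* all units must have been freed */
	sx_destroy(&foo_lock);
}
#endif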