/*-
 * Copyright (c) 2004 Poul-Henning Kamp
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 *
 *
 * Unit number allocation functions.
 *
 * These functions implement a mixed run-length/bitmap management of unit
 * number spaces in a very memory efficient manner.
 *
 * Allocation policy is always lowest free number first.
 *
 * A return value of -1 signals that no more unit numbers are available.
 *
 * There is no cost associated with the range of unit numbers, so unless
 * the resource really is finite, specify INT_MAX to new_unrhdr() and
 * forget about checking the return value.
 *
 * If a mutex is not provided when the unit number space is created, a
 * default global mutex is used.  The advantage of passing a mutex in is
 * that the alloc_unrl() function can be called with the mutex already
 * held (it will not be released by alloc_unrl()).
 *
 * The allocation function alloc_unr{l}() never sleeps (but it may block
 * on the mutex, of course).
 *
 * Freeing a unit number may require allocating memory, and can therefore
 * sleep, so the free_unr() function does not come in a pre-locked variant.
 *
 * A userland test program is included.
 *
 * Memory usage is a very complex function of the exact allocation
 * pattern, but always very compact:
 *    * For the very typical case where a single unbroken run of unit
 *      numbers is allocated, 44 bytes are used on i386.
 *    * For a unit number space of 1000 units and the random pattern
 *      in the usermode test program included, the worst case usage
 *      was 252 bytes on i386 for 500 allocated and 500 free units.
 *    * For a unit number space of 10000 units and the random pattern
 *      in the usermode test program included, the worst case usage
 *      was 798 bytes on i386 for 5000 allocated and 5000 free units.
 *    * The worst case is where every other unit number is allocated and
 *      the rest are free.  In that case 44 + N/4 bytes are used, where
 *      N is the number of the highest unit allocated.
 */
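
/*
 * A minimal usage sketch, kept out of the build with #if 0.  The names
 * foo_units, foo_init(), foo_attach() and foo_detach() are hypothetical;
 * the unr calls are the real entry points defined in this file.
 */
#if 0
static struct unrhdr *foo_units;

static void
foo_init(void)
{

	/* Unit space 0...INT_MAX, protected by the built-in global mutex. */
	foo_units = new_unrhdr(0, INT_MAX, NULL);
}

static int
foo_attach(void)
{

	/* Lowest free number first; never sleeps; -1 when exhausted. */
	return (alloc_unr(foo_units));
}

static void
foo_detach(int unit)
{

	/* May sleep: freeing can allocate memory. */
	free_unr(foo_units, unit);
}
#endif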

#include <sys/param.h>
#include <sys/types.h>
#include <sys/_unrhdr.h>

#ifdef _KERNEL

#include <sys/bitstring.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/systm.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/mutex.h>

/*
 * In theory it would be smarter to allocate the individual blocks
 * with the zone allocator, but at this time the expectation is that
 * there will typically not even be enough allocations to fill a single
 * page, so we stick with malloc for now.
 */
static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation");

#define Malloc(foo) malloc(foo, M_UNIT, M_WAITOK | M_ZERO)
#define Free(foo) free(foo, M_UNIT)

static struct mtx unitmtx;

MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF);

#else /* ...USERLAND */

#include <bitstring.h>
#include <err.h>
#include <errno.h>
#include <getopt.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define KASSERT(cond, arg) \
	do { \
		if (!(cond)) { \
			printf arg; \
			abort(); \
		} \
	} while (0)

static int no_alloc;
#define Malloc(foo) _Malloc(foo, __LINE__)
static void *
_Malloc(size_t foo, int line)
{

	KASSERT(no_alloc == 0, ("malloc in wrong place() line %d", line));
	return (calloc(foo, 1));
}
#define Free(foo) free(foo)

struct unrhdr;

struct mtx {
	int	state;
} unitmtx;

static void
mtx_lock(struct mtx *mp)
{
	KASSERT(mp->state == 0, ("mutex already locked"));
	mp->state = 1;
}

static void
mtx_unlock(struct mtx *mp)
{
	KASSERT(mp->state == 1, ("mutex not locked"));
	mp->state = 0;
}

#define MA_OWNED	9

static void
mtx_assert(struct mtx *mp, int flag)
{
	if (flag == MA_OWNED) {
		KASSERT(mp->state == 1, ("mtx_assert(MA_OWNED) not true"));
	}
}

#define CTASSERT(foo)
#define WITNESS_WARN(flags, lock, fmt, ...)	(void)0

#endif /* USERLAND */

/*
 * This is our basic building block.
 *
 * It can be used in three different ways depending on the value of the ptr
 * element:
 *     If ptr is NULL, it represents a run of free items.
 *     If ptr points to the unrhdr it represents a run of allocated items.
 *     Otherwise it points to a bitstring of allocated items.
 *
 * For runs the len field is the length of the run.
 * For bitmaps the len field is the number of items covered by the bitmap.
 *
 * The bitmap is the same size as struct unr to optimize memory management.
 */
struct unr {
	TAILQ_ENTRY(unr)	list;
	u_int			len;
	void			*ptr;
};

struct unrb {
	bitstr_t		map[sizeof(struct unr) / sizeof(bitstr_t)];
};

CTASSERT((sizeof(struct unr) % sizeof(bitstr_t)) == 0);

/* Number of bits we can store in the bitmap */
#define NBITS	(8 * sizeof(((struct unrb *)NULL)->map))

/* Is the unrb empty in at least the first len bits? */
static inline bool
ub_empty(struct unrb *ub, int len)
{
	int first_set;

	bit_ffs(ub->map, len, &first_set);
	return (first_set == -1);
}

/* Is the unrb full?  That is, is the number of set elements equal to len? */
static inline bool
ub_full(struct unrb *ub, int len)
{
	int first_clear;

	bit_ffc(ub->map, len, &first_clear);
	return (first_clear == -1);
}
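
/*
 * A worked example of the representation (illustrative, with low = 0):
 * if units 0-99 and 200-299 are allocated and everything else is free,
 * the state is
 *
 *	first = 100			units 0-99, folded into the header
 *	{ ptr = NULL, len = 100 }	units 100-199, free run
 *	{ ptr = uh,   len = 100 }	units 200-299, allocated run
 *	last = high - 299		trailing free units, also folded
 *
 * A region too fragmented for runs is instead held as a single
 * { ptr = bitmap, len = n } item covering n <= NBITS units, with set
 * bits marking allocated units.
 */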

#if defined(DIAGNOSTIC) || !defined(_KERNEL)
/*
 * Consistency check function.
 *
 * Checks the internal consistency as well as we can.
 *
 * Called at all boundaries of this API.
 */
static void
check_unrhdr(struct unrhdr *uh, int line)
{
	struct unr *up;
	struct unrb *ub;
	int w;
	u_int y, z;

	y = uh->first;
	z = 0;
	TAILQ_FOREACH(up, &uh->head, list) {
		z++;
		if (up->ptr != uh && up->ptr != NULL) {
			ub = up->ptr;
			KASSERT(up->len <= NBITS,
			    ("UNR inconsistency: len %u max %zd (line %d)\n",
			    up->len, NBITS, line));
			z++;
			w = 0;
			bit_count(ub->map, 0, up->len, &w);
			y += w;
		} else if (up->ptr != NULL)
			y += up->len;
	}
	KASSERT(y == uh->busy,
	    ("UNR inconsistency: items %u found %u (line %d)\n",
	    uh->busy, y, line));
	KASSERT(z == uh->alloc,
	    ("UNR inconsistency: chunks %u found %u (line %d)\n",
	    uh->alloc, z, line));
}

#else

static __inline void
check_unrhdr(struct unrhdr *uh __unused, int line __unused)
{

}

#endif

/*
 * Memory management for unr items.  new_unr() hands out one of the
 * caller-supplied preallocated items; we keep track of how many elements
 * we have allocated for check_unrhdr().
 */

static __inline void *
new_unr(struct unrhdr *uh, void **p1, void **p2)
{
	void *p;

	uh->alloc++;
	KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));
	if (*p1 != NULL) {
		p = *p1;
		*p1 = NULL;
		return (p);
	} else {
		p = *p2;
		*p2 = NULL;
		return (p);
	}
}

static __inline void
delete_unr(struct unrhdr *uh, void *ptr)
{
	struct unr *up;

	uh->alloc--;
	up = ptr;
	TAILQ_INSERT_TAIL(&uh->ppfree, up, list);
}

void
clean_unrhdrl(struct unrhdr *uh)
{
	struct unr *up;

	mtx_assert(uh->mtx, MA_OWNED);
	while ((up = TAILQ_FIRST(&uh->ppfree)) != NULL) {
		TAILQ_REMOVE(&uh->ppfree, up, list);
		mtx_unlock(uh->mtx);
		Free(up);
		mtx_lock(uh->mtx);
	}
}

void
clean_unrhdr(struct unrhdr *uh)
{

	mtx_lock(uh->mtx);
	clean_unrhdrl(uh);
	mtx_unlock(uh->mtx);
}

void
init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex)
{

	KASSERT(low >= 0 && low <= high,
	    ("UNR: use error: new_unrhdr(%d, %d)", low, high));
	if (mutex != NULL)
		uh->mtx = mutex;
	else
		uh->mtx = &unitmtx;
	TAILQ_INIT(&uh->head);
	TAILQ_INIT(&uh->ppfree);
	uh->low = low;
	uh->high = high;
	uh->first = 0;
	uh->last = 1 + (high - low);
	check_unrhdr(uh, __LINE__);
}
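
/*
 * The pattern the unlocked entry points follow around new_unr() and
 * delete_unr() is sketched below (compiled out; this is what
 * alloc_unr_specific() and free_unr() further down actually do): up to
 * two struct unr are preallocated while sleeping is still allowed, the
 * list surgery draws on them through p1/p2 under the mutex, and unused
 * buffers are returned afterwards.  delete_unr() merely queues memory on
 * ->ppfree; clean_unrhdrl() frees it with the mutex dropped.
 */
#if 0
	p1 = Malloc(sizeof(struct unr));	/* may sleep; lock not held */
	p2 = Malloc(sizeof(struct unr));
	mtx_lock(uh->mtx);
	/* ... list surgery consumes p1/p2 via new_unr() ... */
	mtx_unlock(uh->mtx);
	if (p1 != NULL)				/* return unused buffers */
		Free(p1);
	if (p2 != NULL)
		Free(p2);
#endif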

/*
 * Allocate a new unrheader set.
 *
 * Highest and lowest valid values given as parameters.
 */
struct unrhdr *
new_unrhdr(int low, int high, struct mtx *mutex)
{
	struct unrhdr *uh;

	uh = Malloc(sizeof *uh);
	init_unrhdr(uh, low, high, mutex);
	return (uh);
}

void
delete_unrhdr(struct unrhdr *uh)
{

	check_unrhdr(uh, __LINE__);
	KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
	KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
	KASSERT(TAILQ_FIRST(&uh->ppfree) == NULL,
	    ("unrhdr has postponed item for free"));
	Free(uh);
}

void
clear_unrhdr(struct unrhdr *uh)
{
	struct unr *up, *uq;

	KASSERT(TAILQ_EMPTY(&uh->ppfree),
	    ("unrhdr has postponed item for free"));
	TAILQ_FOREACH_SAFE(up, &uh->head, list, uq) {
		if (up->ptr != uh) {
			Free(up->ptr);
		}
		Free(up);
	}
	uh->busy = 0;
	uh->alloc = 0;
	init_unrhdr(uh, uh->low, uh->high, uh->mtx);

	check_unrhdr(uh, __LINE__);
}

static __inline int
is_bitmap(struct unrhdr *uh, struct unr *up)
{
	return (up->ptr != uh && up->ptr != NULL);
}

/*
 * Look for a sequence of items which can be combined into a bitmap; if
 * multiple are present, take the one which saves the most memory.
 *
 * Return (1) if a sequence was found, to indicate that another call
 * might be able to do more.  Return (0) if we found no suitable sequence.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
static int
optimize_unr(struct unrhdr *uh)
{
	struct unr *up, *uf, *us;
	struct unrb *ub, *ubf;
	u_int a, l, ba;

	/*
	 * Look for the run of items (if any) which when collapsed into
	 * a bitmap would save most memory.
	 */
	us = NULL;
	ba = 0;
	TAILQ_FOREACH(uf, &uh->head, list) {
		if (uf->len >= NBITS)
			continue;
		a = 1;
		if (is_bitmap(uh, uf))
			a++;
		l = uf->len;
		up = uf;
		while (1) {
			up = TAILQ_NEXT(up, list);
			if (up == NULL)
				break;
			if ((up->len + l) > NBITS)
				break;
			a++;
			if (is_bitmap(uh, up))
				a++;
			l += up->len;
		}
		if (a > ba) {
			ba = a;
			us = uf;
		}
	}
	if (ba < 3)
		return (0);

	/*
	 * If the first element is not a bitmap, make it one.
	 * Trying to do so without allocating more memory complicates things
	 * a bit.
	 */
	if (!is_bitmap(uh, us)) {
		uf = TAILQ_NEXT(us, list);
		TAILQ_REMOVE(&uh->head, us, list);
		a = us->len;
		l = us->ptr == uh ? 1 : 0;
		ub = (void *)us;
		bit_nclear(ub->map, 0, NBITS - 1);
		if (l)
			bit_nset(ub->map, 0, a);
		if (!is_bitmap(uh, uf)) {
			if (uf->ptr == NULL)
				bit_nclear(ub->map, a, a + uf->len - 1);
			else
				bit_nset(ub->map, a, a + uf->len - 1);
			uf->ptr = ub;
			uf->len += a;
			us = uf;
		} else {
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, a++) {
				if (bit_test(ubf->map, l))
					bit_set(ub->map, a);
				else
					bit_clear(ub->map, a);
			}
			uf->len = a;
			delete_unr(uh, uf->ptr);
			uf->ptr = ub;
			us = uf;
		}
	}
	ub = us->ptr;
	while (1) {
		uf = TAILQ_NEXT(us, list);
		if (uf == NULL)
			return (1);
		if (uf->len + us->len > NBITS)
			return (1);
		if (uf->ptr == NULL) {
			bit_nclear(ub->map, us->len, us->len + uf->len - 1);
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else if (uf->ptr == uh) {
			bit_nset(ub->map, us->len, us->len + uf->len - 1);
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else {
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, us->len++) {
				if (bit_test(ubf->map, l))
					bit_set(ub->map, us->len);
				else
					bit_clear(ub->map, us->len);
			}
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, ubf);
			delete_unr(uh, uf);
		}
	}
}
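
/*
 * A worked example of the accounting in optimize_unr() (illustrative):
 * the candidate sequence "5 allocated, 3 free, 2 allocated" is three run
 * items, so a = 3.  Each item merged counts one struct unr, and each
 * bitmap merged counts one struct unrb on top of that, so a is the number
 * of memory blocks the sequence occupies today; the collapsed result
 * always occupies exactly two blocks (one struct unr plus one bitmap), so
 * requiring ba >= 3 guarantees a net saving of at least one block.
 */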

/*
 * See if a given unr should be collapsed with a neighbor.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
static void
collapse_unr(struct unrhdr *uh, struct unr *up)
{
	struct unr *upp;
	struct unrb *ub;

	/* If bitmap is all set or clear, change it to runlength */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (ub_full(ub, up->len)) {
			delete_unr(uh, up->ptr);
			up->ptr = uh;
		} else if (ub_empty(ub, up->len)) {
			delete_unr(uh, up->ptr);
			up->ptr = NULL;
		}
	}

	/* If nothing left in runlength, delete it */
	if (up->len == 0) {
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp == NULL)
			upp = TAILQ_NEXT(up, list);
		TAILQ_REMOVE(&uh->head, up, list);
		delete_unr(uh, up);
		up = upp;
	}

	/* If we have "hot-spot" still, merge with neighbor if possible */
	if (up != NULL) {
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
		upp = TAILQ_NEXT(up, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
	}

	/* Merge into ->first if possible */
	upp = TAILQ_FIRST(&uh->head);
	if (upp != NULL && upp->ptr == uh) {
		uh->first += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;
	}

	/* Merge into ->last if possible */
	upp = TAILQ_LAST(&uh->head, unrhd);
	if (upp != NULL && upp->ptr == NULL) {
		uh->last += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;
	}

	/* Try to make bitmaps */
	while (optimize_unr(uh))
		continue;
}
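
/*
 * For example (illustrative): freeing the last set bit of a bitmap item
 * turns it into a { ptr = NULL } free run, which then merges with any
 * adjacent free runs; if the merged run ends up last in the list it is
 * folded into uh->last, and an allocated run at the head of the list is
 * likewise folded into uh->first.
 */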

/*
 * Allocate a free unr.
 */
int
alloc_unrl(struct unrhdr *uh)
{
	struct unr *up;
	struct unrb *ub;
	u_int x;
	int y;

	mtx_assert(uh->mtx, MA_OWNED);
	check_unrhdr(uh, __LINE__);
	x = uh->low + uh->first;

	up = TAILQ_FIRST(&uh->head);

	/*
	 * If we have an ideal split, just adjust the first+last
	 */
	if (up == NULL && uh->last > 0) {
		uh->first++;
		uh->last--;
		uh->busy++;
		return (x);
	}

	/*
	 * We can always allocate from the first list element, so if we have
	 * nothing on the list, we must have run out of unit numbers.
	 */
	if (up == NULL)
		return (-1);

	KASSERT(up->ptr != uh, ("UNR first element is allocated"));

	if (up->ptr == NULL) {	/* free run */
		uh->first++;
		up->len--;
	} else {		/* bitmap */
		ub = up->ptr;
		bit_ffc(ub->map, up->len, &y);
		KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
		bit_set(ub->map, y);
		x += y;
	}
	uh->busy++;
	collapse_unr(uh, up);
	return (x);
}

int
alloc_unr(struct unrhdr *uh)
{
	int i;

	mtx_lock(uh->mtx);
	i = alloc_unrl(uh);
	clean_unrhdrl(uh);
	mtx_unlock(uh->mtx);
	return (i);
}
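
/*
 * Sketch of the pre-locked variant (compiled out; "sc" and its mutex are
 * hypothetical): a caller that passed its own mutex to new_unrhdr() may
 * call alloc_unrl() with that mutex already held, and it stays held.
 */
#if 0
	mtx_lock(&sc->sc_mtx);		/* the mutex given to new_unrhdr() */
	unit = alloc_unrl(sc->sc_units);
	/* ... publish the unit under the same lock ... */
	mtx_unlock(&sc->sc_mtx);
#endif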

static int
alloc_unr_specificl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upn;
	struct unrb *ub;
	u_int i, last, tl;

	mtx_assert(uh->mtx, MA_OWNED);

	if (item < uh->low + uh->first || item > uh->high)
		return (-1);

	up = TAILQ_FIRST(&uh->head);
	/* Ideal split. */
	if (up == NULL && item - uh->low == uh->first) {
		uh->first++;
		uh->last--;
		uh->busy++;
		check_unrhdr(uh, __LINE__);
		return (item);
	}

	i = item - uh->low - uh->first;

	if (up == NULL) {
		up = new_unr(uh, p1, p2);
		up->ptr = NULL;
		up->len = i;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = 1;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		uh->last = uh->high - uh->low - i;
		uh->busy++;
		check_unrhdr(uh, __LINE__);
		return (item);
	} else {
		/* Find the item which contains the unit we want to allocate. */
		TAILQ_FOREACH(up, &uh->head, list) {
			if (up->len > i)
				break;
			i -= up->len;
		}
	}

	if (up == NULL) {
		if (i > 0) {
			up = new_unr(uh, p1, p2);
			up->ptr = NULL;
			up->len = i;
			TAILQ_INSERT_TAIL(&uh->head, up, list);
		}
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = 1;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		goto done;
	}

	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (bit_test(ub->map, i) == 0) {
			bit_set(ub->map, i);
			goto done;
		} else
			return (-1);
	} else if (up->ptr == uh)
		return (-1);

	KASSERT(up->ptr == NULL,
	    ("alloc_unr_specificl: up->ptr != NULL (up=%p)", up));

	/* Split off the tail end, if any. */
	tl = up->len - (1 + i);
	if (tl > 0) {
		upn = new_unr(uh, p1, p2);
		upn->ptr = NULL;
		upn->len = tl;
		TAILQ_INSERT_AFTER(&uh->head, up, upn, list);
	}

	/* Split off head end, if any */
	if (i > 0) {
		upn = new_unr(uh, p1, p2);
		upn->len = i;
		upn->ptr = NULL;
		TAILQ_INSERT_BEFORE(up, upn, list);
	}
	up->len = 1;
	up->ptr = uh;

done:
	last = uh->high - uh->low - (item - uh->low);
	if (uh->last > last)
		uh->last = last;
	uh->busy++;
	collapse_unr(uh, up);
	check_unrhdr(uh, __LINE__);
	return (item);
}

int
alloc_unr_specific(struct unrhdr *uh, u_int item)
{
	void *p1, *p2;
	int i;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "alloc_unr_specific");

	p1 = Malloc(sizeof(struct unr));
	p2 = Malloc(sizeof(struct unr));

	mtx_lock(uh->mtx);
	i = alloc_unr_specificl(uh, item, &p1, &p2);
	mtx_unlock(uh->mtx);

	if (p1 != NULL)
		Free(p1);
	if (p2 != NULL)
		Free(p2);

	return (i);
}
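
/*
 * Sketch of reserving a fixed unit number (compiled out; the error
 * handling is hypothetical):
 */
#if 0
	/* Claim unit 42 if it is free; may sleep.  -1 means already taken. */
	if (alloc_unr_specific(uh, 42) == -1)
		return (EEXIST);
#endif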

/*
 * Free a unr.
 *
 * If we can save unrs by using a bitmap, do so.
 */
static void
free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upp, *upn;
	struct unrb *ub;
	u_int pl;

	KASSERT(item >= uh->low && item <= uh->high,
	    ("UNR: free_unr(%u) out of range [%u...%u]",
	    item, uh->low, uh->high));
	check_unrhdr(uh, __LINE__);
	item -= uh->low;
	upp = TAILQ_FIRST(&uh->head);
	/*
	 * Freeing in the ideal split case
	 */
	if (item + 1 == uh->first && upp == NULL) {
		uh->last++;
		uh->first--;
		uh->busy--;
		check_unrhdr(uh, __LINE__);
		return;
	}
	/*
	 * Freeing in the ->first section.  Create a run starting at the
	 * freed item.  The code below will subdivide it.
	 */
	if (item < uh->first) {
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = uh->first - item;
		TAILQ_INSERT_HEAD(&uh->head, up, list);
		uh->first -= up->len;
	}

	item -= uh->first;

	/* Find the item which contains the unit we want to free */
	TAILQ_FOREACH(up, &uh->head, list) {
		if (up->len > item)
			break;
		item -= up->len;
	}

	/* Handle bitmap items */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;

		KASSERT(bit_test(ub->map, item) != 0,
		    ("UNR: Freeing free item %d (bitmap)\n", item));
		bit_clear(ub->map, item);
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	KASSERT(up->ptr == uh, ("UNR: Freeing free item %d (run)\n", item));

	/* Just this one left, reap it */
	if (up->len == 1) {
		up->ptr = NULL;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item into the previous 'free' run */
	upp = TAILQ_PREV(up, unrhd, list);
	if (item == 0 && upp != NULL && upp->ptr == NULL) {
		upp->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item to the next 'free' run */
	upn = TAILQ_NEXT(up, list);
	if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
		upn->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Split off the tail end, if any. */
	pl = up->len - (1 + item);
	if (pl > 0) {
		upp = new_unr(uh, p1, p2);
		upp->ptr = uh;
		upp->len = pl;
		TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
	}

	/* Split off head end, if any */
	if (item > 0) {
		upp = new_unr(uh, p1, p2);
		upp->len = item;
		upp->ptr = uh;
		TAILQ_INSERT_BEFORE(up, upp, list);
	}
	up->len = 1;
	up->ptr = NULL;
	uh->busy--;
	collapse_unr(uh, up);
}

void
free_unr(struct unrhdr *uh, u_int item)
{
	void *p1, *p2;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "free_unr");
	p1 = Malloc(sizeof(struct unr));
	p2 = Malloc(sizeof(struct unr));
	mtx_lock(uh->mtx);
	free_unrl(uh, item, &p1, &p2);
	clean_unrhdrl(uh);
	mtx_unlock(uh->mtx);
	if (p1 != NULL)
		Free(p1);
	if (p2 != NULL)
		Free(p2);
}

#ifndef _KERNEL	/* USERLAND test driver */

/*
 * Simple stochastic test driver for the above functions.  The code resides
 * here so that it can access static functions and structures.
 */

static bool verbose;
#define VPRINTF(...)	do { if (verbose) printf(__VA_ARGS__); } while (0)

static void
print_unr(struct unrhdr *uh, struct unr *up)
{
	u_int x;
	struct unrb *ub;

	printf(" %p len = %5u ", up, up->len);
	if (up->ptr == NULL)
		printf("free\n");
	else if (up->ptr == uh)
		printf("alloc\n");
	else {
		ub = up->ptr;
		printf("bitmap [");
		for (x = 0; x < up->len; x++) {
			if (bit_test(ub->map, x))
				printf("#");
			else
				printf(" ");
		}
		printf("]\n");
	}
}

static void
print_unrhdr(struct unrhdr *uh)
{
	struct unr *up;
	u_int x;

	printf(
	    "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
	    uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
	x = uh->low + uh->first;
	TAILQ_FOREACH(up, &uh->head, list) {
		printf(" from = %5u", x);
		print_unr(uh, up);
		x += up->len;
	}
}

static void
test_alloc_unr(struct unrhdr *uh, u_int i, char a[])
{
	int j;

	if (a[i]) {
		VPRINTF("F %u\n", i);
		free_unr(uh, i);
		a[i] = 0;
	} else {
		no_alloc = 1;
		j = alloc_unr(uh);
		if (j != -1) {
			a[j] = 1;
			VPRINTF("A %d\n", j);
		}
		no_alloc = 0;
	}
}

static void
test_alloc_unr_specific(struct unrhdr *uh, u_int i, char a[])
{
	int j;

	j = alloc_unr_specific(uh, i);
	if (j == -1) {
		VPRINTF("F %u\n", i);
		a[i] = 0;
		free_unr(uh, i);
	} else {
		a[i] = 1;
		VPRINTF("A %d\n", j);
	}
}

static void
usage(char **argv)
{
	printf("%s [-h] [-r REPETITIONS] [-v]\n", argv[0]);
}

int
main(int argc, char **argv)
{
	struct unrhdr *uh;
	char *a;
	long count = 10000;	/* Number of unrs to test */
	long reps = 1, m;
	int ch;
	u_int i, j;

	verbose = false;

	while ((ch = getopt(argc, argv, "hr:v")) != -1) {
		switch (ch) {
		case 'r':
			errno = 0;
			reps = strtol(optarg, NULL, 0);
			if (errno == ERANGE || errno == EINVAL) {
				usage(argv);
				exit(2);
			}
			break;
		case 'v':
			verbose = true;
			break;
		case 'h':
		default:
			usage(argv);
			exit(2);
		}
	}

	setbuf(stdout, NULL);
	uh = new_unrhdr(0, count - 1, NULL);
	print_unrhdr(uh);

	a = calloc(count, sizeof(char));
	if (a == NULL)
		err(1, "calloc failed");
	srandomdev();

	printf("sizeof(struct unr) %zu\n", sizeof(struct unr));
	printf("sizeof(struct unrb) %zu\n", sizeof(struct unrb));
	printf("sizeof(struct unrhdr) %zu\n", sizeof(struct unrhdr));
	printf("NBITS %lu\n", (unsigned long)NBITS);
	for (m = 0; m < count * reps; m++) {
		j = random();
		i = (j >> 1) % count;
#if 0
		if (a[i] && (j & 1))
			continue;
#endif
		if ((random() & 1) != 0)
			test_alloc_unr(uh, i, a);
		else
			test_alloc_unr_specific(uh, i, a);
		if (verbose)
			print_unrhdr(uh);
		check_unrhdr(uh, __LINE__);
	}
	for (i = 0; i < (u_int)count; i++) {
		if (a[i]) {
			if (verbose) {
				printf("C %u\n", i);
				print_unrhdr(uh);
			}
			free_unr(uh, i);
		}
	}
	print_unrhdr(uh);
	delete_unrhdr(uh);
	free(a);
	return (0);
}
#endif