1 /*- 2 * Copyright (c) 2004 Poul-Henning Kamp 3 * All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND 15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE 18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 24 * SUCH DAMAGE. 25 * 26 * $FreeBSD$ 27 * 28 * 29 * Unit number allocation functions. 30 * 31 * These functions implement a mixed run-length/bitmap management of unit 32 * number spaces in a very memory efficient manner. 33 * 34 * Allocation policy is always lowest free number first. 35 * 36 * A return value of -1 signals that no more unit numbers are available. 37 * 38 * There is no cost associated with the range of unitnumbers, so unless 39 * the resource really is finite, specify INT_MAX to new_unrhdr() and 40 * forget about checking the return value. 
41 * 42 * If a mutex is not provided when the unit number space is created, a 43 * default global mutex is used. The advantage to passing a mutex in, is 44 * that the alloc_unrl() function can be called with the mutex already 45 * held (it will not be released by alloc_unrl()). 46 * 47 * The allocation function alloc_unr{l}() never sleeps (but it may block on 48 * the mutex of course). 49 * 50 * Freeing a unit number may require allocating memory, and can therefore 51 * sleep so the free_unr() function does not come in a pre-locked variant. 52 * 53 * A userland test program is included. 54 * 55 * Memory usage is a very complex function of the exact allocation 56 * pattern, but always very compact: 57 * * For the very typical case where a single unbroken run of unit 58 * numbers are allocated 44 bytes are used on i386. 59 * * For a unit number space of 1000 units and the random pattern 60 * in the usermode test program included, the worst case usage 61 * was 252 bytes on i386 for 500 allocated and 500 free units. 62 * * For a unit number space of 10000 units and the random pattern 63 * in the usermode test program included, the worst case usage 64 * was 798 bytes on i386 for 5000 allocated and 5000 free units. 65 * * The worst case is where every other unit number is allocated and 66 * the rest are free. In that case 44 + N/4 bytes are used where 67 * N is the number of the highest unit allocated. 68 */ 69 70 #include <sys/param.h> 71 #include <sys/types.h> 72 #include <sys/_unrhdr.h> 73 74 #ifdef _KERNEL 75 76 #include <sys/bitstring.h> 77 #include <sys/malloc.h> 78 #include <sys/kernel.h> 79 #include <sys/systm.h> 80 #include <sys/limits.h> 81 #include <sys/lock.h> 82 #include <sys/mutex.h> 83 84 /* 85 * In theory it would be smarter to allocate the individual blocks 86 * with the zone allocator, but at this time the expectation is that 87 * there will typically not even be enough allocations to fill a single 88 * page, so we stick with malloc for now. 
89 */ 90 static MALLOC_DEFINE(M_UNIT, "Unitno", "Unit number allocation"); 91 92 #define Malloc(foo) malloc(foo, M_UNIT, M_WAITOK | M_ZERO) 93 #define Free(foo) free(foo, M_UNIT) 94 95 static struct mtx unitmtx; 96 97 MTX_SYSINIT(unit, &unitmtx, "unit# allocation", MTX_DEF); 98 99 #else /* ...USERLAND */ 100 101 #include <bitstring.h> 102 #include <err.h> 103 #include <errno.h> 104 #include <getopt.h> 105 #include <stdbool.h> 106 #include <stdio.h> 107 #include <stdlib.h> 108 #include <string.h> 109 110 #define KASSERT(cond, arg) \ 111 do { \ 112 if (!(cond)) { \ 113 printf arg; \ 114 abort(); \ 115 } \ 116 } while (0) 117 118 static int no_alloc; 119 #define Malloc(foo) _Malloc(foo, __LINE__) 120 static void * 121 _Malloc(size_t foo, int line) 122 { 123 124 KASSERT(no_alloc == 0, ("malloc in wrong place() line %d", line)); 125 return (calloc(foo, 1)); 126 } 127 #define Free(foo) free(foo) 128 129 struct unrhdr; 130 131 132 struct mtx { 133 int state; 134 } unitmtx; 135 136 static void 137 mtx_lock(struct mtx *mp) 138 { 139 KASSERT(mp->state == 0, ("mutex already locked")); 140 mp->state = 1; 141 } 142 143 static void 144 mtx_unlock(struct mtx *mp) 145 { 146 KASSERT(mp->state == 1, ("mutex not locked")); 147 mp->state = 0; 148 } 149 150 #define MA_OWNED 9 151 152 static void 153 mtx_assert(struct mtx *mp, int flag) 154 { 155 if (flag == MA_OWNED) { 156 KASSERT(mp->state == 1, ("mtx_assert(MA_OWNED) not true")); 157 } 158 } 159 160 #define CTASSERT(foo) 161 #define WITNESS_WARN(flags, lock, fmt, ...) (void)0 162 163 #endif /* USERLAND */ 164 165 /* 166 * This is our basic building block. 167 * 168 * It can be used in three different ways depending on the value of the ptr 169 * element: 170 * If ptr is NULL, it represents a run of free items. 171 * If ptr points to the unrhdr it represents a run of allocated items. 172 * Otherwise it points to a bitstring of allocated items. 173 * 174 * For runs the len field is the length of the run. 
 * For bitmaps the len field represents the number of allocated items.
 *
 * The bitmap is the same size as struct unr to optimize memory management.
 */
struct unr {
	TAILQ_ENTRY(unr) list;	/* linkage on unrhdr's chunk list */
	u_int len;		/* run length, or #items covered by bitmap */
	void *ptr;		/* NULL, the unrhdr, or a struct unrb */
};

/* Bitmap overlay; deliberately the same size as struct unr (see above). */
struct unrb {
	bitstr_t map[sizeof(struct unr) / sizeof(bitstr_t)];
};

CTASSERT((sizeof(struct unr) % sizeof(bitstr_t)) == 0);

/* Number of bits we can store in the bitmap */
#define NBITS (8 * sizeof(((struct unrb*)NULL)->map))

/*
 * Is the unrb empty in at least the first len bits?
 * bit_ffs() reports -1 when no bit is set in the first len positions.
 */
static inline bool
ub_empty(struct unrb *ub, int len) {
	int first_set;

	bit_ffs(ub->map, len, &first_set);
	return (first_set == -1);
}

/*
 * Is the unrb full? That is, is the number of set elements equal to len?
 * bit_ffc() reports -1 when no clear bit exists in the first len positions.
 */
static inline bool
ub_full(struct unrb *ub, int len)
{
	int first_clear;

	bit_ffc(ub->map, len, &first_clear);
	return (first_clear == -1);
}


#if defined(DIAGNOSTIC) || !defined(_KERNEL)
/*
 * Consistency check function.
 *
 * Checks the internal consistency as well as we can.
 *
 * Called at all boundaries of this API.
221 */ 222 static void 223 check_unrhdr(struct unrhdr *uh, int line) 224 { 225 struct unr *up; 226 struct unrb *ub; 227 u_int x, y, z, w; 228 229 y = uh->first; 230 z = 0; 231 TAILQ_FOREACH(up, &uh->head, list) { 232 z++; 233 if (up->ptr != uh && up->ptr != NULL) { 234 ub = up->ptr; 235 KASSERT (up->len <= NBITS, 236 ("UNR inconsistency: len %u max %zd (line %d)\n", 237 up->len, NBITS, line)); 238 z++; 239 w = 0; 240 for (x = 0; x < up->len; x++) 241 if (bit_test(ub->map, x)) 242 w++; 243 y += w; 244 } else if (up->ptr != NULL) 245 y += up->len; 246 } 247 KASSERT (y == uh->busy, 248 ("UNR inconsistency: items %u found %u (line %d)\n", 249 uh->busy, y, line)); 250 KASSERT (z == uh->alloc, 251 ("UNR inconsistency: chunks %u found %u (line %d)\n", 252 uh->alloc, z, line)); 253 } 254 255 #else 256 257 static __inline void 258 check_unrhdr(struct unrhdr *uh __unused, int line __unused) 259 { 260 261 } 262 263 #endif 264 265 266 /* 267 * Userland memory management. Just use calloc and keep track of how 268 * many elements we have allocated for check_unrhdr(). 
 */

/*
 * Hand out one of the two pre-allocated struct unr's cached in *p1/*p2,
 * consuming it (the slot is NULLed).  The caller pre-allocates before
 * taking the mutex so the locked paths never have to sleep in malloc.
 */
static __inline void *
new_unr(struct unrhdr *uh, void **p1, void **p2)
{
	void *p;

	uh->alloc++;
	KASSERT(*p1 != NULL || *p2 != NULL, ("Out of cached memory"));
	if (*p1 != NULL) {
		p = *p1;
		*p1 = NULL;
		return (p);
	} else {
		p = *p2;
		*p2 = NULL;
		return (p);
	}
}

/*
 * Retire a struct unr.  It is not freed immediately; it is queued on
 * uh->ppfree and reaped later by clean_unrhdrl(), because the callers
 * (e.g. collapse_unr()) run with the mutex held and must not free.
 */
static __inline void
delete_unr(struct unrhdr *uh, void *ptr)
{
	struct unr *up;

	uh->alloc--;
	up = ptr;
	TAILQ_INSERT_TAIL(&uh->ppfree, up, list);
}

/*
 * Release all postponed frees.  Called with the mutex held; the mutex
 * is dropped around each Free() call (Free may sleep in the kernel)
 * and reacquired, which is why the list is re-read from the head each
 * iteration.
 */
void
clean_unrhdrl(struct unrhdr *uh)
{
	struct unr *up;

	mtx_assert(uh->mtx, MA_OWNED);
	while ((up = TAILQ_FIRST(&uh->ppfree)) != NULL) {
		TAILQ_REMOVE(&uh->ppfree, up, list);
		mtx_unlock(uh->mtx);
		Free(up);
		mtx_lock(uh->mtx);
	}

}

/* Unlocked convenience wrapper around clean_unrhdrl(). */
void
clean_unrhdr(struct unrhdr *uh)
{

	mtx_lock(uh->mtx);
	clean_unrhdrl(uh);
	mtx_unlock(uh->mtx);
}

/*
 * Initialize a caller-supplied unrhdr for the range [low, high].
 * A NULL mutex selects the global unitmtx.  The fresh state is one
 * "ideal split": first = 0 allocated units below, last = size of the
 * whole range still free above.
 */
void
init_unrhdr(struct unrhdr *uh, int low, int high, struct mtx *mutex)
{

	KASSERT(low >= 0 && low <= high,
	    ("UNR: use error: new_unrhdr(%d, %d)", low, high));
	if (mutex != NULL)
		uh->mtx = mutex;
	else
		uh->mtx = &unitmtx;
	TAILQ_INIT(&uh->head);
	TAILQ_INIT(&uh->ppfree);
	uh->low = low;
	uh->high = high;
	uh->first = 0;
	uh->last = 1 + (high - low);
	check_unrhdr(uh, __LINE__);
}

/*
 * Allocate a new unrheader set.
 *
 * Highest and lowest valid values given as parameters.
 */

struct unrhdr *
new_unrhdr(int low, int high, struct mtx *mutex)
{
	struct unrhdr *uh;

	uh = Malloc(sizeof *uh);
	init_unrhdr(uh, low, high, mutex);
	return (uh);
}

/*
 * Destroy an unrhdr.  All units must have been freed and all postponed
 * memory reaped (via clean_unrhdr()) beforehand.
 */
void
delete_unrhdr(struct unrhdr *uh)
{

	check_unrhdr(uh, __LINE__);
	KASSERT(uh->busy == 0, ("unrhdr has %u allocations", uh->busy));
	KASSERT(uh->alloc == 0, ("UNR memory leak in delete_unrhdr"));
	KASSERT(TAILQ_FIRST(&uh->ppfree) == NULL,
	    ("unrhdr has postponed item for free"));
	Free(uh);
}

/* True if up is a bitmap element (ptr is neither NULL nor the header). */
static __inline int
is_bitmap(struct unrhdr *uh, struct unr *up)
{
	return (up->ptr != uh && up->ptr != NULL);
}

/*
 * Look for sequence of items which can be combined into a bitmap, if
 * multiple are present, take the one which saves most memory.
 *
 * Return (1) if a sequence was found to indicate that another call
 * might be able to do more.  Return (0) if we found no suitable sequence.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
static int
optimize_unr(struct unrhdr *uh)
{
	struct unr *up, *uf, *us;
	struct unrb *ub, *ubf;
	u_int a, l, ba;

	/*
	 * Look for the run of items (if any) which when collapsed into
	 * a bitmap would save most memory.
	 *
	 * For each candidate starting element uf: a counts the struct
	 * unr's that would be reclaimed (bitmap elements count double:
	 * the unr plus its bitmap storage), l tracks the combined length
	 * which must stay within NBITS.  us/ba remember the best start.
	 */
	us = NULL;
	ba = 0;
	TAILQ_FOREACH(uf, &uh->head, list) {
		if (uf->len >= NBITS)
			continue;
		a = 1;
		if (is_bitmap(uh, uf))
			a++;
		l = uf->len;
		up = uf;
		while (1) {
			up = TAILQ_NEXT(up, list);
			if (up == NULL)
				break;
			if ((up->len + l) > NBITS)
				break;
			a++;
			if (is_bitmap(uh, up))
				a++;
			l += up->len;
		}
		if (a > ba) {
			ba = a;
			us = uf;
		}
	}
	/* Collapsing must save at least one struct unr to be worth it. */
	if (ba < 3)
		return (0);

	/*
	 * If the first element is not a bitmap, make it one.
	 * Trying to do so without allocating more memory complicates things
	 * a bit
	 */
	if (!is_bitmap(uh, us)) {
		uf = TAILQ_NEXT(us, list);
		TAILQ_REMOVE(&uh->head, us, list);
		a = us->len;
		/* l == 1 iff us was an allocated run (ptr == uh). */
		l = us->ptr == uh ? 1 : 0;
		/* Reuse the struct unr itself as the bitmap storage. */
		ub = (void *)us;
		bit_nclear(ub->map, 0, NBITS - 1);
		if (l)
			bit_nset(ub->map, 0, a);
		if (!is_bitmap(uh, uf)) {
			/* Absorb the next run directly into the bitmap. */
			if (uf->ptr == NULL)
				bit_nclear(ub->map, a, a + uf->len - 1);
			else
				bit_nset(ub->map, a, a + uf->len - 1);
			uf->ptr = ub;
			uf->len += a;
			us = uf;
		} else {
			/*
			 * Next element is itself a bitmap: copy its bits
			 * after ours, then retire its old bitmap storage.
			 */
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, a++) {
				if (bit_test(ubf->map, l))
					bit_set(ub->map, a);
				else
					bit_clear(ub->map, a);
			}
			uf->len = a;
			delete_unr(uh, uf->ptr);
			uf->ptr = ub;
			us = uf;
		}
	}
	/*
	 * us is now a bitmap element; fold successors into it until we
	 * run out of list or out of bitmap capacity.
	 */
	ub = us->ptr;
	while (1) {
		uf = TAILQ_NEXT(us, list);
		if (uf == NULL)
			return (1);
		if (uf->len + us->len > NBITS)
			return (1);
		if (uf->ptr == NULL) {
			/* Free run: clear the corresponding bits. */
			bit_nclear(ub->map, us->len, us->len + uf->len - 1);
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else if (uf->ptr == uh) {
			/* Allocated run: set the corresponding bits. */
			bit_nset(ub->map, us->len, us->len + uf->len - 1);
			us->len += uf->len;
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, uf);
		} else {
			/* Bitmap: copy bit by bit, retire both chunks. */
			ubf = uf->ptr;
			for (l = 0; l < uf->len; l++, us->len++) {
				if (bit_test(ubf->map, l))
					bit_set(ub->map, us->len);
				else
					bit_clear(ub->map, us->len);
			}
			TAILQ_REMOVE(&uh->head, uf, list);
			delete_unr(uh, ubf);
			delete_unr(uh, uf);
		}
	}
}

/*
 * See if a given unr should be collapsed with a neighbor.
 *
 * NB: called from alloc_unr(), no new memory allocation allowed.
 */
/*
 * Normalize the list around the "hot spot" element up after an
 * allocation or free: demote degenerate bitmaps to runs, drop empty
 * elements, merge equal neighbors, fold boundary runs into the
 * first/last counters, and finally re-run bitmap optimization.
 * up may be retired in the process; NULL tracking below guards that.
 */
static void
collapse_unr(struct unrhdr *uh, struct unr *up)
{
	struct unr *upp;
	struct unrb *ub;

	/* If bitmap is all set or clear, change it to runlength */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (ub_full(ub, up->len)) {
			delete_unr(uh, up->ptr);
			up->ptr = uh;
		} else if (ub_empty(ub, up->len)) {
			delete_unr(uh, up->ptr);
			up->ptr = NULL;
		}
	}

	/* If nothing left in runlength, delete it */
	if (up->len == 0) {
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp == NULL)
			upp = TAILQ_NEXT(up, list);
		TAILQ_REMOVE(&uh->head, up, list);
		delete_unr(uh, up);
		up = upp;
	}

	/*
	 * If we have "hot-spot" still, merge with neighbor if possible.
	 * Equal ptr values mean equal kind: both NULL (free runs) or
	 * both uh (allocated runs); two bitmaps never compare equal.
	 */
	if (up != NULL) {
		upp = TAILQ_PREV(up, unrhd, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
		upp = TAILQ_NEXT(up, list);
		if (upp != NULL && up->ptr == upp->ptr) {
			up->len += upp->len;
			TAILQ_REMOVE(&uh->head, upp, list);
			delete_unr(uh, upp);
		}
	}

	/* Merge into ->first if possible */
	upp = TAILQ_FIRST(&uh->head);
	if (upp != NULL && upp->ptr == uh) {
		uh->first += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;
	}

	/* Merge into ->last if possible */
	upp = TAILQ_LAST(&uh->head, unrhd);
	if (upp != NULL && upp->ptr == NULL) {
		uh->last += upp->len;
		TAILQ_REMOVE(&uh->head, upp, list);
		delete_unr(uh, upp);
		if (up == upp)
			up = NULL;
	}

	/* Try to make bitmaps */
	while (optimize_unr(uh))
		continue;
}

/*
 * Allocate a free unr.
 */
/*
 * Allocate the lowest free unit number; the caller holds uh->mtx and
 * keeps it (this function never drops it).  Returns -1 when the space
 * is exhausted.  Never allocates memory.
 */
int
alloc_unrl(struct unrhdr *uh)
{
	struct unr *up;
	struct unrb *ub;
	u_int x;
	int y;

	mtx_assert(uh->mtx, MA_OWNED);
	check_unrhdr(uh, __LINE__);
	/* Lowest candidate: first unit past the allocated prefix. */
	x = uh->low + uh->first;

	up = TAILQ_FIRST(&uh->head);

	/*
	 * If we have an ideal split, just adjust the first+last
	 */
	if (up == NULL && uh->last > 0) {
		uh->first++;
		uh->last--;
		uh->busy++;
		return (x);
	}

	/*
	 * We can always allocate from the first list element, so if we have
	 * nothing on the list, we must have run out of unit numbers.
	 */
	if (up == NULL)
		return (-1);

	KASSERT(up->ptr != uh, ("UNR first element is allocated"));

	if (up->ptr == NULL) {	/* free run */
		uh->first++;
		up->len--;
	} else {		/* bitmap */
		ub = up->ptr;
		bit_ffc(ub->map, up->len, &y);
		KASSERT(y != -1, ("UNR corruption: No clear bit in bitmap."));
		bit_set(ub->map, y);
		x += y;
	}
	uh->busy++;
	collapse_unr(uh, up);
	return (x);
}

/*
 * Locking wrapper for alloc_unrl(): also reaps any postponed frees
 * while the mutex is held.
 */
int
alloc_unr(struct unrhdr *uh)
{
	int i;

	mtx_lock(uh->mtx);
	i = alloc_unrl(uh);
	clean_unrhdrl(uh);
	mtx_unlock(uh->mtx);
	return (i);
}

/*
 * Allocate a specific unit number, if free.  Caller holds uh->mtx;
 * p1/p2 are the caller's pre-allocated struct unr cache (see new_unr()).
 * Returns item on success, -1 if out of range or already allocated.
 */
static int
alloc_unr_specificl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upn;
	struct unrb *ub;
	u_int i, last, tl;

	mtx_assert(uh->mtx, MA_OWNED);

	/* Below ->first everything is already allocated. */
	if (item < uh->low + uh->first || item > uh->high)
		return (-1);

	up = TAILQ_FIRST(&uh->head);
	/* Ideal split. */
	if (up == NULL && item - uh->low == uh->first) {
		uh->first++;
		uh->last--;
		uh->busy++;
		check_unrhdr(uh, __LINE__);
		return (item);
	}

	/* i: offset of item within the chunk list. */
	i = item - uh->low - uh->first;

	if (up == NULL) {
		/* Empty list: free run up to item, then a 1-unit alloc run. */
		up = new_unr(uh, p1, p2);
		up->ptr = NULL;
		up->len = i;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = 1;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		uh->last = uh->high - uh->low - i;
		uh->busy++;
		check_unrhdr(uh, __LINE__);
		return (item);
	} else {
		/* Find the item which contains the unit we want to allocate. */
		TAILQ_FOREACH(up, &uh->head, list) {
			if (up->len > i)
				break;
			i -= up->len;
		}
	}

	if (up == NULL) {
		/* item lies in the trailing free space past the list. */
		if (i > 0) {
			up = new_unr(uh, p1, p2);
			up->ptr = NULL;
			up->len = i;
			TAILQ_INSERT_TAIL(&uh->head, up, list);
		}
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = 1;
		TAILQ_INSERT_TAIL(&uh->head, up, list);
		goto done;
	}

	if (is_bitmap(uh, up)) {
		ub = up->ptr;
		if (bit_test(ub->map, i) == 0) {
			bit_set(ub->map, i);
			goto done;
		} else
			return (-1);
	} else if (up->ptr == uh)
		/* Inside an allocated run: already taken. */
		return (-1);

	KASSERT(up->ptr == NULL,
	    ("alloc_unr_specificl: up->ptr != NULL (up=%p)", up));

	/* Split off the tail end, if any. */
	tl = up->len - (1 + i);
	if (tl > 0) {
		upn = new_unr(uh, p1, p2);
		upn->ptr = NULL;
		upn->len = tl;
		TAILQ_INSERT_AFTER(&uh->head, up, upn, list);
	}

	/* Split off head end, if any */
	if (i > 0) {
		upn = new_unr(uh, p1, p2);
		upn->len = i;
		upn->ptr = NULL;
		TAILQ_INSERT_BEFORE(up, upn, list);
	}
	up->len = 1;
	up->ptr = uh;

done:
	/* Shrink ->last if the allocation ate into the trailing free space. */
	last = uh->high - uh->low - (item - uh->low);
	if (uh->last > last)
		uh->last = last;
	uh->busy++;
	collapse_unr(uh, up);
	check_unrhdr(uh, __LINE__);
	return (item);
}

/*
 * Public wrapper: pre-allocates two struct unr's (may sleep, hence the
 * WITNESS_WARN), takes the mutex, and frees any unused cached chunks.
 */
int
alloc_unr_specific(struct unrhdr *uh, u_int item)
{
	void *p1, *p2;
	int i;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "alloc_unr_specific");

	p1 = Malloc(sizeof(struct unr));
	p2 = Malloc(sizeof(struct unr));

	mtx_lock(uh->mtx);
	i = alloc_unr_specificl(uh, item, &p1, &p2);
	mtx_unlock(uh->mtx);

	if (p1 != NULL)
		Free(p1);
	if (p2 != NULL)
		Free(p2);

	return (i);
}

/*
 * Free a unr.
 *
 * If we can save unrs by using a bitmap, do so.
 */
static void
free_unrl(struct unrhdr *uh, u_int item, void **p1, void **p2)
{
	struct unr *up, *upp, *upn;
	struct unrb *ub;
	u_int pl;

	KASSERT(item >= uh->low && item <= uh->high,
	    ("UNR: free_unr(%u) out of range [%u...%u]",
	     item, uh->low, uh->high));
	check_unrhdr(uh, __LINE__);
	item -= uh->low;
	upp = TAILQ_FIRST(&uh->head);
	/*
	 * Freeing in the ideal split case
	 */
	if (item + 1 == uh->first && upp == NULL) {
		uh->last++;
		uh->first--;
		uh->busy--;
		check_unrhdr(uh, __LINE__);
		return;
	}
	/*
	 * Freeing in the ->first section.  Create a run starting at the
	 * freed item.  The code below will subdivide it.
	 */
	if (item < uh->first) {
		up = new_unr(uh, p1, p2);
		up->ptr = uh;
		up->len = uh->first - item;
		TAILQ_INSERT_HEAD(&uh->head, up, list);
		uh->first -= up->len;
	}

	/* item is now an offset into the chunk list. */
	item -= uh->first;

	/* Find the item which contains the unit we want to free */
	TAILQ_FOREACH(up, &uh->head, list) {
		if (up->len > item)
			break;
		item -= up->len;
	}

	/* Handle bitmap items */
	if (is_bitmap(uh, up)) {
		ub = up->ptr;

		KASSERT(bit_test(ub->map, item) != 0,
		    ("UNR: Freeing free item %d (bitmap)\n", item));
		bit_clear(ub->map, item);
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	KASSERT(up->ptr == uh, ("UNR Freeing free item %d (run))\n", item));

	/* Just this one left, reap it */
	if (up->len == 1) {
		up->ptr = NULL;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item into the previous 'free' run */
	upp = TAILQ_PREV(up, unrhd, list);
	if (item == 0 && upp != NULL && upp->ptr == NULL) {
		upp->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Check if we can shift the item to the next 'free' run */
	upn = TAILQ_NEXT(up, list);
	if (item == up->len - 1 && upn != NULL && upn->ptr == NULL) {
		upn->len++;
		up->len--;
		uh->busy--;
		collapse_unr(uh, up);
		return;
	}

	/* Split off the tail end, if any. */
	pl = up->len - (1 + item);
	if (pl > 0) {
		upp = new_unr(uh, p1, p2);
		upp->ptr = uh;
		upp->len = pl;
		TAILQ_INSERT_AFTER(&uh->head, up, upp, list);
	}

	/* Split off head end, if any */
	if (item > 0) {
		upp = new_unr(uh, p1, p2);
		upp->len = item;
		upp->ptr = uh;
		TAILQ_INSERT_BEFORE(up, upp, list);
	}
	up->len = 1;
	up->ptr = NULL;
	uh->busy--;
	collapse_unr(uh, up);
}

/*
 * Public wrapper: pre-allocates two struct unr's (may sleep, hence no
 * pre-locked variant), frees the number, reaps postponed frees, and
 * releases any unused cached chunks.
 */
void
free_unr(struct unrhdr *uh, u_int item)
{
	void *p1, *p2;

	WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL, "free_unr");
	p1 = Malloc(sizeof(struct unr));
	p2 = Malloc(sizeof(struct unr));
	mtx_lock(uh->mtx);
	free_unrl(uh, item, &p1, &p2);
	clean_unrhdrl(uh);
	mtx_unlock(uh->mtx);
	if (p1 != NULL)
		Free(p1);
	if (p2 != NULL)
		Free(p2);
}

#ifndef _KERNEL	/* USERLAND test driver */

/*
 * Simple stochastic test driver for the above functions.  The code resides
 * here so that it can access static functions and structures.
 */

static bool verbose;
#define VPRINTF(...)	{if (verbose) printf(__VA_ARGS__);}

/* Dump one chunk: its kind (free/alloc/bitmap) and contents. */
static void
print_unr(struct unrhdr *uh, struct unr *up)
{
	u_int x;
	struct unrb *ub;

	printf("  %p len = %5u ", up, up->len);
	if (up->ptr == NULL)
		printf("free\n");
	else if (up->ptr == uh)
		printf("alloc\n");
	else {
		ub = up->ptr;
		printf("bitmap [");
		for (x = 0; x < up->len; x++) {
			if (bit_test(ub->map, x))
				printf("#");
			else
				printf(" ");
		}
		printf("]\n");
	}
}

/* Dump the header counters and every chunk with its base unit number. */
static void
print_unrhdr(struct unrhdr *uh)
{
	struct unr *up;
	u_int x;

	printf(
	    "%p low = %u high = %u first = %u last = %u busy %u chunks = %u\n",
	    uh, uh->low, uh->high, uh->first, uh->last, uh->busy, uh->alloc);
	x = uh->low + uh->first;
	TAILQ_FOREACH(up, &uh->head, list) {
		printf("  from = %5u", x);
		print_unr(uh, up);
		if (up->ptr == NULL || up->ptr == uh)
			x += up->len;
		else
			x += NBITS;
	}
}

/*
 * Toggle unit i: free it if the shadow array a[] says it is allocated,
 * otherwise allocate the lowest free unit.  no_alloc is asserted around
 * alloc_unr() because that path must not malloc.
 */
static void
test_alloc_unr(struct unrhdr *uh, u_int i, char a[])
{
	int j;

	if (a[i]) {
		VPRINTF("F %u\n", i);
		free_unr(uh, i);
		a[i] = 0;
	} else {
		no_alloc = 1;
		j = alloc_unr(uh);
		if (j != -1) {
			a[j] = 1;
			VPRINTF("A %d\n", j);
		}
		no_alloc = 0;
	}
}

/*
 * Exercise alloc_unr_specific(): on failure the unit was already
 * allocated, so free it; on success record it in the shadow array.
 */
static void
test_alloc_unr_specific(struct unrhdr *uh, u_int i, char a[])
{
	int j;

	j = alloc_unr_specific(uh, i);
	if (j == -1) {
		VPRINTF("F %u\n", i);
		a[i] = 0;
		free_unr(uh, i);
	} else {
		a[i] = 1;
		VPRINTF("A %d\n", j);
	}
}

static void
usage(char** argv)
{
	printf("%s [-h] [-r REPETITIONS] [-v]\n", argv[0]);
}

/*
 * Random mix of allocs and frees over a unit space of 'count' numbers,
 * cross-checked against the shadow array a[] and check_unrhdr() after
 * every operation; then free everything and verify a clean teardown.
 */
int
main(int argc, char **argv)
{
	struct unrhdr *uh;
	char *a;
	long count = 10000;	/* Number of unrs to test */
	long reps = 1;
	int ch;
	u_int i, x, m, j;

	verbose = false;

	while ((ch = getopt(argc, argv, "hr:v")) != -1) {
		switch (ch) {
		case 'r':
			errno = 0;
			reps = strtol(optarg, NULL, 0);
			if (errno == ERANGE || errno == EINVAL) {
				usage(argv);
				exit(2);
			}

			break;
		case 'v':
			verbose = true;
			break;
		case 'h':
		default:
			usage(argv);
			exit(2);
		}


	}

	setbuf(stdout, NULL);
	uh = new_unrhdr(0, count - 1, NULL);
	print_unrhdr(uh);

	/* Shadow bitmap: a[i] != 0 iff unit i is currently allocated. */
	a = calloc(count, sizeof(char));
	if (a == NULL)
		err(1, "calloc failed");
	srandomdev();

	printf("sizeof(struct unr) %zu\n", sizeof(struct unr));
	printf("sizeof(struct unrb) %zu\n", sizeof(struct unrb));
	printf("sizeof(struct unrhdr) %zu\n", sizeof(struct unrhdr));
	printf("NBITS %lu\n", (unsigned long)NBITS);
	x = 1;
	for (m = 0; m < count * reps; m++) {
		j = random();
		i = (j >> 1) % count;
#if 0
		if (a[i] && (j & 1))
			continue;
#endif
		if ((random() & 1) != 0)
			test_alloc_unr(uh, i, a);
		else
			test_alloc_unr_specific(uh, i, a);

		if (verbose)
			print_unrhdr(uh);
		check_unrhdr(uh, __LINE__);
	}
	/* Drain everything still allocated; delete_unrhdr asserts empty. */
	for (i = 0; i < count; i++) {
		if (a[i]) {
			if (verbose) {
				printf("C %u\n", i);
				print_unrhdr(uh);
			}
			free_unr(uh, i);
		}
	}
	print_unrhdr(uh);
	delete_unrhdr(uh);
	free(a);
	return (0);
}
#endif