/*
 * BLIST.C -	Bitmap allocator/deallocator, using a radix tree with hinting
 *
 *	(c)Copyright 1998, Matthew Dillon.  Terms for use and redistribution
 *	are covered by the BSD Copyright as found in /usr/src/COPYRIGHT.
 *
 *	This module implements a general bitmap allocator/deallocator.  The
 *	allocator eats around 2 bits per 'block'.  The module does not
 *	try to interpret the meaning of a 'block' other than to return
 *	SWAPBLK_NONE on an allocation failure.
 *
 *	A radix tree is used to maintain the bitmap.  Two radix constants are
 *	involved:  one for the bitmaps contained in the leaf nodes (typically
 *	32), and one for the meta nodes (typically 16).  Both meta and leaf
 *	nodes have a hint field.  This field gives us a hint as to the largest
 *	free contiguous range of blocks under the node.  It may contain a
 *	value that is too high, but will never contain a value that is too
 *	low.  When the radix tree is searched, allocation failures in subtrees
 *	update the hint.
 *
 *	The radix tree also implements two collapsed states for meta nodes:
 *	the ALL-ALLOCATED state and the ALL-FREE state.  If a meta node is
 *	in either of these two states, all information contained underneath
 *	the node is considered stale.  These states are used to optimize
 *	allocation and freeing operations.
 *
 *	The hinting greatly increases code efficiency for allocations while
 *	the general radix structure optimizes both allocations and frees.  The
 *	radix tree should be able to operate well no matter how much
 *	fragmentation there is and no matter how large a bitmap is used.
 *
 *	Unlike the rlist code, the blist code wires all necessary memory at
 *	creation time.  Neither allocations nor frees require interaction with
 *	the memory subsystem.  In contrast, the rlist code may allocate memory
 *	on an rlist_free() call.  The non-blocking features of the blist code
 *	are used to great advantage in the swap code (vm/nswap_pager.c).  The
 *	rlist code uses a little less overall memory than the blist code (but
 *	due to swap interleaving not all that much less), but the blist code
 *	scales much, much better.
 *
 *	LAYOUT: The radix tree is laid out recursively using a
 *	linear array.  Each meta node is immediately followed (laid out
 *	sequentially in memory) by BLIST_META_RADIX lower level nodes.  This
 *	is a recursive structure but one that can be easily scanned through
 *	a very simple 'skip' calculation.  In order to support large radixes,
 *	portions of the tree may reside outside our memory allocation.  We
 *	handle this with an early-termination optimization (when bighint is
 *	set to -1) on the scan.  The memory allocation is only large enough
 *	to cover the number of blocks requested at creation time even if it
 *	must be encompassed by a larger root-node radix.
 *
 *	NOTE: the allocator cannot currently allocate more than
 *	BLIST_BMAP_RADIX blocks per call.  It will panic with 'allocation too
 *	large' if you try.  This is an area that could use improvement.  The
 *	radix is large enough that this restriction does not affect the swap
 *	system, though.  Currently only the allocation code is affected by
 *	this algorithmic unfeature.  The freeing code can handle arbitrary
 *	ranges.
 *
 *	This code can be compiled stand-alone for debugging.
 *
 * $FreeBSD$
 */
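
/*
 * Illustrative layout sketch (illustration only, assuming the typical radix
 * constants mentioned above: BLIST_BMAP_RADIX = 32, BLIST_META_RADIX = 16,
 * so BLIST_META_RADIX_SHIFT = 4).  blist_create() grows radix/skip like so:
 *
 *	radix =   32, skip =   0	single leaf, up to 32 blocks
 *	radix =  512, skip =  16	root meta + 16 leaves at [1]..[16]
 *	radix = 8192, skip = 272	root meta, children at [1], [18],
 *					[35], ... [256] (next_skip = 17,
 *					each child subtree occupies 17 slots)
 *
 * The 'skip' of a node is the number of array slots occupied by all of its
 * descendants, which is how the scan code steps from one child subtree to
 * the next without needing pointers.
 */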

#ifdef KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/blist.h>
#include <sys/malloc.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#else

#ifndef BLIST_NO_DEBUG
#define BLIST_DEBUG
#endif

#define SWAPBLK_NONE ((daddr_t)-1)

#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>

#define malloc(a,b,c)	malloc(a)
#define free(a,b)	free(a)

typedef unsigned int u_daddr_t;

#include <sys/blist.h>

void panic(const char *ctl, ...);

#endif

/*
 * static support functions
 */

static daddr_t blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count);
static daddr_t blst_meta_alloc(blmeta_t *scan, daddr_t blk,
				daddr_t count, daddr_t radix, int skip);
static void blst_leaf_free(blmeta_t *scan, daddr_t relblk, int count);
static void blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count,
				daddr_t radix, int skip, daddr_t blk);
static void blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix,
				daddr_t skip, blist_t dest, daddr_t count);
static daddr_t blst_radix_init(blmeta_t *scan, daddr_t radix,
				int skip, daddr_t count);
#ifndef KERNEL
static void blst_radix_print(blmeta_t *scan, daddr_t blk,
				daddr_t radix, int skip, int tab);
#endif

#ifdef KERNEL
static MALLOC_DEFINE(M_SWAP, "SWAP", "Swap space");
#endif

/*
 * blist_create() - create a blist capable of handling up to the specified
 *		    number of blocks
 *
 *	blocks must be greater than 0
 *
 *	The smallest blist consists of a single leaf node capable of
 *	managing BLIST_BMAP_RADIX blocks.
 */

blist_t
blist_create(daddr_t blocks)
{
	blist_t bl;
	int radix;
	int skip = 0;

	/*
	 * Calculate radix and skip field used for scanning.
	 */
	radix = BLIST_BMAP_RADIX;

	while (radix < blocks) {
		radix <<= BLIST_META_RADIX_SHIFT;
		skip = (skip + 1) << BLIST_META_RADIX_SHIFT;
	}

	bl = malloc(sizeof(struct blist), M_SWAP, M_WAITOK);

	bzero(bl, sizeof(*bl));

	bl->bl_blocks = blocks;
	bl->bl_radix = radix;
	bl->bl_skip = skip;
	bl->bl_rootblks = 1 +
	    blst_radix_init(NULL, bl->bl_radix, bl->bl_skip, blocks);
	bl->bl_root = malloc(sizeof(blmeta_t) * bl->bl_rootblks, M_SWAP, M_WAITOK);

#if defined(BLIST_DEBUG)
	printf(
		"BLIST representing %d blocks (%d MB of swap)"
		", requiring %dK of ram\n",
		bl->bl_blocks,
		bl->bl_blocks * 4 / 1024,
		(bl->bl_rootblks * sizeof(blmeta_t) + 1023) / 1024
	);
	printf("BLIST raw radix tree contains %d records\n", bl->bl_rootblks);
#endif
	blst_radix_init(bl->bl_root, bl->bl_radix, bl->bl_skip, blocks);

	return(bl);
}

void
blist_destroy(blist_t bl)
{
	free(bl->bl_root, M_SWAP);
	free(bl, M_SWAP);
}
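
/*
 * Minimal usage sketch (disabled; illustration only, not part of the
 * module).  A freshly created blist starts out all-allocated, so callers
 * typically free the managed range right after creation, as main() below
 * and the swap code do.  The block counts here are arbitrary.
 */
#if 0
static void
blist_usage_example(void)
{
	blist_t bl;
	daddr_t blk;

	bl = blist_create(1024);	/* manage blocks 0..1023	*/
	blist_free(bl, 0, 1024);	/* mark the whole range free	*/

	blk = blist_alloc(bl, 16);	/* reserve 16 contiguous blocks	*/
	if (blk != SWAPBLK_NONE)
		blist_free(bl, blk, 16);	/* give them back	*/

	blist_destroy(bl);
}
#endif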

/*
 * blist_alloc() -	reserve space in the block bitmap.  Return the base
 *			of a contiguous region or SWAPBLK_NONE if space could
 *			not be allocated.
 */

daddr_t
blist_alloc(blist_t bl, daddr_t count)
{
	daddr_t blk = SWAPBLK_NONE;

	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blk = blst_leaf_alloc(bl->bl_root, 0, count);
		else
			blk = blst_meta_alloc(bl->bl_root, 0, count, bl->bl_radix, bl->bl_skip);
		if (blk != SWAPBLK_NONE)
			bl->bl_free -= count;
	}
	return(blk);
}

/*
 * blist_free() -	free up space in the block bitmap.  Panic if an
 *			inconsistency is found.
 */

void
blist_free(blist_t bl, daddr_t blkno, daddr_t count)
{
	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blst_leaf_free(bl->bl_root, blkno, count);
		else
			blst_meta_free(bl->bl_root, blkno, count, bl->bl_radix, bl->bl_skip, 0);
		bl->bl_free += count;
	}
}

/*
 * blist_resize() -	resize an existing radix tree to handle the
 *			specified number of blocks.  This will reallocate
 *			the tree and transfer the previous bitmap to the new
 *			one.  When extending the tree you can specify whether
 *			the new blocks are to be left allocated or freed.
 */

void
blist_resize(blist_t *pbl, daddr_t count, int freenew)
{
	blist_t newbl = blist_create(count);
	blist_t save = *pbl;

	*pbl = newbl;
	if (count > save->bl_blocks)
		count = save->bl_blocks;
	blst_copy(save->bl_root, 0, save->bl_radix, save->bl_skip, newbl, count);

	/*
	 * If resizing upwards, should we free the new space or not?
	 */
	if (freenew && count < newbl->bl_blocks) {
		blist_free(newbl, count, newbl->bl_blocks - count);
	}
	blist_destroy(save);
}
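
/*
 * Resize sketch (disabled; illustration only, not part of the module).
 * blist_resize() builds a new tree, copies whatever free space fits, and
 * destroys the old tree, updating the caller's pointer in place.  With
 * freenew != 0 any added blocks come up free; with freenew == 0 they come
 * up allocated.  The sizes used here are arbitrary.
 */
#if 0
static void
blist_resize_example(void)
{
	blist_t bl = blist_create(1024);

	blist_free(bl, 0, 1024);	/* start with everything free	*/
	blist_resize(&bl, 4096, 1);	/* grow; blocks 1024..4095 free	*/
	blist_resize(&bl, 512, 0);	/* shrink to the first 512 blocks */
	blist_destroy(bl);
}
#endif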

#ifdef BLIST_DEBUG

/*
 * blist_print()    - dump radix tree
 */

void
blist_print(blist_t bl)
{
	printf("BLIST {\n");
	blst_radix_print(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, 4);
	printf("}\n");
}

#endif

/************************************************************************
 *			ALLOCATION SUPPORT FUNCTIONS			*
 ************************************************************************
 *
 *	These support functions do all the actual work.  They may seem
 *	rather longish, but that's because I've commented them up.  The
 *	actual code is straightforward.
 *
 */

/*
 * blst_leaf_alloc() -	allocate at a leaf in the radix tree (a bitmap).
 *
 *	This is the core of the allocator and is optimized for the 1 block
 *	and the BLIST_BMAP_RADIX block allocation cases.  Other cases are
 *	somewhat slower.  The 1 block allocation case is log2 and extremely
 *	quick.
 */

static daddr_t
blst_leaf_alloc(
	blmeta_t *scan,
	daddr_t blk,
	int count
) {
	u_daddr_t orig = scan->u.bmu_bitmap;

	if (orig == 0) {
		/*
		 * Optimize bitmap all-allocated case.  Also, count = 1
		 * case assumes at least 1 bit is free in the bitmap, so
		 * we have to take care of this case here.
		 */
		scan->bm_bighint = 0;
		return(SWAPBLK_NONE);
	}
	if (count == 1) {
		/*
		 * Optimized code to allocate one bit out of the bitmap
		 */
		u_daddr_t mask;
		int j = BLIST_BMAP_RADIX/2;
		int r = 0;

		mask = (u_daddr_t)-1 >> (BLIST_BMAP_RADIX/2);

		while (j) {
			if ((orig & mask) == 0) {
				r += j;
				orig >>= j;
			}
			j >>= 1;
			mask >>= j;
		}
		scan->u.bmu_bitmap &= ~(1 << r);
		return(blk + r);
	}
	if (count <= BLIST_BMAP_RADIX) {
		/*
		 * non-optimized code to allocate N bits out of the bitmap.
		 * The more bits, the faster the code runs.  It will run
		 * the slowest allocating 2 bits, but since there aren't any
		 * memory ops in the core loop (or shouldn't be, anyway),
		 * you probably won't notice the difference.
		 */
		int j;
		int n = BLIST_BMAP_RADIX - count;
		u_daddr_t mask;

		mask = (u_daddr_t)-1 >> n;

		for (j = 0; j <= n; ++j) {
			if ((orig & mask) == mask) {
				scan->u.bmu_bitmap &= ~mask;
				return(blk + j);
			}
			mask = (mask << 1);
		}
	}
	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	scan->bm_bighint = count - 1;
	return(SWAPBLK_NONE);
}
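
/*
 * Worked example for the count == 1 search above (illustration only,
 * assuming the typical BLIST_BMAP_RADIX of 32):  with bmu_bitmap =
 * 0x00300000 (only blocks 20 and 21 free) the loop proceeds as
 *
 *	j = 16	mask = 0x0000ffff   low half empty -> r = 16, orig >>= 16
 *	j =  8	mask = 0x000000ff   0x30 overlaps  -> stay in low half
 *	j =  4	mask = 0x0000000f   low half empty -> r = 20, orig >>= 4
 *	j =  2	mask = 0x00000003   overlaps	   -> stay
 *	j =  1	mask = 0x00000001   overlaps	   -> stay
 *
 * leaving r = 20, i.e. the lowest free block, which is then marked
 * allocated by clearing bit 20.
 */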

/*
 * blst_meta_alloc() -	allocate at a meta node in the radix tree.
 *
 *	Attempt to allocate at a meta node.  If we can't, we update
 *	bighint and return a failure.  Updating bighint optimizes future
 *	calls that hit this node.  We have to check for our collapse cases
 *	and we have a few optimizations strewn in as well.
 */

static daddr_t
blst_meta_alloc(
	blmeta_t *scan,
	daddr_t blk,
	daddr_t count,
	daddr_t radix,
	int skip
) {
	int i;
	int next_skip = (skip >> BLIST_META_RADIX_SHIFT);

	if (scan->u.bmu_avail == 0) {
		/*
		 * ALL-ALLOCATED special case
		 */
		scan->bm_bighint = count;
		return(SWAPBLK_NONE);
	}

	if (scan->u.bmu_avail == radix) {
		radix >>= BLIST_META_RADIX_SHIFT;

		/*
		 * ALL-FREE special case, initialize uninitialized
		 * sublevel.
		 */
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (daddr_t)-1)
				break;
			if (next_skip == 1) {
				scan[i].u.bmu_bitmap = (u_daddr_t)-1;
				scan[i].bm_bighint = BLIST_BMAP_RADIX;
			} else {
				scan[i].bm_bighint = radix;
				scan[i].u.bmu_avail = radix;
			}
		}
	} else {
		radix >>= BLIST_META_RADIX_SHIFT;
	}

	for (i = 1; i <= skip; i += next_skip) {
		if (count <= scan[i].bm_bighint) {
			/*
			 * count may fit in this subtree (the hint can be
			 * too high), so try it.
			 */
			daddr_t r;
			if (next_skip == 1) {
				r = blst_leaf_alloc(&scan[i], blk, count);
			} else {
				r = blst_meta_alloc(&scan[i], blk, count, radix, next_skip - 1);
			}
			if (r != SWAPBLK_NONE) {
				scan->u.bmu_avail -= count;
				if (scan->bm_bighint > scan->u.bmu_avail)
					scan->bm_bighint = scan->u.bmu_avail;
				return(r);
			}
		} else if (scan[i].bm_bighint == (daddr_t)-1) {
			/*
			 * Terminator
			 */
			break;
		} else if (count > radix) {
			/*
			 * count does not fit in object even if it were
			 * completely free.
			 */
			panic("blist_meta_alloc: allocation too large");
		}
		blk += radix;
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	if (scan->bm_bighint >= count)
		scan->bm_bighint = count - 1;
	return(SWAPBLK_NONE);
}

/*
 * BLST_LEAF_FREE() -	free allocated block from leaf bitmap
 *
 */

static void
blst_leaf_free(
	blmeta_t *scan,
	daddr_t blk,
	int count
) {
	/*
	 * free some data in this bitmap
	 *
	 * e.g.
	 *	0000111111111110000
	 *	    \_________/\__/
	 *		v        n
	 */
	int n = blk & (BLIST_BMAP_RADIX - 1);
	u_daddr_t mask;

	mask = ((u_daddr_t)-1 << n) &
	    ((u_daddr_t)-1 >> (BLIST_BMAP_RADIX - count - n));

	if (scan->u.bmu_bitmap & mask)
		panic("blst_radix_free: freeing free block");
	scan->u.bmu_bitmap |= mask;

	/*
	 * We could probably do a better job here.  We are required to make
	 * bighint at least as large as the biggest contiguous block of
	 * data.  If we just shoehorn it, a little extra overhead will
	 * be incurred on the next allocation (but only that one typically).
	 */
	scan->bm_bighint = BLIST_BMAP_RADIX;
}
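
/*
 * Worked example for the mask construction above (illustration only,
 * assuming the typical BLIST_BMAP_RADIX of 32):  freeing count = 11
 * blocks starting at an offset of n = 4 within the leaf gives
 *
 *	((u_daddr_t)-1 << 4)		= 0xfffffff0
 *	((u_daddr_t)-1 >> (32-11-4))	= 0x00007fff
 *	mask				= 0x00007ff0	(bits 4..14)
 *
 * i.e. exactly the eleven bits being returned to the free pool.  If any
 * of those bits is already set, the double-free panic above fires.
 */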

/*
 * BLST_META_FREE() - free allocated blocks from radix tree meta info
 *
 *	This support routine frees a range of blocks from the bitmap.
 *	The range must be entirely enclosed by this radix node.  If a
 *	meta node, we break the range down recursively to free blocks
 *	in subnodes (which means that this code can free an arbitrary
 *	range whereas the allocation code cannot allocate an arbitrary
 *	range).
 */

static void
blst_meta_free(
	blmeta_t *scan,
	daddr_t freeBlk,
	daddr_t count,
	daddr_t radix,
	int skip,
	daddr_t blk
) {
	int i;
	int next_skip = (skip >> BLIST_META_RADIX_SHIFT);

#if 0
	printf("FREE (%x,%d) FROM (%x,%d)\n",
	    freeBlk, count,
	    blk, radix
	);
#endif

	if (scan->u.bmu_avail == 0) {
		/*
		 * ALL-ALLOCATED special case, with possible
		 * shortcut to ALL-FREE special case.
		 */
		scan->u.bmu_avail = count;
		scan->bm_bighint = count;

		if (count != radix) {
			for (i = 1; i <= skip; i += next_skip) {
				if (scan[i].bm_bighint == (daddr_t)-1)
					break;
				scan[i].bm_bighint = 0;
				if (next_skip == 1) {
					scan[i].u.bmu_bitmap = 0;
				} else {
					scan[i].u.bmu_avail = 0;
				}
			}
			/* fall through */
		}
	} else {
		scan->u.bmu_avail += count;
		/* scan->bm_bighint = radix; */
	}

	/*
	 * ALL-FREE special case.
	 */

	if (scan->u.bmu_avail == radix)
		return;
#if !defined(MAX_PERF)
	if (scan->u.bmu_avail > radix)
		panic("blst_meta_free: freeing already free blocks (%d) %d/%d", count, scan->u.bmu_avail, radix);
#endif

	/*
	 * Break the free down into its components
	 */

	radix >>= BLIST_META_RADIX_SHIFT;

	i = (freeBlk - blk) / radix;
	blk += i * radix;
	i = i * next_skip + 1;

	while (i <= skip && blk < freeBlk + count) {
		daddr_t v;

		v = blk + radix - freeBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (daddr_t)-1)
			panic("blst_meta_free: freeing unexpected range");

		if (next_skip == 1) {
			blst_leaf_free(&scan[i], freeBlk, v);
		} else {
			blst_meta_free(&scan[i], freeBlk, v, radix, next_skip - 1, blk);
		}
		if (scan->bm_bighint < scan[i].bm_bighint)
			scan->bm_bighint = scan[i].bm_bighint;
		count -= v;
		freeBlk += v;
		blk += radix;
		i += next_skip;
	}
}
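
/*
 * Worked example for the index arithmetic above (illustration only, using
 * the typical constants, i.e. a root with radix 8192, skip 272 and
 * next_skip 17):  freeing 100 blocks starting at block 1000 from the root
 * gives a child radix of 512, so
 *
 *	i   = (1000 - 0) / 512	= 1	(second child)
 *	blk = 1 * 512		= 512
 *	i   = 1 * 17 + 1	= 18	(that child's array slot)
 *
 * The first pass frees the 24 blocks 1000..1023 that fall inside that
 * child's range [512,1024), then advances to slot 35 to free the
 * remaining 76 blocks from the child covering [1024,1536).
 */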

/*
 * BLST_COPY() - copy one radix tree to another
 *
 *	Locates free space in the source tree and frees it in the destination
 *	tree.  The space may not already be free in the destination.
 */

static void blst_copy(
	blmeta_t *scan,
	daddr_t blk,
	daddr_t radix,
	daddr_t skip,
	blist_t dest,
	daddr_t count
) {
	int next_skip;
	int i;

	/*
	 * Leaf node
	 */

	if (radix == BLIST_BMAP_RADIX) {
		u_daddr_t v = scan->u.bmu_bitmap;

		if (v == (u_daddr_t)-1) {
			blist_free(dest, blk, count);
		} else if (v != 0) {
			int i;

			for (i = 0; i < BLIST_BMAP_RADIX && i < count; ++i) {
				if (v & (1 << i))
					blist_free(dest, blk + i, 1);
			}
		}
		return;
	}

	/*
	 * Meta node
	 */

	if (scan->u.bmu_avail == 0) {
		/*
		 * Source all allocated, leave dest allocated
		 */
		return;
	}
	if (scan->u.bmu_avail == radix) {
		/*
		 * Source all free, free entire dest
		 */
		if (count < radix)
			blist_free(dest, blk, count);
		else
			blist_free(dest, blk, radix);
		return;
	}

	radix >>= BLIST_META_RADIX_SHIFT;
	next_skip = (skip >> BLIST_META_RADIX_SHIFT);

	for (i = 1; count && i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (daddr_t)-1)
			break;

		if (count >= radix) {
			blst_copy(
			    &scan[i],
			    blk,
			    radix,
			    next_skip - 1,
			    dest,
			    radix
			);
			count -= radix;
		} else {
			if (count) {
				blst_copy(
				    &scan[i],
				    blk,
				    radix,
				    next_skip - 1,
				    dest,
				    count
				);
			}
			count = 0;
		}
		blk += radix;
	}
}

/*
 * BLST_RADIX_INIT() - initialize radix tree
 *
 *	Initialize our meta structures and bitmaps and calculate the exact
 *	amount of space required to manage 'count' blocks - this space may
 *	be considerably less than the calculated radix due to the large
 *	RADIX values we use.
 */

static daddr_t
blst_radix_init(blmeta_t *scan, daddr_t radix, int skip, daddr_t count)
{
	int i;
	int next_skip;
	daddr_t memindex = 0;

	/*
	 * Leaf node
	 */

	if (radix == BLIST_BMAP_RADIX) {
		if (scan) {
			scan->bm_bighint = 0;
			scan->u.bmu_bitmap = 0;
		}
		return(memindex);
	}

	/*
	 * Meta node.  If allocating the entire object we can special
	 * case it.  However, we need to figure out how much memory
	 * is required to manage 'count' blocks, so we continue on anyway.
	 */

	if (scan) {
		scan->bm_bighint = 0;
		scan->u.bmu_avail = 0;
	}

	radix >>= BLIST_META_RADIX_SHIFT;
	next_skip = (skip >> BLIST_META_RADIX_SHIFT);

	for (i = 1; i <= skip; i += next_skip) {
		if (count >= radix) {
			/*
			 * Allocate the entire object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    radix
			);
			count -= radix;
		} else if (count > 0) {
			/*
			 * Allocate a partial object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    count
			);
			count = 0;
		} else {
			/*
			 * Add terminator and break out
			 */
			if (scan)
				scan[i].bm_bighint = (daddr_t)-1;
			break;
		}
	}
	if (memindex < i)
		memindex = i;
	return(memindex);
}
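
/*
 * Sizing sketch (illustration only, using the typical constants):
 * blist_create() first calls blst_radix_init(NULL, ...) as a dry run to
 * learn how many blmeta_t records it needs, then allocates that many and
 * runs the init again for real.  For 1000 blocks the root radix is 8192
 * (skip 272), but only
 *
 *	1 root + 17 slots for the subtree at [1]   (blocks   0..511)
 *	       + 17 slots for the subtree at [18]  (blocks 512..999, partial)
 *	       +  1 terminator at slot [35]
 *
 * i.e. 36 records are actually allocated, rather than the 273 a fully
 * populated tree of that radix would require.
 */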

#ifdef BLIST_DEBUG

static void
blst_radix_print(blmeta_t *scan, daddr_t blk, daddr_t radix, int skip, int tab)
{
	int i;
	int next_skip;
	int lastState = 0;

	if (radix == BLIST_BMAP_RADIX) {
		printf(
		    "%*.*s(%04x,%d): bitmap %08x big=%d\n",
		    tab, tab, "",
		    blk, radix,
		    scan->u.bmu_bitmap,
		    scan->bm_bighint
		);
		return;
	}

	if (scan->u.bmu_avail == 0) {
		printf(
		    "%*.*s(%04x,%d) ALL ALLOCATED\n",
		    tab, tab, "",
		    blk,
		    radix
		);
		return;
	}
	if (scan->u.bmu_avail == radix) {
		printf(
		    "%*.*s(%04x,%d) ALL FREE\n",
		    tab, tab, "",
		    blk,
		    radix
		);
		return;
	}

	printf(
	    "%*.*s(%04x,%d): subtree (%d/%d) big=%d {\n",
	    tab, tab, "",
	    blk, radix,
	    scan->u.bmu_avail,
	    radix,
	    scan->bm_bighint
	);

	radix >>= BLIST_META_RADIX_SHIFT;
	next_skip = (skip >> BLIST_META_RADIX_SHIFT);
	tab += 4;

	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (daddr_t)-1) {
			printf(
			    "%*.*s(%04x,%d): Terminator\n",
			    tab, tab, "",
			    blk, radix
			);
			lastState = 0;
			break;
		}
		blst_radix_print(
		    &scan[i],
		    blk,
		    radix,
		    next_skip - 1,
		    tab
		);
		blk += radix;
	}
	tab -= 4;

	printf(
	    "%*.*s}\n",
	    tab, tab, ""
	);
}

#endif

#ifdef BLIST_DEBUG

int
main(int ac, char **av)
{
	int size = 1024;
	int i;
	blist_t bl;

	for (i = 1; i < ac; ++i) {
		const char *ptr = av[i];
		if (*ptr != '-') {
			size = strtol(ptr, NULL, 0);
			continue;
		}
		ptr += 2;
		fprintf(stderr, "Bad option: %s\n", ptr - 2);
		exit(1);
	}
	bl = blist_create(size);
	blist_free(bl, 0, size);

	for (;;) {
		char buf[1024];
		daddr_t da = 0;
		daddr_t count = 0;

		printf("%d/%d/%d> ", bl->bl_free, size, bl->bl_radix);
		fflush(stdout);
		if (fgets(buf, sizeof(buf), stdin) == NULL)
			break;
		switch(buf[0]) {
		case 'r':
			if (sscanf(buf + 1, "%d", &count) == 1) {
				blist_resize(&bl, count, 1);
			} else {
				printf("?\n");
			}
			/* fall through to print the resized tree */
		case 'p':
			blist_print(bl);
			break;
		case 'a':
			if (sscanf(buf + 1, "%d", &count) == 1) {
				daddr_t blk = blist_alloc(bl, count);
				printf(" R=%04x\n", blk);
			} else {
				printf("?\n");
			}
			break;
		case 'f':
			if (sscanf(buf + 1, "%x %d", &da, &count) == 2) {
				blist_free(bl, da, count);
			} else {
				printf("?\n");
			}
			break;
		case '?':
		case 'h':
			puts(
			    "p          -print\n"
			    "a %d       -allocate\n"
			    "f %x %d    -free\n"
			    "r %d       -resize\n"
			    "h/?        -help"
			);
			break;
		default:
			printf("?\n");
			break;
		}
	}
	return(0);
}

void
panic(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	fprintf(stderr, "\n");
	va_end(va);
	exit(1);
}

#endif