1 2 /* 3 * BLIST.C - Bitmap allocator/deallocator, using a radix tree with hinting 4 * 5 * (c)Copyright 1998, Matthew Dillon. Terms for use and redistribution 6 * are covered by the BSD Copyright as found in /usr/src/COPYRIGHT. 7 * 8 * This module implements a general bitmap allocator/deallocator. The 9 * allocator eats around 2 bits per 'block'. The module does not 10 * try to interpret the meaning of a 'block' other then to return 11 * SWAPBLK_NONE on an allocation failure. 12 * 13 * A radix tree is used to maintain the bitmap. Two radix constants are 14 * involved: One for the bitmaps contained in the leaf nodes (typically 15 * 32), and one for the meta nodes (typically 16). Both meta and leaf 16 * nodes have a hint field. This field gives us a hint as to the largest 17 * free contiguous range of blocks under the node. It may contain a 18 * value that is too high, but will never contain a value that is too 19 * low. When the radix tree is searched, allocation failures in subtrees 20 * update the hint. 21 * 22 * The radix tree also implements two collapsed states for meta nodes: 23 * the ALL-ALLOCATED state and the ALL-FREE state. If a meta node is 24 * in either of these two states, all information contained underneath 25 * the node is considered stale. These states are used to optimize 26 * allocation and freeing operations. 27 * 28 * The hinting greatly increases code efficiency for allocations while 29 * the general radix structure optimizes both allocations and frees. The 30 * radix tree should be able to operate well no matter how much 31 * fragmentation there is and no matter how large a bitmap is used. 32 * 33 * Unlike the rlist code, the blist code wires all necessary memory at 34 * creation time. Neither allocations nor frees require interaction with 35 * the memory subsystem. In contrast, the rlist code may allocate memory 36 * on an rlist_free() call. The non-blocking features of the blist code 37 * are used to great advantage in the swap code (vm/nswap_pager.c). The 38 * rlist code uses a little less overall memory then the blist code (but 39 * due to swap interleaving not all that much less), but the blist code 40 * scales much, much better. 41 * 42 * LAYOUT: The radix tree is layed out recursively using a 43 * linear array. Each meta node is immediately followed (layed out 44 * sequentially in memory) by BLIST_META_RADIX lower level nodes. This 45 * is a recursive structure but one that can be easily scanned through 46 * a very simple 'skip' calculation. In order to support large radixes, 47 * portions of the tree may reside outside our memory allocation. We 48 * handle this with an early-termination optimization (when bighint is 49 * set to -1) on the scan. The memory allocation is only large enough 50 * to cover the number of blocks requested at creation time even if it 51 * must be encompassed in larger root-node radix. 52 * 53 * NOTE: the allocator cannot currently allocate more then 54 * BLIST_BMAP_RADIX blocks per call. It will panic with 'allocation too 55 * large' if you try. This is an area that could use improvement. The 56 * radix is large enough that this restriction does not effect the swap 57 * system, though. Currently only the allocation code is effected by 58 * this algorithmic unfeature. The freeing code can handle arbitrary 59 * ranges. 60 * 61 * This code can be compiled stand-alone for debugging. 62 * 63 * $Id$ 64 */ 65 66 #ifdef KERNEL 67 68 #include <sys/param.h> 69 #include <sys/systm.h> 70 #include <sys/lock.h> 71 #include <sys/kernel.h> 72 #include <sys/blist.h> 73 #include <sys/malloc.h> 74 #include <vm/vm.h> 75 #include <vm/vm_prot.h> 76 #include <vm/vm_object.h> 77 #include <vm/vm_kern.h> 78 #include <vm/vm_extern.h> 79 #include <vm/vm_page.h> 80 81 #else 82 83 #ifndef BLIST_NO_DEBUG 84 #define BLIST_DEBUG 85 #endif 86 87 #define SWAPBLK_NONE ((daddr_t)-1) 88 89 #include <sys/types.h> 90 #include <stdio.h> 91 #include <string.h> 92 #include <stdlib.h> 93 #include <stdarg.h> 94 95 #define malloc(a,b,c) malloc(a) 96 #define free(a,b) free(a) 97 98 typedef unsigned int u_daddr_t; 99 100 #include <sys/blist.h> 101 102 void panic(const char *ctl, ...); 103 104 #endif 105 106 /* 107 * static support functions 108 */ 109 110 static daddr_t blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count); 111 static daddr_t blst_meta_alloc(blmeta_t *scan, daddr_t blk, 112 daddr_t count, daddr_t radix, int skip); 113 static void blst_leaf_free(blmeta_t *scan, daddr_t relblk, int count); 114 static void blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count, 115 daddr_t radix, int skip, daddr_t blk); 116 static void blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix, 117 daddr_t skip, blist_t dest, daddr_t count); 118 static daddr_t blst_radix_init(blmeta_t *scan, daddr_t radix, 119 int skip, daddr_t count); 120 #ifndef KERNEL 121 static void blst_radix_print(blmeta_t *scan, daddr_t blk, 122 daddr_t radix, int skip, int tab); 123 #endif 124 125 #ifdef KERNEL 126 static MALLOC_DEFINE(M_SWAP, "SWAP", "Swap space"); 127 #endif 128 129 /* 130 * blist_create() - create a blist capable of handling up to the specified 131 * number of blocks 132 * 133 * blocks must be greater then 0 134 * 135 * The smallest blist consists of a single leaf node capable of 136 * managing BLIST_BMAP_RADIX blocks. 137 */ 138 139 blist_t 140 blist_create(daddr_t blocks) 141 { 142 blist_t bl; 143 int radix; 144 int skip = 0; 145 146 /* 147 * Calculate radix and skip field used for scanning. 148 */ 149 radix = BLIST_BMAP_RADIX; 150 151 while (radix < blocks) { 152 radix <<= BLIST_META_RADIX_SHIFT; 153 skip = (skip + 1) << BLIST_META_RADIX_SHIFT; 154 } 155 156 bl = malloc(sizeof(struct blist), M_SWAP, M_WAITOK); 157 158 bzero(bl, sizeof(*bl)); 159 160 bl->bl_blocks = blocks; 161 bl->bl_radix = radix; 162 bl->bl_skip = skip; 163 bl->bl_rootblks = 1 + 164 blst_radix_init(NULL, bl->bl_radix, bl->bl_skip, blocks); 165 bl->bl_root = malloc(sizeof(blmeta_t) * bl->bl_rootblks, M_SWAP, M_WAITOK); 166 167 #if defined(BLIST_DEBUG) 168 printf( 169 "BLIST representing %d blocks (%d MB of swap)" 170 ", requiring %dK of ram\n", 171 bl->bl_blocks, 172 bl->bl_blocks * 4 / 1024, 173 (bl->bl_rootblks * sizeof(blmeta_t) + 1023) / 1024 174 ); 175 printf("BLIST raw radix tree contains %d records\n", bl->bl_rootblks); 176 #endif 177 blst_radix_init(bl->bl_root, bl->bl_radix, bl->bl_skip, blocks); 178 179 return(bl); 180 } 181 182 void 183 blist_destroy(blist_t bl) 184 { 185 free(bl->bl_root, M_SWAP); 186 free(bl, M_SWAP); 187 } 188 189 /* 190 * blist_alloc() - reserve space in the block bitmap. Return the base 191 * of a contiguous region or SWAPBLK_NONE if space could 192 * not be allocated. 193 */ 194 195 daddr_t 196 blist_alloc(blist_t bl, daddr_t count) 197 { 198 daddr_t blk = SWAPBLK_NONE; 199 200 if (bl) { 201 if (bl->bl_radix == BLIST_BMAP_RADIX) 202 blk = blst_leaf_alloc(bl->bl_root, 0, count); 203 else 204 blk = blst_meta_alloc(bl->bl_root, 0, count, bl->bl_radix, bl->bl_skip); 205 if (blk != SWAPBLK_NONE) 206 bl->bl_free -= count; 207 } 208 return(blk); 209 } 210 211 /* 212 * blist_free() - free up space in the block bitmap. Return the base 213 * of a contiguous region. Panic if an inconsistancy is 214 * found. 215 */ 216 217 void 218 blist_free(blist_t bl, daddr_t blkno, daddr_t count) 219 { 220 if (bl) { 221 if (bl->bl_radix == BLIST_BMAP_RADIX) 222 blst_leaf_free(bl->bl_root, blkno, count); 223 else 224 blst_meta_free(bl->bl_root, blkno, count, bl->bl_radix, bl->bl_skip, 0); 225 bl->bl_free += count; 226 } 227 } 228 229 /* 230 * blist_resize() - resize an existing radix tree to handle the 231 * specified number of blocks. This will reallocate 232 * the tree and transfer the previous bitmap to the new 233 * one. When extending the tree you can specify whether 234 * the new blocks are to left allocated or freed. 235 */ 236 237 void 238 blist_resize(blist_t *pbl, daddr_t count, int freenew) 239 { 240 blist_t newbl = blist_create(count); 241 blist_t save = *pbl; 242 243 *pbl = newbl; 244 if (count > save->bl_blocks) 245 count = save->bl_blocks; 246 blst_copy(save->bl_root, 0, save->bl_radix, save->bl_skip, newbl, count); 247 248 /* 249 * If resizing upwards, should we free the new space or not? 250 */ 251 if (freenew && count < newbl->bl_blocks) { 252 blist_free(newbl, count, newbl->bl_blocks - count); 253 } 254 blist_destroy(save); 255 } 256 257 #ifdef BLIST_DEBUG 258 259 /* 260 * blist_print() - dump radix tree 261 */ 262 263 void 264 blist_print(blist_t bl) 265 { 266 printf("BLIST {\n"); 267 blst_radix_print(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, 4); 268 printf("}\n"); 269 } 270 271 #endif 272 273 /************************************************************************ 274 * ALLOCATION SUPPORT FUNCTIONS * 275 ************************************************************************ 276 * 277 * These support functions do all the actual work. They may seem 278 * rather longish, but that's because I've commented them up. The 279 * actual code is straight forward. 280 * 281 */ 282 283 /* 284 * blist_leaf_alloc() - allocate at a leaf in the radix tree (a bitmap). 285 * 286 * This is the core of the allocator and is optimized for the 1 block 287 * and the BLIST_BMAP_RADIX block allocation cases. Other cases are 288 * somewhat slower. The 1 block allocation case is log2 and extremely 289 * quick. 290 */ 291 292 static daddr_t 293 blst_leaf_alloc( 294 blmeta_t *scan, 295 daddr_t blk, 296 int count 297 ) { 298 u_daddr_t orig = scan->u.bmu_bitmap; 299 300 if (orig == 0) { 301 /* 302 * Optimize bitmap all-allocated case. Also, count = 1 303 * case assumes at least 1 bit is free in the bitmap, so 304 * we have to take care of this case here. 305 */ 306 scan->bm_bighint = 0; 307 return(SWAPBLK_NONE); 308 } 309 if (count == 1) { 310 /* 311 * Optimized code to allocate one bit out of the bitmap 312 */ 313 u_daddr_t mask; 314 int j = BLIST_BMAP_RADIX/2; 315 int r = 0; 316 317 mask = (u_daddr_t)-1 >> (BLIST_BMAP_RADIX/2); 318 319 while (j) { 320 if ((orig & mask) == 0) { 321 r += j; 322 orig >>= j; 323 } 324 j >>= 1; 325 mask >>= j; 326 } 327 scan->u.bmu_bitmap &= ~(1 << r); 328 return(blk + r); 329 } 330 if (count <= BLIST_BMAP_RADIX) { 331 /* 332 * non-optimized code to allocate N bits out of the bitmap. 333 * The more bits, the faster the code runs. It will run 334 * the slowest allocating 2 bits, but since there aren't any 335 * memory ops in the core loop (or shouldn't be, anyway), 336 * you probably won't notice the difference. 337 */ 338 int j; 339 int n = BLIST_BMAP_RADIX - count; 340 u_daddr_t mask; 341 342 mask = (u_daddr_t)-1 >> n; 343 344 for (j = 0; j <= n; ++j) { 345 if ((orig & mask) == mask) { 346 scan->u.bmu_bitmap &= ~mask; 347 return(blk + j); 348 } 349 mask = (mask << 1); 350 } 351 } 352 /* 353 * We couldn't allocate count in this subtree, update bighint. 354 */ 355 scan->bm_bighint = count - 1; 356 return(SWAPBLK_NONE); 357 } 358 359 /* 360 * blist_meta_alloc() - allocate at a meta in the radix tree. 361 * 362 * Attempt to allocate at a meta node. If we can't, we update 363 * bighint and return a failure. Updating bighint optimize future 364 * calls that hit this node. We have to check for our collapse cases 365 * and we have a few optimizations strewn in as well. 366 */ 367 368 static daddr_t 369 blst_meta_alloc( 370 blmeta_t *scan, 371 daddr_t blk, 372 daddr_t count, 373 daddr_t radix, 374 int skip 375 ) { 376 int i; 377 int next_skip = (skip >> BLIST_META_RADIX_SHIFT); 378 379 if (scan->u.bmu_avail == 0) { 380 /* 381 * ALL-ALLOCATED special case 382 */ 383 scan->bm_bighint = count; 384 return(SWAPBLK_NONE); 385 } 386 387 if (scan->u.bmu_avail == radix) { 388 radix >>= BLIST_META_RADIX_SHIFT; 389 390 /* 391 * ALL-FREE special case, initialize uninitialize 392 * sublevel. 393 */ 394 for (i = 1; i <= skip; i += next_skip) { 395 if (scan[i].bm_bighint == (daddr_t)-1) 396 break; 397 if (next_skip == 1) { 398 scan[i].u.bmu_bitmap = (u_daddr_t)-1; 399 scan[i].bm_bighint = BLIST_BMAP_RADIX; 400 } else { 401 scan[i].bm_bighint = radix; 402 scan[i].u.bmu_avail = radix; 403 } 404 } 405 } else { 406 radix >>= BLIST_META_RADIX_SHIFT; 407 } 408 409 for (i = 1; i <= skip; i += next_skip) { 410 if (count <= scan[i].bm_bighint) { 411 /* 412 * count fits in object 413 */ 414 daddr_t r; 415 if (next_skip == 1) { 416 r = blst_leaf_alloc(&scan[i], blk, count); 417 } else { 418 r = blst_meta_alloc(&scan[i], blk, count, radix, next_skip - 1); 419 } 420 if (r != SWAPBLK_NONE) { 421 scan->u.bmu_avail -= count; 422 if (scan->bm_bighint > scan->u.bmu_avail) 423 scan->bm_bighint = scan->u.bmu_avail; 424 return(r); 425 } 426 } else if (scan[i].bm_bighint == (daddr_t)-1) { 427 /* 428 * Terminator 429 */ 430 break; 431 } else if (count > radix) { 432 /* 433 * count does not fit in object even if it were 434 * complete free. 435 */ 436 panic("blist_meta_alloc: allocation too large"); 437 } 438 blk += radix; 439 } 440 441 /* 442 * We couldn't allocate count in this subtree, update bighint. 443 */ 444 if (scan->bm_bighint >= count) 445 scan->bm_bighint = count - 1; 446 return(SWAPBLK_NONE); 447 } 448 449 /* 450 * BLST_LEAF_FREE() - free allocated block from leaf bitmap 451 * 452 */ 453 454 static void 455 blst_leaf_free( 456 blmeta_t *scan, 457 daddr_t blk, 458 int count 459 ) { 460 /* 461 * free some data in this bitmap 462 * 463 * e.g. 464 * 0000111111111110000 465 * \_________/\__/ 466 * v n 467 */ 468 int n = blk & (BLIST_BMAP_RADIX - 1); 469 u_daddr_t mask; 470 471 mask = ((u_daddr_t)-1 << n) & 472 ((u_daddr_t)-1 >> (BLIST_BMAP_RADIX - count - n)); 473 474 if (scan->u.bmu_bitmap & mask) 475 panic("blst_radix_free: freeing free block"); 476 scan->u.bmu_bitmap |= mask; 477 478 /* 479 * We could probably do a better job here. We are required to make 480 * bighint at least as large as the biggest contiguous block of 481 * data. If we just shoehorn it, a little extra overhead will 482 * be incured on the next allocation (but only that one typically). 483 */ 484 scan->bm_bighint = BLIST_BMAP_RADIX; 485 } 486 487 /* 488 * BLST_META_FREE() - free allocated blocks from radix tree meta info 489 * 490 * This support routine frees a range of blocks from the bitmap. 491 * The range must be entirely enclosed by this radix node. If a 492 * meta node, we break the range down recursively to free blocks 493 * in subnodes (which means that this code can free an arbitrary 494 * range whereas the allocation code cannot allocate an arbitrary 495 * range). 496 */ 497 498 static void 499 blst_meta_free( 500 blmeta_t *scan, 501 daddr_t freeBlk, 502 daddr_t count, 503 daddr_t radix, 504 int skip, 505 daddr_t blk 506 ) { 507 int i; 508 int next_skip = (skip >> BLIST_META_RADIX_SHIFT); 509 510 #if 0 511 printf("FREE (%x,%d) FROM (%x,%d)\n", 512 freeBlk, count, 513 blk, radix 514 ); 515 #endif 516 517 if (scan->u.bmu_avail == 0) { 518 /* 519 * ALL-ALLOCATED special case, with possible 520 * shortcut to ALL-FREE special case. 521 */ 522 scan->u.bmu_avail = count; 523 scan->bm_bighint = count; 524 525 if (count != radix) { 526 for (i = 1; i <= skip; i += next_skip) { 527 if (scan[i].bm_bighint == (daddr_t)-1) 528 break; 529 scan[i].bm_bighint = 0; 530 if (next_skip == 1) { 531 scan[i].u.bmu_bitmap = 0; 532 } else { 533 scan[i].u.bmu_avail = 0; 534 } 535 } 536 /* fall through */ 537 } 538 } else { 539 scan->u.bmu_avail += count; 540 /* scan->bm_bighint = radix; */ 541 } 542 543 /* 544 * ALL-FREE special case. 545 */ 546 547 if (scan->u.bmu_avail == radix) 548 return; 549 #if !defined(MAX_PERF) 550 if (scan->u.bmu_avail > radix) 551 panic("blst_meta_free: freeing already free blocks (%d) %d/%d", count, scan->u.bmu_avail, radix); 552 #endif 553 554 /* 555 * Break the free down into its components 556 */ 557 558 radix >>= BLIST_META_RADIX_SHIFT; 559 560 i = (freeBlk - blk) / radix; 561 blk += i * radix; 562 i = i * next_skip + 1; 563 564 while (i <= skip && blk < freeBlk + count) { 565 daddr_t v; 566 567 v = blk + radix - freeBlk; 568 if (v > count) 569 v = count; 570 571 if (scan->bm_bighint == (daddr_t)-1) 572 panic("blst_meta_free: freeing unexpected range"); 573 574 if (next_skip == 1) { 575 blst_leaf_free(&scan[i], freeBlk, v); 576 } else { 577 blst_meta_free(&scan[i], freeBlk, v, radix, next_skip - 1, blk); 578 } 579 if (scan->bm_bighint < scan[i].bm_bighint) 580 scan->bm_bighint = scan[i].bm_bighint; 581 count -= v; 582 freeBlk += v; 583 blk += radix; 584 i += next_skip; 585 } 586 } 587 588 /* 589 * BLIST_RADIX_COPY() - copy one radix tree to another 590 * 591 * Locates free space in the source tree and frees it in the destination 592 * tree. The space may not already be free in the destination. 593 */ 594 595 static void blst_copy( 596 blmeta_t *scan, 597 daddr_t blk, 598 daddr_t radix, 599 daddr_t skip, 600 blist_t dest, 601 daddr_t count 602 ) { 603 int next_skip; 604 int i; 605 606 /* 607 * Leaf node 608 */ 609 610 if (radix == BLIST_BMAP_RADIX) { 611 u_daddr_t v = scan->u.bmu_bitmap; 612 613 if (v == (u_daddr_t)-1) { 614 blist_free(dest, blk, count); 615 } else if (v != 0) { 616 int i; 617 618 for (i = 0; i < BLIST_BMAP_RADIX && i < count; ++i) { 619 if (v & (1 << i)) 620 blist_free(dest, blk + i, 1); 621 } 622 } 623 return; 624 } 625 626 /* 627 * Meta node 628 */ 629 630 if (scan->u.bmu_avail == 0) { 631 /* 632 * Source all allocated, leave dest allocated 633 */ 634 return; 635 } 636 if (scan->u.bmu_avail == radix) { 637 /* 638 * Source all free, free entire dest 639 */ 640 if (count < radix) 641 blist_free(dest, blk, count); 642 else 643 blist_free(dest, blk, radix); 644 return; 645 } 646 647 648 radix >>= BLIST_META_RADIX_SHIFT; 649 next_skip = (skip >> BLIST_META_RADIX_SHIFT); 650 651 for (i = 1; count && i <= skip; i += next_skip) { 652 if (scan[i].bm_bighint == (daddr_t)-1) 653 break; 654 655 if (count >= radix) { 656 blst_copy( 657 &scan[i], 658 blk, 659 radix, 660 next_skip - 1, 661 dest, 662 radix 663 ); 664 count -= radix; 665 } else { 666 if (count) { 667 blst_copy( 668 &scan[i], 669 blk, 670 radix, 671 next_skip - 1, 672 dest, 673 count 674 ); 675 } 676 count = 0; 677 } 678 blk += radix; 679 } 680 } 681 682 /* 683 * BLST_RADIX_INIT() - initialize radix tree 684 * 685 * Initialize our meta structures and bitmaps and calculate the exact 686 * amount of space required to manage 'count' blocks - this space may 687 * be considerably less then the calculated radix due to the large 688 * RADIX values we use. 689 */ 690 691 static daddr_t 692 blst_radix_init(blmeta_t *scan, daddr_t radix, int skip, daddr_t count) 693 { 694 int i; 695 int next_skip; 696 daddr_t memindex = 0; 697 698 /* 699 * Leaf node 700 */ 701 702 if (radix == BLIST_BMAP_RADIX) { 703 if (scan) { 704 scan->bm_bighint = 0; 705 scan->u.bmu_bitmap = 0; 706 } 707 return(memindex); 708 } 709 710 /* 711 * Meta node. If allocating the entire object we can special 712 * case it. However, we need to figure out how much memory 713 * is required to manage 'count' blocks, so we continue on anyway. 714 */ 715 716 if (scan) { 717 scan->bm_bighint = 0; 718 scan->u.bmu_avail = 0; 719 } 720 721 radix >>= BLIST_META_RADIX_SHIFT; 722 next_skip = (skip >> BLIST_META_RADIX_SHIFT); 723 724 for (i = 1; i <= skip; i += next_skip) { 725 if (count >= radix) { 726 /* 727 * Allocate the entire object 728 */ 729 memindex = i + blst_radix_init( 730 ((scan) ? &scan[i] : NULL), 731 radix, 732 next_skip - 1, 733 radix 734 ); 735 count -= radix; 736 } else if (count > 0) { 737 /* 738 * Allocate a partial object 739 */ 740 memindex = i + blst_radix_init( 741 ((scan) ? &scan[i] : NULL), 742 radix, 743 next_skip - 1, 744 count 745 ); 746 count = 0; 747 } else { 748 /* 749 * Add terminator and break out 750 */ 751 if (scan) 752 scan[i].bm_bighint = (daddr_t)-1; 753 break; 754 } 755 } 756 if (memindex < i) 757 memindex = i; 758 return(memindex); 759 } 760 761 #ifdef BLIST_DEBUG 762 763 static void 764 blst_radix_print(blmeta_t *scan, daddr_t blk, daddr_t radix, int skip, int tab) 765 { 766 int i; 767 int next_skip; 768 int lastState = 0; 769 770 if (radix == BLIST_BMAP_RADIX) { 771 printf( 772 "%*.*s(%04x,%d): bitmap %08x big=%d\n", 773 tab, tab, "", 774 blk, radix, 775 scan->u.bmu_bitmap, 776 scan->bm_bighint 777 ); 778 return; 779 } 780 781 if (scan->u.bmu_avail == 0) { 782 printf( 783 "%*.*s(%04x,%d) ALL ALLOCATED\n", 784 tab, tab, "", 785 blk, 786 radix 787 ); 788 return; 789 } 790 if (scan->u.bmu_avail == radix) { 791 printf( 792 "%*.*s(%04x,%d) ALL FREE\n", 793 tab, tab, "", 794 blk, 795 radix 796 ); 797 return; 798 } 799 800 printf( 801 "%*.*s(%04x,%d): subtree (%d/%d) big=%d {\n", 802 tab, tab, "", 803 blk, radix, 804 scan->u.bmu_avail, 805 radix, 806 scan->bm_bighint 807 ); 808 809 radix >>= BLIST_META_RADIX_SHIFT; 810 next_skip = (skip >> BLIST_META_RADIX_SHIFT); 811 tab += 4; 812 813 for (i = 1; i <= skip; i += next_skip) { 814 if (scan[i].bm_bighint == (daddr_t)-1) { 815 printf( 816 "%*.*s(%04x,%d): Terminator\n", 817 tab, tab, "", 818 blk, radix 819 ); 820 lastState = 0; 821 break; 822 } 823 blst_radix_print( 824 &scan[i], 825 blk, 826 radix, 827 next_skip - 1, 828 tab 829 ); 830 blk += radix; 831 } 832 tab -= 4; 833 834 printf( 835 "%*.*s}\n", 836 tab, tab, "" 837 ); 838 } 839 840 #endif 841 842 #ifdef BLIST_DEBUG 843 844 int 845 main(int ac, char **av) 846 { 847 int size = 1024; 848 int i; 849 blist_t bl; 850 851 for (i = 1; i < ac; ++i) { 852 const char *ptr = av[i]; 853 if (*ptr != '-') { 854 size = strtol(ptr, NULL, 0); 855 continue; 856 } 857 ptr += 2; 858 fprintf(stderr, "Bad option: %s\n", ptr - 2); 859 exit(1); 860 } 861 bl = blist_create(size); 862 blist_free(bl, 0, size); 863 864 for (;;) { 865 char buf[1024]; 866 daddr_t da = 0; 867 daddr_t count = 0; 868 869 870 printf("%d/%d/%d> ", bl->bl_free, size, bl->bl_radix); 871 fflush(stdout); 872 if (fgets(buf, sizeof(buf), stdin) == NULL) 873 break; 874 switch(buf[0]) { 875 case 'r': 876 if (sscanf(buf + 1, "%d", &count) == 1) { 877 blist_resize(&bl, count, 1); 878 } else { 879 printf("?\n"); 880 } 881 case 'p': 882 blist_print(bl); 883 break; 884 case 'a': 885 if (sscanf(buf + 1, "%d", &count) == 1) { 886 daddr_t blk = blist_alloc(bl, count); 887 printf(" R=%04x\n", blk); 888 } else { 889 printf("?\n"); 890 } 891 break; 892 case 'f': 893 if (sscanf(buf + 1, "%x %d", &da, &count) == 2) { 894 blist_free(bl, da, count); 895 } else { 896 printf("?\n"); 897 } 898 break; 899 case '?': 900 case 'h': 901 puts( 902 "p -print\n" 903 "a %d -allocate\n" 904 "f %x %d -free\n" 905 "r %d -resize\n" 906 "h/? -help" 907 ); 908 break; 909 default: 910 printf("?\n"); 911 break; 912 } 913 } 914 return(0); 915 } 916 917 void 918 panic(const char *ctl, ...) 919 { 920 va_list va; 921 922 va_start(va, ctl); 923 vfprintf(stderr, ctl, va); 924 fprintf(stderr, "\n"); 925 va_end(va); 926 exit(1); 927 } 928 929 #endif 930 931