/*
 * BLIST.C -	Bitmap allocator/deallocator, using a radix tree with hinting
 *
 *	(c)Copyright 1998, Matthew Dillon.  Terms for use and redistribution
 *	are covered by the BSD Copyright as found in /usr/src/COPYRIGHT.
 *
 *	This module implements a general bitmap allocator/deallocator.  The
 *	allocator eats around 2 bits per 'block'.  The module does not
 *	try to interpret the meaning of a 'block' other than to return
 *	SWAPBLK_NONE on an allocation failure.
 *
 *	A radix tree is used to maintain the bitmap.  Two radix constants are
 *	involved: one for the bitmaps contained in the leaf nodes (typically
 *	32), and one for the meta nodes (typically 16).  Both meta and leaf
 *	nodes have a hint field.  This field gives us a hint as to the largest
 *	free contiguous range of blocks under the node.  It may contain a
 *	value that is too high, but will never contain a value that is too
 *	low.  When the radix tree is searched, allocation failures in subtrees
 *	update the hint.
 *
 *	The radix tree also implements two collapsed states for meta nodes:
 *	the ALL-ALLOCATED state and the ALL-FREE state.  If a meta node is
 *	in either of these two states, all information contained underneath
 *	the node is considered stale.  These states are used to optimize
 *	allocation and freeing operations.
 *
 *	The hinting greatly increases code efficiency for allocations while
 *	the general radix structure optimizes both allocations and frees.  The
 *	radix tree should be able to operate well no matter how much
 *	fragmentation there is and no matter how large a bitmap is used.
 *
 *	Unlike the rlist code, the blist code wires all necessary memory at
 *	creation time.  Neither allocations nor frees require interaction with
 *	the memory subsystem.  In contrast, the rlist code may allocate memory
 *	on an rlist_free() call.  The non-blocking features of the blist code
 *	are used to great advantage in the swap code (vm/nswap_pager.c).  The
 *	rlist code uses a little less overall memory than the blist code (but
 *	due to swap interleaving not all that much less), but the blist code
 *	scales much, much better.
 *
 *	LAYOUT: The radix tree is laid out recursively using a
 *	linear array.  Each meta node is immediately followed (laid out
 *	sequentially in memory) by BLIST_META_RADIX lower level nodes.  This
 *	is a recursive structure but one that can be easily scanned through
 *	a very simple 'skip' calculation.  In order to support large radixes,
 *	portions of the tree may reside outside our memory allocation.  We
 *	handle this with an early-termination optimization (when bighint is
 *	set to -1) on the scan.  The memory allocation is only large enough
 *	to cover the number of blocks requested at creation time even if it
 *	must be encompassed in a larger root-node radix.
 *
 *	NOTE: the allocator cannot currently allocate more than
 *	BLIST_BMAP_RADIX blocks per call.  It will panic with 'allocation too
 *	large' if you try.  This is an area that could use improvement.  The
 *	radix is large enough that this restriction does not affect the swap
 *	system, though.  Currently only the allocation code is affected by
 *	this algorithmic unfeature.  The freeing code can handle arbitrary
 *	ranges.
 *
 *	This code can be compiled stand-alone for debugging.
 *
 * $FreeBSD$
 */
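
/*
 * Illustrative note (added commentary, not part of the original header):
 * with the usual constants BLIST_BMAP_RADIX = 32 and BLIST_META_RADIX = 16,
 * one leaf covers 32 blocks, one meta node plus its 16 leaves covers 512
 * blocks, and the next level up covers 8192 blocks.  blist_create() below
 * computes 'skip' as 0, 16, 272, ... for these levels; it is the number of
 * array slots occupied by everything underneath a node, so at the 8192-block
 * level each 512-block child subtree starts every
 * (skip >> BLIST_META_RADIX_SHIFT) = 17 slots.  This is the 'skip'
 * calculation referred to above.
 */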

#ifdef _KERNEL

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/lock.h>
#include <sys/kernel.h>
#include <sys/blist.h>
#include <sys/malloc.h>
#include <sys/proc.h>
#include <sys/mutex.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_page.h>

#else

#ifndef BLIST_NO_DEBUG
#define BLIST_DEBUG
#endif

#define SWAPBLK_NONE ((daddr_t)-1)

#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdarg.h>

#define malloc(a,b,c)	malloc(a)
#define free(a,b)	free(a)

typedef unsigned int u_daddr_t;

#include <sys/blist.h>

void panic(const char *ctl, ...);

#endif

/*
 * static support functions
 */

static daddr_t blst_leaf_alloc(blmeta_t *scan, daddr_t blk, int count);
static daddr_t blst_meta_alloc(blmeta_t *scan, daddr_t blk,
				daddr_t count, daddr_t radix, int skip);
static void blst_leaf_free(blmeta_t *scan, daddr_t relblk, int count);
static void blst_meta_free(blmeta_t *scan, daddr_t freeBlk, daddr_t count,
				daddr_t radix, int skip, daddr_t blk);
static void blst_copy(blmeta_t *scan, daddr_t blk, daddr_t radix,
				daddr_t skip, blist_t dest, daddr_t count);
static daddr_t blst_radix_init(blmeta_t *scan, daddr_t radix,
				int skip, daddr_t count);
#ifndef _KERNEL
static void blst_radix_print(blmeta_t *scan, daddr_t blk,
				daddr_t radix, int skip, int tab);
#endif

#ifdef _KERNEL
static MALLOC_DEFINE(M_SWAP, "SWAP", "Swap space");
#endif

/*
 * blist_create() - create a blist capable of handling up to the specified
 *		    number of blocks
 *
 *	blocks must be greater than 0
 *
 *	The smallest blist consists of a single leaf node capable of
 *	managing BLIST_BMAP_RADIX blocks.
 */

blist_t
blist_create(daddr_t blocks)
{
	blist_t bl;
	int radix;
	int skip = 0;

	/*
	 * Calculate radix and skip field used for scanning.
	 */
	radix = BLIST_BMAP_RADIX;

	while (radix < blocks) {
		radix <<= BLIST_META_RADIX_SHIFT;
		skip = (skip + 1) << BLIST_META_RADIX_SHIFT;
	}

	bl = malloc(sizeof(struct blist), M_SWAP, M_WAITOK | M_ZERO);

	bl->bl_blocks = blocks;
	bl->bl_radix = radix;
	bl->bl_skip = skip;
	bl->bl_rootblks = 1 +
	    blst_radix_init(NULL, bl->bl_radix, bl->bl_skip, blocks);
	bl->bl_root = malloc(sizeof(blmeta_t) * bl->bl_rootblks, M_SWAP, M_WAITOK);

#if defined(BLIST_DEBUG)
	printf(
		"BLIST representing %d blocks (%d MB of swap)"
		", requiring %dK of ram\n",
		bl->bl_blocks,
		bl->bl_blocks * 4 / 1024,
		(bl->bl_rootblks * sizeof(blmeta_t) + 1023) / 1024
	);
	printf("BLIST raw radix tree contains %d records\n", bl->bl_rootblks);
#endif
	blst_radix_init(bl->bl_root, bl->bl_radix, bl->bl_skip, blocks);

	return(bl);
}

void
blist_destroy(blist_t bl)
{
	free(bl->bl_root, M_SWAP);
	free(bl, M_SWAP);
}
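
/*
 * Minimal usage sketch (added for illustration; not part of the original
 * file and excluded from compilation).  It only uses the public interface
 * exercised elsewhere in this file.  Note that a freshly created blist is
 * entirely allocated, so the range must be blist_free()'d before anything
 * can be allocated from it -- see main() at the bottom of this file.
 */
#if 0
static void
blist_usage_example(void)
{
	blist_t bl;
	daddr_t blk;

	bl = blist_create(1024);		/* manage 1024 blocks	  */
	blist_free(bl, 0, 1024);		/* mark all of them free  */
	blk = blist_alloc(bl, 16);		/* 16 contiguous blocks	  */
	if (blk != SWAPBLK_NONE)
		blist_free(bl, blk, 16);	/* return them		  */
	blist_destroy(bl);
}
#endif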

/*
 * blist_alloc() -	reserve space in the block bitmap.  Return the base
 *			of a contiguous region or SWAPBLK_NONE if space
 *			could not be allocated.
 */

daddr_t
blist_alloc(blist_t bl, daddr_t count)
{
	daddr_t blk = SWAPBLK_NONE;

	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blk = blst_leaf_alloc(bl->bl_root, 0, count);
		else
			blk = blst_meta_alloc(bl->bl_root, 0, count, bl->bl_radix, bl->bl_skip);
		if (blk != SWAPBLK_NONE)
			bl->bl_free -= count;
	}
	return(blk);
}

/*
 * blist_free() -	free up space in the block bitmap.  Panic if an
 *			inconsistency is found.
 */

void
blist_free(blist_t bl, daddr_t blkno, daddr_t count)
{
	if (bl) {
		if (bl->bl_radix == BLIST_BMAP_RADIX)
			blst_leaf_free(bl->bl_root, blkno, count);
		else
			blst_meta_free(bl->bl_root, blkno, count, bl->bl_radix, bl->bl_skip, 0);
		bl->bl_free += count;
	}
}

/*
 * blist_resize() -	resize an existing radix tree to handle the
 *			specified number of blocks.  This will reallocate
 *			the tree and transfer the previous bitmap to the new
 *			one.  When extending the tree you can specify whether
 *			the new blocks are to be left allocated or freed.
 */

void
blist_resize(blist_t *pbl, daddr_t count, int freenew)
{
	blist_t newbl = blist_create(count);
	blist_t save = *pbl;

	*pbl = newbl;
	if (count > save->bl_blocks)
		count = save->bl_blocks;
	blst_copy(save->bl_root, 0, save->bl_radix, save->bl_skip, newbl, count);

	/*
	 * If resizing upwards, should we free the new space or not?
	 */
	if (freenew && count < newbl->bl_blocks) {
		blist_free(newbl, count, newbl->bl_blocks - count);
	}
	blist_destroy(save);
}

#ifdef BLIST_DEBUG

/*
 * blist_print()    - dump radix tree
 */

void
blist_print(blist_t bl)
{
	printf("BLIST {\n");
	blst_radix_print(bl->bl_root, 0, bl->bl_radix, bl->bl_skip, 4);
	printf("}\n");
}

#endif

/************************************************************************
 *			ALLOCATION SUPPORT FUNCTIONS			*
 ************************************************************************
 *
 *	These support functions do all the actual work.  They may seem
 *	rather longish, but that's because I've commented them up.  The
 *	actual code is straightforward.
 *
 */

/*
 * BLST_LEAF_ALLOC() -	allocate at a leaf in the radix tree (a bitmap).
 *
 *	This is the core of the allocator and is optimized for the 1 block
 *	and the BLIST_BMAP_RADIX block allocation cases.  Other cases are
 *	somewhat slower.  The 1 block allocation case is O(log2) and
 *	extremely quick.
 */

static daddr_t
blst_leaf_alloc(
	blmeta_t *scan,
	daddr_t blk,
	int count
) {
	u_daddr_t orig = scan->u.bmu_bitmap;

	if (orig == 0) {
		/*
		 * Optimize bitmap all-allocated case.  Also, the count = 1
		 * case assumes at least 1 bit is free in the bitmap, so
		 * we have to take care of this case here.
		 */
		scan->bm_bighint = 0;
		return(SWAPBLK_NONE);
	}
	if (count == 1) {
		/*
		 * Optimized code to allocate one bit out of the bitmap
		 */
		u_daddr_t mask;
		int j = BLIST_BMAP_RADIX/2;
		int r = 0;

		mask = (u_daddr_t)-1 >> (BLIST_BMAP_RADIX/2);

		while (j) {
			if ((orig & mask) == 0) {
				r += j;
				orig >>= j;
			}
			j >>= 1;
			mask >>= j;
		}
		scan->u.bmu_bitmap &= ~((u_daddr_t)1 << r);
		return(blk + r);
	}
	if (count <= BLIST_BMAP_RADIX) {
		/*
		 * Non-optimized code to allocate N bits out of the bitmap.
		 * The more bits, the faster the code runs.  It will run
		 * the slowest allocating 2 bits, but since there aren't any
		 * memory ops in the core loop (or shouldn't be, anyway),
		 * you probably won't notice the difference.
		 */
		int j;
		int n = BLIST_BMAP_RADIX - count;
		u_daddr_t mask;

		mask = (u_daddr_t)-1 >> n;

		for (j = 0; j <= n; ++j) {
			if ((orig & mask) == mask) {
				scan->u.bmu_bitmap &= ~mask;
				return(blk + j);
			}
			mask = (mask << 1);
		}
	}
	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	scan->bm_bighint = count - 1;
	return(SWAPBLK_NONE);
}
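
/*
 * Worked example (added commentary): with a 32 bit bitmap the count == 1
 * path above is a binary search.  'mask' starts as the low 16 bits; if they
 * are all zero the free bit must lie in the upper half, so r += 16 and the
 * bitmap is shifted down.  The mask then shrinks to 8, 4, 2 and 1 bits,
 * leaving r as the index of the lowest set (free) bit after at most
 * log2(32) = 5 iterations.  The count > 1 path instead slides a count-bit
 * mask across the word and claims the first position where every bit under
 * the mask is free.
 */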

/*
 * BLST_META_ALLOC() -	allocate at a meta node in the radix tree.
 *
 *	Attempt to allocate at a meta node.  If we can't, we update
 *	bighint and return a failure.  Updating bighint optimizes future
 *	calls that hit this node.  We have to check for our collapse cases
 *	and we have a few optimizations strewn in as well.
 */

static daddr_t
blst_meta_alloc(
	blmeta_t *scan,
	daddr_t blk,
	daddr_t count,
	daddr_t radix,
	int skip
) {
	int i;
	int next_skip = (skip >> BLIST_META_RADIX_SHIFT);

	if (scan->u.bmu_avail == 0) {
		/*
		 * ALL-ALLOCATED special case
		 */
		scan->bm_bighint = count;
		return(SWAPBLK_NONE);
	}

	if (scan->u.bmu_avail == radix) {
		radix >>= BLIST_META_RADIX_SHIFT;

		/*
		 * ALL-FREE special case, initialize the uninitialized
		 * sublevel.
		 */
		for (i = 1; i <= skip; i += next_skip) {
			if (scan[i].bm_bighint == (daddr_t)-1)
				break;
			if (next_skip == 1) {
				scan[i].u.bmu_bitmap = (u_daddr_t)-1;
				scan[i].bm_bighint = BLIST_BMAP_RADIX;
			} else {
				scan[i].bm_bighint = radix;
				scan[i].u.bmu_avail = radix;
			}
		}
	} else {
		radix >>= BLIST_META_RADIX_SHIFT;
	}

	for (i = 1; i <= skip; i += next_skip) {
		if (count <= scan[i].bm_bighint) {
			/*
			 * count fits in object
			 */
			daddr_t r;
			if (next_skip == 1) {
				r = blst_leaf_alloc(&scan[i], blk, count);
			} else {
				r = blst_meta_alloc(&scan[i], blk, count, radix, next_skip - 1);
			}
			if (r != SWAPBLK_NONE) {
				scan->u.bmu_avail -= count;
				if (scan->bm_bighint > scan->u.bmu_avail)
					scan->bm_bighint = scan->u.bmu_avail;
				return(r);
			}
		} else if (scan[i].bm_bighint == (daddr_t)-1) {
			/*
			 * Terminator
			 */
			break;
		} else if (count > radix) {
			/*
			 * count does not fit in object even if it were
			 * completely free.
			 */
			panic("blist_meta_alloc: allocation too large");
		}
		blk += radix;
	}

	/*
	 * We couldn't allocate count in this subtree, update bighint.
	 */
	if (scan->bm_bighint >= count)
		scan->bm_bighint = count - 1;
	return(SWAPBLK_NONE);
}
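
/*
 * Illustrative note (added commentary): the children of a meta node live at
 * scan[1], scan[1 + next_skip], scan[1 + 2 * next_skip], and so on.  At the
 * level just above the leaves, skip is BLIST_META_RADIX and next_skip is 1,
 * so the loop above simply walks the BLIST_META_RADIX leaf bitmaps that
 * follow the meta node in the array.  One level further up, next_skip is
 * BLIST_META_RADIX + 1, the number of slots occupied by each child subtree.
 */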

/*
 * BLST_LEAF_FREE() -	free allocated blocks from leaf bitmap
 *
 */

static void
blst_leaf_free(
	blmeta_t *scan,
	daddr_t blk,
	int count
) {
	/*
	 * free some data in this bitmap
	 *
	 * e.g.
	 *	0000111111111110000
	 *	    \_________/\__/
	 *		v        n
	 */
	int n = blk & (BLIST_BMAP_RADIX - 1);
	u_daddr_t mask;

	mask = ((u_daddr_t)-1 << n) &
	    ((u_daddr_t)-1 >> (BLIST_BMAP_RADIX - count - n));

	if (scan->u.bmu_bitmap & mask)
		panic("blst_radix_free: freeing free block");
	scan->u.bmu_bitmap |= mask;

	/*
	 * We could probably do a better job here.  We are required to make
	 * bighint at least as large as the biggest contiguous block of
	 * data.  If we just shoehorn it, a little extra overhead will
	 * be incurred on the next allocation (but only that one typically).
	 */
	scan->bm_bighint = BLIST_BMAP_RADIX;
}
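
/*
 * Worked example (added commentary): freeing count = 3 blocks at offset
 * n = 5 within a 32 bit leaf gives
 *
 *	mask = (0xFFFFFFFF << 5) & (0xFFFFFFFF >> (32 - 3 - 5))
 *	     = 0xFFFFFFE0 & 0x000000FF = 0x000000E0,
 *
 * i.e. bits 5, 6 and 7 -- exactly the blocks being returned.  Any of those
 * bits already being set would mean a double free, hence the panic above.
 */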

/*
 * BLST_META_FREE() - free allocated blocks from radix tree meta info
 *
 *	This support routine frees a range of blocks from the bitmap.
 *	The range must be entirely enclosed by this radix node.  If a
 *	meta node, we break the range down recursively to free blocks
 *	in subnodes (which means that this code can free an arbitrary
 *	range whereas the allocation code cannot allocate an arbitrary
 *	range).
 */

static void
blst_meta_free(
	blmeta_t *scan,
	daddr_t freeBlk,
	daddr_t count,
	daddr_t radix,
	int skip,
	daddr_t blk
) {
	int i;
	int next_skip = (skip >> BLIST_META_RADIX_SHIFT);

#if 0
	printf("FREE (%x,%d) FROM (%x,%d)\n",
	    freeBlk, count,
	    blk, radix
	);
#endif

	if (scan->u.bmu_avail == 0) {
		/*
		 * ALL-ALLOCATED special case, with possible
		 * shortcut to ALL-FREE special case.
		 */
		scan->u.bmu_avail = count;
		scan->bm_bighint = count;

		if (count != radix) {
			for (i = 1; i <= skip; i += next_skip) {
				if (scan[i].bm_bighint == (daddr_t)-1)
					break;
				scan[i].bm_bighint = 0;
				if (next_skip == 1) {
					scan[i].u.bmu_bitmap = 0;
				} else {
					scan[i].u.bmu_avail = 0;
				}
			}
			/* fall through */
		}
	} else {
		scan->u.bmu_avail += count;
		/* scan->bm_bighint = radix; */
	}

	/*
	 * ALL-FREE special case.
	 */

	if (scan->u.bmu_avail == radix)
		return;
	if (scan->u.bmu_avail > radix)
		panic("blst_meta_free: freeing already free blocks (%d) %d/%d", count, scan->u.bmu_avail, radix);

	/*
	 * Break the free down into its components
	 */

	radix >>= BLIST_META_RADIX_SHIFT;

	i = (freeBlk - blk) / radix;
	blk += i * radix;
	i = i * next_skip + 1;

	while (i <= skip && blk < freeBlk + count) {
		daddr_t v;

		v = blk + radix - freeBlk;
		if (v > count)
			v = count;

		if (scan->bm_bighint == (daddr_t)-1)
			panic("blst_meta_free: freeing unexpected range");

		if (next_skip == 1) {
			blst_leaf_free(&scan[i], freeBlk, v);
		} else {
			blst_meta_free(&scan[i], freeBlk, v, radix, next_skip - 1, blk);
		}
		if (scan->bm_bighint < scan[i].bm_bighint)
			scan->bm_bighint = scan[i].bm_bighint;
		count -= v;
		freeBlk += v;
		blk += radix;
		i += next_skip;
	}
}

/*
 * BLST_COPY() - copy one radix tree to another
 *
 *	Locates free space in the source tree and frees it in the destination
 *	tree.  The space must not already be free in the destination.
 */

static void blst_copy(
	blmeta_t *scan,
	daddr_t blk,
	daddr_t radix,
	daddr_t skip,
	blist_t dest,
	daddr_t count
) {
	int next_skip;
	int i;

	/*
	 * Leaf node
	 */

	if (radix == BLIST_BMAP_RADIX) {
		u_daddr_t v = scan->u.bmu_bitmap;

		if (v == (u_daddr_t)-1) {
			blist_free(dest, blk, count);
		} else if (v != 0) {
			int i;

			for (i = 0; i < BLIST_BMAP_RADIX && i < count; ++i) {
				if (v & ((u_daddr_t)1 << i))
					blist_free(dest, blk + i, 1);
			}
		}
		return;
	}

	/*
	 * Meta node
	 */

	if (scan->u.bmu_avail == 0) {
		/*
		 * Source all allocated, leave dest allocated
		 */
		return;
	}
	if (scan->u.bmu_avail == radix) {
		/*
		 * Source all free, free entire dest
		 */
		if (count < radix)
			blist_free(dest, blk, count);
		else
			blist_free(dest, blk, radix);
		return;
	}


	radix >>= BLIST_META_RADIX_SHIFT;
	next_skip = (skip >> BLIST_META_RADIX_SHIFT);

	for (i = 1; count && i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (daddr_t)-1)
			break;

		if (count >= radix) {
			blst_copy(
			    &scan[i],
			    blk,
			    radix,
			    next_skip - 1,
			    dest,
			    radix
			);
			count -= radix;
		} else {
			if (count) {
				blst_copy(
				    &scan[i],
				    blk,
				    radix,
				    next_skip - 1,
				    dest,
				    count
				);
			}
			count = 0;
		}
		blk += radix;
	}
}
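
/*
 * Illustrative note (added commentary): blst_copy() is the workhorse of
 * blist_resize() above.  blist_create() hands back an all-allocated tree,
 * so transferring the old bitmap is just a matter of walking the source
 * and calling blist_free() on the destination for every range that is free
 * in the source.  Freeing a block twice panics, which is why the space must
 * not already be free in the destination.
 */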

/*
 * BLST_RADIX_INIT() - initialize radix tree
 *
 *	Initialize our meta structures and bitmaps and calculate the exact
 *	amount of space required to manage 'count' blocks - this space may
 *	be considerably less than the calculated radix due to the large
 *	RADIX values we use.
 */

static daddr_t
blst_radix_init(blmeta_t *scan, daddr_t radix, int skip, daddr_t count)
{
	int i;
	int next_skip;
	daddr_t memindex = 0;

	/*
	 * Leaf node
	 */

	if (radix == BLIST_BMAP_RADIX) {
		if (scan) {
			scan->bm_bighint = 0;
			scan->u.bmu_bitmap = 0;
		}
		return(memindex);
	}

	/*
	 * Meta node.  If allocating the entire object we can special
	 * case it.  However, we need to figure out how much memory
	 * is required to manage 'count' blocks, so we continue on anyway.
	 */

	if (scan) {
		scan->bm_bighint = 0;
		scan->u.bmu_avail = 0;
	}

	radix >>= BLIST_META_RADIX_SHIFT;
	next_skip = (skip >> BLIST_META_RADIX_SHIFT);

	for (i = 1; i <= skip; i += next_skip) {
		if (count >= radix) {
			/*
			 * Allocate the entire object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    radix
			);
			count -= radix;
		} else if (count > 0) {
			/*
			 * Allocate a partial object
			 */
			memindex = i + blst_radix_init(
			    ((scan) ? &scan[i] : NULL),
			    radix,
			    next_skip - 1,
			    count
			);
			count = 0;
		} else {
			/*
			 * Add terminator and break out
			 */
			if (scan)
				scan[i].bm_bighint = (daddr_t)-1;
			break;
		}
	}
	if (memindex < i)
		memindex = i;
	return(memindex);
}
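
/*
 * Worked example (added commentary): asking blist_create() for 1024 blocks
 * with the default 32/16 radixes produces a root radix of 8192.  The first
 * blst_radix_init(NULL, ...) pass then counts one root meta node, two fully
 * populated 512-block subtrees of 17 nodes each, and one terminator slot,
 * for bl_rootblks = 36 blmeta_t records -- far fewer than a fully populated
 * 8192-block tree would need, which is the point of the early-termination
 * scheme described at the top of this file.
 */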

#ifdef BLIST_DEBUG

static void
blst_radix_print(blmeta_t *scan, daddr_t blk, daddr_t radix, int skip, int tab)
{
	int i;
	int next_skip;
	int lastState = 0;

	if (radix == BLIST_BMAP_RADIX) {
		printf(
		    "%*.*s(%04x,%d): bitmap %08x big=%d\n",
		    tab, tab, "",
		    blk, radix,
		    scan->u.bmu_bitmap,
		    scan->bm_bighint
		);
		return;
	}

	if (scan->u.bmu_avail == 0) {
		printf(
		    "%*.*s(%04x,%d) ALL ALLOCATED\n",
		    tab, tab, "",
		    blk,
		    radix
		);
		return;
	}
	if (scan->u.bmu_avail == radix) {
		printf(
		    "%*.*s(%04x,%d) ALL FREE\n",
		    tab, tab, "",
		    blk,
		    radix
		);
		return;
	}

	printf(
	    "%*.*s(%04x,%d): subtree (%d/%d) big=%d {\n",
	    tab, tab, "",
	    blk, radix,
	    scan->u.bmu_avail,
	    radix,
	    scan->bm_bighint
	);

	radix >>= BLIST_META_RADIX_SHIFT;
	next_skip = (skip >> BLIST_META_RADIX_SHIFT);
	tab += 4;

	for (i = 1; i <= skip; i += next_skip) {
		if (scan[i].bm_bighint == (daddr_t)-1) {
			printf(
			    "%*.*s(%04x,%d): Terminator\n",
			    tab, tab, "",
			    blk, radix
			);
			lastState = 0;
			break;
		}
		blst_radix_print(
		    &scan[i],
		    blk,
		    radix,
		    next_skip - 1,
		    tab
		);
		blk += radix;
	}
	tab -= 4;

	printf(
	    "%*.*s}\n",
	    tab, tab, ""
	);
}

#endif

#ifdef BLIST_DEBUG

int
main(int ac, char **av)
{
	int size = 1024;
	int i;
	blist_t bl;

	for (i = 1; i < ac; ++i) {
		const char *ptr = av[i];
		if (*ptr != '-') {
			size = strtol(ptr, NULL, 0);
			continue;
		}
		ptr += 2;
		fprintf(stderr, "Bad option: %s\n", ptr - 2);
		exit(1);
	}
	bl = blist_create(size);
	blist_free(bl, 0, size);

	for (;;) {
		char buf[1024];
		daddr_t da = 0;
		daddr_t count = 0;


		printf("%d/%d/%d> ", bl->bl_free, size, bl->bl_radix);
		fflush(stdout);
		if (fgets(buf, sizeof(buf), stdin) == NULL)
			break;
		switch(buf[0]) {
		case 'r':
			if (sscanf(buf + 1, "%d", &count) == 1) {
				blist_resize(&bl, count, 1);
			} else {
				printf("?\n");
			}
			/* fall through to print the resized tree */
		case 'p':
			blist_print(bl);
			break;
		case 'a':
			if (sscanf(buf + 1, "%d", &count) == 1) {
				daddr_t blk = blist_alloc(bl, count);
				printf("    R=%04x\n", blk);
			} else {
				printf("?\n");
			}
			break;
		case 'f':
			if (sscanf(buf + 1, "%x %d", &da, &count) == 2) {
				blist_free(bl, da, count);
			} else {
				printf("?\n");
			}
			break;
		case '?':
		case 'h':
			puts(
			    "p          -print\n"
			    "a %d       -allocate\n"
			    "f %x %d    -free\n"
			    "r %d       -resize\n"
			    "h/?        -help"
			);
			break;
		default:
			printf("?\n");
			break;
		}
	}
	return(0);
}

void
panic(const char *ctl, ...)
{
	va_list va;

	va_start(va, ctl);
	vfprintf(stderr, ctl, va);
	fprintf(stderr, "\n");
	va_end(va);
	exit(1);
}

#endif