1 /*********************************************************************** 2 * * 3 * This software is part of the ast package * 4 * Copyright (c) 1985-2007 AT&T Knowledge Ventures * 5 * and is licensed under the * 6 * Common Public License, Version 1.0 * 7 * by AT&T Knowledge Ventures * 8 * * 9 * A copy of the License is available at * 10 * http://www.opensource.org/licenses/cpl1.0.txt * 11 * (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9) * 12 * * 13 * Information and Software Systems Research * 14 * AT&T Research * 15 * Florham Park NJ * 16 * * 17 * Glenn Fowler <gsf@research.att.com> * 18 * David Korn <dgk@research.att.com> * 19 * Phong Vo <kpv@research.att.com> * 20 * * 21 ***********************************************************************/ 22 #if defined(_UWIN) && defined(_BLD_ast) 23 24 void _STUB_vmbest(){} 25 26 #else 27 28 #include "vmhdr.h" 29 30 /* Best-fit allocation method. This is based on a best-fit strategy 31 ** using a splay tree for storage of lists of free blocks of the same 32 ** size. Recent free blocks may be cached for fast reuse. 33 ** 34 ** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94. 35 */ 36 37 #ifdef DEBUG 38 static int N_free; /* # of free calls */ 39 static int N_alloc; /* # of alloc calls */ 40 static int N_resize; /* # of resize calls */ 41 static int N_wild; /* # allocated from the wild block */ 42 static int N_last; /* # allocated from last free block */ 43 static int N_reclaim; /* # of bestreclaim calls */ 44 45 #undef VM_TRUST /* always check for locking, etc.s */ 46 #define VM_TRUST 0 47 #endif /*DEBUG*/ 48 49 #define COMPACT 8 /* factor to decide when to compact */ 50 51 /* Check to see if a block is in the free tree */ 52 #if __STD_C 53 static int vmintree(Block_t* node, Block_t* b) 54 #else 55 static int vmintree(node,b) 56 Block_t* node; 57 Block_t* b; 58 #endif 59 { Block_t* t; 60 61 for(t = node; t; t = LINK(t)) 62 if(t == b) 63 return 1; 64 if(LEFT(node) && vmintree(LEFT(node),b)) 65 return 1; 66 if(RIGHT(node) && vmintree(RIGHT(node),b)) 67 return 1; 68 return 0; 69 } 70 71 #if __STD_C 72 static int vmonlist(Block_t* list, Block_t* b) 73 #else 74 static int vmonlist(list,b) 75 Block_t* list; 76 Block_t* b; 77 #endif 78 { 79 for(; list; list = LINK(list)) 80 if(list == b) 81 return 1; 82 return 0; 83 } 84 85 /* Check to see if a block is known to be free */ 86 #if __STD_C 87 static int vmisfree(Vmdata_t* vd, Block_t* b) 88 #else 89 static int vmisfree(vd,b) 90 Vmdata_t* vd; 91 Block_t* b; 92 #endif 93 { 94 if(SIZE(b) & (BUSY|JUNK|PFREE)) 95 return 0; 96 97 if(b == vd->wild) 98 return 1; 99 100 if(SIZE(b) < MAXTINY) 101 return vmonlist(TINY(vd)[INDEX(SIZE(b))], b); 102 103 if(vd->root) 104 return vmintree(vd->root, b); 105 106 return 0; 107 } 108 109 /* Check to see if a block is known to be junked */ 110 #if __STD_C 111 static int vmisjunk(Vmdata_t* vd, Block_t* b) 112 #else 113 static int vmisjunk(vd,b) 114 Vmdata_t* vd; 115 Block_t* b; 116 #endif 117 { 118 Block_t* t; 119 120 if((SIZE(b)&BUSY) == 0 || (SIZE(b)&JUNK) == 0) 121 return 0; 122 123 if(b == vd->free) /* recently freed */ 124 return 1; 125 126 /* check the list that b is supposed to be in */ 127 for(t = CACHE(vd)[C_INDEX(SIZE(b))]; t; t = LINK(t)) 128 if(t == b) 129 return 1; 130 131 /* on occasions, b may be put onto the catch-all list */ 132 if(C_INDEX(SIZE(b)) < S_CACHE) 133 for(t = CACHE(vd)[S_CACHE]; t; t = LINK(t)) 134 if(t == b) 135 return 1; 136 137 return 0; 138 } 139 140 /* check to see if the free tree is in good shape */ 141 #if __STD_C 142 static int vmchktree(Block_t* node) 143 #else 144 static int vmchktree(node) 145 Block_t* node; 146 #endif 147 { Block_t* t; 148 149 if(SIZE(node) & BITS) 150 { /**/ASSERT(0); return -1; } 151 152 for(t = LINK(node); t; t = LINK(t)) 153 if(SIZE(t) != SIZE(node)) 154 { /**/ASSERT(0); return -1; } 155 156 if((t = LEFT(node)) ) 157 { if(SIZE(t) >= SIZE(node) ) 158 { /**/ASSERT(0); return -1; } 159 else return vmchktree(t); 160 } 161 if((t = RIGHT(node)) ) 162 { if(SIZE(t) <= SIZE(node) ) 163 { /**/ASSERT(0); return -1; } 164 else return vmchktree(t); 165 } 166 167 return 0; 168 } 169 170 #if __STD_C 171 int _vmbestcheck(Vmdata_t* vd, Block_t* freeb) 172 #else 173 int _vmbestcheck(vd, freeb) 174 Vmdata_t* vd; 175 Block_t* freeb; /* known to be free but not on any free list */ 176 #endif 177 { 178 reg Seg_t *seg; 179 reg Block_t *b, *endb, *nextb; 180 int rv = 0; 181 182 if(!CHECK()) 183 return 0; 184 185 /* make sure the free tree is still in shape */ 186 if(vd->root && vmchktree(vd->root) < 0 ) 187 { rv = -1; /**/ASSERT(0); } 188 189 for(seg = vd->seg; seg && rv == 0; seg = seg->next) 190 { b = SEGBLOCK(seg); 191 endb = (Block_t*)(seg->baddr - sizeof(Head_t)); 192 for(; b < endb && rv == 0; b = nextb) 193 { nextb = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) ); 194 195 if(!ISBUSY(SIZE(b)) ) /* a completely free block */ 196 { /* there should be no marked bits of any type */ 197 if(SIZE(b) & (BUSY|JUNK|PFREE) ) 198 { rv = -1; /**/ASSERT(0); } 199 200 /* next block must be busy and marked PFREE */ 201 if(!ISBUSY(SIZE(nextb)) || !ISPFREE(SIZE(nextb)) ) 202 { rv = -1; /**/ASSERT(0); } 203 204 /* must have a self-reference pointer */ 205 if(*SELF(b) != b) 206 { rv = -1; /**/ASSERT(0); } 207 208 /* segment pointer should be well-defined */ 209 if(!TINIEST(b) && SEG(b) != seg) 210 { rv = -1; /**/ASSERT(0); } 211 212 /* must be on a free list */ 213 if(b != freeb && !vmisfree(vd, b) ) 214 { rv = -1; /**/ASSERT(0); } 215 } 216 else 217 { /* segment pointer should be well-defined */ 218 if(SEG(b) != seg) 219 { rv = -1; /**/ASSERT(0); } 220 221 /* next block should not be marked PFREE */ 222 if(ISPFREE(SIZE(nextb)) ) 223 { rv = -1; /**/ASSERT(0); } 224 225 /* if PFREE, last block should be free */ 226 if(ISPFREE(SIZE(b)) && LAST(b) != freeb && 227 !vmisfree(vd, LAST(b)) ) 228 { rv = -1; /**/ASSERT(0); } 229 230 /* if free but unreclaimed, should be junk */ 231 if(ISJUNK(SIZE(b)) && !vmisjunk(vd, b)) 232 { rv = -1; /**/ASSERT(0); } 233 } 234 } 235 } 236 237 return rv; 238 } 239 240 /* Tree rotation functions */ 241 #define RROTATE(x,y) (LEFT(x) = RIGHT(y), RIGHT(y) = (x), (x) = (y)) 242 #define LROTATE(x,y) (RIGHT(x) = LEFT(y), LEFT(y) = (x), (x) = (y)) 243 #define RLINK(s,x) ((s) = LEFT(s) = (x)) 244 #define LLINK(s,x) ((s) = RIGHT(s) = (x)) 245 246 /* Find and delete a suitable element in the free tree. */ 247 #if __STD_C 248 static Block_t* bestsearch(Vmdata_t* vd, reg size_t size, Block_t* wanted) 249 #else 250 static Block_t* bestsearch(vd, size, wanted) 251 Vmdata_t* vd; 252 reg size_t size; 253 Block_t* wanted; 254 #endif 255 { 256 reg size_t s; 257 reg Block_t *t, *root, *l, *r; 258 Block_t link; 259 260 /* extracting a tiniest block from its list */ 261 if((root = wanted) && size == TINYSIZE) 262 { reg Seg_t* seg; 263 264 l = TLEFT(root); 265 if((r = LINK(root)) ) 266 TLEFT(r) = l; 267 if(l) 268 LINK(l) = r; 269 else TINY(vd)[0] = r; 270 271 seg = vd->seg; 272 if(!seg->next) 273 SEG(root) = seg; 274 else for(;; seg = seg->next) 275 { if((Vmuchar_t*)root > (Vmuchar_t*)seg->addr && 276 (Vmuchar_t*)root < seg->baddr) 277 { SEG(root) = seg; 278 break; 279 } 280 } 281 282 return root; 283 } 284 285 /**/ASSERT(!vd->root || vmchktree(vd->root) == 0); 286 287 /* find the right one to delete */ 288 l = r = &link; 289 if((root = vd->root) ) do 290 { /**/ ASSERT(!ISBITS(size) && !ISBITS(SIZE(root))); 291 if(size == (s = SIZE(root)) ) 292 break; 293 if(size < s) 294 { if((t = LEFT(root)) ) 295 { if(size <= (s = SIZE(t)) ) 296 { RROTATE(root,t); 297 if(size == s) 298 break; 299 t = LEFT(root); 300 } 301 else 302 { LLINK(l,t); 303 t = RIGHT(t); 304 } 305 } 306 RLINK(r,root); 307 } 308 else 309 { if((t = RIGHT(root)) ) 310 { if(size >= (s = SIZE(t)) ) 311 { LROTATE(root,t); 312 if(size == s) 313 break; 314 t = RIGHT(root); 315 } 316 else 317 { RLINK(r,t); 318 t = LEFT(t); 319 } 320 } 321 LLINK(l,root); 322 } 323 /**/ ASSERT(root != t); 324 } while((root = t) ); 325 326 if(root) /* found it, now isolate it */ 327 { RIGHT(l) = LEFT(root); 328 LEFT(r) = RIGHT(root); 329 } 330 else /* nothing exactly fit */ 331 { LEFT(r) = NIL(Block_t*); 332 RIGHT(l) = NIL(Block_t*); 333 334 /* grab the least one from the right tree */ 335 if((root = LEFT(&link)) ) 336 { while((t = LEFT(root)) ) 337 RROTATE(root,t); 338 LEFT(&link) = RIGHT(root); 339 } 340 } 341 342 if(root && (r = LINK(root)) ) 343 { /* head of a link list, use next one for root */ 344 LEFT(r) = RIGHT(&link); 345 RIGHT(r) = LEFT(&link); 346 } 347 else if(!(r = LEFT(&link)) ) 348 r = RIGHT(&link); 349 else /* graft left tree to right tree */ 350 { while((t = LEFT(r)) ) 351 RROTATE(r,t); 352 LEFT(r) = RIGHT(&link); 353 } 354 vd->root = r; /**/ASSERT(!r || !ISBITS(SIZE(r))); 355 356 /**/ASSERT(!vd->root || vmchktree(vd->root) == 0); 357 /**/ASSERT(!wanted || wanted == root); 358 359 return root; 360 } 361 362 /* Reclaim all delayed free blocks into the free tree */ 363 #if __STD_C 364 static int bestreclaim(reg Vmdata_t* vd, Block_t* wanted, int c) 365 #else 366 static int bestreclaim(vd, wanted, c) 367 reg Vmdata_t* vd; 368 Block_t* wanted; 369 int c; 370 #endif 371 { 372 reg size_t size, s; 373 reg Block_t *fp, *np, *t, *list; 374 reg int n, saw_wanted; 375 reg Seg_t *seg; 376 377 /**/COUNT(N_reclaim); 378 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); 379 380 if((fp = vd->free) ) 381 { LINK(fp) = CACHE(vd)[S_CACHE]; CACHE(vd)[S_CACHE] = fp; 382 vd->free = NIL(Block_t*); 383 } 384 385 saw_wanted = wanted ? 0 : 1; 386 for(n = S_CACHE; n >= c; --n) 387 { list = CACHE(vd)[n]; CACHE(vd)[n] = NIL(Block_t*); 388 while((fp = list) ) 389 { /* Note that below here we allow ISJUNK blocks to be 390 ** forward-merged even though they are not removed from 391 ** the list immediately. In this way, the list is 392 ** scanned only once. It works because the LINK and SIZE 393 ** fields are not destroyed during the merging. This can 394 ** be seen by observing that a tiniest block has a 2-word 395 ** header and a 2-word body. Merging a tiniest block 396 ** (1seg) and the next block (2seg) looks like this: 397 ** 1seg size link left 2seg size link left .... 398 ** 1seg size link left rite xxxx xxxx .... self 399 ** After the merge, the 2seg word is replaced by the RIGHT 400 ** pointer of the new block and somewhere beyond the 401 ** two xxxx fields, the SELF pointer will replace some 402 ** other word. The important part is that the two xxxx 403 ** fields are kept intact. 404 */ 405 list = LINK(list); /**/ASSERT(!vmonlist(list,fp)); 406 407 size = SIZE(fp); 408 if(!ISJUNK(size)) /* already done */ 409 continue; 410 411 if(_Vmassert & VM_region) 412 { /* see if this address is from region */ 413 for(seg = vd->seg; seg; seg = seg->next) 414 if(fp >= SEGBLOCK(seg) && fp < (Block_t*)seg->baddr ) 415 break; 416 if(!seg) /* must be a bug in application code! */ 417 { /**/ ASSERT(seg != NIL(Seg_t*)); 418 continue; 419 } 420 } 421 422 if(ISPFREE(size)) /* backward merge */ 423 { fp = LAST(fp); 424 s = SIZE(fp); /**/ASSERT(!(s&BITS)); 425 REMOVE(vd,fp,INDEX(s),t,bestsearch); 426 size = (size&~BITS) + s + sizeof(Head_t); 427 } 428 else size &= ~BITS; 429 430 for(;;) /* forward merge */ 431 { np = (Block_t*)((Vmuchar_t*)fp+size+sizeof(Head_t)); 432 s = SIZE(np); /**/ASSERT(s > 0); 433 if(!ISBUSY(s)) 434 { /**/ASSERT((s&BITS) == 0); 435 if(np == vd->wild) 436 vd->wild = NIL(Block_t*); 437 else REMOVE(vd,np,INDEX(s),t,bestsearch); 438 } 439 else if(ISJUNK(s)) 440 { /* reclaim any touched junk list */ 441 if((int)C_INDEX(s) < c) 442 c = C_INDEX(s); 443 SIZE(np) = 0; 444 CLRBITS(s); 445 } 446 else break; 447 size += s + sizeof(Head_t); 448 } 449 SIZE(fp) = size; 450 451 /* tell next block that this one is free */ 452 np = NEXT(fp); /**/ASSERT(ISBUSY(SIZE(np))); 453 /**/ASSERT(!ISJUNK(SIZE(np))); 454 SETPFREE(SIZE(np)); 455 *(SELF(fp)) = fp; 456 457 if(fp == wanted) /* to be consumed soon */ 458 { /**/ASSERT(!saw_wanted); /* should be seen just once */ 459 saw_wanted = 1; 460 continue; 461 } 462 463 /* wilderness preservation */ 464 if(np->body.data >= vd->seg->baddr) 465 { vd->wild = fp; 466 continue; 467 } 468 469 /* tiny block goes to tiny list */ 470 if(size < MAXTINY) 471 { s = INDEX(size); 472 np = LINK(fp) = TINY(vd)[s]; 473 if(s == 0) /* TINIEST block */ 474 { if(np) 475 TLEFT(np) = fp; 476 TLEFT(fp) = NIL(Block_t*); 477 } 478 else 479 { if(np) 480 LEFT(np) = fp; 481 LEFT(fp) = NIL(Block_t*); 482 SETLINK(fp); 483 } 484 TINY(vd)[s] = fp; 485 continue; 486 } 487 488 LEFT(fp) = RIGHT(fp) = LINK(fp) = NIL(Block_t*); 489 if(!(np = vd->root) ) /* inserting into an empty tree */ 490 { vd->root = fp; 491 continue; 492 } 493 494 size = SIZE(fp); 495 while(1) /* leaf insertion */ 496 { /**/ASSERT(np != fp); 497 if((s = SIZE(np)) > size) 498 { if((t = LEFT(np)) ) 499 { /**/ ASSERT(np != t); 500 np = t; 501 } 502 else 503 { LEFT(np) = fp; 504 break; 505 } 506 } 507 else if(s < size) 508 { if((t = RIGHT(np)) ) 509 { /**/ ASSERT(np != t); 510 np = t; 511 } 512 else 513 { RIGHT(np) = fp; 514 break; 515 } 516 } 517 else /* s == size */ 518 { if((t = LINK(np)) ) 519 { LINK(fp) = t; 520 LEFT(t) = fp; 521 } 522 LINK(np) = fp; 523 LEFT(fp) = np; 524 SETLINK(fp); 525 break; 526 } 527 } 528 } 529 } 530 531 /**/ASSERT(!wanted || saw_wanted == 1); 532 /**/ASSERT(_vmbestcheck(vd, wanted) == 0); 533 return saw_wanted; 534 } 535 536 #if __STD_C 537 static int bestcompact(Vmalloc_t* vm) 538 #else 539 static int bestcompact(vm) 540 Vmalloc_t* vm; 541 #endif 542 { 543 reg Seg_t *seg, *next; 544 reg Block_t *bp, *t; 545 reg size_t size, segsize, round; 546 reg int local; 547 reg Vmdata_t* vd = vm->data; 548 549 if(!(local = vd->mode&VM_TRUST) ) 550 { GETLOCAL(vd,local); 551 if(ISLOCK(vd,local)) 552 return -1; 553 SETLOCK(vd,local); 554 } 555 556 bestreclaim(vd,NIL(Block_t*),0); 557 558 for(seg = vd->seg; seg; seg = next) 559 { next = seg->next; 560 561 bp = BLOCK(seg->baddr); 562 if(!ISPFREE(SIZE(bp)) ) 563 continue; 564 565 bp = LAST(bp); /**/ASSERT(vmisfree(vd,bp)); 566 size = SIZE(bp); 567 if(bp == vd->wild) 568 { /* During large block allocations, _Vmextend might 569 ** have been enlarged the rounding factor. Reducing 570 ** it a bit help avoiding getting large raw memory. 571 */ 572 if((round = vm->disc->round) == 0) 573 round = _Vmpagesize; 574 if(size > COMPACT*vd->incr && vd->incr > round) 575 vd->incr /= 2; 576 577 /* for the bottom segment, we don't necessarily want 578 ** to return raw memory too early. vd->pool has an 579 ** approximation of the average size of recently freed 580 ** blocks. If this is large, the application is managing 581 ** large blocks so we throttle back memory chopping 582 ** to avoid thrashing the underlying memory system. 583 */ 584 if(size <= COMPACT*vd->incr || size <= COMPACT*vd->pool) 585 continue; 586 587 vd->wild = NIL(Block_t*); 588 vd->pool = 0; 589 } 590 else REMOVE(vd,bp,INDEX(size),t,bestsearch); 591 CLRPFREE(SIZE(NEXT(bp))); 592 593 if(size < (segsize = seg->size)) 594 size += sizeof(Head_t); 595 596 if((size = (*_Vmtruncate)(vm,seg,size,0)) > 0) 597 { if(size >= segsize) /* entire segment deleted */ 598 continue; 599 /**/ASSERT(SEG(BLOCK(seg->baddr)) == seg); 600 601 if((size = (seg->baddr - ((Vmuchar_t*)bp) - sizeof(Head_t))) > 0) 602 SIZE(bp) = size - sizeof(Head_t); 603 else bp = NIL(Block_t*); 604 } 605 606 if(bp) 607 { /**/ ASSERT(SIZE(bp) >= BODYSIZE); 608 /**/ ASSERT(SEGWILD(bp)); 609 /**/ ASSERT(!vd->root || !vmintree(vd->root,bp)); 610 SIZE(bp) |= BUSY|JUNK; 611 LINK(bp) = CACHE(vd)[C_INDEX(SIZE(bp))]; 612 CACHE(vd)[C_INDEX(SIZE(bp))] = bp; 613 } 614 } 615 616 if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST) 617 (*_Vmtrace)(vm, (Vmuchar_t*)0, (Vmuchar_t*)0, 0, 0); 618 619 CLRLOCK(vd,local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); 620 621 return 0; 622 } 623 624 #if __STD_C 625 static Void_t* bestalloc(Vmalloc_t* vm, reg size_t size ) 626 #else 627 static Void_t* bestalloc(vm,size) 628 Vmalloc_t* vm; /* region allocating from */ 629 reg size_t size; /* desired block size */ 630 #endif 631 { 632 reg Vmdata_t* vd = vm->data; 633 reg size_t s; 634 reg int n; 635 reg Block_t *tp, *np; 636 reg int local; 637 size_t orgsize = 0; 638 639 if(!(_Vmassert & VM_init)) 640 { char *chk = getenv("VMCHECK"); 641 _Vmassert = VM_init|VM_primary|VM_secondary; 642 if(chk) 643 { int set = 1; 644 for(;; set ? (_Vmassert |= n) : (_Vmassert &= ~n)) 645 { 646 switch (*chk++) 647 { 648 case 0: 649 break; 650 case '-': 651 case '!': 652 set = 0; 653 n = 0; 654 continue; 655 case '+': 656 set = 1; 657 n = 0; 658 continue; 659 case '1': 660 n = VM_check; 661 continue; 662 case '2': 663 n = VM_abort; 664 continue; 665 case '3': 666 n = VM_check|VM_abort; 667 continue; 668 case 'a': 669 case 'A': 670 n = VM_abort; 671 continue; 672 case 'c': 673 case 'C': 674 n = VM_check; 675 continue; 676 case 'p': 677 case 'P': 678 n = VM_primary; 679 continue; 680 case 'r': 681 case 'R': 682 n = VM_region; 683 continue; 684 case 's': 685 case 'S': 686 n = VM_secondary; 687 continue; 688 default: 689 n = 0; 690 continue; 691 } 692 break; 693 } 694 } 695 } 696 /**/COUNT(N_alloc); 697 698 if(!(local = vd->mode&VM_TRUST)) 699 { GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local)); 700 if(ISLOCK(vd,local) ) 701 return NIL(Void_t*); 702 SETLOCK(vd,local); 703 orgsize = size; 704 } 705 706 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); 707 /**/ ASSERT(HEADSIZE == sizeof(Head_t)); 708 /**/ ASSERT(BODYSIZE == sizeof(Body_t)); 709 /**/ ASSERT((ALIGN%(BITS+1)) == 0 ); 710 /**/ ASSERT((sizeof(Head_t)%ALIGN) == 0 ); 711 /**/ ASSERT((sizeof(Body_t)%ALIGN) == 0 ); 712 /**/ ASSERT((BODYSIZE%ALIGN) == 0 ); 713 /**/ ASSERT(sizeof(Block_t) == (sizeof(Body_t)+sizeof(Head_t)) ); 714 715 /* for ANSI requirement that malloc(0) returns non-NULL pointer */ 716 size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN); 717 718 if((tp = vd->free) ) /* reuse last free piece if appropriate */ 719 { /**/ASSERT(ISBUSY(SIZE(tp)) ); 720 /**/ASSERT(ISJUNK(SIZE(tp)) ); 721 /**/COUNT(N_last); 722 723 vd->free = NIL(Block_t*); 724 if((s = SIZE(tp)) >= size && s < (size << 1) ) 725 { if(s >= size + (sizeof(Head_t)+BODYSIZE) ) 726 { SIZE(tp) = size; 727 np = NEXT(tp); 728 SEG(np) = SEG(tp); 729 SIZE(np) = ((s&~BITS) - (size+sizeof(Head_t)))|JUNK|BUSY; 730 vd->free = np; 731 SIZE(tp) |= s&BITS; 732 } 733 CLRJUNK(SIZE(tp)); 734 goto done; 735 } 736 737 LINK(tp) = CACHE(vd)[S_CACHE]; 738 CACHE(vd)[S_CACHE] = tp; 739 } 740 741 for(;;) 742 { for(n = S_CACHE; n >= 0; --n) /* best-fit except for coalescing */ 743 { bestreclaim(vd,NIL(Block_t*),n); 744 if(vd->root && (tp = bestsearch(vd,size,NIL(Block_t*))) ) 745 goto got_block; 746 } 747 748 /**/ASSERT(!vd->free); 749 if((tp = vd->wild) && SIZE(tp) >= size) 750 { /**/COUNT(N_wild); 751 vd->wild = NIL(Block_t*); 752 goto got_block; 753 } 754 755 KPVCOMPACT(vm,bestcompact); 756 if((tp = (*_Vmextend)(vm,size,bestsearch)) ) 757 goto got_block; 758 else if(vd->mode&VM_AGAIN) 759 vd->mode &= ~VM_AGAIN; 760 else 761 { CLRLOCK(vd,local); 762 return NIL(Void_t*); 763 } 764 } 765 766 got_block: 767 /**/ ASSERT(!ISBITS(SIZE(tp))); 768 /**/ ASSERT(SIZE(tp) >= size); 769 /**/ ASSERT((SIZE(tp)%ALIGN) == 0); 770 /**/ ASSERT(!vd->free); 771 772 /* tell next block that we are no longer a free block */ 773 CLRPFREE(SIZE(NEXT(tp))); /**/ ASSERT(ISBUSY(SIZE(NEXT(tp)))); 774 775 if((s = SIZE(tp)-size) >= (sizeof(Head_t)+BODYSIZE) ) 776 { SIZE(tp) = size; 777 778 np = NEXT(tp); 779 SEG(np) = SEG(tp); 780 SIZE(np) = (s - sizeof(Head_t)) | BUSY|JUNK; 781 782 if(VMWILD(vd,np)) 783 { SIZE(np) &= ~BITS; 784 *SELF(np) = np; /**/ASSERT(ISBUSY(SIZE(NEXT(np)))); 785 SETPFREE(SIZE(NEXT(np))); 786 vd->wild = np; 787 } 788 else vd->free = np; 789 } 790 791 SETBUSY(SIZE(tp)); 792 793 done: 794 if(!local && (vd->mode&VM_TRACE) && _Vmtrace && VMETHOD(vd) == VM_MTBEST) 795 (*_Vmtrace)(vm,NIL(Vmuchar_t*),(Vmuchar_t*)DATA(tp),orgsize,0); 796 797 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); 798 CLRLOCK(vd,local); 799 ANNOUNCE(local, vm, VM_ALLOC, DATA(tp), vm->disc); 800 801 return DATA(tp); 802 } 803 804 #if __STD_C 805 static long bestaddr(Vmalloc_t* vm, Void_t* addr ) 806 #else 807 static long bestaddr(vm, addr) 808 Vmalloc_t* vm; /* region allocating from */ 809 Void_t* addr; /* address to check */ 810 #endif 811 { 812 reg Seg_t* seg; 813 reg Block_t *b, *endb; 814 reg long offset; 815 reg Vmdata_t* vd = vm->data; 816 reg int local; 817 818 if(!(local = vd->mode&VM_TRUST) ) 819 { GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local)); 820 if(ISLOCK(vd,local)) 821 return -1L; 822 SETLOCK(vd,local); 823 } 824 825 offset = -1L; b = endb = NIL(Block_t*); 826 for(seg = vd->seg; seg; seg = seg->next) 827 { b = SEGBLOCK(seg); 828 endb = (Block_t*)(seg->baddr - sizeof(Head_t)); 829 if((Vmuchar_t*)addr > (Vmuchar_t*)b && 830 (Vmuchar_t*)addr < (Vmuchar_t*)endb) 831 break; 832 } 833 834 if(local && !(vd->mode&VM_TRUST) ) /* from bestfree or bestresize */ 835 { b = BLOCK(addr); 836 if(seg && SEG(b) == seg && ISBUSY(SIZE(b)) && !ISJUNK(SIZE(b)) ) 837 offset = 0; 838 if(offset != 0 && vm->disc->exceptf) 839 (void)(*vm->disc->exceptf)(vm,VM_BADADDR,addr,vm->disc); 840 } 841 else if(seg) 842 { while(b < endb) 843 { reg Vmuchar_t* data = (Vmuchar_t*)DATA(b); 844 reg size_t size = SIZE(b)&~BITS; 845 846 if((Vmuchar_t*)addr >= data && (Vmuchar_t*)addr < data+size) 847 { if(ISJUNK(SIZE(b)) || !ISBUSY(SIZE(b))) 848 offset = -1L; 849 else offset = (Vmuchar_t*)addr - data; 850 goto done; 851 } 852 853 b = (Block_t*)((Vmuchar_t*)DATA(b) + size); 854 } 855 } 856 857 done: 858 CLRLOCK(vd,local); 859 return offset; 860 } 861 862 #if __STD_C 863 static int bestfree(Vmalloc_t* vm, Void_t* data ) 864 #else 865 static int bestfree(vm, data ) 866 Vmalloc_t* vm; 867 Void_t* data; 868 #endif 869 { 870 reg Vmdata_t* vd = vm->data; 871 reg Block_t *bp; 872 reg size_t s; 873 reg int local; 874 875 #ifdef DEBUG 876 if((local = (int)data) >= 0 && local <= 0xf) 877 { int vmassert = _Vmassert; 878 _Vmassert = local ? local : vmassert ? vmassert : (VM_check|VM_abort); 879 _vmbestcheck(vd, NIL(Block_t*)); 880 _Vmassert = local ? local : vmassert; 881 return 0; 882 } 883 #endif 884 885 if(!data) /* ANSI-ism */ 886 return 0; 887 888 /**/COUNT(N_free); 889 890 if(!(local = vd->mode&VM_TRUST) ) 891 { GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local)); 892 if(ISLOCK(vd,local) ) 893 return -1; 894 if(KPVADDR(vm,data,bestaddr) != 0 ) 895 return -1; 896 SETLOCK(vd,local); 897 } 898 899 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); 900 bp = BLOCK(data); s = SIZE(bp); 901 902 /* Keep an approximate average free block size. 903 ** This is used in bestcompact() to decide when to release 904 ** raw memory back to the underlying memory system. 905 */ 906 vd->pool = (vd->pool + (s&~BITS))/2; 907 908 if(ISBUSY(s) && !ISJUNK(s)) 909 { SETJUNK(SIZE(bp)); 910 if(s < MAXCACHE) 911 { /**/ASSERT(!vmonlist(CACHE(vd)[INDEX(s)], bp) ); 912 LINK(bp) = CACHE(vd)[INDEX(s)]; 913 CACHE(vd)[INDEX(s)] = bp; 914 } 915 else if(!vd->free) 916 vd->free = bp; 917 else 918 { /**/ASSERT(!vmonlist(CACHE(vd)[S_CACHE], bp) ); 919 LINK(bp) = CACHE(vd)[S_CACHE]; 920 CACHE(vd)[S_CACHE] = bp; 921 } 922 923 /* coalesce on freeing large blocks to avoid fragmentation */ 924 if(SIZE(bp) >= 2*vd->incr) 925 { bestreclaim(vd,NIL(Block_t*),0); 926 if(vd->wild && SIZE(vd->wild) >= COMPACT*vd->incr) 927 KPVCOMPACT(vm,bestcompact); 928 } 929 } 930 931 if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST ) 932 (*_Vmtrace)(vm,(Vmuchar_t*)data,NIL(Vmuchar_t*), (s&~BITS), 0); 933 934 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); 935 CLRLOCK(vd,local); 936 ANNOUNCE(local, vm, VM_FREE, data, vm->disc); 937 938 return 0; 939 } 940 941 #if __STD_C 942 static Void_t* bestresize(Vmalloc_t* vm, Void_t* data, reg size_t size, int type) 943 #else 944 static Void_t* bestresize(vm,data,size,type) 945 Vmalloc_t* vm; /* region allocating from */ 946 Void_t* data; /* old block of data */ 947 reg size_t size; /* new size */ 948 int type; /* !=0 to move, <0 for not copy */ 949 #endif 950 { 951 reg Block_t *rp, *np, *t; 952 int local; 953 size_t s, bs, oldsize = 0, orgsize = 0; 954 Void_t *oldd, *orgdata = NIL(Void_t*); 955 Vmdata_t *vd = vm->data; 956 957 /**/COUNT(N_resize); 958 959 if(!data) 960 { if((data = bestalloc(vm,size)) ) 961 { oldsize = 0; 962 size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN); 963 } 964 goto done; 965 } 966 if(size == 0) 967 { (void)bestfree(vm,data); 968 return NIL(Void_t*); 969 } 970 971 if(!(local = vd->mode&VM_TRUST) ) 972 { GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local)); 973 if(ISLOCK(vd,local) ) 974 return NIL(Void_t*); 975 if(!local && KPVADDR(vm,data,bestaddr) != 0 ) 976 return NIL(Void_t*); 977 SETLOCK(vd,local); 978 979 orgdata = data; /* for tracing */ 980 orgsize = size; 981 } 982 983 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); 984 size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN); 985 rp = BLOCK(data); /**/ASSERT(ISBUSY(SIZE(rp)) && !ISJUNK(SIZE(rp))); 986 oldsize = SIZE(rp); CLRBITS(oldsize); 987 if(oldsize < size) 988 { np = (Block_t*)((Vmuchar_t*)rp + oldsize + sizeof(Head_t)); 989 do /* forward merge as much as possible */ 990 { s = SIZE(np); /**/ASSERT(!ISPFREE(s)); 991 if(np == vd->free) 992 { vd->free = NIL(Block_t*); 993 CLRBITS(s); 994 } 995 else if(ISJUNK(s) ) 996 { if(!bestreclaim(vd,np,C_INDEX(s)) ) 997 /**/ASSERT(0); /* oops: did not see np! */ 998 s = SIZE(np); /**/ASSERT(s%ALIGN == 0); 999 } 1000 else if(!ISBUSY(s) ) 1001 { if(np == vd->wild) 1002 vd->wild = NIL(Block_t*); 1003 else REMOVE(vd,np,INDEX(s),t,bestsearch); 1004 } 1005 else break; 1006 1007 SIZE(rp) += (s += sizeof(Head_t)); /**/ASSERT((s%ALIGN) == 0); 1008 np = (Block_t*)((Vmuchar_t*)np + s); 1009 CLRPFREE(SIZE(np)); 1010 } while(SIZE(rp) < size); 1011 1012 if(SIZE(rp) < size && size > vd->incr && SEGWILD(rp) ) 1013 { reg Seg_t* seg; 1014 1015 s = (size - SIZE(rp)) + sizeof(Head_t); s = ROUND(s,vd->incr); 1016 seg = SEG(rp); 1017 if((*vm->disc->memoryf)(vm,seg->addr,seg->extent,seg->extent+s, 1018 vm->disc) == seg->addr ) 1019 { SIZE(rp) += s; 1020 seg->extent += s; 1021 seg->size += s; 1022 seg->baddr += s; 1023 s = (SIZE(rp)&~BITS) + sizeof(Head_t); 1024 np = (Block_t*)((Vmuchar_t*)rp + s); 1025 SEG(np) = seg; 1026 SIZE(np) = BUSY; 1027 } 1028 } 1029 } 1030 1031 if((s = SIZE(rp)) >= (size + (BODYSIZE+sizeof(Head_t))) ) 1032 { SIZE(rp) = size; 1033 np = NEXT(rp); 1034 SEG(np) = SEG(rp); 1035 SIZE(np) = (((s&~BITS)-size) - sizeof(Head_t))|BUSY|JUNK; 1036 CPYBITS(SIZE(rp),s); 1037 rp = np; 1038 goto do_free; 1039 } 1040 else if((bs = s&~BITS) < size) 1041 { if(!(type&(VM_RSMOVE|VM_RSCOPY)) ) 1042 data = NIL(Void_t*); /* old data is not moveable */ 1043 else 1044 { oldd = data; 1045 if((data = KPVALLOC(vm,size,bestalloc)) ) 1046 { if(type&VM_RSCOPY) 1047 memcpy(data, oldd, bs); 1048 1049 do_free: /* reclaim these right away */ 1050 SETJUNK(SIZE(rp)); 1051 LINK(rp) = CACHE(vd)[S_CACHE]; 1052 CACHE(vd)[S_CACHE] = rp; 1053 bestreclaim(vd, NIL(Block_t*), S_CACHE); 1054 } 1055 } 1056 } 1057 1058 if(!local && _Vmtrace && data && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST) 1059 (*_Vmtrace)(vm, (Vmuchar_t*)orgdata, (Vmuchar_t*)data, orgsize, 0); 1060 1061 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); 1062 CLRLOCK(vd,local); 1063 ANNOUNCE(local, vm, VM_RESIZE, data, vm->disc); 1064 1065 done: if(data && (type&VM_RSZERO) && (size = SIZE(BLOCK(data))&~BITS) > oldsize ) 1066 memset((Void_t*)((Vmuchar_t*)data + oldsize), 0, size-oldsize); 1067 1068 return data; 1069 } 1070 1071 #if __STD_C 1072 static long bestsize(Vmalloc_t* vm, Void_t* addr ) 1073 #else 1074 static long bestsize(vm, addr) 1075 Vmalloc_t* vm; /* region allocating from */ 1076 Void_t* addr; /* address to check */ 1077 #endif 1078 { 1079 reg Seg_t* seg; 1080 reg Block_t *b, *endb; 1081 reg long size; 1082 reg Vmdata_t* vd = vm->data; 1083 1084 if(!(vd->mode&VM_TRUST) ) 1085 { if(ISLOCK(vd,0)) 1086 return -1L; 1087 SETLOCK(vd,0); 1088 } 1089 1090 size = -1L; 1091 for(seg = vd->seg; seg; seg = seg->next) 1092 { b = SEGBLOCK(seg); 1093 endb = (Block_t*)(seg->baddr - sizeof(Head_t)); 1094 if((Vmuchar_t*)addr <= (Vmuchar_t*)b || 1095 (Vmuchar_t*)addr >= (Vmuchar_t*)endb) 1096 continue; 1097 while(b < endb) 1098 { if(addr == DATA(b)) 1099 { if(!ISBUSY(SIZE(b)) || ISJUNK(SIZE(b)) ) 1100 size = -1L; 1101 else size = (long)SIZE(b)&~BITS; 1102 goto done; 1103 } 1104 else if((Vmuchar_t*)addr <= (Vmuchar_t*)b) 1105 break; 1106 1107 b = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) ); 1108 } 1109 } 1110 1111 done: 1112 CLRLOCK(vd,0); 1113 return size; 1114 } 1115 1116 #if __STD_C 1117 static Void_t* bestalign(Vmalloc_t* vm, size_t size, size_t align) 1118 #else 1119 static Void_t* bestalign(vm, size, align) 1120 Vmalloc_t* vm; 1121 size_t size; 1122 size_t align; 1123 #endif 1124 { 1125 reg Vmuchar_t *data; 1126 reg Block_t *tp, *np; 1127 reg Seg_t* seg; 1128 reg int local; 1129 reg size_t s, extra, orgsize = 0, orgalign = 0; 1130 reg Vmdata_t* vd = vm->data; 1131 1132 if(size <= 0 || align <= 0) 1133 return NIL(Void_t*); 1134 1135 if(!(local = vd->mode&VM_TRUST) ) 1136 { GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local)); 1137 if(ISLOCK(vd,local) ) 1138 return NIL(Void_t*); 1139 SETLOCK(vd,local); 1140 orgsize = size; 1141 orgalign = align; 1142 } 1143 1144 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); 1145 size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN); 1146 align = MULTIPLE(align,ALIGN); 1147 1148 /* hack so that dbalign() can store header data */ 1149 if(VMETHOD(vd) != VM_MTDEBUG) 1150 extra = 0; 1151 else 1152 { extra = DB_HEAD; 1153 while(align < extra || (align - extra) < sizeof(Block_t)) 1154 align *= 2; 1155 } 1156 1157 /* reclaim all free blocks now to avoid fragmentation */ 1158 bestreclaim(vd,NIL(Block_t*),0); 1159 1160 s = size + 2*(align+sizeof(Head_t)+extra); 1161 if(!(data = (Vmuchar_t*)KPVALLOC(vm,s,bestalloc)) ) 1162 goto done; 1163 1164 tp = BLOCK(data); 1165 seg = SEG(tp); 1166 1167 /* get an aligned address that we can live with */ 1168 if((s = (size_t)((VLONG(data)+extra)%align)) != 0) 1169 data += align-s; /**/ASSERT(((VLONG(data)+extra)%align) == 0); 1170 1171 if((np = BLOCK(data)) != tp ) /* need to free left part */ 1172 { if(((Vmuchar_t*)np - (Vmuchar_t*)tp) < (ssize_t)(sizeof(Block_t)+extra) ) 1173 { data += align; 1174 np = BLOCK(data); 1175 } /**/ASSERT(((VLONG(data)+extra)%align) == 0); 1176 1177 s = (Vmuchar_t*)np - (Vmuchar_t*)tp; 1178 SIZE(np) = ((SIZE(tp)&~BITS) - s)|BUSY; 1179 SEG(np) = seg; 1180 1181 SIZE(tp) = (s - sizeof(Head_t)) | (SIZE(tp)&BITS) | JUNK; 1182 /**/ ASSERT(SIZE(tp) >= sizeof(Body_t) ); 1183 LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))]; 1184 CACHE(vd)[C_INDEX(SIZE(tp))] = tp; 1185 } 1186 1187 /* free left-over if too big */ 1188 if((s = SIZE(np) - size) >= sizeof(Block_t)) 1189 { SIZE(np) = size; 1190 1191 tp = NEXT(np); 1192 SIZE(tp) = ((s & ~BITS) - sizeof(Head_t)) | BUSY | JUNK; 1193 SEG(tp) = seg; 1194 LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))]; 1195 CACHE(vd)[C_INDEX(SIZE(tp))] = tp; 1196 1197 SIZE(np) |= s&BITS; 1198 } 1199 1200 bestreclaim(vd,NIL(Block_t*),0); /* coalesce all free blocks */ 1201 1202 if(!local && !(vd->mode&VM_TRUST) && _Vmtrace && (vd->mode&VM_TRACE) ) 1203 (*_Vmtrace)(vm,NIL(Vmuchar_t*),data,orgsize,orgalign); 1204 1205 done: 1206 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0); 1207 CLRLOCK(vd,local); 1208 ANNOUNCE(local, vm, VM_ALLOC, (Void_t*)data, vm->disc); 1209 1210 return (Void_t*)data; 1211 } 1212 1213 1214 #if _mem_win32 1215 #if _PACKAGE_ast 1216 #include <ast_windows.h> 1217 #else 1218 #include <windows.h> 1219 #endif 1220 #endif /* _lib_win32 */ 1221 1222 #if _mem_mmap_anon 1223 #include <sys/mman.h> 1224 #ifndef MAP_ANON 1225 #define MAP_ANON MAP_ANONYMOUS 1226 #endif 1227 #endif /* _mem_mmap_anon */ 1228 1229 #if _mem_mmap_zero 1230 #include <sys/fcntl.h> 1231 #include <sys/mman.h> 1232 typedef struct _mmapdisc_s 1233 { Vmdisc_t disc; 1234 int fd; 1235 off_t offset; 1236 } Mmapdisc_t; 1237 1238 #ifndef OPEN_MAX 1239 #define OPEN_MAX 64 1240 #endif 1241 #define OPEN_PRIVATE (3*OPEN_MAX/4) 1242 #endif /* _mem_mmap_zero */ 1243 1244 /* failure mode of mmap, sbrk and brk */ 1245 #ifndef MAP_FAILED 1246 #define MAP_FAILED ((Void_t*)(-1)) 1247 #endif 1248 #define BRK_FAILED ((Void_t*)(-1)) 1249 1250 /* A discipline to get raw memory using sbrk/VirtualAlloc/mmap */ 1251 #if __STD_C 1252 static Void_t* sbrkmem(Vmalloc_t* vm, Void_t* caddr, 1253 size_t csize, size_t nsize, Vmdisc_t* disc) 1254 #else 1255 static Void_t* sbrkmem(vm, caddr, csize, nsize, disc) 1256 Vmalloc_t* vm; /* region doing allocation from */ 1257 Void_t* caddr; /* current address */ 1258 size_t csize; /* current size */ 1259 size_t nsize; /* new size */ 1260 Vmdisc_t* disc; /* discipline structure */ 1261 #endif 1262 { 1263 #undef _done_sbrkmem 1264 1265 #if !defined(_done_sbrkmem) && defined(_mem_win32) 1266 #define _done_sbrkmem 1 1267 NOTUSED(vm); 1268 NOTUSED(disc); 1269 if(csize == 0) 1270 return (Void_t*)VirtualAlloc(0,nsize,MEM_COMMIT,PAGE_READWRITE); 1271 else if(nsize == 0) 1272 return VirtualFree((LPVOID)caddr,0,MEM_RELEASE) ? caddr : NIL(Void_t*); 1273 else return NIL(Void_t*); 1274 #endif /* MUST_WIN32 */ 1275 1276 #if !defined(_done_sbrkmem) && (_mem_sbrk || _mem_mmap_zero || _mem_mmap_anon) 1277 #define _done_sbrkmem 1 1278 Vmuchar_t *addr; 1279 #if _mem_mmap_zero 1280 Mmapdisc_t *mmdc = (Mmapdisc_t*)disc; 1281 #else 1282 NOTUSED(disc); 1283 #endif 1284 NOTUSED(vm); 1285 1286 if(csize == 0) /* allocating new memory */ 1287 { 1288 #if ( _mem_sbrk || _mem_mmap_anon || _mem_mmap_zero ) 1289 #define ALTERNATES VM_primary 1290 #endif 1291 #if _mem_sbrk /* try using sbrk() and brk() */ 1292 #if ALTERNATES 1293 if (_Vmassert & ALTERNATES) 1294 #undef ALTERNATES 1295 #define ALTERNATES VM_secondary 1296 #endif 1297 { 1298 addr = (Vmuchar_t*)sbrk(0); /* old break value */ 1299 if(addr && addr != (Vmuchar_t*)BRK_FAILED ) 1300 if(brk(addr+nsize) == 0 ) 1301 return addr; 1302 } 1303 #endif /* _mem_sbrk */ 1304 1305 #if _mem_mmap_anon /* anonymous mmap */ 1306 #if ALTERNATES 1307 if (_Vmassert & ALTERNATES) 1308 #undef ALTERNATES 1309 #define ALTERNATES VM_secondary 1310 #endif 1311 { 1312 addr = (Vmuchar_t*)mmap(0, nsize, PROT_READ|PROT_WRITE, 1313 MAP_ANON|MAP_PRIVATE, -1, 0); 1314 if(addr && addr != (Vmuchar_t*)MAP_FAILED) 1315 return addr; 1316 } 1317 #endif /* _mem_mmap_anon */ 1318 1319 #if _mem_mmap_zero /* mmap from /dev/zero */ 1320 #if ALTERNATES 1321 if (_Vmassert & ALTERNATES) 1322 #undef ALTERNATES 1323 #define ALTERNATES VM_secondary 1324 #endif 1325 { 1326 if(mmdc->fd < 0) 1327 { int fd; 1328 if(mmdc->fd != -1) 1329 return NIL(Void_t*); 1330 if((fd = open("/dev/zero", O_RDONLY)) < 0 ) 1331 { mmdc->fd = -2; 1332 return NIL(Void_t*); 1333 } 1334 if(fd >= OPEN_PRIVATE || (mmdc->fd = dup2(fd,OPEN_PRIVATE)) < 0 ) 1335 mmdc->fd = fd; 1336 else close(fd); 1337 #ifdef FD_CLOEXEC 1338 fcntl(mmdc->fd, F_SETFD, FD_CLOEXEC); 1339 #endif 1340 } 1341 addr = (Vmuchar_t*)mmap(0, nsize, PROT_READ|PROT_WRITE, 1342 MAP_PRIVATE, mmdc->fd, mmdc->offset); 1343 if(addr && addr != (Vmuchar_t*)MAP_FAILED) 1344 { mmdc->offset += nsize; 1345 return addr; 1346 } 1347 } 1348 #endif /* _mem_mmap_zero */ 1349 1350 return NIL(Void_t*); 1351 } 1352 else 1353 { addr = caddr; /* in case !_mem_sbrk */ 1354 #if ( _mem_sbrk || _mem_mmap_anon || _mem_mmap_zero ) 1355 #undef ALTERNATES 1356 #define ALTERNATES VM_primary 1357 #endif 1358 #if _mem_sbrk 1359 #if ALTERNATES 1360 if (_Vmassert & ALTERNATES) 1361 #undef ALTERNATES 1362 #define ALTERNATES VM_secondary 1363 #endif 1364 { 1365 addr = (Vmuchar_t*)sbrk(0); 1366 if(!addr || addr == (Vmuchar_t*)BRK_FAILED) 1367 addr = caddr; 1368 else if(((Vmuchar_t*)caddr+csize) == addr) /* in sbrk-space */ 1369 { if(nsize > csize) 1370 addr += nsize-csize; 1371 else addr -= csize-nsize; 1372 return brk(addr) == 0 ? caddr : NIL(Void_t*); 1373 } 1374 } 1375 #endif /* _mem_sbrk */ 1376 1377 #if _mem_mmap_zero || _mem_mmap_anon 1378 #if ALTERNATES 1379 if (_Vmassert & ALTERNATES) 1380 #undef ALTERNATES 1381 #define ALTERNATES VM_secondary 1382 #endif 1383 { 1384 if(((Vmuchar_t*)caddr+csize) > addr) /* in mmap-space */ 1385 if(nsize == 0 && munmap(caddr,csize) == 0) 1386 return caddr; 1387 } 1388 #endif /* _mem_mmap_zero || _mem_mmap_anon */ 1389 1390 return NIL(Void_t*); 1391 } 1392 #endif /*_done_sbrkmem*/ 1393 1394 #if !_done_sbrkmem /* use native malloc/free as a last resource */ 1395 /**/ASSERT(_std_malloc); /* _std_malloc should be well-defined */ 1396 NOTUSED(vm); 1397 NOTUSED(disc); 1398 if(csize == 0) 1399 return (Void_t*)malloc(nsize); 1400 else if(nsize == 0) 1401 { free(caddr); 1402 return caddr; 1403 } 1404 else return NIL(Void_t*); 1405 #endif /* _done_sbrkmem */ 1406 } 1407 1408 #if _mem_mmap_zero 1409 static Mmapdisc_t _Vmdcsbrk = { { sbrkmem, NIL(Vmexcept_f), 64*1024 }, -1, 0 }; 1410 #else 1411 static Vmdisc_t _Vmdcsbrk = { sbrkmem, NIL(Vmexcept_f), 0 }; 1412 #endif 1413 1414 static Vmethod_t _Vmbest = 1415 { 1416 bestalloc, 1417 bestresize, 1418 bestfree, 1419 bestaddr, 1420 bestsize, 1421 bestcompact, 1422 bestalign, 1423 VM_MTBEST 1424 }; 1425 1426 /* The heap region */ 1427 static Vmdata_t _Vmdata = 1428 { 1429 VM_MTBEST|VM_TRUST, /* mode */ 1430 0, /* incr */ 1431 0, /* pool */ 1432 NIL(Seg_t*), /* seg */ 1433 NIL(Block_t*), /* free */ 1434 NIL(Block_t*), /* wild */ 1435 NIL(Block_t*), /* root */ 1436 }; 1437 Vmalloc_t _Vmheap = 1438 { 1439 { bestalloc, 1440 bestresize, 1441 bestfree, 1442 bestaddr, 1443 bestsize, 1444 bestcompact, 1445 bestalign, 1446 VM_MTBEST 1447 }, 1448 NIL(char*), /* file */ 1449 0, /* line */ 1450 0, /* func */ 1451 (Vmdisc_t*)(&_Vmdcsbrk), /* disc */ 1452 &_Vmdata, /* data */ 1453 NIL(Vmalloc_t*) /* next */ 1454 }; 1455 1456 __DEFINE__(Vmalloc_t*, Vmheap, &_Vmheap); 1457 __DEFINE__(Vmalloc_t*, Vmregion, &_Vmheap); 1458 __DEFINE__(Vmethod_t*, Vmbest, &_Vmbest); 1459 __DEFINE__(Vmdisc_t*, Vmdcsbrk, (Vmdisc_t*)(&_Vmdcsbrk) ); 1460 1461 #ifdef NoF 1462 NoF(vmbest) 1463 #endif 1464 1465 #endif 1466