1 /***********************************************************************
2 * *
3 * This software is part of the ast package *
4 * Copyright (c) 1985-2010 AT&T Intellectual Property *
5 * and is licensed under the *
6 * Common Public License, Version 1.0 *
7 * by AT&T Intellectual Property *
8 * *
9 * A copy of the License is available at *
10 * http://www.opensource.org/licenses/cpl1.0.txt *
11 * (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9) *
12 * *
13 * Information and Software Systems Research *
14 * AT&T Research *
15 * Florham Park NJ *
16 * *
17 * Glenn Fowler <gsf@research.att.com> *
18 * David Korn <dgk@research.att.com> *
19 * Phong Vo <kpv@research.att.com> *
20 * *
21 ***********************************************************************/
22 #if defined(_UWIN) && defined(_BLD_ast)
23
24 void _STUB_vmbest(){}
25
26 #else
27
28 #include "vmhdr.h"
29
30 /* Best-fit allocation method. This is based on a best-fit strategy
31 ** using a splay tree for storage of lists of free blocks of the same
32 ** size. Recent free blocks may be cached for fast reuse.
33 **
34 ** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
35 */
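/* Block layout as manipulated below (the accessors SIZE, SEG, LINK,
** LEFT, RIGHT, SELF, LAST, etc. are defined in vmhdr.h): every block
** has a header carrying its segment pointer and its size, with the
** status bits BUSY, JUNK and PFREE packed into the size word. A free
** block additionally uses its body for the LINK/LEFT/RIGHT pointers
** of the free lists and free tree, and its last word is a SELF
** pointer back to the block so the following block can find it via
** LAST().
*/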
36
37 #ifdef DEBUG
38 static int N_free; /* # of free calls */
39 static int N_alloc; /* # of alloc calls */
40 static int N_resize; /* # of resize calls */
41 static int N_wild; /* # allocated from the wild block */
42 static int N_last; /* # allocated from last free block */
43 static int N_reclaim; /* # of bestreclaim calls */
44
45 #undef VM_TRUST /* always check for locking, etc. */
46 #define VM_TRUST 0
47 #endif /*DEBUG*/
48
49 #if _BLD_posix
50 #define logmsg(d,a ...) logsrc(d,__FILE__,__LINE__,a)
51
52 extern int logsrc(int, const char*, int, const char*, ...);
53 #endif /*_BLD_posix*/
54
55 #define COMPACT 8 /* factor to decide when to compact */
56
57 /* Check to see if a block is in the free tree */
58 #if __STD_C
59 static int vmintree(Block_t* node, Block_t* b)
60 #else
61 static int vmintree(node,b)
62 Block_t* node;
63 Block_t* b;
64 #endif
65 { Block_t* t;
66
67 for(t = node; t; t = LINK(t))
68 if(t == b)
69 return 1;
70 if(LEFT(node) && vmintree(LEFT(node),b))
71 return 1;
72 if(RIGHT(node) && vmintree(RIGHT(node),b))
73 return 1;
74 return 0;
75 }
76
77 #if __STD_C
78 static int vmonlist(Block_t* list, Block_t* b)
79 #else
80 static int vmonlist(list,b)
81 Block_t* list;
82 Block_t* b;
83 #endif
84 {
85 for(; list; list = LINK(list))
86 if(list == b)
87 return 1;
88 return 0;
89 }
90
91 /* Check to see if a block is known to be free */
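/* A coalesced free block lives in exactly one of three places: it is
** the wild block (vd->wild) bordering the top of the region, it sits
** on one of the TINY(vd)[] lists when its size is below MAXTINY, or
** it is in the size-ordered free tree rooted at vd->root.
*/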
92 #if __STD_C
93 static int vmisfree(Vmdata_t* vd, Block_t* b)
94 #else
95 static int vmisfree(vd,b)
96 Vmdata_t* vd;
97 Block_t* b;
98 #endif
99 {
100 if(SIZE(b) & (BUSY|JUNK|PFREE))
101 return 0;
102
103 if(b == vd->wild)
104 return 1;
105
106 if(SIZE(b) < MAXTINY)
107 return vmonlist(TINY(vd)[INDEX(SIZE(b))], b);
108
109 if(vd->root)
110 return vmintree(vd->root, b);
111
112 return 0;
113 }
114
115 /* Check to see if a block is known to be junked */
116 #if __STD_C
117 static int vmisjunk(Vmdata_t* vd, Block_t* b)
118 #else
119 static int vmisjunk(vd,b)
120 Vmdata_t* vd;
121 Block_t* b;
122 #endif
123 {
124 Block_t* t;
125
126 if((SIZE(b)&BUSY) == 0 || (SIZE(b)&JUNK) == 0)
127 return 0;
128
129 if(b == vd->free) /* recently freed */
130 return 1;
131
132 /* check the list that b is supposed to be in */
133 for(t = CACHE(vd)[C_INDEX(SIZE(b))]; t; t = LINK(t))
134 if(t == b)
135 return 1;
136
137 /* on occasion, b may be put onto the catch-all list */
138 if(C_INDEX(SIZE(b)) < S_CACHE)
139 for(t = CACHE(vd)[S_CACHE]; t; t = LINK(t))
140 if(t == b)
141 return 1;
142
143 return 0;
144 }
145
146 /* check to see if the free tree is in good shape */
147 #if __STD_C
148 static int vmchktree(Block_t* node)
149 #else
150 static int vmchktree(node)
151 Block_t* node;
152 #endif
153 { Block_t* t;
154
155 if(SIZE(node) & BITS)
156 { /**/ASSERT(0); return -1; }
157
158 for(t = LINK(node); t; t = LINK(t))
159 if(SIZE(t) != SIZE(node))
160 { /**/ASSERT(0); return -1; }
161
162 if((t = LEFT(node)) )
163 { if(SIZE(t) >= SIZE(node) )
164 { /**/ASSERT(0); return -1; }
165 else return vmchktree(t);
166 }
167 if((t = RIGHT(node)) )
168 { if(SIZE(t) <= SIZE(node) )
169 { /**/ASSERT(0); return -1; }
170 else return vmchktree(t);
171 }
172
173 return 0;
174 }
175
176 #if __STD_C
177 int _vmbestcheck(Vmdata_t* vd, Block_t* freeb)
178 #else
179 int _vmbestcheck(vd, freeb)
180 Vmdata_t* vd;
181 Block_t* freeb; /* known to be free but not on any free list */
182 #endif
183 {
184 reg Seg_t *seg;
185 reg Block_t *b, *endb, *nextb;
186 int rv = 0;
187
188 if(!CHECK())
189 return 0;
190
191 /* make sure the free tree is still in shape */
192 if(vd->root && vmchktree(vd->root) < 0 )
193 { rv = -1; /**/ASSERT(0); }
194
195 for(seg = vd->seg; seg && rv == 0; seg = seg->next)
196 { b = SEGBLOCK(seg);
197 endb = (Block_t*)(seg->baddr - sizeof(Head_t));
198 for(; b < endb && rv == 0; b = nextb)
199 { nextb = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) );
200
201 if(!ISBUSY(SIZE(b)) ) /* a completely free block */
202 { /* there should be no marked bits of any type */
203 if(SIZE(b) & (BUSY|JUNK|PFREE) )
204 { rv = -1; /**/ASSERT(0); }
205
206 /* next block must be busy and marked PFREE */
207 if(!ISBUSY(SIZE(nextb)) || !ISPFREE(SIZE(nextb)) )
208 { rv = -1; /**/ASSERT(0); }
209
210 /* must have a self-reference pointer */
211 if(*SELF(b) != b)
212 { rv = -1; /**/ASSERT(0); }
213
214 /* segment pointer should be well-defined */
215 if(!TINIEST(b) && SEG(b) != seg)
216 { rv = -1; /**/ASSERT(0); }
217
218 /* must be on a free list */
219 if(b != freeb && !vmisfree(vd, b) )
220 { rv = -1; /**/ASSERT(0); }
221 }
222 else
223 { /* segment pointer should be well-defined */
224 if(SEG(b) != seg)
225 { rv = -1; /**/ASSERT(0); }
226
227 /* next block should not be marked PFREE */
228 if(ISPFREE(SIZE(nextb)) )
229 { rv = -1; /**/ASSERT(0); }
230
231 /* if PFREE, last block should be free */
232 if(ISPFREE(SIZE(b)) && LAST(b) != freeb &&
233 !vmisfree(vd, LAST(b)) )
234 { rv = -1; /**/ASSERT(0); }
235
236 /* if free but unreclaimed, should be junk */
237 if(ISJUNK(SIZE(b)) && !vmisjunk(vd, b))
238 { rv = -1; /**/ASSERT(0); }
239 }
240 }
241 }
242
243 return rv;
244 }
245
246 /* Tree rotation functions */
247 #define RROTATE(x,y) (LEFT(x) = RIGHT(y), RIGHT(y) = (x), (x) = (y))
248 #define LROTATE(x,y) (RIGHT(x) = LEFT(y), LEFT(y) = (x), (x) = (y))
249 #define RLINK(s,x) ((s) = LEFT(s) = (x))
250 #define LLINK(s,x) ((s) = RIGHT(s) = (x))
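/* RROTATE(x,y) assumes y == LEFT(x) and performs a right rotation,
** leaving y in x's place:
**
**          x                y
**         / \              / \
**        y   C    ==>     A   x
**       / \                  / \
**      A   B                B   C
**
** LROTATE is the mirror image. RLINK/LLINK hang the current root onto
** the pending right/left subtrees (anchored at a local link node)
** during the top-down splay performed by bestsearch() below.
*/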
251
252 /* Find and delete a suitable element in the free tree. */
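/* bestsearch() removes and returns a block of exactly the requested
** size if one exists; otherwise it takes the smallest block larger
** than the request from the tree. When a specific block is passed as
** "wanted" (as the REMOVE() macro does), the search is expected to
** find exactly that block. The wanted+TINYSIZE case extracts a
** tiniest block directly from its list without touching the tree.
*/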
253 #if __STD_C
254 static Block_t* bestsearch(Vmdata_t* vd, reg size_t size, Block_t* wanted)
255 #else
256 static Block_t* bestsearch(vd, size, wanted)
257 Vmdata_t* vd;
258 reg size_t size;
259 Block_t* wanted;
260 #endif
261 {
262 reg size_t s;
263 reg Block_t *t, *root, *l, *r;
264 Block_t link;
265
266 /* extracting a tiniest block from its list */
267 if((root = wanted) && size == TINYSIZE)
268 { reg Seg_t* seg;
269
270 l = TLEFT(root);
271 if((r = LINK(root)) )
272 TLEFT(r) = l;
273 if(l)
274 LINK(l) = r;
275 else TINY(vd)[0] = r;
276
277 seg = vd->seg;
278 if(!seg->next)
279 SEG(root) = seg;
280 else for(;; seg = seg->next)
281 { if((Vmuchar_t*)root > (Vmuchar_t*)seg->addr &&
282 (Vmuchar_t*)root < seg->baddr)
283 { SEG(root) = seg;
284 break;
285 }
286 }
287
288 return root;
289 }
290
291 /**/ASSERT(!vd->root || vmchktree(vd->root) == 0);
292
293 /* find the right one to delete */
294 l = r = &link;
295 if((root = vd->root) ) do
296 { /**/ ASSERT(!ISBITS(size) && !ISBITS(SIZE(root)));
297 if(size == (s = SIZE(root)) )
298 break;
299 if(size < s)
300 { if((t = LEFT(root)) )
301 { if(size <= (s = SIZE(t)) )
302 { RROTATE(root,t);
303 if(size == s)
304 break;
305 t = LEFT(root);
306 }
307 else
308 { LLINK(l,t);
309 t = RIGHT(t);
310 }
311 }
312 RLINK(r,root);
313 }
314 else
315 { if((t = RIGHT(root)) )
316 { if(size >= (s = SIZE(t)) )
317 { LROTATE(root,t);
318 if(size == s)
319 break;
320 t = RIGHT(root);
321 }
322 else
323 { RLINK(r,t);
324 t = LEFT(t);
325 }
326 }
327 LLINK(l,root);
328 }
329 /**/ ASSERT(root != t);
330 } while((root = t) );
331
332 if(root) /* found it, now isolate it */
333 { RIGHT(l) = LEFT(root);
334 LEFT(r) = RIGHT(root);
335 }
336 else /* nothing exactly fit */
337 { LEFT(r) = NIL(Block_t*);
338 RIGHT(l) = NIL(Block_t*);
339
340 /* grab the least one from the right tree */
341 if((root = LEFT(&link)) )
342 { while((t = LEFT(root)) )
343 RROTATE(root,t);
344 LEFT(&link) = RIGHT(root);
345 }
346 }
347
348 if(root && (r = LINK(root)) )
349 { /* head of a linked list, use next one for root */
350 LEFT(r) = RIGHT(&link);
351 RIGHT(r) = LEFT(&link);
352 }
353 else if(!(r = LEFT(&link)) )
354 r = RIGHT(&link);
355 else /* graft left tree to right tree */
356 { while((t = LEFT(r)) )
357 RROTATE(r,t);
358 LEFT(r) = RIGHT(&link);
359 }
360 vd->root = r; /**/ASSERT(!r || !ISBITS(SIZE(r)));
361
362 /**/ASSERT(!vd->root || vmchktree(vd->root) == 0);
363 /**/ASSERT(!wanted || wanted == root);
364
365 return root;
366 }
367
368 /* Reclaim all delayed free blocks into the free tree */
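/* Junked blocks (freed but not yet coalesced) sit on the CACHE(vd)[]
** lists indexed by C_INDEX(size), with CACHE(vd)[S_CACHE] serving as
** the catch-all list and vd->free holding the most recently freed
** block. bestreclaim() flushes every cache list from S_CACHE down to
** index c, coalescing each block with its free neighbors and putting
** the result back on a tiny list, into the free tree, or in vd->wild.
** If "wanted" is given, it is coalesced but left out of the free
** structures because the caller will consume it immediately.
*/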
369 #if __STD_C
370 static int bestreclaim(reg Vmdata_t* vd, Block_t* wanted, int c)
371 #else
372 static int bestreclaim(vd, wanted, c)
373 reg Vmdata_t* vd;
374 Block_t* wanted;
375 int c;
376 #endif
377 {
378 reg size_t size, s;
379 reg Block_t *fp, *np, *t, *list;
380 reg int n, saw_wanted;
381 reg Seg_t *seg;
382
383 /**/COUNT(N_reclaim);
384 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
385
386 if((fp = vd->free) )
387 { LINK(fp) = CACHE(vd)[S_CACHE]; CACHE(vd)[S_CACHE] = fp;
388 vd->free = NIL(Block_t*);
389 }
390
391 saw_wanted = wanted ? 0 : 1;
392 for(n = S_CACHE; n >= c; --n)
393 { list = CACHE(vd)[n]; CACHE(vd)[n] = NIL(Block_t*);
394 while((fp = list) )
395 { /* Note that below here we allow ISJUNK blocks to be
396 ** forward-merged even though they are not removed from
397 ** the list immediately. In this way, the list is
398 ** scanned only once. It works because the LINK and SIZE
399 ** fields are not destroyed during the merging. This can
400 ** be seen by observing that a tiniest block has a 2-word
401 ** header and a 2-word body. Merging a tiniest block
402 ** (1seg) and the next block (2seg) looks like this:
403 ** 1seg size link left 2seg size link left ....
404 ** 1seg size link left rite xxxx xxxx .... self
405 ** After the merge, the 2seg word is replaced by the RIGHT
406 ** pointer of the new block and somewhere beyond the
407 ** two xxxx fields, the SELF pointer will replace some
408 ** other word. The important part is that the two xxxx
409 ** fields are kept intact.
410 */
411 list = LINK(list); /**/ASSERT(!vmonlist(list,fp));
412
413 size = SIZE(fp);
414 if(!ISJUNK(size)) /* already done */
415 continue;
416
417 if(_Vmassert & VM_region)
418 { /* see if this address is from region */
419 for(seg = vd->seg; seg; seg = seg->next)
420 if(fp >= SEGBLOCK(seg) && fp < (Block_t*)seg->baddr )
421 break;
422 if(!seg) /* must be a bug in application code! */
423 { /**/ ASSERT(seg != NIL(Seg_t*));
424 continue;
425 }
426 }
427
428 if(ISPFREE(size)) /* backward merge */
429 { fp = LAST(fp);
430 #if _BLD_posix
431 if (fp < (Block_t*)0x00120000)
432 {
433 logmsg(0, "bestreclaim fp=%p", fp);
434 ASSERT(!fp);
435 }
436 #endif
437 s = SIZE(fp); /**/ASSERT(!(s&BITS));
438 REMOVE(vd,fp,INDEX(s),t,bestsearch);
439 size = (size&~BITS) + s + sizeof(Head_t);
440 }
441 else size &= ~BITS;
442
443 for(;;) /* forward merge */
444 { np = (Block_t*)((Vmuchar_t*)fp+size+sizeof(Head_t));
445 #if _BLD_posix
446 if (np < (Block_t*)0x00120000)
447 {
448 logmsg(0, "bestreclaim np=%p", np);
449 ASSERT(!np);
450 }
451 #endif
452 s = SIZE(np); /**/ASSERT(s > 0);
453 if(!ISBUSY(s))
454 { /**/ASSERT((s&BITS) == 0);
455 if(np == vd->wild)
456 vd->wild = NIL(Block_t*);
457 else REMOVE(vd,np,INDEX(s),t,bestsearch);
458 }
459 else if(ISJUNK(s))
460 { /* reclaim any touched junk list */
461 if((int)C_INDEX(s) < c)
462 c = C_INDEX(s);
463 SIZE(np) = 0;
464 CLRBITS(s);
465 }
466 else break;
467 size += s + sizeof(Head_t);
468 }
469 SIZE(fp) = size;
470
471 /* tell next block that this one is free */
472 np = NEXT(fp); /**/ASSERT(ISBUSY(SIZE(np)));
473 /**/ASSERT(!ISJUNK(SIZE(np)));
474 SETPFREE(SIZE(np));
475 *(SELF(fp)) = fp;
476
477 if(fp == wanted) /* to be consumed soon */
478 { /**/ASSERT(!saw_wanted); /* should be seen just once */
479 saw_wanted = 1;
480 continue;
481 }
482
483 /* wilderness preservation */
484 if(np->body.data >= vd->seg->baddr)
485 { vd->wild = fp;
486 continue;
487 }
488
489 /* tiny block goes to tiny list */
490 if(size < MAXTINY)
491 { s = INDEX(size);
492 np = LINK(fp) = TINY(vd)[s];
493 if(s == 0) /* TINIEST block */
494 { if(np)
495 TLEFT(np) = fp;
496 TLEFT(fp) = NIL(Block_t*);
497 }
498 else
499 { if(np)
500 LEFT(np) = fp;
501 LEFT(fp) = NIL(Block_t*);
502 SETLINK(fp);
503 }
504 TINY(vd)[s] = fp;
505 continue;
506 }
507
508 LEFT(fp) = RIGHT(fp) = LINK(fp) = NIL(Block_t*);
509 if(!(np = vd->root) ) /* inserting into an empty tree */
510 { vd->root = fp;
511 continue;
512 }
513
514 size = SIZE(fp);
515 while(1) /* leaf insertion */
516 { /**/ASSERT(np != fp);
517 if((s = SIZE(np)) > size)
518 { if((t = LEFT(np)) )
519 { /**/ ASSERT(np != t);
520 np = t;
521 }
522 else
523 { LEFT(np) = fp;
524 break;
525 }
526 }
527 else if(s < size)
528 { if((t = RIGHT(np)) )
529 { /**/ ASSERT(np != t);
530 np = t;
531 }
532 else
533 { RIGHT(np) = fp;
534 break;
535 }
536 }
537 else /* s == size */
538 { if((t = LINK(np)) )
539 { LINK(fp) = t;
540 LEFT(t) = fp;
541 }
542 LINK(np) = fp;
543 LEFT(fp) = np;
544 SETLINK(fp);
545 break;
546 }
547 }
548 }
549 }
550
551 /**/ASSERT(!wanted || saw_wanted == 1);
552 /**/ASSERT(_vmbestcheck(vd, wanted) == 0);
553 return saw_wanted;
554 }
555
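/* Compaction: release unused memory at the tail of each segment back
** to the underlying system via _Vmtruncate, and shrink the allocation
** increment when the wild block has grown much larger than it
** (COMPACT is the deciding factor).
*/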
556 #if __STD_C
557 static int bestcompact(Vmalloc_t* vm)
558 #else
559 static int bestcompact(vm)
560 Vmalloc_t* vm;
561 #endif
562 {
563 reg Seg_t *seg, *next;
564 reg Block_t *bp, *t;
565 reg size_t size, segsize, round;
566 reg int local, inuse;
567 reg Vmdata_t* vd = vm->data;
568
569 SETINUSE(vd, inuse);
570
571 if(!(local = vd->mode&VM_TRUST) )
572 { GETLOCAL(vd,local);
573 if(ISLOCK(vd,local))
574 { CLRINUSE(vd, inuse);
575 return -1;
576 }
577 SETLOCK(vd,local);
578 }
579
580 bestreclaim(vd,NIL(Block_t*),0);
581
582 for(seg = vd->seg; seg; seg = next)
583 { next = seg->next;
584
585 bp = BLOCK(seg->baddr);
586 if(!ISPFREE(SIZE(bp)) )
587 continue;
588
589 bp = LAST(bp); /**/ASSERT(vmisfree(vd,bp));
590 size = SIZE(bp);
591 if(bp == vd->wild)
592 { /* During large block allocations, _Vmextend might
593 ** have enlarged the rounding factor. Reducing it a
594 ** bit helps avoid grabbing large amounts of raw memory.
595 */
596 if((round = vm->disc->round) == 0)
597 round = _Vmpagesize;
598 if(size > COMPACT*vd->incr && vd->incr > round)
599 vd->incr /= 2;
600
601 /* for the bottom segment, we don't necessarily want
602 ** to return raw memory too early. vd->pool has an
603 ** approximation of the average size of recently freed
604 ** blocks. If this is large, the application is managing
605 ** large blocks so we throttle back memory chopping
606 ** to avoid thrashing the underlying memory system.
607 */
608 if(size <= COMPACT*vd->incr || size <= COMPACT*vd->pool)
609 continue;
610
611 vd->wild = NIL(Block_t*);
612 vd->pool = 0;
613 }
614 else REMOVE(vd,bp,INDEX(size),t,bestsearch);
615 CLRPFREE(SIZE(NEXT(bp)));
616
617 if(size < (segsize = seg->size))
618 size += sizeof(Head_t);
619
620 if((size = (*_Vmtruncate)(vm,seg,size,0)) > 0)
621 { if(size >= segsize) /* entire segment deleted */
622 continue;
623 /**/ASSERT(SEG(BLOCK(seg->baddr)) == seg);
624
625 if((size = (seg->baddr - ((Vmuchar_t*)bp) - sizeof(Head_t))) > 0)
626 SIZE(bp) = size - sizeof(Head_t);
627 else bp = NIL(Block_t*);
628 }
629
630 if(bp)
631 { /**/ ASSERT(SIZE(bp) >= BODYSIZE);
632 /**/ ASSERT(SEGWILD(bp));
633 /**/ ASSERT(!vd->root || !vmintree(vd->root,bp));
634 SIZE(bp) |= BUSY|JUNK;
635 LINK(bp) = CACHE(vd)[C_INDEX(SIZE(bp))];
636 CACHE(vd)[C_INDEX(SIZE(bp))] = bp;
637 }
638 }
639
640 if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST)
641 (*_Vmtrace)(vm, (Vmuchar_t*)0, (Vmuchar_t*)0, 0, 0);
642
643 CLRLOCK(vd,local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
644
645 CLRINUSE(vd, inuse);
646 return 0;
647 }
648
649 #if __STD_C
650 static Void_t* bestalloc(Vmalloc_t* vm, reg size_t size )
651 #else
652 static Void_t* bestalloc(vm,size)
653 Vmalloc_t* vm; /* region allocating from */
654 reg size_t size; /* desired block size */
655 #endif
656 {
657 reg Vmdata_t* vd = vm->data;
658 reg size_t s;
659 reg int n;
660 reg Block_t *tp, *np;
661 reg int local, inuse;
662 size_t orgsize = 0;
663
664 VMOPTIONS();
665
666 /**/COUNT(N_alloc);
667
668 SETINUSE(vd, inuse);
669
670 if(!(local = vd->mode&VM_TRUST))
671 { GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
672 if(ISLOCK(vd,local) )
673 { CLRINUSE(vd, inuse);
674 return NIL(Void_t*);
675 }
676 SETLOCK(vd,local);
677 orgsize = size;
678 }
679
680 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
681 /**/ ASSERT(HEADSIZE == sizeof(Head_t));
682 /**/ ASSERT(BODYSIZE == sizeof(Body_t));
683 /**/ ASSERT((ALIGN%(BITS+1)) == 0 );
684 /**/ ASSERT((sizeof(Head_t)%ALIGN) == 0 );
685 /**/ ASSERT((sizeof(Body_t)%ALIGN) == 0 );
686 /**/ ASSERT((BODYSIZE%ALIGN) == 0 );
687 /**/ ASSERT(sizeof(Block_t) == (sizeof(Body_t)+sizeof(Head_t)) );
688
689 /* for the ANSI requirement that malloc(0) return a non-NULL pointer */
690 size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
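/* For illustration, assuming ALIGN and BODYSIZE are both 16 on this
** platform (their actual values come from vmhdr.h) and that ROUND()
** rounds up to a multiple of its second argument: a request of 1 byte
** becomes 16, a request of 17 becomes 32, and a request of 32 stays 32.
*/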
691
692 if((tp = vd->free) ) /* reuse last free piece if appropriate */
693 { /**/ASSERT(ISBUSY(SIZE(tp)) );
694 /**/ASSERT(ISJUNK(SIZE(tp)) );
695 /**/COUNT(N_last);
696
697 vd->free = NIL(Block_t*);
698 if((s = SIZE(tp)) >= size && s < (size << 1) )
699 { if(s >= size + (sizeof(Head_t)+BODYSIZE) )
700 { SIZE(tp) = size;
701 np = NEXT(tp);
702 SEG(np) = SEG(tp);
703 SIZE(np) = ((s&~BITS) - (size+sizeof(Head_t)))|JUNK|BUSY;
704 vd->free = np;
705 SIZE(tp) |= s&BITS;
706 }
707 CLRJUNK(SIZE(tp));
708 goto done;
709 }
710
711 LINK(tp) = CACHE(vd)[S_CACHE];
712 CACHE(vd)[S_CACHE] = tp;
713 }
714
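/* Allocation strategy: flush progressively more of the delayed-free
** caches, retrying a best-fit search of the free tree after each step;
** then fall back to carving the wild block; finally compact and ask
** the discipline for new memory via _Vmextend. VM_AGAIN grants one
** extra pass through the loop after a failed extension.
*/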
715 for(;;)
716 { for(n = S_CACHE; n >= 0; --n) /* best-fit except for coalescing */
717 { bestreclaim(vd,NIL(Block_t*),n);
718 if(vd->root && (tp = bestsearch(vd,size,NIL(Block_t*))) )
719 goto got_block;
720 }
721
722 /**/ASSERT(!vd->free);
723 if((tp = vd->wild) && SIZE(tp) >= size)
724 { /**/COUNT(N_wild);
725 vd->wild = NIL(Block_t*);
726 goto got_block;
727 }
728
729 KPVCOMPACT(vm,bestcompact);
730 if((tp = (*_Vmextend)(vm,size,bestsearch)) )
731 goto got_block;
732 else if(vd->mode&VM_AGAIN)
733 vd->mode &= ~VM_AGAIN;
734 else
735 { CLRLOCK(vd,local);
736 CLRINUSE(vd, inuse);
737 return NIL(Void_t*);
738 }
739 }
740
741 got_block:
742 /**/ ASSERT(!ISBITS(SIZE(tp)));
743 /**/ ASSERT(SIZE(tp) >= size);
744 /**/ ASSERT((SIZE(tp)%ALIGN) == 0);
745 /**/ ASSERT(!vd->free);
746
747 /* tell next block that we are no longer a free block */
748 CLRPFREE(SIZE(NEXT(tp))); /**/ ASSERT(ISBUSY(SIZE(NEXT(tp))));
749
750 if((s = SIZE(tp)-size) >= (sizeof(Head_t)+BODYSIZE) )
751 { SIZE(tp) = size;
752
753 np = NEXT(tp);
754 SEG(np) = SEG(tp);
755 SIZE(np) = (s - sizeof(Head_t)) | BUSY|JUNK;
756
757 if(VMWILD(vd,np))
758 { SIZE(np) &= ~BITS;
759 *SELF(np) = np; /**/ASSERT(ISBUSY(SIZE(NEXT(np))));
760 SETPFREE(SIZE(NEXT(np)));
761 vd->wild = np;
762 }
763 else vd->free = np;
764 }
765
766 SETBUSY(SIZE(tp));
767
768 done:
769 if(!local && (vd->mode&VM_TRACE) && _Vmtrace && VMETHOD(vd) == VM_MTBEST)
770 (*_Vmtrace)(vm,NIL(Vmuchar_t*),(Vmuchar_t*)DATA(tp),orgsize,0);
771
772 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
773 CLRLOCK(vd,local);
774 ANNOUNCE(local, vm, VM_ALLOC, DATA(tp), vm->disc);
775
776 CLRINUSE(vd, inuse);
777 return DATA(tp);
778 }
779
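/* Return the offset of addr within a busy, non-junked block of this
** region, or -1 if the address does not belong to a live allocation.
*/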
780 #if __STD_C
781 static long bestaddr(Vmalloc_t* vm, Void_t* addr )
782 #else
783 static long bestaddr(vm, addr)
784 Vmalloc_t* vm; /* region allocating from */
785 Void_t* addr; /* address to check */
786 #endif
787 {
788 reg Seg_t* seg;
789 reg Block_t *b, *endb;
790 reg long offset;
791 reg Vmdata_t* vd = vm->data;
792 reg int local, inuse;
793
794 SETINUSE(vd, inuse);
795
796 if(!(local = vd->mode&VM_TRUST) )
797 { GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
798 if(ISLOCK(vd,local))
799 { CLRINUSE(vd, inuse);
800 return -1L;
801 }
802 SETLOCK(vd,local);
803 }
804
805 offset = -1L; b = endb = NIL(Block_t*);
806 for(seg = vd->seg; seg; seg = seg->next)
807 { b = SEGBLOCK(seg);
808 endb = (Block_t*)(seg->baddr - sizeof(Head_t));
809 if((Vmuchar_t*)addr > (Vmuchar_t*)b &&
810 (Vmuchar_t*)addr < (Vmuchar_t*)endb)
811 break;
812 }
813
814 if(local && !(vd->mode&VM_TRUST) ) /* from bestfree or bestresize */
815 { b = BLOCK(addr);
816 if(seg && SEG(b) == seg && ISBUSY(SIZE(b)) && !ISJUNK(SIZE(b)) )
817 offset = 0;
818 if(offset != 0 && vm->disc->exceptf)
819 (void)(*vm->disc->exceptf)(vm,VM_BADADDR,addr,vm->disc);
820 }
821 else if(seg)
822 { while(b < endb)
823 { reg Vmuchar_t* data = (Vmuchar_t*)DATA(b);
824 reg size_t size = SIZE(b)&~BITS;
825
826 if((Vmuchar_t*)addr >= data && (Vmuchar_t*)addr < data+size)
827 { if(ISJUNK(SIZE(b)) || !ISBUSY(SIZE(b)))
828 offset = -1L;
829 else offset = (Vmuchar_t*)addr - data;
830 goto done;
831 }
832
833 b = (Block_t*)((Vmuchar_t*)DATA(b) + size);
834 }
835 }
836
837 done:
838 CLRLOCK(vd,local);
839 CLRINUSE(vd, inuse);
840 return offset;
841 }
842
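/* Freeing is lazy: the block is only marked JUNK and pushed onto a
** cache list (or kept in vd->free); real coalescing is deferred to
** bestreclaim(). Freeing a large block triggers an immediate reclaim
** and possibly a compaction to limit fragmentation.
*/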
843 #if __STD_C
844 static int bestfree(Vmalloc_t* vm, Void_t* data )
845 #else
846 static int bestfree(vm, data )
847 Vmalloc_t* vm;
848 Void_t* data;
849 #endif
850 {
851 reg Vmdata_t* vd = vm->data;
852 reg Block_t *bp;
853 reg size_t s;
854 reg int local, inuse;
855
856 #ifdef DEBUG
857 if((local = (int)integralof(data)) >= 0 && local <= 0xf)
858 { int vmassert = _Vmassert;
859 _Vmassert = local ? local : vmassert ? vmassert : (VM_check|VM_abort);
860 _vmbestcheck(vd, NIL(Block_t*));
861 _Vmassert = local ? local : vmassert;
862 return 0;
863 }
864 #endif
865
866 if(!data) /* ANSI-ism */
867 return 0;
868
869 /**/COUNT(N_free);
870
871 SETINUSE(vd, inuse);
872
873 if(!(local = vd->mode&VM_TRUST) )
874 { GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
875 if(ISLOCK(vd,local) || KPVADDR(vm,data,bestaddr) != 0 )
876 { CLRINUSE(vd, inuse);
877 return -1;
878 }
879 SETLOCK(vd,local);
880 }
881
882 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
883 bp = BLOCK(data); s = SIZE(bp);
884
885 /* Keep an approximate average free block size.
886 ** This is used in bestcompact() to decide when to release
887 ** raw memory back to the underlying memory system.
888 */
889 vd->pool = (vd->pool + (s&~BITS))/2;
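/* vd->pool is a running average with weight 1/2: e.g., starting from
** 0 and freeing blocks of 64 and then 256 bytes leaves it at 32 and
** then 144.
*/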
890
891 if(ISBUSY(s) && !ISJUNK(s))
892 { SETJUNK(SIZE(bp));
893 if(s < MAXCACHE)
894 { /**/ASSERT(!vmonlist(CACHE(vd)[INDEX(s)], bp) );
895 LINK(bp) = CACHE(vd)[INDEX(s)];
896 CACHE(vd)[INDEX(s)] = bp;
897 }
898 else if(!vd->free)
899 vd->free = bp;
900 else
901 { /**/ASSERT(!vmonlist(CACHE(vd)[S_CACHE], bp) );
902 LINK(bp) = CACHE(vd)[S_CACHE];
903 CACHE(vd)[S_CACHE] = bp;
904 }
905
906 /* coalesce on freeing large blocks to avoid fragmentation */
907 if(SIZE(bp) >= 2*vd->incr)
908 { bestreclaim(vd,NIL(Block_t*),0);
909 if(vd->wild && SIZE(vd->wild) >= COMPACT*vd->incr)
910 KPVCOMPACT(vm,bestcompact);
911 }
912 }
913
914 if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST )
915 (*_Vmtrace)(vm,(Vmuchar_t*)data,NIL(Vmuchar_t*), (s&~BITS), 0);
916
917 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
918 CLRLOCK(vd,local);
919 ANNOUNCE(local, vm, VM_FREE, data, vm->disc);
920
921 CLRINUSE(vd, inuse);
922 return 0;
923 }
924
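/* Resize a block. To grow in place, the immediately following free or
** junked blocks are absorbed first; if the block is the last one of
** the bottom segment, the segment itself may be extended through the
** discipline's memoryf. Failing that, a new block is allocated:
** VM_RSMOVE permits relocation, VM_RSCOPY copies the old contents,
** and VM_RSZERO zero-fills any growth.
*/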
925 #if __STD_C
926 static Void_t* bestresize(Vmalloc_t* vm, Void_t* data, reg size_t size, int type)
927 #else
928 static Void_t* bestresize(vm,data,size,type)
929 Vmalloc_t* vm; /* region allocating from */
930 Void_t* data; /* old block of data */
931 reg size_t size; /* new size */
932 int type; /* !=0 to move, <0 for not copy */
933 #endif
934 {
935 reg Block_t *rp, *np, *t;
936 int local, inuse;
937 size_t s, bs, oldsize = 0, orgsize = 0;
938 Void_t *oldd, *orgdata = NIL(Void_t*);
939 Vmdata_t *vd = vm->data;
940
941 /**/COUNT(N_resize);
942
943 SETINUSE(vd, inuse);
944
945 if(!data)
946 { if((data = bestalloc(vm,size)) )
947 { oldsize = 0;
948 size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
949 }
950 goto done;
951 }
952 if(size == 0)
953 { (void)bestfree(vm,data);
954 CLRINUSE(vd, inuse);
955 return NIL(Void_t*);
956 }
957
958 if(!(local = vd->mode&VM_TRUST) )
959 { GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
960 if(ISLOCK(vd,local) || (!local && KPVADDR(vm,data,bestaddr) != 0 ) )
961 { CLRINUSE(vd, inuse);
962 return NIL(Void_t*);
963 }
964 SETLOCK(vd,local);
965
966 orgdata = data; /* for tracing */
967 orgsize = size;
968 }
969
970 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
971 size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
972 rp = BLOCK(data); /**/ASSERT(ISBUSY(SIZE(rp)) && !ISJUNK(SIZE(rp)));
973 oldsize = SIZE(rp); CLRBITS(oldsize);
974 if(oldsize < size)
975 { np = (Block_t*)((Vmuchar_t*)rp + oldsize + sizeof(Head_t));
976 do /* forward merge as much as possible */
977 { s = SIZE(np); /**/ASSERT(!ISPFREE(s));
978 if(np == vd->free)
979 { vd->free = NIL(Block_t*);
980 CLRBITS(s);
981 }
982 else if(ISJUNK(s) )
983 { if(!bestreclaim(vd,np,C_INDEX(s)) )
984 /**/ASSERT(0); /* oops: did not see np! */
985 s = SIZE(np); /**/ASSERT(s%ALIGN == 0);
986 }
987 else if(!ISBUSY(s) )
988 { if(np == vd->wild)
989 vd->wild = NIL(Block_t*);
990 else REMOVE(vd,np,INDEX(s),t,bestsearch);
991 }
992 else break;
993
994 SIZE(rp) += (s += sizeof(Head_t)); /**/ASSERT((s%ALIGN) == 0);
995 np = (Block_t*)((Vmuchar_t*)np + s);
996 CLRPFREE(SIZE(np));
997 } while(SIZE(rp) < size);
998
999 if(SIZE(rp) < size && size > vd->incr && SEGWILD(rp) )
1000 { reg Seg_t* seg;
1001
1002 s = (size - SIZE(rp)) + sizeof(Head_t); s = ROUND(s,vd->incr);
1003 seg = SEG(rp);
1004 if((*vm->disc->memoryf)(vm,seg->addr,seg->extent,seg->extent+s,
1005 vm->disc) == seg->addr )
1006 { SIZE(rp) += s;
1007 seg->extent += s;
1008 seg->size += s;
1009 seg->baddr += s;
1010 s = (SIZE(rp)&~BITS) + sizeof(Head_t);
1011 np = (Block_t*)((Vmuchar_t*)rp + s);
1012 SEG(np) = seg;
1013 SIZE(np) = BUSY;
1014 }
1015 }
1016 }
1017
1018 if((s = SIZE(rp)) >= (size + (BODYSIZE+sizeof(Head_t))) )
1019 { SIZE(rp) = size;
1020 np = NEXT(rp);
1021 SEG(np) = SEG(rp);
1022 SIZE(np) = (((s&~BITS)-size) - sizeof(Head_t))|BUSY|JUNK;
1023 CPYBITS(SIZE(rp),s);
1024 rp = np;
1025 goto do_free;
1026 }
1027 else if((bs = s&~BITS) < size)
1028 { if(!(type&(VM_RSMOVE|VM_RSCOPY)) )
1029 data = NIL(Void_t*); /* old data is not moveable */
1030 else
1031 { oldd = data;
1032 if((data = KPVALLOC(vm,size,bestalloc)) )
1033 { if(type&VM_RSCOPY)
1034 memcpy(data, oldd, bs);
1035
1036 do_free: /* reclaim these right away */
1037 SETJUNK(SIZE(rp));
1038 LINK(rp) = CACHE(vd)[S_CACHE];
1039 CACHE(vd)[S_CACHE] = rp;
1040 bestreclaim(vd, NIL(Block_t*), S_CACHE);
1041 }
1042 }
1043 }
1044
1045 if(!local && _Vmtrace && data && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST)
1046 (*_Vmtrace)(vm, (Vmuchar_t*)orgdata, (Vmuchar_t*)data, orgsize, 0);
1047
1048 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
1049 CLRLOCK(vd,local);
1050 ANNOUNCE(local, vm, VM_RESIZE, data, vm->disc);
1051
1052 done: if(data && (type&VM_RSZERO) && (size = SIZE(BLOCK(data))&~BITS) > oldsize )
1053 memset((Void_t*)((Vmuchar_t*)data + oldsize), 0, size-oldsize);
1054
1055 CLRINUSE(vd, inuse);
1056 return data;
1057 }
1058
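/* Return the usable size of the block whose data address is addr, or
** -1 if addr is not the start of a live allocation in this region.
*/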
1059 #if __STD_C
1060 static long bestsize(Vmalloc_t* vm, Void_t* addr )
1061 #else
1062 static long bestsize(vm, addr)
1063 Vmalloc_t* vm; /* region allocating from */
1064 Void_t* addr; /* address to check */
1065 #endif
1066 {
1067 reg Seg_t* seg;
1068 reg Block_t *b, *endb;
1069 reg long size;
1070 reg Vmdata_t* vd = vm->data;
1071 reg int inuse;
1072
1073 SETINUSE(vd, inuse);
1074
1075 if(!(vd->mode&VM_TRUST) )
1076 { if(ISLOCK(vd,0))
1077 { CLRINUSE(vd, inuse);
1078 return -1L;
1079 }
1080 SETLOCK(vd,0);
1081 }
1082
1083 size = -1L;
1084 for(seg = vd->seg; seg; seg = seg->next)
1085 { b = SEGBLOCK(seg);
1086 endb = (Block_t*)(seg->baddr - sizeof(Head_t));
1087 if((Vmuchar_t*)addr <= (Vmuchar_t*)b ||
1088 (Vmuchar_t*)addr >= (Vmuchar_t*)endb)
1089 continue;
1090 while(b < endb)
1091 { if(addr == DATA(b))
1092 { if(!ISBUSY(SIZE(b)) || ISJUNK(SIZE(b)) )
1093 size = -1L;
1094 else size = (long)SIZE(b)&~BITS;
1095 goto done;
1096 }
1097 else if((Vmuchar_t*)addr <= (Vmuchar_t*)b)
1098 break;
1099
1100 b = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) );
1101 }
1102 }
1103
1104 done:
1105 CLRLOCK(vd,0);
1106 CLRINUSE(vd, inuse);
1107 return size;
1108 }
1109
1110 #if __STD_C
1111 static Void_t* bestalign(Vmalloc_t* vm, size_t size, size_t align)
1112 #else
1113 static Void_t* bestalign(vm, size, align)
1114 Vmalloc_t* vm;
1115 size_t size;
1116 size_t align;
1117 #endif
1118 {
1119 reg Vmuchar_t *data;
1120 reg Block_t *tp, *np;
1121 reg Seg_t* seg;
1122 reg int local, inuse;
1123 reg size_t s, extra, orgsize = 0, orgalign = 0;
1124 reg Vmdata_t* vd = vm->data;
1125
1126 if(size <= 0 || align <= 0)
1127 return NIL(Void_t*);
1128
1129 SETINUSE(vd, inuse);
1130
1131 if(!(local = vd->mode&VM_TRUST) )
1132 { GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
1133 if(ISLOCK(vd,local) )
1134 { CLRINUSE(vd, inuse);
1135 return NIL(Void_t*);
1136 }
1137 SETLOCK(vd,local);
1138 orgsize = size;
1139 orgalign = align;
1140 }
1141
1142 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
1143 size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
1144 align = MULTIPLE(align,ALIGN);
1145
1146 /* hack so that dbalign() can store header data */
1147 if(VMETHOD(vd) != VM_MTDEBUG)
1148 extra = 0;
1149 else
1150 { extra = DB_HEAD;
1151 while(align < extra || (align - extra) < sizeof(Block_t))
1152 align *= 2;
1153 }
1154
1155 /* reclaim all free blocks now to avoid fragmentation */
1156 bestreclaim(vd,NIL(Block_t*),0);
1157
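/* Over-allocate size + 2*(align + sizeof(Head_t) + extra) bytes, bump
** the returned address up to the next multiple of align (offset by
** extra for the debug-method header), and hand the unused head and
** tail pieces to the junk cache so bestreclaim() can coalesce them.
*/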
1158 s = size + 2*(align+sizeof(Head_t)+extra);
1159 if(!(data = (Vmuchar_t*)KPVALLOC(vm,s,bestalloc)) )
1160 goto done;
1161
1162 tp = BLOCK(data);
1163 seg = SEG(tp);
1164
1165 /* get an aligned address that we can live with */
1166 if((s = (size_t)((VLONG(data)+extra)%align)) != 0)
1167 data += align-s; /**/ASSERT(((VLONG(data)+extra)%align) == 0);
1168
1169 if((np = BLOCK(data)) != tp ) /* need to free left part */
1170 { if(((Vmuchar_t*)np - (Vmuchar_t*)tp) < (ssize_t)(sizeof(Block_t)+extra) )
1171 { data += align;
1172 np = BLOCK(data);
1173 } /**/ASSERT(((VLONG(data)+extra)%align) == 0);
1174
1175 s = (Vmuchar_t*)np - (Vmuchar_t*)tp;
1176 SIZE(np) = ((SIZE(tp)&~BITS) - s)|BUSY;
1177 SEG(np) = seg;
1178
1179 SIZE(tp) = (s - sizeof(Head_t)) | (SIZE(tp)&BITS) | JUNK;
1180 /**/ ASSERT(SIZE(tp) >= sizeof(Body_t) );
1181 LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
1182 CACHE(vd)[C_INDEX(SIZE(tp))] = tp;
1183 }
1184
1185 /* free left-over if too big */
1186 if((s = SIZE(np) - size) >= sizeof(Block_t))
1187 { SIZE(np) = size;
1188
1189 tp = NEXT(np);
1190 SIZE(tp) = ((s & ~BITS) - sizeof(Head_t)) | BUSY | JUNK;
1191 SEG(tp) = seg;
1192 LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
1193 CACHE(vd)[C_INDEX(SIZE(tp))] = tp;
1194
1195 SIZE(np) |= s&BITS;
1196 }
1197
1198 bestreclaim(vd,NIL(Block_t*),0); /* coalesce all free blocks */
1199
1200 if(!local && !(vd->mode&VM_TRUST) && _Vmtrace && (vd->mode&VM_TRACE) )
1201 (*_Vmtrace)(vm,NIL(Vmuchar_t*),data,orgsize,orgalign);
1202
1203 done:
1204 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
1205 CLRLOCK(vd,local);
1206 ANNOUNCE(local, vm, VM_ALLOC, (Void_t*)data, vm->disc);
1207
1208 CLRINUSE(vd, inuse);
1209 return (Void_t*)data;
1210 }
1211
1212
1213 #if _mem_win32
1214 #if _PACKAGE_ast
1215 #include <ast_windows.h>
1216 #else
1217 #include <windows.h>
1218 #endif
1219 #endif /* _mem_win32 */
1220
1221 #if _mem_mmap_anon
1222 #include <sys/mman.h>
1223 #ifndef MAP_ANON
1224 #define MAP_ANON MAP_ANONYMOUS
1225 #endif
1226 #endif /* _mem_mmap_anon */
1227
1228 #if _mem_mmap_zero
1229 #include <sys/fcntl.h>
1230 #include <sys/mman.h>
1231 typedef struct _mmapdisc_s
1232 { Vmdisc_t disc;
1233 int fd;
1234 off_t offset;
1235 } Mmapdisc_t;
1236
1237 #ifndef OPEN_MAX
1238 #define OPEN_MAX 64
1239 #endif
1240 #define OPEN_PRIVATE (3*OPEN_MAX/4)
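/* the /dev/zero descriptor is dup'ed up toward OPEN_PRIVATE so that
** low-numbered descriptors remain available to the application */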
1241 #endif /* _mem_mmap_zero */
1242
1243 /* failure mode of mmap, sbrk and brk */
1244 #ifndef MAP_FAILED
1245 #define MAP_FAILED ((Void_t*)(-1))
1246 #endif
1247 #define BRK_FAILED ((Void_t*)(-1))
1248
1249 /* make sure that allocated memory is addressable */
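/* okaddr() below probes a candidate region by installing a temporary
** SIGSEGV handler and touching the first and last byte of the range;
** it returns -1 if a fault was recorded, so unusable memory can be
** rejected before it is handed out.
*/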
1250
1251 #if _PACKAGE_ast
1252 #include <sig.h>
1253 #else
1254 #include <signal.h>
1255 typedef void (*Sig_handler_t)(int);
1256 #endif
1257
1258 static int Gotsegv = 0;
1259
1260 #if __STD_C
1261 static void sigsegv(int sig)
1262 #else
1263 static void sigsegv(sig)
1264 int sig;
1265 #endif
1266 {
1267 if(sig == SIGSEGV)
1268 Gotsegv = 1;
1269 }
1270
1271 #if __STD_C
1272 static int okaddr(Void_t* addr, size_t nsize)
1273 #else
1274 static int okaddr(addr, nsize)
1275 Void_t* addr;
1276 size_t nsize;
1277 #endif
1278 {
1279 Sig_handler_t segv;
1280 int rv;
1281
1282 Gotsegv = 0; /* catch segment fault */
1283 segv = signal(SIGSEGV, sigsegv);
1284
1285 if(Gotsegv == 0)
1286 rv = *((char*)addr);
1287 if(Gotsegv == 0)
1288 rv += *(((char*)addr)+nsize-1);
1289 if(Gotsegv == 0)
1290 rv = rv == 0 ? 0 : 1;
1291 else rv = -1;
1292
1293 signal(SIGSEGV, segv); /* restore signal catcher */
1294 Gotsegv = 0;
1295
1296 return rv;
1297 }
1298
1299 /* A discipline to get raw memory using sbrk/VirtualAlloc/mmap */
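/* Discipline calling convention (the memoryf entry of Vmdisc_t):
** csize == 0 asks for a fresh region of nsize bytes, nsize == 0
** releases the region at caddr, and any other combination asks for an
** in-place resize, which here succeeds only for the current sbrk
** segment. NIL is returned on failure.
*/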
1300 #if __STD_C
1301 static Void_t* sbrkmem(Vmalloc_t* vm, Void_t* caddr,
1302 size_t csize, size_t nsize, Vmdisc_t* disc)
1303 #else
1304 static Void_t* sbrkmem(vm, caddr, csize, nsize, disc)
1305 Vmalloc_t* vm; /* region doing allocation from */
1306 Void_t* caddr; /* current address */
1307 size_t csize; /* current size */
1308 size_t nsize; /* new size */
1309 Vmdisc_t* disc; /* discipline structure */
1310 #endif
1311 {
1312 #undef _done_sbrkmem
1313
1314 #if !defined(_done_sbrkmem) && defined(_mem_win32)
1315 #define _done_sbrkmem 1
1316 NOTUSED(vm);
1317 NOTUSED(disc);
1318 if(csize == 0)
1319 return (Void_t*)VirtualAlloc(0,nsize,MEM_COMMIT,PAGE_READWRITE);
1320 else if(nsize == 0)
1321 return VirtualFree((LPVOID)caddr,0,MEM_RELEASE) ? caddr : NIL(Void_t*);
1322 else return NIL(Void_t*);
1323 #endif /* _mem_win32 */
1324
1325 #if !defined(_done_sbrkmem) && (_mem_sbrk || _mem_mmap_zero || _mem_mmap_anon)
1326 #define _done_sbrkmem 1
1327 Vmuchar_t *addr;
1328 #if _mem_mmap_zero
1329 Mmapdisc_t *mmdc = (Mmapdisc_t*)disc;
1330 #else
1331 NOTUSED(disc);
1332 #endif
1333 NOTUSED(vm);
1334
1335 if(csize == 0) /* allocating new memory */
1336 {
1337
1338 #if _mem_sbrk /* try using sbrk() and brk() */
1339 if(!(_Vmassert & VM_mmap))
1340 {
1341 addr = (Vmuchar_t*)sbrk(0); /* old break value */
1342 if(addr && addr != (Vmuchar_t*)BRK_FAILED )
1343 {
1344 if((addr+nsize) < addr)
1345 return NIL(Void_t*);
1346 if(brk(addr+nsize) == 0 )
1347 { if(okaddr(addr,nsize) >= 0)
1348 return addr;
1349 (void)brk(addr); /* release reserved address */
1350 }
1351 }
1352 }
1353 #endif /* _mem_sbrk */
1354
1355 #if _mem_mmap_anon /* anonymous mmap */
1356 addr = (Vmuchar_t*)mmap(0, nsize, PROT_READ|PROT_WRITE,
1357 MAP_ANON|MAP_PRIVATE, -1, 0);
1358 if(addr && addr != (Vmuchar_t*)MAP_FAILED)
1359 { if(okaddr(addr,nsize) >= 0)
1360 return addr;
1361 (void)munmap((char*)addr, nsize); /* release reserved address */
1362 }
1363 #endif /* _mem_mmap_anon */
1364
1365 #if _mem_mmap_zero /* mmap from /dev/zero */
1366 if(mmdc->fd < 0)
1367 { int fd;
1368 if(mmdc->fd != -1)
1369 return NIL(Void_t*);
1370 if((fd = open("/dev/zero", O_RDONLY)) < 0 )
1371 { mmdc->fd = -2;
1372 return NIL(Void_t*);
1373 }
1374 if(fd >= OPEN_PRIVATE || (mmdc->fd = dup2(fd,OPEN_PRIVATE)) < 0 )
1375 mmdc->fd = fd;
1376 else close(fd);
1377 #ifdef FD_CLOEXEC
1378 fcntl(mmdc->fd, F_SETFD, FD_CLOEXEC);
1379 #endif
1380 }
1381 addr = (Vmuchar_t*)mmap(0, nsize, PROT_READ|PROT_WRITE,
1382 MAP_PRIVATE, mmdc->fd, mmdc->offset);
1383 if(addr && addr != (Vmuchar_t*)MAP_FAILED)
1384 { if(okaddr(addr, nsize) >= 0)
1385 { mmdc->offset += nsize;
1386 return addr;
1387 }
1388 (void)munmap((char*)addr, nsize); /* release reserved address */
1389 }
1390 #endif /* _mem_mmap_zero */
1391
1392 return NIL(Void_t*);
1393 }
1394 else
1395 {
1396
1397 #if _mem_sbrk
1398 addr = (Vmuchar_t*)sbrk(0);
1399 if(!addr || addr == (Vmuchar_t*)BRK_FAILED)
1400 addr = caddr;
1401 else if(((Vmuchar_t*)caddr+csize) == addr) /* in sbrk-space */
1402 { if(nsize <= csize)
1403 addr -= csize-nsize;
1404 else if((addr += nsize-csize) < (Vmuchar_t*)caddr)
1405 return NIL(Void_t*); /* wrapped around address */
1406 else return brk(addr) == 0 ? caddr : NIL(Void_t*);
1407 }
1408 #else
1409 addr = caddr;
1410 #endif /* _mem_sbrk */
1411
1412 #if _mem_mmap_zero || _mem_mmap_anon
1413 if(((Vmuchar_t*)caddr+csize) > addr) /* in mmap-space */
1414 if(nsize == 0 && munmap(caddr,csize) == 0)
1415 return caddr;
1416 #endif /* _mem_mmap_zero || _mem_mmap_anon */
1417
1418 return NIL(Void_t*);
1419 }
1420 #endif /*_done_sbrkmem*/
1421
1422 #if !_done_sbrkmem /* use native malloc/free as a last resort */
1423 /**/ASSERT(_std_malloc); /* _std_malloc should be well-defined */
1424 NOTUSED(vm);
1425 NOTUSED(disc);
1426 if(csize == 0)
1427 return (Void_t*)malloc(nsize);
1428 else if(nsize == 0)
1429 { free(caddr);
1430 return caddr;
1431 }
1432 else return NIL(Void_t*);
1433 #endif /* _done_sbrkmem */
1434 }
1435
1436 #if _mem_mmap_zero
1437 static Mmapdisc_t _Vmdcsbrk = { { sbrkmem, NIL(Vmexcept_f), 64*1024 }, -1, 0 };
1438 #else
1439 static Vmdisc_t _Vmdcsbrk = { sbrkmem, NIL(Vmexcept_f), 0 };
1440 #endif
1441
1442 static Vmethod_t _Vmbest =
1443 {
1444 bestalloc,
1445 bestresize,
1446 bestfree,
1447 bestaddr,
1448 bestsize,
1449 bestcompact,
1450 bestalign,
1451 VM_MTBEST
1452 };
1453
1454 /* The heap region */
1455 static Vmdata_t _Vmdata =
1456 {
1457 VM_MTBEST|VM_TRUST, /* mode */
1458 0, /* incr */
1459 0, /* pool */
1460 NIL(Seg_t*), /* seg */
1461 NIL(Block_t*), /* free */
1462 NIL(Block_t*), /* wild */
1463 NIL(Block_t*), /* root */
1464 };
1465 Vmalloc_t _Vmheap =
1466 {
1467 { bestalloc,
1468 bestresize,
1469 bestfree,
1470 bestaddr,
1471 bestsize,
1472 bestcompact,
1473 bestalign,
1474 VM_MTBEST
1475 },
1476 NIL(char*), /* file */
1477 0, /* line */
1478 0, /* func */
1479 (Vmdisc_t*)(&_Vmdcsbrk), /* disc */
1480 &_Vmdata, /* data */
1481 NIL(Vmalloc_t*) /* next */
1482 };
1483
1484 __DEFINE__(Vmalloc_t*, Vmheap, &_Vmheap);
1485 __DEFINE__(Vmalloc_t*, Vmregion, &_Vmheap);
1486 __DEFINE__(Vmethod_t*, Vmbest, &_Vmbest);
1487 __DEFINE__(Vmdisc_t*, Vmdcsbrk, (Vmdisc_t*)(&_Vmdcsbrk) );
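/* A minimal usage sketch of this method through the public interface
** declared in vmalloc.h (names as exported there):
**
**	Vmalloc_t* vm = vmopen(Vmdcsbrk, Vmbest, 0);
**	void* p = vmalloc(vm, 100);
**	p = vmresize(vm, p, 200, VM_RSCOPY|VM_RSMOVE);
**	vmfree(vm, p);
**	vmclose(vm);
**
** Vmheap is the predefined region built from the same pieces above.
*/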
1488
1489 #ifdef NoF
1490 NoF(vmbest)
1491 #endif
1492
1493 #endif
1494