1 /***********************************************************************
2 * *
3 * This software is part of the ast package *
4 * Copyright (c) 1985-2012 AT&T Intellectual Property *
5 * and is licensed under the *
6 * Eclipse Public License, Version 1.0 *
7 * by AT&T Intellectual Property *
8 * *
9 * A copy of the License is available at *
10 * http://www.eclipse.org/org/documents/epl-v10.html *
11 * (with md5 checksum b35adb5213ca9657e911e9befb180842) *
12 * *
13 * Information and Software Systems Research *
14 * AT&T Research *
15 * Florham Park NJ *
16 * *
17 * Glenn Fowler <gsf@research.att.com> *
18 * David Korn <dgk@research.att.com> *
19 * Phong Vo <kpv@research.att.com> *
20 * *
21 ***********************************************************************/
22 #if defined(_UWIN) && defined(_BLD_ast)
23
24 void _STUB_vmbest(){}
25
26 #else
27
28 #include "vmhdr.h"
29
30 /* Best-fit allocation method. This is based on a best-fit strategy
31 ** using a splay tree for storage of lists of free blocks of the same
32 ** size. Recent free blocks may be cached for fast reuse.
33 **
34 ** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
35 */
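/* Illustrative usage (a sketch only, assuming the public vmalloc interface
** declared in vmalloc.h): a region using this method is normally opened on
** the Vmbest method and the Vmdcsystem discipline exported at the end of
** this file, e.g.:
**	Vmalloc_t* vm = vmopen(Vmdcsystem, Vmbest, 0);
**	Void_t* p = vm ? vmalloc(vm, 100) : NIL(Void_t*);
**	if(p) vmfree(vm, p);
**	if(vm) vmclose(vm);
*/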
36
37 #ifdef DEBUG
38 static int N_free; /* # of free calls */
39 static int N_alloc; /* # of alloc calls */
40 static int N_resize; /* # of resize calls */
41 static int N_wild; /* # allocated from the wild block */
42 static int N_last; /* # allocated from last free block */
43 static int N_reclaim; /* # of bestreclaim calls */
44 #endif /*DEBUG*/
45
46 #define COMPACT 8 /* factor to decide when to compact */
47
48 /* Check to see if a block is in the free tree */
49 #if __STD_C
50 static int vmintree(Block_t* node, Block_t* b)
51 #else
52 static int vmintree(node,b)
53 Block_t* node;
54 Block_t* b;
55 #endif
56 { Block_t* t;
57
58 for(t = node; t; t = LINK(t))
59 if(t == b)
60 return 1;
61 if(LEFT(node) && vmintree(LEFT(node),b))
62 return 1;
63 if(RIGHT(node) && vmintree(RIGHT(node),b))
64 return 1;
65 return 0;
66 }
67
68 #if __STD_C
69 static int vmonlist(Block_t* list, Block_t* b)
70 #else
71 static int vmonlist(list,b)
72 Block_t* list;
73 Block_t* b;
74 #endif
75 {
76 for(; list; list = LINK(list))
77 if(list == b)
78 return 1;
79 return 0;
80 }
81
82 /* Check to see if a block is known to be free */
83 #if __STD_C
84 static int vmisfree(Vmdata_t* vd, Block_t* b)
85 #else
86 static int vmisfree(vd,b)
87 Vmdata_t* vd;
88 Block_t* b;
89 #endif
90 {
91 if(SIZE(b) & (BUSY|JUNK|PFREE))
92 return 0;
93
94 if(b == vd->wild)
95 return 1;
96
97 if(SIZE(b) < MAXTINY)
98 return vmonlist(TINY(vd)[INDEX(SIZE(b))], b);
99
100 if(vd->root)
101 return vmintree(vd->root, b);
102
103 return 0;
104 }
105
106 /* Check to see if a block is known to be junked */
107 #if __STD_C
108 static int vmisjunk(Vmdata_t* vd, Block_t* b)
109 #else
110 static int vmisjunk(vd,b)
111 Vmdata_t* vd;
112 Block_t* b;
113 #endif
114 {
115 Block_t* t;
116
117 if((SIZE(b)&BUSY) == 0 || (SIZE(b)&JUNK) == 0)
118 return 0;
119
120 if(b == vd->free) /* recently freed */
121 return 1;
122
123 /* check the list that b is supposed to be in */
124 for(t = CACHE(vd)[C_INDEX(SIZE(b))]; t; t = LINK(t))
125 if(t == b)
126 return 1;
127
128 /* on occasion, b may be put onto the catch-all list */
129 if(C_INDEX(SIZE(b)) < S_CACHE)
130 for(t = CACHE(vd)[S_CACHE]; t; t = LINK(t))
131 if(t == b)
132 return 1;
133
134 return 0;
135 }
136
137 /* check to see if the free tree is in good shape */
138 #if __STD_C
139 static int vmchktree(Block_t* node)
140 #else
141 static int vmchktree(node)
142 Block_t* node;
143 #endif
144 { Block_t* t;
145
146 if(SIZE(node) & BITS)
147 { /**/ASSERT(0); return -1; }
148
149 for(t = LINK(node); t; t = LINK(t))
150 if(SIZE(t) != SIZE(node))
151 { /**/ASSERT(0); return -1; }
152
153 if((t = LEFT(node)) )
154 { if(SIZE(t) >= SIZE(node) )
155 { /**/ASSERT(0); return -1; }
156 else return vmchktree(t);
157 }
158 if((t = RIGHT(node)) )
159 { if(SIZE(t) <= SIZE(node) )
160 { /**/ASSERT(0); return -1; }
161 else return vmchktree(t);
162 }
163
164 return 0;
165 }
166
167 #if __STD_C
168 int _vmbestcheck(Vmdata_t* vd, Block_t* freeb)
169 #else
170 int _vmbestcheck(vd, freeb)
171 Vmdata_t* vd;
172 Block_t* freeb; /* known to be free but not on any free list */
173 #endif
174 {
175 reg Seg_t *seg;
176 reg Block_t *b, *endb, *nextb;
177 int rv = 0;
178
179 if(!CHECK())
180 return 0;
181
182 /* make sure the free tree is still in shape */
183 if(vd->root && vmchktree(vd->root) < 0 )
184 { rv = -1; /**/ASSERT(0); }
185
186 for(seg = vd->seg; seg && rv == 0; seg = seg->next)
187 { b = SEGBLOCK(seg);
188 endb = (Block_t*)(seg->baddr - sizeof(Head_t));
189 for(; b < endb && rv == 0; b = nextb)
190 { nextb = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) );
191
192 if(!ISBUSY(SIZE(b)) ) /* a completely free block */
193 { /* there should be no marked bits of any type */
194 if(SIZE(b) & (BUSY|JUNK|PFREE) )
195 { rv = -1; /**/ASSERT(0); }
196
197 /* next block must be busy and marked PFREE */
198 if(!ISBUSY(SIZE(nextb)) || !ISPFREE(SIZE(nextb)) )
199 { rv = -1; /**/ASSERT(0); }
200
201 /* must have a self-reference pointer */
202 if(SELF(b) != b)
203 { rv = -1; /**/ASSERT(0); }
204
205 /* segment pointer should be well-defined */
206 if(!TINIEST(b) && SEG(b) != seg)
207 { rv = -1; /**/ASSERT(0); }
208
209 /* must be on a free list */
210 if(b != freeb && !vmisfree(vd, b) )
211 { rv = -1; /**/ASSERT(0); }
212 }
213 else
214 { /* segment pointer should be well-defined */
215 if(SEG(b) != seg)
216 { rv = -1; /**/ASSERT(0); }
217
218 /* next block should not be marked PFREE */
219 if(ISPFREE(SIZE(nextb)) )
220 { rv = -1; /**/ASSERT(0); }
221
222 /* if PFREE, last block should be free */
223 if(ISPFREE(SIZE(b)) && LAST(b) != freeb &&
224 !vmisfree(vd, LAST(b)) )
225 { rv = -1; /**/ASSERT(0); }
226
227 /* if free but unreclaimed, should be junk */
228 if(ISJUNK(SIZE(b)) && !vmisjunk(vd, b))
229 { rv = -1; /**/ASSERT(0); }
230 }
231 }
232 }
233
234 return rv;
235 }
236
237 /* Tree rotation functions */
238 #define RROTATE(x,y) (LEFT(x) = RIGHT(y), RIGHT(y) = (x), (x) = (y))
239 #define LROTATE(x,y) (RIGHT(x) = LEFT(y), LEFT(y) = (x), (x) = (y))
240 #define RLINK(s,x) ((s) = LEFT(s) = (x))
241 #define LLINK(s,x) ((s) = RIGHT(s) = (x))
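/* RROTATE/LROTATE are single right/left rotations. RLINK/LLINK are used by the
** top-down splay walk in bestsearch() below: they hang the current root at the
** bottom of the right/left subtree being assembled, with the local "link"
** block acting as a temporary header for both partial trees.
*/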
242
243 /* Find and delete a suitable element in the free tree. */
244 #if __STD_C
245 static Block_t* bestsearch(Vmdata_t* vd, reg size_t size, Block_t* wanted)
246 #else
247 static Block_t* bestsearch(vd, size, wanted)
248 Vmdata_t* vd;
249 reg size_t size;
250 Block_t* wanted;
251 #endif
252 {
253 reg size_t s;
254 reg Block_t *t, *root, *l, *r;
255 Block_t link;
256
257 /* extracting a tiniest block from its list */
258 if((root = wanted) && size == TINYSIZE)
259 { reg Seg_t* seg;
260
261 l = TLEFT(root);
262 if((r = LINK(root)) )
263 TLEFT(r) = l;
264 if(l)
265 LINK(l) = r;
266 else TINY(vd)[0] = r;
267
268 seg = vd->seg;
269 if(!seg->next)
270 SEG(root) = seg;
271 else for(;; seg = seg->next)
272 { if((Vmuchar_t*)root > (Vmuchar_t*)seg->addr &&
273 (Vmuchar_t*)root < seg->baddr)
274 { SEG(root) = seg;
275 break;
276 }
277 }
278
279 return root;
280 }
281
282 /**/ASSERT(!vd->root || vmchktree(vd->root) == 0);
283
284 /* find the right one to delete */
285 l = r = &link;
286 if((root = vd->root) ) do
287 { /**/ ASSERT(!ISBITS(size) && !ISBITS(SIZE(root)));
288 if(size == (s = SIZE(root)) )
289 break;
290 if(size < s)
291 { if((t = LEFT(root)) )
292 { if(size <= (s = SIZE(t)) )
293 { RROTATE(root,t);
294 if(size == s)
295 break;
296 t = LEFT(root);
297 }
298 else
299 { LLINK(l,t);
300 t = RIGHT(t);
301 }
302 }
303 RLINK(r,root);
304 }
305 else
306 { if((t = RIGHT(root)) )
307 { if(size >= (s = SIZE(t)) )
308 { LROTATE(root,t);
309 if(size == s)
310 break;
311 t = RIGHT(root);
312 }
313 else
314 { RLINK(r,t);
315 t = LEFT(t);
316 }
317 }
318 LLINK(l,root);
319 }
320 /**/ ASSERT(root != t);
321 } while((root = t) );
322
323 if(root) /* found it, now isolate it */
324 { RIGHT(l) = LEFT(root);
325 LEFT(r) = RIGHT(root);
326 }
327 else /* nothing exactly fit */
328 { LEFT(r) = NIL(Block_t*);
329 RIGHT(l) = NIL(Block_t*);
330
331 /* grab the least one from the right tree */
332 if((root = LEFT(&link)) )
333 { while((t = LEFT(root)) )
334 RROTATE(root,t);
335 LEFT(&link) = RIGHT(root);
336 }
337 }
338
339 if(root && (r = LINK(root)) )
340 { /* head of a linked list, use next one for root */
341 LEFT(r) = RIGHT(&link);
342 RIGHT(r) = LEFT(&link);
343 }
344 else if(!(r = LEFT(&link)) )
345 r = RIGHT(&link);
346 else /* graft left tree to right tree */
347 { while((t = LEFT(r)) )
348 RROTATE(r,t);
349 LEFT(r) = RIGHT(&link);
350 }
351 vd->root = r; /**/ASSERT(!r || !ISBITS(SIZE(r)));
352
353 /**/ASSERT(!vd->root || vmchktree(vd->root) == 0);
354 /**/ASSERT(!wanted || wanted == root);
355
356 return root;
357 }
358
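/* Freed blocks are not coalesced immediately: bestfree() marks them JUNK and
** threads them onto CACHE(vd)[INDEX(size)], the catch-all CACHE(vd)[S_CACHE],
** or vd->free. bestreclaim() empties these caches, merges each block with its
** free neighbors, and reinserts the result into a TINY list, the free tree,
** or the wild block.
*/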
359 /* Reclaim all delayed free blocks into the free tree */
360 #if __STD_C
361 static int bestreclaim(reg Vmdata_t* vd, Block_t* wanted, int c)
362 #else
363 static int bestreclaim(vd, wanted, c)
364 reg Vmdata_t* vd;
365 Block_t* wanted;
366 int c;
367 #endif
368 {
369 reg size_t size, s;
370 reg Block_t *fp, *np, *t, *list;
371 reg int n, saw_wanted;
372
373 /**/COUNT(N_reclaim);
374 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
375
376 if((fp = vd->free) )
377 { LINK(fp) = CACHE(vd)[S_CACHE]; CACHE(vd)[S_CACHE] = fp;
378 vd->free = NIL(Block_t*);
379 }
380
381 saw_wanted = wanted ? 0 : 1;
382 for(n = S_CACHE; n >= c; --n)
383 { list = CACHE(vd)[n]; CACHE(vd)[n] = NIL(Block_t*);
384 while((fp = list) )
385 { /* Note that below here we allow ISJUNK blocks to be
386 ** forward-merged even though they are not removed from
387 ** the list immediately. In this way, the list is
388 ** scanned only once. It works because the LINK and SIZE
389 ** fields are not destroyed during the merging. This can
390 ** be seen by observing that a tiniest block has a 2-word
391 ** header and a 2-word body. Merging a tiniest block
392 ** (1seg) and the next block (2seg) looks like this:
393 ** 1seg size link left 2seg size link left ....
394 ** 1seg size link left rite xxxx xxxx .... self
395 ** After the merge, the 2seg word is replaced by the RIGHT
396 ** pointer of the new block and somewhere beyond the
397 ** two xxxx fields, the SELF pointer will replace some
398 ** other word. The important part is that the two xxxx
399 ** fields are kept intact.
400 */
401 list = LINK(list); /**/ASSERT(!vmonlist(list,fp));
402
403 size = SIZE(fp);
404 if(!ISJUNK(size)) /* already done */
405 continue;
406
407 if(ISPFREE(size)) /* backward merge */
408 { fp = LAST(fp);
409 s = SIZE(fp); /**/ASSERT(!(s&BITS));
410 REMOVE(vd,fp,INDEX(s),t,bestsearch);
411 size = (size&~BITS) + s + sizeof(Head_t);
412 }
413 else size &= ~BITS;
414
415 for(;;) /* forward merge */
416 { np = (Block_t*)((Vmuchar_t*)fp+size+sizeof(Head_t));
417 s = SIZE(np); /**/ASSERT(s > 0);
418 if(!ISBUSY(s))
419 { /**/ASSERT((s&BITS) == 0);
420 if(np == vd->wild)
421 vd->wild = NIL(Block_t*);
422 else REMOVE(vd,np,INDEX(s),t,bestsearch);
423 }
424 else if(ISJUNK(s))
425 { /* reclaim any touched junk list */
426 if((int)C_INDEX(s) < c)
427 c = (int)C_INDEX(s);
428 SIZE(np) = 0;
429 CLRBITS(s);
430 }
431 else break;
432 size += s + sizeof(Head_t);
433 }
434 SIZE(fp) = size;
435
436 /* tell next block that this one is free */
437 np = NEXT(fp); /**/ASSERT(ISBUSY(SIZE(np)));
438 /**/ASSERT(!ISJUNK(SIZE(np)));
439 SETPFREE(SIZE(np));
440 SELF(fp) = fp;
441
442 if(fp == wanted) /* to be consumed soon */
443 { /**/ASSERT(!saw_wanted); /* should be seen just once */
444 saw_wanted = 1;
445 continue;
446 }
447
448 /* wilderness preservation */
449 if(np->body.data >= vd->seg->baddr)
450 { vd->wild = fp;
451 continue;
452 }
453
454 /* tiny block goes to tiny list */
455 if(size < MAXTINY)
456 { s = INDEX(size);
457 np = LINK(fp) = TINY(vd)[s];
458 if(s == 0) /* TINIEST block */
459 { if(np)
460 TLEFT(np) = fp;
461 TLEFT(fp) = NIL(Block_t*);
462 }
463 else
464 { if(np)
465 LEFT(np) = fp;
466 LEFT(fp) = NIL(Block_t*);
467 SETLINK(fp);
468 }
469 TINY(vd)[s] = fp;
470 continue;
471 }
472
473 LEFT(fp) = RIGHT(fp) = LINK(fp) = NIL(Block_t*);
474 if(!(np = vd->root) ) /* inserting into an empty tree */
475 { vd->root = fp;
476 continue;
477 }
478
479 size = SIZE(fp);
480 while(1) /* leaf insertion */
481 { /**/ASSERT(np != fp);
482 if((s = SIZE(np)) > size)
483 { if((t = LEFT(np)) )
484 { /**/ ASSERT(np != t);
485 np = t;
486 }
487 else
488 { LEFT(np) = fp;
489 break;
490 }
491 }
492 else if(s < size)
493 { if((t = RIGHT(np)) )
494 { /**/ ASSERT(np != t);
495 np = t;
496 }
497 else
498 { RIGHT(np) = fp;
499 break;
500 }
501 }
502 else /* s == size */
503 { if((t = LINK(np)) )
504 { LINK(fp) = t;
505 LEFT(t) = fp;
506 }
507 LINK(np) = fp;
508 LEFT(fp) = np;
509 SETLINK(fp);
510 break;
511 }
512 }
513 }
514 }
515
516 /**/ASSERT(!wanted || saw_wanted == 1);
517 /**/ASSERT(_vmbestcheck(vd, wanted) == 0);
518 return saw_wanted;
519 }
520
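/* Return as much unused space as possible to the discipline. For each segment
** whose topmost block is free, that block is detached and the segment is
** shrunk via _Vmtruncate; the wild block of the bottom segment is kept unless
** it is large relative to vd->incr and the recent average free size vd->pool.
*/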
521 #if __STD_C
522 static int bestcompact(Vmalloc_t* vm, int local)
523 #else
524 static int bestcompact(vm, local)
525 Vmalloc_t* vm;
526 int local;
527 #endif
528 {
529 reg Seg_t *seg, *next;
530 reg Block_t *bp, *tp;
531 reg size_t size, segsize, round;
532 reg Vmdata_t* vd = vm->data;
533
534 SETLOCK(vm, local);
535
536 bestreclaim(vd,NIL(Block_t*),0);
537
538 for(seg = vd->seg; seg; seg = next)
539 { next = seg->next;
540
541 bp = BLOCK(seg->baddr);
542 if(!ISPFREE(SIZE(bp)) )
543 continue;
544
545 bp = LAST(bp); /**/ASSERT(vmisfree(vd,bp));
546 size = SIZE(bp);
547 if(bp == vd->wild)
548 { /* During large block allocations, _Vmextend might
549 ** have enlarged the rounding factor. Reducing
550 ** it a bit helps avoid grabbing too much raw memory.
551 */
552 if((round = vm->disc->round) == 0)
553 round = _Vmpagesize;
554 if(size > COMPACT*vd->incr && vd->incr > round)
555 vd->incr /= 2;
556
557 /* for the bottom segment, we don't necessarily want
558 ** to return raw memory too early. vd->pool has an
559 ** approximation of the average size of recently freed
560 ** blocks. If this is large, the application is managing
561 ** large blocks so we throttle back memory chopping
562 ** to avoid thrashing the underlying memory system.
563 */
564 if(size <= COMPACT*vd->incr || size <= COMPACT*vd->pool)
565 continue;
566
567 vd->wild = NIL(Block_t*);
568 vd->pool = 0;
569 }
570 else REMOVE(vd,bp,INDEX(size),tp,bestsearch);
571 tp = NEXT(bp); /* avoid strict-aliasing pun */
572 CLRPFREE(SIZE(tp));
573
574 if(size < (segsize = seg->size))
575 size += sizeof(Head_t);
576
577 if((size = (*_Vmtruncate)(vm,seg,size,0)) > 0)
578 { if(size >= segsize) /* entire segment deleted */
579 continue;
580 /**/ASSERT(SEG(BLOCK(seg->baddr)) == seg);
581
582 if((size = (seg->baddr - ((Vmuchar_t*)bp) - sizeof(Head_t))) > 0)
583 SIZE(bp) = size - sizeof(Head_t);
584 else bp = NIL(Block_t*);
585 }
586
587 if(bp)
588 { /**/ ASSERT(SIZE(bp) >= BODYSIZE);
589 /**/ ASSERT(SEGWILD(bp));
590 /**/ ASSERT(!vd->root || !vmintree(vd->root,bp));
591 SIZE(bp) |= BUSY|JUNK;
592 LINK(bp) = CACHE(vd)[C_INDEX(SIZE(bp))];
593 CACHE(vd)[C_INDEX(SIZE(bp))] = bp;
594 }
595 }
596
597 if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST)
598 (*_Vmtrace)(vm, (Vmuchar_t*)0, (Vmuchar_t*)0, 0, 0);
599
600 CLRLOCK(vm, local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
601
602 return 0;
603 }
604
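/* Allocate a block of at least the requested size. The search order is:
** reuse the most recently freed block (vd->free) if it fits snugly, then
** reclaim the junk caches and best-fit search the free tree, then carve
** from the wild block, and finally extend the arena via _Vmextend.
*/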
605 #if __STD_C
606 static Void_t* bestalloc(Vmalloc_t* vm, size_t size, int local)
607 #else
608 static Void_t* bestalloc(vm, size, local)
609 Vmalloc_t* vm; /* region allocating from */
610 size_t size; /* desired block size */
611 int local; /* internal call */
612 #endif
613 {
614 reg Vmdata_t* vd = vm->data;
615 reg size_t s;
616 reg int n;
617 reg Block_t *tp, *np, *ap;
618 size_t orgsize = size;
619
620 /**/COUNT(N_alloc);
621 /**/ASSERT(local ? (vd->lock == 1) : 1 );
622
623 SETLOCK(vm,local);
624
625 /**/ ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
626 /**/ ASSERT(HEADSIZE == sizeof(Head_t));
627 /**/ ASSERT(BODYSIZE == sizeof(Body_t));
628 /**/ ASSERT((ALIGN%(BITS+1)) == 0 );
629 /**/ ASSERT((sizeof(Head_t)%ALIGN) == 0 );
630 /**/ ASSERT((sizeof(Body_t)%ALIGN) == 0 );
631 /**/ ASSERT((BODYSIZE%ALIGN) == 0 );
632 /**/ ASSERT(sizeof(Block_t) == (sizeof(Body_t)+sizeof(Head_t)) );
633
634 /* per the ANSI requirement that malloc(0) return a non-NULL pointer */
635 size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
636
637 if((tp = vd->free) ) /* reuse last free piece if appropriate */
638 { /**/ASSERT(ISBUSY(SIZE(tp)) );
639 /**/ASSERT(ISJUNK(SIZE(tp)) );
640 /**/COUNT(N_last);
641
642 vd->free = NIL(Block_t*);
643 if((s = SIZE(tp)) >= size && s < (size << 1) )
644 { if(s >= size + (sizeof(Head_t)+BODYSIZE) )
645 { SIZE(tp) = size;
646 np = NEXT(tp);
647 SEG(np) = SEG(tp);
648 SIZE(np) = ((s&~BITS) - (size+sizeof(Head_t)))|JUNK|BUSY;
649 vd->free = np;
650 SIZE(tp) |= s&BITS;
651 }
652 CLRJUNK(SIZE(tp));
653 goto done;
654 }
655
656 LINK(tp) = CACHE(vd)[S_CACHE];
657 CACHE(vd)[S_CACHE] = tp;
658 }
659
660 for(n = S_CACHE; n >= 0; --n) /* best-fit except for coalescing */
661 { bestreclaim(vd,NIL(Block_t*),n);
662 if(vd->root && (tp = bestsearch(vd,size,NIL(Block_t*))) )
663 goto got_block;
664 }
665
666 /**/ASSERT(!vd->free);
667 if((tp = vd->wild) && SIZE(tp) >= size)
668 { /**/COUNT(N_wild);
669 vd->wild = NIL(Block_t*);
670 goto got_block;
671 }
672
673 /* need to extend the arena */
674 KPVCOMPACT(vm,bestcompact);
675 if((tp = (*_Vmextend)(vm,size,bestsearch)) )
676 { got_block:
677 /**/ ASSERT(!ISBITS(SIZE(tp)));
678 /**/ ASSERT(SIZE(tp) >= size);
679 /**/ ASSERT((SIZE(tp)%ALIGN) == 0);
680 /**/ ASSERT(!vd->free);
681
682 /* tell next block that we are no longer a free block */
683 np = NEXT(tp);
684 CLRPFREE(SIZE(np)); /**/ ASSERT(ISBUSY(SIZE(np)));
685
686 if((s = SIZE(tp)-size) >= (sizeof(Head_t)+BODYSIZE) )
687 { SIZE(tp) = size;
688
689 np = NEXT(tp);
690 SEG(np) = SEG(tp);
691 SIZE(np) = (s - sizeof(Head_t)) | BUSY|JUNK;
692
693 if(VMWILD(vd,np))
694 { SIZE(np) &= ~BITS;
695 SELF(np) = np;
696 ap = NEXT(np); /**/ASSERT(ISBUSY(SIZE(ap)));
697 SETPFREE(SIZE(ap));
698 vd->wild = np;
699 }
700 else vd->free = np;
701 }
702
703 SETBUSY(SIZE(tp));
704 }
705
706 done:
707 if(tp && !local && (vd->mode&VM_TRACE) && _Vmtrace && VMETHOD(vd) == VM_MTBEST)
708 (*_Vmtrace)(vm,NIL(Vmuchar_t*),(Vmuchar_t*)DATA(tp),orgsize,0);
709
710 CLRLOCK(vm,local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
711
712 return tp ? DATA(tp) : NIL(Void_t*);
713 }
714
715 #if __STD_C
716 static long bestaddr(Vmalloc_t* vm, Void_t* addr, int local )
717 #else
718 static long bestaddr(vm, addr, local)
719 Vmalloc_t* vm; /* region allocating from */
720 Void_t* addr; /* address to check */
721 int local;
722 #endif
723 {
724 reg Seg_t* seg;
725 reg Block_t *b, *endb;
726 reg long offset;
727 reg Vmdata_t* vd = vm->data;
728
729 /**/ASSERT(local ? (vd->lock == 1) : 1 );
730 SETLOCK(vm, local);
731
732 offset = -1L; b = endb = NIL(Block_t*);
733 for(seg = vd->seg; seg; seg = seg->next)
734 { b = SEGBLOCK(seg);
735 endb = (Block_t*)(seg->baddr - sizeof(Head_t));
736 if((Vmuchar_t*)addr > (Vmuchar_t*)b &&
737 (Vmuchar_t*)addr < (Vmuchar_t*)endb)
738 break;
739 }
740
741 if(local ) /* from bestfree or bestresize */
742 { b = BLOCK(addr);
743 if(seg && SEG(b) == seg && ISBUSY(SIZE(b)) && !ISJUNK(SIZE(b)) )
744 offset = 0;
745 }
746 else if(seg)
747 { while(b < endb)
748 { reg Vmuchar_t* data = (Vmuchar_t*)DATA(b);
749 reg size_t size = SIZE(b)&~BITS;
750
751 if((Vmuchar_t*)addr >= data && (Vmuchar_t*)addr < data+size)
752 { if(ISJUNK(SIZE(b)) || !ISBUSY(SIZE(b)))
753 offset = -1L;
754 else offset = (long)((Vmuchar_t*)addr - data);
755 goto done;
756 }
757
758 b = (Block_t*)((Vmuchar_t*)DATA(b) + size);
759 }
760 }
761
762 done:
763 CLRLOCK(vm,local);
764 return offset;
765 }
766
767 #if __STD_C
768 static int bestfree(Vmalloc_t* vm, Void_t* data, int local )
769 #else
770 static int bestfree(vm, data, local )
771 Vmalloc_t* vm;
772 Void_t* data;
773 int local;
774 #endif
775 {
776 reg Vmdata_t* vd = vm->data;
777 reg Block_t *bp;
778 reg size_t s;
779
780 #ifdef DEBUG
781 if(((char*)data - (char*)0) <= 1)
782 { _Vmassert |= VM_check;
783 _vmbestcheck(vd, NIL(Block_t*));
784 if (!data)
785 _Vmassert &= ~VM_check;
786 return 0;
787 }
788 #else
789 if(!data) /* ANSI-ism */
790 return 0;
791 #endif
792
793 /**/COUNT(N_free);
794 /**/ASSERT(local ? (vd->lock == 1) : 1 );
795
796 SETLOCK(vm, local);
797
798 /**/ASSERT(KPVADDR(vm, data, bestaddr) == 0);
799 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
800 bp = BLOCK(data); s = SIZE(bp);
801
802 /* Keep an approximate average free block size.
803 ** This is used in bestcompact() to decide when to release
804 ** raw memory back to the underlying memory system.
805 */
806 vd->pool = (vd->pool + (s&~BITS))/2;
807
808 if(ISBUSY(s) && !ISJUNK(s))
809 { SETJUNK(SIZE(bp));
810 if(s < MAXCACHE)
811 { /**/ASSERT(!vmonlist(CACHE(vd)[INDEX(s)], bp) );
812 LINK(bp) = CACHE(vd)[INDEX(s)];
813 CACHE(vd)[INDEX(s)] = bp;
814 }
815 else if(!vd->free)
816 vd->free = bp;
817 else
818 { /**/ASSERT(!vmonlist(CACHE(vd)[S_CACHE], bp) );
819 LINK(bp) = CACHE(vd)[S_CACHE];
820 CACHE(vd)[S_CACHE] = bp;
821 }
822
823 /* coalesce on freeing large blocks to avoid fragmentation */
824 if(SIZE(bp) >= 2*vd->incr)
825 { bestreclaim(vd,NIL(Block_t*),0);
826 if(vd->wild && SIZE(vd->wild) >= COMPACT*vd->incr)
827 KPVCOMPACT(vm,bestcompact);
828 }
829 }
830
831 if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST )
832 (*_Vmtrace)(vm,(Vmuchar_t*)data,NIL(Vmuchar_t*), (s&~BITS), 0);
833
834 CLRLOCK(vm, local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
835
836 return 0;
837 }
838
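/* Resize a block, preferably in place: first merge forward into adjacent free
** or junk blocks, or grow the last segment when the block borders it; if that
** is not enough and VM_RSMOVE/VM_RSCOPY permit, allocate a new block and copy.
** Excess space is split off and junked; VM_RSZERO clears any added space.
*/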
839 #if __STD_C
840 static Void_t* bestresize(Vmalloc_t* vm, Void_t* data, reg size_t size, int type, int local)
841 #else
842 static Void_t* bestresize(vm, data, size, type, local)
843 Vmalloc_t* vm; /* region allocating from */
844 Void_t* data; /* old block of data */
845 reg size_t size; /* new size */
846 int type; /* VM_RSMOVE, VM_RSCOPY, VM_RSZERO resize flags */
847 int local;
848 #endif
849 {
850 reg Block_t *rp, *np, *t;
851 size_t s, bs;
852 size_t oldz = 0, orgsize = size;
853 Void_t *oldd = 0, *orgdata = data;
854 Vmdata_t *vd = vm->data;
855
856 /**/COUNT(N_resize);
857 /**/ASSERT(local ? (vd->lock == 1) : 1);
858
859 if(!data) /* resizing a NULL block is the same as allocating */
860 { data = bestalloc(vm, size, local);
861 if(data && (type&VM_RSZERO) )
862 memset((Void_t*)data, 0, size);
863 return data;
864 }
865 if(size == 0) /* resizing to zero size is the same as freeing */
866 { (void)bestfree(vm, data, local);
867 return NIL(Void_t*);
868 }
869
870 SETLOCK(vm, local);
871
872 /**/ASSERT(KPVADDR(vm, data, bestaddr) == 0);
873 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
874 size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
875 rp = BLOCK(data); /**/ASSERT(ISBUSY(SIZE(rp)) && !ISJUNK(SIZE(rp)));
876 oldz = SIZE(rp); CLRBITS(oldz);
877 if(oldz < size)
878 { np = (Block_t*)((Vmuchar_t*)rp + oldz + sizeof(Head_t));
879 do /* forward merge as much as possible */
880 { s = SIZE(np); /**/ASSERT(!ISPFREE(s));
881 if(np == vd->free)
882 { vd->free = NIL(Block_t*);
883 CLRBITS(s);
884 }
885 else if(ISJUNK(s) )
886 {
887 if(!bestreclaim(vd,np,(int)C_INDEX(s)) )
888 /**/ASSERT(0); /* oops: did not see np! */
889 s = SIZE(np); /**/ASSERT(s%ALIGN == 0);
890 }
891 else if(!ISBUSY(s) )
892 { if(np == vd->wild)
893 vd->wild = NIL(Block_t*);
894 else REMOVE(vd,np,INDEX(s),t,bestsearch);
895 }
896 else break;
897
898 SIZE(rp) += (s += sizeof(Head_t)); /**/ASSERT((s%ALIGN) == 0);
899 np = (Block_t*)((Vmuchar_t*)np + s);
900 CLRPFREE(SIZE(np));
901 } while(SIZE(rp) < size);
902
903 if(SIZE(rp) < size && size > vd->incr && SEGWILD(rp) )
904 { reg Seg_t* seg;
905
906 s = (size - SIZE(rp)) + sizeof(Head_t); s = ROUND(s,vd->incr);
907 seg = SEG(rp);
908 if((*vm->disc->memoryf)(vm,seg->addr,seg->extent,seg->extent+s,
909 vm->disc) == seg->addr )
910 { SIZE(rp) += s;
911 seg->extent += s;
912 seg->size += s;
913 seg->baddr += s;
914 s = (SIZE(rp)&~BITS) + sizeof(Head_t);
915 np = (Block_t*)((Vmuchar_t*)rp + s);
916 SEG(np) = seg;
917 SIZE(np) = BUSY;
918 }
919 }
920 }
921
922 if((s = SIZE(rp)) >= (size + (BODYSIZE+sizeof(Head_t))) )
923 { SIZE(rp) = size;
924 np = NEXT(rp);
925 SEG(np) = SEG(rp);
926 SIZE(np) = (((s&~BITS)-size) - sizeof(Head_t))|BUSY|JUNK;
927 CPYBITS(SIZE(rp),s);
928 rp = np;
929 goto do_free;
930 }
931 else if((bs = s&~BITS) < size)
932 { if(!(type&(VM_RSMOVE|VM_RSCOPY)) )
933 data = NIL(Void_t*); /* old data is not moveable */
934 else
935 { oldd = data;
936 if((data = KPVALLOC(vm,size,bestalloc)) )
937 { if(type&VM_RSCOPY)
938 memcpy(data, oldd, bs);
939
940 do_free: /* reclaim these right away */
941 SETJUNK(SIZE(rp));
942 LINK(rp) = CACHE(vd)[S_CACHE];
943 CACHE(vd)[S_CACHE] = rp;
944 bestreclaim(vd, NIL(Block_t*), S_CACHE);
945 }
946 }
947 }
948
949 if(data && (type&VM_RSZERO) && (size = SIZE(BLOCK(data))&~BITS) > oldz )
950 memset((Void_t*)((Vmuchar_t*)data + oldz), 0, size-oldz);
951
952 if(!local && _Vmtrace && data && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST)
953 (*_Vmtrace)(vm, (Vmuchar_t*)orgdata, (Vmuchar_t*)data, orgsize, 0);
954
955 CLRLOCK(vm, local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
956
957 return data;
958 }
959
960 #if __STD_C
961 static long bestsize(Vmalloc_t* vm, Void_t* addr, int local )
962 #else
963 static long bestsize(vm, addr, local)
964 Vmalloc_t* vm; /* region allocating from */
965 Void_t* addr; /* address to check */
966 int local;
967 #endif
968 {
969 Seg_t *seg;
970 Block_t *b, *endb;
971 long size;
972 Vmdata_t *vd = vm->data;
973
974 SETLOCK(vm, local);
975
976 size = -1L;
977 for(seg = vd->seg; seg; seg = seg->next)
978 { b = SEGBLOCK(seg);
979 endb = (Block_t*)(seg->baddr - sizeof(Head_t));
980 if((Vmuchar_t*)addr <= (Vmuchar_t*)b ||
981 (Vmuchar_t*)addr >= (Vmuchar_t*)endb)
982 continue;
983 while(b < endb)
984 { if(addr == DATA(b))
985 { if(!ISBUSY(SIZE(b)) || ISJUNK(SIZE(b)) )
986 size = -1L;
987 else size = (long)SIZE(b)&~BITS;
988 goto done;
989 }
990 else if((Vmuchar_t*)addr <= (Vmuchar_t*)b)
991 break;
992
993 b = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) );
994 }
995 }
996
997 done:
998 CLRLOCK(vm, local);
999 return size;
1000 }
1001
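/* Allocate a block whose data address is a multiple of align. A block big
** enough for size plus worst-case padding is obtained via bestalloc, the
** aligned address is located within it, and the unused head and tail pieces
** are junked for later reclamation by bestreclaim.
*/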
1002 #if __STD_C
1003 static Void_t* bestalign(Vmalloc_t* vm, size_t size, size_t align, int local)
1004 #else
1005 static Void_t* bestalign(vm, size, align, local)
1006 Vmalloc_t* vm;
1007 size_t size;
1008 size_t align;
1009 int local;
1010 #endif
1011 {
1012 Vmuchar_t *data;
1013 Block_t *tp, *np;
1014 Seg_t *seg;
1015 size_t s, extra;
1016 size_t orgsize = size, orgalign = align;
1017 Vmdata_t *vd = vm->data;
1018
1019 if(size <= 0 || align <= 0)
1020 return NIL(Void_t*);
1021
1022 SETLOCK(vm, local);
1023
1024 /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
1025 size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
1026 align = MULTIPLE(align,ALIGN);
1027
1028 /* hack so that dbalign() can store header data */
1029 if(VMETHOD(vd) != VM_MTDEBUG)
1030 extra = 0;
1031 else
1032 { extra = DB_HEAD;
1033 while(align < extra || (align - extra) < sizeof(Block_t))
1034 align *= 2;
1035 }
1036
1037 /* reclaim all free blocks now to avoid fragmentation */
1038 bestreclaim(vd,NIL(Block_t*),0);
1039
1040 s = size + 2*(align+sizeof(Head_t)+extra);
1041 if(!(data = (Vmuchar_t*)KPVALLOC(vm,s,bestalloc)) )
1042 goto done;
1043
1044 tp = BLOCK(data);
1045 seg = SEG(tp);
1046
1047 /* get an aligned address that we can live with */
1048 if((s = (size_t)((VLONG(data)+extra)%align)) != 0)
1049 data += align-s; /**/ASSERT(((VLONG(data)+extra)%align) == 0);
1050
1051 if((np = BLOCK(data)) != tp ) /* need to free left part */
1052 { if(((Vmuchar_t*)np - (Vmuchar_t*)tp) < (ssize_t)(sizeof(Block_t)+extra) )
1053 { data += align;
1054 np = BLOCK(data);
1055 } /**/ASSERT(((VLONG(data)+extra)%align) == 0);
1056
1057 s = (Vmuchar_t*)np - (Vmuchar_t*)tp;
1058 SIZE(np) = ((SIZE(tp)&~BITS) - s)|BUSY;
1059 SEG(np) = seg;
1060
1061 SIZE(tp) = (s - sizeof(Head_t)) | (SIZE(tp)&BITS) | JUNK;
1062 /**/ ASSERT(SIZE(tp) >= sizeof(Body_t) );
1063 LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
1064 CACHE(vd)[C_INDEX(SIZE(tp))] = tp;
1065 }
1066
1067 /* free left-over if too big */
1068 if((s = SIZE(np) - size) >= sizeof(Block_t))
1069 { SIZE(np) = size;
1070
1071 tp = NEXT(np);
1072 SIZE(tp) = ((s & ~BITS) - sizeof(Head_t)) | BUSY | JUNK;
1073 SEG(tp) = seg;
1074 LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
1075 CACHE(vd)[C_INDEX(SIZE(tp))] = tp;
1076
1077 SIZE(np) |= s&BITS;
1078 }
1079
1080 bestreclaim(vd,NIL(Block_t*),0); /* coalesce all free blocks */
1081
1082 if(!local && _Vmtrace && (vd->mode&VM_TRACE) )
1083 (*_Vmtrace)(vm,NIL(Vmuchar_t*),data,orgsize,orgalign);
1084
1085 done:
1086 CLRLOCK(vm, local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
1087
1088 return (Void_t*)data;
1089 }
1090
1091 /* The below implements the discipline Vmdcsbrk and the heap region Vmheap.
1092 ** There are 5 alternative ways to get raw memory:
1093 ** win32, sbrk, mmap_anon, mmap_zero and reusing the native malloc.
1094 ** The method is selected here so that our malloc implementation
1095 ** can work with concurrent threads. The sbrk/brk interface is unfortunately
1096 ** not atomic. Thus, we prefer mmap_anon or mmap_zero if they are available.
1097 */
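/* getmemory() below tries the configured methods in this order: win32mem,
** sbrkmem (only when VM_break is explicitly requested), anonymous mmapmem,
** mmapmem over /dev/zero, sbrkmem as a fallback, and finally mallocmem on
** top of the native malloc.
*/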
1098 #if _mem_win32
1099 #undef _mem_mmap_anon
1100 #undef _mem_mmap_zero
1101 #undef _mem_sbrk
1102 #endif
1103 #if _mem_mmap_anon
1104 #undef _mem_mmap_zero
1105 #if !_PACKAGE_ast
1106 #undef _mem_sbrk
1107 #endif
1108 #endif
1109 #if _mem_mmap_zero
1110 #if !_PACKAGE_ast
1111 #undef _mem_sbrk
1112 #endif
1113 #endif
1114
1115 #if __linux__
1116
1117 /* make sure that allocated memory is addressable */
1118
1119 #include <signal.h>
1120 typedef void (*Sig_f)(int);
1121 static int Gotsegv = 0;
1122
1123 static void sigsegv(int sig)
1124 {
1125 if(sig == SIGSEGV)
1126 Gotsegv = 1;
1127 }
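/* chkaddr() reads the last byte of a newly obtained area with a temporary
** SIGSEGV handler installed; a fault sets Gotsegv and makes the function
** return -1, so callers can reject memory that is not actually addressable.
*/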
1128 static int chkaddr(Vmuchar_t* addr, size_t nsize)
1129 {
1130 Sig_f segv;
1131 int rv;
1132
1133 Gotsegv = 0; /* catch segment fault */
1134 segv = signal(SIGSEGV, sigsegv);
1135
1136 rv = *(addr+nsize-1);
1137 rv = Gotsegv ? -1 : rv;
1138
1139 signal(SIGSEGV, segv); /* restore signal catcher */
1140 Gotsegv = 0;
1141
1142 return rv;
1143 }
1144 #else
1145
1146 /* on known systems other than Linux, brk addresses are guaranteed to be valid */
1147
1148 #define chkaddr(a,n) (0)
1149
1150 #endif /*__linux__*/
1151
1152 #if _mem_win32 /* getting memory on a Windows system */
1153 #if _PACKAGE_ast
1154 #include <ast_windows.h>
1155 #else
1156 #include <windows.h>
1157 #endif
1158
1159 static Void_t* win32mem(Void_t* caddr, size_t csize, size_t nsize)
1160 { /**/ ASSERT(csize > 0 || nsize > 0);
1161 if(csize == 0)
1162 { caddr = (Void_t*)VirtualAlloc(0,nsize,MEM_COMMIT,PAGE_READWRITE);
1163 return caddr;
1164 }
1165 else if(nsize == 0)
1166 { (void)VirtualFree((LPVOID)caddr,0,MEM_RELEASE);
1167 return caddr;
1168 }
1169 else return NIL(Void_t*);
1170 }
1171 #endif /* _mem_win32 */
1172
1173 #if _mem_sbrk /* getting space via brk/sbrk - not concurrent-ready */
1174 static Void_t* sbrkmem(Void_t* caddr, size_t csize, size_t nsize)
1175 {
1176 Vmuchar_t *addr = (Vmuchar_t*)sbrk(0);
1177
1178 if(!addr || addr == (Vmuchar_t*)(-1) )
1179 return NIL(Void_t*);
1180
1181 if(csize > 0 && addr != (Vmuchar_t*)caddr+csize)
1182 return NIL(Void_t*);
1183 else if(csize == 0)
1184 caddr = addr;
1185
1186 /**/ASSERT(addr == (Vmuchar_t*)caddr+csize);
1187 if(nsize < csize)
1188 addr -= csize-nsize;
1189 else if((addr += nsize-csize) < (Vmuchar_t*)caddr )
1190 return NIL(Void_t*);
1191
1192 if(brk(addr) != 0 )
1193 return NIL(Void_t*);
1194 else if(nsize > csize && chkaddr(caddr, nsize) < 0 )
1195 { (void)brk((Vmuchar_t*)caddr+csize);
1196 return NIL(Void_t*);
1197 }
1198 else return caddr;
1199 }
1200 #endif /* _mem_sbrk */
1201
1202 #if _mem_mmap_anon || _mem_mmap_zero /* get space using mmap */
1203 #include <fcntl.h>
1204 #include <sys/mman.h>
1205
1206 #ifndef MAP_ANON
1207 #ifdef MAP_ANONYMOUS
1208 #define MAP_ANON MAP_ANONYMOUS
1209 #else
1210 #define MAP_ANON 0
1211 #endif
1212 #endif /*MAP_ANON*/
1213
1214 #ifndef OPEN_MAX
1215 #define OPEN_MAX 64
1216 #endif
1217 #define FD_INIT (-1) /* uninitialized file desc */
1218 #define FD_NONE (-2) /* no mapping with file desc */
1219
1220 typedef struct _mmdisc_s
1221 { Vmdisc_t disc;
1222 int fd;
1223 off_t offset;
1224 } Mmdisc_t;
1225
1226 static Void_t* mmapmem(Void_t* caddr, size_t csize, size_t nsize, Mmdisc_t* mmdc)
1227 {
1228 #if _mem_mmap_zero
1229 if(mmdc) /* /dev/zero mapping */
1230 { if(mmdc->fd == FD_INIT ) /* open /dev/zero for mapping */
1231 { int fd;
1232 if((fd = open("/dev/zero", O_RDONLY)) < 0 )
1233 { mmdc->fd = FD_NONE;
1234 return NIL(Void_t*);
1235 }
1236 mmdc->fd = _vmfd(fd);
1237 }
1238
1239 if(mmdc->fd == FD_NONE)
1240 return NIL(Void_t*);
1241 }
1242 #endif /* _mem_mmap_zero */
1243
1244 /**/ASSERT(csize > 0 || nsize > 0);
1245 if(csize == 0)
1246 { nsize = ROUND(nsize, _Vmpagesize);
1247 caddr = NIL(Void_t*);
1248 #if _mem_mmap_zero
1249 if(mmdc && mmdc->fd >= 0 )
1250 caddr = mmap(0, nsize, PROT_READ|PROT_WRITE, MAP_PRIVATE, mmdc->fd, mmdc->offset);
1251 #endif
1252 #if _mem_mmap_anon
1253 if(!mmdc )
1254 caddr = mmap(0, nsize, PROT_READ|PROT_WRITE, MAP_ANON|MAP_PRIVATE, -1, 0);
1255 #endif
1256 if(!caddr || caddr == (Void_t*)(-1))
1257 return NIL(Void_t*);
1258 else if(chkaddr((Vmuchar_t*)caddr, nsize) < 0 )
1259 { (void)munmap(caddr, nsize);
1260 return NIL(Void_t*);
1261 }
1262 else
1263 { if(mmdc)
1264 mmdc->offset += nsize;
1265 return caddr;
1266 }
1267 }
1268 else if(nsize == 0)
1269 { Vmuchar_t *addr = (Vmuchar_t*)sbrk(0);
1270 if(addr < (Vmuchar_t*)caddr ) /* in sbrk space */
1271 return NIL(Void_t*);
1272 else
1273 { (void)munmap(caddr, csize);
1274 return caddr;
1275 }
1276 }
1277 else return NIL(Void_t*);
1278 }
1279 #endif /* _mem_map_anon || _mem_mmap_zero */
1280
1281 #if _std_malloc /* using native malloc as a last resource */
1282 static Void_t* mallocmem(Void_t* caddr, size_t csize, size_t nsize)
1283 {
1284 /**/ASSERT(csize > 0 || nsize > 0);
1285 if(csize == 0)
1286 return (Void_t*)malloc(nsize);
1287 else if(nsize == 0)
1288 { free(caddr);
1289 return caddr;
1290 }
1291 else return NIL(Void_t*);
1292 }
1293 #endif
1294
1295 /* A discipline to get raw memory using VirtualAlloc/mmap/sbrk */
1296 static Void_t* getmemory(Vmalloc_t* vm, Void_t* caddr, size_t csize, size_t nsize, Vmdisc_t* disc)
1297 {
1298 Vmuchar_t *addr;
1299
1300 if((csize > 0 && !caddr) || (csize == 0 && nsize == 0) )
1301 return NIL(Void_t*);
1302
1303 #if _mem_win32
1304 if((addr = win32mem(caddr, csize, nsize)) )
1305 return (Void_t*)addr;
1306 #endif
1307 #if _mem_sbrk
1308 #if 1 /* no sbrk() unless explicit VM_break */
1309 if((_Vmassert & VM_break) && (addr = sbrkmem(caddr, csize, nsize)) )
1310 #else /* asoinit(0,0,0)==0 => the application did not request a specific aso method => sbrk() ok */
1311 if(((_Vmassert & VM_break) || !(_Vmassert & VM_mmap) && !asoinit(0, 0, 0)) && (addr = sbrkmem(caddr, csize, nsize)) )
1312 #endif
1313 return (Void_t*)addr;
1314 #endif
1315 #if _mem_mmap_anon
1316 if((addr = mmapmem(caddr, csize, nsize, (Mmdisc_t*)0)) )
1317 return (Void_t*)addr;
1318 #endif
1319 #if _mem_mmap_zero
1320 if((addr = mmapmem(caddr, csize, nsize, (Mmdisc_t*)disc)) )
1321 return (Void_t*)addr;
1322 #endif
1323 #if _mem_sbrk
1324 if(!(_Vmassert & VM_break) && (addr = sbrkmem(caddr, csize, nsize)) )
1325 return (Void_t*)addr;
1326 #endif
1327 #if _std_malloc
1328 if((addr = mallocmem(caddr, csize, nsize)) )
1329 return (Void_t*)addr;
1330 #endif
1331 return NIL(Void_t*);
1332 }
1333
1334 #if _mem_mmap_zero || _mem_mmap_anon
1335 static Mmdisc_t _Vmdcsystem = { { getmemory, NIL(Vmexcept_f), 64*1024, sizeof(Mmdisc_t) }, FD_INIT, 0 };
1336 #else
1337 static Vmdisc_t _Vmdcsystem = { getmemory, NIL(Vmexcept_f), 0, sizeof(Vmdisc_t) };
1338 #endif
1339
1340 static Vmethod_t _Vmbest =
1341 {
1342 bestalloc,
1343 bestresize,
1344 bestfree,
1345 bestaddr,
1346 bestsize,
1347 bestcompact,
1348 bestalign,
1349 VM_MTBEST
1350 };
1351
1352 /* The heap region */
1353 static Vmdata_t _Vmdata =
1354 {
1355 0, /* lock */
1356 VM_MTBEST|VM_SHARE, /* mode */
1357 0, /* incr */
1358 0, /* pool */
1359 NIL(Seg_t*), /* seg */
1360 NIL(Block_t*), /* free */
1361 NIL(Block_t*), /* wild */
1362 NIL(Block_t*) /* root */
1363 /* tiny[] */
1364 /* cache[] */
1365 };
1366 Vmalloc_t _Vmheap =
1367 {
1368 { bestalloc,
1369 bestresize,
1370 bestfree,
1371 bestaddr,
1372 bestsize,
1373 bestcompact,
1374 bestalign,
1375 VM_MTBEST
1376 },
1377 NIL(char*), /* file */
1378 0, /* line */
1379 0, /* func */
1380 (Vmdisc_t*)(&_Vmdcsystem), /* disc */
1381 &_Vmdata, /* data */
1382 NIL(Vmalloc_t*) /* next */
1383 };
1384
1385 __DEFINE__(Vmalloc_t*, Vmheap, &_Vmheap);
1386 __DEFINE__(Vmalloc_t*, Vmregion, &_Vmheap);
1387 __DEFINE__(Vmethod_t*, Vmbest, &_Vmbest);
1388 __DEFINE__(Vmdisc_t*, Vmdcsystem, (Vmdisc_t*)(&_Vmdcsystem) );
1389 __DEFINE__(Vmdisc_t*, Vmdcsbrk, (Vmdisc_t*)(&_Vmdcsystem) );
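/* Vmheap above is the predefined heap region; Vmregion is an alias for it.
** A sketch of direct use (assuming the public interface in vmalloc.h):
**	Void_t* p = vmalloc(Vmheap, n);
**	...
**	vmfree(Vmheap, p);
*/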
1390
1391 #ifdef NoF
1392 NoF(vmbest)
1393 #endif
1394
1395 #endif
1396