xref: /titanic_41/usr/src/lib/libast/common/vmalloc/vmbest.c (revision 3e14f97f673e8a630f076077de35afdd43dc1587)
1da2e3ebdSchin /***********************************************************************
2da2e3ebdSchin *                                                                      *
3da2e3ebdSchin *               This software is part of the ast package               *
4*3e14f97fSRoger A. Faulkner *          Copyright (c) 1985-2010 AT&T Intellectual Property          *
5da2e3ebdSchin *                      and is licensed under the                       *
6da2e3ebdSchin *                  Common Public License, Version 1.0                  *
77c2fbfb3SApril Chin *                    by AT&T Intellectual Property                     *
8da2e3ebdSchin *                                                                      *
9da2e3ebdSchin *                A copy of the License is available at                 *
10da2e3ebdSchin *            http://www.opensource.org/licenses/cpl1.0.txt             *
11da2e3ebdSchin *         (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9)         *
12da2e3ebdSchin *                                                                      *
13da2e3ebdSchin *              Information and Software Systems Research               *
14da2e3ebdSchin *                            AT&T Research                             *
15da2e3ebdSchin *                           Florham Park NJ                            *
16da2e3ebdSchin *                                                                      *
17da2e3ebdSchin *                 Glenn Fowler <gsf@research.att.com>                  *
18da2e3ebdSchin *                  David Korn <dgk@research.att.com>                   *
19da2e3ebdSchin *                   Phong Vo <kpv@research.att.com>                    *
20da2e3ebdSchin *                                                                      *
21da2e3ebdSchin ***********************************************************************/
22da2e3ebdSchin #if defined(_UWIN) && defined(_BLD_ast)
23da2e3ebdSchin 
24da2e3ebdSchin void _STUB_vmbest(){}
25da2e3ebdSchin 
26da2e3ebdSchin #else
27da2e3ebdSchin 
28da2e3ebdSchin #include	"vmhdr.h"
29da2e3ebdSchin 
30da2e3ebdSchin /*	Best-fit allocation method. This is based on a best-fit strategy
31da2e3ebdSchin **	using a splay tree for storage of lists of free blocks of the same
32da2e3ebdSchin **	size. Recent free blocks may be cached for fast reuse.
33da2e3ebdSchin **
34da2e3ebdSchin **	Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
35da2e3ebdSchin */
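
/*	Free storage is kept at several levels: very small blocks go on the
**	TINY(vd) lists indexed by size; recently freed blocks stay marked
**	BUSY|JUNK on the CACHE(vd) lists (CACHE(vd)[S_CACHE] is the catch-all)
**	until bestreclaim() coalesces them; larger free blocks live in the
**	size-ordered splay tree rooted at vd->root; a free block bordering the
**	end of the arena is kept aside as the wild block vd->wild; and
**	vd->free holds the single most recently freed block for quick reuse.
*/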
36da2e3ebdSchin 
37da2e3ebdSchin #ifdef DEBUG
38da2e3ebdSchin static int	N_free;		/* # of free calls			*/
39da2e3ebdSchin static int	N_alloc;	/* # of alloc calls			*/
40da2e3ebdSchin static int	N_resize;	/* # of resize calls			*/
41da2e3ebdSchin static int	N_wild;		/* # allocated from the wild block	*/
42da2e3ebdSchin static int	N_last;		/* # allocated from last free block	*/
43da2e3ebdSchin static int	N_reclaim;	/* # of bestreclaim calls		*/
44da2e3ebdSchin 
45da2e3ebdSchin #undef	VM_TRUST		/* always check for locking, etc.	*/
46da2e3ebdSchin #define	VM_TRUST	0
47da2e3ebdSchin #endif /*DEBUG*/
48da2e3ebdSchin 
49*3e14f97fSRoger A. Faulkner #if _BLD_posix
50*3e14f97fSRoger A. Faulkner #define logmsg(d,a ...)	logsrc(d,__FILE__,__LINE__,a)
51*3e14f97fSRoger A. Faulkner 
52*3e14f97fSRoger A. Faulkner extern int	logsrc(int, const char*, int, const char*, ...);
53*3e14f97fSRoger A. Faulkner #endif /*_BLD_posix*/
54*3e14f97fSRoger A. Faulkner 
55da2e3ebdSchin #define COMPACT		8	/* factor to decide when to compact	*/
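/* e.g. bestcompact() returns the wild block's space to the memory system only
** when its size exceeds both COMPACT*vd->incr and COMPACT*vd->pool, and
** bestfree() triggers a compaction only after the wild block has grown past
** COMPACT*vd->incr.
*/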
56da2e3ebdSchin 
57da2e3ebdSchin /* Check to see if a block is in the free tree */
58da2e3ebdSchin #if __STD_C
59da2e3ebdSchin static int vmintree(Block_t* node, Block_t* b)
60da2e3ebdSchin #else
61da2e3ebdSchin static int vmintree(node,b)
62da2e3ebdSchin Block_t*	node;
63da2e3ebdSchin Block_t*	b;
64da2e3ebdSchin #endif
65da2e3ebdSchin {	Block_t*	t;
66da2e3ebdSchin 
67da2e3ebdSchin 	for(t = node; t; t = LINK(t))
68da2e3ebdSchin 		if(t == b)
69da2e3ebdSchin 			return 1;
70da2e3ebdSchin 	if(LEFT(node) && vmintree(LEFT(node),b))
71da2e3ebdSchin 		return 1;
72da2e3ebdSchin 	if(RIGHT(node) && vmintree(RIGHT(node),b))
73da2e3ebdSchin 		return 1;
74da2e3ebdSchin 	return 0;
75da2e3ebdSchin }
76da2e3ebdSchin 
77da2e3ebdSchin #if __STD_C
78da2e3ebdSchin static int vmonlist(Block_t* list, Block_t* b)
79da2e3ebdSchin #else
80da2e3ebdSchin static int vmonlist(list,b)
81da2e3ebdSchin Block_t*	list;
82da2e3ebdSchin Block_t*	b;
83da2e3ebdSchin #endif
84da2e3ebdSchin {
85da2e3ebdSchin 	for(; list; list = LINK(list))
86da2e3ebdSchin 		if(list == b)
87da2e3ebdSchin 			return 1;
88da2e3ebdSchin 	return 0;
89da2e3ebdSchin }
90da2e3ebdSchin 
91da2e3ebdSchin /* Check to see if a block is known to be free */
92da2e3ebdSchin #if __STD_C
93da2e3ebdSchin static int vmisfree(Vmdata_t* vd, Block_t* b)
94da2e3ebdSchin #else
95da2e3ebdSchin static int vmisfree(vd,b)
96da2e3ebdSchin Vmdata_t*	vd;
97da2e3ebdSchin Block_t*	b;
98da2e3ebdSchin #endif
99da2e3ebdSchin {
100da2e3ebdSchin 	if(SIZE(b) & (BUSY|JUNK|PFREE))
101da2e3ebdSchin 		return 0;
102da2e3ebdSchin 
103da2e3ebdSchin 	if(b == vd->wild)
104da2e3ebdSchin 		return 1;
105da2e3ebdSchin 
106da2e3ebdSchin 	if(SIZE(b) < MAXTINY)
107da2e3ebdSchin 		return vmonlist(TINY(vd)[INDEX(SIZE(b))], b);
108da2e3ebdSchin 
109da2e3ebdSchin 	if(vd->root)
110da2e3ebdSchin 		return vmintree(vd->root, b);
111da2e3ebdSchin 
112da2e3ebdSchin 	return 0;
113da2e3ebdSchin }
114da2e3ebdSchin 
115da2e3ebdSchin /* Check to see if a block is known to be junked */
116da2e3ebdSchin #if __STD_C
117da2e3ebdSchin static int vmisjunk(Vmdata_t* vd, Block_t* b)
118da2e3ebdSchin #else
119da2e3ebdSchin static int vmisjunk(vd,b)
120da2e3ebdSchin Vmdata_t*	vd;
121da2e3ebdSchin Block_t*	b;
122da2e3ebdSchin #endif
123da2e3ebdSchin {
124da2e3ebdSchin 	Block_t*	t;
125da2e3ebdSchin 
126da2e3ebdSchin 	if((SIZE(b)&BUSY) == 0 || (SIZE(b)&JUNK) == 0)
127da2e3ebdSchin 		return 0;
128da2e3ebdSchin 
129da2e3ebdSchin 	if(b == vd->free) /* recently freed */
130da2e3ebdSchin 		return 1;
131da2e3ebdSchin 
132da2e3ebdSchin 	/* check the list that b is supposed to be in */
133da2e3ebdSchin 	for(t = CACHE(vd)[C_INDEX(SIZE(b))]; t; t = LINK(t))
134da2e3ebdSchin 		if(t == b)
135da2e3ebdSchin 			return 1;
136da2e3ebdSchin 
137da2e3ebdSchin 	/* on occasion, b may be put onto the catch-all list */
138da2e3ebdSchin 	if(C_INDEX(SIZE(b)) < S_CACHE)
139da2e3ebdSchin 		for(t = CACHE(vd)[S_CACHE]; t; t = LINK(t))
140da2e3ebdSchin 			if(t == b)
141da2e3ebdSchin 				return 1;
142da2e3ebdSchin 
143da2e3ebdSchin 	return 0;
144da2e3ebdSchin }
145da2e3ebdSchin 
146da2e3ebdSchin /* check to see if the free tree is in good shape */
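/* Invariant: the tree is ordered strictly by block size -- every size in the
** left subtree is smaller and every size in the right subtree is larger than
** the node's -- and blocks of equal size are chained off the node via LINK.
*/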
147da2e3ebdSchin #if __STD_C
148da2e3ebdSchin static int vmchktree(Block_t* node)
149da2e3ebdSchin #else
150da2e3ebdSchin static int vmchktree(node)
151da2e3ebdSchin Block_t*	node;
152da2e3ebdSchin #endif
153da2e3ebdSchin {	Block_t*	t;
154da2e3ebdSchin 
155da2e3ebdSchin 	if(SIZE(node) & BITS)
156da2e3ebdSchin 		{ /**/ASSERT(0); return -1; }
157da2e3ebdSchin 
158da2e3ebdSchin 	for(t = LINK(node); t; t = LINK(t))
159da2e3ebdSchin 		if(SIZE(t) != SIZE(node))
160da2e3ebdSchin 			{ /**/ASSERT(0); return -1; }
161da2e3ebdSchin 
162da2e3ebdSchin 	if((t = LEFT(node)) )
163da2e3ebdSchin 	{	if(SIZE(t) >= SIZE(node) )
164da2e3ebdSchin 			{ /**/ASSERT(0); return -1; }
165da2e3ebdSchin 		else	return vmchktree(t);
166da2e3ebdSchin 	}
167da2e3ebdSchin 	if((t = RIGHT(node)) )
168da2e3ebdSchin 	{	if(SIZE(t) <= SIZE(node) )
169da2e3ebdSchin 			{ /**/ASSERT(0); return -1; }
170da2e3ebdSchin 		else	return vmchktree(t);
171da2e3ebdSchin 	}
172da2e3ebdSchin 
173da2e3ebdSchin 	return 0;
174da2e3ebdSchin }
175da2e3ebdSchin 
176da2e3ebdSchin #if __STD_C
177da2e3ebdSchin int _vmbestcheck(Vmdata_t* vd, Block_t* freeb)
178da2e3ebdSchin #else
179da2e3ebdSchin int _vmbestcheck(vd, freeb)
180da2e3ebdSchin Vmdata_t*	vd;
181da2e3ebdSchin Block_t*	freeb; /* known to be free but not on any free list */
182da2e3ebdSchin #endif
183da2e3ebdSchin {
184da2e3ebdSchin 	reg Seg_t	*seg;
185da2e3ebdSchin 	reg Block_t	*b, *endb, *nextb;
186da2e3ebdSchin 	int		rv = 0;
187da2e3ebdSchin 
188da2e3ebdSchin 	if(!CHECK())
189da2e3ebdSchin 		return 0;
190da2e3ebdSchin 
191da2e3ebdSchin 	/* make sure the free tree is still in shape */
192da2e3ebdSchin 	if(vd->root && vmchktree(vd->root) < 0 )
193da2e3ebdSchin 		{ rv = -1; /**/ASSERT(0); }
194da2e3ebdSchin 
195da2e3ebdSchin 	for(seg = vd->seg; seg && rv == 0; seg = seg->next)
196da2e3ebdSchin 	{	b = SEGBLOCK(seg);
197da2e3ebdSchin 		endb = (Block_t*)(seg->baddr - sizeof(Head_t));
198da2e3ebdSchin 		for(; b < endb && rv == 0; b = nextb)
199da2e3ebdSchin 		{	nextb = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) );
200da2e3ebdSchin 
201da2e3ebdSchin 			if(!ISBUSY(SIZE(b)) ) /* a completely free block */
202da2e3ebdSchin 			{	/* there should be no marked bits of any type */
203da2e3ebdSchin 				if(SIZE(b) & (BUSY|JUNK|PFREE) )
204da2e3ebdSchin 					{ rv = -1; /**/ASSERT(0); }
205da2e3ebdSchin 
206da2e3ebdSchin 				/* next block must be busy and marked PFREE */
207da2e3ebdSchin 				if(!ISBUSY(SIZE(nextb)) || !ISPFREE(SIZE(nextb)) )
208da2e3ebdSchin 					{ rv = -1; /**/ASSERT(0); }
209da2e3ebdSchin 
210da2e3ebdSchin 				/* must have a self-reference pointer */
211da2e3ebdSchin 				if(*SELF(b) != b)
212da2e3ebdSchin 					{ rv = -1; /**/ASSERT(0); }
213da2e3ebdSchin 
214da2e3ebdSchin 				/* segment pointer should be well-defined */
215da2e3ebdSchin 				if(!TINIEST(b) && SEG(b) != seg)
216da2e3ebdSchin 					{ rv = -1; /**/ASSERT(0); }
217da2e3ebdSchin 
218da2e3ebdSchin 				/* must be on a free list */
219da2e3ebdSchin 				if(b != freeb && !vmisfree(vd, b) )
220da2e3ebdSchin 					{ rv = -1; /**/ASSERT(0); }
221da2e3ebdSchin 			}
222da2e3ebdSchin 			else
223da2e3ebdSchin 			{	/* segment pointer should be well-defined */
224da2e3ebdSchin 				if(SEG(b) != seg)
225da2e3ebdSchin 					{ rv = -1; /**/ASSERT(0); }
226da2e3ebdSchin 
227da2e3ebdSchin 				/* next block should not be marked PFREE */
228da2e3ebdSchin 				if(ISPFREE(SIZE(nextb)) )
229da2e3ebdSchin 					{ rv = -1; /**/ASSERT(0); }
230da2e3ebdSchin 
231da2e3ebdSchin 				/* if PFREE, last block should be free */
232da2e3ebdSchin 				if(ISPFREE(SIZE(b)) && LAST(b) != freeb &&
233da2e3ebdSchin 				   !vmisfree(vd, LAST(b)) )
234da2e3ebdSchin 					{ rv = -1; /**/ASSERT(0); }
235da2e3ebdSchin 
236da2e3ebdSchin 				/* if free but unreclaimed, should be junk */
237da2e3ebdSchin 				if(ISJUNK(SIZE(b)) && !vmisjunk(vd, b))
238da2e3ebdSchin 					{ rv = -1; /**/ASSERT(0); }
239da2e3ebdSchin 			}
240da2e3ebdSchin 		}
241da2e3ebdSchin 	}
242da2e3ebdSchin 
243da2e3ebdSchin 	return rv;
244da2e3ebdSchin }
245da2e3ebdSchin 
246da2e3ebdSchin /* Tree rotation functions */
247da2e3ebdSchin #define RROTATE(x,y)	(LEFT(x) = RIGHT(y), RIGHT(y) = (x), (x) = (y))
248da2e3ebdSchin #define LROTATE(x,y)	(RIGHT(x) = LEFT(y), LEFT(y) = (x), (x) = (y))
249da2e3ebdSchin #define RLINK(s,x)	((s) = LEFT(s) = (x))
250da2e3ebdSchin #define LLINK(s,x)	((s) = RIGHT(s) = (x))
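
/* bestsearch() below uses these in a top-down splay of the size tree:
** RROTATE/LROTATE rotate the current root with its left/right child, while
** RLINK/LLINK hang the bypassed root onto the auxiliary trees of larger and
** smaller blocks (anchored at the local "link" node) that are grafted back
** around the final root once the search ends.
*/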
251da2e3ebdSchin 
252da2e3ebdSchin /* Find and delete a suitable element in the free tree. */
253da2e3ebdSchin #if __STD_C
254da2e3ebdSchin static Block_t* bestsearch(Vmdata_t* vd, reg size_t size, Block_t* wanted)
255da2e3ebdSchin #else
256da2e3ebdSchin static Block_t* bestsearch(vd, size, wanted)
257da2e3ebdSchin Vmdata_t*	vd;
258da2e3ebdSchin reg size_t	size;
259da2e3ebdSchin Block_t*	wanted;
260da2e3ebdSchin #endif
261da2e3ebdSchin {
262da2e3ebdSchin 	reg size_t	s;
263da2e3ebdSchin 	reg Block_t	*t, *root, *l, *r;
264da2e3ebdSchin 	Block_t		link;
265da2e3ebdSchin 
266da2e3ebdSchin 	/* extracting a tiniest block from its list */
267da2e3ebdSchin 	if((root = wanted) && size == TINYSIZE)
268da2e3ebdSchin 	{	reg Seg_t*	seg;
269da2e3ebdSchin 
270da2e3ebdSchin 		l = TLEFT(root);
271da2e3ebdSchin 		if((r = LINK(root)) )
272da2e3ebdSchin 			TLEFT(r) = l;
273da2e3ebdSchin 		if(l)
274da2e3ebdSchin 			LINK(l) = r;
275da2e3ebdSchin 		else	TINY(vd)[0] = r;
276da2e3ebdSchin 
277da2e3ebdSchin 		seg = vd->seg;
278da2e3ebdSchin 		if(!seg->next)
279da2e3ebdSchin 			SEG(root) = seg;
280da2e3ebdSchin 		else for(;; seg = seg->next)
281da2e3ebdSchin 		{	if((Vmuchar_t*)root > (Vmuchar_t*)seg->addr &&
282da2e3ebdSchin 			   (Vmuchar_t*)root < seg->baddr)
283da2e3ebdSchin 			{	SEG(root) = seg;
284da2e3ebdSchin 				break;
285da2e3ebdSchin 			}
286da2e3ebdSchin 		}
287da2e3ebdSchin 
288da2e3ebdSchin 		return root;
289da2e3ebdSchin 	}
290da2e3ebdSchin 
291da2e3ebdSchin 	/**/ASSERT(!vd->root || vmchktree(vd->root) == 0);
292da2e3ebdSchin 
293da2e3ebdSchin 	/* find the right one to delete */
294da2e3ebdSchin 	l = r = &link;
295da2e3ebdSchin 	if((root = vd->root) ) do
296da2e3ebdSchin 	{	/**/ ASSERT(!ISBITS(size) && !ISBITS(SIZE(root)));
297da2e3ebdSchin 		if(size == (s = SIZE(root)) )
298da2e3ebdSchin 			break;
299da2e3ebdSchin 		if(size < s)
300da2e3ebdSchin 		{	if((t = LEFT(root)) )
301da2e3ebdSchin 			{	if(size <= (s = SIZE(t)) )
302da2e3ebdSchin 				{	RROTATE(root,t);
303da2e3ebdSchin 					if(size == s)
304da2e3ebdSchin 						break;
305da2e3ebdSchin 					t = LEFT(root);
306da2e3ebdSchin 				}
307da2e3ebdSchin 				else
308da2e3ebdSchin 				{	LLINK(l,t);
309da2e3ebdSchin 					t = RIGHT(t);
310da2e3ebdSchin 				}
311da2e3ebdSchin 			}
312da2e3ebdSchin 			RLINK(r,root);
313da2e3ebdSchin 		}
314da2e3ebdSchin 		else
315da2e3ebdSchin 		{	if((t = RIGHT(root)) )
316da2e3ebdSchin 			{	if(size >= (s = SIZE(t)) )
317da2e3ebdSchin 				{	LROTATE(root,t);
318da2e3ebdSchin 					if(size == s)
319da2e3ebdSchin 						break;
320da2e3ebdSchin 					t = RIGHT(root);
321da2e3ebdSchin 				}
322da2e3ebdSchin 				else
323da2e3ebdSchin 				{	RLINK(r,t);
324da2e3ebdSchin 					t = LEFT(t);
325da2e3ebdSchin 				}
326da2e3ebdSchin 			}
327da2e3ebdSchin 			LLINK(l,root);
328da2e3ebdSchin 		}
329da2e3ebdSchin 		/**/ ASSERT(root != t);
330da2e3ebdSchin 	} while((root = t) );
331da2e3ebdSchin 
332da2e3ebdSchin 	if(root)	/* found it, now isolate it */
333da2e3ebdSchin 	{	RIGHT(l) = LEFT(root);
334da2e3ebdSchin 		LEFT(r) = RIGHT(root);
335da2e3ebdSchin 	}
336da2e3ebdSchin 	else		/* nothing exactly fit	*/
337da2e3ebdSchin 	{	LEFT(r) = NIL(Block_t*);
338da2e3ebdSchin 		RIGHT(l) = NIL(Block_t*);
339da2e3ebdSchin 
340da2e3ebdSchin 		/* grab the least one from the right tree */
341da2e3ebdSchin 		if((root = LEFT(&link)) )
342da2e3ebdSchin 		{	while((t = LEFT(root)) )
343da2e3ebdSchin 				RROTATE(root,t);
344da2e3ebdSchin 			LEFT(&link) = RIGHT(root);
345da2e3ebdSchin 		}
346da2e3ebdSchin 	}
347da2e3ebdSchin 
348da2e3ebdSchin 	if(root && (r = LINK(root)) )
349da2e3ebdSchin 	{	/* head of a linked list, use the next one as root */
350da2e3ebdSchin 		LEFT(r) = RIGHT(&link);
351da2e3ebdSchin 		RIGHT(r) = LEFT(&link);
352da2e3ebdSchin 	}
353da2e3ebdSchin 	else if(!(r = LEFT(&link)) )
354da2e3ebdSchin 		r = RIGHT(&link);
355da2e3ebdSchin 	else /* graft left tree to right tree */
356da2e3ebdSchin 	{	while((t = LEFT(r)) )
357da2e3ebdSchin 			RROTATE(r,t);
358da2e3ebdSchin 		LEFT(r) = RIGHT(&link);
359da2e3ebdSchin 	}
360da2e3ebdSchin 	vd->root = r; /**/ASSERT(!r || !ISBITS(SIZE(r)));
361da2e3ebdSchin 
362da2e3ebdSchin 	/**/ASSERT(!vd->root || vmchktree(vd->root) == 0);
363da2e3ebdSchin 	/**/ASSERT(!wanted || wanted == root);
364da2e3ebdSchin 
365da2e3ebdSchin 	return root;
366da2e3ebdSchin }
367da2e3ebdSchin 
368da2e3ebdSchin /* Reclaim all delayed free blocks into the free tree */
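/* The caches CACHE(vd)[c] through CACHE(vd)[S_CACHE] are flushed; each junked
** block is merged with any free neighbors and then filed on a tiny list, in
** the free tree, or as the wild block. The return value tells whether the
** block "wanted", if any, was seen during the scan, so the caller knows it is
** now whole and free.
*/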
369da2e3ebdSchin #if __STD_C
370da2e3ebdSchin static int bestreclaim(reg Vmdata_t* vd, Block_t* wanted, int c)
371da2e3ebdSchin #else
372da2e3ebdSchin static int bestreclaim(vd, wanted, c)
373da2e3ebdSchin reg Vmdata_t*	vd;
374da2e3ebdSchin Block_t*	wanted;
375da2e3ebdSchin int		c;
376da2e3ebdSchin #endif
377da2e3ebdSchin {
378da2e3ebdSchin 	reg size_t	size, s;
379da2e3ebdSchin 	reg Block_t	*fp, *np, *t, *list;
380da2e3ebdSchin 	reg int		n, saw_wanted;
381da2e3ebdSchin 	reg Seg_t	*seg;
382da2e3ebdSchin 
383da2e3ebdSchin 	/**/COUNT(N_reclaim);
384da2e3ebdSchin 	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
385da2e3ebdSchin 
386da2e3ebdSchin 	if((fp = vd->free) )
387da2e3ebdSchin 	{	LINK(fp) = CACHE(vd)[S_CACHE]; CACHE(vd)[S_CACHE] = fp;
388da2e3ebdSchin 		vd->free = NIL(Block_t*);
389da2e3ebdSchin 	}
390da2e3ebdSchin 
391da2e3ebdSchin 	saw_wanted = wanted ? 0 : 1;
392da2e3ebdSchin 	for(n = S_CACHE; n >= c; --n)
393da2e3ebdSchin 	{	list = CACHE(vd)[n]; CACHE(vd)[n] = NIL(Block_t*);
394da2e3ebdSchin 		while((fp = list) )
395da2e3ebdSchin 		{	/* Note that below here we allow ISJUNK blocks to be
396da2e3ebdSchin 			** forward-merged even though they are not removed from
397da2e3ebdSchin 			** the list immediately. In this way, the list is
398da2e3ebdSchin 			** scanned only once. It works because the LINK and SIZE
399da2e3ebdSchin 			** fields are not destroyed during the merging. This can
400da2e3ebdSchin 			** be seen by observing that a tiniest block has a 2-word
401da2e3ebdSchin 			** header and a 2-word body. Merging a tiniest block
402da2e3ebdSchin 			** (1seg) and the next block (2seg) looks like this:
403da2e3ebdSchin 			**	1seg  size  link  left  2seg size link left ....
404da2e3ebdSchin 			**	1seg  size  link  left  rite xxxx xxxx .... self
405da2e3ebdSchin 			** After the merge, the 2seg word is replaced by the RIGHT
406da2e3ebdSchin 			** pointer of the new block and somewhere beyond the
407da2e3ebdSchin 			** two xxxx fields, the SELF pointer will replace some
408da2e3ebdSchin 			** other word. The important part is that the two xxxx
409da2e3ebdSchin 			** fields are kept intact.
410da2e3ebdSchin 			*/
411da2e3ebdSchin 			list = LINK(list); /**/ASSERT(!vmonlist(list,fp));
412da2e3ebdSchin 
413da2e3ebdSchin 			size = SIZE(fp);
414da2e3ebdSchin 			if(!ISJUNK(size))	/* already done */
415da2e3ebdSchin 				continue;
416da2e3ebdSchin 
417da2e3ebdSchin 			if(_Vmassert & VM_region)
418da2e3ebdSchin 			{	/* see if this address is from region */
419da2e3ebdSchin 				for(seg = vd->seg; seg; seg = seg->next)
420da2e3ebdSchin 					if(fp >= SEGBLOCK(seg) && fp < (Block_t*)seg->baddr )
421da2e3ebdSchin 						break;
422da2e3ebdSchin 				if(!seg) /* must be a bug in application code! */
423da2e3ebdSchin 				{	/**/ ASSERT(seg != NIL(Seg_t*));
424da2e3ebdSchin 					continue;
425da2e3ebdSchin 				}
426da2e3ebdSchin 			}
427da2e3ebdSchin 
428da2e3ebdSchin 			if(ISPFREE(size))	/* backward merge */
429da2e3ebdSchin 			{	fp = LAST(fp);
430*3e14f97fSRoger A. Faulkner #if _BLD_posix
431*3e14f97fSRoger A. Faulkner 				if (fp < (Block_t*)0x00120000)
432*3e14f97fSRoger A. Faulkner 				{
433*3e14f97fSRoger A. Faulkner 					logmsg(0, "bestreclaim fp=%p", fp);
434*3e14f97fSRoger A. Faulkner 					ASSERT(!fp);
435*3e14f97fSRoger A. Faulkner 				}
436*3e14f97fSRoger A. Faulkner #endif
437da2e3ebdSchin 				s = SIZE(fp); /**/ASSERT(!(s&BITS));
438da2e3ebdSchin 				REMOVE(vd,fp,INDEX(s),t,bestsearch);
439da2e3ebdSchin 				size = (size&~BITS) + s + sizeof(Head_t);
440da2e3ebdSchin 			}
441da2e3ebdSchin 			else	size &= ~BITS;
442da2e3ebdSchin 
443da2e3ebdSchin 			for(;;)	/* forward merge */
444da2e3ebdSchin 			{	np = (Block_t*)((Vmuchar_t*)fp+size+sizeof(Head_t));
445*3e14f97fSRoger A. Faulkner #if _BLD_posix
446*3e14f97fSRoger A. Faulkner 				if (np < (Block_t*)0x00120000)
447*3e14f97fSRoger A. Faulkner 				{
448*3e14f97fSRoger A. Faulkner 					logmsg(0, "bestreclaim np=%p", np);
449*3e14f97fSRoger A. Faulkner 					ASSERT(!np);
450*3e14f97fSRoger A. Faulkner 				}
451*3e14f97fSRoger A. Faulkner #endif
452da2e3ebdSchin 				s = SIZE(np);	/**/ASSERT(s > 0);
453da2e3ebdSchin 				if(!ISBUSY(s))
454da2e3ebdSchin 				{	/**/ASSERT((s&BITS) == 0);
455da2e3ebdSchin 					if(np == vd->wild)
456da2e3ebdSchin 						vd->wild = NIL(Block_t*);
457da2e3ebdSchin 					else	REMOVE(vd,np,INDEX(s),t,bestsearch);
458da2e3ebdSchin 				}
459da2e3ebdSchin 				else if(ISJUNK(s))
460da2e3ebdSchin 				{	/* reclaim any touched junk list */
461da2e3ebdSchin 					if((int)C_INDEX(s) < c)
462da2e3ebdSchin 						c = C_INDEX(s);
463da2e3ebdSchin 					SIZE(np) = 0;
464da2e3ebdSchin 					CLRBITS(s);
465da2e3ebdSchin 				}
466da2e3ebdSchin 				else	break;
467da2e3ebdSchin 				size += s + sizeof(Head_t);
468da2e3ebdSchin 			}
469da2e3ebdSchin 			SIZE(fp) = size;
470da2e3ebdSchin 
471da2e3ebdSchin 			/* tell next block that this one is free */
472da2e3ebdSchin 			np = NEXT(fp);	/**/ASSERT(ISBUSY(SIZE(np)));
473da2e3ebdSchin 					/**/ASSERT(!ISJUNK(SIZE(np)));
474da2e3ebdSchin 			SETPFREE(SIZE(np));
475da2e3ebdSchin 			*(SELF(fp)) = fp;
476da2e3ebdSchin 
477da2e3ebdSchin 			if(fp == wanted) /* to be consumed soon */
478da2e3ebdSchin 			{	/**/ASSERT(!saw_wanted); /* should be seen just once */
479da2e3ebdSchin 				saw_wanted = 1;
480da2e3ebdSchin 				continue;
481da2e3ebdSchin 			}
482da2e3ebdSchin 
483da2e3ebdSchin 			/* wilderness preservation */
484da2e3ebdSchin 			if(np->body.data >= vd->seg->baddr)
485da2e3ebdSchin 			{	vd->wild = fp;
486da2e3ebdSchin 				continue;
487da2e3ebdSchin 			}
488da2e3ebdSchin 
489da2e3ebdSchin 			/* tiny block goes to tiny list */
490da2e3ebdSchin 			if(size < MAXTINY)
491da2e3ebdSchin 			{	s = INDEX(size);
492da2e3ebdSchin 				np = LINK(fp) = TINY(vd)[s];
493da2e3ebdSchin 				if(s == 0)	/* TINIEST block */
494da2e3ebdSchin 				{	if(np)
495da2e3ebdSchin 						TLEFT(np) = fp;
496da2e3ebdSchin 					TLEFT(fp) = NIL(Block_t*);
497da2e3ebdSchin 				}
498da2e3ebdSchin 				else
499da2e3ebdSchin 				{	if(np)
500da2e3ebdSchin 						LEFT(np)  = fp;
501da2e3ebdSchin 					LEFT(fp) = NIL(Block_t*);
502da2e3ebdSchin 					SETLINK(fp);
503da2e3ebdSchin 				}
504da2e3ebdSchin 				TINY(vd)[s] = fp;
505da2e3ebdSchin 				continue;
506da2e3ebdSchin 			}
507da2e3ebdSchin 
508da2e3ebdSchin 			LEFT(fp) = RIGHT(fp) = LINK(fp) = NIL(Block_t*);
509da2e3ebdSchin 			if(!(np = vd->root) )	/* inserting into an empty tree	*/
510da2e3ebdSchin 			{	vd->root = fp;
511da2e3ebdSchin 				continue;
512da2e3ebdSchin 			}
513da2e3ebdSchin 
514da2e3ebdSchin 			size = SIZE(fp);
515da2e3ebdSchin 			while(1)	/* leaf insertion */
516da2e3ebdSchin 			{	/**/ASSERT(np != fp);
517da2e3ebdSchin 				if((s = SIZE(np)) > size)
518da2e3ebdSchin 				{	if((t = LEFT(np)) )
519da2e3ebdSchin 					{	/**/ ASSERT(np != t);
520da2e3ebdSchin 						np = t;
521da2e3ebdSchin 					}
522da2e3ebdSchin 					else
523da2e3ebdSchin 					{	LEFT(np) = fp;
524da2e3ebdSchin 						break;
525da2e3ebdSchin 					}
526da2e3ebdSchin 				}
527da2e3ebdSchin 				else if(s < size)
528da2e3ebdSchin 				{	if((t = RIGHT(np)) )
529da2e3ebdSchin 					{	/**/ ASSERT(np != t);
530da2e3ebdSchin 						np = t;
531da2e3ebdSchin 					}
532da2e3ebdSchin 					else
533da2e3ebdSchin 					{	RIGHT(np) = fp;
534da2e3ebdSchin 						break;
535da2e3ebdSchin 					}
536da2e3ebdSchin 				}
537da2e3ebdSchin 				else /* s == size */
538da2e3ebdSchin 				{	if((t = LINK(np)) )
539da2e3ebdSchin 					{	LINK(fp) = t;
540da2e3ebdSchin 						LEFT(t) = fp;
541da2e3ebdSchin 					}
542da2e3ebdSchin 					LINK(np) = fp;
543da2e3ebdSchin 					LEFT(fp) = np;
544da2e3ebdSchin 					SETLINK(fp);
545da2e3ebdSchin 					break;
546da2e3ebdSchin 				}
547da2e3ebdSchin 			}
548da2e3ebdSchin 		}
549da2e3ebdSchin 	}
550da2e3ebdSchin 
551da2e3ebdSchin 	/**/ASSERT(!wanted || saw_wanted == 1);
552da2e3ebdSchin 	/**/ASSERT(_vmbestcheck(vd, wanted) == 0);
553da2e3ebdSchin 	return saw_wanted;
554da2e3ebdSchin }
555da2e3ebdSchin 
556da2e3ebdSchin #if __STD_C
557da2e3ebdSchin static int bestcompact(Vmalloc_t* vm)
558da2e3ebdSchin #else
559da2e3ebdSchin static int bestcompact(vm)
560da2e3ebdSchin Vmalloc_t*	vm;
561da2e3ebdSchin #endif
562da2e3ebdSchin {
563da2e3ebdSchin 	reg Seg_t	*seg, *next;
564da2e3ebdSchin 	reg Block_t	*bp, *t;
565da2e3ebdSchin 	reg size_t	size, segsize, round;
5667c2fbfb3SApril Chin 	reg int		local, inuse;
567da2e3ebdSchin 	reg Vmdata_t*	vd = vm->data;
568da2e3ebdSchin 
5697c2fbfb3SApril Chin 	SETINUSE(vd, inuse);
5707c2fbfb3SApril Chin 
571da2e3ebdSchin 	if(!(local = vd->mode&VM_TRUST) )
572da2e3ebdSchin 	{	GETLOCAL(vd,local);
573da2e3ebdSchin 		if(ISLOCK(vd,local))
5747c2fbfb3SApril Chin 		{	CLRINUSE(vd, inuse);
575da2e3ebdSchin 			return -1;
5767c2fbfb3SApril Chin 		}
577da2e3ebdSchin 		SETLOCK(vd,local);
578da2e3ebdSchin 	}
579da2e3ebdSchin 
580da2e3ebdSchin 	bestreclaim(vd,NIL(Block_t*),0);
581da2e3ebdSchin 
582da2e3ebdSchin 	for(seg = vd->seg; seg; seg = next)
583da2e3ebdSchin 	{	next = seg->next;
584da2e3ebdSchin 
585da2e3ebdSchin 		bp = BLOCK(seg->baddr);
586da2e3ebdSchin 		if(!ISPFREE(SIZE(bp)) )
587da2e3ebdSchin 			continue;
588da2e3ebdSchin 
589da2e3ebdSchin 		bp = LAST(bp);	/**/ASSERT(vmisfree(vd,bp));
590da2e3ebdSchin 		size = SIZE(bp);
591da2e3ebdSchin 		if(bp == vd->wild)
592da2e3ebdSchin 		{	/* During large block allocations, _Vmextend might
593da2e3ebdSchin 			** have enlarged the rounding factor. Reducing it
594da2e3ebdSchin 			** a bit helps avoid grabbing too much raw memory.
595da2e3ebdSchin 			*/
596da2e3ebdSchin 			if((round = vm->disc->round) == 0)
597da2e3ebdSchin 				round = _Vmpagesize;
598da2e3ebdSchin 			if(size > COMPACT*vd->incr && vd->incr > round)
599da2e3ebdSchin 				vd->incr /= 2;
600da2e3ebdSchin 
601da2e3ebdSchin 			/* for the bottom segment, we don't necessarily want
602da2e3ebdSchin 			** to return raw memory too early. vd->pool has an
603da2e3ebdSchin 			** approximation of the average size of recently freed
604da2e3ebdSchin 			** blocks. If this is large, the application is managing
605da2e3ebdSchin 			** large blocks so we throttle back memory chopping
606da2e3ebdSchin 			** to avoid thrashing the underlying memory system.
607da2e3ebdSchin 			*/
608da2e3ebdSchin 			if(size <= COMPACT*vd->incr || size <= COMPACT*vd->pool)
609da2e3ebdSchin 				continue;
610da2e3ebdSchin 
611da2e3ebdSchin 			vd->wild = NIL(Block_t*);
612da2e3ebdSchin 			vd->pool = 0;
613da2e3ebdSchin 		}
614da2e3ebdSchin 		else	REMOVE(vd,bp,INDEX(size),t,bestsearch);
615da2e3ebdSchin 		CLRPFREE(SIZE(NEXT(bp)));
616da2e3ebdSchin 
617da2e3ebdSchin 		if(size < (segsize = seg->size))
618da2e3ebdSchin 			size += sizeof(Head_t);
619da2e3ebdSchin 
620da2e3ebdSchin 		if((size = (*_Vmtruncate)(vm,seg,size,0)) > 0)
621da2e3ebdSchin 		{	if(size >= segsize) /* entire segment deleted */
622da2e3ebdSchin 				continue;
623da2e3ebdSchin 			/**/ASSERT(SEG(BLOCK(seg->baddr)) == seg);
624da2e3ebdSchin 
625da2e3ebdSchin 			if((size = (seg->baddr - ((Vmuchar_t*)bp) - sizeof(Head_t))) > 0)
626da2e3ebdSchin 				SIZE(bp) = size - sizeof(Head_t);
627da2e3ebdSchin 			else	bp = NIL(Block_t*);
628da2e3ebdSchin 		}
629da2e3ebdSchin 
630da2e3ebdSchin 		if(bp)
631da2e3ebdSchin 		{	/**/ ASSERT(SIZE(bp) >= BODYSIZE);
632da2e3ebdSchin 			/**/ ASSERT(SEGWILD(bp));
633da2e3ebdSchin 			/**/ ASSERT(!vd->root || !vmintree(vd->root,bp));
634da2e3ebdSchin 			SIZE(bp) |= BUSY|JUNK;
635da2e3ebdSchin 			LINK(bp) = CACHE(vd)[C_INDEX(SIZE(bp))];
636da2e3ebdSchin 			CACHE(vd)[C_INDEX(SIZE(bp))] = bp;
637da2e3ebdSchin 		}
638da2e3ebdSchin 	}
639da2e3ebdSchin 
640da2e3ebdSchin 	if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST)
641da2e3ebdSchin 		(*_Vmtrace)(vm, (Vmuchar_t*)0, (Vmuchar_t*)0, 0, 0);
642da2e3ebdSchin 
643da2e3ebdSchin 	CLRLOCK(vd,local); /**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
644da2e3ebdSchin 
6457c2fbfb3SApril Chin 	CLRINUSE(vd, inuse);
646da2e3ebdSchin 	return 0;
647da2e3ebdSchin }
648da2e3ebdSchin 
649da2e3ebdSchin #if __STD_C
650da2e3ebdSchin static Void_t* bestalloc(Vmalloc_t* vm, reg size_t size )
651da2e3ebdSchin #else
652da2e3ebdSchin static Void_t* bestalloc(vm,size)
653da2e3ebdSchin Vmalloc_t*	vm;	/* region allocating from	*/
654da2e3ebdSchin reg size_t	size;	/* desired block size		*/
655da2e3ebdSchin #endif
656da2e3ebdSchin {
657da2e3ebdSchin 	reg Vmdata_t*	vd = vm->data;
658da2e3ebdSchin 	reg size_t	s;
659da2e3ebdSchin 	reg int		n;
660da2e3ebdSchin 	reg Block_t	*tp, *np;
6617c2fbfb3SApril Chin 	reg int		local, inuse;
662da2e3ebdSchin 	size_t		orgsize = 0;
663da2e3ebdSchin 
664*3e14f97fSRoger A. Faulkner 	VMOPTIONS();
665*3e14f97fSRoger A. Faulkner 
666da2e3ebdSchin 	/**/COUNT(N_alloc);
667da2e3ebdSchin 
6687c2fbfb3SApril Chin 	SETINUSE(vd, inuse);
6697c2fbfb3SApril Chin 
670da2e3ebdSchin 	if(!(local = vd->mode&VM_TRUST))
671da2e3ebdSchin 	{	GETLOCAL(vd,local);	/**/ASSERT(!ISLOCK(vd,local));
672da2e3ebdSchin 		if(ISLOCK(vd,local) )
6737c2fbfb3SApril Chin 		{	CLRINUSE(vd, inuse);
674da2e3ebdSchin 			return NIL(Void_t*);
6757c2fbfb3SApril Chin 		}
676da2e3ebdSchin 		SETLOCK(vd,local);
677da2e3ebdSchin 		orgsize = size;
678da2e3ebdSchin 	}
679da2e3ebdSchin 
680da2e3ebdSchin 	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
681da2e3ebdSchin 	/**/ ASSERT(HEADSIZE == sizeof(Head_t));
682da2e3ebdSchin 	/**/ ASSERT(BODYSIZE == sizeof(Body_t));
683da2e3ebdSchin 	/**/ ASSERT((ALIGN%(BITS+1)) == 0 );
684da2e3ebdSchin 	/**/ ASSERT((sizeof(Head_t)%ALIGN) == 0 );
685da2e3ebdSchin 	/**/ ASSERT((sizeof(Body_t)%ALIGN) == 0 );
686da2e3ebdSchin 	/**/ ASSERT((BODYSIZE%ALIGN) == 0 );
687da2e3ebdSchin 	/**/ ASSERT(sizeof(Block_t) == (sizeof(Body_t)+sizeof(Head_t)) );
688da2e3ebdSchin 
689da2e3ebdSchin 	/* for the ANSI requirement that malloc(0) returns a non-NULL pointer */
690da2e3ebdSchin 	size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
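	/* e.g. with ALIGN == 8, a 21-byte request rounds up to 24, while any
	** request at or below BODYSIZE is padded to BODYSIZE so that a freed
	** block always has room for the Body_t bookkeeping fields.
	*/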
691da2e3ebdSchin 
692da2e3ebdSchin 	if((tp = vd->free) )	/* reuse last free piece if appropriate */
693da2e3ebdSchin 	{	/**/ASSERT(ISBUSY(SIZE(tp)) );
694da2e3ebdSchin 		/**/ASSERT(ISJUNK(SIZE(tp)) );
695da2e3ebdSchin 		/**/COUNT(N_last);
696da2e3ebdSchin 
697da2e3ebdSchin 		vd->free = NIL(Block_t*);
698da2e3ebdSchin 		if((s = SIZE(tp)) >= size && s < (size << 1) )
699da2e3ebdSchin 		{	if(s >= size + (sizeof(Head_t)+BODYSIZE) )
700da2e3ebdSchin 			{	SIZE(tp) = size;
701da2e3ebdSchin 				np = NEXT(tp);
702da2e3ebdSchin 				SEG(np) = SEG(tp);
703da2e3ebdSchin 				SIZE(np) = ((s&~BITS) - (size+sizeof(Head_t)))|JUNK|BUSY;
704da2e3ebdSchin 				vd->free = np;
705da2e3ebdSchin 				SIZE(tp) |= s&BITS;
706da2e3ebdSchin 			}
707da2e3ebdSchin 			CLRJUNK(SIZE(tp));
708da2e3ebdSchin 			goto done;
709da2e3ebdSchin 		}
710da2e3ebdSchin 
711da2e3ebdSchin 		LINK(tp) = CACHE(vd)[S_CACHE];
712da2e3ebdSchin 		CACHE(vd)[S_CACHE] = tp;
713da2e3ebdSchin 	}
714da2e3ebdSchin 
715da2e3ebdSchin 	for(;;)
716da2e3ebdSchin 	{	for(n = S_CACHE; n >= 0; --n)	/* best-fit except for coalescing */
717da2e3ebdSchin 		{	bestreclaim(vd,NIL(Block_t*),n);
718da2e3ebdSchin 			if(vd->root && (tp = bestsearch(vd,size,NIL(Block_t*))) )
719da2e3ebdSchin 				goto got_block;
720da2e3ebdSchin 		}
721da2e3ebdSchin 
722da2e3ebdSchin 		/**/ASSERT(!vd->free);
723da2e3ebdSchin 		if((tp = vd->wild) && SIZE(tp) >= size)
724da2e3ebdSchin 		{	/**/COUNT(N_wild);
725da2e3ebdSchin 			vd->wild = NIL(Block_t*);
726da2e3ebdSchin 			goto got_block;
727da2e3ebdSchin 		}
728da2e3ebdSchin 
729da2e3ebdSchin 		KPVCOMPACT(vm,bestcompact);
730da2e3ebdSchin 		if((tp = (*_Vmextend)(vm,size,bestsearch)) )
731da2e3ebdSchin 			goto got_block;
732da2e3ebdSchin 		else if(vd->mode&VM_AGAIN)
733da2e3ebdSchin 			vd->mode &= ~VM_AGAIN;
734da2e3ebdSchin 		else
735da2e3ebdSchin 		{	CLRLOCK(vd,local);
7367c2fbfb3SApril Chin 			CLRINUSE(vd, inuse);
737da2e3ebdSchin 			return NIL(Void_t*);
738da2e3ebdSchin 		}
739da2e3ebdSchin 	}
740da2e3ebdSchin 
741da2e3ebdSchin got_block:
742da2e3ebdSchin 	/**/ ASSERT(!ISBITS(SIZE(tp)));
743da2e3ebdSchin 	/**/ ASSERT(SIZE(tp) >= size);
744da2e3ebdSchin 	/**/ ASSERT((SIZE(tp)%ALIGN) == 0);
745da2e3ebdSchin 	/**/ ASSERT(!vd->free);
746da2e3ebdSchin 
747da2e3ebdSchin 	/* tell next block that we are no longer a free block */
748da2e3ebdSchin 	CLRPFREE(SIZE(NEXT(tp)));	/**/ ASSERT(ISBUSY(SIZE(NEXT(tp))));
749da2e3ebdSchin 
750da2e3ebdSchin 	if((s = SIZE(tp)-size) >= (sizeof(Head_t)+BODYSIZE) )
751da2e3ebdSchin 	{	SIZE(tp) = size;
752da2e3ebdSchin 
753da2e3ebdSchin 		np = NEXT(tp);
754da2e3ebdSchin 		SEG(np) = SEG(tp);
755da2e3ebdSchin 		SIZE(np) = (s - sizeof(Head_t)) | BUSY|JUNK;
756da2e3ebdSchin 
757da2e3ebdSchin 		if(VMWILD(vd,np))
758da2e3ebdSchin 		{	SIZE(np) &= ~BITS;
759da2e3ebdSchin 			*SELF(np) = np; /**/ASSERT(ISBUSY(SIZE(NEXT(np))));
760da2e3ebdSchin 			SETPFREE(SIZE(NEXT(np)));
761da2e3ebdSchin 			vd->wild = np;
762da2e3ebdSchin 		}
763da2e3ebdSchin 		else	vd->free = np;
764da2e3ebdSchin 	}
765da2e3ebdSchin 
766da2e3ebdSchin 	SETBUSY(SIZE(tp));
767da2e3ebdSchin 
768da2e3ebdSchin done:
769da2e3ebdSchin 	if(!local && (vd->mode&VM_TRACE) && _Vmtrace && VMETHOD(vd) == VM_MTBEST)
770da2e3ebdSchin 		(*_Vmtrace)(vm,NIL(Vmuchar_t*),(Vmuchar_t*)DATA(tp),orgsize,0);
771da2e3ebdSchin 
772da2e3ebdSchin 	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
773da2e3ebdSchin 	CLRLOCK(vd,local);
774da2e3ebdSchin 	ANNOUNCE(local, vm, VM_ALLOC, DATA(tp), vm->disc);
775da2e3ebdSchin 
7767c2fbfb3SApril Chin 	CLRINUSE(vd, inuse);
777da2e3ebdSchin 	return DATA(tp);
778da2e3ebdSchin }
779da2e3ebdSchin 
780da2e3ebdSchin #if __STD_C
781da2e3ebdSchin static long bestaddr(Vmalloc_t* vm, Void_t* addr )
782da2e3ebdSchin #else
783da2e3ebdSchin static long bestaddr(vm, addr)
784da2e3ebdSchin Vmalloc_t*	vm;	/* region allocating from	*/
785da2e3ebdSchin Void_t*		addr;	/* address to check		*/
786da2e3ebdSchin #endif
787da2e3ebdSchin {
788da2e3ebdSchin 	reg Seg_t*	seg;
789da2e3ebdSchin 	reg Block_t	*b, *endb;
790da2e3ebdSchin 	reg long	offset;
791da2e3ebdSchin 	reg Vmdata_t*	vd = vm->data;
7927c2fbfb3SApril Chin 	reg int		local, inuse;
7937c2fbfb3SApril Chin 
7947c2fbfb3SApril Chin 	SETINUSE(vd, inuse);
795da2e3ebdSchin 
796da2e3ebdSchin 	if(!(local = vd->mode&VM_TRUST) )
797da2e3ebdSchin 	{	GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
798da2e3ebdSchin 		if(ISLOCK(vd,local))
7997c2fbfb3SApril Chin 		{	CLRINUSE(vd, inuse);
800da2e3ebdSchin 			return -1L;
8017c2fbfb3SApril Chin 		}
802da2e3ebdSchin 		SETLOCK(vd,local);
803da2e3ebdSchin 	}
804da2e3ebdSchin 
805da2e3ebdSchin 	offset = -1L; b = endb = NIL(Block_t*);
806da2e3ebdSchin 	for(seg = vd->seg; seg; seg = seg->next)
807da2e3ebdSchin 	{	b = SEGBLOCK(seg);
808da2e3ebdSchin 		endb = (Block_t*)(seg->baddr - sizeof(Head_t));
809da2e3ebdSchin 		if((Vmuchar_t*)addr > (Vmuchar_t*)b &&
810da2e3ebdSchin 		   (Vmuchar_t*)addr < (Vmuchar_t*)endb)
811da2e3ebdSchin 			break;
812da2e3ebdSchin 	}
813da2e3ebdSchin 
814da2e3ebdSchin 	if(local && !(vd->mode&VM_TRUST) ) /* from bestfree or bestresize */
815da2e3ebdSchin 	{	b = BLOCK(addr);
816da2e3ebdSchin 		if(seg && SEG(b) == seg && ISBUSY(SIZE(b)) && !ISJUNK(SIZE(b)) )
817da2e3ebdSchin 			offset = 0;
818da2e3ebdSchin 		if(offset != 0 && vm->disc->exceptf)
819da2e3ebdSchin 			(void)(*vm->disc->exceptf)(vm,VM_BADADDR,addr,vm->disc);
820da2e3ebdSchin 	}
821da2e3ebdSchin 	else if(seg)
822da2e3ebdSchin 	{	while(b < endb)
823da2e3ebdSchin 		{	reg Vmuchar_t*	data = (Vmuchar_t*)DATA(b);
824da2e3ebdSchin 			reg size_t	size = SIZE(b)&~BITS;
825da2e3ebdSchin 
826da2e3ebdSchin 			if((Vmuchar_t*)addr >= data && (Vmuchar_t*)addr < data+size)
827da2e3ebdSchin 			{	if(ISJUNK(SIZE(b)) || !ISBUSY(SIZE(b)))
828da2e3ebdSchin 					offset = -1L;
829da2e3ebdSchin 				else	offset = (Vmuchar_t*)addr - data;
830da2e3ebdSchin 				goto done;
831da2e3ebdSchin 			}
832da2e3ebdSchin 
833da2e3ebdSchin 			b = (Block_t*)((Vmuchar_t*)DATA(b) + size);
834da2e3ebdSchin 		}
835da2e3ebdSchin 	}
836da2e3ebdSchin 
837da2e3ebdSchin done:
838da2e3ebdSchin 	CLRLOCK(vd,local);
8397c2fbfb3SApril Chin 	CLRINUSE(vd, inuse);
840da2e3ebdSchin 	return offset;
841da2e3ebdSchin }
842da2e3ebdSchin 
843da2e3ebdSchin #if __STD_C
844da2e3ebdSchin static int bestfree(Vmalloc_t* vm, Void_t* data )
845da2e3ebdSchin #else
846da2e3ebdSchin static int bestfree(vm, data )
847da2e3ebdSchin Vmalloc_t*	vm;
848da2e3ebdSchin Void_t*		data;
849da2e3ebdSchin #endif
850da2e3ebdSchin {
851da2e3ebdSchin 	reg Vmdata_t*	vd = vm->data;
852da2e3ebdSchin 	reg Block_t	*bp;
853da2e3ebdSchin 	reg size_t	s;
8547c2fbfb3SApril Chin 	reg int		local, inuse;
855da2e3ebdSchin 
856da2e3ebdSchin #ifdef DEBUG
85734f9b3eeSRoland Mainz 	if((local = (int)integralof(data)) >= 0 && local <= 0xf)
858da2e3ebdSchin 	{	int	vmassert = _Vmassert;
859da2e3ebdSchin 		_Vmassert = local ? local : vmassert ? vmassert : (VM_check|VM_abort);
860da2e3ebdSchin 		_vmbestcheck(vd, NIL(Block_t*));
861da2e3ebdSchin 		_Vmassert = local ? local : vmassert;
862da2e3ebdSchin 		return 0;
863da2e3ebdSchin 	}
864da2e3ebdSchin #endif
865da2e3ebdSchin 
866da2e3ebdSchin 	if(!data) /* ANSI-ism */
867da2e3ebdSchin 		return 0;
868da2e3ebdSchin 
869da2e3ebdSchin 	/**/COUNT(N_free);
870da2e3ebdSchin 
8717c2fbfb3SApril Chin 	SETINUSE(vd, inuse);
8727c2fbfb3SApril Chin 
873da2e3ebdSchin 	if(!(local = vd->mode&VM_TRUST) )
874da2e3ebdSchin 	{	GETLOCAL(vd,local);	/**/ASSERT(!ISLOCK(vd,local));
8757c2fbfb3SApril Chin 		if(ISLOCK(vd,local) || KPVADDR(vm,data,bestaddr) != 0 )
8767c2fbfb3SApril Chin 		{	CLRINUSE(vd, inuse);
877da2e3ebdSchin 			return -1;
8787c2fbfb3SApril Chin 		}
879da2e3ebdSchin 		SETLOCK(vd,local);
880da2e3ebdSchin 	}
881da2e3ebdSchin 
882da2e3ebdSchin 	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
883da2e3ebdSchin 	bp = BLOCK(data); s = SIZE(bp);
884da2e3ebdSchin 
885da2e3ebdSchin 	/* Keep an approximate average free block size.
886da2e3ebdSchin 	** This is used in bestcompact() to decide when to release
887da2e3ebdSchin 	** raw memory back to the underlying memory system.
888da2e3ebdSchin 	*/
889da2e3ebdSchin 	vd->pool = (vd->pool + (s&~BITS))/2;
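	/* e.g. frees of 96, 64 and 24 bytes starting from pool == 0 move
	** vd->pool 0 -> 48 -> 56 -> 40: a cheap exponentially-decaying average
	** that weights the most recent free sizes most heavily.
	*/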
890da2e3ebdSchin 
891da2e3ebdSchin 	if(ISBUSY(s) && !ISJUNK(s))
892da2e3ebdSchin 	{	SETJUNK(SIZE(bp));
893da2e3ebdSchin 	        if(s < MAXCACHE)
894da2e3ebdSchin 	        {       /**/ASSERT(!vmonlist(CACHE(vd)[INDEX(s)], bp) );
895da2e3ebdSchin 	                LINK(bp) = CACHE(vd)[INDEX(s)];
896da2e3ebdSchin 	                CACHE(vd)[INDEX(s)] = bp;
897da2e3ebdSchin 	        }
898da2e3ebdSchin 	        else if(!vd->free)
899da2e3ebdSchin 	                vd->free = bp;
900da2e3ebdSchin 	        else
901da2e3ebdSchin 	        {       /**/ASSERT(!vmonlist(CACHE(vd)[S_CACHE], bp) );
902da2e3ebdSchin 	                LINK(bp) = CACHE(vd)[S_CACHE];
903da2e3ebdSchin 	                CACHE(vd)[S_CACHE] = bp;
904da2e3ebdSchin 	        }
905da2e3ebdSchin 
906da2e3ebdSchin 		/* coalesce on freeing large blocks to avoid fragmentation */
907da2e3ebdSchin 		if(SIZE(bp) >= 2*vd->incr)
908da2e3ebdSchin 		{	bestreclaim(vd,NIL(Block_t*),0);
909da2e3ebdSchin 			if(vd->wild && SIZE(vd->wild) >= COMPACT*vd->incr)
910da2e3ebdSchin 				KPVCOMPACT(vm,bestcompact);
911da2e3ebdSchin 		}
912da2e3ebdSchin 	}
913da2e3ebdSchin 
914da2e3ebdSchin 	if(!local && _Vmtrace && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST )
915da2e3ebdSchin 		(*_Vmtrace)(vm,(Vmuchar_t*)data,NIL(Vmuchar_t*), (s&~BITS), 0);
916da2e3ebdSchin 
917da2e3ebdSchin 	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
918da2e3ebdSchin 	CLRLOCK(vd,local);
919da2e3ebdSchin 	ANNOUNCE(local, vm, VM_FREE, data, vm->disc);
920da2e3ebdSchin 
9217c2fbfb3SApril Chin 	CLRINUSE(vd, inuse);
922da2e3ebdSchin 	return 0;
923da2e3ebdSchin }
924da2e3ebdSchin 
925da2e3ebdSchin #if __STD_C
926da2e3ebdSchin static Void_t* bestresize(Vmalloc_t* vm, Void_t* data, reg size_t size, int type)
927da2e3ebdSchin #else
928da2e3ebdSchin static Void_t* bestresize(vm,data,size,type)
929da2e3ebdSchin Vmalloc_t*	vm;		/* region allocating from	*/
930da2e3ebdSchin Void_t*		data;		/* old block of data		*/
931da2e3ebdSchin reg size_t	size;		/* new size			*/
932da2e3ebdSchin int		type;		/* !=0 to move, <0 for not copy */
933da2e3ebdSchin #endif
934da2e3ebdSchin {
935da2e3ebdSchin 	reg Block_t	*rp, *np, *t;
9367c2fbfb3SApril Chin 	int		local, inuse;
937da2e3ebdSchin 	size_t		s, bs, oldsize = 0, orgsize = 0;
938da2e3ebdSchin 	Void_t		*oldd, *orgdata = NIL(Void_t*);
939da2e3ebdSchin 	Vmdata_t	*vd = vm->data;
940da2e3ebdSchin 
941da2e3ebdSchin 	/**/COUNT(N_resize);
942da2e3ebdSchin 
9437c2fbfb3SApril Chin 	SETINUSE(vd, inuse);
9447c2fbfb3SApril Chin 
945da2e3ebdSchin 	if(!data)
946da2e3ebdSchin 	{	if((data = bestalloc(vm,size)) )
947da2e3ebdSchin 		{	oldsize = 0;
948da2e3ebdSchin 			size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
949da2e3ebdSchin 		}
950da2e3ebdSchin 		goto done;
951da2e3ebdSchin 	}
952da2e3ebdSchin 	if(size == 0)
953da2e3ebdSchin 	{	(void)bestfree(vm,data);
9547c2fbfb3SApril Chin 		CLRINUSE(vd, inuse);
955da2e3ebdSchin 		return NIL(Void_t*);
956da2e3ebdSchin 	}
957da2e3ebdSchin 
958da2e3ebdSchin 	if(!(local = vd->mode&VM_TRUST) )
959da2e3ebdSchin 	{	GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
9607c2fbfb3SApril Chin 		if(ISLOCK(vd,local) || (!local && KPVADDR(vm,data,bestaddr) != 0 ) )
9617c2fbfb3SApril Chin 		{	CLRINUSE(vd, inuse);
962da2e3ebdSchin 			return NIL(Void_t*);
9637c2fbfb3SApril Chin 		}
964da2e3ebdSchin 		SETLOCK(vd,local);
965da2e3ebdSchin 
966da2e3ebdSchin 		orgdata = data;	/* for tracing */
967da2e3ebdSchin 		orgsize = size;
968da2e3ebdSchin 	}
969da2e3ebdSchin 
970da2e3ebdSchin 	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
971da2e3ebdSchin 	size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
972da2e3ebdSchin 	rp = BLOCK(data);	/**/ASSERT(ISBUSY(SIZE(rp)) && !ISJUNK(SIZE(rp)));
973da2e3ebdSchin 	oldsize = SIZE(rp); CLRBITS(oldsize);
974da2e3ebdSchin 	if(oldsize < size)
975da2e3ebdSchin 	{	np = (Block_t*)((Vmuchar_t*)rp + oldsize + sizeof(Head_t));
976da2e3ebdSchin 		do	/* forward merge as much as possible */
977da2e3ebdSchin 		{	s = SIZE(np); /**/ASSERT(!ISPFREE(s));
978da2e3ebdSchin 			if(np == vd->free)
979da2e3ebdSchin 			{	vd->free = NIL(Block_t*);
980da2e3ebdSchin 				CLRBITS(s);
981da2e3ebdSchin 			}
982da2e3ebdSchin 			else if(ISJUNK(s) )
983da2e3ebdSchin 			{	if(!bestreclaim(vd,np,C_INDEX(s)) )
984da2e3ebdSchin 					/**/ASSERT(0); /* oops: did not see np! */
985da2e3ebdSchin 				s = SIZE(np); /**/ASSERT(s%ALIGN == 0);
986da2e3ebdSchin 			}
987da2e3ebdSchin 			else if(!ISBUSY(s) )
988da2e3ebdSchin 			{	if(np == vd->wild)
989da2e3ebdSchin 					vd->wild = NIL(Block_t*);
990da2e3ebdSchin 				else	REMOVE(vd,np,INDEX(s),t,bestsearch);
991da2e3ebdSchin 			}
992da2e3ebdSchin 			else	break;
993da2e3ebdSchin 
994da2e3ebdSchin 			SIZE(rp) += (s += sizeof(Head_t)); /**/ASSERT((s%ALIGN) == 0);
995da2e3ebdSchin 			np = (Block_t*)((Vmuchar_t*)np + s);
996da2e3ebdSchin 			CLRPFREE(SIZE(np));
997da2e3ebdSchin 		} while(SIZE(rp) < size);
998da2e3ebdSchin 
999da2e3ebdSchin 		if(SIZE(rp) < size && size > vd->incr && SEGWILD(rp) )
1000da2e3ebdSchin 		{	reg Seg_t*	seg;
1001da2e3ebdSchin 
1002da2e3ebdSchin 			s = (size - SIZE(rp)) + sizeof(Head_t); s = ROUND(s,vd->incr);
1003da2e3ebdSchin 			seg = SEG(rp);
1004da2e3ebdSchin 			if((*vm->disc->memoryf)(vm,seg->addr,seg->extent,seg->extent+s,
1005da2e3ebdSchin 				      vm->disc) == seg->addr )
1006da2e3ebdSchin 			{	SIZE(rp) += s;
1007da2e3ebdSchin 				seg->extent += s;
1008da2e3ebdSchin 				seg->size += s;
1009da2e3ebdSchin 				seg->baddr += s;
1010da2e3ebdSchin 				s  = (SIZE(rp)&~BITS) + sizeof(Head_t);
1011da2e3ebdSchin 				np = (Block_t*)((Vmuchar_t*)rp + s);
1012da2e3ebdSchin 				SEG(np) = seg;
1013da2e3ebdSchin 				SIZE(np) = BUSY;
1014da2e3ebdSchin 			}
1015da2e3ebdSchin 		}
1016da2e3ebdSchin 	}
1017da2e3ebdSchin 
1018da2e3ebdSchin 	if((s = SIZE(rp)) >= (size + (BODYSIZE+sizeof(Head_t))) )
1019da2e3ebdSchin 	{	SIZE(rp) = size;
1020da2e3ebdSchin 		np = NEXT(rp);
1021da2e3ebdSchin 		SEG(np) = SEG(rp);
1022da2e3ebdSchin 		SIZE(np) = (((s&~BITS)-size) - sizeof(Head_t))|BUSY|JUNK;
1023da2e3ebdSchin 		CPYBITS(SIZE(rp),s);
1024da2e3ebdSchin 		rp = np;
1025da2e3ebdSchin 		goto do_free;
1026da2e3ebdSchin 	}
1027da2e3ebdSchin 	else if((bs = s&~BITS) < size)
1028da2e3ebdSchin 	{	if(!(type&(VM_RSMOVE|VM_RSCOPY)) )
1029da2e3ebdSchin 			data = NIL(Void_t*); /* old data is not moveable */
1030da2e3ebdSchin 		else
1031da2e3ebdSchin 		{	oldd = data;
1032da2e3ebdSchin 			if((data = KPVALLOC(vm,size,bestalloc)) )
1033da2e3ebdSchin 			{	if(type&VM_RSCOPY)
1034da2e3ebdSchin 					memcpy(data, oldd, bs);
1035da2e3ebdSchin 
1036da2e3ebdSchin 			do_free: /* reclaim these right away */
1037da2e3ebdSchin 				SETJUNK(SIZE(rp));
1038da2e3ebdSchin 				LINK(rp) = CACHE(vd)[S_CACHE];
1039da2e3ebdSchin 				CACHE(vd)[S_CACHE] = rp;
1040da2e3ebdSchin 				bestreclaim(vd, NIL(Block_t*), S_CACHE);
1041da2e3ebdSchin 			}
1042da2e3ebdSchin 		}
1043da2e3ebdSchin 	}
1044da2e3ebdSchin 
1045da2e3ebdSchin 	if(!local && _Vmtrace && data && (vd->mode&VM_TRACE) && VMETHOD(vd) == VM_MTBEST)
1046da2e3ebdSchin 		(*_Vmtrace)(vm, (Vmuchar_t*)orgdata, (Vmuchar_t*)data, orgsize, 0);
1047da2e3ebdSchin 
1048da2e3ebdSchin 	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
1049da2e3ebdSchin 	CLRLOCK(vd,local);
1050da2e3ebdSchin 	ANNOUNCE(local, vm, VM_RESIZE, data, vm->disc);
1051da2e3ebdSchin 
1052da2e3ebdSchin done:	if(data && (type&VM_RSZERO) && (size = SIZE(BLOCK(data))&~BITS) > oldsize )
1053da2e3ebdSchin 		memset((Void_t*)((Vmuchar_t*)data + oldsize), 0, size-oldsize);
1054da2e3ebdSchin 
10557c2fbfb3SApril Chin 	CLRINUSE(vd, inuse);
1056da2e3ebdSchin 	return data;
1057da2e3ebdSchin }
1058da2e3ebdSchin 
1059da2e3ebdSchin #if __STD_C
1060da2e3ebdSchin static long bestsize(Vmalloc_t* vm, Void_t* addr )
1061da2e3ebdSchin #else
1062da2e3ebdSchin static long bestsize(vm, addr)
1063da2e3ebdSchin Vmalloc_t*	vm;	/* region allocating from	*/
1064da2e3ebdSchin Void_t*		addr;	/* address to check		*/
1065da2e3ebdSchin #endif
1066da2e3ebdSchin {
1067da2e3ebdSchin 	reg Seg_t*	seg;
1068da2e3ebdSchin 	reg Block_t	*b, *endb;
1069da2e3ebdSchin 	reg long	size;
1070da2e3ebdSchin 	reg Vmdata_t*	vd = vm->data;
10717c2fbfb3SApril Chin 	reg int		inuse;
10727c2fbfb3SApril Chin 
10737c2fbfb3SApril Chin 	SETINUSE(vd, inuse);
1074da2e3ebdSchin 
1075da2e3ebdSchin 	if(!(vd->mode&VM_TRUST) )
1076da2e3ebdSchin 	{	if(ISLOCK(vd,0))
10777c2fbfb3SApril Chin 		{	CLRINUSE(vd, inuse);
1078da2e3ebdSchin 			return -1L;
10797c2fbfb3SApril Chin 		}
1080da2e3ebdSchin 		SETLOCK(vd,0);
1081da2e3ebdSchin 	}
1082da2e3ebdSchin 
1083da2e3ebdSchin 	size = -1L;
1084da2e3ebdSchin 	for(seg = vd->seg; seg; seg = seg->next)
1085da2e3ebdSchin 	{	b = SEGBLOCK(seg);
1086da2e3ebdSchin 		endb = (Block_t*)(seg->baddr - sizeof(Head_t));
1087da2e3ebdSchin 		if((Vmuchar_t*)addr <= (Vmuchar_t*)b ||
1088da2e3ebdSchin 		   (Vmuchar_t*)addr >= (Vmuchar_t*)endb)
1089da2e3ebdSchin 			continue;
1090da2e3ebdSchin 		while(b < endb)
1091da2e3ebdSchin 		{	if(addr == DATA(b))
1092da2e3ebdSchin 			{	if(!ISBUSY(SIZE(b)) || ISJUNK(SIZE(b)) )
1093da2e3ebdSchin 					size = -1L;
1094da2e3ebdSchin 				else	size = (long)SIZE(b)&~BITS;
1095da2e3ebdSchin 				goto done;
1096da2e3ebdSchin 			}
1097da2e3ebdSchin 			else if((Vmuchar_t*)addr <= (Vmuchar_t*)b)
1098da2e3ebdSchin 				break;
1099da2e3ebdSchin 
1100da2e3ebdSchin 			b = (Block_t*)((Vmuchar_t*)DATA(b) + (SIZE(b)&~BITS) );
1101da2e3ebdSchin 		}
1102da2e3ebdSchin 	}
1103da2e3ebdSchin 
1104da2e3ebdSchin done:
1105da2e3ebdSchin 	CLRLOCK(vd,0);
11067c2fbfb3SApril Chin 	CLRINUSE(vd, inuse);
1107da2e3ebdSchin 	return size;
1108da2e3ebdSchin }
1109da2e3ebdSchin 
1110da2e3ebdSchin #if __STD_C
1111da2e3ebdSchin static Void_t* bestalign(Vmalloc_t* vm, size_t size, size_t align)
1112da2e3ebdSchin #else
1113da2e3ebdSchin static Void_t* bestalign(vm, size, align)
1114da2e3ebdSchin Vmalloc_t*	vm;
1115da2e3ebdSchin size_t		size;
1116da2e3ebdSchin size_t		align;
1117da2e3ebdSchin #endif
1118da2e3ebdSchin {
1119da2e3ebdSchin 	reg Vmuchar_t	*data;
1120da2e3ebdSchin 	reg Block_t	*tp, *np;
1121da2e3ebdSchin 	reg Seg_t*	seg;
11227c2fbfb3SApril Chin 	reg int		local, inuse;
1123da2e3ebdSchin 	reg size_t	s, extra, orgsize = 0, orgalign = 0;
1124da2e3ebdSchin 	reg Vmdata_t*	vd = vm->data;
1125da2e3ebdSchin 
1126da2e3ebdSchin 	if(size <= 0 || align <= 0)
1127da2e3ebdSchin 		return NIL(Void_t*);
1128da2e3ebdSchin 
11297c2fbfb3SApril Chin 	SETINUSE(vd, inuse);
11307c2fbfb3SApril Chin 
1131da2e3ebdSchin 	if(!(local = vd->mode&VM_TRUST) )
1132da2e3ebdSchin 	{	GETLOCAL(vd,local); /**/ASSERT(!ISLOCK(vd,local));
1133da2e3ebdSchin 		if(ISLOCK(vd,local) )
11347c2fbfb3SApril Chin 		{	CLRINUSE(vd, inuse);
1135da2e3ebdSchin 			return NIL(Void_t*);
11367c2fbfb3SApril Chin 		}
1137da2e3ebdSchin 		SETLOCK(vd,local);
1138da2e3ebdSchin 		orgsize = size;
1139da2e3ebdSchin 		orgalign = align;
1140da2e3ebdSchin 	}
1141da2e3ebdSchin 
1142da2e3ebdSchin 	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
1143da2e3ebdSchin 	size = size <= BODYSIZE ? BODYSIZE : ROUND(size,ALIGN);
1144da2e3ebdSchin 	align = MULTIPLE(align,ALIGN);
1145da2e3ebdSchin 
1146da2e3ebdSchin 	/* hack so that dbalign() can store header data */
1147da2e3ebdSchin 	if(VMETHOD(vd) != VM_MTDEBUG)
1148da2e3ebdSchin 		extra = 0;
1149da2e3ebdSchin 	else
1150da2e3ebdSchin 	{	extra = DB_HEAD;
1151da2e3ebdSchin 		while(align < extra || (align - extra) < sizeof(Block_t))
1152da2e3ebdSchin 			align *= 2;
1153da2e3ebdSchin 	}
1154da2e3ebdSchin 
1155da2e3ebdSchin 	/* reclaim all free blocks now to avoid fragmentation */
1156da2e3ebdSchin 	bestreclaim(vd,NIL(Block_t*),0);
1157da2e3ebdSchin 
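	/* over-allocate by two alignment units plus header room so that an
	** address with the required residue (mod align) is certain to fall
	** inside the block, with space before it for a block header and space
	** after it for the requested size; the slack on both sides is junked
	** and reclaimed below
	*/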
1158da2e3ebdSchin 	s = size + 2*(align+sizeof(Head_t)+extra);
1159da2e3ebdSchin 	if(!(data = (Vmuchar_t*)KPVALLOC(vm,s,bestalloc)) )
1160da2e3ebdSchin 		goto done;
1161da2e3ebdSchin 
1162da2e3ebdSchin 	tp = BLOCK(data);
1163da2e3ebdSchin 	seg = SEG(tp);
1164da2e3ebdSchin 
1165da2e3ebdSchin 	/* get an aligned address that we can live with */
1166da2e3ebdSchin 	if((s = (size_t)((VLONG(data)+extra)%align)) != 0)
1167da2e3ebdSchin 		data += align-s; /**/ASSERT(((VLONG(data)+extra)%align) == 0);
1168da2e3ebdSchin 
1169da2e3ebdSchin 	if((np = BLOCK(data)) != tp ) /* need to free left part */
1170da2e3ebdSchin 	{	if(((Vmuchar_t*)np - (Vmuchar_t*)tp) < (ssize_t)(sizeof(Block_t)+extra) )
1171da2e3ebdSchin 		{	data += align;
1172da2e3ebdSchin 			np = BLOCK(data);
1173da2e3ebdSchin 		} /**/ASSERT(((VLONG(data)+extra)%align) == 0);
1174da2e3ebdSchin 
1175da2e3ebdSchin 		s  = (Vmuchar_t*)np - (Vmuchar_t*)tp;
1176da2e3ebdSchin 		SIZE(np) = ((SIZE(tp)&~BITS) - s)|BUSY;
1177da2e3ebdSchin 		SEG(np) = seg;
1178da2e3ebdSchin 
1179da2e3ebdSchin 		SIZE(tp) = (s - sizeof(Head_t)) | (SIZE(tp)&BITS) | JUNK;
1180da2e3ebdSchin 		/**/ ASSERT(SIZE(tp) >= sizeof(Body_t) );
1181da2e3ebdSchin 		LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
1182da2e3ebdSchin 		CACHE(vd)[C_INDEX(SIZE(tp))] = tp;
1183da2e3ebdSchin 	}
1184da2e3ebdSchin 
1185da2e3ebdSchin 	/* free left-over if too big */
1186da2e3ebdSchin 	if((s = SIZE(np) - size) >= sizeof(Block_t))
1187da2e3ebdSchin 	{	SIZE(np) = size;
1188da2e3ebdSchin 
1189da2e3ebdSchin 		tp = NEXT(np);
1190da2e3ebdSchin 		SIZE(tp) = ((s & ~BITS) - sizeof(Head_t)) | BUSY | JUNK;
1191da2e3ebdSchin 		SEG(tp) = seg;
1192da2e3ebdSchin 		LINK(tp) = CACHE(vd)[C_INDEX(SIZE(tp))];
1193da2e3ebdSchin 		CACHE(vd)[C_INDEX(SIZE(tp))] = tp;
1194da2e3ebdSchin 
1195da2e3ebdSchin 		SIZE(np) |= s&BITS;
1196da2e3ebdSchin 	}
1197da2e3ebdSchin 
1198da2e3ebdSchin 	bestreclaim(vd,NIL(Block_t*),0); /* coalesce all free blocks */
1199da2e3ebdSchin 
1200da2e3ebdSchin 	if(!local && !(vd->mode&VM_TRUST) && _Vmtrace && (vd->mode&VM_TRACE) )
1201da2e3ebdSchin 		(*_Vmtrace)(vm,NIL(Vmuchar_t*),data,orgsize,orgalign);
1202da2e3ebdSchin 
1203da2e3ebdSchin done:
1204da2e3ebdSchin 	/**/ASSERT(_vmbestcheck(vd, NIL(Block_t*)) == 0);
1205da2e3ebdSchin 	CLRLOCK(vd,local);
1206da2e3ebdSchin 	ANNOUNCE(local, vm, VM_ALLOC, (Void_t*)data, vm->disc);
1207da2e3ebdSchin 
12087c2fbfb3SApril Chin 	CLRINUSE(vd, inuse);
1209da2e3ebdSchin 	return (Void_t*)data;
1210da2e3ebdSchin }
1211da2e3ebdSchin 
1212da2e3ebdSchin 
1213da2e3ebdSchin #if _mem_win32
1214da2e3ebdSchin #if _PACKAGE_ast
1215da2e3ebdSchin #include		<ast_windows.h>
1216da2e3ebdSchin #else
1217da2e3ebdSchin #include		<windows.h>
1218da2e3ebdSchin #endif
1219da2e3ebdSchin #endif /* _mem_win32 */
1220da2e3ebdSchin 
1221da2e3ebdSchin #if _mem_mmap_anon
1222da2e3ebdSchin #include		<sys/mman.h>
1223da2e3ebdSchin #ifndef MAP_ANON
1224da2e3ebdSchin #define	MAP_ANON	MAP_ANONYMOUS
1225da2e3ebdSchin #endif
1226da2e3ebdSchin #endif /* _mem_mmap_anon */
1227da2e3ebdSchin 
1228da2e3ebdSchin #if _mem_mmap_zero
1229da2e3ebdSchin #include		<sys/fcntl.h>
1230da2e3ebdSchin #include		<sys/mman.h>
1231da2e3ebdSchin typedef struct _mmapdisc_s
1232da2e3ebdSchin {	Vmdisc_t	disc;
1233da2e3ebdSchin 	int		fd;
1234da2e3ebdSchin 	off_t		offset;
1235da2e3ebdSchin } Mmapdisc_t;
1236da2e3ebdSchin 
1237da2e3ebdSchin #ifndef OPEN_MAX
1238da2e3ebdSchin #define	OPEN_MAX	64
1239da2e3ebdSchin #endif
1240da2e3ebdSchin #define OPEN_PRIVATE	(3*OPEN_MAX/4)
1241da2e3ebdSchin #endif /* _mem_mmap_zero */
1242da2e3ebdSchin 
1243da2e3ebdSchin /* failure mode of mmap, sbrk and brk */
1244da2e3ebdSchin #ifndef MAP_FAILED
1245da2e3ebdSchin #define MAP_FAILED	((Void_t*)(-1))
1246da2e3ebdSchin #endif
1247da2e3ebdSchin #define BRK_FAILED	((Void_t*)(-1))
1248da2e3ebdSchin 
124934f9b3eeSRoland Mainz /* make sure that allocated memory is addressable */
125034f9b3eeSRoland Mainz 
125134f9b3eeSRoland Mainz #if _PACKAGE_ast
125234f9b3eeSRoland Mainz #include	<sig.h>
125334f9b3eeSRoland Mainz #else
125434f9b3eeSRoland Mainz #include	<signal.h>
125534f9b3eeSRoland Mainz typedef void	(*Sig_handler_t)(int);
125634f9b3eeSRoland Mainz #endif
125734f9b3eeSRoland Mainz 
125834f9b3eeSRoland Mainz static int	Gotsegv = 0;
125934f9b3eeSRoland Mainz 
126034f9b3eeSRoland Mainz #if __STD_C
126134f9b3eeSRoland Mainz static void sigsegv(int sig)
126234f9b3eeSRoland Mainz #else
126334f9b3eeSRoland Mainz static void sigsegv(sig)
126434f9b3eeSRoland Mainz int	sig;
126534f9b3eeSRoland Mainz #endif
126634f9b3eeSRoland Mainz {
126734f9b3eeSRoland Mainz 	if(sig == SIGSEGV)
126834f9b3eeSRoland Mainz 		Gotsegv = 1;
126934f9b3eeSRoland Mainz }
127034f9b3eeSRoland Mainz 
127134f9b3eeSRoland Mainz #if __STD_C
127234f9b3eeSRoland Mainz static int okaddr(Void_t* addr, size_t nsize)
127334f9b3eeSRoland Mainz #else
127434f9b3eeSRoland Mainz static int okaddr(addr, nsize)
127534f9b3eeSRoland Mainz Void_t*	addr;
127634f9b3eeSRoland Mainz size_t	nsize;
127734f9b3eeSRoland Mainz #endif
127834f9b3eeSRoland Mainz {
127934f9b3eeSRoland Mainz 	Sig_handler_t	segv;
128034f9b3eeSRoland Mainz 	int		rv;
128134f9b3eeSRoland Mainz 
128234f9b3eeSRoland Mainz 	Gotsegv = 0; /* catch segmentation fault */
128334f9b3eeSRoland Mainz 	segv = signal(SIGSEGV, sigsegv);
128434f9b3eeSRoland Mainz 
128534f9b3eeSRoland Mainz 	if(Gotsegv == 0)
128634f9b3eeSRoland Mainz 		rv = *((char*)addr);
128734f9b3eeSRoland Mainz 	if(Gotsegv == 0)
128834f9b3eeSRoland Mainz 		rv += *(((char*)addr)+nsize-1);
128934f9b3eeSRoland Mainz 	if(Gotsegv == 0)
129034f9b3eeSRoland Mainz 		rv = rv == 0 ? 0 : 1;
129134f9b3eeSRoland Mainz 	else	rv = -1;
129234f9b3eeSRoland Mainz 
129334f9b3eeSRoland Mainz 	signal(SIGSEGV, segv); /* restore signal catcher */
129434f9b3eeSRoland Mainz 	Gotsegv = 0;
129534f9b3eeSRoland Mainz 
129634f9b3eeSRoland Mainz 	return rv;
129734f9b3eeSRoland Mainz }
129834f9b3eeSRoland Mainz 
1299da2e3ebdSchin /* A discipline to get raw memory using sbrk/VirtualAlloc/mmap */
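/* The discipline call (*memoryf)(vm,caddr,csize,nsize,disc) is used as
** follows: csize == 0 asks for a brand-new area of nsize bytes, nsize == 0
** releases the area at caddr, and otherwise the area is grown or shrunk in
** place when the underlying mechanism allows it. Depending on what the
** platform provides, new memory comes from VirtualAlloc (win32), sbrk/brk,
** anonymous mmap, or mmap of /dev/zero, with native malloc/free as the
** last resort.
*/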
1300da2e3ebdSchin #if __STD_C
1301da2e3ebdSchin static Void_t* sbrkmem(Vmalloc_t* vm, Void_t* caddr,
1302da2e3ebdSchin 			size_t csize, size_t nsize, Vmdisc_t* disc)
1303da2e3ebdSchin #else
1304da2e3ebdSchin static Void_t* sbrkmem(vm, caddr, csize, nsize, disc)
1305da2e3ebdSchin Vmalloc_t*	vm;	/* region doing allocation from		*/
1306da2e3ebdSchin Void_t*		caddr;	/* current address			*/
1307da2e3ebdSchin size_t		csize;	/* current size				*/
1308da2e3ebdSchin size_t		nsize;	/* new size				*/
1309da2e3ebdSchin Vmdisc_t*	disc;	/* discipline structure			*/
1310da2e3ebdSchin #endif
1311da2e3ebdSchin {
1312da2e3ebdSchin #undef _done_sbrkmem
1313da2e3ebdSchin 
1314da2e3ebdSchin #if !defined(_done_sbrkmem) && defined(_mem_win32)
1315da2e3ebdSchin #define _done_sbrkmem	1
1316da2e3ebdSchin 	NOTUSED(vm);
1317da2e3ebdSchin 	NOTUSED(disc);
1318da2e3ebdSchin 	if(csize == 0)
1319da2e3ebdSchin 		return (Void_t*)VirtualAlloc(0,nsize,MEM_COMMIT,PAGE_READWRITE);
1320da2e3ebdSchin 	else if(nsize == 0)
1321da2e3ebdSchin 		return VirtualFree((LPVOID)caddr,0,MEM_RELEASE) ? caddr : NIL(Void_t*);
1322da2e3ebdSchin 	else	return NIL(Void_t*);
1323da2e3ebdSchin #endif /* _mem_win32 */
1324da2e3ebdSchin 
1325da2e3ebdSchin #if !defined(_done_sbrkmem) && (_mem_sbrk || _mem_mmap_zero || _mem_mmap_anon)
1326da2e3ebdSchin #define _done_sbrkmem	1
1327da2e3ebdSchin 	Vmuchar_t	*addr;
1328da2e3ebdSchin #if _mem_mmap_zero
1329da2e3ebdSchin 	Mmapdisc_t	*mmdc = (Mmapdisc_t*)disc;
1330da2e3ebdSchin #else
1331da2e3ebdSchin 	NOTUSED(disc);
1332da2e3ebdSchin #endif
1333da2e3ebdSchin 	NOTUSED(vm);
1334da2e3ebdSchin 
1335da2e3ebdSchin 	if(csize == 0) /* allocating new memory */
1336da2e3ebdSchin 	{
13377c2fbfb3SApril Chin 
1338da2e3ebdSchin #if _mem_sbrk	/* try using sbrk() and brk() */
13397c2fbfb3SApril Chin 		if(!(_Vmassert & VM_mmap))
1340da2e3ebdSchin 		{
1341da2e3ebdSchin 			addr = (Vmuchar_t*)sbrk(0); /* old break value */
1342da2e3ebdSchin 			if(addr && addr != (Vmuchar_t*)BRK_FAILED )
134334f9b3eeSRoland Mainz 			{
134434f9b3eeSRoland Mainz 				if((addr+nsize) < addr)
134534f9b3eeSRoland Mainz 					return NIL(Void_t*);
1346da2e3ebdSchin 				if(brk(addr+nsize) == 0 )
134734f9b3eeSRoland Mainz 				{	if(okaddr(addr,nsize) >= 0)
1348da2e3ebdSchin 						return addr;
134934f9b3eeSRoland Mainz 					(void)brk(addr); /* release reserved address */
135034f9b3eeSRoland Mainz 				}
135134f9b3eeSRoland Mainz 			}
1352da2e3ebdSchin 		}
1353da2e3ebdSchin #endif /* _mem_sbrk */
1354da2e3ebdSchin 
1355da2e3ebdSchin #if _mem_mmap_anon /* anonymous mmap */
1356da2e3ebdSchin 		addr = (Vmuchar_t*)mmap(0, nsize, PROT_READ|PROT_WRITE,
1357da2e3ebdSchin                                         MAP_ANON|MAP_PRIVATE, -1, 0);
1358da2e3ebdSchin 		if(addr && addr != (Vmuchar_t*)MAP_FAILED)
135934f9b3eeSRoland Mainz 		{	if(okaddr(addr,nsize) >= 0)
1360da2e3ebdSchin 				return addr;
1361*3e14f97fSRoger A. Faulkner 			(void)munmap((char*)addr, nsize); /* release reserved address */
1362da2e3ebdSchin 		}
1363da2e3ebdSchin #endif /* _mem_mmap_anon */
1364da2e3ebdSchin 
1365da2e3ebdSchin #if _mem_mmap_zero /* mmap from /dev/zero */
1366da2e3ebdSchin 		if(mmdc->fd < 0)
1367da2e3ebdSchin 		{	int	fd;
1368da2e3ebdSchin 			if(mmdc->fd != -1)
1369da2e3ebdSchin 				return NIL(Void_t*);
1370da2e3ebdSchin 			if((fd = open("/dev/zero", O_RDONLY)) < 0 )
1371da2e3ebdSchin 			{	mmdc->fd = -2;
1372da2e3ebdSchin 				return NIL(Void_t*);
1373da2e3ebdSchin 			}
1374da2e3ebdSchin 			if(fd >= OPEN_PRIVATE || (mmdc->fd = dup2(fd,OPEN_PRIVATE)) < 0 )
1375da2e3ebdSchin 				mmdc->fd = fd;
1376da2e3ebdSchin 			else	close(fd);
1377da2e3ebdSchin #ifdef FD_CLOEXEC
1378da2e3ebdSchin 			fcntl(mmdc->fd, F_SETFD, FD_CLOEXEC);
1379da2e3ebdSchin #endif
1380da2e3ebdSchin 		}
1381da2e3ebdSchin 		addr = (Vmuchar_t*)mmap(0, nsize, PROT_READ|PROT_WRITE,
1382da2e3ebdSchin 					MAP_PRIVATE, mmdc->fd, mmdc->offset);
1383da2e3ebdSchin 		if(addr && addr != (Vmuchar_t*)MAP_FAILED)
138434f9b3eeSRoland Mainz 		{	if(okaddr(addr, nsize) >= 0)
1385da2e3ebdSchin 			{	mmdc->offset += nsize;
1386da2e3ebdSchin 				return addr;
1387da2e3ebdSchin 			}
1388*3e14f97fSRoger A. Faulkner 			(void)munmap((char*)addr, nsize); /* release reserved address */
1389da2e3ebdSchin 		}
1390da2e3ebdSchin #endif /* _mem_mmap_zero */
1391da2e3ebdSchin 
1392da2e3ebdSchin 		return NIL(Void_t*);
1393da2e3ebdSchin 	}
1394da2e3ebdSchin 	else
1395*3e14f97fSRoger A. Faulkner 	{
13967c2fbfb3SApril Chin 
1397da2e3ebdSchin #if _mem_sbrk
1398da2e3ebdSchin 		addr = (Vmuchar_t*)sbrk(0);
1399da2e3ebdSchin 		if(!addr || addr == (Vmuchar_t*)BRK_FAILED)
1400da2e3ebdSchin 			addr = caddr;
1401da2e3ebdSchin 		else if(((Vmuchar_t*)caddr+csize) == addr) /* in sbrk-space */
140234f9b3eeSRoland Mainz 		{	if(nsize <= csize)
140334f9b3eeSRoland Mainz 				addr -= csize-nsize;
140434f9b3eeSRoland Mainz 			else if((addr += nsize-csize) < (Vmuchar_t*)caddr)
140534f9b3eeSRoland Mainz 				return NIL(Void_t*); /* wrapped around address */
140634f9b3eeSRoland Mainz 			else	return brk(addr) == 0 ? caddr : NIL(Void_t*);
1407da2e3ebdSchin 		}
1408*3e14f97fSRoger A. Faulkner #else
1409*3e14f97fSRoger A. Faulkner 		addr = caddr;
1410da2e3ebdSchin #endif /* _mem_sbrk */
1411da2e3ebdSchin 
1412da2e3ebdSchin #if _mem_mmap_zero || _mem_mmap_anon
1413da2e3ebdSchin 		if(((Vmuchar_t*)caddr+csize) > addr) /* in mmap-space */
1414da2e3ebdSchin 			if(nsize == 0 && munmap(caddr,csize) == 0)
1415da2e3ebdSchin 				return caddr;
1416da2e3ebdSchin #endif /* _mem_mmap_zero || _mem_mmap_anon */
1417da2e3ebdSchin 
1418da2e3ebdSchin 		return NIL(Void_t*);
1419da2e3ebdSchin 	}
1420da2e3ebdSchin #endif /*_done_sbrkmem*/
1421da2e3ebdSchin 
14227c2fbfb3SApril Chin #if !_done_sbrkmem /* use native malloc/free as a last resort */
1423da2e3ebdSchin 	/**/ASSERT(_std_malloc); /* _std_malloc should be well-defined */
1424da2e3ebdSchin 	NOTUSED(vm);
1425da2e3ebdSchin 	NOTUSED(disc);
1426da2e3ebdSchin 	if(csize == 0)
1427da2e3ebdSchin 		return (Void_t*)malloc(nsize);
1428da2e3ebdSchin 	else if(nsize == 0)
1429da2e3ebdSchin 	{	free(caddr);
1430da2e3ebdSchin 		return caddr;
1431da2e3ebdSchin 	}
1432da2e3ebdSchin 	else	return NIL(Void_t*);
1433da2e3ebdSchin #endif /* _done_sbrkmem */
1434da2e3ebdSchin }
1435da2e3ebdSchin 
1436da2e3ebdSchin #if _mem_mmap_zero
1437da2e3ebdSchin static Mmapdisc_t _Vmdcsbrk = { { sbrkmem, NIL(Vmexcept_f), 64*1024 }, -1, 0 };
1438da2e3ebdSchin #else
1439da2e3ebdSchin static Vmdisc_t _Vmdcsbrk = { sbrkmem, NIL(Vmexcept_f), 0 };
1440da2e3ebdSchin #endif
1441da2e3ebdSchin 
1442da2e3ebdSchin static Vmethod_t _Vmbest =
1443da2e3ebdSchin {
1444da2e3ebdSchin 	bestalloc,
1445da2e3ebdSchin 	bestresize,
1446da2e3ebdSchin 	bestfree,
1447da2e3ebdSchin 	bestaddr,
1448da2e3ebdSchin 	bestsize,
1449da2e3ebdSchin 	bestcompact,
1450da2e3ebdSchin 	bestalign,
1451da2e3ebdSchin 	VM_MTBEST
1452da2e3ebdSchin };
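
/* A region using this method is normally obtained through the public vmalloc
** interface; a minimal sketch (error handling omitted) might look like:
**
**	Vmalloc_t*	vm = vmopen(Vmdcsbrk, Vmbest, 0);
**	Void_t*		p = vmalloc(vm, 1024);
**	p = vmresize(vm, p, 4096, VM_RSMOVE|VM_RSCOPY);
**	vmfree(vm, p);
**	vmclose(vm);
**
** Most code simply uses Vmheap below, which binds this method to the
** sbrk/mmap discipline at compile time.
*/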
1453da2e3ebdSchin 
1454da2e3ebdSchin /* The heap region */
1455da2e3ebdSchin static Vmdata_t	_Vmdata =
1456da2e3ebdSchin {
1457da2e3ebdSchin 	VM_MTBEST|VM_TRUST,		/* mode		*/
1458da2e3ebdSchin 	0,				/* incr		*/
1459da2e3ebdSchin 	0,				/* pool		*/
1460da2e3ebdSchin 	NIL(Seg_t*),			/* seg		*/
1461da2e3ebdSchin 	NIL(Block_t*),			/* free		*/
1462da2e3ebdSchin 	NIL(Block_t*),			/* wild		*/
1463da2e3ebdSchin 	NIL(Block_t*),			/* root		*/
1464da2e3ebdSchin };
1465da2e3ebdSchin Vmalloc_t _Vmheap =
1466da2e3ebdSchin {
1467da2e3ebdSchin 	{ bestalloc,
1468da2e3ebdSchin 	  bestresize,
1469da2e3ebdSchin 	  bestfree,
1470da2e3ebdSchin 	  bestaddr,
1471da2e3ebdSchin 	  bestsize,
1472da2e3ebdSchin 	  bestcompact,
1473da2e3ebdSchin 	  bestalign,
1474da2e3ebdSchin 	  VM_MTBEST
1475da2e3ebdSchin 	},
1476da2e3ebdSchin 	NIL(char*),			/* file		*/
1477da2e3ebdSchin 	0,				/* line		*/
1478da2e3ebdSchin 	0,				/* func		*/
1479da2e3ebdSchin 	(Vmdisc_t*)(&_Vmdcsbrk),	/* disc		*/
1480da2e3ebdSchin 	&_Vmdata,			/* data		*/
1481da2e3ebdSchin 	NIL(Vmalloc_t*)			/* next		*/
1482da2e3ebdSchin };
1483da2e3ebdSchin 
1484da2e3ebdSchin __DEFINE__(Vmalloc_t*, Vmheap, &_Vmheap);
1485da2e3ebdSchin __DEFINE__(Vmalloc_t*, Vmregion, &_Vmheap);
1486da2e3ebdSchin __DEFINE__(Vmethod_t*, Vmbest, &_Vmbest);
1487da2e3ebdSchin __DEFINE__(Vmdisc_t*,  Vmdcsbrk, (Vmdisc_t*)(&_Vmdcsbrk) );
1488da2e3ebdSchin 
1489da2e3ebdSchin #ifdef NoF
1490da2e3ebdSchin NoF(vmbest)
1491da2e3ebdSchin #endif
1492da2e3ebdSchin 
1493da2e3ebdSchin #endif
1494