xref: /titanic_50/usr/src/lib/libast/common/vmalloc/vmpool.c (revision 29e83d4b25fd82feb8e0e0fbe89f7e2a8438533d)
/***********************************************************************
*                                                                      *
*               This software is part of the ast package               *
*           Copyright (c) 1985-2007 AT&T Knowledge Ventures            *
*                      and is licensed under the                       *
*                  Common Public License, Version 1.0                  *
*                      by AT&T Knowledge Ventures                      *
*                                                                      *
*                A copy of the License is available at                 *
*            http://www.opensource.org/licenses/cpl1.0.txt             *
*         (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9)         *
*                                                                      *
*              Information and Software Systems Research               *
*                            AT&T Research                             *
*                           Florham Park NJ                            *
*                                                                      *
*                 Glenn Fowler <gsf@research.att.com>                  *
*                  David Korn <dgk@research.att.com>                   *
*                   Phong Vo <kpv@research.att.com>                    *
*                                                                      *
***********************************************************************/
#if defined(_UWIN) && defined(_BLD_ast)

void _STUB_vmpool(){}

#else

#include	"vmhdr.h"

#define POOLFREE	0x55555555L	/* block free indicator	 */

/*	Method for pool allocation.
**	All elements in a pool have the same size.
**	The following fields of Vmdata_t are used as:
**		pool:	size of a block.
**		free:	list of free blocks.
**
**	Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/
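
/*	Usage sketch (illustrative only, not part of this compilation unit):
**	a pool region is opened through the public vmalloc.h interface with
**	the Vmpool method and then serves allocations of one fixed size.
**	The discipline Vmdcheap and the record type Record_t below are
**	assumptions made for the example.
**
**		#include <vmalloc.h>
**		typedef struct { int key, val; } Record_t;
**		Vmalloc_t*	vm = vmopen(Vmdcheap, Vmpool, 0);
**		Record_t*	rp = (Record_t*)vmalloc(vm, sizeof(Record_t));
**			... every later vmalloc() on vm must request sizeof(Record_t)
**		vmfree(vm, rp);		returns the block to the pool free list
**		vmclose(vm);
*/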

/*	Allocate a block from the pool region; all blocks have size vd->pool */
#if __STD_C
static Void_t* poolalloc(Vmalloc_t* vm, reg size_t size)
#else
static Void_t* poolalloc(vm, size )
Vmalloc_t*	vm;
reg size_t	size;
#endif
{
	reg Vmdata_t*	vd = vm->data;
	reg Block_t	*tp, *next;
	reg size_t	s;
	reg Seg_t*	seg;
	reg int		local;

	if(size <= 0)
		return NIL(Void_t*);
	else if(size != vd->pool)
	{	if(vd->pool <= 0)
			vd->pool = size;	/* first allocation fixes the pool block size */
		else	return NIL(Void_t*);
	}

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd,local);
		if(ISLOCK(vd, local))
			return NIL(Void_t*);
		SETLOCK(vd, local);
	}

	if((tp = vd->free) ) /* there is a ready free block */
	{	vd->free = SEGLINK(tp);
		goto done;
	}

	size = ROUND(size,ALIGN);

	/* look through all segments for a suitable free block */
	for(tp = NIL(Block_t*), seg = vd->seg; seg; seg = seg->next)
	{	if((tp = seg->free) &&
		   (s = (SIZE(tp) & ~BITS) + sizeof(Head_t)) >= size )
			goto has_blk;
	}

	for(;;) /* must extend region */
	{	if((tp = (*_Vmextend)(vm,ROUND(size,vd->incr),NIL(Vmsearch_f))) )
		{	s = (SIZE(tp) & ~BITS) + sizeof(Head_t);
			seg = SEG(tp);
			goto has_blk;
		}
		else if(vd->mode&VM_AGAIN)
			vd->mode &= ~VM_AGAIN;
		else	goto done;
	}

has_blk: /* if we get here, (tp, s, seg) must be well-defined */
	next = (Block_t*)((Vmuchar_t*)tp+size);
	if((s -= size) <= (size + sizeof(Head_t)) )
	{	/* remainder too small to remain a segment free block;
		** carve it into pool blocks and push them on the free list
		*/
		for(; s >= size; s -= size)
		{	SIZE(next) = POOLFREE;
			SEGLINK(next) = vd->free;
			vd->free = next;
			next = (Block_t*)((Vmuchar_t*)next + size);
		}
		seg->free = NIL(Block_t*);
	}
	else
	{	/* keep the remainder as the segment's free block */
		SIZE(next) = s - sizeof(Head_t);
		SEG(next) = seg;
		seg->free = next;
	}

done:
	if(!local && (vd->mode&VM_TRACE) && _Vmtrace && tp)
		(*_Vmtrace)(vm,NIL(Vmuchar_t*),(Vmuchar_t*)tp,vd->pool,0);

	CLRLOCK(vd, local);
	ANNOUNCE(local, vm, VM_ALLOC, (Void_t*)tp, vm->disc);
	return (Void_t*)tp;
}

/*	If addr falls inside a busy pool block, return its offset into that
**	block; return -1 for freed blocks or addresses outside the region.
*/
#if __STD_C
static long pooladdr(Vmalloc_t* vm, reg Void_t* addr)
#else
static long pooladdr(vm, addr)
Vmalloc_t*	vm;
reg Void_t*	addr;
#endif
{
	reg Block_t	*bp, *tp;
	reg Vmuchar_t	*laddr, *baddr;
	reg size_t	size;
	reg Seg_t*	seg;
	reg long	offset;
	reg Vmdata_t*	vd = vm->data;
	reg int		local;

	if(!(local = vd->mode&VM_TRUST))
	{	GETLOCAL(vd,local);
		if(ISLOCK(vd,local))
			return -1L;
		SETLOCK(vd,local);
	}

	offset = -1L;
	for(seg = vd->seg; seg; seg = seg->next)
	{	laddr = (Vmuchar_t*)SEGBLOCK(seg);
		baddr = seg->baddr-sizeof(Head_t);
		if((Vmuchar_t*)addr < laddr || (Vmuchar_t*)addr >= baddr)
			continue;

		/* the block that has this address */
		size = ROUND(vd->pool,ALIGN);
		tp = (Block_t*)(laddr + (((Vmuchar_t*)addr-laddr)/size)*size );

		/* see if this block has been freed */
		if(SIZE(tp) == POOLFREE) /* may be a coincidence - make sure */
			for(bp = vd->free; bp; bp = SEGLINK(bp))
				if(bp == tp)
					goto done;

		offset = (Vmuchar_t*)addr - (Vmuchar_t*)tp;
		goto done;
	}

done:
	CLRLOCK(vd,local);
	return offset;
}

/*	Return a block to the pool's free list */
#if __STD_C
static int poolfree(reg Vmalloc_t* vm, reg Void_t* data )
#else
static int poolfree(vm, data)
reg Vmalloc_t*	vm;
reg Void_t*	data;
#endif
{
	reg Block_t*	bp;
	reg Vmdata_t*	vd = vm->data;
	reg int		local;

	if(!data)
		return 0;

	if(!(local = vd->mode&VM_TRUST))
	{	GETLOCAL(vd, local);

		if(ISLOCK(vd, local) || vd->pool <= 0)
			return -1;

		if(KPVADDR(vm,data,pooladdr) != 0)
		{	if(vm->disc->exceptf)
				(void)(*vm->disc->exceptf)(vm,VM_BADADDR,data,vm->disc);
			return -1;
		}

		SETLOCK(vd, local);
	}

	bp = (Block_t*)data;
	SIZE(bp) = POOLFREE;
	SEGLINK(bp) = vd->free;
	vd->free = bp;

	if(!local && (vd->mode&VM_TRACE) && _Vmtrace)
		(*_Vmtrace)(vm, (Vmuchar_t*)data, NIL(Vmuchar_t*), vd->pool, 0);

	CLRLOCK(vd,local);
	ANNOUNCE(local, vm, VM_FREE, data, vm->disc);
	return 0;
}

/*	Resizing only makes sense to the same pool size: NULL data acts as
**	an allocation (optionally zero-filled) and size 0 acts as a free.
*/
#if __STD_C
static Void_t* poolresize(Vmalloc_t* vm, Void_t* data, size_t size, int type )
#else
static Void_t* poolresize(vm, data, size, type )
Vmalloc_t*	vm;
Void_t*		data;
size_t		size;
int		type;
#endif
{
	int		local;
	reg Vmdata_t*	vd = vm->data;

	NOTUSED(type);

	if(!data)
	{	if((data = poolalloc(vm,size)) && (type&VM_RSZERO) )
		{	reg int	*d = (int*)data, *ed = (int*)((char*)data+size);
			do { *d++ = 0;} while(d < ed);
		}
		return data;
	}
	if(size == 0)
	{	(void)poolfree(vm,data);
		return NIL(Void_t*);
	}

	if(!(local = vd->mode&VM_TRUST) )
	{	GETLOCAL(vd, local);

		if(ISLOCK(vd, local) )
			return NIL(Void_t*);

		if(size != vd->pool || KPVADDR(vm,data,pooladdr) != 0)
		{	if(vm->disc->exceptf)
				(void)(*vm->disc->exceptf)(vm,VM_BADADDR,data,vm->disc);
			return NIL(Void_t*);
		}

		if((vd->mode&VM_TRACE) && _Vmtrace)
			(*_Vmtrace)(vm, (Vmuchar_t*)data, (Vmuchar_t*)data, size, 0);
	}

	ANNOUNCE(local, vm, VM_RESIZE, data, vm->disc);
	return data;
}

/*	Return the pool block size if addr is the start of a busy block */
#if __STD_C
static long poolsize(Vmalloc_t* vm, Void_t* addr)
#else
static long poolsize(vm, addr)
Vmalloc_t*	vm;
Void_t*		addr;
#endif
{
	return pooladdr(vm,addr) == 0 ? (long)vm->data->pool : -1L;
}

/*	Return unused space at the end of each segment to the discipline */
#if __STD_C
static int poolcompact(Vmalloc_t* vm)
#else
static int poolcompact(vm)
Vmalloc_t*	vm;
#endif
{
	reg Block_t*	fp;
	reg Seg_t	*seg, *next;
	reg size_t	s;
	reg Vmdata_t*	vd = vm->data;

	if(!(vd->mode&VM_TRUST))
	{	if(ISLOCK(vd,0))
			return -1;
		SETLOCK(vd,0);
	}

	for(seg = vd->seg; seg; seg = next)
	{	next = seg->next;

		if(!(fp = seg->free))
			continue;

		seg->free = NIL(Block_t*);
		if(seg->size == (s = SIZE(fp)&~BITS))
			s = seg->extent;
		else	s += sizeof(Head_t);

		if((*_Vmtruncate)(vm,seg,s,1) == s)
			seg->free = fp;
	}

	if((vd->mode&VM_TRACE) && _Vmtrace)
		(*_Vmtrace)(vm, (Vmuchar_t*)0, (Vmuchar_t*)0, 0, 0);

	CLRLOCK(vd,0);
	return 0;
}

/*	Aligned allocation is not supported for pool regions */
#if __STD_C
static Void_t* poolalign(Vmalloc_t* vm, size_t size, size_t align)
#else
static Void_t* poolalign(vm, size, align)
Vmalloc_t*	vm;
size_t		size;
size_t		align;
#endif
{
	NOTUSED(vm);
	NOTUSED(size);
	NOTUSED(align);
	return NIL(Void_t*);
}

/* Public interface */
static Vmethod_t _Vmpool =
{
	poolalloc,
	poolresize,
	poolfree,
	pooladdr,
	poolsize,
	poolcompact,
	poolalign,
	VM_MTPOOL
};
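
/*	The public entry points declared in vmalloc.h (vmalloc, vmresize,
**	vmfree, vmaddr, vmsize, vmcompact, vmalign) reach the functions above
**	by dispatching through this method table of the opened region; the
**	dispatch itself is defined in vmalloc.h, not here.
*/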

__DEFINE__(Vmethod_t*,Vmpool,&_Vmpool);

#ifdef NoF
NoF(vmpool)
#endif

#endif