/***********************************************************************
*                                                                      *
*               This software is part of the ast package               *
*          Copyright (c) 1985-2010 AT&T Intellectual Property          *
*                      and is licensed under the                       *
*                  Common Public License, Version 1.0                  *
*                    by AT&T Intellectual Property                     *
*                                                                      *
*                A copy of the License is available at                 *
*            http://www.opensource.org/licenses/cpl1.0.txt             *
*         (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9)         *
*                                                                      *
*              Information and Software Systems Research               *
*                            AT&T Research                             *
*                           Florham Park NJ                            *
*                                                                      *
*                 Glenn Fowler <gsf@research.att.com>                  *
*                  David Korn <dgk@research.att.com>                   *
*                   Phong Vo <kpv@research.att.com>                    *
*                                                                      *
***********************************************************************/
#if defined(_UWIN) && defined(_BLD_ast)

_STUB_vmpool()24 void _STUB_vmpool(){}

#else

#include	"vmhdr.h"

#define POOLFREE	0x55555555L	/* block free indicator	*/

/* Method for pool allocation.
** All elements in a pool have the same size.
** The following fields of Vmdata_t are used as:
**	pool:	size of a block.
**	free:	list of free blocks.
**
** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/

41 #if __STD_C
poolalloc(Vmalloc_t * vm,reg size_t size)42 static Void_t* poolalloc(Vmalloc_t* vm, reg size_t size)
43 #else
44 static Void_t* poolalloc(vm, size )
45 Vmalloc_t* vm;
46 reg size_t size;
47 #endif
48 {
49 reg Vmdata_t* vd = vm->data;
50 reg Block_t *tp, *next;
51 reg size_t s;
52 reg Seg_t* seg;
53 reg int local, inuse;
54
55 if(size <= 0)
56 return NIL(Void_t*);
57 if(size != vd->pool)
58 { if(vd->pool <= 0)
59 vd->pool = size;
60 else return NIL(Void_t*);
61 }
62
63 SETINUSE(vd, inuse);
64 if(!(local = vd->mode&VM_TRUST) )
65 { GETLOCAL(vd,local);
66 if(ISLOCK(vd, local))
67 { CLRINUSE(vd, inuse);
68 return NIL(Void_t*);
69 }
70 SETLOCK(vd, local);
71 }
72
73 if((tp = vd->free) ) /* there is a ready free block */
74 { vd->free = SEGLINK(tp);
75 goto done;
76 }
77
78 size = ROUND(size,ALIGN);
79
80 /* look thru all segments for a suitable free block */
81 for(tp = NIL(Block_t*), seg = vd->seg; seg; seg = seg->next)
82 { if((tp = seg->free) &&
83 (s = (SIZE(tp) & ~BITS) + sizeof(Head_t)) >= size )
84 goto has_blk;
85 }
86
87 for(;;) /* must extend region */
88 { if((tp = (*_Vmextend)(vm,ROUND(size,vd->incr),NIL(Vmsearch_f))) )
89 { s = (SIZE(tp) & ~BITS) + sizeof(Head_t);
90 seg = SEG(tp);
91 goto has_blk;
92 }
93 else if(vd->mode&VM_AGAIN)
94 vd->mode &= ~VM_AGAIN;
95 else goto done;
96 }
97
98 has_blk: /* if get here, (tp, s, seg) must be well-defined */
99 next = (Block_t*)((Vmuchar_t*)tp+size);
100 if((s -= size) <= (size + sizeof(Head_t)) )
101 { for(; s >= size; s -= size)
102 { SIZE(next) = POOLFREE;
103 SEGLINK(next) = vd->free;
104 vd->free = next;
105 next = (Block_t*)((Vmuchar_t*)next + size);
106 }
107 seg->free = NIL(Block_t*);
108 }
109 else
110 { SIZE(next) = s - sizeof(Head_t);
111 SEG(next) = seg;
112 seg->free = next;
113 }
114
115 done:
116 if(!local && (vd->mode&VM_TRACE) && _Vmtrace && tp)
117 (*_Vmtrace)(vm,NIL(Vmuchar_t*),(Vmuchar_t*)tp,vd->pool,0);
118
119 CLRLOCK(vd, local);
120 ANNOUNCE(local, vm, VM_ALLOC, (Void_t*)tp, vm->disc);
121 CLRINUSE(vd, inuse);
122 return (Void_t*)tp;
123 }
124
125 #if __STD_C
pooladdr(Vmalloc_t * vm,reg Void_t * addr)126 static long pooladdr(Vmalloc_t* vm, reg Void_t* addr)
127 #else
128 static long pooladdr(vm, addr)
129 Vmalloc_t* vm;
130 reg Void_t* addr;
131 #endif
132 {
133 reg Block_t *bp, *tp;
134 reg Vmuchar_t *laddr, *baddr;
135 reg size_t size;
136 reg Seg_t* seg;
137 reg long offset;
138 reg Vmdata_t* vd = vm->data;
139 reg int local, inuse;
140
141 SETINUSE(vd, inuse);
142 if(!(local = vd->mode&VM_TRUST))
143 { GETLOCAL(vd,local);
144 if(ISLOCK(vd,local))
145 { CLRINUSE(vd, inuse);
146 return -1L;
147 }
148 SETLOCK(vd,local);
149 }
150
151 offset = -1L;
152 for(seg = vd->seg; seg; seg = seg->next)
153 { laddr = (Vmuchar_t*)SEGBLOCK(seg);
154 baddr = seg->baddr-sizeof(Head_t);
155 if((Vmuchar_t*)addr < laddr || (Vmuchar_t*)addr >= baddr)
156 continue;
157
158 /* the block that has this address */
159 size = ROUND(vd->pool,ALIGN);
160 tp = (Block_t*)(laddr + (((Vmuchar_t*)addr-laddr)/size)*size );
161
162 /* see if this block has been freed */
163 if(SIZE(tp) == POOLFREE) /* may be a coincidence - make sure */
164 for(bp = vd->free; bp; bp = SEGLINK(bp))
165 if(bp == tp)
166 goto done;
167
168 offset = (Vmuchar_t*)addr - (Vmuchar_t*)tp;
169 goto done;
170 }
171
172 done :
173 CLRLOCK(vd,local);
174 CLRINUSE(vd, inuse);
175 return offset;
176 }
177
178 #if __STD_C
poolfree(reg Vmalloc_t * vm,reg Void_t * data)179 static int poolfree(reg Vmalloc_t* vm, reg Void_t* data )
180 #else
181 static int poolfree(vm, data)
182 reg Vmalloc_t* vm;
183 reg Void_t* data;
184 #endif
185 {
186 reg Block_t* bp;
187 reg Vmdata_t* vd = vm->data;
188 reg int local, inuse;
189
190 if(!data)
191 return 0;
192
193 SETINUSE(vd, inuse);
194 if(!(local = vd->mode&VM_TRUST))
195 { GETLOCAL(vd, local);
196
197 if(ISLOCK(vd, local) || vd->pool <= 0)
198 { CLRINUSE(vd, inuse);
199 return -1;
200 }
201
202 if(KPVADDR(vm,data,pooladdr) != 0)
203 { if(vm->disc->exceptf)
204 (void)(*vm->disc->exceptf)(vm,VM_BADADDR,data,vm->disc);
205 CLRINUSE(vd, inuse);
206 return -1;
207 }
208
209 SETLOCK(vd, local);
210 }
211
212 bp = (Block_t*)data;
213 SIZE(bp) = POOLFREE;
214 SEGLINK(bp) = vd->free;
215 vd->free = bp;
216
217 if(!local && (vd->mode&VM_TRACE) && _Vmtrace)
218 (*_Vmtrace)(vm, (Vmuchar_t*)data, NIL(Vmuchar_t*), vd->pool, 0);
219
220 CLRLOCK(vd,local);
221 ANNOUNCE(local, vm, VM_FREE, data, vm->disc);
222 CLRINUSE(vd, inuse);
223 return 0;
224 }
225
226 #if __STD_C
poolresize(Vmalloc_t * vm,Void_t * data,size_t size,int type)227 static Void_t* poolresize(Vmalloc_t* vm, Void_t* data, size_t size, int type )
228 #else
229 static Void_t* poolresize(vm, data, size, type )
230 Vmalloc_t* vm;
231 Void_t* data;
232 size_t size;
233 int type;
234 #endif
235 {
236 int local, inuse;
237 reg Vmdata_t* vd = vm->data;
238
239 NOTUSED(type);
240
241 SETINUSE(vd, inuse);
242 if(!data)
243 { if((data = poolalloc(vm,size)) && (type&VM_RSZERO) )
244 { reg int *d = (int*)data, *ed = (int*)((char*)data+size);
245 do { *d++ = 0;} while(d < ed);
246 }
247 CLRINUSE(vd, inuse);
248 return data;
249 }
250 if(size == 0)
251 { (void)poolfree(vm,data);
252 CLRINUSE(vd, inuse);
253 return NIL(Void_t*);
254 }
255
256 if(!(local = vd->mode&VM_TRUST) )
257 { GETLOCAL(vd, local);
258
259 if(ISLOCK(vd, local) )
260 { CLRINUSE(vd, inuse);
261 return NIL(Void_t*);
262 }
263
264 if(size != vd->pool || KPVADDR(vm,data,pooladdr) != 0)
265 { if(vm->disc->exceptf)
266 (void)(*vm->disc->exceptf)(vm,VM_BADADDR,data,vm->disc);
267 CLRINUSE(vd, inuse);
268 return NIL(Void_t*);
269 }
270
271 if((vd->mode&VM_TRACE) && _Vmtrace)
272 (*_Vmtrace)(vm, (Vmuchar_t*)data, (Vmuchar_t*)data, size, 0);
273 }
274
275 ANNOUNCE(local, vm, VM_RESIZE, data, vm->disc);
276 CLRINUSE(vd, inuse);
277 return data;
278 }
279
280 #if __STD_C
poolsize(Vmalloc_t * vm,Void_t * addr)281 static long poolsize(Vmalloc_t* vm, Void_t* addr)
282 #else
283 static long poolsize(vm, addr)
284 Vmalloc_t* vm;
285 Void_t* addr;
286 #endif
287 {
288 return pooladdr(vm,addr) == 0 ? (long)vm->data->pool : -1L;
289 }
290
291 #if __STD_C
poolcompact(Vmalloc_t * vm)292 static int poolcompact(Vmalloc_t* vm)
293 #else
294 static int poolcompact(vm)
295 Vmalloc_t* vm;
296 #endif
297 {
298 reg Block_t* fp;
299 reg Seg_t *seg, *next;
300 reg size_t s;
301 reg Vmdata_t* vd = vm->data;
302 reg int inuse;
303
304 SETINUSE(vd, inuse);
305 if(!(vd->mode&VM_TRUST))
306 { if(ISLOCK(vd,0))
307 { CLRINUSE(vd, inuse);
308 return -1;
309 }
310 SETLOCK(vd,0);
311 }
312
313 for(seg = vd->seg; seg; seg = next)
314 { next = seg->next;
315
316 if(!(fp = seg->free))
317 continue;
318
319 seg->free = NIL(Block_t*);
320 if(seg->size == (s = SIZE(fp)&~BITS))
321 s = seg->extent;
322 else s += sizeof(Head_t);
323
324 if((*_Vmtruncate)(vm,seg,s,1) == s)
325 seg->free = fp;
326 }
327
328 if((vd->mode&VM_TRACE) && _Vmtrace)
329 (*_Vmtrace)(vm, (Vmuchar_t*)0, (Vmuchar_t*)0, 0, 0);
330
331 CLRLOCK(vd,0);
332 CLRINUSE(vd, inuse);
333 return 0;
334 }
335
336 #if __STD_C
poolalign(Vmalloc_t * vm,size_t size,size_t align)337 static Void_t* poolalign(Vmalloc_t* vm, size_t size, size_t align)
338 #else
339 static Void_t* poolalign(vm, size, align)
340 Vmalloc_t* vm;
341 size_t size;
342 size_t align;
343 #endif
344 {
345 NOTUSED(vm);
346 NOTUSED(size);
347 NOTUSED(align);
348 return NIL(Void_t*);
349 }
350
351 /* Public interface */
352 static Vmethod_t _Vmpool =
353 {
354 poolalloc,
355 poolresize,
356 poolfree,
357 pooladdr,
358 poolsize,
359 poolcompact,
360 poolalign,
361 VM_MTPOOL
362 };
363
364 __DEFINE__(Vmethod_t*,Vmpool,&_Vmpool);

#ifdef NoF
NoF(vmpool)
#endif

#endif