/***********************************************************************
*                                                                      *
*               This software is part of the ast package               *
*          Copyright (c) 1985-2010 AT&T Intellectual Property          *
*                      and is licensed under the                       *
*                  Common Public License, Version 1.0                  *
*                    by AT&T Intellectual Property                     *
*                                                                      *
*                A copy of the License is available at                 *
*            http://www.opensource.org/licenses/cpl1.0.txt             *
*         (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9)         *
*                                                                      *
*              Information and Software Systems Research               *
*                            AT&T Research                             *
*                           Florham Park NJ                            *
*                                                                      *
*                 Glenn Fowler <gsf@research.att.com>                  *
*                  David Korn <dgk@research.att.com>                   *
*                   Phong Vo <kpv@research.att.com>                    *
*                                                                      *
***********************************************************************/
#if defined(_UWIN) && defined(_BLD_ast)

void _STUB_vmprivate(){}

#else

#include	"vmhdr.h"

static char*	Version = "\n@(#)$Id: Vmalloc (AT&T Research) 2010-01-01 $\0\n";

/* Private code used in the vmalloc library
**
** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/

/* Get more memory for a region */
#if __STD_C
static Block_t* vmextend(reg Vmalloc_t* vm, size_t size, Vmsearch_f searchf )
#else
static Block_t* vmextend(vm, size, searchf )
reg Vmalloc_t*	vm;		/* region to increase in size	*/
size_t		size;		/* desired amount of space	*/
Vmsearch_f	searchf;	/* tree search function		*/
#endif
{
	reg size_t	s;
	reg Seg_t*	seg;
	reg Block_t	*bp, *t;
	reg Vmuchar_t*	addr = (Vmuchar_t*)Version; /* shut compiler warning */
	reg Vmdata_t*	vd = vm->data;
	reg Vmemory_f	memoryf = vm->disc->memoryf;
	reg Vmexcept_f	exceptf = vm->disc->exceptf;

	GETPAGESIZE(_Vmpagesize);

#if DEBUG /* trace all allocation calls through the heap */
	if(!_Vmtrace && vm == Vmheap && (vd->mode&VM_TRUST) )
		VMOPTIONS();
#endif

	if(vd->incr <= 0) /* this is just _Vmheap on the first call */
		vd->incr = VMHEAPINCR;

	/* Get slightly more for administrative data */
	s = size + sizeof(Seg_t) + sizeof(Block_t) + sizeof(Head_t) + 2*ALIGN;
	if(s <= size)	/* size was too large and we have wrapped around */
		return NIL(Block_t*);
	if((size = ROUND(s,vd->incr)) < s)
		return NIL(Block_t*);

	/* increase the rounding factor to reduce # of future extensions */
	if(size > 2*vd->incr && vm->disc->round < vd->incr)
		vd->incr *= 2;

	/* see if we can extend the current segment */
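	/* If this segment's wild block (its trailing free block) borders the
	** space to be added, it already covers part of the request: s below
	** measures that part, rounded down to vd->incr, and only size-s new
	** bytes are asked for. If the wild block alone would satisfy the whole
	** rounded request, size is bumped by one more increment so that the
	** segment still grows.
	*/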
	if(!(seg = vd->seg) )
		addr = NIL(Vmuchar_t*);
	else
	{	if(!vd->wild || SEG(vd->wild) != seg)
			s = 0;
		else
		{	s = SIZE(vd->wild) + sizeof(Head_t);
			if((s = (s/vd->incr)*vd->incr) == size)
				size += vd->incr;
		}
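		/* ask the discipline memory function to grow the segment in
		** place: (*memoryf)(vm, old addr, old size, new size, disc)
		** must return the segment's original address on success, as
		** the ASSERT below checks.
		*/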
		addr = (Vmuchar_t*)(*memoryf)(vm,seg->addr,seg->extent,
					      seg->extent+size-s,vm->disc);
		if(!addr)
			seg = NIL(Seg_t*);
		else
		{	/**/ASSERT(addr == (Vmuchar_t*)seg->addr);
			addr += seg->extent;
			size -= s;
		}
	}

	while(!addr)	/* try to get space */
	{	if((addr = (Vmuchar_t*)(*memoryf)(vm,NIL(Void_t*),0,size,vm->disc)) )
			break;

		/* check with exception handler to see if we should continue */
		if(!exceptf)
			return NIL(Block_t*);
		else
		{	int	rv, lock;
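			/* the region lock bit is dropped around the handler
			** call and restored afterwards; a positive return
			** retries the allocation, zero gives up after setting
			** VM_AGAIN, and a negative return gives up outright.
			*/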
			lock = vd->mode&VM_LOCK;
			vd->mode &= ~VM_LOCK;
			rv = (*exceptf)(vm,VM_NOMEM,(Void_t*)size,vm->disc);
			vd->mode |= lock;
			if(rv <= 0)
			{	if(rv == 0)
					vd->mode |= VM_AGAIN;
				return NIL(Block_t*);
			}
		}
	}

	if(seg)
	{	/* extending current segment */
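		/* bp is the block header at the old end of the segment: for
		** Vmbest/Vmdebug/Vmprofile the new space either becomes this
		** block or is coalesced into the free block just before it;
		** for the other methods it is folded into seg->free when that
		** exists.
		*/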
		bp = BLOCK(seg->baddr);

		if(vd->mode&(VM_MTBEST|VM_MTDEBUG|VM_MTPROFILE) )
		{	/**/ ASSERT((SIZE(bp)&~BITS) == 0);
			/**/ ASSERT(SEG(bp) == seg);
			if(!ISPFREE(SIZE(bp)) )
				SIZE(bp) = size - sizeof(Head_t);
			else
			{	/**/ ASSERT(searchf);
				bp = LAST(bp);
				if(bp == vd->wild)
					vd->wild = NIL(Block_t*);
				else	REMOVE(vd,bp,INDEX(SIZE(bp)),t,(*searchf));
				SIZE(bp) += size;
			}
		}
		else
		{	if(seg->free)
			{	bp = seg->free;
				seg->free = NIL(Block_t*);
				SIZE(bp) += size;
			}
			else
			{	SEG(bp) = seg;
				SIZE(bp) = size - sizeof(Head_t);
			}
		}

		seg->size += size;
		seg->extent += size;
		seg->baddr += size;
	}
	else
	{	/* creating a new segment */
		reg Seg_t	*sp, *lastsp;

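		/* align the start of the new area; seg->addr keeps the raw
		** address returned by memoryf so that exactly this address can
		** be handed back when the segment is later truncated or freed,
		** while seg->baddr marks the usable end of the segment.
		*/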
		if((s = (size_t)(VLONG(addr)%ALIGN)) != 0)
			addr += ALIGN-s;

		seg = (Seg_t*)addr;
		seg->vmdt = vd;
		seg->addr = (Void_t*)(addr - (s ? ALIGN-s : 0));
		seg->extent = size;
		seg->baddr = addr + size - (s ? 2*ALIGN : 0);
		seg->free = NIL(Block_t*);
		bp = SEGBLOCK(seg);
		SEG(bp) = seg;
		SIZE(bp) = seg->baddr - (Vmuchar_t*)bp - 2*sizeof(Head_t);

		/* NOTE: for Vmbest, Vmdebug and Vmprofile the region's segment
		** list is kept in decreasing address order so that the wild
		** block can be checked for easily.
		*/
		lastsp = NIL(Seg_t*);
		sp = vd->seg;
		if(vd->mode&(VM_MTBEST|VM_MTDEBUG|VM_MTPROFILE))
			for(; sp; lastsp = sp, sp = sp->next)
				if(seg->addr > sp->addr)
					break;
		seg->next = sp;
		if(lastsp)
			lastsp->next = seg;
		else	vd->seg = seg;

		seg->size = SIZE(bp);
	}

	/* make a fake header for possible segmented memory */
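	/* this zero-sized, permanently busy block caps the segment so that
	** coalescing and block walks never run past seg->baddr, even when the
	** region's memory consists of several discontiguous segments.
	*/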
	t = NEXT(bp);
	SEG(t) = seg;
	SIZE(t) = BUSY;

	/* see if the wild block is still wild */
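	/* the wild block is only kept while it lives in the first segment of
	** the region's list; if the segment just extended or created displaced
	** it, turn it back into an ordinary free block: junked onto the free
	** cache for Vmbest/Vmdebug/Vmprofile, or parked on seg->free otherwise.
	*/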
	if((t = vd->wild) && (seg = SEG(t)) != vd->seg)
	{	CLRPFREE(SIZE(NEXT(t)));
		if(vd->mode&(VM_MTBEST|VM_MTDEBUG|VM_MTPROFILE) )
		{	SIZE(t) |= BUSY|JUNK;
			LINK(t) = CACHE(vd)[C_INDEX(SIZE(t))];
			CACHE(vd)[C_INDEX(SIZE(t))] = t;
		}
		else	seg->free = t;

		vd->wild = NIL(Block_t*);
	}

	return bp;
}

/* Truncate a segment if possible */
#if __STD_C
static ssize_t vmtruncate(Vmalloc_t* vm, Seg_t* seg, size_t size, int exact)
#else
static ssize_t vmtruncate(vm, seg, size, exact)
Vmalloc_t*	vm;	/* containing region		*/
Seg_t*		seg;	/* the one to be truncated	*/
size_t		size;	/* amount of free space		*/
int		exact;
#endif
{
	reg Void_t*	caddr;
	reg Seg_t*	last;
	reg Vmdata_t*	vd = vm->data;
	reg Vmemory_f	memoryf = vm->disc->memoryf;

	caddr = seg->addr;

	if(size < seg->size)
	{	reg ssize_t	less;

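		/* less is the number of bytes actually returned to the system:
		** unless an exact amount was requested, it is rounded down to a
		** multiple of the discipline's rounding (or the page size) and
		** of vd->incr, then backed off by one increment if the remainder
		** would be too small to hold a Block_t.
		*/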
		if(exact)
			less = size;
		else /* round the truncated amount to the discipline's requirements */
		{	if((less = vm->disc->round) <= 0)
				less = _Vmpagesize;
			less = (size/less)*less;
			less = (less/vd->incr)*vd->incr;
			if(less > 0 && size > (size_t)less && (size-(size_t)less) < sizeof(Block_t) )
				less = (size_t)less <= vd->incr ? 0 : (size_t)less - vd->incr;
		}

		if(less <= 0 ||
		   (*memoryf)(vm,caddr,seg->extent,seg->extent-less,vm->disc) != caddr)
			return 0;

		seg->extent -= less;
		seg->size -= less;
		seg->baddr -= less;
		SEG(BLOCK(seg->baddr)) = seg;
		SIZE(BLOCK(seg->baddr)) = BUSY;

		return less;
	}
	else
	{	/* unlink segment from region */
		if(seg == vd->seg)
		{	vd->seg = seg->next;
			last = NIL(Seg_t*);
		}
		else
		{	for(last = vd->seg; last->next != seg; last = last->next)
				;
			last->next = seg->next;
		}

		/* now delete it */
		if((*memoryf)(vm,caddr,seg->extent,0,vm->disc) == caddr)
			return size;

		/* space reduction failed, reinsert segment */
		if(last)
		{	seg->next = last->next;
			last->next = seg;
		}
		else
		{	seg->next = vd->seg;
			vd->seg = seg;
		}
		return 0;
	}
}

/* Externally visible names but local to library */
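/* This table is the single definition of these library-private entry points;
** the NIL slots are presumably filled in elsewhere in the library by the
** modules that implement tracing, profiling and the string helpers.
*/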
Vmextern_t	_Vmextern =
{	vmextend,						/* _Vmextend	*/
	vmtruncate,						/* _Vmtruncate	*/
	0,							/* _Vmpagesize	*/
	NIL(char*(*)_ARG_((char*,const char*,int))),		/* _Vmstrcpy	*/
	NIL(char*(*)_ARG_((Vmulong_t,int))),			/* _Vmitoa	*/
	NIL(void(*)_ARG_((Vmalloc_t*,
			  Vmuchar_t*,Vmuchar_t*,size_t,size_t))),	/* _Vmtrace	*/
	NIL(void(*)_ARG_((Vmalloc_t*)))				/* _Vmpfclose	*/
};

#endif