/***********************************************************************
*                                                                      *
*               This software is part of the ast package               *
*           Copyright (c) 1985-2007 AT&T Knowledge Ventures            *
*                      and is licensed under the                       *
*                  Common Public License, Version 1.0                  *
*                      by AT&T Knowledge Ventures                      *
*                                                                      *
*                A copy of the License is available at                 *
*            http://www.opensource.org/licenses/cpl1.0.txt             *
*        (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9)          *
*                                                                      *
*              Information and Software Systems Research               *
*                            AT&T Research                             *
*                           Florham Park NJ                            *
*                                                                      *
*                 Glenn Fowler <gsf@research.att.com>                  *
*                  David Korn <dgk@research.att.com>                   *
*                   Phong Vo <kpv@research.att.com>                    *
*                                                                      *
***********************************************************************/
#if defined(_UWIN) && defined(_BLD_ast)

/* On UWIN builds of ast this whole file is compiled out; the empty
** stub keeps the object file non-empty so linkers do not complain.
*/
void _STUB_vmprivate(){}

#else

#include	"vmhdr.h"

/* Identification string; also referenced in vmextend() below to
** suppress an unused-variable compiler warning.
*/
static char*	Version = "\n@(#)$Id: Vmalloc (AT&T Research) 2005-09-28 $\0\n";

#if _sys_stat
#include	<sys/stat.h>
#endif
#include	<fcntl.h>

/* creation mode for the VMTRACE output file; fall back to the octal
** literal on systems without the symbolic S_I* permission macros
*/
#ifdef S_IRUSR
#define CREAT_MODE	(S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)
#else
#define CREAT_MODE	0644
#endif

/* Private code used in the vmalloc library
**
** Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/

/* Get more memory for a region.
** Obtains raw memory via the discipline memoryf, either by growing the
** current segment in place or by creating a new segment, and returns a
** Block_t covering the new space (NIL on failure).  On memory failure
** the discipline's exceptf handler, if any, is consulted and may ask
** for a retry (rv > 0), a soft failure (rv == 0, sets VM_AGAIN), or a
** hard failure (rv < 0).
*/
#if __STD_C
static Block_t* vmextend(reg Vmalloc_t* vm, size_t size, Vmsearch_f searchf )
#else
static Block_t* vmextend(vm, size, searchf )
reg Vmalloc_t*	vm;		/* region to increase in size	*/
size_t		size;		/* desired amount of space	*/
Vmsearch_f	searchf;	/* tree search function		*/
#endif
{
	reg size_t	s;
	reg Seg_t*	seg;
	reg Block_t	*bp, *t;
	reg Vmuchar_t*	addr = (Vmuchar_t*)Version;	/* shut compiler warning */
	reg Vmdata_t*	vd = vm->data;
	reg Vmemory_f	memoryf = vm->disc->memoryf;
	reg Vmexcept_f	exceptf = vm->disc->exceptf;

	GETPAGESIZE(_Vmpagesize);

#if DEBUG /* trace all allocation calls through the heap */
	if(!_Vmtrace && vm == Vmheap && (vd->mode&VM_TRUST) )
	{	char	*env;
		int	fd;
		vd->mode = (vd->mode&~VM_TRUST)|VM_TRACE;
		if((fd = vmtrace(-1)) >= 0 ||
		   ((env = getenv("VMTRACE")) && (fd = creat(env, CREAT_MODE)) >= 0 ) )
			vmtrace(fd);
	}
#endif

	if(vd->incr <= 0) /* this is just _Vmheap on the first call */
		vd->incr = _Vmpagesize;

	/* Get slightly more for administrative data */
	s = size + sizeof(Seg_t) + sizeof(Block_t) + sizeof(Head_t) + 2*ALIGN;
	if(s <= size)	/* size was too large and we have wrapped around */
		return NIL(Block_t*);
	if((size = ROUND(s,vd->incr)) < s)	/* ROUND itself may overflow too */
		return NIL(Block_t*);

	/* increase the rounding factor to reduce # of future extensions */
	if(size > 2*vd->incr && vm->disc->round < vd->incr)
		vd->incr *= 2;

	/* see if we can extend the current segment */
	if(!(seg = vd->seg) )
		addr = NIL(Vmuchar_t*);
	else
	{	/* if the wild block (free space at the end of the segment) is
		** ours, count it toward the requested size so less new memory
		** is asked of memoryf
		*/
		if(!vd->wild || SEG(vd->wild) != seg)
			s = 0;
		else
		{	s = SIZE(vd->wild) + sizeof(Head_t);
			if((s = (s/vd->incr)*vd->incr) == size)
				size += vd->incr;
		}
		addr = (Vmuchar_t*)(*memoryf)(vm,seg->addr,seg->extent,
					      seg->extent+size-s,vm->disc);
		if(!addr)
			seg = NIL(Seg_t*);	/* fall through to new-segment path */
		else
		{	/**/ASSERT(addr == (Vmuchar_t*)seg->addr);
			addr += seg->extent;
			size -= s;
		}
	}

	while(!addr)	/* try to get space */
	{	if((addr = (Vmuchar_t*)(*memoryf)(vm,NIL(Void_t*),0,size,vm->disc)) )
			break;

		/* check with exception handler to see if we should continue */
		if(!exceptf)
			return NIL(Block_t*);
		else
		{	int	rv, lock;
			/* drop the region lock around the callback, then restore it */
			lock = vd->mode&VM_LOCK;
			vd->mode &= ~VM_LOCK;
			rv = (*exceptf)(vm,VM_NOMEM,(Void_t*)size,vm->disc);
			vd->mode |= lock;
			if(rv <= 0)
			{	if(rv == 0)
					vd->mode |= VM_AGAIN;
				return NIL(Block_t*);
			}
		}
	}

	if(seg)
	{	/* extending current segment */
		bp = BLOCK(seg->baddr);	/**/ ASSERT((SIZE(bp)&~BITS) == 0);
					/**/ ASSERT(SEG(bp) == seg);

		if(vd->mode&(VM_MTBEST|VM_MTDEBUG|VM_MTPROFILE) )
		{	if(!ISPFREE(SIZE(bp)) )
				SIZE(bp) = size - sizeof(Head_t);
			else
			{	/* merge with the preceding free block, removing it
				** from the free tree (or clearing the wild pointer)
				** before growing it
				*/
				/**/ ASSERT(searchf);
				bp = LAST(bp);
				if(bp == vd->wild)
					vd->wild = NIL(Block_t*);
				else	REMOVE(vd,bp,INDEX(SIZE(bp)),t,(*searchf));
				SIZE(bp) += size;
			}
		}
		else
		{	if(seg->free)
			{	bp = seg->free;
				seg->free = NIL(Block_t*);
				SIZE(bp) += size;
			}
			else	SIZE(bp) = size - sizeof(Head_t);
		}

		seg->size += size;
		seg->extent += size;
		seg->baddr += size;
	}
	else
	{	/* creating a new segment */
		reg Seg_t	*sp, *lastsp;

		/* align the segment start; remember the slack in s */
		if((s = (size_t)(VLONG(addr)%ALIGN)) != 0)
			addr += ALIGN-s;

		seg = (Seg_t*)addr;
		seg->vm = vm;
		seg->addr = (Void_t*)(addr - (s ? ALIGN-s : 0));
		seg->extent = size;
		seg->baddr = addr + size - (s ? 2*ALIGN : 0);
		seg->free = NIL(Block_t*);
		bp = SEGBLOCK(seg);
		SEG(bp) = seg;
		SIZE(bp) = seg->baddr - (Vmuchar_t*)bp - 2*sizeof(Head_t);

		/* NOTE: for Vmbest, Vmdebug and Vmprofile the region's segment list
		   is reversely ordered by addresses. This is so that we can easily
		   check for the wild block.
		*/
		lastsp = NIL(Seg_t*);
		sp = vd->seg;
		if(vd->mode&(VM_MTBEST|VM_MTDEBUG|VM_MTPROFILE))
			for(; sp; lastsp = sp, sp = sp->next)
				if(seg->addr > sp->addr)
					break;
		seg->next = sp;
		if(lastsp)
			lastsp->next = seg;
		else	vd->seg = seg;

		seg->size = SIZE(bp);
	}

	/* make a fake header for possible segmented memory */
	t = NEXT(bp);
	SEG(t) = seg;
	SIZE(t) = BUSY;

	/* see if the wild block is still wild */
	if((t = vd->wild) && (seg = SEG(t)) != vd->seg)
	{	/* a new segment was prepended; the old wild block is now an
		** ordinary free block of its own segment
		*/
		CLRPFREE(SIZE(NEXT(t)));
		if(vd->mode&(VM_MTBEST|VM_MTDEBUG|VM_MTPROFILE) )
		{	SIZE(t) |= BUSY|JUNK;
			LINK(t) = CACHE(vd)[C_INDEX(SIZE(t))];
			CACHE(vd)[C_INDEX(SIZE(t))] = t;
		}
		else	seg->free = t;

		vd->wild = NIL(Block_t*);
	}

	return bp;
}

/* Truncate a segment if possible.
** Returns the number of bytes actually released back via memoryf:
** "less" when the segment is shrunk in place, "size" when the whole
** segment is deleted, or 0 when nothing could be released.
*/
#if __STD_C
static ssize_t vmtruncate(Vmalloc_t* vm, Seg_t* seg, size_t size, int exact)
#else
static ssize_t vmtruncate(vm, seg, size, exact)
Vmalloc_t*	vm;	/* containing region		*/
Seg_t*		seg;	/* the one to be truncated	*/
size_t		size;	/* amount of free space		*/
int		exact;
#endif
{
	reg Void_t*	caddr;
	reg Seg_t*	last;
	reg Vmdata_t*	vd = vm->data;
	reg Vmemory_f	memoryf = vm->disc->memoryf;

	caddr = seg->addr;

	if(size < seg->size)
	{	/* shrink the segment but keep it in the region */
		reg ssize_t	less;

		if(exact)
			less = size;
		else /* keep truncated amount to discipline requirements */
		{	if((less = vm->disc->round) <= 0)
				less = _Vmpagesize;
			less = (size/less)*less;
			less = (less/vd->incr)*vd->incr;
			/* avoid leaving a residue too small to hold a block */
			if(less > 0 && size > less && (size-less) < sizeof(Block_t) )
				less = less <= vd->incr ? 0 : less - vd->incr;
		}

		if(less <= 0 ||
		   (*memoryf)(vm,caddr,seg->extent,seg->extent-less,vm->disc) != caddr)
			return 0;

		seg->extent -= less;
		seg->size -= less;
		seg->baddr -= less;
		/* rebuild the fake busy header at the new segment end */
		SEG(BLOCK(seg->baddr)) = seg;
		SIZE(BLOCK(seg->baddr)) = BUSY;

		return less;
	}
	else
	{	/* unlink segment from region */
		if(seg == vd->seg)
		{	vd->seg = seg->next;
			last = NIL(Seg_t*);
		}
		else
		{	for(last = vd->seg; last->next != seg; last = last->next)
				;
			last->next = seg->next;
		}

		/* now delete it */
		if((*memoryf)(vm,caddr,seg->extent,0,vm->disc) == caddr)
			return size;

		/* space reduction failed, reinsert segment */
		if(last)
		{	seg->next = last->next;
			last->next = seg;
		}
		else
		{	seg->next = vd->seg;
			vd->seg = seg;
		}
		return 0;
	}
}

/* Externally visible names but local to library.
** Function-pointer table through which the rest of the vmalloc library
** reaches these private helpers (see the _Vm* macros in vmhdr.h); the
** zero/NIL slots are filled in lazily elsewhere in the library.
*/
Vmextern_t	_Vmextern =
{	vmextend,						/* _Vmextend	*/
	vmtruncate,						/* _Vmtruncate	*/
	0,							/* _Vmpagesize	*/
	NIL(char*(*)_ARG_((char*,const char*,int))),		/* _Vmstrcpy	*/
	NIL(char*(*)_ARG_((Vmulong_t,int))),			/* _Vmitoa	*/
	NIL(void(*)_ARG_((Vmalloc_t*,
			  Vmuchar_t*,Vmuchar_t*,size_t,size_t))),	/* _Vmtrace	*/
	NIL(void(*)_ARG_((Vmalloc_t*)))				/* _Vmpfclose	*/
};

#endif