/***********************************************************************
*                                                                      *
*               This software is part of the ast package               *
*          Copyright (c) 1985-2011 AT&T Intellectual Property          *
*                      and is licensed under the                       *
*                  Eclipse Public License, Version 1.0                 *
*                    by AT&T Intellectual Property                     *
*                                                                      *
*                A copy of the License is available at                 *
*          http://www.eclipse.org/org/documents/epl-v10.html           *
*         (with md5 checksum b35adb5213ca9657e911e9befb180842)         *
*                                                                      *
*              Information and Software Systems Research               *
*                            AT&T Research                             *
*                           Florham Park NJ                            *
*                                                                      *
*                 Glenn Fowler <gsf@research.att.com>                  *
*                  David Korn <dgk@research.att.com>                   *
*                   Phong Vo <kpv@research.att.com>                    *
*                                                                      *
***********************************************************************/
#if defined(_UWIN) && defined(_BLD_ast)

void _STUB_vmpool(){}

#else

#include	"vmhdr.h"

#define POOLFREE	0x55555555L	/* block free indicator */

/*	Method for pool allocation.
**	All elements in a pool have the same size.
**	The following fields of Vmdata_t are used as:
**		pool:	size of a block.
**		free:	list of free blocks.
**
**	Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/
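
/*	Example usage (a sketch, not drawn from this file; see vmalloc.h and
**	the vmalloc documentation for the public interface).  A pool region
**	serves blocks of one fixed size: the first vmalloc() call on the
**	region fixes that size, and a later request for a different size
**	returns NIL.  Obj_t is a hypothetical element type; Vmdcheap is the
**	heap discipline declared in vmalloc.h.
**
**		Vmalloc_t	*vm = vmopen(Vmdcheap, Vmpool, 0);
**		Obj_t		*a = (Obj_t*)vmalloc(vm, sizeof(Obj_t));
**		Obj_t		*b = (Obj_t*)vmalloc(vm, sizeof(Obj_t));
**		vmfree(vm, a);
**		vmclose(vm);
*/
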
#if __STD_C
static Void_t* poolalloc(Vmalloc_t* vm, reg size_t size, int local)
#else
static Void_t* poolalloc(vm, size, local )
Vmalloc_t*	vm;
reg size_t	size;
int		local;
#endif
{
	reg Block_t	*tp, *next;
	reg size_t	s;
	reg Seg_t	*seg;
	reg Vmdata_t	*vd = vm->data;

	if(size <= 0)
		return NIL(Void_t*);

	if(size != vd->pool)
	{	if(vd->pool <= 0)
			vd->pool = size;
		else	return NIL(Void_t*);
	}

	SETLOCK(vm, local);

	if((tp = vd->free) )	/* there is a ready free block */
	{	vd->free = SEGLINK(tp);
		goto done;
	}

	size = ROUND(size,ALIGN);

	/* look thru all segments for a suitable free block */
	for(tp = NIL(Block_t*), seg = vd->seg; seg; seg = seg->next)
	{	if((tp = seg->free) &&
		   (s = (SIZE(tp) & ~BITS) + sizeof(Head_t)) >= size )
			goto got_blk;
	}

	if((tp = (*_Vmextend)(vm,ROUND(size,vd->incr),NIL(Vmsearch_f))) )
	{	s = (SIZE(tp) & ~BITS) + sizeof(Head_t);
		seg = SEG(tp);
		goto got_blk;
	}
	else	goto done;

got_blk: /* if get here, (tp, s, seg) must be well-defined */
	next = (Block_t*)((Vmuchar_t*)tp+size);
	if((s -= size) <= (size + sizeof(Head_t)) )
	{	for(; s >= size; s -= size)
		{	SIZE(next) = POOLFREE;
			SEGLINK(next) = vd->free;
			vd->free = next;
			next = (Block_t*)((Vmuchar_t*)next + size);
		}
		seg->free = NIL(Block_t*);
	}
	else
	{	SIZE(next) = s - sizeof(Head_t);
		SEG(next) = seg;
		seg->free = next;
	}

done:
	if(!local && (vd->mode&VM_TRACE) && _Vmtrace && tp)
		(*_Vmtrace)(vm,NIL(Vmuchar_t*),(Vmuchar_t*)tp,vd->pool,0);

	CLRLOCK(vm, local);

	return (Void_t*)tp;
}

#if __STD_C
static long pooladdr(Vmalloc_t* vm, reg Void_t* addr, int local)
#else
static long pooladdr(vm, addr, local)
Vmalloc_t*	vm;
reg Void_t*	addr;
int		local;
#endif
{
	Block_t		*bp, *tp;
	Vmuchar_t	*laddr, *baddr;
	size_t		size;
	Seg_t		*seg;
	long		offset;
	Vmdata_t*	vd = vm->data;

	SETLOCK(vm, local);

	offset = -1L;
	for(seg = vd->seg; seg; seg = seg->next)
	{	laddr = (Vmuchar_t*)SEGBLOCK(seg);
		baddr = seg->baddr-sizeof(Head_t);
		if((Vmuchar_t*)addr < laddr || (Vmuchar_t*)addr >= baddr)
			continue;

		/* the block that has this address */
		size = ROUND(vd->pool,ALIGN);
		tp = (Block_t*)(laddr + (((Vmuchar_t*)addr-laddr)/size)*size );

		/* see if this block has been freed */
		if(SIZE(tp) == POOLFREE) /* may be a coincidence - make sure */
			for(bp = vd->free; bp; bp = SEGLINK(bp))
				if(bp == tp)
					goto done;

		offset = (Vmuchar_t*)addr - (Vmuchar_t*)tp;
		goto done;
	}

done :
	CLRLOCK(vm, local);

	return offset;
}
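
/*	Free blocks are kept on a singly linked list headed by vd->free.
**	When a block is freed, its SIZE field is overwritten with the
**	POOLFREE pattern and its SEGLINK field is pointed at the previous
**	list head, so both the free mark and the link live inside the freed
**	block itself.  Because the user data of a busy block could
**	coincidentally contain the POOLFREE pattern, pooladdr() above treats
**	the mark only as a hint and confirms it by walking the free list.
*/
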
#if __STD_C
static int poolfree(reg Vmalloc_t* vm, reg Void_t* data, int local )
#else
static int poolfree(vm, data, local)
Vmalloc_t*	vm;
Void_t*		data;
int		local;
#endif
{
	Block_t		*bp;
	Vmdata_t	*vd = vm->data;

	if(!data)
		return 0;
	if(vd->pool <= 0)
		return -1;

	SETLOCK(vm, local);

	/**/ASSERT(KPVADDR(vm, data, pooladdr) == 0);
	bp = (Block_t*)data;
	SIZE(bp) = POOLFREE;
	SEGLINK(bp) = vd->free;
	vd->free = bp;

	if(!local && (vd->mode&VM_TRACE) && _Vmtrace)
		(*_Vmtrace)(vm, (Vmuchar_t*)data, NIL(Vmuchar_t*), vd->pool, 0);

	CLRLOCK(vm, local);

	return 0;
}

#if __STD_C
static Void_t* poolresize(Vmalloc_t* vm, Void_t* data, size_t size, int type, int local )
#else
static Void_t* poolresize(vm, data, size, type, local )
Vmalloc_t*	vm;
Void_t*		data;
size_t		size;
int		type;
int		local;
#endif
{
	Vmdata_t	*vd = vm->data;

	NOTUSED(type);

	if(!data)
	{	data = poolalloc(vm, size, local);
		if(data && (type&VM_RSZERO) )
			memset(data, 0, size);
		return data;
	}
	if(size == 0)
	{	(void)poolfree(vm, data, local);
		return NIL(Void_t*);
	}
	if(size != vd->pool)
		return NIL(Void_t*);

	SETLOCK(vm, local);

	/**/ASSERT(KPVADDR(vm, data, pooladdr) == 0);

	if(!local && (vd->mode&VM_TRACE) && _Vmtrace)
		(*_Vmtrace)(vm, (Vmuchar_t*)data, (Vmuchar_t*)data, size, 0);

	CLRLOCK(vm, local);

	return data;
}

#if __STD_C
static long poolsize(Vmalloc_t* vm, Void_t* addr, int local)
#else
static long poolsize(vm, addr, local)
Vmalloc_t*	vm;
Void_t*		addr;
int		local;
#endif
{
	return pooladdr(vm, addr, local) == 0 ? (long)vm->data->pool : -1L;
}

#if __STD_C
static int poolcompact(Vmalloc_t* vm, int local)
#else
static int poolcompact(vm, local)
Vmalloc_t*	vm;
int		local;
#endif
{
	ssize_t		s;
	Block_t		*fp;
	Seg_t		*seg, *next;
	Vmdata_t	*vd = vm->data;

	SETLOCK(vm, local);

	for(seg = vd->seg; seg; seg = next)
	{	next = seg->next;

		if(!(fp = seg->free))
			continue;

		seg->free = NIL(Block_t*);
		if(seg->size == (s = SIZE(fp)&~BITS))
			s = seg->extent;
		else	s += sizeof(Head_t);

		if((*_Vmtruncate)(vm,seg,s,1) == s)
			seg->free = fp;
	}

	if(!local && (vd->mode&VM_TRACE) && _Vmtrace)
		(*_Vmtrace)(vm, (Vmuchar_t*)0, (Vmuchar_t*)0, 0, 0);

	CLRLOCK(vm, local);

	return 0;
}

#if __STD_C
static Void_t* poolalign(Vmalloc_t* vm, size_t size, size_t align, int local)
#else
static Void_t* poolalign(vm, size, align, local)
Vmalloc_t*	vm;
size_t		size;
size_t		align;
int		local;
#endif
{
	NOTUSED(vm);
	NOTUSED(size);
	NOTUSED(align);
	return NIL(Void_t*);
}

/* Public interface */
static Vmethod_t _Vmpool =
{
	poolalloc,
	poolresize,
	poolfree,
	pooladdr,
	poolsize,
	poolcompact,
	poolalign,
	VM_MTPOOL
};

__DEFINE__(Vmethod_t*,Vmpool,&_Vmpool);

#ifdef NoF
NoF(vmpool)
#endif

#endif
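
/*	Illustrative note: a region opened as vmopen(disc, Vmpool, flags)
**	dispatches through the _Vmpool method table above, so on such a
**	region vmalloc() reaches poolalloc(), vmresize() poolresize(),
**	vmfree() poolfree(), vmaddr() pooladdr(), vmsize() poolsize() and
**	vmcompact() poolcompact().  poolalign() always returns NIL, so
**	vmalign() is not supported for pool regions.
*/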