/***********************************************************************
*                                                                      *
*               This software is part of the ast package               *
*           Copyright (c) 1985-2007 AT&T Knowledge Ventures            *
*                      and is licensed under the                       *
*                  Common Public License, Version 1.0                  *
*                      by AT&T Knowledge Ventures                      *
*                                                                      *
*                A copy of the License is available at                 *
*            http://www.opensource.org/licenses/cpl1.0.txt             *
*         (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9)         *
*                                                                      *
*              Information and Software Systems Research               *
*                            AT&T Research                             *
*                           Florham Park NJ                            *
*                                                                      *
*                 Glenn Fowler <gsf@research.att.com>                  *
*                  David Korn <dgk@research.att.com>                   *
*                   Phong Vo <kpv@research.att.com>                    *
*                                                                      *
***********************************************************************/
#ifndef _VMHDR_H
#define _VMHDR_H	1
#ifndef _BLD_vmalloc
#define _BLD_vmalloc	1
#endif

/*	Common types and macros for vmalloc functions.
**
**	Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/

#ifndef __STD_C	/* this is normally in vmalloc.h but it's included late here */
#ifdef __STDC__
#define	__STD_C		1
#else
#if __cplusplus || c_plusplus
#define __STD_C		1
#else
#define __STD_C		0
#endif /*__cplusplus*/
#endif /*__STDC__*/
#endif /*__STD_C*/

#if _PACKAGE_ast

#if !_UWIN
#define getpagesize		______getpagesize
#define _npt_getpagesize	1
#define brk			______brk
#define sbrk			______sbrk
#define _npt_sbrk		1
#endif

#include	<ast.h>

#if _npt_getpagesize
#undef	getpagesize
#endif
#if _npt_sbrk
#undef	brk
#undef	sbrk
#endif

#else

#include	<ast_common.h>

#if !_UWIN
#define _npt_getpagesize	1
#define _npt_sbrk		1
#endif

#endif /*_PACKAGE_ast*/

#include	"FEATURE/vmalloc"

#include	<setjmp.h>

/* the macros below decide which combination of sbrk() and mmap() to use */
#if defined(_WIN32)
#define _mem_win32	1
#undef _mem_sbrk
#undef _mem_mmap_anon
#undef _mem_mmap_zero
#endif

#if _mem_mmap_anon
#undef _mem_mmap_zero
#endif

#if !_mem_win32 && !_mem_sbrk && !_mem_mmap_anon && !_mem_mmap_zero
#undef _std_malloc
#define _std_malloc	1	/* do not define malloc/free/realloc */
#endif

typedef unsigned char	Vmuchar_t;
typedef unsigned long	Vmulong_t;

typedef union _head_u	Head_t;
typedef union _body_u	Body_t;
typedef struct _block_s	Block_t;
typedef struct _seg_s	Seg_t;
typedef struct _pfobj_s	Pfobj_t;

#if !_typ_ssize_t
typedef int		ssize_t;
#endif

#define NIL(t)		((t)0)
#define reg		register
#if __STD_C
#define NOTUSED(x)	(void)(x)
#else
#define NOTUSED(x)	(&x,1)
#endif

/* convert an address to an integral value */
#define VLONG(addr)	((Vmulong_t)((char*)(addr) - (char*)0) )

/* Round x up to a multiple of y. ROUND2 does powers-of-2 and ROUNDX does others */
#define ROUND2(x,y)	(((x) + ((y)-1)) & ~((y)-1))
#define ROUNDX(x,y)	((((x) + ((y)-1)) / (y)) * (y))
#define ROUND(x,y)	(((y)&((y)-1)) ? ROUNDX((x),(y)) : ROUND2((x),(y)) )

/* compute a value that is a common multiple of x and y */
#define MULTIPLE(x,y)	((x)%(y) == 0 ? (x) : (y)%(x) == 0 ? (y) : (y)*(x))
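
/* A few worked examples of the rounding macros above (illustrative only,
** not used by the code):
**	ROUND2(13,8)   == 16	y is a power of two, so mask arithmetic works
**	ROUNDX(13,12)  == 24	y is not a power of two, so divide-and-multiply
**	ROUND(13,8)    == 16	and ROUND(13,12) == 24; ROUND() picks the form
**	MULTIPLE(8,12) == 96	a common multiple of both, though not
**				necessarily the least one (lcm(8,12) == 24)
*/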

#define VM_check	0x0001	/* enable detailed checks		*/
#define VM_abort	0x0002	/* abort() on assertion failure		*/
#define VM_primary	0x0004	/* enable primary native allocation	*/
#define VM_region	0x0008	/* enable region segment checks		*/
#define VM_secondary	0x0010	/* enable secondary native allocation	*/
#define VM_init		0x8000	/* VMCHECK env var checked		*/

#if _UWIN
#include	<ast_windows.h>
#endif

#ifndef DEBUG
#ifdef _BLD_DEBUG
#define	DEBUG		1
#endif /*_BLD_DEBUG*/
#endif /*DEBUG*/
#if DEBUG
extern void	_vmmessage _ARG_((const char*, long, const char*, long));
#define ABORT()		(_Vmassert & VM_abort)
#define CHECK()		(_Vmassert & VM_check)
#define ASSERT(p)	((p) ? 0 : (MESSAGE("Assertion failed"), ABORT() ? (abort(),0) : 0))
#define COUNT(n)	((n) += 1)
#define MESSAGE(s)	_vmmessage(__FILE__,__LINE__,s,0)
#else
#define ABORT()		(0)
#define ASSERT(p)
#define CHECK()		(0)
#define COUNT(n)
#define MESSAGE(s)	(0)
#endif /*DEBUG*/

#define VMPAGESIZE	8192
#if _AST_PAGESIZE > VMPAGESIZE
#undef VMPAGESIZE
#define VMPAGESIZE	_AST_PAGESIZE
#endif
#if _lib_getpagesize
#define GETPAGESIZE(x)	((x) ? (x) : \
			 (((x)=getpagesize()) < VMPAGESIZE ? ((x)=VMPAGESIZE) : (x)) )
#else
#define GETPAGESIZE(x)	((x) = VMPAGESIZE)
#endif

/* Blocks are allocated such that their sizes are 0%(BITS+1).
** This frees up enough low order bits to store state information.
*/
#define BUSY		(01)	/* block is busy			*/
#define PFREE		(02)	/* preceding block is free		*/
#define JUNK		(04)	/* marked as freed but not yet processed */
#define BITS		(07)	/* (BUSY|PFREE|JUNK)			*/
#define ALIGNB		(8)	/* size must be a multiple of BITS+1	*/

#define ISBITS(w)	((w) & BITS)
#define CLRBITS(w)	((w) &= ~BITS)
#define CPYBITS(w,f)	((w) |= ((f)&BITS) )

#define ISBUSY(w)	((w) & BUSY)
#define SETBUSY(w)	((w) |= BUSY)
#define CLRBUSY(w)	((w) &= ~BUSY)

#define ISPFREE(w)	((w) & PFREE)
#define SETPFREE(w)	((w) |= PFREE)
#define CLRPFREE(w)	((w) &= ~PFREE)

#define ISJUNK(w)	((w) & JUNK)
#define SETJUNK(w)	((w) |= JUNK)
#define CLRJUNK(w)	((w) &= ~JUNK)

#define OFFSET(t,e)	((size_t)(&(((t*)0)->e)) )

/* these bits share the "mode" field with the public bits */
#define VM_AGAIN	0010000		/* re-search the arena for space	*/
#define VM_LOCK		0020000		/* region is locked			*/
#define VM_LOCAL	0040000		/* local call, bypass lock		*/
#define VM_UNUSED	0104060
#define VMETHOD(vd)	((vd)->mode&VM_METHODS)

/* test/set/clear lock state */
#define SETLOCAL(vd)	((vd)->mode |= VM_LOCAL)
#define GETLOCAL(vd,l)	(((l) = (vd)->mode&VM_LOCAL), ((vd)->mode &= ~VM_LOCAL) )
#define ISLOCK(vd,l)	((l) ? 0 : ((vd)->mode & VM_LOCK) )
#define SETLOCK(vd,l)	((l) ? 0 : ((vd)->mode |= VM_LOCK) )
#define CLRLOCK(vd,l)	((l) ? 0 : ((vd)->mode &= ~VM_LOCK) )

/* announcing entry/exit of allocation calls */
#define ANNOUNCE(lc, vm,ev,dt,dc) \
	(( ((lc)&VM_LOCAL) || !(dc) || !(dc)->exceptf ) ? 0 : \
		(*(dc)->exceptf)((vm), (ev), (Void_t*)(dt), (dc)) )
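
/* How the lock/local macros above typically compose (an illustrative sketch
** only; the real entry points live in vmbest.c and friends and may differ):
**	GETLOCAL(vd, local);                         capture and clear VM_LOCAL
**	if(ISLOCK(vd, local))                        reentry on a locked region
**		return NIL(Void_t*);
**	SETLOCK(vd, local);                          no-op when local != 0
**	... do the actual work on the region ...
**	CLRLOCK(vd, local);                          likewise a no-op when local
**	ANNOUNCE(local, vm, event, data, vm->disc);
** Here "event" and "data" are placeholders for whatever the entry point
** reports; ANNOUNCE skips the discipline exceptf for local calls, which set
** VM_LOCAL via SETLOCAL() right before the call (see the KPV* macros below).
*/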

/* local calls */
#define KPVALLOC(vm,sz,func)		(SETLOCAL((vm)->data), func((vm),(sz)) )
#define KPVALIGN(vm,sz,al,func)		(SETLOCAL((vm)->data), func((vm),(sz),(al)) )
#define KPVFREE(vm,d,func)		(SETLOCAL((vm)->data), func((vm),(d)) )
#define KPVRESIZE(vm,d,sz,mv,func)	(SETLOCAL((vm)->data), func((vm),(d),(sz),(mv)) )
#define KPVADDR(vm,addr,func)		(SETLOCAL((vm)->data), func((vm),(addr)) )
#define KPVCOMPACT(vm,func)		(SETLOCAL((vm)->data), func((vm)) )

/* ALIGN is chosen so that a block can store all primitive types.
** It should also be a multiple of ALIGNB==(BITS+1) so the size field
** of Block_t will always be 0%(BITS+1) as noted above.
** Of paramount importance is the ALIGNA macro below. If the local compile
** environment is strange enough that the below method does not calculate
** ALIGNA right, then the code below should be commented out and ALIGNA
** redefined to the appropriate requirement.
*/
union _align_u
{	char		c, *cp;
	int		i, *ip;
	long		l, *lp;
	double		d, *dp, ***dppp[8];
	size_t		s, *sp;
	void		(*fn)();
	union _align_u*	align;
	Head_t*		head;
	Body_t*		body;
	Block_t*	block;
	Vmuchar_t	a[ALIGNB];
	_ast_fltmax_t	ld, *ldp;
	jmp_buf		jmp;
};
struct _a_s
{	char		c;
	union _align_u	a;
};
#define ALIGNA	(sizeof(struct _a_s) - sizeof(union _align_u))
struct _align_s
{	char	data[MULTIPLE(ALIGNA,ALIGNB)];
};
#undef	ALIGN	/* bsd sys/param.h defines this */
#define ALIGN	sizeof(struct _align_s)

/* make sure that the head of a block is a multiple of ALIGN */
struct _head_s
{	union
	{ Seg_t*	seg;	/* the containing segment	*/
	  Block_t*	link;	/* possible link list usage	*/
	  Pfobj_t*	pf;	/* profile structure pointer	*/
	  char*		file;	/* for file name in Vmdebug	*/
	} seg;
	union
	{ size_t	size;	/* size of data area in bytes	*/
	  Block_t*	link;	/* possible link list usage	*/
	  int		line;	/* for line number in Vmdebug	*/
	} size;
};
#define HEADSIZE	ROUND(sizeof(struct _head_s),ALIGN)
union _head_u
{	Vmuchar_t	data[HEADSIZE];	/* to standardize size	*/
	struct _head_s	head;
};

/* now make sure that the body of a block is a multiple of ALIGN */
struct _body_s
{	Block_t*	link;	/* next in link list		*/
	Block_t*	left;	/* left child in free tree	*/
	Block_t*	right;	/* right child in free tree	*/
	Block_t**	self;	/* self pointer when free	*/
};
#define BODYSIZE	ROUND(sizeof(struct _body_s),ALIGN)
union _body_u
{	Vmuchar_t	data[BODYSIZE];	/* to standardize size	*/
	struct _body_s	body;
};

/* After all the songs and dances, we should now have:
**	sizeof(Head_t)%ALIGN == 0
**	sizeof(Body_t)%ALIGN == 0
**	and sizeof(Block_t) == sizeof(Head_t)+sizeof(Body_t)
*/
struct _block_s
{	Head_t	head;
	Body_t	body;
};

/* requirements for smallest block type */
struct _tiny_s
{	Block_t*	link;
	Block_t*	self;
};
#define TINYSIZE	ROUND(sizeof(struct _tiny_s),ALIGN)
#define S_TINY		1	/* # of tiny blocks	*/
#define MAXTINY		(S_TINY*ALIGN + TINYSIZE)
#define TLEFT(b)	((b)->head.head.seg.link)	/* instead of LEFT	*/
#define TINIEST(b)	(SIZE(b) == TINYSIZE)		/* this type uses TLEFT	*/

#define DIV(x,y)	((y) == 8 ? ((x)>>3) : (x)/(y) )
#define INDEX(s)	DIV((s)-TINYSIZE,ALIGN)
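
/* A worked example of the size-to-index arithmetic, assuming a 64-bit
** target where ALIGN comes out to 16 and hence TINYSIZE == 16 (illustrative
** numbers only; both are computed per platform above):
**	INDEX(16) == DIV(0,16)  == 0
**	INDEX(32) == DIV(16,16) == 1
**	MAXTINY   == S_TINY*16 + 16 == 32
** so INDEX(TINYSIZE) == 0 lands in the single tiny slot (S_TINY == 1), and
** C_INDEX() below extends the same mapping to the small-block caches.
*/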

/* small block types kept in separate caches for quick allocation */
#define S_CACHE		6	/* # of types of small blocks to be cached	*/
#define N_CACHE		32	/* on allocation, create this many at a time	*/
#define MAXCACHE	(S_CACHE*ALIGN + TINYSIZE)
#define C_INDEX(s)	(s < MAXCACHE ? INDEX(s) : S_CACHE)

#define TINY(vd)	((vd)->tiny)
#define CACHE(vd)	((vd)->cache)

typedef struct _vmdata_s
{	int		mode;		/* current mode for region		*/
	size_t		incr;		/* allocate in multiple of this		*/
	size_t		pool;		/* size of an elt in a Vmpool region	*/
	Seg_t*		seg;		/* list of segments			*/
	Block_t*	free;		/* most recent free block		*/
	Block_t*	wild;		/* wilderness block			*/
	Block_t*	root;		/* root of free tree			*/
	Block_t*	tiny[S_TINY];	/* small blocks				*/
	Block_t*	cache[S_CACHE+1]; /* delayed free blocks		*/
} Vmdata_t;

/* private parts of Vmalloc_t */
#define _VM_PRIVATE_ \
	Vmdisc_t*	disc;	/* discipline to get space	*/ \
	Vmdata_t*	data;	/* the real region data		*/ \
	Vmalloc_t*	next;	/* linked list of regions	*/

#include	"vmalloc.h"

#if !_PACKAGE_ast
/* we don't use these here and they interfere with some local names */
#undef	malloc
#undef	free
#undef	realloc
#endif

/* segment structure */
struct _seg_s
{	Vmalloc_t*	vm;	/* the region that holds this	*/
	Seg_t*		next;	/* next segment			*/
	Void_t*		addr;	/* starting segment address	*/
	size_t		extent;	/* extent of segment		*/
	Vmuchar_t*	baddr;	/* bottom of usable memory	*/
	size_t		size;	/* allocable size		*/
	Block_t*	free;	/* recent free blocks		*/
	Block_t*	last;	/* Vmlast last-allocated block	*/
};

/* starting block of a segment */
#define SEGBLOCK(s)	((Block_t*)(((Vmuchar_t*)(s)) + ROUND(sizeof(Seg_t),ALIGN)))

/* short-hands for block data */
#define SEG(b)		((b)->head.head.seg.seg)
#define SEGLINK(b)	((b)->head.head.seg.link)
#define SIZE(b)		((b)->head.head.size.size)
#define SIZELINK(b)	((b)->head.head.size.link)
#define LINK(b)		((b)->body.body.link)
#define LEFT(b)		((b)->body.body.left)
#define RIGHT(b)	((b)->body.body.right)
#define VM(b)		(SEG(b)->vm)

#define DATA(b)		((Void_t*)((b)->body.data) )
#define BLOCK(d)	((Block_t*)((char*)(d) - sizeof(Head_t)) )
#define SELF(b)		((Block_t**)((b)->body.data + SIZE(b) - sizeof(Block_t*)) )
#define LAST(b)		(*((Block_t**)(((char*)(b)) - sizeof(Block_t*)) ) )
#define NEXT(b)		((Block_t*)((b)->body.data + SIZE(b)) )

/* functions to manipulate link lists of elts of the same size */
#define SETLINK(b)	(RIGHT(b) =  (b) )
#define ISLINK(b)	(RIGHT(b) == (b) )
#define UNLINK(vd,b,i,t) \
	((((t) = LINK(b)) ? (LEFT(t) = LEFT(b)) : NIL(Block_t*) ), \
	 (((t) = LEFT(b)) ? (LINK(t) = LINK(b)) : (TINY(vd)[i] = LINK(b)) ) )

/* delete a block from a link list or the free tree.
** The test in the below macro is worth scratching your head a bit.
** Even though tiny blocks (size < BODYSIZE) are kept in separate lists,
** only the TINIEST ones require TLEFT(b) for the back link. Since this
** destroys the SEG(b) pointer, it must be carefully restored in bestsearch().
** Other tiny blocks have enough space to use the usual LEFT(b).
** In this case, I have also carefully arranged so that RIGHT(b) and
** SELF(b) can be overlapped and the test ISLINK() will go through.
*/
#define REMOVE(vd,b,i,t,func) \
	((!TINIEST(b) && ISLINK(b)) ? UNLINK((vd),(b),(i),(t)) : \
	 func((vd),SIZE(b),(b)) )
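
/* Putting the short-hands above together, the in-memory picture of a block
** (a sketch derived from the macros, not an additional invariant):
**
**	      seg_ size+bits -------------data-------------
**	      ______________ _______________________________
**	 b -->|   Head_t    | DATA(b) ...        ... SELF(b) |<-- NEXT(b) starts here
**
** BLOCK(d) backs up sizeof(Head_t) from the data area to its block, NEXT(b)
** skips SIZE(b) bytes of data to reach the adjacent block, and a free block
** stores its own address in the trailing pointer-sized slot (SELF) so that
** LAST() of the following block can locate it.
*/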

/* see if a block is the wilderness block */
#define SEGWILD(b)	(((b)->body.data+SIZE(b)+sizeof(Head_t)) >= SEG(b)->baddr)
#define VMWILD(vd,b)	(((b)->body.data+SIZE(b)+sizeof(Head_t)) >= vd->seg->baddr)

#define VMFLF(vm,fi,ln,fn)	((fi) = (vm)->file, (vm)->file = NIL(char*), \
				 (ln) = (vm)->line, (vm)->line = 0, \
				 (fn) = (vm)->func, (vm)->func = NIL(Void_t*) )

/* The layout of a Vmprofile block is this:
**	seg_ size ----data---- _pf_ size
**	_________ ____________ _________
**	seg_, size: header required by Vmbest.
**	data:	actual data block.
**	_pf_:	pointer to the corresponding Pfobj_t struct
**	size:	the true size of the block.
** So each block requires an extra Head_t.
*/
#define PF_EXTRA	sizeof(Head_t)
#define PFDATA(d)	((Head_t*)((Vmuchar_t*)(d)+(SIZE(BLOCK(d))&~BITS)-sizeof(Head_t)) )
#define PFOBJ(d)	(PFDATA(d)->head.seg.pf)
#define PFSIZE(d)	(PFDATA(d)->head.size.size)

/* The layout of a block allocated by Vmdebug is this:
**	seg_ size file size seg_ magi ----data---- --magi-- magi line
**	--------- --------- --------- ------------ -------- ---------
**	seg_,size: header required by Vmbest management.
**	file:	the file where it was created.
**	size:	the true byte count of the block
**	seg_:	should be the same as the previous seg_.
**		This allows the function vmregion() to work.
**	magi:	magic bytes to detect overwrites.
**	data:	the actual data block.
**	magi:	more magic bytes.
**	line:	the line number in the file where it was created.
** So for each allocated block, we'll need 3 extra Head_t.
*/

/* convenient macros for accessing the above fields */
#define DB_HEAD		(2*sizeof(Head_t))
#define DB_TAIL		(2*sizeof(Head_t))
#define DB_EXTRA	(DB_HEAD+DB_TAIL)
#define DBBLOCK(d)	((Block_t*)((Vmuchar_t*)(d) - 3*sizeof(Head_t)) )
#define DBBSIZE(d)	(SIZE(DBBLOCK(d)) & ~BITS)
#define DBSEG(d)	(((Head_t*)((Vmuchar_t*)(d) - sizeof(Head_t)))->head.seg.seg )
#define DBSIZE(d)	(((Head_t*)((Vmuchar_t*)(d) - 2*sizeof(Head_t)))->head.size.size )
#define DBFILE(d)	(((Head_t*)((Vmuchar_t*)(d) - 2*sizeof(Head_t)))->head.seg.file )
#define DBLN(d)		(((Head_t*)((Vmuchar_t*)DBBLOCK(d)+DBBSIZE(d)))->head.size.line )
#define DBLINE(d)	(DBLN(d) < 0 ? -DBLN(d) : DBLN(d))

/* forward/backward translation for addresses between Vmbest and Vmdebug */
#define DB2BEST(d)	((Vmuchar_t*)(d) - 2*sizeof(Head_t))
#define DB2DEBUG(b)	((Vmuchar_t*)(b) + 2*sizeof(Head_t))

/* set file and line number, note that DBLN > 0 so that DBISBAD will work */
#define DBSETFL(d,f,l)	(DBFILE(d) = (f), DBLN(d) = (f) ? (l) : 1)

/* set and test whether a block is known to be corrupted */
#define DBSETBAD(d)	(DBLN(d) > 0 ? (DBLN(d) = -DBLN(d)) : -1)
#define DBISBAD(d)	(DBLN(d) <= 0)
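
/* A recap of the offsets implied by the macros above (nothing new, just the
** arithmetic spelled out): the user address d sits 3*sizeof(Head_t) past the
** start of the underlying Vmbest block, hence DBBLOCK(d); DB_HEAD is the
** 2*sizeof(Head_t) debug-only prefix ([file,size] and [seg_,magi]) in front
** of the data, and DB_TAIL reserves 2*sizeof(Head_t) behind it for the
** trailing magic bytes and the [magi,line] header that DBLN() reads.
*/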

#define DB_MAGIC	0255	/* 10101101	*/

/* compute the bounds of the magic areas */
#define DBHEAD(d,begp,endp) \
	(((begp) = (Vmuchar_t*)(&DBSEG(d)) + sizeof(Seg_t*)), ((endp) = (d)) )
#define DBTAIL(d,begp,endp) \
	(((begp) = (Vmuchar_t*)(d)+DBSIZE(d)), ((endp) = (Vmuchar_t*)(&DBLN(d))) )

/* external symbols for internal use by vmalloc */
typedef Block_t*	(*Vmsearch_f)_ARG_((Vmdata_t*, size_t, Block_t*));
typedef struct _vmextern_
{	Block_t*	(*vm_extend)_ARG_((Vmalloc_t*, size_t, Vmsearch_f ));
	ssize_t		(*vm_truncate)_ARG_((Vmalloc_t*, Seg_t*, size_t, int));
	size_t		vm_pagesize;
	char*		(*vm_strcpy)_ARG_((char*, const char*, int));
	char*		(*vm_itoa)_ARG_((Vmulong_t, int));
	void		(*vm_trace)_ARG_((Vmalloc_t*,
					  Vmuchar_t*, Vmuchar_t*, size_t, size_t));
	void		(*vm_pfclose)_ARG_((Vmalloc_t*));
	int		vm_assert;
} Vmextern_t;

#define _Vmextend	(_Vmextern.vm_extend)
#define _Vmtruncate	(_Vmextern.vm_truncate)
#define _Vmpagesize	(_Vmextern.vm_pagesize)
#define _Vmstrcpy	(_Vmextern.vm_strcpy)
#define _Vmitoa		(_Vmextern.vm_itoa)
#define _Vmtrace	(_Vmextern.vm_trace)
#define _Vmpfclose	(_Vmextern.vm_pfclose)
#define _Vmassert	(_Vmextern.vm_assert)

extern int		_vmbestcheck _ARG_((Vmdata_t*, Block_t*));

_BEGIN_EXTERNS_

extern Vmextern_t	_Vmextern;

#if _PACKAGE_ast

#if _npt_getpagesize
extern int		getpagesize _ARG_((void));
#endif
#if _npt_sbrk
extern int		brk _ARG_(( void* ));
extern Void_t*		sbrk _ARG_(( ssize_t ));
#endif

#else

#if _hdr_unistd
#include	<unistd.h>
#else
extern void		abort _ARG_(( void ));
extern ssize_t		write _ARG_(( int, const void*, size_t ));
extern int		getpagesize _ARG_((void));
extern Void_t*		sbrk _ARG_((ssize_t));
#endif

#if !__STDC__ && !_hdr_stdlib
extern size_t		strlen _ARG_(( const char* ));
extern char*		strcpy _ARG_(( char*, const char* ));
extern int		strcmp _ARG_(( const char*, const char* ));
extern int		atexit _ARG_(( void(*)(void) ));
extern char*		getenv _ARG_(( const char* ));
extern Void_t*		memcpy _ARG_(( Void_t*, const Void_t*, size_t ));
extern Void_t*		memset _ARG_(( Void_t*, int, size_t ));
#else
#include	<stdlib.h>
#include	<string.h>
#endif

/* for vmexit.c */
extern int		onexit _ARG_(( void(*)(void) ));
extern void		_exit _ARG_(( int ));
extern void		_cleanup _ARG_(( void ));

#endif /*_PACKAGE_ast*/

_END_EXTERNS_

#if _UWIN
#define abort()		(DebugBreak(),abort())
#endif

#endif /* _VMHDR_H */