/***********************************************************************
*                                                                      *
*              This software is part of the ast package                *
*          Copyright (c) 1985-2008 AT&T Intellectual Property          *
*                      and is licensed under the                       *
*                  Common Public License, Version 1.0                  *
*                    by AT&T Intellectual Property                     *
*                                                                      *
*                A copy of the License is available at                 *
*            http://www.opensource.org/licenses/cpl1.0.txt             *
*         (with md5 checksum 059e8cd6165cb4c31e351f2b69388fd9)         *
*                                                                      *
*              Information and Software Systems Research               *
*                            AT&T Research                             *
*                           Florham Park NJ                            *
*                                                                      *
*                 Glenn Fowler <gsf@research.att.com>                  *
*                  David Korn <dgk@research.att.com>                   *
*                   Phong Vo <kpv@research.att.com>                    *
*                                                                      *
***********************************************************************/
#ifndef _VMHDR_H
#define _VMHDR_H	1
#ifndef _BLD_vmalloc
#define _BLD_vmalloc	1
#endif

/*	Common types and macros for vmalloc functions.
**
**	Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
*/

#ifndef __STD_C	/* this is normally in vmalloc.h but it's included late here */
#ifdef __STDC__
#define __STD_C		1
#else
#if __cplusplus || c_plusplus
#define __STD_C		1
#else
#define __STD_C		0
#endif /*__cplusplus*/
#endif /*__STDC__*/
#endif /*__STD_C*/

#if _PACKAGE_ast

#if !_UWIN
#define getpagesize		______getpagesize
#define _npt_getpagesize	1
#define brk			______brk
#define sbrk			______sbrk
#define _npt_sbrk		1
#endif

#include	<ast.h>

#if _npt_getpagesize
#undef	getpagesize
#endif
#if _npt_sbrk
#undef	brk
#undef	sbrk
#endif

#else

#include	<ast_common.h>

#if !_UWIN
#define _npt_getpagesize	1
#define _npt_sbrk		1
#endif

#endif /*_PACKAGE_ast*/

#include	"FEATURE/vmalloc"

#include	<setjmp.h>

/* the below macros decide which combinations of sbrk() or mmap() to use */
#if defined(_WIN32)
#define _mem_win32	1
#undef _mem_sbrk
#undef _mem_mmap_anon
#undef _mem_mmap_zero
#endif

#if _mem_mmap_anon
#undef _mem_mmap_zero
#endif

#if !_mem_win32 && !_mem_sbrk && !_mem_mmap_anon && !_mem_mmap_zero
#undef _std_malloc
#define _std_malloc	1	/* do not define malloc/free/realloc */
#endif

typedef unsigned char	Vmuchar_t;
typedef unsigned long	Vmulong_t;

typedef union _head_u	Head_t;
typedef union _body_u	Body_t;
typedef struct _block_s	Block_t;
typedef struct _seg_s	Seg_t;
typedef struct _pfobj_s	Pfobj_t;

#if !_typ_ssize_t
typedef int		ssize_t;
#endif

#define NIL(t)		((t)0)
#define reg		register
#if __STD_C
#define NOTUSED(x)	(void)(x)
#else
#define NOTUSED(x)	(&x,1)
#endif

/* convert an address to an integral value */
#define VLONG(addr)	((Vmulong_t)((char*)(addr) - (char*)0) )

/* Round x up to a multiple of y. ROUND2 handles powers of 2 and ROUNDX handles the rest */
#define ROUND2(x,y)	(((x) + ((y)-1)) & ~((y)-1))
#define ROUNDX(x,y)	((((x) + ((y)-1)) / (y)) * (y))
#define ROUND(x,y)	(((y)&((y)-1)) ? ROUNDX((x),(y)) : ROUND2((x),(y)) )

/* compute a value that is a common multiple of x and y */
#define MULTIPLE(x,y)	((x)%(y) == 0 ? (x) : (y)%(x) == 0 ? (y) : (y)*(x))
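/* Worked examples of the rounding macros above, for illustration only:
**	ROUND2(13,8)   == 16	y is a power of 2, so mask arithmetic applies
**	ROUNDX(13,12)  == 24	y is not a power of 2, so divide then multiply
**	ROUND(13,8)    == 16	dispatches to ROUND2 since (8 & 7) == 0
**	MULTIPLE(16,8) == 16	x is already a multiple of y
**	MULTIPLE(4,6)  == 24	neither divides the other, so fall back to x*y
*/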

#define VM_check	0x0001		/* enable detailed checks	*/
#define VM_abort	0x0002		/* abort() on assertion failure	*/
#define VM_region	0x0004		/* enable region segment checks	*/
#define VM_mmap		0x0010		/* favor mmap allocation	*/
#define VM_init		0x8000		/* VMCHECK env var checked	*/

#if _UWIN
#include	<ast_windows.h>
#endif

#ifndef DEBUG
#ifdef _BLD_DEBUG
#define DEBUG		1
#endif /*_BLD_DEBUG*/
#endif /*DEBUG*/
#if DEBUG
extern void	_vmmessage _ARG_((const char*, long, const char*, long));
#define ABORT()		(_Vmassert & VM_abort)
#define CHECK()		(_Vmassert & VM_check)
#define ASSERT(p)	((p) ? 0 : (MESSAGE("Assertion failed"), ABORT() ? (abort(),0) : 0))
#define COUNT(n)	((n) += 1)
#define MESSAGE(s)	_vmmessage(__FILE__,__LINE__,s,0)
#else
#define ABORT()		(0)
#define ASSERT(p)
#define CHECK()		(0)
#define COUNT(n)
#define MESSAGE(s)	(0)
#endif /*DEBUG*/

#define VMPAGESIZE	8192
#if _AST_PAGESIZE > VMPAGESIZE
#undef	VMPAGESIZE
#define VMPAGESIZE	_AST_PAGESIZE
#endif
#if _lib_getpagesize
#define GETPAGESIZE(x)	((x) ? (x) : \
			 (((x)=getpagesize()) < VMPAGESIZE ? ((x)=VMPAGESIZE) : (x)) )
#else
#define GETPAGESIZE(x)	((x) = VMPAGESIZE)
#endif

/* Blocks are allocated such that their sizes are 0%(BITS+1).
** This frees up enough low order bits to store state information.
*/
#define BUSY		(01)	/* block is busy				*/
#define PFREE		(02)	/* preceding block is free			*/
#define JUNK		(04)	/* marked as freed but not yet processed	*/
#define BITS		(07)	/* (BUSY|PFREE|JUNK)				*/
#define ALIGNB		(8)	/* size must be a multiple of BITS+1		*/

#define ISBITS(w)	((w) & BITS)
#define CLRBITS(w)	((w) &= ~BITS)
#define CPYBITS(w,f)	((w) |= ((f)&BITS) )

#define ISBUSY(w)	((w) & BUSY)
#define SETBUSY(w)	((w) |= BUSY)
#define CLRBUSY(w)	((w) &= ~BUSY)

#define ISPFREE(w)	((w) & PFREE)
#define SETPFREE(w)	((w) |= PFREE)
#define CLRPFREE(w)	((w) &= ~PFREE)

#define ISJUNK(w)	((w) & JUNK)
#define SETJUNK(w)	((w) |= JUNK)
#define CLRJUNK(w)	((w) &= ~JUNK)
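/* Worked example, for illustration only: a busy block whose data area is 48
** bytes stores SIZE(b) == (48|BUSY). Because every size is a multiple of
** ALIGNB == 8, the three low-order bits are always available to hold BUSY,
** PFREE and JUNK; CLRBITS() recovers the true size 48, and CPYBITS() carries
** the state bits over to another size word.
*/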

#define OFFSET(t,e)	((size_t)(&(((t*)0)->e)) )

/* these bits share the "mode" field with the public bits */
#define VM_AGAIN	0010000		/* re-search the arena for space	*/
#define VM_LOCK		0020000		/* region is locked			*/
#define VM_LOCAL	0040000		/* local call, bypass lock		*/
#define VM_INUSE	0004000		/* some operation is running		*/
#define VM_UNUSED	0100060
#define VMETHOD(vd)	((vd)->mode&VM_METHODS)

/* test/set/clear lock state */
#define SETINUSE(vd,iu)	(((iu) = (vd)->mode&VM_INUSE), ((vd)->mode |= VM_INUSE) )
#define CLRINUSE(vd,iu)	((iu) ? 0 : ((vd)->mode &= ~VM_INUSE) )
#define SETLOCAL(vd)	((vd)->mode |= VM_LOCAL)
#define GETLOCAL(vd,l)	(((l) = (vd)->mode&VM_LOCAL), ((vd)->mode &= ~VM_LOCAL) )
#define ISLOCK(vd,l)	((l) ? 0 : ((vd)->mode & VM_LOCK) )
#define SETLOCK(vd,l)	((l) ? 0 : ((vd)->mode |= VM_LOCK) )
#define CLRLOCK(vd,l)	((l) ? 0 : ((vd)->mode &= ~VM_LOCK) )

/* announcing entry/exit of allocation calls */
#define ANNOUNCE(lc, vm,ev,dt,dc) \
	(( ((lc)&VM_LOCAL) || !(dc) || !(dc)->exceptf ) ? 0 : \
	 (*(dc)->exceptf)((vm), (ev), (Void_t*)(dt), (dc)) )

/* local calls */
#define KPVALLOC(vm,sz,func)		(SETLOCAL((vm)->data), func((vm),(sz)) )
#define KPVALIGN(vm,sz,al,func)		(SETLOCAL((vm)->data), func((vm),(sz),(al)) )
#define KPVFREE(vm,d,func)		(SETLOCAL((vm)->data), func((vm),(d)) )
#define KPVRESIZE(vm,d,sz,mv,func)	(SETLOCAL((vm)->data), func((vm),(d),(sz),(mv)) )
#define KPVADDR(vm,addr,func)		(SETLOCAL((vm)->data), func((vm),(addr)) )
#define KPVCOMPACT(vm,func)		(SETLOCAL((vm)->data), func((vm)) )

/* ALIGN is chosen so that a block can store all primitive types.
** It should also be a multiple of ALIGNB==(BITS+1) so the size field
** of Block_t will always be 0%(BITS+1) as noted above.
** Of paramount importance is the ALIGNA macro below. If the local compile
** environment is strange enough that the below method does not compute
** ALIGNA correctly, then the code below should be commented out and ALIGNA
** redefined to the appropriate requirement.
*/
union _align_u
{	char		c, *cp;
	int		i, *ip;
	long		l, *lp;
	double		d, *dp, ***dppp[8];
	size_t		s, *sp;
	void		(*fn)();
	union _align_u*	align;
	Head_t*		head;
	Body_t*		body;
	Block_t*	block;
	Vmuchar_t	a[ALIGNB];
	_ast_fltmax_t	ld, *ldp;
	jmp_buf		jmp;
};
struct _a_s
{	char		c;
	union _align_u	a;
};
#define ALIGNA	(sizeof(struct _a_s) - sizeof(union _align_u))
struct _align_s
{	char	data[MULTIPLE(ALIGNA,ALIGNB)];
};
#undef ALIGN	/* bsd sys/param.h defines this */
#define ALIGN	sizeof(struct _align_s)
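/* Illustration only, on a hypothetical compiler: if union _align_u requires
** 16-byte alignment, the compiler pads struct _a_s so that
**	ALIGNA == sizeof(struct _a_s) - sizeof(union _align_u) == 16
** and ALIGN == sizeof(struct _align_s) == MULTIPLE(16,ALIGNB) == 16.
** With a 4-byte worst-case alignment the same computation yields ALIGNA == 4
** and ALIGN == 8, preserving the 0%(BITS+1) size invariant either way.
*/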

/* make sure that the head of a block is a multiple of ALIGN */
struct _head_s
{	union
	{ Seg_t*	seg;	/* the containing segment	*/
	  Block_t*	link;	/* possible link list usage	*/
	  Pfobj_t*	pf;	/* profile structure pointer	*/
	  char*		file;	/* for file name in Vmdebug	*/
	} seg;
	union
	{ size_t	size;	/* size of data area in bytes	*/
	  Block_t*	link;	/* possible link list usage	*/
	  int		line;	/* for line number in Vmdebug	*/
	} size;
};
#define HEADSIZE	ROUND(sizeof(struct _head_s),ALIGN)
union _head_u
{	Vmuchar_t	data[HEADSIZE];	/* to standardize size	*/
	struct _head_s	head;
};

/* now make sure that the body of a block is a multiple of ALIGN */
struct _body_s
{	Block_t*	link;	/* next in link list		*/
	Block_t*	left;	/* left child in free tree	*/
	Block_t*	right;	/* right child in free tree	*/
	Block_t**	self;	/* self pointer when free	*/
};
#define BODYSIZE	ROUND(sizeof(struct _body_s),ALIGN)
union _body_u
{	Vmuchar_t	data[BODYSIZE];	/* to standardize size	*/
	struct _body_s	body;
};

/* After all the songs and dances, we should now have:
**	sizeof(Head_t)%ALIGN == 0
**	sizeof(Body_t)%ALIGN == 0
**	and sizeof(Block_t) == sizeof(Head_t)+sizeof(Body_t)
*/
struct _block_s
{	Head_t	head;
	Body_t	body;
};

/* requirements for the smallest block type */
struct _tiny_s
{	Block_t*	link;
	Block_t*	self;
};
#define TINYSIZE	ROUND(sizeof(struct _tiny_s),ALIGN)
#define S_TINY		1	/* # of tiny block types	*/
#define MAXTINY		(S_TINY*ALIGN + TINYSIZE)
#define TLEFT(b)	((b)->head.head.seg.link)	/* instead of LEFT	*/
#define TINIEST(b)	(SIZE(b) == TINYSIZE)		/* this type uses TLEFT	*/

#define DIV(x,y)	((y) == 8 ? ((x)>>3) : (x)/(y) )
#define INDEX(s)	DIV((s)-TINYSIZE,ALIGN)

/* small block types kept in separate caches for quick allocation */
#define S_CACHE		6	/* # of types of small blocks to be cached	*/
#define N_CACHE		32	/* on allocation, create this many at a time	*/
#define MAXCACHE	(S_CACHE*ALIGN + TINYSIZE)
#define C_INDEX(s)	((s) < MAXCACHE ? INDEX(s) : S_CACHE)

#define TINY(vd)	((vd)->tiny)
#define CACHE(vd)	((vd)->cache)

typedef struct _vmdata_s
{	int		mode;		/* current mode for region	*/
	size_t		incr;		/* allocate in multiples of this*/
	size_t		pool;		/* size of an elt in a Vmpool region */
	Seg_t*		seg;		/* list of segments		*/
	Block_t*	free;		/* most recent free block	*/
	Block_t*	wild;		/* wilderness block		*/
	Block_t*	root;		/* root of free tree		*/
	Block_t*	tiny[S_TINY];	/* small blocks			*/
	Block_t*	cache[S_CACHE+1]; /* delayed free blocks	*/
} Vmdata_t;

/* private parts of Vmalloc_t */
#define _VM_PRIVATE_ \
	Vmdisc_t*	disc;	/* discipline to get space	*/ \
	Vmdata_t*	data;	/* the real region data		*/ \
	Vmalloc_t*	next;	/* linked list of regions	*/

#include	"vmalloc.h"

#if !_PACKAGE_ast
/* we don't use these here and they interfere with some local names */
#undef malloc
#undef free
#undef realloc
#endif

/* segment structure */
struct _seg_s
{	Vmalloc_t*	vm;	/* the region that holds this	*/
	Seg_t*		next;	/* next segment			*/
	Void_t*		addr;	/* starting segment address	*/
	size_t		extent;	/* extent of segment		*/
	Vmuchar_t*	baddr;	/* bottom of usable memory	*/
	size_t		size;	/* allocable size		*/
	Block_t*	free;	/* recent free blocks		*/
	Block_t*	last;	/* Vmlast last-allocated block	*/
};

/* starting block of a segment */
#define SEGBLOCK(s)	((Block_t*)(((Vmuchar_t*)(s)) + ROUND(sizeof(Seg_t),ALIGN)))

/* short-hands for block data */
#define SEG(b)		((b)->head.head.seg.seg)
#define SEGLINK(b)	((b)->head.head.seg.link)
#define SIZE(b)		((b)->head.head.size.size)
#define SIZELINK(b)	((b)->head.head.size.link)
#define LINK(b)		((b)->body.body.link)
#define LEFT(b)		((b)->body.body.left)
#define RIGHT(b)	((b)->body.body.right)
#define VM(b)		(SEG(b)->vm)

#define DATA(b)		((Void_t*)((b)->body.data) )
#define BLOCK(d)	((Block_t*)((char*)(d) - sizeof(Head_t)) )
#define SELF(b)		((Block_t**)((b)->body.data + SIZE(b) - sizeof(Block_t*)) )
#define LAST(b)		(*((Block_t**)(((char*)(b)) - sizeof(Block_t*)) ) )
#define NEXT(b)		((Block_t*)((b)->body.data + SIZE(b)) )
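/* A minimal sketch of how the short-hands above fit together; the helper name
** _vmillustrate is hypothetical and the block is never compiled (illustration
** only). BLOCK() undoes DATA() by stepping back over the Head_t, and NEXT()
** lands on the block that immediately follows the data area in memory.
*/
#if 0
static void _vmillustrate(Block_t* b)
{	Void_t*	d = DATA(b);	/* address handed out to the caller		*/
	ASSERT(BLOCK(d) == b);	/* BLOCK() steps back sizeof(Head_t) bytes	*/
	ASSERT((Void_t*)NEXT(b) == (Void_t*)((Vmuchar_t*)d + SIZE(b)));
}
#endif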

/* functions to manipulate link lists of elts of the same size */
#define SETLINK(b)	(RIGHT(b) = (b) )
#define ISLINK(b)	(RIGHT(b) == (b) )
#define UNLINK(vd,b,i,t) \
	((((t) = LINK(b)) ? (LEFT(t) = LEFT(b)) : NIL(Block_t*) ), \
	 (((t) = LEFT(b)) ? (LINK(t) = LINK(b)) : (TINY(vd)[i] = LINK(b)) ) )

/* delete a block from a link list or the free tree.
** The test in the below macro is worth scratching your head over a bit.
** Even though tiny blocks (size < BODYSIZE) are kept in separate lists,
** only the TINIEST ones require TLEFT(b) for the back link. Since this
** destroys the SEG(b) pointer, it must be carefully restored in bestsearch().
** Other tiny blocks have enough space to use the usual LEFT(b).
** In this case, I have also carefully arranged it so that RIGHT(b) and
** SELF(b) can be overlapped and the test ISLINK() will go through.
*/
#define REMOVE(vd,b,i,t,func) \
	((!TINIEST(b) && ISLINK(b)) ? UNLINK((vd),(b),(i),(t)) : \
	 func((vd),SIZE(b),(b)) )

/* see if a block is the wilderness block */
#define SEGWILD(b)	(((b)->body.data+SIZE(b)+sizeof(Head_t)) >= SEG(b)->baddr)
#define VMWILD(vd,b)	(((b)->body.data+SIZE(b)+sizeof(Head_t)) >= vd->seg->baddr)

#define VMFLF(vm,fi,ln,fn)	((fi) = (vm)->file, (vm)->file = NIL(char*), \
				 (ln) = (vm)->line, (vm)->line = 0, \
				 (fn) = (vm)->func, (vm)->func = NIL(Void_t*) )

/* The layout of a Vmprofile block is this:
**	seg_ size ----data---- _pf_ size
**	_________ ____________ _________
**	seg_, size: header required by Vmbest.
**	data:	    actual data block.
**	_pf_:	    pointer to the corresponding Pfobj_t struct.
**	size:	    the true size of the block.
** So each block requires an extra Head_t.
*/
#define PF_EXTRA	sizeof(Head_t)
#define PFDATA(d)	((Head_t*)((Vmuchar_t*)(d)+(SIZE(BLOCK(d))&~BITS)-sizeof(Head_t)) )
#define PFOBJ(d)	(PFDATA(d)->head.seg.pf)
#define PFSIZE(d)	(PFDATA(d)->head.size.size)

/* The layout of a block allocated by Vmdebug is this:
**	seg_ size file size seg_ magi ----data---- --magi-- magi line
**	--------- --------- --------- ------------ -------- ---------
**	seg_,size: header required by Vmbest management.
**	file:	   the file where it was created.
**	size:	   the true byte count of the block.
**	seg_:	   should be the same as the previous seg_.
**		   This allows the function vmregion() to work.
**	magi:	   magic bytes to detect overwrites.
**	data:	   the actual data block.
**	magi:	   more magic bytes.
**	line:	   the line number in the file where it was created.
** So for each allocated block, we'll need 3 extra Head_t.
*/

/* convenient macros for accessing the above fields */
#define DB_HEAD		(2*sizeof(Head_t))
#define DB_TAIL		(2*sizeof(Head_t))
#define DB_EXTRA	(DB_HEAD+DB_TAIL)
#define DBBLOCK(d)	((Block_t*)((Vmuchar_t*)(d) - 3*sizeof(Head_t)) )
#define DBBSIZE(d)	(SIZE(DBBLOCK(d)) & ~BITS)
#define DBSEG(d)	(((Head_t*)((Vmuchar_t*)(d) - sizeof(Head_t)))->head.seg.seg )
#define DBSIZE(d)	(((Head_t*)((Vmuchar_t*)(d) - 2*sizeof(Head_t)))->head.size.size )
#define DBFILE(d)	(((Head_t*)((Vmuchar_t*)(d) - 2*sizeof(Head_t)))->head.seg.file )
#define DBLN(d)		(((Head_t*)((Vmuchar_t*)DBBLOCK(d)+DBBSIZE(d)))->head.size.line )
#define DBLINE(d)	(DBLN(d) < 0 ? -DBLN(d) : DBLN(d))

/* forward/backward translation for addresses between Vmbest and Vmdebug */
#define DB2BEST(d)	((Vmuchar_t*)(d) - 2*sizeof(Head_t))
#define DB2DEBUG(b)	((Vmuchar_t*)(b) + 2*sizeof(Head_t))

/* set file and line number; note that DBLN > 0 so that DBISBAD will work */
#define DBSETFL(d,f,l)	(DBFILE(d) = (f), DBLN(d) = (f) ? (l) : 1)

/* set and test the known-to-be-corrupted state */
#define DBSETBAD(d)	(DBLN(d) > 0 ? (DBLN(d) = -DBLN(d)) : -1)
#define DBISBAD(d)	(DBLN(d) <= 0)

#define DB_MAGIC	0255	/* 10101101 */

/* compute the bounds of the magic areas */
#define DBHEAD(d,begp,endp) \
	(((begp) = (Vmuchar_t*)(&DBSEG(d)) + sizeof(Seg_t*)), ((endp) = (d)) )
#define DBTAIL(d,begp,endp) \
	(((begp) = (Vmuchar_t*)(d)+DBSIZE(d)), ((endp) = (Vmuchar_t*)(&DBLN(d))) )
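/* A minimal sketch of the Vmdebug address translations; the helper name
** _vmdbillustrate is hypothetical and the block is never compiled
** (illustration only). DB2BEST() and DB2DEBUG() are inverses, and the Vmbest
** block header sits one Head_t before the Vmbest data address.
*/
#if 0
static void _vmdbillustrate(Void_t* d)
{	/* d is assumed to be an address returned by a Vmdebug region	*/
	Vmuchar_t*	best = DB2BEST(d);	/* strip the DB_HEAD prefix */
	ASSERT(DB2DEBUG(best) == (Vmuchar_t*)d);
	ASSERT((Void_t*)DBBLOCK(d) == (Void_t*)(best - sizeof(Head_t)));
}
#endif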

/* external symbols for internal use by vmalloc */
typedef Block_t*	(*Vmsearch_f)_ARG_((Vmdata_t*, size_t, Block_t*));
typedef struct _vmextern_
{	Block_t*	(*vm_extend)_ARG_((Vmalloc_t*, size_t, Vmsearch_f ));
	ssize_t		(*vm_truncate)_ARG_((Vmalloc_t*, Seg_t*, size_t, int));
	size_t		vm_pagesize;
	char*		(*vm_strcpy)_ARG_((char*, const char*, int));
	char*		(*vm_itoa)_ARG_((Vmulong_t, int));
	void		(*vm_trace)_ARG_((Vmalloc_t*,
					  Vmuchar_t*, Vmuchar_t*, size_t, size_t));
	void		(*vm_pfclose)_ARG_((Vmalloc_t*));
	int		vm_assert;
} Vmextern_t;

#define _Vmextend	(_Vmextern.vm_extend)
#define _Vmtruncate	(_Vmextern.vm_truncate)
#define _Vmpagesize	(_Vmextern.vm_pagesize)
#define _Vmstrcpy	(_Vmextern.vm_strcpy)
#define _Vmitoa		(_Vmextern.vm_itoa)
#define _Vmtrace	(_Vmextern.vm_trace)
#define _Vmpfclose	(_Vmextern.vm_pfclose)
#define _Vmassert	(_Vmextern.vm_assert)

extern int		_vmbestcheck _ARG_((Vmdata_t*, Block_t*));

_BEGIN_EXTERNS_

extern Vmextern_t	_Vmextern;

#if _PACKAGE_ast

#if _npt_getpagesize
extern int		getpagesize _ARG_((void));
#endif
#if _npt_sbrk
extern int		brk _ARG_(( void* ));
extern Void_t*		sbrk _ARG_(( ssize_t ));
#endif

#else

#if _hdr_unistd
#include	<unistd.h>
#else
extern void		abort _ARG_(( void ));
extern ssize_t		write _ARG_(( int, const void*, size_t ));
extern int		getpagesize _ARG_((void));
extern Void_t*		sbrk _ARG_((ssize_t));
#endif

#if !__STDC__ && !_hdr_stdlib
extern size_t		strlen _ARG_(( const char* ));
extern char*		strcpy _ARG_(( char*, const char* ));
extern int		strcmp _ARG_(( const char*, const char* ));
extern int		atexit _ARG_(( void(*)(void) ));
extern char*		getenv _ARG_(( const char* ));
extern Void_t*		memcpy _ARG_(( Void_t*, const Void_t*, size_t ));
extern Void_t*		memset _ARG_(( Void_t*, int, size_t ));
#else
#include	<stdlib.h>
#include	<string.h>
#endif

/* for vmexit.c */
extern int		onexit _ARG_(( void(*)(void) ));
extern void		_exit _ARG_(( int ));
extern void		_cleanup _ARG_(( void ));

#endif /*_PACKAGE_ast*/

_END_EXTERNS_

#if _UWIN
#define abort()		(DebugBreak(),abort())
#endif

#endif /* _VMHDR_H */