xref: /illumos-gate/usr/src/contrib/ast/src/lib/libast/vmalloc/malloc.c (revision b30d193948be5a7794d7ae3ba0ed9c2f72c88e0f)
1 /***********************************************************************
2 *                                                                      *
3 *               This software is part of the ast package               *
4 *          Copyright (c) 1985-2012 AT&T Intellectual Property          *
5 *                      and is licensed under the                       *
6 *                 Eclipse Public License, Version 1.0                  *
7 *                    by AT&T Intellectual Property                     *
8 *                                                                      *
9 *                A copy of the License is available at                 *
10 *          http://www.eclipse.org/org/documents/epl-v10.html           *
11 *         (with md5 checksum b35adb5213ca9657e911e9befb180842)         *
12 *                                                                      *
13 *              Information and Software Systems Research               *
14 *                            AT&T Research                             *
15 *                           Florham Park NJ                            *
16 *                                                                      *
17 *                 Glenn Fowler <gsf@research.att.com>                  *
18 *                  David Korn <dgk@research.att.com>                   *
19 *                   Phong Vo <kpv@research.att.com>                    *
20 *                                                                      *
21 ***********************************************************************/
22 #if defined(_UWIN) && defined(_BLD_ast)
23 
24 void _STUB_malloc(){}
25 
26 #else
27 
28 #if _UWIN
29 
30 #define calloc		______calloc
31 #define _ast_free	______free
32 #define malloc		______malloc
33 #define mallinfo	______mallinfo
34 #define mallopt		______mallopt
35 #define mstats		______mstats
36 #define realloc		______realloc
37 
38 #define _STDLIB_H_	1
39 
40 extern int		atexit(void(*)(void));
41 extern char*		getenv(const char*);
42 
43 #endif
44 
45 #include	"vmhdr.h"
46 #include	<errno.h>
47 
48 #if _UWIN
49 
50 #include	<malloc.h>
51 
52 #define _map_malloc	1
53 #define _mal_alloca	1
54 
55 #undef	calloc
56 #define calloc		_ast_calloc
57 #undef	_ast_free
58 #define free		_ast_free
59 #undef	malloc
60 #define malloc		_ast_malloc
61 #undef	mallinfo
62 typedef struct ______mallinfo Mallinfo_t;
63 #undef	mallopt
64 #undef	mstats
65 typedef struct ______mstats Mstats_t;
66 #undef	realloc
67 #define realloc		_ast_realloc
68 
69 #endif
70 
71 #if __STD_C
72 #define F0(f,t0)		f(t0)
73 #define F1(f,t1,a1)		f(t1 a1)
74 #define F2(f,t1,a1,t2,a2)	f(t1 a1, t2 a2)
75 #else
76 #define F0(f,t0)		f()
77 #define F1(f,t1,a1)		f(a1) t1 a1;
78 #define F2(f,t1,a1,t2,a2)	f(a1, a2) t1 a1; t2 a2;
79 #endif
80 
81 /*
82  * define _AST_std_malloc=1 to force the standard malloc
83  * if _map_malloc is also defined then _ast_malloc etc.
84  * will simply call malloc etc.
85  */
86 
87 #if !defined(_AST_std_malloc) && __CYGWIN__
88 #define _AST_std_malloc	1
89 #endif
90 
91 /*	malloc compatibility functions
92 **
93 **	These are aware of debugging/profiling and are driven by the
94 **	VMALLOC_OPTIONS environment variable which is a comma or space
95 **	separated list of [no]name[=value] options:
96 **
97 **	    abort	if Vmregion==Vmdebug then VM_DBABORT is set,
98 **			otherwise _BLD_DEBUG enabled assertions abort()
99 **			on failure
100 **	    break	try sbrk() block allocator first
101 **	    check	if Vmregion==Vmbest then the region is checked every op
102 **	    free	disable addfreelist()
103 **	    keep	disable free -- if code works with this enabled then it
104 **	    		probably accesses free'd data
105 **	    method=m	sets Vmregion=m if not defined, m (Vm prefix optional)
106 **			may be one of { best debug last profile }
107 **	    mmap	try mmap() block allocator first
108 **	    period=n	sets Vmregion=Vmdebug if not defined, if
109 **			Vmregion==Vmdebug the region is checked every n ops
110 **	    profile=f	sets Vmregion=Vmprofile if not set, if
111 **			Vmregion==Vmprofile then profile info printed to file f
112 **	    start=n	sets Vmregion=Vmdebug if not defined, if
113 **			Vmregion==Vmdebug region checking starts after n ops
114 **	    trace=f	enables tracing to file f
115 **	    warn=f	sets Vmregion=Vmdebug if not defined, if
116 **			Vmregion==Vmdebug then warnings printed to file f
117 **	    watch=a	sets Vmregion=Vmdebug if not defined, if
118 **			Vmregion==Vmdebug then address a is watched
119 **
120 **	Output files are created if they don't exist. &n and /dev/fd/n name
121 **	the file descriptor n which must be open for writing. The pattern %p
122 **	in a file name is replaced by the process ID.
123 **
124 **	VMALLOC_OPTIONS combines the features of these previously used env vars:
125 **	    { VMCHECK VMDEBUG VMETHOD VMPROFILE VMTRACE }
126 **
127 **	Written by Kiem-Phong Vo, kpv@research.att.com, 01/16/94.
128 */
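/* Example: a hypothetical environment setting (illustrative values only)
** that selects the debug method, verifies the region every 500 operations
** and sends warnings to file descriptor 2 (stderr) could look like:
**
**	VMALLOC_OPTIONS='method=debug,period=500,warn=&2'
**
** and a tracing run writing one output file per process might use:
**
**	VMALLOC_OPTIONS='trace=/tmp/vmtrace.%p'
*/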
129 
130 #if _sys_stat
131 #include	<sys/stat.h>
132 #endif
133 #include	<fcntl.h>
134 
135 #ifdef S_IRUSR
136 #define CREAT_MODE	(S_IRUSR|S_IWUSR|S_IRGRP|S_IROTH)
137 #else
138 #define CREAT_MODE	0644
139 #endif
140 
141 static Vmulong_t	_Vmdbstart = 0;
142 static Vmulong_t	_Vmdbcheck = 0;
143 static Vmulong_t	_Vmdbtime = 0;
144 static int		_Vmpffd = -1;
145 
146 #if ( !_std_malloc || !_BLD_ast ) && !_AST_std_malloc
147 
148 #if !_map_malloc
149 #undef calloc
150 #undef cfree
151 #undef free
152 #undef mallinfo
153 #undef malloc
154 #undef mallopt
155 #undef memalign
156 #undef mstats
157 #undef realloc
158 #undef valloc
159 
160 #if _malloc_hook
161 
162 #include <malloc.h>
163 
164 #undef	calloc
165 #undef	cfree
166 #undef	free
167 #undef	malloc
168 #undef	memalign
169 #undef	realloc
170 
171 #define calloc		_ast_calloc
172 #define cfree		_ast_cfree
173 #define free		_ast_free
174 #define malloc		_ast_malloc
175 #define memalign	_ast_memalign
176 #define realloc		_ast_realloc
177 
178 #endif
179 
180 #endif
181 
182 #if _WINIX
183 
184 #include <ast_windows.h>
185 
186 #if _UWIN
187 
188 #define VMRECORD(p)	_vmrecord(p)
189 #define VMBLOCK		{ int _vmblock = _sigblock();
190 #define VMUNBLOCK	_sigunblock(_vmblock); }
191 
192 extern int		_sigblock(void);
193 extern void		_sigunblock(int);
194 extern unsigned long	_record[2048];
195 
196 __inline Void_t* _vmrecord(Void_t* p)
197 {
198 	register unsigned long	v = ((unsigned long)p)>>16;
199 
200 	_record[v>>5] |= 1<<((v&0x1f));
201 	return p;
202 }
203 
204 #else
205 
206 #define getenv(s)	lcl_getenv(s)
207 
208 static char*
209 lcl_getenv(const char* s)
210 {
211 	int		n;
212 	static char	buf[512];
213 
214 	if (!(n = GetEnvironmentVariable(s, buf, sizeof(buf))) || n > sizeof(buf))
215 		return 0;
216 	return buf;
217 }
218 
219 #endif /* _UWIN */
220 
221 #endif /* _WINIX */
222 
223 #ifndef VMRECORD
224 #define VMRECORD(p)	(p)
225 #define VMBLOCK
226 #define VMUNBLOCK
227 #endif
228 
229 #if defined(__EXPORT__)
230 #define extern		extern __EXPORT__
231 #endif
232 
233 static int		_Vmflinit = 0;
234 #define VMFLINIT() \
235 	{ if(!_Vmflinit)	vmflinit(); \
236 	  if(_Vmdbcheck) \
237 	  { if(_Vmdbtime < _Vmdbstart) _Vmdbtime += 1; \
238 	    else if((_Vmdbtime += 1) < _Vmdbstart) _Vmdbtime = _Vmdbstart; \
239 	    if(_Vmdbtime >= _Vmdbstart && (_Vmdbtime % _Vmdbcheck) == 0 && \
240 	       Vmregion->meth.meth == VM_MTDEBUG) \
241 		vmdbcheck(Vmregion); \
242 	  } \
243 	}
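/* The allocation entry points below call VMFLINIT() on entry: the one-time
** vmflinit() call applies VMALLOC_OPTIONS, and when _Vmdbcheck is set the
** debug region is re-verified via vmdbcheck() once _Vmdbtime has reached
** _Vmdbstart and then on every _Vmdbcheck-th operation thereafter.
*/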
244 
245 #if __STD_C
246 static int vmflinit(void)
247 #else
248 static int vmflinit()
249 #endif
250 {
251 	char*		file;
252 	int		line;
253 	Void_t*		func;
254 
255 	/* this must be done now to avoid any inadvertent recursion (more below) */
256 	_Vmflinit = 1;
257 	VMFLF(Vmregion,file,line,func);
258 
259 	/* if getenv() calls malloc(), the options may not affect the eventual region */
260 	VMOPTIONS();
261 
262 	/* reset file and line number to correct values for the call */
263 	Vmregion->file = file;
264 	Vmregion->line = line;
265 	Vmregion->func = func;
266 
267 	return 0;
268 }
269 
270 /* use multiple regions to reduce blocking by concurrent threads  */
271 #if _mem_mmap_anon || _mem_mmap_zero
272 static Vmalloc_t	*Region[64];	/* list of concurrent regions	*/
273 static unsigned int	Regmax = 64;	/* max number of regions	*/
274 #else
275 static Vmalloc_t*	Region[1];	/* list of concurrent regions	*/
276 static unsigned int	Regmax = 0;
277 #endif
278 static unsigned int	Regnum = 0; 	/* current #concurrent regions	*/
279 
280 /* statistics */
281 static unsigned int	Regopen = 0; 	/* #allocation calls opened	*/
282 static unsigned int	Reglock = 0; 	/* #allocation calls locked	*/
283 static unsigned int	Regprobe = 0; 	/* #probes to find a region	*/
284 
285 int setregmax(int regmax)
286 {
287 	int	oldmax = Regmax;
288 
289 	if(regmax >= Regnum && regmax <= sizeof(Region)/sizeof(Region[0]))
290 		Regmax = regmax;
291 
292 	return oldmax;
293 }
294 
295 /* return statistics */
296 int _mallocstat(Vmstat_t* st)
297 {
298 	Vmstat_t	vmst;
299 	int		k;
300 
301 	if(vmstat(Vmregion, st) < 0) /* add up all stats */
302 		return -1;
303 	for(k = 0; k < Regnum; ++k)
304 	{	if(!Region[k])
305 			continue;
306 		if(vmstat(Region[k], &vmst) < 0 )
307 			return -1;
308 		st->n_busy += vmst.n_busy;
309 		st->n_free += vmst.n_free;
310 		st->s_busy += vmst.s_busy;
311 		st->s_free += vmst.s_free;
312 		st->m_busy += vmst.m_busy;
313 		st->m_free += vmst.m_free;
314 		st->n_seg  += vmst.n_seg;
315 		st->extent += vmst.extent;
316 	}
317 
318 	st->n_region = Regnum+1;
319 	st->n_open = Regopen;
320 	st->n_lock = Reglock;
321 	st->n_probe = Regprobe;
322 
323 	return 0;
324 }
325 
326 /* find the region that a block was allocated from */
327 static Vmalloc_t* regionof(Void_t* addr)
328 {
329 	int	k;
330 
331 #if USE_NATIVE
332 #define CAUTIOUS	1
333 #else
334 #define CAUTIOUS	0
335 #endif
336 	if(CAUTIOUS || Vmregion->meth.meth != VM_MTBEST )
337 	{	/* addr will not be dereferenced here */
338 		if(vmaddr(Vmregion,addr) == 0 )
339 			return Vmregion;
340 		for(k = 0; k < Regnum; ++k)
341 			if(Region[k] && vmaddr(Region[k], addr) == 0 )
342 				return Region[k];
343 		return NIL(Vmalloc_t*);
344 	}
345 	else
346 	{	/* fast, but susceptible to bad data */
347 		Vmdata_t *vd = SEG(BLOCK(addr))->vmdt;
348 		if(Vmregion->data == vd )
349 			return Vmregion;
350 		for(k = 0; k < Regnum; ++k)
351 			if(Region[k] && Region[k]->data == vd)
352 				return Region[k];
353 		return NIL(Vmalloc_t*);
354 	}
355 }
356 
357 /* manage a cache of free objects */
358 typedef struct _regfree_s
359 {	struct _regfree_s*	next;
360 } Regfree_t;
361 static Regfree_t	*Regfree;
362 
363 static void addfreelist(Regfree_t* data)
364 {
365 	unsigned int	k;
366 	Regfree_t	*head;
367 
368 	for(k = 0;; ASOLOOP(k) )
369 	{	data->next = head = Regfree;
370 		if(asocasptr(&Regfree, head, data) == (Void_t*)head )
371 			return;
372 	}
373 }
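/* addfreelist() pushes a block onto the lock-free Regfree stack: the
** compare-and-swap on the list head is retried (with ASOLOOP backoff)
** until this thread's block is installed; clrfreelist() later drains the
** stack and returns each block to its region when that region's lock can
** be taken.
*/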
374 
375 static void clrfreelist()
376 {
377 	Regfree_t	*list, *next;
378 	Vmalloc_t	*vm;
379 
380 	if(!(list = Regfree) )
381 		return; /* nothing to do */
382 
383 	if(asocasptr(&Regfree, list, NIL(Regfree_t*)) != list )
384 		return; /* somebody else is doing it */
385 
386 	for(; list; list = next)
387 	{	next = list->next;
388 		if(vm = regionof((Void_t*)list))
389 		{	if(asocasint(&vm->data->lock, 0, 1) == 0) /* can free this now */
390 			{	(void)(*vm->meth.freef)(vm, (Void_t*)list, 1);
391 				vm->data->lock = 0;
392 			}
393 			else	addfreelist(list); /* ah well, back in the queue */
394 		}
395 	}
396 }
397 
398 /* get a suitable region to allocate from */
399 typedef struct _regdisc_s
400 {	Vmdisc_t	disc;
401 	char		slop[64]; /* to absorb any extra data in Vmdcsystem */
402 } Regdisc_t;
403 
404 static int regexcept(Vmalloc_t* vm, int type, Void_t* data, Vmdisc_t* disc)
405 {
406 	if(type == VM_OPEN)
407 	{	if(data) /* make vmopen allocate all memory using discipline */
408 			*(Void_t**)data = data; /* just make it non-NULL */
409 		return 0;
410 	}
411 	return 0;
412 }
413 
414 static Vmalloc_t* getregion(int* local)
415 {
416 	Vmalloc_t		*vm;
417 	int			p, pos;
418 
419 	static unsigned int	Rand = 0xdeadbeef; /* a cheap prng */
420 #define RAND()			(Rand = Rand*16777617 + 3)
421 
422 	clrfreelist();
423 
424 	if(Regmax <= 0 )
425 	{	/* uni-process/thread */
426 		*local = 1;
427 		Vmregion->data->lock = 1;
428 		return Vmregion;
429 	}
430 	else if(asocasint(&Vmregion->data->lock, 0, 1) == 0 )
431 	{	/* Vmregion is open, so use it */
432 		*local = 1;
433 		asoincint(&Regopen);
434 		return Vmregion;
435 	}
436 
437 	asoincint(&Regprobe); /* probe Region[] to find an open region */
438 	if(Regnum == 0)
439 		pos = 0;
440 	else for(pos = p = RAND()%Regnum;; )
441 	{	if(Region[p] && asocasint(&Region[p]->data->lock, 0, 1) == 0 )
442 		{	*local = 1;
443 			asoincint(&Regopen);
444 			return Region[p];
445 		}
446 		if((p = (p+1)%Regnum) == pos )
447 			break;
448 	}
449 
450 	/* grab the next open slot for a new region */
451 	while((p = Regnum) < Regmax)
452 		if(asocasint(&Regnum, p, p+1) == p )
453 			break;
454 	if(p < Regmax) /* this slot is now ours */
455 	{	static Regdisc_t	Regdisc;
456 		if(!Regdisc.disc.exceptf) /* one time initialization */
457 		{	GETPAGESIZE(_Vmpagesize);
458 			memcpy(&Regdisc, Vmdcsystem, Vmdcsystem->size);
459 			Regdisc.disc.round = ROUND(_Vmpagesize, 64*1024);
460 			Regdisc.disc.exceptf = regexcept;
461 		}
462 
463 		/**/ASSERT(Region[p] == NIL(Vmalloc_t*));
464 		if((vm = vmopen(&Regdisc.disc, Vmbest, VM_SHARE)) != NIL(Vmalloc_t*) )
465 		{	vm->data->lock = 1; /* lock new region now */
466 			*local = 1;
467 			asoincint(&Regopen);
468 			return (Region[p] = vm);
469 		}
470 		else	Region[p] = Vmregion; /* better than nothing */
471 	}
472 
473 	/* must return something */
474 	vm = Region[pos] ? Region[pos] : Vmregion;
475 	if(asocasint(&vm->data->lock, 0, 1) == 0)
476 	{	*local = 1;
477 		asoincint(&Regopen);
478 	}
479 	else
480 	{	*local = 0;
481 		asoincint(&Reglock);
482 	}
483 	return vm;
484 }
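/* getregion() picks an allocation region: it first drains the deferred free
** list, then tries to lock Vmregion, then probes Region[] starting at a
** random slot, then claims a fresh slot and vmopen()s a new shared Vmbest
** region, and as a last resort returns a possibly busy region with *local
** set to 0.
*/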
485 
486 #if __STD_C
487 extern Void_t* calloc(reg size_t n_obj, reg size_t s_obj)
488 #else
489 extern Void_t* calloc(n_obj, s_obj)
490 reg size_t	n_obj;
491 reg size_t	s_obj;
492 #endif
493 {
494 	Void_t		*addr;
495 	Vmalloc_t	*vm;
496 	int		local = 0;
497 	VMFLINIT();
498 
499 	vm = getregion(&local);
500 	addr = (*vm->meth.resizef)(vm, NIL(Void_t*), n_obj*s_obj, VM_RSZERO, local);
501 	if(local)
502 	{	/**/ASSERT(vm->data->lock == 1);
503 		vm->data->lock = 0;
504 	}
505 	return VMRECORD(addr);
506 }
507 
508 #if __STD_C
509 extern Void_t* malloc(reg size_t size)
510 #else
511 extern Void_t* malloc(size)
512 reg size_t	size;
513 #endif
514 {
515 	Void_t		*addr;
516 	Vmalloc_t	*vm;
517 	int		local = 0;
518 	VMFLINIT();
519 
520 	vm = getregion(&local);
521 	addr = (*vm->meth.allocf)(vm, size, local);
522 	if(local)
523 	{	/**/ASSERT(vm->data->lock == 1);
524 		vm->data->lock = 0;
525 	}
526 	return VMRECORD(addr);
527 }
528 
529 #if __STD_C
530 extern Void_t* realloc(reg Void_t* data, reg size_t size)
531 #else
532 extern Void_t* realloc(data,size)
533 reg Void_t*	data;	/* block to be reallocated	*/
534 reg size_t	size;	/* new size			*/
535 #endif
536 {
537 	ssize_t		copy;
538 	Void_t		*addr;
539 	Vmalloc_t	*vm;
540 	VMFLINIT();
541 
542 	if(!data)
543 		return malloc(size);
544 	else if((vm = regionof(data)) )
545 	{	if(vm == Vmregion && vm != Vmheap) /* no multiple region usage here */
546 		{	addr = (*vm->meth.resizef)(vm, data, size, VM_RSCOPY|VM_RSMOVE, 0);
547 			return VMRECORD(addr);
548 		}
549 		if(asocasint(&vm->data->lock, 0, 1) == 0 ) /* region is open */
550 		{	addr = (*vm->meth.resizef)(vm, data, size, VM_RSCOPY|VM_RSMOVE, 1);
551 			vm->data->lock = 0;
552 			return VMRECORD(addr);
553 		}
554 		else if(Regmax > 0 && Vmregion == Vmheap && (addr = malloc(size)) )
555 		{	if((copy = SIZE(BLOCK(data))&~BITS) > size )
556 				copy = size;
557 			memcpy(addr, data, copy);
558 			addfreelist((Regfree_t*)data);
559 			return VMRECORD(addr);
560 		}
561 		else /* this may block but it is the best that we can do now */
562 		{	addr = (*vm->meth.resizef)(vm, data, size, VM_RSCOPY|VM_RSMOVE, 0);
563 			return VMRECORD(addr);
564 		}
565 	}
566 	else /* not our data */
567 	{
568 #if USE_NATIVE
569 #undef	realloc /* let the native realloc() take care of it */
570 #if __STD_C
571 		extern Void_t*	realloc(Void_t*, size_t);
572 #else
573 		extern Void_t*	realloc();
574 #endif
575 		return realloc(data, size);
576 #else
577 		return NIL(Void_t*);
578 #endif
579 	}
580 }
581 
582 #if __STD_C
583 extern void free(reg Void_t* data)
584 #else
585 extern void free(data)
586 reg Void_t*	data;
587 #endif
588 {
589 	Vmalloc_t	*vm;
590 	VMFLINIT();
591 
592 	if(!data || (_Vmassert & VM_keep))
593 		return;
594 	else if((vm = regionof(data)) )
595 	{
596 		if(vm == Vmregion && Vmregion != Vmheap || (_Vmassert & VM_free))
597 			(void)(*vm->meth.freef)(vm, data, 0);
598 		else	addfreelist((Regfree_t*)data);
599 		return;
600 	}
601 	else /* not our data */
602 	{
603 #if USE_NATIVE
604 #undef	free /* let the native free() take care of it */
605 #if __STD_C
606 		extern void	free(Void_t*);
607 #else
608 		extern void	free();
609 #endif
610 		free(data);
611 #endif
612 		return;
613 	}
614 }
615 
616 #if __STD_C
617 extern void cfree(reg Void_t* data)
618 #else
619 extern void cfree(data)
620 reg Void_t*	data;
621 #endif
622 {
623 	free(data);
624 }
625 
626 #if __STD_C
627 extern Void_t* memalign(reg size_t align, reg size_t size)
628 #else
629 extern Void_t* memalign(align, size)
630 reg size_t	align;
631 reg size_t	size;
632 #endif
633 {
634 	Void_t		*addr;
635 	Vmalloc_t	*vm;
636 	int		local = 0;
637 	VMFLINIT();
638 
639 	vm = getregion(&local);
640 	VMBLOCK
641 	addr = (*vm->meth.alignf)(vm, size, align, local);
642 	if(local)
643 	{	/**/ASSERT(vm->data->lock == 1);
644 		vm->data->lock = 0;
645 	}
646 	VMUNBLOCK
647 	return VMRECORD(addr);
648 }
649 
650 #if __STD_C
651 extern int posix_memalign(reg Void_t **memptr, reg size_t align, reg size_t size)
652 #else
653 extern int posix_memalign(memptr, align, size)
654 reg Void_t**	memptr;
655 reg size_t	align;
656 reg size_t	size;
657 #endif
658 {
659 	Void_t	*mem;
660 
661 	if(align == 0 || (align%sizeof(Void_t*)) != 0 || ((align-1)&align) != 0 )
662 		return EINVAL;
663 
664 	if(!(mem = memalign(align, size)) )
665 		return ENOMEM;
666 
667 	*memptr = mem;
668 	return 0;
669 }
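/* A minimal posix_memalign() usage sketch (hypothetical values): align must
** be a power of two and a multiple of sizeof(Void_t*), e.g.
**
**	Void_t	*p;
**	if (posix_memalign(&p, 64, 1024) == 0)
**		free(p);
*/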
670 
671 #if __STD_C
672 extern Void_t* valloc(reg size_t size)
673 #else
674 extern Void_t* valloc(size)
675 reg size_t	size;
676 #endif
677 {
678 	VMFLINIT();
679 
680 	GETPAGESIZE(_Vmpagesize);
681 	return VMRECORD(memalign(_Vmpagesize, size));
682 }
683 
684 #if __STD_C
685 extern Void_t* pvalloc(reg size_t size)
686 #else
687 extern Void_t* pvalloc(size)
688 reg size_t	size;
689 #endif
690 {
691 	VMFLINIT();
692 
693 	GETPAGESIZE(_Vmpagesize);
694 	return VMRECORD(memalign(_Vmpagesize, ROUND(size,_Vmpagesize)) );
695 }
696 
697 #if !_PACKAGE_ast
698 #if __STD_C
699 char* strdup(const char* s)
700 #else
701 char* strdup(s)
702 char*	s;
703 #endif
704 {
705 	char	*ns;
706 	size_t	n;
707 
708 	if(!s)
709 		return NIL(char*);
710 	else
711 	{	n = strlen(s);
712 		if((ns = malloc(n+1)) )
713 			memcpy(ns,s,n+1);
714 		return ns;
715 	}
716 }
717 #endif /* _PACKAGE_ast */
718 
719 #if !_lib_alloca || _mal_alloca
720 #ifndef _stk_down
721 #define _stk_down	0
722 #endif
723 typedef struct _alloca_s	Alloca_t;
724 union _alloca_u
725 {	struct
726 	{	char*		addr;
727 		Alloca_t*	next;
728 	} head;
729 	char	array[ALIGN];
730 };
731 struct _alloca_s
732 {	union _alloca_u	head;
733 	Vmuchar_t	data[1];
734 };
735 
736 #if __STD_C
737 extern Void_t* alloca(size_t size)
738 #else
739 extern Void_t* alloca(size)
740 size_t	size;
741 #endif
742 {	char		array[ALIGN];
743 	char*		file;
744 	int		line;
745 	Void_t*		func;
746 	Alloca_t*	f;
747 	Vmalloc_t	*vm;
748 	static Alloca_t* Frame;
749 
750 	VMFLINIT();
751 
752 	VMFLF(Vmregion,file,line,func); /* save info before freeing frames */
753 
754 	while(Frame) /* free unused frames */
755 	{	if(( _stk_down && &array[0] > Frame->head.head.addr) ||
756 		   (!_stk_down && &array[0] < Frame->head.head.addr) )
757 		{	f = Frame; Frame = f->head.head.next;
758 			if((vm = regionof(f)) )
759 				(void)(*vm->meth.freef)(vm, f, 0);
760 			/* else: something bad happened. just keep going */
761 		}
762 		else	break;
763 	}
764 
765 	Vmregion->file = file; /* restore file/line info before allocation */
766 	Vmregion->line = line;
767 	Vmregion->func = func;
768 
769 	f = (Alloca_t*)(*Vmregion->meth.allocf)(Vmregion, size+sizeof(Alloca_t)-1, 0);
770 
771 	/* if f is NULL, this mimics a stack overflow with a memory error! */
772 	f->head.head.addr = &array[0];
773 	f->head.head.next = Frame;
774 	Frame = f;
775 
776 	return (Void_t*)f->data;
777 }
778 #endif /*!_lib_alloca || _mal_alloca*/
779 
780 #if _map_malloc
781 
782 /* not sure of all the implications -- 0 is conservative for now */
783 #define USE_NATIVE	0	/* native free/realloc on non-vmalloc ptrs */
784 
785 #else
786 
787 #if _malloc_hook
788 
789 static void vm_free_hook(void* ptr, const void* caller)
790 {
791 	free(ptr);
792 }
793 
794 static void* vm_malloc_hook(size_t size, const void* caller)
795 {
796 	void*	r;
797 
798 	r = malloc(size);
799 	return r;
800 }
801 
802 static void* vm_memalign_hook(size_t align, size_t size, const void* caller)
803 {
804 	void*	r;
805 
806 	r = memalign(align, size);
807 	return r;
808 }
809 
810 static void* vm_realloc_hook(void* ptr, size_t size, const void* caller)
811 {
812 	void*	r;
813 
814 	r = realloc(ptr, size);
815 	return r;
816 }
817 
818 static void vm_initialize_hook(void)
819 {
820 	__free_hook = vm_free_hook;
821 	__malloc_hook = vm_malloc_hook;
822 	__memalign_hook = vm_memalign_hook;
823 	__realloc_hook = vm_realloc_hook;
824 }
825 
826 void	(*__malloc_initialize_hook)(void) = vm_initialize_hook;
827 
828 #if 0 /* 2012-02-29 this may be needed to cover shared libs */
829 
830 void __attribute__ ((constructor)) vm_initialize_initialize_hook(void)
831 {
832 	vm_initialize_hook();
833 	__malloc_initialize_hook = vm_initialize_hook;
834 }
835 
836 #endif
837 
838 #else
839 
840 /* intercept _* __* __libc_* variants */
841 
842 #if __lib__malloc
843 extern Void_t*	F2(_calloc, size_t,n, size_t,m) { return calloc(n, m); }
844 extern Void_t	F1(_cfree, Void_t*,p) { free(p); }
845 extern Void_t	F1(_free, Void_t*,p) { free(p); }
846 extern Void_t*	F1(_malloc, size_t,n) { return malloc(n); }
847 #if _lib_memalign
848 extern Void_t*	F2(_memalign, size_t,a, size_t,n) { return memalign(a, n); }
849 #endif
850 #if _lib_pvalloc
851 extern Void_t*	F1(_pvalloc, size_t,n) { return pvalloc(n); }
852 #endif
853 extern Void_t*	F2(_realloc, Void_t*,p, size_t,n) { return realloc(p, n); }
854 #if _lib_valloc
855 extern Void_t*	F1(_valloc, size_t,n) { return valloc(n); }
856 #endif
857 #endif
858 
859 #if _lib___malloc
860 extern Void_t*	F2(__calloc, size_t,n, size_t,m) { return calloc(n, m); }
861 extern Void_t	F1(__cfree, Void_t*,p) { free(p); }
862 extern Void_t	F1(__free, Void_t*,p) { free(p); }
863 extern Void_t*	F1(__malloc, size_t,n) { return malloc(n); }
864 #if _lib_memalign
865 extern Void_t*	F2(__memalign, size_t,a, size_t,n) { return memalign(a, n); }
866 #endif
867 #if _lib_pvalloc
868 extern Void_t*	F1(__pvalloc, size_t,n) { return pvalloc(n); }
869 #endif
870 extern Void_t*	F2(__realloc, Void_t*,p, size_t,n) { return realloc(p, n); }
871 #if _lib_valloc
872 extern Void_t*	F1(__valloc, size_t,n) { return valloc(n); }
873 #endif
874 #endif
875 
876 #if _lib___libc_malloc
877 extern Void_t*	F2(__libc_calloc, size_t,n, size_t,m) { return calloc(n, m); }
878 extern Void_t	F1(__libc_cfree, Void_t*,p) { free(p); }
879 extern Void_t	F1(__libc_free, Void_t*,p) { free(p); }
880 extern Void_t*	F1(__libc_malloc, size_t,n) { return malloc(n); }
881 #if _lib_memalign
882 extern Void_t*	F2(__libc_memalign, size_t,a, size_t,n) { return memalign(a, n); }
883 #endif
884 #if _lib_pvalloc
885 extern Void_t*	F1(__libc_pvalloc, size_t,n) { return pvalloc(n); }
886 #endif
887 extern Void_t*	F2(__libc_realloc, Void_t*,p, size_t,n) { return realloc(p, n); }
888 #if _lib_valloc
889 extern Void_t*	F1(__libc_valloc, size_t,n) { return valloc(n); }
890 #endif
891 #endif
892 
893 #endif /* _malloc_hook */
894 
895 #endif /* _map_malloc */
896 
897 #undef	extern
898 
899 #if _hdr_malloc /* need the mallint interface for statistics, etc. */
900 
901 #undef	calloc
902 #define calloc		______calloc
903 #undef	cfree
904 #define cfree		______cfree
905 #undef	free
906 #define free		______free
907 #undef	malloc
908 #define malloc		______malloc
909 #undef	pvalloc
910 #define pvalloc		______pvalloc
911 #undef	realloc
912 #define realloc		______realloc
913 #undef	valloc
914 #define valloc		______valloc
915 
916 #if !_UWIN
917 
918 #include	<malloc.h>
919 
920 typedef struct mallinfo Mallinfo_t;
921 typedef struct mstats Mstats_t;
922 
923 #endif
924 
925 #if defined(__EXPORT__)
926 #define extern		__EXPORT__
927 #endif
928 
929 #if _lib_mallopt
930 #if __STD_C
931 extern int mallopt(int cmd, int value)
932 #else
933 extern int mallopt(cmd, value)
934 int	cmd;
935 int	value;
936 #endif
937 {
938 	VMFLINIT();
939 	return 0;
940 }
941 #endif /*_lib_mallopt*/
942 
943 #if _lib_mallinfo && _mem_arena_mallinfo
944 #if __STD_C
945 extern Mallinfo_t mallinfo(void)
946 #else
947 extern Mallinfo_t mallinfo()
948 #endif
949 {
950 	Vmstat_t	sb;
951 	Mallinfo_t	mi;
952 
953 	VMFLINIT();
954 	memset(&mi,0,sizeof(mi));
955 	if(vmstat(Vmregion,&sb) >= 0)
956 	{	mi.arena = sb.extent;
957 		mi.ordblks = sb.n_busy+sb.n_free;
958 		mi.uordblks = sb.s_busy;
959 		mi.fordblks = sb.s_free;
960 	}
961 	return mi;
962 }
963 #endif /* _lib_mallinfo */
964 
965 #if _lib_mstats && _mem_bytes_total_mstats
966 #if __STD_C
967 extern Mstats_t mstats(void)
968 #else
969 extern Mstats_t mstats()
970 #endif
971 {
972 	Vmstat_t	sb;
973 	Mstats_t	ms;
974 
975 	VMFLINIT();
976 	memset(&ms,0,sizeof(ms));
977 	if(vmstat(Vmregion,&sb) >= 0)
978 	{	ms.bytes_total = sb.extent;
979 		ms.chunks_used = sb.n_busy;
980 		ms.bytes_used = sb.s_busy;
981 		ms.chunks_free = sb.n_free;
982 		ms.bytes_free = sb.s_free;
983 	}
984 	return ms;
985 }
986 #endif /*_lib_mstats*/
987 
988 #undef	extern
989 
990 #endif/*_hdr_malloc*/
991 
992 #else
993 
994 /*
995  * even though there is no malloc override, still provide
996  * _ast_* counterparts for object compatibility
997  */
998 
999 #define setregmax(n)
1000 
1001 #undef	calloc
1002 extern Void_t*	calloc _ARG_((size_t, size_t));
1003 
1004 #undef	cfree
1005 extern void	cfree _ARG_((Void_t*));
1006 
1007 #undef	free
1008 extern void	free _ARG_((Void_t*));
1009 
1010 #undef	malloc
1011 extern Void_t*	malloc _ARG_((size_t));
1012 
1013 #if _lib_memalign
1014 #undef	memalign
1015 extern Void_t*	memalign _ARG_((size_t, size_t));
1016 #endif
1017 
1018 #if _lib_pvalloc
1019 #undef	pvalloc
1020 extern Void_t*	pvalloc _ARG_((size_t));
1021 #endif
1022 
1023 #undef	realloc
1024 extern Void_t*	realloc _ARG_((Void_t*, size_t));
1025 
1026 #if _lib_valloc
1027 #undef	valloc
1028 extern Void_t*	valloc _ARG_((size_t));
1029 #endif
1030 
1031 #if defined(__EXPORT__)
1032 #define extern		__EXPORT__
1033 #endif
1034 
1035 #if !_malloc_hook
1036 
1037 extern Void_t	F1(_ast_free, Void_t*,p) { free(p); }
1038 extern Void_t*	F1(_ast_malloc, size_t,n) { return malloc(n); }
1039 #if _lib_memalign
1040 extern Void_t*	F2(_ast_memalign, size_t,a, size_t,n) { return memalign(a, n); }
1041 #endif
1042 extern Void_t*	F2(_ast_realloc, Void_t*,p, size_t,n) { return realloc(p, n); }
1043 
1044 #endif
1045 
1046 extern Void_t*	F2(_ast_calloc, size_t,n, size_t,m) { return calloc(n, m); }
1047 extern Void_t	F1(_ast_cfree, Void_t*,p) { free(p); }
1048 #if _lib_pvalloc
1049 extern Void_t*	F1(_ast_pvalloc, size_t,n) { return pvalloc(n); }
1050 #endif
1051 #if _lib_valloc
1052 extern Void_t*	F1(_ast_valloc, size_t,n) { return valloc(n); }
1053 #endif
1054 
1055 #undef	extern
1056 
1057 #if _hdr_malloc
1058 
1059 #undef	mallinfo
1060 #undef	mallopt
1061 #undef	mstats
1062 
1063 #define calloc		______calloc
1064 #define cfree		______cfree
1065 #define free		______free
1066 #define malloc		______malloc
1067 #define pvalloc		______pvalloc
1068 #define realloc		______realloc
1069 #define valloc		______valloc
1070 
1071 #if !_UWIN
1072 
1073 #if !_malloc_hook
1074 
1075 #include	<malloc.h>
1076 
1077 #endif
1078 
1079 typedef struct mallinfo Mallinfo_t;
1080 typedef struct mstats Mstats_t;
1081 
1082 #endif
1083 
1084 #if defined(__EXPORT__)
1085 #define extern		__EXPORT__
1086 #endif
1087 
1088 #if _lib_mallopt
1089 extern int	F2(_ast_mallopt, int,cmd, int,value) { return mallopt(cmd, value); }
1090 #endif
1091 
1092 #if _lib_mallinfo && _mem_arena_mallinfo
1093 extern Mallinfo_t	F0(_ast_mallinfo, void) { return mallinfo(); }
1094 #endif
1095 
1096 #if _lib_mstats && _mem_bytes_total_mstats
1097 extern Mstats_t		F0(_ast_mstats, void) { return mstats(); }
1098 #endif
1099 
1100 #undef	extern
1101 
1102 #endif /*_hdr_malloc*/
1103 
1104 #endif /*!_std_malloc*/
1105 
1106 #if __STD_C
1107 static Vmulong_t atou(char** sp)
1108 #else
1109 static Vmulong_t atou(sp)
1110 char**	sp;
1111 #endif
1112 {
1113 	char*		s = *sp;
1114 	Vmulong_t	v = 0;
1115 
1116 	if(s[0] == '0' && (s[1] == 'x' || s[1] == 'X') )
1117 	{	for(s += 2; *s; ++s)
1118 		{	if(*s >= '0' && *s <= '9')
1119 				v = (v << 4) + (*s - '0');
1120 			else if(*s >= 'a' && *s <= 'f')
1121 				v = (v << 4) + (*s - 'a') + 10;
1122 			else if(*s >= 'A' && *s <= 'F')
1123 				v = (v << 4) + (*s - 'A') + 10;
1124 			else break;
1125 		}
1126 	}
1127 	else
1128 	{	for(; *s; ++s)
1129 		{	if(*s >= '0' && *s <= '9')
1130 				v = v*10 + (*s - '0');
1131 			else break;
1132 		}
1133 	}
1134 
1135 	*sp = s;
1136 	return v;
1137 }
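/* atou() accepts a leading 0x/0X hex form or plain decimal and leaves *sp
** at the first unrecognized character, e.g. "0x1f" yields 31 and "255k"
** yields 255 with *sp pointing at the 'k'.
*/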
1138 
1139 #if __STD_C
1140 static char* insertpid(char* begs, char* ends)
1141 #else
1142 static char* insertpid(begs,ends)
1143 char*	begs;
1144 char*	ends;
1145 #endif
1146 {	int	pid;
1147 	char*	s;
1148 
1149 	if((pid = getpid()) < 0)
1150 		return NIL(char*);
1151 
1152 	s = ends;
1153 	do
1154 	{	if(s == begs)
1155 			return NIL(char*);
1156 		*--s = '0' + pid%10;
1157 	} while((pid /= 10) > 0);
1158 	while(s < ends)
1159 		*begs++ = *s++;
1160 
1161 	return begs;
1162 }
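/* insertpid() writes the decimal process id at begs and returns the position
** just past the last digit, or NIL(char*) if the buffer cannot hold it;
** createfile() uses it to expand the %p pattern in output file names.
*/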
1163 
1164 #define FD_PRIVATE	(3*OPEN_MAX/4)
1165 
1166 #if __STD_C
1167 int _vmfd(int fd)
1168 #else
1169 int _vmfd(fd)
1170 int	fd;
1171 #endif
1172 {
1173 	int	pd;
1174 
1175 	if (fd >= 0)
1176 	{
1177 		if (fd < FD_PRIVATE && (pd = fcntl(fd, F_DUPFD, FD_PRIVATE)) >= 0)
1178 		{
1179 			close(fd);
1180 			fd = pd;
1181 		}
1182 #ifdef FD_CLOEXEC
1183 		fcntl(fd,  F_SETFD, FD_CLOEXEC);
1184 #endif
1185 	}
1186 	return fd;
1187 }
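/* _vmfd() moves a descriptor up to at least FD_PRIVATE (3*OPEN_MAX/4) via
** F_DUPFD and marks it close-on-exec, so vmalloc's diagnostic output files
** stay clear of the low descriptors that applications typically use.
*/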
1188 
1189 #if __STD_C
1190 static int createfile(char* file)
1191 #else
1192 static int createfile(file)
1193 char*	file;
1194 #endif
1195 {
1196 	char	buf[1024];
1197 	char	*next, *endb;
1198 	int	fd;
1199 
1200 	next = buf;
1201 	endb = buf + sizeof(buf);
1202 	while(*file)
1203 	{	if(*file == '%')
1204 		{	switch(file[1])
1205 			{
1206 			case 'p' :
1207 				if(!(next = insertpid(next,endb)) )
1208 					return -1;
1209 				file += 2;
1210 				break;
1211 			default :
1212 				goto copy;
1213 			}
1214 		}
1215 		else
1216 		{ copy:
1217 			*next++ = *file++;
1218 		}
1219 
1220 		if(next >= endb)
1221 			return -1;
1222 	}
1223 
1224 	*next = '\0';
1225 	file = buf;
1226 	if (*file == '&' && *(file += 1) || strncmp(file, "/dev/fd/", 8) == 0 && *(file += 8))
1227 		fd = dup((int)atou(&file));
1228 	else if (*file)
1229 	{
1230 #if _PACKAGE_ast
1231 		fd = open(file, O_WRONLY|O_CREAT|O_TRUNC, CREAT_MODE);
1232 #else
1233 		fd = creat(file, CREAT_MODE);
1234 #endif
1235 		fd = _vmfd(fd);
1236 	}
1237 	else
1238 		return -1;
1239 #if _PACKAGE_ast
1240 #ifdef FD_CLOEXEC
1241 	if (fd >= 0)
1242 		fcntl(fd, F_SETFD, FD_CLOEXEC);
1243 #endif
1244 #endif
1245 	return fd;
1246 }
1247 
1248 #if __STD_C
1249 static void pfprint(void)
1250 #else
1251 static void pfprint()
1252 #endif
1253 {
1254 	if(Vmregion->meth.meth == VM_MTPROFILE)
1255 		vmprofile(Vmregion,_Vmpffd);
1256 }
1257 
1258 /*
1259  * initialize runtime options from the VMALLOC_OPTIONS env var
1260  */
1261 
1262 #define COPY(t,e,f)	while ((*t = *f++) && t < e) t++
1263 
1264 #if __STD_C
1265 void _vmoptions(void)
1266 #else
1267 void _vmoptions()
1268 #endif
1269 {
1270 	Vmalloc_t*	vm = 0;
1271 	char*		trace = 0;
1272 	char*		s;
1273 	char*		t;
1274 	char*		v;
1275 	Vmulong_t	n;
1276 	int		fd;
1277 	char		buf[1024];
1278 
1279 	_Vmoptions = 1;
1280 	t = buf;
1281 	v = &buf[sizeof(buf)-1];
1282 	if (s = getenv("VMALLOC_OPTIONS"))
1283 		COPY(t, v, s);
1284 	if (t > buf)
1285 	{
1286 		*t = 0;
1287 		s = buf;
1288 		for (;;)
1289 		{
1290 			while (*s == ',' || *s == ' ' || *s == '\t' || *s == '\r' || *s == '\n')
1291 				s++;
1292 			if (!*(t = s))
1293 				break;
1294 			v = 0;
1295 			while (*s)
1296 				if (*s == ',' || *s == ' ' || *s == '\t' || *s == '\r' || *s == '\n')
1297 				{
1298 					*s++ = 0;
1299 					break;
1300 				}
1301 				else if (!v && *s == '=')
1302 				{
1303 					*s++ = 0;
1304 					if (!*(v = s))
1305 						v = 0;
1306 				}
1307 				else
1308 					s++;
1309 			if (t[0] == 'n' && t[1] == 'o')
1310 				continue;
1311 			switch (t[0])
1312 			{
1313 			case 'a':		/* abort */
1314 				if (!vm)
1315 					vm = vmopen(Vmdcsystem, Vmdebug, 0);
1316 				if (vm && vm->meth.meth == VM_MTDEBUG)
1317 					vmset(vm, VM_DBABORT, 1);
1318 				else
1319 					_Vmassert |= VM_abort;
1320 				break;
1321 			case 'b':		/* break */
1322 				_Vmassert |= VM_break;
1323 				break;
1324 			case 'c':		/* check */
1325 				_Vmassert |= VM_check;
1326 				break;
1327 			case 'f':		/* free */
1328 				_Vmassert |= VM_free;
1329 				break;
1330 			case 'k':		/* keep */
1331 				_Vmassert |= VM_keep;
1332 				break;
1333 			case 'm':
1334 				if (v)
1335 					switch (t[1])
1336 					{
1337 					case 'e': /* method=METHOD */
1338 						if (!vm)
1339 						{
1340 							if ((v[0] == 'V' || v[0] == 'v') && (v[1] == 'M' || v[1] == 'm'))
1341 								v += 2;
1342 							if (strcmp(v, "debug") == 0)
1343 								vm = vmopen(Vmdcsystem, Vmdebug, 0);
1344 							else if (strcmp(v, "profile") == 0)
1345 								vm = vmopen(Vmdcsystem, Vmprofile, 0);
1346 							else if (strcmp(v, "last") == 0)
1347 								vm = vmopen(Vmdcsystem, Vmlast, 0);
1348 							else if (strcmp(v, "best") == 0)
1349 								vm = Vmheap;
1350 						}
1351 						break;
1352 					case 'm': /* mmap */
1353 						_Vmassert |= VM_mmap;
1354 						break;
1355 					}
1356 				break;
1357 			case 'p':
1358 				if (v)
1359 					switch (t[1])
1360 					{
1361 					case 'e':	/* period=<count> */
1362 						if (!vm)
1363 							vm = vmopen(Vmdcsystem, Vmdebug, 0);
1364 						if (vm && vm->meth.meth == VM_MTDEBUG)
1365 							_Vmdbcheck = atou(&v);
1366 						break;
1367 					case 'r':	/* profile=<path> */
1368 						if (!vm)
1369 							vm = vmopen(Vmdcsystem, Vmprofile, 0);
1370 						if (v && vm && vm->meth.meth == VM_MTPROFILE)
1371 							_Vmpffd = createfile(v);
1372 						break;
1373 					}
1374 				break;
1375 			case 's':		/* start=<count> */
1376 				if (!vm)
1377 					vm = vmopen(Vmdcsystem, Vmdebug, 0);
1378 				if (v && vm && vm->meth.meth == VM_MTDEBUG)
1379 					_Vmdbstart = atou(&v);
1380 				break;
1381 			case 't':		/* trace=<path> */
1382 				trace = v;
1383 				break;
1384 			case 'w':
1385 				if (t[1] == 'a')
1386 					switch (t[2])
1387 					{
1388 					case 'r':	/* warn=<path> */
1389 						if (!vm)
1390 							vm = vmopen(Vmdcsystem, Vmdebug, 0);
1391 						if (v && vm && vm->meth.meth == VM_MTDEBUG && (fd = createfile(v)) >= 0)
1392 							vmdebug(fd);
1393 						break;
1394 					case 't':	/* watch=<addr> */
1395 						if (!vm)
1396 							vm = vmopen(Vmdcsystem, Vmdebug, 0);
1397 						if (v && vm && vm->meth.meth == VM_MTDEBUG && (n = atou(&v)) >= 0)
1398 							vmdbwatch((Void_t*)n);
1399 						break;
1400 					}
1401 				break;
1402 			}
1403 		}
1404 	}
1405 
1406 	/* slip in the new region now so that malloc() will work fine */
1407 
1408 	if (vm)
1409 	{
1410 		if (vm->meth.meth == VM_MTDEBUG)
1411 			_Vmdbcheck = 1;
1412 		Vmregion = vm;
1413 	}
1414 
1415 	/* enable tracing -- this currently disables multiple regions */
1416 
1417 	if (trace)
1418 	{
1419 		setregmax(0);
1420 		if ((fd = createfile(trace)) >= 0)
1421 		{
1422 			vmset(Vmregion, VM_TRACE, 1);
1423 			vmtrace(fd);
1424 		}
1425 	}
1426 	else if (Vmregion != Vmheap || asometh(0, 0)->type == ASO_SIGNAL)
1427 		setregmax(0);
1428 
1429 	/* make sure that profile data is output upon exiting */
1430 
1431 	if (vm && vm->meth.meth == VM_MTPROFILE)
1432 	{
1433 		if (_Vmpffd < 0)
1434 			_Vmpffd = 2;
1435 		/* this may wind up calling malloc(), but region is ok now */
1436 		atexit(pfprint);
1437 	}
1438 	else if (_Vmpffd >= 0)
1439 	{
1440 		close(_Vmpffd);
1441 		_Vmpffd = -1;
1442 	}
1443 }
1444 
1445 /*
1446  * ast semi-private workaround for system functions
1447  * that misbehave by passing bogus addresses to free()
1448  *
1449  * not prototyped in any header to keep it ast semi-private
1450  *
1451  * to keep malloc() data by disabling free()
1452  *	extern _vmkeep(int);
1453  *	int r = _vmkeep(1);
1454  * and to restore to the previous state
1455  *	(void)_vmkeep(r);
1456  */
1457 
1458 int
1459 #if __STD_C
1460 _vmkeep(int v)
1461 #else
1462 _vmkeep(v)
1463 int	v;
1464 #endif
1465 {
1466 	int	r;
1467 
1468 	r = !!(_Vmassert & VM_keep);
1469 	if (v)
1470 		_Vmassert |= VM_keep;
1471 	else
1472 		_Vmassert &= ~VM_keep;
1473 	return r;
1474 }
1475 
1476 #endif /*_UWIN*/
1477