xref: /titanic_50/usr/src/lib/libmalloc/common/malloc.c (revision 7c478bd95313f5f23a4c958a745db2134aa03244)
1*7c478bd9Sstevel@tonic-gate /*
2*7c478bd9Sstevel@tonic-gate  * CDDL HEADER START
3*7c478bd9Sstevel@tonic-gate  *
4*7c478bd9Sstevel@tonic-gate  * The contents of this file are subject to the terms of the
5*7c478bd9Sstevel@tonic-gate  * Common Development and Distribution License, Version 1.0 only
6*7c478bd9Sstevel@tonic-gate  * (the "License").  You may not use this file except in compliance
7*7c478bd9Sstevel@tonic-gate  * with the License.
8*7c478bd9Sstevel@tonic-gate  *
9*7c478bd9Sstevel@tonic-gate  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10*7c478bd9Sstevel@tonic-gate  * or http://www.opensolaris.org/os/licensing.
11*7c478bd9Sstevel@tonic-gate  * See the License for the specific language governing permissions
12*7c478bd9Sstevel@tonic-gate  * and limitations under the License.
13*7c478bd9Sstevel@tonic-gate  *
14*7c478bd9Sstevel@tonic-gate  * When distributing Covered Code, include this CDDL HEADER in each
15*7c478bd9Sstevel@tonic-gate  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16*7c478bd9Sstevel@tonic-gate  * If applicable, add the following below this CDDL HEADER, with the
17*7c478bd9Sstevel@tonic-gate  * fields enclosed by brackets "[]" replaced with your own identifying
18*7c478bd9Sstevel@tonic-gate  * information: Portions Copyright [yyyy] [name of copyright owner]
19*7c478bd9Sstevel@tonic-gate  *
20*7c478bd9Sstevel@tonic-gate  * CDDL HEADER END
21*7c478bd9Sstevel@tonic-gate  */
22*7c478bd9Sstevel@tonic-gate /*	Copyright (c) 1988 AT&T	*/
23*7c478bd9Sstevel@tonic-gate /*	  All Rights Reserved  	*/
24*7c478bd9Sstevel@tonic-gate 
25*7c478bd9Sstevel@tonic-gate 
26*7c478bd9Sstevel@tonic-gate #pragma ident	"%Z%%M%	%I%	%E% SMI"
27*7c478bd9Sstevel@tonic-gate 
28*7c478bd9Sstevel@tonic-gate /*
29*7c478bd9Sstevel@tonic-gate  * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
30*7c478bd9Sstevel@tonic-gate  * Use is subject to license terms.
31*7c478bd9Sstevel@tonic-gate  */
32*7c478bd9Sstevel@tonic-gate 
33*7c478bd9Sstevel@tonic-gate #pragma weak mallopt = _mallopt
34*7c478bd9Sstevel@tonic-gate #pragma weak mallinfo = _mallinfo
35*7c478bd9Sstevel@tonic-gate #pragma weak cfree = _cfree
36*7c478bd9Sstevel@tonic-gate #pragma weak memalign = _memalign
37*7c478bd9Sstevel@tonic-gate #pragma weak valloc = _valloc
38*7c478bd9Sstevel@tonic-gate 
39*7c478bd9Sstevel@tonic-gate #include <sys/types.h>
40*7c478bd9Sstevel@tonic-gate 
41*7c478bd9Sstevel@tonic-gate #ifndef debug
42*7c478bd9Sstevel@tonic-gate #define	NDEBUG
43*7c478bd9Sstevel@tonic-gate #endif
44*7c478bd9Sstevel@tonic-gate 
45*7c478bd9Sstevel@tonic-gate #include <stdlib.h>
46*7c478bd9Sstevel@tonic-gate #include <string.h>
47*7c478bd9Sstevel@tonic-gate #include "assert.h"
48*7c478bd9Sstevel@tonic-gate #include "malloc.h"
49*7c478bd9Sstevel@tonic-gate #include "mallint.h"
50*7c478bd9Sstevel@tonic-gate #include <thread.h>
51*7c478bd9Sstevel@tonic-gate #include <synch.h>
52*7c478bd9Sstevel@tonic-gate #include <unistd.h>
53*7c478bd9Sstevel@tonic-gate #include <limits.h>
54*7c478bd9Sstevel@tonic-gate 
55*7c478bd9Sstevel@tonic-gate static mutex_t mlock = DEFAULTMUTEX;
56*7c478bd9Sstevel@tonic-gate static ssize_t freespace(struct holdblk *);
57*7c478bd9Sstevel@tonic-gate static void *malloc_unlocked(size_t, int);
58*7c478bd9Sstevel@tonic-gate static void *realloc_unlocked(void *, size_t);
59*7c478bd9Sstevel@tonic-gate static void free_unlocked(void *);
60*7c478bd9Sstevel@tonic-gate static void *morecore(size_t);
61*7c478bd9Sstevel@tonic-gate 
62*7c478bd9Sstevel@tonic-gate /*
63*7c478bd9Sstevel@tonic-gate  * user level memory allocator (malloc, free, realloc)
64*7c478bd9Sstevel@tonic-gate  *
65*7c478bd9Sstevel@tonic-gate  *	-malloc, free, realloc and mallopt form a memory allocator
66*7c478bd9Sstevel@tonic-gate  *	similar to malloc, free, and realloc.  The routines
67*7c478bd9Sstevel@tonic-gate  *	here are much faster than the original, with slightly worse
68*7c478bd9Sstevel@tonic-gate  *	space usage (a few percent difference on most input).  They
69*7c478bd9Sstevel@tonic-gate  *	do not have the property that data in freed blocks is left
70*7c478bd9Sstevel@tonic-gate  *	untouched until the space is reallocated.
71*7c478bd9Sstevel@tonic-gate  *
72*7c478bd9Sstevel@tonic-gate  *	-Memory is kept in the "arena", a singly linked list of blocks.
73*7c478bd9Sstevel@tonic-gate  *	These blocks are of 3 types.
74*7c478bd9Sstevel@tonic-gate  *		1. A free block.  This is a block not in use by the
75*7c478bd9Sstevel@tonic-gate  *		   user.  It has a 3 word header. (See description
76*7c478bd9Sstevel@tonic-gate  *		   of the free queue.)
77*7c478bd9Sstevel@tonic-gate  *		2. An allocated block.  This is a block the user has
78*7c478bd9Sstevel@tonic-gate  *		   requested.  It has only a 1 word header, pointing
79*7c478bd9Sstevel@tonic-gate  *		   to the next block of any sort.
80*7c478bd9Sstevel@tonic-gate  *		3. A permanently allocated block.  This covers space
81*7c478bd9Sstevel@tonic-gate  *		   acquired by the user directly through sbrk().  It
82*7c478bd9Sstevel@tonic-gate  *		   has a 1 word header, as does 2.
83*7c478bd9Sstevel@tonic-gate  *	Blocks of type 1 have the lower bit of the pointer to the
84*7c478bd9Sstevel@tonic-gate  *	nextblock = 0.  Blocks of type 2 and 3 have that bit set,
85*7c478bd9Sstevel@tonic-gate  *	to mark them busy.
86*7c478bd9Sstevel@tonic-gate  *
87*7c478bd9Sstevel@tonic-gate  *	-Unallocated blocks are kept on an unsorted doubly linked
88*7c478bd9Sstevel@tonic-gate  *	free list.
89*7c478bd9Sstevel@tonic-gate  *
90*7c478bd9Sstevel@tonic-gate  *	-Memory is allocated in blocks, with sizes specified by the
91*7c478bd9Sstevel@tonic-gate  *	user.  A circular first-fit strategy is used, with a roving
92*7c478bd9Sstevel@tonic-gate  *	head of the free queue, which prevents bunching of small
93*7c478bd9Sstevel@tonic-gate  *	blocks at the head of the queue.
94*7c478bd9Sstevel@tonic-gate  *
95*7c478bd9Sstevel@tonic-gate  *	-Compaction is performed at free time of any blocks immediately
96*7c478bd9Sstevel@tonic-gate  *	following the freed block.  The freed block will be combined
97*7c478bd9Sstevel@tonic-gate  *	with a preceding block during the search phase of malloc.
98*7c478bd9Sstevel@tonic-gate  *	Since a freed block is added at the front of the free queue,
99*7c478bd9Sstevel@tonic-gate  *	which is moved to the end of the queue if considered and
100*7c478bd9Sstevel@tonic-gate  *	rejected during the search, fragmentation only occurs if
101*7c478bd9Sstevel@tonic-gate  *	a block with a contiguous preceding block that is free is
102*7c478bd9Sstevel@tonic-gate  *	freed and reallocated on the next call to malloc.  The
103*7c478bd9Sstevel@tonic-gate  *	time savings of this strategy is judged to be worth the
104*7c478bd9Sstevel@tonic-gate  *	occasional waste of memory.
105*7c478bd9Sstevel@tonic-gate  *
106*7c478bd9Sstevel@tonic-gate  *	-Small blocks (of size <= maxfast) are not allocated directly.
107*7c478bd9Sstevel@tonic-gate  *	A large "holding" block is allocated via a recursive call to
108*7c478bd9Sstevel@tonic-gate  * malloc.  This block contains a header and numlblks small blocks.
109*7c478bd9Sstevel@tonic-gate  *	Holding blocks for a given size of small block (rounded to the
110*7c478bd9Sstevel@tonic-gate  *	nearest ALIGNSZ bytes) are kept on a queue with the property that any
111*7c478bd9Sstevel@tonic-gate  *	holding block with an unused small block is in front of any without.
112*7c478bd9Sstevel@tonic-gate  *	A list of free blocks is kept within the holding block.
113*7c478bd9Sstevel@tonic-gate  */
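
/*
 * A rough illustration of the busy-bit convention described above (an
 * assumption-laden sketch; the real SETBUSY/CLRBUSY/TESTBUSY macros
 * live in mallint.h):
 *
 *	#define	BUSYBIT		1UL
 *	#define	SETBUSY(p)	((struct header *)((uintptr_t)(p) | BUSYBIT))
 *	#define	CLRBUSY(p)	((struct header *)((uintptr_t)(p) & ~BUSYBIT))
 *	#define	TESTBUSY(p)	((uintptr_t)(p) & BUSYBIT)
 *
 * A block is marked allocated by setting the low bit of its forward
 * pointer, and the true successor is recovered by masking the bit off
 * again.  This works because headers are word aligned, so the low bit
 * of every valid header address is zero.
 */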
114*7c478bd9Sstevel@tonic-gate 
115*7c478bd9Sstevel@tonic-gate /*
116*7c478bd9Sstevel@tonic-gate  *	description of arena, free queue, holding blocks etc.
117*7c478bd9Sstevel@tonic-gate  *
118*7c478bd9Sstevel@tonic-gate  * The new compiler and linker do not guarantee the order of initialized data.
119*7c478bd9Sstevel@tonic-gate  * Define freeptr as arena[2-3] to guarantee it follows arena in memory.
120*7c478bd9Sstevel@tonic-gate  * Later code depends on this order.
121*7c478bd9Sstevel@tonic-gate  */
122*7c478bd9Sstevel@tonic-gate 
123*7c478bd9Sstevel@tonic-gate static struct header arena[4] = {
124*7c478bd9Sstevel@tonic-gate 	    {0, 0, 0},
125*7c478bd9Sstevel@tonic-gate 	    {0, 0, 0},
126*7c478bd9Sstevel@tonic-gate 	    {0, 0, 0},
127*7c478bd9Sstevel@tonic-gate 	    {0, 0, 0}
128*7c478bd9Sstevel@tonic-gate 	};
129*7c478bd9Sstevel@tonic-gate 				/*
130*7c478bd9Sstevel@tonic-gate 				 * the second word is a minimal block to
131*7c478bd9Sstevel@tonic-gate 				 * start the arena. The first is a busy
132*7c478bd9Sstevel@tonic-gate 				 * block to be pointed to by the last block.
133*7c478bd9Sstevel@tonic-gate 				 */
134*7c478bd9Sstevel@tonic-gate 
135*7c478bd9Sstevel@tonic-gate #define	freeptr (arena + 2)
136*7c478bd9Sstevel@tonic-gate 				/* first and last entry in free list */
137*7c478bd9Sstevel@tonic-gate static struct header *arenaend;	/* ptr to block marking high end of arena */
138*7c478bd9Sstevel@tonic-gate static struct header *lastblk;	/* the highest block in the arena */
139*7c478bd9Sstevel@tonic-gate static struct holdblk **holdhead;   /* pointer to array of head pointers */
140*7c478bd9Sstevel@tonic-gate 				    /* to holding block chains */
141*7c478bd9Sstevel@tonic-gate /*
142*7c478bd9Sstevel@tonic-gate  * In order to save time calculating indices, the array is 1 too
143*7c478bd9Sstevel@tonic-gate  * large, and the first element is unused
144*7c478bd9Sstevel@tonic-gate  *
145*7c478bd9Sstevel@tonic-gate  * Variables controlling algorithm, esp. how holding blocks are used
146*7c478bd9Sstevel@tonic-gate  */
147*7c478bd9Sstevel@tonic-gate static int numlblks = NUMLBLKS;
148*7c478bd9Sstevel@tonic-gate static int minhead = MINHEAD;
149*7c478bd9Sstevel@tonic-gate static int change = 0;	/* set != 0 once param changes are no longer allowed */
150*7c478bd9Sstevel@tonic-gate static int fastct = FASTCT;
151*7c478bd9Sstevel@tonic-gate static unsigned int maxfast = MAXFAST;
152*7c478bd9Sstevel@tonic-gate /* number of small block sizes to map to one size */
153*7c478bd9Sstevel@tonic-gate 
154*7c478bd9Sstevel@tonic-gate static int grain = ALIGNSZ;
155*7c478bd9Sstevel@tonic-gate 
156*7c478bd9Sstevel@tonic-gate #ifdef debug
157*7c478bd9Sstevel@tonic-gate static int case1count = 0;
158*7c478bd9Sstevel@tonic-gate 
159*7c478bd9Sstevel@tonic-gate static void
160*7c478bd9Sstevel@tonic-gate checkq(void)
161*7c478bd9Sstevel@tonic-gate {
162*7c478bd9Sstevel@tonic-gate 	register struct header *p;
163*7c478bd9Sstevel@tonic-gate 
164*7c478bd9Sstevel@tonic-gate 	p = &freeptr[0];
165*7c478bd9Sstevel@tonic-gate 
166*7c478bd9Sstevel@tonic-gate 	/* check forward */
167*7c478bd9Sstevel@tonic-gate 	/*CSTYLED*/
168*7c478bd9Sstevel@tonic-gate 	while (p != &freeptr[1]) {
169*7c478bd9Sstevel@tonic-gate 		p = p->nextfree;
170*7c478bd9Sstevel@tonic-gate 		assert(p->prevfree->nextfree == p);
171*7c478bd9Sstevel@tonic-gate 	}
172*7c478bd9Sstevel@tonic-gate 
173*7c478bd9Sstevel@tonic-gate 	/* check backward */
174*7c478bd9Sstevel@tonic-gate 	/*CSTYLED*/
175*7c478bd9Sstevel@tonic-gate 	while (p != &freeptr[0]) {
176*7c478bd9Sstevel@tonic-gate 		p = p->prevfree;
177*7c478bd9Sstevel@tonic-gate 		assert(p->nextfree->prevfree == p);
178*7c478bd9Sstevel@tonic-gate 	}
179*7c478bd9Sstevel@tonic-gate }
180*7c478bd9Sstevel@tonic-gate #endif
181*7c478bd9Sstevel@tonic-gate 
182*7c478bd9Sstevel@tonic-gate 
183*7c478bd9Sstevel@tonic-gate /*
184*7c478bd9Sstevel@tonic-gate  * malloc(nbytes) - give a user nbytes to use
185*7c478bd9Sstevel@tonic-gate  */
186*7c478bd9Sstevel@tonic-gate 
187*7c478bd9Sstevel@tonic-gate void *
188*7c478bd9Sstevel@tonic-gate malloc(size_t nbytes)
189*7c478bd9Sstevel@tonic-gate {
190*7c478bd9Sstevel@tonic-gate 	void *ret;
191*7c478bd9Sstevel@tonic-gate 
192*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
193*7c478bd9Sstevel@tonic-gate 	ret = malloc_unlocked(nbytes, 0);
194*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
195*7c478bd9Sstevel@tonic-gate 	return (ret);
196*7c478bd9Sstevel@tonic-gate }
197*7c478bd9Sstevel@tonic-gate 
198*7c478bd9Sstevel@tonic-gate /*
199*7c478bd9Sstevel@tonic-gate  * Use malloc_unlocked() to get the address to start with; Given this
200*7c478bd9Sstevel@tonic-gate  * address, find out the closest address that aligns with the request
201*7c478bd9Sstevel@tonic-gate  * and return that address after doing some housekeeping (refer to the
202*7c478bd9Sstevel@tonic-gate  * ASCII art below).
203*7c478bd9Sstevel@tonic-gate  */
204*7c478bd9Sstevel@tonic-gate void *
205*7c478bd9Sstevel@tonic-gate _memalign(size_t alignment, size_t size)
206*7c478bd9Sstevel@tonic-gate {
207*7c478bd9Sstevel@tonic-gate 	void *alloc_buf;
208*7c478bd9Sstevel@tonic-gate 	struct header *hd;
209*7c478bd9Sstevel@tonic-gate 	size_t alloc_size;
210*7c478bd9Sstevel@tonic-gate 	uintptr_t fr;
211*7c478bd9Sstevel@tonic-gate 	static int realloc;
212*7c478bd9Sstevel@tonic-gate 
213*7c478bd9Sstevel@tonic-gate 	if (size == 0 || alignment == 0 ||
214*7c478bd9Sstevel@tonic-gate 		(alignment & (alignment - 1)) != 0) {
215*7c478bd9Sstevel@tonic-gate 		return (NULL);
216*7c478bd9Sstevel@tonic-gate 	}
217*7c478bd9Sstevel@tonic-gate 	if (alignment <= ALIGNSZ)
218*7c478bd9Sstevel@tonic-gate 		return (malloc(size));
219*7c478bd9Sstevel@tonic-gate 
220*7c478bd9Sstevel@tonic-gate 	alloc_size = size + alignment;
221*7c478bd9Sstevel@tonic-gate 	if (alloc_size < size) { /* overflow */
222*7c478bd9Sstevel@tonic-gate 		return (NULL);
223*7c478bd9Sstevel@tonic-gate 	}
224*7c478bd9Sstevel@tonic-gate 
225*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
226*7c478bd9Sstevel@tonic-gate 	alloc_buf = malloc_unlocked(alloc_size, 1);
227*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
228*7c478bd9Sstevel@tonic-gate 
229*7c478bd9Sstevel@tonic-gate 	if (alloc_buf == NULL)
230*7c478bd9Sstevel@tonic-gate 		return (NULL);
231*7c478bd9Sstevel@tonic-gate 	fr = (uintptr_t)alloc_buf;
232*7c478bd9Sstevel@tonic-gate 
233*7c478bd9Sstevel@tonic-gate 	fr = (fr + alignment - 1) / alignment * alignment;
234*7c478bd9Sstevel@tonic-gate 
235*7c478bd9Sstevel@tonic-gate 	if (fr == (uintptr_t)alloc_buf)
236*7c478bd9Sstevel@tonic-gate 		return (alloc_buf);
237*7c478bd9Sstevel@tonic-gate 
238*7c478bd9Sstevel@tonic-gate 	if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
239*7c478bd9Sstevel@tonic-gate 		/*
240*7c478bd9Sstevel@tonic-gate 		 * we hit an edge case, where the space ahead of aligned
241*7c478bd9Sstevel@tonic-gate 		 * address is not sufficient to hold 'header' and hence we
242*7c478bd9Sstevel@tonic-gate 		 * can't free it. So double the allocation request.
243*7c478bd9Sstevel@tonic-gate 		 */
244*7c478bd9Sstevel@tonic-gate 		realloc++;
245*7c478bd9Sstevel@tonic-gate 		free(alloc_buf);
246*7c478bd9Sstevel@tonic-gate 		alloc_size = size + alignment*2;
247*7c478bd9Sstevel@tonic-gate 		if (alloc_size < size) {
248*7c478bd9Sstevel@tonic-gate 			return (NULL);
249*7c478bd9Sstevel@tonic-gate 		}
250*7c478bd9Sstevel@tonic-gate 
251*7c478bd9Sstevel@tonic-gate 		(void) mutex_lock(&mlock);
252*7c478bd9Sstevel@tonic-gate 		alloc_buf = malloc_unlocked(alloc_size, 1);
253*7c478bd9Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
254*7c478bd9Sstevel@tonic-gate 
255*7c478bd9Sstevel@tonic-gate 		if (alloc_buf == NULL)
256*7c478bd9Sstevel@tonic-gate 			return (NULL);
257*7c478bd9Sstevel@tonic-gate 		fr = (uintptr_t)alloc_buf;
258*7c478bd9Sstevel@tonic-gate 
259*7c478bd9Sstevel@tonic-gate 		fr = (fr + alignment - 1) / alignment * alignment;
260*7c478bd9Sstevel@tonic-gate 		if (fr == (uintptr_t)alloc_buf)
261*7c478bd9Sstevel@tonic-gate 			return (alloc_buf);
262*7c478bd9Sstevel@tonic-gate 		if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
263*7c478bd9Sstevel@tonic-gate 			fr = fr + alignment;
264*7c478bd9Sstevel@tonic-gate 		}
265*7c478bd9Sstevel@tonic-gate 	}
266*7c478bd9Sstevel@tonic-gate 
267*7c478bd9Sstevel@tonic-gate 	/*
268*7c478bd9Sstevel@tonic-gate 	 *	+-------+		+-------+
269*7c478bd9Sstevel@tonic-gate 	 *  +---| <a>   |		| <a>   |--+
270*7c478bd9Sstevel@tonic-gate 	 *  |   +-------+<--alloc_buf-->+-------+  |
271*7c478bd9Sstevel@tonic-gate 	 *  |   |	|		|	|  |
272*7c478bd9Sstevel@tonic-gate 	 *  |   |	|		|	|  |
273*7c478bd9Sstevel@tonic-gate 	 *  |   |	|		|	|  |
274*7c478bd9Sstevel@tonic-gate 	 *  |   |	|	 hd-->  +-------+  |
275*7c478bd9Sstevel@tonic-gate 	 *  |   |	|	    +---|  <b>  |<-+
276*7c478bd9Sstevel@tonic-gate 	 *  |   |	|	    |   +-------+<--- fr
277*7c478bd9Sstevel@tonic-gate 	 *  |   |	|	    |   |	|
278*7c478bd9Sstevel@tonic-gate 	 *  |   |	|	    |   |	|
279*7c478bd9Sstevel@tonic-gate 	 *  |   |	|	    |   |	|
280*7c478bd9Sstevel@tonic-gate 	 *  |   |	|	    |   |	|
281*7c478bd9Sstevel@tonic-gate 	 *  |   |	|	    |   |	|
282*7c478bd9Sstevel@tonic-gate 	 *  |   |	|	    |   |	|
283*7c478bd9Sstevel@tonic-gate 	 *  |   +-------+	    |   +-------+
284*7c478bd9Sstevel@tonic-gate 	 *  +-->|  next |	    +-->|  next |
285*7c478bd9Sstevel@tonic-gate 	 *	+-------+		+-------+
286*7c478bd9Sstevel@tonic-gate 	 *
287*7c478bd9Sstevel@tonic-gate 	 */
288*7c478bd9Sstevel@tonic-gate 	hd = (struct header *)((char *)fr - minhead);
289*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
290*7c478bd9Sstevel@tonic-gate 	hd->nextblk = ((struct header *)((char *)alloc_buf - minhead))->nextblk;
291*7c478bd9Sstevel@tonic-gate 	((struct header *)((char *)alloc_buf - minhead))->nextblk = SETBUSY(hd);
292*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
293*7c478bd9Sstevel@tonic-gate 	free(alloc_buf);
294*7c478bd9Sstevel@tonic-gate 	CHECKQ
295*7c478bd9Sstevel@tonic-gate 	return ((void *)fr);
296*7c478bd9Sstevel@tonic-gate }
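
/*
 * Worked example of the alignment arithmetic above (all numbers are
 * made up for illustration): with alignment == 64 and malloc_unlocked()
 * returning alloc_buf == 0x10008, fr = (0x10008 + 63) / 64 * 64 ==
 * 0x10040, the first 64-byte boundary at or above alloc_buf.  The gap
 * between alloc_buf and fr is turned into its own busy header (hd) so
 * the leading fragment can be given back with free(alloc_buf) while the
 * caller keeps the aligned region that starts at fr.
 */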
297*7c478bd9Sstevel@tonic-gate 
298*7c478bd9Sstevel@tonic-gate void *
299*7c478bd9Sstevel@tonic-gate _valloc(size_t size)
300*7c478bd9Sstevel@tonic-gate {
301*7c478bd9Sstevel@tonic-gate 	static unsigned pagesize;
302*7c478bd9Sstevel@tonic-gate 	if (size == 0)
303*7c478bd9Sstevel@tonic-gate 		return (NULL);
304*7c478bd9Sstevel@tonic-gate 
305*7c478bd9Sstevel@tonic-gate 	if (!pagesize)
306*7c478bd9Sstevel@tonic-gate 		pagesize = sysconf(_SC_PAGESIZE);
307*7c478bd9Sstevel@tonic-gate 
308*7c478bd9Sstevel@tonic-gate 	return (memalign(pagesize, size));
309*7c478bd9Sstevel@tonic-gate }
310*7c478bd9Sstevel@tonic-gate 
311*7c478bd9Sstevel@tonic-gate /*
312*7c478bd9Sstevel@tonic-gate  * malloc_unlocked(nbytes, nosmall) - Do the real work for malloc
313*7c478bd9Sstevel@tonic-gate  */
314*7c478bd9Sstevel@tonic-gate 
315*7c478bd9Sstevel@tonic-gate static void *
316*7c478bd9Sstevel@tonic-gate malloc_unlocked(size_t nbytes, int nosmall)
317*7c478bd9Sstevel@tonic-gate {
318*7c478bd9Sstevel@tonic-gate 	struct header *blk;
319*7c478bd9Sstevel@tonic-gate 	size_t nb;	/* size of entire block we need */
320*7c478bd9Sstevel@tonic-gate 
321*7c478bd9Sstevel@tonic-gate 	/* on first call, initialize */
322*7c478bd9Sstevel@tonic-gate 	if (freeptr[0].nextfree == GROUND) {
323*7c478bd9Sstevel@tonic-gate 		/* initialize arena */
324*7c478bd9Sstevel@tonic-gate 		arena[1].nextblk = (struct header *)BUSY;
325*7c478bd9Sstevel@tonic-gate 		arena[0].nextblk = (struct header *)BUSY;
326*7c478bd9Sstevel@tonic-gate 		lastblk = arenaend = &(arena[1]);
327*7c478bd9Sstevel@tonic-gate 		/* initialize free queue */
328*7c478bd9Sstevel@tonic-gate 		freeptr[0].nextfree = &(freeptr[1]);
329*7c478bd9Sstevel@tonic-gate 		freeptr[1].nextblk = &(arena[0]);
330*7c478bd9Sstevel@tonic-gate 		freeptr[1].prevfree = &(freeptr[0]);
331*7c478bd9Sstevel@tonic-gate 		/* mark that small blocks not init yet */
332*7c478bd9Sstevel@tonic-gate 	}
333*7c478bd9Sstevel@tonic-gate 	if (nbytes == 0)
334*7c478bd9Sstevel@tonic-gate 		return (NULL);
335*7c478bd9Sstevel@tonic-gate 
336*7c478bd9Sstevel@tonic-gate 	if (nbytes <= maxfast && !nosmall) {
337*7c478bd9Sstevel@tonic-gate 		/*
338*7c478bd9Sstevel@tonic-gate 		 * We can allocate out of a holding block
339*7c478bd9Sstevel@tonic-gate 		 */
340*7c478bd9Sstevel@tonic-gate 		struct holdblk *holdblk; /* head of right sized queue */
341*7c478bd9Sstevel@tonic-gate 		struct lblk *lblk;	 /* pointer to a little block */
342*7c478bd9Sstevel@tonic-gate 		struct holdblk *newhold;
343*7c478bd9Sstevel@tonic-gate 
344*7c478bd9Sstevel@tonic-gate 		if (!change) {
345*7c478bd9Sstevel@tonic-gate 			int i;
346*7c478bd9Sstevel@tonic-gate 			/*
347*7c478bd9Sstevel@tonic-gate 			 * This allocates space for hold block
348*7c478bd9Sstevel@tonic-gate 			 * pointers by calling malloc recursively.
349*7c478bd9Sstevel@tonic-gate 			 * Maxfast is temporarily set to 0, to
350*7c478bd9Sstevel@tonic-gate 			 * avoid infinite recursion.  allocate
351*7c478bd9Sstevel@tonic-gate 			 * space for an extra ptr so that an index
352*7c478bd9Sstevel@tonic-gate 			 * is just ->blksz/grain, with the first
353*7c478bd9Sstevel@tonic-gate 			 * ptr unused.
354*7c478bd9Sstevel@tonic-gate 			 */
355*7c478bd9Sstevel@tonic-gate 			change = 1;	/* change to algorithm params */
356*7c478bd9Sstevel@tonic-gate 					/* no longer allowed */
357*7c478bd9Sstevel@tonic-gate 			/*
358*7c478bd9Sstevel@tonic-gate 			 * temporarily alter maxfast, to avoid
359*7c478bd9Sstevel@tonic-gate 			 * infinite recursion
360*7c478bd9Sstevel@tonic-gate 			 */
361*7c478bd9Sstevel@tonic-gate 			maxfast = 0;
362*7c478bd9Sstevel@tonic-gate 			holdhead = (struct holdblk **)
363*7c478bd9Sstevel@tonic-gate 			    malloc_unlocked(sizeof (struct holdblk *) *
364*7c478bd9Sstevel@tonic-gate 			    (fastct + 1), 0);
365*7c478bd9Sstevel@tonic-gate 			if (holdhead == NULL)
366*7c478bd9Sstevel@tonic-gate 				return (malloc_unlocked(nbytes, 0));
367*7c478bd9Sstevel@tonic-gate 			for (i = 1; i <= fastct; i++) {
368*7c478bd9Sstevel@tonic-gate 				holdhead[i] = HGROUND;
369*7c478bd9Sstevel@tonic-gate 			}
370*7c478bd9Sstevel@tonic-gate 			maxfast = fastct * grain;
371*7c478bd9Sstevel@tonic-gate 		}
372*7c478bd9Sstevel@tonic-gate 		/*
373*7c478bd9Sstevel@tonic-gate 		 * Note that this uses the absolute min header size (MINHEAD)
374*7c478bd9Sstevel@tonic-gate 		 * unlike the large block case which uses minhead
375*7c478bd9Sstevel@tonic-gate 		 *
376*7c478bd9Sstevel@tonic-gate 		 * round up to nearest multiple of grain
377*7c478bd9Sstevel@tonic-gate 		 * code assumes grain is a multiple of MINHEAD
378*7c478bd9Sstevel@tonic-gate 		 */
379*7c478bd9Sstevel@tonic-gate 		/* round up to grain */
380*7c478bd9Sstevel@tonic-gate 		nb = (nbytes + grain - 1) / grain * grain;
381*7c478bd9Sstevel@tonic-gate 		holdblk = holdhead[nb / grain];
382*7c478bd9Sstevel@tonic-gate 		nb = nb + MINHEAD;
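		/*
		 * Worked example (assuming, purely for illustration, that
		 * grain == 16 and MINHEAD == 8): a request for nbytes == 10
		 * rounds up to nb == 16, selects holdhead[16 / 16], i.e.
		 * holdhead[1], and nb then becomes 24, the space each
		 * little block occupies inside its holding block.
		 */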
383*7c478bd9Sstevel@tonic-gate 		/*
384*7c478bd9Sstevel@tonic-gate 		 * look for space in the holding block.  Blocks with
385*7c478bd9Sstevel@tonic-gate 		 * space will be in front of those without
386*7c478bd9Sstevel@tonic-gate 		 */
387*7c478bd9Sstevel@tonic-gate 		if ((holdblk != HGROUND) && (holdblk->lfreeq != LGROUND))  {
388*7c478bd9Sstevel@tonic-gate 			/* there is space */
389*7c478bd9Sstevel@tonic-gate 			lblk = holdblk->lfreeq;
390*7c478bd9Sstevel@tonic-gate 
391*7c478bd9Sstevel@tonic-gate 			/*
392*7c478bd9Sstevel@tonic-gate 			 * Now make lfreeq point to a free block.
393*7c478bd9Sstevel@tonic-gate 			 * If lblk has been previously allocated and
394*7c478bd9Sstevel@tonic-gate 			 * freed, it has a valid pointer to use.
395*7c478bd9Sstevel@tonic-gate 			 * Otherwise, lblk is at the beginning of
396*7c478bd9Sstevel@tonic-gate 			 * the unallocated blocks at the end of
397*7c478bd9Sstevel@tonic-gate 			 * the holding block, so, if there is room, take
398*7c478bd9Sstevel@tonic-gate 			 * the next space.  If not, mark holdblk full,
399*7c478bd9Sstevel@tonic-gate 			 * and move holdblk to the end of the queue
400*7c478bd9Sstevel@tonic-gate 			 */
401*7c478bd9Sstevel@tonic-gate 			if (lblk < holdblk->unused) {
402*7c478bd9Sstevel@tonic-gate 				/* move to next holdblk, if this one full */
403*7c478bd9Sstevel@tonic-gate 				if ((holdblk->lfreeq =
404*7c478bd9Sstevel@tonic-gate 				    CLRSMAL(lblk->header.nextfree)) ==
405*7c478bd9Sstevel@tonic-gate 				    LGROUND) {
406*7c478bd9Sstevel@tonic-gate 					holdhead[(nb-MINHEAD) / grain] =
407*7c478bd9Sstevel@tonic-gate 					    holdblk->nexthblk;
408*7c478bd9Sstevel@tonic-gate 				}
409*7c478bd9Sstevel@tonic-gate 			} else if (((char *)holdblk->unused + nb) <
410*7c478bd9Sstevel@tonic-gate 			    ((char *)holdblk + HOLDSZ(nb))) {
411*7c478bd9Sstevel@tonic-gate 				holdblk->unused = (struct lblk *)
412*7c478bd9Sstevel@tonic-gate 				    ((char *)holdblk->unused+nb);
413*7c478bd9Sstevel@tonic-gate 				holdblk->lfreeq = holdblk->unused;
414*7c478bd9Sstevel@tonic-gate 			} else {
415*7c478bd9Sstevel@tonic-gate 				holdblk->unused = (struct lblk *)
416*7c478bd9Sstevel@tonic-gate 				    ((char *)holdblk->unused+nb);
417*7c478bd9Sstevel@tonic-gate 				holdblk->lfreeq = LGROUND;
418*7c478bd9Sstevel@tonic-gate 				holdhead[(nb-MINHEAD)/grain] =
419*7c478bd9Sstevel@tonic-gate 				    holdblk->nexthblk;
420*7c478bd9Sstevel@tonic-gate 			}
421*7c478bd9Sstevel@tonic-gate 			/* mark as busy and small */
422*7c478bd9Sstevel@tonic-gate 			lblk->header.holder = (struct holdblk *)SETALL(holdblk);
423*7c478bd9Sstevel@tonic-gate 		} else {
424*7c478bd9Sstevel@tonic-gate 			/* we need a new holding block */
425*7c478bd9Sstevel@tonic-gate 			newhold = (struct holdblk *)
426*7c478bd9Sstevel@tonic-gate 			    malloc_unlocked(HOLDSZ(nb), 0);
427*7c478bd9Sstevel@tonic-gate 			if ((char *)newhold == NULL) {
428*7c478bd9Sstevel@tonic-gate 				return (NULL);
429*7c478bd9Sstevel@tonic-gate 			}
430*7c478bd9Sstevel@tonic-gate 			/* add to head of free queue */
431*7c478bd9Sstevel@tonic-gate 			if (holdblk != HGROUND) {
432*7c478bd9Sstevel@tonic-gate 				newhold->nexthblk = holdblk;
433*7c478bd9Sstevel@tonic-gate 				newhold->prevhblk = holdblk->prevhblk;
434*7c478bd9Sstevel@tonic-gate 				holdblk->prevhblk = newhold;
435*7c478bd9Sstevel@tonic-gate 				newhold->prevhblk->nexthblk = newhold;
436*7c478bd9Sstevel@tonic-gate 			} else {
437*7c478bd9Sstevel@tonic-gate 				newhold->nexthblk = newhold->prevhblk = newhold;
438*7c478bd9Sstevel@tonic-gate 			}
439*7c478bd9Sstevel@tonic-gate 			holdhead[(nb-MINHEAD)/grain] = newhold;
440*7c478bd9Sstevel@tonic-gate 			/* set up newhold */
441*7c478bd9Sstevel@tonic-gate 			lblk = (struct lblk *)(newhold->space);
442*7c478bd9Sstevel@tonic-gate 			newhold->lfreeq = newhold->unused =
443*7c478bd9Sstevel@tonic-gate 			    (struct lblk *)((char *)newhold->space+nb);
444*7c478bd9Sstevel@tonic-gate 			lblk->header.holder = (struct holdblk *)SETALL(newhold);
445*7c478bd9Sstevel@tonic-gate 			newhold->blksz = nb-MINHEAD;
446*7c478bd9Sstevel@tonic-gate 		}
447*7c478bd9Sstevel@tonic-gate #ifdef debug
448*7c478bd9Sstevel@tonic-gate 		assert(((struct holdblk *)CLRALL(lblk->header.holder))->blksz >=
449*7c478bd9Sstevel@tonic-gate 		    nbytes);
450*7c478bd9Sstevel@tonic-gate #endif /* debug */
451*7c478bd9Sstevel@tonic-gate 		return ((char *)lblk + MINHEAD);
452*7c478bd9Sstevel@tonic-gate 	} else {
453*7c478bd9Sstevel@tonic-gate 		/*
454*7c478bd9Sstevel@tonic-gate 		 * We need an ordinary block
455*7c478bd9Sstevel@tonic-gate 		 */
456*7c478bd9Sstevel@tonic-gate 		struct header *newblk;	/* used for creating a block */
457*7c478bd9Sstevel@tonic-gate 
458*7c478bd9Sstevel@tonic-gate 		/* get number of bytes we need */
459*7c478bd9Sstevel@tonic-gate 		nb = nbytes + minhead;
460*7c478bd9Sstevel@tonic-gate 		nb = (nb + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;	/* align */
461*7c478bd9Sstevel@tonic-gate 		nb = (nb > MINBLKSZ) ? nb : MINBLKSZ;
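		/*
		 * Worked example (assuming, purely for illustration, that
		 * minhead == 8, ALIGNSZ == 8 and MINBLKSZ <= 112): a request
		 * for nbytes == 100 becomes nb == 108, which is rounded up
		 * to 112, the total arena space carved out for the block,
		 * header included.
		 */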
462*7c478bd9Sstevel@tonic-gate 		/*
463*7c478bd9Sstevel@tonic-gate 		 * see if there is a big enough block
464*7c478bd9Sstevel@tonic-gate 		 * If none exists, you will get to freeptr[1].
465*7c478bd9Sstevel@tonic-gate 		 * freeptr[1].next = &arena[0], so when you do the test,
466*7c478bd9Sstevel@tonic-gate 		 * the result is a large positive number, since arena[0]
467*7c478bd9Sstevel@tonic-gate 		 * comes before all blocks.  Arena[0] is marked busy so
468*7c478bd9Sstevel@tonic-gate 		 * that it will not be compacted.  This kludge is for the
469*7c478bd9Sstevel@tonic-gate 		 * sake of the almighty efficiency.
470*7c478bd9Sstevel@tonic-gate 		 */
471*7c478bd9Sstevel@tonic-gate 		/* check that a very large request won't cause an inf. loop */
472*7c478bd9Sstevel@tonic-gate 
473*7c478bd9Sstevel@tonic-gate 		if ((freeptr[1].nextblk-&(freeptr[1])) < nb) {
474*7c478bd9Sstevel@tonic-gate 			return (NULL);
475*7c478bd9Sstevel@tonic-gate 		} else {
476*7c478bd9Sstevel@tonic-gate 			struct header *next;		/* following block */
477*7c478bd9Sstevel@tonic-gate 			struct header *nextnext;	/* block after next */
478*7c478bd9Sstevel@tonic-gate 
479*7c478bd9Sstevel@tonic-gate 			blk = freeptr;
480*7c478bd9Sstevel@tonic-gate 			do {
481*7c478bd9Sstevel@tonic-gate 				blk = blk->nextfree;
482*7c478bd9Sstevel@tonic-gate 				/* see if we can compact */
483*7c478bd9Sstevel@tonic-gate 				next = blk->nextblk;
484*7c478bd9Sstevel@tonic-gate 				if (!TESTBUSY(nextnext = next->nextblk)) {
485*7c478bd9Sstevel@tonic-gate 					do {
486*7c478bd9Sstevel@tonic-gate 						DELFREEQ(next);
487*7c478bd9Sstevel@tonic-gate 						next = nextnext;
488*7c478bd9Sstevel@tonic-gate 						nextnext = next->nextblk;
489*7c478bd9Sstevel@tonic-gate 					} while (!TESTBUSY(nextnext));
490*7c478bd9Sstevel@tonic-gate 					/*
491*7c478bd9Sstevel@tonic-gate 					 * next will be at most == to lastblk,
492*7c478bd9Sstevel@tonic-gate 					 * but I think the >= test is faster
493*7c478bd9Sstevel@tonic-gate 					 */
494*7c478bd9Sstevel@tonic-gate 					if (next >= arenaend)
495*7c478bd9Sstevel@tonic-gate 						lastblk = blk;
496*7c478bd9Sstevel@tonic-gate 					blk->nextblk = next;
497*7c478bd9Sstevel@tonic-gate 				}
498*7c478bd9Sstevel@tonic-gate 			} while (((char *)(next) - (char *)blk) < nb);
499*7c478bd9Sstevel@tonic-gate 		}
500*7c478bd9Sstevel@tonic-gate 		/*
501*7c478bd9Sstevel@tonic-gate 		 * if we didn't find a block, get more memory
502*7c478bd9Sstevel@tonic-gate 		 */
503*7c478bd9Sstevel@tonic-gate 		if (blk == &(freeptr[1])) {
504*7c478bd9Sstevel@tonic-gate 			/*
505*7c478bd9Sstevel@tonic-gate 			 * careful coding could likely replace
506*7c478bd9Sstevel@tonic-gate 			 * newend with arenaend
507*7c478bd9Sstevel@tonic-gate 			 */
508*7c478bd9Sstevel@tonic-gate 			struct header *newend;	/* new end of arena */
509*7c478bd9Sstevel@tonic-gate 			ssize_t nget;	/* number of words to get */
510*7c478bd9Sstevel@tonic-gate 
511*7c478bd9Sstevel@tonic-gate 			/*
512*7c478bd9Sstevel@tonic-gate 			 * Three cases - 1. There is space between arenaend
513*7c478bd9Sstevel@tonic-gate 			 *		    and the break value that will become
514*7c478bd9Sstevel@tonic-gate 			 *		    a permanently allocated block.
515*7c478bd9Sstevel@tonic-gate 			 *		 2. Case 1 is not true, and the last
516*7c478bd9Sstevel@tonic-gate 			 *		    block is allocated.
517*7c478bd9Sstevel@tonic-gate 			 *		 3. Case 1 is not true, and the last
518*7c478bd9Sstevel@tonic-gate 			 *		    block is free
519*7c478bd9Sstevel@tonic-gate 			 */
520*7c478bd9Sstevel@tonic-gate 			if ((newblk = (struct header *)sbrk(0)) !=
521*7c478bd9Sstevel@tonic-gate 			    (struct header *)((char *)arenaend + HEADSZ)) {
522*7c478bd9Sstevel@tonic-gate 				/* case 1 */
523*7c478bd9Sstevel@tonic-gate #ifdef debug
524*7c478bd9Sstevel@tonic-gate 				if (case1count++ > 0)
525*7c478bd9Sstevel@tonic-gate 				    (void) write(2, "Case 1 hit more than once."
526*7c478bd9Sstevel@tonic-gate 					" brk or sbrk?\n", 40);
527*7c478bd9Sstevel@tonic-gate #endif
528*7c478bd9Sstevel@tonic-gate 				/* get size to fetch */
529*7c478bd9Sstevel@tonic-gate 				nget = nb + HEADSZ;
530*7c478bd9Sstevel@tonic-gate 				/* round up to a block */
531*7c478bd9Sstevel@tonic-gate 				nget = (nget + BLOCKSZ - 1)/BLOCKSZ * BLOCKSZ;
532*7c478bd9Sstevel@tonic-gate 				assert((uintptr_t)newblk % ALIGNSZ == 0);
533*7c478bd9Sstevel@tonic-gate 				/* get memory */
534*7c478bd9Sstevel@tonic-gate 				if (morecore(nget) == (void *)-1)
535*7c478bd9Sstevel@tonic-gate 					return (NULL);
536*7c478bd9Sstevel@tonic-gate 				/* add to arena */
537*7c478bd9Sstevel@tonic-gate 				newend = (struct header *)((char *)newblk + nget
538*7c478bd9Sstevel@tonic-gate 				    - HEADSZ);
539*7c478bd9Sstevel@tonic-gate 				assert((uintptr_t)newblk % ALIGNSZ == 0);
540*7c478bd9Sstevel@tonic-gate 				newend->nextblk = SETBUSY(&(arena[1]));
541*7c478bd9Sstevel@tonic-gate /* ???  newblk ?? */
542*7c478bd9Sstevel@tonic-gate 				newblk->nextblk = newend;
543*7c478bd9Sstevel@tonic-gate 
544*7c478bd9Sstevel@tonic-gate 				/*
545*7c478bd9Sstevel@tonic-gate 				 * space becomes a permanently allocated block.
546*7c478bd9Sstevel@tonic-gate 				 * This is likely not mt-safe as lock is not
547*7c478bd9Sstevel@tonic-gate 				 * shared with brk or sbrk
548*7c478bd9Sstevel@tonic-gate 				 */
549*7c478bd9Sstevel@tonic-gate 				arenaend->nextblk = SETBUSY(newblk);
550*7c478bd9Sstevel@tonic-gate 				/* adjust other pointers */
551*7c478bd9Sstevel@tonic-gate 				arenaend = newend;
552*7c478bd9Sstevel@tonic-gate 				lastblk = newblk;
553*7c478bd9Sstevel@tonic-gate 				blk = newblk;
554*7c478bd9Sstevel@tonic-gate 			} else if (TESTBUSY(lastblk->nextblk)) {
555*7c478bd9Sstevel@tonic-gate 				/* case 2 */
556*7c478bd9Sstevel@tonic-gate 				nget = (nb + BLOCKSZ - 1) / BLOCKSZ * BLOCKSZ;
557*7c478bd9Sstevel@tonic-gate 				if (morecore(nget) == (void *)-1)
558*7c478bd9Sstevel@tonic-gate 					return (NULL);
559*7c478bd9Sstevel@tonic-gate 				/* block must be word aligned */
560*7c478bd9Sstevel@tonic-gate 				assert(((uintptr_t)newblk%ALIGNSZ) == 0);
561*7c478bd9Sstevel@tonic-gate 				/*
562*7c478bd9Sstevel@tonic-gate 				 * stub at old arenaend becomes first word
563*7c478bd9Sstevel@tonic-gate 				 * in blk
564*7c478bd9Sstevel@tonic-gate 				 */
565*7c478bd9Sstevel@tonic-gate /* ???  	newblk = arenaend; */
566*7c478bd9Sstevel@tonic-gate 
567*7c478bd9Sstevel@tonic-gate 				newend =
568*7c478bd9Sstevel@tonic-gate 				    (struct header *)((char *)arenaend+nget);
569*7c478bd9Sstevel@tonic-gate 				newend->nextblk = SETBUSY(&(arena[1]));
570*7c478bd9Sstevel@tonic-gate 				arenaend->nextblk = newend;
571*7c478bd9Sstevel@tonic-gate 				lastblk = blk = arenaend;
572*7c478bd9Sstevel@tonic-gate 				arenaend = newend;
573*7c478bd9Sstevel@tonic-gate 			} else {
574*7c478bd9Sstevel@tonic-gate 				/* case 3 */
575*7c478bd9Sstevel@tonic-gate 				/*
576*7c478bd9Sstevel@tonic-gate 				 * last block in arena is at end of memory and
577*7c478bd9Sstevel@tonic-gate 				 * is free
578*7c478bd9Sstevel@tonic-gate 				 */
579*7c478bd9Sstevel@tonic-gate 				/* 1.7 had this backward without cast */
580*7c478bd9Sstevel@tonic-gate 				nget = nb -
581*7c478bd9Sstevel@tonic-gate 				    ((char *)arenaend - (char *)lastblk);
582*7c478bd9Sstevel@tonic-gate 				nget = (nget + (BLOCKSZ - 1)) /
583*7c478bd9Sstevel@tonic-gate 				    BLOCKSZ * BLOCKSZ;
584*7c478bd9Sstevel@tonic-gate 				assert(((uintptr_t)newblk % ALIGNSZ) == 0);
585*7c478bd9Sstevel@tonic-gate 				if (morecore(nget) == (void *)-1)
586*7c478bd9Sstevel@tonic-gate 					return (NULL);
587*7c478bd9Sstevel@tonic-gate 				/* combine with last block, put in arena */
588*7c478bd9Sstevel@tonic-gate 				newend = (struct header *)
589*7c478bd9Sstevel@tonic-gate 				    ((char *)arenaend + nget);
590*7c478bd9Sstevel@tonic-gate 				arenaend = lastblk->nextblk = newend;
591*7c478bd9Sstevel@tonic-gate 				newend->nextblk = SETBUSY(&(arena[1]));
592*7c478bd9Sstevel@tonic-gate 				/* set which block to use */
593*7c478bd9Sstevel@tonic-gate 				blk = lastblk;
594*7c478bd9Sstevel@tonic-gate 				DELFREEQ(blk);
595*7c478bd9Sstevel@tonic-gate 			}
596*7c478bd9Sstevel@tonic-gate 		} else {
597*7c478bd9Sstevel@tonic-gate 			struct header *nblk;	/* next block */
598*7c478bd9Sstevel@tonic-gate 
599*7c478bd9Sstevel@tonic-gate 			/* take block found off the free queue */
600*7c478bd9Sstevel@tonic-gate 			DELFREEQ(blk);
601*7c478bd9Sstevel@tonic-gate 			/*
602*7c478bd9Sstevel@tonic-gate 			 * make head of free queue immediately follow blk,
603*7c478bd9Sstevel@tonic-gate 			 * unless blk was at the end of the queue
604*7c478bd9Sstevel@tonic-gate 			 */
605*7c478bd9Sstevel@tonic-gate 			nblk = blk->nextfree;
606*7c478bd9Sstevel@tonic-gate 			if (nblk != &(freeptr[1])) {
607*7c478bd9Sstevel@tonic-gate 				MOVEHEAD(nblk);
608*7c478bd9Sstevel@tonic-gate 			}
609*7c478bd9Sstevel@tonic-gate 		}
610*7c478bd9Sstevel@tonic-gate 		/* blk now points to an adequate block */
611*7c478bd9Sstevel@tonic-gate 		if (((char *)blk->nextblk - (char *)blk) - nb >= MINBLKSZ) {
612*7c478bd9Sstevel@tonic-gate 			/* carve out the right size block */
613*7c478bd9Sstevel@tonic-gate 			/* newblk will be the remainder */
614*7c478bd9Sstevel@tonic-gate 			newblk = (struct header *)((char *)blk + nb);
615*7c478bd9Sstevel@tonic-gate 			newblk->nextblk = blk->nextblk;
616*7c478bd9Sstevel@tonic-gate 			/* mark the block busy */
617*7c478bd9Sstevel@tonic-gate 			blk->nextblk = SETBUSY(newblk);
618*7c478bd9Sstevel@tonic-gate 			ADDFREEQ(newblk);
619*7c478bd9Sstevel@tonic-gate 			/* if blk was lastblk, make newblk lastblk */
620*7c478bd9Sstevel@tonic-gate 			if (blk == lastblk)
621*7c478bd9Sstevel@tonic-gate 				lastblk = newblk;
622*7c478bd9Sstevel@tonic-gate 		} else {
623*7c478bd9Sstevel@tonic-gate 			/* just mark the block busy */
624*7c478bd9Sstevel@tonic-gate 			blk->nextblk = SETBUSY(blk->nextblk);
625*7c478bd9Sstevel@tonic-gate 		}
626*7c478bd9Sstevel@tonic-gate 	}
627*7c478bd9Sstevel@tonic-gate 	CHECKQ
628*7c478bd9Sstevel@tonic-gate 	assert((char *)CLRALL(blk->nextblk) -
629*7c478bd9Sstevel@tonic-gate 	    ((char *)blk + minhead) >= nbytes);
630*7c478bd9Sstevel@tonic-gate 	assert((char *)CLRALL(blk->nextblk) -
631*7c478bd9Sstevel@tonic-gate 	    ((char *)blk + minhead) < nbytes + MINBLKSZ);
632*7c478bd9Sstevel@tonic-gate 	return ((char *)blk + minhead);
633*7c478bd9Sstevel@tonic-gate }
634*7c478bd9Sstevel@tonic-gate 
635*7c478bd9Sstevel@tonic-gate /*
636*7c478bd9Sstevel@tonic-gate  * free(ptr) - free block that user thinks starts at ptr
637*7c478bd9Sstevel@tonic-gate  *
638*7c478bd9Sstevel@tonic-gate  *	input - ptr-1 contains the block header.
639*7c478bd9Sstevel@tonic-gate  *		If the header points forward, we have a normal
640*7c478bd9Sstevel@tonic-gate  *			block pointing to the next block
641*7c478bd9Sstevel@tonic-gate  *		if the header points backward, we have a small
642*7c478bd9Sstevel@tonic-gate  *			block from a holding block.
643*7c478bd9Sstevel@tonic-gate  *		In both cases, the busy bit must be set
644*7c478bd9Sstevel@tonic-gate  */
645*7c478bd9Sstevel@tonic-gate 
646*7c478bd9Sstevel@tonic-gate void
647*7c478bd9Sstevel@tonic-gate free(void *ptr)
648*7c478bd9Sstevel@tonic-gate {
649*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
650*7c478bd9Sstevel@tonic-gate 	free_unlocked(ptr);
651*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
652*7c478bd9Sstevel@tonic-gate }
653*7c478bd9Sstevel@tonic-gate 
654*7c478bd9Sstevel@tonic-gate /*
655*7c478bd9Sstevel@tonic-gate  * free_unlocked(ptr) - Do the real work for free()
656*7c478bd9Sstevel@tonic-gate  */
657*7c478bd9Sstevel@tonic-gate 
658*7c478bd9Sstevel@tonic-gate void
659*7c478bd9Sstevel@tonic-gate free_unlocked(void *ptr)
660*7c478bd9Sstevel@tonic-gate {
661*7c478bd9Sstevel@tonic-gate 	struct holdblk *holdblk;	/* block holding blk */
662*7c478bd9Sstevel@tonic-gate 	struct holdblk *oldhead;	/* former head of the hold block */
663*7c478bd9Sstevel@tonic-gate 					/* queue containing blk's holder */
664*7c478bd9Sstevel@tonic-gate 
665*7c478bd9Sstevel@tonic-gate 	if (ptr == NULL)
666*7c478bd9Sstevel@tonic-gate 		return;
667*7c478bd9Sstevel@tonic-gate 	if (TESTSMAL(((struct header *)((char *)ptr - MINHEAD))->nextblk)) {
668*7c478bd9Sstevel@tonic-gate 		struct lblk	*lblk;	/* pointer to freed block */
669*7c478bd9Sstevel@tonic-gate 		ssize_t		offset;	/* choice of header lists */
670*7c478bd9Sstevel@tonic-gate 
671*7c478bd9Sstevel@tonic-gate 		lblk = (struct lblk *)CLRBUSY((char *)ptr - MINHEAD);
672*7c478bd9Sstevel@tonic-gate 		assert((struct header *)lblk < arenaend);
673*7c478bd9Sstevel@tonic-gate 		assert((struct header *)lblk > arena);
674*7c478bd9Sstevel@tonic-gate 		/* allow twits (e.g. awk) to free a block twice */
675*7c478bd9Sstevel@tonic-gate 		holdblk = lblk->header.holder;
676*7c478bd9Sstevel@tonic-gate 		if (!TESTBUSY(holdblk))
677*7c478bd9Sstevel@tonic-gate 			return;
678*7c478bd9Sstevel@tonic-gate 		holdblk = (struct holdblk *)CLRALL(holdblk);
679*7c478bd9Sstevel@tonic-gate 		/* put lblk on its hold block's free list */
680*7c478bd9Sstevel@tonic-gate 		lblk->header.nextfree = SETSMAL(holdblk->lfreeq);
681*7c478bd9Sstevel@tonic-gate 		holdblk->lfreeq = lblk;
682*7c478bd9Sstevel@tonic-gate 		/* move holdblk to head of queue, if it's not already there */
683*7c478bd9Sstevel@tonic-gate 		offset = holdblk->blksz / grain;
684*7c478bd9Sstevel@tonic-gate 		oldhead = holdhead[offset];
685*7c478bd9Sstevel@tonic-gate 		if (oldhead != holdblk) {
686*7c478bd9Sstevel@tonic-gate 			/* first take out of current spot */
687*7c478bd9Sstevel@tonic-gate 			holdhead[offset] = holdblk;
688*7c478bd9Sstevel@tonic-gate 			holdblk->nexthblk->prevhblk = holdblk->prevhblk;
689*7c478bd9Sstevel@tonic-gate 			holdblk->prevhblk->nexthblk = holdblk->nexthblk;
690*7c478bd9Sstevel@tonic-gate 			/* now add at front */
691*7c478bd9Sstevel@tonic-gate 			holdblk->nexthblk = oldhead;
692*7c478bd9Sstevel@tonic-gate 			holdblk->prevhblk = oldhead->prevhblk;
693*7c478bd9Sstevel@tonic-gate 			oldhead->prevhblk = holdblk;
694*7c478bd9Sstevel@tonic-gate 			holdblk->prevhblk->nexthblk = holdblk;
695*7c478bd9Sstevel@tonic-gate 		}
696*7c478bd9Sstevel@tonic-gate 	} else {
697*7c478bd9Sstevel@tonic-gate 		struct header *blk;	/* real start of block */
698*7c478bd9Sstevel@tonic-gate 		struct header *next;	/* next = blk->nextblk */
699*7c478bd9Sstevel@tonic-gate 		struct header *nextnext;	/* block after next */
700*7c478bd9Sstevel@tonic-gate 
701*7c478bd9Sstevel@tonic-gate 		blk = (struct header *)((char *)ptr - minhead);
702*7c478bd9Sstevel@tonic-gate 		next = blk->nextblk;
703*7c478bd9Sstevel@tonic-gate 		/* take care of twits (e.g. awk) who return blocks twice */
704*7c478bd9Sstevel@tonic-gate 		if (!TESTBUSY(next))
705*7c478bd9Sstevel@tonic-gate 			return;
706*7c478bd9Sstevel@tonic-gate 		blk->nextblk = next = CLRBUSY(next);
707*7c478bd9Sstevel@tonic-gate 		ADDFREEQ(blk);
708*7c478bd9Sstevel@tonic-gate 		/* see if we can compact */
709*7c478bd9Sstevel@tonic-gate 		if (!TESTBUSY(nextnext = next->nextblk)) {
710*7c478bd9Sstevel@tonic-gate 			do {
711*7c478bd9Sstevel@tonic-gate 				DELFREEQ(next);
712*7c478bd9Sstevel@tonic-gate 				next = nextnext;
713*7c478bd9Sstevel@tonic-gate 			} while (!TESTBUSY(nextnext = next->nextblk));
714*7c478bd9Sstevel@tonic-gate 			if (next == arenaend) lastblk = blk;
715*7c478bd9Sstevel@tonic-gate 			blk->nextblk = next;
716*7c478bd9Sstevel@tonic-gate 		}
717*7c478bd9Sstevel@tonic-gate 	}
718*7c478bd9Sstevel@tonic-gate 	CHECKQ
719*7c478bd9Sstevel@tonic-gate }
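
/*
 * A sketch of how free_unlocked() tells the two flavors of block apart
 * (derived from the code above, with the default minhead == MINHEAD):
 * the word just before the user's pointer is the block header.  For an
 * ordinary block it is a forward pointer carrying only the busy bit;
 * for a little block it is the holder pointer tagged with both the busy
 * and small bits (SETALL in malloc_unlocked()).  TESTSMAL() therefore
 * routes little blocks back onto their holding block's free list and
 * everything else through the ordinary arena/free-queue path.
 */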
720*7c478bd9Sstevel@tonic-gate 
721*7c478bd9Sstevel@tonic-gate 
722*7c478bd9Sstevel@tonic-gate /*
723*7c478bd9Sstevel@tonic-gate  * realloc(ptr, size) - give the user a block of size "size", with
724*7c478bd9Sstevel@tonic-gate  *			    the contents pointed to by ptr.  Free ptr.
725*7c478bd9Sstevel@tonic-gate  */
726*7c478bd9Sstevel@tonic-gate 
727*7c478bd9Sstevel@tonic-gate void *
728*7c478bd9Sstevel@tonic-gate realloc(void *ptr, size_t size)
729*7c478bd9Sstevel@tonic-gate {
730*7c478bd9Sstevel@tonic-gate 	void	*retval;
731*7c478bd9Sstevel@tonic-gate 
732*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
733*7c478bd9Sstevel@tonic-gate 	retval = realloc_unlocked(ptr, size);
734*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
735*7c478bd9Sstevel@tonic-gate 	return (retval);
736*7c478bd9Sstevel@tonic-gate }
737*7c478bd9Sstevel@tonic-gate 
738*7c478bd9Sstevel@tonic-gate 
739*7c478bd9Sstevel@tonic-gate /*
740*7c478bd9Sstevel@tonic-gate  * realloc_unlocked(ptr) - Do the real work for realloc()
741*7c478bd9Sstevel@tonic-gate  */
742*7c478bd9Sstevel@tonic-gate 
743*7c478bd9Sstevel@tonic-gate static void *
744*7c478bd9Sstevel@tonic-gate realloc_unlocked(void *ptr, size_t size)
745*7c478bd9Sstevel@tonic-gate {
746*7c478bd9Sstevel@tonic-gate 	struct header *blk;	/* block ptr is contained in */
747*7c478bd9Sstevel@tonic-gate 	size_t trusize;	/* block size as allocator sees it */
748*7c478bd9Sstevel@tonic-gate 	char *newptr;			/* pointer to user's new block */
749*7c478bd9Sstevel@tonic-gate 	size_t cpysize;	/* amount to copy */
750*7c478bd9Sstevel@tonic-gate 	struct header *next;	/* block after blk */
751*7c478bd9Sstevel@tonic-gate 
752*7c478bd9Sstevel@tonic-gate 	if (ptr == NULL)
753*7c478bd9Sstevel@tonic-gate 		return (malloc_unlocked(size, 0));
754*7c478bd9Sstevel@tonic-gate 
755*7c478bd9Sstevel@tonic-gate 	if (size == 0) {
756*7c478bd9Sstevel@tonic-gate 		free_unlocked(ptr);
757*7c478bd9Sstevel@tonic-gate 		return (NULL);
758*7c478bd9Sstevel@tonic-gate 	}
759*7c478bd9Sstevel@tonic-gate 
760*7c478bd9Sstevel@tonic-gate 	if (TESTSMAL(((struct lblk *)((char *)ptr - MINHEAD))->
761*7c478bd9Sstevel@tonic-gate 	    header.holder)) {
762*7c478bd9Sstevel@tonic-gate 		/*
763*7c478bd9Sstevel@tonic-gate 		 * we have a special small block which can't be expanded
764*7c478bd9Sstevel@tonic-gate 		 *
765*7c478bd9Sstevel@tonic-gate 		 * This makes the assumption that even if the user is
766*7c478bd9Sstevel@tonic-gate 		 * reallocating a free block, malloc doesn't alter the contents
767*7c478bd9Sstevel@tonic-gate 		 * of small blocks
768*7c478bd9Sstevel@tonic-gate 		 */
769*7c478bd9Sstevel@tonic-gate 		newptr = malloc_unlocked(size, 0);
770*7c478bd9Sstevel@tonic-gate 		if (newptr == NULL)
771*7c478bd9Sstevel@tonic-gate 			return (NULL);
772*7c478bd9Sstevel@tonic-gate 		/* this isn't to save time--it's to protect the twits */
773*7c478bd9Sstevel@tonic-gate 		if ((char *)ptr != newptr) {
774*7c478bd9Sstevel@tonic-gate 			struct lblk *lblk;
775*7c478bd9Sstevel@tonic-gate 			lblk = (struct lblk *)((char *)ptr - MINHEAD);
776*7c478bd9Sstevel@tonic-gate 			cpysize = ((struct holdblk *)
777*7c478bd9Sstevel@tonic-gate 			    CLRALL(lblk->header.holder))->blksz;
778*7c478bd9Sstevel@tonic-gate 			cpysize = (size > cpysize) ? cpysize : size;
779*7c478bd9Sstevel@tonic-gate 			(void) memcpy(newptr, ptr, cpysize);
780*7c478bd9Sstevel@tonic-gate 			free_unlocked(ptr);
781*7c478bd9Sstevel@tonic-gate 		}
782*7c478bd9Sstevel@tonic-gate 	} else {
783*7c478bd9Sstevel@tonic-gate 		blk = (struct header *)((char *)ptr - minhead);
784*7c478bd9Sstevel@tonic-gate 		next = blk->nextblk;
785*7c478bd9Sstevel@tonic-gate 		/*
786*7c478bd9Sstevel@tonic-gate 		 * deal with twits who reallocate free blocks
787*7c478bd9Sstevel@tonic-gate 		 *
788*7c478bd9Sstevel@tonic-gate 		 * if they haven't reset minhead via mallopt, that's
789*7c478bd9Sstevel@tonic-gate 		 * their problem
790*7c478bd9Sstevel@tonic-gate 		 */
791*7c478bd9Sstevel@tonic-gate 		if (!TESTBUSY(next)) {
792*7c478bd9Sstevel@tonic-gate 			DELFREEQ(blk);
793*7c478bd9Sstevel@tonic-gate 			blk->nextblk = SETBUSY(next);
794*7c478bd9Sstevel@tonic-gate 		}
795*7c478bd9Sstevel@tonic-gate 		next = CLRBUSY(next);
796*7c478bd9Sstevel@tonic-gate 		/* make blk as big as possible */
797*7c478bd9Sstevel@tonic-gate 		if (!TESTBUSY(next->nextblk)) {
798*7c478bd9Sstevel@tonic-gate 			do {
799*7c478bd9Sstevel@tonic-gate 				DELFREEQ(next);
800*7c478bd9Sstevel@tonic-gate 				next = next->nextblk;
801*7c478bd9Sstevel@tonic-gate 			} while (!TESTBUSY(next->nextblk));
802*7c478bd9Sstevel@tonic-gate 			blk->nextblk = SETBUSY(next);
803*7c478bd9Sstevel@tonic-gate 			if (next >= arenaend) lastblk = blk;
804*7c478bd9Sstevel@tonic-gate 		}
805*7c478bd9Sstevel@tonic-gate 		/* get size we really need */
806*7c478bd9Sstevel@tonic-gate 		trusize = size+minhead;
807*7c478bd9Sstevel@tonic-gate 		trusize = (trusize + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;
808*7c478bd9Sstevel@tonic-gate 		trusize = (trusize >= MINBLKSZ) ? trusize : MINBLKSZ;
809*7c478bd9Sstevel@tonic-gate 		/* see if we have enough */
810*7c478bd9Sstevel@tonic-gate 		/* this isn't really the copy size, but I need a register */
811*7c478bd9Sstevel@tonic-gate 		cpysize = (char *)next - (char *)blk;
812*7c478bd9Sstevel@tonic-gate 		if (cpysize >= trusize) {
813*7c478bd9Sstevel@tonic-gate 			/* carve out the size we need */
814*7c478bd9Sstevel@tonic-gate 			struct header *newblk;	/* remainder */
815*7c478bd9Sstevel@tonic-gate 
816*7c478bd9Sstevel@tonic-gate 			if (cpysize - trusize >= MINBLKSZ) {
817*7c478bd9Sstevel@tonic-gate 				/*
818*7c478bd9Sstevel@tonic-gate 				 * carve out the right size block
819*7c478bd9Sstevel@tonic-gate 				 * newblk will be the remainder
820*7c478bd9Sstevel@tonic-gate 				 */
821*7c478bd9Sstevel@tonic-gate 				newblk = (struct header *)((char *)blk +
822*7c478bd9Sstevel@tonic-gate 				    trusize);
823*7c478bd9Sstevel@tonic-gate 				newblk->nextblk = next;
824*7c478bd9Sstevel@tonic-gate 				blk->nextblk = SETBUSY(newblk);
825*7c478bd9Sstevel@tonic-gate 				/* at this point, next is invalid */
826*7c478bd9Sstevel@tonic-gate 				ADDFREEQ(newblk);
827*7c478bd9Sstevel@tonic-gate 				/* if blk was lastblk, make newblk lastblk */
828*7c478bd9Sstevel@tonic-gate 				if (blk == lastblk)
829*7c478bd9Sstevel@tonic-gate 					lastblk = newblk;
830*7c478bd9Sstevel@tonic-gate 			}
831*7c478bd9Sstevel@tonic-gate 			newptr = ptr;
832*7c478bd9Sstevel@tonic-gate 		} else {
833*7c478bd9Sstevel@tonic-gate 			/* bite the bullet, and call malloc */
834*7c478bd9Sstevel@tonic-gate 			cpysize = (size > cpysize) ? cpysize : size;
835*7c478bd9Sstevel@tonic-gate 			newptr = malloc_unlocked(size, 0);
836*7c478bd9Sstevel@tonic-gate 			if (newptr == NULL)
837*7c478bd9Sstevel@tonic-gate 				return (NULL);
838*7c478bd9Sstevel@tonic-gate 			(void) memcpy(newptr, ptr, cpysize);
839*7c478bd9Sstevel@tonic-gate 			free_unlocked(ptr);
840*7c478bd9Sstevel@tonic-gate 		}
841*7c478bd9Sstevel@tonic-gate 	}
842*7c478bd9Sstevel@tonic-gate 	return (newptr);
843*7c478bd9Sstevel@tonic-gate }
844*7c478bd9Sstevel@tonic-gate 
845*7c478bd9Sstevel@tonic-gate 
846*7c478bd9Sstevel@tonic-gate /* LINTLIBRARY */
847*7c478bd9Sstevel@tonic-gate /*
848*7c478bd9Sstevel@tonic-gate  * calloc - allocate and clear memory block
849*7c478bd9Sstevel@tonic-gate  */
850*7c478bd9Sstevel@tonic-gate 
851*7c478bd9Sstevel@tonic-gate void *
852*7c478bd9Sstevel@tonic-gate calloc(size_t num, size_t size)
853*7c478bd9Sstevel@tonic-gate {
854*7c478bd9Sstevel@tonic-gate 	char *mp;
855*7c478bd9Sstevel@tonic-gate 
	/* reject requests whose total size would overflow */
	if (size != 0 && (num * size) / size != num)
		return (NULL);
856*7c478bd9Sstevel@tonic-gate 	num *= size;
857*7c478bd9Sstevel@tonic-gate 	mp = malloc(num);
858*7c478bd9Sstevel@tonic-gate 	if (mp == NULL)
859*7c478bd9Sstevel@tonic-gate 		return (NULL);
860*7c478bd9Sstevel@tonic-gate 	(void) memset(mp, 0, num);
861*7c478bd9Sstevel@tonic-gate 	return (mp);
862*7c478bd9Sstevel@tonic-gate }
863*7c478bd9Sstevel@tonic-gate 
864*7c478bd9Sstevel@tonic-gate 
865*7c478bd9Sstevel@tonic-gate /*
866*7c478bd9Sstevel@tonic-gate  * Mallopt - set options for allocation
867*7c478bd9Sstevel@tonic-gate  *
868*7c478bd9Sstevel@tonic-gate  *	Mallopt provides for control over the allocation algorithm.
869*7c478bd9Sstevel@tonic-gate  *	The cmds available are:
870*7c478bd9Sstevel@tonic-gate  *
871*7c478bd9Sstevel@tonic-gate  *	M_MXFAST Set maxfast to value.  Maxfast is the size of the
872*7c478bd9Sstevel@tonic-gate  *		 largest small, quickly allocated block.  Maxfast
873*7c478bd9Sstevel@tonic-gate  *		 may be set to 0 to disable fast allocation entirely.
874*7c478bd9Sstevel@tonic-gate  *
875*7c478bd9Sstevel@tonic-gate  *	M_NLBLKS Set numlblks to value.  Numlblks is the number of
876*7c478bd9Sstevel@tonic-gate  *		 small blocks per holding block.  Value must be
877*7c478bd9Sstevel@tonic-gate  *		 greater than 0.
878*7c478bd9Sstevel@tonic-gate  *
879*7c478bd9Sstevel@tonic-gate  *	M_GRAIN  Set grain to value.  The sizes of all blocks
880*7c478bd9Sstevel@tonic-gate  *		 smaller than maxfast are considered to be rounded
881*7c478bd9Sstevel@tonic-gate  *		 up to the nearest multiple of grain. The default
882*7c478bd9Sstevel@tonic-gate  *		 value of grain is the smallest number of bytes
883*7c478bd9Sstevel@tonic-gate  *		 which will allow alignment of any data type.    Grain
884*7c478bd9Sstevel@tonic-gate  *		 will be rounded up to a multiple of its default,
885*7c478bd9Sstevel@tonic-gate  *		 and maxfast will be rounded up to a multiple of
886*7c478bd9Sstevel@tonic-gate  *		 grain.  Value must be greater than 0.
887*7c478bd9Sstevel@tonic-gate  *
888*7c478bd9Sstevel@tonic-gate  *	M_KEEP   Retain data in freed block until the next malloc,
889*7c478bd9Sstevel@tonic-gate  *		 realloc, or calloc.  Value is ignored.
890*7c478bd9Sstevel@tonic-gate  *		 This option is provided only for compatibility with
891*7c478bd9Sstevel@tonic-gate  *		 the old version of malloc, and is not recommended.
892*7c478bd9Sstevel@tonic-gate  *
893*7c478bd9Sstevel@tonic-gate  *	returns - 0, upon successful completion
894*7c478bd9Sstevel@tonic-gate  *		 1, if malloc has previously been called or
895*7c478bd9Sstevel@tonic-gate  *		    if value or cmd have illegal values
896*7c478bd9Sstevel@tonic-gate  */
897*7c478bd9Sstevel@tonic-gate 
898*7c478bd9Sstevel@tonic-gate int
899*7c478bd9Sstevel@tonic-gate _mallopt(int cmd, int value)
900*7c478bd9Sstevel@tonic-gate {
901*7c478bd9Sstevel@tonic-gate 	/* disallow changes once a small block is allocated */
902*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
903*7c478bd9Sstevel@tonic-gate 	if (change) {
904*7c478bd9Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
905*7c478bd9Sstevel@tonic-gate 		return (1);
906*7c478bd9Sstevel@tonic-gate 	}
907*7c478bd9Sstevel@tonic-gate 	switch (cmd) {
908*7c478bd9Sstevel@tonic-gate 	case M_MXFAST:
909*7c478bd9Sstevel@tonic-gate 		if (value < 0) {
910*7c478bd9Sstevel@tonic-gate 			(void) mutex_unlock(&mlock);
911*7c478bd9Sstevel@tonic-gate 			return (1);
912*7c478bd9Sstevel@tonic-gate 		}
913*7c478bd9Sstevel@tonic-gate 		fastct = (value + grain - 1) / grain;
914*7c478bd9Sstevel@tonic-gate 		maxfast = grain*fastct;
915*7c478bd9Sstevel@tonic-gate 		break;
916*7c478bd9Sstevel@tonic-gate 	case M_NLBLKS:
917*7c478bd9Sstevel@tonic-gate 		if (value <= 1) {
918*7c478bd9Sstevel@tonic-gate 			(void) mutex_unlock(&mlock);
919*7c478bd9Sstevel@tonic-gate 			return (1);
920*7c478bd9Sstevel@tonic-gate 		}
921*7c478bd9Sstevel@tonic-gate 		numlblks = value;
922*7c478bd9Sstevel@tonic-gate 		break;
923*7c478bd9Sstevel@tonic-gate 	case M_GRAIN:
924*7c478bd9Sstevel@tonic-gate 		if (value <= 0) {
925*7c478bd9Sstevel@tonic-gate 			(void) mutex_unlock(&mlock);
926*7c478bd9Sstevel@tonic-gate 			return (1);
927*7c478bd9Sstevel@tonic-gate 		}
928*7c478bd9Sstevel@tonic-gate 
929*7c478bd9Sstevel@tonic-gate 		/* round grain up to a multiple of ALIGNSZ */
930*7c478bd9Sstevel@tonic-gate 		grain = (value + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;
931*7c478bd9Sstevel@tonic-gate 
932*7c478bd9Sstevel@tonic-gate 		/* reduce fastct appropriately */
933*7c478bd9Sstevel@tonic-gate 		fastct = (maxfast + grain - 1) / grain;
934*7c478bd9Sstevel@tonic-gate 		maxfast = grain * fastct;
935*7c478bd9Sstevel@tonic-gate 		break;
936*7c478bd9Sstevel@tonic-gate 	case M_KEEP:
937*7c478bd9Sstevel@tonic-gate 		if (change && holdhead != NULL) {
938*7c478bd9Sstevel@tonic-gate 			(void) mutex_unlock(&mlock);
939*7c478bd9Sstevel@tonic-gate 			return (1);
940*7c478bd9Sstevel@tonic-gate 		}
941*7c478bd9Sstevel@tonic-gate 		minhead = HEADSZ;
942*7c478bd9Sstevel@tonic-gate 		break;
943*7c478bd9Sstevel@tonic-gate 	default:
944*7c478bd9Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
945*7c478bd9Sstevel@tonic-gate 		return (1);
946*7c478bd9Sstevel@tonic-gate 	}
947*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
948*7c478bd9Sstevel@tonic-gate 	return (0);
949*7c478bd9Sstevel@tonic-gate }
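
/*
 * A hedged usage sketch for mallopt() (the option values are arbitrary
 * examples): calls must be made before the allocator hands out its
 * first small block, otherwise mallopt() fails and returns 1.
 *
 *	if (mallopt(M_MXFAST, 64) != 0 ||	(fast path for blocks <= 64)
 *	    mallopt(M_GRAIN, 16) != 0 ||	(round small sizes to 16)
 *	    mallopt(M_NLBLKS, 50) != 0)		(50 little blocks per holder)
 *		(void) fprintf(stderr, "mallopt failed\n");
 */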
950*7c478bd9Sstevel@tonic-gate 
951*7c478bd9Sstevel@tonic-gate /*
952*7c478bd9Sstevel@tonic-gate  * mallinfo - provide information about space usage
953*7c478bd9Sstevel@tonic-gate  *
954*7c478bd9Sstevel@tonic-gate  *	input - none
955*7c478bd9Sstevel@tonic-gate  *
957*7c478bd9Sstevel@tonic-gate  *	output - a structure containing a description
958*7c478bd9Sstevel@tonic-gate  *		 of space usage, defined in malloc.h
959*7c478bd9Sstevel@tonic-gate  */
960*7c478bd9Sstevel@tonic-gate 
961*7c478bd9Sstevel@tonic-gate struct mallinfo
962*7c478bd9Sstevel@tonic-gate _mallinfo(void)
963*7c478bd9Sstevel@tonic-gate {
964*7c478bd9Sstevel@tonic-gate 	struct header *blk, *next;	/* ptr to ordinary blocks */
965*7c478bd9Sstevel@tonic-gate 	struct holdblk *hblk;		/* ptr to holding blocks */
966*7c478bd9Sstevel@tonic-gate 	struct mallinfo inf;		/* return value */
967*7c478bd9Sstevel@tonic-gate 	int	i;			/* the ubiquitous counter */
968*7c478bd9Sstevel@tonic-gate 	ssize_t size;			/* size of a block */
969*7c478bd9Sstevel@tonic-gate 	ssize_t fsp;			/* free space in 1 hold block */
970*7c478bd9Sstevel@tonic-gate 
971*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
972*7c478bd9Sstevel@tonic-gate 	(void) memset(&inf, 0, sizeof (struct mallinfo));
973*7c478bd9Sstevel@tonic-gate 	if (freeptr[0].nextfree == GROUND) {
974*7c478bd9Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
975*7c478bd9Sstevel@tonic-gate 		return (inf);
976*7c478bd9Sstevel@tonic-gate 	}
977*7c478bd9Sstevel@tonic-gate 	blk = CLRBUSY(arena[1].nextblk);
978*7c478bd9Sstevel@tonic-gate 	/* return total space used */
979*7c478bd9Sstevel@tonic-gate 	inf.arena = (char *)arenaend - (char *)blk;
980*7c478bd9Sstevel@tonic-gate 
981*7c478bd9Sstevel@tonic-gate 	/*
982*7c478bd9Sstevel@tonic-gate 	 * loop through arena, counting # of blocks, and
983*7c478bd9Sstevel@tonic-gate 	 * space used by blocks
984*7c478bd9Sstevel@tonic-gate 	 */
985*7c478bd9Sstevel@tonic-gate 	next = CLRBUSY(blk->nextblk);
986*7c478bd9Sstevel@tonic-gate 	while (next != &(arena[1])) {
987*7c478bd9Sstevel@tonic-gate 		inf.ordblks++;
988*7c478bd9Sstevel@tonic-gate 		size = (char *)next - (char *)blk;
989*7c478bd9Sstevel@tonic-gate 		if (TESTBUSY(blk->nextblk)) {
990*7c478bd9Sstevel@tonic-gate 			inf.uordblks += size;
991*7c478bd9Sstevel@tonic-gate 			inf.keepcost += HEADSZ-MINHEAD;
992*7c478bd9Sstevel@tonic-gate 		} else {
993*7c478bd9Sstevel@tonic-gate 			inf.fordblks += size;
994*7c478bd9Sstevel@tonic-gate 		}
995*7c478bd9Sstevel@tonic-gate 		blk = next;
996*7c478bd9Sstevel@tonic-gate 		next = CLRBUSY(blk->nextblk);
997*7c478bd9Sstevel@tonic-gate 	}
998*7c478bd9Sstevel@tonic-gate 
999*7c478bd9Sstevel@tonic-gate 	/*
1000*7c478bd9Sstevel@tonic-gate 	 * if any holding blocks have been allocated
1001*7c478bd9Sstevel@tonic-gate 	 * then examine space in holding blks
1002*7c478bd9Sstevel@tonic-gate 	 */
1003*7c478bd9Sstevel@tonic-gate 	if (change && holdhead != NULL) {
1004*7c478bd9Sstevel@tonic-gate 		for (i = fastct; i > 0; i--) {	/* loop thru ea. chain */
1005*7c478bd9Sstevel@tonic-gate 			hblk = holdhead[i];
1006*7c478bd9Sstevel@tonic-gate 			/* do only if chain not empty */
1007*7c478bd9Sstevel@tonic-gate 			if (hblk != HGROUND) {
1008*7c478bd9Sstevel@tonic-gate 				size = hblk->blksz +
1009*7c478bd9Sstevel@tonic-gate 				    sizeof (struct lblk) - sizeof (int);
1010*7c478bd9Sstevel@tonic-gate 				do {	/* loop thru 1 hold blk chain */
1011*7c478bd9Sstevel@tonic-gate 					inf.hblks++;
1012*7c478bd9Sstevel@tonic-gate 					fsp = freespace(hblk);
1013*7c478bd9Sstevel@tonic-gate 					inf.fsmblks += fsp;
1014*7c478bd9Sstevel@tonic-gate 					inf.usmblks += numlblks*size - fsp;
1015*7c478bd9Sstevel@tonic-gate 					inf.smblks += numlblks;
1016*7c478bd9Sstevel@tonic-gate 					hblk = hblk->nexthblk;
1017*7c478bd9Sstevel@tonic-gate 				} while (hblk != holdhead[i]);
1018*7c478bd9Sstevel@tonic-gate 			}
1019*7c478bd9Sstevel@tonic-gate 		}
1020*7c478bd9Sstevel@tonic-gate 	}
1021*7c478bd9Sstevel@tonic-gate 	inf.hblkhd = (inf.smblks / numlblks) * sizeof (struct holdblk);
1022*7c478bd9Sstevel@tonic-gate 	/* holding blocks were counted in ordblks, so subtract them off */
1023*7c478bd9Sstevel@tonic-gate 	inf.ordblks -= inf.hblks;
1024*7c478bd9Sstevel@tonic-gate 	inf.uordblks -= inf.hblkhd + inf.usmblks + inf.fsmblks;
1025*7c478bd9Sstevel@tonic-gate 	inf.keepcost -= inf.hblks*(HEADSZ - MINHEAD);
1026*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
1027*7c478bd9Sstevel@tonic-gate 	return (inf);
1028*7c478bd9Sstevel@tonic-gate }
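
/*
 * Illustrative usage sketch (not part of the library, never compiled):
 * a caller prints the statistics gathered above.  The casts guard
 * against the exact integer type of the struct mallinfo members, which
 * is defined in malloc.h.
 */
#if 0
#include <malloc.h>
#include <stdio.h>

static void
report_heap_usage(void)
{
	struct mallinfo mi = mallinfo();

	/* ordinary (large) blocks in the arena */
	(void) printf("arena=%lu ordblks=%lu used=%lu free=%lu keepcost=%lu\n",
	    (unsigned long)mi.arena, (unsigned long)mi.ordblks,
	    (unsigned long)mi.uordblks, (unsigned long)mi.fordblks,
	    (unsigned long)mi.keepcost);
	/* small blocks carved out of holding blocks */
	(void) printf("hblks=%lu hblkhd=%lu smblks=%lu used=%lu free=%lu\n",
	    (unsigned long)mi.hblks, (unsigned long)mi.hblkhd,
	    (unsigned long)mi.smblks, (unsigned long)mi.usmblks,
	    (unsigned long)mi.fsmblks);
}
#endif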
1029*7c478bd9Sstevel@tonic-gate 
1030*7c478bd9Sstevel@tonic-gate 
1031*7c478bd9Sstevel@tonic-gate /*
1032*7c478bd9Sstevel@tonic-gate  * freespace - calc. how much space is used in the free
1033*7c478bd9Sstevel@tonic-gate  *		    small blocks in a given holding block
1034*7c478bd9Sstevel@tonic-gate  *
1035*7c478bd9Sstevel@tonic-gate  *	input - hblk = given holding block
1036*7c478bd9Sstevel@tonic-gate  *
1037*7c478bd9Sstevel@tonic-gate  *	returns space used in free small blocks of hblk
1038*7c478bd9Sstevel@tonic-gate  */
1039*7c478bd9Sstevel@tonic-gate 
1040*7c478bd9Sstevel@tonic-gate static ssize_t
1041*7c478bd9Sstevel@tonic-gate freespace(struct holdblk *holdblk)
1042*7c478bd9Sstevel@tonic-gate {
1043*7c478bd9Sstevel@tonic-gate 	struct lblk *lblk;
1044*7c478bd9Sstevel@tonic-gate 	ssize_t space = 0;
1045*7c478bd9Sstevel@tonic-gate 	ssize_t size;
1046*7c478bd9Sstevel@tonic-gate 	struct lblk *unused;
1047*7c478bd9Sstevel@tonic-gate 
1048*7c478bd9Sstevel@tonic-gate 	lblk = CLRSMAL(holdblk->lfreeq);
1049*7c478bd9Sstevel@tonic-gate 	size = holdblk->blksz + sizeof (struct lblk) - sizeof (int);
1050*7c478bd9Sstevel@tonic-gate 	unused = CLRSMAL(holdblk->unused);
1051*7c478bd9Sstevel@tonic-gate 	/* follow free chain */
1052*7c478bd9Sstevel@tonic-gate 	while ((lblk != LGROUND) && (lblk != unused)) {
1053*7c478bd9Sstevel@tonic-gate 		space += size;
1054*7c478bd9Sstevel@tonic-gate 		lblk = CLRSMAL(lblk->header.nextfree);
1055*7c478bd9Sstevel@tonic-gate 	}
1056*7c478bd9Sstevel@tonic-gate 	space += ((char *)holdblk + HOLDSZ(size)) - (char *)unused;
1057*7c478bd9Sstevel@tonic-gate 	return (space);
1058*7c478bd9Sstevel@tonic-gate }
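
/*
 * Worked example of the accounting above (the numbers are hypothetical):
 * if holdblk->blksz is 24 and three small blocks sit on the free queue
 * ahead of `unused', the loop contributes 3 * (24 + sizeof (struct lblk)
 * - sizeof (int)) bytes, and the final line adds the still-untouched
 * tail between `unused' and the end of the holding block.
 */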
1059*7c478bd9Sstevel@tonic-gate 
1060*7c478bd9Sstevel@tonic-gate static void *
1061*7c478bd9Sstevel@tonic-gate morecore(size_t bytes)
1062*7c478bd9Sstevel@tonic-gate {
1063*7c478bd9Sstevel@tonic-gate 	void * ret;
1064*7c478bd9Sstevel@tonic-gate 
1065*7c478bd9Sstevel@tonic-gate 	if (bytes > LONG_MAX) {
1066*7c478bd9Sstevel@tonic-gate 		intptr_t wad;
1067*7c478bd9Sstevel@tonic-gate 		/*
1068*7c478bd9Sstevel@tonic-gate 		 * The request size is too big.  We need to do this in
1069*7c478bd9Sstevel@tonic-gate 		 * chunks; sbrk() takes a signed increment of at most LONG_MAX.
1070*7c478bd9Sstevel@tonic-gate 		 */
1071*7c478bd9Sstevel@tonic-gate 		if (bytes == ULONG_MAX)
1072*7c478bd9Sstevel@tonic-gate 			return ((void *)-1);
1073*7c478bd9Sstevel@tonic-gate 
1074*7c478bd9Sstevel@tonic-gate 		ret = sbrk(0);
1075*7c478bd9Sstevel@tonic-gate 		wad = LONG_MAX;
1076*7c478bd9Sstevel@tonic-gate 		while (wad > 0) {
1077*7c478bd9Sstevel@tonic-gate 			if (sbrk(wad) == (void *)-1) {
1078*7c478bd9Sstevel@tonic-gate 				if (ret != sbrk(0))
1079*7c478bd9Sstevel@tonic-gate 					(void) sbrk(-LONG_MAX);
1080*7c478bd9Sstevel@tonic-gate 				return ((void *)-1);
1081*7c478bd9Sstevel@tonic-gate 			}
1082*7c478bd9Sstevel@tonic-gate 			bytes -= LONG_MAX;
1083*7c478bd9Sstevel@tonic-gate 			wad = bytes;
1084*7c478bd9Sstevel@tonic-gate 		}
1085*7c478bd9Sstevel@tonic-gate 	} else
1086*7c478bd9Sstevel@tonic-gate 		ret = sbrk(bytes);
1087*7c478bd9Sstevel@tonic-gate 
1088*7c478bd9Sstevel@tonic-gate 	return (ret);
1089*7c478bd9Sstevel@tonic-gate }
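
/*
 * Illustrative restatement (not part of the library, never compiled):
 * the chunking above is equivalent in spirit to the sketch below, which
 * grows the break in increments of at most LONG_MAX and backs the
 * partial growth out again on failure.  It is only a sketch for
 * explanation, not the code that is actually used.
 */
#if 0
static void *
morecore_sketch(size_t bytes)
{
	void *base = sbrk(0);		/* current break = start of new space */
	size_t done = 0;

	while (done < bytes) {
		intptr_t step = (bytes - done > LONG_MAX) ?
		    LONG_MAX : (intptr_t)(bytes - done);

		if (sbrk(step) == (void *)-1) {
			if (done != 0)			/* undo partial growth */
				(void) sbrk(-(intptr_t)done);
			return ((void *)-1);
		}
		done += step;
	}
	return (base);
}
#endif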
1090*7c478bd9Sstevel@tonic-gate 
1091*7c478bd9Sstevel@tonic-gate #ifdef debug
1092*7c478bd9Sstevel@tonic-gate int
1093*7c478bd9Sstevel@tonic-gate check_arena(void)
1094*7c478bd9Sstevel@tonic-gate {
1095*7c478bd9Sstevel@tonic-gate 	struct header *blk, *prev, *next;	/* ptr to ordinary blocks */
1096*7c478bd9Sstevel@tonic-gate 
1097*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
1098*7c478bd9Sstevel@tonic-gate 	if (freeptr[0].nextfree == GROUND) {
1099*7c478bd9Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
1100*7c478bd9Sstevel@tonic-gate 		return (-1);
1101*7c478bd9Sstevel@tonic-gate 	}
1102*7c478bd9Sstevel@tonic-gate 	blk = arena + 1;
1103*7c478bd9Sstevel@tonic-gate 
1104*7c478bd9Sstevel@tonic-gate 	/* loop through arena, checking */
1105*7c478bd9Sstevel@tonic-gate 	blk = (struct header *)CLRALL(blk->nextblk);
1106*7c478bd9Sstevel@tonic-gate 	next = (struct header *)CLRALL(blk->nextblk);
1107*7c478bd9Sstevel@tonic-gate 	while (next != arena + 1) {
1108*7c478bd9Sstevel@tonic-gate 		assert(blk >= arena + 1);
1109*7c478bd9Sstevel@tonic-gate 		assert(blk <= lastblk);
1110*7c478bd9Sstevel@tonic-gate 		assert(next >= blk + 1);
1111*7c478bd9Sstevel@tonic-gate 		assert(((uintptr_t)((struct header *)blk->nextblk) &
1112*7c478bd9Sstevel@tonic-gate 		    (4 | SMAL)) == 0);
1113*7c478bd9Sstevel@tonic-gate 
1114*7c478bd9Sstevel@tonic-gate 		if (TESTBUSY(blk->nextblk) == 0) {
1115*7c478bd9Sstevel@tonic-gate 			assert(blk->nextfree >= freeptr);
1116*7c478bd9Sstevel@tonic-gate 			assert(blk->prevfree >= freeptr);
1117*7c478bd9Sstevel@tonic-gate 			assert(blk->nextfree <= lastblk);
1118*7c478bd9Sstevel@tonic-gate 			assert(blk->prevfree <= lastblk);
1119*7c478bd9Sstevel@tonic-gate 			assert(((uintptr_t)((struct header *)blk->nextfree) &
1120*7c478bd9Sstevel@tonic-gate 			    7) == 0);
1121*7c478bd9Sstevel@tonic-gate 			assert(((uintptr_t)((struct header *)blk->prevfree) &
1122*7c478bd9Sstevel@tonic-gate 			    7) == 0 || blk->prevfree == freeptr);
1123*7c478bd9Sstevel@tonic-gate 		}
1124*7c478bd9Sstevel@tonic-gate 		blk = next;
1125*7c478bd9Sstevel@tonic-gate 		next = CLRBUSY(blk->nextblk);
1126*7c478bd9Sstevel@tonic-gate 	}
1127*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
1128*7c478bd9Sstevel@tonic-gate 	return (0);
1129*7c478bd9Sstevel@tonic-gate }
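
/*
 * Illustrative debug-build usage (hypothetical caller, never compiled):
 * check_arena() returns -1 before the first allocation, 0 while the
 * block chain is consistent, and trips an assert() on a corrupted
 * chain, since this file is built without NDEBUG when "debug" is
 * defined.
 */
#if 0
static void
exercise_arena_check(void)
{
	void *p = malloc(100);

	assert(check_arena() == 0);	/* arena consistent after malloc */
	free(p);
	assert(check_arena() == 0);	/* and still consistent after free */
}
#endif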
1130*7c478bd9Sstevel@tonic-gate 
1131*7c478bd9Sstevel@tonic-gate #define	RSTALLOC	1
1132*7c478bd9Sstevel@tonic-gate #endif
1133*7c478bd9Sstevel@tonic-gate 
1134*7c478bd9Sstevel@tonic-gate #ifdef RSTALLOC
1135*7c478bd9Sstevel@tonic-gate /*
1136*7c478bd9Sstevel@tonic-gate  * rstalloc - reset alloc routines
1137*7c478bd9Sstevel@tonic-gate  *
1138*7c478bd9Sstevel@tonic-gate  *	description -	return allocated memory and reset
1139*7c478bd9Sstevel@tonic-gate  *			allocation pointers.
1140*7c478bd9Sstevel@tonic-gate  *
1141*7c478bd9Sstevel@tonic-gate  *	Warning - This is for debugging purposes only.
1142*7c478bd9Sstevel@tonic-gate  *		  It will return all memory allocated after
1143*7c478bd9Sstevel@tonic-gate  *		  the first call to malloc, even if some
1144*7c478bd9Sstevel@tonic-gate  *		  of it was fetched by a user's sbrk().
1145*7c478bd9Sstevel@tonic-gate  */
1146*7c478bd9Sstevel@tonic-gate 
1147*7c478bd9Sstevel@tonic-gate void
1148*7c478bd9Sstevel@tonic-gate rstalloc(void)
1149*7c478bd9Sstevel@tonic-gate {
1150*7c478bd9Sstevel@tonic-gate 	(void) mutex_lock(&mlock);
1151*7c478bd9Sstevel@tonic-gate 	minhead = MINHEAD;
1152*7c478bd9Sstevel@tonic-gate 	grain = ALIGNSZ;
1153*7c478bd9Sstevel@tonic-gate 	numlblks = NUMLBLKS;
1154*7c478bd9Sstevel@tonic-gate 	fastct = FASTCT;
1155*7c478bd9Sstevel@tonic-gate 	maxfast = MAXFAST;
1156*7c478bd9Sstevel@tonic-gate 	change = 0;
1157*7c478bd9Sstevel@tonic-gate 	if (freeptr[0].nextfree == GROUND) {
1158*7c478bd9Sstevel@tonic-gate 		(void) mutex_unlock(&mlock);
1159*7c478bd9Sstevel@tonic-gate 		return;
1160*7c478bd9Sstevel@tonic-gate 	}
1161*7c478bd9Sstevel@tonic-gate 	(void) brk(CLRBUSY(arena[1].nextblk));
1162*7c478bd9Sstevel@tonic-gate 	freeptr[0].nextfree = GROUND;
1163*7c478bd9Sstevel@tonic-gate #ifdef debug
1164*7c478bd9Sstevel@tonic-gate 	case1count = 0;
1165*7c478bd9Sstevel@tonic-gate #endif
1166*7c478bd9Sstevel@tonic-gate 	(void) mutex_unlock(&mlock);
1167*7c478bd9Sstevel@tonic-gate }
1168*7c478bd9Sstevel@tonic-gate #endif	/* RSTALLOC */
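
/*
 * Illustrative debug-only usage (hypothetical test harness, never
 * compiled): with RSTALLOC defined, rstalloc() can be called between
 * test cases to hand all allocated memory back to the system and
 * restore the default tuning -- with the caveat from the warning above
 * that memory the application obtained through its own sbrk() calls is
 * reclaimed as well.
 */
#if 0
static void
between_tests(void)
{
	run_first_test();	/* hypothetical test functions */
	rstalloc();		/* heap looks as if malloc was never called */
	run_second_test();
}
#endif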
1169*7c478bd9Sstevel@tonic-gate 
1170*7c478bd9Sstevel@tonic-gate /*
1171*7c478bd9Sstevel@tonic-gate  * cfree is an undocumented, obsolete function
1172*7c478bd9Sstevel@tonic-gate  */
1173*7c478bd9Sstevel@tonic-gate 
1174*7c478bd9Sstevel@tonic-gate /* ARGSUSED */
1175*7c478bd9Sstevel@tonic-gate void
1176*7c478bd9Sstevel@tonic-gate _cfree(char *p, unsigned num, unsigned size)
1177*7c478bd9Sstevel@tonic-gate {
1178*7c478bd9Sstevel@tonic-gate 	free(p);
1179*7c478bd9Sstevel@tonic-gate }
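
/*
 * Illustrative equivalence (sketch, never compiled): the num and size
 * arguments are ignored, so the cfree() call below has exactly the
 * same effect as free(p); new code should simply call free() directly.
 */
#if 0
static void
cfree_example(void)
{
	char *p = calloc(10, sizeof (int));

	cfree(p, 10, sizeof (int));	/* identical to free(p) */
}
#endif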
1180