/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved	*/


#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*
 * Copyright 2005 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma weak mallopt = _mallopt
#pragma weak mallinfo = _mallinfo
#pragma weak cfree = _cfree
#pragma weak memalign = _memalign
#pragma weak valloc = _valloc

#include <sys/types.h>

#ifndef debug
#define	NDEBUG
#endif

#include <stdlib.h>
#include <string.h>
#include "assert.h"
#include "malloc.h"
#include "mallint.h"
#include <thread.h>
#include <synch.h>
#include <unistd.h>
#include <limits.h>

static mutex_t mlock = DEFAULTMUTEX;
static ssize_t freespace(struct holdblk *);
static void *malloc_unlocked(size_t, int);
static void *realloc_unlocked(void *, size_t);
static void free_unlocked(void *);
static void *morecore(size_t);

/*
 * user-level memory allocator (malloc, free, realloc)
 *
 *	-malloc, free, realloc and mallopt form a memory allocator
 *	similar to malloc, free, and realloc.  The routines
 *	here are much faster than the original, with slightly worse
 *	space usage (a few percent difference on most input).  They
 *	do not have the property that data in freed blocks is left
 *	untouched until the space is reallocated.
 *
 *	-Memory is kept in the "arena", a singly linked list of blocks.
 *	These blocks are of 3 types.
 *		1. A free block.  This is a block not in use by the
 *		   user.  It has a 3 word header. (See description
 *		   of the free queue.)
 *		2. An allocated block.  This is a block the user has
 *		   requested.  It has only a 1 word header, pointing
 *		   to the next block of any sort.
 *		3. A permanently allocated block.  This covers space
 *		   acquired by the user directly through sbrk().  It
 *		   has a 1 word header, as does 2.
 *	Blocks of type 1 have the lower bit of the pointer to the
 *	nextblock = 0.  Blocks of type 2 and 3 have that bit set,
 *	to mark them busy.
 *
 *	-Unallocated blocks are kept on an unsorted doubly linked
 *	free list.
 *
 *	-Memory is allocated in blocks, with sizes specified by the
 *	user.  A circular first-fit strategy is used, with a roving
 *	head of the free queue, which prevents bunching of small
 *	blocks at the head of the queue.
 *
 *	-Compaction is performed at free time of any blocks immediately
 *	following the freed block.  The freed block will be combined
 *	with a preceding block during the search phase of malloc.
 *	Since a freed block is added at the front of the free queue,
 *	which is moved to the end of the queue if considered and
 *	rejected during the search, fragmentation only occurs if
 *	a block with a contiguous preceding block that is free is
 *	freed and reallocated on the next call to malloc.  The
 *	time savings of this strategy is judged to be worth the
 *	occasional waste of memory.
 *
 *	-Small blocks (of size < MAXSIZE) are not allocated directly.
 *	A large "holding" block is allocated via a recursive call to
 *	malloc.  This block contains a header and numlblks small blocks.
 *	Holding blocks for a given size of small block (rounded to the
 *	nearest ALIGNSZ bytes) are kept on a queue with the property that any
 *	holding block with an unused small block is in front of any without.
 *	A list of free blocks is kept within the holding block.
 */
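
/*
 * Illustrative sketch (not part of this file): the busy/small tags
 * described above fit in the low-order bits of the nextblk pointer
 * because blocks are at least ALIGNSZ-aligned.  The real SETBUSY,
 * TESTBUSY and CLRBUSY macros live in mallint.h; assuming BUSY is the
 * low bit, they presumably reduce to something like the hypothetical
 * macros below.
 */
#if 0
#define	EX_BUSY		((uintptr_t)1)	/* hypothetical tag bit */
#define	EX_SETBUSY(p)	((struct header *)((uintptr_t)(p) | EX_BUSY))
#define	EX_CLRBUSY(p)	((struct header *)((uintptr_t)(p) & ~EX_BUSY))
#define	EX_TESTBUSY(p)	(((uintptr_t)(p)) & EX_BUSY)
#endif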

/*
 *	description of arena, free queue, holding blocks etc.
 *
 * New compilers and linkers do not guarantee the order of initialized
 * data.  Define freeptr as arena[2-3] to guarantee that it follows
 * arena in memory.  Later code depends on this order.
 */

static struct header arena[4] = {
	    {0, 0, 0},
	    {0, 0, 0},
	    {0, 0, 0},
	    {0, 0, 0}
	};
				/*
				 * the second word is a minimal block to
				 * start the arena. The first is a busy
				 * block to be pointed to by the last block.
				 */

#define	freeptr (arena + 2)
				/* first and last entry in free list */
static struct header *arenaend;	/* ptr to block marking high end of arena */
static struct header *lastblk;	/* the highest block in the arena */
static struct holdblk **holdhead;   /* pointer to array of head pointers */
				    /* to holding block chains */
/*
 * In order to save time calculating indices, the array is 1 too
 * large, and the first element is unused
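 * (for example, if grain were 16, a 10-byte request would round up to
 * 16 bytes and index holdhead[16/16] = holdhead[1]; holdhead[0] is
 * never used)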
 *
 * Variables controlling algorithm, esp. how holding blocks are used
 */
static int numlblks = NUMLBLKS;
static int minhead = MINHEAD;
static int change = 0;	/* != 0, once param changes are no longer allowed */
static int fastct = FASTCT;
static unsigned int maxfast = MAXFAST;
/* number of small block sizes to map to one size */

static int grain = ALIGNSZ;

#ifdef debug
static int case1count = 0;

static void
checkq(void)
{
	register struct header *p;

	p = &freeptr[0];

	/* check forward */
	/*CSTYLED*/
	while (p != &freeptr[1]) {
		p = p->nextfree;
		assert(p->prevfree->nextfree == p);
	}

	/* check backward */
	/*CSTYLED*/
	while (p != &freeptr[0]) {
		p = p->prevfree;
		assert(p->nextfree->prevfree == p);
	}
}
#endif


/*
 * malloc(nbytes) - give a user nbytes to use
 */

void *
malloc(size_t nbytes)
{
	void *ret;

	(void) mutex_lock(&mlock);
	ret = malloc_unlocked(nbytes, 0);
	(void) mutex_unlock(&mlock);
	return (ret);
}

/*
 * Use malloc_unlocked() to get the address to start with; given this
 * address, find the closest address that satisfies the requested
 * alignment, and return that address after doing some housekeeping
 * (refer to the ASCII art below).
 */
void *
_memalign(size_t alignment, size_t size)
{
	void *alloc_buf;
	struct header *hd;
	size_t alloc_size;
	uintptr_t fr;
	static int realloc;

	if (size == 0 || alignment == 0 ||
	    (alignment & (alignment - 1)) != 0) {
		return (NULL);
	}
	if (alignment <= ALIGNSZ)
		return (malloc(size));

	alloc_size = size + alignment;
	if (alloc_size < size) { /* overflow */
		return (NULL);
	}

	(void) mutex_lock(&mlock);
	alloc_buf = malloc_unlocked(alloc_size, 1);
	(void) mutex_unlock(&mlock);

	if (alloc_buf == NULL)
		return (NULL);
	fr = (uintptr_t)alloc_buf;

	fr = (fr + alignment - 1) / alignment * alignment;

	if (fr == (uintptr_t)alloc_buf)
		return (alloc_buf);

	if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
		/*
		 * We hit an edge case, where the space ahead of the
		 * aligned address is not sufficient to hold a 'header'
		 * and hence we can't free it.  So allocate an extra
		 * 'alignment' bytes of slack and try again.
		 */
		realloc++;
		free(alloc_buf);
		alloc_size = size + alignment*2;
		if (alloc_size < size) {
			return (NULL);
		}

		(void) mutex_lock(&mlock);
		alloc_buf = malloc_unlocked(alloc_size, 1);
		(void) mutex_unlock(&mlock);

		if (alloc_buf == NULL)
			return (NULL);
		fr = (uintptr_t)alloc_buf;

		fr = (fr + alignment - 1) / alignment * alignment;
		if (fr == (uintptr_t)alloc_buf)
			return (alloc_buf);
		if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
			fr = fr + alignment;
		}
	}

	/*
	 *	+-------+		+-------+
	 *  +---| <a>   |		| <a>   |--+
	 *  |   +-------+<--alloc_buf-->+-------+  |
	 *  |   |	|		|	|  |
	 *  |   |	|		|	|  |
	 *  |   |	|		|	|  |
	 *  |   |	|	 hd-->  +-------+  |
	 *  |   |	|	    +---|  <b>  |<-+
	 *  |   |	|	    |   +-------+<--- fr
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   +-------+	    |   +-------+
	 *  +-->|  next |	    +-->|  next |
	 *	+-------+		+-------+
	 *
	 */
	hd = (struct header *)((char *)fr - minhead);
	(void) mutex_lock(&mlock);
	hd->nextblk = ((struct header *)((char *)alloc_buf - minhead))->nextblk;
	((struct header *)((char *)alloc_buf - minhead))->nextblk = SETBUSY(hd);
	(void) mutex_unlock(&mlock);
	free(alloc_buf);
	CHECKQ
	return ((void *)fr);
}
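
/*
 * Usage sketch (illustrative only, kept out of the build): memalign()
 * hands back storage whose address is a multiple of the requested
 * power-of-two alignment, and the result is released with ordinary
 * free().
 */
#if 0
static void
memalign_example(void)
{
	void *p = memalign(64, 100);	/* 100 bytes, 64-byte aligned */

	if (p != NULL) {
		assert(((uintptr_t)p & (64 - 1)) == 0);
		free(p);
	}
}
#endif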

void *
_valloc(size_t size)
{
	static unsigned pagesize;
	if (size == 0)
		return (NULL);

	if (!pagesize)
		pagesize = sysconf(_SC_PAGESIZE);

	return (memalign(pagesize, size));
}

/*
 * malloc_unlocked(nbytes, nosmall) - Do the real work for malloc
 */

static void *
malloc_unlocked(size_t nbytes, int nosmall)
{
	struct header *blk;
	size_t nb;	/* size of entire block we need */

	/* on first call, initialize */
	if (freeptr[0].nextfree == GROUND) {
		/* initialize arena */
		arena[1].nextblk = (struct header *)BUSY;
		arena[0].nextblk = (struct header *)BUSY;
		lastblk = arenaend = &(arena[1]);
		/* initialize free queue */
		freeptr[0].nextfree = &(freeptr[1]);
		freeptr[1].nextblk = &(arena[0]);
		freeptr[1].prevfree = &(freeptr[0]);
		/* mark that small blocks not init yet */
	}
	if (nbytes == 0)
		return (NULL);

	if (nbytes <= maxfast && !nosmall) {
		/*
		 * We can allocate out of a holding block
		 */
		struct holdblk *holdblk; /* head of right sized queue */
		struct lblk *lblk;	 /* pointer to a little block */
		struct holdblk *newhold;

		if (!change) {
			int i;
			/*
			 * This allocates space for hold block
			 * pointers by calling malloc recursively.
			 * Maxfast is temporarily set to 0, to
			 * avoid infinite recursion.  allocate
			 * space for an extra ptr so that an index
			 * is just ->blksz/grain, with the first
			 * ptr unused.
			 */
			change = 1;	/* changes to algorithm params */
					/* no longer allowed */
			/*
			 * temporarily alter maxfast, to avoid
			 * infinite recursion
			 */
			maxfast = 0;
			holdhead = (struct holdblk **)
			    malloc_unlocked(sizeof (struct holdblk *) *
			    (fastct + 1), 0);
			if (holdhead == NULL)
				return (malloc_unlocked(nbytes, 0));
			for (i = 1; i <= fastct; i++) {
				holdhead[i] = HGROUND;
			}
			maxfast = fastct * grain;
		}
		/*
		 * Note that this uses the absolute min header size (MINHEAD)
		 * unlike the large block case which uses minhead
		 *
		 * round up to nearest multiple of grain
		 * code assumes grain is a multiple of MINHEAD
		 */
		/* round up to grain */
		nb = (nbytes + grain - 1) / grain * grain;
		holdblk = holdhead[nb / grain];
		nb = nb + MINHEAD;
		/*
		 * look for space in the holding block.  Blocks with
		 * space will be in front of those without
		 */
		if ((holdblk != HGROUND) && (holdblk->lfreeq != LGROUND)) {
			/* there is space */
			lblk = holdblk->lfreeq;

			/*
			 * Now make lfreeq point to a free block.
			 * If lblk has been previously allocated and
			 * freed, it has a valid pointer to use.
			 * Otherwise, lblk is at the beginning of
			 * the unallocated blocks at the end of
			 * the holding block, so, if there is room, take
			 * the next space.  If not, mark holdblk full,
			 * and move holdblk to the end of the queue
			 */
			if (lblk < holdblk->unused) {
				/* move to next holdblk, if this one full */
				if ((holdblk->lfreeq =
				    CLRSMAL(lblk->header.nextfree)) ==
				    LGROUND) {
					holdhead[(nb-MINHEAD) / grain] =
					    holdblk->nexthblk;
				}
			} else if (((char *)holdblk->unused + nb) <
			    ((char *)holdblk + HOLDSZ(nb))) {
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused+nb);
				holdblk->lfreeq = holdblk->unused;
			} else {
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused+nb);
				holdblk->lfreeq = LGROUND;
				holdhead[(nb-MINHEAD)/grain] =
				    holdblk->nexthblk;
			}
			/* mark as busy and small */
			lblk->header.holder = (struct holdblk *)SETALL(holdblk);
		} else {
			/* we need a new holding block */
			newhold = (struct holdblk *)
			    malloc_unlocked(HOLDSZ(nb), 0);
			if (newhold == NULL) {
				return (NULL);
			}
			/* add to head of free queue */
			if (holdblk != HGROUND) {
				newhold->nexthblk = holdblk;
				newhold->prevhblk = holdblk->prevhblk;
				holdblk->prevhblk = newhold;
				newhold->prevhblk->nexthblk = newhold;
			} else {
				newhold->nexthblk = newhold->prevhblk = newhold;
			}
			holdhead[(nb-MINHEAD)/grain] = newhold;
			/* set up newhold */
			lblk = (struct lblk *)(newhold->space);
			newhold->lfreeq = newhold->unused =
			    (struct lblk *)((char *)newhold->space+nb);
			lblk->header.holder = (struct holdblk *)SETALL(newhold);
			newhold->blksz = nb-MINHEAD;
		}
#ifdef debug
		assert(((struct holdblk *)CLRALL(lblk->header.holder))->blksz >=
		    nbytes);
#endif /* debug */
		return ((char *)lblk + MINHEAD);
	} else {
		/*
		 * We need an ordinary block
		 */
		struct header *newblk;	/* used for creating a block */

		/* get number of bytes we need */
		nb = nbytes + minhead;
		nb = (nb + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;	/* align */
		nb = (nb > MINBLKSZ) ? nb : MINBLKSZ;
		/*
		 * see if there is a big enough block
		 * If none exists, you will get to freeptr[1].
		 * freeptr[1].next = &arena[0], so when you do the test,
		 * the result is a large positive number, since arena[0]
		 * comes before all blocks.  Arena[0] is marked busy so
		 * that it will not be compacted.  This kludge is for the
		 * sake of the almighty efficiency.
		 */
		/* check that a very large request won't cause an inf. loop */

		if ((freeptr[1].nextblk-&(freeptr[1])) < nb) {
			return (NULL);
		} else {
			struct header *next;		/* following block */
			struct header *nextnext;	/* block after next */

			blk = freeptr;
			do {
				blk = blk->nextfree;
				/* see if we can compact */
				next = blk->nextblk;
				if (!TESTBUSY(nextnext = next->nextblk)) {
					do {
						DELFREEQ(next);
						next = nextnext;
						nextnext = next->nextblk;
					} while (!TESTBUSY(nextnext));
					/*
					 * next will be at most == to lastblk,
					 * but I think the >= test is faster
					 */
					if (next >= arenaend)
						lastblk = blk;
					blk->nextblk = next;
				}
			} while (((char *)(next) - (char *)blk) < nb);
		}
		/*
		 * if we didn't find a block, get more memory
		 */
		if (blk == &(freeptr[1])) {
			/*
			 * careful coding could likely replace
			 * newend with arenaend
			 */
			struct header *newend;	/* new end of arena */
			ssize_t nget;	/* number of words to get */

			/*
			 * Three cases - 1. There is space between arenaend
			 *		    and the break value that will become
			 *		    a permanently allocated block.
			 *		 2. Case 1 is not true, and the last
			 *		    block is allocated.
			 *		 3. Case 1 is not true, and the last
			 *		    block is free
			 */
			if ((newblk = (struct header *)sbrk(0)) !=
			    (struct header *)((char *)arenaend + HEADSZ)) {
				/* case 1 */
#ifdef debug
				if (case1count++ > 0)
				    (void) write(2, "Case 1 hit more than once."
					" brk or sbrk?\n", 40);
#endif
				/* get size to fetch */
				nget = nb + HEADSZ;
				/* round up to a block */
				nget = (nget + BLOCKSZ - 1)/BLOCKSZ * BLOCKSZ;
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				/* get memory */
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* add to arena */
				newend = (struct header *)((char *)newblk + nget
				    - HEADSZ);
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				newend->nextblk = SETBUSY(&(arena[1]));
/* ???  newblk ?? */
				newblk->nextblk = newend;

				/*
				 * space becomes a permanently allocated block.
				 * This is likely not mt-safe as lock is not
				 * shared with brk or sbrk
				 */
				arenaend->nextblk = SETBUSY(newblk);
				/* adjust other pointers */
				arenaend = newend;
				lastblk = newblk;
				blk = newblk;
			} else if (TESTBUSY(lastblk->nextblk)) {
				/* case 2 */
				nget = (nb + BLOCKSZ - 1) / BLOCKSZ * BLOCKSZ;
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* block must be word aligned */
				assert(((uintptr_t)newblk%ALIGNSZ) == 0);
				/*
				 * stub at old arenaend becomes first word
				 * in blk
				 */
/* ???  	newblk = arenaend; */

				newend =
				    (struct header *)((char *)arenaend+nget);
				newend->nextblk = SETBUSY(&(arena[1]));
				arenaend->nextblk = newend;
				lastblk = blk = arenaend;
				arenaend = newend;
			} else {
				/* case 3 */
				/*
				 * last block in arena is at end of memory and
				 * is free
				 */
				/* 1.7 had this backward without cast */
				nget = nb -
				    ((char *)arenaend - (char *)lastblk);
				nget = (nget + (BLOCKSZ - 1)) /
				    BLOCKSZ * BLOCKSZ;
				assert(((uintptr_t)newblk % ALIGNSZ) == 0);
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* combine with last block, put in arena */
				newend = (struct header *)
				    ((char *)arenaend + nget);
				arenaend = lastblk->nextblk = newend;
				newend->nextblk = SETBUSY(&(arena[1]));
				/* set which block to use */
				blk = lastblk;
				DELFREEQ(blk);
			}
		} else {
			struct header *nblk;	/* next block */

			/* take the block found off the free queue */
			DELFREEQ(blk);
			/*
			 * make head of free queue immediately follow blk,
			 * unless blk was at the end of the queue
			 */
			nblk = blk->nextfree;
			if (nblk != &(freeptr[1])) {
				MOVEHEAD(nblk);
			}
		}
		/* blk now points to an adequate block */
		if (((char *)blk->nextblk - (char *)blk) - nb >= MINBLKSZ) {
			/* carve out the right size block */
			/* newblk will be the remainder */
			newblk = (struct header *)((char *)blk + nb);
			newblk->nextblk = blk->nextblk;
			/* mark the block busy */
			blk->nextblk = SETBUSY(newblk);
			ADDFREEQ(newblk);
			/* if blk was lastblk, make newblk lastblk */
			if (blk == lastblk)
				lastblk = newblk;
		} else {
			/* just mark the block busy */
			blk->nextblk = SETBUSY(blk->nextblk);
		}
	}
	CHECKQ
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) >= nbytes);
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) < nbytes + MINBLKSZ);
	return ((char *)blk + minhead);
}
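
/*
 * The free-queue macros used above (ADDFREEQ, DELFREEQ, MOVEHEAD) come
 * from mallint.h and are not reproduced here.  As a sketch of the
 * presumed idea (hypothetical name and layout), pushing a freed block
 * onto the head of the doubly linked free queue would look roughly
 * like this:
 */
#if 0
#define	EX_ADDFREEQ(blk)	{				\
	(blk)->nextfree = freeptr[0].nextfree;			\
	(blk)->prevfree = &(freeptr[0]);			\
	(blk)->nextfree->prevfree = (blk);			\
	freeptr[0].nextfree = (blk);				\
}
#endif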

/*
 * free(ptr) - free block that user thinks starts at ptr
 *
 *	input - ptr-1 contains the block header.
 *		If the header points forward, we have a normal
 *			block pointing to the next block
 *		if the header points backward, we have a small
 *			block from a holding block.
 *		In both cases, the busy bit must be set
 */

void
free(void *ptr)
{
	(void) mutex_lock(&mlock);
	free_unlocked(ptr);
	(void) mutex_unlock(&mlock);
}

/*
 * free_unlocked(ptr) - Do the real work for free()
 */

static void
free_unlocked(void *ptr)
{
	struct holdblk *holdblk;	/* block holding blk */
	struct holdblk *oldhead;	/* former head of the hold block */
					/* queue containing blk's holder */

	if (ptr == NULL)
		return;
	if (TESTSMAL(((struct header *)((char *)ptr - MINHEAD))->nextblk)) {
		struct lblk	*lblk;	/* pointer to freed block */
		ssize_t		offset;	/* choice of header lists */

		lblk = (struct lblk *)CLRBUSY((char *)ptr - MINHEAD);
		assert((struct header *)lblk < arenaend);
		assert((struct header *)lblk > arena);
		/* allow twits (e.g. awk) to free a block twice */
		holdblk = lblk->header.holder;
		if (!TESTBUSY(holdblk))
			return;
		holdblk = (struct holdblk *)CLRALL(holdblk);
		/* put lblk on its hold block's free list */
		lblk->header.nextfree = SETSMAL(holdblk->lfreeq);
		holdblk->lfreeq = lblk;
		/* move holdblk to head of queue, if it's not already there */
		offset = holdblk->blksz / grain;
		oldhead = holdhead[offset];
		if (oldhead != holdblk) {
			/* first take out of current spot */
			holdhead[offset] = holdblk;
			holdblk->nexthblk->prevhblk = holdblk->prevhblk;
			holdblk->prevhblk->nexthblk = holdblk->nexthblk;
			/* now add at front */
			holdblk->nexthblk = oldhead;
			holdblk->prevhblk = oldhead->prevhblk;
			oldhead->prevhblk = holdblk;
			holdblk->prevhblk->nexthblk = holdblk;
		}
	} else {
		struct header *blk;	/* real start of block */
		struct header *next;	/* next = blk->nextblk */
		struct header *nextnext;	/* block after next */

		blk = (struct header *)((char *)ptr - minhead);
		next = blk->nextblk;
		/* take care of twits (e.g. awk) who return blocks twice */
		if (!TESTBUSY(next))
			return;
		blk->nextblk = next = CLRBUSY(next);
		ADDFREEQ(blk);
		/* see if we can compact */
		if (!TESTBUSY(nextnext = next->nextblk)) {
			do {
				DELFREEQ(next);
				next = nextnext;
			} while (!TESTBUSY(nextnext = next->nextblk));
			if (next == arenaend) lastblk = blk;
			blk->nextblk = next;
		}
	}
	CHECKQ
}


/*
 * realloc(ptr, size) - give the user a block of size "size", with
 *			    the contents pointed to by ptr.  Free ptr.
 */

void *
realloc(void *ptr, size_t size)
{
	void	*retval;

	(void) mutex_lock(&mlock);
	retval = realloc_unlocked(ptr, size);
	(void) mutex_unlock(&mlock);
	return (retval);
}


/*
 * realloc_unlocked(ptr, size) - Do the real work for realloc()
 */

static void *
realloc_unlocked(void *ptr, size_t size)
{
	struct header *blk;	/* block ptr is contained in */
	size_t trusize;	/* block size as allocator sees it */
	char *newptr;			/* pointer to user's new block */
	size_t cpysize;	/* amount to copy */
	struct header *next;	/* block after blk */

	if (ptr == NULL)
		return (malloc_unlocked(size, 0));

	if (size == 0) {
		free_unlocked(ptr);
		return (NULL);
	}

	if (TESTSMAL(((struct lblk *)((char *)ptr - MINHEAD))->
	    header.holder)) {
		/*
		 * we have a special small block which can't be expanded
		 *
		 * This makes the assumption that even if the user is
		 * reallocating a free block, malloc doesn't alter the contents
		 * of small blocks
		 */
		newptr = malloc_unlocked(size, 0);
		if (newptr == NULL)
			return (NULL);
		/* this isn't to save time--it's to protect the twits */
		if ((char *)ptr != newptr) {
			struct lblk *lblk;
			lblk = (struct lblk *)((char *)ptr - MINHEAD);
			cpysize = ((struct holdblk *)
			    CLRALL(lblk->header.holder))->blksz;
			cpysize = (size > cpysize) ? cpysize : size;
			(void) memcpy(newptr, ptr, cpysize);
			free_unlocked(ptr);
		}
	} else {
		blk = (struct header *)((char *)ptr - minhead);
		next = blk->nextblk;
		/*
		 * deal with twits who reallocate free blocks
		 *
		 * if they haven't reset minhead via mallopt, that's
		 * their problem
		 */
		if (!TESTBUSY(next)) {
			DELFREEQ(blk);
			blk->nextblk = SETBUSY(next);
		}
		next = CLRBUSY(next);
		/* make blk as big as possible */
		if (!TESTBUSY(next->nextblk)) {
			do {
				DELFREEQ(next);
				next = next->nextblk;
			} while (!TESTBUSY(next->nextblk));
			blk->nextblk = SETBUSY(next);
			if (next >= arenaend) lastblk = blk;
		}
		/* get size we really need */
		trusize = size+minhead;
		trusize = (trusize + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;
		trusize = (trusize >= MINBLKSZ) ? trusize : MINBLKSZ;
		/* see if we have enough */
		/* this isn't really the copy size, but I need a register */
		cpysize = (char *)next - (char *)blk;
		if (cpysize >= trusize) {
			/* carve out the size we need */
			struct header *newblk;	/* remainder */

			if (cpysize - trusize >= MINBLKSZ) {
				/*
				 * carve out the right size block
				 * newblk will be the remainder
				 */
				newblk = (struct header *)((char *)blk +
				    trusize);
				newblk->nextblk = next;
				blk->nextblk = SETBUSY(newblk);
				/* at this point, next is invalid */
				ADDFREEQ(newblk);
				/* if blk was lastblk, make newblk lastblk */
				if (blk == lastblk)
					lastblk = newblk;
			}
			newptr = ptr;
		} else {
			/* bite the bullet, and call malloc */
			cpysize = (size > cpysize) ? cpysize : size;
			newptr = malloc_unlocked(size, 0);
			if (newptr == NULL)
				return (NULL);
			(void) memcpy(newptr, ptr, cpysize);
			free_unlocked(ptr);
		}
	}
	return (newptr);
}


/* LINTLIBRARY */
/*
 * calloc - allocate and clear memory block
 */

void *
calloc(size_t num, size_t size)
{
	char *mp;

	/* guard against wrap-around in num * size */
	if (size != 0 && num > (size_t)-1 / size)
		return (NULL);
	num *= size;
	mp = malloc(num);
	if (mp == NULL)
		return (NULL);
	(void) memset(mp, 0, num);
	return (mp);
}


/*
 * Mallopt - set options for allocation
 *
 *	Mallopt provides for control over the allocation algorithm.
 *	The cmds available are:
 *
 *	M_MXFAST Set maxfast to value.  Maxfast is the size of the
 *		 largest small, quickly allocated block.  Maxfast
 *		 may be set to 0 to disable fast allocation entirely.
 *
 *	M_NLBLKS Set numlblks to value.  Numlblks is the number of
 *		 small blocks per holding block.  Value must be
 *		 greater than 1.
 *
 *	M_GRAIN  Set grain to value.  The sizes of all blocks
 *		 smaller than maxfast are considered to be rounded
 *		 up to the nearest multiple of grain. The default
 *		 value of grain is the smallest number of bytes
 *		 which will allow alignment of any data type.  Grain
 *		 will be rounded up to a multiple of its default,
 *		 and maxfast will be rounded up to a multiple of
 *		 grain.  Value must be greater than 0.
 *
 *	M_KEEP   Retain data in freed block until the next malloc,
 *		 realloc, or calloc.  Value is ignored.
 *		 This option is provided only for compatibility with
 *		 the old version of malloc, and is not recommended.
 *
 *	returns - 0, upon successful completion
 *		 1, if malloc has previously been called or
 *		    if value or cmd have illegal values
 */

int
_mallopt(int cmd, int value)
{
	/* disallow changes once a small block is allocated */
	(void) mutex_lock(&mlock);
	if (change) {
		(void) mutex_unlock(&mlock);
		return (1);
	}
	switch (cmd) {
	case M_MXFAST:
		if (value < 0) {
			(void) mutex_unlock(&mlock);
			return (1);
		}
		fastct = (value + grain - 1) / grain;
		maxfast = grain*fastct;
		break;
	case M_NLBLKS:
		if (value <= 1) {
			(void) mutex_unlock(&mlock);
			return (1);
		}
		numlblks = value;
		break;
	case M_GRAIN:
		if (value <= 0) {
			(void) mutex_unlock(&mlock);
			return (1);
		}

		/* round grain up to a multiple of ALIGNSZ */
		grain = (value + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;

		/* reduce fastct appropriately */
		fastct = (maxfast + grain - 1) / grain;
		maxfast = grain * fastct;
		break;
	case M_KEEP:
		if (change && holdhead != NULL) {
			(void) mutex_unlock(&mlock);
			return (1);
		}
		minhead = HEADSZ;
		break;
	default:
		(void) mutex_unlock(&mlock);
		return (1);
	}
	(void) mutex_unlock(&mlock);
	return (0);
}
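
/*
 * Usage sketch (illustrative only, kept out of the build): mallopt()
 * succeeds only before the first small block is allocated, so options
 * are normally set once at program startup.
 */
#if 0
static void
mallopt_example(void)
{
	/* serve requests of up to 64 bytes from holding blocks */
	if (mallopt(M_MXFAST, 64) != 0)
		(void) write(2, "mallopt failed\n", 15);
}
#endif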

/*
 * mallinfo - provide information about space usage
 *
 *	output - a structure containing a description of
 *		 space usage, defined in malloc.h
 */

struct mallinfo
_mallinfo(void)
{
	struct header *blk, *next;	/* ptr to ordinary blocks */
	struct holdblk *hblk;		/* ptr to holding blocks */
	struct mallinfo inf;		/* return value */
	int	i;			/* the ubiquitous counter */
	ssize_t size;			/* size of a block */
	ssize_t fsp;			/* free space in 1 hold block */

	(void) mutex_lock(&mlock);
	(void) memset(&inf, 0, sizeof (struct mallinfo));
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return (inf);
	}
	blk = CLRBUSY(arena[1].nextblk);
	/* return total space used */
	inf.arena = (char *)arenaend - (char *)blk;

	/*
	 * loop through arena, counting # of blocks, and
	 * space used by blocks
	 */
	next = CLRBUSY(blk->nextblk);
	while (next != &(arena[1])) {
		inf.ordblks++;
		size = (char *)next - (char *)blk;
		if (TESTBUSY(blk->nextblk)) {
			inf.uordblks += size;
			inf.keepcost += HEADSZ-MINHEAD;
		} else {
			inf.fordblks += size;
		}
		blk = next;
		next = CLRBUSY(blk->nextblk);
	}

	/*
	 * if any holding blocks have been allocated,
	 * examine the space in them
	 */
	if (change && holdhead != NULL) {
		for (i = fastct; i > 0; i--) {	/* loop thru ea. chain */
			hblk = holdhead[i];
			/* do only if chain not empty */
			if (hblk != HGROUND) {
				size = hblk->blksz +
				    sizeof (struct lblk) - sizeof (int);
				do {	/* loop thru 1 hold blk chain */
					inf.hblks++;
					fsp = freespace(hblk);
					inf.fsmblks += fsp;
					inf.usmblks += numlblks*size - fsp;
					inf.smblks += numlblks;
					hblk = hblk->nexthblk;
				} while (hblk != holdhead[i]);
			}
		}
	}
	inf.hblkhd = (inf.smblks / numlblks) * sizeof (struct holdblk);
	/* holding blocks were counted in ordblks, so subtract off */
	inf.ordblks -= inf.hblks;
	inf.uordblks -= inf.hblkhd + inf.usmblks + inf.fsmblks;
	inf.keepcost -= inf.hblks*(HEADSZ - MINHEAD);
	(void) mutex_unlock(&mlock);
	return (inf);
}
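
/*
 * Usage sketch (illustrative only, kept out of the build): the field
 * types below come from the struct mallinfo definition in malloc.h;
 * the casts are just for a portable printf.
 */
#if 0
static void
mallinfo_example(void)
{
	struct mallinfo mi = mallinfo();

	(void) printf("arena %ld used %ld free %ld\n",
	    (long)mi.arena, (long)mi.uordblks, (long)mi.fordblks);
}
#endif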


/*
 * freespace - calc. how much space is unused (free) among the
 *		    small blocks in a given holding block
 *
 *	input - hblk = given holding block
 *
 *	returns the free space in the small blocks of hblk
 */

static ssize_t
freespace(struct holdblk *holdblk)
{
	struct lblk *lblk;
	ssize_t space = 0;
	ssize_t size;
	struct lblk *unused;

	lblk = CLRSMAL(holdblk->lfreeq);
	size = holdblk->blksz + sizeof (struct lblk) - sizeof (int);
	unused = CLRSMAL(holdblk->unused);
	/* follow free chain */
	while ((lblk != LGROUND) && (lblk != unused)) {
		space += size;
		lblk = CLRSMAL(lblk->header.nextfree);
	}
	space += ((char *)holdblk + HOLDSZ(size)) - (char *)unused;
	return (space);
}

static void *
morecore(size_t bytes)
{
	void *ret;

	if (bytes > LONG_MAX) {
		intptr_t wad;
		/*
		 * The request size is too big.  We need to do this in
		 * chunks, since sbrk only takes a signed long (intptr_t)
		 * argument.
		 */
		if (bytes == ULONG_MAX)
			return ((void *)-1);

		ret = sbrk(0);
		wad = LONG_MAX;
		while (wad > 0) {
			if (sbrk(wad) == (void *)-1) {
				if (ret != sbrk(0))
					(void) sbrk(-LONG_MAX);
				return ((void *)-1);
			}
			bytes -= LONG_MAX;
			wad = bytes;
		}
	} else
		ret = sbrk(bytes);

	return (ret);
}

#ifdef debug
int
check_arena(void)
{
	struct header *blk, *prev, *next;	/* ptr to ordinary blocks */

	(void) mutex_lock(&mlock);
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return (-1);
	}
	blk = arena + 1;

	/* loop through arena, checking */
	blk = (struct header *)CLRALL(blk->nextblk);
	next = (struct header *)CLRALL(blk->nextblk);
	while (next != arena + 1) {
		assert(blk >= arena + 1);
		assert(blk <= lastblk);
		assert(next >= blk + 1);
		assert(((uintptr_t)((struct header *)blk->nextblk) &
		    (4 | SMAL)) == 0);

		if (TESTBUSY(blk->nextblk) == 0) {
			assert(blk->nextfree >= freeptr);
			assert(blk->prevfree >= freeptr);
			assert(blk->nextfree <= lastblk);
			assert(blk->prevfree <= lastblk);
			assert(((uintptr_t)((struct header *)blk->nextfree) &
			    7) == 0);
			assert(((uintptr_t)((struct header *)blk->prevfree) &
			    7) == 0 || blk->prevfree == freeptr);
		}
		blk = next;
		next = CLRBUSY(blk->nextblk);
	}
	(void) mutex_unlock(&mlock);
	return (0);
}

#define	RSTALLOC	1
#endif

#ifdef RSTALLOC
/*
 * rstalloc - reset alloc routines
 *
 *	description -	return allocated memory and reset
 *			allocation pointers.
 *
 *	Warning - This is for debugging purposes only.
 *		  It will return all memory allocated after
 *		  the first call to malloc, even if some
 *		  of it was fetched by a user's sbrk().
 */

void
rstalloc(void)
{
	(void) mutex_lock(&mlock);
	minhead = MINHEAD;
	grain = ALIGNSZ;
	numlblks = NUMLBLKS;
	fastct = FASTCT;
	maxfast = MAXFAST;
	change = 0;
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return;
	}
	(void) brk(CLRBUSY(arena[1].nextblk));
	freeptr[0].nextfree = GROUND;
#ifdef debug
	case1count = 0;
#endif
	(void) mutex_unlock(&mlock);
}
#endif	/* RSTALLOC */

/*
 * cfree is an undocumented, obsolete function
 */

/* ARGSUSED */
void
_cfree(char *p, unsigned num, unsigned size)
{
	free(p);
}