xref: /titanic_44/usr/src/lib/libmalloc/common/malloc.c (revision f48205be61a214698b763ff550ab9e657525104c)
/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2007 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved  	*/

#pragma ident	"%Z%%M%	%I%	%E% SMI"

#include <c_synonyms.h>
#include <sys/types.h>

#ifndef debug
#define	NDEBUG
#endif

#include <stdlib.h>
#include <string.h>
#include "assert.h"
#include "malloc.h"
#include "mallint.h"
#include <thread.h>
#include <pthread.h>
#include <synch.h>
#include <unistd.h>
#include <limits.h>

static mutex_t mlock = DEFAULTMUTEX;
static ssize_t freespace(struct holdblk *);
static void *malloc_unlocked(size_t, int);
static void *realloc_unlocked(void *, size_t);
static void free_unlocked(void *);
static void *morecore(size_t);

/*
 * user level memory allocator (malloc, free, realloc)
 *
 *	-malloc, free, realloc and mallopt form a memory allocator
 *	similar to malloc, free, and realloc.  The routines
 *	here are much faster than the original, with slightly worse
 *	space usage (a few percent difference on most input).  They
 *	do not have the property that data in freed blocks is left
 *	untouched until the space is reallocated.
 *
 *	-Memory is kept in the "arena", a singly linked list of blocks.
 *	These blocks are of 3 types.
 *		1. A free block.  This is a block not in use by the
 *		   user.  It has a 3 word header. (See description
 *		   of the free queue.)
 *		2. An allocated block.  This is a block the user has
 *		   requested.  It has only a 1 word header, pointing
 *		   to the next block of any sort.
 *		3. A permanently allocated block.  This covers space
 *		   acquired by the user directly through sbrk().  It
 *		   has a 1 word header, as does 2.
 *	Blocks of type 1 have the lower bit of the pointer to the
 *	next block = 0.  Blocks of type 2 and 3 have that bit set,
 *	to mark them busy.
 *
 *	-Unallocated blocks are kept on an unsorted doubly linked
 *	free list.
 *
 *	-Memory is allocated in blocks, with sizes specified by the
 *	user.  A circular first-fit strategy is used, with a roving
 *	head of the free queue, which prevents bunching of small
 *	blocks at the head of the queue.
 *
 *	-Compaction is performed at free time of any blocks immediately
 *	following the freed block.  The freed block will be combined
 *	with a preceding block during the search phase of malloc.
 *	Since a freed block is added at the front of the free queue,
 *	which is moved to the end of the queue if considered and
 *	rejected during the search, fragmentation only occurs if
 *	a block with a contiguous preceding block that is free is
 *	freed and reallocated on the next call to malloc.  The
 *	time savings of this strategy is judged to be worth the
 *	occasional waste of memory.
 *
 *	-Small blocks (of size < MAXSIZE) are not allocated directly.
 *	A large "holding" block is allocated via a recursive call to
 *	malloc.  This block contains a header and ?????? small blocks.
 *	Holding blocks for a given size of small block (rounded to the
 *	nearest ALIGNSZ bytes) are kept on a queue with the property that any
 *	holding block with an unused small block is in front of any without.
 *	A list of free blocks is kept within the holding block.
 */
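
/*
 * As an illustration of the tagging scheme described above: the real
 * macros live in mallint.h, but they are conventionally of the following
 * shape (shown here as an assumption for exposition, not a copy of the
 * actual definitions):
 *
 *	#define	BUSY		1L
 *	#define	SETBUSY(p)	((struct header *)((uintptr_t)(p) | BUSY))
 *	#define	TESTBUSY(p)	((uintptr_t)(p) & BUSY)
 *	#define	CLRBUSY(p)	((struct header *)((uintptr_t)(p) & ~BUSY))
 *
 * This works because headers are ALIGNSZ-aligned, so the low bits of a
 * genuine block pointer are always zero and are free to carry flags.
 */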

/*
 *	description of arena, free queue, holding blocks etc.
 *
 * The compiler and linker do not guarantee the order of initialized data.
 * Define freeptr as arena[2-3] to guarantee it follows arena in memory.
 * Later code depends on this order.
 */
117 
118 static struct header arena[4] = {
119 	    {0, 0, 0},
120 	    {0, 0, 0},
121 	    {0, 0, 0},
122 	    {0, 0, 0}
123 	};
124 				/*
125 				 * the second word is a minimal block to
126 				 * start the arena. The first is a busy
127 				 * block to be pointed to by the last block.
128 				 */
129 
130 #define	freeptr (arena + 2)
131 				/* first and last entry in free list */
132 static struct header *arenaend;	/* ptr to block marking high end of arena */
133 static struct header *lastblk;	/* the highest block in the arena */
134 static struct holdblk **holdhead;   /* pointer to array of head pointers */
135 				    /* to holding block chains */
/*
 * In order to save time calculating indices, the array is 1 too
 * large, and the first element is unused
 *
 * Variables controlling the algorithm, esp. how holding blocks are used
 */
static int numlblks = NUMLBLKS;
static int minhead = MINHEAD;
static int change = 0;	/* != 0, once param changes are no longer allowed */
static int fastct = FASTCT;
static unsigned int maxfast = MAXFAST;

/* number of small block sizes to map to one size */
static int grain = ALIGNSZ;

#ifdef debug
static int case1count = 0;

static void
checkq(void)
{
	register struct header *p;

	p = &freeptr[0];

	/* check forward */
	/*CSTYLED*/
	while (p != &freeptr[1]) {
		p = p->nextfree;
		assert(p->prevfree->nextfree == p);
	}

	/* check backward */
	/*CSTYLED*/
	while (p != &freeptr[0]) {
		p = p->prevfree;
		assert(p->nextfree->prevfree == p);
	}
}
#endif


/*
 * malloc(nbytes) - give a user nbytes to use
 */

void *
malloc(size_t nbytes)
{
	void *ret;

	(void) mutex_lock(&mlock);
	ret = malloc_unlocked(nbytes, 0);
	(void) mutex_unlock(&mlock);
	return (ret);
}

/*
 * Use malloc_unlocked() to get the address to start with; given this
 * address, find the closest address that satisfies the alignment request
 * and return that address after doing some housekeeping (refer to the
 * ASCII art below).
 */
void *
memalign(size_t alignment, size_t size)
{
	void *alloc_buf;
	struct header *hd;
	size_t alloc_size;
	uintptr_t fr;
	static int realloc;

	if (size == 0 || alignment == 0 ||
	    (alignment & (alignment - 1)) != 0) {
		return (NULL);
	}
	if (alignment <= ALIGNSZ)
		return (malloc(size));

	alloc_size = size + alignment;
	if (alloc_size < size) { /* overflow */
		return (NULL);
	}

	(void) mutex_lock(&mlock);
	alloc_buf = malloc_unlocked(alloc_size, 1);
	(void) mutex_unlock(&mlock);

	if (alloc_buf == NULL)
		return (NULL);
	fr = (uintptr_t)alloc_buf;

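	/*
	 * Round fr up to the next multiple of alignment.  Worked example
	 * (hypothetical values): alignment 64 and fr 0x1005 give
	 * (0x1005 + 63) / 64 * 64 == 0x1040.
	 */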
	fr = (fr + alignment - 1) / alignment * alignment;

	if (fr == (uintptr_t)alloc_buf)
		return (alloc_buf);

	if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
		/*
		 * We hit an edge case: the space ahead of the aligned
		 * address is not sufficient to hold a header and hence
		 * we can't free it.  So retry with twice the alignment
		 * slack.
		 */
		realloc++;
		free(alloc_buf);
		alloc_size = size + alignment*2;
		if (alloc_size < size) {
			return (NULL);
		}

		(void) mutex_lock(&mlock);
		alloc_buf = malloc_unlocked(alloc_size, 1);
		(void) mutex_unlock(&mlock);

		if (alloc_buf == NULL)
			return (NULL);
		fr = (uintptr_t)alloc_buf;

		fr = (fr + alignment - 1) / alignment * alignment;
		if (fr == (uintptr_t)alloc_buf)
			return (alloc_buf);
		if ((fr - (uintptr_t)alloc_buf) <= HEADSZ) {
			fr = fr + alignment;
		}
	}

	/*
	 *	+-------+		+-------+
	 *  +---| <a>   |		| <a>   |--+
	 *  |   +-------+<--alloc_buf-->+-------+  |
	 *  |   |	|		|	|  |
	 *  |   |	|		|	|  |
	 *  |   |	|		|	|  |
	 *  |   |	|	 hd-->  +-------+  |
	 *  |   |	|	    +---|  <b>  |<-+
	 *  |   |	|	    |   +-------+<--- fr
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   |	|	    |   |	|
	 *  |   +-------+	    |   +-------+
	 *  +-->|  next |	    +-->|  next |
	 *	+-------+		+-------+
	 *
	 */
	hd = (struct header *)((char *)fr - minhead);
	(void) mutex_lock(&mlock);
	hd->nextblk = ((struct header *)((char *)alloc_buf - minhead))->nextblk;
	((struct header *)((char *)alloc_buf - minhead))->nextblk = SETBUSY(hd);
	(void) mutex_unlock(&mlock);
	free(alloc_buf);
	CHECKQ
	return ((void *)fr);
}
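
/*
 * Usage sketch for the above: a call such as memalign(64, 100) (values
 * hypothetical) over-allocates 100 + 64 bytes, rounds the returned
 * address up to the next 64-byte boundary, splices a busy header in
 * just below that boundary (block <b> in the diagram), and frees the
 * leading fragment <a>.
 */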

void *
valloc(size_t size)
{
	static unsigned pagesize;

	if (size == 0)
		return (NULL);

	if (!pagesize)
		pagesize = sysconf(_SC_PAGESIZE);

	return (memalign(pagesize, size));
}

/*
 * malloc_unlocked(nbytes, nosmall) - Do the real work for malloc
 */

static void *
malloc_unlocked(size_t nbytes, int nosmall)
{
	struct header *blk;
	size_t nb;	/* size of entire block we need */

	/* on first call, initialize */
	if (freeptr[0].nextfree == GROUND) {
		/* initialize arena */
		arena[1].nextblk = (struct header *)BUSY;
		arena[0].nextblk = (struct header *)BUSY;
		lastblk = arenaend = &(arena[1]);
		/* initialize free queue */
		freeptr[0].nextfree = &(freeptr[1]);
		freeptr[1].nextblk = &(arena[0]);
		freeptr[1].prevfree = &(freeptr[0]);
		/* mark that small blocks not init yet */
	}
	if (nbytes == 0)
		return (NULL);

	if (nbytes <= maxfast && !nosmall) {
		/*
		 * We can allocate out of a holding block
		 */
		struct holdblk *holdblk; /* head of right sized queue */
		struct lblk *lblk;	 /* pointer to a little block */
		struct holdblk *newhold;

		if (!change) {
			int i;
			/*
			 * This allocates space for hold block
			 * pointers by calling malloc recursively.
			 * Maxfast is temporarily set to 0, to
			 * avoid infinite recursion.  Allocate
			 * space for an extra ptr so that an index
			 * is just ->blksz/grain, with the first
			 * ptr unused.
			 */
			change = 1;	/* change to algorithm params */
					/* no longer allowed */
			/*
			 * temporarily alter maxfast, to avoid
			 * infinite recursion
			 */
			maxfast = 0;
			holdhead = (struct holdblk **)
			    malloc_unlocked(sizeof (struct holdblk *) *
			    (fastct + 1), 0);
			if (holdhead == NULL)
				return (malloc_unlocked(nbytes, 0));
			for (i = 1; i <= fastct; i++) {
				holdhead[i] = HGROUND;
			}
			maxfast = fastct * grain;
		}
		/*
		 * Note that this uses the absolute min header size (MINHEAD)
		 * unlike the large block case which uses minhead
		 *
		 * round up to nearest multiple of grain
		 * code assumes grain is a multiple of MINHEAD
		 */
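		/*
		 * Worked example (hypothetical values): with grain 16, a
		 * 10 byte request rounds nb up to 16 and selects the queue
		 * at holdhead[16 / 16] == holdhead[1]; nb then grows by
		 * MINHEAD to cover the little block's header.
		 */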
		/* round up to grain */
		nb = (nbytes + grain - 1) / grain * grain;
		holdblk = holdhead[nb / grain];
		nb = nb + MINHEAD;
		/*
		 * look for space in the holding block.  Blocks with
		 * space will be in front of those without
		 */
		if ((holdblk != HGROUND) && (holdblk->lfreeq != LGROUND)) {
			/* there is space */
			lblk = holdblk->lfreeq;

			/*
			 * Now make lfreeq point to a free block.
			 * If lblk has been previously allocated and
			 * freed, it has a valid pointer to use.
			 * Otherwise, lblk is at the beginning of
			 * the unallocated blocks at the end of
			 * the holding block, so, if there is room, take
			 * the next space.  If not, mark holdblk full,
			 * and move holdblk to the end of the queue
			 */
			if (lblk < holdblk->unused) {
				/* move to next holdblk, if this one full */
				if ((holdblk->lfreeq =
				    CLRSMAL(lblk->header.nextfree)) ==
				    LGROUND) {
					holdhead[(nb-MINHEAD) / grain] =
					    holdblk->nexthblk;
				}
			} else if (((char *)holdblk->unused + nb) <
			    ((char *)holdblk + HOLDSZ(nb))) {
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused+nb);
				holdblk->lfreeq = holdblk->unused;
			} else {
				holdblk->unused = (struct lblk *)
				    ((char *)holdblk->unused+nb);
				holdblk->lfreeq = LGROUND;
				holdhead[(nb-MINHEAD)/grain] =
				    holdblk->nexthblk;
			}
			/* mark as busy and small */
			lblk->header.holder = (struct holdblk *)SETALL(holdblk);
		} else {
			/* we need a new holding block */
			newhold = (struct holdblk *)
			    malloc_unlocked(HOLDSZ(nb), 0);
			if (newhold == NULL) {
				return (NULL);
			}
			/* add to head of free queue */
			if (holdblk != HGROUND) {
				newhold->nexthblk = holdblk;
				newhold->prevhblk = holdblk->prevhblk;
				holdblk->prevhblk = newhold;
				newhold->prevhblk->nexthblk = newhold;
			} else {
				newhold->nexthblk = newhold->prevhblk = newhold;
			}
			holdhead[(nb-MINHEAD)/grain] = newhold;
			/* set up newhold */
			lblk = (struct lblk *)(newhold->space);
			newhold->lfreeq = newhold->unused =
			    (struct lblk *)((char *)newhold->space+nb);
			lblk->header.holder = (struct holdblk *)SETALL(newhold);
			newhold->blksz = nb-MINHEAD;
		}
#ifdef debug
		assert(((struct holdblk *)CLRALL(lblk->header.holder))->blksz >=
		    nbytes);
#endif /* debug */
		return ((char *)lblk + MINHEAD);
	} else {
		/*
		 * We need an ordinary block
		 */
		struct header *newblk;	/* used for creating a block */

		/* get number of bytes we need */
		nb = nbytes + minhead;
		nb = (nb + ALIGNSZ - 1) / ALIGNSZ * ALIGNSZ;	/* align */
		nb = (nb > MINBLKSZ) ? nb : MINBLKSZ;
		/*
		 * see if there is a big enough block
		 * If none exists, you will get to freeptr[1].
		 * freeptr[1].next = &arena[0], so when you do the test,
		 * the result is a large positive number, since arena[0]
		 * comes before all blocks.  Arena[0] is marked busy so
		 * that it will not be compacted.  This kludge is for the
		 * sake of the almighty efficiency.
		 */
		/* check that a very large request won't cause an inf. loop */

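		/*
		 * Note: freeptr[1].nextblk is &arena[0], which lies below
		 * freeptr in memory, so the subtraction below is negative;
		 * compared against the unsigned nb it becomes a huge
		 * positive value.  The guard therefore rejects only
		 * absurdly large requests, while ensuring the search loop
		 * below always terminates at freeptr[1].
		 */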
		if ((freeptr[1].nextblk - &(freeptr[1])) < nb) {
			return (NULL);
		} else {
			struct header *next;		/* following block */
			struct header *nextnext;	/* block after next */

			blk = freeptr;
			do {
				blk = blk->nextfree;
				/* see if we can compact */
				next = blk->nextblk;
				if (!TESTBUSY(nextnext = next->nextblk)) {
					do {
						DELFREEQ(next);
						next = nextnext;
						nextnext = next->nextblk;
					} while (!TESTBUSY(nextnext));
					/*
					 * next will be at most == to lastblk,
					 * but I think the >= test is faster
					 */
					if (next >= arenaend)
						lastblk = blk;
					blk->nextblk = next;
				}
			} while (((char *)(next) - (char *)blk) < nb);
		}
		/*
		 * if we didn't find a block, get more memory
		 */
		if (blk == &(freeptr[1])) {
			/*
			 * careful coding could likely replace
			 * newend with arenaend
			 */
			struct header *newend;	/* new end of arena */
			ssize_t nget;	/* number of words to get */

			/*
			 * Three cases - 1. There is space between arenaend
			 *		    and the break value that will become
			 *		    a permanently allocated block.
			 *		 2. Case 1 is not true, and the last
			 *		    block is allocated.
			 *		 3. Case 1 is not true, and the last
			 *		    block is free
			 */
			if ((newblk = (struct header *)sbrk(0)) !=
			    (struct header *)((char *)arenaend + HEADSZ)) {
				/* case 1 */
#ifdef debug
				if (case1count++ > 0)
				    (void) write(2, "Case 1 hit more than once."
					" brk or sbrk?\n", 40);
#endif
				/* get size to fetch */
				nget = nb + HEADSZ;
				/* round up to a block */
				nget = (nget + BLOCKSZ - 1)/BLOCKSZ * BLOCKSZ;
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				/* get memory */
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* add to arena */
				newend = (struct header *)((char *)newblk + nget
				    - HEADSZ);
				assert((uintptr_t)newblk % ALIGNSZ == 0);
				newend->nextblk = SETBUSY(&(arena[1]));
/* ???  newblk ?? */
				newblk->nextblk = newend;

				/*
				 * space becomes a permanently allocated block.
				 * This is likely not mt-safe as lock is not
				 * shared with brk or sbrk
				 */
				arenaend->nextblk = SETBUSY(newblk);
				/* adjust other pointers */
				arenaend = newend;
				lastblk = newblk;
				blk = newblk;
			} else if (TESTBUSY(lastblk->nextblk)) {
				/* case 2 */
				nget = (nb + BLOCKSZ - 1) / BLOCKSZ * BLOCKSZ;
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* block must be word aligned */
				assert(((uintptr_t)newblk%ALIGNSZ) == 0);
				/*
				 * stub at old arenaend becomes first word
				 * in blk
				 */
/* ???  	newblk = arenaend; */

				newend =
				    (struct header *)((char *)arenaend+nget);
				newend->nextblk = SETBUSY(&(arena[1]));
				arenaend->nextblk = newend;
				lastblk = blk = arenaend;
				arenaend = newend;
			} else {
				/* case 3 */
				/*
				 * last block in arena is at end of memory and
				 * is free
				 */
				/* 1.7 had this backward without cast */
				nget = nb -
				    ((char *)arenaend - (char *)lastblk);
				nget = (nget + (BLOCKSZ - 1)) /
				    BLOCKSZ * BLOCKSZ;
				assert(((uintptr_t)newblk % ALIGNSZ) == 0);
				if (morecore(nget) == (void *)-1)
					return (NULL);
				/* combine with last block, put in arena */
				newend = (struct header *)
				    ((char *)arenaend + nget);
				arenaend = lastblk->nextblk = newend;
				newend->nextblk = SETBUSY(&(arena[1]));
				/* set which block to use */
				blk = lastblk;
				DELFREEQ(blk);
			}
		} else {
			struct header *nblk;	/* next block */

			/* take block found off free queue */
			DELFREEQ(blk);
			/*
			 * make head of free queue immediately follow blk,
			 * unless blk was at the end of the queue
			 */
			nblk = blk->nextfree;
			if (nblk != &(freeptr[1])) {
				MOVEHEAD(nblk);
			}
		}
		/* blk now points to an adequate block */
		if (((char *)blk->nextblk - (char *)blk) - nb >= MINBLKSZ) {
			/* carve out the right size block */
			/* newblk will be the remainder */
			newblk = (struct header *)((char *)blk + nb);
			newblk->nextblk = blk->nextblk;
			/* mark the block busy */
			blk->nextblk = SETBUSY(newblk);
			ADDFREEQ(newblk);
			/* if blk was lastblk, make newblk lastblk */
			if (blk == lastblk)
				lastblk = newblk;
		} else {
			/* just mark the block busy */
			blk->nextblk = SETBUSY(blk->nextblk);
		}
	}
	CHECKQ
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) >= nbytes);
	assert((char *)CLRALL(blk->nextblk) -
	    ((char *)blk + minhead) < nbytes + MINBLKSZ);
	return ((char *)blk + minhead);
}
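
/*
 * Layout sketch of a holding block as manipulated above.  The fields
 * shown follow the usage in this file; the authoritative definition of
 * struct holdblk is in mallint.h:
 *
 *	+---------------------------------------------+
 *	| nexthblk, prevhblk  (doubly linked queue)    |
 *	| lfreeq              (freed little blocks)    |
 *	| unused              (never-used tail)        |
 *	| blksz               (data bytes per block)   |
 *	+---------------------------------------------+
 *	| space: little blocks, each a MINHEAD header  |
 *	|        followed by blksz bytes of data       |
 *	+---------------------------------------------+
 *
 * lfreeq heads the chain of freed little blocks; unused marks the start
 * of the portion of space that has never been handed out.
 */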

/*
 * free(ptr) - free block that user thinks starts at ptr
 *
 *	input - ptr-1 contains the block header.
 *		If the header points forward, we have a normal
 *			block pointing to the next block
 *		if the header points backward, we have a small
 *			block from a holding block.
 *		In both cases, the busy bit must be set
 */

void
free(void *ptr)
{
	(void) mutex_lock(&mlock);
	free_unlocked(ptr);
	(void) mutex_unlock(&mlock);
}

/*
 * free_unlocked(ptr) - Do the real work for free()
 */

static void
free_unlocked(void *ptr)
{
	struct holdblk *holdblk;	/* block holding blk */
	struct holdblk *oldhead;	/* former head of the hold block */
					/* queue containing blk's holder */

	if (ptr == NULL)
		return;
	if (TESTSMAL(((struct header *)((char *)ptr - MINHEAD))->nextblk)) {
		struct lblk	*lblk;	/* pointer to freed block */
		ssize_t		offset;	/* choice of header lists */

		lblk = (struct lblk *)CLRBUSY((char *)ptr - MINHEAD);
		assert((struct header *)lblk < arenaend);
		assert((struct header *)lblk > arena);
		/* allow twits (e.g. awk) to free a block twice */
		holdblk = lblk->header.holder;
		if (!TESTBUSY(holdblk))
			return;
		holdblk = (struct holdblk *)CLRALL(holdblk);
		/* put lblk on its hold block's free list */
		lblk->header.nextfree = SETSMAL(holdblk->lfreeq);
		holdblk->lfreeq = lblk;
		/* move holdblk to head of queue, if it's not already there */
		offset = holdblk->blksz / grain;
		oldhead = holdhead[offset];
		if (oldhead != holdblk) {
			/* first take out of current spot */
			holdhead[offset] = holdblk;
			holdblk->nexthblk->prevhblk = holdblk->prevhblk;
			holdblk->prevhblk->nexthblk = holdblk->nexthblk;
			/* now add at front */
			holdblk->nexthblk = oldhead;
			holdblk->prevhblk = oldhead->prevhblk;
			oldhead->prevhblk = holdblk;
			holdblk->prevhblk->nexthblk = holdblk;
		}
	} else {
		struct header *blk;	/* real start of block */
		struct header *next;	/* next = blk->nextblk */
		struct header *nextnext;	/* block after next */

		blk = (struct header *)((char *)ptr - minhead);
		next = blk->nextblk;
		/* take care of twits (e.g. awk) who return blocks twice */
		if (!TESTBUSY(next))
			return;
		blk->nextblk = next = CLRBUSY(next);
		ADDFREEQ(blk);
		/* see if we can compact */
		if (!TESTBUSY(nextnext = next->nextblk)) {
			do {
				DELFREEQ(next);
				next = nextnext;
			} while (!TESTBUSY(nextnext = next->nextblk));
			if (next == arenaend)
				lastblk = blk;
			blk->nextblk = next;
		}
	}
	CHECKQ
}


/*
 * realloc(ptr, size) - give the user a block of size "size", with
 *			    the contents pointed to by ptr.  Free ptr.
 */

void *
realloc(void *ptr, size_t size)
{
	void	*retval;

	(void) mutex_lock(&mlock);
	retval = realloc_unlocked(ptr, size);
	(void) mutex_unlock(&mlock);
	return (retval);
}

/*
 * realloc_unlocked(ptr, size) - Do the real work for realloc()
 */

static void *
realloc_unlocked(void *ptr, size_t size)
{
	struct header *blk;	/* block ptr is contained in */
	size_t trusize;	/* block size as allocator sees it */
	char *newptr;			/* pointer to user's new block */
	size_t cpysize;	/* amount to copy */
	struct header *next;	/* block after blk */

	if (ptr == NULL)
		return (malloc_unlocked(size, 0));

	if (size == 0) {
		free_unlocked(ptr);
		return (NULL);
	}

	if (TESTSMAL(((struct lblk *)((char *)ptr - MINHEAD))->
	    header.holder)) {
		/*
		 * we have a special small block which can't be expanded
		 *
		 * This makes the assumption that even if the user is
		 * reallocating a free block, malloc doesn't alter the contents
		 * of small blocks
		 */
		newptr = malloc_unlocked(size, 0);
		if (newptr == NULL)
			return (NULL);
		/* this isn't to save time; it's to protect the twits */
		if ((char *)ptr != newptr) {
			struct lblk *lblk;

			lblk = (struct lblk *)((char *)ptr - MINHEAD);
			cpysize = ((struct holdblk *)
			    CLRALL(lblk->header.holder))->blksz;
			cpysize = (size > cpysize) ? cpysize : size;
			(void) memcpy(newptr, ptr, cpysize);
			free_unlocked(ptr);
		}
	} else {
		blk = (struct header *)((char *)ptr - minhead);
		next = blk->nextblk;
		/*
		 * deal with twits who reallocate free blocks
		 *
		 * if they haven't reset minblk via mallopt, that's
		 * their problem
		 */
		if (!TESTBUSY(next)) {
			DELFREEQ(blk);
			blk->nextblk = SETBUSY(next);
		}
		next = CLRBUSY(next);
		/* make blk as big as possible */
		if (!TESTBUSY(next->nextblk)) {
			do {
				DELFREEQ(next);
				next = next->nextblk;
			} while (!TESTBUSY(next->nextblk));
			blk->nextblk = SETBUSY(next);
			if (next >= arenaend)
				lastblk = blk;
		}
		/* get size we really need */
		trusize = size+minhead;
		trusize = (trusize + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;
		trusize = (trusize >= MINBLKSZ) ? trusize : MINBLKSZ;
		/* see if we have enough */
		/* this isn't really the copy size, but I need a register */
		cpysize = (char *)next - (char *)blk;
		if (cpysize >= trusize) {
			/* carve out the size we need */
			struct header *newblk;	/* remainder */

			if (cpysize - trusize >= MINBLKSZ) {
				/*
				 * carve out the right size block
				 * newblk will be the remainder
				 */
				newblk = (struct header *)((char *)blk +
				    trusize);
				newblk->nextblk = next;
				blk->nextblk = SETBUSY(newblk);
				/* at this point, next is invalid */
				ADDFREEQ(newblk);
				/* if blk was lastblk, make newblk lastblk */
				if (blk == lastblk)
					lastblk = newblk;
			}
			newptr = ptr;
		} else {
			/* bite the bullet, and call malloc */
			cpysize = (size > cpysize) ? cpysize : size;
			newptr = malloc_unlocked(size, 0);
			if (newptr == NULL)
				return (NULL);
			(void) memcpy(newptr, ptr, cpysize);
			free_unlocked(ptr);
		}
	}
	return (newptr);
}


/* LINTLIBRARY */
/*
 * calloc - allocate and clear memory block
 */

void *
calloc(size_t num, size_t size)
{
	char *mp;

	/* guard against multiplication overflow in num * size */
	if (size != 0 && num > ULONG_MAX / size)
		return (NULL);
	num *= size;
	mp = malloc(num);
	if (mp == NULL)
		return (NULL);
	(void) memset(mp, 0, num);
	return (mp);
}


/*
 * Mallopt - set options for allocation
 *
 *	Mallopt provides for control over the allocation algorithm.
 *	The cmds available are:
 *
 *	M_MXFAST Set maxfast to value.  Maxfast is the size of the
 *		 largest small, quickly allocated block.  Maxfast
 *		 may be set to 0 to disable fast allocation entirely.
 *
 *	M_NLBLKS Set numlblks to value.  Numlblks is the number of
 *		 small blocks per holding block.  Value must be
 *		 greater than 1.
 *
 *	M_GRAIN  Set grain to value.  The sizes of all blocks
 *		 smaller than maxfast are considered to be rounded
 *		 up to the nearest multiple of grain. The default
 *		 value of grain is the smallest number of bytes
 *		 which will allow alignment of any data type.  Grain
 *		 will be rounded up to a multiple of its default,
 *		 and maxfast will be rounded up to a multiple of
 *		 grain.  Value must be greater than 0.
 *
 *	M_KEEP   Retain data in freed block until the next malloc,
 *		 realloc, or calloc.  Value is ignored.
 *		 This option is provided only for compatibility with
 *		 the old version of malloc, and is not recommended.
 *
 *	returns - 0, upon successful completion
 *		 1, if malloc has previously been called or
 *		    if value or cmd have illegal values
 */

int
mallopt(int cmd, int value)
{
	/* disallow changes once a small block is allocated */
	(void) mutex_lock(&mlock);
	if (change) {
		(void) mutex_unlock(&mlock);
		return (1);
	}
	switch (cmd) {
	case M_MXFAST:
		if (value < 0) {
			(void) mutex_unlock(&mlock);
			return (1);
		}
		fastct = (value + grain - 1) / grain;
		maxfast = grain*fastct;
		break;
	case M_NLBLKS:
		if (value <= 1) {
			(void) mutex_unlock(&mlock);
			return (1);
		}
		numlblks = value;
		break;
	case M_GRAIN:
		if (value <= 0) {
			(void) mutex_unlock(&mlock);
			return (1);
		}

		/* round grain up to a multiple of ALIGNSZ */
		grain = (value + ALIGNSZ - 1)/ALIGNSZ*ALIGNSZ;

		/* reduce fastct appropriately */
		fastct = (maxfast + grain - 1) / grain;
		maxfast = grain * fastct;
		break;
	case M_KEEP:
		if (change && holdhead != NULL) {
			(void) mutex_unlock(&mlock);
			return (1);
		}
		minhead = HEADSZ;
		break;
	default:
		(void) mutex_unlock(&mlock);
		return (1);
	}
	(void) mutex_unlock(&mlock);
	return (0);
}
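
/*
 * Usage sketch: tuning must happen before the first allocation that
 * touches the small-block machinery, since that sets `change' and locks
 * the parameters down.
 *
 *	if (mallopt(M_MXFAST, 64) != 0)
 *		(void) write(2, "mallopt too late\n", 17);
 */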

/*
 * mallinfo - provide information about space usage
 *
 *	input - none (the function takes no arguments)
 *
 *	output - a structure containing a description
 *		 of space usage, defined in malloc.h
 */

struct mallinfo
mallinfo(void)
{
	struct header *blk, *next;	/* ptr to ordinary blocks */
	struct holdblk *hblk;		/* ptr to holding blocks */
	struct mallinfo inf;		/* return value */
	int	i;			/* the ubiquitous counter */
	ssize_t size;			/* size of a block */
	ssize_t fsp;			/* free space in 1 hold block */

	(void) mutex_lock(&mlock);
	(void) memset(&inf, 0, sizeof (struct mallinfo));
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return (inf);
	}
	blk = CLRBUSY(arena[1].nextblk);
	/* return total space used */
	inf.arena = (char *)arenaend - (char *)blk;

	/*
	 * loop through arena, counting # of blocks, and
	 * space used by blocks
	 */
	next = CLRBUSY(blk->nextblk);
	while (next != &(arena[1])) {
		inf.ordblks++;
		size = (char *)next - (char *)blk;
		if (TESTBUSY(blk->nextblk)) {
			inf.uordblks += size;
			inf.keepcost += HEADSZ-MINHEAD;
		} else {
			inf.fordblks += size;
		}
		blk = next;
		next = CLRBUSY(blk->nextblk);
	}

	/*
	 * if any holding blocks have been allocated
	 * then examine space in holding blks
	 */
	if (change && holdhead != NULL) {
		for (i = fastct; i > 0; i--) {	/* loop thru ea. chain */
			hblk = holdhead[i];
			/* do only if chain not empty */
			if (hblk != HGROUND) {
				size = hblk->blksz +
				    sizeof (struct lblk) - sizeof (int);
				do {	/* loop thru 1 hold blk chain */
					inf.hblks++;
					fsp = freespace(hblk);
					inf.fsmblks += fsp;
					inf.usmblks += numlblks*size - fsp;
					inf.smblks += numlblks;
					hblk = hblk->nexthblk;
				} while (hblk != holdhead[i]);
			}
		}
	}
	inf.hblkhd = (inf.smblks / numlblks) * sizeof (struct holdblk);
	/* holding blocks were counted in ordblks, so subtract off */
	inf.ordblks -= inf.hblks;
	inf.uordblks -= inf.hblkhd + inf.usmblks + inf.fsmblks;
	inf.keepcost -= inf.hblks*(HEADSZ - MINHEAD);
	(void) mutex_unlock(&mlock);
	return (inf);
}
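
/*
 * Usage sketch (field meanings per struct mallinfo in malloc.h):
 *
 *	struct mallinfo mi = mallinfo();
 *
 *	mi.uordblks + mi.usmblks	space in use (ordinary + small)
 *	mi.fordblks + mi.fsmblks	space free  (ordinary + small)
 *	mi.arena			total arena size in bytes
 */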


/*
 * freespace - calculate how much space is contained in the free
 *		    small blocks of a given holding block
 *
 *	input - hblk = given holding block
 *
 *	returns the free space in the small blocks of hblk
 */

static ssize_t
freespace(struct holdblk *holdblk)
{
	struct lblk *lblk;
	ssize_t space = 0;
	ssize_t size;
	struct lblk *unused;

	lblk = CLRSMAL(holdblk->lfreeq);
	size = holdblk->blksz + sizeof (struct lblk) - sizeof (int);
	unused = CLRSMAL(holdblk->unused);
	/* follow free chain */
	while ((lblk != LGROUND) && (lblk != unused)) {
		space += size;
		lblk = CLRSMAL(lblk->header.nextfree);
	}
	space += ((char *)holdblk + HOLDSZ(size)) - (char *)unused;
	return (space);
}

static void *
morecore(size_t bytes)
{
	void *ret;

	if (bytes > LONG_MAX) {
		intptr_t wad;

		/*
		 * The request size is too big.  We need to do this in
		 * chunks, since sbrk takes only a signed (intptr_t)
		 * argument.
		 */
		if (bytes == ULONG_MAX)
			return ((void *)-1);

		ret = sbrk(0);
		wad = LONG_MAX;
		while (wad > 0) {
			if (sbrk(wad) == (void *)-1) {
				if (ret != sbrk(0))
					(void) sbrk(-LONG_MAX);
				return ((void *)-1);
			}
			bytes -= LONG_MAX;
			wad = bytes;
		}
	} else
		ret = sbrk(bytes);

	return (ret);
}
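
/*
 * Sketch of the chunking above: a request of LONG_MAX + k bytes is
 * satisfied as sbrk(LONG_MAX) followed by sbrk(k); if the second call
 * fails, the first extension is rolled back with sbrk(-LONG_MAX) so
 * the break ends up where it started.
 */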

#ifdef debug
int
check_arena(void)
{
	struct header *blk, *prev, *next;	/* ptr to ordinary blocks */

	(void) mutex_lock(&mlock);
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return (-1);
	}
	blk = arena + 1;

	/* loop through arena, checking */
	blk = (struct header *)CLRALL(blk->nextblk);
	next = (struct header *)CLRALL(blk->nextblk);
	while (next != arena + 1) {
		assert(blk >= arena + 1);
		assert(blk <= lastblk);
		assert(next >= blk + 1);
		assert(((uintptr_t)((struct header *)blk->nextblk) &
		    (4 | SMAL)) == 0);

		if (TESTBUSY(blk->nextblk) == 0) {
			assert(blk->nextfree >= freeptr);
			assert(blk->prevfree >= freeptr);
			assert(blk->nextfree <= lastblk);
			assert(blk->prevfree <= lastblk);
			assert(((uintptr_t)((struct header *)blk->nextfree) &
			    7) == 0);
			assert(((uintptr_t)((struct header *)blk->prevfree) &
			    7) == 0 || blk->prevfree == freeptr);
		}
		blk = next;
		next = CLRBUSY(blk->nextblk);
	}
	(void) mutex_unlock(&mlock);
	return (0);
}

#define	RSTALLOC	1
#endif

#ifdef RSTALLOC
/*
 * rstalloc - reset alloc routines
 *
 *	description -	return allocated memory and reset
 *			allocation pointers.
 *
 *	Warning - This is for debugging purposes only.
 *		  It will return all memory allocated after
 *		  the first call to malloc, even if some
 *		  of it was fetched by a user's sbrk().
 */

void
rstalloc(void)
{
	(void) mutex_lock(&mlock);
	minhead = MINHEAD;
	grain = ALIGNSZ;
	numlblks = NUMLBLKS;
	fastct = FASTCT;
	maxfast = MAXFAST;
	change = 0;
	if (freeptr[0].nextfree == GROUND) {
		(void) mutex_unlock(&mlock);
		return;
	}
	(void) brk(CLRBUSY(arena[1].nextblk));
	freeptr[0].nextfree = GROUND;
#ifdef debug
	case1count = 0;
#endif
	(void) mutex_unlock(&mlock);
}
#endif	/* RSTALLOC */

/*
 * cfree is an undocumented, obsolete function
 */

/* ARGSUSED1 */
void
cfree(void *p, size_t num, size_t size)
{
	free(p);
}

static void
malloc_prepare(void)
{
	(void) mutex_lock(&mlock);
}

static void
malloc_release(void)
{
	(void) mutex_unlock(&mlock);
}

#pragma init(malloc_init)
static void
malloc_init(void)
{
	(void) pthread_atfork(malloc_prepare, malloc_release, malloc_release);
}
1194