/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License").  You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2004 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M%	%I%	%E% SMI"

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved  	*/


/*
 *	Memory management: malloc(), realloc(), free().
 *
 *	The following #-parameters may be redefined:
 *	SEGMENTED: if defined, memory requests are assumed to be
 *		non-contiguous across calls of GETCORE.
 *	GETCORE: a function to get more core memory. If not SEGMENTED,
 *		GETCORE(0) is assumed to return the next available
 *		address. Default is 'sbrk'.
 *	ERRCORE: the error code as returned by GETCORE.
 *		Default is (char *)(-1).
 *	CORESIZE: a desired unit (measured in bytes) to be used
 *		with GETCORE. Default is (1024*ALIGN).
 *
 *	This algorithm is based on a best fit strategy with lists of
 *	free elts maintained in a self-adjusting binary tree. Each list
 *	contains all elts of the same size. The tree is ordered by size.
 *	For results on self-adjusting trees, see the paper:
 *		Self-Adjusting Binary Search Trees,
 *		D. D. Sleator & R. E. Tarjan, JACM 1985.
 *
 *	The header of a block contains the size of the data part in bytes.
 *	Since the size of a block is always 0 mod 4, the low two bits of
 *	the header are free and used as follows:
 *
 *		BIT0:	1 for busy (block is in use), 0 for free.
 *		BIT1:	if the block is busy, this bit is 1 if the
 *			preceding block in contiguous memory is free.
 *			Otherwise, it is always 0.
 */
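/*
 *	A worked example of the header encoding (illustrative only):
 *	a busy 64-byte block whose preceding neighbor is free carries
 *	the header word 64 | BIT0 | BIT1 == 0x43; the same block with
 *	a busy predecessor carries 64 | BIT0 == 0x41; once freed (and
 *	not coalesced) it carries just 64, i.e. 0x40.
 */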

#include "synonyms.h"
#include "mallint.h"
#include "mtlib.h"

/*
 * Some abusers of the system (notably java1.2) acquire __malloc_lock
 * in order to prevent threads from holding it while they are being
 * suspended via thr_suspend() or thr_suspend_allmutators().
 * The new scheme of acquiring it via lmutex_lock() solves the same
 * problem but causes these old programs to malfunction (acquiring
 * a lock by both mutex_lock() and lmutex_lock() leads to fatal
 * inconsistencies).  Therefore, we leave __malloc_lock as an external
 * variable to satisfy these old programs, but we define a new lock,
 * private to libc, to do the real locking: libc_malloc_lock.
 */
mutex_t __malloc_lock = DEFAULTMUTEX;

mutex_t libc_malloc_lock = DEFAULTMUTEX;

static TREE	*Root,		/* root of the free tree */
		*Bottom,	/* the last free chunk in the arena */
		*_morecore(size_t);	/* function to get more core */

static char	*Baddr;		/* current high address of the arena */
static char	*Lfree;		/* last freed block with data intact */

static void	t_delete(TREE *);
static void	t_splay(TREE *);
static void	realfree(void *);
static void	cleanfree(void *);
static void	*_malloc_unlocked(size_t);

#define	FREESIZE (1<<5) /* size for preserving free blocks until next malloc */
#define	FREEMASK (FREESIZE-1)

static void *flist[FREESIZE];	/* list of blocks to be freed on next malloc */
static int freeidx;		/* index of free blocks in flist % FREESIZE */
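/*
 * freeidx advances modulo FREESIZE via "& FREEMASK", which works only
 * because FREESIZE is a power of two; flist is thus a circular buffer
 * holding the (up to) FREESIZE most recently freed blocks.
 */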

/*
 *	Allocation of small blocks
 */
static TREE	*List[MINSIZE/WORDSIZE-1]; /* lists of small blocks */
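/*
 * List[i] links free small blocks whose data size is exactly
 * (i + 1) * WORDSIZE bytes; see the index computation in _smalloc()
 * and realfree().
 */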

static void *
_smalloc(size_t size)
{
	TREE	*tp;
	size_t	i;

	ASSERT(size % WORDSIZE == 0);
	/* want to return a unique pointer on malloc(0) */
	if (size == 0)
		size = WORDSIZE;

	/* list to use */
	i = size / WORDSIZE - 1;

	if (List[i] == NULL) {
		TREE *np;
		int n;
		/* number of blocks to get at one time */
#define	NPS (WORDSIZE*8)
		ASSERT((size + WORDSIZE) * NPS >= MINSIZE);

		/* get NPS of these block types */
		if ((List[i] = _malloc_unlocked((size + WORDSIZE) * NPS)) == 0)
			return (0);

		/* make them into a linked list */
		for (n = 0, np = List[i]; n < NPS; ++n) {
			tp = np;
			SIZE(tp) = size;
			np = NEXT(tp);
			AFTER(tp) = np;
		}
		AFTER(tp) = NULL;
	}

	/* allocate from the head of the queue */
	tp = List[i];
	List[i] = AFTER(tp);
	SETBIT0(SIZE(tp));
	return (DATA(tp));
}

void *
malloc(size_t size)
{
	void *ret;

	if (!primary_link_map) {
		errno = ENOTSUP;
		return (NULL);
	}
	assert_no_libc_locks_held();
	lmutex_lock(&libc_malloc_lock);
	ret = _malloc_unlocked(size);
	lmutex_unlock(&libc_malloc_lock);
	return (ret);
}

static void *
_malloc_unlocked(size_t size)
{
	size_t	n;
	TREE	*tp, *sp;
	size_t	o_bit1;

	COUNT(nmalloc);
	ASSERT(WORDSIZE == ALIGN);

	/* check for size that could overflow calculations */
	if (size > MAX_MALLOC) {
		errno = ENOMEM;
		return (NULL);
	}

	/* make sure that size is 0 mod ALIGN */
	ROUND(size);

	/* see if the last free block can be used */
	if (Lfree) {
		sp = BLOCK(Lfree);
		n = SIZE(sp);
		CLRBITS01(n);
		if (n == size) {
			/*
			 * exact match, use it as is
			 */
			freeidx = (freeidx + FREESIZE - 1) &
				FREEMASK; /* 1 back */
			flist[freeidx] = Lfree = NULL;
			return (DATA(sp));
		} else if (size >= MINSIZE && n > size) {
			/*
			 * got a big enough piece
			 */
			freeidx = (freeidx + FREESIZE - 1) &
				FREEMASK; /* 1 back */
			flist[freeidx] = Lfree = NULL;
			o_bit1 = SIZE(sp) & BIT1;
			SIZE(sp) = n;
			goto leftover;
		}
	}
	o_bit1 = 0;

	/* perform frees of space since last malloc */
	cleanfree(NULL);

	/* small blocks */
	if (size < MINSIZE)
		return (_smalloc(size));

	/* search for an elt of the right size */
	sp = NULL;
	n  = 0;
	if (Root) {
		tp = Root;
		for (;;) {
			/* branch left */
			if (SIZE(tp) >= size) {
				if (n == 0 || n >= SIZE(tp)) {
					sp = tp;
					n = SIZE(tp);
				}
				if (LEFT(tp))
					tp = LEFT(tp);
				else
					break;
			} else { /* branch right */
				if (RIGHT(tp))
					tp = RIGHT(tp);
				else
					break;
			}
		}

		if (sp) {
			t_delete(sp);
		} else if (tp != Root) {
			/* make the searched-to element the root */
			t_splay(tp);
			Root = tp;
		}
	}

	/* if no fit was found in the tree */
	if (!sp) {
		if (Bottom && size <= SIZE(Bottom)) {
			sp = Bottom;
			CLRBITS01(SIZE(sp));
		} else if ((sp = _morecore(size)) == NULL) /* no more memory */
			return (NULL);
	}

	/* tell the forward neighbor that we're busy */
	CLRBIT1(SIZE(NEXT(sp)));

	ASSERT(ISBIT0(SIZE(NEXT(sp))));

leftover:
	/* if the leftover is enough for a new free piece */
	if ((n = (SIZE(sp) - size)) >= MINSIZE + WORDSIZE) {
		n -= WORDSIZE;
		SIZE(sp) = size;
		tp = NEXT(sp);
		SIZE(tp) = n|BIT0;
		realfree(DATA(tp));
	} else if (BOTTOM(sp))
		Bottom = NULL;
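	/*
	 * Worked example of the split above (illustrative, assuming
	 * WORDSIZE == 8 and a sufficiently small MINSIZE): carving a
	 * 48-byte request out of a 128-byte free block leaves n == 80;
	 * one word becomes the new header, so a 72-byte free block
	 * follows the 48-byte allocation and is handed to realfree().
	 */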

	/* return the allocated space */
	SIZE(sp) |= BIT0 | o_bit1;
	return (DATA(sp));
}


/*
 * realloc().
 *
 * If the block size is increasing, we try forward merging first.
 * This is not best-fit but it avoids some data recopying.
 */
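/*
 * Overall flow: satisfy the request in place when possible (exact
 * fit, shrinking via the chop_big split, forward merge with a free
 * neighbor, or extending core when at the true end of memory);
 * otherwise fall back to malloc-copy-free, with the special recovery
 * cases below when that malloc fails.
 */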
void *
realloc(void *old, size_t size)
{
	TREE	*tp, *np;
	size_t	ts;
	char	*new;

	if (!primary_link_map) {
		errno = ENOTSUP;
		return (NULL);
	}

	assert_no_libc_locks_held();
	COUNT(nrealloc);

	/* check for size that could overflow calculations */
	if (size > MAX_MALLOC) {
		errno = ENOMEM;
		return (NULL);
	}

	/* pointer to the block */
	lmutex_lock(&libc_malloc_lock);
	if (old == NULL) {
		new = _malloc_unlocked(size);
		lmutex_unlock(&libc_malloc_lock);
		return (new);
	}

	/* perform frees of space since last malloc */
	cleanfree(old);

	/* make sure that size is 0 mod ALIGN */
	ROUND(size);

	tp = BLOCK(old);
	ts = SIZE(tp);

	/* if the block was freed, data has been destroyed. */
	if (!ISBIT0(ts)) {
		lmutex_unlock(&libc_malloc_lock);
		return (NULL);
	}

	/* nothing to do */
	CLRBITS01(SIZE(tp));
	if (size == SIZE(tp)) {
		SIZE(tp) = ts;
		lmutex_unlock(&libc_malloc_lock);
		return (old);
	}

	/* special cases involving small blocks */
	if (size < MINSIZE || SIZE(tp) < MINSIZE) {
		/* free if size is zero */
		if (size == 0) {
			SETOLD01(SIZE(tp), ts);
			_free_unlocked(old);
			lmutex_unlock(&libc_malloc_lock);
			return (NULL);
		} else {
			goto call_malloc;
		}
	}

	/* block is increasing in size, try merging the next block */
	if (size > SIZE(tp)) {
		np = NEXT(tp);
		if (!ISBIT0(SIZE(np))) {
			ASSERT(SIZE(np) >= MINSIZE);
			ASSERT(!ISBIT1(SIZE(np)));
			SIZE(tp) += SIZE(np) + WORDSIZE;
			if (np != Bottom)
				t_delete(np);
			else
				Bottom = NULL;
			CLRBIT1(SIZE(NEXT(np)));
		}

#ifndef SEGMENTED
		/* not enough & at TRUE end of memory, try extending core */
		if (size > SIZE(tp) && BOTTOM(tp) && GETCORE(0) == Baddr) {
			Bottom = tp;
			if ((tp = _morecore(size)) == NULL) {
				tp = Bottom;
				Bottom = NULL;
			}
		}
#endif
	}

	/* got enough space to use */
	if (size <= SIZE(tp)) {
		size_t n;

chop_big:
		if ((n = (SIZE(tp) - size)) >= MINSIZE + WORDSIZE) {
			n -= WORDSIZE;
			SIZE(tp) = size;
			np = NEXT(tp);
			SIZE(np) = n|BIT0;
			realfree(DATA(np));
		} else if (BOTTOM(tp))
			Bottom = NULL;

		/* the previous block may be free */
		SETOLD01(SIZE(tp), ts);
		lmutex_unlock(&libc_malloc_lock);
		return (old);
	}

	/* call malloc to get a new block */
call_malloc:
	SETOLD01(SIZE(tp), ts);
	if ((new = _malloc_unlocked(size)) != NULL) {
		CLRBITS01(ts);
		if (ts > size)
			ts = size;
		MEMCOPY(new, old, ts);
		_free_unlocked(old);
		lmutex_unlock(&libc_malloc_lock);
		return (new);
	}

	/*
	 * Attempt special case recovery allocations since malloc() failed:
	 *
	 * 1. size <= SIZE(tp) < MINSIZE
	 *	Simply return the existing block
	 * 2. SIZE(tp) < size < MINSIZE
	 *	malloc() may have failed to allocate the chunk of
	 *	small blocks. Try asking for MINSIZE bytes.
	 * 3. size < MINSIZE <= SIZE(tp)
	 *	malloc() may have failed as with 2.  Change to
	 *	MINSIZE allocation which is taken from the beginning
	 *	of the current block.
	 * 4. MINSIZE <= SIZE(tp) < size
	 *	If the previous block is free and the combination of
	 *	these two blocks has at least size bytes, then merge
	 *	the two blocks copying the existing contents backwards.
	 */
	CLRBITS01(SIZE(tp));
	if (SIZE(tp) < MINSIZE) {
		if (size < SIZE(tp)) {			/* case 1. */
			SETOLD01(SIZE(tp), ts);
			lmutex_unlock(&libc_malloc_lock);
			return (old);
		} else if (size < MINSIZE) {		/* case 2. */
			size = MINSIZE;
			goto call_malloc;
		}
	} else if (size < MINSIZE) {			/* case 3. */
		size = MINSIZE;
		goto chop_big;
	} else if (ISBIT1(ts) &&
	    (SIZE(np = LAST(tp)) + SIZE(tp) + WORDSIZE) >= size) {
		ASSERT(!ISBIT0(SIZE(np)));
		t_delete(np);
		SIZE(np) += SIZE(tp) + WORDSIZE;
		/*
		 * Since the copy may overlap, use memmove().
		 */
		(void) memmove(DATA(np), old, SIZE(tp));
		old = DATA(np);
		tp = np;
		CLRBIT1(ts);
		goto chop_big;
	}
	SETOLD01(SIZE(tp), ts);
	lmutex_unlock(&libc_malloc_lock);
	return (NULL);
}


/*
 * realfree().
 *
 * Coalescing of adjacent free blocks is done first.
 * Then, the new free block is leaf-inserted into the free tree
 * without splaying. This strategy does not guarantee the amortized
 * O(n log n) behaviour for a sequence of n insert/delete/find
 * operations on the tree. In practice, however, free is much more
 * infrequent than malloc/realloc and the tree searches performed by
 * these functions adequately keep the tree in balance.
 */
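/*
 * Equal-sized free blocks share a single tree node: on an equal-size
 * match below, the new block takes the resident node's place in the
 * tree and links it behind itself on a doubly linked list (LINKFOR/
 * LINKBAK), with SETNOTREE marking the displaced node as a pure list
 * element.
 */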
static void
realfree(void *old)
{
	TREE	*tp, *sp, *np;
	size_t	ts, size;

	COUNT(nfree);

	/* pointer to the block */
	tp = BLOCK(old);
	ts = SIZE(tp);
	if (!ISBIT0(ts))
		return;
	CLRBITS01(SIZE(tp));

	/* small block, put it in the right linked list */
	if (SIZE(tp) < MINSIZE) {
		ASSERT(SIZE(tp) / WORDSIZE >= 1);
		ts = SIZE(tp) / WORDSIZE - 1;
		AFTER(tp) = List[ts];
		List[ts] = tp;
		return;
	}

	/* see if coalescing with next block is warranted */
	np = NEXT(tp);
	if (!ISBIT0(SIZE(np))) {
		if (np != Bottom)
			t_delete(np);
		SIZE(tp) += SIZE(np) + WORDSIZE;
	}

	/* the same with the preceding block */
	if (ISBIT1(ts)) {
		np = LAST(tp);
		ASSERT(!ISBIT0(SIZE(np)));
		ASSERT(np != Bottom);
		t_delete(np);
		SIZE(np) += SIZE(tp) + WORDSIZE;
		tp = np;
	}

	/* initialize tree info */
	PARENT(tp) = LEFT(tp) = RIGHT(tp) = LINKFOR(tp) = NULL;

	/* the last word of the block contains self's address */
	*(SELFP(tp)) = tp;

	/* set bottom block, or insert in the free tree */
	if (BOTTOM(tp))
		Bottom = tp;
	else {
		/* search for the place to insert */
		if (Root) {
			size = SIZE(tp);
			np = Root;
			for (;;) {
				if (SIZE(np) > size) {
					if (LEFT(np))
						np = LEFT(np);
					else {
						LEFT(np) = tp;
						PARENT(tp) = np;
						break;
					}
				} else if (SIZE(np) < size) {
					if (RIGHT(np))
						np = RIGHT(np);
					else {
						RIGHT(np) = tp;
						PARENT(tp) = np;
						break;
					}
				} else {
					if ((sp = PARENT(np)) != NULL) {
						if (np == LEFT(sp))
							LEFT(sp) = tp;
						else
							RIGHT(sp) = tp;
						PARENT(tp) = sp;
					} else
						Root = tp;

					/* insert to head of list */
					if ((sp = LEFT(np)) != NULL)
						PARENT(sp) = tp;
					LEFT(tp) = sp;

					if ((sp = RIGHT(np)) != NULL)
						PARENT(sp) = tp;
					RIGHT(tp) = sp;

					/* doubly link list */
					LINKFOR(tp) = np;
					LINKBAK(np) = tp;
					SETNOTREE(np);

					break;
				}
			}
		} else
			Root = tp;
	}

	/* tell next block that this one is free */
	SETBIT1(SIZE(NEXT(tp)));

	ASSERT(ISBIT0(SIZE(NEXT(tp))));
}

/*
 * Get more core. Gaps in memory are noted as busy blocks.
 */
static TREE *
_morecore(size_t size)
{
	TREE	*tp;
	ssize_t	n, offset;
	char	*addr;
	ssize_t	nsize;

	/* compute new amount of memory to get */
	tp = Bottom;
	n = (ssize_t)size + 2 * WORDSIZE;
	addr = GETCORE(0);

	if (addr == ERRCORE)
		return (NULL);

	/* need to pad size out so that addr is aligned */
	if ((((uintptr_t)addr) % ALIGN) != 0)
		offset = ALIGN - (uintptr_t)addr % ALIGN;
	else
		offset = 0;

#ifndef SEGMENTED
	/* if not segmented memory, what we need may be smaller */
	if (addr == Baddr) {
		n -= WORDSIZE;
		if (tp != NULL)
			n -= SIZE(tp);
	}
#endif

	/* get a multiple of CORESIZE */
	n = ((n - 1) / CORESIZE + 1) * CORESIZE;
	nsize = n + offset;

	/* check if nsize request could overflow in GETCORE */
	if ((size_t)nsize > MAX_MALLOC - (uintptr_t)addr) {
		errno = ENOMEM;
		return (NULL);
	}

	if ((size_t)nsize <= MAX_GETCORE) {
		if (GETCORE(nsize) == ERRCORE)
			return (NULL);
	} else {
		intptr_t	delta;
		/*
		 * The value required is too big for GETCORE() to deal
		 * with in one go, so use GETCORE() at most twice instead.
		 * The argument to GETCORE() must be a multiple of ALIGN;
		 * if it is not, GETCORE(-MAX_GETCORE) will not return the
		 * brk point to its previous value, but will be ALIGN more,
		 * which would leave a small hole.
		 */
		delta = MAX_GETCORE;
		while (delta > 0) {
			if (GETCORE(delta) == ERRCORE) {
				if (addr != GETCORE(0))
					(void) GETCORE(-MAX_GETCORE);
				return (NULL);
			}
			nsize -= MAX_GETCORE;
			delta = nsize;
		}
	}

	/* contiguous memory */
	if (addr == Baddr) {
		ASSERT(offset == 0);
		if (tp) {
			addr = (char *)tp;
			n += SIZE(tp) + 2 * WORDSIZE;
		} else {
			addr = Baddr - WORDSIZE;
			n += WORDSIZE;
		}
	} else
		addr += offset;

	/* new bottom address */
	Baddr = addr + n;

	/* new bottom block */
	tp = (TREE *)(uintptr_t)addr;
	SIZE(tp) = n - 2 * WORDSIZE;
	ASSERT((SIZE(tp) % ALIGN) == 0);

	/* reserve the last word to head any noncontiguous memory */
	SETBIT0(SIZE(NEXT(tp)));

	/* non-contiguous memory, free old bottom block */
	if (Bottom && Bottom != tp) {
		SETBIT0(SIZE(Bottom));
		realfree(DATA(Bottom));
	}

	return (tp);
}


/*
 * Tree rotation functions (BU: bottom-up, TD: top-down)
 */
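/*
 * Naming conventions: LEFT1(x, y) rotates y (x's right child) up over
 * x, and RIGHT1(x, y) is the mirror image; BULEFT2/BURIGHT2 are
 * bottom-up double rotations for the zig-zig case of splaying;
 * TDLEFT2/TDRIGHT2 are their top-down counterparts (t_delete() uses
 * TDLEFT2 while walking down the right spine of a subtree).
 */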

#define	LEFT1(x, y)		\
			if ((RIGHT(x) = LEFT(y)) != NULL) PARENT(RIGHT(x)) = x;\
			if ((PARENT(y) = PARENT(x)) != NULL)\
				if (LEFT(PARENT(x)) == x) LEFT(PARENT(y)) = y;\
				else RIGHT(PARENT(y)) = y;\
			LEFT(y) = x; PARENT(x) = y

#define	RIGHT1(x, y)		\
			if ((LEFT(x) = RIGHT(y)) != NULL) PARENT(LEFT(x)) = x;\
			if ((PARENT(y) = PARENT(x)) != NULL)\
				if (LEFT(PARENT(x)) == x) LEFT(PARENT(y)) = y;\
				else RIGHT(PARENT(y)) = y;\
			RIGHT(y) = x; PARENT(x) = y

#define	BULEFT2(x, y, z)	\
			if ((RIGHT(x) = LEFT(y)) != NULL) PARENT(RIGHT(x)) = x;\
			if ((RIGHT(y) = LEFT(z)) != NULL) PARENT(RIGHT(y)) = y;\
			if ((PARENT(z) = PARENT(x)) != NULL)\
				if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
				else RIGHT(PARENT(z)) = z;\
			LEFT(z) = y; PARENT(y) = z; LEFT(y) = x; PARENT(x) = y

#define	BURIGHT2(x, y, z)	\
			if ((LEFT(x) = RIGHT(y)) != NULL) PARENT(LEFT(x)) = x;\
			if ((LEFT(y) = RIGHT(z)) != NULL) PARENT(LEFT(y)) = y;\
			if ((PARENT(z) = PARENT(x)) != NULL)\
				if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
				else RIGHT(PARENT(z)) = z;\
			RIGHT(z) = y; PARENT(y) = z; RIGHT(y) = x; PARENT(x) = y

#define	TDLEFT2(x, y, z)	\
			if ((RIGHT(y) = LEFT(z)) != NULL) PARENT(RIGHT(y)) = y;\
			if ((PARENT(z) = PARENT(x)) != NULL)\
				if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
				else RIGHT(PARENT(z)) = z;\
			PARENT(x) = z; LEFT(z) = x

#define	TDRIGHT2(x, y, z)	\
			if ((LEFT(y) = RIGHT(z)) != NULL) PARENT(LEFT(y)) = y;\
			if ((PARENT(z) = PARENT(x)) != NULL)\
				if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
				else RIGHT(PARENT(z)) = z;\
			PARENT(x) = z; RIGHT(z) = x

/*
 * Delete a tree element
 */
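/*
 * Three cases are handled below: op may be a non-tree node in the
 * middle of a size list, the head of a size list (its successor then
 * inherits the tree links), or a plain tree node, which is splayed to
 * the root and replaced by the join of its left and right subtrees.
 */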
static void
t_delete(TREE *op)
{
	TREE	*tp, *sp, *gp;

	/* if this is a non-tree node */
	if (ISNOTREE(op)) {
		tp = LINKBAK(op);
		if ((sp = LINKFOR(op)) != NULL)
			LINKBAK(sp) = tp;
		LINKFOR(tp) = sp;
		return;
	}

	/* make op the root of the tree */
	if (PARENT(op))
		t_splay(op);

	/* if this is the start of a list */
	if ((tp = LINKFOR(op)) != NULL) {
		PARENT(tp) = NULL;
		if ((sp = LEFT(op)) != NULL)
			PARENT(sp) = tp;
		LEFT(tp) = sp;

		if ((sp = RIGHT(op)) != NULL)
			PARENT(sp) = tp;
		RIGHT(tp) = sp;

		Root = tp;
		return;
	}

	/* if op has a non-null left subtree */
	if ((tp = LEFT(op)) != NULL) {
		PARENT(tp) = NULL;

		if (RIGHT(op)) {
			/* make the right-end of the left subtree its root */
			while ((sp = RIGHT(tp)) != NULL) {
				if ((gp = RIGHT(sp)) != NULL) {
					TDLEFT2(tp, sp, gp);
					tp = gp;
				} else {
					LEFT1(tp, sp);
					tp = sp;
				}
			}

			/* hook the right subtree of op to the above elt */
			RIGHT(tp) = RIGHT(op);
			PARENT(RIGHT(tp)) = tp;
		}
	} else if ((tp = RIGHT(op)) != NULL)	/* no left subtree */
		PARENT(tp) = NULL;

	Root = tp;
}

/*
 * Bottom-up splaying (simple version).
 * The basic idea is to roughly cut in half the
 * path from Root to tp and make tp the new root.
 */
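/*
 * Unlike classical splaying, this simple version has no special
 * double rotation for the zig-zag case (tp and pp are children on
 * opposite sides); it applies a single rotation per step there,
 * which still shortens the access path.
 */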
static void
t_splay(TREE *tp)
{
	TREE	*pp, *gp;

	/* iterate until tp is the root */
	while ((pp = PARENT(tp)) != NULL) {
		/* grandparent of tp */
		gp = PARENT(pp);

		/* tp is a left child */
		if (LEFT(pp) == tp) {
			if (gp && LEFT(gp) == pp) {
				BURIGHT2(gp, pp, tp);
			} else {
				RIGHT1(pp, tp);
			}
		} else {
			ASSERT(RIGHT(pp) == tp);
			if (gp && RIGHT(gp) == pp) {
				BULEFT2(gp, pp, tp);
			} else {
				LEFT1(pp, tp);
			}
		}
	}
}


/*
 *	free().
 *	Performs a delayed free of the block pointed to
 *	by old. The pointer to old is saved on a list, flist,
 *	until the next malloc or realloc. At that time, all the
 *	blocks pointed to in flist are actually freed via
 *	realfree(). This allows the contents of free blocks to
 *	remain undisturbed until the next malloc or realloc.
 */
void
free(void *old)
{
	assert_no_libc_locks_held();
	lmutex_lock(&libc_malloc_lock);
	_free_unlocked(old);
	lmutex_unlock(&libc_malloc_lock);
}


void
_free_unlocked(void *old)
{
	int	i;

	if (old == NULL || !primary_link_map)
		return;

	/*
	 * Make sure the same data block is not freed twice.
	 * 3 cases are checked.  It returns immediately if any
	 * one of the conditions is true.
	 *	1. Last freed.
	 *	2. Not in use or freed already.
	 *	3. In the free list.
	 */
	if (old == Lfree)
		return;
	if (!ISBIT0(SIZE(BLOCK(old))))
		return;
	for (i = 0; i < freeidx; i++)
		if (old == flist[i])
			return;

	if (flist[freeidx] != NULL)
		realfree(flist[freeidx]);
	flist[freeidx] = Lfree = old;
	freeidx = (freeidx + 1) & FREEMASK; /* one forward */
}

/*
 * cleanfree() frees all the blocks pointed to by flist.
 *
 * realloc() should work if it is called with a pointer
 * to a block that was freed since the last call to malloc() or
 * realloc(). If cleanfree() is called from realloc(), ptr
 * is set to the old block and that block should not be
 * freed since it is actually being reallocated.
 */
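/*
 * The scan below runs backwards from freeidx, wrapping around the
 * circular flist, and stops at the first empty slot; every pending
 * block except ptr is handed to realfree() and its slot cleared.
 */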
static void
cleanfree(void *ptr)
{
	char	**flp;

	flp = (char **)&(flist[freeidx]);
	for (;;) {
		if (flp == (char **)&(flist[0]))
			flp = (char **)&(flist[FREESIZE]);
		if (*--flp == NULL)
			break;
		if (*flp != ptr)
			realfree(*flp);
		*flp = NULL;
	}
	freeidx = 0;
	Lfree = NULL;
}
907