/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright 2008 Sun Microsystems, Inc.  All rights reserved.
 * Use is subject to license terms.
 */

/*	Copyright (c) 1988 AT&T	*/
/*	  All Rights Reserved	*/

/*
 * Memory management: malloc(), realloc(), free().
 *
 * The following #-parameters may be redefined:
 *	SEGMENTED: if defined, memory requests are assumed to be
 *		non-contiguous across calls of GETCORE.
 *	GETCORE: a function to get more core memory. If not SEGMENTED,
 *		GETCORE(0) is assumed to return the next available
 *		address. Default is 'sbrk'.
 *	ERRCORE: the error code as returned by GETCORE.
 *		Default is (char *)(-1).
 *	CORESIZE: a desired unit (measured in bytes) to be used
 *		with GETCORE. Default is (1024*ALIGN).
 *
 * This algorithm is based on a best fit strategy with lists of
 * free elts maintained in a self-adjusting binary tree. Each list
 * contains all elts of the same size. The tree is ordered by size.
 * For results on self-adjusting trees, see the paper:
 *	Self-Adjusting Binary Search Trees,
 *	DD Sleator & RE Tarjan, JACM 1985.
 *
 * The header of a block contains the size of the data part in bytes.
 * Since the size of a block is always a multiple of 4, the low two bits
 * of the header are free and used as follows:
 *
 *	BIT0:	1 for busy (block is in use), 0 for free.
 *	BIT1:	if the block is busy, this bit is 1 if the
 *		preceding block in contiguous memory is free.
 *		Otherwise, it is always 0.
 */
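
/*
 * Illustrative sketch only (not part of the allocator): given the header
 * layout described above, a block header word can be decoded with the
 * SIZE()/ISBIT0()/ISBIT1()/CLRBITS01() macros that mallint.h provides and
 * that the code below uses:
 *
 *	size_t h = SIZE(tp);		raw header word
 *	int busy = ISBIT0(h);		BIT0 - block is in use
 *	int prev_free = ISBIT1(h);	BIT1 - preceding block is free
 *	CLRBITS01(h);			h is now the pure byte count
 */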

#include "lint.h"
#include "mallint.h"
#include "mtlib.h"

/*
 * Some abusers of the system (notably java1.2) acquire __malloc_lock
 * in order to prevent threads from holding it while they are being
 * suspended via thr_suspend() or thr_suspend_allmutators().
 * This never worked when alternate malloc() libraries were used
 * because they don't use __malloc_lock for their locking strategy.
 * We leave __malloc_lock as an external variable to satisfy these
 * old programs, but we define a new lock, private to libc, to do the
 * real locking: libc_malloc_lock.  This puts libc's malloc() package
 * on the same footing as all other malloc packages.
 */
mutex_t __malloc_lock = DEFAULTMUTEX;
mutex_t libc_malloc_lock = DEFAULTMUTEX;

static TREE	*Root,		/* root of the free tree */
		*Bottom,	/* the last free chunk in the arena */
		*_morecore(size_t);	/* function to get more core */

static char	*Baddr;		/* current high address of the arena */
static char	*Lfree;		/* last freed block with data intact */

static void	t_delete(TREE *);
static void	t_splay(TREE *);
static void	realfree(void *);
static void	cleanfree(void *);
static void	*_malloc_unlocked(size_t);

#define	FREESIZE (1<<5) /* size for preserving free blocks until next malloc */
#define	FREEMASK (FREESIZE-1)

static void *flist[FREESIZE];	/* list of blocks to be freed on next malloc */
static int freeidx;		/* index of free blocks in flist % FREESIZE */

/*
 * Interfaces used only by atfork_init() functions.
 */
void
malloc_locks(void)
{
	(void) mutex_lock(&libc_malloc_lock);
}

void
malloc_unlocks(void)
{
	(void) mutex_unlock(&libc_malloc_lock);
}
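
/*
 * These two functions only acquire and release libc_malloc_lock; the
 * atfork machinery is expected to call malloc_locks() before fork() and
 * malloc_unlocks() afterwards, so that the allocator is never forked
 * while its lock is held by another thread.
 */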

/*
 * Allocation of small blocks
 */
static TREE	*List[MINSIZE/WORDSIZE-1];	/* lists of small blocks */
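
/*
 * List[i] chains free small blocks whose data size is (i + 1) * WORDSIZE
 * bytes, linked through their AFTER() fields; see _smalloc() below, which
 * computes the index as size / WORDSIZE - 1.
 */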

static void *
_smalloc(size_t size)
{
	TREE	*tp;
	size_t	i;

	ASSERT(size % WORDSIZE == 0);
	/* want to return a unique pointer on malloc(0) */
	if (size == 0)
		size = WORDSIZE;

	/* list to use */
	i = size / WORDSIZE - 1;

	if (List[i] == NULL) {
		TREE *np;
		int n;
		/* number of blocks to get at one time */
#define	NPS (WORDSIZE*8)
		ASSERT((size + WORDSIZE) * NPS >= MINSIZE);

		/* get NPS of these block types */
		if ((List[i] = _malloc_unlocked((size + WORDSIZE) * NPS)) == 0)
			return (0);

		/* make them into a linked list */
		for (n = 0, np = List[i]; n < NPS; ++n) {
			tp = np;
			SIZE(tp) = size;
			np = NEXT(tp);
			AFTER(tp) = np;
		}
		AFTER(tp) = NULL;
	}

	/* allocate from the head of the queue */
	tp = List[i];
	List[i] = AFTER(tp);
	SETBIT0(SIZE(tp));
	return (DATA(tp));
}

void *
malloc(size_t size)
{
	void *ret;

	if (!primary_link_map) {
		errno = ENOTSUP;
		return (NULL);
	}
	assert_no_libc_locks_held();
	(void) mutex_lock(&libc_malloc_lock);
	ret = _malloc_unlocked(size);
	(void) mutex_unlock(&libc_malloc_lock);
	return (ret);
}

static void *
_malloc_unlocked(size_t size)
{
	size_t	n;
	TREE	*tp, *sp;
	size_t	o_bit1;

	COUNT(nmalloc);
	ASSERT(WORDSIZE == ALIGN);

	/* check for size that could overflow calculations */
	if (size > MAX_MALLOC) {
		errno = ENOMEM;
		return (NULL);
	}

	/* make sure that size is 0 mod ALIGN */
	ROUND(size);

	/* see if the last free block can be used */
	if (Lfree) {
		sp = BLOCK(Lfree);
		n = SIZE(sp);
		CLRBITS01(n);
		if (n == size) {
			/*
			 * exact match, use it as is
			 */
			freeidx = (freeidx + FREESIZE - 1) &
			    FREEMASK; /* 1 back */
			flist[freeidx] = Lfree = NULL;
			return (DATA(sp));
		} else if (size >= MINSIZE && n > size) {
			/*
			 * got a big enough piece
			 */
			freeidx = (freeidx + FREESIZE - 1) &
			    FREEMASK; /* 1 back */
			flist[freeidx] = Lfree = NULL;
			o_bit1 = SIZE(sp) & BIT1;
			SIZE(sp) = n;
			goto leftover;
		}
	}
	o_bit1 = 0;

	/* perform free's of space since last malloc */
	cleanfree(NULL);

	/* small blocks */
	if (size < MINSIZE)
		return (_smalloc(size));

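	/*
	 * Best-fit search: walk the size-ordered tree remembering the
	 * smallest element whose size is >= the request (sp).  If one is
	 * found it is unlinked with t_delete(); otherwise the last node
	 * visited is splayed to the root so that repeated failed searches
	 * still keep the tree roughly balanced.
	 */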
	/* search for an elt of the right size */
	sp = NULL;
	n = 0;
	if (Root) {
		tp = Root;
		for (;;) {
			/* branch left */
			if (SIZE(tp) >= size) {
				if (n == 0 || n >= SIZE(tp)) {
					sp = tp;
					n = SIZE(tp);
				}
				if (LEFT(tp))
					tp = LEFT(tp);
				else
					break;
			} else { /* branch right */
				if (RIGHT(tp))
					tp = RIGHT(tp);
				else
					break;
			}
		}

		if (sp) {
			t_delete(sp);
		} else if (tp != Root) {
			/* make the searched-to element the root */
			t_splay(tp);
			Root = tp;
		}
	}

	/* if no fitting element was found in the tree */
	if (!sp) {
		if (Bottom && size <= SIZE(Bottom)) {
			sp = Bottom;
			CLRBITS01(SIZE(sp));
		} else if ((sp = _morecore(size)) == NULL) /* no more memory */
			return (NULL);
	}

	/* tell the forward neighbor that we're busy */
	CLRBIT1(SIZE(NEXT(sp)));

	ASSERT(ISBIT0(SIZE(NEXT(sp))));

leftover:
	/* if the leftover is enough for a new free piece */
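	/* (the split-off piece needs its own WORDSIZE header, hence the extra WORDSIZE) */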
	if ((n = (SIZE(sp) - size)) >= MINSIZE + WORDSIZE) {
		n -= WORDSIZE;
		SIZE(sp) = size;
		tp = NEXT(sp);
		SIZE(tp) = n|BIT0;
		realfree(DATA(tp));
	} else if (BOTTOM(sp))
		Bottom = NULL;

	/* return the allocated space */
	SIZE(sp) |= BIT0 | o_bit1;
	return (DATA(sp));
}


/*
 * realloc().
 *
 * If the block size is increasing, we try forward merging first.
 * This is not best-fit but it avoids some data recopying.
 */
void *
realloc(void *old, size_t size)
{
	TREE	*tp, *np;
	size_t	ts;
	char	*new;

	if (!primary_link_map) {
		errno = ENOTSUP;
		return (NULL);
	}

	assert_no_libc_locks_held();
	COUNT(nrealloc);

	/* check for size that could overflow calculations */
	if (size > MAX_MALLOC) {
		errno = ENOMEM;
		return (NULL);
	}

	/* pointer to the block */
	(void) mutex_lock(&libc_malloc_lock);
	if (old == NULL) {
		new = _malloc_unlocked(size);
		(void) mutex_unlock(&libc_malloc_lock);
		return (new);
	}

	/* perform free's of space since last malloc */
	cleanfree(old);

	/* make sure that size is 0 mod ALIGN */
	ROUND(size);

	tp = BLOCK(old);
	ts = SIZE(tp);

	/* if the block was freed, data has been destroyed. */
	if (!ISBIT0(ts)) {
		(void) mutex_unlock(&libc_malloc_lock);
		return (NULL);
	}

	/* nothing to do */
	CLRBITS01(SIZE(tp));
	if (size == SIZE(tp)) {
		SIZE(tp) = ts;
		(void) mutex_unlock(&libc_malloc_lock);
		return (old);
	}

	/* special cases involving small blocks */
	if (size < MINSIZE || SIZE(tp) < MINSIZE) {
		/* free if size is zero */
		if (size == 0) {
			SETOLD01(SIZE(tp), ts);
			_free_unlocked(old);
			(void) mutex_unlock(&libc_malloc_lock);
			return (NULL);
		} else {
			goto call_malloc;
		}
	}

	/* block is increasing in size, try merging the next block */
	if (size > SIZE(tp)) {
		np = NEXT(tp);
		if (!ISBIT0(SIZE(np))) {
			ASSERT(SIZE(np) >= MINSIZE);
			ASSERT(!ISBIT1(SIZE(np)));
			SIZE(tp) += SIZE(np) + WORDSIZE;
			if (np != Bottom)
				t_delete(np);
			else
				Bottom = NULL;
			CLRBIT1(SIZE(NEXT(np)));
		}

#ifndef SEGMENTED
		/* not enough & at TRUE end of memory, try extending core */
		if (size > SIZE(tp) && BOTTOM(tp) && GETCORE(0) == Baddr) {
			Bottom = tp;
			if ((tp = _morecore(size)) == NULL) {
				tp = Bottom;
				Bottom = NULL;
			}
		}
#endif
	}

	/* got enough space to use */
	if (size <= SIZE(tp)) {
		size_t n;

chop_big:
		if ((n = (SIZE(tp) - size)) >= MINSIZE + WORDSIZE) {
			n -= WORDSIZE;
			SIZE(tp) = size;
			np = NEXT(tp);
			SIZE(np) = n|BIT0;
			realfree(DATA(np));
		} else if (BOTTOM(tp))
			Bottom = NULL;

		/* the previous block may be free */
		SETOLD01(SIZE(tp), ts);
		(void) mutex_unlock(&libc_malloc_lock);
		return (old);
	}

	/* call malloc to get a new block */
call_malloc:
	SETOLD01(SIZE(tp), ts);
	if ((new = _malloc_unlocked(size)) != NULL) {
		CLRBITS01(ts);
		if (ts > size)
			ts = size;
		MEMCOPY(new, old, ts);
		_free_unlocked(old);
		(void) mutex_unlock(&libc_malloc_lock);
		return (new);
	}

	/*
	 * Attempt special case recovery allocations since malloc() failed:
	 *
	 * 1. size <= SIZE(tp) < MINSIZE
	 *	Simply return the existing block
	 * 2. SIZE(tp) < size < MINSIZE
	 *	malloc() may have failed to allocate the chunk of
	 *	small blocks. Try asking for MINSIZE bytes.
	 * 3. size < MINSIZE <= SIZE(tp)
	 *	malloc() may have failed as with 2.  Change to
	 *	MINSIZE allocation which is taken from the beginning
	 *	of the current block.
	 * 4. MINSIZE <= SIZE(tp) < size
	 *	If the previous block is free and the combination of
	 *	these two blocks has at least size bytes, then merge
	 *	the two blocks copying the existing contents backwards.
	 */
	CLRBITS01(SIZE(tp));
	if (SIZE(tp) < MINSIZE) {
		if (size < SIZE(tp)) {			/* case 1. */
			SETOLD01(SIZE(tp), ts);
			(void) mutex_unlock(&libc_malloc_lock);
			return (old);
		} else if (size < MINSIZE) {		/* case 2. */
			size = MINSIZE;
			goto call_malloc;
		}
	} else if (size < MINSIZE) {			/* case 3. */
		size = MINSIZE;
		goto chop_big;
	} else if (ISBIT1(ts) &&
	    (SIZE(np = LAST(tp)) + SIZE(tp) + WORDSIZE) >= size) {
		ASSERT(!ISBIT0(SIZE(np)));
		t_delete(np);
		SIZE(np) += SIZE(tp) + WORDSIZE;
		/*
		 * Since the old and new data areas may overlap,
		 * copy with memmove() rather than memcpy().
		 */
		(void) memmove(DATA(np), old, SIZE(tp));
		old = DATA(np);
		tp = np;
		CLRBIT1(ts);
		goto chop_big;
	}
	SETOLD01(SIZE(tp), ts);
	(void) mutex_unlock(&libc_malloc_lock);
	return (NULL);
}


/*
 * realfree().
 *
 * Coalescing of adjacent free blocks is done first.
 * Then, the new free block is leaf-inserted into the free tree
 * without splaying.  This strategy does not guarantee the amortized
 * O(n log n) behaviour for the insert/delete/find set of operations
 * on the tree.  In practice, however, free is far less frequent
 * than malloc/realloc and the tree searches performed by these
 * functions adequately keep the tree in balance.
 */
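
/*
 * Tree node layout note: the tree holds one node per distinct size;
 * additional free blocks of the same size are kept on a doubly linked
 * list hanging off that node (LINKFOR()/LINKBAK(), with SETNOTREE()
 * marking the list members), as set up in the insertion code below and
 * unlinked again in t_delete().
 */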
static void
realfree(void *old)
{
	TREE	*tp, *sp, *np;
	size_t	ts, size;

	COUNT(nfree);

	/* pointer to the block */
	tp = BLOCK(old);
	ts = SIZE(tp);
	if (!ISBIT0(ts))
		return;
	CLRBITS01(SIZE(tp));

	/* small block, put it in the right linked list */
	if (SIZE(tp) < MINSIZE) {
		ASSERT(SIZE(tp) / WORDSIZE >= 1);
		ts = SIZE(tp) / WORDSIZE - 1;
		AFTER(tp) = List[ts];
		List[ts] = tp;
		return;
	}

	/* see if coalescing with next block is warranted */
	np = NEXT(tp);
	if (!ISBIT0(SIZE(np))) {
		if (np != Bottom)
			t_delete(np);
		SIZE(tp) += SIZE(np) + WORDSIZE;
	}

	/* the same with the preceding block */
	if (ISBIT1(ts)) {
		np = LAST(tp);
		ASSERT(!ISBIT0(SIZE(np)));
		ASSERT(np != Bottom);
		t_delete(np);
		SIZE(np) += SIZE(tp) + WORDSIZE;
		tp = np;
	}

	/* initialize tree info */
	PARENT(tp) = LEFT(tp) = RIGHT(tp) = LINKFOR(tp) = NULL;

	/* the last word of the block contains self's address */
	*(SELFP(tp)) = tp;

	/* set bottom block, or insert in the free tree */
	if (BOTTOM(tp))
		Bottom = tp;
	else {
		/* search for the place to insert */
		if (Root) {
			size = SIZE(tp);
			np = Root;
			for (;;) {
				if (SIZE(np) > size) {
					if (LEFT(np))
						np = LEFT(np);
					else {
						LEFT(np) = tp;
						PARENT(tp) = np;
						break;
					}
				} else if (SIZE(np) < size) {
					if (RIGHT(np))
						np = RIGHT(np);
					else {
						RIGHT(np) = tp;
						PARENT(tp) = np;
						break;
					}
				} else {
					if ((sp = PARENT(np)) != NULL) {
						if (np == LEFT(sp))
							LEFT(sp) = tp;
						else
							RIGHT(sp) = tp;
						PARENT(tp) = sp;
					} else
						Root = tp;

					/* insert to head of list */
					if ((sp = LEFT(np)) != NULL)
						PARENT(sp) = tp;
					LEFT(tp) = sp;

					if ((sp = RIGHT(np)) != NULL)
						PARENT(sp) = tp;
					RIGHT(tp) = sp;

					/* doubly link list */
					LINKFOR(tp) = np;
					LINKBAK(np) = tp;
					SETNOTREE(np);

					break;
				}
			}
		} else
			Root = tp;
	}

	/* tell next block that this one is free */
	SETBIT1(SIZE(NEXT(tp)));

	ASSERT(ISBIT0(SIZE(NEXT(tp))));
}

/*
 * Get more core. Gaps in memory are noted as busy blocks.
 */
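/*
 * The freshly obtained space is described by a TREE header placed at its
 * start, and its last word is reserved so that a later, non-contiguous
 * extension can be headed by a busy marker.  If the old Bottom block is
 * not contiguous with the new space it is marked busy and handed to
 * realfree() at the end of this routine.
 */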
static TREE *
_morecore(size_t size)
{
	TREE	*tp;
	ssize_t	n, offset;
	char	*addr;
	ssize_t	nsize;

	/* compute new amount of memory to get */
	tp = Bottom;
	n = (ssize_t)size + 2 * WORDSIZE;
	addr = GETCORE(0);

	if (addr == ERRCORE)
		return (NULL);

	/* need to pad size out so that addr is aligned */
	if ((((uintptr_t)addr) % ALIGN) != 0)
		offset = ALIGN - (uintptr_t)addr % ALIGN;
	else
		offset = 0;

#ifndef SEGMENTED
	/* if not segmented memory, what we need may be smaller */
	if (addr == Baddr) {
		n -= WORDSIZE;
		if (tp != NULL)
			n -= SIZE(tp);
	}
#endif

	/* get a multiple of CORESIZE */
	n = ((n - 1) / CORESIZE + 1) * CORESIZE;
	nsize = n + offset;

	/* check if nsize request could overflow in GETCORE */
	if ((size_t)nsize > MAX_MALLOC - (uintptr_t)addr) {
		errno = ENOMEM;
		return (NULL);
	}

	if ((size_t)nsize <= MAX_GETCORE) {
		if (GETCORE(nsize) == ERRCORE)
			return (NULL);
	} else {
		intptr_t	delta;
		/*
		 * the value required is too big for GETCORE() to deal with
		 * in one go, so use GETCORE() at most 2 times instead.
		 * Argument to GETCORE() must be multiple of ALIGN.
		 * If not, GETCORE(-MAX_GETCORE) will not return brk point
		 * to previous value, but will be ALIGN more.
		 * This would leave a small hole.
		 */
		delta = MAX_GETCORE;
		while (delta > 0) {
			if (GETCORE(delta) == ERRCORE) {
				if (addr != GETCORE(0))
					(void) GETCORE(-MAX_GETCORE);
				return (NULL);
			}
			nsize -= MAX_GETCORE;
			delta = nsize;
		}
	}

	/* contiguous memory */
	if (addr == Baddr) {
		ASSERT(offset == 0);
		if (tp) {
			addr = (char *)tp;
			n += SIZE(tp) + 2 * WORDSIZE;
		} else {
			addr = Baddr - WORDSIZE;
			n += WORDSIZE;
		}
	} else
		addr += offset;

	/* new bottom address */
	Baddr = addr + n;

	/* new bottom block */
	tp = (TREE *)(uintptr_t)addr;
	SIZE(tp) = n - 2 * WORDSIZE;
	ASSERT((SIZE(tp) % ALIGN) == 0);

	/* reserve the last word to head any noncontiguous memory */
	SETBIT0(SIZE(NEXT(tp)));

	/* non-contiguous memory, free old bottom block */
	if (Bottom && Bottom != tp) {
		SETBIT0(SIZE(Bottom));
		realfree(DATA(Bottom));
	}

	return (tp);
}


/*
 * Tree rotation functions (BU: bottom-up, TD: top-down)
 */
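
/*
 * LEFT1/RIGHT1 are single rotations.  BULEFT2/BURIGHT2 are the bottom-up
 * double rotations used by t_splay() for the zig-zig cases; TDLEFT2 and
 * TDRIGHT2 are their top-down counterparts, used when t_delete() walks
 * down a subtree.
 */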

#define	LEFT1(x, y)		\
	if ((RIGHT(x) = LEFT(y)) != NULL) PARENT(RIGHT(x)) = x;\
	if ((PARENT(y) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(y)) = y;\
		else RIGHT(PARENT(y)) = y;\
	LEFT(y) = x; PARENT(x) = y

#define	RIGHT1(x, y)		\
	if ((LEFT(x) = RIGHT(y)) != NULL) PARENT(LEFT(x)) = x;\
	if ((PARENT(y) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(y)) = y;\
		else RIGHT(PARENT(y)) = y;\
	RIGHT(y) = x; PARENT(x) = y

#define	BULEFT2(x, y, z)	\
	if ((RIGHT(x) = LEFT(y)) != NULL) PARENT(RIGHT(x)) = x;\
	if ((RIGHT(y) = LEFT(z)) != NULL) PARENT(RIGHT(y)) = y;\
	if ((PARENT(z) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
		else RIGHT(PARENT(z)) = z;\
	LEFT(z) = y; PARENT(y) = z; LEFT(y) = x; PARENT(x) = y

#define	BURIGHT2(x, y, z)	\
	if ((LEFT(x) = RIGHT(y)) != NULL) PARENT(LEFT(x)) = x;\
	if ((LEFT(y) = RIGHT(z)) != NULL) PARENT(LEFT(y)) = y;\
	if ((PARENT(z) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
		else RIGHT(PARENT(z)) = z;\
	RIGHT(z) = y; PARENT(y) = z; RIGHT(y) = x; PARENT(x) = y

#define	TDLEFT2(x, y, z)	\
	if ((RIGHT(y) = LEFT(z)) != NULL) PARENT(RIGHT(y)) = y;\
	if ((PARENT(z) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
		else RIGHT(PARENT(z)) = z;\
	PARENT(x) = z; LEFT(z) = x;

#define	TDRIGHT2(x, y, z)	\
	if ((LEFT(y) = RIGHT(z)) != NULL) PARENT(LEFT(y)) = y;\
	if ((PARENT(z) = PARENT(x)) != NULL)\
		if (LEFT(PARENT(x)) == x) LEFT(PARENT(z)) = z;\
		else RIGHT(PARENT(z)) = z;\
	PARENT(x) = z; RIGHT(z) = x;

/*
 * Delete a tree element
 */
static void
t_delete(TREE *op)
{
	TREE	*tp, *sp, *gp;

	/* if this is a non-tree node */
	if (ISNOTREE(op)) {
		tp = LINKBAK(op);
		if ((sp = LINKFOR(op)) != NULL)
			LINKBAK(sp) = tp;
		LINKFOR(tp) = sp;
		return;
	}

	/* make op the root of the tree */
	if (PARENT(op))
		t_splay(op);

	/* if this is the start of a list */
	if ((tp = LINKFOR(op)) != NULL) {
		PARENT(tp) = NULL;
		if ((sp = LEFT(op)) != NULL)
			PARENT(sp) = tp;
		LEFT(tp) = sp;

		if ((sp = RIGHT(op)) != NULL)
			PARENT(sp) = tp;
		RIGHT(tp) = sp;

		Root = tp;
		return;
	}

	/* if op has a non-null left subtree */
	if ((tp = LEFT(op)) != NULL) {
		PARENT(tp) = NULL;

		if (RIGHT(op)) {
			/* make the right-end of the left subtree its root */
			while ((sp = RIGHT(tp)) != NULL) {
				if ((gp = RIGHT(sp)) != NULL) {
					TDLEFT2(tp, sp, gp);
					tp = gp;
				} else {
					LEFT1(tp, sp);
					tp = sp;
				}
			}

			/* hook the right subtree of op to the above elt */
			RIGHT(tp) = RIGHT(op);
			PARENT(RIGHT(tp)) = tp;
		}
	} else if ((tp = RIGHT(op)) != NULL)	/* no left subtree */
		PARENT(tp) = NULL;

	Root = tp;
}

/*
 * Bottom up splaying (simple version).
 * The basic idea is to roughly cut in half the
 * path from Root to tp and make tp the new root.
 */
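/*
 * Only two shapes are distinguished here: when tp and its parent are
 * both left (or both right) children, a bottom-up double rotation
 * (BURIGHT2/BULEFT2) handles the zig-zig case; every other case falls
 * back to a single rotation (RIGHT1/LEFT1), which is what makes this
 * the "simple version" of splaying.
 */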
static void
t_splay(TREE *tp)
{
	TREE	*pp, *gp;

	/* iterate until tp is the root */
	while ((pp = PARENT(tp)) != NULL) {
		/* grandparent of tp */
		gp = PARENT(pp);

		/* tp is a left child */
		if (LEFT(pp) == tp) {
			if (gp && LEFT(gp) == pp) {
				BURIGHT2(gp, pp, tp);
			} else {
				RIGHT1(pp, tp);
			}
		} else {
			ASSERT(RIGHT(pp) == tp);
			if (gp && RIGHT(gp) == pp) {
				BULEFT2(gp, pp, tp);
			} else {
				LEFT1(pp, tp);
			}
		}
	}
}


/*
 * free().
 * Performs a delayed free of the block pointed to
 * by old.  The pointer old is saved on a list, flist,
 * until the next malloc or realloc.  At that time, all the
 * blocks pointed to in flist are actually freed via
 * realfree().  This allows the contents of free blocks to
 * remain undisturbed until the next malloc or realloc.
 */
void
free(void *old)
{
	if (!primary_link_map) {
		errno = ENOTSUP;
		return;
	}
	assert_no_libc_locks_held();
	(void) mutex_lock(&libc_malloc_lock);
	_free_unlocked(old);
	(void) mutex_unlock(&libc_malloc_lock);
}


void
_free_unlocked(void *old)
{
	int	i;

	if (old == NULL)
		return;

	/*
	 * Make sure the same data block is not freed twice.
	 * Three cases are checked; return immediately if any
	 * one of the conditions is true.
	 *	1. Last freed.
	 *	2. Not in use or freed already.
	 *	3. In the free list.
	 */
	if (old == Lfree)
		return;
	if (!ISBIT0(SIZE(BLOCK(old))))
		return;
	for (i = 0; i < freeidx; i++)
		if (old == flist[i])
			return;

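	/*
	 * If the slot we are about to use still holds an older pending
	 * free, really free that block now before reusing the slot.
	 */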
	if (flist[freeidx] != NULL)
		realfree(flist[freeidx]);
	flist[freeidx] = Lfree = old;
	freeidx = (freeidx + 1) & FREEMASK; /* one forward */
}

/*
 * cleanfree() frees all the blocks pointed to by flist.
 *
 * realloc() should work if it is called with a pointer
 * to a block that was freed since the last call to malloc() or
 * realloc().  If cleanfree() is called from realloc(), ptr
 * is set to the old block and that block should not be
 * freed since it is actually being reallocated.
 */
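/*
 * cleanfree() walks the flist ring backwards from freeidx, wrapping at
 * slot 0, and hands every pending block except ptr to realfree().  A
 * sketch of the caller-visible behaviour described above (illustrative
 * application code, not part of libc):
 *
 *	p = malloc(32);
 *	free(p);		only queues p on flist
 *	p = realloc(p, 64);	still works here: p has not been reused
 */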
static void
cleanfree(void *ptr)
{
	char	**flp;

	flp = (char **)&(flist[freeidx]);
	for (;;) {
		if (flp == (char **)&(flist[0]))
			flp = (char **)&(flist[FREESIZE]);
		if (*--flp == NULL)
			break;
		if (*flp != ptr)
			realfree(*flp);
		*flp = NULL;
	}
	freeidx = 0;
	Lfree = NULL;
}