xref: /titanic_41/usr/src/lib/libbc/libc/gen/common/malloc.c (revision 5d54f3d8999eac1762fe0a8c7177d20f1f201fae)
1 /*
2  * CDDL HEADER START
3  *
4  * The contents of this file are subject to the terms of the
5  * Common Development and Distribution License, Version 1.0 only
6  * (the "License").  You may not use this file except in compliance
7  * with the License.
8  *
9  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
10  * or http://www.opensolaris.org/os/licensing.
11  * See the License for the specific language governing permissions
12  * and limitations under the License.
13  *
14  * When distributing Covered Code, include this CDDL HEADER in each
15  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
16  * If applicable, add the following below this CDDL HEADER, with the
17  * fields enclosed by brackets "[]" replaced with your own identifying
18  * information: Portions Copyright [yyyy] [name of copyright owner]
19  *
20  * CDDL HEADER END
21  */
22 /*
23  * Copyright 1986 Sun Microsystems, Inc.  All rights reserved.
24  * Use is subject to license terms.
25  */
26 
27 #pragma ident	"%Z%%M%	%I%	%E% SMI"
28 
29 /*
30  * file: malloc.c
31  * description:
32  *	Yet another memory allocator, this one based on a method
33  *	described in C.J. Stephenson, "Fast Fits"
34  *
35  *	The basic data structure is a "Cartesian" binary tree, in which
36  *	nodes are ordered by ascending addresses (thus minimizing free
37  *	list insertion time) and block sizes decrease with depth in the
38  *	tree (thus minimizing search time for a block of a given size).
39  *
40  *	In other words: for any node s, let D(s) denote the set of
41  *	descendants of s; for all x in D(left(s)) and all y in
42  *	D(right(s)), we have:
43  *
44  *	a. addr(x) <  addr(s) <  addr(y)
45  *	b. len(x)  <= len(s)  >= len(y)
46  */
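/*
 *	Illustration (addresses and lengths here are purely hypothetical):
 *	a tree satisfying both rules might look like
 *
 *	                     (addr 0x8000, len 512)
 *	                     /                     \
 *	      (addr 0x3000, len 256)         (addr 0xc000, len 128)
 *	       /               \                         \
 *	(0x1000, len 64)  (0x5000, len 96)         (0xe000, len 32)
 *
 *	Addresses increase from left to right across the whole tree, and
 *	no node is longer than its parent.
 */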
47 
48 #include "mallint.h"
49 #include <errno.h>
50 #include <stdlib.h>
51 #include <stdarg.h>
52 
53 /* system interface */
54 
55 extern	char	*sbrk();
56 extern	int	getpagesize();
57 
58 static	int	nbpg = 0;	/* set by calling getpagesize() */
59 static	bool	morecore(uint);	/* get more memory into free space */
60 
61 #ifdef	S5EMUL
62 #define	ptr_t		void *	/* ANSI C says these are voids */
63 #define	free_t		void	/* ANSI says void free(ptr_t ptr) */
64 #define	free_return(x)	return
65 #else
66 #define	ptr_t		char *	/* BSD still (4.3) wants char*'s */
67 #define	free_t		int	/* BSD says int free(ptr_t ptr) */
68 #define	free_return(x)	return(x)
69 #endif
70 
71 /* SystemV-compatible information structure */
72 #define INIT_MXFAST 0
73 #define INIT_NLBLKS 100
74 #define INIT_GRAIN ALIGNSIZ
75 
76 struct	mallinfo __mallinfo = {
77 	0,0,0,0,0,0,0,0,0,0,			/* basic info */
78 	INIT_MXFAST, INIT_NLBLKS, INIT_GRAIN,	/* mallopt options */
79 	0,0,0
80 };
81 
82 /* heap data structures */
83 
84 Freehdr	_root	= NIL;			/* root of free space list */
85 char	*_lbound = NULL;		/* lower bound of heap */
86 char	*_ubound = NULL;		/* upper bound of heap */
87 
88 /* free header list management */
89 
90 static	Freehdr	getfreehdr(void);
91 static	void	putfreehdr(Freehdr);
92 static	Freehdr	freehdrptr = NIL;	/* ptr to block of available headers */
93 static	int	nfreehdrs = 0;		/* # of headers in current block */
94 static	Freehdr	freehdrlist = NIL;	/* List of available headers */
95 
96 /* error checking */
97 static void	error(char *, ...);
98 /* sets errno; prints msg and aborts if DEBUG is on */
99 
100 static int	reclaim(Dblk, uint, int);
101 
102 #ifdef	DEBUG	/*	>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */
103 
104 int	malloc_debug(int);
105 int	malloc_verify(void);
106 static	int debug_level = 1;
107 
108 /*
109  * A block with a negative size, a size that is not a multiple
110  * of ALIGNSIZ, a size greater than the current extent of the
111  * heap, or a size which extends beyond the end of the heap is
112  * considered bad.
113  */
114 
115 #define badblksize(p,size)\
116 ( (size) < SMALLEST_BLK \
117 	|| (size) & (ALIGNSIZ-1) \
118 	|| (size) > heapsize() \
119 	|| ((char*)(p))+(size) > _ubound )
120 
121 #else	/* !DEBUG	================================================= */
122 
123 #define malloc_debug(level) 0
124 #define malloc_verify() 1
125 #define debug_level 0
126 #define badblksize(p,size) 0
127 
128 #endif	/* !DEBUG	<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
129 
130 
131 /*
132  * insert (newblk, len)
133  *	Inserts a new node in the free space tree, placing it
134  *	in the correct position with respect to the existing nodes.
135  *
136  * algorithm:
137  *	Starting from the root, a binary search is made for the new
138  *	node. If this search were allowed to continue, it would
139  *	eventually fail (since there cannot already be a node at the
140  *	given address); but in fact it stops when it reaches a node in
141  *	the tree which has a length less than that of the new node (or
142  *	when it reaches a null tree pointer).
143  *
144  *	The new node is then inserted at the root of the subtree for
145  *	which the shorter node forms the old root (or in place of the
146  *	null pointer).
147  *
148  * Arguments
149  *	newblk:	Ptr to the block to insert
150  *	len:	Length of new node
151  */
152 
153 static void
154 insert(Dblk newblk, uint len)
155 {
156 	Freehdr *fpp;		/* Address of ptr to subtree */
157 	Freehdr x;
158 	Freehdr *left_hook;	/* Temp for insertion */
159 	Freehdr *right_hook;	/* Temp for insertion */
160 	Freehdr newhdr;
161 
162 	/*
163 	 * check for bad block size.
164 	 */
165 	if ( badblksize(newblk,len) ) {
166 		error("insert: bad block size (%d) at %#x\n", len, newblk);
167 		return;
168 	}
169 
170 	/*
171 	 * Search for the first node which has a weight less
172 	 *	than that of the new node; this will be the
173 	 *	point at which we insert the new node.
174 	 */
175 	fpp = &_root;
176 	x = *fpp;
177 	while (weight(x) >= len) {
178 		if (newblk < x->block)
179 			fpp = &x->left;
180 		else
181 			fpp = &x->right;
182 		x = *fpp;
183 	}
184 
185 	/*
186 	 * Perform root insertion. The variable x traces a path through
187 	 *	the tree, and with the help of left_hook and right_hook,
188 	 *	rewrites all links that cross the territory occupied
189 	 *	by newblk.
190 	 */
191 
192 	if ((newhdr = getfreehdr()) == NIL) {
193 		/* Error message returned by getfreehdr() */
194 		return;
195 	}
196 	*fpp = newhdr;
197 
198 	newhdr->left = NIL;
199 	newhdr->right = NIL;
200 	newhdr->block = newblk;
201 	newhdr->size = len;
202 
203 	/*
204 	 * set length word in the block for consistency with the header.
205 	 */
206 
207 	newblk->size = len;
208 
209 	left_hook = &newhdr->left;
210 	right_hook = &newhdr->right;
211 
212 	while (x != NIL) {
213 		/*
214 		 * Remark:
215 		 *	The name 'left_hook' is somewhat confusing, since
216 		 *	it is always set to the address of a .right link
217 		 *	field.  However, its value is always an address
218 		 *	below (i.e., to the left of) newblk. Similarly
219 		 *	for right_hook. The values of left_hook and
220 		 *	right_hook converge toward the value of newblk,
221 		 *	as in a classical binary search.
222 		 */
223 		if (x->block < newblk) {
224 			/*
225 			 * rewrite link crossing from the left
226 			 */
227 			*left_hook = x;
228 			left_hook = &x->right;
229 			x = x->right;
230 		} else {
231 			/*
232 			 * rewrite link crossing from the right
233 			 */
234 			*right_hook = x;
235 			right_hook = &x->left;
236 			x = x->left;
237 		} /*else*/
238 	} /*while*/
239 
240 	*left_hook = *right_hook = NIL;		/* clear remaining hooks */
241 
242 } /*insert*/
243 
244 /*
245  * delete(p)
246  *	deletes a node from a cartesian tree. p is the address of
247  *	a pointer to the node which is to be deleted.
248  *
249  * algorithm:
250  *	The left and right branches of the node to be deleted define two
251  *	subtrees which are to be merged and attached in place of the
252  *	deleted node.  Each node on the inside edges of these two
253  *	subtrees is examined and longer nodes are placed above the
254  *	shorter ones.
255  *
256  * On entry:
257  *	*p is assumed to be non-null.
258  */
259 static void
260 delete(Freehdr *p)
261 {
262 	Freehdr x;
263 	Freehdr left_branch;	/* left subtree of deleted node */
264 	Freehdr right_branch;	/* right subtree of deleted node */
265 	uint left_weight;
266 	uint right_weight;
267 
268 	x = *p;
269 	left_branch = x->left;
270 	left_weight = weight(left_branch);
271 	right_branch = x->right;
272 	right_weight = weight(right_branch);
273 
274 	while (left_branch != right_branch) {
275 		/*
276 		 * iterate until left branch and right branch are
277 		 * both NIL.
278 		 */
279 		if ( left_weight >= right_weight ) {
280 			/*
281 			 * promote the left branch
282 			 */
283 			if (left_branch != NIL) {
284 				if (left_weight == 0) {
285 					/* zero-length block */
286 					error("blocksize=0 at %#x\n",
287 						(int)left_branch->block->data);
288 					break;
289 				}
290 				*p = left_branch;
291 				p = &left_branch->right;
292 				left_branch = *p;
293 				left_weight = weight(left_branch);
294 			}
295 		} else {
296 			/*
297 			 * promote the right branch
298 			 */
299 			if (right_branch != NIL) {
300 				if (right_weight == 0) {
301 					/* zero-length block */
302 					error("blocksize=0 at %#x\n",
303 						(int)right_branch->block->data);
304 					break;
305 				}
306 				*p = right_branch;
307 				p = &right_branch->left;
308 				right_branch = *p;
309 				right_weight = weight(right_branch);
310 			}
311 		}/*else*/
312 	}/*while*/
313 	*p = NIL;
314 	putfreehdr(x);
315 } /*delete*/
316 
317 
318 /*
319  * demote(p)
320  *	Demotes a node in a cartesian tree, if necessary, to establish
321  *	the required vertical ordering.
322  *
323  * algorithm:
324  *	The left and right subtrees of the node to be demoted are to
325  *	be partially merged and attached in place of the demoted node.
326  *	The nodes on the inside edges of these two subtrees are
327  *	examined and the longer nodes are placed above the shorter
328  *	ones, until a node is reached which has a length no greater
329  *	than that of the node being demoted (or until a null pointer
330  *	is reached).  The node is then attached at this point, and
331  *	the remaining subtrees (if any) become its descendants.
332  *
333  * on entry:
334  *   a. All the nodes in the tree, including the one to be demoted,
335  *	must be correctly ordered horizontally;
336  *   b. All the nodes except the one to be demoted must also be
337  *	correctly positioned vertically.  The node to be demoted
338  *	may be already correctly positioned vertically, or it may
339  *	have a length which is less than that of one or both of
340  *	its progeny.
341  *   c. *p is non-null
342  */
343 
344 static void
345 demote(Freehdr *p)
346 {
347 	Freehdr x;		/* addr of node to be demoted */
348 	Freehdr left_branch;
349 	Freehdr right_branch;
350 	uint	left_weight;
351 	uint	right_weight;
352 	uint	x_weight;
353 
354 	x = *p;
355 	x_weight = weight(x);
356 	left_branch = x->left;
357 	right_branch = x->right;
358 	left_weight = weight(left_branch);
359 	right_weight = weight(right_branch);
360 
361 	while (left_weight > x_weight || right_weight > x_weight) {
362 		/*
363 		 * select a descendant branch for promotion
364 		 */
365 		if (left_weight >= right_weight) {
366 			/*
367 			 * promote the left branch
368 			 */
369 			*p = left_branch;
370 			p = &left_branch->right;
371 			left_branch = *p;
372 			left_weight = weight(left_branch);
373 		} else {
374 			/*
375 			 * promote the right branch
376 			 */
377 			*p = right_branch;
378 			p = &right_branch->left;
379 			right_branch = *p;
380 			right_weight = weight(right_branch);
381 		} /*else*/
382 	} /*while*/
383 
384 	*p = x;				/* attach demoted node here */
385 	x->left = left_branch;
386 	x->right = right_branch;
387 
388 } /*demote*/
389 
390 
391 /*
392  * char*
393  * malloc(nbytes)
394  *	Allocates a block of length specified in bytes.  If nbytes is
395  *	zero, a valid pointer (that should not be dereferenced) is returned.
396  *
397  * algorithm:
398  *	The freelist is searched by descending the tree from the root
399  *	so that at each decision point the "better fitting" branch node
400  *	is chosen (i.e., the shorter one, if it is long enough, or
401  *	the longer one, otherwise).  The descent stops when both
402  *	branch nodes are too short.
403  *
404  * function result:
405  *	Malloc returns a pointer to the allocated block. A null
406  *	pointer indicates an error.
407  *
408  * diagnostics:
409  *
410  *	ENOMEM: storage could not be allocated.
411  *
412  *	EINVAL: either the argument was invalid, or the heap was found
413  *	to be in an inconsistent state.  More detailed information may
414  *	be obtained by enabling range checks (cf., malloc_debug()).
415  *
416  * Note: In this implementation, each allocated block includes a
417  *	length word, which occurs before the address seen by the caller.
418  *	Allocation requests are rounded up to a multiple of wordsize.
419  */
420 
421 ptr_t
422 malloc(uint nbytes)
423 {
424 	Freehdr allocp;	/* ptr to node to be allocated */
425 	Freehdr *fpp;		/* for tree modifications */
426 	Freehdr left_branch;
427 	Freehdr right_branch;
428 	uint	 left_weight;
429 	uint	 right_weight;
430 	Dblk	 retblk;		/* block returned to the user */
431 
432 	/*
433 	 * if rigorous checking was requested, do it.
434 	 */
435 	if (debug_level >= 2) {
436 		malloc_verify();
437 	}
438 
439 	/*
440 	 * add the size of a length word to the request, and
441 	 * guarantee at least one word of usable data.
442 	 */
443 	nbytes += ALIGNSIZ;
444 	if (nbytes < SMALLEST_BLK) {
445 		nbytes = SMALLEST_BLK;
446 	} else {
447 		nbytes = roundup(nbytes, ALIGNSIZ);
448 	}
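	/*
	 * Worked example (ALIGNSIZ and SMALLEST_BLK come from mallint.h;
	 * the values below are assumed only for illustration): with
	 * ALIGNSIZ == 8 and SMALLEST_BLK <= 16, a request for 10 bytes
	 * becomes 18 once the length word is added and rounds up to 24,
	 * so the allocation consumes 24 bytes of heap.
	 */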
449 
450 	/*
451 	 * ensure that at least one block is big enough to satisfy
452 	 *	the request.
453 	 */
454 
455 	if (weight(_root) < nbytes) {
456 		/*
457 		 * the largest block is not enough.
458 		 */
459 		if(!morecore(nbytes))
460 			return 0;
461 	}
462 
463 	/*
464 	 * search down through the tree until a suitable block is
465 	 *	found.  At each decision point, select the better
466 	 *	fitting node.
467 	 */
468 
469 	fpp = &_root;
470 	allocp = *fpp;
471 	left_branch = allocp->left;
472 	right_branch = allocp->right;
473 	left_weight = weight(left_branch);
474 	right_weight = weight(right_branch);
475 
476 	while (left_weight >= nbytes || right_weight >= nbytes) {
477 		if (left_weight <= right_weight) {
478 			if (left_weight >= nbytes) {
479 				fpp = &allocp->left;
480 				allocp = left_branch;
481 			} else {
482 				fpp = &allocp->right;
483 				allocp = right_branch;
484 			}
485 		} else {
486 			if (right_weight >= nbytes) {
487 				fpp = &allocp->right;
488 				allocp = right_branch;
489 			} else {
490 				fpp = &allocp->left;
491 				allocp = left_branch;
492 			}
493 		}
494 		left_branch = allocp->left;
495 		right_branch = allocp->right;
496 		left_weight = weight(left_branch);
497 		right_weight = weight(right_branch);
498 	} /*while*/
499 
500 	/*
501 	 * allocate storage from the selected node.
502 	 */
503 
504 	if (allocp->size - nbytes <= SMALLEST_BLK) {
505 		/*
506 		 * not big enough to split; must leave at least
507 		 * a dblk's worth of space.
508 		 */
509 		retblk = allocp->block;
510 		delete(fpp);
511 	} else {
512 
513 		/*
514 		 * Split the selected block n bytes from the top. The
515 		 * n bytes at the top are returned to the caller; the
516 		 * remainder of the block goes back to free space.
517 		 */
518 		Dblk nblk;
519 
520 		retblk = allocp->block;
521 		nblk = nextblk(retblk, nbytes);		/* ^next block */
522 		nblk->size =  allocp->size = retblk->size - nbytes;
523 		__mallinfo.ordblks++;			/* count fragments */
524 
525 		/*
526 		 * Change the selected node to point at the newly split
527 		 * block, and move the node to its proper place in
528 		 * the free space list.
529 		 */
530 		allocp->block = nblk;
531 		demote(fpp);
532 
533 		/*
534 		 * set the length field of the allocated block; we need
535 		 * this because free() does not specify a length.
536 		 */
537 		retblk->size = nbytes;
538 	}
539 	/* maintain statistics */
540 	__mallinfo.uordbytes += retblk->size;		/* bytes allocated */
541 	__mallinfo.allocated++;				/* frags allocated */
542 	if (nbytes < __mallinfo.mxfast)
543 		__mallinfo.smblks++;	/* kludge to pass the SVVS */
544 
545 	return((ptr_t)retblk->data);
546 
547 } /*malloc*/
548 
549 /*
550  * free(p)
551  *	return a block to the free space tree.
552  *
553  * algorithm:
554  *	Starting at the root, search for and coalesce free blocks
555  *	adjacent to one given.  When the appropriate place in the
556  *	tree is found, insert the given block.
557  *
558  * Some sanity checks to avoid total confusion in the tree.
559  *	If the block has already been freed, return.
560  *	If the ptr is not from the sbrk'ed space, return.
561  *	If the block size is invalid, return.
562  */
563 free_t
564 free(ptr_t ptr)
565 {
566 	uint 	 nbytes;	/* Size of node to be released */
567 	Freehdr *fpp;		/* For deletion from free list */
568 	Freehdr neighbor;	/* Node to be coalesced */
569 	Dblk	 neighbor_blk;	/* Ptr to potential neighbor */
570 	uint	 neighbor_size;	/* Size of potential neighbor */
571 	Dblk	 oldblk;	/* Ptr to block to be freed */
572 
573 	/*
574 	 * if rigorous checking was requested, do it.
575 	 */
576 	if (debug_level >= 2) {
577 		malloc_verify();
578 	}
579 
580 	/*
581 	 * Check the address of the old block.
582 	 */
583 	if ( misaligned(ptr) ) {
584 		error("free: illegal address (%#x)\n", ptr);
585 		free_return(0);
586 	}
587 
588 	/*
589 	 * Freeing something that wasn't allocated isn't
590 	 * exactly kosher, but fclose() does it routinely.
591 	 */
592 	if( ptr < (ptr_t)_lbound || ptr > (ptr_t)_ubound ) {
593 		errno = EINVAL;
594 		free_return(0);
595 	}
596 
597 	/*
598 	 * Get node length by backing up by the size of a header.
599 	 * Check for a valid length.  It must be a positive
600 	 * multiple of ALIGNSIZ, at least as large as SMALLEST_BLK,
601 	 * no larger than the extent of the heap, and must not
602 	 * extend beyond the end of the heap.
603 	 */
604 	oldblk = (Dblk)((char*)ptr - ALIGNSIZ);
605 	nbytes = oldblk->size;
606 	if (badblksize(oldblk,nbytes)) {
607 		error("free: bad block size (%d) at %#x\n",
608 			(int)nbytes, (int)oldblk );
609 		free_return(0);
610 	}
611 
612 	/* maintain statistics */
613 	__mallinfo.uordbytes -= nbytes;		/* bytes allocated */
614 	__mallinfo.allocated--;			/* frags allocated */
615 
616 	/*
617 	 * Search the tree for the correct insertion point for this
618 	 *	node, coalescing adjacent free blocks along the way.
619 	 */
620 	fpp = &_root;
621 	neighbor = *fpp;
622 	while (neighbor != NIL) {
623 		neighbor_blk = neighbor->block;
624 		neighbor_size = neighbor->size;
625 		if (oldblk < neighbor_blk) {
626 			Dblk nblk = nextblk(oldblk,nbytes);
627 			if (nblk == neighbor_blk) {
628 				/*
629 				 * Absorb and delete right neighbor
630 				 */
631 				nbytes += neighbor_size;
632 				__mallinfo.ordblks--;
633 				delete(fpp);
634 			} else if (nblk > neighbor_blk) {
635 				/*
636 				 * The block being freed overlaps
637 				 * another block in the tree.  This
638 				 * is bad news.  Return to avoid
639 				 * further fouling up the tree.
640 				 */
641 				 error("free: blocks %#x, %#x overlap\n",
642 						(int)oldblk, (int)neighbor_blk);
643 				 free_return(0);
644 			} else {
645 				/*
646 				 * Search to the left
647 			 	 */
648 				fpp = &neighbor->left;
649 			}
650 		} else if (oldblk > neighbor_blk) {
651 			Dblk nblk = nextblk(neighbor_blk, neighbor_size);
652 			if (nblk == oldblk) {
653 				/*
654 				 * Absorb and delete left neighbor
655 				 */
656 				oldblk = neighbor_blk;
657 				nbytes += neighbor_size;
658 				__mallinfo.ordblks--;
659 				delete(fpp);
660 			} else if (nblk > oldblk) {
661 				/*
662 				 * This block has already been freed
663 				 */
664 				error("free: block %#x was already free\n",
665 					(int)ptr);
666 				free_return(0);
667 			} else {
668 				/*
669 				 * search to the right
670 				 */
671 				fpp = &neighbor->right;
672 			}
673 		} else {
674 			/*
675 			 * This block has already been freed
676 			 * as "oldblk == neighbor_blk"
677 			 */
678 			error("free: block %#x was already free\n", (int)ptr);
679 			free_return(0);
680 		} /*else*/
681 
682 		/*
683 		 * Note that this depends on a side effect of
684 		 * delete(fpp) in order to terminate the loop!
685 		 */
686 		neighbor = *fpp;
687 
688 	} /*while*/
689 
690 	/*
691 	 * Insert the new node into the free space tree
692 	 */
693 	insert( oldblk, nbytes );
694 	free_return(1);
695 
696 } /*free*/
697 
698 
699 /*
700  * char*
701  * shrink(oldblk, oldsize, newsize)
702  *	Decreases the size of an old block to a new size.
703  *	Returns the remainder to free space.  Returns the
704  *	truncated block to the caller.
705  */
706 
707 static char *
708 shrink(Dblk oldblk, uint oldsize, uint newsize)
709 {
710 	Dblk remainder;
711 	if (oldsize - newsize >= SMALLEST_BLK) {
712 		/*
713 		 * Block is to be contracted. Split the old block
714 		 * and return the remainder to free space.
715 		 */
716 		remainder = nextblk(oldblk, newsize);
717 		remainder->size = oldsize - newsize;
718 		oldblk->size = newsize;
719 
720 		/* maintain statistics */
721 		__mallinfo.ordblks++;		/* count fragments */
722 		__mallinfo.allocated++;		/* negate effect of free() */
723 
724 		free(remainder->data);
725 	}
726 	return(oldblk->data);
727 }
728 
729 /*
730  * char*
731  * realloc(ptr, nbytes)
732  *
733  * Reallocate an old block with a new size, returning the old block
734  * if possible. The block returned is guaranteed to preserve the
735  * contents of the old block up to min(size(old block), newsize).
736  *
737  * For backwards compatibility, ptr is allowed to reference
738  * a block freed since the LAST call of malloc().  Thus the old
739  * block may be busy, free, or may even be nested within a free
740  * block.
741  *
742  * Some old programs have been known to do things like the following,
743  * which is guaranteed not to work:
744  *
745  *	free(ptr);
746  *	free(dummy);
747  *	dummy = malloc(1);
748  *	ptr = realloc(ptr,nbytes);
749  *
750  * This atrocity was found in the source for diff(1).
751  */
752 ptr_t
753 realloc(ptr_t ptr, uint nbytes)
754 {
755 	Freehdr *fpp;
756 	Freehdr fp;
757 	Dblk	oldblk;
758 	Dblk	freeblk;
759 	Dblk	oldneighbor;
760 	uint	oldsize;
761 	uint	newsize;
762 	uint	oldneighborsize;
763 
764 	/*
765 	 * Add SVR4 semantics for OS 5.x so /usr/lib libraries
766 	 * work correctly when running in BCP mode
767 	 */
768 	if (ptr == NULL) {
769 		return (malloc(nbytes));
770 	}
771 
772 	/*
773 	 * if rigorous checking was requested, do it.
774 	 */
775 	if (debug_level >= 2) {
776 		malloc_verify();
777 	}
778 
779 	/*
780 	 * Check the address of the old block.
781 	 */
782 	if ( misaligned(ptr) ||
783 	    ptr < (ptr_t)_lbound ||
784 	    ptr > (ptr_t)_ubound ) {
785 		error("realloc: illegal address (%#x)\n", ptr);
786 		return(NULL);
787 	}
788 
789 	/*
790 	 * check location and size of the old block and its
791 	 * neighboring block to the right.  If the old block is
792 	 * at end of memory, the neighboring block is undefined.
793 	 */
794 	oldblk = (Dblk)((char*)ptr - ALIGNSIZ);
795 	oldsize = oldblk->size;
796 	if (badblksize(oldblk,oldsize)) {
797 		error("realloc: bad block size (%d) at %#x\n",
798 			oldsize, oldblk);
799 		return(NULL);
800 	}
801 	oldneighbor = nextblk(oldblk,oldsize);
802 
803 	/* *** tree search code pulled into separate subroutine *** */
804 	if (reclaim(oldblk, oldsize, 1) == -1) {
805 		return(NULL);		/* internal error */
806 	}
807 
808 	/*
809 	 * At this point, we can guarantee that oldblk is out of free
810 	 * space. What we do next depends on a comparison of the size
811 	 * of the old block and the requested new block size.  To do
812 	 * this, first round up the new size request.
813 	 */
814 	newsize = nbytes + ALIGNSIZ;		/* add size of a length word */
815 	if (newsize < SMALLEST_BLK) {
816 		newsize = SMALLEST_BLK;
817 	} else {
818 		newsize = roundup(newsize, ALIGNSIZ);
819 	}
820 
821 	/*
822 	 * Next, examine the size of the old block, and compare it
823 	 * with the requested new size.
824 	 */
825 
826 	if (oldsize >= newsize) {
827 		/*
828 		 * Block is to be made smaller.
829 		 */
830 		return(shrink(oldblk, oldsize, newsize));
831 	}
832 
833 	/*
834 	 * Block is to be expanded.  Look for adjacent free memory.
835 	 */
836 	if ( oldneighbor < (Dblk)_ubound ) {
837 		/*
838 		 * Search for the adjacent block in the free
839 		 * space tree.  Note that the tree may have been
840 		 * modified in the earlier loop.
841 		 */
842 		fpp = &_root;
843 		fp = *fpp;
844 		oldneighborsize = oldneighbor->size;
845 		if ( badblksize(oldneighbor, oldneighborsize) ) {
846 			error("realloc: bad blocksize(%d) at %#x\n",
847 				oldneighborsize, oldneighbor);
848 			return(NULL);
849 		}
850 		while ( weight(fp) >= oldneighborsize ) {
851 			freeblk = fp->block;
852 			if (oldneighbor < freeblk) {
853 				/*
854 				 * search to the left
855 				 */
856 				fpp = &(fp->left);
857 				fp = *fpp;
858 			}
859 			else if (oldneighbor > freeblk) {
860 				/*
861 				 * search to the right
862 				 */
863 				fpp = &(fp->right);
864 				fp = *fpp;
865 			}
866 			else {		/* oldneighbor == freeblk */
867 				/*
868 				 * neighboring block is free; is it big enough?
869 				 */
870 				if (oldsize + oldneighborsize >= newsize) {
871 					/*
872 					 * Big enough. Delete freeblk, join
873 					 * oldblk to neighbor, return newsize
874 					 * bytes to the caller, and return the
875 					 * remainder to free storage.
876 					 */
877 					delete(fpp);
878 
879 					/* maintain statistics */
880 					__mallinfo.ordblks--;
881 					__mallinfo.uordbytes += oldneighborsize;
882 
883 					oldsize += oldneighborsize;
884 					oldblk->size = oldsize;
885 					return(shrink(oldblk, oldsize, newsize));
886 				} else {
887 					/*
888 					 * Not big enough. Stop looking for a
889 					 * free lunch.
890 					 */
891 					break;
892 				} /*else*/
893 			} /*else*/
894 		}/*while*/
895 	} /*if*/
896 
897 	/*
898 	 * At this point, we know there is no free space in which to
899 	 * expand. Malloc a new block, copy the old block to the new,
900 	 * and free the old block, IN THAT ORDER.
901 	 */
902 	ptr = malloc(nbytes);
903 	if (ptr != NULL) {
904 		bcopy(oldblk->data, ptr, oldsize-ALIGNSIZ);
905 		free(oldblk->data);
906 	}
907 	return(ptr);
908 
909 } /* realloc */
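/*
 * A minimal caller-level sketch of the paths above, assuming every
 * call succeeds:
 *
 *	char *p = realloc(NULL, 40);	(treated as malloc(40), per the
 *					 SVR4 semantics above)
 *	p = realloc(p, 400);		(grows in place when the block to
 *					 the right is free and big enough;
 *					 otherwise malloc, bcopy, then
 *					 free of the old block)
 *	p = realloc(p, 20);		(contracts in place via shrink())
 *	free(p);
 */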
910 
911 
912 /*
913  * *** The following code was pulled out of realloc() ***
914  *
915  * int
916  * reclaim(oldblk, oldsize, flag)
917  *	If a block containing 'oldsize' bytes from 'oldblk'
918  *	is in the free list, remove it from the free list.
919  *	'oldblk' and 'oldsize' are assumed to include the free block header.
920  *
921  *	Returns 1 if block was successfully removed.
922  *	Returns 0 if block was not in free list.
923  *	Returns -1 if block spans a free/allocated boundary (error() called
924  *						if 'flag' == 1).
925  */
926 static int
927 reclaim(Dblk oldblk, uint oldsize, int flag)
928 {
929 	Dblk oldneighbor;
930 	Freehdr	*fpp;
931 	Freehdr	fp;
932 	Dblk		freeblk;
933 	uint		size;
934 
935 	/*
936 	 * Search the free space list for a node describing oldblk,
937 	 * or a node describing a block containing oldblk.  Assuming
938 	 * the size of blocks decreases monotonically with depth in
939 	 * the tree, the loop may terminate as soon as a block smaller
940 	 * than oldblk is encountered.
941 	 */
942 
943 	oldneighbor = nextblk(oldblk, oldsize);
944 
945 	fpp = &_root;
946 	fp = *fpp;
947 	while ( (size = weight(fp)) >= oldsize ) {
948 		freeblk = fp->block;
949 		if (badblksize(freeblk,size)) {
950 			error("realloc: bad block size (%d) at %#x\n",
951 				size, freeblk);
952 			return(-1);
953 		}
954 		if ( oldblk == freeblk ) {
955 			/*
956 			 * |<-- freeblk ...
957 			 * _________________________________
958 			 * |<-- oldblk ...
959 			 * ---------------------------------
960 			 * Found oldblk in the free space tree; delete it.
961 			 */
962 			delete(fpp);
963 
964 			/* maintain statistics */
965 			__mallinfo.uordbytes += oldsize;
966 			__mallinfo.allocated++;
967 			return(1);
968 		}
969 		else if (oldblk < freeblk) {
970 			/*
971 			 * 		|<-- freeblk ...
972 			 * _________________________________
973 			 * |<--oldblk ...
974 			 * ---------------------------------
975 			 * Search to the left for oldblk
976 			 */
977 			fpp = &fp->left;
978 			fp = *fpp;
979 		}
980 		else {
981 			/*
982 			 * |<-- freeblk ...
983 			 * _________________________________
984 			 * |     		|<--oldblk--->|<--oldneighbor
985 			 * ---------------------------------
986 			 * oldblk is somewhere to the right of freeblk.
987 			 * Check to see if it lies within freeblk.
988 			 */
989 			Dblk freeneighbor;
990 			freeneighbor =  nextblk(freeblk, freeblk->size);
991 			if (oldblk >= freeneighbor) {
992 				/*
993 				 * |<-- freeblk--->|<--- freeneighbor ...
994 				 * _________________________________
995 				 * |  		      |<--oldblk--->|
996 				 * ---------------------------------
997 				 * no such luck; search to the right.
998 				 */
999 				fpp =  &fp->right;
1000 				fp = *fpp;
1001 			}
1002 			else {
1003 				/*
1004 				 * freeblk < oldblk < freeneighbor;
1005 				 * i.e., oldblk begins within freeblk.
1006 				 */
1007 				if (oldneighbor > freeneighbor) {
1008 					/*
1009 					 * |<-- freeblk--->|<--- freeneighbor
1010 					 * _________________________________
1011 					 * |     |<--oldblk--->|<--oldneighbor
1012 					 * ---------------------------------
1013 					 * oldblk straddles a block boundary!
1014 					 */
1015 					if (flag) {
1016 	    error("realloc: block %#x straddles free block boundary\n", oldblk);
1017 					}
1018 					return(-1);
1019 				}
1020 				else if (  oldneighbor == freeneighbor ) {
1021 					/*
1022 					 * |<-------- freeblk------------->|
1023 					 * _________________________________
1024 					 * |                 |<--oldblk--->|
1025 					 * ---------------------------------
1026 					 * Oldblk is on the right end of
1027 					 * freeblk. Delete freeblk, split
1028 					 * into two fragments, and return
1029 					 * the one on the left to free space.
1030 					 */
1031 					delete(fpp);
1032 
1033 					/* maintain statistics */
1034 					__mallinfo.ordblks++;
1035 					__mallinfo.uordbytes += oldsize;
1036 					__mallinfo.allocated += 2;
1037 
1038 					freeblk->size -= oldsize;
1039 					free(freeblk->data);
1040 					return(1);
1041 				}
1042 				else {
1043 					/*
1044 					 * |<-------- freeblk------------->|
1045 					 * _________________________________
1046 					 * |        |oldblk  | oldneighbor |
1047 					 * ---------------------------------
1048 					 * Oldblk is in the middle of freeblk.
1049 					 * Delete freeblk, split into three
1050 					 * fragments, and return the ones on
1051 					 * the ends to free space.
1052 					 */
1053 					delete(fpp);
1054 
1055 					/* maintain statistics */
1056 					__mallinfo.ordblks += 2;
1057 					__mallinfo.uordbytes += freeblk->size;
1058 					__mallinfo.allocated += 3;
1059 
1060 					/*
1061 					 * split the left fragment by
1062 					 * subtracting the size of oldblk
1063 					 * and oldblk's neighbor
1064 					 */
1065 					freeblk->size -=
1066 						( (char*)freeneighbor
1067 							- (char*)oldblk );
1068 					/*
1069 					 * split the right fragment by
1070 					 * setting oldblk's neighbor's size
1071 					 */
1072 					oldneighbor->size =
1073 						(char*)freeneighbor
1074 							- (char*)oldneighbor;
1075 					/*
1076 					 * return the fragments to free space
1077 					 */
1078 					free(freeblk->data);
1079 					free(oldneighbor->data);
1080 					return(1);
1081 				} /*else*/
1082 			} /*else*/
1083 		} /* else */
1084 	} /*while*/
1085 
1086 	return(0);		/* free block not found */
1087 }
1088 
1089 /*
1090  * bool
1091  * morecore(nbytes)
1092  *	Add a block of at least nbytes from end-of-memory to the
1093  *	free space tree.
1094  *
1095  * return value:
1096  *	true	if at least n bytes can be allocated
1097  *	false	otherwise
1098  *
1099  * remarks:
1100  *
1101  *   -- free space (delimited by the extern variable _ubound) is
1102  *	extended by an amount determined by rounding nbytes up to
1103  *	a multiple of the system page size.
1104  *
1105  *   -- The lower bound of the heap is determined the first time
1106  *	this routine is entered. It does NOT necessarily begin at
1107  *	the end of static data space, since startup code (e.g., for
1108  *	profiling) may have invoked sbrk() before we got here.
1109  */
1110 
1111 static bool
1112 morecore(uint nbytes)
1113 {
1114 	Dblk p;
1115 	Freehdr newhdr;
1116 
1117 	if (nbpg == 0) {
1118 		nbpg = getpagesize();
1119 		/* hack to avoid fragmenting the heap with the first
1120 		   freehdr page */
1121 		if ((newhdr = getfreehdr()) == NIL) {
1122 			/* Error message returned by getfreehdr() */
1123 			return(false);
1124 		}
1125 		(void)putfreehdr(newhdr);
1126 	}
1127 	nbytes = roundup(nbytes, nbpg);
1128 	p = (Dblk) sbrk((int)nbytes);
1129 	if (p == (Dblk) -1) {
1130 		if (errno == EAGAIN) errno = ENOMEM;
1131 		return(false);	/* errno = ENOMEM */
1132 	}
1133 	if (_lbound == NULL)	/* set _lbound the first time through */
1134 		_lbound = (char*) p;
1135 	_ubound = (char *) p + nbytes;
1136 	p->size = nbytes;
1137 
1138 	/* maintain statistics */
1139 	__mallinfo.arena = _ubound - _lbound;
1140 	__mallinfo.uordbytes += nbytes;
1141 	__mallinfo.ordblks++;
1142 	__mallinfo.allocated++;
1143 
1144 	free(p->data);
1145 	return(true);
1146 
1147 } /*morecore*/
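/*
 * For example, if getpagesize() reported 4096 (illustrative only), a
 * request for 10000 bytes would grow the heap by roundup(10000, 4096)
 * == 12288 bytes, all of which is immediately handed to free() above
 * and becomes a single free block.
 */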
1148 
1149 
1150 /*
1151  * Get a free block header from the free header list.
1152  * When the list is empty, allocate an array of headers.
1153  * When the array is empty, allocate another one.
1154  * When we can't allocate another array, we're in deep weeds.
1155  */
1156 static	Freehdr
1157 getfreehdr(void)
1158 {
1159 	Freehdr	r;
1160 	Dblk	blk;
1161 	uint	size;
1162 
1163 	if (freehdrlist != NIL) {
1164 		r = freehdrlist;
1165 		freehdrlist = freehdrlist->left;
1166 		return(r);
1167 	}
1168 	if (nfreehdrs <= 0) {
1169 		size = NFREE_HDRS*sizeof(struct freehdr) + ALIGNSIZ;
1170 		blk = (Dblk) sbrk(size);
1171 		if ((int)blk == -1) {
1172 			malloc_debug(1);
1173 			error("getfreehdr: out of memory");
1174 			if (errno == EAGAIN) errno = ENOMEM;
1175 			return(NIL);
1176 		}
1177 		if (_lbound == NULL)	/* set _lbound on first allocation */
1178 			_lbound = (char*)blk;
1179 		blk->size = size;
1180 		freehdrptr = (Freehdr)blk->data;
1181 		nfreehdrs = NFREE_HDRS;
1182 		_ubound = (char*) nextblk(blk,size);
1183 
1184 		/* maintain statistics */
1185 		__mallinfo.arena = _ubound - _lbound;
1186 		__mallinfo.treeoverhead += size;
1187 	}
1188 	nfreehdrs--;
1189 	return(freehdrptr++);
1190 }
1191 
1192 /*
1193  * Free a free block header
1194  * Add it to the list of available headers.
1195  */
1196 static void
1197 putfreehdr(Freehdr p)
1198 {
1199 	p->left = freehdrlist;
1200 	freehdrlist = p;
1201 }
1202 
1203 #ifndef DEBUG	/*	>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */
1204 
1205 /*
1206  * stubs for error handling and diagnosis routines. These are what
1207  * you get in the standard C library; for non-placebo diagnostics
1208  * load /usr/lib/malloc.debug.o with your program.
1209  */
1210 /*ARGSUSED*/
1211 static void
1212 error(char *fmt, ...)
1213 {
1214 	errno = EINVAL;
1215 }
1216 
1217 #endif	/* !DEBUG	<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
1218 
1219 
1220 #ifdef	DEBUG	/*	>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> */
1221 
1222 /*
1223  * malloc_debug(level)
1224  *
1225  * description:
1226  *
1227  *	Controls the level of error diagnosis and consistency checking
1228  *	done by malloc() and free(). level is interpreted as follows:
1229  *
1230  *	0:  malloc() and free() return 0 if error detected in arguments
1231  *	    (errno is set to EINVAL)
1232  *	1:  malloc() and free() abort if errors detected in arguments
1233  *	2:  same as 1, but scan entire heap for errors on every call
1234  *	    to malloc() or free()
1235  *
1236  * function result:
1237  *	returns the previous level of error reporting.
1238  */
1239 int
1240 malloc_debug(int level)
1241 {
1242 	int old_level;
1243 	old_level = debug_level;
1244 	debug_level = level;
1245 	return (old_level);
1246 }
1247 
1248 /*
1249  * check a free space tree pointer. Should be in
1250  * the static free pool or somewhere in the heap.
1251  */
1252 
1253 #define chkblk(p)\
1254 	if ( misaligned(p)\
1255 		|| ((Dblk)(p) < (Dblk)_lbound || (Dblk)(p) > (Dblk)_ubound)){\
1256 		blkerror(p);\
1257 		return 0;\
1258 	}
1259 
1260 #define chkhdr(p) chkblk(p)
1261 
1262 static
1263 blkerror(Freehdr p)
1264 {
1265 	error("Illegal block address (%#x)\n", (p));
1266 }
1267 
1268 /*
1269  * cartesian(p)
1270  *	returns 1 if free space tree p satisfies internal consistency
1271  *	checks.
1272  */
1273 
1274 static int
1275 cartesian(Freehdr p)
1276 {
1277 	Freehdr probe;
1278 	Dblk db,pdb;
1279 
1280 	if (p == NIL)				/* no tree to test */
1281 		return 1;
1282 	/*
1283 	 * check that root has a data block
1284 	 */
1285 	chkhdr(p);
1286 	pdb = p->block;
1287 	chkblk(pdb);
1288 
1289 	/*
1290 	 * check that the child blocks are no larger than the parent block.
1291 	 */
1292 	probe = p->left;
1293 	if (probe != NIL) {
1294 		chkhdr(probe);
1295 		db = probe->block;
1296 		chkblk(db);
1297 		if (probe->size > p->size)	/* child larger than parent */
1298 			return 0;
1299 	}
1300 	probe = p->right;
1301 	if (probe != NIL) {
1302 		chkhdr(probe);
1303 		db = probe->block;
1304 		chkblk(db);
1305 		if (probe->size > p->size)	/* child larger than parent */
1306 			return 0;
1307 	}
1308 	/*
1309 	 * test data addresses in the left subtree,
1310 	 * starting at the left subroot and probing to
1311 	 * the right.  All data addresses must be < p->block.
1312 	 */
1313 	probe = p->left;
1314 	while (probe != NIL) {
1315 		chkhdr(probe);
1316 		db = probe->block;
1317 		chkblk(db);
1318 		if ( nextblk(db, probe->size) >= pdb )	/* overlap */
1319 			return 0;
1320 		probe = probe->right;
1321 	}
1322 	/*
1323 	 * test data addresses in the right subtree,
1324 	 * starting at the right subroot and probing to
1325 	 * the left.  All addresses must be > nextblk(p->block).
1326 	 */
1327 	pdb = nextblk(pdb, p->size);
1328 	probe = p->right;
1329 	while (probe != NIL) {
1330 		chkhdr(probe);
1331 		db = probe->block;
1332 		chkblk(db);
1333 		if (db == NULL || db <= pdb)		/* overlap */
1334 			return 0;
1335 		probe = probe->left;
1336 	}
1337 	return (cartesian(p->left) && cartesian(p->right));
1338 }
1339 
1340 /*
1341  * malloc_verify()
1342  *
1343  * This is a verification routine.  It walks through all blocks
1344  * in the heap (both free and busy) and checks for bad blocks.
1345  * malloc_verify returns 1 if the heap contains no detectably bad
1346  * blocks; otherwise it returns 0.
1347  */
1348 
1349 int
1350 malloc_verify(void)
1351 {
1352 	int	maxsize;
1353 	int	hdrsize;
1354 	int	size;
1355 	Dblk	p;
1356 	uint	lb,ub;
1357 
1358 	extern  char	end[];
1359 
1360 	if (_lbound == NULL)	/* no allocation yet */
1361 		return 1;
1362 
1363 	/*
1364 	 * first check heap bounds pointers
1365 	 */
1366 	lb = (uint)end;
1367 	ub = (uint)sbrk(0);
1368 
1369 	if ((uint)_lbound < lb || (uint)_lbound > ub) {
1370 		error("malloc_verify: illegal heap lower bound (%#x)\n",
1371 			_lbound);
1372 		return 0;
1373 	}
1374 	if ((uint)_ubound < lb || (uint)_ubound > ub) {
1375 		error("malloc_verify: illegal heap upper bound (%#x)\n",
1376 			_ubound);
1377 		return 0;
1378 	}
1379 	maxsize = heapsize();
1380 	p = (Dblk)_lbound;
1381 	while (p < (Dblk) _ubound) {
1382 		size = p->size;
1383 		if ( (size) < SMALLEST_BLK
1384 			|| (size) & (ALIGNSIZ-1)
1385 			|| (size) > heapsize()
1386 			|| ((char*)(p))+(size) > _ubound ) {
1387 			error("malloc_verify: bad block size (%d) at %#x\n",
1388 				size, p);
1389 			return(0);		/* Badness */
1390 		}
1391 		p = nextblk(p, size);
1392 	}
1393 	if (p > (Dblk) _ubound) {
1394 		error("malloc_verify: heap corrupted\n");
1395 		return(0);
1396 	}
1397 	if (!cartesian(_root)){
1398 		error("malloc_verify: free space tree corrupted\n");
1399 		return(0);
1400 	}
1401 	return(1);
1402 }
1403 
1404 /*
1405  * The following is a kludge to avoid dependency on stdio, which
1406  * uses malloc() and free(), one of which probably got us here in
1407  * the first place.
1408  */
1409 
1410 #define putchar(c) (*buf++ = (c))
1411 extern	int	fileno();	/*bletch*/
1412 #define stderr 2		/*bletch*/
1413 #define	LBUFSIZ	256
1414 
1415 static	char	stderrbuf[LBUFSIZ];
1416 
1417 /*
1418  * Error routine.
1419  * If debug_level == 0, does nothing except set errno = EINVAL.
1420  * Otherwise, prints an error message to stderr and generates a
1421  * core image.
1422  */
1423 static void
1424 error(char *fmt, ...)
1425 {
1426 	static int n = 0; /* prevents infinite recursion when using stdio */
1427 	int nbytes;
1428 	va_list ap;
1429 
1430 	errno = EINVAL;
1431 	if (debug_level == 0)
1432 		return;
1433 	if (!n++) {
1434 		va_start(ap, fmt);
1435 		nbytes = vsprintf(stderrbuf, fmt, ap);
1436 		va_end(ap);
1437 		stderrbuf[nbytes++] = '\n';
1438 		stderrbuf[nbytes] = '\0';
1439 		write(fileno(stderr), stderrbuf, nbytes);
1440 	}
1441 	abort();
1442 }
1443 
1444 #endif	/* DEBUG	<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<< */
1445