/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#pragma ident	"%Z%%M% %I% %E% SMI"

#if 1
#undef DEBUG
#endif

/* #define DEBUG ON */

/*
 * Conditions on use:
 * kmem_alloc and kmem_free must not be called from interrupt level,
 * except from software interrupt level. This is because they are
 * not reentrant, and only block out software interrupts. They take
 * too long to run with real device interrupts blocked. There is a
 * routine kmem_free_intr that can be used to free blocks at
 * interrupt level, but only up to splimp, not higher. This is
 * because kmem_free_intr only spl's to splimp.
 *
 * Also, these routines are not that fast, so they should not be used
 * in very frequent operations (e.g. operations that happen more often
 * than, say, once every few seconds).
 */

/*
 * description:
 *	Yet another memory allocator, this one based on a method
 *	described in C.J. Stephenson, "Fast Fits", IBM Sys. Journal
 *
 *	The basic data structure is a "Cartesian" binary tree, in which
 *	nodes are ordered by ascending addresses (thus minimizing free
 *	list insertion time) and block sizes decrease with depth in the
 *	tree (thus minimizing search time for a block of a given size).
 *
 *	In other words, for any node s, letting D(s) denote
 *	the set of descendants of s, we have:
 *
 *	a. addr(D(left(s))) < addr(s) < addr(D(right(s)))
 *	b. len(D(left(s))) <= len(s) >= len(D(right(s)))
 */
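
/*
 * For illustration (addresses and lengths here are made-up values),
 * a small tree obeying both orderings might look like:
 *
 *                    (addr 0x8000, len 0x400)
 *                   /                        \
 *   (addr 0x2000, len 0x200)      (addr 0xc000, len 0x300)
 *
 * 0x2000 < 0x8000 < 0xc000 satisfies (a), and
 * 0x400 >= 0x200, 0x400 >= 0x300 satisfies (b).
 */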

#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/salib.h>
#include <sys/saio.h>
#include <sys/promif.h>

/*
 * The node header structure.
 *
 * To reduce storage consumption, a header block is associated with
 * free blocks only, not allocated blocks.
 * When a free block is allocated, its header block is put on
 * a free header block list.
 *
 * This creates a header space and a free block space.
 * The left pointer of a header block is used to chain free header
 * blocks together.
 */

typedef enum {false, true} bool;
typedef struct freehdr *Freehdr;
typedef struct dblk *Dblk;

/*
 * Description of a header for a free block
 * Only free blocks have such headers.
 */
struct freehdr {
	Freehdr	left;		/* Left tree pointer */
	Freehdr	right;		/* Right tree pointer */
	Dblk	block;		/* Ptr to the data block */
	size_t	size;		/* Size of the data block */
};

#define	NIL		((Freehdr) 0)
#define	WORDSIZE	sizeof (int)
#define	SMALLEST_BLK	1	/* Size of smallest block */

/*
 * Description of a data block.
 */
struct dblk {
	char	data[1];	/* Addr returned to the caller */
};

/*
 * weight(x) is the size of a block, in bytes; or 0 if and only if x
 * is a null pointer. It is the responsibility of kmem_alloc() and
 * kmem_free() to keep zero-length blocks out of the arena.
 */

#define	weight(x)	((x) == NIL? 0: (x->size))
#define	nextblk(p, size)	((Dblk) ((char *)(p) + (size)))
#define	max(a, b)	((a) < (b)? (b): (a))

void *kmem_alloc(size_t, int);
void kmem_free(void *ptr, size_t nbytes);
Freehdr getfreehdr(void);
static bool morecore(size_t);
void insert(Dblk p, size_t len, Freehdr *tree);
void freehdr(Freehdr p);
void delete(Freehdr *p);
static void check_need_to_free(void);
extern caddr_t resalloc(enum RESOURCES, size_t, caddr_t, int);
#ifdef	__sparc
extern void resalloc_init(void);
#endif
extern int splnet(void);
extern int splimp(void);
extern void splx(int);

/*
 * Structure containing various info about allocated memory.
 */
#define	NEED_TO_FREE_SIZE	5
struct kmem_info {
	Freehdr	free_root;
	Freehdr	free_hdr_list;
	struct map *map;
	struct pte *pte;
	caddr_t	vaddr;
	struct need_to_free {
		caddr_t addr;
		size_t	nbytes;
	} need_to_free_list, need_to_free[NEED_TO_FREE_SIZE];
} kmem_info;


struct map *kernelmap;

#ifdef DEBUG
static void prtree(Freehdr, char *);
#endif

/*
 * Initialize kernel memory allocator
 */

void
kmem_init(void)
{
	int i;
	struct need_to_free *ntf;

#ifdef DEBUG
	printf("kmem_init entered\n");
#endif

#ifdef	__sparc
	resalloc_init();
#endif

	kmem_info.free_root = NIL;
	kmem_info.free_hdr_list = NULL;
	kmem_info.map = kernelmap;
	kmem_info.need_to_free_list.addr = 0;
	ntf = kmem_info.need_to_free;
	for (i = 0; i < NEED_TO_FREE_SIZE; i++) {
		ntf[i].addr = 0;
	}
#ifdef DEBUG
	printf("kmem_init returning\n");
	prtree(kmem_info.free_root, "kmem_init");
#endif
}

/*
 * Insert a new node in a cartesian tree or subtree, placing it
 * in the correct position with respect to the existing nodes.
 *
 * algorithm:
 *	Starting from the root, a binary search is made for the new
 *	node. If this search were allowed to continue, it would
 *	eventually fail (since there cannot already be a node at the
 *	given address); but in fact it stops when it reaches a node in
 *	the tree which has a length less than that of the new node (or
 *	when it reaches a null tree pointer). The new node is then
 *	inserted at the root of the subtree for which the shorter node
 *	forms the old root (or in place of the null pointer).
 */
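
/*
 * For illustration (made-up addresses and lengths), consider inserting
 * a block at address 0x2800 with length 0x600 into this tree:
 *
 *              (0x2000, 0x400)
 *             /               \
 *     (0x1000, 0x200)     (0x3000, 0x100)
 *
 * The root's length (0x400) is already less than 0x600, so the search
 * stops at once and the new node becomes the root. The old root
 * (0x2000) lies to the left of 0x2800, so it keeps its left subtree
 * and hangs off the new node's left link; (0x3000, 0x100) lies to the
 * right of 0x2800, so it becomes the new node's right child:
 *
 *              (0x2800, 0x600)
 *             /               \
 *     (0x2000, 0x400)     (0x3000, 0x100)
 *     /
 * (0x1000, 0x200)
 */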


void
insert(Dblk p,			/* Ptr to the block to insert */
	size_t len,		/* Length of new node */
	Freehdr *tree)		/* Address of ptr to root */
{
	Freehdr x;
	Freehdr *left_hook;	/* Temp for insertion */
	Freehdr *right_hook;	/* Temp for insertion */
	Freehdr newhdr;

	x = *tree;
	/*
	 * Search for the first node which has a weight less
	 * than that of the new node; this will be the
	 * point at which we insert the new node.
	 */

	while (weight(x) >= len) {
		if (p < x->block)
			tree = &x->left;
		else
			tree = &x->right;
		x = *tree;
	}

	/*
	 * Perform root insertion. The variable x traces a path through
	 * the tree, and with the help of left_hook and right_hook,
	 * rewrites all links that cross the territory occupied
	 * by p. Note that this improves performance under
	 * paging.
	 */

	newhdr = getfreehdr();
	*tree = newhdr;
	left_hook = &newhdr->left;
	right_hook = &newhdr->right;

	newhdr->left = NIL;
	newhdr->right = NIL;
	newhdr->block = p;
	newhdr->size = len;

	while (x != NIL) {
		/*
		 * Remark:
		 * The name 'left_hook' is somewhat confusing, since
		 * it is always set to the address of a .right link
		 * field. However, its value is always an address
		 * below (i.e., to the left of) p. Similarly
		 * for right_hook. The values of left_hook and
		 * right_hook converge toward the value of p,
		 * as in a classical binary search.
		 */
		if (x->block < p) {
			/*
			 * rewrite link crossing from the left
			 */
			*left_hook = x;
			left_hook = &x->right;
			x = x->right;
		} else {
			/*
			 * rewrite link crossing from the right
			 */
			*right_hook = x;
			right_hook = &x->left;
			x = x->left;
		} /* else */
	} /* while */

	*left_hook = *right_hook = NIL;	/* clear remaining hooks */

} /* insert */


/*
 * Delete a node from a cartesian tree. p is the address of
 * a pointer to the node which is to be deleted.
 *
 * algorithm:
 *	The left and right sons of the node to be deleted define two
 *	subtrees which are to be merged and attached in place of the
 *	deleted node. Each node on the inside edges of these two
 *	subtrees is examined and longer nodes are placed above the
 *	shorter ones.
 *
 * On entry:
 *	*p is assumed to be non-null.
 */
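
/*
 * For illustration (made-up addresses and lengths), deleting the root
 * of:
 *
 *              (0x5000, 0x400)
 *             /               \
 *     (0x2000, 0x300)     (0x8000, 0x200)
 *
 * merges the two subtrees. The left branch (len 0x300) outweighs the
 * right branch (len 0x200), so it is promoted first and the right
 * branch is attached below it:
 *
 *     (0x2000, 0x300)
 *                    \
 *              (0x8000, 0x200)
 */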

void
delete(Freehdr *p)
{
	Freehdr x;
	Freehdr left_branch;	/* left subtree of deleted node */
	Freehdr right_branch;	/* right subtree of deleted node */

	x = *p;
	left_branch = x->left;
	right_branch = x->right;

	while (left_branch != right_branch) {
		/*
		 * iterate until left branch and right branch are
		 * both NIL.
		 */
		if (weight(left_branch) >= weight(right_branch)) {
			/*
			 * promote the left branch
			 */
			*p = left_branch;
			p = &left_branch->right;
			left_branch = left_branch->right;
		} else {
			/*
			 * promote the right branch
			 */
			*p = right_branch;
			p = &right_branch->left;
			right_branch = right_branch->left;
		} /* else */
	} /* while */
	*p = NIL;
	freehdr(x);
} /* delete */


/*
 * Demote a node in a cartesian tree, if necessary, to establish
 * the required vertical ordering.
 *
 * algorithm:
 *	The left and right subtrees of the node to be demoted are to
 *	be partially merged and attached in place of the demoted node.
 *	The nodes on the inside edges of these two subtrees are
 *	examined and the longer nodes are placed above the shorter
 *	ones, until a node is reached which has a length no greater
 *	than that of the node being demoted (or until a null pointer
 *	is reached). The node is then attached at this point, and
 *	the remaining subtrees (if any) become its descendants.
 *
 * on entry:
 *	a. All the nodes in the tree, including the one to be demoted,
 *	   must be correctly ordered horizontally;
 *	b. All the nodes except the one to be demoted must also be
 *	   correctly positioned vertically. The node to be demoted
 *	   may be already correctly positioned vertically, or it may
 *	   have a length which is less than that of one or both of
 *	   its progeny.
 *	c. *p is non-null
 */
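
/*
 * For illustration (made-up addresses and lengths), suppose the node
 * (0x4000, 0x80) has just been shrunk by an allocation and now weighs
 * less than both of its children:
 *
 *              (0x4000, 0x80)
 *             /              \
 *     (0x2000, 0x200)    (0x6000, 0x100)
 *
 * demote() promotes the heavier left branch, then the right branch,
 * and finally reattaches the demoted node below both:
 *
 *     (0x2000, 0x200)
 *                    \
 *              (0x6000, 0x100)
 *             /
 *     (0x4000, 0x80)
 */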


static void
demote(Freehdr *p)
{
	Freehdr x;		/* addr of node to be demoted */
	Freehdr left_branch;
	Freehdr right_branch;
	size_t wx;

	x = *p;
	left_branch = x->left;
	right_branch = x->right;
	wx = weight(x);

	while (weight(left_branch) > wx || weight(right_branch) > wx) {
		/*
		 * select a descendant branch for promotion
		 */
		if (weight(left_branch) >= weight(right_branch)) {
			/*
			 * promote the left branch
			 */
			*p = left_branch;
			p = &left_branch->right;
			left_branch = *p;
		} else {
			/*
			 * promote the right branch
			 */
			*p = right_branch;
			p = &right_branch->left;
			right_branch = *p;
		} /* else */
	} /* while */

	*p = x;			/* attach demoted node here */
	x->left = left_branch;
	x->right = right_branch;
} /* demote */

/*
 * Allocate a block of storage
 *
 * algorithm:
 *	The freelist is searched by descending the tree from the root
 *	so that at each decision point the "better fitting" child node
 *	is chosen (i.e., the shorter one, if it is long enough, or
 *	the longer one, otherwise). The descent stops when both
 *	child nodes are too short.
 *
 * function result:
 *	kmem_alloc returns a pointer to the allocated block; a null
 *	pointer indicates storage could not be allocated.
 */
/*
 * We need to return blocks that are on word boundaries so that callers
 * that are putting int's into the area will work. Since we allow
 * arbitrary freeing, we need a weight function that treats free
 * blocks starting on an unaligned boundary specially. Allocation is
 * aligned to 8 byte boundaries (ALIGN).
 */
#define	ALIGN		8	/* doubleword aligned .. */
#define	ALIGNMASK	(ALIGN-1)
#define	ALIGNMORE(addr)	(ALIGN - ((uintptr_t)(addr) & ALIGNMASK))

/*
 * If it is empty then weight == 0
 * If it is aligned then weight == size
 * If it is unaligned
 *	if not enough room to align then weight == 0
 *	else weight == aligned size
 */
#define	mweight(x) ((x) == NIL ? 0 : \
	((((uintptr_t)(x)->block) & ALIGNMASK) == 0 ? (x)->size : \
	(((x)->size <= ALIGNMORE((x)->block)) ? 0 : \
	(x)->size - ALIGNMORE((x)->block))))
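
/*
 * For illustration (made-up values, ALIGN == 8): a free block whose
 * address is 0x1003 needs ALIGNMORE(0x1003) == 8 - 3 == 5 bytes to
 * reach the next 8-byte boundary. If its size is 0x20, its mweight is
 * 0x20 - 5 == 0x1b usable bytes; if its size is only 4, there is no
 * room to align and its mweight is 0. A block whose address is already
 * a multiple of 8 has mweight equal to its size.
 */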

/*ARGSUSED1*/
void *
kmem_alloc(size_t nbytes, int kmflag)
{
	Freehdr a;		/* ptr to node to be allocated */
	Freehdr *p;		/* address of ptr to node */
	size_t left_weight;
	size_t right_weight;
	Freehdr left_son;
	Freehdr right_son;
	char *retblock;		/* Address returned to the user */
	int s;
#ifdef DEBUG
	printf("kmem_alloc(nbytes 0x%lx)\n", nbytes);
#endif /* DEBUG */

	if (nbytes == 0) {
		return (NULL);
	}
	s = splnet();

	if (nbytes < SMALLEST_BLK) {
		printf("illegal kmem_alloc call for %lx bytes\n", nbytes);
		prom_panic("kmem_alloc");
	}
	check_need_to_free();

	/*
	 * ensure that at least one block is big enough to satisfy
	 * the request.
	 */

	if (mweight(kmem_info.free_root) <= nbytes) {
		/*
		 * the largest block is not enough.
		 */
		if (!morecore(nbytes)) {
			printf("kmem_alloc failed, nbytes %lx\n", nbytes);
			prom_panic("kmem_alloc");
		}
	}

	/*
	 * search down through the tree until a suitable block is
	 * found. At each decision point, select the better
	 * fitting node.
	 */

	p = (Freehdr *) &kmem_info.free_root;
	a = *p;
	left_son = a->left;
	right_son = a->right;
	left_weight = mweight(left_son);
	right_weight = mweight(right_son);

	while (left_weight >= nbytes || right_weight >= nbytes) {
		if (left_weight <= right_weight) {
			if (left_weight >= nbytes) {
				p = &a->left;
				a = left_son;
			} else {
				p = &a->right;
				a = right_son;
			}
		} else {
			if (right_weight >= nbytes) {
				p = &a->right;
				a = right_son;
			} else {
				p = &a->left;
				a = left_son;
			}
		}
		left_son = a->left;
		right_son = a->right;
		left_weight = mweight(left_son);
		right_weight = mweight(right_son);
	} /* while */

	/*
	 * allocate storage from the selected node.
	 */

	if (a->size - nbytes < SMALLEST_BLK) {
		/*
		 * not big enough to split; must leave at least
		 * a dblk's worth of space.
		 */
		retblock = a->block->data;
		delete(p);
	} else {

		/*
		 * split the node, allocating nbytes from the top.
		 * Remember we've already accounted for the
		 * allocated node's header space.
		 */
		Freehdr x;
		x = getfreehdr();
		if ((uintptr_t)a->block->data & ALIGNMASK) {
			size_t size;
			if (a->size <= ALIGNMORE(a->block->data))
				prom_panic("kmem_alloc: short block allocated");
			size = nbytes + ALIGNMORE(a->block->data);
			x->block = a->block;
			x->size = ALIGNMORE(a->block->data);
			x->left = a->left;
			x->right = a->right;
			/*
			 * the node pointed to by *p has become smaller;
			 * move it down to its appropriate place in
			 * the tree.
			 */
			*p = x;
			demote(p);
			retblock = a->block->data + ALIGNMORE(a->block->data);
			if (a->size > size) {
				kmem_free((caddr_t)nextblk(a->block, size),
				    (size_t)(a->size - size));
			}
			freehdr(a);
		} else {
			x->block = nextblk(a->block, nbytes);
			x->size = a->size - nbytes;
			x->left = a->left;
			x->right = a->right;
			/*
			 * the node pointed to by *p has become smaller;
			 * move it down to its appropriate place in
			 * the tree.
			 */
			*p = x;
			demote(p);
			retblock = a->block->data;
			freehdr(a);
		}
	}
#ifdef DEBUG
	prtree(kmem_info.free_root, "kmem_alloc");
#endif

	splx(s);
	bzero(retblock, nbytes);
#ifdef DEBUG
	printf("kmem_alloc bzero complete - returning %p\n", retblock);
#endif
	return (retblock);

} /* kmem_alloc */

/*
 * Return a block to the free space tree.
 *
 * algorithm:
 *	Starting at the root, search for and coalesce free blocks
 *	adjacent to one given. When the appropriate place in the
 *	tree is found, insert the given block.
 *
 * Do some sanity checks to avoid total confusion in the tree.
 * If the block has already been freed, prom_panic.
 * If the ptr is not from the arena, prom_panic.
 */
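
/*
 * For illustration (made-up addresses): freeing 0x100 bytes at 0x3000
 * while the tree already holds a free block covering [0x3100, 0x3200)
 * absorbs that right neighbor: the neighbor is deleted from the tree
 * and the block being freed grows to 0x200 bytes at 0x3000 before it
 * is finally inserted. Left neighbors are absorbed the same way, so
 * free blocks that are adjacent in memory are merged rather than left
 * side by side in the tree.
 */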
void
kmem_free(void *ptr, size_t nbytes)
{
	Freehdr *np;		/* For deletion from free list */
	Freehdr neighbor;	/* Node to be coalesced */
	char *neigh_block;	/* Ptr to potential neighbor */
	size_t neigh_size;	/* Size of potential neighbor */
	int s;

#ifdef DEBUG
	printf("kmem_free (ptr %p nbytes %lx)\n", ptr, nbytes);
	prtree(kmem_info.free_root, "kmem_free");
#endif

#ifdef lint
	neigh_block = bkmem_zalloc(nbytes);
	neigh_block = neigh_block;
#endif
	if (nbytes == 0) {
		return;
	}

	if (ptr == 0) {
		prom_panic("kmem_free of 0");
	}
	s = splnet();

	/*
	 * Search the tree for the correct insertion point for this
	 * node, coalescing adjacent free blocks along the way.
	 */
	np = &kmem_info.free_root;
	neighbor = *np;
	while (neighbor != NIL) {
		neigh_block = (char *)neighbor->block;
		neigh_size = neighbor->size;
		if ((char *)ptr < neigh_block) {
			if ((char *)ptr + nbytes == neigh_block) {
				/*
				 * Absorb and delete right neighbor
				 */
				nbytes += neigh_size;
				delete(np);
			} else if ((char *)ptr + nbytes > neigh_block) {
				/*
				 * The block being freed overlaps
				 * another block in the tree. This
				 * is bad news.
				 */
				printf("kmem_free: free block overlap %p+%lx"
				    " over %p\n", (void *)ptr, nbytes,
				    (void *)neigh_block);
				prom_panic("kmem_free: free block overlap");
			} else {
				/*
				 * Search to the left
				 */
				np = &neighbor->left;
			}
		} else if ((char *)ptr > neigh_block) {
			if (neigh_block + neigh_size == ptr) {
				/*
				 * Absorb and delete left neighbor
				 */
				ptr = neigh_block;
				nbytes += neigh_size;
				delete(np);
			} else if (neigh_block + neigh_size > (char *)ptr) {
				/*
				 * This block has already been freed
				 */
				prom_panic("kmem_free block already free");
			} else {
				/*
				 * search to the right
				 */
				np = &neighbor->right;
			}
		} else {
			/*
			 * This block has already been freed
			 * as "ptr == neigh_block"
			 */
			prom_panic("kmem_free: block already free as neighbor");
			return;
		} /* else */
		neighbor = *np;
	} /* while */

	/*
	 * Insert the new node into the free space tree
	 */
	insert((Dblk) ptr, nbytes, &kmem_info.free_root);
#ifdef DEBUG
	printf("exiting kmem_free\n");
	prtree(kmem_info.free_root, "kmem_free");
#endif
	splx(s);
} /* kmem_free */

/*
 * Sigh. We include a header file which the kernel
 * uses to declare (one of its many) kmem_free prototypes.
 * In order not to use the kernel's namespace, then, we must
 * define another name here for use by boot.
 */
void *
bkmem_alloc(size_t size)
{
	return (kmem_alloc(size, 0));
}

/*
 * Boot's kmem_alloc is really kmem_zalloc().
 */
void *
bkmem_zalloc(size_t size)
{
	return (kmem_alloc(size, 0));
}

void
bkmem_free(void *p, size_t bytes)
{
	kmem_free(p, bytes);
}

static void
check_need_to_free(void)
{
	int i;
	struct need_to_free *ntf;
	caddr_t addr;
	size_t nbytes;
	int s;

again:
	s = splimp();
	ntf = &kmem_info.need_to_free_list;
	if (ntf->addr) {
		addr = ntf->addr;
		nbytes = ntf->nbytes;
		*ntf = *(struct need_to_free *)ntf->addr;
		splx(s);
		kmem_free(addr, nbytes);
		goto again;
	}
	ntf = kmem_info.need_to_free;
	for (i = 0; i < NEED_TO_FREE_SIZE; i++) {
		if (ntf[i].addr) {
			addr = ntf[i].addr;
			nbytes = ntf[i].nbytes;
			ntf[i].addr = 0;
			splx(s);
			kmem_free(addr, nbytes);
			goto again;
		}
	}
	splx(s);
}

/*
 * Add a block of at least nbytes to the free space tree.
 *
 * return value:
 *	true if at least nbytes can be allocated
 *	false otherwise
 *
 * remark:
 *	free space (delimited by the static variable ubound) is
 *	extended by an amount determined by rounding nbytes up to
 *	a multiple of the system page size.
 */

static bool
morecore(size_t nbytes)
{
#ifdef	__sparc
	enum RESOURCES type = RES_BOOTSCRATCH_NOFAIL;
#else
	enum RESOURCES type = RES_BOOTSCRATCH;
#endif
	Dblk p;
#ifdef DEBUG
	printf("morecore(nbytes 0x%lx)\n", nbytes);
#endif /* DEBUG */


	nbytes = roundup(nbytes, PAGESIZE);
	p = (Dblk) resalloc(type, nbytes, (caddr_t)0, 0);
	if (p == 0) {
		return (false);
	}
	kmem_free((caddr_t)p, nbytes);
#ifdef DEBUG
	printf("morecore() returning, p = %p\n", p);
#endif
	return (true);

} /* morecore */

/*
 * Get a free block header
 * There is a list of available free block headers.
 * When the list is empty, allocate another pagefull.
 */
Freehdr
getfreehdr(void)
{
	Freehdr	r;
	int	n = 0;
#ifdef DEBUG
	printf("getfreehdr()\n");
#endif /* DEBUG */

	if (kmem_info.free_hdr_list != NIL) {
		r = kmem_info.free_hdr_list;
		kmem_info.free_hdr_list = kmem_info.free_hdr_list->left;
	} else {
		r = (Freehdr)resalloc(RES_BOOTSCRATCH, PAGESIZE, (caddr_t)0, 0);
		if (r == 0) {
			prom_panic("getfreehdr");
		}
		for (n = 1; n < PAGESIZE / sizeof (*r); n++) {
			freehdr(&r[n]);
		}
	}
#ifdef DEBUG
	printf("getfreehdr: freed %x headers\n", n);
	printf("getfreehdr: returning %p\n", r);
#endif /* DEBUG */
	return (r);
}

/*
 * Free a free block header
 * Add it to the list of available headers.
 */

void
freehdr(Freehdr p)
{
#ifdef DEBUG
	printf("freehdr(%p)\n", p);
#endif /* DEBUG */
	p->left = kmem_info.free_hdr_list;
	p->right = NIL;
	p->block = NULL;
	kmem_info.free_hdr_list = p;
}

#ifdef DEBUG
/*
 * Diagnostic routines
 */
static int depth = 0;

static void
prtree(Freehdr p, char *cp)
{
	int n;
	if (depth == 0) {
		printf("prtree(p %p cp %s)\n", p, cp);
	}
	if (p != NIL) {
		depth++;
		prtree(p->left, (char *)NULL);
		depth--;

		for (n = 0; n < depth; n++) {
			printf(" ");
		}
		printf(
		    "(%p): (left = %p, right = %p, block = %p, size = %lx)\n",
		    p, p->left, p->right, p->block, p->size);

		depth++;
		prtree(p->right, (char *)NULL);
		depth--;
	}
}
#endif /* DEBUG */
