/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License, Version 1.0 only
 * (the "License"). You may not use this file except in compliance
 * with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright 2005 Sun Microsystems, Inc. All rights reserved.
 * Use is subject to license terms.
 */

#if 1
#undef DEBUG
#endif

/* #define DEBUG ON */

/*
 * Conditions on use:
 * kmem_alloc and kmem_free must not be called from interrupt level,
 * except from software interrupt level. This is because they are
 * not reentrant, and only block out software interrupts. They take
 * too long to block any real devices. There is a routine
 * kmem_free_intr that can be used to free blocks at interrupt level,
 * but only up to splimp, not higher. This is because kmem_free_intr
 * only spl's to splimp.
 *
 * Also, these routines are not that fast, so they should not be used
 * in very frequent operations (e.g. operations that happen more often
 * than, say, once every few seconds).
 */

/*
 * description:
 * Yet another memory allocator, this one based on a method
 * described in C.J. Stephenson, "Fast Fits", IBM Sys. Journal
 *
 * The basic data structure is a "Cartesian" binary tree, in which
 * nodes are ordered by ascending addresses (thus minimizing free
 * list insertion time) and block sizes decrease with depth in the
 * tree (thus minimizing search time for a block of a given size).
 *
 * In other words, for any node s, letting D(s) denote
 * the set of descendants of s, we have:
 *
 * a. addr(D(left(s))) < addr(s) < addr(D(right(s)))
 * b. len(D(left(s))) <= len(s) >= len(D(right(s)))
 */

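/*
 * For illustration only (the addresses and lengths below are made up,
 * not actual arena contents), a tree obeying (a) and (b) might be:
 *
 *                    [addr 0x4000, len 0x600]
 *                     /                    \
 *     [addr 0x1000, len 0x200]     [addr 0x9000, len 0x400]
 *                                             \
 *                                     [addr 0xb000, len 0x100]
 *
 * Every descendant on the left of a node lies at a lower address,
 * every descendant on the right at a higher address, and no node is
 * larger than its parent.
 */
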
#include <sys/param.h>
#include <sys/sysmacros.h>
#include <sys/salib.h>
#include <sys/saio.h>
#include <sys/promif.h>

/*
 * The node header structure.
 *
 * To reduce storage consumption, a header block is associated with
 * free blocks only, not allocated blocks.
 * When a free block is allocated, its header block is put on
 * a free header block list.
 *
 * This creates a header space and a free block space.
 * The left pointer of a header block is used to chain free header
 * blocks together.
 */

typedef enum {false, true} bool;
typedef struct freehdr *Freehdr;
typedef struct dblk *Dblk;

/*
 * Description of a header for a free block
 * Only free blocks have such headers.
 */
struct freehdr {
        Freehdr left;           /* Left tree pointer */
        Freehdr right;          /* Right tree pointer */
        Dblk    block;          /* Ptr to the data block */
        size_t  size;           /* Size of the data block */
};

#define NIL             ((Freehdr) 0)
#define WORDSIZE        sizeof (int)
#define SMALLEST_BLK    1       /* Size of smallest block */

/*
 * Description of a data block.
 */
struct dblk {
        char    data[1];        /* Addr returned to the caller */
};

/*
 * weight(x) is the size of a block, in bytes; or 0 if and only if x
 * is a null pointer. It is the responsibility of kmem_alloc() and
 * kmem_free() to keep zero-length blocks out of the arena.
 */

#define weight(x)       ((x) == NIL ? 0 : ((x)->size))
#define nextblk(p, size) ((Dblk) ((char *)(p) + (size)))
#define max(a, b)       ((a) < (b) ? (b) : (a))

void *kmem_alloc(size_t, int);
void kmem_free(void *ptr, size_t nbytes);
Freehdr getfreehdr(void);
static bool morecore(size_t);
void insert(Dblk p, size_t len, Freehdr *tree);
void freehdr(Freehdr p);
void delete(Freehdr *p);
static void check_need_to_free(void);
extern caddr_t resalloc(enum RESOURCES, size_t, caddr_t, int);
#ifdef __sparc
extern void resalloc_init(void);
#endif
extern int splnet(void);
extern int splimp(void);
extern void splx(int);

/*
 * Structure containing various info about allocated memory.
 */
#define NEED_TO_FREE_SIZE 5
struct kmem_info {
        Freehdr free_root;
        Freehdr free_hdr_list;
        struct map *map;
        struct pte *pte;
        caddr_t vaddr;
        struct need_to_free {
                caddr_t addr;
                size_t  nbytes;
        } need_to_free_list, need_to_free[NEED_TO_FREE_SIZE];
} kmem_info;


struct map *kernelmap;

#ifdef DEBUG
static void prtree(Freehdr, char *);
#endif

/*
 * Initialize kernel memory allocator
 */

void
kmem_init(void)
{
        int i;
        struct need_to_free *ntf;

#ifdef DEBUG
        printf("kmem_init entered\n");
#endif

#ifdef __sparc
        resalloc_init();
#endif

        kmem_info.free_root = NIL;
        kmem_info.free_hdr_list = NULL;
        kmem_info.map = kernelmap;
        kmem_info.need_to_free_list.addr = 0;
        ntf = kmem_info.need_to_free;
        for (i = 0; i < NEED_TO_FREE_SIZE; i++) {
                ntf[i].addr = 0;
        }
#ifdef DEBUG
        printf("kmem_init returning\n");
        prtree(kmem_info.free_root, "kmem_init");
#endif
}

/*
 * Insert a new node in a cartesian tree or subtree, placing it
 * in the correct position with respect to the existing nodes.
 *
 * algorithm:
 * Starting from the root, a binary search is made for the new
 * node. If this search were allowed to continue, it would
 * eventually fail (since there cannot already be a node at the
 * given address); but in fact it stops when it reaches a node in
 * the tree which has a length less than that of the new node (or
 * when it reaches a null tree pointer). The new node is then
 * inserted at the root of the subtree for which the shorter node
 * forms the old root (or in place of the null pointer).
 */

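/*
 * Worked example (lengths and addresses are hypothetical): inserting
 * a block of length 0x300 into the tree sketched near the top of this
 * file stops the downward search at the first node shorter than 0x300
 * that lies on the search path for the new address. The new node
 * replaces that node as the root of its subtree, and the displaced
 * nodes are re-threaded beneath it by the left_hook/right_hook loop
 * below: nodes at lower addresses end up in its left subtree, nodes
 * at higher addresses in its right subtree, so both orderings (a) and
 * (b) are preserved.
 */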

void
insert(Dblk p,          /* Ptr to the block to insert */
        size_t len,     /* Length of new node */
        Freehdr *tree)  /* Address of ptr to root */
{
        Freehdr x;
        Freehdr *left_hook;     /* Temp for insertion */
        Freehdr *right_hook;    /* Temp for insertion */
        Freehdr newhdr;

        x = *tree;
        /*
         * Search for the first node which has a weight less
         * than that of the new node; this will be the
         * point at which we insert the new node.
         */

        while (weight(x) >= len) {
                if (p < x->block)
                        tree = &x->left;
                else
                        tree = &x->right;
                x = *tree;
        }

        /*
         * Perform root insertion. The variable x traces a path through
         * the tree, and with the help of left_hook and right_hook,
         * rewrites all links that cross the territory occupied
         * by p. Note that this improves performance under
         * paging.
         */

        newhdr = getfreehdr();
        *tree = newhdr;
        left_hook = &newhdr->left;
        right_hook = &newhdr->right;

        newhdr->left = NIL;
        newhdr->right = NIL;
        newhdr->block = p;
        newhdr->size = len;

        while (x != NIL) {
                /*
                 * Remark:
                 * The name 'left_hook' is somewhat confusing, since
                 * it is always set to the address of a .right link
                 * field. However, its value is always an address
                 * below (i.e., to the left of) p. Similarly
                 * for right_hook. The values of left_hook and
                 * right_hook converge toward the value of p,
                 * as in a classical binary search.
                 */
                if (x->block < p) {
                        /*
                         * rewrite link crossing from the left
                         */
                        *left_hook = x;
                        left_hook = &x->right;
                        x = x->right;
                } else {
                        /*
                         * rewrite link crossing from the right
                         */
                        *right_hook = x;
                        right_hook = &x->left;
                        x = x->left;
                } /* else */
        } /* while */

        *left_hook = *right_hook = NIL; /* clear remaining hooks */

} /* insert */


/*
 * Delete a node from a cartesian tree. p is the address of
 * a pointer to the node which is to be deleted.
 *
 * algorithm:
 * The left and right sons of the node to be deleted define two
 * subtrees which are to be merged and attached in place of the
 * deleted node. Each node on the inside edges of these two
 * subtrees is examined and longer nodes are placed above the
 * shorter ones.
 *
 * On entry:
 * *p is assumed to be non-null.
 */

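/*
 * Worked example (sizes are hypothetical): if the node being deleted
 * has a left child of length 0x400 and a right child of length 0x300,
 * the 0x400 node is promoted into the vacated position first; the
 * merge then continues down the right spine of the left subtree and
 * the left spine of the right subtree, always promoting the longer of
 * the two candidates, until both spines are exhausted.
 */
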
void
delete(Freehdr *p)
{
        Freehdr x;
        Freehdr left_branch;    /* left subtree of deleted node */
        Freehdr right_branch;   /* right subtree of deleted node */

        x = *p;
        left_branch = x->left;
        right_branch = x->right;

        while (left_branch != right_branch) {
                /*
                 * iterate until left branch and right branch are
                 * both NIL.
                 */
                if (weight(left_branch) >= weight(right_branch)) {
                        /*
                         * promote the left branch
                         */
                        *p = left_branch;
                        p = &left_branch->right;
                        left_branch = left_branch->right;
                } else {
                        /*
                         * promote the right branch
                         */
                        *p = right_branch;
                        p = &right_branch->left;
                        right_branch = right_branch->left;
                } /* else */
        } /* while */
        *p = NIL;
        freehdr(x);
} /* delete */


/*
 * Demote a node in a cartesian tree, if necessary, to establish
 * the required vertical ordering.
 *
 * algorithm:
 * The left and right subtrees of the node to be demoted are to
 * be partially merged and attached in place of the demoted node.
 * The nodes on the inside edges of these two subtrees are
 * examined and the longer nodes are placed above the shorter
 * ones, until a node is reached which has a length no greater
 * than that of the node being demoted (or until a null pointer
 * is reached). The node is then attached at this point, and
 * the remaining subtrees (if any) become its descendants.
 *
 * on entry:
 * a. All the nodes in the tree, including the one to be demoted,
 *    must be correctly ordered horizontally;
 * b. All the nodes except the one to be demoted must also be
 *    correctly positioned vertically. The node to be demoted
 *    may be already correctly positioned vertically, or it may
 *    have a length which is less than that of one or both of
 *    its progeny.
 * c. *p is non-null
 */

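/*
 * Worked example (sizes are hypothetical): if the node at *p has
 * shrunk to length 0x80 while its children have lengths 0x300 and
 * 0x200, demote() promotes the 0x300 child first, then continues
 * promoting the longer remaining candidate at each step until every
 * candidate is no longer than 0x80 (or NIL); the shrunken node is
 * reattached at that point, with the leftover subtrees as its
 * children.
 */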

static void
demote(Freehdr *p)
{
        Freehdr x;              /* addr of node to be demoted */
        Freehdr left_branch;
        Freehdr right_branch;
        size_t wx;

        x = *p;
        left_branch = x->left;
        right_branch = x->right;
        wx = weight(x);

        while (weight(left_branch) > wx || weight(right_branch) > wx) {
                /*
                 * select a descendant branch for promotion
                 */
                if (weight(left_branch) >= weight(right_branch)) {
                        /*
                         * promote the left branch
                         */
                        *p = left_branch;
                        p = &left_branch->right;
                        left_branch = *p;
                } else {
                        /*
                         * promote the right branch
                         */
                        *p = right_branch;
                        p = &right_branch->left;
                        right_branch = *p;
                } /* else */
        } /* while */

        *p = x;                 /* attach demoted node here */
        x->left = left_branch;
        x->right = right_branch;
} /* demote */

/*
 * Allocate a block of storage
 *
 * algorithm:
 * The freelist is searched by descending the tree from the root
 * so that at each decision point the "better fitting" child node
 * is chosen (i.e., the shorter one, if it is long enough, or
 * the longer one, otherwise). The descent stops when both
 * child nodes are too short.
 *
 * function result:
 * kmem_alloc returns a pointer to the allocated block; a null
 * pointer indicates storage could not be allocated.
 */
/*
 * We need to return blocks that are on word boundaries so that callers
 * that are putting ints into the area will work. Since we allow
 * arbitrary freeing, we need a weight function that treats free
 * blocks starting on an odd boundary specially. Allocation is
 * aligned to 8-byte boundaries (ALIGN).
 */
#define ALIGN           8       /* doubleword aligned .. */
#define ALIGNMASK       (ALIGN-1)
#define ALIGNMORE(addr) (ALIGN - ((uintptr_t)(addr) & ALIGNMASK))

/*
 * If it is empty then weight == 0
 * If it is aligned then weight == size
 * If it is unaligned
 *      if not enough room to align then weight == 0
 *      else weight == aligned size
 */
#define mweight(x) ((x) == NIL ? 0 : \
        ((((uintptr_t)(x)->block) & ALIGNMASK) == 0 ? (x)->size : \
        (((x)->size <= ALIGNMORE((x)->block)) ? 0 : \
        (x)->size - ALIGNMORE((x)->block))))
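/*
 * Worked example (addresses are hypothetical): a free block of size
 * 0x20 starting at 0x1003 is unaligned; ALIGNMORE(0x1003) is 5, so
 * mweight() reports 0x20 - 5 = 0x1b usable bytes. The same block
 * starting at 0x1000 would have mweight() == 0x20, and an unaligned
 * block of only 4 bytes at 0x1003 (too small to reach the next 8-byte
 * boundary) has mweight() == 0.
 */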

/*ARGSUSED1*/
void *
kmem_alloc(size_t nbytes, int kmflag)
{
        Freehdr a;              /* ptr to node to be allocated */
        Freehdr *p;             /* address of ptr to node */
        size_t left_weight;
        size_t right_weight;
        Freehdr left_son;
        Freehdr right_son;
        char *retblock;         /* Address returned to the user */
        int s;
#ifdef DEBUG
        printf("kmem_alloc(nbytes 0x%lx)\n", nbytes);
#endif /* DEBUG */

        if (nbytes == 0) {
                return (NULL);
        }
        s = splnet();

        if (nbytes < SMALLEST_BLK) {
                printf("illegal kmem_alloc call for %lx bytes\n", nbytes);
                prom_panic("kmem_alloc");
        }
        check_need_to_free();

        /*
         * ensure that at least one block is big enough to satisfy
         * the request.
         */

        if (mweight(kmem_info.free_root) <= nbytes) {
                /*
                 * the largest block is not enough.
                 */
                if (!morecore(nbytes)) {
                        printf("kmem_alloc failed, nbytes %lx\n", nbytes);
                        prom_panic("kmem_alloc");
                }
        }

        /*
         * search down through the tree until a suitable block is
         * found. At each decision point, select the better
         * fitting node.
         */

        p = (Freehdr *) &kmem_info.free_root;
        a = *p;
        left_son = a->left;
        right_son = a->right;
        left_weight = mweight(left_son);
        right_weight = mweight(right_son);

        while (left_weight >= nbytes || right_weight >= nbytes) {
                if (left_weight <= right_weight) {
                        if (left_weight >= nbytes) {
                                p = &a->left;
                                a = left_son;
                        } else {
                                p = &a->right;
                                a = right_son;
                        }
                } else {
                        if (right_weight >= nbytes) {
                                p = &a->right;
                                a = right_son;
                        } else {
                                p = &a->left;
                                a = left_son;
                        }
                }
                left_son = a->left;
                right_son = a->right;
                left_weight = mweight(left_son);
                right_weight = mweight(right_son);
        } /* while */

        /*
         * allocate storage from the selected node.
         */

        if (a->size - nbytes < SMALLEST_BLK) {
                /*
                 * not big enough to split; must leave at least
                 * a dblk's worth of space.
                 */
                retblock = a->block->data;
                delete(p);
        } else {

                /*
                 * split the node, allocating nbytes from the top.
                 * Remember we've already accounted for the
                 * allocated node's header space.
                 */
                Freehdr x;
                x = getfreehdr();
                if ((uintptr_t)a->block->data & ALIGNMASK) {
                        size_t size;
                        if (a->size <= ALIGNMORE(a->block->data))
                                prom_panic("kmem_alloc: short block allocated");
                        size = nbytes + ALIGNMORE(a->block->data);
                        x->block = a->block;
                        x->size = ALIGNMORE(a->block->data);
                        x->left = a->left;
                        x->right = a->right;
                        /*
                         * the node pointed to by *p has become smaller;
                         * move it down to its appropriate place in
                         * the tree.
                         */
                        *p = x;
                        demote(p);
                        retblock = a->block->data + ALIGNMORE(a->block->data);
                        if (a->size > size) {
                                kmem_free((caddr_t)nextblk(a->block, size),
                                    (size_t)(a->size - size));
                        }
                        freehdr(a);
                } else {
                        x->block = nextblk(a->block, nbytes);
                        x->size = a->size - nbytes;
                        x->left = a->left;
                        x->right = a->right;
                        /*
                         * the node pointed to by *p has become smaller;
                         * move it down to its appropriate place in
                         * the tree.
                         */
                        *p = x;
                        demote(p);
                        retblock = a->block->data;
                        freehdr(a);
                }
        }
#ifdef DEBUG
        prtree(kmem_info.free_root, "kmem_alloc");
#endif

        splx(s);
        bzero(retblock, nbytes);
#ifdef DEBUG
        printf("kmem_alloc bzero complete - returning %p\n", retblock);
#endif
        return (retblock);

} /* kmem_alloc */

/*
 * Return a block to the free space tree.
 *
 * algorithm:
 * Starting at the root, search for and coalesce free blocks
 * adjacent to the one given. When the appropriate place in the
 * tree is found, insert the given block.
 *
 * Do some sanity checks to avoid total confusion in the tree.
 * If the block has already been freed, prom_panic.
 * If the ptr is not from the arena, prom_panic.
 */
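/*
 * Coalescing example (addresses are hypothetical): freeing 0x100
 * bytes at 0x2100 while the tree already holds free blocks
 * [0x2000, 0x100] and [0x2200, 0x200] absorbs both neighbors as the
 * search walks the tree, and a single free block [0x2000, 0x400] is
 * inserted in their place.
 */
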
void
kmem_free(void *ptr, size_t nbytes)
{
        Freehdr *np;            /* For deletion from free list */
        Freehdr neighbor;       /* Node to be coalesced */
        char *neigh_block;      /* Ptr to potential neighbor */
        size_t neigh_size;      /* Size of potential neighbor */
        int s;

#ifdef DEBUG
        printf("kmem_free (ptr %p nbytes %lx)\n", ptr, nbytes);
        prtree(kmem_info.free_root, "kmem_free");
#endif

#ifdef lint
        neigh_block = bkmem_zalloc(nbytes);
        neigh_block = neigh_block;
#endif
        if (nbytes == 0) {
                return;
        }

        if (ptr == 0) {
                prom_panic("kmem_free of 0");
        }
        s = splnet();

        /*
         * Search the tree for the correct insertion point for this
         * node, coalescing adjacent free blocks along the way.
         */
        np = &kmem_info.free_root;
        neighbor = *np;
        while (neighbor != NIL) {
                neigh_block = (char *)neighbor->block;
                neigh_size = neighbor->size;
                if ((char *)ptr < neigh_block) {
                        if ((char *)ptr + nbytes == neigh_block) {
                                /*
                                 * Absorb and delete right neighbor
                                 */
                                nbytes += neigh_size;
                                delete(np);
                        } else if ((char *)ptr + nbytes > neigh_block) {
                                /*
                                 * The block being freed overlaps
                                 * another block in the tree. This
                                 * is bad news.
                                 */
                                printf("kmem_free: free block overlap %p+%lx"
                                    " over %p\n", (void *)ptr, nbytes,
                                    (void *)neigh_block);
                                prom_panic("kmem_free: free block overlap");
                        } else {
                                /*
                                 * Search to the left
                                 */
                                np = &neighbor->left;
                        }
                } else if ((char *)ptr > neigh_block) {
                        if (neigh_block + neigh_size == ptr) {
                                /*
                                 * Absorb and delete left neighbor
                                 */
                                ptr = neigh_block;
                                nbytes += neigh_size;
                                delete(np);
                        } else if (neigh_block + neigh_size > (char *)ptr) {
                                /*
                                 * This block has already been freed
                                 */
                                prom_panic("kmem_free block already free");
                        } else {
                                /*
                                 * search to the right
                                 */
                                np = &neighbor->right;
                        }
                } else {
                        /*
                         * This block has already been freed
                         * as "ptr == neigh_block"
                         */
                        prom_panic("kmem_free: block already free as neighbor");
                } /* else */
                neighbor = *np;
        } /* while */

        /*
         * Insert the new node into the free space tree
         */
        insert((Dblk) ptr, nbytes, &kmem_info.free_root);
#ifdef DEBUG
        printf("exiting kmem_free\n");
        prtree(kmem_info.free_root, "kmem_free");
#endif
        splx(s);
} /* kmem_free */

/*
 * Sigh. We include a header file which the kernel
 * uses to declare (one of its many) kmem_free prototypes.
 * In order not to use the kernel's namespace, then, we must
 * define another name here for use by boot.
 */
void *
bkmem_alloc(size_t size)
{
        return (kmem_alloc(size, 0));
}

/*
 * Boot's kmem_alloc() already zeroes the memory it returns (note the
 * bzero() in kmem_alloc() above), so bkmem_zalloc() is simply another
 * name for it.
 */
void *
bkmem_zalloc(size_t size)
{
        return (kmem_alloc(size, 0));
}

void
bkmem_free(void *p, size_t bytes)
{
        kmem_free(p, bytes);
}
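/*
 * Illustrative (hypothetical) caller: a standalone module that needs
 * a zeroed scratch buffer while running below interrupt level might
 * do the following; the size passed to bkmem_free() must match the
 * size originally requested, since no bookkeeping is kept for
 * allocated blocks. "struct scratch" is a made-up type, not one used
 * by this file.
 *
 *      struct scratch *sp;
 *
 *      sp = bkmem_zalloc(sizeof (struct scratch));
 *      if (sp == NULL)
 *              prom_panic("scratch buffer allocation failed");
 *      ...
 *      bkmem_free(sp, sizeof (struct scratch));
 */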

static void
check_need_to_free(void)
{
        int i;
        struct need_to_free *ntf;
        caddr_t addr;
        size_t nbytes;
        int s;

again:
        s = splimp();
        ntf = &kmem_info.need_to_free_list;
        if (ntf->addr) {
                addr = ntf->addr;
                nbytes = ntf->nbytes;
                *ntf = *(struct need_to_free *)ntf->addr;
                splx(s);
                kmem_free(addr, nbytes);
                goto again;
        }
        ntf = kmem_info.need_to_free;
        for (i = 0; i < NEED_TO_FREE_SIZE; i++) {
                if (ntf[i].addr) {
                        addr = ntf[i].addr;
                        nbytes = ntf[i].nbytes;
                        ntf[i].addr = 0;
                        splx(s);
                        kmem_free(addr, nbytes);
                        goto again;
                }
        }
        splx(s);
}

/*
 * Add a block of at least nbytes to the free space tree.
 *
 * return value:
 * true if at least nbytes can be allocated
 * false otherwise
 *
 * remark:
 * free space (delimited by the static variable ubound) is
 * extended by an amount determined by rounding nbytes up to
 * a multiple of the system page size.
 */

static bool
morecore(size_t nbytes)
{
#ifdef __sparc
        enum RESOURCES type = RES_BOOTSCRATCH_NOFAIL;
#else
        enum RESOURCES type = RES_BOOTSCRATCH;
#endif
        Dblk p;
#ifdef DEBUG
        printf("morecore(nbytes 0x%lx)\n", nbytes);
#endif /* DEBUG */


        nbytes = roundup(nbytes, PAGESIZE);
        p = (Dblk) resalloc(type, nbytes, (caddr_t)0, 0);
        if (p == 0) {
                return (false);
        }
        kmem_free((caddr_t)p, nbytes);
#ifdef DEBUG
        printf("morecore() returning, p = %p\n", p);
#endif
        return (true);

} /* morecore */

/*
 * Get a free block header
 * There is a list of available free block headers.
 * When the list is empty, allocate another pagefull.
 */
Freehdr
getfreehdr(void)
{
        Freehdr r;
        int n = 0;
#ifdef DEBUG
        printf("getfreehdr()\n");
#endif /* DEBUG */

        if (kmem_info.free_hdr_list != NIL) {
                r = kmem_info.free_hdr_list;
                kmem_info.free_hdr_list = kmem_info.free_hdr_list->left;
        } else {
                r = (Freehdr)resalloc(RES_BOOTSCRATCH, PAGESIZE, (caddr_t)0, 0);
                if (r == 0) {
                        prom_panic("getfreehdr");
                }
                for (n = 1; n < PAGESIZE / sizeof (*r); n++) {
                        freehdr(&r[n]);
                }
        }
#ifdef DEBUG
        printf("getfreehdr: freed %x headers\n", n);
        printf("getfreehdr: returning %p\n", r);
#endif /* DEBUG */
        return (r);
}

/*
 * Free a free block header
 * Add it to the list of available headers.
 */

void
freehdr(Freehdr p)
{
#ifdef DEBUG
        printf("freehdr(%p)\n", p);
#endif /* DEBUG */
        p->left = kmem_info.free_hdr_list;
        p->right = NIL;
        p->block = NULL;
        kmem_info.free_hdr_list = p;
}

#ifdef DEBUG
/*
 * Diagnostic routines
 */
static int depth = 0;

static void
prtree(Freehdr p, char *cp)
{
        int n;
        if (depth == 0) {
                printf("prtree(p %p cp %s)\n", p, cp);
        }
        if (p != NIL) {
                depth++;
                prtree(p->left, (char *)NULL);
                depth--;

                for (n = 0; n < depth; n++) {
                        printf(" ");
                }
                printf(
                    "(%p): (left = %p, right = %p, block = %p, size = %lx)\n",
                    p, p->left, p->right, p->block, p->size);

                depth++;
                prtree(p->right, (char *)NULL);
                depth--;
        }
}
#endif /* DEBUG */
