xref: /freebsd/sys/kern/kern_malloc.c (revision 7660b554bc59a07be0431c17e0e33815818baa69)
/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif

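/*
 * Worked example (illustrative): with REALLOC_FRACTION == 1, a block whose
 * underlying allocation is 1024 bytes is replaced by a smaller block when
 * realloc()ed to 512 bytes or less (512 <= 1024 >> 1), but is reused in
 * place for any new size from 513 through 1024 bytes.
 */
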
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static char *kmembase;
static char *kmemlimit;

#define KMEM_ZSHIFT	4
#define KMEM_ZBASE	16
#define KMEM_ZMASK	(KMEM_ZBASE - 1)

#define KMEM_ZMAX	PAGE_SIZE
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static u_int8_t kmemsize[KMEM_ZSIZE + 1];
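
/*
 * Illustrative example of the size-class lookup: a 100-byte request is
 * rounded up to 112, the next multiple of KMEM_ZBASE, and
 * kmemsize[112 >> KMEM_ZSHIFT] then selects the 128-byte zone.
 */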

/* These won't be powers of two for long */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone;
} kmemzones[] = {
	{16, "16", NULL},
	{32, "32", NULL},
	{64, "64", NULL},
	{128, "128", NULL},
	{256, "256", NULL},
	{512, "512", NULL},
	{1024, "1024", NULL},
	{2048, "2048", NULL},
	{4096, "4096", NULL},
#if PAGE_SIZE > 4096
	{8192, "8192", NULL},
#if PAGE_SIZE > 8192
	{16384, "16384", NULL},
#if PAGE_SIZE > 16384
	{32768, "32768", NULL},
#if PAGE_SIZE > 32768
	{65536, "65536", NULL},
#if PAGE_SIZE > 65536
#error	"Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};

u_int vm_kmem_size;

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS);

/* time_uptime of last malloc(9) failure */
static time_t t_malloc_fail;

#ifdef MALLOC_MAKE_FAILURES
/*
 * Cause a malloc failure every (n) mallocs made with M_NOWAIT.  A
 * setting of 0 disables induced failures.
 */
SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");

static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

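/*
 * Return the number of seconds that have elapsed since the last
 * malloc(9) failure; callers can use this to back off when the
 * system has recently run out of memory.
 */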
int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return NULL
 *	if the allocation fails.
 */
void *
malloc(unsigned long size, struct malloc_type *type, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#ifdef DIAGNOSTIC
	unsigned long osize = size;
#endif
	struct malloc_type *ksp = type;

#ifdef INVARIANTS
	/*
	 * Make sure that exactly one of M_WAITOK and M_NOWAIT is set, and
	 * check for the API botches that are common.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT | M_DONTWAIT | M_TRYWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#if 0
	if (size == 0)
		Debugger("zero size malloc");
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		   ("malloc(M_WAITOK) in interrupt context"));
	if (size <= KMEM_ZMAX) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone;
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		mtx_lock(&ksp->ks_mtx);
		if (va == NULL)
			goto out;

		ksp->ks_size |= 1 << indx;
		size = zone->uz_size;
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc(size, flags);
		mtx_lock(&ksp->ks_mtx);
		if (va == NULL)
			goto out;
	}
	ksp->ks_memuse += size;
	ksp->ks_inuse++;
out:
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;

	mtx_unlock(&ksp->ks_mtx);
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
	return ((void *) va);
}
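
/*
 * Illustrative usage (struct foo is hypothetical): an M_NOWAIT caller
 * must be prepared for a NULL return, while an M_WAITOK caller in a
 * sleepable context may assume success:
 *
 *	struct foo *fp;
 *
 *	fp = malloc(sizeof(*fp), M_TEMP, M_NOWAIT | M_ZERO);
 *	if (fp == NULL)
 *		return (ENOMEM);
 */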

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *type)
{
	struct malloc_type *ksp = type;
	uma_slab_t slab;
	u_long size;

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	KASSERT(ksp->ks_memuse > 0,
		("malloc(9)/free(9) confusion.\n%s",
		 "Probably freeing with wrong type, but maybe not here."));
	size = 0;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtp = addr;
#endif
		size = slab->us_zone->uz_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes on
		 * 64-bit machines.
		 */
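		/*
		 * Illustrative example: for a 128-byte item on a 64-bit
		 * machine, mtp is aligned down to a pointer boundary and
		 * then advanced so that it points at the item's last 8
		 * bytes, i.e. addr + 120.
		 */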
		mtp = (struct malloc_type **)
		    ((unsigned long)mtp & ~UMA_ALIGN_PTR);
		mtp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtp = type;
#endif
		uma_zfree_arg(slab->us_zone, addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	mtx_lock(&ksp->ks_mtx);
	KASSERT(size <= ksp->ks_memuse,
		("malloc(9)/free(9) confusion.\n%s",
		 "Probably freeing with wrong type, but maybe not here."));
	ksp->ks_memuse -= size;
	ksp->ks_inuse--;
	mtx_unlock(&ksp->ks_mtx);
}

/*
 *	realloc: change the size of a memory block
 */
void *
realloc(void *addr, unsigned long size, struct malloc_type *type, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, type, flags));

	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (slab->us_zone)
		alloc = slab->us_zone->uz_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, type, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, type);
	return (newaddr);
}

/*
 *	reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, unsigned long size, struct malloc_type *type, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, type, flags)) == NULL)
		free(addr, type);
	return (mem);
}
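
/*
 * Illustrative usage: reallocf() avoids the classic leak where the only
 * reference to the old block is overwritten by a failed realloc():
 *
 *	buf = reallocf(buf, newsize, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *
 * On failure the old buffer has already been freed, so nothing leaks.
 */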

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(void *dummy)
{
	u_int8_t indx;
	u_long npg;
	u_long mem_size;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size so that it is more
	 * suitable for a wide range of machine sizes.  On an x86, a
	 * VM_KMEM_SIZE_SCALE value of 4 is good, while a VM_KMEM_SIZE
	 * of 12MB is a fair compromise.  VM_KMEM_SIZE_MAX depends on
	 * the maximum KVA space available; on an x86 with a total KVA
	 * space of 256MB, try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	vm_kmem_size = VM_KMEM_SIZE;
	mem_size = cnt.v_page_count;

#if defined(VM_KMEM_SIZE_SCALE)
	if ((mem_size / VM_KMEM_SIZE_SCALE) > (vm_kmem_size / PAGE_SIZE))
		vm_kmem_size = (mem_size / VM_KMEM_SIZE_SCALE) * PAGE_SIZE;
#endif

#if defined(VM_KMEM_SIZE_MAX)
	if (vm_kmem_size >= VM_KMEM_SIZE_MAX)
		vm_kmem_size = VM_KMEM_SIZE_MAX;
#endif
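
	/*
	 * Worked example (illustrative, assuming 4KB pages and a
	 * VM_KMEM_SIZE_SCALE of 4): a machine with 512MB of RAM has
	 * 131072 pages, so the scaled size is 32768 pages and
	 * vm_kmem_size grows to 128MB, unless VM_KMEM_SIZE_MAX
	 * clamps it first.
	 */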

	/* Allow final override from the kernel environment */
	TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane.  Be careful not to overflow the 32-bit
	 * ints while doing the check.
	 */
	if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count)
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

	/*
	 * Tune settings based on the kernel map's size at this time.
	 */
	init_param3(vm_kmem_size / PAGE_SIZE);

	/*
	 * In mbuf_init(), we set up submaps for mbufs and clusters, in which
	 * case we rounddown() (nmbufs * MSIZE) and (nmbclusters * MCLBYTES),
	 * respectively.  Mathematically, this means that what we do here may
	 * amount to slightly more address space than we need for the submaps,
	 * but it never hurts to have an extra page in kmem_map.
	 */
	npg = (nmbufs*MSIZE + nmbclusters*MCLBYTES + vm_kmem_size) / PAGE_SIZE;

	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
	kmem_map->system_map = 1;

	uma_startup2();

	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;

		kmemzones[indx].kz_zone = uma_zcreate(name, size,
#ifdef INVARIANTS
		    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
		    NULL, NULL, NULL, NULL,
#endif
		    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);

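		/*
		 * Fill in the kmemsize[] lookup table for each 16-byte
		 * step not yet covered, up to and including this zone's
		 * size.  For example, once the 128-byte zone exists,
		 * rounded request sizes 80, 96, 112, and 128 all map to
		 * its index.
		 */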
		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}

void
malloc_init(void *data)
{
	struct malloc_type *type = (struct malloc_type *)data;

	mtx_lock(&malloc_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	/* Do not relink a type that is already on the list. */
	if (type->ks_next != NULL) {
		mtx_unlock(&malloc_mtx);
		return;
	}

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
	mtx_init(&type->ks_mtx, type->ks_shortdesc, "Malloc Stats", MTX_DEF);
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type *type = (struct malloc_type *)data;
	struct malloc_type *t;

	mtx_lock(&malloc_mtx);
	mtx_lock(&type->ks_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	mtx_destroy(&type->ks_mtx);
	mtx_unlock(&malloc_mtx);
}

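/*
 * Render the per-type allocation statistics (the kern.malloc sysctl) as
 * a text table: one header line plus one line per malloc type that has
 * been used, listing its in-use count, memory use, high-water mark,
 * request count, and the size classes it has allocated from.
 */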
static int
sysctl_kern_malloc(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type *type;
	int linesize = 128;
	int curline;
	int bufsize;
	int first;
	int error;
	char *buf;
	char *p;
	int cnt;
	int len;
	int i;

	cnt = 0;

	mtx_lock(&malloc_mtx);
	for (type = kmemstatistics; type != NULL; type = type->ks_next)
		cnt++;

	mtx_unlock(&malloc_mtx);
	bufsize = linesize * (cnt + 1);
	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	mtx_lock(&malloc_mtx);

	len = snprintf(p, linesize,
	    "\n        Type  InUse MemUse HighUse Requests  Size(s)\n");
	p += len;

	for (type = kmemstatistics; cnt != 0 && type != NULL;
	    type = type->ks_next, cnt--) {
		if (type->ks_calls == 0)
			continue;

		curline = linesize - 2;	/* Leave room for the \n */
		len = snprintf(p, curline, "%13s%6lu%6luK%7luK%9llu",
			type->ks_shortdesc,
			type->ks_inuse,
			(type->ks_memuse + 1023) / 1024,
			(type->ks_maxused + 1023) / 1024,
			(unsigned long long)type->ks_calls);
		curline -= len;
		p += len;

		first = 1;
		for (i = 0; i < sizeof(kmemzones) / sizeof(kmemzones[0]) - 1;
		    i++) {
			if (type->ks_size & (1 << i)) {
				if (first)
					len = snprintf(p, curline, "  ");
				else
					len = snprintf(p, curline, ",");
				curline -= len;
				p += len;

				len = snprintf(p, curline,
				    "%s", kmemzones[i].kz_name);
				curline -= len;
				p += len;

				first = 0;
			}
		}

		len = snprintf(p, 2, "\n");
		p += len;
	}

	mtx_unlock(&malloc_mtx);
	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats");

#ifdef MALLOC_PROFILE

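/*
 * Render the malloc profiling statistics (the kern.mprof sysctl): for
 * each 16-byte request size, the number of requests seen and the real
 * (rounded-up) zone size that serviced them, followed by totals of
 * memory used and memory wasted by the rounding.
 */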
static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	int linesize = 64;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int bufsize;
	int error;
	char *buf;
	int rsize;
	int size;
	char *p;
	int len;
	int i;

	bufsize = linesize * (KMEM_ZSIZE + 1);
	bufsize += 128;	/* For the stats line */
	bufsize += 128;	/* For the banner line */
	waste = 0;
	mem = 0;

	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	len = snprintf(p, bufsize,
	    "\n  Size                    Requests  Real Size\n");
	bufsize -= len;
	p += len;

	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = krequests[i];

		len = snprintf(p, bufsize, "%6d%28llu%11d\n",
		    size, (unsigned long long)count, rsize);
		bufsize -= len;
		p += len;

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}

	len = snprintf(p, bufsize,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	p += len;

	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */