xref: /freebsd/sys/kern/kern_malloc.c (revision d37ea99837e6ad50837fd9fe1771ddf1c3ba6002)
/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
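
/*
 * For illustration (not part of the original sources): with the default
 * REALLOC_FRACTION of 1, shrinking a 1024-byte allocation with realloc(9)
 * reuses the old block for any new size in (512, 1024], but allocates and
 * copies into a new, smaller block once newsize <= 512, i.e.
 * newsize <= oldsize / 2^1.
 */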

MALLOC_DEFINE(M_CACHE, "cache", "Various dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");
static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static char *kmembase;
static char *kmemlimit;

#define KMEM_ZSHIFT	4
#define KMEM_ZBASE	16
#define KMEM_ZMASK	(KMEM_ZBASE - 1)

#define KMEM_ZMAX	PAGE_SIZE
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static u_int8_t kmemsize[KMEM_ZSIZE + 1];

/* These won't be powers of two for long */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone;
} kmemzones[] = {
	{16, "16", NULL},
	{32, "32", NULL},
	{64, "64", NULL},
	{128, "128", NULL},
	{256, "256", NULL},
	{512, "512", NULL},
	{1024, "1024", NULL},
	{2048, "2048", NULL},
	{4096, "4096", NULL},
#if PAGE_SIZE > 4096
	{8192, "8192", NULL},
#if PAGE_SIZE > 8192
	{16384, "16384", NULL},
#if PAGE_SIZE > 16384
	{32768, "32768", NULL},
#if PAGE_SIZE > 32768
	{65536, "65536", NULL},
#if PAGE_SIZE > 65536
#error	"Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};
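
/*
 * Worked example (illustrative, using the KMEM_ZBASE of 16 defined above):
 * a malloc(100, ...) request is rounded up to the next 16-byte boundary,
 * (100 & ~KMEM_ZMASK) + KMEM_ZBASE == 112, and kmemsize[112 >> KMEM_ZSHIFT]
 * then selects the "128" zone, so the request is served from 128-byte items.
 */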

u_int vm_kmem_size;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size, CTLFLAG_RD, &vm_kmem_size, 0,
    "Size of kernel memory");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */

struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS);

/* time_uptime of last malloc(9) failure */
static time_t t_malloc_fail;

#ifdef MALLOC_MAKE_FAILURES
/*
 * Cause a malloc failure every (n) mallocs when M_NOWAIT is specified.
 * A value of 0 (the default) disables induced failures.
 */
SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");

static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return
 *	NULL if the allocation fails.
 */
void *
malloc(size, type, flags)
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
	uma_keg_t keg;
#ifdef DIAGNOSTIC
	unsigned long osize = size;
#endif
	register struct malloc_type *ksp = type;

#ifdef INVARIANTS
	/*
	 * Make sure that exactly one of M_WAITOK and M_NOWAIT is
	 * specified, and catch the common API botches (such as passing
	 * the legacy M_DONTWAIT or M_TRYWAIT flags).
	 */
	indx = flags & (M_WAITOK | M_NOWAIT | M_DONTWAIT | M_TRYWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#if 0
	if (size == 0)
		Debugger("zero size malloc");
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		   ("malloc(M_WAITOK) in interrupt context"));
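	/*
	 * Small requests are served from the per-size UMA zones above;
	 * anything larger than KMEM_ZMAX goes straight to the VM in
	 * whole pages.
	 */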
	if (size <= KMEM_ZMAX) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone;
		keg = zone->uz_keg;
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		mtx_lock(&ksp->ks_mtx);
		if (va == NULL)
			goto out;

		ksp->ks_size |= 1 << indx;
		size = keg->uk_size;
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		keg = NULL;
		va = uma_large_malloc(size, flags);
		mtx_lock(&ksp->ks_mtx);
		if (va == NULL)
			goto out;
	}
	ksp->ks_memuse += size;
	ksp->ks_inuse++;
out:
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;

	mtx_unlock(&ksp->ks_mtx);
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
	return ((void *) va);
}
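
/*
 * Illustrative usage only (M_EXAMPLE stands in for a real malloc type
 * declared with MALLOC_DEFINE()): an M_NOWAIT allocation must be checked
 * for NULL, while an M_WAITOK allocation may sleep but never fails:
 *
 *	p = malloc(len, M_EXAMPLE, M_NOWAIT | M_ZERO);
 *	if (p == NULL)
 *		return (ENOMEM);
 */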

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(addr, type)
	void *addr;
	struct malloc_type *type;
{
	register struct malloc_type *ksp = type;
	uma_slab_t slab;
	u_long size;

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	KASSERT(ksp->ks_memuse > 0,
		("malloc(9)/free(9) confusion.\n%s",
		 "Probably freeing with wrong type, but maybe not here."));
	size = 0;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently
		 * freed this memory here.  This way we know who is most
		 * likely to have stepped on it later.
		 *
		 * This code assumes that size is a multiple of the pointer
		 * size (8 bytes on 64-bit machines).
		 */
		mtp = (struct malloc_type **)
		    ((unsigned long)mtp & ~UMA_ALIGN_PTR);
		mtp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
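		/*
		 * For a 64-byte item with 8-byte pointers, for example,
		 * mtp now points at bytes 56..63, the last pointer-sized
		 * slot of the item.
		 */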
		*mtp = type;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	mtx_lock(&ksp->ks_mtx);
	KASSERT(size <= ksp->ks_memuse,
		("malloc(9)/free(9) confusion.\n%s",
		 "Probably freeing with wrong type, but maybe not here."));
	ksp->ks_memuse -= size;
	ksp->ks_inuse--;
	mtx_unlock(&ksp->ks_mtx);
}

/*
 *	realloc: change the size of a memory block
 */
void *
realloc(addr, size, type, flags)
	void *addr;
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, type, flags));

	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (slab->us_keg)
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, type, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, type);
	return (newaddr);
}
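
/*
 * Illustrative usage only: on failure realloc() leaves the original block
 * untouched, so the old pointer must not be overwritten until the call is
 * known to have succeeded:
 *
 *	newp = realloc(p, newlen, M_TEMP, M_NOWAIT);
 *	if (newp != NULL)
 *		p = newp;	(p remains valid either way)
 */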

/*
 *	reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(addr, size, type, flags)
	void *addr;
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	void *mem;

	if ((mem = realloc(addr, size, type, flags)) == NULL)
		free(addr, type);
	return (mem);
}
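
/*
 * Illustrative usage only: because reallocf() frees the old block when
 * the resize fails, the caller may safely overwrite its only copy of the
 * pointer, avoiding the leak that the bare realloc() pattern risks:
 *
 *	if ((buf = reallocf(buf, newlen, M_TEMP, M_NOWAIT)) == NULL)
 *		return (ENOMEM);
 */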

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(dummy)
	void *dummy;
{
	u_int8_t indx;
	u_long mem_size;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * better suited to a wider range of machine sizes.  On an i386,
	 * a VM_KMEM_SIZE_SCALE value of 4 is good, while a VM_KMEM_SIZE
	 * of 12MB is a fair compromise.  VM_KMEM_SIZE_MAX depends on the
	 * maximum KVA space available; on an i386 with a total KVA space
	 * of 256MB, try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
	mem_size = cnt.v_page_count;

#if defined(VM_KMEM_SIZE_SCALE)
	if ((mem_size / VM_KMEM_SIZE_SCALE) > (vm_kmem_size / PAGE_SIZE))
		vm_kmem_size = (mem_size / VM_KMEM_SIZE_SCALE) * PAGE_SIZE;
#endif

#if defined(VM_KMEM_SIZE_MAX)
	if (vm_kmem_size >= VM_KMEM_SIZE_MAX)
		vm_kmem_size = VM_KMEM_SIZE_MAX;
#endif
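	/*
	 * Hypothetical figures for illustration: with 1GB of RAM
	 * (262144 4K pages) and a VM_KMEM_SIZE_SCALE of 3, the scaled
	 * size would come to roughly 341MB and would then be clamped
	 * to VM_KMEM_SIZE_MAX above.
	 */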

	/* Allow final override from the kernel environment */
#ifndef BURN_BRIDGES
	if (TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size) != 0)
		printf("kern.vm.kmem.size is now called vm.kmem_size!\n");
#endif
	TUNABLE_INT_FETCH("vm.kmem_size", &vm_kmem_size);
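	/*
	 * The tunable fetched above may be set from the loader, e.g. in
	 * /boot/loader.conf (value in bytes):
	 *
	 *	vm.kmem_size="134217728"
	 */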

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane.  Be careful to not overflow the 32-bit
	 * ints while doing the check.
	 */
	if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count)
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

	/*
	 * Tune settings based on the kernel map's size at this time.
	 */
	init_param3(vm_kmem_size / PAGE_SIZE);

	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, vm_kmem_size);
	kmem_map->system_map = 1;

	uma_startup2();

	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;

		kmemzones[indx].kz_zone = uma_zcreate(name, size,
#ifdef INVARIANTS
		    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
		    NULL, NULL, NULL, NULL,
#endif
		    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);

		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;

	}
}

void
malloc_init(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;

	mtx_lock(&malloc_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	if (type->ks_next != NULL) {
		/* Already registered; don't return with malloc_mtx held. */
		mtx_unlock(&malloc_mtx);
		return;
	}

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
	mtx_init(&type->ks_mtx, type->ks_shortdesc, "Malloc Stats", MTX_DEF);
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;
	struct malloc_type *t;

	mtx_lock(&malloc_mtx);
	mtx_lock(&type->ks_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	mtx_destroy(&type->ks_mtx);
	mtx_unlock(&malloc_mtx);
}

static int
sysctl_kern_malloc(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type *type;
	int linesize = 128;
	int curline;
	int bufsize;
	int first;
	int error;
	char *buf;
	char *p;
	int cnt;
	int len;
	int i;

	cnt = 0;

	mtx_lock(&malloc_mtx);
	for (type = kmemstatistics; type != NULL; type = type->ks_next)
		cnt++;

	mtx_unlock(&malloc_mtx);
	bufsize = linesize * (cnt + 1);
	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	mtx_lock(&malloc_mtx);

	len = snprintf(p, linesize,
	    "\n        Type  InUse MemUse HighUse Requests  Size(s)\n");
	p += len;

	for (type = kmemstatistics; cnt != 0 && type != NULL;
	    type = type->ks_next, cnt--) {
		if (type->ks_calls == 0)
			continue;

		curline = linesize - 2;	/* Leave room for the \n */
		len = snprintf(p, curline, "%13s%6lu%6luK%7luK%9llu",
			type->ks_shortdesc,
			type->ks_inuse,
			(type->ks_memuse + 1023) / 1024,
			(type->ks_maxused + 1023) / 1024,
			(unsigned long long)type->ks_calls);
		curline -= len;
		p += len;

		first = 1;
		for (i = 0; i < sizeof(kmemzones) / sizeof(kmemzones[0]) - 1;
		    i++) {
			if (type->ks_size & (1 << i)) {
				if (first)
					len = snprintf(p, curline, "  ");
				else
					len = snprintf(p, curline, ",");
				curline -= len;
				p += len;

				len = snprintf(p, curline,
				    "%s", kmemzones[i].kz_name);
				curline -= len;
				p += len;

				first = 0;
			}
		}

		len = snprintf(p, 2, "\n");
		p += len;
	}

	mtx_unlock(&malloc_mtx);
	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats");
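
/*
 * For illustration: the preformatted statistics string exported above can
 * be read from userland with sysctl(8), e.g. "sysctl kern.malloc".
 */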

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	int linesize = 64;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int bufsize;
	int error;
	char *buf;
	int rsize;
	int size;
	char *p;
	int len;
	int i;

	bufsize = linesize * (KMEM_ZSIZE + 1);
	bufsize += 128; 	/* For the stats line */
	bufsize += 128; 	/* For the banner line */
	waste = 0;
	mem = 0;

	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	len = snprintf(p, bufsize,
	    "\n  Size                    Requests  Real Size\n");
	bufsize -= len;
	p += len;

	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = krequests[i];

		len = snprintf(p, bufsize, "%6d%28llu%11d\n",
		    size, (unsigned long long)count, rsize);
		bufsize -= len;
		p += len;

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}

	len = snprintf(p, bufsize,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	p += len;

	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */
681