xref: /freebsd/sys/kern/kern_malloc.c (revision 77b7cdf1999ee965ad494fddd184b18f532ac91a)
/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. All advertising materials mentioning features or use of this software
 *    must display the following acknowledgement:
 *	This product includes software developed by the University of
 *	California, Berkeley and its contributors.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 * $FreeBSD$
 */

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
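
/*
 * For example, with the default REALLOC_FRACTION of 1: shrinking a block
 * whose underlying allocation is 1024 bytes to a request of 512 bytes or
 * less allocates a fresh, smaller block and copies the data over, while a
 * request of 513..1024 bytes reuses the original block.  See the size test
 * in realloc() below.
 */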

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static char *kmembase;
static char *kmemlimit;

#define KMEM_ZSHIFT	4
#define KMEM_ZBASE	16
#define KMEM_ZMASK	(KMEM_ZBASE - 1)

#define KMEM_ZMAX	65536
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static u_int8_t kmemsize[KMEM_ZSIZE + 1];

/* These won't be powers of two for long */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone;
} kmemzones[] = {
	{16, "16", NULL},
	{32, "32", NULL},
	{64, "64", NULL},
	{128, "128", NULL},
	{256, "256", NULL},
	{512, "512", NULL},
	{1024, "1024", NULL},
	{2048, "2048", NULL},
	{4096, "4096", NULL},
	{8192, "8192", NULL},
	{16384, "16384", NULL},
	{32768, "32768", NULL},
	{65536, "65536", NULL},
	{0, NULL},
};
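
/*
 * kmeminit() fills kmemsize[] so that a request, once rounded up to a
 * multiple of KMEM_ZBASE (16) bytes, maps to the index of the smallest
 * zone in kmemzones[] that can hold it.  For example, a 100-byte malloc()
 * is rounded up to 112 bytes, kmemsize[112 >> KMEM_ZSHIFT] (kmemsize[7])
 * holds the index of the 128-byte zone, and the allocation is served from
 * that zone.
 */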

u_int vm_kmem_size;

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */

struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS);

/* time_uptime of last malloc(9) failure */
static time_t t_malloc_fail;

#ifdef MALLOC_MAKE_FAILURES
/*
 * Causes malloc failures every (n) mallocs with M_NOWAIT.  If set to 0,
 * doesn't cause failures.
 */
SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");

static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

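/*
 * Returns the number of seconds since the last recorded malloc(9) failure
 * (or simply the uptime in seconds if no failure has been recorded yet).
 */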
int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return
 *	NULL if the allocation fails.
 */
void *
malloc(size, type, flags)
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#ifdef DIAGNOSTIC
	unsigned long osize = size;
#endif
	register struct malloc_type *ksp = type;

#ifdef INVARIANTS
	/*
	 * Make sure that exactly one of M_WAITOK or M_NOWAIT is specified,
	 * and check for the API botches that are common.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT | M_DONTWAIT | M_TRYWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#if 0
	if (size == 0)
		Debugger("zero size malloc");
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		   ("malloc(M_WAITOK) in interrupt context"));
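	/*
	 * Requests up to KMEM_ZMAX bytes are rounded up to the next
	 * KMEM_ZBASE boundary and served from one of the fixed-size UMA
	 * zones in kmemzones[]; anything larger is rounded up to a whole
	 * number of pages and handed to uma_large_malloc().
	 */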
	if (size <= KMEM_ZMAX) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone;
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		mtx_lock(&ksp->ks_mtx);
		if (va == NULL)
			goto out;

		ksp->ks_size |= 1 << indx;
		size = zone->uz_size;
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc(size, flags);
		mtx_lock(&ksp->ks_mtx);
		if (va == NULL)
			goto out;
	}
	ksp->ks_memuse += size;
	ksp->ks_inuse++;
out:
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;

	mtx_unlock(&ksp->ks_mtx);
	if (!(flags & M_NOWAIT))
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	if (va == NULL) {
		t_malloc_fail = time_uptime;
	}
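	/*
	 * Under DIAGNOSTIC, fill the block with junk (0x70) when the caller
	 * did not ask for zeroed memory, so that use of uninitialized data
	 * is more likely to be noticed.
	 */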
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
	return ((void *) va);
}

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(addr, type)
	void *addr;
	struct malloc_type *type;
{
	register struct malloc_type *ksp = type;
	uma_slab_t slab;
	u_long size;

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	size = 0;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

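	/*
	 * Slabs without UMA_SLAB_MALLOC set came from one of the
	 * kmemzones[] UMA zones and are returned with uma_zfree_arg();
	 * slabs with the flag set were created by uma_large_malloc() and
	 * are released with uma_large_free().
	 */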
	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtp = addr;
#endif
		size = slab->us_zone->uz_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes for
		 * 64 bit machines
		 */
		mtp = (struct malloc_type **)
		    ((unsigned long)mtp & ~UMA_ALIGN_PTR);
		mtp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtp = type;
#endif
		uma_zfree_arg(slab->us_zone, addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	mtx_lock(&ksp->ks_mtx);
	ksp->ks_memuse -= size;
	ksp->ks_inuse--;
	mtx_unlock(&ksp->ks_mtx);
}

/*
 *	realloc: change the size of a memory block
 */
void *
realloc(addr, size, type, flags)
	void *addr;
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, type, flags));

	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (slab->us_zone)
		alloc = slab->us_zone->uz_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, type, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, type);
	return (newaddr);
}

/*
 *	reallocf: same as realloc() but frees memory on failure.
 */
void *
reallocf(addr, size, type, flags)
	void *addr;
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	void *mem;

	if ((mem = realloc(addr, size, type, flags)) == NULL)
		free(addr, type);
	return (mem);
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(dummy)
	void *dummy;
{
	u_int8_t indx;
	u_long npg;
	u_long mem_size;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.
	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
	 * a VM_KMEM_SIZE of 12MB is a fair compromise.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available, and on an X86 with a total KVA space of 256MB,
	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
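	/*
	 * To illustrate with the figures from the comment above: on a
	 * machine with 256MB of physical memory and a VM_KMEM_SIZE_SCALE
	 * of 4, mem_size / VM_KMEM_SIZE_SCALE is 64MB, which exceeds the
	 * 12MB VM_KMEM_SIZE default, so vm_kmem_size is raised to 64MB
	 * before the VM_KMEM_SIZE_MAX clamp, the kern.vm.kmem.size
	 * override, and the 2 * physical memory limit below are applied.
	 */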
	vm_kmem_size = VM_KMEM_SIZE;
	mem_size = cnt.v_page_count * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_SCALE)
	if ((mem_size / VM_KMEM_SIZE_SCALE) > vm_kmem_size)
		vm_kmem_size = mem_size / VM_KMEM_SIZE_SCALE;
#endif

#if defined(VM_KMEM_SIZE_MAX)
	if (vm_kmem_size >= VM_KMEM_SIZE_MAX)
		vm_kmem_size = VM_KMEM_SIZE_MAX;
#endif

	/* Allow final override from the kernel environment */
	TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane. Be careful to not overflow the 32bit
	 * ints while doing the check.
	 */
	if ((vm_kmem_size / 2) > (cnt.v_page_count * PAGE_SIZE))
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

	/*
	 * In mbuf_init(), we set up submaps for mbufs and clusters, in which
	 * case we rounddown() (nmbufs * MSIZE) and (nmbclusters * MCLBYTES),
	 * respectively. Mathematically, this means that what we do here may
	 * amount to slightly more address space than we need for the submaps,
	 * but it never hurts to have an extra page in kmem_map.
	 */
	npg = (nmbufs*MSIZE + nmbclusters*MCLBYTES + vm_kmem_size) / PAGE_SIZE;

	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, (vm_size_t)(npg * PAGE_SIZE));
	kmem_map->system_map = 1;

	uma_startup2();

	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;

		kmemzones[indx].kz_zone = uma_zcreate(name, size,
#ifdef INVARIANTS
		    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
		    NULL, NULL, NULL, NULL,
#endif
		    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);

		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;

	}
}

void
malloc_init(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;

	mtx_lock(&malloc_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	if (type->ks_next != NULL) {
		/* Already registered; drop the lock before returning. */
		mtx_unlock(&malloc_mtx);
		return;
	}

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
	mtx_init(&type->ks_mtx, type->ks_shortdesc, "Malloc Stats", MTX_DEF);
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(data)
	void *data;
{
	struct malloc_type *type = (struct malloc_type *)data;
	struct malloc_type *t;

	mtx_lock(&malloc_mtx);
	mtx_lock(&type->ks_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	mtx_destroy(&type->ks_mtx);
	mtx_unlock(&malloc_mtx);
}

static int
sysctl_kern_malloc(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type *type;
	int linesize = 128;
	int curline;
	int bufsize;
	int first;
	int error;
	char *buf;
	char *p;
	int cnt;
	int len;
	int i;

	cnt = 0;

	mtx_lock(&malloc_mtx);
	for (type = kmemstatistics; type != NULL; type = type->ks_next)
		cnt++;

	mtx_unlock(&malloc_mtx);
	bufsize = linesize * (cnt + 1);
	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	mtx_lock(&malloc_mtx);

	len = snprintf(p, linesize,
	    "\n        Type  InUse MemUse HighUse Requests  Size(s)\n");
	p += len;

	for (type = kmemstatistics; cnt != 0 && type != NULL;
	    type = type->ks_next, cnt--) {
		if (type->ks_calls == 0)
			continue;

		curline = linesize - 2;	/* Leave room for the \n */
		len = snprintf(p, curline, "%13s%6lu%6luK%7luK%9llu",
			type->ks_shortdesc,
			type->ks_inuse,
			(type->ks_memuse + 1023) / 1024,
			(type->ks_maxused + 1023) / 1024,
			(long long unsigned)type->ks_calls);
		curline -= len;
		p += len;

		first = 1;
		for (i = 0; i < sizeof(kmemzones) / sizeof(kmemzones[0]) - 1;
		    i++) {
			if (type->ks_size & (1 << i)) {
				if (first)
					len = snprintf(p, curline, "  ");
				else
					len = snprintf(p, curline, ",");
				curline -= len;
				p += len;

				len = snprintf(p, curline,
				    "%s", kmemzones[i].kz_name);
				curline -= len;
				p += len;

				first = 0;
			}
		}

		len = snprintf(p, 2, "\n");
		p += len;
	}

	mtx_unlock(&malloc_mtx);
	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats");

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	int linesize = 64;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int bufsize;
	int error;
	char *buf;
	int rsize;
	int size;
	char *p;
	int len;
	int i;

	bufsize = linesize * (KMEM_ZSIZE + 1);
	bufsize += 128;	/* For the stats line */
	bufsize += 128;	/* For the banner line */
	waste = 0;
	mem = 0;

	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	len = snprintf(p, bufsize,
	    "\n  Size                    Requests  Real Size\n");
	bufsize -= len;
	p += len;

	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (long long unsigned)krequests[i];

		len = snprintf(p, bufsize, "%6d%28llu%11d\n",
		    size, (unsigned long long)count, rsize);
		bufsize -= len;
		p += len;

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}

	len = snprintf(p, bufsize,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	p += len;

	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */