xref: /freebsd/sys/kern/kern_malloc.c (revision 87efd4d58a20aab03531bc03cc14da56642b4f5e)
/*-
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
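/*
 * Illustrative example (numbers not from the original source): with the
 * default REALLOC_FRACTION of 1, an allocation backed by the 1024-byte zone
 * is moved to a new, smaller block only when the new size is 512 bytes or
 * less; shrinking it to, say, 600 bytes simply reuses the existing block.
 */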

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static char *kmembase;
static char *kmemlimit;

#define KMEM_ZSHIFT	4
#define KMEM_ZBASE	16
#define KMEM_ZMASK	(KMEM_ZBASE - 1)

#define KMEM_ZMAX	PAGE_SIZE
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
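/*
 * kmemsize[] maps a request size, in units of KMEM_ZBASE (16) bytes, to the
 * index in kmemzones[] of the smallest zone able to satisfy the request.
 * It is filled in by kmeminit() below.
 */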
static u_int8_t kmemsize[KMEM_ZSIZE + 1];

/* These won't be powers of two for long */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone;
} kmemzones[] = {
	{16, "16", NULL},
	{32, "32", NULL},
	{64, "64", NULL},
	{128, "128", NULL},
	{256, "256", NULL},
	{512, "512", NULL},
	{1024, "1024", NULL},
	{2048, "2048", NULL},
	{4096, "4096", NULL},
#if PAGE_SIZE > 4096
	{8192, "8192", NULL},
#if PAGE_SIZE > 8192
	{16384, "16384", NULL},
#if PAGE_SIZE > 16384
	{32768, "32768", NULL},
#if PAGE_SIZE > 32768
	{65536, "65536", NULL},
#if PAGE_SIZE > 65536
#error	"Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};

#ifdef DEBUG_MEMGUARD
u_int vm_memguard_divisor;
SYSCTL_UINT(_vm, OID_AUTO, memguard_divisor, CTLFLAG_RD, &vm_memguard_divisor,
    0, "(kmem_size/memguard_divisor) == memguard submap size");
#endif

u_int vm_kmem_size;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size, CTLFLAG_RD, &vm_kmem_size, 0,
    "Size of kernel memory");

u_int vm_kmem_size_max;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RD, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RD, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */

struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS);

/* time_uptime of last malloc(9) failure */
static time_t t_malloc_fail;

#ifdef MALLOC_MAKE_FAILURES
/*
 * Causes malloc failures every (n) mallocs with M_NOWAIT.  If set to 0,
 * doesn't cause failures.
 */
SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");

static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 * Add this allocation to the informational malloc_type bucket.
 */
static void
malloc_type_zone_allocated(struct malloc_type *ksp, unsigned long size,
    int zindx)
{
	mtx_lock(&ksp->ks_mtx);
	ksp->ks_calls++;
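	/*
	 * ks_size is a bitmask of the kmemzones[] indices this type has
	 * allocated from; the kern.malloc sysctl handler uses it to print
	 * the "Size(s)" column.
	 */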
	if (zindx != -1)
		ksp->ks_size |= 1 << zindx;
	if (size != 0) {
		ksp->ks_memuse += size;
		ksp->ks_inuse++;
		if (ksp->ks_memuse > ksp->ks_maxused)
			ksp->ks_maxused = ksp->ks_memuse;
	}
	mtx_unlock(&ksp->ks_mtx);
}

void
malloc_type_allocated(struct malloc_type *ksp, unsigned long size)
{
	malloc_type_zone_allocated(ksp, size, -1);
}

/*
 * Remove this allocation from the informational malloc_type bucket.
 */
void
malloc_type_freed(struct malloc_type *ksp, unsigned long size)
{
	mtx_lock(&ksp->ks_mtx);
	KASSERT(size <= ksp->ks_memuse,
		("malloc(9)/free(9) confusion.\n%s",
		 "Probably freeing with wrong type, but maybe not here."));
	ksp->ks_memuse -= size;
	ksp->ks_inuse--;
	mtx_unlock(&ksp->ks_mtx);
}

2374362fadaSBrian Feldman /*
2381c7c3c6aSMatthew Dillon  *	malloc:
2391c7c3c6aSMatthew Dillon  *
2401c7c3c6aSMatthew Dillon  *	Allocate a block of memory.
2411c7c3c6aSMatthew Dillon  *
2421c7c3c6aSMatthew Dillon  *	If M_NOWAIT is set, this routine will not block and return NULL if
2431c7c3c6aSMatthew Dillon  *	the allocation fails.
244df8bae1dSRodney W. Grimes  */
void *
malloc(unsigned long size, struct malloc_type *type, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
	uma_keg_t keg;
#ifdef DIAGNOSTIC
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	/*
	 * Make sure that exactly one of M_WAITOK or M_NOWAIT is set, and
	 * check against the API botches that are common.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT | M_DONTWAIT | M_TRYWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#if 0
	if (size == 0)
		kdb_enter("zero size malloc");
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		   ("malloc(M_WAITOK) in interrupt context"));

#ifdef DEBUG_MEMGUARD
	/* XXX CHANGEME! */
	if (type == M_SUBPROC)
		return memguard_alloc(size, flags);
#endif

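	/*
	 * Requests up to KMEM_ZMAX are rounded up to the next multiple of
	 * KMEM_ZBASE and served from a fixed-size UMA zone; for example, a
	 * 100-byte request becomes 112 bytes and kmemsize[112 >> KMEM_ZSHIFT]
	 * selects the 128-byte zone.  Larger requests are rounded up to
	 * whole pages and handed to uma_large_malloc().
	 */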
	if (size <= KMEM_ZMAX) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone;
		keg = zone->uz_keg;
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = keg->uk_size;
		malloc_type_zone_allocated(type, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		keg = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(type, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
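	/*
	 * Unless the caller asked for zeroed memory, fill the allocation
	 * with a 0x70 junk pattern so code that wrongly assumes malloc()
	 * returns zeroed memory fails in an obvious way.
	 */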
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
	return ((void *) va);
}

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *type)
{
	uma_slab_t slab;
	u_long size;

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

#ifdef DEBUG_MEMGUARD
	/* XXX CHANGEME! */
	if (type == M_SUBPROC) {
		memguard_free(addr);
		return;
	}
#endif

	KASSERT(type->ks_memuse > 0,
		("malloc(9)/free(9) confusion.\n%s",
		 "Probably freeing with wrong type, but maybe not here."));
	size = 0;

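	/*
	 * Mask the address down to its page base and look up the UMA slab
	 * header recorded for that page; a NULL result means the address
	 * was never handed out by malloc()/UMA.
	 */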
	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));


	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes for
		 * 64 bit machines
		 */
		mtp = (struct malloc_type **)
		    ((unsigned long)mtp & ~UMA_ALIGN_PTR);
		mtp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtp = type;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(type, size);
}

/*
 *	realloc: change the size of a memory block
 */
void *
realloc(void *addr, unsigned long size, struct malloc_type *type, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, type, flags));

#ifdef DEBUG_MEMGUARD
/* XXX: CHANGEME! */
if (type == M_SUBPROC) {
	slab = NULL;
	alloc = size;
} else {
#endif

	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (slab->us_keg)
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);

#ifdef DEBUG_MEMGUARD
}
#endif

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, type, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, type);
	return (newaddr);
}

/*
 *	reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, unsigned long size, struct malloc_type *type, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, type, flags)) == NULL)
		free(addr, type);
	return (mem);
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(void *dummy)
{
	u_int8_t indx;
	u_long mem_size;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.
	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
	 * a VM_KMEM_SIZE of 12MB is a fair compromise.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available, and on an X86 with a total KVA space of 256MB,
	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
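	/*
	 * Illustrative arithmetic (numbers not from this file): with 512MB
	 * of physical memory (131072 4K pages) and a scale factor of 3, the
	 * scaling step below would size the map at 131072 / 3 pages, or
	 * roughly 170MB, unless VM_KMEM_SIZE_MAX or a loader tunable caps it.
	 */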
	vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
	mem_size = cnt.v_page_count;

#if defined(VM_KMEM_SIZE_SCALE)
	vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale);
	if (vm_kmem_size_scale > 0 &&
	    (mem_size / vm_kmem_size_scale) > (vm_kmem_size / PAGE_SIZE))
		vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_MAX)
	vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_max", &vm_kmem_size_max);
	if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
		vm_kmem_size = vm_kmem_size_max;

	/* Allow final override from the kernel environment */
#ifndef BURN_BRIDGES
	if (TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size) != 0)
		printf("kern.vm.kmem.size is now called vm.kmem_size!\n");
#endif
	TUNABLE_INT_FETCH("vm.kmem_size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane. Be careful to not overflow the 32bit
	 * ints while doing the check.
	 */
	if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count)
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

	/*
	 * Tune settings based on the kernel map's size at this time.
	 */
	init_param3(vm_kmem_size / PAGE_SIZE);

	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
		(vm_offset_t *)&kmemlimit, vm_kmem_size);
	kmem_map->system_map = 1;

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	vm_memguard_divisor = 10;
	TUNABLE_INT_FETCH("vm.memguard_divisor", &vm_memguard_divisor);

	/* Pick a conservative value if provided value sucks. */
	if ((vm_memguard_divisor <= 0) ||
	    ((vm_kmem_size / vm_memguard_divisor) == 0))
		vm_memguard_divisor = 10;
	memguard_init(kmem_map, vm_kmem_size / vm_memguard_divisor);
#endif

	uma_startup2();

	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;

		kmemzones[indx].kz_zone = uma_zcreate(name, size,
#ifdef INVARIANTS
		    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
		    NULL, NULL, NULL, NULL,
#endif
		    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);

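		/*
		 * Point every KMEM_ZBASE-sized bucket up to this zone's size
		 * at this zone.  'i' is not reset between iterations, so each
		 * bucket ends up mapped to the smallest zone that fits it.
		 */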
		for (;i <= size; i+= KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;

	}
}

void
malloc_init(void *data)
{
	struct malloc_type *type = (struct malloc_type *)data;

	mtx_lock(&malloc_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	if (type->ks_next != NULL)
		return;

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
	mtx_init(&type->ks_mtx, type->ks_shortdesc, "Malloc Stats", MTX_DEF);
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type *type = (struct malloc_type *)data;
	struct malloc_type *t;

	mtx_lock(&malloc_mtx);
	mtx_lock(&type->ks_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	mtx_destroy(&type->ks_mtx);
	mtx_unlock(&malloc_mtx);
}

static int
sysctl_kern_malloc(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type *type;
	int linesize = 128;
	int curline;
	int bufsize;
	int first;
	int error;
	char *buf;
	char *p;
	int cnt;
	int len;
	int i;

	cnt = 0;

	mtx_lock(&malloc_mtx);
	for (type = kmemstatistics; type != NULL; type = type->ks_next)
		cnt++;

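	/*
	 * Drop malloc_mtx across the M_WAITOK allocation, since sleeping
	 * while holding a mutex is not allowed.  The type list can change
	 * meanwhile, so the loop below also bounds itself by 'cnt'.
	 */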
	mtx_unlock(&malloc_mtx);
	bufsize = linesize * (cnt + 1);
	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	mtx_lock(&malloc_mtx);

	len = snprintf(p, linesize,
	    "\n        Type  InUse MemUse HighUse Requests  Size(s)\n");
	p += len;

	for (type = kmemstatistics; cnt != 0 && type != NULL;
	    type = type->ks_next, cnt--) {
		if (type->ks_calls == 0)
			continue;

		curline = linesize - 2;	/* Leave room for the \n */
		len = snprintf(p, curline, "%13s%6lu%6luK%7luK%9llu",
			type->ks_shortdesc,
			type->ks_inuse,
			(type->ks_memuse + 1023) / 1024,
			(type->ks_maxused + 1023) / 1024,
			(long long unsigned)type->ks_calls);
		curline -= len;
		p += len;

		first = 1;
		for (i = 0; i < sizeof(kmemzones) / sizeof(kmemzones[0]) - 1;
		    i++) {
			if (type->ks_size & (1 << i)) {
				if (first)
					len = snprintf(p, curline, "  ");
				else
					len = snprintf(p, curline, ",");
				curline -= len;
				p += len;

				len = snprintf(p, curline,
				    "%s", kmemzones[i].kz_name);
				curline -= len;
				p += len;

				first = 0;
			}
		}

		len = snprintf(p, 2, "\n");
		p += len;
	}

	mtx_unlock(&malloc_mtx);
	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats");

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	int linesize = 64;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int bufsize;
	int error;
	char *buf;
	int rsize;
	int size;
	char *p;
	int len;
	int i;

	bufsize = linesize * (KMEM_ZSIZE + 1);
	bufsize += 128; 	/* For the stats line */
	bufsize += 128; 	/* For the banner line */
	waste = 0;
	mem = 0;

	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	len = snprintf(p, bufsize,
	    "\n  Size                    Requests  Real Size\n");
	bufsize -= len;
	p += len;

	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (long long unsigned)krequests[i];

		len = snprintf(p, bufsize, "%6d%28llu%11d\n",
		    size, (unsigned long long)count, rsize);
		bufsize -= len;
		p += len;

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}

	len = snprintf(p, bufsize,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	p += len;

	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */