/* xref: /freebsd/sys/kern/kern_malloc.c (revision 46d70077be51eeffe9ec24ce74f81ecb4cea5fea) */
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  The back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and the comments in malloc.h for a detailed
 * description.
 */
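
/*
 * Example usage sketch (illustrative only; M_FOOBUF is a made-up type):
 * a consumer declares a memory type once, then allocates and frees
 * against it, so the per-type statistics kept below are attributed
 * correctly:
 *
 *	MALLOC_DEFINE(M_FOOBUF, "foobuf", "example foo buffers");
 *
 *	void *p = malloc(128, M_FOOBUF, M_WAITOK | M_ZERO);
 *	...
 *	free(p, M_FOOBUF);
 */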

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/vmem.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

bool	__read_frequently			dtrace_malloc_enabled;
dtrace_malloc_probe_func_t __read_mostly	dtrace_malloc_probe;
#endif

#if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) ||		\
    defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
#define	MALLOC_DEBUG	1
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
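
/*
 * For example, with the default REALLOC_FRACTION of 1: shrinking a
 * 1024-byte block to 512 bytes (newsize <= 1024 / 2^1) allocates a new
 * block and copies, while shrinking it to 513 bytes reuses the old one
 * (see the reuse test in realloc() below).
 */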

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

static struct malloc_type *kmemstatistics;
static int kmemcount;

#define KMEM_ZSHIFT	4
#define KMEM_ZBASE	16
#define KMEM_ZMASK	(KMEM_ZBASE - 1)

#define KMEM_ZMAX	65536
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "16", },
	{32, "32", },
	{64, "64", },
	{128, "128", },
	{256, "256", },
	{512, "512", },
	{1024, "1024", },
	{2048, "2048", },
	{4096, "4096", },
	{8192, "8192", },
	{16384, "16384", },
	{32768, "32768", },
	{65536, "65536", },
	{0, NULL},
};
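
/*
 * Bucket selection arithmetic (illustrative): a 100-byte request is
 * rounded up to the next multiple of KMEM_ZBASE, (100 & ~KMEM_ZMASK) +
 * KMEM_ZBASE == 112, and kmemsize[112 >> KMEM_ZSHIFT] yields the index
 * of the smallest bucket that fits, here the 128-byte zone.  The
 * kmemsize[] table itself is populated by mallocinit() below.
 */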

/*
 * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
 * types are described by a data structure passed by the declaring code, but
 * the malloc(9) implementation has its own data structure describing the
 * type and statistics.  This permits the malloc(9)-internal data structures
 * to be modified without breaking binary-compiled kernel modules that
 * declare malloc types.
 */
static uma_zone_t mt_zone;
static uma_zone_t mt_stats_zone;

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long kmem_zmax = KMEM_ZMAX;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
    "Maximum allocation size that malloc(9) would use UMA as backend");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Free space in kmem");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");
#endif

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = uma_size();
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size, limit;

	/* The sysctl is unsigned, implement as a saturation value. */
	size = uma_size();
	limit = uma_limit();
	if (size > limit)
		size = 0;
	else
		size = limit - size;
	return (sysctl_handle_long(oidp, &size, 0, req));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones",
	    &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");

static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;
	const char *desc;
	size_t len;
	u_int val;

	mtip = mtp->ks_handle;
	desc = mtp->ks_shortdesc;
	if (desc == NULL || (len = strlen(desc)) == 0)
		val = 0;
	else
		val = desc[zone_offset % len];
	mtip->mti_zone = (val % numzones);
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = mtp->ks_handle;

	KASSERT(mtip->mti_zone < numzones,
	    ("mti_zone %u out of range %d",
	    mtip->mti_zone, numzones));
	return (mtip->mti_zone);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = mtp->ks_handle;
	mtip->mti_zone = 0;
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * size of the bucket used.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = zpcpu_get(mtip->mti_stats);
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * size of the freed bucket.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = zpcpu_get(mtip->mti_stats);
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 *	contigmalloc:
 *
 *	Allocate a block of physically contiguous memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
	    boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}

void *
contigmalloc_domainset(unsigned long size, struct malloc_type *type,
    struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}

/*
 *	contigfree:
 *
 *	Free a block of memory allocated by contigmalloc.
 *
 *	This routine may not block.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{

	kmem_free((vm_offset_t)addr, size);
	malloc_type_freed(type, round_page(size));
}
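
/*
 * Usage sketch (illustrative only): a driver needing a 64 KB buffer that
 * is physically contiguous, below 4 GB, page aligned, and subject to no
 * boundary constraint might do:
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_WAITOK, 0, 0xffffffffUL,
 *	    PAGE_SIZE, 0);
 *	...
 *	contigfree(buf, 65536, M_DEVBUF);
 */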

#ifdef MALLOC_DEBUG
static int
malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
    int flags)
{
#ifdef INVARIANTS
	int indx;

	KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			*vap = NULL;
			return (EJUSTRETURN);
		}
	}
#endif
	if (flags & M_WAITOK) {
		KASSERT(curthread->td_intr_nesting_level == 0,
		   ("malloc(M_WAITOK) in interrupt context"));
		KASSERT(curthread->td_epochnest == 0,
			("malloc(M_WAITOK) in epoch context"));
	}
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("malloc: called with spinlock or critical section held"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, *sizep)) {
		*vap = memguard_alloc(*sizep, flags);
		if (*vap != NULL)
			return (EJUSTRETURN);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	*sizep = redzone_size_ntor(*sizep);
#endif

	return (0);
}
#endif

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
(malloc)(size_t size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#if defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (size <= kmem_zmax && (flags & M_EXEC) == 0) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}

static void *
malloc_domain(size_t size, struct malloc_type *mtp, int domain, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#if defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif
	if (size <= kmem_zmax && (flags & M_EXEC) == 0) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc_domain(zone, NULL, domain, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc_domain(size, domain, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}

void *
malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
    int flags)
{
	struct vm_domainset_iter di;
	void *ret;
	int domain;

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
	do {
		ret = malloc_domain(size, mtp, domain, flags);
		if (ret != NULL)
			break;
	} while (vm_domainset_iter_policy(&di, &domain) == 0);

	return (ret);
}
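
/*
 * Usage sketch (illustrative only): a NUMA-aware consumer can prefer a
 * particular domain and still fall back to the rest of the system:
 *
 *	p = malloc_domainset(size, M_DEVBUF, DOMAINSET_PREF(domain),
 *	    M_WAITOK);
 *
 * where DOMAINSET_PREF() is the "prefer this domain" policy from
 * sys/domainset.h.
 */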

void *
mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray: %zu * %zu overflowed", nmemb, size);

	return (malloc(size * nmemb, type, flags));
}
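
/*
 * For example (illustrative): mallocarray(SIZE_MAX / 4 + 1, 4, M_TEMP,
 * M_WAITOK) would wrap around if the product were formed blindly;
 * WOULD_OVERFLOW() catches this and the call panics rather than
 * returning a too-short buffer.
 */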

#ifdef INVARIANTS
static void
free_save_type(void *addr, struct malloc_type *mtp, u_long size)
{
	struct malloc_type **mtpp = addr;

	/*
	 * Cache a pointer to the malloc_type that most recently freed
	 * this memory here.  This way we know who is most likely to
	 * have stepped on it later.
	 *
	 * This code assumes that size is a multiple of 8 bytes on
	 * 64-bit machines.
	 */
	mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
	mtpp += (size - sizeof(struct malloc_type *)) /
	    sizeof(struct malloc_type *);
	*mtpp = mtp;
}
#endif

#ifdef MALLOC_DEBUG
static int
free_dbg(void **addrp, struct malloc_type *mtp)
{
	void *addr;

	addr = *addrp;
	KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("free: called with spinlock or critical section held"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return (EJUSTRETURN);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return (EJUSTRETURN);
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	*addrp = redzone_addr_ntor(addr);
#endif

	return (0);
}
#endif

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		free_save_type(addr, mtp, size);
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

void
free_domain(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
	if (slab == NULL)
		panic("free_domain: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		free_save_type(addr, mtp, size);
#endif
		uma_zfree_domain(LIST_FIRST(&slab->us_keg->uk_zones),
		    addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

/*
 *	realloc: change the size of a memory block
 */
void *
realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("realloc: bad malloc type magic"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("realloc: called with spinlock or critical section held"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	alloc = redzone_get_size(addr);
#else
	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 *	reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}
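
/*
 * Usage sketch (illustrative only): reallocf() avoids the classic leak
 * where a failed realloc() overwrites the only pointer to the block;
 * on failure the old block has already been freed:
 *
 *	if ((p = reallocf(p, newsize, M_TEMP, M_NOWAIT)) == NULL)
 *		return (ENOMEM);
 */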

#ifndef __sparc64__
CTASSERT(VM_KMEM_SIZE_SCALE >= 1);
#endif

/*
 * Initialize the kernel memory (kmem) arena.
 */
void
kmeminit(void)
{
	u_long mem_size;
	u_long tmp;

#ifdef VM_KMEM_SIZE
	if (vm_kmem_size == 0)
		vm_kmem_size = VM_KMEM_SIZE;
#endif
#ifdef VM_KMEM_SIZE_MIN
	if (vm_kmem_size_min == 0)
		vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
#ifdef VM_KMEM_SIZE_MAX
	if (vm_kmem_size_max == 0)
		vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	/*
	 * Calculate the amount of kernel virtual address (KVA) space that is
	 * preallocated to the kmem arena.  In order to support a wide range
	 * of machines, it is a function of the physical memory size,
	 * specifically,
	 *
	 *	min(max(physical memory size / VM_KMEM_SIZE_SCALE,
	 *	    VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
	 *
	 * Every architecture must define an integral value for
	 * VM_KMEM_SIZE_SCALE.  However, the definitions of VM_KMEM_SIZE_MIN
	 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
	 * ceiling on this preallocation, are optional.  Typically,
	 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
	 * a given architecture.
	 */
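	/*
	 * Worked example (illustrative numbers only): with 4 GiB of RAM
	 * (1048576 pages of 4 KiB) and VM_KMEM_SIZE_SCALE == 3, the scaled
	 * value is 1048576 / 3 pages, roughly 1.33 GiB after the PAGE_SIZE
	 * multiplication below, and it is then clamped to the
	 * [VM_KMEM_SIZE_MIN, VM_KMEM_SIZE_MAX] range when those are defined.
	 */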
	mem_size = vm_cnt.v_page_count;
	if (mem_size <= 32768) /* delphij XXX 128MB */
		kmem_zmax = PAGE_SIZE;

	if (vm_kmem_size_scale < 1)
		vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;

	/*
	 * Check if we should use defaults for the "vm_kmem_size"
	 * variable:
	 */
	if (vm_kmem_size == 0) {
		vm_kmem_size = mem_size / vm_kmem_size_scale;
		vm_kmem_size = vm_kmem_size * PAGE_SIZE < vm_kmem_size ?
		    vm_kmem_size_max : vm_kmem_size * PAGE_SIZE;
		if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
			vm_kmem_size = vm_kmem_size_min;
		if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
			vm_kmem_size = vm_kmem_size_max;
	}
	if (vm_kmem_size == 0)
		panic("Tune VM_KMEM_SIZE_* for the platform");

	/*
	 * The amount of KVA space that is preallocated to the
	 * kmem arena can be set statically at compile-time or manually
	 * through the kernel environment.  However, it is still limited to
	 * twice the physical memory size, which has been sufficient to handle
	 * the most severe cases of external fragmentation in the kmem arena.
	 */
	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
		vm_kmem_size = 2 * mem_size * PAGE_SIZE;

	vm_kmem_size = round_page(vm_kmem_size);
#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, kernel_map);
#else
	tmp = vm_kmem_size;
#endif
	uma_set_limit(tmp);

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	memguard_init(kernel_arena);
#endif
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
mallocinit(void *dummy)
{
	int i;
	uint8_t indx;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	kmeminit();

	if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
		kmem_zmax = KMEM_ZMAX;

	mt_stats_zone = uma_zcreate("mt_stats_zone",
	    sizeof(struct malloc_type_stats), NULL, NULL, NULL, NULL,
	    UMA_ALIGN_PTR, UMA_ZONE_PCPU);
	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;
		int subzone;

		for (subzone = 0; subzone < numzones; subzone++) {
			kmemzones[indx].kz_zone[subzone] =
			    uma_zcreate(name, size,
#ifdef INVARIANTS
			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
			    NULL, NULL, NULL, NULL,
#endif
			    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
		}
		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;

	}
}
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);

void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	if (mtp->ks_magic != M_MAGIC)
		panic("malloc_init: bad malloc type magic");

	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtip->mti_stats = uma_zalloc_pcpu(mt_stats_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;
	mtp_set_subzone(mtp);

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	uma_slab_t slab;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("malloc_uninit: bad malloc type magic"));
	KASSERT(mtp->ks_handle != NULL, ("malloc_uninit: cookie NULL"));

	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;
				break;
			}
		}
		KASSERT(temp,
		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i <= mp_maxid; i++) {
		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_pcpu(mt_stats_zone, mtip->mti_stats);
	uma_zfree_arg(mt_zone, mtip, slab);
}
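
/*
 * Sketch (not compiled): malloc_uninit() is normally reached via the
 * SYSUNINIT that MALLOC_DEFINE() emits, e.g. when a module is unloaded
 * with kldunload(8).  If the hypothetical M_EXAMPLE type above still had
 * one 128-byte allocation outstanding at that point, the leak check
 * above would print:
 *
 *   Warning: memory type example leaked memory on destroy
 *       (1 allocations, 128 bytes leaked).
 */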

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}
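
/*
 * Sketch (not compiled): per the mtx_assert() above, malloc_desc2type()
 * requires malloc_mtx, so a hypothetical caller would wrap it like this.
 * Note the returned pointer is only stable while the type remains
 * registered.
 */
#if 0
static struct malloc_type *
example_lookup(const char *desc)
{
	struct malloc_type *mtp;

	mtx_lock(&malloc_mtx);
	mtp = malloc_desc2type(desc);
	mtx_unlock(&malloc_mtx);
	return (mtp);
}
#endif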

static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp, zeromts;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int error, i;
	struct sbuf sbuf;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
	mtx_lock(&malloc_mtx);

	bzero(&zeromts, sizeof(zeromts));

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i <= mp_maxid; i++) {
			mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
			(void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));
		}
		/*
		 * Fill in the missing CPUs.
		 */
		for (; i < MAXCPU; i++) {
			(void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));
		}
	}
	mtx_unlock(&malloc_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");
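
/*
 * Userland sketch (not compiled): the kern.malloc_stats stream emitted
 * above is normally consumed through libmemstat(3), as vmstat -m does.
 * This assumes the standard libmemstat API; error handling is omitted.
 */
#if 0
#include <memstat.h>
#include <stdio.h>

static void
example_dump_malloc_stats(void)
{
	struct memory_type_list *mtlp;
	struct memory_type *mtp;

	mtlp = memstat_mtl_alloc();
	if (memstat_sysctl_malloc(mtlp, 0) == 0) {
		for (mtp = memstat_mtl_first(mtlp); mtp != NULL;
		    mtp = memstat_mtl_next(mtp))
			printf("%s: %ju bytes in use\n",
			    memstat_get_name(mtp),
			    (uintmax_t)memstat_get_bytes(mtp));
	}
	memstat_mtl_free(mtlp);
}
#endif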

void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}
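
/*
 * Sketch (not compiled): a hypothetical malloc_type_list() consumer that
 * counts the registered types through the callback interface.  The
 * callback runs without malloc_mtx held, per the loop above.
 */
#if 0
static void
example_count_cb(struct malloc_type *mtp, void *arg)
{

	(*(int *)arg)++;
}

static int
example_count_types(void)
{
	int count;

	count = 0;
	malloc_type_list(example_count_cb, &count);
	return (count);
}
#endif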

#ifdef DDB
static int64_t
get_malloc_stats(const struct malloc_type_internal *mtip, uint64_t *allocs,
    uint64_t *inuse)
{
	const struct malloc_type_stats *mtsp;
	uint64_t frees, alloced, freed;
	int i;

	*allocs = 0;
	frees = 0;
	alloced = 0;
	freed = 0;
	for (i = 0; i <= mp_maxid; i++) {
		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);

		*allocs += mtsp->mts_numallocs;
		frees += mtsp->mts_numfrees;
		alloced += mtsp->mts_memalloced;
		freed += mtsp->mts_memfreed;
	}
	*inuse = *allocs - frees;
	return (alloced - freed);
}
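
/*
 * Illustrative numbers: with mts_numallocs totalling 10, mts_numfrees 7,
 * mts_memalloced 4096 and mts_memfreed 1024 across all CPUs, *allocs is
 * 10, *inuse is 3 and the return value (bytes in use) is 3072.
 */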

DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	const char *fmt_hdr, *fmt_entry;
	struct malloc_type *mtp;
	uint64_t allocs, inuse;
	int64_t size;
	/* variables for sorting */
	struct malloc_type *last_mtype, *cur_mtype;
	int64_t cur_size, last_size;
	int ties;

	if (modif[0] == 'i') {
		fmt_hdr = "%s,%s,%s,%s\n";
		fmt_entry = "\"%s\",%ju,%jdK,%ju\n";
	} else {
		fmt_hdr = "%18s %12s  %12s %12s\n";
		fmt_entry = "%18s %12ju %12jdK %12ju\n";
	}

	db_printf(fmt_hdr, "Type", "InUse", "MemUse", "Requests");

	/* Select sort, largest size first. */
	last_mtype = NULL;
	last_size = INT64_MAX;
	for (;;) {
		cur_mtype = NULL;
		cur_size = -1;
		ties = 0;

		for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
			/*
			 * In the case of size ties, print out mtypes
			 * in the order they are encountered.  That is,
			 * when we encounter the most recently output
			 * mtype, we have already printed all preceding
			 * ties, and we must print all following ties.
			 */
			if (mtp == last_mtype) {
				ties = 1;
				continue;
			}
			size = get_malloc_stats(mtp->ks_handle, &allocs,
			    &inuse);
			if (size > cur_size && size < last_size + ties) {
				cur_size = size;
				cur_mtype = mtp;
			}
		}
		if (cur_mtype == NULL)
			break;

		size = get_malloc_stats(cur_mtype->ks_handle, &allocs, &inuse);
		db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
		    howmany(size, 1024), allocs);

		if (db_pager_quit)
			break;

		last_mtype = cur_mtype;
		last_size = cur_size;
	}
}
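
/*
 * DDB usage sketch: "show malloc" prints the table above sorted by
 * MemUse, largest first; adding the "i" modifier ("show malloc/i")
 * selects the comma-separated format, per the modif[0] test above.
 */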

#if MALLOC_DEBUG_MAXZONES > 1
DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int subzone;

	if (!have_addr) {
		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
		return;
	}
	mtp = (void *)addr;
	if (mtp->ks_magic != M_MAGIC) {
		db_printf("Magic %lx does not match expected %x\n",
		    mtp->ks_magic, M_MAGIC);
		return;
	}

	mtip = mtp->ks_handle;
	subzone = mtip->mti_zone;

	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = mtp->ks_handle;
		if (mtip->mti_zone != subzone)
			continue;
		db_printf("%s\n", mtp->ks_shortdesc);
		if (db_pager_quit)
			break;
	}
}
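
/*
 * DDB usage sketch: pass the address of a malloc type, e.g.
 * "show multizone_matches M_TEMP" (assuming DDB can resolve the symbol),
 * to list every type sharing that type's debug subzone.
 */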
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
#endif /* DDB */

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int error;
	int rsize;
	int size;
	int i;

	waste = 0;
	mem = 0;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf,
	    "\n  Size                    Requests  Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */
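
/*
 * Note: MALLOC_PROFILE is an optional compile-time knob ("options
 * MALLOC_PROFILE" in the kernel config); when built in, the table above
 * is readable with "sysctl kern.mprof".
 */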