xref: /freebsd/sys/kern/kern_malloc.c (revision 69a2875821ed9404a95f41bbeeb561908e3c38dc)
19454b2d8SWarner Losh /*-
2df8bae1dSRodney W. Grimes  * Copyright (c) 1987, 1991, 1993
363a7e0a3SRobert Watson  *	The Regents of the University of California.
4bb1c7df8SRobert Watson  * Copyright (c) 2005-2009 Robert N. M. Watson
563a7e0a3SRobert Watson  * All rights reserved.
6df8bae1dSRodney W. Grimes  *
7df8bae1dSRodney W. Grimes  * Redistribution and use in source and binary forms, with or without
8df8bae1dSRodney W. Grimes  * modification, are permitted provided that the following conditions
9df8bae1dSRodney W. Grimes  * are met:
10df8bae1dSRodney W. Grimes  * 1. Redistributions of source code must retain the above copyright
11df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer.
12df8bae1dSRodney W. Grimes  * 2. Redistributions in binary form must reproduce the above copyright
13df8bae1dSRodney W. Grimes  *    notice, this list of conditions and the following disclaimer in the
14df8bae1dSRodney W. Grimes  *    documentation and/or other materials provided with the distribution.
15*69a28758SEd Maste  * 3. Neither the name of the University nor the names of its contributors
16df8bae1dSRodney W. Grimes  *    may be used to endorse or promote products derived from this software
17df8bae1dSRodney W. Grimes  *    without specific prior written permission.
18df8bae1dSRodney W. Grimes  *
19df8bae1dSRodney W. Grimes  * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
20df8bae1dSRodney W. Grimes  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
21df8bae1dSRodney W. Grimes  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
22df8bae1dSRodney W. Grimes  * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
23df8bae1dSRodney W. Grimes  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
24df8bae1dSRodney W. Grimes  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
25df8bae1dSRodney W. Grimes  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
26df8bae1dSRodney W. Grimes  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
27df8bae1dSRodney W. Grimes  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
28df8bae1dSRodney W. Grimes  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29df8bae1dSRodney W. Grimes  * SUCH DAMAGE.
30df8bae1dSRodney W. Grimes  *
31df8bae1dSRodney W. Grimes  *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
32df8bae1dSRodney W. Grimes  */
33df8bae1dSRodney W. Grimes 
340ce3f16dSRobert Watson /*
350ce3f16dSRobert Watson  * Kernel malloc(9) implementation -- general purpose kernel memory allocator
360ce3f16dSRobert Watson  * based on memory types.  Back end is implemented using the UMA(9) zone
370ce3f16dSRobert Watson  * allocator.  A set of fixed-size buckets are used for smaller allocations,
380ce3f16dSRobert Watson  * allocator.  A set of fixed-size buckets is used for smaller allocations,
390ce3f16dSRobert Watson  * Callers declare memory types, and statistics are maintained independently
400ce3f16dSRobert Watson  * for each memory type.  Statistics are maintained per-CPU for performance
410ce3f16dSRobert Watson  * reasons.  See malloc(9) and comments in malloc.h for a detailed
420ce3f16dSRobert Watson  * description.
430ce3f16dSRobert Watson  */
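/*
 * Usage sketch (the type and variable names below are hypothetical, not
 * part of this file): a consumer declares a memory type and allocates
 * against it, so the per-type statistics described above are attributed
 * to the right subsystem:
 *
 *	MALLOC_DEFINE(M_EXAMPLE, "example", "example buffers");
 *
 *	struct foo *fp;
 *
 *	fp = malloc(sizeof(*fp), M_EXAMPLE, M_WAITOK | M_ZERO);
 *	...
 *	free(fp, M_EXAMPLE);
 */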
440ce3f16dSRobert Watson 
45677b542eSDavid E. O'Brien #include <sys/cdefs.h>
46677b542eSDavid E. O'Brien __FBSDID("$FreeBSD$");
47677b542eSDavid E. O'Brien 
48909ed16cSRobert Watson #include "opt_ddb.h"
498a58a9f6SJohn Dyson #include "opt_vm.h"
508a58a9f6SJohn Dyson 
51df8bae1dSRodney W. Grimes #include <sys/param.h>
5226f9a767SRodney W. Grimes #include <sys/systm.h>
532d50560aSMarcel Moolenaar #include <sys/kdb.h>
54df8bae1dSRodney W. Grimes #include <sys/kernel.h>
55fb919e4dSMark Murray #include <sys/lock.h>
56df8bae1dSRodney W. Grimes #include <sys/malloc.h>
57eec258d2SJohn Baldwin #include <sys/mutex.h>
58efeaf95aSDavid Greenman #include <sys/vmmeter.h>
59a448b62aSJake Burkholder #include <sys/proc.h>
6063a7e0a3SRobert Watson #include <sys/sbuf.h>
616f267175SJeff Roberson #include <sys/sysctl.h>
621fb14a47SPoul-Henning Kamp #include <sys/time.h>
635df87b21SJeff Roberson #include <sys/vmem.h>
649a02e8c6SJason Evans 
65df8bae1dSRodney W. Grimes #include <vm/vm.h>
6699571dc3SJeff Roberson #include <vm/pmap.h>
675df87b21SJeff Roberson #include <vm/vm_pageout.h>
68efeaf95aSDavid Greenman #include <vm/vm_param.h>
69df8bae1dSRodney W. Grimes #include <vm/vm_kern.h>
70efeaf95aSDavid Greenman #include <vm/vm_extern.h>
713075778bSJohn Dyson #include <vm/vm_map.h>
7299571dc3SJeff Roberson #include <vm/vm_page.h>
738355f576SJeff Roberson #include <vm/uma.h>
748355f576SJeff Roberson #include <vm/uma_int.h>
758efc4effSJeff Roberson #include <vm/uma_dbg.h>
76df8bae1dSRodney W. Grimes 
77e4eb384bSBosko Milekic #ifdef DEBUG_MEMGUARD
78e4eb384bSBosko Milekic #include <vm/memguard.h>
79e4eb384bSBosko Milekic #endif
80847a2a17SPawel Jakub Dawidek #ifdef DEBUG_REDZONE
81847a2a17SPawel Jakub Dawidek #include <vm/redzone.h>
82847a2a17SPawel Jakub Dawidek #endif
83e4eb384bSBosko Milekic 
84984982d6SPoul-Henning Kamp #if defined(INVARIANTS) && defined(__i386__)
85984982d6SPoul-Henning Kamp #include <machine/cpu.h>
86984982d6SPoul-Henning Kamp #endif
87984982d6SPoul-Henning Kamp 
88909ed16cSRobert Watson #include <ddb/ddb.h>
89909ed16cSRobert Watson 
9091dd776cSJohn Birrell #ifdef KDTRACE_HOOKS
9191dd776cSJohn Birrell #include <sys/dtrace_bsd.h>
9291dd776cSJohn Birrell 
9391dd776cSJohn Birrell dtrace_malloc_probe_func_t	dtrace_malloc_probe;
9491dd776cSJohn Birrell #endif
9591dd776cSJohn Birrell 
9644a8ff31SArchie Cobbs /*
9744a8ff31SArchie Cobbs  * When realloc() is called, if the new size is sufficiently smaller than
9844a8ff31SArchie Cobbs  * the old size, realloc() will allocate a new, smaller block to avoid
9944a8ff31SArchie Cobbs  * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
10044a8ff31SArchie Cobbs  * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
10144a8ff31SArchie Cobbs  */
10244a8ff31SArchie Cobbs #ifndef REALLOC_FRACTION
10344a8ff31SArchie Cobbs #define	REALLOC_FRACTION	1	/* new block if <= half the size */
10444a8ff31SArchie Cobbs #endif
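/*
 * Worked example: with the default REALLOC_FRACTION of 1, shrinking a
 * 1024-byte block is "sufficiently smaller" once newsize <= 1024 / 2^1
 * == 512, so realloc() copies into a fresh, smaller block; shrinking to
 * anywhere in 513..1024 bytes reuses the original allocation instead.
 */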
10544a8ff31SArchie Cobbs 
1060ce3f16dSRobert Watson /*
1070ce3f16dSRobert Watson  * Centrally define some common malloc types.
1080ce3f16dSRobert Watson  */
1093b6fb885SPoul-Henning Kamp MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
1109ef246c6SBruce Evans MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
1119ef246c6SBruce Evans MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");
1129ef246c6SBruce Evans 
113db669378SPeter Wemm static struct malloc_type *kmemstatistics;
114cd814b26SRobert Watson static int kmemcount;
1151f6889a1SMatthew Dillon 
1168355f576SJeff Roberson #define KMEM_ZSHIFT	4
1178355f576SJeff Roberson #define KMEM_ZBASE	16
1188355f576SJeff Roberson #define KMEM_ZMASK	(KMEM_ZBASE - 1)
1198355f576SJeff Roberson 
120bda06553SXin LI #define KMEM_ZMAX	65536
1218355f576SJeff Roberson #define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
12260ae52f7SEd Schouten static uint8_t kmemsize[KMEM_ZSIZE + 1];
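/*
 * Worked example: a 100-byte request has low bits set (100 & KMEM_ZMASK
 * != 0), so malloc() rounds it up to (100 & ~KMEM_ZMASK) + KMEM_ZBASE ==
 * 112, and kmemsize[112 >> KMEM_ZSHIFT] == kmemsize[7] selects the
 * 128-byte bucket, which mallocinit() maps to every size up to 128.
 */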
1236f267175SJeff Roberson 
124d7854da1SMatthew D Fleming #ifndef MALLOC_DEBUG_MAXZONES
125d7854da1SMatthew D Fleming #define	MALLOC_DEBUG_MAXZONES	1
126d7854da1SMatthew D Fleming #endif
127d7854da1SMatthew D Fleming static int numzones = MALLOC_DEBUG_MAXZONES;
128d7854da1SMatthew D Fleming 
1290ce3f16dSRobert Watson /*
1300ce3f16dSRobert Watson  * Small malloc(9) memory allocations are allocated from a set of UMA buckets
1310ce3f16dSRobert Watson  * of various sizes.
1320ce3f16dSRobert Watson  *
1330ce3f16dSRobert Watson  * XXX: The comment here used to read "These won't be powers of two for
1340ce3f16dSRobert Watson  * long."  It's possible that a significant amount of wasted memory could be
1350ce3f16dSRobert Watson  * recovered by tuning the sizes of these buckets.
1360ce3f16dSRobert Watson  */
1378355f576SJeff Roberson struct {
1386f267175SJeff Roberson 	int kz_size;
1396f267175SJeff Roberson 	char *kz_name;
140d7854da1SMatthew D Fleming 	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
1416f267175SJeff Roberson } kmemzones[] = {
142d7854da1SMatthew D Fleming 	{16, "16", },
143d7854da1SMatthew D Fleming 	{32, "32", },
144d7854da1SMatthew D Fleming 	{64, "64", },
145d7854da1SMatthew D Fleming 	{128, "128", },
146d7854da1SMatthew D Fleming 	{256, "256", },
147d7854da1SMatthew D Fleming 	{512, "512", },
148d7854da1SMatthew D Fleming 	{1024, "1024", },
149d7854da1SMatthew D Fleming 	{2048, "2048", },
150d7854da1SMatthew D Fleming 	{4096, "4096", },
151d7854da1SMatthew D Fleming 	{8192, "8192", },
152d7854da1SMatthew D Fleming 	{16384, "16384", },
153d7854da1SMatthew D Fleming 	{32768, "32768", },
154d7854da1SMatthew D Fleming 	{65536, "65536", },
1558355f576SJeff Roberson 	{0, NULL},
1568355f576SJeff Roberson };
1578355f576SJeff Roberson 
1580ce3f16dSRobert Watson /*
1590ce3f16dSRobert Watson  * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
1600ce3f16dSRobert Watson  * types are described by a data structure passed by the declaring code, but
1610ce3f16dSRobert Watson  * the malloc(9) implementation has its own data structure describing the
1620ce3f16dSRobert Watson  * type and statistics.  This permits the malloc(9)-internal data structures
1630ce3f16dSRobert Watson  * to be modified without breaking binary-compiled kernel modules that
1640ce3f16dSRobert Watson  * declare malloc types.
1650ce3f16dSRobert Watson  */
16663a7e0a3SRobert Watson static uma_zone_t mt_zone;
16763a7e0a3SRobert Watson 
168b89eaf4eSAlan Cox u_long vm_kmem_size;
169d801e824SAndriy Gapon SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
17084344f9fSDag-Erling Smørgrav     "Size of kernel memory");
1715a34a9f0SJeff Roberson 
1727001d850SXin LI static u_long kmem_zmax = KMEM_ZMAX;
1737001d850SXin LI SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
1747001d850SXin LI     "Maximum allocation size that malloc(9) would use UMA as backend");
1757001d850SXin LI 
176b89eaf4eSAlan Cox static u_long vm_kmem_size_min;
177d801e824SAndriy Gapon SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
1780e5179e4SStephane E. Potvin     "Minimum size of kernel memory");
1790e5179e4SStephane E. Potvin 
180b89eaf4eSAlan Cox static u_long vm_kmem_size_max;
181d801e824SAndriy Gapon SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
182479439b4SDag-Erling Smørgrav     "Maximum size of kernel memory");
183479439b4SDag-Erling Smørgrav 
1844813ad54SHans Petter Selasky static u_int vm_kmem_size_scale;
185d801e824SAndriy Gapon SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
186479439b4SDag-Erling Smørgrav     "Scale factor for kernel memory size");
187479439b4SDag-Erling Smørgrav 
1887814c80aSAndriy Gapon static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
1897814c80aSAndriy Gapon SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
1907814c80aSAndriy Gapon     CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
1915df87b21SJeff Roberson     sysctl_kmem_map_size, "LU", "Current kmem allocation size");
1927814c80aSAndriy Gapon 
19395bb9d38SAndriy Gapon static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
19495bb9d38SAndriy Gapon SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
19595bb9d38SAndriy Gapon     CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
1965df87b21SJeff Roberson     sysctl_kmem_map_free, "LU", "Free space in kmem");
19795bb9d38SAndriy Gapon 
1985a34a9f0SJeff Roberson /*
19999571dc3SJeff Roberson  * The malloc_mtx protects the kmemstatistics linked list.
2005a34a9f0SJeff Roberson  */
2015a34a9f0SJeff Roberson struct mtx malloc_mtx;
20269ef67f9SJason Evans 
2035e914b96SJeff Roberson #ifdef MALLOC_PROFILE
2045e914b96SJeff Roberson uint64_t krequests[KMEM_ZSIZE + 1];
2056f267175SJeff Roberson 
2065e914b96SJeff Roberson static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
2075e914b96SJeff Roberson #endif
2085e914b96SJeff Roberson 
209cd814b26SRobert Watson static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);
210df8bae1dSRodney W. Grimes 
2110ce3f16dSRobert Watson /*
2120ce3f16dSRobert Watson  * time_uptime of the last malloc(9) failure (induced or real).
2130ce3f16dSRobert Watson  */
2141fb14a47SPoul-Henning Kamp static time_t t_malloc_fail;
2151fb14a47SPoul-Henning Kamp 
216d7854da1SMatthew D Fleming #if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
2176472ac3dSEd Schouten static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
218d7854da1SMatthew D Fleming     "Kernel malloc debugging options");
219d7854da1SMatthew D Fleming #endif
220d7854da1SMatthew D Fleming 
221eae870cdSRobert Watson /*
2220ce3f16dSRobert Watson  * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
2230ce3f16dSRobert Watson  * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
224eae870cdSRobert Watson  */
2250ce3f16dSRobert Watson #ifdef MALLOC_MAKE_FAILURES
226eae870cdSRobert Watson static int malloc_failure_rate;
227eae870cdSRobert Watson static int malloc_nowait_count;
228eae870cdSRobert Watson static int malloc_failure_count;
229af3b2549SHans Petter Selasky SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
230eae870cdSRobert Watson     &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
231eae870cdSRobert Watson SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
232eae870cdSRobert Watson     &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
233eae870cdSRobert Watson #endif
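/*
 * Illustrative use: setting the tunable/sysctl debug.malloc.failure_rate
 * to 100 makes roughly every 100th M_NOWAIT allocation return NULL,
 * which is a cheap way to exercise callers' error-handling paths.
 */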
234eae870cdSRobert Watson 
2357814c80aSAndriy Gapon static int
2367814c80aSAndriy Gapon sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
2377814c80aSAndriy Gapon {
2387814c80aSAndriy Gapon 	u_long size;
2397814c80aSAndriy Gapon 
2405df87b21SJeff Roberson 	size = vmem_size(kmem_arena, VMEM_ALLOC);
2417814c80aSAndriy Gapon 	return (sysctl_handle_long(oidp, &size, 0, req));
2427814c80aSAndriy Gapon }
2437814c80aSAndriy Gapon 
24495bb9d38SAndriy Gapon static int
24595bb9d38SAndriy Gapon sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
24695bb9d38SAndriy Gapon {
24795bb9d38SAndriy Gapon 	u_long size;
24895bb9d38SAndriy Gapon 
2495df87b21SJeff Roberson 	size = vmem_size(kmem_arena, VMEM_FREE);
25095bb9d38SAndriy Gapon 	return (sysctl_handle_long(oidp, &size, 0, req));
25195bb9d38SAndriy Gapon }
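/*
 * Both values are exported read-only and can be inspected from userland,
 * e.g. "sysctl vm.kmem_map_size vm.kmem_map_free".
 */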
25295bb9d38SAndriy Gapon 
253d7854da1SMatthew D Fleming /*
254d7854da1SMatthew D Fleming  * malloc(9) uma zone separation -- sub-page buffer overruns in one
255d7854da1SMatthew D Fleming  * malloc type will affect only a subset of other malloc types.
256d7854da1SMatthew D Fleming  */
257d7854da1SMatthew D Fleming #if MALLOC_DEBUG_MAXZONES > 1
258d7854da1SMatthew D Fleming static void
259d7854da1SMatthew D Fleming tunable_set_numzones(void)
260d7854da1SMatthew D Fleming {
261d7854da1SMatthew D Fleming 
262d7854da1SMatthew D Fleming 	TUNABLE_INT_FETCH("debug.malloc.numzones",
263d7854da1SMatthew D Fleming 	    &numzones);
264d7854da1SMatthew D Fleming 
265d7854da1SMatthew D Fleming 	/* Sanity check the number of malloc uma zones. */
266d7854da1SMatthew D Fleming 	if (numzones <= 0)
267d7854da1SMatthew D Fleming 		numzones = 1;
268d7854da1SMatthew D Fleming 	if (numzones > MALLOC_DEBUG_MAXZONES)
269d7854da1SMatthew D Fleming 		numzones = MALLOC_DEBUG_MAXZONES;
270d7854da1SMatthew D Fleming }
271d7854da1SMatthew D Fleming SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
272af3b2549SHans Petter Selasky SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
273d7854da1SMatthew D Fleming     &numzones, 0, "Number of malloc uma subzones");
274d7854da1SMatthew D Fleming 
275d7854da1SMatthew D Fleming /*
276d7854da1SMatthew D Fleming  * Any number that changes regularly is an okay choice for the
277d7854da1SMatthew D Fleming  * offset.  Build numbers are pretty good if you have them.
278d7854da1SMatthew D Fleming  */
279d7854da1SMatthew D Fleming static u_int zone_offset = __FreeBSD_version;
280d7854da1SMatthew D Fleming TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
281d7854da1SMatthew D Fleming SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
282d7854da1SMatthew D Fleming     &zone_offset, 0, "Separate malloc types by examining the "
283d7854da1SMatthew D Fleming     "Nth character in the malloc type short description.");
284d7854da1SMatthew D Fleming 
285d7854da1SMatthew D Fleming static u_int
286d7854da1SMatthew D Fleming mtp_get_subzone(const char *desc)
287d7854da1SMatthew D Fleming {
288d7854da1SMatthew D Fleming 	size_t len;
289d7854da1SMatthew D Fleming 	u_int val;
290d7854da1SMatthew D Fleming 
291d7854da1SMatthew D Fleming 	if (desc == NULL || (len = strlen(desc)) == 0)
292d7854da1SMatthew D Fleming 		return (0);
293d7854da1SMatthew D Fleming 	val = desc[zone_offset % len];
294d7854da1SMatthew D Fleming 	return (val % numzones);
295d7854da1SMatthew D Fleming }
296d7854da1SMatthew D Fleming #elif MALLOC_DEBUG_MAXZONES == 0
297d7854da1SMatthew D Fleming #error "MALLOC_DEBUG_MAXZONES must be positive."
298d7854da1SMatthew D Fleming #else
299d7854da1SMatthew D Fleming static inline u_int
300d7854da1SMatthew D Fleming mtp_get_subzone(const char *desc)
301d7854da1SMatthew D Fleming {
302d7854da1SMatthew D Fleming 
303d7854da1SMatthew D Fleming 	return (0);
304d7854da1SMatthew D Fleming }
305d7854da1SMatthew D Fleming #endif /* MALLOC_DEBUG_MAXZONES > 1 */
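/*
 * Worked example (hypothetical tunable values): with
 * debug.malloc.numzones=8 and debug.malloc.zone_offset=1100122, the
 * short description "devbuf" (length 6) yields desc[1100122 % 6] ==
 * desc[4] == 'u' (0x75), and 0x75 % 8 == 5, so that malloc type is
 * assigned subzone 5.
 */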
306d7854da1SMatthew D Fleming 
3071fb14a47SPoul-Henning Kamp int
3081fb14a47SPoul-Henning Kamp malloc_last_fail(void)
3091fb14a47SPoul-Henning Kamp {
3101fb14a47SPoul-Henning Kamp 
3111fb14a47SPoul-Henning Kamp 	return (time_uptime - t_malloc_fail);
3121fb14a47SPoul-Henning Kamp }
3131fb14a47SPoul-Henning Kamp 
314df8bae1dSRodney W. Grimes /*
3150ce3f16dSRobert Watson  * An allocation has succeeded -- update malloc type statistics for the
3160ce3f16dSRobert Watson  * amount of bucket size.  Occurs within a critical section so that the
3170ce3f16dSRobert Watson  * thread isn't preempted and doesn't migrate while updating per-CPU
3180ce3f16dSRobert Watson  * statistics.
3194362fadaSBrian Feldman  */
3204362fadaSBrian Feldman static void
32163a7e0a3SRobert Watson malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
3224362fadaSBrian Feldman     int zindx)
3234362fadaSBrian Feldman {
32463a7e0a3SRobert Watson 	struct malloc_type_internal *mtip;
32563a7e0a3SRobert Watson 	struct malloc_type_stats *mtsp;
32663a7e0a3SRobert Watson 
32763a7e0a3SRobert Watson 	critical_enter();
32863a7e0a3SRobert Watson 	mtip = mtp->ks_handle;
32963a7e0a3SRobert Watson 	mtsp = &mtip->mti_stats[curcpu];
33073864adbSPawel Jakub Dawidek 	if (size > 0) {
33163a7e0a3SRobert Watson 		mtsp->mts_memalloced += size;
33263a7e0a3SRobert Watson 		mtsp->mts_numallocs++;
33373864adbSPawel Jakub Dawidek 	}
3344362fadaSBrian Feldman 	if (zindx != -1)
33563a7e0a3SRobert Watson 		mtsp->mts_size |= 1 << zindx;
33691dd776cSJohn Birrell 
33791dd776cSJohn Birrell #ifdef KDTRACE_HOOKS
33891dd776cSJohn Birrell 	if (dtrace_malloc_probe != NULL) {
33991dd776cSJohn Birrell 		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
34091dd776cSJohn Birrell 		if (probe_id != 0)
34191dd776cSJohn Birrell 			(dtrace_malloc_probe)(probe_id,
34291dd776cSJohn Birrell 			    (uintptr_t) mtp, (uintptr_t) mtip,
34391dd776cSJohn Birrell 			    (uintptr_t) mtsp, size, zindx);
34491dd776cSJohn Birrell 	}
34591dd776cSJohn Birrell #endif
34691dd776cSJohn Birrell 
34763a7e0a3SRobert Watson 	critical_exit();
3484362fadaSBrian Feldman }
3494362fadaSBrian Feldman 
3504362fadaSBrian Feldman void
35163a7e0a3SRobert Watson malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
3524362fadaSBrian Feldman {
35363a7e0a3SRobert Watson 
35473864adbSPawel Jakub Dawidek 	if (size > 0)
35563a7e0a3SRobert Watson 		malloc_type_zone_allocated(mtp, size, -1);
3564362fadaSBrian Feldman }
3574362fadaSBrian Feldman 
3584362fadaSBrian Feldman /*
3593805385eSRobert Watson  * A free operation has occurred -- update malloc type statistics for the
3600ce3f16dSRobert Watson  * amount of the bucket size.  Occurs within a critical section so that the
3610ce3f16dSRobert Watson  * thread isn't preempted and doesn't migrate while updating per-CPU
3620ce3f16dSRobert Watson  * statistics.
3634362fadaSBrian Feldman  */
3644362fadaSBrian Feldman void
36563a7e0a3SRobert Watson malloc_type_freed(struct malloc_type *mtp, unsigned long size)
3664362fadaSBrian Feldman {
36763a7e0a3SRobert Watson 	struct malloc_type_internal *mtip;
36863a7e0a3SRobert Watson 	struct malloc_type_stats *mtsp;
36963a7e0a3SRobert Watson 
37063a7e0a3SRobert Watson 	critical_enter();
37163a7e0a3SRobert Watson 	mtip = mtp->ks_handle;
37263a7e0a3SRobert Watson 	mtsp = &mtip->mti_stats[curcpu];
37363a7e0a3SRobert Watson 	mtsp->mts_memfreed += size;
37463a7e0a3SRobert Watson 	mtsp->mts_numfrees++;
37591dd776cSJohn Birrell 
37691dd776cSJohn Birrell #ifdef KDTRACE_HOOKS
37791dd776cSJohn Birrell 	if (dtrace_malloc_probe != NULL) {
37891dd776cSJohn Birrell 		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
37991dd776cSJohn Birrell 		if (probe_id != 0)
38091dd776cSJohn Birrell 			(dtrace_malloc_probe)(probe_id,
38191dd776cSJohn Birrell 			    (uintptr_t) mtp, (uintptr_t) mtip,
38291dd776cSJohn Birrell 			    (uintptr_t) mtsp, size, 0);
38391dd776cSJohn Birrell 	}
38491dd776cSJohn Birrell #endif
38591dd776cSJohn Birrell 
38663a7e0a3SRobert Watson 	critical_exit();
3874362fadaSBrian Feldman }
3884362fadaSBrian Feldman 
3894362fadaSBrian Feldman /*
390f346986bSAlan Cox  *	contigmalloc:
391f346986bSAlan Cox  *
392f346986bSAlan Cox  *	Allocate a block of physically contiguous memory.
393f346986bSAlan Cox  *
394f346986bSAlan Cox  *	If M_NOWAIT is set, this routine will not block and return NULL if
395f346986bSAlan Cox  *	the allocation fails.
396f346986bSAlan Cox  */
397f346986bSAlan Cox void *
398f346986bSAlan Cox contigmalloc(unsigned long size, struct malloc_type *type, int flags,
399f346986bSAlan Cox     vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
400831ce4cbSJohn Baldwin     vm_paddr_t boundary)
401f346986bSAlan Cox {
402f346986bSAlan Cox 	void *ret;
403f346986bSAlan Cox 
4045df87b21SJeff Roberson 	ret = (void *)kmem_alloc_contig(kernel_arena, size, flags, low, high,
405f346986bSAlan Cox 	    alignment, boundary, VM_MEMATTR_DEFAULT);
406f346986bSAlan Cox 	if (ret != NULL)
407f346986bSAlan Cox 		malloc_type_allocated(type, round_page(size));
408f346986bSAlan Cox 	return (ret);
409f346986bSAlan Cox }
410f346986bSAlan Cox 
411f346986bSAlan Cox /*
412f346986bSAlan Cox  *	contigfree:
413f346986bSAlan Cox  *
414f346986bSAlan Cox  *	Free a block of memory allocated by contigmalloc.
415f346986bSAlan Cox  *
416f346986bSAlan Cox  *	This routine may not block.
417f346986bSAlan Cox  */
418f346986bSAlan Cox void
419f346986bSAlan Cox contigfree(void *addr, unsigned long size, struct malloc_type *type)
420f346986bSAlan Cox {
421f346986bSAlan Cox 
4225df87b21SJeff Roberson 	kmem_free(kernel_arena, (vm_offset_t)addr, size);
423f346986bSAlan Cox 	malloc_type_freed(type, round_page(size));
424f346986bSAlan Cox }
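/*
 * Usage sketch (hypothetical driver code): allocate a page-aligned,
 * physically contiguous 64KB DMA buffer below 4GB, then release it:
 *
 *	void *buf;
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_WAITOK, 0, 0xffffffff,
 *	    PAGE_SIZE, 0);
 *	...
 *	contigfree(buf, 65536, M_DEVBUF);
 */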
425f346986bSAlan Cox 
426f346986bSAlan Cox /*
4271c7c3c6aSMatthew Dillon  *	malloc:
4281c7c3c6aSMatthew Dillon  *
4291c7c3c6aSMatthew Dillon  *	Allocate a block of memory.
4301c7c3c6aSMatthew Dillon  *
4311c7c3c6aSMatthew Dillon  *	If M_NOWAIT is set, this routine will not block and return NULL if
4321c7c3c6aSMatthew Dillon  *	the allocation fails.
433df8bae1dSRodney W. Grimes  */
434df8bae1dSRodney W. Grimes void *
43563a7e0a3SRobert Watson malloc(unsigned long size, struct malloc_type *mtp, int flags)
436df8bae1dSRodney W. Grimes {
4376f267175SJeff Roberson 	int indx;
438d7854da1SMatthew D Fleming 	struct malloc_type_internal *mtip;
4398355f576SJeff Roberson 	caddr_t va;
4408355f576SJeff Roberson 	uma_zone_t zone;
441847a2a17SPawel Jakub Dawidek #if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE)
4424db4f5c8SPoul-Henning Kamp 	unsigned long osize = size;
4434db4f5c8SPoul-Henning Kamp #endif
444df8bae1dSRodney W. Grimes 
445194a0abfSPoul-Henning Kamp #ifdef INVARIANTS
446bb1c7df8SRobert Watson 	KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic"));
447d3c11994SPoul-Henning Kamp 	/*
44823198357SRuslan Ermilov 	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
449d3c11994SPoul-Henning Kamp 	 */
45023198357SRuslan Ermilov 	indx = flags & (M_WAITOK | M_NOWAIT);
451d3c11994SPoul-Henning Kamp 	if (indx != M_NOWAIT && indx != M_WAITOK) {
452d3c11994SPoul-Henning Kamp 		static	struct timeval lasterr;
453d3c11994SPoul-Henning Kamp 		static	int curerr, once;
454d3c11994SPoul-Henning Kamp 		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
455d3c11994SPoul-Henning Kamp 			printf("Bad malloc flags: %x\n", indx);
4562d50560aSMarcel Moolenaar 			kdb_backtrace();
457d3c11994SPoul-Henning Kamp 			flags |= M_WAITOK;
458d3c11994SPoul-Henning Kamp 			once++;
459d3c11994SPoul-Henning Kamp 		}
460d3c11994SPoul-Henning Kamp 	}
461194a0abfSPoul-Henning Kamp #endif
462eae870cdSRobert Watson #ifdef MALLOC_MAKE_FAILURES
463eae870cdSRobert Watson 	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
464eae870cdSRobert Watson 		atomic_add_int(&malloc_nowait_count, 1);
465eae870cdSRobert Watson 		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
466eae870cdSRobert Watson 			atomic_add_int(&malloc_failure_count, 1);
4673f6ee876SPoul-Henning Kamp 			t_malloc_fail = time_uptime;
468eae870cdSRobert Watson 			return (NULL);
469eae870cdSRobert Watson 		}
470eae870cdSRobert Watson 	}
471eae870cdSRobert Watson #endif
472d3c11994SPoul-Henning Kamp 	if (flags & M_WAITOK)
473b40ce416SJulian Elischer 		KASSERT(curthread->td_intr_nesting_level == 0,
474a163d034SWarner Losh 		   ("malloc(M_WAITOK) in interrupt context"));
475d9e2e68dSMark Johnston 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
4761067a2baSJonathan T. Looney 	    ("malloc: called with spinlock or critical section held"));
4771067a2baSJonathan T. Looney 
478e4eb384bSBosko Milekic #ifdef DEBUG_MEMGUARD
4798d689e04SGleb Smirnoff 	if (memguard_cmp_mtp(mtp, size)) {
480e3813573SMatthew D Fleming 		va = memguard_alloc(size, flags);
481e3813573SMatthew D Fleming 		if (va != NULL)
482e3813573SMatthew D Fleming 			return (va);
483e3813573SMatthew D Fleming 		/* This is unfortunate but should not be fatal. */
484e3813573SMatthew D Fleming 	}
485e4eb384bSBosko Milekic #endif
486e4eb384bSBosko Milekic 
487847a2a17SPawel Jakub Dawidek #ifdef DEBUG_REDZONE
488847a2a17SPawel Jakub Dawidek 	size = redzone_size_ntor(size);
489847a2a17SPawel Jakub Dawidek #endif
490847a2a17SPawel Jakub Dawidek 
4917001d850SXin LI 	if (size <= kmem_zmax) {
492d7854da1SMatthew D Fleming 		mtip = mtp->ks_handle;
4936f267175SJeff Roberson 		if (size & KMEM_ZMASK)
4946f267175SJeff Roberson 			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
4956f267175SJeff Roberson 		indx = kmemsize[size >> KMEM_ZSHIFT];
496d7854da1SMatthew D Fleming 		KASSERT(mtip->mti_zone < numzones,
497d7854da1SMatthew D Fleming 		    ("mti_zone %u out of range %d",
498d7854da1SMatthew D Fleming 		    mtip->mti_zone, numzones));
499d7854da1SMatthew D Fleming 		zone = kmemzones[indx].kz_zone[mtip->mti_zone];
5006f267175SJeff Roberson #ifdef MALLOC_PROFILE
5016f267175SJeff Roberson 		krequests[size >> KMEM_ZSHIFT]++;
5026f267175SJeff Roberson #endif
5038355f576SJeff Roberson 		va = uma_zalloc(zone, flags);
5044362fadaSBrian Feldman 		if (va != NULL)
505e20a199fSJeff Roberson 			size = zone->uz_size;
50663a7e0a3SRobert Watson 		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
5078355f576SJeff Roberson 	} else {
5086f267175SJeff Roberson 		size = roundup(size, PAGE_SIZE);
5098355f576SJeff Roberson 		zone = NULL;
5108355f576SJeff Roberson 		va = uma_large_malloc(size, flags);
51163a7e0a3SRobert Watson 		malloc_type_allocated(mtp, va == NULL ? 0 : size);
512df8bae1dSRodney W. Grimes 	}
5131282e9acSPoul-Henning Kamp 	if (flags & M_WAITOK)
514a163d034SWarner Losh 		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
5151282e9acSPoul-Henning Kamp 	else if (va == NULL)
5161fb14a47SPoul-Henning Kamp 		t_malloc_fail = time_uptime;
5174db4f5c8SPoul-Henning Kamp #ifdef DIAGNOSTIC
5181282e9acSPoul-Henning Kamp 	if (va != NULL && !(flags & M_ZERO)) {
5194db4f5c8SPoul-Henning Kamp 		memset(va, 0x70, osize);
5204db4f5c8SPoul-Henning Kamp 	}
5214db4f5c8SPoul-Henning Kamp #endif
522847a2a17SPawel Jakub Dawidek #ifdef DEBUG_REDZONE
523847a2a17SPawel Jakub Dawidek 	if (va != NULL)
524847a2a17SPawel Jakub Dawidek 		va = redzone_setup(va, osize);
525847a2a17SPawel Jakub Dawidek #endif
526df8bae1dSRodney W. Grimes 	return ((void *) va);
527df8bae1dSRodney W. Grimes }
528df8bae1dSRodney W. Grimes 
529df8bae1dSRodney W. Grimes /*
5301c7c3c6aSMatthew Dillon  *	free:
5311c7c3c6aSMatthew Dillon  *
532df8bae1dSRodney W. Grimes  *	Free a block of memory allocated by malloc.
5331c7c3c6aSMatthew Dillon  *
5341c7c3c6aSMatthew Dillon  *	This routine may not block.
535df8bae1dSRodney W. Grimes  */
536df8bae1dSRodney W. Grimes void
53763a7e0a3SRobert Watson free(void *addr, struct malloc_type *mtp)
538df8bae1dSRodney W. Grimes {
53999571dc3SJeff Roberson 	uma_slab_t slab;
54099571dc3SJeff Roberson 	u_long size;
541254c6cb3SPoul-Henning Kamp 
542bb1c7df8SRobert Watson 	KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic"));
543d9e2e68dSMark Johnston 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
5441067a2baSJonathan T. Looney 	    ("free: called with spinlock or critical section held"));
5451067a2baSJonathan T. Looney 
54644a8ff31SArchie Cobbs 	/* free(NULL, ...) does nothing */
54744a8ff31SArchie Cobbs 	if (addr == NULL)
54844a8ff31SArchie Cobbs 		return;
54944a8ff31SArchie Cobbs 
550e4eb384bSBosko Milekic #ifdef DEBUG_MEMGUARD
551e3813573SMatthew D Fleming 	if (is_memguard_addr(addr)) {
552e4eb384bSBosko Milekic 		memguard_free(addr);
553e4eb384bSBosko Milekic 		return;
554e4eb384bSBosko Milekic 	}
555e4eb384bSBosko Milekic #endif
556e4eb384bSBosko Milekic 
557847a2a17SPawel Jakub Dawidek #ifdef DEBUG_REDZONE
558847a2a17SPawel Jakub Dawidek 	redzone_check(addr);
559847a2a17SPawel Jakub Dawidek 	addr = redzone_addr_ntor(addr);
560847a2a17SPawel Jakub Dawidek #endif
561847a2a17SPawel Jakub Dawidek 
56299571dc3SJeff Roberson 	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));
5638355f576SJeff Roberson 
5648355f576SJeff Roberson 	if (slab == NULL)
5656f267175SJeff Roberson 		panic("free: address %p(%p) has not been allocated.\n",
56699571dc3SJeff Roberson 		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
56799571dc3SJeff Roberson 
5688355f576SJeff Roberson 	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
5698f70816cSJeff Roberson #ifdef INVARIANTS
57063a7e0a3SRobert Watson 		struct malloc_type **mtpp = addr;
5718f70816cSJeff Roberson #endif
572099a0e58SBosko Milekic 		size = slab->us_keg->uk_size;
5738f70816cSJeff Roberson #ifdef INVARIANTS
5748f70816cSJeff Roberson 		/*
5758f70816cSJeff Roberson 		 * Cache a pointer to the malloc_type that most recently freed
5768f70816cSJeff Roberson 		 * this memory here.  This way we know who is most likely to
5778f70816cSJeff Roberson 		 * have stepped on it later.
5788f70816cSJeff Roberson 		 *
5798f70816cSJeff Roberson 		 * This code assumes that size is a multiple of 8 bytes for
5808f70816cSJeff Roberson 		 * 64 bit machines
5818f70816cSJeff Roberson 		 */
58263a7e0a3SRobert Watson 		mtpp = (struct malloc_type **)
58363a7e0a3SRobert Watson 		    ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
58463a7e0a3SRobert Watson 		mtpp += (size - sizeof(struct malloc_type *)) /
5858f70816cSJeff Roberson 		    sizeof(struct malloc_type *);
58663a7e0a3SRobert Watson 		*mtpp = mtp;
5878f70816cSJeff Roberson #endif
588099a0e58SBosko Milekic 		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
58914bf02f8SJohn Dyson 	} else {
5908355f576SJeff Roberson 		size = slab->us_size;
5918355f576SJeff Roberson 		uma_large_free(slab);
59214bf02f8SJohn Dyson 	}
59363a7e0a3SRobert Watson 	malloc_type_freed(mtp, size);
594df8bae1dSRodney W. Grimes }
595df8bae1dSRodney W. Grimes 
596df8bae1dSRodney W. Grimes /*
59744a8ff31SArchie Cobbs  *	realloc: change the size of a memory block
59844a8ff31SArchie Cobbs  */
59944a8ff31SArchie Cobbs void *
60063a7e0a3SRobert Watson realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
60144a8ff31SArchie Cobbs {
6028355f576SJeff Roberson 	uma_slab_t slab;
60344a8ff31SArchie Cobbs 	unsigned long alloc;
60444a8ff31SArchie Cobbs 	void *newaddr;
60544a8ff31SArchie Cobbs 
606bb1c7df8SRobert Watson 	KASSERT(mtp->ks_magic == M_MAGIC,
607bb1c7df8SRobert Watson 	    ("realloc: bad malloc type magic"));
608d9e2e68dSMark Johnston 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
6091067a2baSJonathan T. Looney 	    ("realloc: called with spinlock or critical section held"));
6101067a2baSJonathan T. Looney 
61144a8ff31SArchie Cobbs 	/* realloc(NULL, ...) is equivalent to malloc(...) */
61244a8ff31SArchie Cobbs 	if (addr == NULL)
61363a7e0a3SRobert Watson 		return (malloc(size, mtp, flags));
61463a7e0a3SRobert Watson 
61563a7e0a3SRobert Watson 	/*
61663a7e0a3SRobert Watson 	 * XXX: Should report free of old memory and alloc of new memory to
61763a7e0a3SRobert Watson 	 * per-CPU stats.
61863a7e0a3SRobert Watson 	 */
61944a8ff31SArchie Cobbs 
620e4eb384bSBosko Milekic #ifdef DEBUG_MEMGUARD
6216d3ed393SMatthew D Fleming 	if (is_memguard_addr(addr))
6226d3ed393SMatthew D Fleming 		return (memguard_realloc(addr, size, mtp, flags));
623e4eb384bSBosko Milekic #endif
624e4eb384bSBosko Milekic 
625847a2a17SPawel Jakub Dawidek #ifdef DEBUG_REDZONE
626847a2a17SPawel Jakub Dawidek 	slab = NULL;
627847a2a17SPawel Jakub Dawidek 	alloc = redzone_get_size(addr);
628847a2a17SPawel Jakub Dawidek #else
62999571dc3SJeff Roberson 	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));
6308355f576SJeff Roberson 
63144a8ff31SArchie Cobbs 	/* Sanity check */
6328355f576SJeff Roberson 	KASSERT(slab != NULL,
63344a8ff31SArchie Cobbs 	    ("realloc: address %p out of range", (void *)addr));
63444a8ff31SArchie Cobbs 
63544a8ff31SArchie Cobbs 	/* Get the size of the original block */
636619f2841SPawel Jakub Dawidek 	if (!(slab->us_flags & UMA_SLAB_MALLOC))
637099a0e58SBosko Milekic 		alloc = slab->us_keg->uk_size;
6388355f576SJeff Roberson 	else
6398355f576SJeff Roberson 		alloc = slab->us_size;
64044a8ff31SArchie Cobbs 
64144a8ff31SArchie Cobbs 	/* Reuse the original block if appropriate */
64244a8ff31SArchie Cobbs 	if (size <= alloc
64344a8ff31SArchie Cobbs 	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
64444a8ff31SArchie Cobbs 		return (addr);
645847a2a17SPawel Jakub Dawidek #endif /* !DEBUG_REDZONE */
64644a8ff31SArchie Cobbs 
64744a8ff31SArchie Cobbs 	/* Allocate a new, bigger (or smaller) block */
64863a7e0a3SRobert Watson 	if ((newaddr = malloc(size, mtp, flags)) == NULL)
64944a8ff31SArchie Cobbs 		return (NULL);
65044a8ff31SArchie Cobbs 
65144a8ff31SArchie Cobbs 	/* Copy over original contents */
65244a8ff31SArchie Cobbs 	bcopy(addr, newaddr, min(size, alloc));
65363a7e0a3SRobert Watson 	free(addr, mtp);
65444a8ff31SArchie Cobbs 	return (newaddr);
65544a8ff31SArchie Cobbs }
65644a8ff31SArchie Cobbs 
65744a8ff31SArchie Cobbs /*
65844a8ff31SArchie Cobbs  *	reallocf: same as realloc() but free memory on failure.
65944a8ff31SArchie Cobbs  */
66044a8ff31SArchie Cobbs void *
66163a7e0a3SRobert Watson reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
66244a8ff31SArchie Cobbs {
66344a8ff31SArchie Cobbs 	void *mem;
66444a8ff31SArchie Cobbs 
66563a7e0a3SRobert Watson 	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
66663a7e0a3SRobert Watson 		free(addr, mtp);
66744a8ff31SArchie Cobbs 	return (mem);
66844a8ff31SArchie Cobbs }
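/*
 * Sketch of the leak-free grow pattern reallocf() enables ("buf",
 * "newsize" and "M_EXAMPLE" are hypothetical):
 *
 *	if ((buf = reallocf(buf, newsize, M_EXAMPLE, M_NOWAIT)) == NULL)
 *		return (ENOMEM);
 *
 * With plain realloc(), assigning the result directly to "buf" would
 * lose the only pointer to the old block on failure.
 */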
66944a8ff31SArchie Cobbs 
67044a8ff31SArchie Cobbs /*
67144ec2b63SKonstantin Belousov  * Wake the uma reclamation pagedaemon thread when we exhaust KVA.  It
67244ec2b63SKonstantin Belousov  * will call the lowmem handler and uma_reclaim() callbacks in a
67344ec2b63SKonstantin Belousov  * context that is safe.
674df8bae1dSRodney W. Grimes  */
6752b14f991SJulian Elischer static void
6765df87b21SJeff Roberson kmem_reclaim(vmem_t *vm, int flags)
677df8bae1dSRodney W. Grimes {
6788a58a9f6SJohn Dyson 
67944ec2b63SKonstantin Belousov 	uma_reclaim_wakeup();
6805df87b21SJeff Roberson 	pagedaemon_wakeup();
6815df87b21SJeff Roberson }
6825df87b21SJeff Roberson 
683f9d498adSDimitry Andric #ifndef __sparc64__
684c70af487SAlan Cox CTASSERT(VM_KMEM_SIZE_SCALE >= 1);
685f9d498adSDimitry Andric #endif
686c70af487SAlan Cox 
6875df87b21SJeff Roberson /*
688c70af487SAlan Cox  * Initialize the kernel memory (kmem) arena.
6895df87b21SJeff Roberson  */
6905df87b21SJeff Roberson void
6915df87b21SJeff Roberson kmeminit(void)
6925df87b21SJeff Roberson {
693af3b2549SHans Petter Selasky 	u_long mem_size;
694af3b2549SHans Petter Selasky 	u_long tmp;
69569ef67f9SJason Evans 
696af3b2549SHans Petter Selasky #ifdef VM_KMEM_SIZE
697af3b2549SHans Petter Selasky 	if (vm_kmem_size == 0)
698af3b2549SHans Petter Selasky 		vm_kmem_size = VM_KMEM_SIZE;
699af3b2549SHans Petter Selasky #endif
700af3b2549SHans Petter Selasky #ifdef VM_KMEM_SIZE_MIN
701af3b2549SHans Petter Selasky 	if (vm_kmem_size_min == 0)
702af3b2549SHans Petter Selasky 		vm_kmem_size_min = VM_KMEM_SIZE_MIN;
703af3b2549SHans Petter Selasky #endif
704af3b2549SHans Petter Selasky #ifdef VM_KMEM_SIZE_MAX
705af3b2549SHans Petter Selasky 	if (vm_kmem_size_max == 0)
706af3b2549SHans Petter Selasky 		vm_kmem_size_max = VM_KMEM_SIZE_MAX;
707af3b2549SHans Petter Selasky #endif
7088a58a9f6SJohn Dyson 	/*
709c70af487SAlan Cox 	 * Calculate the amount of kernel virtual address (KVA) space that is
710c70af487SAlan Cox 	 * preallocated to the kmem arena.  In order to support a wide range
711c70af487SAlan Cox 	 * of machines, it is a function of the physical memory size,
712c70af487SAlan Cox 	 * specifically,
7138a58a9f6SJohn Dyson 	 *
714c70af487SAlan Cox 	 *	min(max(physical memory size / VM_KMEM_SIZE_SCALE,
715c70af487SAlan Cox 	 *	    VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
716c70af487SAlan Cox 	 *
717c70af487SAlan Cox 	 * Every architecture must define an integral value for
718c70af487SAlan Cox 	 * VM_KMEM_SIZE_SCALE.  However, the definitions of VM_KMEM_SIZE_MIN
719c70af487SAlan Cox 	 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
720c70af487SAlan Cox 	 * ceiling on this preallocation, are optional.  Typically,
721c70af487SAlan Cox 	 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
722c70af487SAlan Cox 	 * a given architecture.
7238a58a9f6SJohn Dyson 	 */
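	/*
	 * Worked example with hypothetical values: on a machine with 4GB
	 * of RAM and VM_KMEM_SIZE_SCALE == 2, the default preallocation
	 * is min(max(4GB / 2, VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX), i.e.
	 * 2GB clamped by whatever floor and ceiling the architecture
	 * defines.
	 */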
72444f1c916SBryan Drewery 	mem_size = vm_cnt.v_page_count;
7257c51714eSSean Bruno 	if (mem_size <= 32768) /* delphij XXX 128MB */
7267c51714eSSean Bruno 		kmem_zmax = PAGE_SIZE;
7278a58a9f6SJohn Dyson 
728c70af487SAlan Cox 	if (vm_kmem_size_scale < 1)
729c70af487SAlan Cox 		vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
730c70af487SAlan Cox 
731af3b2549SHans Petter Selasky 	/*
732af3b2549SHans Petter Selasky 	 * Check if we should use defaults for the "vm_kmem_size"
733af3b2549SHans Petter Selasky 	 * variable:
734af3b2549SHans Petter Selasky 	 */
735af3b2549SHans Petter Selasky 	if (vm_kmem_size == 0) {
736479439b4SDag-Erling Smørgrav 		vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;
7378a58a9f6SJohn Dyson 
738c70af487SAlan Cox 		if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
7390e5179e4SStephane E. Potvin 			vm_kmem_size = vm_kmem_size_min;
740479439b4SDag-Erling Smørgrav 		if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
741479439b4SDag-Erling Smørgrav 			vm_kmem_size = vm_kmem_size_max;
742af3b2549SHans Petter Selasky 	}
7438a58a9f6SJohn Dyson 
74427b8623fSDavid Greenman 	/*
745af3b2549SHans Petter Selasky 	 * The amount of KVA space that is preallocated to the
746c70af487SAlan Cox 	 * kmem arena can be set statically at compile-time or manually
747c70af487SAlan Cox 	 * through the kernel environment.  However, it is still limited to
748c70af487SAlan Cox 	 * twice the physical memory size, which has been sufficient to handle
749c70af487SAlan Cox 	 * the most severe cases of external fragmentation in the kmem arena.
75027b8623fSDavid Greenman 	 */
751c749c003SAlan Cox 	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
752c749c003SAlan Cox 		vm_kmem_size = 2 * mem_size * PAGE_SIZE;
7538a58a9f6SJohn Dyson 
754e137643eSOlivier Houchard 	vm_kmem_size = round_page(vm_kmem_size);
755e3813573SMatthew D Fleming #ifdef DEBUG_MEMGUARD
756f806cdcfSMatthew D Fleming 	tmp = memguard_fudge(vm_kmem_size, kernel_map);
757e3813573SMatthew D Fleming #else
758e3813573SMatthew D Fleming 	tmp = vm_kmem_size;
759e3813573SMatthew D Fleming #endif
7605df87b21SJeff Roberson 	vmem_init(kmem_arena, "kmem arena", kva_alloc(tmp), tmp, PAGE_SIZE,
76199de9af2SJeff Roberson 	    0, 0);
7625df87b21SJeff Roberson 	vmem_set_reclaim(kmem_arena, kmem_reclaim);
7638355f576SJeff Roberson 
764e4eb384bSBosko Milekic #ifdef DEBUG_MEMGUARD
765e4eb384bSBosko Milekic 	/*
766e4eb384bSBosko Milekic 	 * Initialize MemGuard if support compiled in.  MemGuard is a
767e4eb384bSBosko Milekic 	 * replacement allocator used for detecting tamper-after-free
768e4eb384bSBosko Milekic 	 * scenarios as they occur.  It is only used for debugging.
769e4eb384bSBosko Milekic 	 */
7705df87b21SJeff Roberson 	memguard_init(kmem_arena);
771e4eb384bSBosko Milekic #endif
7725df87b21SJeff Roberson }
7735df87b21SJeff Roberson 
7745df87b21SJeff Roberson /*
7755df87b21SJeff Roberson  * Initialize the kernel memory allocator
7765df87b21SJeff Roberson  */
7775df87b21SJeff Roberson /* ARGSUSED*/
7785df87b21SJeff Roberson static void
7795df87b21SJeff Roberson mallocinit(void *dummy)
7805df87b21SJeff Roberson {
7815df87b21SJeff Roberson 	int i;
7825df87b21SJeff Roberson 	uint8_t indx;
7835df87b21SJeff Roberson 
7845df87b21SJeff Roberson 	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
7855df87b21SJeff Roberson 
7865df87b21SJeff Roberson 	kmeminit();
787e4eb384bSBosko Milekic 
78899571dc3SJeff Roberson 	uma_startup2();
7898355f576SJeff Roberson 
7907001d850SXin LI 	if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
7917001d850SXin LI 		kmem_zmax = KMEM_ZMAX;
7927001d850SXin LI 
79363a7e0a3SRobert Watson 	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
79463a7e0a3SRobert Watson #ifdef INVARIANTS
79563a7e0a3SRobert Watson 	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
79663a7e0a3SRobert Watson #else
79763a7e0a3SRobert Watson 	    NULL, NULL, NULL, NULL,
79863a7e0a3SRobert Watson #endif
79963a7e0a3SRobert Watson 	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
8006f267175SJeff Roberson 	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
8016f267175SJeff Roberson 		int size = kmemzones[indx].kz_size;
8026f267175SJeff Roberson 		char *name = kmemzones[indx].kz_name;
803d7854da1SMatthew D Fleming 		int subzone;
8048355f576SJeff Roberson 
805d7854da1SMatthew D Fleming 		for (subzone = 0; subzone < numzones; subzone++) {
806d7854da1SMatthew D Fleming 			kmemzones[indx].kz_zone[subzone] =
807d7854da1SMatthew D Fleming 			    uma_zcreate(name, size,
8088efc4effSJeff Roberson #ifdef INVARIANTS
8098f70816cSJeff Roberson 			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
8108efc4effSJeff Roberson #else
8118efc4effSJeff Roberson 			    NULL, NULL, NULL, NULL,
8128efc4effSJeff Roberson #endif
8138efc4effSJeff Roberson 			    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
814d7854da1SMatthew D Fleming 		}
8158355f576SJeff Roberson 		for (;i <= size; i+= KMEM_ZBASE)
8166f267175SJeff Roberson 			kmemsize[i >> KMEM_ZSHIFT] = indx;
8178355f576SJeff Roberson 
818df8bae1dSRodney W. Grimes 	}
819254c6cb3SPoul-Henning Kamp }
820af3b2549SHans Petter Selasky SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);
821254c6cb3SPoul-Henning Kamp 
822db669378SPeter Wemm void
82387efd4d5SRobert Watson malloc_init(void *data)
824254c6cb3SPoul-Henning Kamp {
82563a7e0a3SRobert Watson 	struct malloc_type_internal *mtip;
82663a7e0a3SRobert Watson 	struct malloc_type *mtp;
82763a7e0a3SRobert Watson 
82844f1c916SBryan Drewery 	KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));
82963a7e0a3SRobert Watson 
83063a7e0a3SRobert Watson 	mtp = data;
831f121baaaSBrian Somers 	if (mtp->ks_magic != M_MAGIC)
832f121baaaSBrian Somers 		panic("malloc_init: bad malloc type magic");
833bb1c7df8SRobert Watson 
83463a7e0a3SRobert Watson 	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
83563a7e0a3SRobert Watson 	mtp->ks_handle = mtip;
836d7854da1SMatthew D Fleming 	mtip->mti_zone = mtp_get_subzone(mtp->ks_shortdesc);
837254c6cb3SPoul-Henning Kamp 
8386f267175SJeff Roberson 	mtx_lock(&malloc_mtx);
83963a7e0a3SRobert Watson 	mtp->ks_next = kmemstatistics;
84063a7e0a3SRobert Watson 	kmemstatistics = mtp;
841cd814b26SRobert Watson 	kmemcount++;
8426f267175SJeff Roberson 	mtx_unlock(&malloc_mtx);
843df8bae1dSRodney W. Grimes }
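/*
 * malloc_init() is rarely called directly: MALLOC_DEFINE() in
 * sys/malloc.h defines the struct malloc_type and (roughly sketched;
 * see malloc.h for the exact expansion) registers it through
 * SYSINIT/SYSUNINIT hooks that invoke malloc_init() and
 * malloc_uninit() at load and unload time.
 */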
844db669378SPeter Wemm 
845db669378SPeter Wemm void
84687efd4d5SRobert Watson malloc_uninit(void *data)
847db669378SPeter Wemm {
84863a7e0a3SRobert Watson 	struct malloc_type_internal *mtip;
8492a143d5bSPawel Jakub Dawidek 	struct malloc_type_stats *mtsp;
85063a7e0a3SRobert Watson 	struct malloc_type *mtp, *temp;
85145d48bdaSPaul Saab 	uma_slab_t slab;
8522a143d5bSPawel Jakub Dawidek 	long temp_allocs, temp_bytes;
8532a143d5bSPawel Jakub Dawidek 	int i;
854db669378SPeter Wemm 
85563a7e0a3SRobert Watson 	mtp = data;
856bb1c7df8SRobert Watson 	KASSERT(mtp->ks_magic == M_MAGIC,
857bb1c7df8SRobert Watson 	    ("malloc_uninit: bad malloc type magic"));
85863a7e0a3SRobert Watson 	KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL"));
859bb1c7df8SRobert Watson 
8606f267175SJeff Roberson 	mtx_lock(&malloc_mtx);
86163a7e0a3SRobert Watson 	mtip = mtp->ks_handle;
86263a7e0a3SRobert Watson 	mtp->ks_handle = NULL;
86363a7e0a3SRobert Watson 	if (mtp != kmemstatistics) {
86463a7e0a3SRobert Watson 		for (temp = kmemstatistics; temp != NULL;
86563a7e0a3SRobert Watson 		    temp = temp->ks_next) {
866f121baaaSBrian Somers 			if (temp->ks_next == mtp) {
86763a7e0a3SRobert Watson 				temp->ks_next = mtp->ks_next;
868f121baaaSBrian Somers 				break;
869db669378SPeter Wemm 			}
870f121baaaSBrian Somers 		}
871f121baaaSBrian Somers 		KASSERT(temp,
872f121baaaSBrian Somers 		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
87363a7e0a3SRobert Watson 	} else
87463a7e0a3SRobert Watson 		kmemstatistics = mtp->ks_next;
875cd814b26SRobert Watson 	kmemcount--;
8766f267175SJeff Roberson 	mtx_unlock(&malloc_mtx);
8772a143d5bSPawel Jakub Dawidek 
8782a143d5bSPawel Jakub Dawidek 	/*
8792a143d5bSPawel Jakub Dawidek 	 * Look for memory leaks.
8802a143d5bSPawel Jakub Dawidek 	 */
8812a143d5bSPawel Jakub Dawidek 	temp_allocs = temp_bytes = 0;
8822a143d5bSPawel Jakub Dawidek 	for (i = 0; i < MAXCPU; i++) {
8832a143d5bSPawel Jakub Dawidek 		mtsp = &mtip->mti_stats[i];
8842a143d5bSPawel Jakub Dawidek 		temp_allocs += mtsp->mts_numallocs;
8852a143d5bSPawel Jakub Dawidek 		temp_allocs -= mtsp->mts_numfrees;
8862a143d5bSPawel Jakub Dawidek 		temp_bytes += mtsp->mts_memalloced;
8872a143d5bSPawel Jakub Dawidek 		temp_bytes -= mtsp->mts_memfreed;
8882a143d5bSPawel Jakub Dawidek 	}
8892a143d5bSPawel Jakub Dawidek 	if (temp_allocs > 0 || temp_bytes > 0) {
8902a143d5bSPawel Jakub Dawidek 		printf("Warning: memory type %s leaked memory on destroy "
8912a143d5bSPawel Jakub Dawidek 		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
8922a143d5bSPawel Jakub Dawidek 		    temp_allocs, temp_bytes);
8932a143d5bSPawel Jakub Dawidek 	}
8942a143d5bSPawel Jakub Dawidek 
89545d48bdaSPaul Saab 	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
89645d48bdaSPaul Saab 	uma_zfree_arg(mt_zone, mtip, slab);
897db669378SPeter Wemm }
8986f267175SJeff Roberson 
899d362c40dSPawel Jakub Dawidek struct malloc_type *
900d362c40dSPawel Jakub Dawidek malloc_desc2type(const char *desc)
901d362c40dSPawel Jakub Dawidek {
902d362c40dSPawel Jakub Dawidek 	struct malloc_type *mtp;
903d362c40dSPawel Jakub Dawidek 
904d362c40dSPawel Jakub Dawidek 	mtx_assert(&malloc_mtx, MA_OWNED);
905d362c40dSPawel Jakub Dawidek 	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
906d362c40dSPawel Jakub Dawidek 		if (strcmp(mtp->ks_shortdesc, desc) == 0)
907d362c40dSPawel Jakub Dawidek 			return (mtp);
908d362c40dSPawel Jakub Dawidek 	}
909d362c40dSPawel Jakub Dawidek 	return (NULL);
910d362c40dSPawel Jakub Dawidek }
911d362c40dSPawel Jakub Dawidek 
9126f267175SJeff Roberson static int
913cd814b26SRobert Watson sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
914cd814b26SRobert Watson {
915cd814b26SRobert Watson 	struct malloc_type_stream_header mtsh;
916cd814b26SRobert Watson 	struct malloc_type_internal *mtip;
917cd814b26SRobert Watson 	struct malloc_type_header mth;
918cd814b26SRobert Watson 	struct malloc_type *mtp;
9194e657159SMatthew D Fleming 	int error, i;
920cd814b26SRobert Watson 	struct sbuf sbuf;
921cd814b26SRobert Watson 
92200f0e671SMatthew D Fleming 	error = sysctl_wire_old_buffer(req, 0);
92300f0e671SMatthew D Fleming 	if (error != 0)
92400f0e671SMatthew D Fleming 		return (error);
9254e657159SMatthew D Fleming 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
9261eafc078SIan Lepore 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
927cd814b26SRobert Watson 	mtx_lock(&malloc_mtx);
928cd814b26SRobert Watson 
929cd814b26SRobert Watson 	/*
930cd814b26SRobert Watson 	 * Insert stream header.
931cd814b26SRobert Watson 	 */
932cd814b26SRobert Watson 	bzero(&mtsh, sizeof(mtsh));
933cd814b26SRobert Watson 	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
934cd814b26SRobert Watson 	mtsh.mtsh_maxcpus = MAXCPU;
935cd814b26SRobert Watson 	mtsh.mtsh_count = kmemcount;
9364e657159SMatthew D Fleming 	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));
937cd814b26SRobert Watson 
938cd814b26SRobert Watson 	/*
939cd814b26SRobert Watson 	 * Insert alternating sequence of type headers and type statistics.
940cd814b26SRobert Watson 	 */
941cd814b26SRobert Watson 	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
942cd814b26SRobert Watson 		mtip = (struct malloc_type_internal *)mtp->ks_handle;
943cd814b26SRobert Watson 
944cd814b26SRobert Watson 		/*
945cd814b26SRobert Watson 		 * Insert type header.
946cd814b26SRobert Watson 		 */
947cd814b26SRobert Watson 		bzero(&mth, sizeof(mth));
948cd814b26SRobert Watson 		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
9494e657159SMatthew D Fleming 		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));
950cd814b26SRobert Watson 
951cd814b26SRobert Watson 		/*
952cd814b26SRobert Watson 		 * Insert type statistics for each CPU.
953cd814b26SRobert Watson 		 */
954cd814b26SRobert Watson 		for (i = 0; i < MAXCPU; i++) {
9554e657159SMatthew D Fleming 			(void)sbuf_bcat(&sbuf, &mtip->mti_stats[i],
9564e657159SMatthew D Fleming 			    sizeof(mtip->mti_stats[i]));
957cd814b26SRobert Watson 		}
958cd814b26SRobert Watson 	}
959cd814b26SRobert Watson 	mtx_unlock(&malloc_mtx);
9604e657159SMatthew D Fleming 	error = sbuf_finish(&sbuf);
961cd814b26SRobert Watson 	sbuf_delete(&sbuf);
962cd814b26SRobert Watson 	return (error);
963cd814b26SRobert Watson }
964cd814b26SRobert Watson 
965cd814b26SRobert Watson SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
966cd814b26SRobert Watson     0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
967cd814b26SRobert Watson     "Return malloc types");
968cd814b26SRobert Watson 
969cd814b26SRobert Watson SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
970cd814b26SRobert Watson     "Count of kernel malloc types");
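/*
 * Userland consumers such as vmstat(8) ("vmstat -m", built on
 * libmemstat(3)) decode this stream: one malloc_type_stream_header,
 * then for each type a malloc_type_header followed by MAXCPU
 * malloc_type_stats records.
 */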
971cd814b26SRobert Watson 
97291dd776cSJohn Birrell void
97391dd776cSJohn Birrell malloc_type_list(malloc_type_list_func_t *func, void *arg)
97491dd776cSJohn Birrell {
97591dd776cSJohn Birrell 	struct malloc_type *mtp, **bufmtp;
97691dd776cSJohn Birrell 	int count, i;
97791dd776cSJohn Birrell 	size_t buflen;
97891dd776cSJohn Birrell 
97991dd776cSJohn Birrell 	mtx_lock(&malloc_mtx);
98091dd776cSJohn Birrell restart:
98191dd776cSJohn Birrell 	mtx_assert(&malloc_mtx, MA_OWNED);
98291dd776cSJohn Birrell 	count = kmemcount;
98391dd776cSJohn Birrell 	mtx_unlock(&malloc_mtx);
98491dd776cSJohn Birrell 
98591dd776cSJohn Birrell 	buflen = sizeof(struct malloc_type *) * count;
98691dd776cSJohn Birrell 	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);
98791dd776cSJohn Birrell 
98891dd776cSJohn Birrell 	mtx_lock(&malloc_mtx);
98991dd776cSJohn Birrell 
99091dd776cSJohn Birrell 	if (count < kmemcount) {
99191dd776cSJohn Birrell 		free(bufmtp, M_TEMP);
99291dd776cSJohn Birrell 		goto restart;
99391dd776cSJohn Birrell 	}
99491dd776cSJohn Birrell 
99591dd776cSJohn Birrell 	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
99691dd776cSJohn Birrell 		bufmtp[i] = mtp;
99791dd776cSJohn Birrell 
99891dd776cSJohn Birrell 	mtx_unlock(&malloc_mtx);
99991dd776cSJohn Birrell 
100091dd776cSJohn Birrell 	for (i = 0; i < count; i++)
100191dd776cSJohn Birrell 		(func)(bufmtp[i], arg);
100291dd776cSJohn Birrell 
100391dd776cSJohn Birrell 	free(bufmtp, M_TEMP);
100491dd776cSJohn Birrell }
100591dd776cSJohn Birrell 
1006909ed16cSRobert Watson #ifdef DDB
1007909ed16cSRobert Watson DB_SHOW_COMMAND(malloc, db_show_malloc)
1008909ed16cSRobert Watson {
1009909ed16cSRobert Watson 	struct malloc_type_internal *mtip;
1010909ed16cSRobert Watson 	struct malloc_type *mtp;
101160ae52f7SEd Schouten 	uint64_t allocs, frees;
101260ae52f7SEd Schouten 	uint64_t alloced, freed;
1013909ed16cSRobert Watson 	int i;
1014909ed16cSRobert Watson 
101524076d13SRobert Watson 	db_printf("%18s %12s  %12s %12s\n", "Type", "InUse", "MemUse",
101624076d13SRobert Watson 	    "Requests");
1017909ed16cSRobert Watson 	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1018909ed16cSRobert Watson 		mtip = (struct malloc_type_internal *)mtp->ks_handle;
1019909ed16cSRobert Watson 		allocs = 0;
1020909ed16cSRobert Watson 		frees = 0;
102124076d13SRobert Watson 		alloced = 0;
102224076d13SRobert Watson 		freed = 0;
1023909ed16cSRobert Watson 		for (i = 0; i < MAXCPU; i++) {
1024909ed16cSRobert Watson 			allocs += mtip->mti_stats[i].mts_numallocs;
1025909ed16cSRobert Watson 			frees += mtip->mti_stats[i].mts_numfrees;
102624076d13SRobert Watson 			alloced += mtip->mti_stats[i].mts_memalloced;
102724076d13SRobert Watson 			freed += mtip->mti_stats[i].mts_memfreed;
1028909ed16cSRobert Watson 		}
102924076d13SRobert Watson 		db_printf("%18s %12ju %12juK %12ju\n",
103024076d13SRobert Watson 		    mtp->ks_shortdesc, allocs - frees,
103124076d13SRobert Watson 		    (alloced - freed + 1023) / 1024, allocs);
1032687c94aaSJohn Baldwin 		if (db_pager_quit)
1033687c94aaSJohn Baldwin 			break;
1034909ed16cSRobert Watson 	}
1035909ed16cSRobert Watson }
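/*
 * Invoked from the ddb(4) prompt as "show malloc"; the columns printed
 * above ("Type", "InUse", "MemUse", "Requests") summarize the per-CPU
 * statistics aggregated across all CPUs.
 */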
1036d7854da1SMatthew D Fleming 
1037d7854da1SMatthew D Fleming #if MALLOC_DEBUG_MAXZONES > 1
1038d7854da1SMatthew D Fleming DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
1039d7854da1SMatthew D Fleming {
1040d7854da1SMatthew D Fleming 	struct malloc_type_internal *mtip;
1041d7854da1SMatthew D Fleming 	struct malloc_type *mtp;
1042d7854da1SMatthew D Fleming 	u_int subzone;
1043d7854da1SMatthew D Fleming 
1044d7854da1SMatthew D Fleming 	if (!have_addr) {
1045d7854da1SMatthew D Fleming 		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
1046d7854da1SMatthew D Fleming 		return;
1047d7854da1SMatthew D Fleming 	}
1048d7854da1SMatthew D Fleming 	mtp = (void *)addr;
1049d7854da1SMatthew D Fleming 	if (mtp->ks_magic != M_MAGIC) {
1050d7854da1SMatthew D Fleming 		db_printf("Magic %lx does not match expected %x\n",
1051d7854da1SMatthew D Fleming 		    mtp->ks_magic, M_MAGIC);
1052d7854da1SMatthew D Fleming 		return;
1053d7854da1SMatthew D Fleming 	}
1054d7854da1SMatthew D Fleming 
1055d7854da1SMatthew D Fleming 	mtip = mtp->ks_handle;
1056d7854da1SMatthew D Fleming 	subzone = mtip->mti_zone;
1057d7854da1SMatthew D Fleming 
1058d7854da1SMatthew D Fleming 	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1059d7854da1SMatthew D Fleming 		mtip = mtp->ks_handle;
1060d7854da1SMatthew D Fleming 		if (mtip->mti_zone != subzone)
1061d7854da1SMatthew D Fleming 			continue;
1062d7854da1SMatthew D Fleming 		db_printf("%s\n", mtp->ks_shortdesc);
1063687c94aaSJohn Baldwin 		if (db_pager_quit)
1064687c94aaSJohn Baldwin 			break;
1065d7854da1SMatthew D Fleming 	}
1066d7854da1SMatthew D Fleming }
1067d7854da1SMatthew D Fleming #endif /* MALLOC_DEBUG_MAXZONES > 1 */
1068d7854da1SMatthew D Fleming #endif /* DDB */
1069909ed16cSRobert Watson 
10705e914b96SJeff Roberson #ifdef MALLOC_PROFILE
10715e914b96SJeff Roberson 
10725e914b96SJeff Roberson static int
10735e914b96SJeff Roberson sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
10745e914b96SJeff Roberson {
107563a7e0a3SRobert Watson 	struct sbuf sbuf;
10765e914b96SJeff Roberson 	uint64_t count;
10775e914b96SJeff Roberson 	uint64_t waste;
10785e914b96SJeff Roberson 	uint64_t mem;
10795e914b96SJeff Roberson 	int error;
10805e914b96SJeff Roberson 	int rsize;
10815e914b96SJeff Roberson 	int size;
10825e914b96SJeff Roberson 	int i;
10835e914b96SJeff Roberson 
10845e914b96SJeff Roberson 	waste = 0;
10855e914b96SJeff Roberson 	mem = 0;
10865e914b96SJeff Roberson 
108700f0e671SMatthew D Fleming 	error = sysctl_wire_old_buffer(req, 0);
108800f0e671SMatthew D Fleming 	if (error != 0)
108900f0e671SMatthew D Fleming 		return (error);
10904e657159SMatthew D Fleming 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
109163a7e0a3SRobert Watson 	sbuf_printf(&sbuf,
10925e914b96SJeff Roberson 	    "\n  Size                    Requests  Real Size\n");
10935e914b96SJeff Roberson 	for (i = 0; i < KMEM_ZSIZE; i++) {
10945e914b96SJeff Roberson 		size = i << KMEM_ZSHIFT;
10955e914b96SJeff Roberson 		rsize = kmemzones[kmemsize[i]].kz_size;
10965e914b96SJeff Roberson 		count = (long long unsigned)krequests[i];
10975e914b96SJeff Roberson 
109863a7e0a3SRobert Watson 		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
109963a7e0a3SRobert Watson 		    (unsigned long long)count, rsize);
11005e914b96SJeff Roberson 
11015e914b96SJeff Roberson 		if ((rsize * count) > (size * count))
11025e914b96SJeff Roberson 			waste += (rsize * count) - (size * count);
11035e914b96SJeff Roberson 		mem += (rsize * count);
11045e914b96SJeff Roberson 	}
110563a7e0a3SRobert Watson 	sbuf_printf(&sbuf,
11065e914b96SJeff Roberson 	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
11075e914b96SJeff Roberson 	    (unsigned long long)mem, (unsigned long long)waste);
11084e657159SMatthew D Fleming 	error = sbuf_finish(&sbuf);
110963a7e0a3SRobert Watson 	sbuf_delete(&sbuf);
11105e914b96SJeff Roberson 	return (error);
11115e914b96SJeff Roberson }
11125e914b96SJeff Roberson 
11135e914b96SJeff Roberson SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
11145e914b96SJeff Roberson     NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
11155e914b96SJeff Roberson #endif /* MALLOC_PROFILE */
1116