xref: /freebsd/sys/kern/kern_malloc.c (revision c0df224ba5b1fe6257dd197f8298bf59fb6e520f)
/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
 */
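
/*
 * Illustrative usage sketch (not part of this file): how a typical
 * consumer of this allocator declares a memory type and allocates from
 * it.  M_EXAMPLE, struct example, and the helper names are hypothetical.
 *
 *	#include <sys/param.h>
 *	#include <sys/kernel.h>
 *	#include <sys/malloc.h>
 *
 *	MALLOC_DEFINE(M_EXAMPLE, "example", "example structures");
 *
 *	struct example {
 *		int	e_value;
 *	};
 *
 *	static struct example *
 *	example_alloc(void)
 *	{
 *		// M_WAITOK may sleep and never returns NULL.
 *		return (malloc(sizeof(struct example), M_EXAMPLE,
 *		    M_WAITOK | M_ZERO));
 *	}
 *
 *	static void
 *	example_destroy(struct example *e)
 *	{
 *		free(e, M_EXAMPLE);	// free(NULL, ...) is a no-op.
 *	}
 */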

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/vmem.h>
#ifdef EPOCH_TRACE
#include <sys/epoch.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

bool	__read_frequently			dtrace_malloc_enabled;
dtrace_malloc_probe_func_t __read_mostly	dtrace_malloc_probe;
#endif

#if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) ||		\
    defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
#define	MALLOC_DEBUG	1
#endif

#if defined(KASAN) || defined(DEBUG_REDZONE)
#define	DEBUG_REDZONE_ARG_DEF	, unsigned long osize
#define	DEBUG_REDZONE_ARG	, osize
#else
#define	DEBUG_REDZONE_ARG_DEF
#define	DEBUG_REDZONE_ARG
#endif

typedef	enum {
	SLAB_COOKIE_SLAB_PTR		= 0x0,
	SLAB_COOKIE_MALLOC_LARGE	= 0x1,
	SLAB_COOKIE_CONTIG_MALLOC	= 0x2,
} slab_cookie_t;
#define	SLAB_COOKIE_MASK		0x3
#define	SLAB_COOKIE_SHIFT		2
#define	GET_SLAB_COOKIE(_slab)						\
    ((slab_cookie_t)(uintptr_t)(_slab) & SLAB_COOKIE_MASK)
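
/*
 * Illustrative sketch (not part of this file): how the low bits of the
 * per-page "slab" pointer double as a type tag.  A real slab pointer is
 * at least 4-byte aligned, so its two low bits are zero
 * (SLAB_COOKIE_SLAB_PTR); large and contig allocations instead store
 * their size shifted left by SLAB_COOKIE_SHIFT with the cookie in the
 * low bits.  For example, a 16384-byte malloc_large allocation is
 * recorded as:
 *
 *	void *cookie = (void *)((16384 << SLAB_COOKIE_SHIFT) |
 *	    SLAB_COOKIE_MALLOC_LARGE);
 *	// GET_SLAB_COOKIE(cookie) == SLAB_COOKIE_MALLOC_LARGE
 *	// (uintptr_t)cookie >> SLAB_COOKIE_SHIFT == 16384
 */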

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
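
/*
 * Worked example (illustrative): with the default REALLOC_FRACTION of 1,
 * shrinking a 1024-byte allocation with realloc(p, 512, ...) satisfies
 * newsize <= oldsize / 2, so a new 512-byte block is allocated and the
 * contents are copied, while realloc(p, 513, ...) reuses the old block.
 */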

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

static struct malloc_type *kmemstatistics;
static int kmemcount;

#define KMEM_ZSHIFT	4
#define KMEM_ZBASE	16
#define KMEM_ZMASK	(KMEM_ZBASE - 1)

#define KMEM_ZMAX	65536
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];
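
/*
 * Illustrative sketch (not part of this file): kmemsize[] maps a request
 * size, rounded up to a multiple of KMEM_ZBASE, to an index into
 * kmemzones[].  For a hypothetical 100-byte request:
 *
 *	size_t size = 100;
 *	size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;	// rounds to 112
 *	int indx = kmemsize[size >> KMEM_ZSHIFT];	// kmemsize[7]
 *	// kmemzones[indx] is the malloc-128 bucket, so 128 bytes are used.
 */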

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * Warning: the layout of the struct is duplicated in libmemstat for KVM support.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	const char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "malloc-16", },
	{32, "malloc-32", },
	{64, "malloc-64", },
	{128, "malloc-128", },
	{256, "malloc-256", },
	{384, "malloc-384", },
	{512, "malloc-512", },
	{1024, "malloc-1024", },
	{2048, "malloc-2048", },
	{4096, "malloc-4096", },
	{8192, "malloc-8192", },
	{16384, "malloc-16384", },
	{32768, "malloc-32768", },
	{65536, "malloc-65536", },
	{0, NULL},
};

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long kmem_zmax = KMEM_ZMAX;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
    "Maximum allocation size that malloc(9) would use UMA as backend");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Free space in kmem");

static SYSCTL_NODE(_vm, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Malloc information");

static u_int vm_malloc_zone_count = nitems(kmemzones);
SYSCTL_UINT(_vm_malloc, OID_AUTO, zone_count,
    CTLFLAG_RD, &vm_malloc_zone_count, 0,
    "Number of malloc zones");

static int sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm_malloc, OID_AUTO, zone_sizes,
    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_malloc_zone_sizes, "S", "Zone sizes used by malloc");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel malloc debugging options");
#endif

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = uma_size();
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size, limit;

	/* The sysctl is unsigned, implement as a saturation value. */
	size = uma_size();
	limit = uma_limit();
	if (size > limit)
		size = 0;
	else
		size = limit - size;
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS)
{
	int sizes[nitems(kmemzones)];
	int i;

	for (i = 0; i < nitems(kmemzones); i++) {
		sizes[i] = kmemzones[i].kz_size;
	}

	return (SYSCTL_OUT(req, &sizes, sizeof(sizes)));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
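
/*
 * Illustrative configuration sketch (not from this file): subzone
 * separation takes effect only when the kernel is built with, e.g.,
 * "options MALLOC_DEBUG_MAXZONES=8", and the number of zones actually
 * used can be lowered at boot via the loader tunable
 * "debug.malloc.numzones".
 */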
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones",
	    &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");

static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;
	const char *desc;
	size_t len;
	u_int val;

	mtip = &mtp->ks_mti;
	desc = mtp->ks_shortdesc;
	if (desc == NULL || (len = strlen(desc)) == 0)
		val = 0;
	else
		val = desc[zone_offset % len];
	mtip->mti_zone = (val % numzones);
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = &mtp->ks_mti;

	KASSERT(mtip->mti_zone < numzones,
	    ("mti_zone %u out of range %d",
	    mtip->mti_zone, numzones));
	return (mtip->mti_zone);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = &mtp->ks_mti;
	mtip->mti_zone = 0;
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * size of the bucket used.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = &mtp->ks_mti;
	mtsp = zpcpu_get(mtip->mti_stats);
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * size of the bucket used.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = &mtp->ks_mti;
	mtsp = zpcpu_get(mtip->mti_stats);
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 *	contigmalloc:
 *
 *	Allocate a block of physically contiguous memory.
 *
 *	If M_NOWAIT is set, this routine will not block, and will return NULL
 *	if the allocation fails.
 */
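
/*
 * Illustrative example (not part of this file): a hypothetical driver
 * allocating a 64KB physically contiguous, page-aligned DMA buffer below
 * 4GB, and releasing it again:
 *
 *	void *buf = contigmalloc(65536, M_DEVBUF, M_WAITOK, 0ul,
 *	    0xfffffffful, PAGE_SIZE, 0);
 *	...
 *	free(buf, M_DEVBUF);	// contigfree() is deprecated
 */
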
#define	IS_CONTIG_MALLOC(_slab)						\
    (GET_SLAB_COOKIE(_slab) == SLAB_COOKIE_CONTIG_MALLOC)
#define	CONTIG_MALLOC_SLAB(_size)					\
    ((void *)(((_size) << SLAB_COOKIE_SHIFT) | SLAB_COOKIE_CONTIG_MALLOC))
static inline size_t
contigmalloc_size(uma_slab_t slab)
{
	uintptr_t va;

	KASSERT(IS_CONTIG_MALLOC(slab),
	    ("%s: called on non-contigmalloc allocation: %p", __func__, slab));
	va = (uintptr_t)slab;
	return (va >> SLAB_COOKIE_SHIFT);
}

void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
	    boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL) {
		/* Use low bits unused for slab pointers. */
		vsetzoneslab((uintptr_t)ret, NULL, CONTIG_MALLOC_SLAB(size));
		malloc_type_allocated(type, round_page(size));
	}
	return (ret);
}

void *
contigmalloc_domainset(unsigned long size, struct malloc_type *type,
    struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL) {
		/* Use low bits unused for slab pointers. */
		vsetzoneslab((uintptr_t)ret, NULL, CONTIG_MALLOC_SLAB(size));
		malloc_type_allocated(type, round_page(size));
	}
	return (ret);
}

/*
 *	contigfree (deprecated).
 *
 *	Free a block of memory allocated by contigmalloc.
 *
 *	This routine may not block.
 */
void
contigfree(void *addr, unsigned long size __unused, struct malloc_type *type)
{
	free(addr, type);
}
#undef	IS_CONTIG_MALLOC
#undef	CONTIG_MALLOC_SLAB

#ifdef MALLOC_DEBUG
static int
malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
    int flags)
{
#ifdef INVARIANTS
	int indx;

	KASSERT(mtp->ks_version == M_VERSION, ("malloc: bad malloc type version"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			*vap = NULL;
			return (EJUSTRETURN);
		}
	}
#endif
	if (flags & M_WAITOK) {
		KASSERT(curthread->td_intr_nesting_level == 0,
		   ("malloc(M_WAITOK) in interrupt context"));
		if (__predict_false(!THREAD_CAN_SLEEP())) {
#ifdef EPOCH_TRACE
			epoch_trace_list(curthread);
#endif
			KASSERT(0,
			    ("malloc(M_WAITOK) with sleeping prohibited"));
		}
	}
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("malloc: called with spinlock or critical section held"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, *sizep)) {
		*vap = memguard_alloc(*sizep, flags);
		if (*vap != NULL)
			return (EJUSTRETURN);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	*sizep = redzone_size_ntor(*sizep);
#endif

	return (0);
}
#endif

/*
 * Handle large allocations and frees by using kmem_malloc directly.
 */
#define	IS_MALLOC_LARGE(_slab)						\
    (GET_SLAB_COOKIE(_slab) == SLAB_COOKIE_MALLOC_LARGE)
#define	MALLOC_LARGE_SLAB(_size)					\
    ((void *)(((_size) << SLAB_COOKIE_SHIFT) | SLAB_COOKIE_MALLOC_LARGE))
static inline size_t
malloc_large_size(uma_slab_t slab)
{
	uintptr_t va;

	va = (uintptr_t)slab;
	KASSERT(IS_MALLOC_LARGE(slab),
	    ("%s: called on non-malloc_large allocation: %p", __func__, slab));
	return (va >> SLAB_COOKIE_SHIFT);
}

static caddr_t __noinline
malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
    int flags DEBUG_REDZONE_ARG_DEF)
{
	void *va;

	size = roundup(size, PAGE_SIZE);
	va = kmem_malloc_domainset(policy, size, flags);
	if (va != NULL) {
		/* Use low bits unused for slab pointers. */
		vsetzoneslab((uintptr_t)va, NULL, MALLOC_LARGE_SLAB(size));
		uma_total_inc(size);
	}
	malloc_type_allocated(mtp, va == NULL ? 0 : size);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	} else {
#ifdef DEBUG_REDZONE
		va = redzone_setup(va, osize);
#endif
		kasan_mark(va, osize, size, KASAN_MALLOC_REDZONE);
	}
	return (va);
}

static void
free_large(void *addr, size_t size)
{

	kmem_free(addr, size);
	uma_total_dec(size);
}
#undef	IS_MALLOC_LARGE
#undef	MALLOC_LARGE_SLAB

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block, and will return NULL
 *	if the allocation fails.
 */
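
/*
 * Illustrative example (not part of this file): allocating from a
 * context where sleeping is not allowed, so M_NOWAIT is used and the
 * NULL return must be handled (M_TEMP is a real type; the size is made
 * up):
 *
 *	char *p = malloc(128, M_TEMP, M_NOWAIT | M_ZERO);
 *	if (p == NULL)
 *		return (ENOMEM);
 *	...
 *	free(p, M_TEMP);
 */
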
void *
(malloc)(size_t size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#if defined(DEBUG_REDZONE) || defined(KASAN)
	unsigned long osize = size;
#endif

	MPASS((flags & M_EXEC) == 0);

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (__predict_false(size > kmem_zmax))
		return (malloc_large(size, mtp, DOMAINSET_RR(), flags
		    DEBUG_REDZONE_ARG));

	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
	va = uma_zalloc_arg(zone, zone, flags);
	if (va != NULL) {
		size = zone->uz_size;
		if ((flags & M_ZERO) == 0) {
			kmsan_mark(va, size, KMSAN_STATE_UNINIT);
			kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
		}
	}
	malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
#ifdef KASAN
	if (va != NULL)
		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
#endif
	return ((void *) va);
}

static void *
malloc_domain(size_t *sizep, int *indxp, struct malloc_type *mtp, int domain,
    int flags)
{
	uma_zone_t zone;
	caddr_t va;
	size_t size;
	int indx;

	size = *sizep;
	KASSERT(size <= kmem_zmax && (flags & M_EXEC) == 0,
	    ("malloc_domain: Called with bad flag / size combination"));
	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
	va = uma_zalloc_domain(zone, zone, domain, flags);
	if (va != NULL)
		*sizep = zone->uz_size;
	*indxp = indx;
	return ((void *)va);
}

void *
malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
    int flags)
{
	struct vm_domainset_iter di;
	caddr_t va;
	int domain;
	int indx;
#if defined(KASAN) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

	MPASS((flags & M_EXEC) == 0);

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (__predict_false(size > kmem_zmax))
		return (malloc_large(size, mtp, DOMAINSET_RR(), flags
		    DEBUG_REDZONE_ARG));

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
	do {
		va = malloc_domain(&size, &indx, mtp, domain, flags);
	} while (va == NULL && vm_domainset_iter_policy(&di, &domain) == 0);
	malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
#ifdef KASAN
	if (va != NULL)
		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
#endif
#ifdef KMSAN
	if ((flags & M_ZERO) == 0) {
		kmsan_mark(va, size, KMSAN_STATE_UNINIT);
		kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
	}
#endif
	return (va);
}

/*
 * Allocate an executable area.
 */
void *
malloc_exec(size_t size, struct malloc_type *mtp, int flags)
{

	return (malloc_domainset_exec(size, mtp, DOMAINSET_RR(), flags));
}

void *
malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
    int flags)
{
#if defined(DEBUG_REDZONE) || defined(KASAN)
	unsigned long osize = size;
#endif
#ifdef MALLOC_DEBUG
	caddr_t va;
#endif

	flags |= M_EXEC;

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	return (malloc_large(size, mtp, ds, flags DEBUG_REDZONE_ARG));
}

void *
malloc_aligned(size_t size, size_t align, struct malloc_type *type, int flags)
{
	return (malloc_domainset_aligned(size, align, type, DOMAINSET_RR(),
	    flags));
}

void *
malloc_domainset_aligned(size_t size, size_t align,
    struct malloc_type *mtp, struct domainset *ds, int flags)
{
	void *res;
	size_t asize;

	KASSERT(powerof2(align),
	    ("malloc_domainset_aligned: wrong align %#zx size %#zx",
	    align, size));
	KASSERT(align <= PAGE_SIZE,
	    ("malloc_domainset_aligned: align %#zx (size %#zx) too large",
	    align, size));

	/*
	 * Round the allocation size up to the next power of 2,
	 * because we can only guarantee alignment for
	 * power-of-2-sized allocations.  Further increase the
	 * allocation size to align if the rounded size is less than
	 * align, since malloc zones provide alignment equal to their
	 * size.
	 */
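	/*
	 * Worked example (illustrative, not in the original): a request of
	 * size 100 with align 64 is rounded to asize = 128 (the next power
	 * of 2), while a request of size 8 with align 64 uses asize = 64,
	 * since the malloc-64 zone returns 64-byte-aligned items.
	 */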
	if (size == 0)
		size = 1;
	asize = size <= align ? align : 1UL << flsl(size - 1);

	res = malloc_domainset(asize, mtp, ds, flags);
	KASSERT(res == NULL || ((uintptr_t)res & (align - 1)) == 0,
	    ("malloc_domainset_aligned: result not aligned %p size %#zx "
	    "allocsize %#zx align %#zx", res, size, asize, align));
	return (res);
}

void *
mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray: %zu * %zu overflowed", nmemb, size);

	return (malloc(size * nmemb, type, flags));
}
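
/*
 * Worked example (illustrative): mallocarray() guards against the
 * nmemb * size multiplication overflowing.  On a 64-bit machine a
 * hypothetical request such as
 *
 *	p = mallocarray(SIZE_MAX / 4 + 1, 4, M_TEMP, M_NOWAIT);
 *
 * would wrap the product to a tiny value if computed naively;
 * WOULD_OVERFLOW() catches this and panics instead of silently
 * under-allocating.
 */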

void *
mallocarray_domainset(size_t nmemb, size_t size, struct malloc_type *type,
    struct domainset *ds, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray_domainset: %zu * %zu overflowed", nmemb, size);

	return (malloc_domainset(size * nmemb, type, ds, flags));
}

#if defined(INVARIANTS) && !defined(KASAN)
static void
free_save_type(void *addr, struct malloc_type *mtp, u_long size)
{
	struct malloc_type **mtpp = addr;

	/*
	 * Cache a pointer to the malloc_type that most recently freed
	 * this memory here.  This way we know who is most likely to
	 * have stepped on it later.
	 *
	 * This code assumes that size is a multiple of 8 bytes for
	 * 64-bit machines.
	 */
	mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
	mtpp += (size - sizeof(struct malloc_type *)) /
	    sizeof(struct malloc_type *);
	*mtpp = mtp;
}
#endif

#ifdef MALLOC_DEBUG
static int
free_dbg(void **addrp, struct malloc_type *mtp)
{
	void *addr;

	addr = *addrp;
	KASSERT(mtp->ks_version == M_VERSION, ("free: bad malloc type version"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("free: called with spinlock or critical section held"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return (EJUSTRETURN);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return (EJUSTRETURN);
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	*addrp = redzone_addr_ntor(addr);
#endif

	return (0);
}
#endif

/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_zone_t zone;
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	switch (GET_SLAB_COOKIE(slab)) {
	case __predict_true(SLAB_COOKIE_SLAB_PTR):
		size = zone->uz_size;
#if defined(INVARIANTS) && !defined(KASAN)
		free_save_type(addr, mtp, size);
#endif
		uma_zfree_arg(zone, addr, slab);
		break;
	case SLAB_COOKIE_MALLOC_LARGE:
		size = malloc_large_size(slab);
		free_large(addr, size);
		break;
	case SLAB_COOKIE_CONTIG_MALLOC:
		size = contigmalloc_size(slab);
		kmem_free(addr, size);
		size = round_page(size);
		break;
	default:
		panic("%s: addr %p slab %p with unknown cookie %d", __func__,
		    addr, slab, GET_SLAB_COOKIE(slab));
		/* NOTREACHED */
	}
	malloc_type_freed(mtp, size);
}

/*
 *	zfree:
 *
 *	Zero then free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
zfree(void *addr, struct malloc_type *mtp)
{
	uma_zone_t zone;
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	switch (GET_SLAB_COOKIE(slab)) {
	case __predict_true(SLAB_COOKIE_SLAB_PTR):
		size = zone->uz_size;
#if defined(INVARIANTS) && !defined(KASAN)
		free_save_type(addr, mtp, size);
#endif
		kasan_mark(addr, size, size, 0);
		explicit_bzero(addr, size);
		uma_zfree_arg(zone, addr, slab);
		break;
	case SLAB_COOKIE_MALLOC_LARGE:
		size = malloc_large_size(slab);
		kasan_mark(addr, size, size, 0);
		explicit_bzero(addr, size);
		free_large(addr, size);
		break;
	case SLAB_COOKIE_CONTIG_MALLOC:
		size = round_page(contigmalloc_size(slab));
		explicit_bzero(addr, size);
		kmem_free(addr, size);
		break;
	default:
		panic("%s: addr %p slab %p with unknown cookie %d", __func__,
		    addr, slab, GET_SLAB_COOKIE(slab));
		/* NOTREACHED */
	}
	malloc_type_freed(mtp, size);
}

/*
 *	realloc: change the size of a memory block
 */
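
/*
 * Illustrative example (not part of this file): growing a buffer with
 * reallocf(), defined below, which frees the old block when reallocation
 * fails so the caller cannot leak it:
 *
 *	buf = reallocf(buf, newlen, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);	// the old buf was already freed
 */
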
103444a8ff31SArchie Cobbs void *
1035bd555da9SConrad Meyer realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
103644a8ff31SArchie Cobbs {
1037dbd51c41SJohn Baldwin #ifndef DEBUG_REDZONE
1038584061b4SJeff Roberson 	uma_zone_t zone;
10398355f576SJeff Roberson 	uma_slab_t slab;
1040dbd51c41SJohn Baldwin #endif
104144a8ff31SArchie Cobbs 	unsigned long alloc;
104244a8ff31SArchie Cobbs 	void *newaddr;
104344a8ff31SArchie Cobbs 
1044bdcc2226SMateusz Guzik 	KASSERT(mtp->ks_version == M_VERSION,
1045bdcc2226SMateusz Guzik 	    ("realloc: bad malloc type version"));
1046d9e2e68dSMark Johnston 	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
10471067a2baSJonathan T. Looney 	    ("realloc: called with spinlock or critical section held"));
10481067a2baSJonathan T. Looney 
104944a8ff31SArchie Cobbs 	/* realloc(NULL, ...) is equivalent to malloc(...) */
105044a8ff31SArchie Cobbs 	if (addr == NULL)
105163a7e0a3SRobert Watson 		return (malloc(size, mtp, flags));
105263a7e0a3SRobert Watson 
105363a7e0a3SRobert Watson 	/*
105463a7e0a3SRobert Watson 	 * XXX: Should report free of old memory and alloc of new memory to
105563a7e0a3SRobert Watson 	 * per-CPU stats.
105663a7e0a3SRobert Watson 	 */
105744a8ff31SArchie Cobbs 
1058e4eb384bSBosko Milekic #ifdef DEBUG_MEMGUARD
10596d3ed393SMatthew D Fleming 	if (is_memguard_addr(addr))
10606d3ed393SMatthew D Fleming 		return (memguard_realloc(addr, size, mtp, flags));
1061e4eb384bSBosko Milekic #endif
1062e4eb384bSBosko Milekic 
1063847a2a17SPawel Jakub Dawidek #ifdef DEBUG_REDZONE
1064847a2a17SPawel Jakub Dawidek 	alloc = redzone_get_size(addr);
1065847a2a17SPawel Jakub Dawidek #else
1066584061b4SJeff Roberson 	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
10678355f576SJeff Roberson 
106844a8ff31SArchie Cobbs 	/* Sanity check */
10698355f576SJeff Roberson 	KASSERT(slab != NULL,
107044a8ff31SArchie Cobbs 	    ("realloc: address %p out of range", (void *)addr));
107144a8ff31SArchie Cobbs 
107244a8ff31SArchie Cobbs 	/* Get the size of the original block */
10739e6544ddSBjoern A. Zeeb 	switch (GET_SLAB_COOKIE(slab)) {
10749e6544ddSBjoern A. Zeeb 	case __predict_true(SLAB_COOKIE_SLAB_PTR):
1075584061b4SJeff Roberson 		alloc = zone->uz_size;
10769e6544ddSBjoern A. Zeeb 		break;
10779e6544ddSBjoern A. Zeeb 	case SLAB_COOKIE_MALLOC_LARGE:
10786d6a03d7SJeff Roberson 		alloc = malloc_large_size(slab);
10799e6544ddSBjoern A. Zeeb 		break;
10809e6544ddSBjoern A. Zeeb 	default:
10819e6544ddSBjoern A. Zeeb #ifdef INVARIANTS
10829e6544ddSBjoern A. Zeeb 		panic("%s: called for addr %p of unsupported allocation type; "
10839e6544ddSBjoern A. Zeeb 		    "slab %p cookie %d", __func__, addr, slab, GET_SLAB_COOKIE(slab));
10849e6544ddSBjoern A. Zeeb #endif
10859e6544ddSBjoern A. Zeeb 		return (NULL);
10869e6544ddSBjoern A. Zeeb 	}
108744a8ff31SArchie Cobbs 
108844a8ff31SArchie Cobbs 	/*
	 * Reuse the original block if appropriate: the new request must
	 * still fit and must use more than half of the block (with the
	 * default REALLOC_FRACTION of 1), unless the block is already at
	 * the minimum size.
	 */
108906a53ecfSMark Johnston 	if (size <= alloc &&
109006a53ecfSMark Johnston 	    (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) {
109106a53ecfSMark Johnston 		kasan_mark((void *)addr, size, alloc, KASAN_MALLOC_REDZONE);
109244a8ff31SArchie Cobbs 		return (addr);
109306a53ecfSMark Johnston 	}
1094847a2a17SPawel Jakub Dawidek #endif /* !DEBUG_REDZONE */
109544a8ff31SArchie Cobbs 
109644a8ff31SArchie Cobbs 	/* Allocate a new, bigger (or smaller) block */
109763a7e0a3SRobert Watson 	if ((newaddr = malloc(size, mtp, flags)) == NULL)
109844a8ff31SArchie Cobbs 		return (NULL);
109944a8ff31SArchie Cobbs 
110006a53ecfSMark Johnston 	/*
110106a53ecfSMark Johnston 	 * Copy over original contents.  For KASAN, the redzone must be marked
110206a53ecfSMark Johnston 	 * valid before performing the copy.
110306a53ecfSMark Johnston 	 */
11049a7c2de3SMark Johnston 	kasan_mark(addr, alloc, alloc, 0);
110544a8ff31SArchie Cobbs 	bcopy(addr, newaddr, min(size, alloc));
110663a7e0a3SRobert Watson 	free(addr, mtp);
110744a8ff31SArchie Cobbs 	return (newaddr);
110844a8ff31SArchie Cobbs }
110944a8ff31SArchie Cobbs 
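/*
 * Illustrative only (hypothetical buffer and sizes): realloc(9) does not
 * free the original block on failure, so the result must not overwrite
 * the only remaining reference to it:
 *
 *	nbuf = realloc(buf, nsize, M_TEMP, M_NOWAIT);
 *	if (nbuf == NULL)
 *		...buf is still valid and must eventually be freed...
 *	else
 *		buf = nbuf;
 */
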
111044a8ff31SArchie Cobbs /*
111144a8ff31SArchie Cobbs  *	reallocf: same as realloc() but free memory on failure.
111244a8ff31SArchie Cobbs  */
111344a8ff31SArchie Cobbs void *
1114bd555da9SConrad Meyer reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
111544a8ff31SArchie Cobbs {
111644a8ff31SArchie Cobbs 	void *mem;
111744a8ff31SArchie Cobbs 
111863a7e0a3SRobert Watson 	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
111963a7e0a3SRobert Watson 		free(addr, mtp);
112044a8ff31SArchie Cobbs 	return (mem);
112144a8ff31SArchie Cobbs }
112244a8ff31SArchie Cobbs 
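/*
 * Illustrative only: reallocf(9) avoids the dance shown above, at the
 * cost of always losing the old contents when reallocation fails:
 *
 *	buf = reallocf(buf, nsize, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);
 */
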
11235d4bf057SVladimir Kondratyev /*
112416b971edSMateusz Guzik  *	malloc_size: returns the number of bytes allocated for a request of
112516b971edSMateusz Guzik  *		     the specified size, or 0 if the request would be
 *		     handled by malloc_large()
112616b971edSMateusz Guzik  */
112716b971edSMateusz Guzik size_t
112816b971edSMateusz Guzik malloc_size(size_t size)
112916b971edSMateusz Guzik {
113016b971edSMateusz Guzik 	int indx;
113116b971edSMateusz Guzik 
113216b971edSMateusz Guzik 	if (size > kmem_zmax)
113316b971edSMateusz Guzik 		return (0);
113416b971edSMateusz Guzik 	if (size & KMEM_ZMASK)
113516b971edSMateusz Guzik 		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
113616b971edSMateusz Guzik 	indx = kmemsize[size >> KMEM_ZSHIFT];
113716b971edSMateusz Guzik 	return (kmemzones[indx].kz_size);
113816b971edSMateusz Guzik }
113916b971edSMateusz Guzik 
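/*
 * Worked example (illustrative, assuming the standard power-of-two
 * bucket layout with KMEM_ZBASE of 16): a request for 100 bytes is
 * first rounded up to a multiple of KMEM_ZBASE (112) and then mapped
 * through kmemsize[] to the smallest bucket that fits, so
 * malloc_size(100) returns 128.
 */
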
114016b971edSMateusz Guzik /*
11415d4bf057SVladimir Kondratyev  *	malloc_usable_size: returns the usable size of the allocation.
11425d4bf057SVladimir Kondratyev  */
11435d4bf057SVladimir Kondratyev size_t
11445d4bf057SVladimir Kondratyev malloc_usable_size(const void *addr)
11455d4bf057SVladimir Kondratyev {
11465d4bf057SVladimir Kondratyev #ifndef DEBUG_REDZONE
11475d4bf057SVladimir Kondratyev 	uma_zone_t zone;
11485d4bf057SVladimir Kondratyev 	uma_slab_t slab;
11495d4bf057SVladimir Kondratyev #endif
11505d4bf057SVladimir Kondratyev 	u_long size;
11515d4bf057SVladimir Kondratyev 
11525d4bf057SVladimir Kondratyev 	if (addr == NULL)
11535d4bf057SVladimir Kondratyev 		return (0);
11545d4bf057SVladimir Kondratyev 
11555d4bf057SVladimir Kondratyev #ifdef DEBUG_MEMGUARD
11565d4bf057SVladimir Kondratyev 	if (is_memguard_addr(__DECONST(void *, addr)))
11575d4bf057SVladimir Kondratyev 		return (memguard_get_req_size(addr));
11585d4bf057SVladimir Kondratyev #endif
11595d4bf057SVladimir Kondratyev 
11605d4bf057SVladimir Kondratyev #ifdef DEBUG_REDZONE
11615d4bf057SVladimir Kondratyev 	size = redzone_get_size(__DECONST(void *, addr));
11625d4bf057SVladimir Kondratyev #else
11635d4bf057SVladimir Kondratyev 	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
11645d4bf057SVladimir Kondratyev 	if (slab == NULL)
1165*c0df224bSBjoern A. Zeeb 		panic("malloc_usable_size: address %p(%p) is not allocated",
11665d4bf057SVladimir Kondratyev 		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
11675d4bf057SVladimir Kondratyev 
11689e6544ddSBjoern A. Zeeb 	switch (GET_SLAB_COOKIE(slab)) {
11699e6544ddSBjoern A. Zeeb 	case __predict_true(SLAB_COOKIE_SLAB_PTR):
11705d4bf057SVladimir Kondratyev 		size = zone->uz_size;
11719e6544ddSBjoern A. Zeeb 		break;
11729e6544ddSBjoern A. Zeeb 	case SLAB_COOKIE_MALLOC_LARGE:
11735d4bf057SVladimir Kondratyev 		size = malloc_large_size(slab);
11749e6544ddSBjoern A. Zeeb 		break;
11759e6544ddSBjoern A. Zeeb 	default:
11769e6544ddSBjoern A. Zeeb 		__assert_unreachable();
11779e6544ddSBjoern A. Zeeb 		size = 0;
11789e6544ddSBjoern A. Zeeb 		break;
11799e6544ddSBjoern A. Zeeb 	}
11805d4bf057SVladimir Kondratyev #endif
1181880b670cSMark Johnston 
1182880b670cSMark Johnston 	/*
1183880b670cSMark Johnston 	 * Unmark the redzone to avoid reports from consumers who are
1184880b670cSMark Johnston 	 * (presumably) about to use the full allocation size.
1185880b670cSMark Johnston 	 */
1186880b670cSMark Johnston 	kasan_mark(addr, size, size, 0);
1187880b670cSMark Johnston 
11885d4bf057SVladimir Kondratyev 	return (size);
11895d4bf057SVladimir Kondratyev }
11905d4bf057SVladimir Kondratyev 
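/*
 * Illustrative only (hypothetical sizes): because small requests are
 * rounded up to a bucket size, a caller may use the entire reported
 * size without reallocating:
 *
 *	buf = malloc(100, M_TEMP, M_WAITOK);
 *	cap = malloc_usable_size(buf);	(128 with the standard buckets)
 */
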
1191c70af487SAlan Cox CTASSERT(VM_KMEM_SIZE_SCALE >= 1);
1192c70af487SAlan Cox 
11935df87b21SJeff Roberson /*
1194c70af487SAlan Cox  * Initialize the kernel memory (kmem) arena.
11955df87b21SJeff Roberson  */
11965df87b21SJeff Roberson void
11975df87b21SJeff Roberson kmeminit(void)
11985df87b21SJeff Roberson {
1199af3b2549SHans Petter Selasky 	u_long mem_size;
1200af3b2549SHans Petter Selasky 	u_long tmp;
120169ef67f9SJason Evans 
1202af3b2549SHans Petter Selasky #ifdef VM_KMEM_SIZE
1203af3b2549SHans Petter Selasky 	if (vm_kmem_size == 0)
1204af3b2549SHans Petter Selasky 		vm_kmem_size = VM_KMEM_SIZE;
1205af3b2549SHans Petter Selasky #endif
1206af3b2549SHans Petter Selasky #ifdef VM_KMEM_SIZE_MIN
1207af3b2549SHans Petter Selasky 	if (vm_kmem_size_min == 0)
1208af3b2549SHans Petter Selasky 		vm_kmem_size_min = VM_KMEM_SIZE_MIN;
1209af3b2549SHans Petter Selasky #endif
1210af3b2549SHans Petter Selasky #ifdef VM_KMEM_SIZE_MAX
1211af3b2549SHans Petter Selasky 	if (vm_kmem_size_max == 0)
1212af3b2549SHans Petter Selasky 		vm_kmem_size_max = VM_KMEM_SIZE_MAX;
1213af3b2549SHans Petter Selasky #endif
12148a58a9f6SJohn Dyson 	/*
1215c70af487SAlan Cox 	 * Calculate the amount of kernel virtual address (KVA) space that is
1216c70af487SAlan Cox 	 * preallocated to the kmem arena.  In order to support a wide range
1217c70af487SAlan Cox 	 * of machines, it is a function of the physical memory size,
1218c70af487SAlan Cox 	 * specifically,
12198a58a9f6SJohn Dyson 	 *
1220c70af487SAlan Cox 	 *	min(max(physical memory size / VM_KMEM_SIZE_SCALE,
1221c70af487SAlan Cox 	 *	    VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
1222c70af487SAlan Cox 	 *
1223c70af487SAlan Cox 	 * Every architecture must define an integral value for
1224c70af487SAlan Cox 	 * VM_KMEM_SIZE_SCALE.  However, the definitions of VM_KMEM_SIZE_MIN
1225c70af487SAlan Cox 	 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
1226c70af487SAlan Cox 	 * ceiling on this preallocation, are optional.  Typically,
1227c70af487SAlan Cox 	 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
1228c70af487SAlan Cox 	 * a given architecture.
12298a58a9f6SJohn Dyson 	 */
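	/*
	 * Worked example (illustrative, assuming 4 KiB pages and an
	 * amd64-like VM_KMEM_SIZE_SCALE of 1): with 4 GiB of RAM,
	 * mem_size is 1048576 pages, so the default computed below is
	 * 1048576 * PAGE_SIZE == 4 GiB, subject to the floor and
	 * ceiling described above.
	 */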
123044f1c916SBryan Drewery 	mem_size = vm_cnt.v_page_count;
12317c51714eSSean Bruno 	if (mem_size <= 32768) /* delphij XXX 32768 pages == 128MB at 4K/page */
12327c51714eSSean Bruno 		kmem_zmax = PAGE_SIZE;
12338a58a9f6SJohn Dyson 
1234c70af487SAlan Cox 	if (vm_kmem_size_scale < 1)
1235c70af487SAlan Cox 		vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
1236c70af487SAlan Cox 
1237af3b2549SHans Petter Selasky 	/*
1238af3b2549SHans Petter Selasky 	 * Check if we should use defaults for the "vm_kmem_size"
1239af3b2549SHans Petter Selasky 	 * variable:
1240af3b2549SHans Petter Selasky 	 */
1241af3b2549SHans Petter Selasky 	if (vm_kmem_size == 0) {
124228b740daSKonstantin Belousov 		vm_kmem_size = mem_size / vm_kmem_size_scale;
124328b740daSKonstantin Belousov 		vm_kmem_size = vm_kmem_size * PAGE_SIZE < vm_kmem_size ?
124428b740daSKonstantin Belousov 		    vm_kmem_size_max : vm_kmem_size * PAGE_SIZE;
1245c70af487SAlan Cox 		if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
12460e5179e4SStephane E. Potvin 			vm_kmem_size = vm_kmem_size_min;
1247479439b4SDag-Erling Smørgrav 		if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
1248479439b4SDag-Erling Smørgrav 			vm_kmem_size = vm_kmem_size_max;
1249af3b2549SHans Petter Selasky 	}
125028b740daSKonstantin Belousov 	if (vm_kmem_size == 0)
125128b740daSKonstantin Belousov 		panic("Tune VM_KMEM_SIZE_* for the platform");
12528a58a9f6SJohn Dyson 
125327b8623fSDavid Greenman 	/*
1254af3b2549SHans Petter Selasky 	 * The amount of KVA space that is preallocated to the
1255c70af487SAlan Cox 	 * kmem arena can be set statically at compile-time or manually
1256c70af487SAlan Cox 	 * through the kernel environment.  However, it is still limited to
1257c70af487SAlan Cox 	 * twice the physical memory size, which has been sufficient to handle
1258c70af487SAlan Cox 	 * the most severe cases of external fragmentation in the kmem arena.
125927b8623fSDavid Greenman 	 */
1260c749c003SAlan Cox 	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
1261c749c003SAlan Cox 		vm_kmem_size = 2 * mem_size * PAGE_SIZE;
12628a58a9f6SJohn Dyson 
1263e137643eSOlivier Houchard 	vm_kmem_size = round_page(vm_kmem_size);
12646faf45b3SMark Johnston 
12656faf45b3SMark Johnston 	/*
126689786088SMark Johnston 	 * With KASAN or KMSAN enabled, dynamically allocated kernel memory is
126789786088SMark Johnston 	 * shadowed.  Account for this when setting the UMA limit.
12686faf45b3SMark Johnston 	 */
126989786088SMark Johnston #if defined(KASAN)
12706faf45b3SMark Johnston 	vm_kmem_size = (vm_kmem_size * KASAN_SHADOW_SCALE) /
12716faf45b3SMark Johnston 	    (KASAN_SHADOW_SCALE + 1);
127289786088SMark Johnston #elif defined(KMSAN)
127389786088SMark Johnston 	vm_kmem_size /= 3;
12746faf45b3SMark Johnston #endif
12756faf45b3SMark Johnston 
1276e3813573SMatthew D Fleming #ifdef DEBUG_MEMGUARD
1277f806cdcfSMatthew D Fleming 	tmp = memguard_fudge(vm_kmem_size, kernel_map);
1278e3813573SMatthew D Fleming #else
1279e3813573SMatthew D Fleming 	tmp = vm_kmem_size;
1280e3813573SMatthew D Fleming #endif
12812e47807cSJeff Roberson 	uma_set_limit(tmp);
12828355f576SJeff Roberson 
1283e4eb384bSBosko Milekic #ifdef DEBUG_MEMGUARD
1284e4eb384bSBosko Milekic 	/*
1285e4eb384bSBosko Milekic 	 * Initialize MemGuard if support compiled in.  MemGuard is a
1286e4eb384bSBosko Milekic 	 * replacement allocator used for detecting tamper-after-free
1287e4eb384bSBosko Milekic 	 * scenarios as they occur.  It is only used for debugging.
1288e4eb384bSBosko Milekic 	 */
12892e47807cSJeff Roberson 	memguard_init(kernel_arena);
1290e4eb384bSBosko Milekic #endif
12915df87b21SJeff Roberson }
12925df87b21SJeff Roberson 
12935df87b21SJeff Roberson /*
12945df87b21SJeff Roberson  * Initialize the kernel memory allocator
12955df87b21SJeff Roberson  */
12965df87b21SJeff Roberson /* ARGSUSED */
12975df87b21SJeff Roberson static void
12985df87b21SJeff Roberson mallocinit(void *dummy)
12995df87b21SJeff Roberson {
13005df87b21SJeff Roberson 	int i;
13015df87b21SJeff Roberson 	uint8_t indx;
13025df87b21SJeff Roberson 
13035df87b21SJeff Roberson 	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
13045df87b21SJeff Roberson 
13055df87b21SJeff Roberson 	kmeminit();
1306e4eb384bSBosko Milekic 
13077001d850SXin LI 	if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
13087001d850SXin LI 		kmem_zmax = KMEM_ZMAX;
13097001d850SXin LI 
13106f267175SJeff Roberson 	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
13116f267175SJeff Roberson 		int size = kmemzones[indx].kz_size;
1312eaa17d42SRyan Libby 		const char *name = kmemzones[indx].kz_name;
13133b15beb3SKonstantin Belousov 		size_t align;
1314d7854da1SMatthew D Fleming 		int subzone;
13158355f576SJeff Roberson 
13163b15beb3SKonstantin Belousov 		align = UMA_ALIGN_PTR;
13173b15beb3SKonstantin Belousov 		if (powerof2(size) && size > sizeof(void *))
13180781c79dSKonstantin Belousov 			align = MIN(size, PAGE_SIZE) - 1;
1319d7854da1SMatthew D Fleming 		for (subzone = 0; subzone < numzones; subzone++) {
1320d7854da1SMatthew D Fleming 			kmemzones[indx].kz_zone[subzone] =
1321d7854da1SMatthew D Fleming 			    uma_zcreate(name, size,
132210094910SMark Johnston #if defined(INVARIANTS) && !defined(KASAN) && !defined(KMSAN)
13238f70816cSJeff Roberson 			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
13248efc4effSJeff Roberson #else
13258efc4effSJeff Roberson 			    NULL, NULL, NULL, NULL,
13268efc4effSJeff Roberson #endif
13273b15beb3SKonstantin Belousov 			    align, UMA_ZONE_MALLOC);
1328d7854da1SMatthew D Fleming 		}
13298355f576SJeff Roberson 		for (; i <= size; i += KMEM_ZBASE)
13306f267175SJeff Roberson 			kmemsize[i >> KMEM_ZSHIFT] = indx;
1331df8bae1dSRodney W. Grimes 	}
1332254c6cb3SPoul-Henning Kamp }
1333af3b2549SHans Petter Selasky SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);
1334254c6cb3SPoul-Henning Kamp 
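/*
 * Illustrative only: memory types are normally declared with
 * MALLOC_DEFINE(9), which arranges for malloc_init() below (and
 * malloc_uninit() on module unload) to run via SYSINIT/SYSUNINIT.
 * A hypothetical driver might use:
 *
 *	MALLOC_DEFINE(M_FOODEV, "foodev", "foo(4) softc and buffers");
 *	...
 *	sc = malloc(sizeof(*sc), M_FOODEV, M_WAITOK | M_ZERO);
 */
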
1335db669378SPeter Wemm void
133687efd4d5SRobert Watson malloc_init(void *data)
1337254c6cb3SPoul-Henning Kamp {
133863a7e0a3SRobert Watson 	struct malloc_type_internal *mtip;
133963a7e0a3SRobert Watson 	struct malloc_type *mtp;
134063a7e0a3SRobert Watson 
1341deab5717SMitchell Horne 	KASSERT(vm_cnt.v_page_count != 0,
1342deab5717SMitchell Horne 	    ("malloc_init() called before vm_mem_init()"));
134363a7e0a3SRobert Watson 
134463a7e0a3SRobert Watson 	mtp = data;
1345bdcc2226SMateusz Guzik 	if (mtp->ks_version != M_VERSION)
1346e25d8b67SMateusz Guzik 		panic("malloc_init: type %s with unsupported version %lu",
1347e25d8b67SMateusz Guzik 		    mtp->ks_shortdesc, mtp->ks_version);
1348bb1c7df8SRobert Watson 
1349bdcc2226SMateusz Guzik 	mtip = &mtp->ks_mti;
13508e6526e9SMateusz Guzik 	mtip->mti_stats = uma_zalloc_pcpu(pcpu_zone_64, M_WAITOK | M_ZERO);
1351c9e05ccdSMateusz Guzik 	mtp_set_subzone(mtp);
1352254c6cb3SPoul-Henning Kamp 
13536f267175SJeff Roberson 	mtx_lock(&malloc_mtx);
135463a7e0a3SRobert Watson 	mtp->ks_next = kmemstatistics;
135563a7e0a3SRobert Watson 	kmemstatistics = mtp;
1356cd814b26SRobert Watson 	kmemcount++;
13576f267175SJeff Roberson 	mtx_unlock(&malloc_mtx);
1358df8bae1dSRodney W. Grimes }
1359db669378SPeter Wemm 
1360db669378SPeter Wemm void
136187efd4d5SRobert Watson malloc_uninit(void *data)
1362db669378SPeter Wemm {
136363a7e0a3SRobert Watson 	struct malloc_type_internal *mtip;
13642a143d5bSPawel Jakub Dawidek 	struct malloc_type_stats *mtsp;
136563a7e0a3SRobert Watson 	struct malloc_type *mtp, *temp;
13662a143d5bSPawel Jakub Dawidek 	long temp_allocs, temp_bytes;
13672a143d5bSPawel Jakub Dawidek 	int i;
1368db669378SPeter Wemm 
136963a7e0a3SRobert Watson 	mtp = data;
1370bdcc2226SMateusz Guzik 	KASSERT(mtp->ks_version == M_VERSION,
1371bdcc2226SMateusz Guzik 	    ("malloc_uninit: bad malloc type version"));
1372bb1c7df8SRobert Watson 
13736f267175SJeff Roberson 	mtx_lock(&malloc_mtx);
1374bdcc2226SMateusz Guzik 	mtip = &mtp->ks_mti;
137563a7e0a3SRobert Watson 	if (mtp != kmemstatistics) {
137663a7e0a3SRobert Watson 		for (temp = kmemstatistics; temp != NULL;
137763a7e0a3SRobert Watson 		    temp = temp->ks_next) {
1378f121baaaSBrian Somers 			if (temp->ks_next == mtp) {
137963a7e0a3SRobert Watson 				temp->ks_next = mtp->ks_next;
1380f121baaaSBrian Somers 				break;
1381db669378SPeter Wemm 			}
1382f121baaaSBrian Somers 		}
1383f121baaaSBrian Somers 		KASSERT(temp,
1384f121baaaSBrian Somers 		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
138563a7e0a3SRobert Watson 	} else
138663a7e0a3SRobert Watson 		kmemstatistics = mtp->ks_next;
1387cd814b26SRobert Watson 	kmemcount--;
13886f267175SJeff Roberson 	mtx_unlock(&malloc_mtx);
13892a143d5bSPawel Jakub Dawidek 
13902a143d5bSPawel Jakub Dawidek 	/*
13912a143d5bSPawel Jakub Dawidek 	 * Look for memory leaks.
13922a143d5bSPawel Jakub Dawidek 	 */
13932a143d5bSPawel Jakub Dawidek 	temp_allocs = temp_bytes = 0;
13949afff6b1SMateusz Guzik 	for (i = 0; i <= mp_maxid; i++) {
13959afff6b1SMateusz Guzik 		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
13962a143d5bSPawel Jakub Dawidek 		temp_allocs += mtsp->mts_numallocs;
13972a143d5bSPawel Jakub Dawidek 		temp_allocs -= mtsp->mts_numfrees;
13982a143d5bSPawel Jakub Dawidek 		temp_bytes += mtsp->mts_memalloced;
13992a143d5bSPawel Jakub Dawidek 		temp_bytes -= mtsp->mts_memfreed;
14002a143d5bSPawel Jakub Dawidek 	}
14012a143d5bSPawel Jakub Dawidek 	if (temp_allocs > 0 || temp_bytes > 0) {
14022a143d5bSPawel Jakub Dawidek 		printf("Warning: memory type %s leaked memory on destroy "
14032a143d5bSPawel Jakub Dawidek 		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
14042a143d5bSPawel Jakub Dawidek 		    temp_allocs, temp_bytes);
14052a143d5bSPawel Jakub Dawidek 	}
14062a143d5bSPawel Jakub Dawidek 
14078e6526e9SMateusz Guzik 	uma_zfree_pcpu(pcpu_zone_64, mtip->mti_stats);
1408db669378SPeter Wemm }
14096f267175SJeff Roberson 
1410d362c40dSPawel Jakub Dawidek struct malloc_type *
1411d362c40dSPawel Jakub Dawidek malloc_desc2type(const char *desc)
1412d362c40dSPawel Jakub Dawidek {
1413d362c40dSPawel Jakub Dawidek 	struct malloc_type *mtp;
1414d362c40dSPawel Jakub Dawidek 
1415d362c40dSPawel Jakub Dawidek 	mtx_assert(&malloc_mtx, MA_OWNED);
1416d362c40dSPawel Jakub Dawidek 	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1417d362c40dSPawel Jakub Dawidek 		if (strcmp(mtp->ks_shortdesc, desc) == 0)
1418d362c40dSPawel Jakub Dawidek 			return (mtp);
1419d362c40dSPawel Jakub Dawidek 	}
1420d362c40dSPawel Jakub Dawidek 	return (NULL);
1421d362c40dSPawel Jakub Dawidek }
1422d362c40dSPawel Jakub Dawidek 
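/*
 * Illustrative only: malloc_desc2type() must be called with malloc_mtx
 * held, e.g. to look up the type registered as "temp" (M_TEMP):
 *
 *	mtx_lock(&malloc_mtx);
 *	mtp = malloc_desc2type("temp");
 *	mtx_unlock(&malloc_mtx);
 */
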
14236f267175SJeff Roberson static int
1424cd814b26SRobert Watson sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
1425cd814b26SRobert Watson {
1426cd814b26SRobert Watson 	struct malloc_type_stream_header mtsh;
1427cd814b26SRobert Watson 	struct malloc_type_internal *mtip;
14289afff6b1SMateusz Guzik 	struct malloc_type_stats *mtsp, zeromts;
1429cd814b26SRobert Watson 	struct malloc_type_header mth;
1430cd814b26SRobert Watson 	struct malloc_type *mtp;
14314e657159SMatthew D Fleming 	int error, i;
1432cd814b26SRobert Watson 	struct sbuf sbuf;
1433cd814b26SRobert Watson 
143400f0e671SMatthew D Fleming 	error = sysctl_wire_old_buffer(req, 0);
143500f0e671SMatthew D Fleming 	if (error != 0)
143600f0e671SMatthew D Fleming 		return (error);
14374e657159SMatthew D Fleming 	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
14381eafc078SIan Lepore 	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
1439cd814b26SRobert Watson 	mtx_lock(&malloc_mtx);
1440cd814b26SRobert Watson 
14419afff6b1SMateusz Guzik 	bzero(&zeromts, sizeof(zeromts));
14429afff6b1SMateusz Guzik 
1443cd814b26SRobert Watson 	/*
1444cd814b26SRobert Watson 	 * Insert stream header.
1445cd814b26SRobert Watson 	 */
1446cd814b26SRobert Watson 	bzero(&mtsh, sizeof(mtsh));
1447cd814b26SRobert Watson 	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
1448cd814b26SRobert Watson 	mtsh.mtsh_maxcpus = MAXCPU;
1449cd814b26SRobert Watson 	mtsh.mtsh_count = kmemcount;
14504e657159SMatthew D Fleming 	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));
1451cd814b26SRobert Watson 
1452cd814b26SRobert Watson 	/*
1453cd814b26SRobert Watson 	 * Insert alternating sequence of type headers and type statistics.
1454cd814b26SRobert Watson 	 */
1455cd814b26SRobert Watson 	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1456bdcc2226SMateusz Guzik 		mtip = &mtp->ks_mti;
1457cd814b26SRobert Watson 
1458cd814b26SRobert Watson 		/*
1459cd814b26SRobert Watson 		 * Insert type header.
1460cd814b26SRobert Watson 		 */
1461cd814b26SRobert Watson 		bzero(&mth, sizeof(mth));
1462cd814b26SRobert Watson 		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
14634e657159SMatthew D Fleming 		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));
1464cd814b26SRobert Watson 
1465cd814b26SRobert Watson 		/*
1466cd814b26SRobert Watson 		 * Insert type statistics for each CPU.
1467cd814b26SRobert Watson 		 */
14689afff6b1SMateusz Guzik 		for (i = 0; i <= mp_maxid; i++) {
14699afff6b1SMateusz Guzik 			mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
14709afff6b1SMateusz Guzik 			(void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));
1471cd814b26SRobert Watson 		}
14729afff6b1SMateusz Guzik 		/*
14739afff6b1SMateusz Guzik 		 * Fill in the missing CPUs.
14749afff6b1SMateusz Guzik 		 */
14759afff6b1SMateusz Guzik 		for (; i < MAXCPU; i++) {
14769afff6b1SMateusz Guzik 			(void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));
14779afff6b1SMateusz Guzik 		}
1478cd814b26SRobert Watson 	}
1479cd814b26SRobert Watson 	mtx_unlock(&malloc_mtx);
14804e657159SMatthew D Fleming 	error = sbuf_finish(&sbuf);
1481cd814b26SRobert Watson 	sbuf_delete(&sbuf);
1482cd814b26SRobert Watson 	return (error);
1483cd814b26SRobert Watson }
1484cd814b26SRobert Watson 
14857029da5cSPawel Biernacki SYSCTL_PROC(_kern, OID_AUTO, malloc_stats,
14867029da5cSPawel Biernacki     CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_MPSAFE, 0, 0,
14877029da5cSPawel Biernacki     sysctl_kern_malloc_stats, "s,malloc_type_ustats",
1488cd814b26SRobert Watson     "Return malloc types");
1489cd814b26SRobert Watson 
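/*
 * Illustrative only: userland normally consumes the stream exported
 * above through libmemstat(3) rather than parsing it by hand (error
 * handling omitted):
 *
 *	struct memory_type_list *mtlp;
 *
 *	mtlp = memstat_mtl_alloc();
 *	memstat_sysctl_malloc(mtlp, 0);
 *	... iterate with memstat_mtl_first()/memstat_mtl_next() ...
 */
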
1490cd814b26SRobert Watson SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
1491cd814b26SRobert Watson     "Count of kernel malloc types");
1492cd814b26SRobert Watson 
149391dd776cSJohn Birrell void
149491dd776cSJohn Birrell malloc_type_list(malloc_type_list_func_t *func, void *arg)
149591dd776cSJohn Birrell {
149691dd776cSJohn Birrell 	struct malloc_type *mtp, **bufmtp;
149791dd776cSJohn Birrell 	int count, i;
149891dd776cSJohn Birrell 	size_t buflen;
149991dd776cSJohn Birrell 
150091dd776cSJohn Birrell 	mtx_lock(&malloc_mtx);
150191dd776cSJohn Birrell restart:
150291dd776cSJohn Birrell 	mtx_assert(&malloc_mtx, MA_OWNED);
150391dd776cSJohn Birrell 	count = kmemcount;
150491dd776cSJohn Birrell 	mtx_unlock(&malloc_mtx);
150591dd776cSJohn Birrell 
150691dd776cSJohn Birrell 	buflen = sizeof(struct malloc_type *) * count;
150791dd776cSJohn Birrell 	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);
150891dd776cSJohn Birrell 
150991dd776cSJohn Birrell 	mtx_lock(&malloc_mtx);
151091dd776cSJohn Birrell 
151191dd776cSJohn Birrell 	if (count < kmemcount) {
151291dd776cSJohn Birrell 		free(bufmtp, M_TEMP);
151391dd776cSJohn Birrell 		goto restart;
151491dd776cSJohn Birrell 	}
151591dd776cSJohn Birrell 
151691dd776cSJohn Birrell 	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
151791dd776cSJohn Birrell 		bufmtp[i] = mtp;
151891dd776cSJohn Birrell 
151991dd776cSJohn Birrell 	mtx_unlock(&malloc_mtx);
152091dd776cSJohn Birrell 
152191dd776cSJohn Birrell 	for (i = 0; i < count; i++)
152291dd776cSJohn Birrell 		(func)(bufmtp[i], arg);
152391dd776cSJohn Birrell 
152491dd776cSJohn Birrell 	free(bufmtp, M_TEMP);
152591dd776cSJohn Birrell }
152691dd776cSJohn Birrell 
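/*
 * Illustrative only: a hypothetical malloc_type_list() callback that
 * counts the registered types:
 *
 *	static void
 *	count_type(struct malloc_type *mtp, void *arg)
 *	{
 *
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	malloc_type_list(count_type, &n);
 */
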
1527909ed16cSRobert Watson #ifdef DDB
152846d70077SConrad Meyer static int64_t
152946d70077SConrad Meyer get_malloc_stats(const struct malloc_type_internal *mtip, uint64_t *allocs,
153046d70077SConrad Meyer     uint64_t *inuse)
1531909ed16cSRobert Watson {
153246d70077SConrad Meyer 	const struct malloc_type_stats *mtsp;
153346d70077SConrad Meyer 	uint64_t frees, alloced, freed;
1534909ed16cSRobert Watson 	int i;
1535909ed16cSRobert Watson 
153646d70077SConrad Meyer 	*allocs = 0;
1537909ed16cSRobert Watson 	frees = 0;
153824076d13SRobert Watson 	alloced = 0;
153924076d13SRobert Watson 	freed = 0;
15409afff6b1SMateusz Guzik 	for (i = 0; i <= mp_maxid; i++) {
15419afff6b1SMateusz Guzik 		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
154246d70077SConrad Meyer 
154346d70077SConrad Meyer 		*allocs += mtsp->mts_numallocs;
154426e9d9b0SMark Johnston 		frees += mtsp->mts_numfrees;
154526e9d9b0SMark Johnston 		alloced += mtsp->mts_memalloced;
154626e9d9b0SMark Johnston 		freed += mtsp->mts_memfreed;
1547909ed16cSRobert Watson 	}
154846d70077SConrad Meyer 	*inuse = *allocs - frees;
154946d70077SConrad Meyer 	return (alloced - freed);
155046d70077SConrad Meyer }
155146d70077SConrad Meyer 
1552c84c5e00SMitchell Horne DB_SHOW_COMMAND_FLAGS(malloc, db_show_malloc, DB_CMD_MEMSAFE)
155346d70077SConrad Meyer {
155446d70077SConrad Meyer 	const char *fmt_hdr, *fmt_entry;
155546d70077SConrad Meyer 	struct malloc_type *mtp;
155646d70077SConrad Meyer 	uint64_t allocs, inuse;
155746d70077SConrad Meyer 	int64_t size;
155846d70077SConrad Meyer 	/* variables for sorting */
155946d70077SConrad Meyer 	struct malloc_type *last_mtype, *cur_mtype;
156046d70077SConrad Meyer 	int64_t cur_size, last_size;
156146d70077SConrad Meyer 	int ties;
156246d70077SConrad Meyer 
156346d70077SConrad Meyer 	if (modif[0] == 'i') {
156446d70077SConrad Meyer 		fmt_hdr = "%s,%s,%s,%s\n";
156546d70077SConrad Meyer 		fmt_entry = "\"%s\",%ju,%jdK,%ju\n";
156646d70077SConrad Meyer 	} else {
156746d70077SConrad Meyer 		fmt_hdr = "%18s %12s  %12s %12s\n";
156846d70077SConrad Meyer 		fmt_entry = "%18s %12ju %12jdK %12ju\n";
156946d70077SConrad Meyer 	}
157046d70077SConrad Meyer 
157146d70077SConrad Meyer 	db_printf(fmt_hdr, "Type", "InUse", "MemUse", "Requests");
157246d70077SConrad Meyer 
157346d70077SConrad Meyer 	/* Select sort, largest size first. */
157446d70077SConrad Meyer 	last_mtype = NULL;
157546d70077SConrad Meyer 	last_size = INT64_MAX;
157646d70077SConrad Meyer 	for (;;) {
157746d70077SConrad Meyer 		cur_mtype = NULL;
157846d70077SConrad Meyer 		cur_size = -1;
157946d70077SConrad Meyer 		ties = 0;
158046d70077SConrad Meyer 
158146d70077SConrad Meyer 		for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
158246d70077SConrad Meyer 			/*
158346d70077SConrad Meyer 			 * In the case of size ties, print out mtypes
158446d70077SConrad Meyer 			 * in the order they are encountered.  That is,
158546d70077SConrad Meyer 			 * when we encounter the most recently output
158646d70077SConrad Meyer 			 * mtype, we have already printed all preceding
158746d70077SConrad Meyer 			 * ties, and we must print all following ties.
158846d70077SConrad Meyer 			 */
158946d70077SConrad Meyer 			if (mtp == last_mtype) {
159046d70077SConrad Meyer 				ties = 1;
159146d70077SConrad Meyer 				continue;
159246d70077SConrad Meyer 			}
1593bdcc2226SMateusz Guzik 			size = get_malloc_stats(&mtp->ks_mti, &allocs,
159446d70077SConrad Meyer 			    &inuse);
159546d70077SConrad Meyer 			if (size > cur_size && size < last_size + ties) {
159646d70077SConrad Meyer 				cur_size = size;
159746d70077SConrad Meyer 				cur_mtype = mtp;
159846d70077SConrad Meyer 			}
159946d70077SConrad Meyer 		}
160046d70077SConrad Meyer 		if (cur_mtype == NULL)
160146d70077SConrad Meyer 			break;
160246d70077SConrad Meyer 
1603bdcc2226SMateusz Guzik 		size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
160446d70077SConrad Meyer 		db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
160546d70077SConrad Meyer 		    howmany(size, 1024), allocs);
160646d70077SConrad Meyer 
1607687c94aaSJohn Baldwin 		if (db_pager_quit)
1608687c94aaSJohn Baldwin 			break;
160946d70077SConrad Meyer 
161046d70077SConrad Meyer 		last_mtype = cur_mtype;
161146d70077SConrad Meyer 		last_size = cur_size;
1612909ed16cSRobert Watson 	}
1613909ed16cSRobert Watson }
1614d7854da1SMatthew D Fleming 
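/*
 * Example (from the DDB prompt): "show malloc" prints the table above
 * sorted by memory use; "show malloc/i" emits the same data as
 * comma-separated values.
 */
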
1615d7854da1SMatthew D Fleming #if MALLOC_DEBUG_MAXZONES > 1
1616d7854da1SMatthew D Fleming DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
1617d7854da1SMatthew D Fleming {
1618d7854da1SMatthew D Fleming 	struct malloc_type_internal *mtip;
1619d7854da1SMatthew D Fleming 	struct malloc_type *mtp;
1620d7854da1SMatthew D Fleming 	u_int subzone;
1621d7854da1SMatthew D Fleming 
1622d7854da1SMatthew D Fleming 	if (!have_addr) {
1623d7854da1SMatthew D Fleming 		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
1624d7854da1SMatthew D Fleming 		return;
1625d7854da1SMatthew D Fleming 	}
1626d7854da1SMatthew D Fleming 	mtp = (void *)addr;
1627bdcc2226SMateusz Guzik 	if (mtp->ks_version != M_VERSION) {
1628bdcc2226SMateusz Guzik 		db_printf("Version %lx does not match expected %x\n",
1629bdcc2226SMateusz Guzik 		    mtp->ks_version, M_VERSION);
1630d7854da1SMatthew D Fleming 		return;
1631d7854da1SMatthew D Fleming 	}
1632d7854da1SMatthew D Fleming 
1633bdcc2226SMateusz Guzik 	mtip = &mtp->ks_mti;
1634d7854da1SMatthew D Fleming 	subzone = mtip->mti_zone;
1635d7854da1SMatthew D Fleming 
1636d7854da1SMatthew D Fleming 	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1637bdcc2226SMateusz Guzik 		mtip = &mtp->ks_mti;
1638d7854da1SMatthew D Fleming 		if (mtip->mti_zone != subzone)
1639d7854da1SMatthew D Fleming 			continue;
1640d7854da1SMatthew D Fleming 		db_printf("%s\n", mtp->ks_shortdesc);
1641687c94aaSJohn Baldwin 		if (db_pager_quit)
1642687c94aaSJohn Baldwin 			break;
1643d7854da1SMatthew D Fleming 	}
1644d7854da1SMatthew D Fleming }
1645d7854da1SMatthew D Fleming #endif /* MALLOC_DEBUG_MAXZONES > 1 */
1646d7854da1SMatthew D Fleming #endif /* DDB */
1647