/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets are used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
 */
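
/*
 * Usage sketch (illustrative, not part of this file): a consumer declares
 * a malloc(9) memory type and allocates and frees against it.  The type
 * name M_EXAMPLE is hypothetical.
 *
 *	MALLOC_DEFINE(M_EXAMPLE, "example", "example subsystem buffers");
 *
 *	void *p = malloc(128, M_EXAMPLE, M_WAITOK | M_ZERO);
 *	...
 *	free(p, M_EXAMPLE);
 */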

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/vmem.h>
#ifdef EPOCH_TRACE
#include <sys/epoch.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

bool	__read_frequently			dtrace_malloc_enabled;
dtrace_malloc_probe_func_t __read_mostly	dtrace_malloc_probe;
#endif

#if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) ||		\
    defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
#define	MALLOC_DEBUG	1
#endif

#if defined(KASAN) || defined(DEBUG_REDZONE)
#define	DEBUG_REDZONE_ARG_DEF	, unsigned long osize
#define	DEBUG_REDZONE_ARG	, osize
#else
#define	DEBUG_REDZONE_ARG_DEF
#define	DEBUG_REDZONE_ARG
#endif

typedef	enum {
	SLAB_COOKIE_SLAB_PTR		= 0x0,
	SLAB_COOKIE_MALLOC_LARGE	= 0x1,
	SLAB_COOKIE_CONTIG_MALLOC	= 0x2,
} slab_cookie_t;
#define	SLAB_COOKIE_MASK		0x3
#define	SLAB_COOKIE_SHIFT		2
#define	GET_SLAB_COOKIE(_slab)						\
    ((slab_cookie_t)(uintptr_t)(_slab) & SLAB_COOKIE_MASK)

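/*
 * Worked example (illustrative): the non-pointer cookies keep the
 * allocation size in the bits above SLAB_COOKIE_SHIFT.  A 3-page
 * (0x3000-byte) contigmalloc allocation is recorded as
 * CONTIG_MALLOC_SLAB(0x3000) == (void *)0xc002; GET_SLAB_COOKIE() masks
 * the low two bits to recover SLAB_COOKIE_CONTIG_MALLOC, and
 * contigmalloc_size() shifts right to recover 0x3000.  The helper macros
 * are defined next to their users below.
 */
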
/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory.  'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
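
/*
 * Worked example (illustrative): with REALLOC_FRACTION == 1 and an
 * existing 1024-byte block, realloc() to 600 bytes reuses the block
 * (600 > 1024 / 2), while realloc() to 512 bytes or fewer copies into a
 * new, smaller block (512 <= 1024 / 2).
 */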

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

static struct malloc_type *kmemstatistics;
static int kmemcount;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	65536
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * Warning: the layout of the struct is duplicated in libmemstat for KVM support.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	const char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "malloc-16", },
	{32, "malloc-32", },
	{64, "malloc-64", },
	{128, "malloc-128", },
	{256, "malloc-256", },
	{384, "malloc-384", },
	{512, "malloc-512", },
	{1024, "malloc-1024", },
	{2048, "malloc-2048", },
	{4096, "malloc-4096", },
	{8192, "malloc-8192", },
	{16384, "malloc-16384", },
	{32768, "malloc-32768", },
	{65536, "malloc-65536", },
	{0, NULL},
};
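
/*
 * Example (illustrative): a 100-byte request is rounded up to the next
 * KMEM_ZBASE multiple (112), and kmemsize[112 >> KMEM_ZSHIFT] selects the
 * smallest bucket that fits, here the 128-byte "malloc-128" zone;
 * malloc_size() below performs exactly this computation.
 */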

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long kmem_zmax = KMEM_ZMAX;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
    "Maximum allocation size for which malloc(9) uses UMA as the backend");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Free space in kmem");

static SYSCTL_NODE(_vm, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Malloc information");

static u_int vm_malloc_zone_count = nitems(kmemzones);
SYSCTL_UINT(_vm_malloc, OID_AUTO, zone_count,
    CTLFLAG_RD, &vm_malloc_zone_count, 0,
    "Number of malloc zones");

static int sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm_malloc, OID_AUTO, zone_sizes,
    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_malloc_zone_sizes, "S", "Zone sizes used by malloc");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel malloc debugging options");
#endif

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = uma_size();
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size, limit;

	/* The sysctl is unsigned, implement as a saturation value. */
	size = uma_size();
	limit = uma_limit();
	if (size > limit)
		size = 0;
	else
		size = limit - size;
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS)
{
	int sizes[nitems(kmemzones)];
	int i;

	for (i = 0; i < nitems(kmemzones); i++) {
		sizes[i] = kmemzones[i].kz_size;
	}

	return (SYSCTL_OUT(req, &sizes, sizeof(sizes)));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones",
	    &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");
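
/*
 * Worked example (illustrative, hypothetical values): with numzones == 4
 * and zone_offset == 1400097, a type whose short description is "devbuf"
 * selects desc[1400097 % 6] == 'b' (0x62), and 0x62 % 4 == 2, so such a
 * type's allocations land in subzone 2.
 */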

static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;
	const char *desc;
	size_t len;
	u_int val;

	mtip = &mtp->ks_mti;
	desc = mtp->ks_shortdesc;
	if (desc == NULL || (len = strlen(desc)) == 0)
		val = 0;
	else
		val = desc[zone_offset % len];
	mtip->mti_zone = (val % numzones);
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = &mtp->ks_mti;

	KASSERT(mtip->mti_zone < numzones,
	    ("mti_zone %u out of range %d",
	    mtip->mti_zone, numzones));
	return (mtip->mti_zone);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = &mtp->ks_mti;
	mtip->mti_zone = 0;
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * amount of bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = &mtp->ks_mti;
	mtsp = zpcpu_get(mtip->mti_stats);
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = &mtp->ks_mti;
	mtsp = zpcpu_get(mtip->mti_stats);
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 *	contigmalloc:
 *
 *	Allocate a block of physically contiguous memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
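
/*
 * Illustrative call (sketch, not from this file): allocate a 64KB
 * physically contiguous DMA buffer below 4GB with 4KB alignment; the
 * M_DEVBUF type is just an example.
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_WAITOK, 0, 0xffffffffUL,
 *	    4096, 0);
 */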
#define	IS_CONTIG_MALLOC(_slab)						\
    (GET_SLAB_COOKIE(_slab) == SLAB_COOKIE_CONTIG_MALLOC)
#define	CONTIG_MALLOC_SLAB(_size)					\
    ((void *)(((_size) << SLAB_COOKIE_SHIFT) | SLAB_COOKIE_CONTIG_MALLOC))
static inline size_t
contigmalloc_size(uma_slab_t slab)
{
	uintptr_t va;

	KASSERT(IS_CONTIG_MALLOC(slab),
	    ("%s: called on non-contigmalloc allocation: %p", __func__, slab));
	va = (uintptr_t)slab;
	return (va >> SLAB_COOKIE_SHIFT);
}

void *
contigmalloc(unsigned long osize, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	void *ret;
	unsigned long size;

#ifdef DEBUG_REDZONE
	size = redzone_size_ntor(osize);
#else
	size = osize;
#endif

	ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
	    boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL) {
		/* Use low bits unused for slab pointers. */
		vsetzoneslab((uintptr_t)ret, NULL, CONTIG_MALLOC_SLAB(size));
		malloc_type_allocated(type, round_page(size));
#ifdef DEBUG_REDZONE
		ret = redzone_setup(ret, osize);
#endif
	}
	return (ret);
}

void *
contigmalloc_domainset(unsigned long osize, struct malloc_type *type,
    struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, vm_paddr_t boundary)
{
	void *ret;
	unsigned long size;

#ifdef DEBUG_REDZONE
	size = redzone_size_ntor(osize);
#else
	size = osize;
#endif

	ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL) {
		/* Use low bits unused for slab pointers. */
		vsetzoneslab((uintptr_t)ret, NULL, CONTIG_MALLOC_SLAB(size));
		malloc_type_allocated(type, round_page(size));
#ifdef DEBUG_REDZONE
		ret = redzone_setup(ret, osize);
#endif
	}
	return (ret);
}
#undef	IS_CONTIG_MALLOC
#undef	CONTIG_MALLOC_SLAB

/* contigfree(9) is deprecated. */
void
contigfree(void *addr, unsigned long size __unused, struct malloc_type *type)
{
	free(addr, type);
}

#ifdef MALLOC_DEBUG
static int
malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
    int flags)
{
	KASSERT(mtp->ks_version == M_VERSION, ("malloc: bad malloc type version"));
	KASSERT((flags & (M_WAITOK | M_NOWAIT)) != 0,
	    ("malloc: flags must include either M_WAITOK or M_NOWAIT"));
	KASSERT((flags & (M_WAITOK | M_NOWAIT)) != (M_WAITOK | M_NOWAIT),
	    ("malloc: flags may not include both M_WAITOK and M_NOWAIT"));
	KASSERT((flags & M_NEVERFREED) == 0,
	    ("malloc: M_NEVERFREED is for internal use only"));
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			*vap = NULL;
			return (EJUSTRETURN);
		}
	}
#endif
	if (flags & M_WAITOK) {
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
		if (__predict_false(!THREAD_CAN_SLEEP())) {
#ifdef EPOCH_TRACE
			epoch_trace_list(curthread);
#endif
			KASSERT(0,
			    ("malloc(M_WAITOK) with sleeping prohibited"));
		}
	}
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("malloc: called with spinlock or critical section held"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, *sizep)) {
		*vap = memguard_alloc(*sizep, flags);
		if (*vap != NULL)
			return (EJUSTRETURN);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	*sizep = redzone_size_ntor(*sizep);
#endif

	return (0);
}
#endif

/*
 * Handle large allocations and frees by using kmem_malloc directly.
 */
#define	IS_MALLOC_LARGE(_slab)						\
    (GET_SLAB_COOKIE(_slab) == SLAB_COOKIE_MALLOC_LARGE)
#define	MALLOC_LARGE_SLAB(_size)					\
    ((void *)(((_size) << SLAB_COOKIE_SHIFT) | SLAB_COOKIE_MALLOC_LARGE))
static inline size_t
malloc_large_size(uma_slab_t slab)
{
	uintptr_t va;

	va = (uintptr_t)slab;
	KASSERT(IS_MALLOC_LARGE(slab),
	    ("%s: called on non-malloc_large allocation: %p", __func__, slab));
	return (va >> SLAB_COOKIE_SHIFT);
}

static caddr_t __noinline
malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
    int flags DEBUG_REDZONE_ARG_DEF)
{
	void *va;

	size = roundup(size, PAGE_SIZE);
	va = kmem_malloc_domainset(policy, size, flags);
	if (va != NULL) {
		/* Use low bits unused for slab pointers. */
		vsetzoneslab((uintptr_t)va, NULL, MALLOC_LARGE_SLAB(size));
		uma_total_inc(size);
	}
	malloc_type_allocated(mtp, va == NULL ? 0 : size);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	} else {
#ifdef DEBUG_REDZONE
		va = redzone_setup(va, osize);
#endif
		kasan_mark(va, osize, size, KASAN_MALLOC_REDZONE);
	}
	return (va);
}

static void
free_large(void *addr, size_t size)
{

	kmem_free(addr, size);
	uma_total_dec(size);
}
#undef	IS_MALLOC_LARGE
#undef	MALLOC_LARGE_SLAB

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
(malloc)(size_t size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#if defined(DEBUG_REDZONE) || defined(KASAN)
	unsigned long osize = size;
#endif

	MPASS((flags & M_EXEC) == 0);

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (__predict_false(size > kmem_zmax))
		return (malloc_large(size, mtp, DOMAINSET_RR(), flags
		    DEBUG_REDZONE_ARG));

	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
	va = uma_zalloc_arg(zone, zone, flags);
	if (va != NULL) {
		size = zone->uz_size;
		if ((flags & M_ZERO) == 0) {
			kmsan_mark(va, size, KMSAN_STATE_UNINIT);
			kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
		}
	}
	malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
#ifdef KASAN
	if (va != NULL)
		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
#endif
	return ((void *) va);
}

static void *
malloc_domain(size_t *sizep, int *indxp, struct malloc_type *mtp, int domain,
    int flags)
{
	uma_zone_t zone;
	caddr_t va;
	size_t size;
	int indx;

	size = *sizep;
	KASSERT(size <= kmem_zmax && (flags & M_EXEC) == 0,
	    ("malloc_domain: Called with bad flag / size combination"));
	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
	va = uma_zalloc_domain(zone, zone, domain, flags);
	if (va != NULL)
		*sizep = zone->uz_size;
	*indxp = indx;
	return ((void *)va);
}

void *
malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
    int flags)
{
	struct vm_domainset_iter di;
	caddr_t va;
	int domain;
	int indx;
#if defined(KASAN) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

	MPASS((flags & M_EXEC) == 0);

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (__predict_false(size > kmem_zmax))
		return (malloc_large(size, mtp, DOMAINSET_RR(), flags
		    DEBUG_REDZONE_ARG));

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
	do {
		va = malloc_domain(&size, &indx, mtp, domain, flags);
	} while (va == NULL && vm_domainset_iter_policy(&di, &domain) == 0);
	malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
#ifdef KASAN
	if (va != NULL)
		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
#endif
#ifdef KMSAN
	if ((flags & M_ZERO) == 0) {
		kmsan_mark(va, size, KMSAN_STATE_UNINIT);
		kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
	}
#endif
	return (va);
}
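
/*
 * Illustrative call (sketch, not from this file): a NUMA-aware caller can
 * prefer a particular domain, with fallback governed by the iterator
 * policy:
 *
 *	p = malloc_domainset(size, M_TEMP, DOMAINSET_PREF(domain), M_WAITOK);
 */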

/*
 * Allocate an executable area.
 */
void *
malloc_exec(size_t size, struct malloc_type *mtp, int flags)
{

	return (malloc_domainset_exec(size, mtp, DOMAINSET_RR(), flags));
}

void *
malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
    int flags)
{
#if defined(DEBUG_REDZONE) || defined(KASAN)
	unsigned long osize = size;
#endif
#ifdef MALLOC_DEBUG
	caddr_t va;
#endif

	flags |= M_EXEC;

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	return (malloc_large(size, mtp, ds, flags DEBUG_REDZONE_ARG));
}

void *
malloc_aligned(size_t size, size_t align, struct malloc_type *type, int flags)
{
	return (malloc_domainset_aligned(size, align, type, DOMAINSET_RR(),
	    flags));
}

void *
malloc_domainset_aligned(size_t size, size_t align,
    struct malloc_type *mtp, struct domainset *ds, int flags)
{
	void *res;
	size_t asize;

	KASSERT(powerof2(align),
	    ("malloc_domainset_aligned: wrong align %#zx size %#zx",
	    align, size));
	KASSERT(align <= PAGE_SIZE,
	    ("malloc_domainset_aligned: align %#zx (size %#zx) too large",
	    align, size));

	/*
	 * Round the allocation size up to the next power of 2,
	 * because we can only guarantee alignment for
	 * power-of-2-sized allocations.  Further increase the
	 * allocation size to align if the rounded size is less than
	 * align, since malloc zones provide alignment equal to their
	 * size.
	 */
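	/*
	 * Worked example (illustrative): size = 100 with align = 16 gives
	 * asize = 1 << flsl(99) == 128, while size = 100 with align = 128
	 * gives asize = align == 128.
	 */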
	if (size == 0)
		size = 1;
	asize = size <= align ? align : 1UL << flsl(size - 1);

	res = malloc_domainset(asize, mtp, ds, flags);
	KASSERT(res == NULL || ((uintptr_t)res & (align - 1)) == 0,
	    ("malloc_domainset_aligned: result not aligned %p size %#zx "
	    "allocsize %#zx align %#zx", res, size, asize, align));
	return (res);
}

void *
mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray: %zu * %zu overflowed", nmemb, size);

	return (malloc(size * nmemb, type, flags));
}

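/*
 * Illustrative use (sketch): mallocarray(n, sizeof(struct foo), M_TEMP,
 * M_WAITOK) panics instead of silently wrapping when the product
 * n * sizeof(struct foo) would overflow a size_t; "struct foo" is
 * hypothetical.
 */
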
void *
mallocarray_domainset(size_t nmemb, size_t size, struct malloc_type *type,
    struct domainset *ds, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray_domainset: %zu * %zu overflowed", nmemb, size);

	return (malloc_domainset(size * nmemb, type, ds, flags));
}

#if defined(INVARIANTS) && !defined(KASAN)
static void
free_save_type(void *addr, struct malloc_type *mtp, u_long size)
{
	struct malloc_type **mtpp = addr;

	/*
	 * Cache a pointer to the malloc_type that most recently freed
	 * this memory here.  This way we know who is most likely to
	 * have stepped on it later.
	 *
	 * This code assumes that size is a multiple of 8 bytes on
	 * 64-bit machines.
	 */
	mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
	mtpp += (size - sizeof(struct malloc_type *)) /
	    sizeof(struct malloc_type *);
	*mtpp = mtp;
}
#endif

#ifdef MALLOC_DEBUG
static int
free_dbg(void **addrp, struct malloc_type *mtp)
{
	void *addr;

	addr = *addrp;
	KASSERT(mtp->ks_version == M_VERSION, ("free: bad malloc type version"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("free: called with spinlock or critical section held"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return (EJUSTRETURN);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return (EJUSTRETURN);
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	*addrp = redzone_addr_ntor(addr);
#endif

	return (0);
}
#endif

static __always_inline void
_free(void *addr, struct malloc_type *mtp, bool dozero)
{
	uma_zone_t zone;
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("%s(%d): address %p(%p) has not been allocated", __func__,
		    dozero, addr, (void *)((uintptr_t)addr & (~UMA_SLAB_MASK)));

	switch (GET_SLAB_COOKIE(slab)) {
	case __predict_true(SLAB_COOKIE_SLAB_PTR):
		size = zone->uz_size;
#if defined(INVARIANTS) && !defined(KASAN)
		free_save_type(addr, mtp, size);
#endif
		if (dozero) {
			kasan_mark(addr, size, size, 0);
			explicit_bzero(addr, size);
		}
		uma_zfree_arg(zone, addr, slab);
		break;
	case SLAB_COOKIE_MALLOC_LARGE:
		size = malloc_large_size(slab);
		if (dozero) {
			kasan_mark(addr, size, size, 0);
			explicit_bzero(addr, size);
		}
		free_large(addr, size);
		break;
	case SLAB_COOKIE_CONTIG_MALLOC:
		size = round_page(contigmalloc_size(slab));
		if (dozero)
			explicit_bzero(addr, size);
		kmem_free(addr, size);
		break;
	default:
		panic("%s(%d): addr %p slab %p with unknown cookie %d",
		    __func__, dozero, addr, slab, GET_SLAB_COOKIE(slab));
		/* NOTREACHED */
	}
	malloc_type_freed(mtp, size);
}

/*
 *	free:
 *	Free a block of memory allocated by malloc/contigmalloc.
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	_free(addr, mtp, false);
}

/*
 *	zfree:
 *	Zero then free a block of memory allocated by malloc/contigmalloc.
 *	This routine may not block.
 */
void
zfree(void *addr, struct malloc_type *mtp)
{
	_free(addr, mtp, true);
}
10004fab5f00SBjoern A. Zeeb
10014fab5f00SBjoern A. Zeeb /*
100244a8ff31SArchie Cobbs * realloc: change the size of a memory block
100344a8ff31SArchie Cobbs */
100444a8ff31SArchie Cobbs void *
realloc(void * addr,size_t size,struct malloc_type * mtp,int flags)1005bd555da9SConrad Meyer realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
100644a8ff31SArchie Cobbs {
1007dbd51c41SJohn Baldwin #ifndef DEBUG_REDZONE
1008584061b4SJeff Roberson uma_zone_t zone;
10098355f576SJeff Roberson uma_slab_t slab;
1010dbd51c41SJohn Baldwin #endif
101144a8ff31SArchie Cobbs unsigned long alloc;
101244a8ff31SArchie Cobbs void *newaddr;
101344a8ff31SArchie Cobbs
1014bdcc2226SMateusz Guzik KASSERT(mtp->ks_version == M_VERSION,
1015bdcc2226SMateusz Guzik ("realloc: bad malloc type version"));
1016d9e2e68dSMark Johnston KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
10171067a2baSJonathan T. Looney ("realloc: called with spinlock or critical section held"));
10181067a2baSJonathan T. Looney
101944a8ff31SArchie Cobbs /* realloc(NULL, ...) is equivalent to malloc(...) */
102044a8ff31SArchie Cobbs if (addr == NULL)
102163a7e0a3SRobert Watson return (malloc(size, mtp, flags));
102263a7e0a3SRobert Watson
102363a7e0a3SRobert Watson /*
102463a7e0a3SRobert Watson * XXX: Should report free of old memory and alloc of new memory to
102563a7e0a3SRobert Watson * per-CPU stats.
102663a7e0a3SRobert Watson */
102744a8ff31SArchie Cobbs
1028e4eb384bSBosko Milekic #ifdef DEBUG_MEMGUARD
10296d3ed393SMatthew D Fleming if (is_memguard_addr(addr))
10306d3ed393SMatthew D Fleming return (memguard_realloc(addr, size, mtp, flags));
1031e4eb384bSBosko Milekic #endif
1032e4eb384bSBosko Milekic
1033847a2a17SPawel Jakub Dawidek #ifdef DEBUG_REDZONE
1034847a2a17SPawel Jakub Dawidek alloc = redzone_get_size(addr);
1035847a2a17SPawel Jakub Dawidek #else
1036584061b4SJeff Roberson vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
10378355f576SJeff Roberson
103844a8ff31SArchie Cobbs /* Sanity check */
10398355f576SJeff Roberson KASSERT(slab != NULL,
104044a8ff31SArchie Cobbs ("realloc: address %p out of range", (void *)addr));
104144a8ff31SArchie Cobbs
104244a8ff31SArchie Cobbs /* Get the size of the original block */
10439e6544ddSBjoern A. Zeeb switch (GET_SLAB_COOKIE(slab)) {
10449e6544ddSBjoern A. Zeeb case __predict_true(SLAB_COOKIE_SLAB_PTR):
1045584061b4SJeff Roberson alloc = zone->uz_size;
10469e6544ddSBjoern A. Zeeb break;
10479e6544ddSBjoern A. Zeeb case SLAB_COOKIE_MALLOC_LARGE:
10486d6a03d7SJeff Roberson alloc = malloc_large_size(slab);
10499e6544ddSBjoern A. Zeeb break;
10509e6544ddSBjoern A. Zeeb default:
10519e6544ddSBjoern A. Zeeb #ifdef INVARIANTS
10529e6544ddSBjoern A. Zeeb panic("%s: called for addr %p of unsupported allocation type; "
10539e6544ddSBjoern A. Zeeb "slab %p cookie %d", __func__, addr, slab, GET_SLAB_COOKIE(slab));
10549e6544ddSBjoern A. Zeeb #endif
10559e6544ddSBjoern A. Zeeb return (NULL);
10569e6544ddSBjoern A. Zeeb }
105744a8ff31SArchie Cobbs
105844a8ff31SArchie Cobbs /* Reuse the original block if the new size fits and wastes less than half of it (or the block is already minimal) */
105906a53ecfSMark Johnston if (size <= alloc &&
106006a53ecfSMark Johnston (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) {
106106a53ecfSMark Johnston kasan_mark((void *)addr, size, alloc, KASAN_MALLOC_REDZONE);
106244a8ff31SArchie Cobbs return (addr);
106306a53ecfSMark Johnston }
1064847a2a17SPawel Jakub Dawidek #endif /* !DEBUG_REDZONE */
106544a8ff31SArchie Cobbs
106644a8ff31SArchie Cobbs /* Allocate a new, bigger (or smaller) block */
106763a7e0a3SRobert Watson if ((newaddr = malloc(size, mtp, flags)) == NULL)
106844a8ff31SArchie Cobbs return (NULL);
106944a8ff31SArchie Cobbs
107006a53ecfSMark Johnston /*
107106a53ecfSMark Johnston * Copy over original contents. For KASAN, the redzone must be marked
107206a53ecfSMark Johnston * valid before performing the copy.
107306a53ecfSMark Johnston */
10749a7c2de3SMark Johnston kasan_mark(addr, alloc, alloc, 0);
107544a8ff31SArchie Cobbs bcopy(addr, newaddr, min(size, alloc));
107663a7e0a3SRobert Watson free(addr, mtp);
107744a8ff31SArchie Cobbs return (newaddr);
107844a8ff31SArchie Cobbs }
107944a8ff31SArchie Cobbs
108044a8ff31SArchie Cobbs /*
108144a8ff31SArchie Cobbs * reallocf: same as realloc() but free memory on failure.
108244a8ff31SArchie Cobbs */
108344a8ff31SArchie Cobbs void *
1084bd555da9SConrad Meyer reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
108544a8ff31SArchie Cobbs {
108644a8ff31SArchie Cobbs void *mem;
108744a8ff31SArchie Cobbs
108863a7e0a3SRobert Watson if ((mem = realloc(addr, size, mtp, flags)) == NULL)
108963a7e0a3SRobert Watson free(addr, mtp);
109044a8ff31SArchie Cobbs return (mem);
109144a8ff31SArchie Cobbs }
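/*
 * Illustrative sketch: with plain realloc() the caller must still free
 * the old block on failure, while reallocf() does so itself.  `buf',
 * `nsize' and the error handling are hypothetical.
 *
 *	nbuf = realloc(buf, nsize, M_TEMP, M_NOWAIT);
 *	if (nbuf == NULL) {
 *		free(buf, M_TEMP);	-- old block is still ours
 *		return (ENOMEM);
 *	}
 *	buf = nbuf;
 *
 * or, equivalently:
 *
 *	if ((buf = reallocf(buf, nsize, M_TEMP, M_NOWAIT)) == NULL)
 *		return (ENOMEM);
 */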
109244a8ff31SArchie Cobbs
10935d4bf057SVladimir Kondratyev /*
109416b971edSMateusz Guzik * malloc_size: returns the number of bytes allocated for a request of the
109516b971edSMateusz Guzik * specified size
109616b971edSMateusz Guzik */
109716b971edSMateusz Guzik size_t
109816b971edSMateusz Guzik malloc_size(size_t size)
109916b971edSMateusz Guzik {
110016b971edSMateusz Guzik int indx;
110116b971edSMateusz Guzik
110216b971edSMateusz Guzik if (size > kmem_zmax)
11031c30cf95SMark Johnston return (round_page(size));
110416b971edSMateusz Guzik if (size & KMEM_ZMASK)
110516b971edSMateusz Guzik size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
110616b971edSMateusz Guzik indx = kmemsize[size >> KMEM_ZSHIFT];
110716b971edSMateusz Guzik return (kmemzones[indx].kz_size);
110816b971edSMateusz Guzik }
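/*
 * Worked example, assuming the stock kmemzones buckets (16, 32, 64,
 * 128, ... bytes): a 100-byte request is rounded up by KMEM_ZBASE to
 * 112 and mapped through kmemsize[] to the 128-byte zone, so
 * malloc_size(100) returns 128.  Requests above kmem_zmax are simply
 * rounded to a multiple of the page size.
 */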
110916b971edSMateusz Guzik
111016b971edSMateusz Guzik /*
11115d4bf057SVladimir Kondratyev * malloc_usable_size: returns the usable size of the allocation.
11125d4bf057SVladimir Kondratyev */
11135d4bf057SVladimir Kondratyev size_t
11145d4bf057SVladimir Kondratyev malloc_usable_size(const void *addr)
11155d4bf057SVladimir Kondratyev {
11165d4bf057SVladimir Kondratyev #ifndef DEBUG_REDZONE
11175d4bf057SVladimir Kondratyev uma_zone_t zone;
11185d4bf057SVladimir Kondratyev uma_slab_t slab;
11195d4bf057SVladimir Kondratyev #endif
11205d4bf057SVladimir Kondratyev u_long size;
11215d4bf057SVladimir Kondratyev
11225d4bf057SVladimir Kondratyev if (addr == NULL)
11235d4bf057SVladimir Kondratyev return (0);
11245d4bf057SVladimir Kondratyev
11255d4bf057SVladimir Kondratyev #ifdef DEBUG_MEMGUARD
11265d4bf057SVladimir Kondratyev if (is_memguard_addr(__DECONST(void *, addr)))
11275d4bf057SVladimir Kondratyev return (memguard_get_req_size(addr));
11285d4bf057SVladimir Kondratyev #endif
11295d4bf057SVladimir Kondratyev
11305d4bf057SVladimir Kondratyev #ifdef DEBUG_REDZONE
11315d4bf057SVladimir Kondratyev size = redzone_get_size(__DECONST(void *, addr));
11325d4bf057SVladimir Kondratyev #else
11335d4bf057SVladimir Kondratyev vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
11345d4bf057SVladimir Kondratyev if (slab == NULL)
1135c0df224bSBjoern A. Zeeb panic("malloc_usable_size: address %p(%p) is not allocated",
11365d4bf057SVladimir Kondratyev addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));
11375d4bf057SVladimir Kondratyev
11389e6544ddSBjoern A. Zeeb switch (GET_SLAB_COOKIE(slab)) {
11399e6544ddSBjoern A. Zeeb case __predict_true(SLAB_COOKIE_SLAB_PTR):
11405d4bf057SVladimir Kondratyev size = zone->uz_size;
11419e6544ddSBjoern A. Zeeb break;
11429e6544ddSBjoern A. Zeeb case SLAB_COOKIE_MALLOC_LARGE:
11435d4bf057SVladimir Kondratyev size = malloc_large_size(slab);
11449e6544ddSBjoern A. Zeeb break;
1145*c5cf4b64SBjoern A. Zeeb case SLAB_COOKIE_CONTIG_MALLOC:
1146*c5cf4b64SBjoern A. Zeeb size = round_page(contigmalloc_size(slab));
1147*c5cf4b64SBjoern A. Zeeb break;
11489e6544ddSBjoern A. Zeeb default:
11499e6544ddSBjoern A. Zeeb __assert_unreachable();
11509e6544ddSBjoern A. Zeeb size = 0;
11519e6544ddSBjoern A. Zeeb break;
11529e6544ddSBjoern A. Zeeb }
11535d4bf057SVladimir Kondratyev #endif
1154880b670cSMark Johnston
1155880b670cSMark Johnston /*
1156880b670cSMark Johnston * Unmark the redzone to avoid reports from consumers who are
1157880b670cSMark Johnston * (presumably) about to use the full allocation size.
1158880b670cSMark Johnston */
1159880b670cSMark Johnston kasan_mark(addr, size, size, 0);
1160880b670cSMark Johnston
11615d4bf057SVladimir Kondratyev return (size);
11625d4bf057SVladimir Kondratyev }
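/*
 * Illustrative sketch: since requests are rounded up to a bucket size,
 * a caller may use the slack without reallocating.  `buf' and `len'
 * are hypothetical.
 *
 *	buf = malloc(len, M_TEMP, M_WAITOK);
 *	avail = malloc_usable_size(buf);	-- avail >= len
 *	-- all `avail' bytes at `buf' may be used safely
 */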
11635d4bf057SVladimir Kondratyev
1164c70af487SAlan Cox CTASSERT(VM_KMEM_SIZE_SCALE >= 1);
1165c70af487SAlan Cox
11665df87b21SJeff Roberson /*
1167c70af487SAlan Cox * Initialize the kernel memory (kmem) arena.
11685df87b21SJeff Roberson */
11695df87b21SJeff Roberson void
11705df87b21SJeff Roberson kmeminit(void)
11715df87b21SJeff Roberson {
1172af3b2549SHans Petter Selasky u_long mem_size;
1173af3b2549SHans Petter Selasky u_long tmp;
117469ef67f9SJason Evans
1175af3b2549SHans Petter Selasky #ifdef VM_KMEM_SIZE
1176af3b2549SHans Petter Selasky if (vm_kmem_size == 0)
1177af3b2549SHans Petter Selasky vm_kmem_size = VM_KMEM_SIZE;
1178af3b2549SHans Petter Selasky #endif
1179af3b2549SHans Petter Selasky #ifdef VM_KMEM_SIZE_MIN
1180af3b2549SHans Petter Selasky if (vm_kmem_size_min == 0)
1181af3b2549SHans Petter Selasky vm_kmem_size_min = VM_KMEM_SIZE_MIN;
1182af3b2549SHans Petter Selasky #endif
1183af3b2549SHans Petter Selasky #ifdef VM_KMEM_SIZE_MAX
1184af3b2549SHans Petter Selasky if (vm_kmem_size_max == 0)
1185af3b2549SHans Petter Selasky vm_kmem_size_max = VM_KMEM_SIZE_MAX;
1186af3b2549SHans Petter Selasky #endif
11878a58a9f6SJohn Dyson /*
1188c70af487SAlan Cox * Calculate the amount of kernel virtual address (KVA) space that is
1189c70af487SAlan Cox * preallocated to the kmem arena. In order to support a wide range
1190c70af487SAlan Cox * of machines, it is a function of the physical memory size,
1191c70af487SAlan Cox * specifically,
11928a58a9f6SJohn Dyson *
1193c70af487SAlan Cox * min(max(physical memory size / VM_KMEM_SIZE_SCALE,
1194c70af487SAlan Cox * VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
1195c70af487SAlan Cox *
1196c70af487SAlan Cox * Every architecture must define an integral value for
1197c70af487SAlan Cox * VM_KMEM_SIZE_SCALE. However, the definitions of VM_KMEM_SIZE_MIN
1198c70af487SAlan Cox * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
1199c70af487SAlan Cox * ceiling on this preallocation, are optional. Typically,
1200c70af487SAlan Cox * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
1201c70af487SAlan Cox * a given architecture.
12028a58a9f6SJohn Dyson */
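/*
 * Worked example on a hypothetical machine with 4 GiB of RAM, 4 KiB
 * pages and VM_KMEM_SIZE_SCALE == 1: mem_size is 1M pages, so the
 * formula yields min(max(4 GiB, VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX),
 * i.e. 4 GiB unless the architecture's ceiling clamps it lower.
 */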
120344f1c916SBryan Drewery mem_size = vm_cnt.v_page_count;
12047c51714eSSean Bruno if (mem_size <= 32768) /* delphij XXX 32768 pages == 128MB with 4K pages */
12057c51714eSSean Bruno kmem_zmax = PAGE_SIZE;
12068a58a9f6SJohn Dyson
1207c70af487SAlan Cox if (vm_kmem_size_scale < 1)
1208c70af487SAlan Cox vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
1209c70af487SAlan Cox
1210af3b2549SHans Petter Selasky /*
1211af3b2549SHans Petter Selasky * Check if we should use defaults for the "vm_kmem_size"
1212af3b2549SHans Petter Selasky * variable:
1213af3b2549SHans Petter Selasky */
1214af3b2549SHans Petter Selasky if (vm_kmem_size == 0) {
121528b740daSKonstantin Belousov vm_kmem_size = mem_size / vm_kmem_size_scale;
121628b740daSKonstantin Belousov vm_kmem_size = vm_kmem_size * PAGE_SIZE < vm_kmem_size ?
121728b740daSKonstantin Belousov vm_kmem_size_max : vm_kmem_size * PAGE_SIZE;
1218c70af487SAlan Cox if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
12190e5179e4SStephane E. Potvin vm_kmem_size = vm_kmem_size_min;
1220479439b4SDag-Erling Smørgrav if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
1221479439b4SDag-Erling Smørgrav vm_kmem_size = vm_kmem_size_max;
1222af3b2549SHans Petter Selasky }
122328b740daSKonstantin Belousov if (vm_kmem_size == 0)
122428b740daSKonstantin Belousov panic("Tune VM_KMEM_SIZE_* for the platform");
12258a58a9f6SJohn Dyson
122627b8623fSDavid Greenman /*
1227af3b2549SHans Petter Selasky * The amount of KVA space that is preallocated to the
1228c70af487SAlan Cox * kmem arena can be set statically at compile-time or manually
1229c70af487SAlan Cox * through the kernel environment. However, it is still limited to
1230c70af487SAlan Cox * twice the physical memory size, which has been sufficient to handle
1231c70af487SAlan Cox * the most severe cases of external fragmentation in the kmem arena.
123227b8623fSDavid Greenman */
1233c749c003SAlan Cox if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
1234c749c003SAlan Cox vm_kmem_size = 2 * mem_size * PAGE_SIZE;
12358a58a9f6SJohn Dyson
1236e137643eSOlivier Houchard vm_kmem_size = round_page(vm_kmem_size);
12376faf45b3SMark Johnston
12386faf45b3SMark Johnston /*
123989786088SMark Johnston * With KASAN or KMSAN enabled, dynamically allocated kernel memory is
124089786088SMark Johnston * shadowed. Account for this when setting the UMA limit.
12416faf45b3SMark Johnston */
124289786088SMark Johnston #if defined(KASAN)
12436faf45b3SMark Johnston vm_kmem_size = (vm_kmem_size * KASAN_SHADOW_SCALE) /
12446faf45b3SMark Johnston (KASAN_SHADOW_SCALE + 1);
124589786088SMark Johnston #elif defined(KMSAN)
124689786088SMark Johnston vm_kmem_size /= 3;
12476faf45b3SMark Johnston #endif
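/*
 * Worked example of the scaling above: with KASAN's shadow scale of 8,
 * each 8 bytes of kernel memory require 1 byte of shadow, so only 8/9
 * of the budget is available for real allocations.  KMSAN keeps two
 * full-size maps (shadow and origin) per allocation, hence the divide
 * by three.
 */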
12486faf45b3SMark Johnston
1249e3813573SMatthew D Fleming #ifdef DEBUG_MEMGUARD
1250f806cdcfSMatthew D Fleming tmp = memguard_fudge(vm_kmem_size, kernel_map);
1251e3813573SMatthew D Fleming #else
1252e3813573SMatthew D Fleming tmp = vm_kmem_size;
1253e3813573SMatthew D Fleming #endif
12542e47807cSJeff Roberson uma_set_limit(tmp);
12558355f576SJeff Roberson
1256e4eb384bSBosko Milekic #ifdef DEBUG_MEMGUARD
1257e4eb384bSBosko Milekic /*
1258e4eb384bSBosko Milekic * Initialize MemGuard if support compiled in. MemGuard is a
1259e4eb384bSBosko Milekic * replacement allocator used for detecting tamper-after-free
1260e4eb384bSBosko Milekic * scenarios as they occur. It is only used for debugging.
1261e4eb384bSBosko Milekic */
12622e47807cSJeff Roberson memguard_init(kernel_arena);
1263e4eb384bSBosko Milekic #endif
12645df87b21SJeff Roberson }
12655df87b21SJeff Roberson
12665df87b21SJeff Roberson /*
12675df87b21SJeff Roberson * Initialize the kernel memory allocator
12685df87b21SJeff Roberson */
12695df87b21SJeff Roberson /* ARGSUSED */
12705df87b21SJeff Roberson static void
12715df87b21SJeff Roberson mallocinit(void *dummy)
12725df87b21SJeff Roberson {
12735df87b21SJeff Roberson int i;
12745df87b21SJeff Roberson uint8_t indx;
12755df87b21SJeff Roberson
12765df87b21SJeff Roberson mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);
12775df87b21SJeff Roberson
12785df87b21SJeff Roberson kmeminit();
1279e4eb384bSBosko Milekic
12807001d850SXin LI if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
12817001d850SXin LI kmem_zmax = KMEM_ZMAX;
12827001d850SXin LI
12836f267175SJeff Roberson for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
12846f267175SJeff Roberson int size = kmemzones[indx].kz_size;
1285eaa17d42SRyan Libby const char *name = kmemzones[indx].kz_name;
12863b15beb3SKonstantin Belousov size_t align;
1287d7854da1SMatthew D Fleming int subzone;
12888355f576SJeff Roberson
12893b15beb3SKonstantin Belousov align = UMA_ALIGN_PTR;
12903b15beb3SKonstantin Belousov if (powerof2(size) && size > sizeof(void *))
12910781c79dSKonstantin Belousov align = MIN(size, PAGE_SIZE) - 1;
1292d7854da1SMatthew D Fleming for (subzone = 0; subzone < numzones; subzone++) {
1293d7854da1SMatthew D Fleming kmemzones[indx].kz_zone[subzone] =
1294d7854da1SMatthew D Fleming uma_zcreate(name, size,
129510094910SMark Johnston #if defined(INVARIANTS) && !defined(KASAN) && !defined(KMSAN)
12968f70816cSJeff Roberson mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
12978efc4effSJeff Roberson #else
12988efc4effSJeff Roberson NULL, NULL, NULL, NULL,
12998efc4effSJeff Roberson #endif
13003b15beb3SKonstantin Belousov align, UMA_ZONE_MALLOC);
1301d7854da1SMatthew D Fleming }
13028355f576SJeff Roberson for (; i <= size; i += KMEM_ZBASE)
13036f267175SJeff Roberson kmemsize[i >> KMEM_ZSHIFT] = indx;
1304df8bae1dSRodney W. Grimes }
1305254c6cb3SPoul-Henning Kamp }
1306af3b2549SHans Petter Selasky SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);
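/*
 * Illustrative consequence of the loop above, assuming the stock
 * bucket sizes: power-of-two buckets larger than a pointer are aligned
 * to their own size, so the result of malloc(256, ...) is 256-byte
 * aligned, while other buckets fall back to UMA_ALIGN_PTR.
 */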
1307254c6cb3SPoul-Henning Kamp
1308db669378SPeter Wemm void
130987efd4d5SRobert Watson malloc_init(void *data)
1310254c6cb3SPoul-Henning Kamp {
131163a7e0a3SRobert Watson struct malloc_type_internal *mtip;
131263a7e0a3SRobert Watson struct malloc_type *mtp;
131363a7e0a3SRobert Watson
1314deab5717SMitchell Horne KASSERT(vm_cnt.v_page_count != 0,
1315deab5717SMitchell Horne ("malloc_init() called before vm_mem_init()"));
131663a7e0a3SRobert Watson
131763a7e0a3SRobert Watson mtp = data;
1318bdcc2226SMateusz Guzik if (mtp->ks_version != M_VERSION)
1319e25d8b67SMateusz Guzik panic("malloc_init: type %s with unsupported version %lu",
1320e25d8b67SMateusz Guzik mtp->ks_shortdesc, mtp->ks_version);
1321bb1c7df8SRobert Watson
1322bdcc2226SMateusz Guzik mtip = &mtp->ks_mti;
13238e6526e9SMateusz Guzik mtip->mti_stats = uma_zalloc_pcpu(pcpu_zone_64, M_WAITOK | M_ZERO);
1324c9e05ccdSMateusz Guzik mtp_set_subzone(mtp);
1325254c6cb3SPoul-Henning Kamp
13266f267175SJeff Roberson mtx_lock(&malloc_mtx);
132763a7e0a3SRobert Watson mtp->ks_next = kmemstatistics;
132863a7e0a3SRobert Watson kmemstatistics = mtp;
1329cd814b26SRobert Watson kmemcount++;
13306f267175SJeff Roberson mtx_unlock(&malloc_mtx);
1331df8bae1dSRodney W. Grimes }
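/*
 * malloc_init() is rarely called directly; a subsystem instead
 * declares its type with MALLOC_DEFINE(), which registers it through
 * SYSINIT at load time.  Sketch (M_FROBD is a hypothetical type):
 *
 *	MALLOC_DEFINE(M_FROBD, "frobd", "frobd driver state");
 *	...
 *	sc = malloc(sizeof(*sc), M_FROBD, M_WAITOK | M_ZERO);
 */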
1332db669378SPeter Wemm
1333db669378SPeter Wemm void
133487efd4d5SRobert Watson malloc_uninit(void *data)
1335db669378SPeter Wemm {
133663a7e0a3SRobert Watson struct malloc_type_internal *mtip;
13372a143d5bSPawel Jakub Dawidek struct malloc_type_stats *mtsp;
133863a7e0a3SRobert Watson struct malloc_type *mtp, *temp;
13392a143d5bSPawel Jakub Dawidek long temp_allocs, temp_bytes;
13402a143d5bSPawel Jakub Dawidek int i;
1341db669378SPeter Wemm
134263a7e0a3SRobert Watson mtp = data;
1343bdcc2226SMateusz Guzik KASSERT(mtp->ks_version == M_VERSION,
1344bdcc2226SMateusz Guzik ("malloc_uninit: bad malloc type version"));
1345bb1c7df8SRobert Watson
13466f267175SJeff Roberson mtx_lock(&malloc_mtx);
1347bdcc2226SMateusz Guzik mtip = &mtp->ks_mti;
134863a7e0a3SRobert Watson if (mtp != kmemstatistics) {
134963a7e0a3SRobert Watson for (temp = kmemstatistics; temp != NULL;
135063a7e0a3SRobert Watson temp = temp->ks_next) {
1351f121baaaSBrian Somers if (temp->ks_next == mtp) {
135263a7e0a3SRobert Watson temp->ks_next = mtp->ks_next;
1353f121baaaSBrian Somers break;
1354db669378SPeter Wemm }
1355f121baaaSBrian Somers }
1356f121baaaSBrian Somers KASSERT(temp,
1357f121baaaSBrian Somers ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
135863a7e0a3SRobert Watson } else
135963a7e0a3SRobert Watson kmemstatistics = mtp->ks_next;
1360cd814b26SRobert Watson kmemcount--;
13616f267175SJeff Roberson mtx_unlock(&malloc_mtx);
13622a143d5bSPawel Jakub Dawidek
13632a143d5bSPawel Jakub Dawidek /*
13642a143d5bSPawel Jakub Dawidek * Look for memory leaks.
13652a143d5bSPawel Jakub Dawidek */
13662a143d5bSPawel Jakub Dawidek temp_allocs = temp_bytes = 0;
13679afff6b1SMateusz Guzik for (i = 0; i <= mp_maxid; i++) {
13689afff6b1SMateusz Guzik mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
13692a143d5bSPawel Jakub Dawidek temp_allocs += mtsp->mts_numallocs;
13702a143d5bSPawel Jakub Dawidek temp_allocs -= mtsp->mts_numfrees;
13712a143d5bSPawel Jakub Dawidek temp_bytes += mtsp->mts_memalloced;
13722a143d5bSPawel Jakub Dawidek temp_bytes -= mtsp->mts_memfreed;
13732a143d5bSPawel Jakub Dawidek }
13742a143d5bSPawel Jakub Dawidek if (temp_allocs > 0 || temp_bytes > 0) {
13752a143d5bSPawel Jakub Dawidek printf("Warning: memory type %s leaked memory on destroy "
13762a143d5bSPawel Jakub Dawidek "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
13772a143d5bSPawel Jakub Dawidek temp_allocs, temp_bytes);
13782a143d5bSPawel Jakub Dawidek }
13792a143d5bSPawel Jakub Dawidek
13808e6526e9SMateusz Guzik uma_zfree_pcpu(pcpu_zone_64, mtip->mti_stats);
1381db669378SPeter Wemm }
13826f267175SJeff Roberson
1383d362c40dSPawel Jakub Dawidek struct malloc_type *
1384d362c40dSPawel Jakub Dawidek malloc_desc2type(const char *desc)
1385d362c40dSPawel Jakub Dawidek {
1386d362c40dSPawel Jakub Dawidek struct malloc_type *mtp;
1387d362c40dSPawel Jakub Dawidek
1388d362c40dSPawel Jakub Dawidek mtx_assert(&malloc_mtx, MA_OWNED);
1389d362c40dSPawel Jakub Dawidek for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1390d362c40dSPawel Jakub Dawidek if (strcmp(mtp->ks_shortdesc, desc) == 0)
1391d362c40dSPawel Jakub Dawidek return (mtp);
1392d362c40dSPawel Jakub Dawidek }
1393d362c40dSPawel Jakub Dawidek return (NULL);
1394d362c40dSPawel Jakub Dawidek }
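/*
 * Illustrative sketch of a lookup; malloc_mtx must be held, as
 * asserted above, and the result is only stable while it remains held.
 * "temp" is M_TEMP's short description.
 *
 *	mtx_lock(&malloc_mtx);
 *	mtp = malloc_desc2type("temp");
 *	... use mtp ...
 *	mtx_unlock(&malloc_mtx);
 */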
1395d362c40dSPawel Jakub Dawidek
13966f267175SJeff Roberson static int
1397cd814b26SRobert Watson sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
1398cd814b26SRobert Watson {
1399cd814b26SRobert Watson struct malloc_type_stream_header mtsh;
1400cd814b26SRobert Watson struct malloc_type_internal *mtip;
14019afff6b1SMateusz Guzik struct malloc_type_stats *mtsp, zeromts;
1402cd814b26SRobert Watson struct malloc_type_header mth;
1403cd814b26SRobert Watson struct malloc_type *mtp;
14044e657159SMatthew D Fleming int error, i;
1405cd814b26SRobert Watson struct sbuf sbuf;
1406cd814b26SRobert Watson
140700f0e671SMatthew D Fleming error = sysctl_wire_old_buffer(req, 0);
140800f0e671SMatthew D Fleming if (error != 0)
140900f0e671SMatthew D Fleming return (error);
14104e657159SMatthew D Fleming sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
14111eafc078SIan Lepore sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
1412cd814b26SRobert Watson mtx_lock(&malloc_mtx);
1413cd814b26SRobert Watson
14149afff6b1SMateusz Guzik bzero(&zeromts, sizeof(zeromts));
14159afff6b1SMateusz Guzik
1416cd814b26SRobert Watson /*
1417cd814b26SRobert Watson * Insert stream header.
1418cd814b26SRobert Watson */
1419cd814b26SRobert Watson bzero(&mtsh, sizeof(mtsh));
1420cd814b26SRobert Watson mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
1421cd814b26SRobert Watson mtsh.mtsh_maxcpus = MAXCPU;
1422cd814b26SRobert Watson mtsh.mtsh_count = kmemcount;
14234e657159SMatthew D Fleming (void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));
1424cd814b26SRobert Watson
1425cd814b26SRobert Watson /*
1426cd814b26SRobert Watson * Insert alternating sequence of type headers and type statistics.
1427cd814b26SRobert Watson */
1428cd814b26SRobert Watson for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1429bdcc2226SMateusz Guzik mtip = &mtp->ks_mti;
1430cd814b26SRobert Watson
1431cd814b26SRobert Watson /*
1432cd814b26SRobert Watson * Insert type header.
1433cd814b26SRobert Watson */
1434cd814b26SRobert Watson bzero(&mth, sizeof(mth));
1435cd814b26SRobert Watson strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
14364e657159SMatthew D Fleming (void)sbuf_bcat(&sbuf, &mth, sizeof(mth));
1437cd814b26SRobert Watson
1438cd814b26SRobert Watson /*
1439cd814b26SRobert Watson * Insert type statistics for each CPU.
1440cd814b26SRobert Watson */
14419afff6b1SMateusz Guzik for (i = 0; i <= mp_maxid; i++) {
14429afff6b1SMateusz Guzik mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
14439afff6b1SMateusz Guzik (void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));
1444cd814b26SRobert Watson }
14459afff6b1SMateusz Guzik /*
14469afff6b1SMateusz Guzik * Zero-fill the remaining CPU slots so that MAXCPU records are always emitted per type.
14479afff6b1SMateusz Guzik */
14489afff6b1SMateusz Guzik for (; i < MAXCPU; i++) {
14499afff6b1SMateusz Guzik (void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));
14509afff6b1SMateusz Guzik }
1451cd814b26SRobert Watson }
1452cd814b26SRobert Watson mtx_unlock(&malloc_mtx);
14534e657159SMatthew D Fleming error = sbuf_finish(&sbuf);
1454cd814b26SRobert Watson sbuf_delete(&sbuf);
1455cd814b26SRobert Watson return (error);
1456cd814b26SRobert Watson }
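/*
 * Resulting stream layout, as parsed by libmemstat(3):
 *
 *	struct malloc_type_stream_header (version, MAXCPU, type count)
 *	then, for each type:
 *		struct malloc_type_header	(type name)
 *		MAXCPU x struct malloc_type_stats
 *
 * CPUs above mp_maxid are emitted as zeroed records so each per-type
 * block has a fixed size.
 */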
1457cd814b26SRobert Watson
14587029da5cSPawel Biernacki SYSCTL_PROC(_kern, OID_AUTO, malloc_stats,
14597029da5cSPawel Biernacki CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_MPSAFE, 0, 0,
14607029da5cSPawel Biernacki sysctl_kern_malloc_stats, "s,malloc_type_ustats",
1461cd814b26SRobert Watson "Return malloc types");
1462cd814b26SRobert Watson
1463cd814b26SRobert Watson SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
1464cd814b26SRobert Watson "Count of kernel malloc types");
1465cd814b26SRobert Watson
146691dd776cSJohn Birrell void
146791dd776cSJohn Birrell malloc_type_list(malloc_type_list_func_t *func, void *arg)
146891dd776cSJohn Birrell {
146991dd776cSJohn Birrell struct malloc_type *mtp, **bufmtp;
147091dd776cSJohn Birrell int count, i;
147191dd776cSJohn Birrell size_t buflen;
147291dd776cSJohn Birrell
147391dd776cSJohn Birrell mtx_lock(&malloc_mtx);
147491dd776cSJohn Birrell restart:
147591dd776cSJohn Birrell mtx_assert(&malloc_mtx, MA_OWNED);
147691dd776cSJohn Birrell count = kmemcount;
147791dd776cSJohn Birrell mtx_unlock(&malloc_mtx);
147891dd776cSJohn Birrell
147991dd776cSJohn Birrell buflen = sizeof(struct malloc_type *) * count;
148091dd776cSJohn Birrell bufmtp = malloc(buflen, M_TEMP, M_WAITOK);
148191dd776cSJohn Birrell
148291dd776cSJohn Birrell mtx_lock(&malloc_mtx);
148391dd776cSJohn Birrell
148491dd776cSJohn Birrell if (count < kmemcount) {
148591dd776cSJohn Birrell free(bufmtp, M_TEMP);
148691dd776cSJohn Birrell goto restart;
148791dd776cSJohn Birrell }
148891dd776cSJohn Birrell
148991dd776cSJohn Birrell for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
149091dd776cSJohn Birrell bufmtp[i] = mtp;
149191dd776cSJohn Birrell
149291dd776cSJohn Birrell mtx_unlock(&malloc_mtx);
149391dd776cSJohn Birrell
149491dd776cSJohn Birrell for (i = 0; i < count; i++)
149591dd776cSJohn Birrell (func)(bufmtp[i], arg);
149691dd776cSJohn Birrell
149791dd776cSJohn Birrell free(bufmtp, M_TEMP);
149891dd776cSJohn Birrell }
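/*
 * Illustrative sketch of a consumer (the callback name is
 * hypothetical); this interface exists so subsystems such as DTrace
 * can walk the type list without holding malloc_mtx during callbacks.
 *
 *	static void
 *	print_type(struct malloc_type *mtp, void *arg __unused)
 *	{
 *		printf("%s\n", mtp->ks_shortdesc);
 *	}
 *	...
 *	malloc_type_list(print_type, NULL);
 */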
149991dd776cSJohn Birrell
1500909ed16cSRobert Watson #ifdef DDB
150146d70077SConrad Meyer static int64_t
150246d70077SConrad Meyer get_malloc_stats(const struct malloc_type_internal *mtip, uint64_t *allocs,
150346d70077SConrad Meyer uint64_t *inuse)
1504909ed16cSRobert Watson {
150546d70077SConrad Meyer const struct malloc_type_stats *mtsp;
150646d70077SConrad Meyer uint64_t frees, alloced, freed;
1507909ed16cSRobert Watson int i;
1508909ed16cSRobert Watson
150946d70077SConrad Meyer *allocs = 0;
1510909ed16cSRobert Watson frees = 0;
151124076d13SRobert Watson alloced = 0;
151224076d13SRobert Watson freed = 0;
15139afff6b1SMateusz Guzik for (i = 0; i <= mp_maxid; i++) {
15149afff6b1SMateusz Guzik mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
151546d70077SConrad Meyer
151646d70077SConrad Meyer *allocs += mtsp->mts_numallocs;
151726e9d9b0SMark Johnston frees += mtsp->mts_numfrees;
151826e9d9b0SMark Johnston alloced += mtsp->mts_memalloced;
151926e9d9b0SMark Johnston freed += mtsp->mts_memfreed;
1520909ed16cSRobert Watson }
152146d70077SConrad Meyer *inuse = *allocs - frees;
152246d70077SConrad Meyer return (alloced - freed);
152346d70077SConrad Meyer }
152446d70077SConrad Meyer
1525c84c5e00SMitchell Horne DB_SHOW_COMMAND_FLAGS(malloc, db_show_malloc, DB_CMD_MEMSAFE)
152646d70077SConrad Meyer {
152746d70077SConrad Meyer const char *fmt_hdr, *fmt_entry;
152846d70077SConrad Meyer struct malloc_type *mtp;
152946d70077SConrad Meyer uint64_t allocs, inuse;
153046d70077SConrad Meyer int64_t size;
153146d70077SConrad Meyer /* variables for sorting */
153246d70077SConrad Meyer struct malloc_type *last_mtype, *cur_mtype;
153346d70077SConrad Meyer int64_t cur_size, last_size;
153446d70077SConrad Meyer int ties;
153546d70077SConrad Meyer
153646d70077SConrad Meyer if (modif[0] == 'i') {
153746d70077SConrad Meyer fmt_hdr = "%s,%s,%s,%s\n";
153846d70077SConrad Meyer fmt_entry = "\"%s\",%ju,%jdK,%ju\n";
153946d70077SConrad Meyer } else {
154046d70077SConrad Meyer fmt_hdr = "%18s %12s %12s %12s\n";
154146d70077SConrad Meyer fmt_entry = "%18s %12ju %12jdK %12ju\n";
154246d70077SConrad Meyer }
154346d70077SConrad Meyer
154446d70077SConrad Meyer db_printf(fmt_hdr, "Type", "InUse", "MemUse", "Requests");
154546d70077SConrad Meyer
154646d70077SConrad Meyer /* Select sort, largest size first. */
154746d70077SConrad Meyer last_mtype = NULL;
154846d70077SConrad Meyer last_size = INT64_MAX;
154946d70077SConrad Meyer for (;;) {
155046d70077SConrad Meyer cur_mtype = NULL;
155146d70077SConrad Meyer cur_size = -1;
155246d70077SConrad Meyer ties = 0;
155346d70077SConrad Meyer
155446d70077SConrad Meyer for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
155546d70077SConrad Meyer /*
155646d70077SConrad Meyer * In the case of size ties, print out mtypes
155746d70077SConrad Meyer * in the order they are encountered. That is,
155846d70077SConrad Meyer * when we encounter the most recently output
155946d70077SConrad Meyer * mtype, we have already printed all preceding
156046d70077SConrad Meyer * ties, and we must print all following ties.
156146d70077SConrad Meyer */
156246d70077SConrad Meyer if (mtp == last_mtype) {
156346d70077SConrad Meyer ties = 1;
156446d70077SConrad Meyer continue;
156546d70077SConrad Meyer }
1566bdcc2226SMateusz Guzik size = get_malloc_stats(&mtp->ks_mti, &allocs,
156746d70077SConrad Meyer &inuse);
156846d70077SConrad Meyer if (size > cur_size && size < last_size + ties) {
156946d70077SConrad Meyer cur_size = size;
157046d70077SConrad Meyer cur_mtype = mtp;
157146d70077SConrad Meyer }
157246d70077SConrad Meyer }
157346d70077SConrad Meyer if (cur_mtype == NULL)
157446d70077SConrad Meyer break;
157546d70077SConrad Meyer
1576bdcc2226SMateusz Guzik size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
157746d70077SConrad Meyer db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
157846d70077SConrad Meyer howmany(size, 1024), allocs);
157946d70077SConrad Meyer
1580687c94aaSJohn Baldwin if (db_pager_quit)
1581687c94aaSJohn Baldwin break;
158246d70077SConrad Meyer
158346d70077SConrad Meyer last_mtype = cur_mtype;
158446d70077SConrad Meyer last_size = cur_size;
1585909ed16cSRobert Watson }
1586909ed16cSRobert Watson }
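/*
 * DDB usage at the db> prompt (output sorted by MemUse, largest
 * first):
 *
 *	show malloc	-- column-formatted table
 *	show malloc/i	-- the same data as comma-separated values
 */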
1587d7854da1SMatthew D Fleming
1588d7854da1SMatthew D Fleming #if MALLOC_DEBUG_MAXZONES > 1
1589d7854da1SMatthew D Fleming DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
1590d7854da1SMatthew D Fleming {
1591d7854da1SMatthew D Fleming struct malloc_type_internal *mtip;
1592d7854da1SMatthew D Fleming struct malloc_type *mtp;
1593d7854da1SMatthew D Fleming u_int subzone;
1594d7854da1SMatthew D Fleming
1595d7854da1SMatthew D Fleming if (!have_addr) {
1596d7854da1SMatthew D Fleming db_printf("Usage: show multizone_matches <malloc type/addr>\n");
1597d7854da1SMatthew D Fleming return;
1598d7854da1SMatthew D Fleming }
1599d7854da1SMatthew D Fleming mtp = (void *)addr;
1600bdcc2226SMateusz Guzik if (mtp->ks_version != M_VERSION) {
1601bdcc2226SMateusz Guzik db_printf("Version %lx does not match expected %x\n",
1602bdcc2226SMateusz Guzik mtp->ks_version, M_VERSION);
1603d7854da1SMatthew D Fleming return;
1604d7854da1SMatthew D Fleming }
1605d7854da1SMatthew D Fleming
1606bdcc2226SMateusz Guzik mtip = &mtp->ks_mti;
1607d7854da1SMatthew D Fleming subzone = mtip->mti_zone;
1608d7854da1SMatthew D Fleming
1609d7854da1SMatthew D Fleming for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1610bdcc2226SMateusz Guzik mtip = &mtp->ks_mti;
1611d7854da1SMatthew D Fleming if (mtip->mti_zone != subzone)
1612d7854da1SMatthew D Fleming continue;
1613d7854da1SMatthew D Fleming db_printf("%s\n", mtp->ks_shortdesc);
1614687c94aaSJohn Baldwin if (db_pager_quit)
1615687c94aaSJohn Baldwin break;
1616d7854da1SMatthew D Fleming }
1617d7854da1SMatthew D Fleming }
1618d7854da1SMatthew D Fleming #endif /* MALLOC_DEBUG_MAXZONES > 1 */
1619d7854da1SMatthew D Fleming #endif /* DDB */