/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
 */

#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/vmem.h>
#ifdef EPOCH_TRACE
#include <sys/epoch.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

bool	__read_frequently			dtrace_malloc_enabled;
dtrace_malloc_probe_func_t __read_mostly	dtrace_malloc_probe;
#endif

#if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) ||		\
    defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
#define	MALLOC_DEBUG	1
#endif

#if defined(KASAN) || defined(DEBUG_REDZONE)
#define	DEBUG_REDZONE_ARG_DEF	, unsigned long osize
#define	DEBUG_REDZONE_ARG	, osize
#else
#define	DEBUG_REDZONE_ARG_DEF
#define	DEBUG_REDZONE_ARG
#endif

typedef	enum {
	SLAB_COOKIE_SLAB_PTR		= 0x0,
	SLAB_COOKIE_MALLOC_LARGE	= 0x1,
	SLAB_COOKIE_CONTIG_MALLOC	= 0x2,
} slab_cookie_t;
#define	SLAB_COOKIE_MASK		0x3
#define	SLAB_COOKIE_SHIFT		2
#define	GET_SLAB_COOKIE(_slab)						\
    ((slab_cookie_t)(uintptr_t)(_slab) & SLAB_COOKIE_MASK)
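/*
 * Worked example (illustrative): a 3-page (12288-byte) contigmalloc()
 * allocation records its size in the otherwise-unused slab pointer slot
 * as (12288 << SLAB_COOKIE_SHIFT) | SLAB_COOKIE_CONTIG_MALLOC == 0xc002.
 * GET_SLAB_COOKIE() recovers the tag from the low two bits, and the size
 * is recovered by shifting right again.  Genuine slab pointers are at
 * least 4-byte aligned, so their low two bits are always
 * SLAB_COOKIE_SLAB_PTR (0x0).
 */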
/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
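/*
 * For example, with the default REALLOC_FRACTION of 1, shrinking a
 * 1024-byte allocation to 512 bytes or less allocates a fresh, smaller
 * block, while shrinking it to anywhere in the 513..1024 range reuses
 * the existing block.
 */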
/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

static struct malloc_type *kmemstatistics;
static int kmemcount;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	65536
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * Warning: the layout of the struct is duplicated in libmemstat for KVM support.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	const char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "malloc-16", },
	{32, "malloc-32", },
	{64, "malloc-64", },
	{128, "malloc-128", },
	{256, "malloc-256", },
	{384, "malloc-384", },
	{512, "malloc-512", },
	{1024, "malloc-1024", },
	{2048, "malloc-2048", },
	{4096, "malloc-4096", },
	{8192, "malloc-8192", },
	{16384, "malloc-16384", },
	{32768, "malloc-32768", },
	{65536, "malloc-65536", },
	{0, NULL},
};
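/*
 * Worked example (illustrative): a malloc(100, ...) request is first
 * rounded up to a KMEM_ZBASE multiple (112) and then looked up in
 * kmemsize[], landing in the 128-byte "malloc-128" zone; a 4097-byte
 * request lands in "malloc-8192".  Anything above KMEM_ZMAX bypasses
 * these buckets entirely and is handled by malloc_large().
 */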
u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long kmem_zmax = KMEM_ZMAX;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
    "Maximum allocation size that malloc(9) would use UMA as backend");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Free space in kmem");

static SYSCTL_NODE(_vm, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Malloc information");

static u_int vm_malloc_zone_count = nitems(kmemzones);
SYSCTL_UINT(_vm_malloc, OID_AUTO, zone_count,
    CTLFLAG_RD, &vm_malloc_zone_count, 0,
    "Number of malloc zones");

static int sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm_malloc, OID_AUTO, zone_sizes,
    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_malloc_zone_sizes, "S", "Zone sizes used by malloc");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel malloc debugging options");
#endif

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = uma_size();
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size, limit;

	/* The sysctl is unsigned, implement as a saturation value. */
	size = uma_size();
	limit = uma_limit();
	if (size > limit)
		size = 0;
	else
		size = limit - size;
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS)
{
	int sizes[nitems(kmemzones)];
	int i;

	for (i = 0; i < nitems(kmemzones); i++) {
		sizes[i] = kmemzones[i].kz_size;
	}

	return (SYSCTL_OUT(req, &sizes, sizeof(sizes)));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones",
	    &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");

static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;
	const char *desc;
	size_t len;
	u_int val;

	mtip = &mtp->ks_mti;
	desc = mtp->ks_shortdesc;
	if (desc == NULL || (len = strlen(desc)) == 0)
		val = 0;
	else
		val = desc[zone_offset % len];
	mtip->mti_zone = (val % numzones);
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = &mtp->ks_mti;

	KASSERT(mtip->mti_zone < numzones,
	    ("mti_zone %u out of range %d",
	    mtip->mti_zone, numzones));
	return (mtip->mti_zone);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = &mtp->ks_mti;
	mtip->mti_zone = 0;
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
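/*
 * Worked example (illustrative values): booted with
 * debug.malloc.numzones=4 and zone_offset=8, a type whose short
 * description is "devbuf" (length 6) picks desc[8 % 6] == 'v' (118)
 * and is assigned subzone 118 % 4 == 2.
 */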
/*
 * An allocation has succeeded -- update malloc type statistics for the
 * size of the bucket used.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = &mtp->ks_mti;
	mtsp = zpcpu_get(mtip->mti_stats);
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * size of the bucket used.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = &mtp->ks_mti;
	mtsp = zpcpu_get(mtip->mti_stats);
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 *	contigmalloc:
 *
 *	Allocate a block of physically contiguous memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
#define	IS_CONTIG_MALLOC(_slab)						\
    (GET_SLAB_COOKIE(_slab) == SLAB_COOKIE_CONTIG_MALLOC)
#define	CONTIG_MALLOC_SLAB(_size)					\
    ((void *)(((_size) << SLAB_COOKIE_SHIFT) | SLAB_COOKIE_CONTIG_MALLOC))
static inline size_t
contigmalloc_size(uma_slab_t slab)
{
	uintptr_t va;

	KASSERT(IS_CONTIG_MALLOC(slab),
	    ("%s: called on non-contigmalloc allocation: %p", __func__, slab));
	va = (uintptr_t)slab;
	return (va >> SLAB_COOKIE_SHIFT);
}

void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
	    boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL) {
		/* Use low bits unused for slab pointers. */
		vsetzoneslab((uintptr_t)ret, NULL, CONTIG_MALLOC_SLAB(size));
		malloc_type_allocated(type, round_page(size));
	}
	return (ret);
}

void *
contigmalloc_domainset(unsigned long size, struct malloc_type *type,
    struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL) {
		/* Use low bits unused for slab pointers. */
		vsetzoneslab((uintptr_t)ret, NULL, CONTIG_MALLOC_SLAB(size));
		malloc_type_allocated(type, round_page(size));
	}
	return (ret);
}
#undef	IS_CONTIG_MALLOC
#undef	CONTIG_MALLOC_SLAB

/* contigfree(9) is deprecated. */
void
contigfree(void *addr, unsigned long size __unused, struct malloc_type *type)
{
	free(addr, type);
}
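/*
 * Usage sketch (sizes and malloc type are illustrative):
 *
 *	void *buf;
 *
 *	buf = contigmalloc(8192, M_DEVBUF, M_WAITOK, 0, ~(vm_paddr_t)0,
 *	    PAGE_SIZE, 0);
 *	...
 *	free(buf, M_DEVBUF);
 *
 * This requests 8KB of physically contiguous, page-aligned memory
 * anywhere in the physical address space, with no boundary restriction,
 * and releases it with free() since contigfree() is deprecated.
 */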
#ifdef MALLOC_DEBUG
static int
malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
    int flags)
{
#ifdef INVARIANTS
	int indx;

	KASSERT(mtp->ks_version == M_VERSION, ("malloc: bad malloc type version"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			*vap = NULL;
			return (EJUSTRETURN);
		}
	}
#endif
	if (flags & M_WAITOK) {
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
		if (__predict_false(!THREAD_CAN_SLEEP())) {
#ifdef EPOCH_TRACE
			epoch_trace_list(curthread);
#endif
			KASSERT(0,
			    ("malloc(M_WAITOK) with sleeping prohibited"));
		}
	}
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("malloc: called with spinlock or critical section held"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, *sizep)) {
		*vap = memguard_alloc(*sizep, flags);
		if (*vap != NULL)
			return (EJUSTRETURN);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	*sizep = redzone_size_ntor(*sizep);
#endif

	return (0);
}
#endif

/*
 * Handle large allocations and frees by using kmem_malloc directly.
 */
#define	IS_MALLOC_LARGE(_slab)						\
    (GET_SLAB_COOKIE(_slab) == SLAB_COOKIE_MALLOC_LARGE)
#define	MALLOC_LARGE_SLAB(_size)					\
    ((void *)(((_size) << SLAB_COOKIE_SHIFT) | SLAB_COOKIE_MALLOC_LARGE))
static inline size_t
malloc_large_size(uma_slab_t slab)
{
	uintptr_t va;

	va = (uintptr_t)slab;
	KASSERT(IS_MALLOC_LARGE(slab),
	    ("%s: called on non-malloc_large allocation: %p", __func__, slab));
	return (va >> SLAB_COOKIE_SHIFT);
}

static caddr_t __noinline
malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
    int flags DEBUG_REDZONE_ARG_DEF)
{
	void *va;

	size = roundup(size, PAGE_SIZE);
	va = kmem_malloc_domainset(policy, size, flags);
	if (va != NULL) {
		/* Use low bits unused for slab pointers. */
		vsetzoneslab((uintptr_t)va, NULL, MALLOC_LARGE_SLAB(size));
		uma_total_inc(size);
	}
	malloc_type_allocated(mtp, va == NULL ? 0 : size);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	} else {
#ifdef DEBUG_REDZONE
		va = redzone_setup(va, osize);
#endif
		kasan_mark(va, osize, size, KASAN_MALLOC_REDZONE);
	}
	return (va);
}

static void
free_large(void *addr, size_t size)
{

	kmem_free(addr, size);
	uma_total_dec(size);
}
#undef	IS_MALLOC_LARGE
#undef	MALLOC_LARGE_SLAB

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
(malloc)(size_t size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#if defined(DEBUG_REDZONE) || defined(KASAN)
	unsigned long osize = size;
#endif

	MPASS((flags & M_EXEC) == 0);

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (__predict_false(size > kmem_zmax))
		return (malloc_large(size, mtp, DOMAINSET_RR(), flags
		    DEBUG_REDZONE_ARG));

	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
	va = uma_zalloc_arg(zone, zone, flags);
	if (va != NULL) {
		size = zone->uz_size;
		if ((flags & M_ZERO) == 0) {
			kmsan_mark(va, size, KMSAN_STATE_UNINIT);
			kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
		}
	}
	malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
#ifdef KASAN
	if (va != NULL)
		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
#endif
	return ((void *) va);
}
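/*
 * Usage sketch (the softc structure and malloc type are illustrative):
 *
 *	struct foo_softc *sc;
 *
 *	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
 *	...
 *	free(sc, M_DEVBUF);
 *
 * With M_WAITOK the call sleeps until it succeeds and never returns
 * NULL; M_NOWAIT callers must check for NULL instead.
 */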
static void *
malloc_domain(size_t *sizep, int *indxp, struct malloc_type *mtp, int domain,
    int flags)
{
	uma_zone_t zone;
	caddr_t va;
	size_t size;
	int indx;

	size = *sizep;
	KASSERT(size <= kmem_zmax && (flags & M_EXEC) == 0,
	    ("malloc_domain: Called with bad flag / size combination"));
	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
	va = uma_zalloc_domain(zone, zone, domain, flags);
	if (va != NULL)
		*sizep = zone->uz_size;
	*indxp = indx;
	return ((void *)va);
}

void *
malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
    int flags)
{
	struct vm_domainset_iter di;
	caddr_t va;
	int domain;
	int indx;
#if defined(KASAN) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

	MPASS((flags & M_EXEC) == 0);

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (__predict_false(size > kmem_zmax))
		return (malloc_large(size, mtp, DOMAINSET_RR(), flags
		    DEBUG_REDZONE_ARG));

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
	do {
		va = malloc_domain(&size, &indx, mtp, domain, flags);
	} while (va == NULL && vm_domainset_iter_policy(&di, &domain) == 0);
	malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
#ifdef KASAN
	if (va != NULL)
		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
#endif
#ifdef KMSAN
	if ((flags & M_ZERO) == 0) {
		kmsan_mark(va, size, KMSAN_STATE_UNINIT);
		kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
	}
#endif
	return (va);
}

/*
 * Allocate an executable area.
 */
void *
malloc_exec(size_t size, struct malloc_type *mtp, int flags)
{

	return (malloc_domainset_exec(size, mtp, DOMAINSET_RR(), flags));
}

void *
malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
    int flags)
{
#if defined(DEBUG_REDZONE) || defined(KASAN)
	unsigned long osize = size;
#endif
#ifdef MALLOC_DEBUG
	caddr_t va;
#endif

	flags |= M_EXEC;

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	return (malloc_large(size, mtp, ds, flags DEBUG_REDZONE_ARG));
}

void *
malloc_aligned(size_t size, size_t align, struct malloc_type *type, int flags)
{
	return (malloc_domainset_aligned(size, align, type, DOMAINSET_RR(),
	    flags));
}

void *
malloc_domainset_aligned(size_t size, size_t align,
    struct malloc_type *mtp, struct domainset *ds, int flags)
{
	void *res;
	size_t asize;

	KASSERT(powerof2(align),
	    ("malloc_domainset_aligned: wrong align %#zx size %#zx",
	    align, size));
	KASSERT(align <= PAGE_SIZE,
	    ("malloc_domainset_aligned: align %#zx (size %#zx) too large",
	    align, size));

	/*
	 * Round the allocation size up to the next power of 2,
	 * because we can only guarantee alignment for
	 * power-of-2-sized allocations.  Further increase the
	 * allocation size to align if the rounded size is less than
	 * align, since malloc zones provide alignment equal to their
	 * size.
	 */
	if (size == 0)
		size = 1;
	asize = size <= align ? align : 1UL << flsl(size - 1);

	res = malloc_domainset(asize, mtp, ds, flags);
	KASSERT(res == NULL || ((uintptr_t)res & (align - 1)) == 0,
	    ("malloc_domainset_aligned: result not aligned %p size %#zx "
	    "allocsize %#zx align %#zx", res, size, asize, align));
	return (res);
}
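/*
 * Worked example (illustrative): a request for 96 bytes with align 128
 * is raised to asize 128 (size <= align), while 200 bytes with align 64
 * is rounded up to the next power of 2, asize 256, since only
 * power-of-2-sized zones guarantee natural alignment.
 */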
void *
mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray: %zu * %zu overflowed", nmemb, size);

	return (malloc(size * nmemb, type, flags));
}

void *
mallocarray_domainset(size_t nmemb, size_t size, struct malloc_type *type,
    struct domainset *ds, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray_domainset: %zu * %zu overflowed", nmemb, size);

	return (malloc_domainset(size * nmemb, type, ds, flags));
}
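/*
 * Usage sketch (names are illustrative): overflow-checked array
 * allocation, equivalent to malloc(n * sizeof(*tbl), ...) except that
 * it panics instead of silently wrapping when the multiplication
 * overflows:
 *
 *	struct entry *tbl;
 *
 *	tbl = mallocarray(n, sizeof(*tbl), M_TEMP, M_WAITOK | M_ZERO);
 */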
#if defined(INVARIANTS) && !defined(KASAN)
static void
free_save_type(void *addr, struct malloc_type *mtp, u_long size)
{
	struct malloc_type **mtpp = addr;

	/*
	 * Cache a pointer to the malloc_type that most recently freed
	 * this memory here.  This way we know who is most likely to
	 * have stepped on it later.
	 *
	 * This code assumes that size is a multiple of 8 bytes for
	 * 64-bit machines.
	 */
	mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
	mtpp += (size - sizeof(struct malloc_type *)) /
	    sizeof(struct malloc_type *);
	*mtpp = mtp;
}
#endif

#ifdef MALLOC_DEBUG
static int
free_dbg(void **addrp, struct malloc_type *mtp)
{
	void *addr;

	addr = *addrp;
	KASSERT(mtp->ks_version == M_VERSION, ("free: bad malloc type version"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("free: called with spinlock or critical section held"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return (EJUSTRETURN);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return (EJUSTRETURN);
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	*addrp = redzone_addr_ntor(addr);
#endif

	return (0);
}
#endif

static __always_inline void
_free(void *addr, struct malloc_type *mtp, bool dozero)
{
	uma_zone_t zone;
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("%s(%d): address %p(%p) has not been allocated", __func__,
		    dozero, addr, (void *)((uintptr_t)addr & (~UMA_SLAB_MASK)));

	switch (GET_SLAB_COOKIE(slab)) {
	case __predict_true(SLAB_COOKIE_SLAB_PTR):
		size = zone->uz_size;
#if defined(INVARIANTS) && !defined(KASAN)
		free_save_type(addr, mtp, size);
#endif
		if (dozero)
			explicit_bzero(addr, size);
		uma_zfree_arg(zone, addr, slab);
		break;
	case SLAB_COOKIE_MALLOC_LARGE:
		size = malloc_large_size(slab);
		if (dozero)
			explicit_bzero(addr, size);
		free_large(addr, size);
		break;
	case SLAB_COOKIE_CONTIG_MALLOC:
		size = round_page(contigmalloc_size(slab));
		if (dozero)
			explicit_bzero(addr, size);
		kmem_free(addr, size);
		break;
	default:
		panic("%s(%d): addr %p slab %p with unknown cookie %d",
		    __func__, dozero, addr, slab, GET_SLAB_COOKIE(slab));
		/* NOTREACHED */
	}
	malloc_type_freed(mtp, size);
}

/*
 * free:
 *	Free a block of memory allocated by malloc/contigmalloc.
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	_free(addr, mtp, false);
}

/*
 * zfree:
 *	Zero then free a block of memory allocated by malloc/contigmalloc.
 *	This routine may not block.
 */
void
zfree(void *addr, struct malloc_type *mtp)
{
	_free(addr, mtp, true);
}
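/*
 * Usage sketch (names are illustrative): zfree() suits buffers holding
 * sensitive data, e.g. key material, since the contents are scrubbed
 * with explicit_bzero() before the memory is recycled:
 *
 *	key = malloc(keylen, M_TEMP, M_WAITOK);
 *	...
 *	zfree(key, M_TEMP);
 */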
/*
 *	realloc: change the size of a memory block
 */
void *
realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
#ifndef DEBUG_REDZONE
	uma_zone_t zone;
	uma_slab_t slab;
#endif
	unsigned long alloc;
	void *newaddr;

	KASSERT(mtp->ks_version == M_VERSION,
	    ("realloc: bad malloc type version"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("realloc: called with spinlock or critical section held"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	alloc = redzone_get_size(addr);
#else
	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	switch (GET_SLAB_COOKIE(slab)) {
	case __predict_true(SLAB_COOKIE_SLAB_PTR):
		alloc = zone->uz_size;
		break;
	case SLAB_COOKIE_MALLOC_LARGE:
		alloc = malloc_large_size(slab);
		break;
	default:
#ifdef INVARIANTS
		panic("%s: called for addr %p of unsupported allocation type; "
		    "slab %p cookie %d", __func__, addr, slab, GET_SLAB_COOKIE(slab));
#endif
		return (NULL);
	}

	/* Reuse the original block if appropriate */
	if (size <= alloc &&
	    (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) {
		kasan_mark((void *)addr, size, alloc, KASAN_MALLOC_REDZONE);
		return (addr);
	}
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/*
	 * Copy over original contents.  For KASAN, the redzone must be marked
	 * valid before performing the copy.
	 */
	kasan_mark(addr, alloc, alloc, 0);
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 *	reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}
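/*
 * Usage sketch (names are illustrative): reallocf() avoids the classic
 * realloc() leak where the old pointer is overwritten before the result
 * is checked:
 *
 *	if ((p = reallocf(p, newsize, M_TEMP, M_NOWAIT)) == NULL)
 *		return (ENOMEM);
 *
 * On failure the old block has already been freed, so no separate
 * cleanup is needed, and the stale pointer must not be reused.
 */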
/*
 *	malloc_size: returns the number of bytes allocated for a request of the
 *	specified size
 */
size_t
malloc_size(size_t size)
{
	int indx;

	if (size > kmem_zmax)
		return (round_page(size));
	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	return (kmemzones[indx].kz_size);
}
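/*
 * E.g. malloc_size(100) reports 128 (the "malloc-128" bucket), and
 * malloc_size(70000) reports round_page(70000), i.e. 73728 with 4KB
 * pages, matching what malloc(9) would actually hand out for those
 * request sizes.
 */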
11005d4bf057SVladimir Kondratyev */ 11015d4bf057SVladimir Kondratyev size_t 11025d4bf057SVladimir Kondratyev malloc_usable_size(const void *addr) 11035d4bf057SVladimir Kondratyev { 11045d4bf057SVladimir Kondratyev #ifndef DEBUG_REDZONE 11055d4bf057SVladimir Kondratyev uma_zone_t zone; 11065d4bf057SVladimir Kondratyev uma_slab_t slab; 11075d4bf057SVladimir Kondratyev #endif 11085d4bf057SVladimir Kondratyev u_long size; 11095d4bf057SVladimir Kondratyev 11105d4bf057SVladimir Kondratyev if (addr == NULL) 11115d4bf057SVladimir Kondratyev return (0); 11125d4bf057SVladimir Kondratyev 11135d4bf057SVladimir Kondratyev #ifdef DEBUG_MEMGUARD 11145d4bf057SVladimir Kondratyev if (is_memguard_addr(__DECONST(void *, addr))) 11155d4bf057SVladimir Kondratyev return (memguard_get_req_size(addr)); 11165d4bf057SVladimir Kondratyev #endif 11175d4bf057SVladimir Kondratyev 11185d4bf057SVladimir Kondratyev #ifdef DEBUG_REDZONE 11195d4bf057SVladimir Kondratyev size = redzone_get_size(__DECONST(void *, addr)); 11205d4bf057SVladimir Kondratyev #else 11215d4bf057SVladimir Kondratyev vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab); 11225d4bf057SVladimir Kondratyev if (slab == NULL) 1123c0df224bSBjoern A. Zeeb panic("malloc_usable_size: address %p(%p) is not allocated", 11245d4bf057SVladimir Kondratyev addr, (void *)((u_long)addr & (~UMA_SLAB_MASK))); 11255d4bf057SVladimir Kondratyev 11269e6544ddSBjoern A. Zeeb switch (GET_SLAB_COOKIE(slab)) { 11279e6544ddSBjoern A. Zeeb case __predict_true(SLAB_COOKIE_SLAB_PTR): 11285d4bf057SVladimir Kondratyev size = zone->uz_size; 11299e6544ddSBjoern A. Zeeb break; 11309e6544ddSBjoern A. Zeeb case SLAB_COOKIE_MALLOC_LARGE: 11315d4bf057SVladimir Kondratyev size = malloc_large_size(slab); 11329e6544ddSBjoern A. Zeeb break; 11339e6544ddSBjoern A. Zeeb default: 11349e6544ddSBjoern A. Zeeb __assert_unreachable(); 11359e6544ddSBjoern A. Zeeb size = 0; 11369e6544ddSBjoern A. Zeeb break; 11379e6544ddSBjoern A. Zeeb } 11385d4bf057SVladimir Kondratyev #endif 1139880b670cSMark Johnston 1140880b670cSMark Johnston /* 1141880b670cSMark Johnston * Unmark the redzone to avoid reports from consumers who are 1142880b670cSMark Johnston * (presumably) about to use the full allocation size. 1143880b670cSMark Johnston */ 1144880b670cSMark Johnston kasan_mark(addr, size, size, 0); 1145880b670cSMark Johnston 11465d4bf057SVladimir Kondratyev return (size); 11475d4bf057SVladimir Kondratyev } 11485d4bf057SVladimir Kondratyev 1149c70af487SAlan Cox CTASSERT(VM_KMEM_SIZE_SCALE >= 1); 1150c70af487SAlan Cox 11515df87b21SJeff Roberson /* 1152c70af487SAlan Cox * Initialize the kernel memory (kmem) arena. 
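 *
 * This sizes vm_kmem_size from the tunables and scaling rules described
 * in the body below and installs the result as the UMA allocation
 * limit, adjusting it for KASAN/KMSAN shadow memory and for MemGuard
 * when those are configured.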
11535df87b21SJeff Roberson */ 11545df87b21SJeff Roberson void 11555df87b21SJeff Roberson kmeminit(void) 11565df87b21SJeff Roberson { 1157af3b2549SHans Petter Selasky u_long mem_size; 1158af3b2549SHans Petter Selasky u_long tmp; 115969ef67f9SJason Evans 1160af3b2549SHans Petter Selasky #ifdef VM_KMEM_SIZE 1161af3b2549SHans Petter Selasky if (vm_kmem_size == 0) 1162af3b2549SHans Petter Selasky vm_kmem_size = VM_KMEM_SIZE; 1163af3b2549SHans Petter Selasky #endif 1164af3b2549SHans Petter Selasky #ifdef VM_KMEM_SIZE_MIN 1165af3b2549SHans Petter Selasky if (vm_kmem_size_min == 0) 1166af3b2549SHans Petter Selasky vm_kmem_size_min = VM_KMEM_SIZE_MIN; 1167af3b2549SHans Petter Selasky #endif 1168af3b2549SHans Petter Selasky #ifdef VM_KMEM_SIZE_MAX 1169af3b2549SHans Petter Selasky if (vm_kmem_size_max == 0) 1170af3b2549SHans Petter Selasky vm_kmem_size_max = VM_KMEM_SIZE_MAX; 1171af3b2549SHans Petter Selasky #endif 11728a58a9f6SJohn Dyson /* 1173c70af487SAlan Cox * Calculate the amount of kernel virtual address (KVA) space that is 1174c70af487SAlan Cox * preallocated to the kmem arena. In order to support a wide range 1175c70af487SAlan Cox * of machines, it is a function of the physical memory size, 1176c70af487SAlan Cox * specifically, 11778a58a9f6SJohn Dyson * 1178c70af487SAlan Cox * min(max(physical memory size / VM_KMEM_SIZE_SCALE, 1179c70af487SAlan Cox * VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX) 1180c70af487SAlan Cox * 1181c70af487SAlan Cox * Every architecture must define an integral value for 1182c70af487SAlan Cox * VM_KMEM_SIZE_SCALE. However, the definitions of VM_KMEM_SIZE_MIN 1183c70af487SAlan Cox * and VM_KMEM_SIZE_MAX, which represent respectively the floor and 1184c70af487SAlan Cox * ceiling on this preallocation, are optional. Typically, 1185c70af487SAlan Cox * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on 1186c70af487SAlan Cox * a given architecture. 11878a58a9f6SJohn Dyson */ 118844f1c916SBryan Drewery mem_size = vm_cnt.v_page_count; 11897c51714eSSean Bruno if (mem_size <= 32768) /* delphij XXX 128MB */ 11907c51714eSSean Bruno kmem_zmax = PAGE_SIZE; 11918a58a9f6SJohn Dyson 1192c70af487SAlan Cox if (vm_kmem_size_scale < 1) 1193c70af487SAlan Cox vm_kmem_size_scale = VM_KMEM_SIZE_SCALE; 1194c70af487SAlan Cox 1195af3b2549SHans Petter Selasky /* 1196af3b2549SHans Petter Selasky * Check if we should use defaults for the "vm_kmem_size" 1197af3b2549SHans Petter Selasky * variable: 1198af3b2549SHans Petter Selasky */ 1199af3b2549SHans Petter Selasky if (vm_kmem_size == 0) { 120028b740daSKonstantin Belousov vm_kmem_size = mem_size / vm_kmem_size_scale; 120128b740daSKonstantin Belousov vm_kmem_size = vm_kmem_size * PAGE_SIZE < vm_kmem_size ? 120228b740daSKonstantin Belousov vm_kmem_size_max : vm_kmem_size * PAGE_SIZE; 1203c70af487SAlan Cox if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min) 12040e5179e4SStephane E. Potvin vm_kmem_size = vm_kmem_size_min; 1205479439b4SDag-Erling Smørgrav if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max) 1206479439b4SDag-Erling Smørgrav vm_kmem_size = vm_kmem_size_max; 1207af3b2549SHans Petter Selasky } 120828b740daSKonstantin Belousov if (vm_kmem_size == 0) 120928b740daSKonstantin Belousov panic("Tune VM_KMEM_SIZE_* for the platform"); 12108a58a9f6SJohn Dyson 121127b8623fSDavid Greenman /* 1212af3b2549SHans Petter Selasky * The amount of KVA space that is preallocated to the 1213c70af487SAlan Cox * kmem arena can be set statically at compile-time or manually 1214c70af487SAlan Cox * through the kernel environment. 
However, it is still limited to 1215c70af487SAlan Cox * twice the physical memory size, which has been sufficient to handle 1216c70af487SAlan Cox * the most severe cases of external fragmentation in the kmem arena. 121727b8623fSDavid Greenman */ 1218c749c003SAlan Cox if (vm_kmem_size / 2 / PAGE_SIZE > mem_size) 1219c749c003SAlan Cox vm_kmem_size = 2 * mem_size * PAGE_SIZE; 12208a58a9f6SJohn Dyson 1221e137643eSOlivier Houchard vm_kmem_size = round_page(vm_kmem_size); 12226faf45b3SMark Johnston 12236faf45b3SMark Johnston /* 122489786088SMark Johnston * With KASAN or KMSAN enabled, dynamically allocated kernel memory is 122589786088SMark Johnston * shadowed. Account for this when setting the UMA limit. 12266faf45b3SMark Johnston */ 122789786088SMark Johnston #if defined(KASAN) 12286faf45b3SMark Johnston vm_kmem_size = (vm_kmem_size * KASAN_SHADOW_SCALE) / 12296faf45b3SMark Johnston (KASAN_SHADOW_SCALE + 1); 123089786088SMark Johnston #elif defined(KMSAN) 123189786088SMark Johnston vm_kmem_size /= 3; 12326faf45b3SMark Johnston #endif 12336faf45b3SMark Johnston 1234e3813573SMatthew D Fleming #ifdef DEBUG_MEMGUARD 1235f806cdcfSMatthew D Fleming tmp = memguard_fudge(vm_kmem_size, kernel_map); 1236e3813573SMatthew D Fleming #else 1237e3813573SMatthew D Fleming tmp = vm_kmem_size; 1238e3813573SMatthew D Fleming #endif 12392e47807cSJeff Roberson uma_set_limit(tmp); 12408355f576SJeff Roberson 1241e4eb384bSBosko Milekic #ifdef DEBUG_MEMGUARD 1242e4eb384bSBosko Milekic /* 1243e4eb384bSBosko Milekic * Initialize MemGuard if support compiled in. MemGuard is a 1244e4eb384bSBosko Milekic * replacement allocator used for detecting tamper-after-free 1245e4eb384bSBosko Milekic * scenarios as they occur. It is only used for debugging. 1246e4eb384bSBosko Milekic */ 12472e47807cSJeff Roberson memguard_init(kernel_arena); 1248e4eb384bSBosko Milekic #endif 12495df87b21SJeff Roberson } 12505df87b21SJeff Roberson 12515df87b21SJeff Roberson /* 12525df87b21SJeff Roberson * Initialize the kernel memory allocator 12535df87b21SJeff Roberson */ 12545df87b21SJeff Roberson /* ARGSUSED*/ 12555df87b21SJeff Roberson static void 12565df87b21SJeff Roberson mallocinit(void *dummy) 12575df87b21SJeff Roberson { 12585df87b21SJeff Roberson int i; 12595df87b21SJeff Roberson uint8_t indx; 12605df87b21SJeff Roberson 12615df87b21SJeff Roberson mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF); 12625df87b21SJeff Roberson 12635df87b21SJeff Roberson kmeminit(); 1264e4eb384bSBosko Milekic 12657001d850SXin LI if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX) 12667001d850SXin LI kmem_zmax = KMEM_ZMAX; 12677001d850SXin LI 12686f267175SJeff Roberson for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) { 12696f267175SJeff Roberson int size = kmemzones[indx].kz_size; 1270eaa17d42SRyan Libby const char *name = kmemzones[indx].kz_name; 12713b15beb3SKonstantin Belousov size_t align; 1272d7854da1SMatthew D Fleming int subzone; 12738355f576SJeff Roberson 12743b15beb3SKonstantin Belousov align = UMA_ALIGN_PTR; 12753b15beb3SKonstantin Belousov if (powerof2(size) && size > sizeof(void *)) 12760781c79dSKonstantin Belousov align = MIN(size, PAGE_SIZE) - 1; 1277d7854da1SMatthew D Fleming for (subzone = 0; subzone < numzones; subzone++) { 1278d7854da1SMatthew D Fleming kmemzones[indx].kz_zone[subzone] = 1279d7854da1SMatthew D Fleming uma_zcreate(name, size, 128010094910SMark Johnston #if defined(INVARIANTS) && !defined(KASAN) && !defined(KMSAN) 12818f70816cSJeff Roberson mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini, 12828efc4effSJeff Roberson 
#else 12838efc4effSJeff Roberson NULL, NULL, NULL, NULL, 12848efc4effSJeff Roberson #endif 12853b15beb3SKonstantin Belousov align, UMA_ZONE_MALLOC); 1286d7854da1SMatthew D Fleming } 12878355f576SJeff Roberson for (;i <= size; i+= KMEM_ZBASE) 12886f267175SJeff Roberson kmemsize[i >> KMEM_ZSHIFT] = indx; 1289df8bae1dSRodney W. Grimes } 1290254c6cb3SPoul-Henning Kamp } 1291af3b2549SHans Petter Selasky SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL); 1292254c6cb3SPoul-Henning Kamp 1293db669378SPeter Wemm void 129487efd4d5SRobert Watson malloc_init(void *data) 1295254c6cb3SPoul-Henning Kamp { 129663a7e0a3SRobert Watson struct malloc_type_internal *mtip; 129763a7e0a3SRobert Watson struct malloc_type *mtp; 129863a7e0a3SRobert Watson 1299deab5717SMitchell Horne KASSERT(vm_cnt.v_page_count != 0, 1300deab5717SMitchell Horne ("malloc_init() called before vm_mem_init()")); 130163a7e0a3SRobert Watson 130263a7e0a3SRobert Watson mtp = data; 1303bdcc2226SMateusz Guzik if (mtp->ks_version != M_VERSION) 1304e25d8b67SMateusz Guzik panic("malloc_init: type %s with unsupported version %lu", 1305e25d8b67SMateusz Guzik mtp->ks_shortdesc, mtp->ks_version); 1306bb1c7df8SRobert Watson 1307bdcc2226SMateusz Guzik mtip = &mtp->ks_mti; 13088e6526e9SMateusz Guzik mtip->mti_stats = uma_zalloc_pcpu(pcpu_zone_64, M_WAITOK | M_ZERO); 1309c9e05ccdSMateusz Guzik mtp_set_subzone(mtp); 1310254c6cb3SPoul-Henning Kamp 13116f267175SJeff Roberson mtx_lock(&malloc_mtx); 131263a7e0a3SRobert Watson mtp->ks_next = kmemstatistics; 131363a7e0a3SRobert Watson kmemstatistics = mtp; 1314cd814b26SRobert Watson kmemcount++; 13156f267175SJeff Roberson mtx_unlock(&malloc_mtx); 1316df8bae1dSRodney W. Grimes } 1317db669378SPeter Wemm 1318db669378SPeter Wemm void 131987efd4d5SRobert Watson malloc_uninit(void *data) 1320db669378SPeter Wemm { 132163a7e0a3SRobert Watson struct malloc_type_internal *mtip; 13222a143d5bSPawel Jakub Dawidek struct malloc_type_stats *mtsp; 132363a7e0a3SRobert Watson struct malloc_type *mtp, *temp; 13242a143d5bSPawel Jakub Dawidek long temp_allocs, temp_bytes; 13252a143d5bSPawel Jakub Dawidek int i; 1326db669378SPeter Wemm 132763a7e0a3SRobert Watson mtp = data; 1328bdcc2226SMateusz Guzik KASSERT(mtp->ks_version == M_VERSION, 1329bdcc2226SMateusz Guzik ("malloc_uninit: bad malloc type version")); 1330bb1c7df8SRobert Watson 13316f267175SJeff Roberson mtx_lock(&malloc_mtx); 1332bdcc2226SMateusz Guzik mtip = &mtp->ks_mti; 133363a7e0a3SRobert Watson if (mtp != kmemstatistics) { 133463a7e0a3SRobert Watson for (temp = kmemstatistics; temp != NULL; 133563a7e0a3SRobert Watson temp = temp->ks_next) { 1336f121baaaSBrian Somers if (temp->ks_next == mtp) { 133763a7e0a3SRobert Watson temp->ks_next = mtp->ks_next; 1338f121baaaSBrian Somers break; 1339db669378SPeter Wemm } 1340f121baaaSBrian Somers } 1341f121baaaSBrian Somers KASSERT(temp, 1342f121baaaSBrian Somers ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc)); 134363a7e0a3SRobert Watson } else 134463a7e0a3SRobert Watson kmemstatistics = mtp->ks_next; 1345cd814b26SRobert Watson kmemcount--; 13466f267175SJeff Roberson mtx_unlock(&malloc_mtx); 13472a143d5bSPawel Jakub Dawidek 13482a143d5bSPawel Jakub Dawidek /* 13492a143d5bSPawel Jakub Dawidek * Look for memory leaks. 
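 *
 * Sum the per-CPU statistics for the departing type; if allocations
 * still outnumber frees (or allocated bytes outnumber freed bytes),
 * the type is being torn down while it still owns memory, so warn
 * rather than silently lose track of it.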
13502a143d5bSPawel Jakub Dawidek */ 13512a143d5bSPawel Jakub Dawidek temp_allocs = temp_bytes = 0; 13529afff6b1SMateusz Guzik for (i = 0; i <= mp_maxid; i++) { 13539afff6b1SMateusz Guzik mtsp = zpcpu_get_cpu(mtip->mti_stats, i); 13542a143d5bSPawel Jakub Dawidek temp_allocs += mtsp->mts_numallocs; 13552a143d5bSPawel Jakub Dawidek temp_allocs -= mtsp->mts_numfrees; 13562a143d5bSPawel Jakub Dawidek temp_bytes += mtsp->mts_memalloced; 13572a143d5bSPawel Jakub Dawidek temp_bytes -= mtsp->mts_memfreed; 13582a143d5bSPawel Jakub Dawidek } 13592a143d5bSPawel Jakub Dawidek if (temp_allocs > 0 || temp_bytes > 0) { 13602a143d5bSPawel Jakub Dawidek printf("Warning: memory type %s leaked memory on destroy " 13612a143d5bSPawel Jakub Dawidek "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc, 13622a143d5bSPawel Jakub Dawidek temp_allocs, temp_bytes); 13632a143d5bSPawel Jakub Dawidek } 13642a143d5bSPawel Jakub Dawidek 13658e6526e9SMateusz Guzik uma_zfree_pcpu(pcpu_zone_64, mtip->mti_stats); 1366db669378SPeter Wemm } 13676f267175SJeff Roberson 1368d362c40dSPawel Jakub Dawidek struct malloc_type * 1369d362c40dSPawel Jakub Dawidek malloc_desc2type(const char *desc) 1370d362c40dSPawel Jakub Dawidek { 1371d362c40dSPawel Jakub Dawidek struct malloc_type *mtp; 1372d362c40dSPawel Jakub Dawidek 1373d362c40dSPawel Jakub Dawidek mtx_assert(&malloc_mtx, MA_OWNED); 1374d362c40dSPawel Jakub Dawidek for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { 1375d362c40dSPawel Jakub Dawidek if (strcmp(mtp->ks_shortdesc, desc) == 0) 1376d362c40dSPawel Jakub Dawidek return (mtp); 1377d362c40dSPawel Jakub Dawidek } 1378d362c40dSPawel Jakub Dawidek return (NULL); 1379d362c40dSPawel Jakub Dawidek } 1380d362c40dSPawel Jakub Dawidek 13816f267175SJeff Roberson static int 1382cd814b26SRobert Watson sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS) 1383cd814b26SRobert Watson { 1384cd814b26SRobert Watson struct malloc_type_stream_header mtsh; 1385cd814b26SRobert Watson struct malloc_type_internal *mtip; 13869afff6b1SMateusz Guzik struct malloc_type_stats *mtsp, zeromts; 1387cd814b26SRobert Watson struct malloc_type_header mth; 1388cd814b26SRobert Watson struct malloc_type *mtp; 13894e657159SMatthew D Fleming int error, i; 1390cd814b26SRobert Watson struct sbuf sbuf; 1391cd814b26SRobert Watson 139200f0e671SMatthew D Fleming error = sysctl_wire_old_buffer(req, 0); 139300f0e671SMatthew D Fleming if (error != 0) 139400f0e671SMatthew D Fleming return (error); 13954e657159SMatthew D Fleming sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 13961eafc078SIan Lepore sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); 1397cd814b26SRobert Watson mtx_lock(&malloc_mtx); 1398cd814b26SRobert Watson 13999afff6b1SMateusz Guzik bzero(&zeromts, sizeof(zeromts)); 14009afff6b1SMateusz Guzik 1401cd814b26SRobert Watson /* 1402cd814b26SRobert Watson * Insert stream header. 1403cd814b26SRobert Watson */ 1404cd814b26SRobert Watson bzero(&mtsh, sizeof(mtsh)); 1405cd814b26SRobert Watson mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION; 1406cd814b26SRobert Watson mtsh.mtsh_maxcpus = MAXCPU; 1407cd814b26SRobert Watson mtsh.mtsh_count = kmemcount; 14084e657159SMatthew D Fleming (void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh)); 1409cd814b26SRobert Watson 1410cd814b26SRobert Watson /* 1411cd814b26SRobert Watson * Insert alternating sequence of type headers and type statistics. 
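 *
 * Each header is followed by MAXCPU statistics records; slots for CPU
 * IDs above mp_maxid are emitted as zeroed records so the stream has a
 * fixed per-type layout for userland consumers such as libmemstat(3).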
1412cd814b26SRobert Watson */ 1413cd814b26SRobert Watson for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { 1414bdcc2226SMateusz Guzik mtip = &mtp->ks_mti; 1415cd814b26SRobert Watson 1416cd814b26SRobert Watson /* 1417cd814b26SRobert Watson * Insert type header. 1418cd814b26SRobert Watson */ 1419cd814b26SRobert Watson bzero(&mth, sizeof(mth)); 1420cd814b26SRobert Watson strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME); 14214e657159SMatthew D Fleming (void)sbuf_bcat(&sbuf, &mth, sizeof(mth)); 1422cd814b26SRobert Watson 1423cd814b26SRobert Watson /* 1424cd814b26SRobert Watson * Insert type statistics for each CPU. 1425cd814b26SRobert Watson */ 14269afff6b1SMateusz Guzik for (i = 0; i <= mp_maxid; i++) { 14279afff6b1SMateusz Guzik mtsp = zpcpu_get_cpu(mtip->mti_stats, i); 14289afff6b1SMateusz Guzik (void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp)); 1429cd814b26SRobert Watson } 14309afff6b1SMateusz Guzik /* 14319afff6b1SMateusz Guzik * Fill in the missing CPUs. 14329afff6b1SMateusz Guzik */ 14339afff6b1SMateusz Guzik for (; i < MAXCPU; i++) { 14349afff6b1SMateusz Guzik (void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts)); 14359afff6b1SMateusz Guzik } 1436cd814b26SRobert Watson } 1437cd814b26SRobert Watson mtx_unlock(&malloc_mtx); 14384e657159SMatthew D Fleming error = sbuf_finish(&sbuf); 1439cd814b26SRobert Watson sbuf_delete(&sbuf); 1440cd814b26SRobert Watson return (error); 1441cd814b26SRobert Watson } 1442cd814b26SRobert Watson 14437029da5cSPawel Biernacki SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, 14447029da5cSPawel Biernacki CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_MPSAFE, 0, 0, 14457029da5cSPawel Biernacki sysctl_kern_malloc_stats, "s,malloc_type_ustats", 1446cd814b26SRobert Watson "Return malloc types"); 1447cd814b26SRobert Watson 1448cd814b26SRobert Watson SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0, 1449cd814b26SRobert Watson "Count of kernel malloc types"); 1450cd814b26SRobert Watson 145191dd776cSJohn Birrell void 145291dd776cSJohn Birrell malloc_type_list(malloc_type_list_func_t *func, void *arg) 145391dd776cSJohn Birrell { 145491dd776cSJohn Birrell struct malloc_type *mtp, **bufmtp; 145591dd776cSJohn Birrell int count, i; 145691dd776cSJohn Birrell size_t buflen; 145791dd776cSJohn Birrell 145891dd776cSJohn Birrell mtx_lock(&malloc_mtx); 145991dd776cSJohn Birrell restart: 146091dd776cSJohn Birrell mtx_assert(&malloc_mtx, MA_OWNED); 146191dd776cSJohn Birrell count = kmemcount; 146291dd776cSJohn Birrell mtx_unlock(&malloc_mtx); 146391dd776cSJohn Birrell 146491dd776cSJohn Birrell buflen = sizeof(struct malloc_type *) * count; 146591dd776cSJohn Birrell bufmtp = malloc(buflen, M_TEMP, M_WAITOK); 146691dd776cSJohn Birrell 146791dd776cSJohn Birrell mtx_lock(&malloc_mtx); 146891dd776cSJohn Birrell 146991dd776cSJohn Birrell if (count < kmemcount) { 147091dd776cSJohn Birrell free(bufmtp, M_TEMP); 147191dd776cSJohn Birrell goto restart; 147291dd776cSJohn Birrell } 147391dd776cSJohn Birrell 147491dd776cSJohn Birrell for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++) 147591dd776cSJohn Birrell bufmtp[i] = mtp; 147691dd776cSJohn Birrell 147791dd776cSJohn Birrell mtx_unlock(&malloc_mtx); 147891dd776cSJohn Birrell 147991dd776cSJohn Birrell for (i = 0; i < count; i++) 148091dd776cSJohn Birrell (func)(bufmtp[i], arg); 148191dd776cSJohn Birrell 148291dd776cSJohn Birrell free(bufmtp, M_TEMP); 148391dd776cSJohn Birrell } 148491dd776cSJohn Birrell 1485909ed16cSRobert Watson #ifdef DDB 148646d70077SConrad Meyer static int64_t 148746d70077SConrad Meyer 
get_malloc_stats(const struct malloc_type_internal *mtip, uint64_t *allocs, 148846d70077SConrad Meyer uint64_t *inuse) 1489909ed16cSRobert Watson { 149046d70077SConrad Meyer const struct malloc_type_stats *mtsp; 149146d70077SConrad Meyer uint64_t frees, alloced, freed; 1492909ed16cSRobert Watson int i; 1493909ed16cSRobert Watson 149446d70077SConrad Meyer *allocs = 0; 1495909ed16cSRobert Watson frees = 0; 149624076d13SRobert Watson alloced = 0; 149724076d13SRobert Watson freed = 0; 14989afff6b1SMateusz Guzik for (i = 0; i <= mp_maxid; i++) { 14999afff6b1SMateusz Guzik mtsp = zpcpu_get_cpu(mtip->mti_stats, i); 150046d70077SConrad Meyer 150146d70077SConrad Meyer *allocs += mtsp->mts_numallocs; 150226e9d9b0SMark Johnston frees += mtsp->mts_numfrees; 150326e9d9b0SMark Johnston alloced += mtsp->mts_memalloced; 150426e9d9b0SMark Johnston freed += mtsp->mts_memfreed; 1505909ed16cSRobert Watson } 150646d70077SConrad Meyer *inuse = *allocs - frees; 150746d70077SConrad Meyer return (alloced - freed); 150846d70077SConrad Meyer } 150946d70077SConrad Meyer 1510c84c5e00SMitchell Horne DB_SHOW_COMMAND_FLAGS(malloc, db_show_malloc, DB_CMD_MEMSAFE) 151146d70077SConrad Meyer { 151246d70077SConrad Meyer const char *fmt_hdr, *fmt_entry; 151346d70077SConrad Meyer struct malloc_type *mtp; 151446d70077SConrad Meyer uint64_t allocs, inuse; 151546d70077SConrad Meyer int64_t size; 151646d70077SConrad Meyer /* variables for sorting */ 151746d70077SConrad Meyer struct malloc_type *last_mtype, *cur_mtype; 151846d70077SConrad Meyer int64_t cur_size, last_size; 151946d70077SConrad Meyer int ties; 152046d70077SConrad Meyer 152146d70077SConrad Meyer if (modif[0] == 'i') { 152246d70077SConrad Meyer fmt_hdr = "%s,%s,%s,%s\n"; 152346d70077SConrad Meyer fmt_entry = "\"%s\",%ju,%jdK,%ju\n"; 152446d70077SConrad Meyer } else { 152546d70077SConrad Meyer fmt_hdr = "%18s %12s %12s %12s\n"; 152646d70077SConrad Meyer fmt_entry = "%18s %12ju %12jdK %12ju\n"; 152746d70077SConrad Meyer } 152846d70077SConrad Meyer 152946d70077SConrad Meyer db_printf(fmt_hdr, "Type", "InUse", "MemUse", "Requests"); 153046d70077SConrad Meyer 153146d70077SConrad Meyer /* Select sort, largest size first. */ 153246d70077SConrad Meyer last_mtype = NULL; 153346d70077SConrad Meyer last_size = INT64_MAX; 153446d70077SConrad Meyer for (;;) { 153546d70077SConrad Meyer cur_mtype = NULL; 153646d70077SConrad Meyer cur_size = -1; 153746d70077SConrad Meyer ties = 0; 153846d70077SConrad Meyer 153946d70077SConrad Meyer for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { 154046d70077SConrad Meyer /* 154146d70077SConrad Meyer * In the case of size ties, print out mtypes 154246d70077SConrad Meyer * in the order they are encountered. That is, 154346d70077SConrad Meyer * when we encounter the most recently output 154446d70077SConrad Meyer * mtype, we have already printed all preceding 154546d70077SConrad Meyer * ties, and we must print all following ties. 
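 *
 * Effectively a selection sort that re-scans the type list for each
 * row printed; O(n^2), but it avoids allocating any memory, which is
 * the safer choice inside the debugger.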
154646d70077SConrad Meyer */
154746d70077SConrad Meyer if (mtp == last_mtype) {
154846d70077SConrad Meyer ties = 1;
154946d70077SConrad Meyer continue;
155046d70077SConrad Meyer }
1551bdcc2226SMateusz Guzik size = get_malloc_stats(&mtp->ks_mti, &allocs,
155246d70077SConrad Meyer &inuse);
155346d70077SConrad Meyer if (size > cur_size && size < last_size + ties) {
155446d70077SConrad Meyer cur_size = size;
155546d70077SConrad Meyer cur_mtype = mtp;
155646d70077SConrad Meyer }
155746d70077SConrad Meyer }
155846d70077SConrad Meyer if (cur_mtype == NULL)
155946d70077SConrad Meyer break;
156046d70077SConrad Meyer 
1561bdcc2226SMateusz Guzik size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
156246d70077SConrad Meyer db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
156346d70077SConrad Meyer howmany(size, 1024), allocs);
156446d70077SConrad Meyer 
1565687c94aaSJohn Baldwin if (db_pager_quit)
1566687c94aaSJohn Baldwin break;
156746d70077SConrad Meyer 
156846d70077SConrad Meyer last_mtype = cur_mtype;
156946d70077SConrad Meyer last_size = cur_size;
1570909ed16cSRobert Watson }
1571909ed16cSRobert Watson }
1572d7854da1SMatthew D Fleming 
1573d7854da1SMatthew D Fleming #if MALLOC_DEBUG_MAXZONES > 1
1574d7854da1SMatthew D Fleming DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
1575d7854da1SMatthew D Fleming {
1576d7854da1SMatthew D Fleming struct malloc_type_internal *mtip;
1577d7854da1SMatthew D Fleming struct malloc_type *mtp;
1578d7854da1SMatthew D Fleming u_int subzone;
1579d7854da1SMatthew D Fleming 
1580d7854da1SMatthew D Fleming if (!have_addr) {
1581d7854da1SMatthew D Fleming db_printf("Usage: show multizone_matches <malloc type/addr>\n");
1582d7854da1SMatthew D Fleming return;
1583d7854da1SMatthew D Fleming }
1584d7854da1SMatthew D Fleming mtp = (void *)addr;
1585bdcc2226SMateusz Guzik if (mtp->ks_version != M_VERSION) {
1586bdcc2226SMateusz Guzik db_printf("Version %lx does not match expected %x\n",
1587bdcc2226SMateusz Guzik mtp->ks_version, M_VERSION);
1588d7854da1SMatthew D Fleming return;
1589d7854da1SMatthew D Fleming }
1590d7854da1SMatthew D Fleming 
1591bdcc2226SMateusz Guzik mtip = &mtp->ks_mti;
1592d7854da1SMatthew D Fleming subzone = mtip->mti_zone;
1593d7854da1SMatthew D Fleming 
1594d7854da1SMatthew D Fleming for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
1595bdcc2226SMateusz Guzik mtip = &mtp->ks_mti;
1596d7854da1SMatthew D Fleming if (mtip->mti_zone != subzone)
1597d7854da1SMatthew D Fleming continue;
1598d7854da1SMatthew D Fleming db_printf("%s\n", mtp->ks_shortdesc);
1599687c94aaSJohn Baldwin if (db_pager_quit)
1600687c94aaSJohn Baldwin break;
1601d7854da1SMatthew D Fleming }
1602d7854da1SMatthew D Fleming }
1603d7854da1SMatthew D Fleming #endif /* MALLOC_DEBUG_MAXZONES > 1 */
1604d7854da1SMatthew D Fleming #endif /* DDB */
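
/*
 * Illustrative example of the malloc(9) KPI implemented in this file.
 * This is an editorial sketch, not part of the original source: the
 * malloc type M_EXAMPLE and the function example_alloc() are
 * hypothetical names used only for exposition.
 *
 *	MALLOC_DEFINE(M_EXAMPLE, "example", "example buffers");
 *
 *	static void
 *	example_alloc(void)
 *	{
 *		int *buf;
 *
 *		buf = malloc(16 * sizeof(*buf), M_EXAMPLE,
 *		    M_WAITOK | M_ZERO);
 *		buf = realloc(buf, 32 * sizeof(*buf), M_EXAMPLE, M_WAITOK);
 *		free(buf, M_EXAMPLE);
 *	}
 *
 * With M_WAITOK, malloc() and realloc() sleep rather than fail, so the
 * returned pointers need not be checked; M_NOWAIT callers must handle
 * NULL.
 */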