/*-
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

dtrace_malloc_probe_func_t	dtrace_malloc_probe;
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory.  'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif
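
/*
 * Illustrative example (not from the original source): with the default
 * REALLOC_FRACTION of 1, realloc()ing a 1024-byte block down to 512 bytes
 * or less (newsize <= oldsize / 2) allocates a fresh, smaller block and
 * copies the contents, while a shrink to 513..1024 bytes reuses the
 * existing block.  See realloc() below.
 */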

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL);

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static vm_offset_t kmembase;
static vm_offset_t kmemlimit;
static int kmemcount;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	PAGE_SIZE
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;
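
/*
 * Worked example of the size-class constants above (illustrative, not from
 * the original source): a small malloc(9) request is first rounded up to
 * the next KMEM_ZBASE (16-byte) multiple and then mapped to a bucket via
 * the kmemsize[] lookup table.  For a 100-byte request:
 *
 *	size = (100 & ~KMEM_ZMASK) + KMEM_ZBASE;	100 -> 112
 *	indx = kmemsize[112 >> KMEM_ZSHIFT];		kmemsize[7]
 *
 * kmemsize[7] names the smallest bucket of at least 112 bytes, so the
 * allocation is served from the 128-byte UMA zone.
 */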

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "16", },
	{32, "32", },
	{64, "64", },
	{128, "128", },
	{256, "256", },
	{512, "512", },
	{1024, "1024", },
	{2048, "2048", },
	{4096, "4096", },
#if PAGE_SIZE > 4096
	{8192, "8192", },
#if PAGE_SIZE > 8192
	{16384, "16384", },
#if PAGE_SIZE > 16384
	{32768, "32768", },
#if PAGE_SIZE > 32768
	{65536, "65536", },
#if PAGE_SIZE > 65536
#error	"Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};

/*
 * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
 * types are described by a data structure passed by the declaring code, but
 * the malloc(9) implementation has its own data structure describing the
 * type and statistics.  This permits the malloc(9)-internal data structures
 * to be modified without breaking binary-compiled kernel modules that
 * declare malloc types.
 */
static uma_zone_t mt_zone;

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");
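
/*
 * The CTLFLAG_RDTUN sysctls here are read-only at runtime but can be set
 * as loader tunables.  For example (illustrative values, assuming the
 * usual loader tunable suffix parsing), in /boot/loader.conf:
 *
 *	vm.kmem_size="512M"
 *	vm.kmem_size_scale="2"
 *
 * kmeminit() below fetches these with TUNABLE_ULONG_FETCH() and
 * TUNABLE_INT_FETCH() before sizing the kmem_map.
 */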

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN,
    &vm_kmem_size_scale, 0, "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem_map allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Largest contiguous free range in kmem_map");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");
#endif
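
/*
 * Example of the fault-injection knobs below (illustrative): on a kernel
 * built with MALLOC_MAKE_FAILURES,
 *
 *	sysctl debug.malloc.failure_rate=100
 *
 * makes every 100th M_NOWAIT allocation fail, and
 * debug.malloc.failure_count reports how many failures have been induced.
 * M_WAITOK allocations are never subject to injected failures.
 */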

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = kmem_map->size;
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	vm_map_lock_read(kmem_map);
	size = kmem_map->root != NULL ? kmem_map->root->max_free :
	    kmem_map->max_offset - kmem_map->min_offset;
	vm_map_unlock_read(kmem_map);
	return (sysctl_handle_long(oidp, &size, 0, req));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones", &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");

static u_int
mtp_get_subzone(const char *desc)
{
	size_t len;
	u_int val;

	if (desc == NULL || (len = strlen(desc)) == 0)
		return (0);
	val = desc[zone_offset % len];
	return (val % numzones);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static inline u_int
mtp_get_subzone(const char *desc)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
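
/*
 * Example of subzone selection (illustrative): with debug.malloc.numzones=4,
 * mtp_get_subzone() hashes a type's short description by taking the
 * character at position (zone_offset % strlen(desc)) modulo numzones.
 * Types that hash to the same subzone share buckets, so a sub-page buffer
 * overrun in one type can only corrupt allocations of types in that same
 * subzone; varying zone_offset across builds shuffles which types share.
 */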

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * amount of bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}
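
/*
 * Note on mts_size above (illustrative): it accumulates a bitmask of the
 * bucket indexes a type has allocated from.  For example, a type that has
 * only ever allocated from the 16-byte bucket (zindx 0) and the 128-byte
 * bucket (zindx 3) has mts_size == 0x9.
 */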

/*
 * contigmalloc:
 *
 *	Allocate a block of physically contiguous memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    unsigned long boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig(kernel_map, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}

/*
 * contigfree:
 *
 *	Free a block of memory allocated by contigmalloc.
 *
 *	This routine may not block.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{

	kmem_free(kernel_map, (vm_offset_t)addr, size);
	malloc_type_freed(type, round_page(size));
}
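
/*
 * Example use of the contigmalloc()/contigfree() pair above (illustrative,
 * not from the original source): a driver allocating a 64KB DMA buffer
 * that must lie below 4GB and be page-aligned might do:
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_NOWAIT, 0, 0xffffffffUL,
 *	    PAGE_SIZE, 0);
 *	...
 *	contigfree(buf, 65536, M_DEVBUF);
 */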

/*
 * malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
{
	int indx;
	struct malloc_type_internal *mtip;
	caddr_t va;
	uma_zone_t zone;
#if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, size)) {
		va = memguard_alloc(size, flags);
		if (va != NULL)
			return (va);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	size = redzone_size_ntor(size);
#endif

	if (size <= KMEM_ZMAX) {
		mtip = mtp->ks_handle;
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		KASSERT(mtip->mti_zone < numzones,
		    ("mti_zone %u out of range %d",
		    mtip->mti_zone, numzones));
		zone = kmemzones[indx].kz_zone[mtip->mti_zone];
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}
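
/*
 * Typical use of the malloc()/free() KPI above (illustrative):
 *
 *	struct foo *fp;
 *
 *	fp = malloc(sizeof(*fp), M_TEMP, M_WAITOK | M_ZERO);
 *	...
 *	free(fp, M_TEMP);
 *
 * The same malloc type must be passed to free() so that the per-type
 * statistics stay balanced; a mismatched type is exactly the kind of
 * error malloc_uninit()'s leak check below can expose.
 */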

/*
 * free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

	KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return;
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	addr = redzone_addr_ntor(addr);
#endif

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtpp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes for
		 * 64 bit machines
		 */
		mtpp = (struct malloc_type **)
		    ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
		mtpp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtpp = mtp;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

/*
 * realloc: change the size of a memory block
 */
void *
realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("realloc: bad malloc type magic"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	alloc = redzone_get_size(addr);
#else
	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 * reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}
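
/*
 * reallocf() above exists to avoid the classic realloc() leak where the
 * old pointer is overwritten before the result is checked.  Illustrative
 * use:
 *
 *	if ((p = reallocf(p, newsize, M_TEMP, M_NOWAIT)) == NULL)
 *		return (ENOMEM);
 *
 * On failure the old block has already been freed, so no reference to it
 * survives.
 */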

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(void *dummy)
{
	uint8_t indx;
	u_long mem_size, tmp;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.
	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
	 * a VM_KMEM_SIZE of 12MB is a fair compromise.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available, and on an X86 with a total KVA space of 256MB,
	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
	mem_size = cnt.v_page_count;

#if defined(VM_KMEM_SIZE_SCALE)
	vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale);
	if (vm_kmem_size_scale > 0 &&
	    (mem_size / vm_kmem_size_scale) > (vm_kmem_size / PAGE_SIZE))
		vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_MIN)
	vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size_min", &vm_kmem_size_min);
	if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min) {
		vm_kmem_size = vm_kmem_size_min;
	}

#if defined(VM_KMEM_SIZE_MAX)
	vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size_max", &vm_kmem_size_max);
	if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
		vm_kmem_size = vm_kmem_size_max;

	/* Allow final override from the kernel environment */
	TUNABLE_ULONG_FETCH("vm.kmem_size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane.  Be careful to not overflow the 32bit
	 * ints while doing the check or the adjustment.
	 */
	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
		vm_kmem_size = 2 * mem_size * PAGE_SIZE;
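
	/*
	 * Worked example of the sizing above (illustrative numbers): with
	 * 4GB of RAM (mem_size of roughly one million 4KB pages) and
	 * vm.kmem_size_scale=3, the scaled size is (1048576 / 3) * 4096,
	 * about 1.3GB; it is then clamped to [vm.kmem_size_min,
	 * vm.kmem_size_max] and finally limited to twice physical memory.
	 */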

#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, vm_kmem_size_max);
#else
	tmp = vm_kmem_size;
#endif
	kmem_map = kmem_suballoc(kernel_map, &kmembase, &kmemlimit,
	    tmp, TRUE);
	kmem_map->system_map = 1;

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	memguard_init(kmem_map);
#endif

	uma_startup2();

	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;
		int subzone;

		for (subzone = 0; subzone < numzones; subzone++) {
			kmemzones[indx].kz_zone[subzone] =
			    uma_zcreate(name, size,
#ifdef INVARIANTS
			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
			    NULL, NULL, NULL, NULL,
#endif
			    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
		}
		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}
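
/*
 * How a malloc type reaches malloc_init() below (illustrative; M_FOO is a
 * hypothetical type): a module writes
 *
 *	MALLOC_DEFINE(M_FOO, "foo", "foo module data");
 *
 * which, via SYSINIT, invokes malloc_init() on the static struct
 * malloc_type.  malloc_init() allocates the internal descriptor from
 * mt_zone and assigns the type to a subzone; malloc_uninit() reverses
 * this at module unload and warns about leaked allocations.
 */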

void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	if (mtp->ks_magic != M_MAGIC)
		panic("malloc_init: bad malloc type magic");

	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;
	mtip->mti_zone = mtp_get_subzone(mtp->ks_shortdesc);

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	uma_slab_t slab;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_magic == M_MAGIC,
	    ("malloc_uninit: bad malloc type magic"));
	KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL"));

	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;
				break;
			}
		}
		KASSERT(temp,
		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i < MAXCPU; i++) {
		mtsp = &mtip->mti_stats[i];
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_arg(mt_zone, mtip, slab);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}
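
/*
 * sysctl_kern_malloc_stats() below exports a binary stream: a
 * malloc_type_stream_header (version, MAXCPU, type count) followed, for
 * each type, by a malloc_type_header and MAXCPU malloc_type_stats
 * records.  Userland consumers such as libmemstat(3) parse this layout.
 */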

static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int error, i;
	struct sbuf sbuf;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	mtx_lock(&malloc_mtx);

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i < MAXCPU; i++) {
			(void)sbuf_bcat(&sbuf, &mtip->mti_stats[i],
			    sizeof(mtip->mti_stats[i]));
		}
	}
	mtx_unlock(&malloc_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}
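
/*
 * Example callback for malloc_type_list() above (illustrative;
 * count_types is hypothetical):
 *
 *	static void
 *	count_types(struct malloc_type *mtp, void *arg)
 *	{
 *
 *		(*(int *)arg)++;
 *	}
 *
 *	int n = 0;
 *	malloc_type_list(count_types, &n);
 *
 * The function snapshots the type list without holding malloc_mtx across
 * the callbacks, so entries may be stale by the time they are visited.
 */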

#ifdef DDB
DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	uint64_t allocs, frees;
	uint64_t alloced, freed;
	int i;

	db_printf("%18s %12s %12s %12s\n", "Type", "InUse", "MemUse",
	    "Requests");
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;
		allocs = 0;
		frees = 0;
		alloced = 0;
		freed = 0;
		for (i = 0; i < MAXCPU; i++) {
			allocs += mtip->mti_stats[i].mts_numallocs;
			frees += mtip->mti_stats[i].mts_numfrees;
			alloced += mtip->mti_stats[i].mts_memalloced;
			freed += mtip->mti_stats[i].mts_memfreed;
		}
		db_printf("%18s %12ju %12juK %12ju\n",
		    mtp->ks_shortdesc, allocs - frees,
		    (alloced - freed + 1023) / 1024, allocs);
	}
}

#if MALLOC_DEBUG_MAXZONES > 1
DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int subzone;

	if (!have_addr) {
		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
		return;
	}
	mtp = (void *)addr;
	if (mtp->ks_magic != M_MAGIC) {
		db_printf("Magic %lx does not match expected %x\n",
		    mtp->ks_magic, M_MAGIC);
		return;
	}

	mtip = mtp->ks_handle;
	subzone = mtip->mti_zone;

	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = mtp->ks_handle;
		if (mtip->mti_zone != subzone)
			continue;
		db_printf("%s\n", mtp->ks_shortdesc);
	}
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
#endif /* DDB */

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int error;
	int rsize;
	int size;
	int i;

	waste = 0;
	mem = 0;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf,
	    "\n  Size                    Requests  Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (long long unsigned)krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */