19454b2d8SWarner Losh /*- 251369649SPedro F. Giffuni * SPDX-License-Identifier: BSD-3-Clause 351369649SPedro F. Giffuni * 4df8bae1dSRodney W. Grimes * Copyright (c) 1987, 1991, 1993 563a7e0a3SRobert Watson * The Regents of the University of California. 6bb1c7df8SRobert Watson * Copyright (c) 2005-2009 Robert N. M. Watson 763a7e0a3SRobert Watson * All rights reserved. 8df8bae1dSRodney W. Grimes * 9df8bae1dSRodney W. Grimes * Redistribution and use in source and binary forms, with or without 10df8bae1dSRodney W. Grimes * modification, are permitted provided that the following conditions 11df8bae1dSRodney W. Grimes * are met: 12df8bae1dSRodney W. Grimes * 1. Redistributions of source code must retain the above copyright 13df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer. 14df8bae1dSRodney W. Grimes * 2. Redistributions in binary form must reproduce the above copyright 15df8bae1dSRodney W. Grimes * notice, this list of conditions and the following disclaimer in the 16df8bae1dSRodney W. Grimes * documentation and/or other materials provided with the distribution. 1769a28758SEd Maste * 3. Neither the name of the University nor the names of its contributors 18df8bae1dSRodney W. Grimes * may be used to endorse or promote products derived from this software 19df8bae1dSRodney W. Grimes * without specific prior written permission. 20df8bae1dSRodney W. Grimes * 21df8bae1dSRodney W. Grimes * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 22df8bae1dSRodney W. Grimes * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 23df8bae1dSRodney W. Grimes * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 24df8bae1dSRodney W. Grimes * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 25df8bae1dSRodney W. Grimes * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 26df8bae1dSRodney W. Grimes * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 27df8bae1dSRodney W. Grimes * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 28df8bae1dSRodney W. Grimes * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 29df8bae1dSRodney W. Grimes * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 30df8bae1dSRodney W. Grimes * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 31df8bae1dSRodney W. Grimes * SUCH DAMAGE. 32df8bae1dSRodney W. Grimes * 33df8bae1dSRodney W. Grimes * @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94 34df8bae1dSRodney W. Grimes */ 35df8bae1dSRodney W. Grimes 360ce3f16dSRobert Watson /* 370ce3f16dSRobert Watson * Kernel malloc(9) implementation -- general purpose kernel memory allocator 380ce3f16dSRobert Watson * based on memory types. Back end is implemented using the UMA(9) zone 390ce3f16dSRobert Watson * allocator. A set of fixed-size buckets are used for smaller allocations, 400ce3f16dSRobert Watson * and a special UMA allocation interface is used for larger allocations. 410ce3f16dSRobert Watson * Callers declare memory types, and statistics are maintained independently 420ce3f16dSRobert Watson * for each memory type. Statistics are maintained per-CPU for performance 430ce3f16dSRobert Watson * reasons. See malloc(9) and comments in malloc.h for a detailed 440ce3f16dSRobert Watson * description. 450ce3f16dSRobert Watson */ 460ce3f16dSRobert Watson 47677b542eSDavid E. O'Brien #include <sys/cdefs.h> 48677b542eSDavid E. 
O'Brien __FBSDID("$FreeBSD$"); 49677b542eSDavid E. O'Brien 50909ed16cSRobert Watson #include "opt_ddb.h" 518a58a9f6SJohn Dyson #include "opt_vm.h" 528a58a9f6SJohn Dyson 53df8bae1dSRodney W. Grimes #include <sys/param.h> 5426f9a767SRodney W. Grimes #include <sys/systm.h> 552d50560aSMarcel Moolenaar #include <sys/kdb.h> 56df8bae1dSRodney W. Grimes #include <sys/kernel.h> 57fb919e4dSMark Murray #include <sys/lock.h> 58df8bae1dSRodney W. Grimes #include <sys/malloc.h> 59eec258d2SJohn Baldwin #include <sys/mutex.h> 60efeaf95aSDavid Greenman #include <sys/vmmeter.h> 61a448b62aSJake Burkholder #include <sys/proc.h> 6263a7e0a3SRobert Watson #include <sys/sbuf.h> 636f267175SJeff Roberson #include <sys/sysctl.h> 641fb14a47SPoul-Henning Kamp #include <sys/time.h> 655df87b21SJeff Roberson #include <sys/vmem.h> 669a02e8c6SJason Evans 67df8bae1dSRodney W. Grimes #include <vm/vm.h> 6899571dc3SJeff Roberson #include <vm/pmap.h> 695df87b21SJeff Roberson #include <vm/vm_pageout.h> 70efeaf95aSDavid Greenman #include <vm/vm_param.h> 71df8bae1dSRodney W. Grimes #include <vm/vm_kern.h> 72efeaf95aSDavid Greenman #include <vm/vm_extern.h> 733075778bSJohn Dyson #include <vm/vm_map.h> 7499571dc3SJeff Roberson #include <vm/vm_page.h> 758355f576SJeff Roberson #include <vm/uma.h> 768355f576SJeff Roberson #include <vm/uma_int.h> 778efc4effSJeff Roberson #include <vm/uma_dbg.h> 78df8bae1dSRodney W. Grimes 79e4eb384bSBosko Milekic #ifdef DEBUG_MEMGUARD 80e4eb384bSBosko Milekic #include <vm/memguard.h> 81e4eb384bSBosko Milekic #endif 82847a2a17SPawel Jakub Dawidek #ifdef DEBUG_REDZONE 83847a2a17SPawel Jakub Dawidek #include <vm/redzone.h> 84847a2a17SPawel Jakub Dawidek #endif 85e4eb384bSBosko Milekic 86984982d6SPoul-Henning Kamp #if defined(INVARIANTS) && defined(__i386__) 87984982d6SPoul-Henning Kamp #include <machine/cpu.h> 88984982d6SPoul-Henning Kamp #endif 89984982d6SPoul-Henning Kamp 90909ed16cSRobert Watson #include <ddb/ddb.h> 91909ed16cSRobert Watson 9291dd776cSJohn Birrell #ifdef KDTRACE_HOOKS 9391dd776cSJohn Birrell #include <sys/dtrace_bsd.h> 9491dd776cSJohn Birrell 9591dd776cSJohn Birrell dtrace_malloc_probe_func_t dtrace_malloc_probe; 9691dd776cSJohn Birrell #endif 9791dd776cSJohn Birrell 9844a8ff31SArchie Cobbs /* 9944a8ff31SArchie Cobbs * When realloc() is called, if the new size is sufficiently smaller than 10044a8ff31SArchie Cobbs * the old size, realloc() will allocate a new, smaller block to avoid 10144a8ff31SArchie Cobbs * wasting memory. 'Sufficiently smaller' is defined as: newsize <= 10244a8ff31SArchie Cobbs * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'. 10344a8ff31SArchie Cobbs */ 10444a8ff31SArchie Cobbs #ifndef REALLOC_FRACTION 10544a8ff31SArchie Cobbs #define REALLOC_FRACTION 1 /* new block if <= half the size */ 10644a8ff31SArchie Cobbs #endif 10744a8ff31SArchie Cobbs 1080ce3f16dSRobert Watson /* 1090ce3f16dSRobert Watson * Centrally define some common malloc types. 
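 *
 * Kernel modules declare and define additional types the same way; a
 * minimal illustrative (hypothetical) consumer looks like:
 *
 *	MALLOC_DECLARE(M_FOOBUF);		(typically in a header)
 *	MALLOC_DEFINE(M_FOOBUF, "foobuf", "foo driver buffers");
 *
 *	p = malloc(len, M_FOOBUF, M_WAITOK | M_ZERO);
 *	...
 *	free(p, M_FOOBUF);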
1100ce3f16dSRobert Watson */ 1113b6fb885SPoul-Henning Kamp MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches"); 1129ef246c6SBruce Evans MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory"); 1139ef246c6SBruce Evans MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers"); 1149ef246c6SBruce Evans 115db669378SPeter Wemm static struct malloc_type *kmemstatistics; 116cd814b26SRobert Watson static int kmemcount; 1171f6889a1SMatthew Dillon 1188355f576SJeff Roberson #define KMEM_ZSHIFT 4 1198355f576SJeff Roberson #define KMEM_ZBASE 16 1208355f576SJeff Roberson #define KMEM_ZMASK (KMEM_ZBASE - 1) 1218355f576SJeff Roberson 122bda06553SXin LI #define KMEM_ZMAX 65536 1238355f576SJeff Roberson #define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT) 12460ae52f7SEd Schouten static uint8_t kmemsize[KMEM_ZSIZE + 1]; 1256f267175SJeff Roberson 126d7854da1SMatthew D Fleming #ifndef MALLOC_DEBUG_MAXZONES 127d7854da1SMatthew D Fleming #define MALLOC_DEBUG_MAXZONES 1 128d7854da1SMatthew D Fleming #endif 129d7854da1SMatthew D Fleming static int numzones = MALLOC_DEBUG_MAXZONES; 130d7854da1SMatthew D Fleming 1310ce3f16dSRobert Watson /* 1320ce3f16dSRobert Watson * Small malloc(9) memory allocations are allocated from a set of UMA buckets 1330ce3f16dSRobert Watson * of various sizes. 1340ce3f16dSRobert Watson * 1350ce3f16dSRobert Watson * XXX: The comment here used to read "These won't be powers of two for 1360ce3f16dSRobert Watson * long." It's possible that a significant amount of wasted memory could be 1370ce3f16dSRobert Watson * recovered by tuning the sizes of these buckets. 1380ce3f16dSRobert Watson */ 1398355f576SJeff Roberson struct { 1406f267175SJeff Roberson int kz_size; 1416f267175SJeff Roberson char *kz_name; 142d7854da1SMatthew D Fleming uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES]; 1436f267175SJeff Roberson } kmemzones[] = { 144d7854da1SMatthew D Fleming {16, "16", }, 145d7854da1SMatthew D Fleming {32, "32", }, 146d7854da1SMatthew D Fleming {64, "64", }, 147d7854da1SMatthew D Fleming {128, "128", }, 148d7854da1SMatthew D Fleming {256, "256", }, 149d7854da1SMatthew D Fleming {512, "512", }, 150d7854da1SMatthew D Fleming {1024, "1024", }, 151d7854da1SMatthew D Fleming {2048, "2048", }, 152d7854da1SMatthew D Fleming {4096, "4096", }, 153d7854da1SMatthew D Fleming {8192, "8192", }, 154d7854da1SMatthew D Fleming {16384, "16384", }, 155d7854da1SMatthew D Fleming {32768, "32768", }, 156d7854da1SMatthew D Fleming {65536, "65536", }, 1578355f576SJeff Roberson {0, NULL}, 1588355f576SJeff Roberson }; 1598355f576SJeff Roberson 1600ce3f16dSRobert Watson /* 1610ce3f16dSRobert Watson * Zone to allocate malloc type descriptions from. For ABI reasons, memory 1620ce3f16dSRobert Watson * types are described by a data structure passed by the declaring code, but 1630ce3f16dSRobert Watson * the malloc(9) implementation has its own data structure describing the 1640ce3f16dSRobert Watson * type and statistics. This permits the malloc(9)-internal data structures 1650ce3f16dSRobert Watson * to be modified without breaking binary-compiled kernel modules that 1660ce3f16dSRobert Watson * declare malloc types. 
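 *
 * The caller-visible struct malloc_type refers to the internal
 * structure through an opaque ks_handle pointer, which malloc_init()
 * points at an allocation from this zone.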
1670ce3f16dSRobert Watson */ 16863a7e0a3SRobert Watson static uma_zone_t mt_zone; 16963a7e0a3SRobert Watson 170b89eaf4eSAlan Cox u_long vm_kmem_size; 171d801e824SAndriy Gapon SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0, 17284344f9fSDag-Erling Smørgrav "Size of kernel memory"); 1735a34a9f0SJeff Roberson 1747001d850SXin LI static u_long kmem_zmax = KMEM_ZMAX; 1757001d850SXin LI SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0, 1767001d850SXin LI "Maximum allocation size that malloc(9) would use UMA as backend"); 1777001d850SXin LI 178b89eaf4eSAlan Cox static u_long vm_kmem_size_min; 179d801e824SAndriy Gapon SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0, 1800e5179e4SStephane E. Potvin "Minimum size of kernel memory"); 1810e5179e4SStephane E. Potvin 182b89eaf4eSAlan Cox static u_long vm_kmem_size_max; 183d801e824SAndriy Gapon SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0, 184479439b4SDag-Erling Smørgrav "Maximum size of kernel memory"); 185479439b4SDag-Erling Smørgrav 1864813ad54SHans Petter Selasky static u_int vm_kmem_size_scale; 187d801e824SAndriy Gapon SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0, 188479439b4SDag-Erling Smørgrav "Scale factor for kernel memory size"); 189479439b4SDag-Erling Smørgrav 1907814c80aSAndriy Gapon static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS); 1917814c80aSAndriy Gapon SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size, 1927814c80aSAndriy Gapon CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0, 1935df87b21SJeff Roberson sysctl_kmem_map_size, "LU", "Current kmem allocation size"); 1947814c80aSAndriy Gapon 19595bb9d38SAndriy Gapon static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS); 19695bb9d38SAndriy Gapon SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free, 19795bb9d38SAndriy Gapon CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0, 1985df87b21SJeff Roberson sysctl_kmem_map_free, "LU", "Free space in kmem"); 19995bb9d38SAndriy Gapon 2005a34a9f0SJeff Roberson /* 20199571dc3SJeff Roberson * The malloc_mtx protects the kmemstatistics linked list. 2025a34a9f0SJeff Roberson */ 2035a34a9f0SJeff Roberson struct mtx malloc_mtx; 20469ef67f9SJason Evans 2055e914b96SJeff Roberson #ifdef MALLOC_PROFILE 2065e914b96SJeff Roberson uint64_t krequests[KMEM_ZSIZE + 1]; 2076f267175SJeff Roberson 2085e914b96SJeff Roberson static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS); 2095e914b96SJeff Roberson #endif 2105e914b96SJeff Roberson 211cd814b26SRobert Watson static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS); 212df8bae1dSRodney W. Grimes 2130ce3f16dSRobert Watson /* 2140ce3f16dSRobert Watson * time_uptime of the last malloc(9) failure (induced or real). 2150ce3f16dSRobert Watson */ 2161fb14a47SPoul-Henning Kamp static time_t t_malloc_fail; 2171fb14a47SPoul-Henning Kamp 218d7854da1SMatthew D Fleming #if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1) 2196472ac3dSEd Schouten static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0, 220d7854da1SMatthew D Fleming "Kernel malloc debugging options"); 221d7854da1SMatthew D Fleming #endif 222d7854da1SMatthew D Fleming 223eae870cdSRobert Watson /* 2240ce3f16dSRobert Watson * malloc(9) fault injection -- cause malloc failures every (n) mallocs when 2250ce3f16dSRobert Watson * the caller specifies M_NOWAIT. If set to 0, no failures are caused. 
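 *
 * For example, setting the tunable/sysctl declared below as
 *
 *	sysctl debug.malloc.failure_rate=100
 *
 * makes roughly every hundredth M_NOWAIT allocation fail.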
226eae870cdSRobert Watson  */
2270ce3f16dSRobert Watson #ifdef MALLOC_MAKE_FAILURES
228eae870cdSRobert Watson static int malloc_failure_rate;
229eae870cdSRobert Watson static int malloc_nowait_count;
230eae870cdSRobert Watson static int malloc_failure_count;
231af3b2549SHans Petter Selasky SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
232eae870cdSRobert Watson     &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
233eae870cdSRobert Watson SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
234eae870cdSRobert Watson     &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
235eae870cdSRobert Watson #endif
236eae870cdSRobert Watson 
2377814c80aSAndriy Gapon static int
2387814c80aSAndriy Gapon sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
2397814c80aSAndriy Gapon {
2407814c80aSAndriy Gapon 	u_long size;
2417814c80aSAndriy Gapon 
242*2e47807cSJeff Roberson 	size = uma_size();
2437814c80aSAndriy Gapon 	return (sysctl_handle_long(oidp, &size, 0, req));
2447814c80aSAndriy Gapon }
2457814c80aSAndriy Gapon 
24695bb9d38SAndriy Gapon static int
24795bb9d38SAndriy Gapon sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
24895bb9d38SAndriy Gapon {
249*2e47807cSJeff Roberson 	u_long size, limit;
25095bb9d38SAndriy Gapon 
251*2e47807cSJeff Roberson 	/* The sysctl is unsigned, implement as a saturation value. */
252*2e47807cSJeff Roberson 	size = uma_size();
253*2e47807cSJeff Roberson 	limit = uma_limit();
254*2e47807cSJeff Roberson 	if (size > limit)
255*2e47807cSJeff Roberson 		size = 0;
256*2e47807cSJeff Roberson 	else
257*2e47807cSJeff Roberson 		size = limit - size;
25895bb9d38SAndriy Gapon 	return (sysctl_handle_long(oidp, &size, 0, req));
25995bb9d38SAndriy Gapon }
26095bb9d38SAndriy Gapon 
261d7854da1SMatthew D Fleming /*
262d7854da1SMatthew D Fleming  * malloc(9) uma zone separation -- sub-page buffer overruns in one
263d7854da1SMatthew D Fleming  * malloc type will affect only a subset of other malloc types.
264d7854da1SMatthew D Fleming  */
265d7854da1SMatthew D Fleming #if MALLOC_DEBUG_MAXZONES > 1
266d7854da1SMatthew D Fleming static void
267d7854da1SMatthew D Fleming tunable_set_numzones(void)
268d7854da1SMatthew D Fleming {
269d7854da1SMatthew D Fleming 
270d7854da1SMatthew D Fleming 	TUNABLE_INT_FETCH("debug.malloc.numzones",
271d7854da1SMatthew D Fleming 	    &numzones);
272d7854da1SMatthew D Fleming 
273d7854da1SMatthew D Fleming 	/* Sanity check the number of malloc uma zones. */
274d7854da1SMatthew D Fleming 	if (numzones <= 0)
275d7854da1SMatthew D Fleming 		numzones = 1;
276d7854da1SMatthew D Fleming 	if (numzones > MALLOC_DEBUG_MAXZONES)
277d7854da1SMatthew D Fleming 		numzones = MALLOC_DEBUG_MAXZONES;
278d7854da1SMatthew D Fleming }
279d7854da1SMatthew D Fleming SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
280af3b2549SHans Petter Selasky SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
281d7854da1SMatthew D Fleming     &numzones, 0, "Number of malloc uma subzones");
282d7854da1SMatthew D Fleming 
283d7854da1SMatthew D Fleming /*
284d7854da1SMatthew D Fleming  * Any number that changes regularly is an okay choice for the
285d7854da1SMatthew D Fleming  * offset.  Build numbers are pretty good if you have them.
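 *
 * As a sketch with purely illustrative values: with numzones = 8 and
 * zone_offset = 1100122, a type whose short description is "devbuf"
 * hashes on "devbuf"[1100122 % 6] = 'u' and lands in subzone
 * 'u' % 8 = 5 (see mtp_get_subzone() below).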
286d7854da1SMatthew D Fleming  */
287d7854da1SMatthew D Fleming static u_int zone_offset = __FreeBSD_version;
288d7854da1SMatthew D Fleming TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
289d7854da1SMatthew D Fleming SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
290d7854da1SMatthew D Fleming     &zone_offset, 0, "Separate malloc types by examining the "
291d7854da1SMatthew D Fleming     "Nth character in the malloc type short description.");
292d7854da1SMatthew D Fleming 
293d7854da1SMatthew D Fleming static u_int
294d7854da1SMatthew D Fleming mtp_get_subzone(const char *desc)
295d7854da1SMatthew D Fleming {
296d7854da1SMatthew D Fleming 	size_t len;
297d7854da1SMatthew D Fleming 	u_int val;
298d7854da1SMatthew D Fleming 
299d7854da1SMatthew D Fleming 	if (desc == NULL || (len = strlen(desc)) == 0)
300d7854da1SMatthew D Fleming 		return (0);
301d7854da1SMatthew D Fleming 	val = desc[zone_offset % len];
302d7854da1SMatthew D Fleming 	return (val % numzones);
303d7854da1SMatthew D Fleming }
304d7854da1SMatthew D Fleming #elif MALLOC_DEBUG_MAXZONES == 0
305d7854da1SMatthew D Fleming #error "MALLOC_DEBUG_MAXZONES must be positive."
306d7854da1SMatthew D Fleming #else
307d7854da1SMatthew D Fleming static inline u_int
308d7854da1SMatthew D Fleming mtp_get_subzone(const char *desc)
309d7854da1SMatthew D Fleming {
310d7854da1SMatthew D Fleming 
311d7854da1SMatthew D Fleming 	return (0);
312d7854da1SMatthew D Fleming }
313d7854da1SMatthew D Fleming #endif /* MALLOC_DEBUG_MAXZONES > 1 */
314d7854da1SMatthew D Fleming 
3151fb14a47SPoul-Henning Kamp int
3161fb14a47SPoul-Henning Kamp malloc_last_fail(void)
3171fb14a47SPoul-Henning Kamp {
3181fb14a47SPoul-Henning Kamp 
3191fb14a47SPoul-Henning Kamp 	return (time_uptime - t_malloc_fail);
3201fb14a47SPoul-Henning Kamp }
3211fb14a47SPoul-Henning Kamp 
322df8bae1dSRodney W. Grimes /*
3230ce3f16dSRobert Watson  * An allocation has succeeded -- update malloc type statistics for the
3240ce3f16dSRobert Watson  * amount of bucket size.  Occurs within a critical section so that the
3250ce3f16dSRobert Watson  * thread isn't preempted and doesn't migrate while updating per-CPU
3260ce3f16dSRobert Watson  * statistics.
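 *
 * A critical section (rather than a lock) is sufficient because each
 * CPU only ever writes its own mti_stats[curcpu] slot; readers such as
 * the kern.malloc_stats sysctl and DDB's "show malloc" sum the per-CPU
 * slots afterwards.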
3274362fadaSBrian Feldman */ 3284362fadaSBrian Feldman static void 32963a7e0a3SRobert Watson malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size, 3304362fadaSBrian Feldman int zindx) 3314362fadaSBrian Feldman { 33263a7e0a3SRobert Watson struct malloc_type_internal *mtip; 33363a7e0a3SRobert Watson struct malloc_type_stats *mtsp; 33463a7e0a3SRobert Watson 33563a7e0a3SRobert Watson critical_enter(); 33663a7e0a3SRobert Watson mtip = mtp->ks_handle; 33763a7e0a3SRobert Watson mtsp = &mtip->mti_stats[curcpu]; 33873864adbSPawel Jakub Dawidek if (size > 0) { 33963a7e0a3SRobert Watson mtsp->mts_memalloced += size; 34063a7e0a3SRobert Watson mtsp->mts_numallocs++; 34173864adbSPawel Jakub Dawidek } 3424362fadaSBrian Feldman if (zindx != -1) 34363a7e0a3SRobert Watson mtsp->mts_size |= 1 << zindx; 34491dd776cSJohn Birrell 34591dd776cSJohn Birrell #ifdef KDTRACE_HOOKS 34691dd776cSJohn Birrell if (dtrace_malloc_probe != NULL) { 34791dd776cSJohn Birrell uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC]; 34891dd776cSJohn Birrell if (probe_id != 0) 34991dd776cSJohn Birrell (dtrace_malloc_probe)(probe_id, 35091dd776cSJohn Birrell (uintptr_t) mtp, (uintptr_t) mtip, 35191dd776cSJohn Birrell (uintptr_t) mtsp, size, zindx); 35291dd776cSJohn Birrell } 35391dd776cSJohn Birrell #endif 35491dd776cSJohn Birrell 35563a7e0a3SRobert Watson critical_exit(); 3564362fadaSBrian Feldman } 3574362fadaSBrian Feldman 3584362fadaSBrian Feldman void 35963a7e0a3SRobert Watson malloc_type_allocated(struct malloc_type *mtp, unsigned long size) 3604362fadaSBrian Feldman { 36163a7e0a3SRobert Watson 36273864adbSPawel Jakub Dawidek if (size > 0) 36363a7e0a3SRobert Watson malloc_type_zone_allocated(mtp, size, -1); 3644362fadaSBrian Feldman } 3654362fadaSBrian Feldman 3664362fadaSBrian Feldman /* 3673805385eSRobert Watson * A free operation has occurred -- update malloc type statistics for the 3680ce3f16dSRobert Watson * amount of the bucket size. Occurs within a critical section so that the 3690ce3f16dSRobert Watson * thread isn't preempted and doesn't migrate while updating per-CPU 3700ce3f16dSRobert Watson * statistics. 3714362fadaSBrian Feldman */ 3724362fadaSBrian Feldman void 37363a7e0a3SRobert Watson malloc_type_freed(struct malloc_type *mtp, unsigned long size) 3744362fadaSBrian Feldman { 37563a7e0a3SRobert Watson struct malloc_type_internal *mtip; 37663a7e0a3SRobert Watson struct malloc_type_stats *mtsp; 37763a7e0a3SRobert Watson 37863a7e0a3SRobert Watson critical_enter(); 37963a7e0a3SRobert Watson mtip = mtp->ks_handle; 38063a7e0a3SRobert Watson mtsp = &mtip->mti_stats[curcpu]; 38163a7e0a3SRobert Watson mtsp->mts_memfreed += size; 38263a7e0a3SRobert Watson mtsp->mts_numfrees++; 38391dd776cSJohn Birrell 38491dd776cSJohn Birrell #ifdef KDTRACE_HOOKS 38591dd776cSJohn Birrell if (dtrace_malloc_probe != NULL) { 38691dd776cSJohn Birrell uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE]; 38791dd776cSJohn Birrell if (probe_id != 0) 38891dd776cSJohn Birrell (dtrace_malloc_probe)(probe_id, 38991dd776cSJohn Birrell (uintptr_t) mtp, (uintptr_t) mtip, 39091dd776cSJohn Birrell (uintptr_t) mtsp, size, 0); 39191dd776cSJohn Birrell } 39291dd776cSJohn Birrell #endif 39391dd776cSJohn Birrell 39463a7e0a3SRobert Watson critical_exit(); 3954362fadaSBrian Feldman } 3964362fadaSBrian Feldman 3974362fadaSBrian Feldman /* 398f346986bSAlan Cox * contigmalloc: 399f346986bSAlan Cox * 400f346986bSAlan Cox * Allocate a block of physically contiguous memory. 
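 *
 * A minimal illustrative call (sizes and bounds hypothetical),
 * requesting 64KB of page-aligned memory below 4GB:
 *
 *	buf = contigmalloc(65536, M_DEVBUF, M_WAITOK,
 *	    0, 0xffffffffUL, PAGE_SIZE, 0);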
401f346986bSAlan Cox * 402f346986bSAlan Cox * If M_NOWAIT is set, this routine will not block and return NULL if 403f346986bSAlan Cox * the allocation fails. 404f346986bSAlan Cox */ 405f346986bSAlan Cox void * 406f346986bSAlan Cox contigmalloc(unsigned long size, struct malloc_type *type, int flags, 407f346986bSAlan Cox vm_paddr_t low, vm_paddr_t high, unsigned long alignment, 408831ce4cbSJohn Baldwin vm_paddr_t boundary) 409f346986bSAlan Cox { 410f346986bSAlan Cox void *ret; 411f346986bSAlan Cox 4125df87b21SJeff Roberson ret = (void *)kmem_alloc_contig(kernel_arena, size, flags, low, high, 413f346986bSAlan Cox alignment, boundary, VM_MEMATTR_DEFAULT); 414f346986bSAlan Cox if (ret != NULL) 415f346986bSAlan Cox malloc_type_allocated(type, round_page(size)); 416f346986bSAlan Cox return (ret); 417f346986bSAlan Cox } 418f346986bSAlan Cox 419f346986bSAlan Cox /* 420f346986bSAlan Cox * contigfree: 421f346986bSAlan Cox * 422f346986bSAlan Cox * Free a block of memory allocated by contigmalloc. 423f346986bSAlan Cox * 424f346986bSAlan Cox * This routine may not block. 425f346986bSAlan Cox */ 426f346986bSAlan Cox void 427f346986bSAlan Cox contigfree(void *addr, unsigned long size, struct malloc_type *type) 428f346986bSAlan Cox { 429f346986bSAlan Cox 4305df87b21SJeff Roberson kmem_free(kernel_arena, (vm_offset_t)addr, size); 431f346986bSAlan Cox malloc_type_freed(type, round_page(size)); 432f346986bSAlan Cox } 433f346986bSAlan Cox 434f346986bSAlan Cox /* 4351c7c3c6aSMatthew Dillon * malloc: 4361c7c3c6aSMatthew Dillon * 4371c7c3c6aSMatthew Dillon * Allocate a block of memory. 4381c7c3c6aSMatthew Dillon * 4391c7c3c6aSMatthew Dillon * If M_NOWAIT is set, this routine will not block and return NULL if 4401c7c3c6aSMatthew Dillon * the allocation fails. 441df8bae1dSRodney W. Grimes */ 442df8bae1dSRodney W. Grimes void * 44363a7e0a3SRobert Watson malloc(unsigned long size, struct malloc_type *mtp, int flags) 444df8bae1dSRodney W. Grimes { 4456f267175SJeff Roberson int indx; 446d7854da1SMatthew D Fleming struct malloc_type_internal *mtip; 4478355f576SJeff Roberson caddr_t va; 4488355f576SJeff Roberson uma_zone_t zone; 449847a2a17SPawel Jakub Dawidek #if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE) 4504db4f5c8SPoul-Henning Kamp unsigned long osize = size; 4514db4f5c8SPoul-Henning Kamp #endif 452df8bae1dSRodney W. Grimes 453194a0abfSPoul-Henning Kamp #ifdef INVARIANTS 454bb1c7df8SRobert Watson KASSERT(mtp->ks_magic == M_MAGIC, ("malloc: bad malloc type magic")); 455d3c11994SPoul-Henning Kamp /* 45623198357SRuslan Ermilov * Check that exactly one of M_WAITOK or M_NOWAIT is specified. 
457d3c11994SPoul-Henning Kamp */ 45823198357SRuslan Ermilov indx = flags & (M_WAITOK | M_NOWAIT); 459d3c11994SPoul-Henning Kamp if (indx != M_NOWAIT && indx != M_WAITOK) { 460d3c11994SPoul-Henning Kamp static struct timeval lasterr; 461d3c11994SPoul-Henning Kamp static int curerr, once; 462d3c11994SPoul-Henning Kamp if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) { 463d3c11994SPoul-Henning Kamp printf("Bad malloc flags: %x\n", indx); 4642d50560aSMarcel Moolenaar kdb_backtrace(); 465d3c11994SPoul-Henning Kamp flags |= M_WAITOK; 466d3c11994SPoul-Henning Kamp once++; 467d3c11994SPoul-Henning Kamp } 468d3c11994SPoul-Henning Kamp } 469194a0abfSPoul-Henning Kamp #endif 470eae870cdSRobert Watson #ifdef MALLOC_MAKE_FAILURES 471eae870cdSRobert Watson if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) { 472eae870cdSRobert Watson atomic_add_int(&malloc_nowait_count, 1); 473eae870cdSRobert Watson if ((malloc_nowait_count % malloc_failure_rate) == 0) { 474eae870cdSRobert Watson atomic_add_int(&malloc_failure_count, 1); 4753f6ee876SPoul-Henning Kamp t_malloc_fail = time_uptime; 476eae870cdSRobert Watson return (NULL); 477eae870cdSRobert Watson } 478eae870cdSRobert Watson } 479eae870cdSRobert Watson #endif 480d3c11994SPoul-Henning Kamp if (flags & M_WAITOK) 481b40ce416SJulian Elischer KASSERT(curthread->td_intr_nesting_level == 0, 482a163d034SWarner Losh ("malloc(M_WAITOK) in interrupt context")); 483d9e2e68dSMark Johnston KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 4841067a2baSJonathan T. Looney ("malloc: called with spinlock or critical section held")); 4851067a2baSJonathan T. Looney 486e4eb384bSBosko Milekic #ifdef DEBUG_MEMGUARD 4878d689e04SGleb Smirnoff if (memguard_cmp_mtp(mtp, size)) { 488e3813573SMatthew D Fleming va = memguard_alloc(size, flags); 489e3813573SMatthew D Fleming if (va != NULL) 490e3813573SMatthew D Fleming return (va); 491e3813573SMatthew D Fleming /* This is unfortunate but should not be fatal. */ 492e3813573SMatthew D Fleming } 493e4eb384bSBosko Milekic #endif 494e4eb384bSBosko Milekic 495847a2a17SPawel Jakub Dawidek #ifdef DEBUG_REDZONE 496847a2a17SPawel Jakub Dawidek size = redzone_size_ntor(size); 497847a2a17SPawel Jakub Dawidek #endif 498847a2a17SPawel Jakub Dawidek 4997001d850SXin LI if (size <= kmem_zmax) { 500d7854da1SMatthew D Fleming mtip = mtp->ks_handle; 5016f267175SJeff Roberson if (size & KMEM_ZMASK) 5026f267175SJeff Roberson size = (size & ~KMEM_ZMASK) + KMEM_ZBASE; 5036f267175SJeff Roberson indx = kmemsize[size >> KMEM_ZSHIFT]; 504d7854da1SMatthew D Fleming KASSERT(mtip->mti_zone < numzones, 505d7854da1SMatthew D Fleming ("mti_zone %u out of range %d", 506d7854da1SMatthew D Fleming mtip->mti_zone, numzones)); 507d7854da1SMatthew D Fleming zone = kmemzones[indx].kz_zone[mtip->mti_zone]; 5086f267175SJeff Roberson #ifdef MALLOC_PROFILE 5096f267175SJeff Roberson krequests[size >> KMEM_ZSHIFT]++; 5106f267175SJeff Roberson #endif 5118355f576SJeff Roberson va = uma_zalloc(zone, flags); 5124362fadaSBrian Feldman if (va != NULL) 513e20a199fSJeff Roberson size = zone->uz_size; 51463a7e0a3SRobert Watson malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx); 5158355f576SJeff Roberson } else { 5166f267175SJeff Roberson size = roundup(size, PAGE_SIZE); 5178355f576SJeff Roberson zone = NULL; 5188355f576SJeff Roberson va = uma_large_malloc(size, flags); 51963a7e0a3SRobert Watson malloc_type_allocated(mtp, va == NULL ? 0 : size); 520df8bae1dSRodney W. 
Grimes } 5211282e9acSPoul-Henning Kamp if (flags & M_WAITOK) 522a163d034SWarner Losh KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL")); 5231282e9acSPoul-Henning Kamp else if (va == NULL) 5241fb14a47SPoul-Henning Kamp t_malloc_fail = time_uptime; 5254db4f5c8SPoul-Henning Kamp #ifdef DIAGNOSTIC 5261282e9acSPoul-Henning Kamp if (va != NULL && !(flags & M_ZERO)) { 5274db4f5c8SPoul-Henning Kamp memset(va, 0x70, osize); 5284db4f5c8SPoul-Henning Kamp } 5294db4f5c8SPoul-Henning Kamp #endif 530847a2a17SPawel Jakub Dawidek #ifdef DEBUG_REDZONE 531847a2a17SPawel Jakub Dawidek if (va != NULL) 532847a2a17SPawel Jakub Dawidek va = redzone_setup(va, osize); 533847a2a17SPawel Jakub Dawidek #endif 534df8bae1dSRodney W. Grimes return ((void *) va); 535df8bae1dSRodney W. Grimes } 536df8bae1dSRodney W. Grimes 537df8bae1dSRodney W. Grimes /* 5381c7c3c6aSMatthew Dillon * free: 5391c7c3c6aSMatthew Dillon * 540df8bae1dSRodney W. Grimes * Free a block of memory allocated by malloc. 5411c7c3c6aSMatthew Dillon * 5421c7c3c6aSMatthew Dillon * This routine may not block. 543df8bae1dSRodney W. Grimes */ 544df8bae1dSRodney W. Grimes void 54563a7e0a3SRobert Watson free(void *addr, struct malloc_type *mtp) 546df8bae1dSRodney W. Grimes { 54799571dc3SJeff Roberson uma_slab_t slab; 54899571dc3SJeff Roberson u_long size; 549254c6cb3SPoul-Henning Kamp 550bb1c7df8SRobert Watson KASSERT(mtp->ks_magic == M_MAGIC, ("free: bad malloc type magic")); 551d9e2e68dSMark Johnston KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 5521067a2baSJonathan T. Looney ("free: called with spinlock or critical section held")); 5531067a2baSJonathan T. Looney 55444a8ff31SArchie Cobbs /* free(NULL, ...) does nothing */ 55544a8ff31SArchie Cobbs if (addr == NULL) 55644a8ff31SArchie Cobbs return; 55744a8ff31SArchie Cobbs 558e4eb384bSBosko Milekic #ifdef DEBUG_MEMGUARD 559e3813573SMatthew D Fleming if (is_memguard_addr(addr)) { 560e4eb384bSBosko Milekic memguard_free(addr); 561e4eb384bSBosko Milekic return; 562e4eb384bSBosko Milekic } 563e4eb384bSBosko Milekic #endif 564e4eb384bSBosko Milekic 565847a2a17SPawel Jakub Dawidek #ifdef DEBUG_REDZONE 566847a2a17SPawel Jakub Dawidek redzone_check(addr); 567847a2a17SPawel Jakub Dawidek addr = redzone_addr_ntor(addr); 568847a2a17SPawel Jakub Dawidek #endif 569847a2a17SPawel Jakub Dawidek 57099571dc3SJeff Roberson slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK)); 5718355f576SJeff Roberson 5728355f576SJeff Roberson if (slab == NULL) 5736f267175SJeff Roberson panic("free: address %p(%p) has not been allocated.\n", 57499571dc3SJeff Roberson addr, (void *)((u_long)addr & (~UMA_SLAB_MASK))); 57599571dc3SJeff Roberson 5768355f576SJeff Roberson if (!(slab->us_flags & UMA_SLAB_MALLOC)) { 5778f70816cSJeff Roberson #ifdef INVARIANTS 57863a7e0a3SRobert Watson struct malloc_type **mtpp = addr; 5798f70816cSJeff Roberson #endif 580099a0e58SBosko Milekic size = slab->us_keg->uk_size; 5818f70816cSJeff Roberson #ifdef INVARIANTS 5828f70816cSJeff Roberson /* 5838f70816cSJeff Roberson * Cache a pointer to the malloc_type that most recently freed 5848f70816cSJeff Roberson * this memory here. This way we know who is most likely to 5858f70816cSJeff Roberson * have stepped on it later. 
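 * (The pointer is written into the last pointer-sized slot of the
 * freed item, so use-after-free debugging can recover the previous
 * owner by inspecting that word.)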
5868f70816cSJeff Roberson * 5878f70816cSJeff Roberson * This code assumes that size is a multiple of 8 bytes for 5888f70816cSJeff Roberson * 64 bit machines 5898f70816cSJeff Roberson */ 59063a7e0a3SRobert Watson mtpp = (struct malloc_type **) 59163a7e0a3SRobert Watson ((unsigned long)mtpp & ~UMA_ALIGN_PTR); 59263a7e0a3SRobert Watson mtpp += (size - sizeof(struct malloc_type *)) / 5938f70816cSJeff Roberson sizeof(struct malloc_type *); 59463a7e0a3SRobert Watson *mtpp = mtp; 5958f70816cSJeff Roberson #endif 596099a0e58SBosko Milekic uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab); 59714bf02f8SJohn Dyson } else { 5988355f576SJeff Roberson size = slab->us_size; 5998355f576SJeff Roberson uma_large_free(slab); 60014bf02f8SJohn Dyson } 60163a7e0a3SRobert Watson malloc_type_freed(mtp, size); 602df8bae1dSRodney W. Grimes } 603df8bae1dSRodney W. Grimes 604df8bae1dSRodney W. Grimes /* 60544a8ff31SArchie Cobbs * realloc: change the size of a memory block 60644a8ff31SArchie Cobbs */ 60744a8ff31SArchie Cobbs void * 60863a7e0a3SRobert Watson realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags) 60944a8ff31SArchie Cobbs { 6108355f576SJeff Roberson uma_slab_t slab; 61144a8ff31SArchie Cobbs unsigned long alloc; 61244a8ff31SArchie Cobbs void *newaddr; 61344a8ff31SArchie Cobbs 614bb1c7df8SRobert Watson KASSERT(mtp->ks_magic == M_MAGIC, 615bb1c7df8SRobert Watson ("realloc: bad malloc type magic")); 616d9e2e68dSMark Johnston KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(), 6171067a2baSJonathan T. Looney ("realloc: called with spinlock or critical section held")); 6181067a2baSJonathan T. Looney 61944a8ff31SArchie Cobbs /* realloc(NULL, ...) is equivalent to malloc(...) */ 62044a8ff31SArchie Cobbs if (addr == NULL) 62163a7e0a3SRobert Watson return (malloc(size, mtp, flags)); 62263a7e0a3SRobert Watson 62363a7e0a3SRobert Watson /* 62463a7e0a3SRobert Watson * XXX: Should report free of old memory and alloc of new memory to 62563a7e0a3SRobert Watson * per-CPU stats. 
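 *
 * As a worked example of the reuse test below, with the default
 * REALLOC_FRACTION of 1: shrinking a block backed by a 1024-byte
 * bucket to 600 bytes reuses it (600 > 1024 >> 1), while shrinking it
 * to 400 bytes falls through and allocates a new, smaller block.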
62663a7e0a3SRobert Watson */ 62744a8ff31SArchie Cobbs 628e4eb384bSBosko Milekic #ifdef DEBUG_MEMGUARD 6296d3ed393SMatthew D Fleming if (is_memguard_addr(addr)) 6306d3ed393SMatthew D Fleming return (memguard_realloc(addr, size, mtp, flags)); 631e4eb384bSBosko Milekic #endif 632e4eb384bSBosko Milekic 633847a2a17SPawel Jakub Dawidek #ifdef DEBUG_REDZONE 634847a2a17SPawel Jakub Dawidek slab = NULL; 635847a2a17SPawel Jakub Dawidek alloc = redzone_get_size(addr); 636847a2a17SPawel Jakub Dawidek #else 63799571dc3SJeff Roberson slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK)); 6388355f576SJeff Roberson 63944a8ff31SArchie Cobbs /* Sanity check */ 6408355f576SJeff Roberson KASSERT(slab != NULL, 64144a8ff31SArchie Cobbs ("realloc: address %p out of range", (void *)addr)); 64244a8ff31SArchie Cobbs 64344a8ff31SArchie Cobbs /* Get the size of the original block */ 644619f2841SPawel Jakub Dawidek if (!(slab->us_flags & UMA_SLAB_MALLOC)) 645099a0e58SBosko Milekic alloc = slab->us_keg->uk_size; 6468355f576SJeff Roberson else 6478355f576SJeff Roberson alloc = slab->us_size; 64844a8ff31SArchie Cobbs 64944a8ff31SArchie Cobbs /* Reuse the original block if appropriate */ 65044a8ff31SArchie Cobbs if (size <= alloc 65144a8ff31SArchie Cobbs && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) 65244a8ff31SArchie Cobbs return (addr); 653847a2a17SPawel Jakub Dawidek #endif /* !DEBUG_REDZONE */ 65444a8ff31SArchie Cobbs 65544a8ff31SArchie Cobbs /* Allocate a new, bigger (or smaller) block */ 65663a7e0a3SRobert Watson if ((newaddr = malloc(size, mtp, flags)) == NULL) 65744a8ff31SArchie Cobbs return (NULL); 65844a8ff31SArchie Cobbs 65944a8ff31SArchie Cobbs /* Copy over original contents */ 66044a8ff31SArchie Cobbs bcopy(addr, newaddr, min(size, alloc)); 66163a7e0a3SRobert Watson free(addr, mtp); 66244a8ff31SArchie Cobbs return (newaddr); 66344a8ff31SArchie Cobbs } 66444a8ff31SArchie Cobbs 66544a8ff31SArchie Cobbs /* 66644a8ff31SArchie Cobbs * reallocf: same as realloc() but free memory on failure. 66744a8ff31SArchie Cobbs */ 66844a8ff31SArchie Cobbs void * 66963a7e0a3SRobert Watson reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags) 67044a8ff31SArchie Cobbs { 67144a8ff31SArchie Cobbs void *mem; 67244a8ff31SArchie Cobbs 67363a7e0a3SRobert Watson if ((mem = realloc(addr, size, mtp, flags)) == NULL) 67463a7e0a3SRobert Watson free(addr, mtp); 67544a8ff31SArchie Cobbs return (mem); 67644a8ff31SArchie Cobbs } 67744a8ff31SArchie Cobbs 678f9d498adSDimitry Andric #ifndef __sparc64__ 679c70af487SAlan Cox CTASSERT(VM_KMEM_SIZE_SCALE >= 1); 680f9d498adSDimitry Andric #endif 681c70af487SAlan Cox 6825df87b21SJeff Roberson /* 683c70af487SAlan Cox * Initialize the kernel memory (kmem) arena. 
6845df87b21SJeff Roberson */ 6855df87b21SJeff Roberson void 6865df87b21SJeff Roberson kmeminit(void) 6875df87b21SJeff Roberson { 688af3b2549SHans Petter Selasky u_long mem_size; 689af3b2549SHans Petter Selasky u_long tmp; 69069ef67f9SJason Evans 691af3b2549SHans Petter Selasky #ifdef VM_KMEM_SIZE 692af3b2549SHans Petter Selasky if (vm_kmem_size == 0) 693af3b2549SHans Petter Selasky vm_kmem_size = VM_KMEM_SIZE; 694af3b2549SHans Petter Selasky #endif 695af3b2549SHans Petter Selasky #ifdef VM_KMEM_SIZE_MIN 696af3b2549SHans Petter Selasky if (vm_kmem_size_min == 0) 697af3b2549SHans Petter Selasky vm_kmem_size_min = VM_KMEM_SIZE_MIN; 698af3b2549SHans Petter Selasky #endif 699af3b2549SHans Petter Selasky #ifdef VM_KMEM_SIZE_MAX 700af3b2549SHans Petter Selasky if (vm_kmem_size_max == 0) 701af3b2549SHans Petter Selasky vm_kmem_size_max = VM_KMEM_SIZE_MAX; 702af3b2549SHans Petter Selasky #endif 7038a58a9f6SJohn Dyson /* 704c70af487SAlan Cox * Calculate the amount of kernel virtual address (KVA) space that is 705c70af487SAlan Cox * preallocated to the kmem arena. In order to support a wide range 706c70af487SAlan Cox * of machines, it is a function of the physical memory size, 707c70af487SAlan Cox * specifically, 7088a58a9f6SJohn Dyson * 709c70af487SAlan Cox * min(max(physical memory size / VM_KMEM_SIZE_SCALE, 710c70af487SAlan Cox * VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX) 711c70af487SAlan Cox * 712c70af487SAlan Cox * Every architecture must define an integral value for 713c70af487SAlan Cox * VM_KMEM_SIZE_SCALE. However, the definitions of VM_KMEM_SIZE_MIN 714c70af487SAlan Cox * and VM_KMEM_SIZE_MAX, which represent respectively the floor and 715c70af487SAlan Cox * ceiling on this preallocation, are optional. Typically, 716c70af487SAlan Cox * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on 717c70af487SAlan Cox * a given architecture. 7188a58a9f6SJohn Dyson */ 71944f1c916SBryan Drewery mem_size = vm_cnt.v_page_count; 7207c51714eSSean Bruno if (mem_size <= 32768) /* delphij XXX 128MB */ 7217c51714eSSean Bruno kmem_zmax = PAGE_SIZE; 7228a58a9f6SJohn Dyson 723c70af487SAlan Cox if (vm_kmem_size_scale < 1) 724c70af487SAlan Cox vm_kmem_size_scale = VM_KMEM_SIZE_SCALE; 725c70af487SAlan Cox 726af3b2549SHans Petter Selasky /* 727af3b2549SHans Petter Selasky * Check if we should use defaults for the "vm_kmem_size" 728af3b2549SHans Petter Selasky * variable: 729af3b2549SHans Petter Selasky */ 730af3b2549SHans Petter Selasky if (vm_kmem_size == 0) { 731479439b4SDag-Erling Smørgrav vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE; 7328a58a9f6SJohn Dyson 733c70af487SAlan Cox if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min) 7340e5179e4SStephane E. Potvin vm_kmem_size = vm_kmem_size_min; 735479439b4SDag-Erling Smørgrav if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max) 736479439b4SDag-Erling Smørgrav vm_kmem_size = vm_kmem_size_max; 737af3b2549SHans Petter Selasky } 7388a58a9f6SJohn Dyson 73927b8623fSDavid Greenman /* 740af3b2549SHans Petter Selasky * The amount of KVA space that is preallocated to the 741c70af487SAlan Cox * kmem arena can be set statically at compile-time or manually 742c70af487SAlan Cox * through the kernel environment. However, it is still limited to 743c70af487SAlan Cox * twice the physical memory size, which has been sufficient to handle 744c70af487SAlan Cox * the most severe cases of external fragmentation in the kmem arena. 
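 *
 * As an illustration with hypothetical numbers: on a machine with 1GB
 * of RAM (262144 4KB pages), a loader-set vm.kmem_size of 4GB would be
 * reduced to 2 * 262144 * 4KB = 2GB by the check below.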
74527b8623fSDavid Greenman */ 746c749c003SAlan Cox if (vm_kmem_size / 2 / PAGE_SIZE > mem_size) 747c749c003SAlan Cox vm_kmem_size = 2 * mem_size * PAGE_SIZE; 7488a58a9f6SJohn Dyson 749e137643eSOlivier Houchard vm_kmem_size = round_page(vm_kmem_size); 750e3813573SMatthew D Fleming #ifdef DEBUG_MEMGUARD 751f806cdcfSMatthew D Fleming tmp = memguard_fudge(vm_kmem_size, kernel_map); 752e3813573SMatthew D Fleming #else 753e3813573SMatthew D Fleming tmp = vm_kmem_size; 754e3813573SMatthew D Fleming #endif 755*2e47807cSJeff Roberson uma_set_limit(tmp); 7568355f576SJeff Roberson 757e4eb384bSBosko Milekic #ifdef DEBUG_MEMGUARD 758e4eb384bSBosko Milekic /* 759e4eb384bSBosko Milekic * Initialize MemGuard if support compiled in. MemGuard is a 760e4eb384bSBosko Milekic * replacement allocator used for detecting tamper-after-free 761e4eb384bSBosko Milekic * scenarios as they occur. It is only used for debugging. 762e4eb384bSBosko Milekic */ 763*2e47807cSJeff Roberson memguard_init(kernel_arena); 764e4eb384bSBosko Milekic #endif 7655df87b21SJeff Roberson } 7665df87b21SJeff Roberson 7675df87b21SJeff Roberson /* 7685df87b21SJeff Roberson * Initialize the kernel memory allocator 7695df87b21SJeff Roberson */ 7705df87b21SJeff Roberson /* ARGSUSED*/ 7715df87b21SJeff Roberson static void 7725df87b21SJeff Roberson mallocinit(void *dummy) 7735df87b21SJeff Roberson { 7745df87b21SJeff Roberson int i; 7755df87b21SJeff Roberson uint8_t indx; 7765df87b21SJeff Roberson 7775df87b21SJeff Roberson mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF); 7785df87b21SJeff Roberson 7795df87b21SJeff Roberson kmeminit(); 780e4eb384bSBosko Milekic 78199571dc3SJeff Roberson uma_startup2(); 7828355f576SJeff Roberson 7837001d850SXin LI if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX) 7847001d850SXin LI kmem_zmax = KMEM_ZMAX; 7857001d850SXin LI 78663a7e0a3SRobert Watson mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal), 78763a7e0a3SRobert Watson #ifdef INVARIANTS 78863a7e0a3SRobert Watson mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini, 78963a7e0a3SRobert Watson #else 79063a7e0a3SRobert Watson NULL, NULL, NULL, NULL, 79163a7e0a3SRobert Watson #endif 79263a7e0a3SRobert Watson UMA_ALIGN_PTR, UMA_ZONE_MALLOC); 7936f267175SJeff Roberson for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) { 7946f267175SJeff Roberson int size = kmemzones[indx].kz_size; 7956f267175SJeff Roberson char *name = kmemzones[indx].kz_name; 796d7854da1SMatthew D Fleming int subzone; 7978355f576SJeff Roberson 798d7854da1SMatthew D Fleming for (subzone = 0; subzone < numzones; subzone++) { 799d7854da1SMatthew D Fleming kmemzones[indx].kz_zone[subzone] = 800d7854da1SMatthew D Fleming uma_zcreate(name, size, 8018efc4effSJeff Roberson #ifdef INVARIANTS 8028f70816cSJeff Roberson mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini, 8038efc4effSJeff Roberson #else 8048efc4effSJeff Roberson NULL, NULL, NULL, NULL, 8058efc4effSJeff Roberson #endif 8068efc4effSJeff Roberson UMA_ALIGN_PTR, UMA_ZONE_MALLOC); 807d7854da1SMatthew D Fleming } 8088355f576SJeff Roberson for (;i <= size; i+= KMEM_ZBASE) 8096f267175SJeff Roberson kmemsize[i >> KMEM_ZSHIFT] = indx; 8108355f576SJeff Roberson 811df8bae1dSRodney W. 
Grimes } 812254c6cb3SPoul-Henning Kamp } 813af3b2549SHans Petter Selasky SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL); 814254c6cb3SPoul-Henning Kamp 815db669378SPeter Wemm void 81687efd4d5SRobert Watson malloc_init(void *data) 817254c6cb3SPoul-Henning Kamp { 81863a7e0a3SRobert Watson struct malloc_type_internal *mtip; 81963a7e0a3SRobert Watson struct malloc_type *mtp; 82063a7e0a3SRobert Watson 82144f1c916SBryan Drewery KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init")); 82263a7e0a3SRobert Watson 82363a7e0a3SRobert Watson mtp = data; 824f121baaaSBrian Somers if (mtp->ks_magic != M_MAGIC) 825f121baaaSBrian Somers panic("malloc_init: bad malloc type magic"); 826bb1c7df8SRobert Watson 82763a7e0a3SRobert Watson mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO); 82863a7e0a3SRobert Watson mtp->ks_handle = mtip; 829d7854da1SMatthew D Fleming mtip->mti_zone = mtp_get_subzone(mtp->ks_shortdesc); 830254c6cb3SPoul-Henning Kamp 8316f267175SJeff Roberson mtx_lock(&malloc_mtx); 83263a7e0a3SRobert Watson mtp->ks_next = kmemstatistics; 83363a7e0a3SRobert Watson kmemstatistics = mtp; 834cd814b26SRobert Watson kmemcount++; 8356f267175SJeff Roberson mtx_unlock(&malloc_mtx); 836df8bae1dSRodney W. Grimes } 837db669378SPeter Wemm 838db669378SPeter Wemm void 83987efd4d5SRobert Watson malloc_uninit(void *data) 840db669378SPeter Wemm { 84163a7e0a3SRobert Watson struct malloc_type_internal *mtip; 8422a143d5bSPawel Jakub Dawidek struct malloc_type_stats *mtsp; 84363a7e0a3SRobert Watson struct malloc_type *mtp, *temp; 84445d48bdaSPaul Saab uma_slab_t slab; 8452a143d5bSPawel Jakub Dawidek long temp_allocs, temp_bytes; 8462a143d5bSPawel Jakub Dawidek int i; 847db669378SPeter Wemm 84863a7e0a3SRobert Watson mtp = data; 849bb1c7df8SRobert Watson KASSERT(mtp->ks_magic == M_MAGIC, 850bb1c7df8SRobert Watson ("malloc_uninit: bad malloc type magic")); 85163a7e0a3SRobert Watson KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL")); 852bb1c7df8SRobert Watson 8536f267175SJeff Roberson mtx_lock(&malloc_mtx); 85463a7e0a3SRobert Watson mtip = mtp->ks_handle; 85563a7e0a3SRobert Watson mtp->ks_handle = NULL; 85663a7e0a3SRobert Watson if (mtp != kmemstatistics) { 85763a7e0a3SRobert Watson for (temp = kmemstatistics; temp != NULL; 85863a7e0a3SRobert Watson temp = temp->ks_next) { 859f121baaaSBrian Somers if (temp->ks_next == mtp) { 86063a7e0a3SRobert Watson temp->ks_next = mtp->ks_next; 861f121baaaSBrian Somers break; 862db669378SPeter Wemm } 863f121baaaSBrian Somers } 864f121baaaSBrian Somers KASSERT(temp, 865f121baaaSBrian Somers ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc)); 86663a7e0a3SRobert Watson } else 86763a7e0a3SRobert Watson kmemstatistics = mtp->ks_next; 868cd814b26SRobert Watson kmemcount--; 8696f267175SJeff Roberson mtx_unlock(&malloc_mtx); 8702a143d5bSPawel Jakub Dawidek 8712a143d5bSPawel Jakub Dawidek /* 8722a143d5bSPawel Jakub Dawidek * Look for memory leaks. 
8732a143d5bSPawel Jakub Dawidek */ 8742a143d5bSPawel Jakub Dawidek temp_allocs = temp_bytes = 0; 8752a143d5bSPawel Jakub Dawidek for (i = 0; i < MAXCPU; i++) { 8762a143d5bSPawel Jakub Dawidek mtsp = &mtip->mti_stats[i]; 8772a143d5bSPawel Jakub Dawidek temp_allocs += mtsp->mts_numallocs; 8782a143d5bSPawel Jakub Dawidek temp_allocs -= mtsp->mts_numfrees; 8792a143d5bSPawel Jakub Dawidek temp_bytes += mtsp->mts_memalloced; 8802a143d5bSPawel Jakub Dawidek temp_bytes -= mtsp->mts_memfreed; 8812a143d5bSPawel Jakub Dawidek } 8822a143d5bSPawel Jakub Dawidek if (temp_allocs > 0 || temp_bytes > 0) { 8832a143d5bSPawel Jakub Dawidek printf("Warning: memory type %s leaked memory on destroy " 8842a143d5bSPawel Jakub Dawidek "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc, 8852a143d5bSPawel Jakub Dawidek temp_allocs, temp_bytes); 8862a143d5bSPawel Jakub Dawidek } 8872a143d5bSPawel Jakub Dawidek 88845d48bdaSPaul Saab slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK)); 88945d48bdaSPaul Saab uma_zfree_arg(mt_zone, mtip, slab); 890db669378SPeter Wemm } 8916f267175SJeff Roberson 892d362c40dSPawel Jakub Dawidek struct malloc_type * 893d362c40dSPawel Jakub Dawidek malloc_desc2type(const char *desc) 894d362c40dSPawel Jakub Dawidek { 895d362c40dSPawel Jakub Dawidek struct malloc_type *mtp; 896d362c40dSPawel Jakub Dawidek 897d362c40dSPawel Jakub Dawidek mtx_assert(&malloc_mtx, MA_OWNED); 898d362c40dSPawel Jakub Dawidek for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { 899d362c40dSPawel Jakub Dawidek if (strcmp(mtp->ks_shortdesc, desc) == 0) 900d362c40dSPawel Jakub Dawidek return (mtp); 901d362c40dSPawel Jakub Dawidek } 902d362c40dSPawel Jakub Dawidek return (NULL); 903d362c40dSPawel Jakub Dawidek } 904d362c40dSPawel Jakub Dawidek 9056f267175SJeff Roberson static int 906cd814b26SRobert Watson sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS) 907cd814b26SRobert Watson { 908cd814b26SRobert Watson struct malloc_type_stream_header mtsh; 909cd814b26SRobert Watson struct malloc_type_internal *mtip; 910cd814b26SRobert Watson struct malloc_type_header mth; 911cd814b26SRobert Watson struct malloc_type *mtp; 9124e657159SMatthew D Fleming int error, i; 913cd814b26SRobert Watson struct sbuf sbuf; 914cd814b26SRobert Watson 91500f0e671SMatthew D Fleming error = sysctl_wire_old_buffer(req, 0); 91600f0e671SMatthew D Fleming if (error != 0) 91700f0e671SMatthew D Fleming return (error); 9184e657159SMatthew D Fleming sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 9191eafc078SIan Lepore sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL); 920cd814b26SRobert Watson mtx_lock(&malloc_mtx); 921cd814b26SRobert Watson 922cd814b26SRobert Watson /* 923cd814b26SRobert Watson * Insert stream header. 924cd814b26SRobert Watson */ 925cd814b26SRobert Watson bzero(&mtsh, sizeof(mtsh)); 926cd814b26SRobert Watson mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION; 927cd814b26SRobert Watson mtsh.mtsh_maxcpus = MAXCPU; 928cd814b26SRobert Watson mtsh.mtsh_count = kmemcount; 9294e657159SMatthew D Fleming (void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh)); 930cd814b26SRobert Watson 931cd814b26SRobert Watson /* 932cd814b26SRobert Watson * Insert alternating sequence of type headers and type statistics. 933cd814b26SRobert Watson */ 934cd814b26SRobert Watson for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { 935cd814b26SRobert Watson mtip = (struct malloc_type_internal *)mtp->ks_handle; 936cd814b26SRobert Watson 937cd814b26SRobert Watson /* 938cd814b26SRobert Watson * Insert type header. 
939cd814b26SRobert Watson */ 940cd814b26SRobert Watson bzero(&mth, sizeof(mth)); 941cd814b26SRobert Watson strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME); 9424e657159SMatthew D Fleming (void)sbuf_bcat(&sbuf, &mth, sizeof(mth)); 943cd814b26SRobert Watson 944cd814b26SRobert Watson /* 945cd814b26SRobert Watson * Insert type statistics for each CPU. 946cd814b26SRobert Watson */ 947cd814b26SRobert Watson for (i = 0; i < MAXCPU; i++) { 9484e657159SMatthew D Fleming (void)sbuf_bcat(&sbuf, &mtip->mti_stats[i], 9494e657159SMatthew D Fleming sizeof(mtip->mti_stats[i])); 950cd814b26SRobert Watson } 951cd814b26SRobert Watson } 952cd814b26SRobert Watson mtx_unlock(&malloc_mtx); 9534e657159SMatthew D Fleming error = sbuf_finish(&sbuf); 954cd814b26SRobert Watson sbuf_delete(&sbuf); 955cd814b26SRobert Watson return (error); 956cd814b26SRobert Watson } 957cd814b26SRobert Watson 958cd814b26SRobert Watson SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT, 959cd814b26SRobert Watson 0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats", 960cd814b26SRobert Watson "Return malloc types"); 961cd814b26SRobert Watson 962cd814b26SRobert Watson SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0, 963cd814b26SRobert Watson "Count of kernel malloc types"); 964cd814b26SRobert Watson 96591dd776cSJohn Birrell void 96691dd776cSJohn Birrell malloc_type_list(malloc_type_list_func_t *func, void *arg) 96791dd776cSJohn Birrell { 96891dd776cSJohn Birrell struct malloc_type *mtp, **bufmtp; 96991dd776cSJohn Birrell int count, i; 97091dd776cSJohn Birrell size_t buflen; 97191dd776cSJohn Birrell 97291dd776cSJohn Birrell mtx_lock(&malloc_mtx); 97391dd776cSJohn Birrell restart: 97491dd776cSJohn Birrell mtx_assert(&malloc_mtx, MA_OWNED); 97591dd776cSJohn Birrell count = kmemcount; 97691dd776cSJohn Birrell mtx_unlock(&malloc_mtx); 97791dd776cSJohn Birrell 97891dd776cSJohn Birrell buflen = sizeof(struct malloc_type *) * count; 97991dd776cSJohn Birrell bufmtp = malloc(buflen, M_TEMP, M_WAITOK); 98091dd776cSJohn Birrell 98191dd776cSJohn Birrell mtx_lock(&malloc_mtx); 98291dd776cSJohn Birrell 98391dd776cSJohn Birrell if (count < kmemcount) { 98491dd776cSJohn Birrell free(bufmtp, M_TEMP); 98591dd776cSJohn Birrell goto restart; 98691dd776cSJohn Birrell } 98791dd776cSJohn Birrell 98891dd776cSJohn Birrell for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++) 98991dd776cSJohn Birrell bufmtp[i] = mtp; 99091dd776cSJohn Birrell 99191dd776cSJohn Birrell mtx_unlock(&malloc_mtx); 99291dd776cSJohn Birrell 99391dd776cSJohn Birrell for (i = 0; i < count; i++) 99491dd776cSJohn Birrell (func)(bufmtp[i], arg); 99591dd776cSJohn Birrell 99691dd776cSJohn Birrell free(bufmtp, M_TEMP); 99791dd776cSJohn Birrell } 99891dd776cSJohn Birrell 999909ed16cSRobert Watson #ifdef DDB 1000909ed16cSRobert Watson DB_SHOW_COMMAND(malloc, db_show_malloc) 1001909ed16cSRobert Watson { 1002909ed16cSRobert Watson struct malloc_type_internal *mtip; 1003909ed16cSRobert Watson struct malloc_type *mtp; 100460ae52f7SEd Schouten uint64_t allocs, frees; 100560ae52f7SEd Schouten uint64_t alloced, freed; 1006909ed16cSRobert Watson int i; 1007909ed16cSRobert Watson 100824076d13SRobert Watson db_printf("%18s %12s %12s %12s\n", "Type", "InUse", "MemUse", 100924076d13SRobert Watson "Requests"); 1010909ed16cSRobert Watson for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { 1011909ed16cSRobert Watson mtip = (struct malloc_type_internal *)mtp->ks_handle; 1012909ed16cSRobert Watson allocs = 0; 1013909ed16cSRobert Watson frees 
= 0; 101424076d13SRobert Watson alloced = 0; 101524076d13SRobert Watson freed = 0; 1016909ed16cSRobert Watson for (i = 0; i < MAXCPU; i++) { 1017909ed16cSRobert Watson allocs += mtip->mti_stats[i].mts_numallocs; 1018909ed16cSRobert Watson frees += mtip->mti_stats[i].mts_numfrees; 101924076d13SRobert Watson alloced += mtip->mti_stats[i].mts_memalloced; 102024076d13SRobert Watson freed += mtip->mti_stats[i].mts_memfreed; 1021909ed16cSRobert Watson } 102224076d13SRobert Watson db_printf("%18s %12ju %12juK %12ju\n", 102324076d13SRobert Watson mtp->ks_shortdesc, allocs - frees, 102424076d13SRobert Watson (alloced - freed + 1023) / 1024, allocs); 1025687c94aaSJohn Baldwin if (db_pager_quit) 1026687c94aaSJohn Baldwin break; 1027909ed16cSRobert Watson } 1028909ed16cSRobert Watson } 1029d7854da1SMatthew D Fleming 1030d7854da1SMatthew D Fleming #if MALLOC_DEBUG_MAXZONES > 1 1031d7854da1SMatthew D Fleming DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches) 1032d7854da1SMatthew D Fleming { 1033d7854da1SMatthew D Fleming struct malloc_type_internal *mtip; 1034d7854da1SMatthew D Fleming struct malloc_type *mtp; 1035d7854da1SMatthew D Fleming u_int subzone; 1036d7854da1SMatthew D Fleming 1037d7854da1SMatthew D Fleming if (!have_addr) { 1038d7854da1SMatthew D Fleming db_printf("Usage: show multizone_matches <malloc type/addr>\n"); 1039d7854da1SMatthew D Fleming return; 1040d7854da1SMatthew D Fleming } 1041d7854da1SMatthew D Fleming mtp = (void *)addr; 1042d7854da1SMatthew D Fleming if (mtp->ks_magic != M_MAGIC) { 1043d7854da1SMatthew D Fleming db_printf("Magic %lx does not match expected %x\n", 1044d7854da1SMatthew D Fleming mtp->ks_magic, M_MAGIC); 1045d7854da1SMatthew D Fleming return; 1046d7854da1SMatthew D Fleming } 1047d7854da1SMatthew D Fleming 1048d7854da1SMatthew D Fleming mtip = mtp->ks_handle; 1049d7854da1SMatthew D Fleming subzone = mtip->mti_zone; 1050d7854da1SMatthew D Fleming 1051d7854da1SMatthew D Fleming for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) { 1052d7854da1SMatthew D Fleming mtip = mtp->ks_handle; 1053d7854da1SMatthew D Fleming if (mtip->mti_zone != subzone) 1054d7854da1SMatthew D Fleming continue; 1055d7854da1SMatthew D Fleming db_printf("%s\n", mtp->ks_shortdesc); 1056687c94aaSJohn Baldwin if (db_pager_quit) 1057687c94aaSJohn Baldwin break; 1058d7854da1SMatthew D Fleming } 1059d7854da1SMatthew D Fleming } 1060d7854da1SMatthew D Fleming #endif /* MALLOC_DEBUG_MAXZONES > 1 */ 1061d7854da1SMatthew D Fleming #endif /* DDB */ 1062909ed16cSRobert Watson 10635e914b96SJeff Roberson #ifdef MALLOC_PROFILE 10645e914b96SJeff Roberson 10655e914b96SJeff Roberson static int 10665e914b96SJeff Roberson sysctl_kern_mprof(SYSCTL_HANDLER_ARGS) 10675e914b96SJeff Roberson { 106863a7e0a3SRobert Watson struct sbuf sbuf; 10695e914b96SJeff Roberson uint64_t count; 10705e914b96SJeff Roberson uint64_t waste; 10715e914b96SJeff Roberson uint64_t mem; 10725e914b96SJeff Roberson int error; 10735e914b96SJeff Roberson int rsize; 10745e914b96SJeff Roberson int size; 10755e914b96SJeff Roberson int i; 10765e914b96SJeff Roberson 10775e914b96SJeff Roberson waste = 0; 10785e914b96SJeff Roberson mem = 0; 10795e914b96SJeff Roberson 108000f0e671SMatthew D Fleming error = sysctl_wire_old_buffer(req, 0); 108100f0e671SMatthew D Fleming if (error != 0) 108200f0e671SMatthew D Fleming return (error); 10834e657159SMatthew D Fleming sbuf_new_for_sysctl(&sbuf, NULL, 128, req); 108463a7e0a3SRobert Watson sbuf_printf(&sbuf, 10855e914b96SJeff Roberson "\n Size Requests Real Size\n"); 10865e914b96SJeff Roberson 
for (i = 0; i < KMEM_ZSIZE; i++) { 10875e914b96SJeff Roberson size = i << KMEM_ZSHIFT; 10885e914b96SJeff Roberson rsize = kmemzones[kmemsize[i]].kz_size; 10895e914b96SJeff Roberson count = (long long unsigned)krequests[i]; 10905e914b96SJeff Roberson 109163a7e0a3SRobert Watson sbuf_printf(&sbuf, "%6d%28llu%11d\n", size, 109263a7e0a3SRobert Watson (unsigned long long)count, rsize); 10935e914b96SJeff Roberson 10945e914b96SJeff Roberson if ((rsize * count) > (size * count)) 10955e914b96SJeff Roberson waste += (rsize * count) - (size * count); 10965e914b96SJeff Roberson mem += (rsize * count); 10975e914b96SJeff Roberson } 109863a7e0a3SRobert Watson sbuf_printf(&sbuf, 10995e914b96SJeff Roberson "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n", 11005e914b96SJeff Roberson (unsigned long long)mem, (unsigned long long)waste); 11014e657159SMatthew D Fleming error = sbuf_finish(&sbuf); 110263a7e0a3SRobert Watson sbuf_delete(&sbuf); 11035e914b96SJeff Roberson return (error); 11045e914b96SJeff Roberson } 11055e914b96SJeff Roberson 11065e914b96SJeff Roberson SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD, 11075e914b96SJeff Roberson NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling"); 11085e914b96SJeff Roberson #endif /* MALLOC_PROFILE */ 1109