/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets are used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
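 *
 * A typical consumer (an illustrative sketch, not code from this file;
 * M_FOO is a hypothetical type) declares its own memory type and then
 * allocates against it:
 *
 *	MALLOC_DEFINE(M_FOO, "foo", "hypothetical example buffers");
 *	...
 *	p = malloc(len, M_FOO, M_WAITOK | M_ZERO);
 *	...
 *	free(p, M_FOO);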
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/vmem.h>
#ifdef EPOCH_TRACE
#include <sys/epoch.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

bool	__read_frequently	dtrace_malloc_enabled;
dtrace_malloc_probe_func_t	__read_mostly	dtrace_malloc_probe;
#endif

#if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) || \
    defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
#define	MALLOC_DEBUG	1
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory. 'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

static struct malloc_type *kmemstatistics;
static int kmemcount;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	65536
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * Warning: the layout of the struct is duplicated in libmemstat for KVM support.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
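 *
 * For example, with the current spacing a 260-byte request is first
 * rounded up to 272 bytes (a multiple of KMEM_ZBASE) and then served
 * from the 384-byte bucket, so 124 bytes of that chunk go unused.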
 */
struct {
	int kz_size;
	const char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "malloc-16", },
	{32, "malloc-32", },
	{64, "malloc-64", },
	{128, "malloc-128", },
	{256, "malloc-256", },
	{384, "malloc-384", },
	{512, "malloc-512", },
	{1024, "malloc-1024", },
	{2048, "malloc-2048", },
	{4096, "malloc-4096", },
	{8192, "malloc-8192", },
	{16384, "malloc-16384", },
	{32768, "malloc-32768", },
	{65536, "malloc-65536", },
	{0, NULL},
};

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long kmem_zmax = KMEM_ZMAX;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
    "Maximum allocation size that malloc(9) would use UMA as backend");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Free space in kmem");

static SYSCTL_NODE(_vm, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Malloc information");

static u_int vm_malloc_zone_count = nitems(kmemzones);
SYSCTL_UINT(_vm_malloc, OID_AUTO, zone_count,
    CTLFLAG_RD, &vm_malloc_zone_count, 0,
    "Number of malloc zones");

static int sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm_malloc, OID_AUTO, zone_sizes,
    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_malloc_zone_sizes, "S", "Zone sizes used by malloc");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel malloc debugging options");
#endif

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
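 *
 * For example, setting the debug.malloc.failure_rate sysctl below to 10
 * makes roughly every tenth M_NOWAIT allocation return NULL, which is a
 * convenient way to exercise callers' error paths.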
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = uma_size();
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size, limit;

	/* The sysctl is unsigned, implement as a saturation value. */
	size = uma_size();
	limit = uma_limit();
	if (size > limit)
		size = 0;
	else
		size = limit - size;
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS)
{
	int sizes[nitems(kmemzones)];
	int i;

	for (i = 0; i < nitems(kmemzones); i++) {
		sizes[i] = kmemzones[i].kz_size;
	}

	return (SYSCTL_OUT(req, &sizes, sizeof(sizes)));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones",
	    &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");

static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;
	const char *desc;
	size_t len;
	u_int val;

	mtip = &mtp->ks_mti;
	desc = mtp->ks_shortdesc;
	if (desc == NULL || (len = strlen(desc)) == 0)
		val = 0;
	else
		val = desc[zone_offset % len];
	mtip->mti_zone = (val % numzones);
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = &mtp->ks_mti;

	KASSERT(mtip->mti_zone < numzones,
	    ("mti_zone %u out of range %d",
	    mtip->mti_zone, numzones));
	return (mtip->mti_zone);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = &mtp->ks_mti;
	mtip->mti_zone = 0;
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * amount of bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = &mtp->ks_mti;
	mtsp = zpcpu_get(mtip->mti_stats);
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = &mtp->ks_mti;
	mtsp = zpcpu_get(mtip->mti_stats);
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 * contigmalloc:
 *
 *	Allocate a block of physically contiguous memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
	    boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}

void *
contigmalloc_domainset(unsigned long size, struct malloc_type *type,
    struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}

/*
 * contigfree:
 *
 *	Free a block of memory allocated by contigmalloc.
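 *
 *	The size and type given here must match those passed to the
 *	contigmalloc() call that created the block.  A hypothetical
 *	driver-style use (illustrative sketch, not code from this file):
 *
 *		buf = contigmalloc(4096, M_DEVBUF, M_WAITOK, 0,
 *		    ~(vm_paddr_t)0, PAGE_SIZE, 0);
 *		...
 *		contigfree(buf, 4096, M_DEVBUF);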
 *
 *	This routine may not block.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{

	kmem_free((vm_offset_t)addr, size);
	malloc_type_freed(type, round_page(size));
}

#ifdef MALLOC_DEBUG
static int
malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
    int flags)
{
#ifdef INVARIANTS
	int indx;

	KASSERT(mtp->ks_version == M_VERSION, ("malloc: bad malloc type version"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static struct timeval lasterr;
		static int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			*vap = NULL;
			return (EJUSTRETURN);
		}
	}
#endif
	if (flags & M_WAITOK) {
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
		if (__predict_false(!THREAD_CAN_SLEEP())) {
#ifdef EPOCH_TRACE
			epoch_trace_list(curthread);
#endif
			KASSERT(1,
			    ("malloc(M_WAITOK) with sleeping prohibited"));
		}
	}
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("malloc: called with spinlock or critical section held"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, *sizep)) {
		*vap = memguard_alloc(*sizep, flags);
		if (*vap != NULL)
			return (EJUSTRETURN);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	*sizep = redzone_size_ntor(*sizep);
#endif

	return (0);
}
#endif

/*
 * Handle large allocations and frees by using kmem_malloc directly.
 */
static inline bool
malloc_large_slab(uma_slab_t slab)
{
	uintptr_t va;

	va = (uintptr_t)slab;
	return ((va & 1) != 0);
}

static inline size_t
malloc_large_size(uma_slab_t slab)
{
	uintptr_t va;

	va = (uintptr_t)slab;
	return (va >> 1);
}

static caddr_t
malloc_large(size_t *size, struct domainset *policy, int flags)
{
	vm_offset_t va;
	size_t sz;

	sz = roundup(*size, PAGE_SIZE);
	va = kmem_malloc_domainset(policy, sz, flags);
	if (va != 0) {
		/* The low bit is unused for slab pointers. */
		vsetzoneslab(va, NULL, (void *)((sz << 1) | 1));
		uma_total_inc(sz);
		*size = sz;
	}
	return ((caddr_t)va);
}

static void
free_large(void *addr, size_t size)
{

	kmem_free((vm_offset_t)addr, size);
	uma_total_dec(size);
}

/*
 * malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
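 *
 *	If M_WAITOK is set instead, the allocation sleeps until it can be
 *	satisfied and never returns NULL, so such callers need no NULL
 *	check; M_ZERO additionally returns zeroed memory.  A non-sleeping
 *	caller (illustrative sketch) must handle the failure case itself:
 *
 *		p = malloc(sizeof(*p), M_TEMP, M_NOWAIT);
 *		if (p == NULL)
 *			return (ENOMEM);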
 */
void *
(malloc)(size_t size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#if defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

	MPASS((flags & M_EXEC) == 0);
#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (size <= kmem_zmax) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		va = malloc_large(&size, DOMAINSET_RR(), flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}

static void *
malloc_domain(size_t *sizep, int *indxp, struct malloc_type *mtp, int domain,
    int flags)
{
	uma_zone_t zone;
	caddr_t va;
	size_t size;
	int indx;

	size = *sizep;
	KASSERT(size <= kmem_zmax && (flags & M_EXEC) == 0,
	    ("malloc_domain: Called with bad flag / size combination."));
	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
	va = uma_zalloc_domain(zone, NULL, domain, flags);
	if (va != NULL)
		*sizep = zone->uz_size;
	*indxp = indx;
	return ((void *)va);
}

void *
malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
    int flags)
{
	struct vm_domainset_iter di;
	caddr_t va;
	int domain;
	int indx;

#if defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif
	MPASS((flags & M_EXEC) == 0);
#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif
	if (size <= kmem_zmax) {
		vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
		do {
			va = malloc_domain(&size, &indx, mtp, domain, flags);
		} while (va == NULL &&
		    vm_domainset_iter_policy(&di, &domain) == 0);
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		/* Policy is handled by kmem. */
		va = malloc_large(&size, ds, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return (va);
}

/*
 * Allocate an executable area.
 */
void *
malloc_exec(size_t size, struct malloc_type *mtp, int flags)
{
	caddr_t va;
#if defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

	flags |= M_EXEC;
#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif
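	/*
	 * Executable requests always take the large-allocation path, even
	 * for small sizes: the fixed-size bucket zones are not mapped
	 * executable, so an M_EXEC allocation consumes at least one page
	 * of its own.
	 */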
	va = malloc_large(&size, DOMAINSET_RR(), flags);
	malloc_type_allocated(mtp, va == NULL ? 0 : size);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}

void *
malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
    int flags)
{
	caddr_t va;
#if defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

	flags |= M_EXEC;
#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif
	/* Policy is handled by kmem. */
	va = malloc_large(&size, ds, flags);
	malloc_type_allocated(mtp, va == NULL ? 0 : size);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return (va);
}

void *
mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray: %zu * %zu overflowed", nmemb, size);

	return (malloc(size * nmemb, type, flags));
}
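
/*
 * Example (hypothetical caller, not code from this file): allocating an
 * array of n descriptors without open-coding an overflow-prone n * size
 * multiplication:
 *
 *	descs = mallocarray(n, sizeof(*descs), M_TEMP, M_WAITOK | M_ZERO);
 */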

#ifdef INVARIANTS
static void
free_save_type(void *addr, struct malloc_type *mtp, u_long size)
{
	struct malloc_type **mtpp = addr;

	/*
	 * Cache a pointer to the malloc_type that most recently freed
	 * this memory here.  This way we know who is most likely to
	 * have stepped on it later.
	 *
	 * This code assumes that size is a multiple of 8 bytes for
	 * 64 bit machines
	 */
	mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
	mtpp += (size - sizeof(struct malloc_type *)) /
	    sizeof(struct malloc_type *);
	*mtpp = mtp;
}
#endif

#ifdef MALLOC_DEBUG
static int
free_dbg(void **addrp, struct malloc_type *mtp)
{
	void *addr;

	addr = *addrp;
	KASSERT(mtp->ks_version == M_VERSION, ("free: bad malloc type version"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("free: called with spinlock or critical section held"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return (EJUSTRETURN);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return (EJUSTRETURN);
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	*addrp = redzone_addr_ntor(addr);
#endif

	return (0);
}
#endif

/*
 * free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_zone_t zone;
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (__predict_true(!malloc_large_slab(slab))) {
		size = zone->uz_size;
#ifdef INVARIANTS
		free_save_type(addr, mtp, size);
#endif
		uma_zfree_arg(zone, addr, slab);
	} else {
		size = malloc_large_size(slab);
		free_large(addr, size);
	}
	malloc_type_freed(mtp, size);
}

/*
 * zfree:
 *
 *	Zero then free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
zfree(void *addr, struct malloc_type *mtp)
{
	uma_zone_t zone;
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (__predict_true(!malloc_large_slab(slab))) {
		size = zone->uz_size;
#ifdef INVARIANTS
		free_save_type(addr, mtp, size);
#endif
		explicit_bzero(addr, size);
		uma_zfree_arg(zone, addr, slab);
	} else {
		size = malloc_large_size(slab);
		explicit_bzero(addr, size);
		free_large(addr, size);
	}
	malloc_type_freed(mtp, size);
}

/*
 * realloc: change the size of a memory block
 */
void *
realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
	uma_zone_t zone;
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	KASSERT(mtp->ks_version == M_VERSION,
	    ("realloc: bad malloc type version"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("realloc: called with spinlock or critical section held"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	zone = NULL;
	alloc = redzone_get_size(addr);
#else
	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!malloc_large_slab(slab))
		alloc = zone->uz_size;
	else
		alloc = malloc_large_size(slab);

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 * reallocf: same as realloc() but free memory on failure.
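 *
 * This keeps the common "grow a buffer or bail out" pattern leak-free;
 * an illustrative caller:
 *
 *	if ((p = reallocf(p, newsize, M_TEMP, M_NOWAIT)) == NULL)
 *		return (ENOMEM);
 *
 * Had plain realloc() been used there, a NULL return would leave the old
 * buffer allocated with no remaining pointer to free it by.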
 */
void *
reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}

/*
 * malloc_size: returns the number of bytes allocated for a request of the
 * specified size
 */
size_t
malloc_size(size_t size)
{
	int indx;

	if (size > kmem_zmax)
		return (0);
	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	return (kmemzones[indx].kz_size);
}

/*
 * malloc_usable_size: returns the usable size of the allocation.
 */
size_t
malloc_usable_size(const void *addr)
{
#ifndef DEBUG_REDZONE
	uma_zone_t zone;
	uma_slab_t slab;
#endif
	u_long size;

	if (addr == NULL)
		return (0);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(__DECONST(void *, addr)))
		return (memguard_get_req_size(addr));
#endif

#ifdef DEBUG_REDZONE
	size = redzone_get_size(__DECONST(void *, addr));
#else
	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("malloc_usable_size: address %p(%p) is not allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!malloc_large_slab(slab))
		size = zone->uz_size;
	else
		size = malloc_large_size(slab);
#endif
	return (size);
}

CTASSERT(VM_KMEM_SIZE_SCALE >= 1);

/*
 * Initialize the kernel memory (kmem) arena.
 */
void
kmeminit(void)
{
	u_long mem_size;
	u_long tmp;

#ifdef VM_KMEM_SIZE
	if (vm_kmem_size == 0)
		vm_kmem_size = VM_KMEM_SIZE;
#endif
#ifdef VM_KMEM_SIZE_MIN
	if (vm_kmem_size_min == 0)
		vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
#ifdef VM_KMEM_SIZE_MAX
	if (vm_kmem_size_max == 0)
		vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	/*
	 * Calculate the amount of kernel virtual address (KVA) space that is
	 * preallocated to the kmem arena.  In order to support a wide range
	 * of machines, it is a function of the physical memory size,
	 * specifically,
	 *
	 *	min(max(physical memory size / VM_KMEM_SIZE_SCALE,
	 *	    VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
	 *
	 * Every architecture must define an integral value for
	 * VM_KMEM_SIZE_SCALE.  However, the definitions of VM_KMEM_SIZE_MIN
	 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
	 * ceiling on this preallocation, are optional.  Typically,
	 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
	 * a given architecture.
	 */
	mem_size = vm_cnt.v_page_count;
	if (mem_size <= 32768) /* delphij XXX 128MB */
		kmem_zmax = PAGE_SIZE;

	if (vm_kmem_size_scale < 1)
		vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;

	/*
	 * Check if we should use defaults for the "vm_kmem_size"
	 * variable:
	 */
	if (vm_kmem_size == 0) {
		vm_kmem_size = mem_size / vm_kmem_size_scale;
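		/*
		 * The multiplication below converts pages to bytes and can
		 * overflow a u_long; when it does, clamp the result to
		 * vm_kmem_size_max instead of using the wrapped value.
		 */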
		vm_kmem_size = vm_kmem_size * PAGE_SIZE < vm_kmem_size ?
		    vm_kmem_size_max : vm_kmem_size * PAGE_SIZE;
		if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
			vm_kmem_size = vm_kmem_size_min;
		if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
			vm_kmem_size = vm_kmem_size_max;
	}
	if (vm_kmem_size == 0)
		panic("Tune VM_KMEM_SIZE_* for the platform");

	/*
	 * The amount of KVA space that is preallocated to the
	 * kmem arena can be set statically at compile-time or manually
	 * through the kernel environment.  However, it is still limited to
	 * twice the physical memory size, which has been sufficient to handle
	 * the most severe cases of external fragmentation in the kmem arena.
	 */
	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
		vm_kmem_size = 2 * mem_size * PAGE_SIZE;

	vm_kmem_size = round_page(vm_kmem_size);
#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, kernel_map);
#else
	tmp = vm_kmem_size;
#endif
	uma_set_limit(tmp);

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	memguard_init(kernel_arena);
#endif
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
mallocinit(void *dummy)
{
	int i;
	uint8_t indx;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	kmeminit();

	if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
		kmem_zmax = KMEM_ZMAX;

	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		const char *name = kmemzones[indx].kz_name;
		int subzone;

		for (subzone = 0; subzone < numzones; subzone++) {
			kmemzones[indx].kz_zone[subzone] =
			    uma_zcreate(name, size,
#ifdef INVARIANTS
			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
			    NULL, NULL, NULL, NULL,
#endif
			    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
		}
		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);

void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	if (mtp->ks_version != M_VERSION)
		panic("malloc_init: type %s with unsupported version %lu",
		    mtp->ks_shortdesc, mtp->ks_version);

	mtip = &mtp->ks_mti;
	mtip->mti_stats = uma_zalloc_pcpu(pcpu_zone_64, M_WAITOK | M_ZERO);
	mtp_set_subzone(mtp);

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_version == M_VERSION,
	    ("malloc_uninit: bad malloc type version"));

	mtx_lock(&malloc_mtx);
	mtip = &mtp->ks_mti;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;
				break;
			}
		}
		KASSERT(temp,
		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i <= mp_maxid; i++) {
		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	uma_zfree_pcpu(pcpu_zone_64, mtip->mti_stats);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}

static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp, zeromts;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int error, i;
	struct sbuf sbuf;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
	mtx_lock(&malloc_mtx);

	bzero(&zeromts, sizeof(zeromts));

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = &mtp->ks_mti;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i <= mp_maxid; i++) {
			mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
			(void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));
		}
		/*
		 * Fill in the missing CPUs.
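		 *
		 * Each type thus contributes exactly MAXCPU records, as
		 * advertised in mtsh_maxcpus above, so userland consumers
		 * such as libmemstat can parse the stream with a fixed
		 * per-type record count.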
		 */
		for (; i < MAXCPU; i++) {
			(void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));
		}
	}
	mtx_unlock(&malloc_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats,
    CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_MPSAFE, 0, 0,
    sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}

#ifdef DDB
static int64_t
get_malloc_stats(const struct malloc_type_internal *mtip, uint64_t *allocs,
    uint64_t *inuse)
{
	const struct malloc_type_stats *mtsp;
	uint64_t frees, alloced, freed;
	int i;

	*allocs = 0;
	frees = 0;
	alloced = 0;
	freed = 0;
	for (i = 0; i <= mp_maxid; i++) {
		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);

		*allocs += mtsp->mts_numallocs;
		frees += mtsp->mts_numfrees;
		alloced += mtsp->mts_memalloced;
		freed += mtsp->mts_memfreed;
	}
	*inuse = *allocs - frees;
	return (alloced - freed);
}

DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	const char *fmt_hdr, *fmt_entry;
	struct malloc_type *mtp;
	uint64_t allocs, inuse;
	int64_t size;
	/* variables for sorting */
	struct malloc_type *last_mtype, *cur_mtype;
	int64_t cur_size, last_size;
	int ties;

	if (modif[0] == 'i') {
		fmt_hdr = "%s,%s,%s,%s\n";
		fmt_entry = "\"%s\",%ju,%jdK,%ju\n";
	} else {
		fmt_hdr = "%18s %12s %12s %12s\n";
		fmt_entry = "%18s %12ju %12jdK %12ju\n";
	}

	db_printf(fmt_hdr, "Type", "InUse", "MemUse", "Requests");

	/* Select sort, largest size first. */
	last_mtype = NULL;
	last_size = INT64_MAX;
	for (;;) {
		cur_mtype = NULL;
		cur_size = -1;
		ties = 0;

		for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
			/*
			 * In the case of size ties, print out mtypes
			 * in the order they are encountered.  That is,
			 * when we encounter the most recently output
			 * mtype, we have already printed all preceding
			 * ties, and we must print all following ties.
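			 *
			 * Concretely, once the previously printed mtype
			 * has been passed, the `ties' flag relaxes the
			 * selection test from `size < last_size' to
			 * `size <= last_size', letting equal-sized types
			 * that follow it be picked on this pass.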
			 */
			if (mtp == last_mtype) {
				ties = 1;
				continue;
			}
			size = get_malloc_stats(&mtp->ks_mti, &allocs,
			    &inuse);
			if (size > cur_size && size < last_size + ties) {
				cur_size = size;
				cur_mtype = mtp;
			}
		}
		if (cur_mtype == NULL)
			break;

		size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
		db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
		    howmany(size, 1024), allocs);

		if (db_pager_quit)
			break;

		last_mtype = cur_mtype;
		last_size = cur_size;
	}
}

#if MALLOC_DEBUG_MAXZONES > 1
DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int subzone;

	if (!have_addr) {
		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
		return;
	}
	mtp = (void *)addr;
	if (mtp->ks_version != M_VERSION) {
		db_printf("Version %lx does not match expected %x\n",
		    mtp->ks_version, M_VERSION);
		return;
	}

	mtip = &mtp->ks_mti;
	subzone = mtip->mti_zone;

	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = &mtp->ks_mti;
		if (mtip->mti_zone != subzone)
			continue;
		db_printf("%s\n", mtp->ks_shortdesc);
		if (db_pager_quit)
			break;
	}
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
#endif /* DDB */