/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
 */
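/*
 * Illustrative usage sketch (added commentary, not from the original file):
 * a subsystem declares its own memory type and then allocates and frees
 * against it; M_EXAMPLE and struct example are hypothetical names.
 *
 *	MALLOC_DEFINE(M_EXAMPLE, "example", "example subsystem buffers");
 *
 *	struct example *ep;
 *
 *	ep = malloc(sizeof(*ep), M_EXAMPLE, M_WAITOK | M_ZERO);
 *	...
 *	free(ep, M_EXAMPLE);
 */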
#include <sys/cdefs.h>
#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/asan.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/msan.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/vmem.h>
#ifdef EPOCH_TRACE
#include <sys/epoch.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

bool	__read_frequently			dtrace_malloc_enabled;
dtrace_malloc_probe_func_t __read_mostly	dtrace_malloc_probe;
#endif

#if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) ||		\
    defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
#define	MALLOC_DEBUG	1
#endif

#if defined(KASAN) || defined(DEBUG_REDZONE)
#define	DEBUG_REDZONE_ARG_DEF	, unsigned long osize
#define	DEBUG_REDZONE_ARG	, osize
#else
#define	DEBUG_REDZONE_ARG_DEF
#define	DEBUG_REDZONE_ARG
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory.  'Sufficiently smaller' is defined as:
 * newsize <= oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

static struct malloc_type *kmemstatistics;
static int kmemcount;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	65536
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;
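/*
 * Worked example (added commentary, not from the original file): a
 * malloc(100) request is first rounded up to the next multiple of
 * KMEM_ZBASE (112), and kmemsize[112 >> KMEM_ZSHIFT] then selects the
 * smallest bucket that fits, here the 128-byte "malloc-128" zone below.
 */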
/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * Warning: the layout of the struct is duplicated in libmemstat for KVM
 * support.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	const char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "malloc-16", },
	{32, "malloc-32", },
	{64, "malloc-64", },
	{128, "malloc-128", },
	{256, "malloc-256", },
	{384, "malloc-384", },
	{512, "malloc-512", },
	{1024, "malloc-1024", },
	{2048, "malloc-2048", },
	{4096, "malloc-4096", },
	{8192, "malloc-8192", },
	{16384, "malloc-16384", },
	{32768, "malloc-32768", },
	{65536, "malloc-65536", },
	{0, NULL},
};

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long kmem_zmax = KMEM_ZMAX;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
    "Maximum allocation size that malloc(9) would use UMA as backend");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN,
    &vm_kmem_size_scale, 0, "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Free space in kmem");

static SYSCTL_NODE(_vm, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Malloc information");

static u_int vm_malloc_zone_count = nitems(kmemzones);
SYSCTL_UINT(_vm_malloc, OID_AUTO, zone_count,
    CTLFLAG_RD, &vm_malloc_zone_count, 0,
    "Number of malloc zones");

static int sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm_malloc, OID_AUTO, zone_sizes,
    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_malloc_zone_sizes, "S", "Zone sizes used by malloc");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel malloc debugging options");
#endif
/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = uma_size();
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size, limit;

	/* The sysctl is unsigned, implement as a saturation value. */
	size = uma_size();
	limit = uma_limit();
	if (size > limit)
		size = 0;
	else
		size = limit - size;
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS)
{
	int sizes[nitems(kmemzones)];
	int i;

	for (i = 0; i < nitems(kmemzones); i++) {
		sizes[i] = kmemzones[i].kz_size;
	}

	return (SYSCTL_OUT(req, &sizes, sizeof(sizes)));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones",
	    &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");

static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;
	const char *desc;
	size_t len;
	u_int val;

	mtip = &mtp->ks_mti;
	desc = mtp->ks_shortdesc;
	if (desc == NULL || (len = strlen(desc)) == 0)
		val = 0;
	else
		val = desc[zone_offset % len];
	mtip->mti_zone = (val % numzones);
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = &mtp->ks_mti;

	KASSERT(mtip->mti_zone < numzones,
	    ("mti_zone %u out of range %d",
	    mtip->mti_zone, numzones));
	return (mtip->mti_zone);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = &mtp->ks_mti;
	mtip->mti_zone = 0;
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * amount of bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = &mtp->ks_mti;
	mtsp = zpcpu_get(mtip->mti_stats);
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = &mtp->ks_mti;
	mtsp = zpcpu_get(mtip->mti_stats);
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 * contigmalloc:
 *
 *	Allocate a block of physically contiguous memory.
 *
 *	If M_NOWAIT is set, this routine will not block, and will return NULL
 *	if the allocation fails.
 */
void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
	    boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}

void *
contigmalloc_domainset(unsigned long size, struct malloc_type *type,
    struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}
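/*
 * Illustrative usage sketch (added commentary, not from the original file):
 * a driver needing a 64KB physically contiguous DMA buffer below 4GB,
 * aligned to 4KB, might call (M_DMABUF is a hypothetical type):
 *
 *	buf = contigmalloc(65536, M_DMABUF, M_WAITOK, 0, 0xffffffff,
 *	    4096, 0);
 *	...
 *	contigfree(buf, 65536, M_DMABUF);
 */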
/*
 * contigfree:
 *
 *	Free a block of memory allocated by contigmalloc.
 *
 *	This routine may not block.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{

	kmem_free(addr, size);
	malloc_type_freed(type, round_page(size));
}

#ifdef MALLOC_DEBUG
static int
malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
    int flags)
{
#ifdef INVARIANTS
	int indx;

	KASSERT(mtp->ks_version == M_VERSION,
	    ("malloc: bad malloc type version"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static struct timeval lasterr;
		static int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			*vap = NULL;
			return (EJUSTRETURN);
		}
	}
#endif
	if (flags & M_WAITOK) {
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
		if (__predict_false(!THREAD_CAN_SLEEP())) {
#ifdef EPOCH_TRACE
			epoch_trace_list(curthread);
#endif
			KASSERT(0,
			    ("malloc(M_WAITOK) with sleeping prohibited"));
		}
	}
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("malloc: called with spinlock or critical section held"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, *sizep)) {
		*vap = memguard_alloc(*sizep, flags);
		if (*vap != NULL)
			return (EJUSTRETURN);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	*sizep = redzone_size_ntor(*sizep);
#endif

	return (0);
}
#endif

/*
 * Handle large allocations and frees by using kmem_malloc directly.
 */
static inline bool
malloc_large_slab(uma_slab_t slab)
{
	uintptr_t va;

	va = (uintptr_t)slab;
	return ((va & 1) != 0);
}

static inline size_t
malloc_large_size(uma_slab_t slab)
{
	uintptr_t va;

	va = (uintptr_t)slab;
	return (va >> 1);
}

static caddr_t __noinline
malloc_large(size_t size, struct malloc_type *mtp, struct domainset *policy,
    int flags DEBUG_REDZONE_ARG_DEF)
{
	void *va;

	size = roundup(size, PAGE_SIZE);
	va = kmem_malloc_domainset(policy, size, flags);
	if (va != NULL) {
		/* The low bit is unused for slab pointers. */
		vsetzoneslab((uintptr_t)va, NULL, (void *)((size << 1) | 1));
		uma_total_inc(size);
	}
	malloc_type_allocated(mtp, va == NULL ? 0 : size);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	} else {
#ifdef DEBUG_REDZONE
		va = redzone_setup(va, osize);
#endif
		kasan_mark(va, osize, size, KASAN_MALLOC_REDZONE);
	}
	return (va);
}

static void
free_large(void *addr, size_t size)
{

	kmem_free(addr, size);
	uma_total_dec(size);
}
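/*
 * Worked example (added commentary, not from the original file): slab
 * pointers are always at least 2-byte aligned, so their low bit is clear;
 * malloc_large() reuses that bit to store a tagged size instead of a slab
 * pointer.  A 3-page (12288-byte) allocation is recorded as
 * (12288 << 1) | 1 == 24577; malloc_large_slab() tests the low bit and
 * malloc_large_size() shifts it away to recover 12288.
 */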
/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block, and will return NULL
 *	if the allocation fails.
 */
void *
(malloc)(size_t size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#if defined(DEBUG_REDZONE) || defined(KASAN)
	unsigned long osize = size;
#endif

	MPASS((flags & M_EXEC) == 0);

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (__predict_false(size > kmem_zmax))
		return (malloc_large(size, mtp, DOMAINSET_RR(), flags
		    DEBUG_REDZONE_ARG));

	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
	va = uma_zalloc(zone, flags);
	if (va != NULL) {
		size = zone->uz_size;
		if ((flags & M_ZERO) == 0) {
			kmsan_mark(va, size, KMSAN_STATE_UNINIT);
			kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
		}
	}
	malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
#ifdef KASAN
	if (va != NULL)
		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
#endif
	return ((void *) va);
}

static void *
malloc_domain(size_t *sizep, int *indxp, struct malloc_type *mtp, int domain,
    int flags)
{
	uma_zone_t zone;
	caddr_t va;
	size_t size;
	int indx;

	size = *sizep;
	KASSERT(size <= kmem_zmax && (flags & M_EXEC) == 0,
	    ("malloc_domain: Called with bad flag / size combination."));
	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
	va = uma_zalloc_domain(zone, NULL, domain, flags);
	if (va != NULL)
		*sizep = zone->uz_size;
	*indxp = indx;
	return ((void *)va);
}

void *
malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
    int flags)
{
	struct vm_domainset_iter di;
	caddr_t va;
	int domain;
	int indx;
#if defined(KASAN) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

	MPASS((flags & M_EXEC) == 0);

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (__predict_false(size > kmem_zmax))
		return (malloc_large(size, mtp, DOMAINSET_RR(), flags
		    DEBUG_REDZONE_ARG));

	vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
	do {
		va = malloc_domain(&size, &indx, mtp, domain, flags);
	} while (va == NULL && vm_domainset_iter_policy(&di, &domain) == 0);
	malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
#ifdef KASAN
	if (va != NULL)
		kasan_mark((void *)va, osize, size, KASAN_MALLOC_REDZONE);
#endif
#ifdef KMSAN
	if ((flags & M_ZERO) == 0) {
		kmsan_mark(va, size, KMSAN_STATE_UNINIT);
		kmsan_orig(va, size, KMSAN_TYPE_MALLOC, KMSAN_RET_ADDR);
	}
#endif
	return (va);
}
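/*
 * Illustrative usage sketch (added commentary, not from the original file):
 * a NUMA-aware consumer can prefer one domain while the iterator falls back
 * to the others on shortage, e.g.:
 *
 *	p = malloc_domainset(size, M_TEMP, DOMAINSET_PREF(domain), M_WAITOK);
 *	...
 *	free(p, M_TEMP);
 */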
/*
 * Allocate an executable area.
 */
void *
malloc_exec(size_t size, struct malloc_type *mtp, int flags)
{

	return (malloc_domainset_exec(size, mtp, DOMAINSET_RR(), flags));
}

void *
malloc_domainset_exec(size_t size, struct malloc_type *mtp,
    struct domainset *ds, int flags)
{
#if defined(DEBUG_REDZONE) || defined(KASAN)
	unsigned long osize = size;
#endif
#ifdef MALLOC_DEBUG
	caddr_t va;
#endif

	flags |= M_EXEC;

#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	return (malloc_large(size, mtp, ds, flags DEBUG_REDZONE_ARG));
}

void *
malloc_aligned(size_t size, size_t align, struct malloc_type *type, int flags)
{
	return (malloc_domainset_aligned(size, align, type, DOMAINSET_RR(),
	    flags));
}

void *
malloc_domainset_aligned(size_t size, size_t align,
    struct malloc_type *mtp, struct domainset *ds, int flags)
{
	void *res;
	size_t asize;

	KASSERT(powerof2(align),
	    ("malloc_domainset_aligned: wrong align %#zx size %#zx",
	    align, size));
	KASSERT(align <= PAGE_SIZE,
	    ("malloc_domainset_aligned: align %#zx (size %#zx) too large",
	    align, size));

	/*
	 * Round the allocation size up to the next power of 2,
	 * because we can only guarantee alignment for
	 * power-of-2-sized allocations.  Further increase the
	 * allocation size to align if the rounded size is less than
	 * align, since malloc zones provide alignment equal to their
	 * size.
	 */
	if (size == 0)
		size = 1;
	asize = size <= align ? align : 1UL << flsl(size - 1);

	res = malloc_domainset(asize, mtp, ds, flags);
	KASSERT(res == NULL || ((uintptr_t)res & (align - 1)) == 0,
	    ("malloc_domainset_aligned: result not aligned %p size %#zx "
	    "allocsize %#zx align %#zx", res, size, asize, align));
	return (res);
}

void *
mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray: %zu * %zu overflowed", nmemb, size);

	return (malloc(size * nmemb, type, flags));
}

void *
mallocarray_domainset(size_t nmemb, size_t size, struct malloc_type *type,
    struct domainset *ds, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray_domainset: %zu * %zu overflowed",
		    nmemb, size);

	return (malloc_domainset(size * nmemb, type, ds, flags));
}
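/*
 * Usage note (added commentary, not from the original file): mallocarray()
 * is preferred over a bare malloc(n * sizeof(*p), ...) for array
 * allocations, since the explicit WOULD_OVERFLOW() check panics instead of
 * quietly allocating a too-small buffer when the product overflows:
 *
 *	p = mallocarray(n, sizeof(*p), M_TEMP, M_WAITOK | M_ZERO);
 */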
#if defined(INVARIANTS) && !defined(KASAN)
static void
free_save_type(void *addr, struct malloc_type *mtp, u_long size)
{
	struct malloc_type **mtpp = addr;

	/*
	 * Cache a pointer to the malloc_type that most recently freed
	 * this memory here.  This way we know who is most likely to
	 * have stepped on it later.
	 *
	 * This code assumes that size is a multiple of 8 bytes for
	 * 64 bit machines.
	 */
	mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
	mtpp += (size - sizeof(struct malloc_type *)) /
	    sizeof(struct malloc_type *);
	*mtpp = mtp;
}
#endif

#ifdef MALLOC_DEBUG
static int
free_dbg(void **addrp, struct malloc_type *mtp)
{
	void *addr;

	addr = *addrp;
	KASSERT(mtp->ks_version == M_VERSION,
	    ("free: bad malloc type version"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("free: called with spinlock or critical section held"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return (EJUSTRETURN);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return (EJUSTRETURN);
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	*addrp = redzone_addr_ntor(addr);
#endif

	return (0);
}
#endif

/*
 * free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_zone_t zone;
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (__predict_true(!malloc_large_slab(slab))) {
		size = zone->uz_size;
#if defined(INVARIANTS) && !defined(KASAN)
		free_save_type(addr, mtp, size);
#endif
		uma_zfree_arg(zone, addr, slab);
	} else {
		size = malloc_large_size(slab);
		free_large(addr, size);
	}
	malloc_type_freed(mtp, size);
}

/*
 * zfree:
 *
 *	Zero then free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
zfree(void *addr, struct malloc_type *mtp)
{
	uma_zone_t zone;
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("zfree: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (__predict_true(!malloc_large_slab(slab))) {
		size = zone->uz_size;
#if defined(INVARIANTS) && !defined(KASAN)
		free_save_type(addr, mtp, size);
#endif
		kasan_mark(addr, size, size, 0);
		explicit_bzero(addr, size);
		uma_zfree_arg(zone, addr, slab);
	} else {
		size = malloc_large_size(slab);
		kasan_mark(addr, size, size, 0);
		explicit_bzero(addr, size);
		free_large(addr, size);
	}
	malloc_type_freed(mtp, size);
}
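/*
 * Usage note (added commentary, not from the original file): zfree() is
 * meant for buffers whose contents must not linger after release, such as
 * key material; it scrubs with explicit_bzero() before returning the
 * memory, whereas plain free() may leave the old bytes in place until the
 * memory is reused.
 */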
/*
 * realloc: change the size of a memory block
 */
void *
realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
#ifndef DEBUG_REDZONE
	uma_zone_t zone;
	uma_slab_t slab;
#endif
	unsigned long alloc;
	void *newaddr;

	KASSERT(mtp->ks_version == M_VERSION,
	    ("realloc: bad malloc type version"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("realloc: called with spinlock or critical section held"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	alloc = redzone_get_size(addr);
#else
	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!malloc_large_slab(slab))
		alloc = zone->uz_size;
	else
		alloc = malloc_large_size(slab);

	/* Reuse the original block if appropriate */
	if (size <= alloc &&
	    (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) {
		kasan_mark((void *)addr, size, alloc, KASAN_MALLOC_REDZONE);
		return (addr);
	}
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/*
	 * Copy over original contents.  For KASAN, the redzone must be
	 * marked valid before performing the copy.
	 */
	kasan_mark(addr, alloc, alloc, 0);
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 * reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}

/*
 * malloc_size: returns the number of bytes allocated for a request of the
 *		specified size
 */
size_t
malloc_size(size_t size)
{
	int indx;

	if (size > kmem_zmax)
		return (0);
	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	return (kmemzones[indx].kz_size);
}

/*
 * malloc_usable_size: returns the usable size of the allocation.
 */
size_t
malloc_usable_size(const void *addr)
{
#ifndef DEBUG_REDZONE
	uma_zone_t zone;
	uma_slab_t slab;
#endif
	u_long size;

	if (addr == NULL)
		return (0);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(__DECONST(void *, addr)))
		return (memguard_get_req_size(addr));
#endif

#ifdef DEBUG_REDZONE
	size = redzone_get_size(__DECONST(void *, addr));
#else
	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("malloc_usable_size: address %p(%p) is not allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!malloc_large_slab(slab))
		size = zone->uz_size;
	else
		size = malloc_large_size(slab);
#endif

	/*
	 * Unmark the redzone to avoid reports from consumers who are
	 * (presumably) about to use the full allocation size.
	 */
	kasan_mark(addr, size, size, 0);

	return (size);
}
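/*
 * Worked example (added commentary, not from the original file): with the
 * default REALLOC_FRACTION of 1, realloc() of a 1024-byte block down to 512
 * bytes allocates a new block, because reuse requires size > (alloc >> 1),
 * while a request for 513 bytes reuses the block in place.  reallocf() is
 * the leak-safe variant for the common pattern:
 *
 *	if ((p = reallocf(p, nsize, M_TEMP, M_NOWAIT)) == NULL)
 *		return (ENOMEM);
 */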
CTASSERT(VM_KMEM_SIZE_SCALE >= 1);

/*
 * Initialize the kernel memory (kmem) arena.
 */
void
kmeminit(void)
{
	u_long mem_size;
	u_long tmp;

#ifdef VM_KMEM_SIZE
	if (vm_kmem_size == 0)
		vm_kmem_size = VM_KMEM_SIZE;
#endif
#ifdef VM_KMEM_SIZE_MIN
	if (vm_kmem_size_min == 0)
		vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
#ifdef VM_KMEM_SIZE_MAX
	if (vm_kmem_size_max == 0)
		vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	/*
	 * Calculate the amount of kernel virtual address (KVA) space that is
	 * preallocated to the kmem arena.  In order to support a wide range
	 * of machines, it is a function of the physical memory size,
	 * specifically,
	 *
	 *	min(max(physical memory size / VM_KMEM_SIZE_SCALE,
	 *	    VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
	 *
	 * Every architecture must define an integral value for
	 * VM_KMEM_SIZE_SCALE.  However, the definitions of VM_KMEM_SIZE_MIN
	 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
	 * ceiling on this preallocation, are optional.  Typically,
	 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space
	 * on a given architecture.
	 */
	mem_size = vm_cnt.v_page_count;
	if (mem_size <= 32768) /* delphij XXX 128MB */
		kmem_zmax = PAGE_SIZE;

	if (vm_kmem_size_scale < 1)
		vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;

	/*
	 * Check if we should use defaults for the "vm_kmem_size"
	 * variable:
	 */
	if (vm_kmem_size == 0) {
		vm_kmem_size = mem_size / vm_kmem_size_scale;
		vm_kmem_size = vm_kmem_size * PAGE_SIZE < vm_kmem_size ?
		    vm_kmem_size_max : vm_kmem_size * PAGE_SIZE;
		if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
			vm_kmem_size = vm_kmem_size_min;
		if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
			vm_kmem_size = vm_kmem_size_max;
	}
	if (vm_kmem_size == 0)
		panic("Tune VM_KMEM_SIZE_* for the platform");

	/*
	 * The amount of KVA space that is preallocated to the
	 * kmem arena can be set statically at compile-time or manually
	 * through the kernel environment.  However, it is still limited to
	 * twice the physical memory size, which has been sufficient to handle
	 * the most severe cases of external fragmentation in the kmem arena.
	 */
	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
		vm_kmem_size = 2 * mem_size * PAGE_SIZE;

	vm_kmem_size = round_page(vm_kmem_size);

	/*
	 * With KASAN or KMSAN enabled, dynamically allocated kernel memory is
	 * shadowed.  Account for this when setting the UMA limit.
	 */
#if defined(KASAN)
	vm_kmem_size = (vm_kmem_size * KASAN_SHADOW_SCALE) /
	    (KASAN_SHADOW_SCALE + 1);
#elif defined(KMSAN)
	vm_kmem_size /= 3;
#endif

#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, kernel_map);
#else
	tmp = vm_kmem_size;
#endif
	uma_set_limit(tmp);

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	memguard_init(kernel_arena);
#endif
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
mallocinit(void *dummy)
{
	int i;
	uint8_t indx;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	kmeminit();

	if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
		kmem_zmax = KMEM_ZMAX;

	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		const char *name = kmemzones[indx].kz_name;
		size_t align;
		int subzone;

		align = UMA_ALIGN_PTR;
		if (powerof2(size) && size > sizeof(void *))
			align = MIN(size, PAGE_SIZE) - 1;
		for (subzone = 0; subzone < numzones; subzone++) {
			kmemzones[indx].kz_zone[subzone] =
			    uma_zcreate(name, size,
#if defined(INVARIANTS) && !defined(KASAN) && !defined(KMSAN)
			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
			    NULL, NULL, NULL, NULL,
#endif
			    align, UMA_ZONE_MALLOC);
		}
		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);

void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	if (mtp->ks_version != M_VERSION)
		panic("malloc_init: type %s with unsupported version %lu",
		    mtp->ks_shortdesc, mtp->ks_version);

	mtip = &mtp->ks_mti;
	mtip->mti_stats = uma_zalloc_pcpu(pcpu_zone_64, M_WAITOK | M_ZERO);
	mtp_set_subzone(mtp);

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}
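/*
 * Usage note (added commentary, not from the original file): consumers do
 * not call malloc_init()/malloc_uninit() directly; MALLOC_DEFINE() arranges
 * for them to run via SYSINIT/SYSUNINIT, so a type declared in a kernel
 * module is linked into kmemstatistics on load and removed, with the leak
 * check below, on unload.
 */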
void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_version == M_VERSION,
	    ("malloc_uninit: bad malloc type version"));

	mtx_lock(&malloc_mtx);
	mtip = &mtp->ks_mti;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;
				break;
			}
		}
		KASSERT(temp,
		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i <= mp_maxid; i++) {
		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n",
		    mtp->ks_shortdesc, temp_allocs, temp_bytes);
	}

	uma_zfree_pcpu(pcpu_zone_64, mtip->mti_stats);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}

static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp, zeromts;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int error, i;
	struct sbuf sbuf;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
	mtx_lock(&malloc_mtx);

	bzero(&zeromts, sizeof(zeromts));

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = &mtp->ks_mti;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i <= mp_maxid; i++) {
			mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
			(void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));
		}
		/*
		 * Fill in the missing CPUs.
		 */
		for (; i < MAXCPU; i++) {
			(void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));
		}
	}
	mtx_unlock(&malloc_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats,
    CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_MPSAFE, 0, 0,
    sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}

#ifdef DDB
static int64_t
get_malloc_stats(const struct malloc_type_internal *mtip, uint64_t *allocs,
    uint64_t *inuse)
{
	const struct malloc_type_stats *mtsp;
	uint64_t frees, alloced, freed;
	int i;

	*allocs = 0;
	frees = 0;
	alloced = 0;
	freed = 0;
	for (i = 0; i <= mp_maxid; i++) {
		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);

		*allocs += mtsp->mts_numallocs;
		frees += mtsp->mts_numfrees;
		alloced += mtsp->mts_memalloced;
		freed += mtsp->mts_memfreed;
	}
	*inuse = *allocs - frees;
	return (alloced - freed);
}

DB_SHOW_COMMAND_FLAGS(malloc, db_show_malloc, DB_CMD_MEMSAFE)
{
	const char *fmt_hdr, *fmt_entry;
	struct malloc_type *mtp;
	uint64_t allocs, inuse;
	int64_t size;
	/* variables for sorting */
	struct malloc_type *last_mtype, *cur_mtype;
	int64_t cur_size, last_size;
	int ties;

	if (modif[0] == 'i') {
		fmt_hdr = "%s,%s,%s,%s\n";
		fmt_entry = "\"%s\",%ju,%jdK,%ju\n";
	} else {
		fmt_hdr = "%18s %12s %12s %12s\n";
		fmt_entry = "%18s %12ju %12jdK %12ju\n";
	}

	db_printf(fmt_hdr, "Type", "InUse", "MemUse", "Requests");

	/* Select sort, largest size first. */
	last_mtype = NULL;
	last_size = INT64_MAX;
	for (;;) {
		cur_mtype = NULL;
		cur_size = -1;
		ties = 0;

		for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
			/*
			 * In the case of size ties, print out mtypes
			 * in the order they are encountered.  That is,
			 * when we encounter the most recently output
			 * mtype, we have already printed all preceding
			 * ties, and we must print all following ties.
			 */
			if (mtp == last_mtype) {
				ties = 1;
				continue;
			}
			size = get_malloc_stats(&mtp->ks_mti, &allocs,
			    &inuse);
			if (size > cur_size && size < last_size + ties) {
				cur_size = size;
				cur_mtype = mtp;
			}
		}
		if (cur_mtype == NULL)
			break;

		size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
		db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
		    howmany(size, 1024), allocs);

		if (db_pager_quit)
			break;

		last_mtype = cur_mtype;
		last_size = cur_size;
	}
}

#if MALLOC_DEBUG_MAXZONES > 1
DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int subzone;

	if (!have_addr) {
		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
		return;
	}
	mtp = (void *)addr;
	if (mtp->ks_version != M_VERSION) {
		db_printf("Version %lx does not match expected %x\n",
		    mtp->ks_version, M_VERSION);
		return;
	}

	mtip = &mtp->ks_mti;
	subzone = mtip->mti_zone;

	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = &mtp->ks_mti;
		if (mtip->mti_zone != subzone)
			continue;
		db_printf("%s\n", mtp->ks_shortdesc);
		if (db_pager_quit)
			break;
	}
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
#endif /* DDB */
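/*
 * Usage note (added commentary, not from the original file): at the ddb
 * prompt, "show malloc" prints each type's InUse/MemUse/Requests sorted by
 * memory use, and the 'i' modifier selects the CSV output format chosen by
 * fmt_hdr/fmt_entry above.
 */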