/*-
 * SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2009 Robert N. M. Watson
 * Copyright (c) 2008 Otto Moerbeek <otto@drijf.net> (mallocarray)
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
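 *
 * A minimal usage sketch (hypothetical consumer code, not part of this
 * file): a driver declares its own memory type and then allocates and
 * frees through it, so that the statistics maintained below are charged
 * to that type.
 *
 *	MALLOC_DEFINE(M_EXAMPLEDEV, "exampledev", "example driver buffers");
 *
 *	buf = malloc(len, M_EXAMPLEDEV, M_WAITOK | M_ZERO);
 *	...
 *	free(buf, M_EXAMPLEDEV);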
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/queue.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/time.h>
#include <sys/vmem.h>
#ifdef EPOCH_TRACE
#include <sys/epoch.h>
#endif

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_domainset.h>
#include <vm/vm_pageout.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/vm_phys.h>
#include <vm/vm_pagequeue.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

bool	__read_frequently			dtrace_malloc_enabled;
dtrace_malloc_probe_func_t __read_mostly	dtrace_malloc_probe;
#endif

#if defined(INVARIANTS) || defined(MALLOC_MAKE_FAILURES) ||		\
    defined(DEBUG_MEMGUARD) || defined(DEBUG_REDZONE)
#define	MALLOC_DEBUG	1
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory.  'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

static struct malloc_type *kmemstatistics;
static int kmemcount;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	65536
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static uint8_t kmemsize[KMEM_ZSIZE + 1];

#ifndef MALLOC_DEBUG_MAXZONES
#define	MALLOC_DEBUG_MAXZONES	1
#endif
static int numzones = MALLOC_DEBUG_MAXZONES;

/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * Warning: the layout of the struct is duplicated in libmemstat for KVM support.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
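 *
 * Illustrative behaviour (a sketch of the rounding performed in malloc()
 * below, using the sizes from the kmemzones[] table that follows): a
 * request for 100 bytes is first rounded up to the KMEM_ZBASE-aligned
 * value 112, whose kmemsize[] index selects the 128-byte zone, so the
 * caller receives a 128-byte item and the statistics record 128 bytes
 * allocated.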
 */
struct {
	int kz_size;
	const char *kz_name;
	uma_zone_t kz_zone[MALLOC_DEBUG_MAXZONES];
} kmemzones[] = {
	{16, "malloc-16", },
	{32, "malloc-32", },
	{64, "malloc-64", },
	{128, "malloc-128", },
	{256, "malloc-256", },
	{384, "malloc-384", },
	{512, "malloc-512", },
	{1024, "malloc-1024", },
	{2048, "malloc-2048", },
	{4096, "malloc-4096", },
	{8192, "malloc-8192", },
	{16384, "malloc-16384", },
	{32768, "malloc-32768", },
	{65536, "malloc-65536", },
	{0, NULL},
};

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RDTUN, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long kmem_zmax = KMEM_ZMAX;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_zmax, CTLFLAG_RDTUN, &kmem_zmax, 0,
    "Maximum allocation size for which malloc(9) uses UMA as its backend");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RDTUN, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RDTUN, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RDTUN, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

static int sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_size,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_size, "LU", "Current kmem allocation size");

static int sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm, OID_AUTO, kmem_map_free,
    CTLFLAG_RD | CTLTYPE_ULONG | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_kmem_map_free, "LU", "Free space in kmem");

static SYSCTL_NODE(_vm, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Malloc information");

static u_int vm_malloc_zone_count = nitems(kmemzones);
SYSCTL_UINT(_vm_malloc, OID_AUTO, zone_count,
    CTLFLAG_RD, &vm_malloc_zone_count, 0,
    "Number of malloc zones");

static int sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS);
SYSCTL_PROC(_vm_malloc, OID_AUTO, zone_sizes,
    CTLFLAG_RD | CTLTYPE_OPAQUE | CTLFLAG_MPSAFE, NULL, 0,
    sysctl_vm_malloc_zone_sizes, "S", "Zone sizes used by malloc");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;

#if defined(MALLOC_MAKE_FAILURES) || (MALLOC_DEBUG_MAXZONES > 1)
static SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "Kernel malloc debugging options");
#endif

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
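 *
 * Example (a sketch; the sysctl knobs are defined just below): on a kernel
 * built with MALLOC_MAKE_FAILURES, setting
 *
 *	sysctl debug.malloc.failure_rate=100
 *
 * makes every 100th M_NOWAIT allocation fail, while
 * debug.malloc.failure_count reports how many failures have been injected
 * so far.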
 */
#ifdef MALLOC_MAKE_FAILURES
static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RWTUN,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

static int
sysctl_kmem_map_size(SYSCTL_HANDLER_ARGS)
{
	u_long size;

	size = uma_size();
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_kmem_map_free(SYSCTL_HANDLER_ARGS)
{
	u_long size, limit;

	/* The sysctl is unsigned, implement as a saturation value. */
	size = uma_size();
	limit = uma_limit();
	if (size > limit)
		size = 0;
	else
		size = limit - size;
	return (sysctl_handle_long(oidp, &size, 0, req));
}

static int
sysctl_vm_malloc_zone_sizes(SYSCTL_HANDLER_ARGS)
{
	int sizes[nitems(kmemzones)];
	int i;

	for (i = 0; i < nitems(kmemzones); i++) {
		sizes[i] = kmemzones[i].kz_size;
	}

	return (SYSCTL_OUT(req, &sizes, sizeof(sizes)));
}

/*
 * malloc(9) uma zone separation -- sub-page buffer overruns in one
 * malloc type will affect only a subset of other malloc types.
 */
#if MALLOC_DEBUG_MAXZONES > 1
static void
tunable_set_numzones(void)
{

	TUNABLE_INT_FETCH("debug.malloc.numzones",
	    &numzones);

	/* Sanity check the number of malloc uma zones. */
	if (numzones <= 0)
		numzones = 1;
	if (numzones > MALLOC_DEBUG_MAXZONES)
		numzones = MALLOC_DEBUG_MAXZONES;
}
SYSINIT(numzones, SI_SUB_TUNABLES, SI_ORDER_ANY, tunable_set_numzones, NULL);
SYSCTL_INT(_debug_malloc, OID_AUTO, numzones, CTLFLAG_RDTUN | CTLFLAG_NOFETCH,
    &numzones, 0, "Number of malloc uma subzones");

/*
 * Any number that changes regularly is an okay choice for the
 * offset.  Build numbers are pretty good if you have them.
 */
static u_int zone_offset = __FreeBSD_version;
TUNABLE_INT("debug.malloc.zone_offset", &zone_offset);
SYSCTL_UINT(_debug_malloc, OID_AUTO, zone_offset, CTLFLAG_RDTUN,
    &zone_offset, 0, "Separate malloc types by examining the "
    "Nth character in the malloc type short description.");

static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;
	const char *desc;
	size_t len;
	u_int val;

	mtip = &mtp->ks_mti;
	desc = mtp->ks_shortdesc;
	if (desc == NULL || (len = strlen(desc)) == 0)
		val = 0;
	else
		val = desc[zone_offset % len];
	mtip->mti_zone = (val % numzones);
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = &mtp->ks_mti;

	KASSERT(mtip->mti_zone < numzones,
	    ("mti_zone %u out of range %d",
	    mtip->mti_zone, numzones));
	return (mtip->mti_zone);
}
#elif MALLOC_DEBUG_MAXZONES == 0
#error "MALLOC_DEBUG_MAXZONES must be positive."
#else
static void
mtp_set_subzone(struct malloc_type *mtp)
{
	struct malloc_type_internal *mtip;

	mtip = &mtp->ks_mti;
	mtip->mti_zone = 0;
}

static inline u_int
mtp_get_subzone(struct malloc_type *mtp)
{

	return (0);
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 * An allocation has succeeded -- update malloc type statistics for the
 * amount of bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = &mtp->ks_mti;
	mtsp = zpcpu_get(mtip->mti_stats);
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = &mtp->ks_mti;
	mtsp = zpcpu_get(mtip->mti_stats);
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (__predict_false(dtrace_malloc_enabled)) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}

/*
 * contigmalloc:
 *
 *	Allocate a block of physically contiguous memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return
 *	NULL if the allocation fails.
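 *
 *	Illustrative call (a sketch; the bounds are hypothetical): a driver
 *	needing a 64 KB physically contiguous buffer below 4 GB, aligned to
 *	a page, could use
 *
 *		buf = contigmalloc(65536, M_DEVBUF, M_WAITOK,
 *		    0, 0xffffffffUL, PAGE_SIZE, 0);
 *		...
 *		contigfree(buf, 65536, M_DEVBUF);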
 */
void *
contigmalloc(unsigned long size, struct malloc_type *type, int flags,
    vm_paddr_t low, vm_paddr_t high, unsigned long alignment,
    vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig(size, flags, low, high, alignment,
	    boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}

void *
contigmalloc_domainset(unsigned long size, struct malloc_type *type,
    struct domainset *ds, int flags, vm_paddr_t low, vm_paddr_t high,
    unsigned long alignment, vm_paddr_t boundary)
{
	void *ret;

	ret = (void *)kmem_alloc_contig_domainset(ds, size, flags, low, high,
	    alignment, boundary, VM_MEMATTR_DEFAULT);
	if (ret != NULL)
		malloc_type_allocated(type, round_page(size));
	return (ret);
}

/*
 * contigfree:
 *
 *	Free a block of memory allocated by contigmalloc.
 *
 *	This routine may not block.
 */
void
contigfree(void *addr, unsigned long size, struct malloc_type *type)
{

	kmem_free((vm_offset_t)addr, size);
	malloc_type_freed(type, round_page(size));
}

#ifdef MALLOC_DEBUG
static int
malloc_dbg(caddr_t *vap, size_t *sizep, struct malloc_type *mtp,
    int flags)
{
#ifdef INVARIANTS
	int indx;

	KASSERT(mtp->ks_version == M_VERSION, ("malloc: bad malloc type version"));
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static	struct timeval lasterr;
		static	int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			*vap = NULL;
			return (EJUSTRETURN);
		}
	}
#endif
	if (flags & M_WAITOK) {
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
		if (__predict_false(!THREAD_CAN_SLEEP())) {
#ifdef EPOCH_TRACE
			epoch_trace_list(curthread);
#endif
			KASSERT(0,
			    ("malloc(M_WAITOK) with sleeping prohibited"));
		}
	}
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("malloc: called with spinlock or critical section held"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp_mtp(mtp, *sizep)) {
		*vap = memguard_alloc(*sizep, flags);
		if (*vap != NULL)
			return (EJUSTRETURN);
		/* This is unfortunate but should not be fatal. */
	}
#endif

#ifdef DEBUG_REDZONE
	*sizep = redzone_size_ntor(*sizep);
#endif

	return (0);
}
#endif

/*
 * Handle large allocations and frees by using kmem_malloc directly.
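 *
 * The accessors below rely on the low bit of the per-page slab pointer
 * being unused: malloc_large() stores ((size << 1) | 1) in that slot, so,
 * for example, a 12 KB allocation is recorded as (12288 << 1) | 1 and
 * malloc_large_size() recovers 12288 by shifting the tag right by one.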
 */
static inline bool
malloc_large_slab(uma_slab_t slab)
{
	uintptr_t va;

	va = (uintptr_t)slab;
	return ((va & 1) != 0);
}

static inline size_t
malloc_large_size(uma_slab_t slab)
{
	uintptr_t va;

	va = (uintptr_t)slab;
	return (va >> 1);
}

static caddr_t
malloc_large(size_t *size, struct domainset *policy, int flags)
{
	vm_offset_t va;
	size_t sz;

	sz = roundup(*size, PAGE_SIZE);
	va = kmem_malloc_domainset(policy, sz, flags);
	if (va != 0) {
		/* The low bit is unused for slab pointers. */
		vsetzoneslab(va, NULL, (void *)((sz << 1) | 1));
		uma_total_inc(sz);
		*size = sz;
	}
	return ((caddr_t)va);
}

static void
free_large(void *addr, size_t size)
{

	kmem_free((vm_offset_t)addr, size);
	uma_total_dec(size);
}

/*
 * malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return
 *	NULL if the allocation fails.
 */
void *
(malloc)(size_t size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#if defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

	MPASS((flags & M_EXEC) == 0);
#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif

	if (size <= kmem_zmax) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		va = malloc_large(&size, DOMAINSET_RR(), flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
		t_malloc_fail = time_uptime;
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}

static void *
malloc_domain(size_t *sizep, int *indxp, struct malloc_type *mtp, int domain,
    int flags)
{
	uma_zone_t zone;
	caddr_t va;
	size_t size;
	int indx;

	size = *sizep;
	KASSERT(size <= kmem_zmax && (flags & M_EXEC) == 0,
	    ("malloc_domain: Called with bad flag / size combination."));
	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	zone = kmemzones[indx].kz_zone[mtp_get_subzone(mtp)];
#ifdef MALLOC_PROFILE
	krequests[size >> KMEM_ZSHIFT]++;
#endif
	va = uma_zalloc_domain(zone, NULL, domain, flags);
	if (va != NULL)
		*sizep = zone->uz_size;
	*indxp = indx;
	return ((void *)va);
}

void *
malloc_domainset(size_t size, struct malloc_type *mtp, struct domainset *ds,
    int flags)
{
	struct vm_domainset_iter di;
	caddr_t va;
	int domain;
	int indx;

#if defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif
	MPASS((flags & M_EXEC) == 0);
#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif
	if (size <= kmem_zmax) {
		vm_domainset_iter_policy_init(&di, ds, &domain, &flags);
		do {
			va = malloc_domain(&size, &indx, mtp, domain, flags);
		} while (va == NULL &&
		    vm_domainset_iter_policy(&di, &domain) == 0);
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		/* Policy is handled by kmem. */
		va = malloc_large(&size, ds, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
		t_malloc_fail = time_uptime;
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return (va);
}

/*
 * Allocate an executable area.
 */
void *
malloc_exec(size_t size, struct malloc_type *mtp, int flags)
{
	caddr_t va;
#if defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

	flags |= M_EXEC;
#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif
	va = malloc_large(&size, DOMAINSET_RR(), flags);
	malloc_type_allocated(mtp, va == NULL ? 0 : size);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
		t_malloc_fail = time_uptime;
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}

void *
malloc_domainset_exec(size_t size, struct malloc_type *mtp, struct domainset *ds,
    int flags)
{
	caddr_t va;
#if defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

	flags |= M_EXEC;
#ifdef MALLOC_DEBUG
	va = NULL;
	if (malloc_dbg(&va, &size, mtp, flags) != 0)
		return (va);
#endif
	/* Policy is handled by kmem. */
	va = malloc_large(&size, ds, flags);
	malloc_type_allocated(mtp, va == NULL ? 0 : size);
	if (__predict_false(va == NULL)) {
		KASSERT((flags & M_WAITOK) == 0,
		    ("malloc(M_WAITOK) returned NULL"));
		t_malloc_fail = time_uptime;
	}
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return (va);
}

void *
mallocarray(size_t nmemb, size_t size, struct malloc_type *type, int flags)
{

	if (WOULD_OVERFLOW(nmemb, size))
		panic("mallocarray: %zu * %zu overflowed", nmemb, size);

	return (malloc(size * nmemb, type, flags));
}

#ifdef INVARIANTS
static void
free_save_type(void *addr, struct malloc_type *mtp, u_long size)
{
	struct malloc_type **mtpp = addr;

	/*
	 * Cache a pointer to the malloc_type that most recently freed
	 * this memory here.  This way we know who is most likely to
	 * have stepped on it later.
	 *
	 * This code assumes that size is a multiple of 8 bytes on
	 * 64-bit machines.
	 */
	mtpp = (struct malloc_type **) ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
	mtpp += (size - sizeof(struct malloc_type *)) /
	    sizeof(struct malloc_type *);
	*mtpp = mtp;
}
#endif

#ifdef MALLOC_DEBUG
static int
free_dbg(void **addrp, struct malloc_type *mtp)
{
	void *addr;

	addr = *addrp;
	KASSERT(mtp->ks_version == M_VERSION, ("free: bad malloc type version"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("free: called with spinlock or critical section held"));

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return (EJUSTRETURN);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr)) {
		memguard_free(addr);
		return (EJUSTRETURN);
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	*addrp = redzone_addr_ntor(addr);
#endif

	return (0);
}
#endif

/*
 * free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_zone_t zone;
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (__predict_true(!malloc_large_slab(slab))) {
		size = zone->uz_size;
#ifdef INVARIANTS
		free_save_type(addr, mtp, size);
#endif
		uma_zfree_arg(zone, addr, slab);
	} else {
		size = malloc_large_size(slab);
		free_large(addr, size);
	}
	malloc_type_freed(mtp, size);
}

/*
 * zfree:
 *
 *	Zero then free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
zfree(void *addr, struct malloc_type *mtp)
{
	uma_zone_t zone;
	uma_slab_t slab;
	u_long size;

#ifdef MALLOC_DEBUG
	if (free_dbg(&addr, mtp) != 0)
		return;
#endif
	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (__predict_true(!malloc_large_slab(slab))) {
		size = zone->uz_size;
#ifdef INVARIANTS
		free_save_type(addr, mtp, size);
#endif
		explicit_bzero(addr, size);
		uma_zfree_arg(zone, addr, slab);
	} else {
		size = malloc_large_size(slab);
		explicit_bzero(addr, size);
		free_large(addr, size);
	}
	malloc_type_freed(mtp, size);
}

/*
 * realloc: change the size of a memory block
 */
void *
realloc(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
	uma_zone_t zone;
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	KASSERT(mtp->ks_version == M_VERSION,
	    ("realloc: bad malloc type version"));
	KASSERT(curthread->td_critnest == 0 || SCHEDULER_STOPPED(),
	    ("realloc: called with spinlock or critical section held"));

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(addr))
		return (memguard_realloc(addr, size, mtp, flags));
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	zone = NULL;
	alloc = redzone_get_size(addr);
#else
	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!malloc_large_slab(slab))
		alloc = zone->uz_size;
	else
		alloc = malloc_large_size(slab);

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 * reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, size_t size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}

/*
 * malloc_size: returns the number of bytes allocated for a request of the
 * specified size
 */
size_t
malloc_size(size_t size)
{
	int indx;

	if (size > kmem_zmax)
		return (0);
	if (size & KMEM_ZMASK)
		size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
	indx = kmemsize[size >> KMEM_ZSHIFT];
	return (kmemzones[indx].kz_size);
}

/*
 * malloc_usable_size: returns the usable size of the allocation.
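 *
 * For example, a block obtained with malloc(100, ...) is served from the
 * 128-byte bucket, so malloc_usable_size() reports 128; the caller may use
 * the full reported size without reallocating.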
 */
size_t
malloc_usable_size(const void *addr)
{
#ifndef DEBUG_REDZONE
	uma_zone_t zone;
	uma_slab_t slab;
#endif
	u_long size;

	if (addr == NULL)
		return (0);

#ifdef DEBUG_MEMGUARD
	if (is_memguard_addr(__DECONST(void *, addr)))
		return (memguard_get_req_size(addr));
#endif

#ifdef DEBUG_REDZONE
	size = redzone_get_size(__DECONST(void *, addr));
#else
	vtozoneslab((vm_offset_t)addr & (~UMA_SLAB_MASK), &zone, &slab);
	if (slab == NULL)
		panic("malloc_usable_size: address %p(%p) is not allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!malloc_large_slab(slab))
		size = zone->uz_size;
	else
		size = malloc_large_size(slab);
#endif
	return (size);
}

CTASSERT(VM_KMEM_SIZE_SCALE >= 1);

/*
 * Initialize the kernel memory (kmem) arena.
 */
void
kmeminit(void)
{
	u_long mem_size;
	u_long tmp;

#ifdef VM_KMEM_SIZE
	if (vm_kmem_size == 0)
		vm_kmem_size = VM_KMEM_SIZE;
#endif
#ifdef VM_KMEM_SIZE_MIN
	if (vm_kmem_size_min == 0)
		vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
#ifdef VM_KMEM_SIZE_MAX
	if (vm_kmem_size_max == 0)
		vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	/*
	 * Calculate the amount of kernel virtual address (KVA) space that is
	 * preallocated to the kmem arena.  In order to support a wide range
	 * of machines, it is a function of the physical memory size,
	 * specifically,
	 *
	 *	min(max(physical memory size / VM_KMEM_SIZE_SCALE,
	 *	    VM_KMEM_SIZE_MIN), VM_KMEM_SIZE_MAX)
	 *
	 * Every architecture must define an integral value for
	 * VM_KMEM_SIZE_SCALE.  However, the definitions of VM_KMEM_SIZE_MIN
	 * and VM_KMEM_SIZE_MAX, which represent respectively the floor and
	 * ceiling on this preallocation, are optional.  Typically,
	 * VM_KMEM_SIZE_MAX is itself a function of the available KVA space on
	 * a given architecture.
	 */
	mem_size = vm_cnt.v_page_count;
	if (mem_size <= 32768) /* delphij XXX 128MB */
		kmem_zmax = PAGE_SIZE;

	if (vm_kmem_size_scale < 1)
		vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;

	/*
	 * Check if we should use defaults for the "vm_kmem_size"
	 * variable:
	 */
	if (vm_kmem_size == 0) {
		vm_kmem_size = mem_size / vm_kmem_size_scale;
		vm_kmem_size = vm_kmem_size * PAGE_SIZE < vm_kmem_size ?
		    vm_kmem_size_max : vm_kmem_size * PAGE_SIZE;
		if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min)
			vm_kmem_size = vm_kmem_size_min;
		if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
			vm_kmem_size = vm_kmem_size_max;
	}
	if (vm_kmem_size == 0)
		panic("Tune VM_KMEM_SIZE_* for the platform");

	/*
	 * The amount of KVA space that is preallocated to the
	 * kmem arena can be set statically at compile-time or manually
	 * through the kernel environment.  However, it is still limited to
	 * twice the physical memory size, which has been sufficient to handle
	 * the most severe cases of external fragmentation in the kmem arena.
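	 *
	 * For example (hypothetical figures): if the loader tunable
	 * vm.kmem_size is set to 64 GB on a machine with 16 GB of RAM,
	 * the check below reduces it to 32 GB.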
	 */
	if (vm_kmem_size / 2 / PAGE_SIZE > mem_size)
		vm_kmem_size = 2 * mem_size * PAGE_SIZE;

	vm_kmem_size = round_page(vm_kmem_size);
#ifdef DEBUG_MEMGUARD
	tmp = memguard_fudge(vm_kmem_size, kernel_map);
#else
	tmp = vm_kmem_size;
#endif
	uma_set_limit(tmp);

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	memguard_init(kernel_arena);
#endif
}

/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
mallocinit(void *dummy)
{
	int i;
	uint8_t indx;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	kmeminit();

	if (kmem_zmax < PAGE_SIZE || kmem_zmax > KMEM_ZMAX)
		kmem_zmax = KMEM_ZMAX;

	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		const char *name = kmemzones[indx].kz_name;
		int subzone;

		for (subzone = 0; subzone < numzones; subzone++) {
			kmemzones[indx].kz_zone[subzone] =
			    uma_zcreate(name, size,
#ifdef INVARIANTS
			    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
			    NULL, NULL, NULL, NULL,
#endif
			    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
		}
		for (;i <= size; i+= KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_SECOND, mallocinit, NULL);

void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(vm_cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	if (mtp->ks_version != M_VERSION)
		panic("malloc_init: type %s with unsupported version %lu",
		    mtp->ks_shortdesc, mtp->ks_version);

	mtip = &mtp->ks_mti;
	mtip->mti_stats = uma_zalloc_pcpu(pcpu_zone_64, M_WAITOK | M_ZERO);
	mtp_set_subzone(mtp);

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_version == M_VERSION,
	    ("malloc_uninit: bad malloc type version"));

	mtx_lock(&malloc_mtx);
	mtip = &mtp->ks_mti;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp) {
				temp->ks_next = mtp->ks_next;
				break;
			}
		}
		KASSERT(temp,
		    ("malloc_uninit: type '%s' not found", mtp->ks_shortdesc));
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
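	 *
	 * For instance (the type name here is hypothetical), unloading a
	 * module whose type still has three outstanding 128-byte
	 * allocations would print:
	 *
	 *   Warning: memory type foo leaked memory on destroy
	 *   (3 allocations, 384 bytes leaked).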
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i <= mp_maxid; i++) {
		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	uma_zfree_pcpu(pcpu_zone_64, mtip->mti_stats);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}

static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp, zeromts;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int error, i;
	struct sbuf sbuf;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_clear_flags(&sbuf, SBUF_INCLUDENUL);
	mtx_lock(&malloc_mtx);

	bzero(&zeromts, sizeof(zeromts));

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	(void)sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh));

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = &mtp->ks_mti;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		(void)sbuf_bcat(&sbuf, &mth, sizeof(mth));

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i <= mp_maxid; i++) {
			mtsp = zpcpu_get_cpu(mtip->mti_stats, i);
			(void)sbuf_bcat(&sbuf, mtsp, sizeof(*mtsp));
		}
		/*
		 * Fill in the missing CPUs.
		 */
		for (; i < MAXCPU; i++) {
			(void)sbuf_bcat(&sbuf, &zeromts, sizeof(zeromts));
		}
	}
	mtx_unlock(&malloc_mtx);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats,
    CTLFLAG_RD | CTLTYPE_STRUCT | CTLFLAG_MPSAFE, 0, 0,
    sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");

SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}

#ifdef DDB
static int64_t
get_malloc_stats(const struct malloc_type_internal *mtip, uint64_t *allocs,
    uint64_t *inuse)
{
	const struct malloc_type_stats *mtsp;
	uint64_t frees, alloced, freed;
	int i;

	*allocs = 0;
	frees = 0;
	alloced = 0;
	freed = 0;
	for (i = 0; i <= mp_maxid; i++) {
		mtsp = zpcpu_get_cpu(mtip->mti_stats, i);

		*allocs += mtsp->mts_numallocs;
		frees += mtsp->mts_numfrees;
		alloced += mtsp->mts_memalloced;
		freed += mtsp->mts_memfreed;
	}
	*inuse = *allocs - frees;
	return (alloced - freed);
}

DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	const char *fmt_hdr, *fmt_entry;
	struct malloc_type *mtp;
	uint64_t allocs, inuse;
	int64_t size;
	/* variables for sorting */
	struct malloc_type *last_mtype, *cur_mtype;
	int64_t cur_size, last_size;
	int ties;

	if (modif[0] == 'i') {
		fmt_hdr = "%s,%s,%s,%s\n";
		fmt_entry = "\"%s\",%ju,%jdK,%ju\n";
	} else {
		fmt_hdr = "%18s %12s %12s %12s\n";
		fmt_entry = "%18s %12ju %12jdK %12ju\n";
	}

	db_printf(fmt_hdr, "Type", "InUse", "MemUse", "Requests");

	/* Select sort, largest size first. */
	last_mtype = NULL;
	last_size = INT64_MAX;
	for (;;) {
		cur_mtype = NULL;
		cur_size = -1;
		ties = 0;

		for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
			/*
			 * In the case of size ties, print out mtypes
			 * in the order they are encountered.  That is,
			 * when we encounter the most recently output
			 * mtype, we have already printed all preceding
			 * ties, and we must print all following ties.
			 */
			if (mtp == last_mtype) {
				ties = 1;
				continue;
			}
			size = get_malloc_stats(&mtp->ks_mti, &allocs,
			    &inuse);
			if (size > cur_size && size < last_size + ties) {
				cur_size = size;
				cur_mtype = mtp;
			}
		}
		if (cur_mtype == NULL)
			break;

		size = get_malloc_stats(&cur_mtype->ks_mti, &allocs, &inuse);
		db_printf(fmt_entry, cur_mtype->ks_shortdesc, inuse,
		    howmany(size, 1024), allocs);

		if (db_pager_quit)
			break;

		last_mtype = cur_mtype;
		last_size = cur_size;
	}
}

#if MALLOC_DEBUG_MAXZONES > 1
DB_SHOW_COMMAND(multizone_matches, db_show_multizone_matches)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int subzone;

	if (!have_addr) {
		db_printf("Usage: show multizone_matches <malloc type/addr>\n");
		return;
	}
	mtp = (void *)addr;
	if (mtp->ks_version != M_VERSION) {
		db_printf("Version %lx does not match expected %x\n",
		    mtp->ks_version, M_VERSION);
		return;
	}

	mtip = &mtp->ks_mti;
	subzone = mtip->mti_zone;

	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = &mtp->ks_mti;
		if (mtip->mti_zone != subzone)
			continue;
		db_printf("%s\n", mtp->ks_shortdesc);
		if (db_pager_quit)
			break;
	}
}
#endif /* MALLOC_DEBUG_MAXZONES > 1 */
#endif /* DDB */

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int error;
	int rsize;
	int size;
	int i;

	waste = 0;
	mem = 0;

	error = sysctl_wire_old_buffer(req, 0);
	if (error != 0)
		return (error);
	sbuf_new_for_sysctl(&sbuf, NULL, 128, req);
	sbuf_printf(&sbuf,
	    "\n  Size                    Requests  Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (long long unsigned)krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	error = sbuf_finish(&sbuf);
	sbuf_delete(&sbuf);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof,
    CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, NULL, 0,
    sysctl_kern_mprof, "A",
    "Malloc Profiling");
#endif /* MALLOC_PROFILE */