/*-
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.
 * Copyright (c) 2005-2006 Robert N. M. Watson
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

/*
 * Kernel malloc(9) implementation -- general purpose kernel memory allocator
 * based on memory types.  Back end is implemented using the UMA(9) zone
 * allocator.  A set of fixed-size buckets is used for smaller allocations,
 * and a special UMA allocation interface is used for larger allocations.
 * Callers declare memory types, and statistics are maintained independently
 * for each memory type.  Statistics are maintained per-CPU for performance
 * reasons.  See malloc(9) and comments in malloc.h for a detailed
 * description.
 */
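/*
 * Example (illustrative sketch, not part of the allocator): a typical
 * consumer declares its own memory type and allocates against it; the
 * type name "M_EXAMPLE" below is hypothetical.
 *
 *	static MALLOC_DEFINE(M_EXAMPLE, "example", "example buffers");
 *
 *	void *p = malloc(128, M_EXAMPLE, M_WAITOK | M_ZERO);
 *	...
 *	free(p, M_EXAMPLE);
 */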
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_ddb.h"
#include "opt_kdtrace.h"
#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif
#ifdef DEBUG_REDZONE
#include <vm/redzone.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

#include <ddb/ddb.h>

#ifdef KDTRACE_HOOKS
#include <sys/dtrace_bsd.h>

dtrace_malloc_probe_func_t	dtrace_malloc_probe;
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory.  'Sufficiently smaller' is defined as:
 * newsize <= oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif

/*
 * Centrally define some common malloc types.
 */
MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL);

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static vm_offset_t kmembase;
static vm_offset_t kmemlimit;
static int kmemcount;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	PAGE_SIZE
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static u_int8_t kmemsize[KMEM_ZSIZE + 1];
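/*
 * Worked example (illustrative): with KMEM_ZBASE 16 and KMEM_ZSHIFT 4, a
 * 100-byte request is first rounded up to the next multiple of 16,
 *	(100 & ~KMEM_ZMASK) + KMEM_ZBASE = 96 + 16 = 112,
 * and kmemsize[112 >> 4] then yields the index of the smallest bucket
 * that can hold it, in this case the 128-byte zone.  See malloc() below.
 */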
/*
 * Small malloc(9) memory allocations are allocated from a set of UMA buckets
 * of various sizes.
 *
 * XXX: The comment here used to read "These won't be powers of two for
 * long."  It's possible that a significant amount of wasted memory could be
 * recovered by tuning the sizes of these buckets.
 */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone;
} kmemzones[] = {
	{16, "16", NULL},
	{32, "32", NULL},
	{64, "64", NULL},
	{128, "128", NULL},
	{256, "256", NULL},
	{512, "512", NULL},
	{1024, "1024", NULL},
	{2048, "2048", NULL},
	{4096, "4096", NULL},
#if PAGE_SIZE > 4096
	{8192, "8192", NULL},
#if PAGE_SIZE > 8192
	{16384, "16384", NULL},
#if PAGE_SIZE > 16384
	{32768, "32768", NULL},
#if PAGE_SIZE > 32768
	{65536, "65536", NULL},
#if PAGE_SIZE > 65536
#error	"Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};

/*
 * Zone to allocate malloc type descriptions from.  For ABI reasons, memory
 * types are described by a data structure passed by the declaring code, but
 * the malloc(9) implementation has its own data structure describing the
 * type and statistics.  This permits the malloc(9)-internal data structures
 * to be modified without breaking binary-compiled kernel modules that
 * declare malloc types.
 */
static uma_zone_t mt_zone;

u_long vm_kmem_size;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size, CTLFLAG_RD, &vm_kmem_size, 0,
    "Size of kernel memory");

static u_long vm_kmem_size_min;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_min, CTLFLAG_RD, &vm_kmem_size_min, 0,
    "Minimum size of kernel memory");

static u_long vm_kmem_size_max;
SYSCTL_ULONG(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RD, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

static u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RD, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS);

/*
 * time_uptime of the last malloc(9) failure (induced or real).
 */
static time_t t_malloc_fail;

/*
 * malloc(9) fault injection -- cause malloc failures every (n) mallocs when
 * the caller specifies M_NOWAIT.  If set to 0, no failures are caused.
 */
#ifdef MALLOC_MAKE_FAILURES
SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");

static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}
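/*
 * Usage sketch (illustrative): with MALLOC_MAKE_FAILURES compiled in, an
 * administrator can force every 100th M_NOWAIT allocation to fail and
 * count the induced failures:
 *
 *	sysctl debug.malloc.failure_rate=100
 *	sysctl debug.malloc.failure_count
 *
 * malloc_last_fail() above reports the number of seconds since the most
 * recent failure, induced or real.
 */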
/*
 * An allocation has succeeded -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
static void
malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size,
    int zindx)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	if (size > 0) {
		mtsp->mts_memalloced += size;
		mtsp->mts_numallocs++;
	}
	if (zindx != -1)
		mtsp->mts_size |= 1 << zindx;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_MALLOC];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, zindx);
	}
#endif

	critical_exit();
}

void
malloc_type_allocated(struct malloc_type *mtp, unsigned long size)
{

	if (size > 0)
		malloc_type_zone_allocated(mtp, size, -1);
}

/*
 * A free operation has occurred -- update malloc type statistics for the
 * amount of the bucket size.  Occurs within a critical section so that the
 * thread isn't preempted and doesn't migrate while updating per-CPU
 * statistics.
 */
void
malloc_type_freed(struct malloc_type *mtp, unsigned long size)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;

	critical_enter();
	mtip = mtp->ks_handle;
	mtsp = &mtip->mti_stats[curcpu];
	mtsp->mts_memfreed += size;
	mtsp->mts_numfrees++;

#ifdef KDTRACE_HOOKS
	if (dtrace_malloc_probe != NULL) {
		uint32_t probe_id = mtip->mti_probes[DTMALLOC_PROBE_FREE];
		if (probe_id != 0)
			(dtrace_malloc_probe)(probe_id,
			    (uintptr_t) mtp, (uintptr_t) mtip,
			    (uintptr_t) mtsp, size, 0);
	}
#endif

	critical_exit();
}
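/*
 * Illustrative note: because the counters above are kept per-CPU and only
 * updated under critical_enter(), totals for a type are derived by summing
 * over all CPUs at read time, e.g. in-use bytes for a type are
 *	sum over i of (mti_stats[i].mts_memalloced - mti_stats[i].mts_memfreed)
 * as malloc_uninit() and the DDB "show malloc" command below both compute.
 */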
/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
malloc(unsigned long size, struct malloc_type *mtp, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
#if defined(DIAGNOSTIC) || defined(DEBUG_REDZONE)
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	/*
	 * Check that exactly one of M_WAITOK or M_NOWAIT is specified.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static struct timeval lasterr;
		static int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp(mtp))
		return memguard_alloc(size, flags);
#endif

#ifdef DEBUG_REDZONE
	size = redzone_size_ntor(size);
#endif

	if (size <= KMEM_ZMAX) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone;
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = zone->uz_size;
		malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(mtp, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
#ifdef DEBUG_REDZONE
	if (va != NULL)
		va = redzone_setup(va, osize);
#endif
	return ((void *) va);
}
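/*
 * Example (illustrative): a caller that must not sleep passes M_NOWAIT and
 * handles failure itself; the "sc" softc pointer here is hypothetical.
 *
 *	sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT | M_ZERO);
 *	if (sc == NULL)
 *		return (ENOMEM);
 */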
/*
 *	free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *mtp)
{
	uma_slab_t slab;
	u_long size;

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp(mtp)) {
		memguard_free(addr);
		return;
	}
#endif

#ifdef DEBUG_REDZONE
	redzone_check(addr);
	addr = redzone_addr_ntor(addr);
#endif

	size = 0;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtpp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes on
		 * 64-bit machines.
		 */
		mtpp = (struct malloc_type **)
		    ((unsigned long)mtpp & ~UMA_ALIGN_PTR);
		mtpp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtpp = mtp;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(mtp, size);
}

/*
 *	realloc: change the size of a memory block
 */
void *
realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, mtp, flags));

	/*
	 * XXX: Should report free of old memory and alloc of new memory to
	 * per-CPU stats.
	 */

#ifdef DEBUG_MEMGUARD
	if (memguard_cmp(mtp)) {
		slab = NULL;
		alloc = size;
	} else {
#endif

#ifdef DEBUG_REDZONE
	slab = NULL;
	alloc = redzone_get_size(addr);
#else
	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (!(slab->us_flags & UMA_SLAB_MALLOC))
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);
#endif /* !DEBUG_REDZONE */

#ifdef DEBUG_MEMGUARD
	}
#endif

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, mtp, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, mtp);
	return (newaddr);
}

/*
 *	reallocf: same as realloc() but frees memory on failure.
 */
void *
reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, mtp, flags)) == NULL)
		free(addr, mtp);
	return (mem);
}
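/*
 * Worked example (illustrative): with the default REALLOC_FRACTION of 1,
 * the reuse test in realloc() keeps the original block only while the new
 * size stays above half the old allocation.  Shrinking a 1024-byte block
 * to 600 bytes returns the same address; shrinking it to 512 bytes fails
 * the "size > (alloc >> 1)" test, so a new, smaller block is allocated
 * and the contents are copied over.
 */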
/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(void *dummy)
{
	u_int8_t indx;
	u_long mem_size;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.
	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
	 * a VM_KMEM_SIZE of 12MB is a fair compromise.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available, and on an X86 with a total KVA space of 256MB,
	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
	mem_size = cnt.v_page_count;

#if defined(VM_KMEM_SIZE_SCALE)
	vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale);
	if (vm_kmem_size_scale > 0 &&
	    (mem_size / vm_kmem_size_scale) > (vm_kmem_size / PAGE_SIZE))
		vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_MIN)
	vm_kmem_size_min = VM_KMEM_SIZE_MIN;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size_min", &vm_kmem_size_min);
	if (vm_kmem_size_min > 0 && vm_kmem_size < vm_kmem_size_min) {
		vm_kmem_size = vm_kmem_size_min;
	}

#if defined(VM_KMEM_SIZE_MAX)
	vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size_max", &vm_kmem_size_max);
	if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
		vm_kmem_size = vm_kmem_size_max;

	/* Allow final override from the kernel environment */
#ifndef BURN_BRIDGES
	if (TUNABLE_ULONG_FETCH("kern.vm.kmem.size", &vm_kmem_size) != 0)
		printf("kern.vm.kmem.size is now called vm.kmem_size!\n");
#endif
	TUNABLE_ULONG_FETCH("vm.kmem_size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane.  Be careful to not overflow the 32bit
	 * ints while doing the check.
	 */
	if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count)
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

	/*
	 * Tune settings based on the kmem map's size at this time.
	 */
	init_param3(vm_kmem_size / PAGE_SIZE);
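	/*
	 * Worked example of the tuning above (illustrative): on a machine
	 * with 1GB of RAM (262144 4KB pages) and a scale factor of 4,
	 * (mem_size / vm_kmem_size_scale) * PAGE_SIZE yields a 256MB
	 * vm_kmem_size, which is then subject to the vm.kmem_size_min/max
	 * clamps and the twice-physical-memory limit computed above.
	 */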
	kmem_map = kmem_suballoc(kernel_map, &kmembase, &kmemlimit,
	    vm_kmem_size, TRUE);
	kmem_map->system_map = 1;

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	vm_memguard_divisor = 10;
	TUNABLE_INT_FETCH("vm.memguard.divisor", &vm_memguard_divisor);

	/* Pick a conservative value if the provided value is unreasonable. */
	if ((vm_memguard_divisor <= 0) ||
	    ((vm_kmem_size / vm_memguard_divisor) == 0))
		vm_memguard_divisor = 10;
	memguard_init(kmem_map, vm_kmem_size / vm_memguard_divisor);
#endif

	uma_startup2();

	mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal),
#ifdef INVARIANTS
	    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
	    NULL, NULL, NULL, NULL,
#endif
	    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);
	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;

		kmemzones[indx].kz_zone = uma_zcreate(name, size,
#ifdef INVARIANTS
		    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
		    NULL, NULL, NULL, NULL,
#endif
		    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);

		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}
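/*
 * Illustrative result of the loop above: kmemsize[] maps each 16-byte step
 * to the index of the smallest zone that fits, e.g.
 *	kmemsize[0..1] = 0 (16),  kmemsize[2] = 1 (32),
 *	kmemsize[3..4] = 2 (64),  kmemsize[5..8] = 3 (128), ...
 * so the size-to-zone lookup in malloc() is a single table index.
 */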
void
malloc_init(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;

	KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init"));

	mtp = data;
	mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO);
	mtp->ks_handle = mtip;

	mtx_lock(&malloc_mtx);
	mtp->ks_next = kmemstatistics;
	kmemstatistics = mtp;
	kmemcount++;
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type_internal *mtip;
	struct malloc_type_stats *mtsp;
	struct malloc_type *mtp, *temp;
	uma_slab_t slab;
	long temp_allocs, temp_bytes;
	int i;

	mtp = data;
	KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL"));
	mtx_lock(&malloc_mtx);
	mtip = mtp->ks_handle;
	mtp->ks_handle = NULL;
	if (mtp != kmemstatistics) {
		for (temp = kmemstatistics; temp != NULL;
		    temp = temp->ks_next) {
			if (temp->ks_next == mtp)
				temp->ks_next = mtp->ks_next;
		}
	} else
		kmemstatistics = mtp->ks_next;
	kmemcount--;
	mtx_unlock(&malloc_mtx);

	/*
	 * Look for memory leaks.
	 */
	temp_allocs = temp_bytes = 0;
	for (i = 0; i < MAXCPU; i++) {
		mtsp = &mtip->mti_stats[i];
		temp_allocs += mtsp->mts_numallocs;
		temp_allocs -= mtsp->mts_numfrees;
		temp_bytes += mtsp->mts_memalloced;
		temp_bytes -= mtsp->mts_memfreed;
	}
	if (temp_allocs > 0 || temp_bytes > 0) {
		printf("Warning: memory type %s leaked memory on destroy "
		    "(%ld allocations, %ld bytes leaked).\n", mtp->ks_shortdesc,
		    temp_allocs, temp_bytes);
	}

	slab = vtoslab((vm_offset_t) mtip & (~UMA_SLAB_MASK));
	uma_zfree_arg(mt_zone, mtip, slab);
}

struct malloc_type *
malloc_desc2type(const char *desc)
{
	struct malloc_type *mtp;

	mtx_assert(&malloc_mtx, MA_OWNED);
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		if (strcmp(mtp->ks_shortdesc, desc) == 0)
			return (mtp);
	}
	return (NULL);
}
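/*
 * Usage sketch (illustrative): malloc_desc2type() walks the type list, so
 * the caller must hold malloc_mtx across the lookup and any use of the
 * returned pointer:
 *
 *	mtx_lock(&malloc_mtx);
 *	mtp = malloc_desc2type("devbuf");
 *	if (mtp != NULL)
 *		...
 *	mtx_unlock(&malloc_mtx);
 */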
static int
sysctl_kern_malloc_stats(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type_stream_header mtsh;
	struct malloc_type_internal *mtip;
	struct malloc_type_header mth;
	struct malloc_type *mtp;
	int buflen, count, error, i;
	struct sbuf sbuf;
	char *buffer;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);
	buflen = sizeof(mtsh) + count * (sizeof(mth) +
	    sizeof(struct malloc_type_stats) * MAXCPU) + 1;
	buffer = malloc(buflen, M_TEMP, M_WAITOK | M_ZERO);
	mtx_lock(&malloc_mtx);
	if (count < kmemcount) {
		free(buffer, M_TEMP);
		goto restart;
	}

	sbuf_new(&sbuf, buffer, buflen, SBUF_FIXEDLEN);

	/*
	 * Insert stream header.
	 */
	bzero(&mtsh, sizeof(mtsh));
	mtsh.mtsh_version = MALLOC_TYPE_STREAM_VERSION;
	mtsh.mtsh_maxcpus = MAXCPU;
	mtsh.mtsh_count = kmemcount;
	if (sbuf_bcat(&sbuf, &mtsh, sizeof(mtsh)) < 0) {
		mtx_unlock(&malloc_mtx);
		error = ENOMEM;
		goto out;
	}

	/*
	 * Insert alternating sequence of type headers and type statistics.
	 */
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;

		/*
		 * Insert type header.
		 */
		bzero(&mth, sizeof(mth));
		strlcpy(mth.mth_name, mtp->ks_shortdesc, MALLOC_MAX_NAME);
		if (sbuf_bcat(&sbuf, &mth, sizeof(mth)) < 0) {
			mtx_unlock(&malloc_mtx);
			error = ENOMEM;
			goto out;
		}

		/*
		 * Insert type statistics for each CPU.
		 */
		for (i = 0; i < MAXCPU; i++) {
			if (sbuf_bcat(&sbuf, &mtip->mti_stats[i],
			    sizeof(mtip->mti_stats[i])) < 0) {
				mtx_unlock(&malloc_mtx);
				error = ENOMEM;
				goto out;
			}
		}
	}
	mtx_unlock(&malloc_mtx);
	sbuf_finish(&sbuf);
	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));
out:
	sbuf_delete(&sbuf);
	free(buffer, M_TEMP);
	return (error);
}

SYSCTL_PROC(_kern, OID_AUTO, malloc_stats, CTLFLAG_RD|CTLTYPE_STRUCT,
    0, 0, sysctl_kern_malloc_stats, "s,malloc_type_ustats",
    "Return malloc types");
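/*
 * Stream layout sketch (illustrative): a consumer of kern.malloc_stats,
 * such as libmemstat(3), reads
 *	struct malloc_type_stream_header
 * followed by mtsh_count repetitions of
 *	struct malloc_type_header
 *	struct malloc_type_stats  (mtsh_maxcpus copies, one per CPU)
 * and sums the per-CPU statistics itself.
 */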
SYSCTL_INT(_kern, OID_AUTO, malloc_count, CTLFLAG_RD, &kmemcount, 0,
    "Count of kernel malloc types");

void
malloc_type_list(malloc_type_list_func_t *func, void *arg)
{
	struct malloc_type *mtp, **bufmtp;
	int count, i;
	size_t buflen;

	mtx_lock(&malloc_mtx);
restart:
	mtx_assert(&malloc_mtx, MA_OWNED);
	count = kmemcount;
	mtx_unlock(&malloc_mtx);

	buflen = sizeof(struct malloc_type *) * count;
	bufmtp = malloc(buflen, M_TEMP, M_WAITOK);

	mtx_lock(&malloc_mtx);

	if (count < kmemcount) {
		free(bufmtp, M_TEMP);
		goto restart;
	}

	for (mtp = kmemstatistics, i = 0; mtp != NULL; mtp = mtp->ks_next, i++)
		bufmtp[i] = mtp;

	mtx_unlock(&malloc_mtx);

	for (i = 0; i < count; i++)
		(func)(bufmtp[i], arg);

	free(bufmtp, M_TEMP);
}

#ifdef DDB
DB_SHOW_COMMAND(malloc, db_show_malloc)
{
	struct malloc_type_internal *mtip;
	struct malloc_type *mtp;
	u_int64_t allocs, frees;
	u_int64_t alloced, freed;
	int i;

	db_printf("%18s %12s %12s %12s\n", "Type", "InUse", "MemUse",
	    "Requests");
	for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) {
		mtip = (struct malloc_type_internal *)mtp->ks_handle;
		allocs = 0;
		frees = 0;
		alloced = 0;
		freed = 0;
		for (i = 0; i < MAXCPU; i++) {
			allocs += mtip->mti_stats[i].mts_numallocs;
			frees += mtip->mti_stats[i].mts_numfrees;
			alloced += mtip->mti_stats[i].mts_memalloced;
			freed += mtip->mti_stats[i].mts_memfreed;
		}
		db_printf("%18s %12ju %12juK %12ju\n",
		    mtp->ks_shortdesc, allocs - frees,
		    (alloced - freed + 1023) / 1024, allocs);
	}
}
#endif

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	int linesize = 64;
	struct sbuf sbuf;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int bufsize;
	int error;
	char *buf;
	int rsize;
	int size;
	int i;

	bufsize = linesize * (KMEM_ZSIZE + 1);
	bufsize += 128;		/* For the stats line */
	bufsize += 128;		/* For the banner line */
	waste = 0;
	mem = 0;

	buf = malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	sbuf_new(&sbuf, buf, bufsize, SBUF_FIXEDLEN);
	sbuf_printf(&sbuf,
	    "\n  Size                    Requests  Real Size\n");
	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = (long long unsigned)krequests[i];

		sbuf_printf(&sbuf, "%6d%28llu%11d\n", size,
		    (unsigned long long)count, rsize);

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}
	sbuf_printf(&sbuf,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	sbuf_finish(&sbuf);

	error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf));

	sbuf_delete(&sbuf);
	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */