1 /*- 2 * Copyright (c) 1987, 1991, 1993 3 * The Regents of the University of California. 4 * Copyright (c) 2005 Robert N. M. Watson 5 * All rights reserved. 6 * 7 * Redistribution and use in source and binary forms, with or without 8 * modification, are permitted provided that the following conditions 9 * are met: 10 * 1. Redistributions of source code must retain the above copyright 11 * notice, this list of conditions and the following disclaimer. 12 * 2. Redistributions in binary form must reproduce the above copyright 13 * notice, this list of conditions and the following disclaimer in the 14 * documentation and/or other materials provided with the distribution. 15 * 4. Neither the name of the University nor the names of its contributors 16 * may be used to endorse or promote products derived from this software 17 * without specific prior written permission. 18 * 19 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 20 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 21 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 22 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 23 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 24 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 25 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 26 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 27 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 28 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 29 * SUCH DAMAGE. 30 * 31 * @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94 32 */ 33 34 #include <sys/cdefs.h> 35 __FBSDID("$FreeBSD$"); 36 37 #include "opt_vm.h" 38 39 #include <sys/param.h> 40 #include <sys/systm.h> 41 #include <sys/kdb.h> 42 #include <sys/kernel.h> 43 #include <sys/lock.h> 44 #include <sys/malloc.h> 45 #include <sys/mbuf.h> 46 #include <sys/mutex.h> 47 #include <sys/vmmeter.h> 48 #include <sys/proc.h> 49 #include <sys/sbuf.h> 50 #include <sys/sysctl.h> 51 #include <sys/time.h> 52 53 #include <vm/vm.h> 54 #include <vm/pmap.h> 55 #include <vm/vm_param.h> 56 #include <vm/vm_kern.h> 57 #include <vm/vm_extern.h> 58 #include <vm/vm_map.h> 59 #include <vm/vm_page.h> 60 #include <vm/uma.h> 61 #include <vm/uma_int.h> 62 #include <vm/uma_dbg.h> 63 64 #ifdef DEBUG_MEMGUARD 65 #include <vm/memguard.h> 66 #endif 67 68 #if defined(INVARIANTS) && defined(__i386__) 69 #include <machine/cpu.h> 70 #endif 71 72 /* 73 * When realloc() is called, if the new size is sufficiently smaller than 74 * the old size, realloc() will allocate a new, smaller block to avoid 75 * wasting memory. 'Sufficiently smaller' is defined as: newsize <= 76 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'. 77 */ 78 #ifndef REALLOC_FRACTION 79 #define REALLOC_FRACTION 1 /* new block if <= half the size */ 80 #endif 81 82 MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches"); 83 MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory"); 84 MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers"); 85 86 MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options"); 87 MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery"); 88 89 static void kmeminit(void *); 90 SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL) 91 92 static MALLOC_DEFINE(M_FREE, "free", "should be on free list"); 93 94 static struct malloc_type *kmemstatistics; 95 static char *kmembase; 96 static char *kmemlimit; 97 98 #define KMEM_ZSHIFT 4 99 #define KMEM_ZBASE 16 100 #define KMEM_ZMASK (KMEM_ZBASE - 1) 101 102 #define KMEM_ZMAX PAGE_SIZE 103 #define KMEM_ZSIZE (KMEM_ZMAX >> KMEM_ZSHIFT) 104 static u_int8_t kmemsize[KMEM_ZSIZE + 1]; 105 106 /* These won't be powers of two for long */ 107 struct { 108 int kz_size; 109 char *kz_name; 110 uma_zone_t kz_zone; 111 } kmemzones[] = { 112 {16, "16", NULL}, 113 {32, "32", NULL}, 114 {64, "64", NULL}, 115 {128, "128", NULL}, 116 {256, "256", NULL}, 117 {512, "512", NULL}, 118 {1024, "1024", NULL}, 119 {2048, "2048", NULL}, 120 {4096, "4096", NULL}, 121 #if PAGE_SIZE > 4096 122 {8192, "8192", NULL}, 123 #if PAGE_SIZE > 8192 124 {16384, "16384", NULL}, 125 #if PAGE_SIZE > 16384 126 {32768, "32768", NULL}, 127 #if PAGE_SIZE > 32768 128 {65536, "65536", NULL}, 129 #if PAGE_SIZE > 65536 130 #error "Unsupported PAGE_SIZE" 131 #endif /* 65536 */ 132 #endif /* 32768 */ 133 #endif /* 16384 */ 134 #endif /* 8192 */ 135 #endif /* 4096 */ 136 {0, NULL}, 137 }; 138 139 static uma_zone_t mt_zone; 140 141 #ifdef DEBUG_MEMGUARD 142 u_int vm_memguard_divisor; 143 SYSCTL_UINT(_vm, OID_AUTO, memguard_divisor, CTLFLAG_RD, &vm_memguard_divisor, 144 0, "(kmem_size/memguard_divisor) == memguard submap size"); 145 #endif 146 147 u_int vm_kmem_size; 148 SYSCTL_UINT(_vm, OID_AUTO, kmem_size, CTLFLAG_RD, &vm_kmem_size, 0, 149 "Size of kernel memory"); 150 151 u_int vm_kmem_size_max; 152 SYSCTL_UINT(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RD, &vm_kmem_size_max, 0, 153 "Maximum size of kernel memory"); 154 155 u_int vm_kmem_size_scale; 156 SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RD, &vm_kmem_size_scale, 0, 157 "Scale factor for kernel memory size"); 158 159 /* 160 * The malloc_mtx protects the kmemstatistics linked list. 161 */ 162 163 struct mtx malloc_mtx; 164 165 #ifdef MALLOC_PROFILE 166 uint64_t krequests[KMEM_ZSIZE + 1]; 167 168 static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS); 169 #endif 170 171 static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS); 172 173 /* time_uptime of last malloc(9) failure */ 174 static time_t t_malloc_fail; 175 176 #ifdef MALLOC_MAKE_FAILURES 177 /* 178 * Causes malloc failures every (n) mallocs with M_NOWAIT. If set to 0, 179 * doesn't cause failures. 180 */ 181 SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0, 182 "Kernel malloc debugging options"); 183 184 static int malloc_failure_rate; 185 static int malloc_nowait_count; 186 static int malloc_failure_count; 187 SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW, 188 &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail"); 189 TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate); 190 SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD, 191 &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures"); 192 #endif 193 194 int 195 malloc_last_fail(void) 196 { 197 198 return (time_uptime - t_malloc_fail); 199 } 200 201 /* 202 * Add this to the informational malloc_type bucket. 203 */ 204 static void 205 malloc_type_zone_allocated(struct malloc_type *mtp, unsigned long size, 206 int zindx) 207 { 208 struct malloc_type_internal *mtip; 209 struct malloc_type_stats *mtsp; 210 211 critical_enter(); 212 mtip = mtp->ks_handle; 213 mtsp = &mtip->mti_stats[curcpu]; 214 mtsp->mts_memalloced += size; 215 mtsp->mts_numallocs++; 216 if (zindx != -1) 217 mtsp->mts_size |= 1 << zindx; 218 critical_exit(); 219 } 220 221 void 222 malloc_type_allocated(struct malloc_type *mtp, unsigned long size) 223 { 224 225 malloc_type_zone_allocated(mtp, size, -1); 226 } 227 228 /* 229 * Remove this allocation from the informational malloc_type bucket. 230 */ 231 void 232 malloc_type_freed(struct malloc_type *mtp, unsigned long size) 233 { 234 struct malloc_type_internal *mtip; 235 struct malloc_type_stats *mtsp; 236 237 critical_enter(); 238 mtip = mtp->ks_handle; 239 mtsp = &mtip->mti_stats[curcpu]; 240 mtsp->mts_memfreed += size; 241 mtsp->mts_numfrees++; 242 critical_exit(); 243 } 244 245 /* 246 * malloc: 247 * 248 * Allocate a block of memory. 249 * 250 * If M_NOWAIT is set, this routine will not block and return NULL if 251 * the allocation fails. 252 */ 253 void * 254 malloc(unsigned long size, struct malloc_type *mtp, int flags) 255 { 256 int indx; 257 caddr_t va; 258 uma_zone_t zone; 259 uma_keg_t keg; 260 #ifdef DIAGNOSTIC 261 unsigned long osize = size; 262 #endif 263 264 #ifdef INVARIANTS 265 /* 266 * To make sure that WAITOK or NOWAIT is set, but not more than 267 * one, and check against the API botches that are common. 268 */ 269 indx = flags & (M_WAITOK | M_NOWAIT | M_DONTWAIT | M_TRYWAIT); 270 if (indx != M_NOWAIT && indx != M_WAITOK) { 271 static struct timeval lasterr; 272 static int curerr, once; 273 if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) { 274 printf("Bad malloc flags: %x\n", indx); 275 kdb_backtrace(); 276 flags |= M_WAITOK; 277 once++; 278 } 279 } 280 #endif 281 #if 0 282 if (size == 0) 283 kdb_enter("zero size malloc"); 284 #endif 285 #ifdef MALLOC_MAKE_FAILURES 286 if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) { 287 atomic_add_int(&malloc_nowait_count, 1); 288 if ((malloc_nowait_count % malloc_failure_rate) == 0) { 289 atomic_add_int(&malloc_failure_count, 1); 290 t_malloc_fail = time_uptime; 291 return (NULL); 292 } 293 } 294 #endif 295 if (flags & M_WAITOK) 296 KASSERT(curthread->td_intr_nesting_level == 0, 297 ("malloc(M_WAITOK) in interrupt context")); 298 299 #ifdef DEBUG_MEMGUARD 300 /* XXX CHANGEME! */ 301 if (mtp == M_SUBPROC) 302 return memguard_alloc(size, flags); 303 #endif 304 305 if (size <= KMEM_ZMAX) { 306 if (size & KMEM_ZMASK) 307 size = (size & ~KMEM_ZMASK) + KMEM_ZBASE; 308 indx = kmemsize[size >> KMEM_ZSHIFT]; 309 zone = kmemzones[indx].kz_zone; 310 keg = zone->uz_keg; 311 #ifdef MALLOC_PROFILE 312 krequests[size >> KMEM_ZSHIFT]++; 313 #endif 314 va = uma_zalloc(zone, flags); 315 if (va != NULL) 316 size = keg->uk_size; 317 malloc_type_zone_allocated(mtp, va == NULL ? 0 : size, indx); 318 } else { 319 size = roundup(size, PAGE_SIZE); 320 zone = NULL; 321 keg = NULL; 322 va = uma_large_malloc(size, flags); 323 malloc_type_allocated(mtp, va == NULL ? 0 : size); 324 } 325 if (flags & M_WAITOK) 326 KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL")); 327 else if (va == NULL) 328 t_malloc_fail = time_uptime; 329 #ifdef DIAGNOSTIC 330 if (va != NULL && !(flags & M_ZERO)) { 331 memset(va, 0x70, osize); 332 } 333 #endif 334 return ((void *) va); 335 } 336 337 /* 338 * free: 339 * 340 * Free a block of memory allocated by malloc. 341 * 342 * This routine may not block. 343 */ 344 void 345 free(void *addr, struct malloc_type *mtp) 346 { 347 uma_slab_t slab; 348 u_long size; 349 350 /* free(NULL, ...) does nothing */ 351 if (addr == NULL) 352 return; 353 354 #ifdef DEBUG_MEMGUARD 355 /* XXX CHANGEME! */ 356 if (mtp == M_SUBPROC) { 357 memguard_free(addr); 358 return; 359 } 360 #endif 361 362 size = 0; 363 364 slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK)); 365 366 if (slab == NULL) 367 panic("free: address %p(%p) has not been allocated.\n", 368 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK))); 369 370 371 if (!(slab->us_flags & UMA_SLAB_MALLOC)) { 372 #ifdef INVARIANTS 373 struct malloc_type **mtpp = addr; 374 #endif 375 size = slab->us_keg->uk_size; 376 #ifdef INVARIANTS 377 /* 378 * Cache a pointer to the malloc_type that most recently freed 379 * this memory here. This way we know who is most likely to 380 * have stepped on it later. 381 * 382 * This code assumes that size is a multiple of 8 bytes for 383 * 64 bit machines 384 */ 385 mtpp = (struct malloc_type **) 386 ((unsigned long)mtpp & ~UMA_ALIGN_PTR); 387 mtpp += (size - sizeof(struct malloc_type *)) / 388 sizeof(struct malloc_type *); 389 *mtpp = mtp; 390 #endif 391 uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab); 392 } else { 393 size = slab->us_size; 394 uma_large_free(slab); 395 } 396 malloc_type_freed(mtp, size); 397 } 398 399 /* 400 * realloc: change the size of a memory block 401 */ 402 void * 403 realloc(void *addr, unsigned long size, struct malloc_type *mtp, int flags) 404 { 405 uma_slab_t slab; 406 unsigned long alloc; 407 void *newaddr; 408 409 /* realloc(NULL, ...) is equivalent to malloc(...) */ 410 if (addr == NULL) 411 return (malloc(size, mtp, flags)); 412 413 /* 414 * XXX: Should report free of old memory and alloc of new memory to 415 * per-CPU stats. 416 */ 417 418 #ifdef DEBUG_MEMGUARD 419 /* XXX: CHANGEME! */ 420 if (mtp == M_SUBPROC) { 421 slab = NULL; 422 alloc = size; 423 } else { 424 #endif 425 426 slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK)); 427 428 /* Sanity check */ 429 KASSERT(slab != NULL, 430 ("realloc: address %p out of range", (void *)addr)); 431 432 /* Get the size of the original block */ 433 if (slab->us_keg) 434 alloc = slab->us_keg->uk_size; 435 else 436 alloc = slab->us_size; 437 438 /* Reuse the original block if appropriate */ 439 if (size <= alloc 440 && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) 441 return (addr); 442 443 #ifdef DEBUG_MEMGUARD 444 } 445 #endif 446 447 /* Allocate a new, bigger (or smaller) block */ 448 if ((newaddr = malloc(size, mtp, flags)) == NULL) 449 return (NULL); 450 451 /* Copy over original contents */ 452 bcopy(addr, newaddr, min(size, alloc)); 453 free(addr, mtp); 454 return (newaddr); 455 } 456 457 /* 458 * reallocf: same as realloc() but free memory on failure. 459 */ 460 void * 461 reallocf(void *addr, unsigned long size, struct malloc_type *mtp, int flags) 462 { 463 void *mem; 464 465 if ((mem = realloc(addr, size, mtp, flags)) == NULL) 466 free(addr, mtp); 467 return (mem); 468 } 469 470 /* 471 * Initialize the kernel memory allocator 472 */ 473 /* ARGSUSED*/ 474 static void 475 kmeminit(void *dummy) 476 { 477 u_int8_t indx; 478 u_long mem_size; 479 int i; 480 481 mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF); 482 483 /* 484 * Try to auto-tune the kernel memory size, so that it is 485 * more applicable for a wider range of machine sizes. 486 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while 487 * a VM_KMEM_SIZE of 12MB is a fair compromise. The 488 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space 489 * available, and on an X86 with a total KVA space of 256MB, 490 * try to keep VM_KMEM_SIZE_MAX at 80MB or below. 491 * 492 * Note that the kmem_map is also used by the zone allocator, 493 * so make sure that there is enough space. 494 */ 495 vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE; 496 mem_size = cnt.v_page_count; 497 498 #if defined(VM_KMEM_SIZE_SCALE) 499 vm_kmem_size_scale = VM_KMEM_SIZE_SCALE; 500 #endif 501 TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale); 502 if (vm_kmem_size_scale > 0 && 503 (mem_size / vm_kmem_size_scale) > (vm_kmem_size / PAGE_SIZE)) 504 vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE; 505 506 #if defined(VM_KMEM_SIZE_MAX) 507 vm_kmem_size_max = VM_KMEM_SIZE_MAX; 508 #endif 509 TUNABLE_INT_FETCH("vm.kmem_size_max", &vm_kmem_size_max); 510 if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max) 511 vm_kmem_size = vm_kmem_size_max; 512 513 /* Allow final override from the kernel environment */ 514 #ifndef BURN_BRIDGES 515 if (TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size) != 0) 516 printf("kern.vm.kmem.size is now called vm.kmem_size!\n"); 517 #endif 518 TUNABLE_INT_FETCH("vm.kmem_size", &vm_kmem_size); 519 520 /* 521 * Limit kmem virtual size to twice the physical memory. 522 * This allows for kmem map sparseness, but limits the size 523 * to something sane. Be careful to not overflow the 32bit 524 * ints while doing the check. 525 */ 526 if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count) 527 vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE; 528 529 /* 530 * Tune settings based on the kernel map's size at this time. 531 */ 532 init_param3(vm_kmem_size / PAGE_SIZE); 533 534 kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase, 535 (vm_offset_t *)&kmemlimit, vm_kmem_size); 536 kmem_map->system_map = 1; 537 538 #ifdef DEBUG_MEMGUARD 539 /* 540 * Initialize MemGuard if support compiled in. MemGuard is a 541 * replacement allocator used for detecting tamper-after-free 542 * scenarios as they occur. It is only used for debugging. 543 */ 544 vm_memguard_divisor = 10; 545 TUNABLE_INT_FETCH("vm.memguard_divisor", &vm_memguard_divisor); 546 547 /* Pick a conservative value if provided value sucks. */ 548 if ((vm_memguard_divisor <= 0) || 549 ((vm_kmem_size / vm_memguard_divisor) == 0)) 550 vm_memguard_divisor = 10; 551 memguard_init(kmem_map, vm_kmem_size / vm_memguard_divisor); 552 #endif 553 554 uma_startup2(); 555 556 mt_zone = uma_zcreate("mt_zone", sizeof(struct malloc_type_internal), 557 #ifdef INVARIANTS 558 mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini, 559 #else 560 NULL, NULL, NULL, NULL, 561 #endif 562 UMA_ALIGN_PTR, UMA_ZONE_MALLOC); 563 for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) { 564 int size = kmemzones[indx].kz_size; 565 char *name = kmemzones[indx].kz_name; 566 567 kmemzones[indx].kz_zone = uma_zcreate(name, size, 568 #ifdef INVARIANTS 569 mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini, 570 #else 571 NULL, NULL, NULL, NULL, 572 #endif 573 UMA_ALIGN_PTR, UMA_ZONE_MALLOC); 574 575 for (;i <= size; i+= KMEM_ZBASE) 576 kmemsize[i >> KMEM_ZSHIFT] = indx; 577 578 } 579 } 580 581 void 582 malloc_init(void *data) 583 { 584 struct malloc_type_internal *mtip; 585 struct malloc_type *mtp; 586 587 KASSERT(cnt.v_page_count != 0, ("malloc_register before vm_init")); 588 589 mtp = data; 590 mtip = uma_zalloc(mt_zone, M_WAITOK | M_ZERO); 591 mtp->ks_handle = mtip; 592 593 mtx_lock(&malloc_mtx); 594 mtp->ks_next = kmemstatistics; 595 kmemstatistics = mtp; 596 mtx_unlock(&malloc_mtx); 597 } 598 599 void 600 malloc_uninit(void *data) 601 { 602 struct malloc_type_internal *mtip; 603 struct malloc_type *mtp, *temp; 604 605 mtp = data; 606 KASSERT(mtp->ks_handle != NULL, ("malloc_deregister: cookie NULL")); 607 mtx_lock(&malloc_mtx); 608 mtip = mtp->ks_handle; 609 mtp->ks_handle = NULL; 610 if (mtp != kmemstatistics) { 611 for (temp = kmemstatistics; temp != NULL; 612 temp = temp->ks_next) { 613 if (temp->ks_next == mtp) 614 temp->ks_next = mtp->ks_next; 615 } 616 } else 617 kmemstatistics = mtp->ks_next; 618 mtx_unlock(&malloc_mtx); 619 uma_zfree(mt_zone, mtp); 620 } 621 622 static int 623 sysctl_kern_malloc(SYSCTL_HANDLER_ARGS) 624 { 625 struct malloc_type_stats mts_local, *mtsp; 626 struct malloc_type_internal *mtip; 627 struct malloc_type *mtp; 628 struct sbuf sbuf; 629 long temp_allocs, temp_bytes; 630 int linesize = 128; 631 int bufsize; 632 int first; 633 int error; 634 char *buf; 635 int cnt; 636 int i; 637 638 cnt = 0; 639 640 /* Guess at how much room is needed. */ 641 mtx_lock(&malloc_mtx); 642 for (mtp = kmemstatistics; mtp != NULL; mtp = mtp->ks_next) 643 cnt++; 644 mtx_unlock(&malloc_mtx); 645 646 bufsize = linesize * (cnt + 1); 647 buf = malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO); 648 sbuf_new(&sbuf, buf, bufsize, SBUF_FIXEDLEN); 649 650 mtx_lock(&malloc_mtx); 651 sbuf_printf(&sbuf, 652 "\n Type InUse MemUse HighUse Requests Size(s)\n"); 653 for (mtp = kmemstatistics; cnt != 0 && mtp != NULL; 654 mtp = mtp->ks_next, cnt--) { 655 mtip = mtp->ks_handle; 656 bzero(&mts_local, sizeof(mts_local)); 657 for (i = 0; i < MAXCPU; i++) { 658 mtsp = &mtip->mti_stats[i]; 659 mts_local.mts_memalloced += mtsp->mts_memalloced; 660 mts_local.mts_memfreed += mtsp->mts_memfreed; 661 mts_local.mts_numallocs += mtsp->mts_numallocs; 662 mts_local.mts_numfrees += mtsp->mts_numfrees; 663 mts_local.mts_size |= mtsp->mts_size; 664 } 665 if (mts_local.mts_numallocs == 0) 666 continue; 667 668 /* 669 * Due to races in per-CPU statistics gather, it's possible to 670 * get a slightly negative number here. If we do, approximate 671 * with 0. 672 */ 673 if (mts_local.mts_numallocs > mts_local.mts_numfrees) 674 temp_allocs = mts_local.mts_numallocs - 675 mts_local.mts_numfrees; 676 else 677 temp_allocs = 0; 678 679 /* 680 * Ditto for bytes allocated. 681 */ 682 if (mts_local.mts_memalloced > mts_local.mts_memfreed) 683 temp_bytes = mts_local.mts_memalloced - 684 mts_local.mts_memfreed; 685 else 686 temp_bytes = 0; 687 688 /* 689 * XXXRW: High-waterwark is no longer easily available, so 690 * we just print '-' for that column. 691 */ 692 sbuf_printf(&sbuf, "%13s%6lu%6luK -%9lu", 693 mtp->ks_shortdesc, 694 temp_allocs, 695 (temp_bytes + 1023) / 1024, 696 mts_local.mts_numallocs); 697 698 first = 1; 699 for (i = 0; i < sizeof(kmemzones) / sizeof(kmemzones[0]) - 1; 700 i++) { 701 if (mts_local.mts_size & (1 << i)) { 702 if (first) 703 sbuf_printf(&sbuf, " "); 704 else 705 sbuf_printf(&sbuf, ","); 706 sbuf_printf(&sbuf, "%s", 707 kmemzones[i].kz_name); 708 first = 0; 709 } 710 } 711 sbuf_printf(&sbuf, "\n"); 712 } 713 sbuf_finish(&sbuf); 714 mtx_unlock(&malloc_mtx); 715 716 error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf)); 717 718 sbuf_delete(&sbuf); 719 free(buf, M_TEMP); 720 return (error); 721 } 722 723 SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD, 724 NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats"); 725 726 #ifdef MALLOC_PROFILE 727 728 static int 729 sysctl_kern_mprof(SYSCTL_HANDLER_ARGS) 730 { 731 int linesize = 64; 732 struct sbuf sbuf; 733 uint64_t count; 734 uint64_t waste; 735 uint64_t mem; 736 int bufsize; 737 int error; 738 char *buf; 739 int rsize; 740 int size; 741 int len; 742 int i; 743 744 bufsize = linesize * (KMEM_ZSIZE + 1); 745 bufsize += 128; /* For the stats line */ 746 bufsize += 128; /* For the banner line */ 747 waste = 0; 748 mem = 0; 749 750 buf = malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO); 751 sbuf_new(&sbuf, buf, bufsize, SBUF_FIXEDLEN); 752 sbuf_printf(&sbuf, 753 "\n Size Requests Real Size\n"); 754 for (i = 0; i < KMEM_ZSIZE; i++) { 755 size = i << KMEM_ZSHIFT; 756 rsize = kmemzones[kmemsize[i]].kz_size; 757 count = (long long unsigned)krequests[i]; 758 759 sbuf_printf(&sbuf, "%6d%28llu%11d\n", size, 760 (unsigned long long)count, rsize); 761 762 if ((rsize * count) > (size * count)) 763 waste += (rsize * count) - (size * count); 764 mem += (rsize * count); 765 } 766 sbuf_printf(&sbuf, 767 "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n", 768 (unsigned long long)mem, (unsigned long long)waste); 769 sbuf_finish(&sbuf); 770 771 error = SYSCTL_OUT(req, sbuf_data(&sbuf), sbuf_len(&sbuf)); 772 773 sbuf_delete(&sbuf); 774 free(buf, M_TEMP); 775 return (error); 776 } 777 778 SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD, 779 NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling"); 780 #endif /* MALLOC_PROFILE */ 781