/*-
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#ifdef DEBUG_MEMGUARD
#include <vm/memguard.h>
#endif

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory.  'Sufficiently smaller' is defined as: newsize <=
 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
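/*
 * A worked example of the rule above (the numbers are illustrative,
 * not taken from a particular caller): with the default
 * REALLOC_FRACTION of 1, shrinking a 1024-byte block to 512 bytes or
 * less (newsize <= 1024 / 2^1) allocates a fresh, smaller block and
 * copies the contents over, while shrinking it to anywhere in
 * 513..1024 bytes reuses the existing block in place.
 */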
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static char *kmembase;
static char *kmemlimit;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	PAGE_SIZE
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static u_int8_t kmemsize[KMEM_ZSIZE + 1];

/* These won't be powers of two for long */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone;
} kmemzones[] = {
	{16, "16", NULL},
	{32, "32", NULL},
	{64, "64", NULL},
	{128, "128", NULL},
	{256, "256", NULL},
	{512, "512", NULL},
	{1024, "1024", NULL},
	{2048, "2048", NULL},
	{4096, "4096", NULL},
#if PAGE_SIZE > 4096
	{8192, "8192", NULL},
#if PAGE_SIZE > 8192
	{16384, "16384", NULL},
#if PAGE_SIZE > 16384
	{32768, "32768", NULL},
#if PAGE_SIZE > 32768
	{65536, "65536", NULL},
#if PAGE_SIZE > 65536
#error "Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};

#ifdef DEBUG_MEMGUARD
u_int vm_memguard_divisor;
SYSCTL_UINT(_vm, OID_AUTO, memguard_divisor, CTLFLAG_RD, &vm_memguard_divisor,
    0, "(kmem_size/memguard_divisor) == memguard submap size");
#endif

u_int vm_kmem_size;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size, CTLFLAG_RD, &vm_kmem_size, 0,
    "Size of kernel memory");

u_int vm_kmem_size_max;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_max, CTLFLAG_RD, &vm_kmem_size_max, 0,
    "Maximum size of kernel memory");

u_int vm_kmem_size_scale;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size_scale, CTLFLAG_RD, &vm_kmem_size_scale, 0,
    "Scale factor for kernel memory size");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */

struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS);

/* time_uptime of last malloc(9) failure */
static time_t t_malloc_fail;

#ifdef MALLOC_MAKE_FAILURES
/*
 * Causes malloc failures every (n) mallocs with M_NOWAIT.  If set to 0,
 * doesn't cause failures.
 */
SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");

static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif
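/*
 * Sketch of how the failure-injection knobs above are driven from
 * userland on a kernel built with MALLOC_MAKE_FAILURES (the rate of
 * 100 is just an example):
 *
 *	sysctl debug.malloc.failure_rate=100
 *
 * makes one in every hundred M_NOWAIT allocations fail, which is a
 * cheap way to exercise callers' NULL-return error paths;
 * debug.malloc.failure_count reports how many failures were imposed.
 */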
int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 * Add this to the informational malloc_type bucket.
 */
static void
malloc_type_zone_allocated(struct malloc_type *ksp, unsigned long size,
    int zindx)
{
	mtx_lock(&ksp->ks_mtx);
	ksp->ks_calls++;
	if (zindx != -1)
		ksp->ks_size |= 1 << zindx;
	if (size != 0) {
		ksp->ks_memuse += size;
		ksp->ks_inuse++;
		if (ksp->ks_memuse > ksp->ks_maxused)
			ksp->ks_maxused = ksp->ks_memuse;
	}
	mtx_unlock(&ksp->ks_mtx);
}

void
malloc_type_allocated(struct malloc_type *ksp, unsigned long size)
{
	malloc_type_zone_allocated(ksp, size, -1);
}

/*
 * Remove this allocation from the informational malloc_type bucket.
 */
void
malloc_type_freed(struct malloc_type *ksp, unsigned long size)
{
	mtx_lock(&ksp->ks_mtx);
	KASSERT(size <= ksp->ks_memuse,
	    ("malloc(9)/free(9) confusion.\n%s",
	    "Probably freeing with wrong type, but maybe not here."));
	ksp->ks_memuse -= size;
	ksp->ks_inuse--;
	mtx_unlock(&ksp->ks_mtx);
}

/*
 * malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return
 *	NULL if the allocation fails.
 */
void *
malloc(unsigned long size, struct malloc_type *type, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
	uma_keg_t keg;
#ifdef DIAGNOSTIC
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	/*
	 * Make sure that exactly one of M_WAITOK and M_NOWAIT is set,
	 * and check for the API botches that are common.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT | M_DONTWAIT | M_TRYWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static struct timeval lasterr;
		static int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#if 0
	if (size == 0)
		kdb_enter("zero size malloc");
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));

#ifdef DEBUG_MEMGUARD
	/* XXX CHANGEME! */
	if (type == M_SUBPROC)
		return (memguard_alloc(size, flags));
#endif

	if (size <= KMEM_ZMAX) {
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone;
		keg = zone->uz_keg;
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = keg->uk_size;
		malloc_type_zone_allocated(type, va == NULL ? 0 : size, indx);
	} else {
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		keg = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(type, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
	return ((void *) va);
}
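/*
 * Worked example of the small-allocation path above (the request
 * size is illustrative): a request for 100 bytes has low bits set
 * (100 & KMEM_ZMASK == 4), so it is rounded up to
 * (100 & ~KMEM_ZMASK) + KMEM_ZBASE == 112 bytes.  The lookup
 * kmemsize[112 >> KMEM_ZSHIFT] == kmemsize[7] selects the 128-byte
 * zone, and once uma_zalloc() succeeds the caller's malloc_type is
 * charged for the full 128-byte item, not the 100 bytes requested.
 */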
/*
 * free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *type)
{
	uma_slab_t slab;
	u_long size;

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

#ifdef DEBUG_MEMGUARD
	/* XXX CHANGEME! */
	if (type == M_SUBPROC) {
		memguard_free(addr);
		return;
	}
#endif

	KASSERT(type->ks_memuse > 0,
	    ("malloc(9)/free(9) confusion.\n%s",
	    "Probably freeing with wrong type, but maybe not here."));
	size = 0;

	slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK));

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & (~UMA_SLAB_MASK)));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that the allocation size is a multiple
		 * of 8 bytes on 64-bit machines.
		 */
		mtp = (struct malloc_type **)
		    ((unsigned long)mtp & ~UMA_ALIGN_PTR);
		mtp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtp = type;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(type, size);
}
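/*
 * Illustration of the INVARIANTS bookkeeping in free() above (the
 * 64-byte size is an example): for a 64-byte item, mtp is aligned
 * down to a pointer boundary and advanced by (64 - 8) / 8 == 7
 * pointer slots on a 64-bit machine, so the owning malloc_type is
 * stored in the last pointer-sized word of the freed item.  If the
 * item is later corrupted through a stale pointer, that word names
 * the type that most recently freed it.
 */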
/*
 * realloc: change the size of a memory block
 */
void *
realloc(void *addr, unsigned long size, struct malloc_type *type, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, type, flags));

#ifdef DEBUG_MEMGUARD
	/* XXX: CHANGEME! */
	if (type == M_SUBPROC) {
		slab = NULL;
		alloc = size;
	} else {
#endif

	slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK));

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (slab->us_keg)
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc
	    && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);

#ifdef DEBUG_MEMGUARD
	}
#endif

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, type, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, type);
	return (newaddr);
}

/*
 * reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, unsigned long size, struct malloc_type *type, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, type, flags)) == NULL)
		free(addr, type);
	return (mem);
}
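/*
 * Typical reallocf() usage (the buffer, size, and type here are
 * hypothetical, not from this file): growing a buffer without
 * leaking it when the allocation fails.
 *
 *	buf = reallocf(buf, newsize, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);
 *
 * With plain realloc() this idiom would leak: a NULL return leaves
 * the old block allocated, but overwriting the only pointer to it
 * loses the address needed to free it.
 */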
/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(void *dummy)
{
	u_int8_t indx;
	u_long mem_size;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.
	 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while
	 * a VM_KMEM_SIZE of 12MB is a fair compromise.  The
	 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space
	 * available, and on an X86 with a total KVA space of 256MB,
	 * try to keep VM_KMEM_SIZE_MAX at 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
	mem_size = cnt.v_page_count;

#if defined(VM_KMEM_SIZE_SCALE)
	vm_kmem_size_scale = VM_KMEM_SIZE_SCALE;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_scale", &vm_kmem_size_scale);
	if (vm_kmem_size_scale > 0 &&
	    (mem_size / vm_kmem_size_scale) > (vm_kmem_size / PAGE_SIZE))
		vm_kmem_size = (mem_size / vm_kmem_size_scale) * PAGE_SIZE;

#if defined(VM_KMEM_SIZE_MAX)
	vm_kmem_size_max = VM_KMEM_SIZE_MAX;
#endif
	TUNABLE_INT_FETCH("vm.kmem_size_max", &vm_kmem_size_max);
	if (vm_kmem_size_max > 0 && vm_kmem_size >= vm_kmem_size_max)
		vm_kmem_size = vm_kmem_size_max;

	/* Allow final override from the kernel environment */
#ifndef BURN_BRIDGES
	if (TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size) != 0)
		printf("kern.vm.kmem.size is now called vm.kmem_size!\n");
#endif
	TUNABLE_INT_FETCH("vm.kmem_size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.
	 * This allows for kmem map sparseness, but limits the size
	 * to something sane.  Be careful to not overflow the 32bit
	 * ints while doing the check.
	 */
	if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count)
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

	/*
	 * Tune settings based on the kernel map's size at this time.
	 */
	init_param3(vm_kmem_size / PAGE_SIZE);

	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
	    (vm_offset_t *)&kmemlimit, vm_kmem_size);
	kmem_map->system_map = 1;

#ifdef DEBUG_MEMGUARD
	/*
	 * Initialize MemGuard if support compiled in.  MemGuard is a
	 * replacement allocator used for detecting tamper-after-free
	 * scenarios as they occur.  It is only used for debugging.
	 */
	vm_memguard_divisor = 10;
	TUNABLE_INT_FETCH("vm.memguard_divisor", &vm_memguard_divisor);

	/* Pick a conservative value if provided value sucks. */
	if ((vm_memguard_divisor <= 0) ||
	    ((vm_kmem_size / vm_memguard_divisor) == 0))
		vm_memguard_divisor = 10;
	memguard_init(kmem_map, vm_kmem_size / vm_memguard_divisor);
#endif

	uma_startup2();

	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;

		kmemzones[indx].kz_zone = uma_zcreate(name, size,
#ifdef INVARIANTS
		    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
		    NULL, NULL, NULL, NULL,
#endif
		    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);

		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}
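/*
 * Example of overriding the auto-tuning in kmeminit() from the
 * loader (the values are illustrative):
 *
 *	# /boot/loader.conf
 *	vm.kmem_size="268435456"	(fix the kmem map at 256MB)
 *	vm.kmem_size_scale="3"		(or derive it as physmem / 3)
 *
 * The deprecated kern.vm.kmem.size tunable is still honored unless
 * BURN_BRIDGES is defined, but it prints a warning pointing at
 * vm.kmem_size.
 */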
void
malloc_init(void *data)
{
	struct malloc_type *type = (struct malloc_type *)data;

	mtx_lock(&malloc_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	/* Already initialized; don't return with malloc_mtx held. */
	if (type->ks_next != NULL) {
		mtx_unlock(&malloc_mtx);
		return;
	}

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
	mtx_init(&type->ks_mtx, type->ks_shortdesc, "Malloc Stats", MTX_DEF);
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type *type = (struct malloc_type *)data;
	struct malloc_type *t;

	mtx_lock(&malloc_mtx);
	mtx_lock(&type->ks_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	mtx_destroy(&type->ks_mtx);
	mtx_unlock(&malloc_mtx);
}

static int
sysctl_kern_malloc(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type *type;
	int linesize = 128;
	int curline;
	int bufsize;
	int first;
	int error;
	char *buf;
	char *p;
	int cnt;
	int len;
	int i;

	cnt = 0;

	mtx_lock(&malloc_mtx);
	for (type = kmemstatistics; type != NULL; type = type->ks_next)
		cnt++;

	mtx_unlock(&malloc_mtx);
	bufsize = linesize * (cnt + 1);
	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	mtx_lock(&malloc_mtx);

	len = snprintf(p, linesize,
	    "\n        Type  InUse MemUse HighUse Requests  Size(s)\n");
	p += len;

	for (type = kmemstatistics; cnt != 0 && type != NULL;
	    type = type->ks_next, cnt--) {
		if (type->ks_calls == 0)
			continue;

		curline = linesize - 2;	/* Leave room for the \n */
		len = snprintf(p, curline, "%13s%6lu%6luK%7luK%9llu",
		    type->ks_shortdesc,
		    type->ks_inuse,
		    (type->ks_memuse + 1023) / 1024,
		    (type->ks_maxused + 1023) / 1024,
		    (unsigned long long)type->ks_calls);
		curline -= len;
		p += len;

		first = 1;
		for (i = 0; i < sizeof(kmemzones) / sizeof(kmemzones[0]) - 1;
		    i++) {
			if (type->ks_size & (1 << i)) {
				if (first)
					len = snprintf(p, curline, "  ");
				else
					len = snprintf(p, curline, ",");
				curline -= len;
				p += len;

				len = snprintf(p, curline,
				    "%s", kmemzones[i].kz_name);
				curline -= len;
				p += len;

				first = 0;
			}
		}

		len = snprintf(p, 2, "\n");
		p += len;
	}

	mtx_unlock(&malloc_mtx);
	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats");

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	int linesize = 64;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int bufsize;
	int error;
	char *buf;
	int rsize;
	int size;
	char *p;
	int len;
	int i;

	bufsize = linesize * (KMEM_ZSIZE + 1);
	bufsize += 128;		/* For the stats line */
	bufsize += 128;		/* For the banner line */
	waste = 0;
	mem = 0;

	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	len = snprintf(p, bufsize,
	    "\n  Size                    Requests  Real Size\n");
	bufsize -= len;
	p += len;

	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = krequests[i];

		len = snprintf(p, bufsize, "%6d%28llu%11d\n",
		    size, (unsigned long long)count, rsize);
		bufsize -= len;
		p += len;

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}

	len = snprintf(p, bufsize,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	p += len;

	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */
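/*
 * The tables built by the handlers above are read from userland with
 * "sysctl kern.malloc" (and "sysctl kern.mprof" on MALLOC_PROFILE
 * kernels); each read re-runs the handler and returns one formatted
 * line per malloc type or per request size.
 */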