/*
 * Copyright (c) 1987, 1991, 1993
 *	The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 4. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *	@(#)kern_malloc.c	8.3 (Berkeley) 1/4/94
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_vm.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/kdb.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mbuf.h>
#include <sys/mutex.h>
#include <sys/vmmeter.h>
#include <sys/proc.h>
#include <sys/sysctl.h>
#include <sys/time.h>

#include <vm/vm.h>
#include <vm/pmap.h>
#include <vm/vm_param.h>
#include <vm/vm_kern.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_page.h>
#include <vm/uma.h>
#include <vm/uma_int.h>
#include <vm/uma_dbg.h>

#if defined(INVARIANTS) && defined(__i386__)
#include <machine/cpu.h>
#endif

/*
 * When realloc() is called, if the new size is sufficiently smaller than
 * the old size, realloc() will allocate a new, smaller block to avoid
 * wasting memory.  'Sufficiently smaller' is defined as:
 * newsize <= oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'.
 */
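/*
 * A worked example of the rule above (illustrative only): with the
 * default REALLOC_FRACTION of 1, shrinking a 1024-byte block to
 * 512 bytes or less (1024 / 2^1) makes realloc() move the data to a
 * new, smaller block, while shrinking it to 513-1024 bytes reuses
 * the original block in place.
 */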
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

static struct malloc_type *kmemstatistics;
static char *kmembase;
static char *kmemlimit;

#define	KMEM_ZSHIFT	4
#define	KMEM_ZBASE	16
#define	KMEM_ZMASK	(KMEM_ZBASE - 1)

#define	KMEM_ZMAX	PAGE_SIZE
#define	KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
static u_int8_t kmemsize[KMEM_ZSIZE + 1];

/* These won't be powers of two for long */
struct {
	int kz_size;
	char *kz_name;
	uma_zone_t kz_zone;
} kmemzones[] = {
	{16, "16", NULL},
	{32, "32", NULL},
	{64, "64", NULL},
	{128, "128", NULL},
	{256, "256", NULL},
	{512, "512", NULL},
	{1024, "1024", NULL},
	{2048, "2048", NULL},
	{4096, "4096", NULL},
#if PAGE_SIZE > 4096
	{8192, "8192", NULL},
#if PAGE_SIZE > 8192
	{16384, "16384", NULL},
#if PAGE_SIZE > 16384
	{32768, "32768", NULL},
#if PAGE_SIZE > 32768
	{65536, "65536", NULL},
#if PAGE_SIZE > 65536
#error	"Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};

u_int vm_kmem_size;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size, CTLFLAG_RD, &vm_kmem_size, 0,
    "Size of kernel memory");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */
struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS);

/* time_uptime of last malloc(9) failure */
static time_t t_malloc_fail;

#ifdef MALLOC_MAKE_FAILURES
/*
 * Cause a malloc failure every (n) mallocs made with M_NOWAIT.  A
 * value of 0 disables failure injection.
 */
SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");

static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}
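/*
 * Illustrative sketch (not part of this file): a subsystem can use
 * malloc_last_fail() to back off optional M_NOWAIT allocations while
 * the system is short on memory.  The 5-second threshold below is a
 * hypothetical tuning choice:
 *
 *	if (malloc_last_fail() < 5)
 *		return;			(a malloc(9) failed recently;
 *					 skip the optional allocation)
 *	p = malloc(size, M_CACHE, M_NOWAIT);
 */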
/*
 * Add this allocation to the informational malloc_type bucket.
 */
static void
malloc_type_zone_allocated(struct malloc_type *ksp, unsigned long size,
    int zindx)
{
	mtx_lock(&ksp->ks_mtx);
	ksp->ks_calls++;
	if (zindx != -1)
		ksp->ks_size |= 1 << zindx;
	if (size != 0) {
		ksp->ks_memuse += size;
		ksp->ks_inuse++;
		if (ksp->ks_memuse > ksp->ks_maxused)
			ksp->ks_maxused = ksp->ks_memuse;
	}
	mtx_unlock(&ksp->ks_mtx);
}

void
malloc_type_allocated(struct malloc_type *ksp, unsigned long size)
{
	malloc_type_zone_allocated(ksp, size, -1);
}

/*
 * Remove this allocation from the informational malloc_type bucket.
 */
void
malloc_type_freed(struct malloc_type *ksp, unsigned long size)
{
	mtx_lock(&ksp->ks_mtx);
	KASSERT(size <= ksp->ks_memuse,
	    ("malloc(9)/free(9) confusion.\n%s",
	    "Probably freeing with wrong type, but maybe not here."));
	ksp->ks_memuse -= size;
	ksp->ks_inuse--;
	mtx_unlock(&ksp->ks_mtx);
}

/*
 * malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and will return
 *	NULL if the allocation fails.
 */
void *
malloc(unsigned long size, struct malloc_type *type, int flags)
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
	uma_keg_t keg;
#ifdef DIAGNOSTIC
	unsigned long osize = size;
#endif

#ifdef INVARIANTS
	/*
	 * Make sure that exactly one of M_WAITOK and M_NOWAIT is set,
	 * and catch the API botches that are common.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT | M_DONTWAIT | M_TRYWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static struct timeval lasterr;
		static int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#if 0
	if (size == 0)
		kdb_enter("zero size malloc");
#endif
#ifdef MALLOC_MAKE_FAILURES
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
	if (size <= KMEM_ZMAX) {
		/* Round up to the next KMEM_ZBASE boundary and pick a zone. */
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone;
		keg = zone->uz_keg;
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		if (va != NULL)
			size = keg->uk_size;
		malloc_type_zone_allocated(type, va == NULL ? 0 : size, indx);
	} else {
		/* Requests larger than a page go straight to the VM. */
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		keg = NULL;
		va = uma_large_malloc(size, flags);
		malloc_type_allocated(type, va == NULL ? 0 : size);
	}
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	/* Fill fresh memory with junk to expose uses of uninitialized data. */
	if (va != NULL && !(flags & M_ZERO))
		memset(va, 0x70, osize);
#endif
	return ((void *)va);
}
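/*
 * Typical caller-side pattern for malloc()/free() above (illustrative
 * sketch; "struct foo" and M_EXAMPLE are hypothetical, the latter
 * standing in for a type created with MALLOC_DEFINE()):
 *
 *	struct foo *fp;
 *
 *	fp = malloc(sizeof(*fp), M_EXAMPLE, M_NOWAIT | M_ZERO);
 *	if (fp == NULL)
 *		return (ENOMEM);	(M_NOWAIT allocations may fail)
 *	...
 *	free(fp, M_EXAMPLE);
 *
 * With M_WAITOK the NULL check is unnecessary, but the call must not be
 * made from interrupt context, as the KASSERT in malloc() enforces.
 */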
/*
 * free:
 *
 *	Free a block of memory allocated by malloc.
 *
 *	This routine may not block.
 */
void
free(void *addr, struct malloc_type *type)
{
	uma_slab_t slab;
	u_long size;

	/* free(NULL, ...) does nothing */
	if (addr == NULL)
		return;

	KASSERT(type->ks_memuse > 0,
	    ("malloc(9)/free(9) confusion.\n%s",
	    "Probably freeing with wrong type, but maybe not here."));
	size = 0;

	slab = vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK);

	if (slab == NULL)
		panic("free: address %p(%p) has not been allocated.\n",
		    addr, (void *)((u_long)addr & ~UMA_SLAB_MASK));

	if (!(slab->us_flags & UMA_SLAB_MALLOC)) {
#ifdef INVARIANTS
		struct malloc_type **mtp = addr;
#endif
		size = slab->us_keg->uk_size;
#ifdef INVARIANTS
		/*
		 * Cache a pointer to the malloc_type that most recently freed
		 * this memory here.  This way we know who is most likely to
		 * have stepped on it later.
		 *
		 * This code assumes that size is a multiple of 8 bytes on
		 * 64-bit machines.
		 */
		mtp = (struct malloc_type **)
		    ((unsigned long)mtp & ~UMA_ALIGN_PTR);
		mtp += (size - sizeof(struct malloc_type *)) /
		    sizeof(struct malloc_type *);
		*mtp = type;
#endif
		uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab);
	} else {
		size = slab->us_size;
		uma_large_free(slab);
	}
	malloc_type_freed(type, size);
}

/*
 * realloc: change the size of a memory block
 */
void *
realloc(void *addr, unsigned long size, struct malloc_type *type, int flags)
{
	uma_slab_t slab;
	unsigned long alloc;
	void *newaddr;

	/* realloc(NULL, ...) is equivalent to malloc(...) */
	if (addr == NULL)
		return (malloc(size, type, flags));

	slab = vtoslab((vm_offset_t)addr & ~UMA_SLAB_MASK);

	/* Sanity check */
	KASSERT(slab != NULL,
	    ("realloc: address %p out of range", (void *)addr));

	/* Get the size of the original block */
	if (slab->us_keg)
		alloc = slab->us_keg->uk_size;
	else
		alloc = slab->us_size;

	/* Reuse the original block if appropriate */
	if (size <= alloc &&
	    (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE))
		return (addr);

	/* Allocate a new, bigger (or smaller) block */
	if ((newaddr = malloc(size, type, flags)) == NULL)
		return (NULL);

	/* Copy over original contents */
	bcopy(addr, newaddr, min(size, alloc));
	free(addr, type);
	return (newaddr);
}

/*
 * reallocf: same as realloc() but free memory on failure.
 */
void *
reallocf(void *addr, unsigned long size, struct malloc_type *type, int flags)
{
	void *mem;

	if ((mem = realloc(addr, size, type, flags)) == NULL)
		free(addr, type);
	return (mem);
}
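/*
 * Sketch of why reallocf() exists (illustrative; buf and buflen are
 * hypothetical): the common idiom "buf = realloc(buf, ...)" loses the
 * only pointer to the old block when realloc() fails, leaking it.
 * reallocf() folds the cleanup into the failure path:
 *
 *	buf = reallocf(buf, buflen * 2, M_TEMP, M_NOWAIT);
 *	if (buf == NULL)
 *		return (ENOMEM);	(old buffer already freed)
 */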
/*
 * Initialize the kernel memory allocator
 */
/* ARGSUSED*/
static void
kmeminit(void *dummy)
{
	u_int8_t indx;
	u_long mem_size;
	int i;

	mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF);

	/*
	 * Try to auto-tune the kernel memory size, so that it is
	 * more applicable for a wider range of machine sizes.  On an
	 * x86, a VM_KMEM_SIZE_SCALE value of 4 is good, while a
	 * VM_KMEM_SIZE of 12MB is a fair compromise.  VM_KMEM_SIZE_MAX
	 * depends on the maximum KVA space available; on an x86 with a
	 * total KVA space of 256MB, try to keep VM_KMEM_SIZE_MAX at
	 * 80MB or below.
	 *
	 * Note that the kmem_map is also used by the zone allocator,
	 * so make sure that there is enough space.
	 */
	vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE;
	mem_size = cnt.v_page_count;

#if defined(VM_KMEM_SIZE_SCALE)
	if ((mem_size / VM_KMEM_SIZE_SCALE) > (vm_kmem_size / PAGE_SIZE))
		vm_kmem_size = (mem_size / VM_KMEM_SIZE_SCALE) * PAGE_SIZE;
#endif

#if defined(VM_KMEM_SIZE_MAX)
	if (vm_kmem_size >= VM_KMEM_SIZE_MAX)
		vm_kmem_size = VM_KMEM_SIZE_MAX;
#endif

	/* Allow final override from the kernel environment */
#ifndef BURN_BRIDGES
	if (TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size) != 0)
		printf("kern.vm.kmem.size is now called vm.kmem_size!\n");
#endif
	TUNABLE_INT_FETCH("vm.kmem_size", &vm_kmem_size);

	/*
	 * Limit kmem virtual size to twice the physical memory.  This
	 * allows for kmem map sparseness, but limits the size to
	 * something sane.  Be careful not to overflow the 32-bit ints
	 * while doing the check.
	 */
	if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count)
		vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE;

	/*
	 * Tune settings based on the kernel map's size at this time.
	 */
	init_param3(vm_kmem_size / PAGE_SIZE);

	kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase,
	    (vm_offset_t *)&kmemlimit, vm_kmem_size);
	kmem_map->system_map = 1;

	uma_startup2();

	for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) {
		int size = kmemzones[indx].kz_size;
		char *name = kmemzones[indx].kz_name;

		kmemzones[indx].kz_zone = uma_zcreate(name, size,
#ifdef INVARIANTS
		    mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini,
#else
		    NULL, NULL, NULL, NULL,
#endif
		    UMA_ALIGN_PTR, UMA_ZONE_MALLOC);

		/* Populate the request-size-to-zone lookup table. */
		for (; i <= size; i += KMEM_ZBASE)
			kmemsize[i >> KMEM_ZSHIFT] = indx;
	}
}

void
malloc_init(void *data)
{
	struct malloc_type *type = (struct malloc_type *)data;

	mtx_lock(&malloc_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_init not allowed before vm init");

	/* Already registered; don't return with malloc_mtx still held. */
	if (type->ks_next != NULL) {
		mtx_unlock(&malloc_mtx);
		return;
	}

	type->ks_next = kmemstatistics;
	kmemstatistics = type;
	mtx_init(&type->ks_mtx, type->ks_shortdesc, "Malloc Stats", MTX_DEF);
	mtx_unlock(&malloc_mtx);
}

void
malloc_uninit(void *data)
{
	struct malloc_type *type = (struct malloc_type *)data;
	struct malloc_type *t;

	mtx_lock(&malloc_mtx);
	mtx_lock(&type->ks_mtx);
	if (type->ks_magic != M_MAGIC)
		panic("malloc type lacks magic");

	if (cnt.v_page_count == 0)
		panic("malloc_uninit not allowed before vm init");

	if (type == kmemstatistics)
		kmemstatistics = type->ks_next;
	else {
		for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) {
			if (t->ks_next == type) {
				t->ks_next = type->ks_next;
				break;
			}
		}
	}
	type->ks_next = NULL;
	/* mtx_destroy() accepts a mutex held by the calling thread. */
	mtx_destroy(&type->ks_mtx);
	mtx_unlock(&malloc_mtx);
}
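/*
 * Registration is normally driven by MALLOC_DEFINE(9) rather than by
 * calling malloc_init()/malloc_uninit() directly; the macro defines the
 * malloc_type and arranges the init/uninit calls through SYSINIT().
 * Illustrative sketch (M_MYDEV and the strings are placeholders):
 *
 *	MALLOC_DECLARE(M_MYDEV);	(in a shared header)
 *	MALLOC_DEFINE(M_MYDEV, "mydev", "my device driver buffers");
 */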
static int
sysctl_kern_malloc(SYSCTL_HANDLER_ARGS)
{
	struct malloc_type *type;
	int linesize = 128;
	int curline;
	int bufsize;
	int first;
	int error;
	char *buf;
	char *p;
	int cnt;
	int len;
	int i;

	cnt = 0;

	mtx_lock(&malloc_mtx);
	for (type = kmemstatistics; type != NULL; type = type->ks_next)
		cnt++;

	mtx_unlock(&malloc_mtx);
	bufsize = linesize * (cnt + 1);
	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	mtx_lock(&malloc_mtx);

	len = snprintf(p, linesize,
	    "\n        Type  InUse MemUse HighUse Requests  Size(s)\n");
	p += len;

	for (type = kmemstatistics; cnt != 0 && type != NULL;
	    type = type->ks_next, cnt--) {
		if (type->ks_calls == 0)
			continue;

		curline = linesize - 2;	/* Leave room for the \n */
		len = snprintf(p, curline, "%13s%6lu%6luK%7luK%9llu",
		    type->ks_shortdesc,
		    type->ks_inuse,
		    (type->ks_memuse + 1023) / 1024,
		    (type->ks_maxused + 1023) / 1024,
		    (long long unsigned)type->ks_calls);
		curline -= len;
		p += len;

		first = 1;
		for (i = 0; i < sizeof(kmemzones) / sizeof(kmemzones[0]) - 1;
		    i++) {
			if (type->ks_size & (1 << i)) {
				if (first)
					len = snprintf(p, curline, " ");
				else
					len = snprintf(p, curline, ",");
				curline -= len;
				p += len;

				len = snprintf(p, curline,
				    "%s", kmemzones[i].kz_name);
				curline -= len;
				p += len;

				first = 0;
			}
		}

		len = snprintf(p, 2, "\n");
		p += len;
	}

	mtx_unlock(&malloc_mtx);
	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats");

#ifdef MALLOC_PROFILE

static int
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS)
{
	int linesize = 64;
	uint64_t count;
	uint64_t waste;
	uint64_t mem;
	int bufsize;
	int error;
	char *buf;
	int rsize;
	int size;
	char *p;
	int len;
	int i;

	bufsize = linesize * (KMEM_ZSIZE + 1);
	bufsize += 128;		/* For the stats line */
	bufsize += 128;		/* For the banner line */
	waste = 0;
	mem = 0;

	p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO);
	len = snprintf(p, bufsize,
	    "\n  Size                    Requests  Real Size\n");
	bufsize -= len;
	p += len;

	for (i = 0; i < KMEM_ZSIZE; i++) {
		size = i << KMEM_ZSHIFT;
		rsize = kmemzones[kmemsize[i]].kz_size;
		count = krequests[i];

		len = snprintf(p, bufsize, "%6d%28llu%11d\n",
		    size, (unsigned long long)count, rsize);
		bufsize -= len;
		p += len;

		if ((rsize * count) > (size * count))
			waste += (rsize * count) - (size * count);
		mem += (rsize * count);
	}

	len = snprintf(p, bufsize,
	    "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n",
	    (unsigned long long)mem, (unsigned long long)waste);
	p += len;

	error = SYSCTL_OUT(req, buf, p - buf);

	free(buf, M_TEMP);
	return (error);
}

SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD,
    NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling");
#endif /* MALLOC_PROFILE */
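/*
 * Usage note (assuming a standard FreeBSD userland): the statistics
 * exported above are read with sysctl(8), e.g.
 *
 *	sysctl kern.malloc
 *
 * and, on kernels built with "options MALLOC_PROFILE",
 *
 *	sysctl kern.mprof
 */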