1 /* 2 * Copyright (c) 1987, 1991, 1993 3 * The Regents of the University of California. All rights reserved. 4 * 5 * Redistribution and use in source and binary forms, with or without 6 * modification, are permitted provided that the following conditions 7 * are met: 8 * 1. Redistributions of source code must retain the above copyright 9 * notice, this list of conditions and the following disclaimer. 10 * 2. Redistributions in binary form must reproduce the above copyright 11 * notice, this list of conditions and the following disclaimer in the 12 * documentation and/or other materials provided with the distribution. 13 * 4. Neither the name of the University nor the names of its contributors 14 * may be used to endorse or promote products derived from this software 15 * without specific prior written permission. 16 * 17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND 18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE 19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE 20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE 21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL 22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS 23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) 24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT 25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY 26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF 27 * SUCH DAMAGE. 
28 * 29 * @(#)kern_malloc.c 8.3 (Berkeley) 1/4/94 30 */ 31 32 #include <sys/cdefs.h> 33 __FBSDID("$FreeBSD$"); 34 35 #include "opt_vm.h" 36 37 #include <sys/param.h> 38 #include <sys/systm.h> 39 #include <sys/kdb.h> 40 #include <sys/kernel.h> 41 #include <sys/lock.h> 42 #include <sys/malloc.h> 43 #include <sys/mbuf.h> 44 #include <sys/mutex.h> 45 #include <sys/vmmeter.h> 46 #include <sys/proc.h> 47 #include <sys/sysctl.h> 48 #include <sys/time.h> 49 50 #include <vm/vm.h> 51 #include <vm/pmap.h> 52 #include <vm/vm_param.h> 53 #include <vm/vm_kern.h> 54 #include <vm/vm_extern.h> 55 #include <vm/vm_map.h> 56 #include <vm/vm_page.h> 57 #include <vm/uma.h> 58 #include <vm/uma_int.h> 59 #include <vm/uma_dbg.h> 60 61 #if defined(INVARIANTS) && defined(__i386__) 62 #include <machine/cpu.h> 63 #endif 64 65 /* 66 * When realloc() is called, if the new size is sufficiently smaller than 67 * the old size, realloc() will allocate a new, smaller block to avoid 68 * wasting memory. 'Sufficiently smaller' is defined as: newsize <= 69 * oldsize / 2^n, where REALLOC_FRACTION defines the value of 'n'. 
 */
#ifndef REALLOC_FRACTION
#define	REALLOC_FRACTION	1	/* new block if <= half the size */
#endif

MALLOC_DEFINE(M_CACHE, "cache", "Various Dynamically allocated caches");
MALLOC_DEFINE(M_DEVBUF, "devbuf", "device driver memory");
MALLOC_DEFINE(M_TEMP, "temp", "misc temporary data buffers");

MALLOC_DEFINE(M_IP6OPT, "ip6opt", "IPv6 options");
MALLOC_DEFINE(M_IP6NDP, "ip6ndp", "IPv6 Neighbor Discovery");

/* Bootstrap the allocator at SI_SUB_KMEM, before any malloc(9) consumers. */
static void kmeminit(void *);
SYSINIT(kmem, SI_SUB_KMEM, SI_ORDER_FIRST, kmeminit, NULL)

static MALLOC_DEFINE(M_FREE, "free", "should be on free list");

/* Head of the list of all registered malloc types; protected by malloc_mtx. */
static struct malloc_type *kmemstatistics;
/* Bounds of the kmem submap created in kmeminit(). */
static char *kmembase;
static char *kmemlimit;

/*
 * Requests are rounded up to multiples of KMEM_ZBASE (16 bytes); the
 * rounded size shifted right by KMEM_ZSHIFT indexes the kmemsize[] table,
 * which maps a request size to an entry in kmemzones[].
 */
#define KMEM_ZSHIFT	4
#define KMEM_ZBASE	16
#define KMEM_ZMASK	(KMEM_ZBASE - 1)

#define KMEM_ZMAX	PAGE_SIZE
#define KMEM_ZSIZE	(KMEM_ZMAX >> KMEM_ZSHIFT)
/* Request-size (in KMEM_ZBASE units) -> kmemzones[] index; filled by kmeminit(). */
static u_int8_t kmemsize[KMEM_ZSIZE + 1];

/*
 * Power-of-two size classes backing small (<= PAGE_SIZE) allocations.
 * Each entry gets its own UMA zone, created in kmeminit().
 */
/* These won't be powers of two for long */
struct {
	int kz_size;		/* allocation size served by this zone */
	char *kz_name;		/* zone name, for diagnostics/sysctl output */
	uma_zone_t kz_zone;	/* backing UMA zone, NULL until kmeminit() */
} kmemzones[] = {
	{16, "16", NULL},
	{32, "32", NULL},
	{64, "64", NULL},
	{128, "128", NULL},
	{256, "256", NULL},
	{512, "512", NULL},
	{1024, "1024", NULL},
	{2048, "2048", NULL},
	{4096, "4096", NULL},
#if PAGE_SIZE > 4096
	{8192, "8192", NULL},
#if PAGE_SIZE > 8192
	{16384, "16384", NULL},
#if PAGE_SIZE > 16384
	{32768, "32768", NULL},
#if PAGE_SIZE > 32768
	{65536, "65536", NULL},
#if PAGE_SIZE > 65536
#error "Unsupported PAGE_SIZE"
#endif	/* 65536 */
#endif	/* 32768 */
#endif	/* 16384 */
#endif	/* 8192 */
#endif	/* 4096 */
	{0, NULL},
};

/* Size of the kmem submap, computed/tuned in kmeminit(); read-only via sysctl. */
u_int vm_kmem_size;
SYSCTL_UINT(_vm, OID_AUTO, kmem_size, CTLFLAG_RD, &vm_kmem_size, 0,
    "Size of kernel memory");

/*
 * The malloc_mtx protects the kmemstatistics linked list.
 */

struct mtx malloc_mtx;

#ifdef MALLOC_PROFILE
/* Per-size-class request counters, indexed like kmemsize[]. */
uint64_t krequests[KMEM_ZSIZE + 1];

static int sysctl_kern_mprof(SYSCTL_HANDLER_ARGS);
#endif

static int sysctl_kern_malloc(SYSCTL_HANDLER_ARGS);

/* time_uptime of last malloc(9) failure */
static time_t t_malloc_fail;

#ifdef MALLOC_MAKE_FAILURES
/*
 * Causes malloc failures every (n) mallocs with M_NOWAIT. If set to 0,
 * doesn't cause failures.
 */
SYSCTL_NODE(_debug, OID_AUTO, malloc, CTLFLAG_RD, 0,
    "Kernel malloc debugging options");

static int malloc_failure_rate;
static int malloc_nowait_count;
static int malloc_failure_count;
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_rate, CTLFLAG_RW,
    &malloc_failure_rate, 0, "Every (n) mallocs with M_NOWAIT will fail");
TUNABLE_INT("debug.malloc.failure_rate", &malloc_failure_rate);
SYSCTL_INT(_debug_malloc, OID_AUTO, failure_count, CTLFLAG_RD,
    &malloc_failure_count, 0, "Number of imposed M_NOWAIT malloc failures");
#endif

/*
 * Return the number of seconds of uptime since the last malloc(9) failure,
 * so callers can rate-limit retries or diagnostics.
 */
int
malloc_last_fail(void)
{

	return (time_uptime - t_malloc_fail);
}

/*
 *	malloc:
 *
 *	Allocate a block of memory.
 *
 *	If M_NOWAIT is set, this routine will not block and return NULL if
 *	the allocation fails.
 */
void *
malloc(size, type, flags)
	unsigned long size;
	struct malloc_type *type;
	int flags;
{
	int indx;
	caddr_t va;
	uma_zone_t zone;
	uma_keg_t keg;
#ifdef DIAGNOSTIC
	/* Remember the caller's requested size; 'size' is rounded up below. */
	unsigned long osize = size;
#endif
	register struct malloc_type *ksp = type;

#ifdef INVARIANTS
	/*
	 * To make sure that WAITOK or NOWAIT is set, but not more than
	 * one, and check against the API botches that are common.
	 * On the first (rate-limited) offense, log a backtrace and force
	 * M_WAITOK so the bogus caller still gets memory.
	 */
	indx = flags & (M_WAITOK | M_NOWAIT | M_DONTWAIT | M_TRYWAIT);
	if (indx != M_NOWAIT && indx != M_WAITOK) {
		static struct timeval lasterr;
		static int curerr, once;
		if (once == 0 && ppsratecheck(&lasterr, &curerr, 1)) {
			printf("Bad malloc flags: %x\n", indx);
			kdb_backtrace();
			flags |= M_WAITOK;
			once++;
		}
	}
#endif
#if 0
	/* Disabled debugging aid: trap zero-sized allocations in the debugger. */
	if (size == 0)
		kdb_enter("zero size malloc");
#endif
#ifdef MALLOC_MAKE_FAILURES
	/* Deliberately fail every malloc_failure_rate'th M_NOWAIT request. */
	if ((flags & M_NOWAIT) && (malloc_failure_rate != 0)) {
		atomic_add_int(&malloc_nowait_count, 1);
		if ((malloc_nowait_count % malloc_failure_rate) == 0) {
			atomic_add_int(&malloc_failure_count, 1);
			t_malloc_fail = time_uptime;
			return (NULL);
		}
	}
#endif
	/* Sleeping allocations are never legal in interrupt context. */
	if (flags & M_WAITOK)
		KASSERT(curthread->td_intr_nesting_level == 0,
		    ("malloc(M_WAITOK) in interrupt context"));
	if (size <= KMEM_ZMAX) {
		/*
		 * Small request: round up to the next KMEM_ZBASE multiple
		 * and use kmemsize[] to pick the backing UMA zone.
		 */
		if (size & KMEM_ZMASK)
			size = (size & ~KMEM_ZMASK) + KMEM_ZBASE;
		indx = kmemsize[size >> KMEM_ZSHIFT];
		zone = kmemzones[indx].kz_zone;
		keg = zone->uz_keg;
#ifdef MALLOC_PROFILE
		krequests[size >> KMEM_ZSHIFT]++;
#endif
		va = uma_zalloc(zone, flags);
		mtx_lock(&ksp->ks_mtx);
		if (va == NULL)
			goto out;

		/* Record which size class this type has used. */
		ksp->ks_size |= 1 << indx;
		/* Account for the full zone item size, not the request. */
		size = keg->uk_size;
	} else {
		/* Large request: whole pages straight from the VM system. */
		size = roundup(size, PAGE_SIZE);
		zone = NULL;
		keg = NULL;
		va = uma_large_malloc(size, flags);
		mtx_lock(&ksp->ks_mtx);
		if (va == NULL)
			goto out;
	}
	ksp->ks_memuse += size;
	ksp->ks_inuse++;
out:
	/* ks_calls counts failed attempts too; ks_mtx is held here. */
	ksp->ks_calls++;
	if (ksp->ks_memuse > ksp->ks_maxused)
		ksp->ks_maxused = ksp->ks_memuse;

	mtx_unlock(&ksp->ks_mtx);
	if (flags & M_WAITOK)
		KASSERT(va != NULL, ("malloc(M_WAITOK) returned NULL"));
	else if (va == NULL)
		t_malloc_fail = time_uptime;
#ifdef DIAGNOSTIC
	/*
	 * Scribble a junk pattern over the (originally requested) bytes so
	 * callers cannot accidentally rely on malloc returning zeroed memory.
	 */
	if (va != NULL && !(flags & M_ZERO)) {
		memset(va, 0x70, osize);
	}
#endif
	return ((void *) va);
}

280 /* 281 * free: 282 * 283 * Free a block of memory allocated by malloc. 284 * 285 * This routine may not block. 286 */ 287 void 288 free(addr, type) 289 void *addr; 290 struct malloc_type *type; 291 { 292 register struct malloc_type *ksp = type; 293 uma_slab_t slab; 294 u_long size; 295 296 /* free(NULL, ...) does nothing */ 297 if (addr == NULL) 298 return; 299 300 KASSERT(ksp->ks_memuse > 0, 301 ("malloc(9)/free(9) confusion.\n%s", 302 "Probably freeing with wrong type, but maybe not here.")); 303 size = 0; 304 305 slab = vtoslab((vm_offset_t)addr & (~UMA_SLAB_MASK)); 306 307 if (slab == NULL) 308 panic("free: address %p(%p) has not been allocated.\n", 309 addr, (void *)((u_long)addr & (~UMA_SLAB_MASK))); 310 311 312 if (!(slab->us_flags & UMA_SLAB_MALLOC)) { 313 #ifdef INVARIANTS 314 struct malloc_type **mtp = addr; 315 #endif 316 size = slab->us_keg->uk_size; 317 #ifdef INVARIANTS 318 /* 319 * Cache a pointer to the malloc_type that most recently freed 320 * this memory here. This way we know who is most likely to 321 * have stepped on it later. 
322 * 323 * This code assumes that size is a multiple of 8 bytes for 324 * 64 bit machines 325 */ 326 mtp = (struct malloc_type **) 327 ((unsigned long)mtp & ~UMA_ALIGN_PTR); 328 mtp += (size - sizeof(struct malloc_type *)) / 329 sizeof(struct malloc_type *); 330 *mtp = type; 331 #endif 332 uma_zfree_arg(LIST_FIRST(&slab->us_keg->uk_zones), addr, slab); 333 } else { 334 size = slab->us_size; 335 uma_large_free(slab); 336 } 337 mtx_lock(&ksp->ks_mtx); 338 KASSERT(size <= ksp->ks_memuse, 339 ("malloc(9)/free(9) confusion.\n%s", 340 "Probably freeing with wrong type, but maybe not here.")); 341 ksp->ks_memuse -= size; 342 ksp->ks_inuse--; 343 mtx_unlock(&ksp->ks_mtx); 344 } 345 346 /* 347 * realloc: change the size of a memory block 348 */ 349 void * 350 realloc(addr, size, type, flags) 351 void *addr; 352 unsigned long size; 353 struct malloc_type *type; 354 int flags; 355 { 356 uma_slab_t slab; 357 unsigned long alloc; 358 void *newaddr; 359 360 /* realloc(NULL, ...) is equivalent to malloc(...) */ 361 if (addr == NULL) 362 return (malloc(size, type, flags)); 363 364 slab = vtoslab((vm_offset_t)addr & ~(UMA_SLAB_MASK)); 365 366 /* Sanity check */ 367 KASSERT(slab != NULL, 368 ("realloc: address %p out of range", (void *)addr)); 369 370 /* Get the size of the original block */ 371 if (slab->us_keg) 372 alloc = slab->us_keg->uk_size; 373 else 374 alloc = slab->us_size; 375 376 /* Reuse the original block if appropriate */ 377 if (size <= alloc 378 && (size > (alloc >> REALLOC_FRACTION) || alloc == MINALLOCSIZE)) 379 return (addr); 380 381 /* Allocate a new, bigger (or smaller) block */ 382 if ((newaddr = malloc(size, type, flags)) == NULL) 383 return (NULL); 384 385 /* Copy over original contents */ 386 bcopy(addr, newaddr, min(size, alloc)); 387 free(addr, type); 388 return (newaddr); 389 } 390 391 /* 392 * reallocf: same as realloc() but free memory on failure. 
393 */ 394 void * 395 reallocf(addr, size, type, flags) 396 void *addr; 397 unsigned long size; 398 struct malloc_type *type; 399 int flags; 400 { 401 void *mem; 402 403 if ((mem = realloc(addr, size, type, flags)) == NULL) 404 free(addr, type); 405 return (mem); 406 } 407 408 /* 409 * Initialize the kernel memory allocator 410 */ 411 /* ARGSUSED*/ 412 static void 413 kmeminit(dummy) 414 void *dummy; 415 { 416 u_int8_t indx; 417 u_long mem_size; 418 int i; 419 420 mtx_init(&malloc_mtx, "malloc", NULL, MTX_DEF); 421 422 /* 423 * Try to auto-tune the kernel memory size, so that it is 424 * more applicable for a wider range of machine sizes. 425 * On an X86, a VM_KMEM_SIZE_SCALE value of 4 is good, while 426 * a VM_KMEM_SIZE of 12MB is a fair compromise. The 427 * VM_KMEM_SIZE_MAX is dependent on the maximum KVA space 428 * available, and on an X86 with a total KVA space of 256MB, 429 * try to keep VM_KMEM_SIZE_MAX at 80MB or below. 430 * 431 * Note that the kmem_map is also used by the zone allocator, 432 * so make sure that there is enough space. 433 */ 434 vm_kmem_size = VM_KMEM_SIZE + nmbclusters * PAGE_SIZE; 435 mem_size = cnt.v_page_count; 436 437 #if defined(VM_KMEM_SIZE_SCALE) 438 if ((mem_size / VM_KMEM_SIZE_SCALE) > (vm_kmem_size / PAGE_SIZE)) 439 vm_kmem_size = (mem_size / VM_KMEM_SIZE_SCALE) * PAGE_SIZE; 440 #endif 441 442 #if defined(VM_KMEM_SIZE_MAX) 443 if (vm_kmem_size >= VM_KMEM_SIZE_MAX) 444 vm_kmem_size = VM_KMEM_SIZE_MAX; 445 #endif 446 447 /* Allow final override from the kernel environment */ 448 #ifndef BURN_BRIDGES 449 if (TUNABLE_INT_FETCH("kern.vm.kmem.size", &vm_kmem_size) != 0) 450 printf("kern.vm.kmem.size is now called vm.kmem_size!\n"); 451 #endif 452 TUNABLE_INT_FETCH("vm.kmem_size", &vm_kmem_size); 453 454 /* 455 * Limit kmem virtual size to twice the physical memory. 456 * This allows for kmem map sparseness, but limits the size 457 * to something sane. Be careful to not overflow the 32bit 458 * ints while doing the check. 
459 */ 460 if (((vm_kmem_size / 2) / PAGE_SIZE) > cnt.v_page_count) 461 vm_kmem_size = 2 * cnt.v_page_count * PAGE_SIZE; 462 463 /* 464 * Tune settings based on the kernel map's size at this time. 465 */ 466 init_param3(vm_kmem_size / PAGE_SIZE); 467 468 kmem_map = kmem_suballoc(kernel_map, (vm_offset_t *)&kmembase, 469 (vm_offset_t *)&kmemlimit, vm_kmem_size); 470 kmem_map->system_map = 1; 471 472 uma_startup2(); 473 474 for (i = 0, indx = 0; kmemzones[indx].kz_size != 0; indx++) { 475 int size = kmemzones[indx].kz_size; 476 char *name = kmemzones[indx].kz_name; 477 478 kmemzones[indx].kz_zone = uma_zcreate(name, size, 479 #ifdef INVARIANTS 480 mtrash_ctor, mtrash_dtor, mtrash_init, mtrash_fini, 481 #else 482 NULL, NULL, NULL, NULL, 483 #endif 484 UMA_ALIGN_PTR, UMA_ZONE_MALLOC); 485 486 for (;i <= size; i+= KMEM_ZBASE) 487 kmemsize[i >> KMEM_ZSHIFT] = indx; 488 489 } 490 } 491 492 void 493 malloc_init(data) 494 void *data; 495 { 496 struct malloc_type *type = (struct malloc_type *)data; 497 498 mtx_lock(&malloc_mtx); 499 if (type->ks_magic != M_MAGIC) 500 panic("malloc type lacks magic"); 501 502 if (cnt.v_page_count == 0) 503 panic("malloc_init not allowed before vm init"); 504 505 if (type->ks_next != NULL) 506 return; 507 508 type->ks_next = kmemstatistics; 509 kmemstatistics = type; 510 mtx_init(&type->ks_mtx, type->ks_shortdesc, "Malloc Stats", MTX_DEF); 511 mtx_unlock(&malloc_mtx); 512 } 513 514 void 515 malloc_uninit(data) 516 void *data; 517 { 518 struct malloc_type *type = (struct malloc_type *)data; 519 struct malloc_type *t; 520 521 mtx_lock(&malloc_mtx); 522 mtx_lock(&type->ks_mtx); 523 if (type->ks_magic != M_MAGIC) 524 panic("malloc type lacks magic"); 525 526 if (cnt.v_page_count == 0) 527 panic("malloc_uninit not allowed before vm init"); 528 529 if (type == kmemstatistics) 530 kmemstatistics = type->ks_next; 531 else { 532 for (t = kmemstatistics; t->ks_next != NULL; t = t->ks_next) { 533 if (t->ks_next == type) { 534 t->ks_next = type->ks_next; 
535 break; 536 } 537 } 538 } 539 type->ks_next = NULL; 540 mtx_destroy(&type->ks_mtx); 541 mtx_unlock(&malloc_mtx); 542 } 543 544 static int 545 sysctl_kern_malloc(SYSCTL_HANDLER_ARGS) 546 { 547 struct malloc_type *type; 548 int linesize = 128; 549 int curline; 550 int bufsize; 551 int first; 552 int error; 553 char *buf; 554 char *p; 555 int cnt; 556 int len; 557 int i; 558 559 cnt = 0; 560 561 mtx_lock(&malloc_mtx); 562 for (type = kmemstatistics; type != NULL; type = type->ks_next) 563 cnt++; 564 565 mtx_unlock(&malloc_mtx); 566 bufsize = linesize * (cnt + 1); 567 p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO); 568 mtx_lock(&malloc_mtx); 569 570 len = snprintf(p, linesize, 571 "\n Type InUse MemUse HighUse Requests Size(s)\n"); 572 p += len; 573 574 for (type = kmemstatistics; cnt != 0 && type != NULL; 575 type = type->ks_next, cnt--) { 576 if (type->ks_calls == 0) 577 continue; 578 579 curline = linesize - 2; /* Leave room for the \n */ 580 len = snprintf(p, curline, "%13s%6lu%6luK%7luK%9llu", 581 type->ks_shortdesc, 582 type->ks_inuse, 583 (type->ks_memuse + 1023) / 1024, 584 (type->ks_maxused + 1023) / 1024, 585 (long long unsigned)type->ks_calls); 586 curline -= len; 587 p += len; 588 589 first = 1; 590 for (i = 0; i < sizeof(kmemzones) / sizeof(kmemzones[0]) - 1; 591 i++) { 592 if (type->ks_size & (1 << i)) { 593 if (first) 594 len = snprintf(p, curline, " "); 595 else 596 len = snprintf(p, curline, ","); 597 curline -= len; 598 p += len; 599 600 len = snprintf(p, curline, 601 "%s", kmemzones[i].kz_name); 602 curline -= len; 603 p += len; 604 605 first = 0; 606 } 607 } 608 609 len = snprintf(p, 2, "\n"); 610 p += len; 611 } 612 613 mtx_unlock(&malloc_mtx); 614 error = SYSCTL_OUT(req, buf, p - buf); 615 616 free(buf, M_TEMP); 617 return (error); 618 } 619 620 SYSCTL_OID(_kern, OID_AUTO, malloc, CTLTYPE_STRING|CTLFLAG_RD, 621 NULL, 0, sysctl_kern_malloc, "A", "Malloc Stats"); 622 623 #ifdef MALLOC_PROFILE 624 625 static int 626 
sysctl_kern_mprof(SYSCTL_HANDLER_ARGS) 627 { 628 int linesize = 64; 629 uint64_t count; 630 uint64_t waste; 631 uint64_t mem; 632 int bufsize; 633 int error; 634 char *buf; 635 int rsize; 636 int size; 637 char *p; 638 int len; 639 int i; 640 641 bufsize = linesize * (KMEM_ZSIZE + 1); 642 bufsize += 128; /* For the stats line */ 643 bufsize += 128; /* For the banner line */ 644 waste = 0; 645 mem = 0; 646 647 p = buf = (char *)malloc(bufsize, M_TEMP, M_WAITOK|M_ZERO); 648 len = snprintf(p, bufsize, 649 "\n Size Requests Real Size\n"); 650 bufsize -= len; 651 p += len; 652 653 for (i = 0; i < KMEM_ZSIZE; i++) { 654 size = i << KMEM_ZSHIFT; 655 rsize = kmemzones[kmemsize[i]].kz_size; 656 count = (long long unsigned)krequests[i]; 657 658 len = snprintf(p, bufsize, "%6d%28llu%11d\n", 659 size, (unsigned long long)count, rsize); 660 bufsize -= len; 661 p += len; 662 663 if ((rsize * count) > (size * count)) 664 waste += (rsize * count) - (size * count); 665 mem += (rsize * count); 666 } 667 668 len = snprintf(p, bufsize, 669 "\nTotal memory used:\t%30llu\nTotal Memory wasted:\t%30llu\n", 670 (unsigned long long)mem, (unsigned long long)waste); 671 p += len; 672 673 error = SYSCTL_OUT(req, buf, p - buf); 674 675 free(buf, M_TEMP); 676 return (error); 677 } 678 679 SYSCTL_OID(_kern, OID_AUTO, mprof, CTLTYPE_STRING|CTLFLAG_RD, 680 NULL, 0, sysctl_kern_mprof, "A", "Malloc Profiling"); 681 #endif /* MALLOC_PROFILE */ 682