#define	JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/*
 * ctl_mtx protects the following:
 * - ctl_stats.*
 * - opt_prof_active
 */
static malloc_mutex_t	ctl_mtx;
static bool		ctl_initialized;
static uint64_t		ctl_epoch;
static ctl_stats_t	ctl_stats;

/******************************************************************************/
/* Helpers for named and indexed nodes. */

static inline const ctl_named_node_t *
ctl_named_node(const ctl_node_t *node)
{

	return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}

static inline const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, int index)
{
	const ctl_named_node_t *children = ctl_named_node(node->children);

	return (children ? &children[index] : NULL);
}

static inline const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t *node)
{

	return ((node->named == false) ? (const ctl_indexed_node_t *)node :
	    NULL);
}

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

#define	CTL_PROTO(n)						\
static int	n##_ctl(const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen);

#define	INDEX_PROTO(n)						\
static const ctl_named_node_t	*n##_index(const size_t *mib,	\
    size_t miblen, size_t i);

static bool	ctl_arena_init(ctl_arena_stats_t *astats);
static void	ctl_arena_clear(ctl_arena_stats_t *astats);
static void	ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
    arena_t *arena);
static void	ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
    ctl_arena_stats_t *astats);
static void	ctl_arena_refresh(arena_t *arena, unsigned i);
static bool	ctl_grow(void);
static void	ctl_refresh(void);
static bool	ctl_init(void);
static int	ctl_lookup(const char *name, ctl_node_t const **nodesp,
    size_t *mibp, size_t *depthp);

CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
CTL_PROTO(config_debug)
CTL_PROTO(config_dss)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_mremap)
CTL_PROTO(config_munmap)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
CTL_PROTO(config_tcache)
CTL_PROTO(config_tls)
CTL_PROTO(config_utrace)
CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_quarantine)
CTL_PROTO(opt_redzone)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_valgrind)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(arena_i_purge)
static void	arena_purge(unsigned arena_ind);
CTL_PROTO(arena_i_dss)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lrun_i_size)
INDEX_PROTO(arenas_lrun_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_purge)
CTL_PROTO(arenas_extend)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_interval)
CTL_PROTO(stats_chunks_current)
CTL_PROTO(stats_chunks_total)
CTL_PROTO(stats_chunks_high)
CTL_PROTO(stats_huge_allocated)
CTL_PROTO(stats_huge_nmalloc)
CTL_PROTO(stats_huge_ndalloc)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_allocated)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nruns)
CTL_PROTO(stats_arenas_i_bins_j_nreruns)
CTL_PROTO(stats_arenas_i_bins_j_curruns)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_mapped)

/******************************************************************************/
/* mallctl tree. */

/* Maximum tree depth. */
#define	CTL_MAX_DEPTH	6

#define	NAME(n)	{true},	n
#define	CHILD(t, c)						\
	sizeof(c##_node) / sizeof(ctl_##t##_node_t),		\
	(ctl_node_t *)c##_node,					\
	NULL
#define	CTL(c)	0, NULL, c##_ctl

/*
 * Only handles internal indexed nodes, since there are currently no external
 * ones.
 */
#define	INDEX(i)	{false},	i##_index
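/*
 * Illustrative note (explanatory only): with the macros above, a leaf entry
 * such as
 *	{NAME("epoch"),	CTL(epoch)}
 * expands to a ctl_named_node_t whose embedded node.named flag is true, whose
 * nchildren/children are 0/NULL, and whose ctl callback is epoch_ctl (the
 * handler prototyped via CTL_PROTO() earlier).  An interior entry such as
 *	{NAME("thread"),	CHILD(named, thread)}
 * instead records the element count of thread_node[], points children at
 * thread_node, and leaves ctl NULL; a NULL vs. non-NULL ctl pointer is how
 * ctl_lookup() and ctl_bymib() tell interior nodes from terminal ones.
 */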
static const ctl_named_node_t	tcache_node[] = {
	{NAME("enabled"),	CTL(thread_tcache_enabled)},
	{NAME("flush"),		CTL(thread_tcache_flush)}
};

static const ctl_named_node_t	thread_node[] = {
	{NAME("arena"),		CTL(thread_arena)},
	{NAME("allocated"),	CTL(thread_allocated)},
	{NAME("allocatedp"),	CTL(thread_allocatedp)},
	{NAME("deallocated"),	CTL(thread_deallocated)},
	{NAME("deallocatedp"),	CTL(thread_deallocatedp)},
	{NAME("tcache"),	CHILD(named, tcache)}
};

static const ctl_named_node_t	config_node[] = {
	{NAME("debug"),			CTL(config_debug)},
	{NAME("dss"),			CTL(config_dss)},
	{NAME("fill"),			CTL(config_fill)},
	{NAME("lazy_lock"),		CTL(config_lazy_lock)},
	{NAME("mremap"),		CTL(config_mremap)},
	{NAME("munmap"),		CTL(config_munmap)},
	{NAME("prof"),			CTL(config_prof)},
	{NAME("prof_libgcc"),		CTL(config_prof_libgcc)},
	{NAME("prof_libunwind"),	CTL(config_prof_libunwind)},
	{NAME("stats"),			CTL(config_stats)},
	{NAME("tcache"),		CTL(config_tcache)},
	{NAME("tls"),			CTL(config_tls)},
	{NAME("utrace"),		CTL(config_utrace)},
	{NAME("valgrind"),		CTL(config_valgrind)},
	{NAME("xmalloc"),		CTL(config_xmalloc)}
};

static const ctl_named_node_t	opt_node[] = {
	{NAME("abort"),			CTL(opt_abort)},
	{NAME("dss"),			CTL(opt_dss)},
	{NAME("lg_chunk"),		CTL(opt_lg_chunk)},
	{NAME("narenas"),		CTL(opt_narenas)},
	{NAME("lg_dirty_mult"),		CTL(opt_lg_dirty_mult)},
	{NAME("stats_print"),		CTL(opt_stats_print)},
	{NAME("junk"),			CTL(opt_junk)},
	{NAME("zero"),			CTL(opt_zero)},
	{NAME("quarantine"),		CTL(opt_quarantine)},
	{NAME("redzone"),		CTL(opt_redzone)},
	{NAME("utrace"),		CTL(opt_utrace)},
	{NAME("valgrind"),		CTL(opt_valgrind)},
	{NAME("xmalloc"),		CTL(opt_xmalloc)},
	{NAME("tcache"),		CTL(opt_tcache)},
	{NAME("lg_tcache_max"),		CTL(opt_lg_tcache_max)},
	{NAME("prof"),			CTL(opt_prof)},
	{NAME("prof_prefix"),		CTL(opt_prof_prefix)},
	{NAME("prof_active"),		CTL(opt_prof_active)},
	{NAME("lg_prof_sample"),	CTL(opt_lg_prof_sample)},
	{NAME("lg_prof_interval"),	CTL(opt_lg_prof_interval)},
	{NAME("prof_gdump"),		CTL(opt_prof_gdump)},
	{NAME("prof_final"),		CTL(opt_prof_final)},
	{NAME("prof_leak"),		CTL(opt_prof_leak)},
	{NAME("prof_accum"),		CTL(opt_prof_accum)}
};

static const ctl_named_node_t	arena_i_node[] = {
	{NAME("purge"),		CTL(arena_i_purge)},
	{NAME("dss"),		CTL(arena_i_dss)}
};
static const ctl_named_node_t	super_arena_i_node[] = {
	{NAME(""),		CHILD(named, arena_i)}
};

static const ctl_indexed_node_t	arena_node[] = {
	{INDEX(arena_i)}
};

static const ctl_named_node_t	arenas_bin_i_node[] = {
	{NAME("size"),		CTL(arenas_bin_i_size)},
	{NAME("nregs"),		CTL(arenas_bin_i_nregs)},
	{NAME("run_size"),	CTL(arenas_bin_i_run_size)}
};
static const ctl_named_node_t	super_arenas_bin_i_node[] = {
	{NAME(""),		CHILD(named, arenas_bin_i)}
};

static const ctl_indexed_node_t	arenas_bin_node[] = {
	{INDEX(arenas_bin_i)}
};

static const ctl_named_node_t	arenas_lrun_i_node[] = {
	{NAME("size"),		CTL(arenas_lrun_i_size)}
};
static const ctl_named_node_t	super_arenas_lrun_i_node[] = {
	{NAME(""),		CHILD(named, arenas_lrun_i)}
};

static const ctl_indexed_node_t	arenas_lrun_node[] = {
	{INDEX(arenas_lrun_i)}
};
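/*
 * Explanatory note: each indexed subtree is wrapped in a one-element
 * super_*_node[] whose single child carries the empty name "".  The *_index()
 * callbacks return that empty-named node in place of the numeric path
 * element, and its children are the real per-index children, so the generic
 * traversal in ctl_lookup()/ctl_bymib() keeps matching names without
 * special-casing indexed elements.
 */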
static const ctl_named_node_t	arenas_node[] = {
	{NAME("narenas"),	CTL(arenas_narenas)},
	{NAME("initialized"),	CTL(arenas_initialized)},
	{NAME("quantum"),	CTL(arenas_quantum)},
	{NAME("page"),		CTL(arenas_page)},
	{NAME("tcache_max"),	CTL(arenas_tcache_max)},
	{NAME("nbins"),		CTL(arenas_nbins)},
	{NAME("nhbins"),	CTL(arenas_nhbins)},
	{NAME("bin"),		CHILD(indexed, arenas_bin)},
	{NAME("nlruns"),	CTL(arenas_nlruns)},
	{NAME("lrun"),		CHILD(indexed, arenas_lrun)},
	{NAME("purge"),		CTL(arenas_purge)},
	{NAME("extend"),	CTL(arenas_extend)}
};

static const ctl_named_node_t	prof_node[] = {
	{NAME("active"),	CTL(prof_active)},
	{NAME("dump"),		CTL(prof_dump)},
	{NAME("interval"),	CTL(prof_interval)}
};

static const ctl_named_node_t	stats_chunks_node[] = {
	{NAME("current"),	CTL(stats_chunks_current)},
	{NAME("total"),		CTL(stats_chunks_total)},
	{NAME("high"),		CTL(stats_chunks_high)}
};

static const ctl_named_node_t	stats_huge_node[] = {
	{NAME("allocated"),	CTL(stats_huge_allocated)},
	{NAME("nmalloc"),	CTL(stats_huge_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_huge_ndalloc)}
};

static const ctl_named_node_t	stats_arenas_i_small_node[] = {
	{NAME("allocated"),	CTL(stats_arenas_i_small_allocated)},
	{NAME("nmalloc"),	CTL(stats_arenas_i_small_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_small_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_small_nrequests)}
};

static const ctl_named_node_t	stats_arenas_i_large_node[] = {
	{NAME("allocated"),	CTL(stats_arenas_i_large_allocated)},
	{NAME("nmalloc"),	CTL(stats_arenas_i_large_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_large_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_large_nrequests)}
};

static const ctl_named_node_t	stats_arenas_i_bins_j_node[] = {
	{NAME("allocated"),	CTL(stats_arenas_i_bins_j_allocated)},
	{NAME("nmalloc"),	CTL(stats_arenas_i_bins_j_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_bins_j_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_bins_j_nrequests)},
	{NAME("nfills"),	CTL(stats_arenas_i_bins_j_nfills)},
	{NAME("nflushes"),	CTL(stats_arenas_i_bins_j_nflushes)},
	{NAME("nruns"),		CTL(stats_arenas_i_bins_j_nruns)},
	{NAME("nreruns"),	CTL(stats_arenas_i_bins_j_nreruns)},
	{NAME("curruns"),	CTL(stats_arenas_i_bins_j_curruns)}
};
static const ctl_named_node_t	super_stats_arenas_i_bins_j_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i_bins_j)}
};

static const ctl_indexed_node_t	stats_arenas_i_bins_node[] = {
	{INDEX(stats_arenas_i_bins_j)}
};

static const ctl_named_node_t	stats_arenas_i_lruns_j_node[] = {
	{NAME("nmalloc"),	CTL(stats_arenas_i_lruns_j_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_lruns_j_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_lruns_j_nrequests)},
	{NAME("curruns"),	CTL(stats_arenas_i_lruns_j_curruns)}
};
static const ctl_named_node_t	super_stats_arenas_i_lruns_j_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i_lruns_j)}
};

static const ctl_indexed_node_t	stats_arenas_i_lruns_node[] = {
	{INDEX(stats_arenas_i_lruns_j)}
};
{NAME("nmadvise"), CTL(stats_arenas_i_nmadvise)}, 383 {NAME("purged"), CTL(stats_arenas_i_purged)}, 384 {NAME("small"), CHILD(named, stats_arenas_i_small)}, 385 {NAME("large"), CHILD(named, stats_arenas_i_large)}, 386 {NAME("bins"), CHILD(indexed, stats_arenas_i_bins)}, 387 {NAME("lruns"), CHILD(indexed, stats_arenas_i_lruns)} 388 }; 389 static const ctl_named_node_t super_stats_arenas_i_node[] = { 390 {NAME(""), CHILD(named, stats_arenas_i)} 391 }; 392 393 static const ctl_indexed_node_t stats_arenas_node[] = { 394 {INDEX(stats_arenas_i)} 395 }; 396 397 static const ctl_named_node_t stats_node[] = { 398 {NAME("cactive"), CTL(stats_cactive)}, 399 {NAME("allocated"), CTL(stats_allocated)}, 400 {NAME("active"), CTL(stats_active)}, 401 {NAME("mapped"), CTL(stats_mapped)}, 402 {NAME("chunks"), CHILD(named, stats_chunks)}, 403 {NAME("huge"), CHILD(named, stats_huge)}, 404 {NAME("arenas"), CHILD(indexed, stats_arenas)} 405 }; 406 407 static const ctl_named_node_t root_node[] = { 408 {NAME("version"), CTL(version)}, 409 {NAME("epoch"), CTL(epoch)}, 410 {NAME("thread"), CHILD(named, thread)}, 411 {NAME("config"), CHILD(named, config)}, 412 {NAME("opt"), CHILD(named, opt)}, 413 {NAME("arena"), CHILD(indexed, arena)}, 414 {NAME("arenas"), CHILD(named, arenas)}, 415 {NAME("prof"), CHILD(named, prof)}, 416 {NAME("stats"), CHILD(named, stats)} 417 }; 418 static const ctl_named_node_t super_root_node[] = { 419 {NAME(""), CHILD(named, root)} 420 }; 421 422 #undef NAME 423 #undef CHILD 424 #undef CTL 425 #undef INDEX 426 427 /******************************************************************************/ 428 429 static bool 430 ctl_arena_init(ctl_arena_stats_t *astats) 431 { 432 433 if (astats->lstats == NULL) { 434 astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses * 435 sizeof(malloc_large_stats_t)); 436 if (astats->lstats == NULL) 437 return (true); 438 } 439 440 return (false); 441 } 442 443 static void 444 ctl_arena_clear(ctl_arena_stats_t *astats) 445 { 446 447 astats->dss = dss_prec_names[dss_prec_limit]; 448 astats->pactive = 0; 449 astats->pdirty = 0; 450 if (config_stats) { 451 memset(&astats->astats, 0, sizeof(arena_stats_t)); 452 astats->allocated_small = 0; 453 astats->nmalloc_small = 0; 454 astats->ndalloc_small = 0; 455 astats->nrequests_small = 0; 456 memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t)); 457 memset(astats->lstats, 0, nlclasses * 458 sizeof(malloc_large_stats_t)); 459 } 460 } 461 462 static void 463 ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena) 464 { 465 unsigned i; 466 467 arena_stats_merge(arena, &cstats->dss, &cstats->pactive, 468 &cstats->pdirty, &cstats->astats, cstats->bstats, cstats->lstats); 469 470 for (i = 0; i < NBINS; i++) { 471 cstats->allocated_small += cstats->bstats[i].allocated; 472 cstats->nmalloc_small += cstats->bstats[i].nmalloc; 473 cstats->ndalloc_small += cstats->bstats[i].ndalloc; 474 cstats->nrequests_small += cstats->bstats[i].nrequests; 475 } 476 } 477 478 static void 479 ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats) 480 { 481 unsigned i; 482 483 sstats->pactive += astats->pactive; 484 sstats->pdirty += astats->pdirty; 485 486 sstats->astats.mapped += astats->astats.mapped; 487 sstats->astats.npurge += astats->astats.npurge; 488 sstats->astats.nmadvise += astats->astats.nmadvise; 489 sstats->astats.purged += astats->astats.purged; 490 491 sstats->allocated_small += astats->allocated_small; 492 sstats->nmalloc_small += astats->nmalloc_small; 493 sstats->ndalloc_small += 
static void
ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
{
	unsigned i;

	sstats->pactive += astats->pactive;
	sstats->pdirty += astats->pdirty;

	sstats->astats.mapped += astats->astats.mapped;
	sstats->astats.npurge += astats->astats.npurge;
	sstats->astats.nmadvise += astats->astats.nmadvise;
	sstats->astats.purged += astats->astats.purged;

	sstats->allocated_small += astats->allocated_small;
	sstats->nmalloc_small += astats->nmalloc_small;
	sstats->ndalloc_small += astats->ndalloc_small;
	sstats->nrequests_small += astats->nrequests_small;

	sstats->astats.allocated_large += astats->astats.allocated_large;
	sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
	sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
	sstats->astats.nrequests_large += astats->astats.nrequests_large;

	for (i = 0; i < nlclasses; i++) {
		sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
		sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
		sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
		sstats->lstats[i].curruns += astats->lstats[i].curruns;
	}

	for (i = 0; i < NBINS; i++) {
		sstats->bstats[i].allocated += astats->bstats[i].allocated;
		sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
		sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
		sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
		if (config_tcache) {
			sstats->bstats[i].nfills += astats->bstats[i].nfills;
			sstats->bstats[i].nflushes +=
			    astats->bstats[i].nflushes;
		}
		sstats->bstats[i].nruns += astats->bstats[i].nruns;
		sstats->bstats[i].reruns += astats->bstats[i].reruns;
		sstats->bstats[i].curruns += astats->bstats[i].curruns;
	}
}

static void
ctl_arena_refresh(arena_t *arena, unsigned i)
{
	ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
	ctl_arena_stats_t *sstats = &ctl_stats.arenas[ctl_stats.narenas];

	ctl_arena_clear(astats);

	sstats->nthreads += astats->nthreads;
	if (config_stats) {
		ctl_arena_stats_amerge(astats, arena);
		/* Merge into sum stats as well. */
		ctl_arena_stats_smerge(sstats, astats);
	} else {
		astats->pactive += arena->nactive;
		astats->pdirty += arena->ndirty;
		/* Merge into sum stats as well. */
		sstats->pactive += arena->nactive;
		sstats->pdirty += arena->ndirty;
	}
}

static bool
ctl_grow(void)
{
	ctl_arena_stats_t *astats;
	arena_t **tarenas;

	/* Allocate extended arena stats and arenas arrays. */
	astats = (ctl_arena_stats_t *)imalloc((ctl_stats.narenas + 2) *
	    sizeof(ctl_arena_stats_t));
	if (astats == NULL)
		return (true);
	tarenas = (arena_t **)imalloc((ctl_stats.narenas + 1) *
	    sizeof(arena_t *));
	if (tarenas == NULL) {
		idalloc(astats);
		return (true);
	}

	/* Initialize the new astats element. */
	memcpy(astats, ctl_stats.arenas, (ctl_stats.narenas + 1) *
	    sizeof(ctl_arena_stats_t));
	memset(&astats[ctl_stats.narenas + 1], 0, sizeof(ctl_arena_stats_t));
	if (ctl_arena_init(&astats[ctl_stats.narenas + 1])) {
		idalloc(tarenas);
		idalloc(astats);
		return (true);
	}
	/* Swap merged stats to their new location. */
	{
		ctl_arena_stats_t tstats;
		memcpy(&tstats, &astats[ctl_stats.narenas],
		    sizeof(ctl_arena_stats_t));
		memcpy(&astats[ctl_stats.narenas],
		    &astats[ctl_stats.narenas + 1], sizeof(ctl_arena_stats_t));
		memcpy(&astats[ctl_stats.narenas + 1], &tstats,
		    sizeof(ctl_arena_stats_t));
	}
	/* Initialize the new arenas element. */
	tarenas[ctl_stats.narenas] = NULL;
	{
		arena_t **arenas_old = arenas;
		/*
		 * Swap extended arenas array into place.  Although ctl_mtx
		 * protects this function from other threads extending the
		 * array, it does not protect from other threads mutating it
		 * (i.e. initializing arenas and setting array elements to
		 * point to them).  Therefore, array copying must happen under
		 * the protection of arenas_lock.
		 */
		malloc_mutex_lock(&arenas_lock);
		arenas = tarenas;
		memcpy(arenas, arenas_old, ctl_stats.narenas *
		    sizeof(arena_t *));
		narenas_total++;
		arenas_extend(narenas_total - 1);
		malloc_mutex_unlock(&arenas_lock);
		/*
		 * Deallocate arenas_old only if it came from imalloc() (not
		 * base_alloc()).
		 */
		if (ctl_stats.narenas != narenas_auto)
			idalloc(arenas_old);
	}
	ctl_stats.arenas = astats;
	ctl_stats.narenas++;

	return (false);
}

static void
ctl_refresh(void)
{
	unsigned i;
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);

	if (config_stats) {
		malloc_mutex_lock(&chunks_mtx);
		ctl_stats.chunks.current = stats_chunks.curchunks;
		ctl_stats.chunks.total = stats_chunks.nchunks;
		ctl_stats.chunks.high = stats_chunks.highchunks;
		malloc_mutex_unlock(&chunks_mtx);

		malloc_mutex_lock(&huge_mtx);
		ctl_stats.huge.allocated = huge_allocated;
		ctl_stats.huge.nmalloc = huge_nmalloc;
		ctl_stats.huge.ndalloc = huge_ndalloc;
		malloc_mutex_unlock(&huge_mtx);
	}

	/*
	 * Clear sum stats, since they will be merged into by
	 * ctl_arena_refresh().
	 */
	ctl_stats.arenas[ctl_stats.narenas].nthreads = 0;
	ctl_arena_clear(&ctl_stats.arenas[ctl_stats.narenas]);

	malloc_mutex_lock(&arenas_lock);
	memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
	for (i = 0; i < ctl_stats.narenas; i++) {
		if (arenas[i] != NULL)
			ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
		else
			ctl_stats.arenas[i].nthreads = 0;
	}
	malloc_mutex_unlock(&arenas_lock);
	for (i = 0; i < ctl_stats.narenas; i++) {
		bool initialized = (tarenas[i] != NULL);

		ctl_stats.arenas[i].initialized = initialized;
		if (initialized)
			ctl_arena_refresh(tarenas[i], i);
	}

	if (config_stats) {
		ctl_stats.allocated =
		    ctl_stats.arenas[ctl_stats.narenas].allocated_small
		    + ctl_stats.arenas[ctl_stats.narenas].astats.allocated_large
		    + ctl_stats.huge.allocated;
		ctl_stats.active =
		    (ctl_stats.arenas[ctl_stats.narenas].pactive << LG_PAGE)
		    + ctl_stats.huge.allocated;
		ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
	}

	ctl_epoch++;
}
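/*
 * Note: ctl_stats.arenas[] is sized narenas + 1; the extra element at index
 * ctl_stats.narenas holds across-arena totals that ctl_arena_refresh() merges
 * into.  ctl_refresh() then derives the top-level "stats.allocated",
 * "stats.active" and "stats.mapped" values from that summary element (plus
 * the huge/chunk counters), so in effect those mallctls only change when a
 * new epoch is written.
 */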
static bool
ctl_init(void)
{
	bool ret;

	malloc_mutex_lock(&ctl_mtx);
	if (ctl_initialized == false) {
		/*
		 * Allocate space for one extra arena stats element, which
		 * contains summed stats across all arenas.
		 */
		assert(narenas_auto == narenas_total_get());
		ctl_stats.narenas = narenas_auto;
		ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
		    (ctl_stats.narenas + 1) * sizeof(ctl_arena_stats_t));
		if (ctl_stats.arenas == NULL) {
			ret = true;
			goto label_return;
		}
		memset(ctl_stats.arenas, 0, (ctl_stats.narenas + 1) *
		    sizeof(ctl_arena_stats_t));

		/*
		 * Initialize all stats structures, regardless of whether they
		 * ever get used.  Lazy initialization would allow errors to
		 * cause inconsistent state to be viewable by the application.
		 */
		if (config_stats) {
			unsigned i;
			for (i = 0; i <= ctl_stats.narenas; i++) {
				if (ctl_arena_init(&ctl_stats.arenas[i])) {
					ret = true;
					goto label_return;
				}
			}
		}
		ctl_stats.arenas[ctl_stats.narenas].initialized = true;

		ctl_epoch = 0;
		ctl_refresh();
		ctl_initialized = true;
	}

	ret = false;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

static int
ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
    size_t *depthp)
{
	int ret;
	const char *elm, *tdot, *dot;
	size_t elen, i, j;
	const ctl_named_node_t *node;

	elm = name;
	/* Equivalent to strchrnul(). */
	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	if (elen == 0) {
		ret = ENOENT;
		goto label_return;
	}
	node = super_root_node;
	for (i = 0; i < *depthp; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			const ctl_named_node_t *pnode = node;

			/* Children are named. */
			for (j = 0; j < node->nchildren; j++) {
				const ctl_named_node_t *child =
				    ctl_named_children(node, j);
				if (strlen(child->name) == elen &&
				    strncmp(elm, child->name, elen) == 0) {
					node = child;
					if (nodesp != NULL)
						nodesp[i] =
						    (const ctl_node_t *)node;
					mibp[i] = j;
					break;
				}
			}
			if (node == pnode) {
				ret = ENOENT;
				goto label_return;
			}
		} else {
			uintmax_t index;
			const ctl_indexed_node_t *inode;

			/* Children are indexed. */
			index = malloc_strtoumax(elm, NULL, 10);
			if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
				ret = ENOENT;
				goto label_return;
			}

			inode = ctl_indexed_node(node->children);
			node = inode->index(mibp, *depthp, (size_t)index);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}

			if (nodesp != NULL)
				nodesp[i] = (const ctl_node_t *)node;
			mibp[i] = (size_t)index;
		}

		if (node->ctl != NULL) {
			/* Terminal node. */
			if (*dot != '\0') {
				/*
				 * The name contains more elements than are
				 * in this path through the tree.
				 */
				ret = ENOENT;
				goto label_return;
			}
			/* Complete lookup successful. */
			*depthp = i + 1;
			break;
		}

		/* Update elm. */
		if (*dot == '\0') {
			/* No more elements. */
			ret = ENOENT;
			goto label_return;
		}
		elm = &dot[1];
		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
		    strchr(elm, '\0');
		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	}

	ret = 0;
label_return:
	return (ret);
}
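/*
 * Example (illustrative): with the tree laid out above, looking up
 * "thread.tcache.enabled" walks root_node["thread"] -> thread_node["tcache"]
 * -> tcache_node["enabled"] and yields the MIB {2, 5, 0} with depth 3.  For
 * an indexed element such as the "0" in "stats.arenas.0.mapped", the numeric
 * component is parsed with malloc_strtoumax(), recorded verbatim in the MIB,
 * and the corresponding *_index() callback supplies the node used to match
 * the remaining names.
 */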
int
ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	int ret;
	size_t depth;
	ctl_node_t const *nodes[CTL_MAX_DEPTH];
	size_t mib[CTL_MAX_DEPTH];
	const ctl_named_node_t *node;

	if (ctl_initialized == false && ctl_init()) {
		ret = EAGAIN;
		goto label_return;
	}

	depth = CTL_MAX_DEPTH;
	ret = ctl_lookup(name, nodes, mib, &depth);
	if (ret != 0)
		goto label_return;

	node = ctl_named_node(nodes[depth-1]);
	if (node != NULL && node->ctl)
		ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
	else {
		/* The name refers to a partial path through the ctl tree. */
		ret = ENOENT;
	}

label_return:
	return(ret);
}

int
ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
{
	int ret;

	if (ctl_initialized == false && ctl_init()) {
		ret = EAGAIN;
		goto label_return;
	}

	ret = ctl_lookup(name, NULL, mibp, miblenp);
label_return:
	return(ret);
}

int
ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	const ctl_named_node_t *node;
	size_t i;

	if (ctl_initialized == false && ctl_init()) {
		ret = EAGAIN;
		goto label_return;
	}

	/* Iterate down the tree. */
	node = super_root_node;
	for (i = 0; i < miblen; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			/* Children are named. */
			if (node->nchildren <= mib[i]) {
				ret = ENOENT;
				goto label_return;
			}
			node = ctl_named_children(node, mib[i]);
		} else {
			const ctl_indexed_node_t *inode;

			/* Indexed element. */
			inode = ctl_indexed_node(node->children);
			node = inode->index(mib, miblen, mib[i]);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}
		}
	}

	/* Call the ctl function. */
	if (node && node->ctl)
		ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
	else {
		/* Partial MIB. */
		ret = ENOENT;
	}

label_return:
	return(ret);
}

bool
ctl_boot(void)
{

	if (malloc_mutex_init(&ctl_mtx))
		return (true);

	ctl_initialized = false;

	return (false);
}

void
ctl_prefork(void)
{

	malloc_mutex_lock(&ctl_mtx);
}

void
ctl_postfork_parent(void)
{

	malloc_mutex_postfork_parent(&ctl_mtx);
}

void
ctl_postfork_child(void)
{

	malloc_mutex_postfork_child(&ctl_mtx);
}

/******************************************************************************/
/* *_ctl() functions. */

#define	READONLY()	do {					\
	if (newp != NULL || newlen != 0) {			\
		ret = EPERM;					\
		goto label_return;				\
	}							\
} while (0)

#define	WRITEONLY()	do {					\
	if (oldp != NULL || oldlenp != NULL) {			\
		ret = EPERM;					\
		goto label_return;				\
	}							\
} while (0)

#define	READ(v, t)	do {					\
	if (oldp != NULL && oldlenp != NULL) {			\
		if (*oldlenp != sizeof(t)) {			\
			size_t	copylen = (sizeof(t) <= *oldlenp)	\
			    ? sizeof(t) : *oldlenp;		\
			memcpy(oldp, (void *)&(v), copylen);	\
			ret = EINVAL;				\
			goto label_return;			\
		} else						\
			*(t *)oldp = (v);			\
	}							\
} while (0)

#define	WRITE(v, t)	do {					\
	if (newp != NULL) {					\
		if (newlen != sizeof(t)) {			\
			ret = EINVAL;				\
			goto label_return;			\
		}						\
		(v) = *(t *)newp;				\
	}							\
} while (0)
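/*
 * Note on the macros above: READONLY()/WRITEONLY() reject a request that
 * supplies the disallowed direction with EPERM.  READ() only copies a value
 * out when both oldp and oldlenp are provided; on a size mismatch it still
 * copies as many bytes as fit and then fails with EINVAL, whereas WRITE()
 * requires newlen to match the expected type exactly.
 */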
/*
 * There's a lot of code duplication in the following macros due to limitations
 * in how nested cpp macros are expanded.
 */
#define	CTL_RO_CLGEN(c, l, n, v, t)				\
static int							\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)					\
{								\
	int ret;						\
	t oldval;						\
								\
	if ((c) == false)					\
		return (ENOENT);				\
	if (l)							\
		malloc_mutex_lock(&ctl_mtx);			\
	READONLY();						\
	oldval = (v);						\
	READ(oldval, t);					\
								\
	ret = 0;						\
label_return:							\
	if (l)							\
		malloc_mutex_unlock(&ctl_mtx);			\
	return (ret);						\
}

#define	CTL_RO_CGEN(c, n, v, t)					\
static int							\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)					\
{								\
	int ret;						\
	t oldval;						\
								\
	if ((c) == false)					\
		return (ENOENT);				\
	malloc_mutex_lock(&ctl_mtx);				\
	READONLY();						\
	oldval = (v);						\
	READ(oldval, t);					\
								\
	ret = 0;						\
label_return:							\
	malloc_mutex_unlock(&ctl_mtx);				\
	return (ret);						\
}

#define	CTL_RO_GEN(n, v, t)					\
static int							\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)					\
{								\
	int ret;						\
	t oldval;						\
								\
	malloc_mutex_lock(&ctl_mtx);				\
	READONLY();						\
	oldval = (v);						\
	READ(oldval, t);					\
								\
	ret = 0;						\
label_return:							\
	malloc_mutex_unlock(&ctl_mtx);				\
	return (ret);						\
}

/*
 * ctl_mtx is not acquired, under the assumption that no pertinent data will
 * mutate during the call.
 */
#define	CTL_RO_NL_CGEN(c, n, v, t)				\
static int							\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)					\
{								\
	int ret;						\
	t oldval;						\
								\
	if ((c) == false)					\
		return (ENOENT);				\
	READONLY();						\
	oldval = (v);						\
	READ(oldval, t);					\
								\
	ret = 0;						\
label_return:							\
	return (ret);						\
}

#define	CTL_RO_NL_GEN(n, v, t)					\
static int							\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)					\
{								\
	int ret;						\
	t oldval;						\
								\
	READONLY();						\
	oldval = (v);						\
	READ(oldval, t);					\
								\
	ret = 0;						\
label_return:							\
	return (ret);						\
}

#define	CTL_RO_BOOL_CONFIG_GEN(n)				\
static int							\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)					\
{								\
	int ret;						\
	bool oldval;						\
								\
	READONLY();						\
	oldval = n;						\
	READ(oldval, bool);					\
								\
	ret = 0;						\
label_return:							\
	return (ret);						\
}

CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)

static int
epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	UNUSED uint64_t newval;

	malloc_mutex_lock(&ctl_mtx);
	WRITE(newval, uint64_t);
	if (newp != NULL)
		ctl_refresh();
	READ(ctl_epoch, uint64_t);

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
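/*
 * Usage sketch (illustrative application code, using the public mallctl()
 * entry point): the stats.* mallctls report the snapshot taken by
 * ctl_refresh(), so a caller typically bumps the epoch before reading them:
 *
 *	uint64_t epoch = 1;
 *	size_t allocated, sz = sizeof(size_t);
 *	mallctl("epoch", NULL, NULL, &epoch, sizeof(epoch));
 *	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
 */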
static int
thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	bool oldval;

	if (config_tcache == false)
		return (ENOENT);

	oldval = tcache_enabled_get();
	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		tcache_enabled_set(*(bool *)newp);
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return (ret);
}

static int
thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;

	if (config_tcache == false)
		return (ENOENT);

	READONLY();
	WRITEONLY();

	tcache_flush();

	ret = 0;
label_return:
	return (ret);
}

static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	unsigned newind, oldind;

	malloc_mutex_lock(&ctl_mtx);
	newind = oldind = choose_arena(NULL)->ind;
	WRITE(newind, unsigned);
	READ(oldind, unsigned);
	if (newind != oldind) {
		arena_t *arena;

		if (newind >= ctl_stats.narenas) {
			/* New arena index is out of range. */
			ret = EFAULT;
			goto label_return;
		}

		/* Initialize arena if necessary. */
		malloc_mutex_lock(&arenas_lock);
		if ((arena = arenas[newind]) == NULL && (arena =
		    arenas_extend(newind)) == NULL) {
			malloc_mutex_unlock(&arenas_lock);
			ret = EAGAIN;
			goto label_return;
		}
		assert(arena == arenas[newind]);
		arenas[oldind]->nthreads--;
		arenas[newind]->nthreads++;
		malloc_mutex_unlock(&arenas_lock);

		/* Set new arena association. */
		if (config_tcache) {
			tcache_t *tcache;
			if ((uintptr_t)(tcache = *tcache_tsd_get()) >
			    (uintptr_t)TCACHE_STATE_MAX) {
				tcache_arena_dissociate(tcache);
				tcache_arena_associate(tcache, arena);
			}
		}
		arenas_tsd_set(&arena);
	}

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

CTL_RO_NL_CGEN(config_stats, thread_allocated,
    thread_allocated_tsd_get()->allocated, uint64_t)
CTL_RO_NL_CGEN(config_stats, thread_allocatedp,
    &thread_allocated_tsd_get()->allocated, uint64_t *)
CTL_RO_NL_CGEN(config_stats, thread_deallocated,
    thread_allocated_tsd_get()->deallocated, uint64_t)
CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
    &thread_allocated_tsd_get()->deallocated, uint64_t *)
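/*
 * Usage sketch (illustrative): a thread can query or change its arena
 * binding, and can sample its lifetime allocation counters cheaply by
 * fetching the thread.allocatedp pointer once and dereferencing it later:
 *
 *	unsigned arena;
 *	uint64_t *allocatedp;
 *	size_t sz = sizeof(arena);
 *	mallctl("thread.arena", &arena, &sz, NULL, 0);
 *	sz = sizeof(allocatedp);
 *	mallctl("thread.allocatedp", &allocatedp, &sz, NULL, 0);
 *	... (*allocatedp) now tracks this thread's cumulative allocations ...
 */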
/******************************************************************************/

CTL_RO_BOOL_CONFIG_GEN(config_debug)
CTL_RO_BOOL_CONFIG_GEN(config_dss)
CTL_RO_BOOL_CONFIG_GEN(config_fill)
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
CTL_RO_BOOL_CONFIG_GEN(config_mremap)
CTL_RO_BOOL_CONFIG_GEN(config_munmap)
CTL_RO_BOOL_CONFIG_GEN(config_prof)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
CTL_RO_BOOL_CONFIG_GEN(config_stats)
CTL_RO_BOOL_CONFIG_GEN(config_tcache)
CTL_RO_BOOL_CONFIG_GEN(config_tls)
CTL_RO_BOOL_CONFIG_GEN(config_utrace)
CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)

/******************************************************************************/

CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)

/******************************************************************************/

/* ctl_mtx must be held during execution of this function. */
static void
arena_purge(unsigned arena_ind)
{
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_stats.narenas);

	malloc_mutex_lock(&arenas_lock);
	memcpy(tarenas, arenas, sizeof(arena_t *) * ctl_stats.narenas);
	malloc_mutex_unlock(&arenas_lock);

	if (arena_ind == ctl_stats.narenas) {
		unsigned i;
		for (i = 0; i < ctl_stats.narenas; i++) {
			if (tarenas[i] != NULL)
				arena_purge_all(tarenas[i]);
		}
	} else {
		assert(arena_ind < ctl_stats.narenas);
		if (tarenas[arena_ind] != NULL)
			arena_purge_all(tarenas[arena_ind]);
	}
}

static int
arena_i_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;

	READONLY();
	WRITEONLY();
	malloc_mutex_lock(&ctl_mtx);
	arena_purge(mib[1]);
	malloc_mutex_unlock(&ctl_mtx);

	ret = 0;
label_return:
	return (ret);
}

static int
arena_i_dss_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret, i;
	bool match, err;
	const char *dss;
	unsigned arena_ind = mib[1];
	dss_prec_t dss_prec_old = dss_prec_limit;
	dss_prec_t dss_prec = dss_prec_limit;

	malloc_mutex_lock(&ctl_mtx);
	WRITE(dss, const char *);
	match = false;
	for (i = 0; i < dss_prec_limit; i++) {
		if (strcmp(dss_prec_names[i], dss) == 0) {
			dss_prec = i;
			match = true;
			break;
		}
	}
	if (match == false) {
		ret = EINVAL;
		goto label_return;
	}

	if (arena_ind < ctl_stats.narenas) {
		arena_t *arena = arenas[arena_ind];
		if (arena != NULL) {
			dss_prec_old = arena_dss_prec_get(arena);
			arena_dss_prec_set(arena, dss_prec);
			err = false;
		} else
			err = true;
	} else {
		dss_prec_old = chunk_dss_prec_get();
		err = chunk_dss_prec_set(dss_prec);
	}
	dss = dss_prec_names[dss_prec_old];
	READ(dss, const char *);
	if (err) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
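/*
 * Usage sketch (illustrative): the arena.<i> nodes act on the arena selected
 * by the MIB index, and arena_purge() treats i == narenas as "all arenas".
 * For example, making arena 0 prefer sbrk-backed memory:
 *
 *	const char *dss = "primary";
 *	mallctl("arena.0.dss", NULL, NULL, &dss, sizeof(dss));
 *
 * and purging dirty pages from every arena by MIB:
 *
 *	size_t mib[3], miblen = 3, sz;
 *	unsigned narenas;
 *	sz = sizeof(narenas);
 *	mallctl("arenas.narenas", &narenas, &sz, NULL, 0);
 *	mallctlnametomib("arena.0.purge", mib, &miblen);
 *	mib[1] = narenas;
 *	mallctlbymib(mib, miblen, NULL, NULL, NULL, 0);
 */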
static const ctl_named_node_t *
arena_i_index(const size_t *mib, size_t miblen, size_t i)
{
	const ctl_named_node_t * ret;

	malloc_mutex_lock(&ctl_mtx);
	if (i > ctl_stats.narenas) {
		ret = NULL;
		goto label_return;
	}

	ret = super_arena_i_node;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

/******************************************************************************/

CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
static const ctl_named_node_t *
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
{

	if (i > NBINS)
		return (NULL);
	return (super_arenas_bin_i_node);
}

CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
static const ctl_named_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{

	if (i > nlclasses)
		return (NULL);
	return (super_arenas_lrun_i_node);
}

static int
arenas_narenas_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	unsigned narenas;

	malloc_mutex_lock(&ctl_mtx);
	READONLY();
	if (*oldlenp != sizeof(unsigned)) {
		ret = EINVAL;
		goto label_return;
	}
	narenas = ctl_stats.narenas;
	READ(narenas, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
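/*
 * Usage sketch (illustrative): "arenas.initialized" (handled just below)
 * expects a buffer of narenas booleans, so callers query "arenas.narenas"
 * first:
 *
 *	unsigned narenas;
 *	size_t sz = sizeof(narenas);
 *	mallctl("arenas.narenas", &narenas, &sz, NULL, 0);
 *	bool *initialized = malloc(narenas * sizeof(bool));
 *	sz = narenas * sizeof(bool);
 *	mallctl("arenas.initialized", initialized, &sz, NULL, 0);
 */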
static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	unsigned nread, i;

	malloc_mutex_lock(&ctl_mtx);
	READONLY();
	if (*oldlenp != ctl_stats.narenas * sizeof(bool)) {
		ret = EINVAL;
		nread = (*oldlenp < ctl_stats.narenas * sizeof(bool))
		    ? (*oldlenp / sizeof(bool)) : ctl_stats.narenas;
	} else {
		ret = 0;
		nread = ctl_stats.narenas;
	}

	for (i = 0; i < nread; i++)
		((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;

label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)

static int
arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	unsigned arena_ind;

	malloc_mutex_lock(&ctl_mtx);
	WRITEONLY();
	arena_ind = UINT_MAX;
	WRITE(arena_ind, unsigned);
	if (newp != NULL && arena_ind >= ctl_stats.narenas)
		ret = EFAULT;
	else {
		if (arena_ind == UINT_MAX)
			arena_ind = ctl_stats.narenas;
		arena_purge(arena_ind);
		ret = 0;
	}

label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

static int
arenas_extend_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	unsigned narenas;

	malloc_mutex_lock(&ctl_mtx);
	READONLY();
	if (ctl_grow()) {
		ret = EAGAIN;
		goto label_return;
	}
	narenas = ctl_stats.narenas - 1;
	READ(narenas, unsigned);

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}
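/*
 * Usage sketch (illustrative): "arenas.extend" both grows the arena set and
 * reports the index of the new arena through the old-value buffer; a thread
 * can then bind itself to that arena by writing the index to "thread.arena":
 *
 *	unsigned new_arena;
 *	size_t sz = sizeof(new_arena);
 *	mallctl("arenas.extend", &new_arena, &sz, NULL, 0);
 *	mallctl("thread.arena", NULL, NULL, &new_arena, sizeof(new_arena));
 */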
/******************************************************************************/

static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	bool oldval;

	if (config_prof == false)
		return (ENOENT);

	malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
	oldval = opt_prof_active;
	if (newp != NULL) {
		/*
		 * The memory barriers will tend to make opt_prof_active
		 * propagate faster on systems with weak memory ordering.
		 */
		mb_write();
		WRITE(opt_prof_active, bool);
		mb_write();
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

static int
prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	const char *filename = NULL;

	if (config_prof == false)
		return (ENOENT);

	WRITEONLY();
	WRITE(filename, const char *);

	if (prof_mdump(filename)) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return (ret);
}

CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)

/******************************************************************************/

CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
    size_t)
CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
    ctl_stats.arenas[mib[2]].allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
    ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
    ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
    ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
    ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
    ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
    ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
    ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)
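/*
 * Note: the generated stats.arenas.<i>.* readers above and below index
 * ctl_stats directly with MIB components (mib[2] selects the arena, mib[4]
 * the bin or large-run class) and hold ctl_mtx while copying, so they report
 * values from the snapshot produced by the most recent epoch update.
 */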
static const ctl_named_node_t *
stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
{

	if (j > NBINS)
		return (NULL);
	return (super_stats_arenas_i_bins_j_node);
}

CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)

static const ctl_named_node_t *
stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
{

	if (j > nlclasses)
		return (NULL);
	return (super_stats_arenas_i_lruns_j_node);
}

CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_dss, ctl_stats.arenas[mib[2]].dss, const char *)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
    ctl_stats.arenas[mib[2]].astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
    ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
    ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
    ctl_stats.arenas[mib[2]].astats.purged, uint64_t)

static const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
	const ctl_named_node_t * ret;

	malloc_mutex_lock(&ctl_mtx);
	if (i > ctl_stats.narenas || ctl_stats.arenas[i].initialized == false) {
		ret = NULL;
		goto label_return;
	}

	ret = super_stats_arenas_i_node;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)