/*
 * xref: /freebsd/contrib/jemalloc/src/ctl.c
 * (revision a10cee30c94cf5944826d2a495e9cdf339dfbcc8)
 */
#define	JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/*
 * ctl_mtx protects the following:
 * - ctl_stats.*
 * - opt_prof_active
 */
static malloc_mutex_t	ctl_mtx;
static bool		ctl_initialized;
static uint64_t		ctl_epoch;
static ctl_stats_t	ctl_stats;

/******************************************************************************/
/* Helpers for named and indexed nodes. */

static inline const ctl_named_node_t *
ctl_named_node(const ctl_node_t *node)
{

	return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}

static inline const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, int index)
{
	const ctl_named_node_t *children = ctl_named_node(node->children);

	return (children ? &children[index] : NULL);
}

static inline const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t *node)
{

	return ((node->named == false) ? (const ctl_indexed_node_t *)node :
	    NULL);
}

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

#define	CTL_PROTO(n)							\
static int	n##_ctl(const size_t *mib, size_t miblen, void *oldp,	\
    size_t *oldlenp, void *newp, size_t newlen);

#define	INDEX_PROTO(n)							\
const ctl_named_node_t	*n##_index(const size_t *mib, size_t miblen,	\
    size_t i);

static bool	ctl_arena_init(ctl_arena_stats_t *astats);
static void	ctl_arena_clear(ctl_arena_stats_t *astats);
static void	ctl_arena_stats_amerge(ctl_arena_stats_t *cstats,
    arena_t *arena);
static void	ctl_arena_stats_smerge(ctl_arena_stats_t *sstats,
    ctl_arena_stats_t *astats);
static void	ctl_arena_refresh(arena_t *arena, unsigned i);
static void	ctl_refresh(void);
static bool	ctl_init(void);
static int	ctl_lookup(const char *name, ctl_node_t const **nodesp,
    size_t *mibp, size_t *depthp);

CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
CTL_PROTO(config_debug)
CTL_PROTO(config_dss)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_mremap)
CTL_PROTO(config_munmap)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
CTL_PROTO(config_tcache)
CTL_PROTO(config_tls)
CTL_PROTO(config_utrace)
CTL_PROTO(config_valgrind)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_lg_chunk)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_lg_dirty_mult)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_quarantine)
CTL_PROTO(opt_redzone)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_valgrind)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_run_size)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lrun_i_size)
INDEX_PROTO(arenas_lrun_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_initialized)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlruns)
CTL_PROTO(arenas_purge)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_interval)
CTL_PROTO(stats_chunks_current)
CTL_PROTO(stats_chunks_total)
CTL_PROTO(stats_chunks_high)
CTL_PROTO(stats_huge_allocated)
CTL_PROTO(stats_huge_nmalloc)
CTL_PROTO(stats_huge_ndalloc)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_allocated)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nruns)
CTL_PROTO(stats_arenas_i_bins_j_nreruns)
CTL_PROTO(stats_arenas_i_bins_j_curruns)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lruns_j_nmalloc)
CTL_PROTO(stats_arenas_i_lruns_j_ndalloc)
CTL_PROTO(stats_arenas_i_lruns_j_nrequests)
CTL_PROTO(stats_arenas_i_lruns_j_curruns)
INDEX_PROTO(stats_arenas_i_lruns_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_npurge)
CTL_PROTO(stats_arenas_i_nmadvise)
CTL_PROTO(stats_arenas_i_purged)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_cactive)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_mapped)

/******************************************************************************/
/* mallctl tree. */

/* Maximum tree depth. */
#define	CTL_MAX_DEPTH	6

#define	NAME(n)	{true},	n
#define	CHILD(t, c)							\
	sizeof(c##_node) / sizeof(ctl_##t##_node_t),			\
	(ctl_node_t *)c##_node,						\
	NULL
#define	CTL(c)	0, NULL, c##_ctl

/*
 * Only handles internal indexed nodes, since there are currently no external
 * ones.
 */
#define	INDEX(i)	{false},	i##_index

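/*
 * For illustration (not compiled): given the macros above, an entry such as
 * {NAME("epoch"), CTL(epoch)} in root_node below expands to roughly
 *
 *	{{true}, "epoch", 0, NULL, epoch_ctl}
 *
 * i.e. a named leaf with no children whose handler is epoch_ctl, while
 * {NAME("thread"), CHILD(named, thread)} produces a named interior node whose
 * children array is thread_node.
 */
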
static const ctl_named_node_t	tcache_node[] = {
	{NAME("enabled"),	CTL(thread_tcache_enabled)},
	{NAME("flush"),		CTL(thread_tcache_flush)}
};

static const ctl_named_node_t	thread_node[] = {
	{NAME("arena"),		CTL(thread_arena)},
	{NAME("allocated"),	CTL(thread_allocated)},
	{NAME("allocatedp"),	CTL(thread_allocatedp)},
	{NAME("deallocated"),	CTL(thread_deallocated)},
	{NAME("deallocatedp"),	CTL(thread_deallocatedp)},
	{NAME("tcache"),	CHILD(named, tcache)}
};

static const ctl_named_node_t	config_node[] = {
	{NAME("debug"),			CTL(config_debug)},
	{NAME("dss"),			CTL(config_dss)},
	{NAME("fill"),			CTL(config_fill)},
	{NAME("lazy_lock"),		CTL(config_lazy_lock)},
	{NAME("mremap"),		CTL(config_mremap)},
	{NAME("munmap"),		CTL(config_munmap)},
	{NAME("prof"),			CTL(config_prof)},
	{NAME("prof_libgcc"),		CTL(config_prof_libgcc)},
	{NAME("prof_libunwind"),	CTL(config_prof_libunwind)},
	{NAME("stats"),			CTL(config_stats)},
	{NAME("tcache"),		CTL(config_tcache)},
	{NAME("tls"),			CTL(config_tls)},
	{NAME("utrace"),		CTL(config_utrace)},
	{NAME("valgrind"),		CTL(config_valgrind)},
	{NAME("xmalloc"),		CTL(config_xmalloc)}
};

static const ctl_named_node_t opt_node[] = {
	{NAME("abort"),			CTL(opt_abort)},
	{NAME("lg_chunk"),		CTL(opt_lg_chunk)},
	{NAME("narenas"),		CTL(opt_narenas)},
	{NAME("lg_dirty_mult"),		CTL(opt_lg_dirty_mult)},
	{NAME("stats_print"),		CTL(opt_stats_print)},
	{NAME("junk"),			CTL(opt_junk)},
	{NAME("zero"),			CTL(opt_zero)},
	{NAME("quarantine"),		CTL(opt_quarantine)},
	{NAME("redzone"),		CTL(opt_redzone)},
	{NAME("utrace"),		CTL(opt_utrace)},
	{NAME("valgrind"),		CTL(opt_valgrind)},
	{NAME("xmalloc"),		CTL(opt_xmalloc)},
	{NAME("tcache"),		CTL(opt_tcache)},
	{NAME("lg_tcache_max"),		CTL(opt_lg_tcache_max)},
	{NAME("prof"),			CTL(opt_prof)},
	{NAME("prof_prefix"),		CTL(opt_prof_prefix)},
	{NAME("prof_active"),		CTL(opt_prof_active)},
	{NAME("lg_prof_sample"),	CTL(opt_lg_prof_sample)},
	{NAME("lg_prof_interval"),	CTL(opt_lg_prof_interval)},
	{NAME("prof_gdump"),		CTL(opt_prof_gdump)},
	{NAME("prof_final"),		CTL(opt_prof_final)},
	{NAME("prof_leak"),		CTL(opt_prof_leak)},
	{NAME("prof_accum"),		CTL(opt_prof_accum)}
};

static const ctl_named_node_t arenas_bin_i_node[] = {
	{NAME("size"),			CTL(arenas_bin_i_size)},
	{NAME("nregs"),			CTL(arenas_bin_i_nregs)},
	{NAME("run_size"),		CTL(arenas_bin_i_run_size)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
	{NAME(""),			CHILD(named, arenas_bin_i)}
};

static const ctl_indexed_node_t arenas_bin_node[] = {
	{INDEX(arenas_bin_i)}
};

static const ctl_named_node_t arenas_lrun_i_node[] = {
	{NAME("size"),			CTL(arenas_lrun_i_size)}
};
static const ctl_named_node_t super_arenas_lrun_i_node[] = {
	{NAME(""),			CHILD(named, arenas_lrun_i)}
};

static const ctl_indexed_node_t arenas_lrun_node[] = {
	{INDEX(arenas_lrun_i)}
};

static const ctl_named_node_t arenas_node[] = {
	{NAME("narenas"),		CTL(arenas_narenas)},
	{NAME("initialized"),		CTL(arenas_initialized)},
	{NAME("quantum"),		CTL(arenas_quantum)},
	{NAME("page"),			CTL(arenas_page)},
	{NAME("tcache_max"),		CTL(arenas_tcache_max)},
	{NAME("nbins"),			CTL(arenas_nbins)},
	{NAME("nhbins"),		CTL(arenas_nhbins)},
	{NAME("bin"),			CHILD(indexed, arenas_bin)},
	{NAME("nlruns"),		CTL(arenas_nlruns)},
	{NAME("lrun"),			CHILD(indexed, arenas_lrun)},
	{NAME("purge"),			CTL(arenas_purge)}
};

static const ctl_named_node_t	prof_node[] = {
	{NAME("active"),	CTL(prof_active)},
	{NAME("dump"),		CTL(prof_dump)},
	{NAME("interval"),	CTL(prof_interval)}
};

static const ctl_named_node_t stats_chunks_node[] = {
	{NAME("current"),		CTL(stats_chunks_current)},
	{NAME("total"),			CTL(stats_chunks_total)},
	{NAME("high"),			CTL(stats_chunks_high)}
};

static const ctl_named_node_t stats_huge_node[] = {
	{NAME("allocated"),		CTL(stats_huge_allocated)},
	{NAME("nmalloc"),		CTL(stats_huge_nmalloc)},
	{NAME("ndalloc"),		CTL(stats_huge_ndalloc)}
};

static const ctl_named_node_t stats_arenas_i_small_node[] = {
	{NAME("allocated"),		CTL(stats_arenas_i_small_allocated)},
	{NAME("nmalloc"),		CTL(stats_arenas_i_small_nmalloc)},
	{NAME("ndalloc"),		CTL(stats_arenas_i_small_ndalloc)},
	{NAME("nrequests"),		CTL(stats_arenas_i_small_nrequests)}
};

static const ctl_named_node_t stats_arenas_i_large_node[] = {
	{NAME("allocated"),		CTL(stats_arenas_i_large_allocated)},
	{NAME("nmalloc"),		CTL(stats_arenas_i_large_nmalloc)},
	{NAME("ndalloc"),		CTL(stats_arenas_i_large_ndalloc)},
	{NAME("nrequests"),		CTL(stats_arenas_i_large_nrequests)}
};

static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
	{NAME("allocated"),		CTL(stats_arenas_i_bins_j_allocated)},
	{NAME("nmalloc"),		CTL(stats_arenas_i_bins_j_nmalloc)},
	{NAME("ndalloc"),		CTL(stats_arenas_i_bins_j_ndalloc)},
	{NAME("nrequests"),		CTL(stats_arenas_i_bins_j_nrequests)},
	{NAME("nfills"),		CTL(stats_arenas_i_bins_j_nfills)},
	{NAME("nflushes"),		CTL(stats_arenas_i_bins_j_nflushes)},
	{NAME("nruns"),			CTL(stats_arenas_i_bins_j_nruns)},
	{NAME("nreruns"),		CTL(stats_arenas_i_bins_j_nreruns)},
	{NAME("curruns"),		CTL(stats_arenas_i_bins_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
	{NAME(""),			CHILD(named, stats_arenas_i_bins_j)}
};

static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
	{INDEX(stats_arenas_i_bins_j)}
};

static const ctl_named_node_t stats_arenas_i_lruns_j_node[] = {
	{NAME("nmalloc"),		CTL(stats_arenas_i_lruns_j_nmalloc)},
	{NAME("ndalloc"),		CTL(stats_arenas_i_lruns_j_ndalloc)},
	{NAME("nrequests"),		CTL(stats_arenas_i_lruns_j_nrequests)},
	{NAME("curruns"),		CTL(stats_arenas_i_lruns_j_curruns)}
};
static const ctl_named_node_t super_stats_arenas_i_lruns_j_node[] = {
	{NAME(""),			CHILD(named, stats_arenas_i_lruns_j)}
};

static const ctl_indexed_node_t stats_arenas_i_lruns_node[] = {
	{INDEX(stats_arenas_i_lruns_j)}
};

static const ctl_named_node_t stats_arenas_i_node[] = {
	{NAME("nthreads"),		CTL(stats_arenas_i_nthreads)},
	{NAME("pactive"),		CTL(stats_arenas_i_pactive)},
	{NAME("pdirty"),		CTL(stats_arenas_i_pdirty)},
	{NAME("mapped"),		CTL(stats_arenas_i_mapped)},
	{NAME("npurge"),		CTL(stats_arenas_i_npurge)},
	{NAME("nmadvise"),		CTL(stats_arenas_i_nmadvise)},
	{NAME("purged"),		CTL(stats_arenas_i_purged)},
	{NAME("small"),			CHILD(named, stats_arenas_i_small)},
	{NAME("large"),			CHILD(named, stats_arenas_i_large)},
	{NAME("bins"),			CHILD(indexed, stats_arenas_i_bins)},
	{NAME("lruns"),			CHILD(indexed, stats_arenas_i_lruns)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
	{NAME(""),			CHILD(named, stats_arenas_i)}
};

static const ctl_indexed_node_t stats_arenas_node[] = {
	{INDEX(stats_arenas_i)}
};

static const ctl_named_node_t stats_node[] = {
	{NAME("cactive"),		CTL(stats_cactive)},
	{NAME("allocated"),		CTL(stats_allocated)},
	{NAME("active"),		CTL(stats_active)},
	{NAME("mapped"),		CTL(stats_mapped)},
	{NAME("chunks"),		CHILD(named, stats_chunks)},
	{NAME("huge"),			CHILD(named, stats_huge)},
	{NAME("arenas"),		CHILD(indexed, stats_arenas)}
};

static const ctl_named_node_t	root_node[] = {
	{NAME("version"),	CTL(version)},
	{NAME("epoch"),		CTL(epoch)},
	{NAME("thread"),	CHILD(named, thread)},
	{NAME("config"),	CHILD(named, config)},
	{NAME("opt"),		CHILD(named, opt)},
	{NAME("arenas"),	CHILD(named, arenas)},
	{NAME("prof"),		CHILD(named, prof)},
	{NAME("stats"),		CHILD(named, stats)}
};
static const ctl_named_node_t super_root_node[] = {
	{NAME(""),		CHILD(named, root)}
};

#undef NAME
#undef CHILD
#undef CTL
#undef INDEX

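/*
 * Example (a sketch): looking up "thread.tcache.enabled" starts at
 * super_root_node, whose children are the root_node entries; "thread"
 * matches index 2 there, "tcache" matches index 5 in thread_node, and
 * "enabled" matches the terminal index 0 in tcache_node, yielding the MIB
 * {2, 5, 0} and the handler thread_tcache_enabled_ctl.
 */
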
/******************************************************************************/

static bool
ctl_arena_init(ctl_arena_stats_t *astats)
{

	if (astats->lstats == NULL) {
		astats->lstats = (malloc_large_stats_t *)base_alloc(nlclasses *
		    sizeof(malloc_large_stats_t));
		if (astats->lstats == NULL)
			return (true);
	}

	return (false);
}

static void
ctl_arena_clear(ctl_arena_stats_t *astats)
{

	astats->pactive = 0;
	astats->pdirty = 0;
	if (config_stats) {
		memset(&astats->astats, 0, sizeof(arena_stats_t));
		astats->allocated_small = 0;
		astats->nmalloc_small = 0;
		astats->ndalloc_small = 0;
		astats->nrequests_small = 0;
		memset(astats->bstats, 0, NBINS * sizeof(malloc_bin_stats_t));
		memset(astats->lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
	}
}

static void
ctl_arena_stats_amerge(ctl_arena_stats_t *cstats, arena_t *arena)
{
	unsigned i;

	arena_stats_merge(arena, &cstats->pactive, &cstats->pdirty,
	    &cstats->astats, cstats->bstats, cstats->lstats);

	for (i = 0; i < NBINS; i++) {
		cstats->allocated_small += cstats->bstats[i].allocated;
		cstats->nmalloc_small += cstats->bstats[i].nmalloc;
		cstats->ndalloc_small += cstats->bstats[i].ndalloc;
		cstats->nrequests_small += cstats->bstats[i].nrequests;
	}
}

static void
ctl_arena_stats_smerge(ctl_arena_stats_t *sstats, ctl_arena_stats_t *astats)
{
	unsigned i;

	sstats->pactive += astats->pactive;
	sstats->pdirty += astats->pdirty;

	sstats->astats.mapped += astats->astats.mapped;
	sstats->astats.npurge += astats->astats.npurge;
	sstats->astats.nmadvise += astats->astats.nmadvise;
	sstats->astats.purged += astats->astats.purged;

	sstats->allocated_small += astats->allocated_small;
	sstats->nmalloc_small += astats->nmalloc_small;
	sstats->ndalloc_small += astats->ndalloc_small;
	sstats->nrequests_small += astats->nrequests_small;

	sstats->astats.allocated_large += astats->astats.allocated_large;
	sstats->astats.nmalloc_large += astats->astats.nmalloc_large;
	sstats->astats.ndalloc_large += astats->astats.ndalloc_large;
	sstats->astats.nrequests_large += astats->astats.nrequests_large;

	for (i = 0; i < nlclasses; i++) {
		sstats->lstats[i].nmalloc += astats->lstats[i].nmalloc;
		sstats->lstats[i].ndalloc += astats->lstats[i].ndalloc;
		sstats->lstats[i].nrequests += astats->lstats[i].nrequests;
		sstats->lstats[i].curruns += astats->lstats[i].curruns;
	}

	for (i = 0; i < NBINS; i++) {
		sstats->bstats[i].allocated += astats->bstats[i].allocated;
		sstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
		sstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
		sstats->bstats[i].nrequests += astats->bstats[i].nrequests;
		if (config_tcache) {
			sstats->bstats[i].nfills += astats->bstats[i].nfills;
			sstats->bstats[i].nflushes +=
			    astats->bstats[i].nflushes;
		}
		sstats->bstats[i].nruns += astats->bstats[i].nruns;
		sstats->bstats[i].reruns += astats->bstats[i].reruns;
		sstats->bstats[i].curruns += astats->bstats[i].curruns;
	}
}

static void
ctl_arena_refresh(arena_t *arena, unsigned i)
{
	ctl_arena_stats_t *astats = &ctl_stats.arenas[i];
	ctl_arena_stats_t *sstats = &ctl_stats.arenas[narenas];

	ctl_arena_clear(astats);

	sstats->nthreads += astats->nthreads;
	if (config_stats) {
		ctl_arena_stats_amerge(astats, arena);
		/* Merge into sum stats as well. */
		ctl_arena_stats_smerge(sstats, astats);
	} else {
		astats->pactive += arena->nactive;
		astats->pdirty += arena->ndirty;
		/* Merge into sum stats as well. */
		sstats->pactive += arena->nactive;
		sstats->pdirty += arena->ndirty;
	}
}

static void
ctl_refresh(void)
{
	unsigned i;
	VARIABLE_ARRAY(arena_t *, tarenas, narenas);

	if (config_stats) {
		malloc_mutex_lock(&chunks_mtx);
		ctl_stats.chunks.current = stats_chunks.curchunks;
		ctl_stats.chunks.total = stats_chunks.nchunks;
		ctl_stats.chunks.high = stats_chunks.highchunks;
		malloc_mutex_unlock(&chunks_mtx);

		malloc_mutex_lock(&huge_mtx);
		ctl_stats.huge.allocated = huge_allocated;
		ctl_stats.huge.nmalloc = huge_nmalloc;
		ctl_stats.huge.ndalloc = huge_ndalloc;
		malloc_mutex_unlock(&huge_mtx);
	}

	/*
	 * Clear the sum stats, since ctl_arena_refresh() merges into them
	 * below.
	 */
	ctl_stats.arenas[narenas].nthreads = 0;
	ctl_arena_clear(&ctl_stats.arenas[narenas]);

	malloc_mutex_lock(&arenas_lock);
	memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			ctl_stats.arenas[i].nthreads = arenas[i]->nthreads;
		else
			ctl_stats.arenas[i].nthreads = 0;
	}
	malloc_mutex_unlock(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		bool initialized = (tarenas[i] != NULL);

		ctl_stats.arenas[i].initialized = initialized;
		if (initialized)
			ctl_arena_refresh(tarenas[i], i);
	}

	if (config_stats) {
		ctl_stats.allocated = ctl_stats.arenas[narenas].allocated_small
		    + ctl_stats.arenas[narenas].astats.allocated_large
		    + ctl_stats.huge.allocated;
		ctl_stats.active = (ctl_stats.arenas[narenas].pactive <<
		    LG_PAGE) + ctl_stats.huge.allocated;
		ctl_stats.mapped = (ctl_stats.chunks.current << opt_lg_chunk);
	}

	ctl_epoch++;
}

static bool
ctl_init(void)
{
	bool ret;

	malloc_mutex_lock(&ctl_mtx);
	if (ctl_initialized == false) {
		/*
		 * Allocate space for one extra arena stats element, which
		 * contains summed stats across all arenas.
		 */
		ctl_stats.arenas = (ctl_arena_stats_t *)base_alloc(
		    (narenas + 1) * sizeof(ctl_arena_stats_t));
		if (ctl_stats.arenas == NULL) {
			ret = true;
			goto label_return;
		}
		memset(ctl_stats.arenas, 0, (narenas + 1) *
		    sizeof(ctl_arena_stats_t));

		/*
		 * Initialize all stats structures, regardless of whether they
		 * ever get used.  Lazy initialization would allow errors to
		 * cause inconsistent state to be viewable by the application.
		 */
		if (config_stats) {
			unsigned i;
			for (i = 0; i <= narenas; i++) {
				if (ctl_arena_init(&ctl_stats.arenas[i])) {
					ret = true;
					goto label_return;
				}
			}
		}
		ctl_stats.arenas[narenas].initialized = true;

		ctl_epoch = 0;
		ctl_refresh();
		ctl_initialized = true;
	}

	ret = false;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

static int
ctl_lookup(const char *name, ctl_node_t const **nodesp, size_t *mibp,
    size_t *depthp)
{
	int ret;
	const char *elm, *tdot, *dot;
	size_t elen, i, j;
	const ctl_named_node_t *node;

	elm = name;
	/* Equivalent to strchrnul(). */
	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	if (elen == 0) {
		ret = ENOENT;
		goto label_return;
	}
	node = super_root_node;
	for (i = 0; i < *depthp; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			const ctl_named_node_t *pnode = node;

			/* Children are named. */
			for (j = 0; j < node->nchildren; j++) {
				const ctl_named_node_t *child =
				    ctl_named_children(node, j);
				if (strlen(child->name) == elen &&
				    strncmp(elm, child->name, elen) == 0) {
					node = child;
					if (nodesp != NULL)
						nodesp[i] =
						    (const ctl_node_t *)node;
					mibp[i] = j;
					break;
				}
			}
			if (node == pnode) {
				ret = ENOENT;
				goto label_return;
			}
		} else {
			uintmax_t index;
			const ctl_indexed_node_t *inode;

			/* Children are indexed. */
			index = malloc_strtoumax(elm, NULL, 10);
			if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
				ret = ENOENT;
				goto label_return;
			}

			inode = ctl_indexed_node(node->children);
			node = inode->index(mibp, *depthp, (size_t)index);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}

			if (nodesp != NULL)
				nodesp[i] = (const ctl_node_t *)node;
			mibp[i] = (size_t)index;
		}

		if (node->ctl != NULL) {
			/* Terminal node. */
			if (*dot != '\0') {
				/*
				 * The name contains more elements than are
				 * in this path through the tree.
				 */
				ret = ENOENT;
				goto label_return;
			}
			/* Complete lookup successful. */
			*depthp = i + 1;
			break;
		}

		/* Update elm. */
		if (*dot == '\0') {
			/* No more elements. */
			ret = ENOENT;
			goto label_return;
		}
		elm = &dot[1];
		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
		    strchr(elm, '\0');
		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	}

	ret = 0;
label_return:
	return (ret);
}

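/*
 * Example (a sketch): ctl_lookup("stats.arenas.0.mapped", ...) matches
 * "stats" at index 7 in root_node and "arenas" at index 6 in stats_node,
 * parses "0" through the indexed stats_arenas_node, and matches "mapped" at
 * index 3 in stats_arenas_i_node, producing the MIB {7, 6, 0, 3} at depth 4.
 */
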
int
ctl_byname(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	int ret;
	size_t depth;
	ctl_node_t const *nodes[CTL_MAX_DEPTH];
	size_t mib[CTL_MAX_DEPTH];
	const ctl_named_node_t *node;

	if (ctl_initialized == false && ctl_init()) {
		ret = EAGAIN;
		goto label_return;
	}

	depth = CTL_MAX_DEPTH;
	ret = ctl_lookup(name, nodes, mib, &depth);
	if (ret != 0)
		goto label_return;

	node = ctl_named_node(nodes[depth-1]);
	if (node != NULL && node->ctl)
		ret = node->ctl(mib, depth, oldp, oldlenp, newp, newlen);
	else {
		/* The name refers to a partial path through the ctl tree. */
		ret = ENOENT;
	}

label_return:
	return (ret);
}

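/*
 * Example (a sketch): the public mallctl() wrapper forwards to ctl_byname(),
 * so an application reads a statistic by name as follows, after which
 * allocated holds the merged total:
 *
 *	size_t allocated, sz = sizeof(allocated);
 *	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
 */
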
int
ctl_nametomib(const char *name, size_t *mibp, size_t *miblenp)
{
	int ret;

	if (ctl_initialized == false && ctl_init()) {
		ret = EAGAIN;
		goto label_return;
	}

	ret = ctl_lookup(name, NULL, mibp, miblenp);
label_return:
	return (ret);
}

int
ctl_bymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	const ctl_named_node_t *node;
	size_t i;

	if (ctl_initialized == false && ctl_init()) {
		ret = EAGAIN;
		goto label_return;
	}

	/* Iterate down the tree. */
	node = super_root_node;
	for (i = 0; i < miblen; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			/* Children are named. */
			if (node->nchildren <= mib[i]) {
				ret = ENOENT;
				goto label_return;
			}
			node = ctl_named_children(node, mib[i]);
		} else {
			const ctl_indexed_node_t *inode;

			/* Indexed element. */
			inode = ctl_indexed_node(node->children);
			node = inode->index(mib, miblen, mib[i]);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}
		}
	}

	/* Call the ctl function. */
	if (node && node->ctl)
		ret = node->ctl(mib, miblen, oldp, oldlenp, newp, newlen);
	else {
		/* Partial MIB. */
		ret = ENOENT;
	}

label_return:
	return (ret);
}

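/*
 * Example (a sketch): repeated queries can avoid re-parsing the name by
 * translating it to a MIB once with mallctlnametomib() and then querying
 * with mallctlbymib(); those entry points land in ctl_nametomib() and
 * ctl_bymib() above:
 *
 *	size_t mib[4];
 *	size_t miblen = sizeof(mib) / sizeof(size_t);
 *	size_t mapped, sz = sizeof(mapped);
 *	mallctlnametomib("stats.arenas.0.mapped", mib, &miblen);
 *	mib[2] = 1;
 *	mallctlbymib(mib, miblen, &mapped, &sz, NULL, 0);
 *
 * Overwriting mib[2] reselects the arena index without another string parse.
 */
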
bool
ctl_boot(void)
{

	if (malloc_mutex_init(&ctl_mtx))
		return (true);

	ctl_initialized = false;

	return (false);
}

/******************************************************************************/
/* *_ctl() functions. */

#define	READONLY()	do {						\
	if (newp != NULL || newlen != 0) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

#define	WRITEONLY()	do {						\
	if (oldp != NULL || oldlenp != NULL) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

#define	READ(v, t)	do {						\
	if (oldp != NULL && oldlenp != NULL) {				\
		if (*oldlenp != sizeof(t)) {				\
			size_t	copylen = (sizeof(t) <= *oldlenp)	\
			    ? sizeof(t) : *oldlenp;			\
			memcpy(oldp, (void *)&v, copylen);		\
			ret = EINVAL;					\
			goto label_return;				\
		} else							\
			*(t *)oldp = v;					\
	}								\
} while (0)

#define	WRITE(v, t)	do {						\
	if (newp != NULL) {						\
		if (newlen != sizeof(t)) {				\
			ret = EINVAL;					\
			goto label_return;				\
		}							\
		v = *(t *)newp;						\
	}								\
} while (0)

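/*
 * For illustration (not compiled; some_setting stands in for any mutable
 * option): READONLY() rejects calls that supply a new value, WRITEONLY()
 * rejects calls that request the old value, READ() copies a value out
 * (returning EINVAL after a partial copy on size mismatch), and WRITE()
 * copies a value in.  A minimal read-write handler body looks like:
 *
 *	malloc_mutex_lock(&ctl_mtx);
 *	oldval = some_setting;
 *	WRITE(some_setting, bool);
 *	READ(oldval, bool);
 *	ret = 0;
 * label_return:
 *	malloc_mutex_unlock(&ctl_mtx);
 *	return (ret);
 */
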
/*
 * There's a lot of code duplication in the following macros due to limitations
 * in how nested cpp macros are expanded.
 */
#define	CTL_RO_CLGEN(c, l, n, v, t)					\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	if ((c) == false)						\
		return (ENOENT);					\
	if (l)								\
		malloc_mutex_lock(&ctl_mtx);				\
	READONLY();							\
	oldval = v;							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	if (l)								\
		malloc_mutex_unlock(&ctl_mtx);				\
	return (ret);							\
}

#define	CTL_RO_CGEN(c, n, v, t)						\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	if ((c) == false)						\
		return (ENOENT);					\
	malloc_mutex_lock(&ctl_mtx);					\
	READONLY();							\
	oldval = v;							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	malloc_mutex_unlock(&ctl_mtx);					\
	return (ret);							\
}

#define	CTL_RO_GEN(n, v, t)						\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	malloc_mutex_lock(&ctl_mtx);					\
	READONLY();							\
	oldval = v;							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	malloc_mutex_unlock(&ctl_mtx);					\
	return (ret);							\
}

/*
 * ctl_mtx is not acquired, under the assumption that no pertinent data will
 * mutate during the call.
 */
#define	CTL_RO_NL_CGEN(c, n, v, t)					\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	if ((c) == false)						\
		return (ENOENT);					\
	READONLY();							\
	oldval = v;							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return (ret);							\
}

#define	CTL_RO_NL_GEN(n, v, t)						\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	t oldval;							\
									\
	READONLY();							\
	oldval = v;							\
	READ(oldval, t);						\
									\
	ret = 0;							\
label_return:								\
	return (ret);							\
}

#define	CTL_RO_BOOL_CONFIG_GEN(n)					\
static int								\
n##_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,	\
    void *newp, size_t newlen)						\
{									\
	int ret;							\
	bool oldval;							\
									\
	READONLY();							\
	oldval = n;							\
	READ(oldval, bool);						\
									\
	ret = 0;							\
label_return:								\
	return (ret);							\
}

CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)

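/*
 * For illustration (not compiled): the CTL_RO_NL_GEN(version, ...) line
 * above expands to a version_ctl() function that rejects writes via
 * READONLY() and copies JEMALLOC_VERSION out via READ(), taking no lock
 * since the value can never change.
 */
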
static int
epoch_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	uint64_t newval;

	malloc_mutex_lock(&ctl_mtx);
	WRITE(newval, uint64_t);
	if (newp != NULL)
		ctl_refresh();
	READ(ctl_epoch, uint64_t);

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

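/*
 * Example (a sketch): all of the stats_* controls report the snapshot taken
 * by the most recent ctl_refresh(), so a caller bumps the epoch before
 * reading them; writing any value triggers the refresh, and the read returns
 * the new epoch:
 *
 *	uint64_t epoch = 1;
 *	size_t esz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &esz, &epoch, esz);
 */
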
static int
thread_tcache_enabled_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	bool oldval;

	if (config_tcache == false)
		return (ENOENT);

	oldval = tcache_enabled_get();
	if (newp != NULL) {
		if (newlen != sizeof(bool)) {
			ret = EINVAL;
			goto label_return;
		}
		tcache_enabled_set(*(bool *)newp);
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	return (ret);
}

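/*
 * Example (a sketch): a thread disables its own tcache by writing false, and
 * flushes it via the write-only "thread.tcache.flush" control below:
 *
 *	bool enabled = false;
 *	mallctl("thread.tcache.enabled", NULL, NULL, &enabled, sizeof(bool));
 *	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
 */
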
static int
thread_tcache_flush_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;

	if (config_tcache == false)
		return (ENOENT);

	READONLY();
	WRITEONLY();

	tcache_flush();

	ret = 0;
label_return:
	return (ret);
}

static int
thread_arena_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	unsigned newind, oldind;

	newind = oldind = choose_arena(NULL)->ind;
	WRITE(newind, unsigned);
	READ(oldind, unsigned);
	if (newind != oldind) {
		arena_t *arena;

		if (newind >= narenas) {
			/* New arena index is out of range. */
			ret = EFAULT;
			goto label_return;
		}

		/* Initialize arena if necessary. */
		malloc_mutex_lock(&arenas_lock);
		if ((arena = arenas[newind]) == NULL && (arena =
		    arenas_extend(newind)) == NULL) {
			malloc_mutex_unlock(&arenas_lock);
			ret = EAGAIN;
			goto label_return;
		}
		assert(arena == arenas[newind]);
		arenas[oldind]->nthreads--;
		arenas[newind]->nthreads++;
		malloc_mutex_unlock(&arenas_lock);

		/* Set new arena association. */
		if (config_tcache) {
			tcache_t *tcache;
			if ((uintptr_t)(tcache = *tcache_tsd_get()) >
			    (uintptr_t)TCACHE_STATE_MAX) {
				tcache_arena_dissociate(tcache);
				tcache_arena_associate(tcache, arena);
			}
		}
		arenas_tsd_set(&arena);
	}

	ret = 0;
label_return:
	return (ret);
}

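/*
 * Example (a sketch): a thread migrates itself to arena 0 and reads back its
 * previous arena index in the same call:
 *
 *	unsigned arena = 0, old;
 *	size_t usz = sizeof(unsigned);
 *	mallctl("thread.arena", &old, &usz, &arena, usz);
 */
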
CTL_RO_NL_CGEN(config_stats, thread_allocated,
    thread_allocated_tsd_get()->allocated, uint64_t)
CTL_RO_NL_CGEN(config_stats, thread_allocatedp,
    &thread_allocated_tsd_get()->allocated, uint64_t *)
CTL_RO_NL_CGEN(config_stats, thread_deallocated,
    thread_allocated_tsd_get()->deallocated, uint64_t)
CTL_RO_NL_CGEN(config_stats, thread_deallocatedp,
    &thread_allocated_tsd_get()->deallocated, uint64_t *)

/******************************************************************************/

CTL_RO_BOOL_CONFIG_GEN(config_debug)
CTL_RO_BOOL_CONFIG_GEN(config_dss)
CTL_RO_BOOL_CONFIG_GEN(config_fill)
CTL_RO_BOOL_CONFIG_GEN(config_lazy_lock)
CTL_RO_BOOL_CONFIG_GEN(config_mremap)
CTL_RO_BOOL_CONFIG_GEN(config_munmap)
CTL_RO_BOOL_CONFIG_GEN(config_prof)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libgcc)
CTL_RO_BOOL_CONFIG_GEN(config_prof_libunwind)
CTL_RO_BOOL_CONFIG_GEN(config_stats)
CTL_RO_BOOL_CONFIG_GEN(config_tcache)
CTL_RO_BOOL_CONFIG_GEN(config_tls)
CTL_RO_BOOL_CONFIG_GEN(config_utrace)
CTL_RO_BOOL_CONFIG_GEN(config_valgrind)
CTL_RO_BOOL_CONFIG_GEN(config_xmalloc)

/******************************************************************************/

CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
CTL_RO_NL_GEN(opt_lg_chunk, opt_lg_chunk, size_t)
CTL_RO_NL_GEN(opt_narenas, opt_narenas, size_t)
CTL_RO_NL_GEN(opt_lg_dirty_mult, opt_lg_dirty_mult, ssize_t)
CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, bool)
CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
CTL_RO_NL_CGEN(config_fill, opt_quarantine, opt_quarantine, size_t)
CTL_RO_NL_CGEN(config_fill, opt_redzone, opt_redzone, bool)
CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
CTL_RO_NL_CGEN(config_valgrind, opt_valgrind, opt_valgrind, bool)
CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
CTL_RO_NL_CGEN(config_tcache, opt_tcache, opt_tcache, bool)
CTL_RO_NL_CGEN(config_tcache, opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
CTL_RO_CGEN(config_prof, opt_prof_active, opt_prof_active, bool) /* Mutable. */
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)

/******************************************************************************/

CTL_RO_NL_GEN(arenas_bin_i_size, arena_bin_info[mib[2]].reg_size, size_t)
CTL_RO_NL_GEN(arenas_bin_i_nregs, arena_bin_info[mib[2]].nregs, uint32_t)
CTL_RO_NL_GEN(arenas_bin_i_run_size, arena_bin_info[mib[2]].run_size, size_t)
const ctl_named_node_t *
arenas_bin_i_index(const size_t *mib, size_t miblen, size_t i)
{

	if (i >= NBINS)
		return (NULL);
	return (super_arenas_bin_i_node);
}

CTL_RO_NL_GEN(arenas_lrun_i_size, ((mib[2]+1) << LG_PAGE), size_t)
const ctl_named_node_t *
arenas_lrun_i_index(const size_t *mib, size_t miblen, size_t i)
{

	if (i >= nlclasses)
		return (NULL);
	return (super_arenas_lrun_i_node);
}

CTL_RO_NL_GEN(arenas_narenas, narenas, unsigned)

static int
arenas_initialized_ctl(const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen)
{
	int ret;
	unsigned nread, i;

	malloc_mutex_lock(&ctl_mtx);
	READONLY();
	if (*oldlenp != narenas * sizeof(bool)) {
		ret = EINVAL;
		nread = (*oldlenp < narenas * sizeof(bool))
		    ? (*oldlenp / sizeof(bool)) : narenas;
	} else {
		ret = 0;
		nread = narenas;
	}

	for (i = 0; i < nread; i++)
		((bool *)oldp)[i] = ctl_stats.arenas[i].initialized;

label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
CTL_RO_NL_CGEN(config_tcache, arenas_tcache_max, tcache_maxclass, size_t)
CTL_RO_NL_GEN(arenas_nbins, NBINS, unsigned)
CTL_RO_NL_CGEN(config_tcache, arenas_nhbins, nhbins, unsigned)
CTL_RO_NL_GEN(arenas_nlruns, nlclasses, size_t)

static int
arenas_purge_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	unsigned arena;

	WRITEONLY();
	arena = UINT_MAX;
	WRITE(arena, unsigned);
	if (newp != NULL && arena >= narenas) {
		ret = EFAULT;
		goto label_return;
	} else {
		VARIABLE_ARRAY(arena_t *, tarenas, narenas);

		malloc_mutex_lock(&arenas_lock);
		memcpy(tarenas, arenas, sizeof(arena_t *) * narenas);
		malloc_mutex_unlock(&arenas_lock);

		if (arena == UINT_MAX) {
			unsigned i;
			for (i = 0; i < narenas; i++) {
				if (tarenas[i] != NULL)
					arena_purge_all(tarenas[i]);
			}
		} else {
			assert(arena < narenas);
			if (tarenas[arena] != NULL)
				arena_purge_all(tarenas[arena]);
		}
	}

	ret = 0;
label_return:
	return (ret);
}

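/*
 * Example (a sketch): writing an arena index purges that arena's dirty
 * pages, and a write-only call with no new value purges all initialized
 * arenas:
 *
 *	unsigned arena = 0;
 *	mallctl("arenas.purge", NULL, NULL, &arena, sizeof(unsigned));
 *	mallctl("arenas.purge", NULL, NULL, NULL, 0);
 */
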
/******************************************************************************/

static int
prof_active_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	bool oldval;

	if (config_prof == false)
		return (ENOENT);

	malloc_mutex_lock(&ctl_mtx); /* Protect opt_prof_active. */
	oldval = opt_prof_active;
	if (newp != NULL) {
		/*
		 * The memory barriers will tend to make opt_prof_active
		 * propagate faster on systems with weak memory ordering.
		 */
		mb_write();
		WRITE(opt_prof_active, bool);
		mb_write();
	}
	READ(oldval, bool);

	ret = 0;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

static int
prof_dump_ctl(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	const char *filename = NULL;

	if (config_prof == false)
		return (ENOENT);

	WRITEONLY();
	WRITE(filename, const char *);

	if (prof_mdump(filename)) {
		ret = EFAULT;
		goto label_return;
	}

	ret = 0;
label_return:
	return (ret);
}

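/*
 * Example (a sketch; the filename is illustrative): with profiling enabled,
 * a caller triggers a dump to an explicitly named file, or writes no value
 * at all to dump to the next file in the sequence derived from
 * opt_prof_prefix:
 *
 *	const char *filename = "prof.out";
 *	mallctl("prof.dump", NULL, NULL, &filename, sizeof(const char *));
 */
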
CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)

/******************************************************************************/

CTL_RO_CGEN(config_stats, stats_chunks_current, ctl_stats.chunks.current,
    size_t)
CTL_RO_CGEN(config_stats, stats_chunks_total, ctl_stats.chunks.total, uint64_t)
CTL_RO_CGEN(config_stats, stats_chunks_high, ctl_stats.chunks.high, size_t)
CTL_RO_CGEN(config_stats, stats_huge_allocated, huge_allocated, size_t)
CTL_RO_CGEN(config_stats, stats_huge_nmalloc, huge_nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_huge_ndalloc, huge_ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
    ctl_stats.arenas[mib[2]].allocated_small, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
    ctl_stats.arenas[mib[2]].nmalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
    ctl_stats.arenas[mib[2]].ndalloc_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
    ctl_stats.arenas[mib[2]].nrequests_small, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
    ctl_stats.arenas[mib[2]].astats.allocated_large, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
    ctl_stats.arenas[mib[2]].astats.nmalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
    ctl_stats.arenas[mib[2]].astats.ndalloc_large, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
    ctl_stats.arenas[mib[2]].astats.nrequests_large, uint64_t)

CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_allocated,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].allocated, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nfills,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nfills, uint64_t)
CTL_RO_CGEN(config_stats && config_tcache, stats_arenas_i_bins_j_nflushes,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nflushes, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].nruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].reruns, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curruns,
    ctl_stats.arenas[mib[2]].bstats[mib[4]].curruns, size_t)

const ctl_named_node_t *
stats_arenas_i_bins_j_index(const size_t *mib, size_t miblen, size_t j)
{

	if (j >= NBINS)
		return (NULL);
	return (super_stats_arenas_i_bins_j_node);
}

CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nmalloc,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].nmalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_ndalloc,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].ndalloc, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_nrequests,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].nrequests, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_lruns_j_curruns,
    ctl_stats.arenas[mib[2]].lstats[mib[4]].curruns, size_t)

const ctl_named_node_t *
stats_arenas_i_lruns_j_index(const size_t *mib, size_t miblen, size_t j)
{

	if (j >= nlclasses)
		return (NULL);
	return (super_stats_arenas_i_lruns_j_node);
}

CTL_RO_GEN(stats_arenas_i_nthreads, ctl_stats.arenas[mib[2]].nthreads, unsigned)
CTL_RO_GEN(stats_arenas_i_pactive, ctl_stats.arenas[mib[2]].pactive, size_t)
CTL_RO_GEN(stats_arenas_i_pdirty, ctl_stats.arenas[mib[2]].pdirty, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
    ctl_stats.arenas[mib[2]].astats.mapped, size_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_npurge,
    ctl_stats.arenas[mib[2]].astats.npurge, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_nmadvise,
    ctl_stats.arenas[mib[2]].astats.nmadvise, uint64_t)
CTL_RO_CGEN(config_stats, stats_arenas_i_purged,
    ctl_stats.arenas[mib[2]].astats.purged, uint64_t)

const ctl_named_node_t *
stats_arenas_i_index(const size_t *mib, size_t miblen, size_t i)
{
	const ctl_named_node_t *ret;

	malloc_mutex_lock(&ctl_mtx);
	if (ctl_stats.arenas[i].initialized == false) {
		ret = NULL;
		goto label_return;
	}

	ret = super_stats_arenas_i_node;
label_return:
	malloc_mutex_unlock(&ctl_mtx);
	return (ret);
}

CTL_RO_CGEN(config_stats, stats_cactive, &stats_cactive, size_t *)
CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats.allocated, size_t)
CTL_RO_CGEN(config_stats, stats_active, ctl_stats.active, size_t)
CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats.mapped, size_t)