/* xref: /freebsd/contrib/jemalloc/src/ctl.c (revision c5ad81420c495d1d5de04209b0ec4fcb435c322c) */
#define JEMALLOC_CTL_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/nstime.h"
#include "jemalloc/internal/sc.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/
/* Data. */

/*
 * ctl_mtx protects the following:
 * - ctl_stats->*
 */
static malloc_mutex_t	ctl_mtx;
static bool		ctl_initialized;
static ctl_stats_t	*ctl_stats;
static ctl_arenas_t	*ctl_arenas;

/******************************************************************************/
/* Helpers for named and indexed nodes. */

static const ctl_named_node_t *
ctl_named_node(const ctl_node_t *node) {
	return ((node->named) ? (const ctl_named_node_t *)node : NULL);
}

static const ctl_named_node_t *
ctl_named_children(const ctl_named_node_t *node, size_t index) {
	const ctl_named_node_t *children = ctl_named_node(node->children);

	return (children ? &children[index] : NULL);
}

static const ctl_indexed_node_t *
ctl_indexed_node(const ctl_node_t *node) {
	return (!node->named ? (const ctl_indexed_node_t *)node : NULL);
}

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

#define CTL_PROTO(n)							\
static int	n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,	\
    void *oldp, size_t *oldlenp, void *newp, size_t newlen);

#define INDEX_PROTO(n)							\
static const ctl_named_node_t	*n##_index(tsdn_t *tsdn,		\
    const size_t *mib, size_t miblen, size_t i);

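/*
 * As an illustration (comment added; not in the original source),
 * CTL_PROTO(version) expands to the prototype
 *
 *	static int	version_ctl(tsd_t *tsd, const size_t *mib,
 *	    size_t miblen, void *oldp, size_t *oldlenp, void *newp,
 *	    size_t newlen);
 *
 * and INDEX_PROTO(arena_i) analogously declares arena_i_index().
 */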
CTL_PROTO(version)
CTL_PROTO(epoch)
CTL_PROTO(background_thread)
CTL_PROTO(max_background_threads)
CTL_PROTO(thread_tcache_enabled)
CTL_PROTO(thread_tcache_flush)
CTL_PROTO(thread_prof_name)
CTL_PROTO(thread_prof_active)
CTL_PROTO(thread_arena)
CTL_PROTO(thread_allocated)
CTL_PROTO(thread_allocatedp)
CTL_PROTO(thread_deallocated)
CTL_PROTO(thread_deallocatedp)
CTL_PROTO(config_cache_oblivious)
CTL_PROTO(config_debug)
CTL_PROTO(config_fill)
CTL_PROTO(config_lazy_lock)
CTL_PROTO(config_malloc_conf)
CTL_PROTO(config_opt_safety_checks)
CTL_PROTO(config_prof)
CTL_PROTO(config_prof_libgcc)
CTL_PROTO(config_prof_libunwind)
CTL_PROTO(config_stats)
CTL_PROTO(config_utrace)
CTL_PROTO(config_xmalloc)
CTL_PROTO(opt_abort)
CTL_PROTO(opt_abort_conf)
CTL_PROTO(opt_confirm_conf)
CTL_PROTO(opt_metadata_thp)
CTL_PROTO(opt_retain)
CTL_PROTO(opt_dss)
CTL_PROTO(opt_narenas)
CTL_PROTO(opt_percpu_arena)
CTL_PROTO(opt_oversize_threshold)
CTL_PROTO(opt_background_thread)
CTL_PROTO(opt_max_background_threads)
CTL_PROTO(opt_dirty_decay_ms)
CTL_PROTO(opt_muzzy_decay_ms)
CTL_PROTO(opt_stats_print)
CTL_PROTO(opt_stats_print_opts)
CTL_PROTO(opt_junk)
CTL_PROTO(opt_zero)
CTL_PROTO(opt_utrace)
CTL_PROTO(opt_xmalloc)
CTL_PROTO(opt_tcache)
CTL_PROTO(opt_thp)
CTL_PROTO(opt_lg_extent_max_active_fit)
CTL_PROTO(opt_lg_tcache_max)
CTL_PROTO(opt_prof)
CTL_PROTO(opt_prof_prefix)
CTL_PROTO(opt_prof_active)
CTL_PROTO(opt_prof_thread_active_init)
CTL_PROTO(opt_lg_prof_sample)
CTL_PROTO(opt_lg_prof_interval)
CTL_PROTO(opt_prof_gdump)
CTL_PROTO(opt_prof_final)
CTL_PROTO(opt_prof_leak)
CTL_PROTO(opt_prof_accum)
CTL_PROTO(tcache_create)
CTL_PROTO(tcache_flush)
CTL_PROTO(tcache_destroy)
CTL_PROTO(arena_i_initialized)
CTL_PROTO(arena_i_decay)
CTL_PROTO(arena_i_purge)
CTL_PROTO(arena_i_reset)
CTL_PROTO(arena_i_destroy)
CTL_PROTO(arena_i_dss)
CTL_PROTO(arena_i_dirty_decay_ms)
CTL_PROTO(arena_i_muzzy_decay_ms)
CTL_PROTO(arena_i_extent_hooks)
CTL_PROTO(arena_i_retain_grow_limit)
INDEX_PROTO(arena_i)
CTL_PROTO(arenas_bin_i_size)
CTL_PROTO(arenas_bin_i_nregs)
CTL_PROTO(arenas_bin_i_slab_size)
CTL_PROTO(arenas_bin_i_nshards)
INDEX_PROTO(arenas_bin_i)
CTL_PROTO(arenas_lextent_i_size)
INDEX_PROTO(arenas_lextent_i)
CTL_PROTO(arenas_narenas)
CTL_PROTO(arenas_dirty_decay_ms)
CTL_PROTO(arenas_muzzy_decay_ms)
CTL_PROTO(arenas_quantum)
CTL_PROTO(arenas_page)
CTL_PROTO(arenas_tcache_max)
CTL_PROTO(arenas_nbins)
CTL_PROTO(arenas_nhbins)
CTL_PROTO(arenas_nlextents)
CTL_PROTO(arenas_create)
CTL_PROTO(arenas_lookup)
CTL_PROTO(prof_thread_active_init)
CTL_PROTO(prof_active)
CTL_PROTO(prof_dump)
CTL_PROTO(prof_gdump)
CTL_PROTO(prof_reset)
CTL_PROTO(prof_interval)
CTL_PROTO(lg_prof_sample)
CTL_PROTO(prof_log_start)
CTL_PROTO(prof_log_stop)
CTL_PROTO(stats_arenas_i_small_allocated)
CTL_PROTO(stats_arenas_i_small_nmalloc)
CTL_PROTO(stats_arenas_i_small_ndalloc)
CTL_PROTO(stats_arenas_i_small_nrequests)
CTL_PROTO(stats_arenas_i_small_nfills)
CTL_PROTO(stats_arenas_i_small_nflushes)
CTL_PROTO(stats_arenas_i_large_allocated)
CTL_PROTO(stats_arenas_i_large_nmalloc)
CTL_PROTO(stats_arenas_i_large_ndalloc)
CTL_PROTO(stats_arenas_i_large_nrequests)
CTL_PROTO(stats_arenas_i_large_nfills)
CTL_PROTO(stats_arenas_i_large_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nmalloc)
CTL_PROTO(stats_arenas_i_bins_j_ndalloc)
CTL_PROTO(stats_arenas_i_bins_j_nrequests)
CTL_PROTO(stats_arenas_i_bins_j_curregs)
CTL_PROTO(stats_arenas_i_bins_j_nfills)
CTL_PROTO(stats_arenas_i_bins_j_nflushes)
CTL_PROTO(stats_arenas_i_bins_j_nslabs)
CTL_PROTO(stats_arenas_i_bins_j_nreslabs)
CTL_PROTO(stats_arenas_i_bins_j_curslabs)
CTL_PROTO(stats_arenas_i_bins_j_nonfull_slabs)
INDEX_PROTO(stats_arenas_i_bins_j)
CTL_PROTO(stats_arenas_i_lextents_j_nmalloc)
CTL_PROTO(stats_arenas_i_lextents_j_ndalloc)
CTL_PROTO(stats_arenas_i_lextents_j_nrequests)
CTL_PROTO(stats_arenas_i_lextents_j_curlextents)
INDEX_PROTO(stats_arenas_i_lextents_j)
CTL_PROTO(stats_arenas_i_extents_j_ndirty)
CTL_PROTO(stats_arenas_i_extents_j_nmuzzy)
CTL_PROTO(stats_arenas_i_extents_j_nretained)
CTL_PROTO(stats_arenas_i_extents_j_dirty_bytes)
CTL_PROTO(stats_arenas_i_extents_j_muzzy_bytes)
CTL_PROTO(stats_arenas_i_extents_j_retained_bytes)
INDEX_PROTO(stats_arenas_i_extents_j)
CTL_PROTO(stats_arenas_i_nthreads)
CTL_PROTO(stats_arenas_i_uptime)
CTL_PROTO(stats_arenas_i_dss)
CTL_PROTO(stats_arenas_i_dirty_decay_ms)
CTL_PROTO(stats_arenas_i_muzzy_decay_ms)
CTL_PROTO(stats_arenas_i_pactive)
CTL_PROTO(stats_arenas_i_pdirty)
CTL_PROTO(stats_arenas_i_pmuzzy)
CTL_PROTO(stats_arenas_i_mapped)
CTL_PROTO(stats_arenas_i_retained)
CTL_PROTO(stats_arenas_i_extent_avail)
CTL_PROTO(stats_arenas_i_dirty_npurge)
CTL_PROTO(stats_arenas_i_dirty_nmadvise)
CTL_PROTO(stats_arenas_i_dirty_purged)
CTL_PROTO(stats_arenas_i_muzzy_npurge)
CTL_PROTO(stats_arenas_i_muzzy_nmadvise)
CTL_PROTO(stats_arenas_i_muzzy_purged)
CTL_PROTO(stats_arenas_i_base)
CTL_PROTO(stats_arenas_i_internal)
CTL_PROTO(stats_arenas_i_metadata_thp)
CTL_PROTO(stats_arenas_i_tcache_bytes)
CTL_PROTO(stats_arenas_i_resident)
CTL_PROTO(stats_arenas_i_abandoned_vm)
INDEX_PROTO(stats_arenas_i)
CTL_PROTO(stats_allocated)
CTL_PROTO(stats_active)
CTL_PROTO(stats_background_thread_num_threads)
CTL_PROTO(stats_background_thread_num_runs)
CTL_PROTO(stats_background_thread_run_interval)
CTL_PROTO(stats_metadata)
CTL_PROTO(stats_metadata_thp)
CTL_PROTO(stats_resident)
CTL_PROTO(stats_mapped)
CTL_PROTO(stats_retained)
CTL_PROTO(experimental_hooks_install)
CTL_PROTO(experimental_hooks_remove)
CTL_PROTO(experimental_utilization_query)
CTL_PROTO(experimental_utilization_batch_query)
CTL_PROTO(experimental_arenas_i_pactivep)
INDEX_PROTO(experimental_arenas_i)

#define MUTEX_STATS_CTL_PROTO_GEN(n)					\
CTL_PROTO(stats_##n##_num_ops)						\
CTL_PROTO(stats_##n##_num_wait)						\
CTL_PROTO(stats_##n##_num_spin_acq)					\
CTL_PROTO(stats_##n##_num_owner_switch)					\
CTL_PROTO(stats_##n##_total_wait_time)					\
CTL_PROTO(stats_##n##_max_wait_time)					\
CTL_PROTO(stats_##n##_max_num_thds)

/* Global mutexes. */
#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

/* Per arena mutexes. */
#define OP(mtx) MUTEX_STATS_CTL_PROTO_GEN(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP

/* Arena bin mutexes. */
MUTEX_STATS_CTL_PROTO_GEN(arenas_i_bins_j_mutex)
#undef MUTEX_STATS_CTL_PROTO_GEN
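/*
 * For illustration (comment added; not in the original source): given a
 * global mutex such as ctl (referenced below as global_prof_mutex_ctl),
 * OP(ctl) above becomes MUTEX_STATS_CTL_PROTO_GEN(mutexes_ctl), which
 * declares stats_mutexes_ctl_num_ops_ctl(),
 * stats_mutexes_ctl_num_wait_ctl(), and the remaining five per-mutex
 * counter handlers.
 */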

CTL_PROTO(stats_mutexes_reset)

/******************************************************************************/
/* mallctl tree. */

#define NAME(n)	{true},	n
#define CHILD(t, c)							\
	sizeof(c##_node) / sizeof(ctl_##t##_node_t),			\
	(ctl_node_t *)c##_node,						\
	NULL
#define CTL(c)	0, NULL, c##_ctl

/*
 * Only handles internal indexed nodes, since there are currently no external
 * ones.
 */
#define INDEX(i)	{false},	i##_index

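/*
 * Illustrative sketch (not part of the original source): with the macros
 * above, an entry such as
 *
 *	{NAME("enabled"),	CTL(thread_tcache_enabled)}
 *
 * expands positionally to
 *
 *	{{true}, "enabled", 0, NULL, thread_tcache_enabled_ctl}
 *
 * i.e. a named leaf node with no children whose handler is
 * thread_tcache_enabled_ctl(); CHILD() instead fills in the child count
 * and child array and leaves the handler NULL.
 */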
static const ctl_named_node_t	thread_tcache_node[] = {
	{NAME("enabled"),	CTL(thread_tcache_enabled)},
	{NAME("flush"),		CTL(thread_tcache_flush)}
};

static const ctl_named_node_t	thread_prof_node[] = {
	{NAME("name"),		CTL(thread_prof_name)},
	{NAME("active"),	CTL(thread_prof_active)}
};

static const ctl_named_node_t	thread_node[] = {
	{NAME("arena"),		CTL(thread_arena)},
	{NAME("allocated"),	CTL(thread_allocated)},
	{NAME("allocatedp"),	CTL(thread_allocatedp)},
	{NAME("deallocated"),	CTL(thread_deallocated)},
	{NAME("deallocatedp"),	CTL(thread_deallocatedp)},
	{NAME("tcache"),	CHILD(named, thread_tcache)},
	{NAME("prof"),		CHILD(named, thread_prof)}
};

static const ctl_named_node_t	config_node[] = {
	{NAME("cache_oblivious"), CTL(config_cache_oblivious)},
	{NAME("debug"),		CTL(config_debug)},
	{NAME("fill"),		CTL(config_fill)},
	{NAME("lazy_lock"),	CTL(config_lazy_lock)},
	{NAME("malloc_conf"),	CTL(config_malloc_conf)},
	{NAME("opt_safety_checks"),	CTL(config_opt_safety_checks)},
	{NAME("prof"),		CTL(config_prof)},
	{NAME("prof_libgcc"),	CTL(config_prof_libgcc)},
	{NAME("prof_libunwind"), CTL(config_prof_libunwind)},
	{NAME("stats"),		CTL(config_stats)},
	{NAME("utrace"),	CTL(config_utrace)},
	{NAME("xmalloc"),	CTL(config_xmalloc)}
};

static const ctl_named_node_t opt_node[] = {
	{NAME("abort"),		CTL(opt_abort)},
	{NAME("abort_conf"),	CTL(opt_abort_conf)},
	{NAME("confirm_conf"),	CTL(opt_confirm_conf)},
	{NAME("metadata_thp"),	CTL(opt_metadata_thp)},
	{NAME("retain"),	CTL(opt_retain)},
	{NAME("dss"),		CTL(opt_dss)},
	{NAME("narenas"),	CTL(opt_narenas)},
	{NAME("percpu_arena"),	CTL(opt_percpu_arena)},
	{NAME("oversize_threshold"),	CTL(opt_oversize_threshold)},
	{NAME("background_thread"),	CTL(opt_background_thread)},
	{NAME("max_background_threads"),	CTL(opt_max_background_threads)},
	{NAME("dirty_decay_ms"), CTL(opt_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(opt_muzzy_decay_ms)},
	{NAME("stats_print"),	CTL(opt_stats_print)},
	{NAME("stats_print_opts"),	CTL(opt_stats_print_opts)},
	{NAME("junk"),		CTL(opt_junk)},
	{NAME("zero"),		CTL(opt_zero)},
	{NAME("utrace"),	CTL(opt_utrace)},
	{NAME("xmalloc"),	CTL(opt_xmalloc)},
	{NAME("tcache"),	CTL(opt_tcache)},
	{NAME("thp"),		CTL(opt_thp)},
	{NAME("lg_extent_max_active_fit"), CTL(opt_lg_extent_max_active_fit)},
	{NAME("lg_tcache_max"),	CTL(opt_lg_tcache_max)},
	{NAME("prof"),		CTL(opt_prof)},
	{NAME("prof_prefix"),	CTL(opt_prof_prefix)},
	{NAME("prof_active"),	CTL(opt_prof_active)},
	{NAME("prof_thread_active_init"), CTL(opt_prof_thread_active_init)},
	{NAME("lg_prof_sample"), CTL(opt_lg_prof_sample)},
	{NAME("lg_prof_interval"), CTL(opt_lg_prof_interval)},
	{NAME("prof_gdump"),	CTL(opt_prof_gdump)},
	{NAME("prof_final"),	CTL(opt_prof_final)},
	{NAME("prof_leak"),	CTL(opt_prof_leak)},
	{NAME("prof_accum"),	CTL(opt_prof_accum)}
};

static const ctl_named_node_t	tcache_node[] = {
	{NAME("create"),	CTL(tcache_create)},
	{NAME("flush"),		CTL(tcache_flush)},
	{NAME("destroy"),	CTL(tcache_destroy)}
};

static const ctl_named_node_t arena_i_node[] = {
	{NAME("initialized"),	CTL(arena_i_initialized)},
	{NAME("decay"),		CTL(arena_i_decay)},
	{NAME("purge"),		CTL(arena_i_purge)},
	{NAME("reset"),		CTL(arena_i_reset)},
	{NAME("destroy"),	CTL(arena_i_destroy)},
	{NAME("dss"),		CTL(arena_i_dss)},
	{NAME("dirty_decay_ms"), CTL(arena_i_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(arena_i_muzzy_decay_ms)},
	{NAME("extent_hooks"),	CTL(arena_i_extent_hooks)},
	{NAME("retain_grow_limit"),	CTL(arena_i_retain_grow_limit)}
};
static const ctl_named_node_t super_arena_i_node[] = {
	{NAME(""),		CHILD(named, arena_i)}
};

static const ctl_indexed_node_t arena_node[] = {
	{INDEX(arena_i)}
};

static const ctl_named_node_t arenas_bin_i_node[] = {
	{NAME("size"),		CTL(arenas_bin_i_size)},
	{NAME("nregs"),		CTL(arenas_bin_i_nregs)},
	{NAME("slab_size"),	CTL(arenas_bin_i_slab_size)},
	{NAME("nshards"),	CTL(arenas_bin_i_nshards)}
};
static const ctl_named_node_t super_arenas_bin_i_node[] = {
	{NAME(""),		CHILD(named, arenas_bin_i)}
};

static const ctl_indexed_node_t arenas_bin_node[] = {
	{INDEX(arenas_bin_i)}
};

static const ctl_named_node_t arenas_lextent_i_node[] = {
	{NAME("size"),		CTL(arenas_lextent_i_size)}
};
static const ctl_named_node_t super_arenas_lextent_i_node[] = {
	{NAME(""),		CHILD(named, arenas_lextent_i)}
};

static const ctl_indexed_node_t arenas_lextent_node[] = {
	{INDEX(arenas_lextent_i)}
};

static const ctl_named_node_t arenas_node[] = {
	{NAME("narenas"),	CTL(arenas_narenas)},
	{NAME("dirty_decay_ms"), CTL(arenas_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(arenas_muzzy_decay_ms)},
	{NAME("quantum"),	CTL(arenas_quantum)},
	{NAME("page"),		CTL(arenas_page)},
	{NAME("tcache_max"),	CTL(arenas_tcache_max)},
	{NAME("nbins"),		CTL(arenas_nbins)},
	{NAME("nhbins"),	CTL(arenas_nhbins)},
	{NAME("bin"),		CHILD(indexed, arenas_bin)},
	{NAME("nlextents"),	CTL(arenas_nlextents)},
	{NAME("lextent"),	CHILD(indexed, arenas_lextent)},
	{NAME("create"),	CTL(arenas_create)},
	{NAME("lookup"),	CTL(arenas_lookup)}
};

static const ctl_named_node_t	prof_node[] = {
	{NAME("thread_active_init"), CTL(prof_thread_active_init)},
	{NAME("active"),	CTL(prof_active)},
	{NAME("dump"),		CTL(prof_dump)},
	{NAME("gdump"),		CTL(prof_gdump)},
	{NAME("reset"),		CTL(prof_reset)},
	{NAME("interval"),	CTL(prof_interval)},
	{NAME("lg_sample"),	CTL(lg_prof_sample)},
	{NAME("log_start"),	CTL(prof_log_start)},
	{NAME("log_stop"),	CTL(prof_log_stop)}
};
static const ctl_named_node_t stats_arenas_i_small_node[] = {
	{NAME("allocated"),	CTL(stats_arenas_i_small_allocated)},
	{NAME("nmalloc"),	CTL(stats_arenas_i_small_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_small_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_small_nrequests)},
	{NAME("nfills"),	CTL(stats_arenas_i_small_nfills)},
	{NAME("nflushes"),	CTL(stats_arenas_i_small_nflushes)}
};

static const ctl_named_node_t stats_arenas_i_large_node[] = {
	{NAME("allocated"),	CTL(stats_arenas_i_large_allocated)},
	{NAME("nmalloc"),	CTL(stats_arenas_i_large_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_large_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_large_nrequests)},
	{NAME("nfills"),	CTL(stats_arenas_i_large_nfills)},
	{NAME("nflushes"),	CTL(stats_arenas_i_large_nflushes)}
};

#define MUTEX_PROF_DATA_NODE(prefix)					\
static const ctl_named_node_t stats_##prefix##_node[] = {		\
	{NAME("num_ops"),						\
	 CTL(stats_##prefix##_num_ops)},				\
	{NAME("num_wait"),						\
	 CTL(stats_##prefix##_num_wait)},				\
	{NAME("num_spin_acq"),						\
	 CTL(stats_##prefix##_num_spin_acq)},				\
	{NAME("num_owner_switch"),					\
	 CTL(stats_##prefix##_num_owner_switch)},			\
	{NAME("total_wait_time"),					\
	 CTL(stats_##prefix##_total_wait_time)},			\
	{NAME("max_wait_time"),						\
	 CTL(stats_##prefix##_max_wait_time)},				\
	{NAME("max_num_thds"),						\
	 CTL(stats_##prefix##_max_num_thds)}				\
	/* Note that the number of currently waiting threads is not provided. */ \
};

MUTEX_PROF_DATA_NODE(arenas_i_bins_j_mutex)

static const ctl_named_node_t stats_arenas_i_bins_j_node[] = {
	{NAME("nmalloc"),	CTL(stats_arenas_i_bins_j_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_bins_j_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_bins_j_nrequests)},
	{NAME("curregs"),	CTL(stats_arenas_i_bins_j_curregs)},
	{NAME("nfills"),	CTL(stats_arenas_i_bins_j_nfills)},
	{NAME("nflushes"),	CTL(stats_arenas_i_bins_j_nflushes)},
	{NAME("nslabs"),	CTL(stats_arenas_i_bins_j_nslabs)},
	{NAME("nreslabs"),	CTL(stats_arenas_i_bins_j_nreslabs)},
	{NAME("curslabs"),	CTL(stats_arenas_i_bins_j_curslabs)},
	{NAME("nonfull_slabs"),	CTL(stats_arenas_i_bins_j_nonfull_slabs)},
	{NAME("mutex"),		CHILD(named, stats_arenas_i_bins_j_mutex)}
};

static const ctl_named_node_t super_stats_arenas_i_bins_j_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i_bins_j)}
};

static const ctl_indexed_node_t stats_arenas_i_bins_node[] = {
	{INDEX(stats_arenas_i_bins_j)}
};

static const ctl_named_node_t stats_arenas_i_lextents_j_node[] = {
	{NAME("nmalloc"),	CTL(stats_arenas_i_lextents_j_nmalloc)},
	{NAME("ndalloc"),	CTL(stats_arenas_i_lextents_j_ndalloc)},
	{NAME("nrequests"),	CTL(stats_arenas_i_lextents_j_nrequests)},
	{NAME("curlextents"),	CTL(stats_arenas_i_lextents_j_curlextents)}
};
static const ctl_named_node_t super_stats_arenas_i_lextents_j_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i_lextents_j)}
};

static const ctl_indexed_node_t stats_arenas_i_lextents_node[] = {
	{INDEX(stats_arenas_i_lextents_j)}
};

static const ctl_named_node_t stats_arenas_i_extents_j_node[] = {
	{NAME("ndirty"),	CTL(stats_arenas_i_extents_j_ndirty)},
	{NAME("nmuzzy"),	CTL(stats_arenas_i_extents_j_nmuzzy)},
	{NAME("nretained"),	CTL(stats_arenas_i_extents_j_nretained)},
	{NAME("dirty_bytes"),	CTL(stats_arenas_i_extents_j_dirty_bytes)},
	{NAME("muzzy_bytes"),	CTL(stats_arenas_i_extents_j_muzzy_bytes)},
	{NAME("retained_bytes"), CTL(stats_arenas_i_extents_j_retained_bytes)}
};

static const ctl_named_node_t super_stats_arenas_i_extents_j_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i_extents_j)}
};

static const ctl_indexed_node_t stats_arenas_i_extents_node[] = {
	{INDEX(stats_arenas_i_extents_j)}
};

#define OP(mtx)  MUTEX_PROF_DATA_NODE(arenas_i_mutexes_##mtx)
MUTEX_PROF_ARENA_MUTEXES
#undef OP

static const ctl_named_node_t stats_arenas_i_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_arenas_i_mutexes_##mtx)},
MUTEX_PROF_ARENA_MUTEXES
#undef OP
};

static const ctl_named_node_t stats_arenas_i_node[] = {
	{NAME("nthreads"),	CTL(stats_arenas_i_nthreads)},
	{NAME("uptime"),	CTL(stats_arenas_i_uptime)},
	{NAME("dss"),		CTL(stats_arenas_i_dss)},
	{NAME("dirty_decay_ms"), CTL(stats_arenas_i_dirty_decay_ms)},
	{NAME("muzzy_decay_ms"), CTL(stats_arenas_i_muzzy_decay_ms)},
	{NAME("pactive"),	CTL(stats_arenas_i_pactive)},
	{NAME("pdirty"),	CTL(stats_arenas_i_pdirty)},
	{NAME("pmuzzy"),	CTL(stats_arenas_i_pmuzzy)},
	{NAME("mapped"),	CTL(stats_arenas_i_mapped)},
	{NAME("retained"),	CTL(stats_arenas_i_retained)},
	{NAME("extent_avail"),	CTL(stats_arenas_i_extent_avail)},
	{NAME("dirty_npurge"),	CTL(stats_arenas_i_dirty_npurge)},
	{NAME("dirty_nmadvise"), CTL(stats_arenas_i_dirty_nmadvise)},
	{NAME("dirty_purged"),	CTL(stats_arenas_i_dirty_purged)},
	{NAME("muzzy_npurge"),	CTL(stats_arenas_i_muzzy_npurge)},
	{NAME("muzzy_nmadvise"), CTL(stats_arenas_i_muzzy_nmadvise)},
	{NAME("muzzy_purged"),	CTL(stats_arenas_i_muzzy_purged)},
	{NAME("base"),		CTL(stats_arenas_i_base)},
	{NAME("internal"),	CTL(stats_arenas_i_internal)},
	{NAME("metadata_thp"),	CTL(stats_arenas_i_metadata_thp)},
	{NAME("tcache_bytes"),	CTL(stats_arenas_i_tcache_bytes)},
	{NAME("resident"),	CTL(stats_arenas_i_resident)},
	{NAME("abandoned_vm"),	CTL(stats_arenas_i_abandoned_vm)},
	{NAME("small"),		CHILD(named, stats_arenas_i_small)},
	{NAME("large"),		CHILD(named, stats_arenas_i_large)},
	{NAME("bins"),		CHILD(indexed, stats_arenas_i_bins)},
	{NAME("lextents"),	CHILD(indexed, stats_arenas_i_lextents)},
	{NAME("extents"),	CHILD(indexed, stats_arenas_i_extents)},
	{NAME("mutexes"),	CHILD(named, stats_arenas_i_mutexes)}
};
static const ctl_named_node_t super_stats_arenas_i_node[] = {
	{NAME(""),		CHILD(named, stats_arenas_i)}
};

static const ctl_indexed_node_t stats_arenas_node[] = {
	{INDEX(stats_arenas_i)}
};

static const ctl_named_node_t stats_background_thread_node[] = {
	{NAME("num_threads"),	CTL(stats_background_thread_num_threads)},
	{NAME("num_runs"),	CTL(stats_background_thread_num_runs)},
	{NAME("run_interval"),	CTL(stats_background_thread_run_interval)}
};

#define OP(mtx) MUTEX_PROF_DATA_NODE(mutexes_##mtx)
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP

static const ctl_named_node_t stats_mutexes_node[] = {
#define OP(mtx) {NAME(#mtx), CHILD(named, stats_mutexes_##mtx)},
MUTEX_PROF_GLOBAL_MUTEXES
#undef OP
	{NAME("reset"),		CTL(stats_mutexes_reset)}
};
#undef MUTEX_PROF_DATA_NODE

static const ctl_named_node_t stats_node[] = {
	{NAME("allocated"),	CTL(stats_allocated)},
	{NAME("active"),	CTL(stats_active)},
	{NAME("metadata"),	CTL(stats_metadata)},
	{NAME("metadata_thp"),	CTL(stats_metadata_thp)},
	{NAME("resident"),	CTL(stats_resident)},
	{NAME("mapped"),	CTL(stats_mapped)},
	{NAME("retained"),	CTL(stats_retained)},
	{NAME("background_thread"),
	 CHILD(named, stats_background_thread)},
	{NAME("mutexes"),	CHILD(named, stats_mutexes)},
	{NAME("arenas"),	CHILD(indexed, stats_arenas)}
};

static const ctl_named_node_t experimental_hooks_node[] = {
	{NAME("install"),	CTL(experimental_hooks_install)},
	{NAME("remove"),	CTL(experimental_hooks_remove)}
};

static const ctl_named_node_t experimental_utilization_node[] = {
	{NAME("query"),		CTL(experimental_utilization_query)},
	{NAME("batch_query"),	CTL(experimental_utilization_batch_query)}
};

static const ctl_named_node_t experimental_arenas_i_node[] = {
	{NAME("pactivep"),	CTL(experimental_arenas_i_pactivep)}
};
static const ctl_named_node_t super_experimental_arenas_i_node[] = {
	{NAME(""),		CHILD(named, experimental_arenas_i)}
};

static const ctl_indexed_node_t experimental_arenas_node[] = {
	{INDEX(experimental_arenas_i)}
};

static const ctl_named_node_t experimental_node[] = {
	{NAME("hooks"),		CHILD(named, experimental_hooks)},
	{NAME("utilization"),	CHILD(named, experimental_utilization)},
	{NAME("arenas"),	CHILD(indexed, experimental_arenas)}
};

static const ctl_named_node_t	root_node[] = {
	{NAME("version"),	CTL(version)},
	{NAME("epoch"),		CTL(epoch)},
	{NAME("background_thread"),	CTL(background_thread)},
	{NAME("max_background_threads"),	CTL(max_background_threads)},
	{NAME("thread"),	CHILD(named, thread)},
	{NAME("config"),	CHILD(named, config)},
	{NAME("opt"),		CHILD(named, opt)},
	{NAME("tcache"),	CHILD(named, tcache)},
	{NAME("arena"),		CHILD(indexed, arena)},
	{NAME("arenas"),	CHILD(named, arenas)},
	{NAME("prof"),		CHILD(named, prof)},
	{NAME("stats"),		CHILD(named, stats)},
	{NAME("experimental"),	CHILD(named, experimental)}
};
static const ctl_named_node_t super_root_node[] = {
	{NAME(""),		CHILD(named, root)}
};
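
/*
 * Aside (comment added; not in the original source): each super_*_node
 * wraps its real node table in a single nameless child.  The n##_index()
 * functions declared above return pointers into these wrappers, so that
 * after an indexed lookup the traversals in ctl_lookup() and ctl_bymib()
 * can continue through a uniform ctl_named_node_t carrying the usual
 * CHILD() metadata.
 */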

#undef NAME
#undef CHILD
#undef CTL
#undef INDEX

/******************************************************************************/

/*
 * Sets *dst to *dst + *src non-atomically.  This is safe, since everything
 * is synchronized by the ctl mutex.
 */
static void
ctl_accum_arena_stats_u64(arena_stats_u64_t *dst, arena_stats_u64_t *src) {
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
	uint64_t cur_src = atomic_load_u64(src, ATOMIC_RELAXED);
	atomic_store_u64(dst, cur_dst + cur_src, ATOMIC_RELAXED);
#else
	*dst += *src;
#endif
}

/* Likewise: with ctl mutex synchronization, reading is simple. */
static uint64_t
ctl_arena_stats_read_u64(arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_u64(p, ATOMIC_RELAXED);
#else
	return *p;
#endif
}

static void
accum_atomic_zu(atomic_zu_t *dst, atomic_zu_t *src) {
	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
	size_t cur_src = atomic_load_zu(src, ATOMIC_RELAXED);
	atomic_store_zu(dst, cur_dst + cur_src, ATOMIC_RELAXED);
}

/******************************************************************************/

static unsigned
arenas_i2a_impl(size_t i, bool compat, bool validate) {
	unsigned a;

	switch (i) {
	case MALLCTL_ARENAS_ALL:
		a = 0;
		break;
	case MALLCTL_ARENAS_DESTROYED:
		a = 1;
		break;
	default:
		if (compat && i == ctl_arenas->narenas) {
			/*
			 * Provide deprecated backward compatibility for
			 * accessing the merged stats at index narenas rather
			 * than via MALLCTL_ARENAS_ALL.  This is scheduled for
			 * removal in 6.0.0.
			 */
			a = 0;
		} else if (validate && i >= ctl_arenas->narenas) {
			a = UINT_MAX;
		} else {
			/*
			 * This function should never be called for an index
			 * more than one past the range of indices that have
			 * initialized ctl data.
			 */
			assert(i < ctl_arenas->narenas || (!validate && i ==
			    ctl_arenas->narenas));
			a = (unsigned)i + 2;
		}
		break;
	}

	return a;
}
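
/*
 * Summary of the mapping above (comment added for clarity; not in the
 * original source):
 *
 *	mallctl arena index i		ctl_arenas->arenas[] slot
 *	MALLCTL_ARENAS_ALL		0 (merged stats)
 *	MALLCTL_ARENAS_DESTROYED	1 (destroyed-arena stats)
 *	ordinary arena index i		i + 2
 */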

static unsigned
arenas_i2a(size_t i) {
	return arenas_i2a_impl(i, true, false);
}

static ctl_arena_t *
arenas_i_impl(tsd_t *tsd, size_t i, bool compat, bool init) {
	ctl_arena_t *ret;

	assert(!compat || !init);

	ret = ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)];
	if (init && ret == NULL) {
		if (config_stats) {
			struct container_s {
				ctl_arena_t		ctl_arena;
				ctl_arena_stats_t	astats;
			};
			struct container_s *cont =
			    (struct container_s *)base_alloc(tsd_tsdn(tsd),
			    b0get(), sizeof(struct container_s), QUANTUM);
			if (cont == NULL) {
				return NULL;
			}
			ret = &cont->ctl_arena;
			ret->astats = &cont->astats;
		} else {
			ret = (ctl_arena_t *)base_alloc(tsd_tsdn(tsd), b0get(),
			    sizeof(ctl_arena_t), QUANTUM);
			if (ret == NULL) {
				return NULL;
			}
		}
		ret->arena_ind = (unsigned)i;
		ctl_arenas->arenas[arenas_i2a_impl(i, compat, false)] = ret;
	}

	assert(ret == NULL || arenas_i2a(ret->arena_ind) == arenas_i2a(i));
	return ret;
}

static ctl_arena_t *
arenas_i(size_t i) {
	ctl_arena_t *ret = arenas_i_impl(tsd_fetch(), i, true, false);
	assert(ret != NULL);
	return ret;
}

static void
ctl_arena_clear(ctl_arena_t *ctl_arena) {
	ctl_arena->nthreads = 0;
	ctl_arena->dss = dss_prec_names[dss_prec_limit];
	ctl_arena->dirty_decay_ms = -1;
	ctl_arena->muzzy_decay_ms = -1;
	ctl_arena->pactive = 0;
	ctl_arena->pdirty = 0;
	ctl_arena->pmuzzy = 0;
	if (config_stats) {
		memset(&ctl_arena->astats->astats, 0, sizeof(arena_stats_t));
		ctl_arena->astats->allocated_small = 0;
		ctl_arena->astats->nmalloc_small = 0;
		ctl_arena->astats->ndalloc_small = 0;
		ctl_arena->astats->nrequests_small = 0;
		ctl_arena->astats->nfills_small = 0;
		ctl_arena->astats->nflushes_small = 0;
		memset(ctl_arena->astats->bstats, 0, SC_NBINS *
		    sizeof(bin_stats_t));
		memset(ctl_arena->astats->lstats, 0, (SC_NSIZES - SC_NBINS) *
		    sizeof(arena_stats_large_t));
		memset(ctl_arena->astats->estats, 0, SC_NPSIZES *
		    sizeof(arena_stats_extents_t));
	}
}

static void
ctl_arena_stats_amerge(tsdn_t *tsdn, ctl_arena_t *ctl_arena, arena_t *arena) {
	unsigned i;

	if (config_stats) {
		arena_stats_merge(tsdn, arena, &ctl_arena->nthreads,
		    &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
		    &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
		    &ctl_arena->pdirty, &ctl_arena->pmuzzy,
		    &ctl_arena->astats->astats, ctl_arena->astats->bstats,
		    ctl_arena->astats->lstats, ctl_arena->astats->estats);

		for (i = 0; i < SC_NBINS; i++) {
			ctl_arena->astats->allocated_small +=
			    ctl_arena->astats->bstats[i].curregs *
			    sz_index2size(i);
			ctl_arena->astats->nmalloc_small +=
			    ctl_arena->astats->bstats[i].nmalloc;
			ctl_arena->astats->ndalloc_small +=
			    ctl_arena->astats->bstats[i].ndalloc;
			ctl_arena->astats->nrequests_small +=
			    ctl_arena->astats->bstats[i].nrequests;
			ctl_arena->astats->nfills_small +=
			    ctl_arena->astats->bstats[i].nfills;
			ctl_arena->astats->nflushes_small +=
			    ctl_arena->astats->bstats[i].nflushes;
		}
	} else {
		arena_basic_stats_merge(tsdn, arena, &ctl_arena->nthreads,
		    &ctl_arena->dss, &ctl_arena->dirty_decay_ms,
		    &ctl_arena->muzzy_decay_ms, &ctl_arena->pactive,
		    &ctl_arena->pdirty, &ctl_arena->pmuzzy);
	}
}

static void
ctl_arena_stats_sdmerge(ctl_arena_t *ctl_sdarena, ctl_arena_t *ctl_arena,
    bool destroyed) {
	unsigned i;

	if (!destroyed) {
		ctl_sdarena->nthreads += ctl_arena->nthreads;
		ctl_sdarena->pactive += ctl_arena->pactive;
		ctl_sdarena->pdirty += ctl_arena->pdirty;
		ctl_sdarena->pmuzzy += ctl_arena->pmuzzy;
	} else {
		assert(ctl_arena->nthreads == 0);
		assert(ctl_arena->pactive == 0);
		assert(ctl_arena->pdirty == 0);
		assert(ctl_arena->pmuzzy == 0);
	}

	if (config_stats) {
		ctl_arena_stats_t *sdstats = ctl_sdarena->astats;
		ctl_arena_stats_t *astats = ctl_arena->astats;

		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.mapped,
			    &astats->astats.mapped);
			accum_atomic_zu(&sdstats->astats.retained,
			    &astats->astats.retained);
			accum_atomic_zu(&sdstats->astats.extent_avail,
			    &astats->astats.extent_avail);
		}

		ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.npurge,
		    &astats->astats.decay_dirty.npurge);
		ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.nmadvise,
		    &astats->astats.decay_dirty.nmadvise);
		ctl_accum_arena_stats_u64(&sdstats->astats.decay_dirty.purged,
		    &astats->astats.decay_dirty.purged);

		ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.npurge,
		    &astats->astats.decay_muzzy.npurge);
		ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.nmadvise,
		    &astats->astats.decay_muzzy.nmadvise);
		ctl_accum_arena_stats_u64(&sdstats->astats.decay_muzzy.purged,
		    &astats->astats.decay_muzzy.purged);

#define OP(mtx) malloc_mutex_prof_merge(				\
		    &(sdstats->astats.mutex_prof_data[			\
		        arena_prof_mutex_##mtx]),			\
		    &(astats->astats.mutex_prof_data[			\
		        arena_prof_mutex_##mtx]));
MUTEX_PROF_ARENA_MUTEXES
#undef OP
		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.base,
			    &astats->astats.base);
			accum_atomic_zu(&sdstats->astats.internal,
			    &astats->astats.internal);
			accum_atomic_zu(&sdstats->astats.resident,
			    &astats->astats.resident);
			accum_atomic_zu(&sdstats->astats.metadata_thp,
			    &astats->astats.metadata_thp);
		} else {
			assert(atomic_load_zu(
			    &astats->astats.internal, ATOMIC_RELAXED) == 0);
		}

		if (!destroyed) {
			sdstats->allocated_small += astats->allocated_small;
		} else {
			assert(astats->allocated_small == 0);
		}
		sdstats->nmalloc_small += astats->nmalloc_small;
		sdstats->ndalloc_small += astats->ndalloc_small;
		sdstats->nrequests_small += astats->nrequests_small;
		sdstats->nfills_small += astats->nfills_small;
		sdstats->nflushes_small += astats->nflushes_small;

		if (!destroyed) {
			accum_atomic_zu(&sdstats->astats.allocated_large,
			    &astats->astats.allocated_large);
		} else {
			assert(atomic_load_zu(&astats->astats.allocated_large,
			    ATOMIC_RELAXED) == 0);
		}
		ctl_accum_arena_stats_u64(&sdstats->astats.nmalloc_large,
		    &astats->astats.nmalloc_large);
		ctl_accum_arena_stats_u64(&sdstats->astats.ndalloc_large,
		    &astats->astats.ndalloc_large);
		ctl_accum_arena_stats_u64(&sdstats->astats.nrequests_large,
		    &astats->astats.nrequests_large);
		accum_atomic_zu(&sdstats->astats.abandoned_vm,
		    &astats->astats.abandoned_vm);

		accum_atomic_zu(&sdstats->astats.tcache_bytes,
		    &astats->astats.tcache_bytes);

		if (ctl_arena->arena_ind == 0) {
			sdstats->astats.uptime = astats->astats.uptime;
		}

		/* Merge bin stats. */
		for (i = 0; i < SC_NBINS; i++) {
			sdstats->bstats[i].nmalloc += astats->bstats[i].nmalloc;
			sdstats->bstats[i].ndalloc += astats->bstats[i].ndalloc;
			sdstats->bstats[i].nrequests +=
			    astats->bstats[i].nrequests;
			if (!destroyed) {
				sdstats->bstats[i].curregs +=
				    astats->bstats[i].curregs;
			} else {
				assert(astats->bstats[i].curregs == 0);
			}
			sdstats->bstats[i].nfills += astats->bstats[i].nfills;
			sdstats->bstats[i].nflushes +=
			    astats->bstats[i].nflushes;
			sdstats->bstats[i].nslabs += astats->bstats[i].nslabs;
			sdstats->bstats[i].reslabs += astats->bstats[i].reslabs;
			if (!destroyed) {
				sdstats->bstats[i].curslabs +=
				    astats->bstats[i].curslabs;
				sdstats->bstats[i].nonfull_slabs +=
				    astats->bstats[i].nonfull_slabs;
			} else {
				assert(astats->bstats[i].curslabs == 0);
				assert(astats->bstats[i].nonfull_slabs == 0);
			}
			malloc_mutex_prof_merge(&sdstats->bstats[i].mutex_data,
			    &astats->bstats[i].mutex_data);
		}

		/* Merge stats for large allocations. */
		for (i = 0; i < SC_NSIZES - SC_NBINS; i++) {
			ctl_accum_arena_stats_u64(&sdstats->lstats[i].nmalloc,
			    &astats->lstats[i].nmalloc);
			ctl_accum_arena_stats_u64(&sdstats->lstats[i].ndalloc,
			    &astats->lstats[i].ndalloc);
			ctl_accum_arena_stats_u64(&sdstats->lstats[i].nrequests,
			    &astats->lstats[i].nrequests);
			if (!destroyed) {
				sdstats->lstats[i].curlextents +=
				    astats->lstats[i].curlextents;
			} else {
				assert(astats->lstats[i].curlextents == 0);
			}
		}

		/* Merge extents stats. */
		for (i = 0; i < SC_NPSIZES; i++) {
			accum_atomic_zu(&sdstats->estats[i].ndirty,
			    &astats->estats[i].ndirty);
			accum_atomic_zu(&sdstats->estats[i].nmuzzy,
			    &astats->estats[i].nmuzzy);
			accum_atomic_zu(&sdstats->estats[i].nretained,
			    &astats->estats[i].nretained);
			accum_atomic_zu(&sdstats->estats[i].dirty_bytes,
			    &astats->estats[i].dirty_bytes);
			accum_atomic_zu(&sdstats->estats[i].muzzy_bytes,
			    &astats->estats[i].muzzy_bytes);
			accum_atomic_zu(&sdstats->estats[i].retained_bytes,
			    &astats->estats[i].retained_bytes);
		}
	}
}

static void
ctl_arena_refresh(tsdn_t *tsdn, arena_t *arena, ctl_arena_t *ctl_sdarena,
    unsigned i, bool destroyed) {
	ctl_arena_t *ctl_arena = arenas_i(i);

	ctl_arena_clear(ctl_arena);
	ctl_arena_stats_amerge(tsdn, ctl_arena, arena);
	/* Merge into sum stats as well. */
	ctl_arena_stats_sdmerge(ctl_sdarena, ctl_arena, destroyed);
}

static unsigned
ctl_arena_init(tsd_t *tsd, extent_hooks_t *extent_hooks) {
	unsigned arena_ind;
	ctl_arena_t *ctl_arena;

	if ((ctl_arena = ql_last(&ctl_arenas->destroyed, destroyed_link)) !=
	    NULL) {
		ql_remove(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
		arena_ind = ctl_arena->arena_ind;
	} else {
		arena_ind = ctl_arenas->narenas;
	}

	/* Trigger stats allocation. */
	if (arenas_i_impl(tsd, arena_ind, false, true) == NULL) {
		return UINT_MAX;
	}

	/* Initialize new arena. */
	if (arena_init(tsd_tsdn(tsd), arena_ind, extent_hooks) == NULL) {
		return UINT_MAX;
	}

	if (arena_ind == ctl_arenas->narenas) {
		ctl_arenas->narenas++;
	}

	return arena_ind;
}

static void
ctl_background_thread_stats_read(tsdn_t *tsdn) {
	background_thread_stats_t *stats = &ctl_stats->background_thread;
	if (!have_background_thread ||
	    background_thread_stats_read(tsdn, stats)) {
		memset(stats, 0, sizeof(background_thread_stats_t));
		nstime_init(&stats->run_interval, 0);
	}
}

static void
ctl_refresh(tsdn_t *tsdn) {
	unsigned i;
	ctl_arena_t *ctl_sarena = arenas_i(MALLCTL_ARENAS_ALL);
	VARIABLE_ARRAY(arena_t *, tarenas, ctl_arenas->narenas);

	/*
	 * Clear the sum stats, since ctl_arena_refresh() will merge each
	 * arena's stats into them.
	 */
	ctl_arena_clear(ctl_sarena);

	for (i = 0; i < ctl_arenas->narenas; i++) {
		tarenas[i] = arena_get(tsdn, i, false);
	}

	for (i = 0; i < ctl_arenas->narenas; i++) {
		ctl_arena_t *ctl_arena = arenas_i(i);
		bool initialized = (tarenas[i] != NULL);

		ctl_arena->initialized = initialized;
		if (initialized) {
			ctl_arena_refresh(tsdn, tarenas[i], ctl_sarena, i,
			    false);
		}
	}

	if (config_stats) {
		ctl_stats->allocated = ctl_sarena->astats->allocated_small +
		    atomic_load_zu(&ctl_sarena->astats->astats.allocated_large,
			ATOMIC_RELAXED);
		ctl_stats->active = (ctl_sarena->pactive << LG_PAGE);
		ctl_stats->metadata = atomic_load_zu(
		    &ctl_sarena->astats->astats.base, ATOMIC_RELAXED) +
		    atomic_load_zu(&ctl_sarena->astats->astats.internal,
			ATOMIC_RELAXED);
		ctl_stats->metadata_thp = atomic_load_zu(
		    &ctl_sarena->astats->astats.metadata_thp, ATOMIC_RELAXED);
		ctl_stats->resident = atomic_load_zu(
		    &ctl_sarena->astats->astats.resident, ATOMIC_RELAXED);
		ctl_stats->mapped = atomic_load_zu(
		    &ctl_sarena->astats->astats.mapped, ATOMIC_RELAXED);
		ctl_stats->retained = atomic_load_zu(
		    &ctl_sarena->astats->astats.retained, ATOMIC_RELAXED);

		ctl_background_thread_stats_read(tsdn);

#define READ_GLOBAL_MUTEX_PROF_DATA(i, mtx)				\
    malloc_mutex_lock(tsdn, &mtx);					\
    malloc_mutex_prof_read(tsdn, &ctl_stats->mutex_prof_data[i], &mtx);	\
    malloc_mutex_unlock(tsdn, &mtx);

		if (config_prof && opt_prof) {
			READ_GLOBAL_MUTEX_PROF_DATA(global_prof_mutex_prof,
			    bt2gctx_mtx);
		}
		if (have_background_thread) {
			READ_GLOBAL_MUTEX_PROF_DATA(
			    global_prof_mutex_background_thread,
			    background_thread_lock);
		} else {
			memset(&ctl_stats->mutex_prof_data[
			    global_prof_mutex_background_thread], 0,
			    sizeof(mutex_prof_data_t));
		}
		/* We own ctl mutex already. */
		malloc_mutex_prof_read(tsdn,
		    &ctl_stats->mutex_prof_data[global_prof_mutex_ctl],
		    &ctl_mtx);
#undef READ_GLOBAL_MUTEX_PROF_DATA
	}
	ctl_arenas->epoch++;
}

static bool
ctl_init(tsd_t *tsd) {
	bool ret;
	tsdn_t *tsdn = tsd_tsdn(tsd);

	malloc_mutex_lock(tsdn, &ctl_mtx);
	if (!ctl_initialized) {
		ctl_arena_t *ctl_sarena, *ctl_darena;
		unsigned i;

		/*
		 * Allocate demand-zeroed space for pointers to the full
		 * range of supported arena indices.
		 */
		if (ctl_arenas == NULL) {
			ctl_arenas = (ctl_arenas_t *)base_alloc(tsdn,
			    b0get(), sizeof(ctl_arenas_t), QUANTUM);
			if (ctl_arenas == NULL) {
				ret = true;
				goto label_return;
			}
		}

		if (config_stats && ctl_stats == NULL) {
			ctl_stats = (ctl_stats_t *)base_alloc(tsdn, b0get(),
			    sizeof(ctl_stats_t), QUANTUM);
			if (ctl_stats == NULL) {
				ret = true;
				goto label_return;
			}
		}

		/*
		 * Allocate space for the current full range of arenas
		 * here rather than doing it lazily elsewhere, in order
		 * to limit when OOM-caused errors can occur.
		 */
		if ((ctl_sarena = arenas_i_impl(tsd, MALLCTL_ARENAS_ALL, false,
		    true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_sarena->initialized = true;

		if ((ctl_darena = arenas_i_impl(tsd, MALLCTL_ARENAS_DESTROYED,
		    false, true)) == NULL) {
			ret = true;
			goto label_return;
		}
		ctl_arena_clear(ctl_darena);
		/*
		 * Don't toggle ctl_darena to initialized until an arena is
		 * actually destroyed, so that arena.<i>.initialized can be used
		 * to query whether the stats are relevant.
		 */

		ctl_arenas->narenas = narenas_total_get();
		for (i = 0; i < ctl_arenas->narenas; i++) {
			if (arenas_i_impl(tsd, i, false, true) == NULL) {
				ret = true;
				goto label_return;
			}
		}

		ql_new(&ctl_arenas->destroyed);
		ctl_refresh(tsdn);

		ctl_initialized = true;
	}

	ret = false;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}

static int
ctl_lookup(tsdn_t *tsdn, const char *name, ctl_node_t const **nodesp,
    size_t *mibp, size_t *depthp) {
	int ret;
	const char *elm, *tdot, *dot;
	size_t elen, i, j;
	const ctl_named_node_t *node;

	elm = name;
	/* Equivalent to strchrnul(). */
	dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot : strchr(elm, '\0');
	elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	if (elen == 0) {
		ret = ENOENT;
		goto label_return;
	}
	node = super_root_node;
	for (i = 0; i < *depthp; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			const ctl_named_node_t *pnode = node;

			/* Children are named. */
			for (j = 0; j < node->nchildren; j++) {
				const ctl_named_node_t *child =
				    ctl_named_children(node, j);
				if (strlen(child->name) == elen &&
				    strncmp(elm, child->name, elen) == 0) {
					node = child;
					if (nodesp != NULL) {
						nodesp[i] =
						    (const ctl_node_t *)node;
					}
					mibp[i] = j;
					break;
				}
			}
			if (node == pnode) {
				ret = ENOENT;
				goto label_return;
			}
		} else {
			uintmax_t index;
			const ctl_indexed_node_t *inode;

			/* Children are indexed. */
			index = malloc_strtoumax(elm, NULL, 10);
			if (index == UINTMAX_MAX || index > SIZE_T_MAX) {
				ret = ENOENT;
				goto label_return;
			}

			inode = ctl_indexed_node(node->children);
			node = inode->index(tsdn, mibp, *depthp, (size_t)index);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}

			if (nodesp != NULL) {
				nodesp[i] = (const ctl_node_t *)node;
			}
			mibp[i] = (size_t)index;
		}

		if (node->ctl != NULL) {
			/* Terminal node. */
			if (*dot != '\0') {
				/*
				 * The name contains more elements than are
				 * in this path through the tree.
				 */
				ret = ENOENT;
				goto label_return;
			}
			/* Complete lookup successful. */
			*depthp = i + 1;
			break;
		}

		/* Update elm. */
		if (*dot == '\0') {
			/* No more elements. */
			ret = ENOENT;
			goto label_return;
		}
		elm = &dot[1];
		dot = ((tdot = strchr(elm, '.')) != NULL) ? tdot :
		    strchr(elm, '\0');
		elen = (size_t)((uintptr_t)dot - (uintptr_t)elm);
	}

	ret = 0;
label_return:
	return ret;
}
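
/*
 * Worked example (comment added; not in the original source): looking up
 * "arenas.bin.0.size" starts at super_root_node, matches the named child
 * "arenas" in root_node, reaches the indexed arenas_bin_node, calls
 * arenas_bin_i_index() with index 0, and terminates at the "size" leaf,
 * whose handler is arenas_bin_i_size_ctl.  The resulting mib records one
 * entry per component (the positions of "arenas" and "bin" in their parent
 * tables, then 0, then the position of "size"), so later ctl_bymib()
 * calls can skip the string parsing entirely.
 */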

int
ctl_byname(tsd_t *tsd, const char *name, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen) {
	int ret;
	size_t depth;
	ctl_node_t const *nodes[CTL_MAX_DEPTH];
	size_t mib[CTL_MAX_DEPTH];
	const ctl_named_node_t *node;

	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	depth = CTL_MAX_DEPTH;
	ret = ctl_lookup(tsd_tsdn(tsd), name, nodes, mib, &depth);
	if (ret != 0) {
		goto label_return;
	}

	node = ctl_named_node(nodes[depth-1]);
	if (node != NULL && node->ctl) {
		ret = node->ctl(tsd, mib, depth, oldp, oldlenp, newp, newlen);
	} else {
		/* The name refers to a partial path through the ctl tree. */
		ret = ENOENT;
	}

label_return:
	return ret;
}

int
ctl_nametomib(tsd_t *tsd, const char *name, size_t *mibp, size_t *miblenp) {
	int ret;

	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	ret = ctl_lookup(tsd_tsdn(tsd), name, NULL, mibp, miblenp);
label_return:
	return ret;
}

int
ctl_bymib(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
    size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	const ctl_named_node_t *node;
	size_t i;

	if (!ctl_initialized && ctl_init(tsd)) {
		ret = EAGAIN;
		goto label_return;
	}

	/* Iterate down the tree. */
	node = super_root_node;
	for (i = 0; i < miblen; i++) {
		assert(node);
		assert(node->nchildren > 0);
		if (ctl_named_node(node->children) != NULL) {
			/* Children are named. */
			if (node->nchildren <= mib[i]) {
				ret = ENOENT;
				goto label_return;
			}
			node = ctl_named_children(node, mib[i]);
		} else {
			const ctl_indexed_node_t *inode;

			/* Indexed element. */
			inode = ctl_indexed_node(node->children);
			node = inode->index(tsd_tsdn(tsd), mib, miblen, mib[i]);
			if (node == NULL) {
				ret = ENOENT;
				goto label_return;
			}
		}
	}

	/* Call the ctl function. */
	if (node && node->ctl) {
		ret = node->ctl(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
	} else {
		/* Partial MIB. */
		ret = ENOENT;
	}

label_return:
	return ret;
}

bool
ctl_boot(void) {
	if (malloc_mutex_init(&ctl_mtx, "ctl", WITNESS_RANK_CTL,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}

	ctl_initialized = false;

	return false;
}

void
ctl_prefork(tsdn_t *tsdn) {
	malloc_mutex_prefork(tsdn, &ctl_mtx);
}

void
ctl_postfork_parent(tsdn_t *tsdn) {
	malloc_mutex_postfork_parent(tsdn, &ctl_mtx);
}

void
ctl_postfork_child(tsdn_t *tsdn) {
	malloc_mutex_postfork_child(tsdn, &ctl_mtx);
}

/******************************************************************************/
/* *_ctl() functions. */

#define READONLY()	do {						\
	if (newp != NULL || newlen != 0) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

#define WRITEONLY()	do {						\
	if (oldp != NULL || oldlenp != NULL) {				\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

#define READ_XOR_WRITE()	do {					\
	if ((oldp != NULL && oldlenp != NULL) && (newp != NULL ||	\
	    newlen != 0)) {						\
		ret = EPERM;						\
		goto label_return;					\
	}								\
} while (0)

#define READ(v, t)	do {						\
	if (oldp != NULL && oldlenp != NULL) {				\
		if (*oldlenp != sizeof(t)) {				\
			size_t	copylen = (sizeof(t) <= *oldlenp)	\
			    ? sizeof(t) : *oldlenp;			\
			memcpy(oldp, (void *)&(v), copylen);		\
			ret = EINVAL;					\
			goto label_return;				\
		}							\
		*(t *)oldp = (v);					\
	}								\
} while (0)

#define WRITE(v, t)	do {						\
	if (newp != NULL) {						\
		if (newlen != sizeof(t)) {				\
			ret = EINVAL;					\
			goto label_return;				\
		}							\
		(v) = *(t *)newp;					\
	}								\
} while (0)

#define MIB_UNSIGNED(v, i) do {						\
	if (mib[i] > UINT_MAX) {					\
		ret = EFAULT;						\
		goto label_return;					\
	}								\
	v = (unsigned)mib[i];						\
} while (0)
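
/*
 * Usage sketch (comment added; not in the original source): a typical
 * read/write handler built from the macros above looks like
 *
 *	int ret;
 *	bool oldval;
 *
 *	oldval = <current value>;
 *	READ(oldval, bool);	(validates *oldlenp, copies out to *oldp)
 *	WRITE(oldval, bool);	(validates newlen, copies in from *newp)
 *
 *	ret = 0;
 * label_return:
 *	return ret;
 *
 * On a size mismatch both READ and WRITE set ret to EINVAL and jump to
 * label_return, which is why every handler defines that label.
 */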
1468  
1469  /*
1470   * There's a lot of code duplication in the following macros due to limitations
1471   * in how nested cpp macros are expanded.
1472   */
1473  #define CTL_RO_CLGEN(c, l, n, v, t)					\
1474  static int								\
1475  n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
1476      size_t *oldlenp, void *newp, size_t newlen) {			\
1477  	int ret;							\
1478  	t oldval;							\
1479  									\
1480  	if (!(c)) {							\
1481  		return ENOENT;						\
1482  	}								\
1483  	if (l) {							\
1484  		malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);		\
1485  	}								\
1486  	READONLY();							\
1487  	oldval = (v);							\
1488  	READ(oldval, t);						\
1489  									\
1490  	ret = 0;							\
1491  label_return:								\
1492  	if (l) {							\
1493  		malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);		\
1494  	}								\
1495  	return ret;							\
1496  }
1497  
1498  #define CTL_RO_CGEN(c, n, v, t)						\
1499  static int								\
1500  n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
1501      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {			\
1502  	int ret;							\
1503  	t oldval;							\
1504  									\
1505  	if (!(c)) {							\
1506  		return ENOENT;						\
1507  	}								\
1508  	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);			\
1509  	READONLY();							\
1510  	oldval = (v);							\
1511  	READ(oldval, t);						\
1512  									\
1513  	ret = 0;							\
1514  label_return:								\
1515  	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);			\
1516  	return ret;							\
1517  }
1518  
1519  #define CTL_RO_GEN(n, v, t)						\
1520  static int								\
1521  n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
1522      size_t *oldlenp, void *newp, size_t newlen) {			\
1523  	int ret;							\
1524  	t oldval;							\
1525  									\
1526  	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);			\
1527  	READONLY();							\
1528  	oldval = (v);							\
1529  	READ(oldval, t);						\
1530  									\
1531  	ret = 0;							\
1532  label_return:								\
1533  	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);			\
1534  	return ret;							\
1535  }
1536  
1537  /*
1538   * ctl_mtx is not acquired, under the assumption that no pertinent data will
1539   * mutate during the call.
1540   */
1541  #define CTL_RO_NL_CGEN(c, n, v, t)					\
1542  static int								\
1543  n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
1544      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {			\
1545  	int ret;							\
1546  	t oldval;							\
1547  									\
1548  	if (!(c)) {							\
1549  		return ENOENT;						\
1550  	}								\
1551  	READONLY();							\
1552  	oldval = (v);							\
1553  	READ(oldval, t);						\
1554  									\
1555  	ret = 0;							\
1556  label_return:								\
1557  	return ret;							\
1558  }
1559  
1560  #define CTL_RO_NL_GEN(n, v, t)						\
1561  static int								\
1562  n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
1563      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {			\
1564  	int ret;							\
1565  	t oldval;							\
1566  									\
1567  	READONLY();							\
1568  	oldval = (v);							\
1569  	READ(oldval, t);						\
1570  									\
1571  	ret = 0;							\
1572  label_return:								\
1573  	return ret;							\
1574  }
1575  
1576  #define CTL_TSD_RO_NL_CGEN(c, n, m, t)					\
1577  static int								\
1578  n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,	\
1579      size_t *oldlenp, void *newp, size_t newlen) {			\
1580  	int ret;							\
1581  	t oldval;							\
1582  									\
1583  	if (!(c)) {							\
1584  		return ENOENT;						\
1585  	}								\
1586  	READONLY();							\
1587  	oldval = (m(tsd));						\
1588  	READ(oldval, t);						\
1589  									\
1590  	ret = 0;							\
1591  label_return:								\
1592  	return ret;							\
1593  }
1594  
1595  #define CTL_RO_CONFIG_GEN(n, t)						\
1596  static int								\
1597  n##_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, \
1598      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {			\
1599  	int ret;							\
1600  	t oldval;							\
1601  									\
1602  	READONLY();							\
1603  	oldval = n;							\
1604  	READ(oldval, t);						\
1605  									\
1606  	ret = 0;							\
1607  label_return:								\
1608  	return ret;							\
1609  }
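/*
 * Expansion sketch (not in the original source), to make the generators
 * above concrete: CTL_RO_NL_GEN(opt_abort, opt_abort, bool) mechanically
 * produces:
 *
 *	static int
 *	opt_abort_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
 *	    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
 *		int ret;
 *		bool oldval;
 *
 *		READONLY();
 *		oldval = (opt_abort);
 *		READ(oldval, bool);
 *
 *		ret = 0;
 *	label_return:
 *		return ret;
 *	}
 */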
1610  
1611  /******************************************************************************/
1612  
1613  CTL_RO_NL_GEN(version, JEMALLOC_VERSION, const char *)
1614  
1615  static int
1616  epoch_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
1617      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
1618  	int ret;
1619  	UNUSED uint64_t newval;
1620  
1621  	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
1622  	WRITE(newval, uint64_t);
1623  	if (newp != NULL) {
1624  		ctl_refresh(tsd_tsdn(tsd));
1625  	}
1626  	READ(ctl_arenas->epoch, uint64_t);
1627  
1628  	ret = 0;
1629  label_return:
1630  	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
1631  	return ret;
1632  }
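/*
 * Illustrative usage (not in the original source): callers bump the epoch
 * to refresh the stats snapshot before reading any stats.* value:
 *
 *	uint64_t epoch = 1;
 *	size_t sz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &sz, &epoch, sz);
 */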
1633  
1634  static int
1635  background_thread_ctl(tsd_t *tsd, const size_t *mib,
1635  background_thread_ctl(tsd_t *tsd, const size_t *mib,
1636      size_t miblen, void *oldp, size_t *oldlenp,
1637      void *newp, size_t newlen) {
1638  	int ret;
1639  	bool oldval;
1640  
1641  	if (!have_background_thread) {
1642  		return ENOENT;
1643  	}
1644  	background_thread_ctl_init(tsd_tsdn(tsd));
1645  
1646  	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
1647  	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
1648  	if (newp == NULL) {
1649  		oldval = background_thread_enabled();
1650  		READ(oldval, bool);
1651  	} else {
1652  		if (newlen != sizeof(bool)) {
1653  			ret = EINVAL;
1654  			goto label_return;
1655  		}
1656  		oldval = background_thread_enabled();
1657  		READ(oldval, bool);
1658  
1659  		bool newval = *(bool *)newp;
1660  		if (newval == oldval) {
1661  			ret = 0;
1662  			goto label_return;
1663  		}
1664  
1665  		background_thread_enabled_set(tsd_tsdn(tsd), newval);
1666  		if (newval) {
1667  			if (background_threads_enable(tsd)) {
1668  				ret = EFAULT;
1669  				goto label_return;
1670  			}
1671  		} else {
1672  			if (background_threads_disable(tsd)) {
1673  				ret = EFAULT;
1674  				goto label_return;
1675  			}
1676  		}
1677  	}
1678  	ret = 0;
1679  label_return:
1680  	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
1681  	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
1682  
1683  	return ret;
1684  }
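/*
 * Illustrative usage (not in the original source; requires a build with
 * background threads): enabling or disabling is a bool write:
 *
 *	bool enable = true;
 *	mallctl("background_thread", NULL, NULL, &enable, sizeof(enable));
 */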
1685  
1686  static int
1687  max_background_threads_ctl(tsd_t *tsd, const size_t *mib,
1688      size_t miblen, void *oldp, size_t *oldlenp, void *newp,
1689      size_t newlen) {
1690  	int ret;
1691  	size_t oldval;
1692  
1693  	if (!have_background_thread) {
1694  		return ENOENT;
1695  	}
1696  	background_thread_ctl_init(tsd_tsdn(tsd));
1697  
1698  	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
1699  	malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
1700  	if (newp == NULL) {
1701  		oldval = max_background_threads;
1702  		READ(oldval, size_t);
1703  	} else {
1704  		if (newlen != sizeof(size_t)) {
1705  			ret = EINVAL;
1706  			goto label_return;
1707  		}
1708  		oldval = max_background_threads;
1709  		READ(oldval, size_t);
1710  
1711  		size_t newval = *(size_t *)newp;
1712  		if (newval == oldval) {
1713  			ret = 0;
1714  			goto label_return;
1715  		}
1716  		if (newval > opt_max_background_threads) {
1717  			ret = EINVAL;
1718  			goto label_return;
1719  		}
1720  
1721  		if (background_thread_enabled()) {
1722  			background_thread_enabled_set(tsd_tsdn(tsd), false);
1723  			if (background_threads_disable(tsd)) {
1724  				ret = EFAULT;
1725  				goto label_return;
1726  			}
1727  			max_background_threads = newval;
1728  			background_thread_enabled_set(tsd_tsdn(tsd), true);
1729  			if (background_threads_enable(tsd)) {
1730  				ret = EFAULT;
1731  				goto label_return;
1732  			}
1733  		} else {
1734  			max_background_threads = newval;
1735  		}
1736  	}
1737  	ret = 0;
1738  label_return:
1739  	malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
1740  	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
1741  
1742  	return ret;
1743  }
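/*
 * Illustrative usage (not in the original source): the cap is a size_t
 * and must not exceed opt_max_background_threads; lowering it while
 * threads are running triggers the disable/re-enable sequence above:
 *
 *	size_t max = 1;
 *	mallctl("max_background_threads", NULL, NULL, &max, sizeof(max));
 */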
1744  
1745  /******************************************************************************/
1746  
1747  CTL_RO_CONFIG_GEN(config_cache_oblivious, bool)
1748  CTL_RO_CONFIG_GEN(config_debug, bool)
1749  CTL_RO_CONFIG_GEN(config_fill, bool)
1750  CTL_RO_CONFIG_GEN(config_lazy_lock, bool)
1751  CTL_RO_CONFIG_GEN(config_malloc_conf, const char *)
1752  CTL_RO_CONFIG_GEN(config_opt_safety_checks, bool)
1753  CTL_RO_CONFIG_GEN(config_prof, bool)
1754  CTL_RO_CONFIG_GEN(config_prof_libgcc, bool)
1755  CTL_RO_CONFIG_GEN(config_prof_libunwind, bool)
1756  CTL_RO_CONFIG_GEN(config_stats, bool)
1757  CTL_RO_CONFIG_GEN(config_utrace, bool)
1758  CTL_RO_CONFIG_GEN(config_xmalloc, bool)
1759  
1760  /******************************************************************************/
1761  
1762  CTL_RO_NL_GEN(opt_abort, opt_abort, bool)
1763  CTL_RO_NL_GEN(opt_abort_conf, opt_abort_conf, bool)
1764  CTL_RO_NL_GEN(opt_confirm_conf, opt_confirm_conf, bool)
1765  CTL_RO_NL_GEN(opt_metadata_thp, metadata_thp_mode_names[opt_metadata_thp],
1766      const char *)
1767  CTL_RO_NL_GEN(opt_retain, opt_retain, bool)
1768  CTL_RO_NL_GEN(opt_dss, opt_dss, const char *)
1769  CTL_RO_NL_GEN(opt_narenas, opt_narenas, unsigned)
1770  CTL_RO_NL_GEN(opt_percpu_arena, percpu_arena_mode_names[opt_percpu_arena],
1771      const char *)
1772  CTL_RO_NL_GEN(opt_oversize_threshold, opt_oversize_threshold, size_t)
1773  CTL_RO_NL_GEN(opt_background_thread, opt_background_thread, bool)
1774  CTL_RO_NL_GEN(opt_max_background_threads, opt_max_background_threads, size_t)
1775  CTL_RO_NL_GEN(opt_dirty_decay_ms, opt_dirty_decay_ms, ssize_t)
1776  CTL_RO_NL_GEN(opt_muzzy_decay_ms, opt_muzzy_decay_ms, ssize_t)
1777  CTL_RO_NL_GEN(opt_stats_print, opt_stats_print, bool)
1778  CTL_RO_NL_GEN(opt_stats_print_opts, opt_stats_print_opts, const char *)
1779  CTL_RO_NL_CGEN(config_fill, opt_junk, opt_junk, const char *)
1780  CTL_RO_NL_CGEN(config_fill, opt_zero, opt_zero, bool)
1781  CTL_RO_NL_CGEN(config_utrace, opt_utrace, opt_utrace, bool)
1782  CTL_RO_NL_CGEN(config_xmalloc, opt_xmalloc, opt_xmalloc, bool)
1783  CTL_RO_NL_GEN(opt_tcache, opt_tcache, bool)
1784  CTL_RO_NL_GEN(opt_thp, thp_mode_names[opt_thp], const char *)
1785  CTL_RO_NL_GEN(opt_lg_extent_max_active_fit, opt_lg_extent_max_active_fit,
1786      size_t)
1787  CTL_RO_NL_GEN(opt_lg_tcache_max, opt_lg_tcache_max, ssize_t)
1788  CTL_RO_NL_CGEN(config_prof, opt_prof, opt_prof, bool)
1789  CTL_RO_NL_CGEN(config_prof, opt_prof_prefix, opt_prof_prefix, const char *)
1790  CTL_RO_NL_CGEN(config_prof, opt_prof_active, opt_prof_active, bool)
1791  CTL_RO_NL_CGEN(config_prof, opt_prof_thread_active_init,
1792      opt_prof_thread_active_init, bool)
1793  CTL_RO_NL_CGEN(config_prof, opt_lg_prof_sample, opt_lg_prof_sample, size_t)
1794  CTL_RO_NL_CGEN(config_prof, opt_prof_accum, opt_prof_accum, bool)
1795  CTL_RO_NL_CGEN(config_prof, opt_lg_prof_interval, opt_lg_prof_interval, ssize_t)
1796  CTL_RO_NL_CGEN(config_prof, opt_prof_gdump, opt_prof_gdump, bool)
1797  CTL_RO_NL_CGEN(config_prof, opt_prof_final, opt_prof_final, bool)
1798  CTL_RO_NL_CGEN(config_prof, opt_prof_leak, opt_prof_leak, bool)
1799  
1800  /******************************************************************************/
1801  
1802  static int
1803  thread_arena_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
1804      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
1805  	int ret;
1806  	arena_t *oldarena;
1807  	unsigned newind, oldind;
1808  
1809  	oldarena = arena_choose(tsd, NULL);
1810  	if (oldarena == NULL) {
1811  		return EAGAIN;
1812  	}
1813  	newind = oldind = arena_ind_get(oldarena);
1814  	WRITE(newind, unsigned);
1815  	READ(oldind, unsigned);
1816  
1817  	if (newind != oldind) {
1818  		arena_t *newarena;
1819  
1820  		if (newind >= narenas_total_get()) {
1821  			/* New arena index is out of range. */
1822  			ret = EFAULT;
1823  			goto label_return;
1824  		}
1825  
1826  		if (have_percpu_arena &&
1827  		    PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
1828  			if (newind < percpu_arena_ind_limit(opt_percpu_arena)) {
1829  				/*
1830  				 * If per-CPU arena is enabled, thread_arena
1831  				 * control is not allowed for the auto arena
1832  				 * range.
1833  				 */
1834  				ret = EPERM;
1835  				goto label_return;
1836  			}
1837  		}
1838  
1839  		/* Initialize arena if necessary. */
1840  		newarena = arena_get(tsd_tsdn(tsd), newind, true);
1841  		if (newarena == NULL) {
1842  			ret = EAGAIN;
1843  			goto label_return;
1844  		}
1845  		/* Set new arena/tcache associations. */
1846  		arena_migrate(tsd, oldind, newind);
1847  		if (tcache_available(tsd)) {
1848  			tcache_arena_reassociate(tsd_tsdn(tsd),
1849  			    tsd_tcachep_get(tsd), newarena);
1850  		}
1851  	}
1852  
1853  	ret = 0;
1854  label_return:
1855  	return ret;
1856  }
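/*
 * Illustrative usage (not in the original source): read the calling
 * thread's arena index, or migrate the thread to another arena:
 *
 *	unsigned ind;
 *	size_t sz = sizeof(ind);
 *	mallctl("thread.arena", &ind, &sz, NULL, 0);		// read
 *	mallctl("thread.arena", NULL, NULL, &ind, sizeof(ind));	// write
 */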
1857  
1858  CTL_TSD_RO_NL_CGEN(config_stats, thread_allocated, tsd_thread_allocated_get,
1859      uint64_t)
1860  CTL_TSD_RO_NL_CGEN(config_stats, thread_allocatedp, tsd_thread_allocatedp_get,
1861      uint64_t *)
1862  CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocated, tsd_thread_deallocated_get,
1863      uint64_t)
1864  CTL_TSD_RO_NL_CGEN(config_stats, thread_deallocatedp,
1865      tsd_thread_deallocatedp_get, uint64_t *)
1866  
1867  static int
1868  thread_tcache_enabled_ctl(tsd_t *tsd, const size_t *mib,
1869      size_t miblen, void *oldp, size_t *oldlenp, void *newp,
1870      size_t newlen) {
1871  	int ret;
1872  	bool oldval;
1873  
1874  	oldval = tcache_enabled_get(tsd);
1875  	if (newp != NULL) {
1876  		if (newlen != sizeof(bool)) {
1877  			ret = EINVAL;
1878  			goto label_return;
1879  		}
1880  		tcache_enabled_set(tsd, *(bool *)newp);
1881  	}
1882  	READ(oldval, bool);
1883  
1884  	ret = 0;
1885  label_return:
1886  	return ret;
1887  }
1888  
1889  static int
1890  thread_tcache_flush_ctl(tsd_t *tsd, const size_t *mib,
1891      size_t miblen, void *oldp, size_t *oldlenp, void *newp,
1892      size_t newlen) {
1893  	int ret;
1894  
1895  	if (!tcache_available(tsd)) {
1896  		ret = EFAULT;
1897  		goto label_return;
1898  	}
1899  
1900  	READONLY();
1901  	WRITEONLY();
1902  
1903  	tcache_flush(tsd);
1904  
1905  	ret = 0;
1906  label_return:
1907  	return ret;
1908  }
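/*
 * Illustrative usage (not in the original source): flushing takes neither
 * input nor output, as enforced by READONLY()/WRITEONLY() above:
 *
 *	mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
 */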
1909  
1910  static int
1911  thread_prof_name_ctl(tsd_t *tsd, const size_t *mib,
1912      size_t miblen, void *oldp, size_t *oldlenp, void *newp,
1913      size_t newlen) {
1914  	int ret;
1915  
1916  	if (!config_prof) {
1917  		return ENOENT;
1918  	}
1919  
1920  	READ_XOR_WRITE();
1921  
1922  	if (newp != NULL) {
1923  		if (newlen != sizeof(const char *)) {
1924  			ret = EINVAL;
1925  			goto label_return;
1926  		}
1927  
1928  		if ((ret = prof_thread_name_set(tsd, *(const char **)newp)) !=
1929  		    0) {
1930  			goto label_return;
1931  		}
1932  	} else {
1933  		const char *oldname = prof_thread_name_get(tsd);
1934  		READ(oldname, const char *);
1935  	}
1936  
1937  	ret = 0;
1938  label_return:
1939  	return ret;
1940  }
1941  
1942  static int
1943  thread_prof_active_ctl(tsd_t *tsd, const size_t *mib,
1944      size_t miblen, void *oldp, size_t *oldlenp, void *newp,
1945      size_t newlen) {
1946  	int ret;
1947  	bool oldval;
1948  
1949  	if (!config_prof) {
1950  		return ENOENT;
1951  	}
1952  
1953  	oldval = prof_thread_active_get(tsd);
1954  	if (newp != NULL) {
1955  		if (newlen != sizeof(bool)) {
1956  			ret = EINVAL;
1957  			goto label_return;
1958  		}
1959  		if (prof_thread_active_set(tsd, *(bool *)newp)) {
1960  			ret = EAGAIN;
1961  			goto label_return;
1962  		}
1963  	}
1964  	READ(oldval, bool);
1965  
1966  	ret = 0;
1967  label_return:
1968  	return ret;
1969  }
1970  
1971  /******************************************************************************/
1972  
1973  static int
1974  tcache_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
1975      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
1976  	int ret;
1977  	unsigned tcache_ind;
1978  
1979  	READONLY();
1980  	if (tcaches_create(tsd, &tcache_ind)) {
1981  		ret = EFAULT;
1982  		goto label_return;
1983  	}
1984  	READ(tcache_ind, unsigned);
1985  
1986  	ret = 0;
1987  label_return:
1988  	return ret;
1989  }
1990  
1991  static int
1992  tcache_flush_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
1993      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
1994  	int ret;
1995  	unsigned tcache_ind;
1996  
1997  	WRITEONLY();
1998  	tcache_ind = UINT_MAX;
1999  	WRITE(tcache_ind, unsigned);
2000  	if (tcache_ind == UINT_MAX) {
2001  		ret = EFAULT;
2002  		goto label_return;
2003  	}
2004  	tcaches_flush(tsd, tcache_ind);
2005  
2006  	ret = 0;
2007  label_return:
2008  	return ret;
2009  }
2010  
2011  static int
2012  tcache_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2013      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2014  	int ret;
2015  	unsigned tcache_ind;
2016  
2017  	WRITEONLY();
2018  	tcache_ind = UINT_MAX;
2019  	WRITE(tcache_ind, unsigned);
2020  	if (tcache_ind == UINT_MAX) {
2021  		ret = EFAULT;
2022  		goto label_return;
2023  	}
2024  	tcaches_destroy(tsd, tcache_ind);
2025  
2026  	ret = 0;
2027  label_return:
2028  	return ret;
2029  }
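/*
 * Illustrative lifecycle sketch (not in the original source): explicit
 * tcaches created here are addressed via the public MALLOCX_TCACHE() flag:
 *
 *	unsigned tc;
 *	size_t sz = sizeof(tc);
 *	mallctl("tcache.create", &tc, &sz, NULL, 0);
 *	void *p = mallocx(64, MALLOCX_TCACHE(tc));
 *	dallocx(p, MALLOCX_TCACHE(tc));
 *	mallctl("tcache.flush", NULL, NULL, &tc, sizeof(tc));
 *	mallctl("tcache.destroy", NULL, NULL, &tc, sizeof(tc));
 */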
2030  
2031  /******************************************************************************/
2032  
2033  static int
2034  arena_i_initialized_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2035      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2036  	int ret;
2037  	tsdn_t *tsdn = tsd_tsdn(tsd);
2038  	unsigned arena_ind;
2039  	bool initialized;
2040  
2041  	READONLY();
2042  	MIB_UNSIGNED(arena_ind, 1);
2043  
2044  	malloc_mutex_lock(tsdn, &ctl_mtx);
2045  	initialized = arenas_i(arena_ind)->initialized;
2046  	malloc_mutex_unlock(tsdn, &ctl_mtx);
2047  
2048  	READ(initialized, bool);
2049  
2050  	ret = 0;
2051  label_return:
2052  	return ret;
2053  }
2054  
2055  static void
2056  arena_i_decay(tsdn_t *tsdn, unsigned arena_ind, bool all) {
2057  	malloc_mutex_lock(tsdn, &ctl_mtx);
2058  	{
2059  		unsigned narenas = ctl_arenas->narenas;
2060  
2061  		/*
2062  		 * Access via index narenas is deprecated, and scheduled for
2063  		 * removal in 6.0.0.
2064  		 */
2065  		if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind == narenas) {
2066  			unsigned i;
2067  			VARIABLE_ARRAY(arena_t *, tarenas, narenas);
2068  
2069  			for (i = 0; i < narenas; i++) {
2070  				tarenas[i] = arena_get(tsdn, i, false);
2071  			}
2072  
2073  			/*
2074  			 * No further need to hold ctl_mtx, since narenas and
2075  			 * tarenas contain everything needed below.
2076  			 */
2077  			malloc_mutex_unlock(tsdn, &ctl_mtx);
2078  
2079  			for (i = 0; i < narenas; i++) {
2080  				if (tarenas[i] != NULL) {
2081  					arena_decay(tsdn, tarenas[i], false,
2082  					    all);
2083  				}
2084  			}
2085  		} else {
2086  			arena_t *tarena;
2087  
2088  			assert(arena_ind < narenas);
2089  
2090  			tarena = arena_get(tsdn, arena_ind, false);
2091  
2092  			/* No further need to hold ctl_mtx. */
2093  			malloc_mutex_unlock(tsdn, &ctl_mtx);
2094  
2095  			if (tarena != NULL) {
2096  				arena_decay(tsdn, tarena, false, all);
2097  			}
2098  		}
2099  	}
2100  }
2101  
2102  static int
2103  arena_i_decay_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2104      size_t *oldlenp, void *newp, size_t newlen) {
2105  	int ret;
2106  	unsigned arena_ind;
2107  
2108  	READONLY();
2109  	WRITEONLY();
2110  	MIB_UNSIGNED(arena_ind, 1);
2111  	arena_i_decay(tsd_tsdn(tsd), arena_ind, false);
2112  
2113  	ret = 0;
2114  label_return:
2115  	return ret;
2116  }
2117  
2118  static int
2119  arena_i_purge_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2120      size_t *oldlenp, void *newp, size_t newlen) {
2121  	int ret;
2122  	unsigned arena_ind;
2123  
2124  	READONLY();
2125  	WRITEONLY();
2126  	MIB_UNSIGNED(arena_ind, 1);
2127  	arena_i_decay(tsd_tsdn(tsd), arena_ind, true);
2128  
2129  	ret = 0;
2130  label_return:
2131  	return ret;
2132  }
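/*
 * Illustrative usage (not in the original source): decay/purge are
 * usually driven by MIB so the arena index can be substituted, e.g.
 * purging all arenas at once:
 *
 *	size_t mib[3];
 *	size_t miblen = 3;
 *	mallctlnametomib("arena.0.purge", mib, &miblen);
 *	mib[1] = MALLCTL_ARENAS_ALL;
 *	mallctlbymib(mib, miblen, NULL, NULL, NULL, 0);
 */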
2133  
2134  static int
2135  arena_i_reset_destroy_helper(tsd_t *tsd, const size_t *mib, size_t miblen,
2136      void *oldp, size_t *oldlenp, void *newp, size_t newlen, unsigned *arena_ind,
2137      arena_t **arena) {
2138  	int ret;
2139  
2140  	READONLY();
2141  	WRITEONLY();
2142  	MIB_UNSIGNED(*arena_ind, 1);
2143  
2144  	*arena = arena_get(tsd_tsdn(tsd), *arena_ind, false);
2145  	if (*arena == NULL || arena_is_auto(*arena)) {
2146  		ret = EFAULT;
2147  		goto label_return;
2148  	}
2149  
2150  	ret = 0;
2151  label_return:
2152  	return ret;
2153  }
2154  
2155  static void
2156  arena_reset_prepare_background_thread(tsd_t *tsd, unsigned arena_ind) {
2157  	/* Temporarily disable the background thread during arena reset. */
2158  	if (have_background_thread) {
2159  		malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
2160  		if (background_thread_enabled()) {
2161  			background_thread_info_t *info =
2162  			    background_thread_info_get(arena_ind);
2163  			assert(info->state == background_thread_started);
2164  			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
2165  			info->state = background_thread_paused;
2166  			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
2167  		}
2168  	}
2169  }
2170  
2171  static void
2172  arena_reset_finish_background_thread(tsd_t *tsd, unsigned arena_ind) {
2173  	if (have_background_thread) {
2174  		if (background_thread_enabled()) {
2175  			background_thread_info_t *info =
2176  			    background_thread_info_get(arena_ind);
2177  			assert(info->state == background_thread_paused);
2178  			malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
2179  			info->state = background_thread_started;
2180  			malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
2181  		}
2182  		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
2183  	}
2184  }
2185  
2186  static int
2187  arena_i_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2188      size_t *oldlenp, void *newp, size_t newlen) {
2189  	int ret;
2190  	unsigned arena_ind;
2191  	arena_t *arena;
2192  
2193  	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
2194  	    newp, newlen, &arena_ind, &arena);
2195  	if (ret != 0) {
2196  		return ret;
2197  	}
2198  
2199  	arena_reset_prepare_background_thread(tsd, arena_ind);
2200  	arena_reset(tsd, arena);
2201  	arena_reset_finish_background_thread(tsd, arena_ind);
2202  
2203  	return ret;
2204  }
2205  
2206  static int
2207  arena_i_destroy_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2208      size_t *oldlenp, void *newp, size_t newlen) {
2209  	int ret;
2210  	unsigned arena_ind;
2211  	arena_t *arena;
2212  	ctl_arena_t *ctl_darena, *ctl_arena;
2213  
2214  	ret = arena_i_reset_destroy_helper(tsd, mib, miblen, oldp, oldlenp,
2215  	    newp, newlen, &arena_ind, &arena);
2216  	if (ret != 0) {
2217  		goto label_return;
2218  	}
2219  
2220  	if (arena_nthreads_get(arena, false) != 0 || arena_nthreads_get(arena,
2221  	    true) != 0) {
2222  		ret = EFAULT;
2223  		goto label_return;
2224  	}
2225  
2226  	arena_reset_prepare_background_thread(tsd, arena_ind);
2227  	/* Merge stats after resetting and purging arena. */
2228  	arena_reset(tsd, arena);
2229  	arena_decay(tsd_tsdn(tsd), arena, false, true);
2230  	ctl_darena = arenas_i(MALLCTL_ARENAS_DESTROYED);
2231  	ctl_darena->initialized = true;
2232  	ctl_arena_refresh(tsd_tsdn(tsd), arena, ctl_darena, arena_ind, true);
2233  	/* Destroy arena. */
2234  	arena_destroy(tsd, arena);
2235  	ctl_arena = arenas_i(arena_ind);
2236  	ctl_arena->initialized = false;
2237  	/* Record arena index for later recycling via arenas.create. */
2238  	ql_elm_new(ctl_arena, destroyed_link);
2239  	ql_tail_insert(&ctl_arenas->destroyed, ctl_arena, destroyed_link);
2240  	arena_reset_finish_background_thread(tsd, arena_ind);
2241  
2242  	assert(ret == 0);
2243  label_return:
2244  	return ret;
2245  }
2246  
2247  static int
2248  arena_i_dss_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2249      size_t *oldlenp, void *newp, size_t newlen) {
2250  	int ret;
2251  	const char *dss = NULL;
2252  	unsigned arena_ind;
2253  	dss_prec_t dss_prec_old = dss_prec_limit;
2254  	dss_prec_t dss_prec = dss_prec_limit;
2255  
2256  	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2257  	WRITE(dss, const char *);
2258  	MIB_UNSIGNED(arena_ind, 1);
2259  	if (dss != NULL) {
2260  		int i;
2261  		bool match = false;
2262  
2263  		for (i = 0; i < dss_prec_limit; i++) {
2264  			if (strcmp(dss_prec_names[i], dss) == 0) {
2265  				dss_prec = i;
2266  				match = true;
2267  				break;
2268  			}
2269  		}
2270  
2271  		if (!match) {
2272  			ret = EINVAL;
2273  			goto label_return;
2274  		}
2275  	}
2276  
2277  	/*
2278  	 * Access via index narenas is deprecated, and scheduled for removal in
2279  	 * 6.0.0.
2280  	 */
2281  	if (arena_ind == MALLCTL_ARENAS_ALL || arena_ind ==
2282  	    ctl_arenas->narenas) {
2283  		if (dss_prec != dss_prec_limit &&
2284  		    extent_dss_prec_set(dss_prec)) {
2285  			ret = EFAULT;
2286  			goto label_return;
2287  		}
2288  		dss_prec_old = extent_dss_prec_get();
2289  	} else {
2290  		arena_t *arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
2291  		if (arena == NULL || (dss_prec != dss_prec_limit &&
2292  		    arena_dss_prec_set(arena, dss_prec))) {
2293  			ret = EFAULT;
2294  			goto label_return;
2295  		}
2296  		dss_prec_old = arena_dss_prec_get(arena);
2297  	}
2298  
2299  	dss = dss_prec_names[dss_prec_old];
2300  	READ(dss, const char *);
2301  
2302  	ret = 0;
2303  label_return:
2304  	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2305  	return ret;
2306  }
2307  
2308  static int
2309  arena_i_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib, size_t miblen,
2310      void *oldp, size_t *oldlenp, void *newp, size_t newlen, bool dirty) {
2311  	int ret;
2312  	unsigned arena_ind;
2313  	arena_t *arena;
2314  
2315  	MIB_UNSIGNED(arena_ind, 1);
2316  	arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
2317  	if (arena == NULL) {
2318  		ret = EFAULT;
2319  		goto label_return;
2320  	}
2321  
2322  	if (oldp != NULL && oldlenp != NULL) {
2323  		size_t oldval = dirty ? arena_dirty_decay_ms_get(arena) :
2324  		    arena_muzzy_decay_ms_get(arena);
2325  		READ(oldval, ssize_t);
2326  	}
2327  	if (newp != NULL) {
2328  		if (newlen != sizeof(ssize_t)) {
2329  			ret = EINVAL;
2330  			goto label_return;
2331  		}
2332  		if (arena_is_huge(arena_ind) && *(ssize_t *)newp > 0) {
2333  			/*
2334  			 * By default the huge arena purges eagerly.  If it is
2335  			 * set to non-zero decay time afterwards, background
2336  			 * thread might be needed.
2337  			 */
2338  			if (background_thread_create(tsd, arena_ind)) {
2339  				ret = EFAULT;
2340  				goto label_return;
2341  			}
2342  		}
2343  		if (dirty ? arena_dirty_decay_ms_set(tsd_tsdn(tsd), arena,
2344  		    *(ssize_t *)newp) : arena_muzzy_decay_ms_set(tsd_tsdn(tsd),
2345  		    arena, *(ssize_t *)newp)) {
2346  			ret = EFAULT;
2347  			goto label_return;
2348  		}
2349  	}
2350  
2351  	ret = 0;
2352  label_return:
2353  	return ret;
2354  }
2355  
2356  static int
2357  arena_i_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2358      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2359  	return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
2360  	    newlen, true);
2361  }
2362  
2363  static int
2364  arena_i_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2365      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2366  	return arena_i_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
2367  	    newlen, false);
2368  }
2369  
2370  static int
2371  arena_i_extent_hooks_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2372      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2373  	int ret;
2374  	unsigned arena_ind;
2375  	arena_t *arena;
2376  
2377  	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2378  	MIB_UNSIGNED(arena_ind, 1);
2379  	if (arena_ind < narenas_total_get()) {
2380  		extent_hooks_t *old_extent_hooks;
2381  		arena = arena_get(tsd_tsdn(tsd), arena_ind, false);
2382  		if (arena == NULL) {
2383  			if (arena_ind >= narenas_auto) {
2384  				ret = EFAULT;
2385  				goto label_return;
2386  			}
2387  			old_extent_hooks =
2388  			    (extent_hooks_t *)&extent_hooks_default;
2389  			READ(old_extent_hooks, extent_hooks_t *);
2390  			if (newp != NULL) {
2391  				/* Initialize a new arena as a side effect. */
2392  				extent_hooks_t *new_extent_hooks
2393  				    JEMALLOC_CC_SILENCE_INIT(NULL);
2394  				WRITE(new_extent_hooks, extent_hooks_t *);
2395  				arena = arena_init(tsd_tsdn(tsd), arena_ind,
2396  				    new_extent_hooks);
2397  				if (arena == NULL) {
2398  					ret = EFAULT;
2399  					goto label_return;
2400  				}
2401  			}
2402  		} else {
2403  			if (newp != NULL) {
2404  				extent_hooks_t *new_extent_hooks
2405  				    JEMALLOC_CC_SILENCE_INIT(NULL);
2406  				WRITE(new_extent_hooks, extent_hooks_t *);
2407  				old_extent_hooks = extent_hooks_set(tsd, arena,
2408  				    new_extent_hooks);
2409  				READ(old_extent_hooks, extent_hooks_t *);
2410  			} else {
2411  				old_extent_hooks = extent_hooks_get(arena);
2412  				READ(old_extent_hooks, extent_hooks_t *);
2413  			}
2414  		}
2415  	} else {
2416  		ret = EFAULT;
2417  		goto label_return;
2418  	}
2419  	ret = 0;
2420  label_return:
2421  	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2422  	return ret;
2423  }
2424  
2425  static int
2426  arena_i_retain_grow_limit_ctl(tsd_t *tsd, const size_t *mib,
2427      size_t miblen, void *oldp, size_t *oldlenp, void *newp,
2428      size_t newlen) {
2429  	int ret;
2430  	unsigned arena_ind;
2431  	arena_t *arena;
2432  
2433  	if (!opt_retain) {
2434  		/* Only relevant when retain is enabled. */
2435  		return ENOENT;
2436  	}
2437  
2438  	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2439  	MIB_UNSIGNED(arena_ind, 1);
2440  	if (arena_ind < narenas_total_get() && (arena =
2441  	    arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
2442  		size_t old_limit, new_limit;
2443  		if (newp != NULL) {
2444  			WRITE(new_limit, size_t);
2445  		}
2446  		bool err = arena_retain_grow_limit_get_set(tsd, arena,
2447  		    &old_limit, newp != NULL ? &new_limit : NULL);
2448  		if (!err) {
2449  			READ(old_limit, size_t);
2450  			ret = 0;
2451  		} else {
2452  			ret = EFAULT;
2453  		}
2454  	} else {
2455  		ret = EFAULT;
2456  	}
2457  label_return:
2458  	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2459  	return ret;
2460  }
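/*
 * Illustrative usage (not in the original source; the 256 MiB value is
 * arbitrary): cap the extent growth step for arena 0 when opt.retain is
 * enabled:
 *
 *	size_t limit = (size_t)256 << 20;
 *	size_t mib[3];
 *	size_t miblen = 3;
 *	mallctlnametomib("arena.0.retain_grow_limit", mib, &miblen);
 *	mallctlbymib(mib, miblen, NULL, NULL, &limit, sizeof(limit));
 */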
2461  
2462  static const ctl_named_node_t *
2463  arena_i_index(tsdn_t *tsdn, const size_t *mib, size_t miblen,
2464      size_t i) {
2465  	const ctl_named_node_t *ret;
2466  
2467  	malloc_mutex_lock(tsdn, &ctl_mtx);
2468  	switch (i) {
2469  	case MALLCTL_ARENAS_ALL:
2470  	case MALLCTL_ARENAS_DESTROYED:
2471  		break;
2472  	default:
2473  		if (i > ctl_arenas->narenas) {
2474  			ret = NULL;
2475  			goto label_return;
2476  		}
2477  		break;
2478  	}
2479  
2480  	ret = super_arena_i_node;
2481  label_return:
2482  	malloc_mutex_unlock(tsdn, &ctl_mtx);
2483  	return ret;
2484  }
2485  
2486  /******************************************************************************/
2487  
2488  static int
2489  arenas_narenas_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2490      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2491  	int ret;
2492  	unsigned narenas;
2493  
2494  	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2495  	READONLY();
2496  	if (*oldlenp != sizeof(unsigned)) {
2497  		ret = EINVAL;
2498  		goto label_return;
2499  	}
2500  	narenas = ctl_arenas->narenas;
2501  	READ(narenas, unsigned);
2502  
2503  	ret = 0;
2504  label_return:
2505  	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2506  	return ret;
2507  }
2508  
2509  static int
2510  arenas_decay_ms_ctl_impl(tsd_t *tsd, const size_t *mib,
2511      size_t miblen, void *oldp, size_t *oldlenp, void *newp,
2512      size_t newlen, bool dirty) {
2513  	int ret;
2514  
2515  	if (oldp != NULL && oldlenp != NULL) {
2516  		size_t oldval = (dirty ? arena_dirty_decay_ms_default_get() :
2517  		    arena_muzzy_decay_ms_default_get());
2518  		READ(oldval, ssize_t);
2519  	}
2520  	if (newp != NULL) {
2521  		if (newlen != sizeof(ssize_t)) {
2522  			ret = EINVAL;
2523  			goto label_return;
2524  		}
2525  		if (dirty ? arena_dirty_decay_ms_default_set(*(ssize_t *)newp)
2526  		    : arena_muzzy_decay_ms_default_set(*(ssize_t *)newp)) {
2527  			ret = EFAULT;
2528  			goto label_return;
2529  		}
2530  	}
2531  
2532  	ret = 0;
2533  label_return:
2534  	return ret;
2535  }
2536  
2537  static int
2538  arenas_dirty_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2539      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2540  	return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
2541  	    newlen, true);
2542  }
2543  
2544  static int
2545  arenas_muzzy_decay_ms_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2546      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2547  	return arenas_decay_ms_ctl_impl(tsd, mib, miblen, oldp, oldlenp, newp,
2548  	    newlen, false);
2549  }
2550  
2551  CTL_RO_NL_GEN(arenas_quantum, QUANTUM, size_t)
2552  CTL_RO_NL_GEN(arenas_page, PAGE, size_t)
2553  CTL_RO_NL_GEN(arenas_tcache_max, tcache_maxclass, size_t)
2554  CTL_RO_NL_GEN(arenas_nbins, SC_NBINS, unsigned)
2555  CTL_RO_NL_GEN(arenas_nhbins, nhbins, unsigned)
2556  CTL_RO_NL_GEN(arenas_bin_i_size, bin_infos[mib[2]].reg_size, size_t)
2557  CTL_RO_NL_GEN(arenas_bin_i_nregs, bin_infos[mib[2]].nregs, uint32_t)
2558  CTL_RO_NL_GEN(arenas_bin_i_slab_size, bin_infos[mib[2]].slab_size, size_t)
2559  CTL_RO_NL_GEN(arenas_bin_i_nshards, bin_infos[mib[2]].n_shards, uint32_t)
2560  static const ctl_named_node_t *
2561  arenas_bin_i_index(tsdn_t *tsdn, const size_t *mib,
2562      size_t miblen, size_t i) {
2563  	if (i > SC_NBINS) {
2564  		return NULL;
2565  	}
2566  	return super_arenas_bin_i_node;
2567  }
2568  
2569  CTL_RO_NL_GEN(arenas_nlextents, SC_NSIZES - SC_NBINS, unsigned)
2570  CTL_RO_NL_GEN(arenas_lextent_i_size, sz_index2size(SC_NBINS+(szind_t)mib[2]),
2571      size_t)
2572  static const ctl_named_node_t *
2573  arenas_lextent_i_index(tsdn_t *tsdn, const size_t *mib,
2574      size_t miblen, size_t i) {
2575  	if (i > SC_NSIZES - SC_NBINS) {
2576  		return NULL;
2577  	}
2578  	return super_arenas_lextent_i_node;
2579  }
2580  
2581  static int
2582  arenas_create_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2583      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2584  	int ret;
2585  	extent_hooks_t *extent_hooks;
2586  	unsigned arena_ind;
2587  
2588  	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2589  
2590  	extent_hooks = (extent_hooks_t *)&extent_hooks_default;
2591  	WRITE(extent_hooks, extent_hooks_t *);
2592  	if ((arena_ind = ctl_arena_init(tsd, extent_hooks)) == UINT_MAX) {
2593  		ret = EAGAIN;
2594  		goto label_return;
2595  	}
2596  	READ(arena_ind, unsigned);
2597  
2598  	ret = 0;
2599  label_return:
2600  	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2601  	return ret;
2602  }
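/*
 * Illustrative usage (not in the original source): create an arena with
 * default extent hooks, then allocate from it explicitly:
 *
 *	unsigned ind;
 *	size_t sz = sizeof(ind);
 *	mallctl("arenas.create", &ind, &sz, NULL, 0);
 *	void *p = mallocx(4096, MALLOCX_ARENA(ind));
 */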
2603  
2604  static int
2605  arenas_lookup_ctl(tsd_t *tsd, const size_t *mib,
2606      size_t miblen, void *oldp, size_t *oldlenp, void *newp,
2607      size_t newlen) {
2608  	int ret;
2609  	unsigned arena_ind;
2610  	void *ptr;
2611  	extent_t *extent;
2612  	arena_t *arena;
2613  
2614  	ptr = NULL;
2615  	ret = EINVAL;
2616  	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
2617  	WRITE(ptr, void *);
2618  	extent = iealloc(tsd_tsdn(tsd), ptr);
2619  	if (extent == NULL)
2620  		goto label_return;
2621  
2622  	arena = extent_arena_get(extent);
2623  	if (arena == NULL)
2624  		goto label_return;
2625  
2626  	arena_ind = arena_ind_get(arena);
2627  	READ(arena_ind, unsigned);
2628  
2629  	ret = 0;
2630  label_return:
2631  	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
2632  	return ret;
2633  }
2634  
2635  /******************************************************************************/
2636  
2637  static int
2638  prof_thread_active_init_ctl(tsd_t *tsd, const size_t *mib,
2639      size_t miblen, void *oldp, size_t *oldlenp, void *newp,
2640      size_t newlen) {
2641  	int ret;
2642  	bool oldval;
2643  
2644  	if (!config_prof) {
2645  		return ENOENT;
2646  	}
2647  
2648  	if (newp != NULL) {
2649  		if (newlen != sizeof(bool)) {
2650  			ret = EINVAL;
2651  			goto label_return;
2652  		}
2653  		oldval = prof_thread_active_init_set(tsd_tsdn(tsd),
2654  		    *(bool *)newp);
2655  	} else {
2656  		oldval = prof_thread_active_init_get(tsd_tsdn(tsd));
2657  	}
2658  	READ(oldval, bool);
2659  
2660  	ret = 0;
2661  label_return:
2662  	return ret;
2663  }
2664  
2665  static int
2666  prof_active_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2667      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2668  	int ret;
2669  	bool oldval;
2670  
2671  	if (!config_prof) {
2672  		return ENOENT;
2673  	}
2674  
2675  	if (newp != NULL) {
2676  		if (newlen != sizeof(bool)) {
2677  			ret = EINVAL;
2678  			goto label_return;
2679  		}
2680  		oldval = prof_active_set(tsd_tsdn(tsd), *(bool *)newp);
2681  	} else {
2682  		oldval = prof_active_get(tsd_tsdn(tsd));
2683  	}
2684  	READ(oldval, bool);
2685  
2686  	ret = 0;
2687  label_return:
2688  	return ret;
2689  }
2690  
2691  static int
2692  prof_dump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2693      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2694  	int ret;
2695  	const char *filename = NULL;
2696  
2697  	if (!config_prof) {
2698  		return ENOENT;
2699  	}
2700  
2701  	WRITEONLY();
2702  	WRITE(filename, const char *);
2703  
2704  	if (prof_mdump(tsd, filename)) {
2705  		ret = EFAULT;
2706  		goto label_return;
2707  	}
2708  
2709  	ret = 0;
2710  label_return:
2711  	return ret;
2712  }
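/*
 * Illustrative usage (not in the original source; requires opt.prof):
 * dump a heap profile to an explicit file, or pass a NULL filename to let
 * jemalloc generate a name from opt.prof_prefix:
 *
 *	const char *fname = "prof.out";
 *	mallctl("prof.dump", NULL, NULL, &fname, sizeof(fname));
 */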
2713  
2714  static int
2715  prof_gdump_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2716      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2717  	int ret;
2718  	bool oldval;
2719  
2720  	if (!config_prof) {
2721  		return ENOENT;
2722  	}
2723  
2724  	if (newp != NULL) {
2725  		if (newlen != sizeof(bool)) {
2726  			ret = EINVAL;
2727  			goto label_return;
2728  		}
2729  		oldval = prof_gdump_set(tsd_tsdn(tsd), *(bool *)newp);
2730  	} else {
2731  		oldval = prof_gdump_get(tsd_tsdn(tsd));
2732  	}
2733  	READ(oldval, bool);
2734  
2735  	ret = 0;
2736  label_return:
2737  	return ret;
2738  }
2739  
2740  static int
2741  prof_reset_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
2742      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
2743  	int ret;
2744  	size_t lg_sample = lg_prof_sample;
2745  
2746  	if (!config_prof) {
2747  		return ENOENT;
2748  	}
2749  
2750  	WRITEONLY();
2751  	WRITE(lg_sample, size_t);
2752  	if (lg_sample >= (sizeof(uint64_t) << 3)) {
2753  		lg_sample = (sizeof(uint64_t) << 3) - 1;
2754  	}
2755  
2756  	prof_reset(tsd, lg_sample);
2757  
2758  	ret = 0;
2759  label_return:
2760  	return ret;
2761  }
2762  
2763  CTL_RO_NL_CGEN(config_prof, prof_interval, prof_interval, uint64_t)
2764  CTL_RO_NL_CGEN(config_prof, lg_prof_sample, lg_prof_sample, size_t)
2765  
2766  static int
2767  prof_log_start_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2768      size_t *oldlenp, void *newp, size_t newlen) {
2769  	int ret;
2770  
2771  	const char *filename = NULL;
2772  
2773  	if (!config_prof) {
2774  		return ENOENT;
2775  	}
2776  
2777  	WRITEONLY();
2778  	WRITE(filename, const char *);
2779  
2780  	if (prof_log_start(tsd_tsdn(tsd), filename)) {
2781  		ret = EFAULT;
2782  		goto label_return;
2783  	}
2784  
2785  	ret = 0;
2786  label_return:
2787  	return ret;
2788  }
2789  
2790  static int
2791  prof_log_stop_ctl(tsd_t *tsd, const size_t *mib, size_t miblen, void *oldp,
2792      size_t *oldlenp, void *newp, size_t newlen) {
2793  	if (!config_prof) {
2794  		return ENOENT;
2795  	}
2796  
2797  	if (prof_log_stop(tsd_tsdn(tsd))) {
2798  		return EFAULT;
2799  	}
2800  
2801  	return 0;
2802  }
2803  
2804  /******************************************************************************/
2805  
2806  CTL_RO_CGEN(config_stats, stats_allocated, ctl_stats->allocated, size_t)
2807  CTL_RO_CGEN(config_stats, stats_active, ctl_stats->active, size_t)
2808  CTL_RO_CGEN(config_stats, stats_metadata, ctl_stats->metadata, size_t)
2809  CTL_RO_CGEN(config_stats, stats_metadata_thp, ctl_stats->metadata_thp, size_t)
2810  CTL_RO_CGEN(config_stats, stats_resident, ctl_stats->resident, size_t)
2811  CTL_RO_CGEN(config_stats, stats_mapped, ctl_stats->mapped, size_t)
2812  CTL_RO_CGEN(config_stats, stats_retained, ctl_stats->retained, size_t)
2813  
2814  CTL_RO_CGEN(config_stats, stats_background_thread_num_threads,
2815      ctl_stats->background_thread.num_threads, size_t)
2816  CTL_RO_CGEN(config_stats, stats_background_thread_num_runs,
2817      ctl_stats->background_thread.num_runs, uint64_t)
2818  CTL_RO_CGEN(config_stats, stats_background_thread_run_interval,
2819      nstime_ns(&ctl_stats->background_thread.run_interval), uint64_t)
2820  
2821  CTL_RO_GEN(stats_arenas_i_dss, arenas_i(mib[2])->dss, const char *)
2822  CTL_RO_GEN(stats_arenas_i_dirty_decay_ms, arenas_i(mib[2])->dirty_decay_ms,
2823      ssize_t)
2824  CTL_RO_GEN(stats_arenas_i_muzzy_decay_ms, arenas_i(mib[2])->muzzy_decay_ms,
2825      ssize_t)
2826  CTL_RO_GEN(stats_arenas_i_nthreads, arenas_i(mib[2])->nthreads, unsigned)
2827  CTL_RO_GEN(stats_arenas_i_uptime,
2828      nstime_ns(&arenas_i(mib[2])->astats->astats.uptime), uint64_t)
2829  CTL_RO_GEN(stats_arenas_i_pactive, arenas_i(mib[2])->pactive, size_t)
2830  CTL_RO_GEN(stats_arenas_i_pdirty, arenas_i(mib[2])->pdirty, size_t)
2831  CTL_RO_GEN(stats_arenas_i_pmuzzy, arenas_i(mib[2])->pmuzzy, size_t)
2832  CTL_RO_CGEN(config_stats, stats_arenas_i_mapped,
2833      atomic_load_zu(&arenas_i(mib[2])->astats->astats.mapped, ATOMIC_RELAXED),
2834      size_t)
2835  CTL_RO_CGEN(config_stats, stats_arenas_i_retained,
2836      atomic_load_zu(&arenas_i(mib[2])->astats->astats.retained, ATOMIC_RELAXED),
2837      size_t)
2838  CTL_RO_CGEN(config_stats, stats_arenas_i_extent_avail,
2839      atomic_load_zu(&arenas_i(mib[2])->astats->astats.extent_avail,
2840          ATOMIC_RELAXED),
2841      size_t)
2842  
2843  CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_npurge,
2844      ctl_arena_stats_read_u64(
2845      &arenas_i(mib[2])->astats->astats.decay_dirty.npurge), uint64_t)
2846  CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_nmadvise,
2847      ctl_arena_stats_read_u64(
2848      &arenas_i(mib[2])->astats->astats.decay_dirty.nmadvise), uint64_t)
2849  CTL_RO_CGEN(config_stats, stats_arenas_i_dirty_purged,
2850      ctl_arena_stats_read_u64(
2851      &arenas_i(mib[2])->astats->astats.decay_dirty.purged), uint64_t)
2852  
2853  CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_npurge,
2854      ctl_arena_stats_read_u64(
2855      &arenas_i(mib[2])->astats->astats.decay_muzzy.npurge), uint64_t)
2856  CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_nmadvise,
2857      ctl_arena_stats_read_u64(
2858      &arenas_i(mib[2])->astats->astats.decay_muzzy.nmadvise), uint64_t)
2859  CTL_RO_CGEN(config_stats, stats_arenas_i_muzzy_purged,
2860      ctl_arena_stats_read_u64(
2861      &arenas_i(mib[2])->astats->astats.decay_muzzy.purged), uint64_t)
2862  
2863  CTL_RO_CGEN(config_stats, stats_arenas_i_base,
2864      atomic_load_zu(&arenas_i(mib[2])->astats->astats.base, ATOMIC_RELAXED),
2865      size_t)
2866  CTL_RO_CGEN(config_stats, stats_arenas_i_internal,
2867      atomic_load_zu(&arenas_i(mib[2])->astats->astats.internal, ATOMIC_RELAXED),
2868      size_t)
2869  CTL_RO_CGEN(config_stats, stats_arenas_i_metadata_thp,
2870      atomic_load_zu(&arenas_i(mib[2])->astats->astats.metadata_thp,
2871      ATOMIC_RELAXED), size_t)
2872  CTL_RO_CGEN(config_stats, stats_arenas_i_tcache_bytes,
2873      atomic_load_zu(&arenas_i(mib[2])->astats->astats.tcache_bytes,
2874      ATOMIC_RELAXED), size_t)
2875  CTL_RO_CGEN(config_stats, stats_arenas_i_resident,
2876      atomic_load_zu(&arenas_i(mib[2])->astats->astats.resident, ATOMIC_RELAXED),
2877      size_t)
2878  CTL_RO_CGEN(config_stats, stats_arenas_i_abandoned_vm,
2879      atomic_load_zu(&arenas_i(mib[2])->astats->astats.abandoned_vm,
2880      ATOMIC_RELAXED), size_t)
2881  
2882  CTL_RO_CGEN(config_stats, stats_arenas_i_small_allocated,
2883      arenas_i(mib[2])->astats->allocated_small, size_t)
2884  CTL_RO_CGEN(config_stats, stats_arenas_i_small_nmalloc,
2885      arenas_i(mib[2])->astats->nmalloc_small, uint64_t)
2886  CTL_RO_CGEN(config_stats, stats_arenas_i_small_ndalloc,
2887      arenas_i(mib[2])->astats->ndalloc_small, uint64_t)
2888  CTL_RO_CGEN(config_stats, stats_arenas_i_small_nrequests,
2889      arenas_i(mib[2])->astats->nrequests_small, uint64_t)
2890  CTL_RO_CGEN(config_stats, stats_arenas_i_small_nfills,
2891      arenas_i(mib[2])->astats->nfills_small, uint64_t)
2892  CTL_RO_CGEN(config_stats, stats_arenas_i_small_nflushes,
2893      arenas_i(mib[2])->astats->nflushes_small, uint64_t)
2894  CTL_RO_CGEN(config_stats, stats_arenas_i_large_allocated,
2895      atomic_load_zu(&arenas_i(mib[2])->astats->astats.allocated_large,
2896      ATOMIC_RELAXED), size_t)
2897  CTL_RO_CGEN(config_stats, stats_arenas_i_large_nmalloc,
2898      ctl_arena_stats_read_u64(
2899      &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
2900  CTL_RO_CGEN(config_stats, stats_arenas_i_large_ndalloc,
2901      ctl_arena_stats_read_u64(
2902      &arenas_i(mib[2])->astats->astats.ndalloc_large), uint64_t)
2903  CTL_RO_CGEN(config_stats, stats_arenas_i_large_nrequests,
2904      ctl_arena_stats_read_u64(
2905      &arenas_i(mib[2])->astats->astats.nrequests_large), uint64_t)
2906  /*
2907   * Note: "nmalloc_large" here instead of "nfills" in the read.  This is
2908   * intentional (large has no batch fill).
2909   */
2910  CTL_RO_CGEN(config_stats, stats_arenas_i_large_nfills,
2911      ctl_arena_stats_read_u64(
2912      &arenas_i(mib[2])->astats->astats.nmalloc_large), uint64_t)
2913  CTL_RO_CGEN(config_stats, stats_arenas_i_large_nflushes,
2914      ctl_arena_stats_read_u64(
2915      &arenas_i(mib[2])->astats->astats.nflushes_large), uint64_t)
2916  
2917  /* Lock profiling related APIs below. */
2918  #define RO_MUTEX_CTL_GEN(n, l)						\
2919  CTL_RO_CGEN(config_stats, stats_##n##_num_ops,				\
2920      l.n_lock_ops, uint64_t)						\
2921  CTL_RO_CGEN(config_stats, stats_##n##_num_wait,				\
2922      l.n_wait_times, uint64_t)						\
2923  CTL_RO_CGEN(config_stats, stats_##n##_num_spin_acq,			\
2924      l.n_spin_acquired, uint64_t)					\
2925  CTL_RO_CGEN(config_stats, stats_##n##_num_owner_switch,			\
2926      l.n_owner_switches, uint64_t) 					\
2927  CTL_RO_CGEN(config_stats, stats_##n##_total_wait_time,			\
2928      nstime_ns(&l.tot_wait_time), uint64_t)				\
2929  CTL_RO_CGEN(config_stats, stats_##n##_max_wait_time,			\
2930      nstime_ns(&l.max_wait_time), uint64_t)				\
2931  CTL_RO_CGEN(config_stats, stats_##n##_max_num_thds,			\
2932      l.max_n_thds, uint32_t)
2933  
2934  /* Global mutexes. */
2935  #define OP(mtx)								\
2936      RO_MUTEX_CTL_GEN(mutexes_##mtx,					\
2937          ctl_stats->mutex_prof_data[global_prof_mutex_##mtx])
2938  MUTEX_PROF_GLOBAL_MUTEXES
2939  #undef OP
2940  
2941  /* Per arena mutexes */
2942  #define OP(mtx) RO_MUTEX_CTL_GEN(arenas_i_mutexes_##mtx,		\
2943      arenas_i(mib[2])->astats->astats.mutex_prof_data[arena_prof_mutex_##mtx])
2944  MUTEX_PROF_ARENA_MUTEXES
2945  #undef OP
2946  
2947  /* tcache bin mutex */
2948  RO_MUTEX_CTL_GEN(arenas_i_bins_j_mutex,
2949      arenas_i(mib[2])->astats->bstats[mib[4]].mutex_data)
2950  #undef RO_MUTEX_CTL_GEN
2951  
2952  /* Resets all mutex stats, including global, arena and bin mutexes. */
2953  static int
2954  stats_mutexes_reset_ctl(tsd_t *tsd, const size_t *mib,
2955      size_t miblen, void *oldp, size_t *oldlenp,
2956      void *newp, size_t newlen) {
2957  	if (!config_stats) {
2958  		return ENOENT;
2959  	}
2960  
2961  	tsdn_t *tsdn = tsd_tsdn(tsd);
2962  
2963  #define MUTEX_PROF_RESET(mtx)						\
2964      malloc_mutex_lock(tsdn, &mtx);					\
2965      malloc_mutex_prof_data_reset(tsdn, &mtx);				\
2966      malloc_mutex_unlock(tsdn, &mtx);
2967  
2968  	/* Global mutexes: ctl and prof. */
2969  	MUTEX_PROF_RESET(ctl_mtx);
2970  	if (have_background_thread) {
2971  		MUTEX_PROF_RESET(background_thread_lock);
2972  	}
2973  	if (config_prof && opt_prof) {
2974  		MUTEX_PROF_RESET(bt2gctx_mtx);
2975  	}
2976  
2977  
2978  	/* Per arena mutexes. */
2979  	unsigned n = narenas_total_get();
2980  
2981  	for (unsigned i = 0; i < n; i++) {
2982  		arena_t *arena = arena_get(tsdn, i, false);
2983  		if (!arena) {
2984  			continue;
2985  		}
2986  		MUTEX_PROF_RESET(arena->large_mtx);
2987  		MUTEX_PROF_RESET(arena->extent_avail_mtx);
2988  		MUTEX_PROF_RESET(arena->extents_dirty.mtx);
2989  		MUTEX_PROF_RESET(arena->extents_muzzy.mtx);
2990  		MUTEX_PROF_RESET(arena->extents_retained.mtx);
2991  		MUTEX_PROF_RESET(arena->decay_dirty.mtx);
2992  		MUTEX_PROF_RESET(arena->decay_muzzy.mtx);
2993  		MUTEX_PROF_RESET(arena->tcache_ql_mtx);
2994  		MUTEX_PROF_RESET(arena->base->mtx);
2995  
2996  		for (szind_t i = 0; i < SC_NBINS; i++) {
2997  			for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
2998  				bin_t *bin = &arena->bins[i].bin_shards[j];
2999  				MUTEX_PROF_RESET(bin->lock);
3000  			}
3001  		}
3002  	}
3003  #undef MUTEX_PROF_RESET
3004  	return 0;
3005  }
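/*
 * Illustrative usage (not in the original source): zero the accumulated
 * mutex profiling counters between measurement windows:
 *
 *	mallctl("stats.mutexes.reset", NULL, NULL, NULL, 0);
 */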
3006  
3007  CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nmalloc,
3008      arenas_i(mib[2])->astats->bstats[mib[4]].nmalloc, uint64_t)
3009  CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_ndalloc,
3010      arenas_i(mib[2])->astats->bstats[mib[4]].ndalloc, uint64_t)
3011  CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nrequests,
3012      arenas_i(mib[2])->astats->bstats[mib[4]].nrequests, uint64_t)
3013  CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curregs,
3014      arenas_i(mib[2])->astats->bstats[mib[4]].curregs, size_t)
3015  CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nfills,
3016      arenas_i(mib[2])->astats->bstats[mib[4]].nfills, uint64_t)
3017  CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nflushes,
3018      arenas_i(mib[2])->astats->bstats[mib[4]].nflushes, uint64_t)
3019  CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nslabs,
3020      arenas_i(mib[2])->astats->bstats[mib[4]].nslabs, uint64_t)
3021  CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nreslabs,
3022      arenas_i(mib[2])->astats->bstats[mib[4]].reslabs, uint64_t)
3023  CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_curslabs,
3024      arenas_i(mib[2])->astats->bstats[mib[4]].curslabs, size_t)
3025  CTL_RO_CGEN(config_stats, stats_arenas_i_bins_j_nonfull_slabs,
3026      arenas_i(mib[2])->astats->bstats[mib[4]].nonfull_slabs, size_t)
3027  
3028  static const ctl_named_node_t *
3029  stats_arenas_i_bins_j_index(tsdn_t *tsdn, const size_t *mib,
3030      size_t miblen, size_t j) {
3031  	if (j > SC_NBINS) {
3032  		return NULL;
3033  	}
3034  	return super_stats_arenas_i_bins_j_node;
3035  }
3036  
3037  CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nmalloc,
3038      ctl_arena_stats_read_u64(
3039      &arenas_i(mib[2])->astats->lstats[mib[4]].nmalloc), uint64_t)
3040  CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_ndalloc,
3041      ctl_arena_stats_read_u64(
3042      &arenas_i(mib[2])->astats->lstats[mib[4]].ndalloc), uint64_t)
3043  CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_nrequests,
3044      ctl_arena_stats_read_u64(
3045      &arenas_i(mib[2])->astats->lstats[mib[4]].nrequests), uint64_t)
3046  CTL_RO_CGEN(config_stats, stats_arenas_i_lextents_j_curlextents,
3047      arenas_i(mib[2])->astats->lstats[mib[4]].curlextents, size_t)
3048  
3049  static const ctl_named_node_t *
3050  stats_arenas_i_lextents_j_index(tsdn_t *tsdn, const size_t *mib,
3051      size_t miblen, size_t j) {
3052  	if (j > SC_NSIZES - SC_NBINS) {
3053  		return NULL;
3054  	}
3055  	return super_stats_arenas_i_lextents_j_node;
3056  }
3057  
3058  CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_ndirty,
3059      atomic_load_zu(
3060          &arenas_i(mib[2])->astats->estats[mib[4]].ndirty,
3061  	ATOMIC_RELAXED), size_t);
3062  CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nmuzzy,
3063      atomic_load_zu(
3064          &arenas_i(mib[2])->astats->estats[mib[4]].nmuzzy,
3065  	ATOMIC_RELAXED), size_t);
3066  CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_nretained,
3067      atomic_load_zu(
3068          &arenas_i(mib[2])->astats->estats[mib[4]].nretained,
3069  	ATOMIC_RELAXED), size_t);
3070  CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_dirty_bytes,
3071      atomic_load_zu(
3072          &arenas_i(mib[2])->astats->estats[mib[4]].dirty_bytes,
3073  	ATOMIC_RELAXED), size_t);
3074  CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_muzzy_bytes,
3075      atomic_load_zu(
3076          &arenas_i(mib[2])->astats->estats[mib[4]].muzzy_bytes,
3077  	ATOMIC_RELAXED), size_t);
3078  CTL_RO_CGEN(config_stats, stats_arenas_i_extents_j_retained_bytes,
3079      atomic_load_zu(
3080          &arenas_i(mib[2])->astats->estats[mib[4]].retained_bytes,
3081  	ATOMIC_RELAXED), size_t);
3082  
3083  static const ctl_named_node_t *
3084  stats_arenas_i_extents_j_index(tsdn_t *tsdn, const size_t *mib,
3085      size_t miblen, size_t j) {
3086  	if (j >= SC_NPSIZES) {
3087  		return NULL;
3088  	}
3089  	return super_stats_arenas_i_extents_j_node;
3090  }
3091  
3092  static bool
3093  ctl_arenas_i_verify(size_t i) {
3094  	size_t a = arenas_i2a_impl(i, true, true);
3095  	if (a == UINT_MAX || !ctl_arenas->arenas[a]->initialized) {
3096  		return true;
3097  	}
3098  
3099  	return false;
3100  }
3101  
3102  static const ctl_named_node_t *
3103  stats_arenas_i_index(tsdn_t *tsdn, const size_t *mib,
3104      size_t miblen, size_t i) {
3105  	const ctl_named_node_t *ret;
3106  
3107  	malloc_mutex_lock(tsdn, &ctl_mtx);
3108  	if (ctl_arenas_i_verify(i)) {
3109  		ret = NULL;
3110  		goto label_return;
3111  	}
3112  
3113  	ret = super_stats_arenas_i_node;
3114  label_return:
3115  	malloc_mutex_unlock(tsdn, &ctl_mtx);
3116  	return ret;
3117  }
3118  
3119  static int
3120  experimental_hooks_install_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
3121      void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
3122  	int ret;
3123  	if (oldp == NULL || oldlenp == NULL|| newp == NULL) {
3124  		ret = EINVAL;
3125  		goto label_return;
3126  	}
3127  	/*
3128  	 * Note: this is a *private* struct.  This is an experimental interface;
3129  	 * forcing the user to know the jemalloc internals well enough to
3130  	 * extract the ABI hopefully ensures nobody gets too comfortable with
3131  	 * this API, which can change at a moment's notice.
3132  	 */
3133  	hooks_t hooks;
3134  	WRITE(hooks, hooks_t);
3135  	void *handle = hook_install(tsd_tsdn(tsd), &hooks);
3136  	if (handle == NULL) {
3137  		ret = EAGAIN;
3138  		goto label_return;
3139  	}
3140  	READ(handle, void *);
3141  
3142  	ret = 0;
3143  label_return:
3144  	return ret;
3145  }

static int
experimental_hooks_remove_ctl(tsd_t *tsd, const size_t *mib, size_t miblen,
    void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;
	WRITEONLY();
	void *handle = NULL;
	WRITE(handle, void *);
	if (handle == NULL) {
		ret = EINVAL;
		goto label_return;
	}
	hook_remove(tsd_tsdn(tsd), handle);
	ret = 0;
label_return:
	return ret;
}
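
/*
 * Caller-side sketch (for exposition only; hooks_t is a private struct, per
 * the note above, so populating it requires knowledge of the internal ABI):
 * install reads a hooks_t through newp and returns an opaque handle through
 * oldp; remove takes that handle back through newp.
 *
 *     hooks_t hooks;             (populated against the internal ABI)
 *     void *handle;
 *     size_t sz = sizeof(handle);
 *     mallctl("experimental.hooks.install", &handle, &sz, &hooks,
 *         sizeof(hooks));
 *     ...
 *     mallctl("experimental.hooks.remove", NULL, NULL, &handle,
 *         sizeof(handle));
 */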

/*
 * Output six memory utilization entries for an input pointer, the first one
 * of type (void *) and the remaining five of type size_t, describing the
 * following (in the same order):
 *
 * (a) memory address of the extent a potential reallocation would go into,
 * == the five fields below describe the extent the pointer resides in ==
 * (b) number of free regions in the extent,
 * (c) number of regions in the extent,
 * (d) size of the extent in terms of bytes,
 * (e) total number of free regions in the bin the extent belongs to, and
 * (f) total number of regions in the bin the extent belongs to.
 *
 * Note that "(e)" and "(f)" are only available when stats are enabled;
 * otherwise their values are undefined.
 *
 * This API is mainly intended for small class allocations, where extents are
 * used as slabs.
 *
 * In case of large class allocations, "(a)" will be NULL, and "(e)" and "(f)"
 * will be zero (if stats are enabled; otherwise undefined).  The other three
 * fields will be properly set, though the values are trivial: "(b)" will be
 * 0, "(c)" will be 1, and "(d)" will be the usable size.
 *
 * The input pointer and size are respectively passed in by newp and newlen,
 * and the output fields and size are respectively oldp and *oldlenp.
 *
 * It can be beneficial to define the following macros to make it easier to
 * access the output:
 *
 * #define SLABCUR_READ(out) (*(void **)out)
 * #define COUNTS(out) ((size_t *)((void **)out + 1))
 * #define NFREE_READ(out) COUNTS(out)[0]
 * #define NREGS_READ(out) COUNTS(out)[1]
 * #define SIZE_READ(out) COUNTS(out)[2]
 * #define BIN_NFREE_READ(out) COUNTS(out)[3]
 * #define BIN_NREGS_READ(out) COUNTS(out)[4]
 *
 * and then write e.g. NFREE_READ(oldp) to fetch the output.  See the unit
 * test test_query in test/unit/extent_util.c for an example.
 *
 * For a typical defragmentation workflow making use of this API for
 * understanding the fragmentation level, please refer to the comment for
 * experimental_utilization_batch_query_ctl.
 *
 * It's up to the application how to determine the significance of
 * fragmentation relying on the outputs returned.  Possible choices are:
 *
 * (a) if extent utilization ratio is below certain threshold,
 * (b) if extent memory consumption is above certain threshold,
 * (c) if extent utilization ratio is significantly below bin utilization
 * ratio,
 * (d) if input pointer deviates a lot from potential reallocation address, or
 * (e) some selection/combination of the above.
 *
 * The caller needs to make sure that the input/output arguments are valid,
 * in particular, that the size of the output is correct, i.e.:
 *
 *     *oldlenp = sizeof(void *) + sizeof(size_t) * 5
 *
 * Otherwise, the function immediately returns EINVAL without touching
 * anything.
 *
 * In the rare case where there's no associated extent found for the input
 * pointer, the function zeros out all output fields and returns.  Please
 * refer to the comment for experimental_utilization_batch_query_ctl to
 * understand the motivation arising from C++ usage.
 */
static int
experimental_utilization_query_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	assert(sizeof(extent_util_stats_verbose_t)
	    == sizeof(void *) + sizeof(size_t) * 5);

	if (oldp == NULL || oldlenp == NULL
	    || *oldlenp != sizeof(extent_util_stats_verbose_t)
	    || newp == NULL) {
		ret = EINVAL;
		goto label_return;
	}

	void *ptr = NULL;
	WRITE(ptr, void *);
	extent_util_stats_verbose_t *util_stats
	    = (extent_util_stats_verbose_t *)oldp;
	extent_util_stats_verbose_get(tsd_tsdn(tsd), ptr,
	    &util_stats->nfree, &util_stats->nregs, &util_stats->size,
	    &util_stats->bin_nfree, &util_stats->bin_nregs,
	    &util_stats->slabcur_addr);
	ret = 0;

label_return:
	return ret;
}
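
/*
 * Caller-side sketch (for exposition only), using the macros suggested in
 * the comment above:
 *
 *     void *ptr = malloc(42);
 *     size_t out_sz = sizeof(void *) + sizeof(size_t) * 5;
 *     void *out = malloc(out_sz);
 *     if (mallctl("experimental.utilization.query", out, &out_sz, &ptr,
 *         sizeof(ptr)) == 0) {
 *             size_t nfree = NFREE_READ(out);
 *             size_t nregs = NREGS_READ(out);
 *             ... (compute utilization as (nregs - nfree) / nregs) ...
 *     }
 */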

/*
 * Given an input array of pointers, output three memory utilization entries
 * of type size_t for each input pointer about the extent it resides in:
 *
 * (a) number of free regions in the extent,
 * (b) number of regions in the extent, and
 * (c) size of the extent in terms of bytes.
 *
 * This API is mainly intended for small class allocations, where extents are
 * used as slabs.  In case of large class allocations, the outputs are
 * trivial: "(a)" will be 0, "(b)" will be 1, and "(c)" will be the usable
 * size.
 *
 * Note that multiple input pointers may reside in the same extent, so the
 * output fields may contain duplicates.
 *
 * The format of the input/output looks like:
 *
 * input[0]:  1st_pointer_to_query	|  output[0]: 1st_extent_n_free_regions
 *					|  output[1]: 1st_extent_n_regions
 *					|  output[2]: 1st_extent_size
 * input[1]:  2nd_pointer_to_query	|  output[3]: 2nd_extent_n_free_regions
 *					|  output[4]: 2nd_extent_n_regions
 *					|  output[5]: 2nd_extent_size
 * ...					|  ...
 *
 * The input array and size are respectively passed in by newp and newlen,
 * and the output array and size are respectively oldp and *oldlenp.
 *
 * It can be beneficial to define the following macros to make it easier to
 * access the output:
 *
 * #define NFREE_READ(out, i) out[(i) * 3]
 * #define NREGS_READ(out, i) out[(i) * 3 + 1]
 * #define SIZE_READ(out, i) out[(i) * 3 + 2]
 *
 * and then write e.g. NFREE_READ(oldp, i) to fetch the output.  See the unit
 * test test_batch in test/unit/extent_util.c for a concrete example.
 *
 * A typical workflow would be composed of the following steps:
 *
 * (1) flush tcache: mallctl("thread.tcache.flush", ...)
 * (2) initialize input array of pointers to query fragmentation
 * (3) allocate output array to hold utilization statistics
 * (4) query utilization: mallctl("experimental.utilization.batch_query", ...)
 * (5) (optional) decide if it's worthwhile to defragment; otherwise stop here
 * (6) disable tcache: mallctl("thread.tcache.enabled", ...)
 * (7) defragment allocations with significant fragmentation, e.g.:
 *         for each allocation {
 *             if it's fragmented {
 *                 malloc(...);
 *                 memcpy(...);
 *                 free(...);
 *             }
 *         }
 * (8) enable tcache: mallctl("thread.tcache.enabled", ...)
 *
 * The application can determine the significance of fragmentation itself,
 * relying on the statistics returned, both at the overall level i.e. step
 * "(5)" and at individual allocation level i.e. within step "(7)".  Possible
 * choices are:
 *
 * (a) whether memory utilization ratio is below certain threshold,
 * (b) whether memory consumption is above certain threshold, or
 * (c) some combination of the two.
 *
 * The caller needs to make sure that the input/output arrays are valid and
 * that their sizes are correct and mutually consistent, meaning:
 *
 * (a) newlen = n_pointers * sizeof(const void *)
 * (b) *oldlenp = n_pointers * sizeof(size_t) * 3
 * (c) n_pointers > 0
 *
 * Otherwise, the function immediately returns EINVAL without touching
 * anything.
 *
 * In the rare case where there's no associated extent found for some
 * pointers, rather than immediately terminating the computation and raising
 * an error, the function simply zeros out the corresponding output fields
 * and continues the computation until all input pointers are handled.  The
 * motivations for such a design are as follows:
 *
 * (a) The function always either processes nothing or processes everything,
 * and never leaves the output half touched and half untouched.
 *
 * (b) It facilitates usage needs especially common in C++.  A vast variety
 * of C++ objects are instantiated with multiple dynamic memory allocations.
 * For example, std::string and std::vector typically use at least two
 * allocations, one for the metadata and one for the actual content.  Other
 * types may use even more allocations.  When inquiring about utilization
 * statistics, the caller often wants to examine all such allocations,
 * especially internal one(s), rather than just the topmost one.  The issue
 * arises when some implementations perform optimizations to reduce/aggregate
 * some internal allocations, e.g. putting short strings directly into the
 * metadata, and such decisions are not known to the caller.  Therefore, we
 * permit pointers to memory that may not have been returned by previous
 * malloc calls, and we provide the caller a convenient way to identify such
 * cases.
 */
static int
experimental_utilization_batch_query_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	int ret;

	assert(sizeof(extent_util_stats_t) == sizeof(size_t) * 3);

	const size_t len = newlen / sizeof(const void *);
	if (oldp == NULL || oldlenp == NULL || newp == NULL || newlen == 0
	    || newlen != len * sizeof(const void *)
	    || *oldlenp != len * sizeof(extent_util_stats_t)) {
		ret = EINVAL;
		goto label_return;
	}

	void **ptrs = (void **)newp;
	extent_util_stats_t *util_stats = (extent_util_stats_t *)oldp;
	size_t i;
	for (i = 0; i < len; ++i) {
		extent_util_stats_get(tsd_tsdn(tsd), ptrs[i],
		    &util_stats[i].nfree, &util_stats[i].nregs,
		    &util_stats[i].size);
	}
	ret = 0;

label_return:
	return ret;
}
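
/*
 * Caller-side sketch (for exposition only), following the workflow above
 * and using the NFREE_READ/NREGS_READ macros suggested in the comment:
 *
 *     const void *ptrs[2] = {ptr_a, ptr_b};
 *     size_t out_sz = sizeof(size_t) * 3 * 2;
 *     size_t *out = malloc(out_sz);
 *     mallctl("thread.tcache.flush", NULL, NULL, NULL, 0);
 *     if (mallctl("experimental.utilization.batch_query", out, &out_sz,
 *         (void *)ptrs, sizeof(ptrs)) == 0) {
 *             double util = 1.0 - (double)NFREE_READ(out, 0) /
 *                 (double)NREGS_READ(out, 0);
 *             ... (decide whether defragmenting ptr_a is worthwhile) ...
 *     }
 */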

static const ctl_named_node_t *
experimental_arenas_i_index(tsdn_t *tsdn, const size_t *mib,
    size_t miblen, size_t i) {
	const ctl_named_node_t *ret;

	malloc_mutex_lock(tsdn, &ctl_mtx);
	if (ctl_arenas_i_verify(i)) {
		ret = NULL;
		goto label_return;
	}
	ret = super_experimental_arenas_i_node;
label_return:
	malloc_mutex_unlock(tsdn, &ctl_mtx);
	return ret;
}

static int
experimental_arenas_i_pactivep_ctl(tsd_t *tsd, const size_t *mib,
    size_t miblen, void *oldp, size_t *oldlenp, void *newp, size_t newlen) {
	if (!config_stats) {
		return ENOENT;
	}
	if (oldp == NULL || oldlenp == NULL || *oldlenp != sizeof(size_t *)) {
		return EINVAL;
	}

	unsigned arena_ind;
	arena_t *arena;
	int ret;
	size_t *pactivep;

	malloc_mutex_lock(tsd_tsdn(tsd), &ctl_mtx);
	READONLY();
	MIB_UNSIGNED(arena_ind, 2);
	if (arena_ind < narenas_total_get() && (arena =
	    arena_get(tsd_tsdn(tsd), arena_ind, false)) != NULL) {
#if defined(JEMALLOC_GCC_ATOMIC_ATOMICS) ||				\
    defined(JEMALLOC_GCC_SYNC_ATOMICS) || defined(_MSC_VER)
		/* Expose the underlying counter for fast read. */
		pactivep = (size_t *)&(arena->nactive.repr);
		READ(pactivep, size_t *);
		ret = 0;
#else
		ret = EFAULT;
#endif
	} else {
		ret = EFAULT;
	}
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &ctl_mtx);
	return ret;
}
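
/*
 * Caller-side sketch (for exposition only): the pointer read back aliases
 * the arena's active-page counter, so subsequent reads need no further
 * mallctl round trips (hence the atomics requirement above, since those
 * later loads are otherwise unsynchronized):
 *
 *     size_t *pactivep;
 *     size_t sz = sizeof(pactivep);
 *     if (mallctl("experimental.arenas.0.pactivep", &pactivep, &sz, NULL,
 *         0) == 0) {
 *             size_t active_pages = *pactivep;
 *     }
 */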