#ifndef JEMALLOC_INTERNAL_ARENA_EXTERNS_H
#define JEMALLOC_INTERNAL_ARENA_EXTERNS_H

#include "jemalloc/internal/bin.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/hook.h"
#include "jemalloc/internal/pages.h"
#include "jemalloc/internal/stats.h"

/*
 * When the number of pages to be purged exceeds this threshold, a deferred
 * purge should happen.
 */
#define ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD UINT64_C(1024)
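
/*
 * Illustrative sketch (not part of the upstream header): a caller that
 * tracks how many pages are waiting to be purged might compare that count
 * against the threshold above before triggering deferred work, e.g.:
 *
 *     if (npurge_pending > ARENA_DEFERRED_PURGE_NPAGES_THRESHOLD) {
 *             arena_handle_deferred_work(tsdn, arena);
 *     }
 *
 * npurge_pending is a hypothetical counter; the real accounting and call
 * sites live in arena.c.
 */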

extern ssize_t opt_dirty_decay_ms;
extern ssize_t opt_muzzy_decay_ms;

extern percpu_arena_mode_t opt_percpu_arena;
extern const char *percpu_arena_mode_names[];

extern div_info_t arena_binind_div_info[SC_NBINS];

extern malloc_mutex_t arenas_lock;
extern emap_t arena_emap_global;

extern size_t opt_oversize_threshold;
extern size_t oversize_threshold;

/*
 * arena_bin_offsets[binind] is the offset of the first bin shard for size class
 * binind.
 */
extern uint32_t arena_bin_offsets[SC_NBINS];
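
/*
 * Illustrative sketch (not part of the upstream header): per the comment
 * above, the first shard of the bin for size class binind lives
 * arena_bin_offsets[binind] bytes past the arena base, so a shard could be
 * located roughly as:
 *
 *     bin_t *shard0 = (bin_t *)((uintptr_t)arena + arena_bin_offsets[binind]);
 *     bin_t *bin = &shard0[binshard];
 *
 * The supported way to pick a shard is arena_bin_choose(), declared below;
 * this only clarifies what the offsets represent.
 */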

void arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena,
    unsigned *nthreads, const char **dss, ssize_t *dirty_decay_ms,
    ssize_t *muzzy_decay_ms, size_t *nactive, size_t *ndirty, size_t *nmuzzy);
void arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
    bin_stats_data_t *bstats, arena_stats_large_t *lstats,
    pac_estats_t *estats, hpa_shard_stats_t *hpastats, sec_stats_t *secstats);
void arena_handle_deferred_work(tsdn_t *tsdn, arena_t *arena);
edata_t *arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena,
    size_t usize, size_t alignment, bool zero);
void arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena,
    edata_t *edata);
void arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena,
    edata_t *edata, size_t oldsize);
void arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena,
    edata_t *edata, size_t oldsize);
bool arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, extent_state_t state,
    ssize_t decay_ms);
ssize_t arena_decay_ms_get(arena_t *arena, extent_state_t state);
void arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all);
uint64_t arena_time_until_deferred(tsdn_t *tsdn, arena_t *arena);
void arena_do_deferred_work(tsdn_t *tsdn, arena_t *arena);
void arena_reset(tsd_t *tsd, arena_t *arena);
void arena_destroy(tsd_t *tsd, arena_t *arena);
void arena_cache_bin_fill_small(tsdn_t *tsdn, arena_t *arena,
    cache_bin_t *cache_bin, cache_bin_info_t *cache_bin_info, szind_t binind,
    const unsigned nfill);

void *arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size,
    szind_t ind, bool zero);
void *arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache);
void arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize);
void arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    bool slow_path);
void arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, edata_t *slab);

void arena_dalloc_bin_locked_handle_newly_empty(tsdn_t *tsdn, arena_t *arena,
    edata_t *slab, bin_t *bin);
void arena_dalloc_bin_locked_handle_newly_nonempty(tsdn_t *tsdn, arena_t *arena,
    edata_t *slab, bin_t *bin);
void arena_dalloc_small(tsdn_t *tsdn, void *ptr);
bool arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero, size_t *newsize);
void *arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache,
    hook_ralloc_args_t *hook_args);
dss_prec_t arena_dss_prec_get(arena_t *arena);
ehooks_t *arena_get_ehooks(arena_t *arena);
extent_hooks_t *arena_set_extent_hooks(tsd_t *tsd, arena_t *arena,
    extent_hooks_t *extent_hooks);
bool arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec);
ssize_t arena_dirty_decay_ms_default_get(void);
bool arena_dirty_decay_ms_default_set(ssize_t decay_ms);
ssize_t arena_muzzy_decay_ms_default_get(void);
bool arena_muzzy_decay_ms_default_set(ssize_t decay_ms);
bool arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena,
    size_t *old_limit, size_t *new_limit);
unsigned arena_nthreads_get(arena_t *arena, bool internal);
void arena_nthreads_inc(arena_t *arena, bool internal);
void arena_nthreads_dec(arena_t *arena, bool internal);
arena_t *arena_new(tsdn_t *tsdn, unsigned ind, const arena_config_t *config);
bool arena_init_huge(void);
bool arena_is_huge(unsigned arena_ind);
arena_t *arena_choose_huge(tsd_t *tsd);
bin_t *arena_bin_choose(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    unsigned *binshard);
size_t arena_fill_small_fresh(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    void **ptrs, size_t nfill, bool zero);
bool arena_boot(sc_data_t *sc_data, base_t *base, bool hpa);
void arena_prefork0(tsdn_t *tsdn, arena_t *arena);
void arena_prefork1(tsdn_t *tsdn, arena_t *arena);
void arena_prefork2(tsdn_t *tsdn, arena_t *arena);
void arena_prefork3(tsdn_t *tsdn, arena_t *arena);
void arena_prefork4(tsdn_t *tsdn, arena_t *arena);
void arena_prefork5(tsdn_t *tsdn, arena_t *arena);
void arena_prefork6(tsdn_t *tsdn, arena_t *arena);
void arena_prefork7(tsdn_t *tsdn, arena_t *arena);
void arena_prefork8(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_parent(tsdn_t *tsdn, arena_t *arena);
void arena_postfork_child(tsdn_t *tsdn, arena_t *arena);
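
/*
 * Illustrative sketch (not part of the upstream header): the prefork and
 * postfork hooks above are meant to be driven by the allocator's fork
 * handlers, in numeric order before fork() and with the matching hook in
 * the parent and child afterwards, roughly:
 *
 *     arena_prefork0(tsdn, arena);
 *     ...
 *     arena_prefork8(tsdn, arena);
 *     pid = fork();
 *     if (pid != 0)
 *             arena_postfork_parent(tsdn, arena);
 *     else
 *             arena_postfork_child(tsdn, arena);
 *
 * The real sequencing across arenas and other subsystems lives in the
 * allocator's fork handlers; the snippet only shows the intended ordering.
 */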

#endif /* JEMALLOC_INTERNAL_ARENA_EXTERNS_H */