1a4bd5210SJason Evans #define JEMALLOC_TCACHE_C_
2b7eaed25SJason Evans #include "jemalloc/internal/jemalloc_preamble.h"
3b7eaed25SJason Evans #include "jemalloc/internal/jemalloc_internal_includes.h"
4b7eaed25SJason Evans
5b7eaed25SJason Evans #include "jemalloc/internal/assert.h"
6b7eaed25SJason Evans #include "jemalloc/internal/mutex.h"
7*c5ad8142SEric van Gyzen #include "jemalloc/internal/safety_check.h"
8*c5ad8142SEric van Gyzen #include "jemalloc/internal/sc.h"
9a4bd5210SJason Evans
10a4bd5210SJason Evans /******************************************************************************/
11a4bd5210SJason Evans /* Data. */
12a4bd5210SJason Evans
/* Whether the thread cache is enabled by default (opt.tcache). */
bool opt_tcache = true;
/* lg of the largest size class to cache (opt.lg_tcache_max). */
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

/* Per-bin geometry (ncached_max) shared by all tcaches. */
cache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */

/* Total number of cached bins: small bins plus cached large size classes. */
unsigned nhbins;
/* Largest size class cached in a tcache. */
size_t tcache_maxclass;

tcaches_t *tcaches;

/* Index of first element within tcaches that has never been used. */
static unsigned tcaches_past;

/* Head of singly linked list tracking available tcaches elements. */
static tcaches_t *tcaches_avail;

/* Protects tcaches{,_past,_avail}. */
static malloc_mutex_t tcaches_mtx;
328244f2aaSJason Evans
33a4bd5210SJason Evans /******************************************************************************/
34a4bd5210SJason Evans
/*
 * Return the usable size of the allocation at ptr.  Thin wrapper; the
 * actual lookup is delegated to the arena layer.
 */
size_t
tcache_salloc(tsdn_t *tsdn, const void *ptr) {
	return arena_salloc(tsdn, ptr);
}
398ed34ab0SJason Evans
/*
 * Incremental tcache GC: each invocation processes exactly one cache bin
 * (next_gc_bin), flushing idle objects and adapting the bin's fill rate
 * based on its low-water mark since the previous GC pass.
 */
void
tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
	szind_t binind = tcache->next_gc_bin;

	cache_bin_t *tbin;
	if (binind < SC_NBINS) {
		tbin = tcache_small_bin_get(tcache, binind);
	} else {
		tbin = tcache_large_bin_get(tcache, binind);
	}
	if (tbin->low_water > 0) {
		/*
		 * Flush (ceiling) 3/4 of the objects below the low water mark.
		 */
		if (binind < SC_NBINS) {
			tcache_bin_flush_small(tsd, tcache, tbin, binind,
			    tbin->ncached - tbin->low_water + (tbin->low_water
			    >> 2));
			/*
			 * Reduce fill count by 2X.  Limit lg_fill_div such that
			 * the fill count is always at least 1.
			 */
			cache_bin_info_t *tbin_info = &tcache_bin_info[binind];
			if ((tbin_info->ncached_max >>
			    (tcache->lg_fill_div[binind] + 1)) >= 1) {
				tcache->lg_fill_div[binind]++;
			}
		} else {
			tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
			    - tbin->low_water + (tbin->low_water >> 2), tcache);
		}
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X for small bins.  Make sure
		 * lg_fill_div stays greater than 0.
		 */
		if (binind < SC_NBINS && tcache->lg_fill_div[binind] > 1) {
			tcache->lg_fill_div[binind]--;
		}
	}
	/* Reset the low-water mark for the next GC epoch. */
	tbin->low_water = tbin->ncached;

	/* Advance to the next bin, wrapping past the last one. */
	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins) {
		tcache->next_gc_bin = 0;
	}
}
87e722f8f8SJason Evans
88a4bd5210SJason Evans void *
tcache_alloc_small_hard(tsdn_t * tsdn,arena_t * arena,tcache_t * tcache,cache_bin_t * tbin,szind_t binind,bool * tcache_success)891f0a49e8SJason Evans tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
900ef50b4eSJason Evans cache_bin_t *tbin, szind_t binind, bool *tcache_success) {
91a4bd5210SJason Evans void *ret;
92a4bd5210SJason Evans
93b7eaed25SJason Evans assert(tcache->arena != NULL);
94b7eaed25SJason Evans arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
95b7eaed25SJason Evans config_prof ? tcache->prof_accumbytes : 0);
96b7eaed25SJason Evans if (config_prof) {
97a4bd5210SJason Evans tcache->prof_accumbytes = 0;
98b7eaed25SJason Evans }
990ef50b4eSJason Evans ret = cache_bin_alloc_easy(tbin, tcache_success);
100a4bd5210SJason Evans
101b7eaed25SJason Evans return ret;
102a4bd5210SJason Evans }
103a4bd5210SJason Evans
104*c5ad8142SEric van Gyzen /* Enabled with --enable-extra-size-check. */
105*c5ad8142SEric van Gyzen static void
tbin_extents_lookup_size_check(tsdn_t * tsdn,cache_bin_t * tbin,szind_t binind,size_t nflush,extent_t ** extents)106*c5ad8142SEric van Gyzen tbin_extents_lookup_size_check(tsdn_t *tsdn, cache_bin_t *tbin, szind_t binind,
107*c5ad8142SEric van Gyzen size_t nflush, extent_t **extents){
108*c5ad8142SEric van Gyzen rtree_ctx_t rtree_ctx_fallback;
109*c5ad8142SEric van Gyzen rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
110*c5ad8142SEric van Gyzen
111*c5ad8142SEric van Gyzen /*
112*c5ad8142SEric van Gyzen * Verify that the items in the tcache all have the correct size; this
113*c5ad8142SEric van Gyzen * is useful for catching sized deallocation bugs, also to fail early
114*c5ad8142SEric van Gyzen * instead of corrupting metadata. Since this can be turned on for opt
115*c5ad8142SEric van Gyzen * builds, avoid the branch in the loop.
116*c5ad8142SEric van Gyzen */
117*c5ad8142SEric van Gyzen szind_t szind;
118*c5ad8142SEric van Gyzen size_t sz_sum = binind * nflush;
119*c5ad8142SEric van Gyzen for (unsigned i = 0 ; i < nflush; i++) {
120*c5ad8142SEric van Gyzen rtree_extent_szind_read(tsdn, &extents_rtree,
121*c5ad8142SEric van Gyzen rtree_ctx, (uintptr_t)*(tbin->avail - 1 - i), true,
122*c5ad8142SEric van Gyzen &extents[i], &szind);
123*c5ad8142SEric van Gyzen sz_sum -= szind;
124*c5ad8142SEric van Gyzen }
125*c5ad8142SEric van Gyzen if (sz_sum != 0) {
126*c5ad8142SEric van Gyzen safety_check_fail("<jemalloc>: size mismatch in thread cache "
127*c5ad8142SEric van Gyzen "detected, likely caused by sized deallocation bugs by "
128*c5ad8142SEric van Gyzen "application. Abort.\n");
129*c5ad8142SEric van Gyzen abort();
130*c5ad8142SEric van Gyzen }
131*c5ad8142SEric van Gyzen }
132*c5ad8142SEric van Gyzen
/*
 * Flush the small-bin cache down to rem cached objects, returning the
 * flushed objects to their owning arena bin shards.  Objects may belong to
 * different arenas/shards, so flushing proceeds in passes: each pass locks
 * the bin shard owning the first remaining object and frees everything
 * belonging to it; the rest are deferred to a later pass.
 */
void
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, cache_bin_t *tbin,
    szind_t binind, unsigned rem) {
	bool merged_stats = false;

	assert(binind < SC_NBINS);
	assert((cache_bin_sz_t)rem <= tbin->ncached);

	arena_t *arena = tcache->arena;
	assert(arena != NULL);
	/* Everything beyond the rem surviving objects gets flushed. */
	unsigned nflush = tbin->ncached - rem;
	VARIABLE_ARRAY(extent_t *, item_extent, nflush);

	/* Look up extent once per item. */
	if (config_opt_safety_checks) {
		tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind,
		    nflush, item_extent);
	} else {
		for (unsigned i = 0 ; i < nflush; i++) {
			item_extent[i] = iealloc(tsd_tsdn(tsd),
			    *(tbin->avail - 1 - i));
		}
	}
	while (nflush > 0) {
		/* Lock the arena bin associated with the first object. */
		extent_t *extent = item_extent[0];
		unsigned bin_arena_ind = extent_arena_ind_get(extent);
		arena_t *bin_arena = arena_get(tsd_tsdn(tsd), bin_arena_ind,
		    false);
		unsigned binshard = extent_binshard_get(extent);
		assert(binshard < bin_infos[binind].n_shards);
		bin_t *bin = &bin_arena->bins[binind].bin_shards[binshard];

		/* Credit profiling bytes when flushing to our own arena. */
		if (config_prof && bin_arena == arena) {
			if (arena_prof_accum(tsd_tsdn(tsd), arena,
			    tcache->prof_accumbytes)) {
				prof_idump(tsd_tsdn(tsd));
			}
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		if (config_stats && bin_arena == arena && !merged_stats) {
			merged_stats = true;
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
		unsigned ndeferred = 0;
		for (unsigned i = 0; i < nflush; i++) {
			void *ptr = *(tbin->avail - 1 - i);
			extent = item_extent[i];
			assert(ptr != NULL && extent != NULL);

			if (extent_arena_ind_get(extent) == bin_arena_ind
			    && extent_binshard_get(extent) == binshard) {
				arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
				    bin_arena, bin, binind, extent, ptr);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				*(tbin->avail - 1 - ndeferred) = ptr;
				item_extent[ndeferred] = extent;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
		nflush = ndeferred;
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		unsigned binshard;
		bin_t *bin = arena_bin_choose_lock(tsd_tsdn(tsd), arena, binind,
		    &binshard);
		bin->stats.nflushes++;
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
	}

	/* Slide the surviving objects down to the bottom of the stack. */
	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
	    sizeof(void *));
	tbin->ncached = rem;
	if (tbin->ncached < tbin->low_water) {
		tbin->low_water = tbin->ncached;
	}
}
228a4bd5210SJason Evans
229a4bd5210SJason Evans void
tcache_bin_flush_large(tsd_t * tsd,cache_bin_t * tbin,szind_t binind,unsigned rem,tcache_t * tcache)2300ef50b4eSJason Evans tcache_bin_flush_large(tsd_t *tsd, cache_bin_t *tbin, szind_t binind,
231b7eaed25SJason Evans unsigned rem, tcache_t *tcache) {
232a4bd5210SJason Evans bool merged_stats = false;
233a4bd5210SJason Evans
234a4bd5210SJason Evans assert(binind < nhbins);
2350ef50b4eSJason Evans assert((cache_bin_sz_t)rem <= tbin->ncached);
236a4bd5210SJason Evans
237*c5ad8142SEric van Gyzen arena_t *tcache_arena = tcache->arena;
238*c5ad8142SEric van Gyzen assert(tcache_arena != NULL);
239b7eaed25SJason Evans unsigned nflush = tbin->ncached - rem;
240b7eaed25SJason Evans VARIABLE_ARRAY(extent_t *, item_extent, nflush);
241*c5ad8142SEric van Gyzen
242*c5ad8142SEric van Gyzen #ifndef JEMALLOC_EXTRA_SIZE_CHECK
243b7eaed25SJason Evans /* Look up extent once per item. */
244b7eaed25SJason Evans for (unsigned i = 0 ; i < nflush; i++) {
245b7eaed25SJason Evans item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
246b7eaed25SJason Evans }
247*c5ad8142SEric van Gyzen #else
248*c5ad8142SEric van Gyzen tbin_extents_lookup_size_check(tsd_tsdn(tsd), tbin, binind, nflush,
249*c5ad8142SEric van Gyzen item_extent);
250*c5ad8142SEric van Gyzen #endif
251b7eaed25SJason Evans while (nflush > 0) {
252a4bd5210SJason Evans /* Lock the arena associated with the first object. */
253b7eaed25SJason Evans extent_t *extent = item_extent[0];
254*c5ad8142SEric van Gyzen unsigned locked_arena_ind = extent_arena_ind_get(extent);
255*c5ad8142SEric van Gyzen arena_t *locked_arena = arena_get(tsd_tsdn(tsd),
256*c5ad8142SEric van Gyzen locked_arena_ind, false);
257*c5ad8142SEric van Gyzen bool idump;
258a4bd5210SJason Evans
259b7eaed25SJason Evans if (config_prof) {
260f8ca2db1SJason Evans idump = false;
261b7eaed25SJason Evans }
262b7eaed25SJason Evans
263*c5ad8142SEric van Gyzen bool lock_large = !arena_is_auto(locked_arena);
264*c5ad8142SEric van Gyzen if (lock_large) {
265b7eaed25SJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
266*c5ad8142SEric van Gyzen }
267b7eaed25SJason Evans for (unsigned i = 0; i < nflush; i++) {
268b7eaed25SJason Evans void *ptr = *(tbin->avail - 1 - i);
269b7eaed25SJason Evans assert(ptr != NULL);
270b7eaed25SJason Evans extent = item_extent[i];
271*c5ad8142SEric van Gyzen if (extent_arena_ind_get(extent) == locked_arena_ind) {
272b7eaed25SJason Evans large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
273b7eaed25SJason Evans extent);
274b7eaed25SJason Evans }
275b7eaed25SJason Evans }
276*c5ad8142SEric van Gyzen if ((config_prof || config_stats) &&
277*c5ad8142SEric van Gyzen (locked_arena == tcache_arena)) {
278a4bd5210SJason Evans if (config_prof) {
279*c5ad8142SEric van Gyzen idump = arena_prof_accum(tsd_tsdn(tsd),
280*c5ad8142SEric van Gyzen tcache_arena, tcache->prof_accumbytes);
281a4bd5210SJason Evans tcache->prof_accumbytes = 0;
282a4bd5210SJason Evans }
283a4bd5210SJason Evans if (config_stats) {
284a4bd5210SJason Evans merged_stats = true;
285*c5ad8142SEric van Gyzen arena_stats_large_flush_nrequests_add(
286*c5ad8142SEric van Gyzen tsd_tsdn(tsd), &tcache_arena->stats, binind,
287b7eaed25SJason Evans tbin->tstats.nrequests);
288a4bd5210SJason Evans tbin->tstats.nrequests = 0;
289a4bd5210SJason Evans }
290a4bd5210SJason Evans }
291*c5ad8142SEric van Gyzen if (lock_large) {
292b7eaed25SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);
293*c5ad8142SEric van Gyzen }
294b7eaed25SJason Evans
295b7eaed25SJason Evans unsigned ndeferred = 0;
296b7eaed25SJason Evans for (unsigned i = 0; i < nflush; i++) {
297b7eaed25SJason Evans void *ptr = *(tbin->avail - 1 - i);
298b7eaed25SJason Evans extent = item_extent[i];
299b7eaed25SJason Evans assert(ptr != NULL && extent != NULL);
300b7eaed25SJason Evans
301*c5ad8142SEric van Gyzen if (extent_arena_ind_get(extent) == locked_arena_ind) {
302b7eaed25SJason Evans large_dalloc_finish(tsd_tsdn(tsd), extent);
303d0e79aa3SJason Evans } else {
304a4bd5210SJason Evans /*
305a4bd5210SJason Evans * This object was allocated via a different
306a4bd5210SJason Evans * arena than the one that is currently locked.
307a4bd5210SJason Evans * Stash the object, so that it can be handled
308a4bd5210SJason Evans * in a future pass.
309a4bd5210SJason Evans */
310df0d881dSJason Evans *(tbin->avail - 1 - ndeferred) = ptr;
311b7eaed25SJason Evans item_extent[ndeferred] = extent;
312a4bd5210SJason Evans ndeferred++;
313a4bd5210SJason Evans }
314a4bd5210SJason Evans }
315b7eaed25SJason Evans if (config_prof && idump) {
3161f0a49e8SJason Evans prof_idump(tsd_tsdn(tsd));
317b7eaed25SJason Evans }
3181f0a49e8SJason Evans arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
3191f0a49e8SJason Evans ndeferred);
320b7eaed25SJason Evans nflush = ndeferred;
321a4bd5210SJason Evans }
322d0e79aa3SJason Evans if (config_stats && !merged_stats) {
323a4bd5210SJason Evans /*
324a4bd5210SJason Evans * The flush loop didn't happen to flush to this thread's
325a4bd5210SJason Evans * arena, so the stats didn't get merged. Manually do so now.
326a4bd5210SJason Evans */
327*c5ad8142SEric van Gyzen arena_stats_large_flush_nrequests_add(tsd_tsdn(tsd),
328*c5ad8142SEric van Gyzen &tcache_arena->stats, binind, tbin->tstats.nrequests);
329a4bd5210SJason Evans tbin->tstats.nrequests = 0;
330a4bd5210SJason Evans }
331a4bd5210SJason Evans
332df0d881dSJason Evans memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
333df0d881dSJason Evans sizeof(void *));
334a4bd5210SJason Evans tbin->ncached = rem;
3350ef50b4eSJason Evans if (tbin->ncached < tbin->low_water) {
336a4bd5210SJason Evans tbin->low_water = tbin->ncached;
337a4bd5210SJason Evans }
338b7eaed25SJason Evans }
339a4bd5210SJason Evans
/*
 * Attach tcache to arena.  The tcache must not currently be associated
 * with any arena.  Under stats builds, the tcache is also registered on
 * the arena's lists so its cache bins are reachable from the arena.
 */
void
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
	assert(tcache->arena == NULL);
	tcache->arena = arena;

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);

		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		/* Publish this tcache's bin arrays via a descriptor. */
		cache_bin_array_descriptor_init(
		    &tcache->cache_bin_array_descriptor, tcache->bins_small,
		    tcache->bins_large);
		ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
		    &tcache->cache_bin_array_descriptor, link);

		malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
	}
}
360a4bd5210SJason Evans
/*
 * Detach tcache from its current arena, folding its stats into the arena
 * before unlinking (stats builds only).
 */
static void
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
	arena_t *arena = tcache->arena;
	assert(arena != NULL);
	if (config_stats) {
		/* Unlink from list of extant tcaches. */
		malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
		if (config_debug) {
			/* Sanity check: tcache must be on the arena's list. */
			bool in_ql = false;
			tcache_t *iter;
			ql_foreach(iter, &arena->tcache_ql, link) {
				if (iter == tcache) {
					in_ql = true;
					break;
				}
			}
			assert(in_ql);
		}
		ql_remove(&arena->tcache_ql, tcache, link);
		ql_remove(&arena->cache_bin_array_descriptor_ql,
		    &tcache->cache_bin_array_descriptor, link);
		/* Preserve this tcache's counters before detaching. */
		tcache_stats_merge(tsdn, tcache, arena);
		malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
	}
	tcache->arena = NULL;
}
387a4bd5210SJason Evans
/* Move tcache from its current arena to arena: detach, then attach. */
void
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
	tcache_arena_dissociate(tsdn, tcache);
	tcache_arena_associate(tsdn, tcache, arena);
}
393b7eaed25SJason Evans
/*
 * Record the default tcache-enabled state in TSD and, when enabled,
 * eagerly initialize the embedded tcache.  Always returns false (no
 * failure path).
 */
bool
tsd_tcache_enabled_data_init(tsd_t *tsd) {
	/* Called upon tsd initialization. */
	tsd_tcache_enabled_set(tsd, opt_tcache);
	tsd_slow_update(tsd);

	if (opt_tcache) {
		/* Trigger tcache init. */
		tsd_tcache_data_init(tsd);
	}

	return false;
}
407b7eaed25SJason Evans
/* Initialize auto tcache (embedded in TSD). */
static void
tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
	memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
	tcache->prof_accumbytes = 0;
	tcache->next_gc_bin = 0;
	tcache->arena = NULL;

	ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);

	/*
	 * Carve the single avail_stack allocation into one pointer stack per
	 * bin; stack_offset advances by each bin's ncached_max slots.
	 */
	size_t stack_offset = 0;
	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	memset(tcache->bins_small, 0, sizeof(cache_bin_t) * SC_NBINS);
	memset(tcache->bins_large, 0, sizeof(cache_bin_t) * (nhbins - SC_NBINS));
	unsigned i = 0;
	for (; i < SC_NBINS; i++) {
		/* Start small bins at half fill (lg_fill_div == 1). */
		tcache->lg_fill_div[i] = 1;
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
		/*
		 * avail points past the available space.  Allocations will
		 * access the slots toward higher addresses (for the benefit of
		 * prefetch).
		 */
		tcache_small_bin_get(tcache, i)->avail =
		    (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
	}
	for (; i < nhbins; i++) {
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
		tcache_large_bin_get(tcache, i)->avail =
		    (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
	}
	/* All of the provided stack space must be consumed exactly. */
	assert(stack_offset == stack_nelms * sizeof(void *));
}
441a4bd5210SJason Evans
/* Initialize auto tcache (embedded in TSD). */
bool
tsd_tcache_data_init(tsd_t *tsd) {
	tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
	/* Must not already be initialized. */
	assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
	/* One shared allocation holds the avail stacks for all bins. */
	size_t size = stack_nelms * sizeof(void *);
	/* Avoid false cacheline sharing. */
	size = sz_sa2u(size, CACHELINE);

	void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
	    NULL, true, arena_get(TSDN_NULL, 0, true));
	if (avail_array == NULL) {
		/* true signals failure to the caller. */
		return true;
	}

	tcache_init(tsd, tcache, avail_array);
	/*
	 * Initialization is a bit tricky here.  After malloc init is done, all
	 * threads can rely on arena_choose and associate tcache accordingly.
	 * However, the thread that does actual malloc bootstrapping relies on
	 * functional tsd, and it can only rely on a0.  In that case, we
	 * associate its tcache to a0 temporarily, and later on
	 * arena_choose_hard() will re-associate properly.
	 */
	tcache->arena = NULL;
	arena_t *arena;
	if (!malloc_initialized()) {
		/* If in initialization, assign to a0. */
		arena = arena_get(tsd_tsdn(tsd), 0, false);
		tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
	} else {
		arena = arena_choose(tsd, NULL);
		/* This may happen if thread.tcache.enabled is used. */
		if (tcache->arena == NULL) {
			tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
		}
	}
	assert(arena == tcache->arena);

	return false;
}
483b7eaed25SJason Evans
484b7eaed25SJason Evans /* Created manual tcache for tcache.create mallctl. */
485b7eaed25SJason Evans tcache_t *
tcache_create_explicit(tsd_t * tsd)486b7eaed25SJason Evans tcache_create_explicit(tsd_t *tsd) {
487b7eaed25SJason Evans tcache_t *tcache;
488b7eaed25SJason Evans size_t size, stack_offset;
489b7eaed25SJason Evans
490b7eaed25SJason Evans size = sizeof(tcache_t);
491b7eaed25SJason Evans /* Naturally align the pointer stacks. */
492b7eaed25SJason Evans size = PTR_CEILING(size);
493b7eaed25SJason Evans stack_offset = size;
494b7eaed25SJason Evans size += stack_nelms * sizeof(void *);
495b7eaed25SJason Evans /* Avoid false cacheline sharing. */
496b7eaed25SJason Evans size = sz_sa2u(size, CACHELINE);
497b7eaed25SJason Evans
498b7eaed25SJason Evans tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
499b7eaed25SJason Evans arena_get(TSDN_NULL, 0, true));
500b7eaed25SJason Evans if (tcache == NULL) {
501b7eaed25SJason Evans return NULL;
502b7eaed25SJason Evans }
503b7eaed25SJason Evans
504b7eaed25SJason Evans tcache_init(tsd, tcache,
505b7eaed25SJason Evans (void *)((uintptr_t)tcache + (uintptr_t)stack_offset));
506b7eaed25SJason Evans tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));
507b7eaed25SJason Evans
508b7eaed25SJason Evans return tcache;
509a4bd5210SJason Evans }
510a4bd5210SJason Evans
511d0e79aa3SJason Evans static void
tcache_flush_cache(tsd_t * tsd,tcache_t * tcache)512b7eaed25SJason Evans tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
513b7eaed25SJason Evans assert(tcache->arena != NULL);
514a4bd5210SJason Evans
515*c5ad8142SEric van Gyzen for (unsigned i = 0; i < SC_NBINS; i++) {
5160ef50b4eSJason Evans cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
517d0e79aa3SJason Evans tcache_bin_flush_small(tsd, tcache, tbin, i, 0);
518a4bd5210SJason Evans
519b7eaed25SJason Evans if (config_stats) {
520b7eaed25SJason Evans assert(tbin->tstats.nrequests == 0);
521a4bd5210SJason Evans }
522a4bd5210SJason Evans }
523*c5ad8142SEric van Gyzen for (unsigned i = SC_NBINS; i < nhbins; i++) {
5240ef50b4eSJason Evans cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
525d0e79aa3SJason Evans tcache_bin_flush_large(tsd, tbin, i, 0, tcache);
526a4bd5210SJason Evans
527b7eaed25SJason Evans if (config_stats) {
528b7eaed25SJason Evans assert(tbin->tstats.nrequests == 0);
529a4bd5210SJason Evans }
530a4bd5210SJason Evans }
531a4bd5210SJason Evans
532f8ca2db1SJason Evans if (config_prof && tcache->prof_accumbytes > 0 &&
533b7eaed25SJason Evans arena_prof_accum(tsd_tsdn(tsd), tcache->arena,
534b7eaed25SJason Evans tcache->prof_accumbytes)) {
5351f0a49e8SJason Evans prof_idump(tsd_tsdn(tsd));
536b7eaed25SJason Evans }
537a4bd5210SJason Evans }
538a4bd5210SJason Evans
/* Flush the calling thread's own (TSD-embedded) tcache. */
void
tcache_flush(tsd_t *tsd) {
	assert(tcache_available(tsd));
	tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
}
544a4bd5210SJason Evans
/*
 * Tear down a tcache: flush all cached objects, detach from the arena,
 * and free its storage.  tsd_tcache distinguishes the TSD-embedded auto
 * tcache (only its avail array is heap-allocated) from an explicit one
 * (struct and stacks share a single allocation).
 */
static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
	tcache_flush_cache(tsd, tcache);
	/* Capture the arena before dissociation clears tcache->arena. */
	arena_t *arena = tcache->arena;
	tcache_arena_dissociate(tsd_tsdn(tsd), tcache);

	if (tsd_tcache) {
		/* Release the avail array for the TSD embedded auto tcache. */
		void *avail_array =
		    (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail -
		    (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
		idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true);
	} else {
		/* Release both the tcache struct and avail array. */
		idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
	}

	/*
	 * The deallocation and tcache flush above may not trigger decay since
	 * we are on the tcache shutdown path (potentially with non-nominal
	 * tsd).  Manually trigger decay to avoid pathological cases.  Also
	 * include arena 0 because the tcache array is allocated from it.
	 */
	arena_decay(tsd_tsdn(tsd), arena_get(tsd_tsdn(tsd), 0, false),
	    false, false);

	if (arena_nthreads_get(arena, false) == 0 &&
	    !background_thread_enabled()) {
		/* Force purging when no threads assigned to the arena anymore. */
		arena_decay(tsd_tsdn(tsd), arena, false, true);
	} else {
		arena_decay(tsd_tsdn(tsd), arena, false, false);
	}
}
579b7eaed25SJason Evans
/* For auto tcache (embedded in TSD) only. */
void
tcache_cleanup(tsd_t *tsd) {
	tcache_t *tcache = tsd_tcachep_get(tsd);
	if (!tcache_available(tsd)) {
		/* Never initialized (or already disabled): nothing to do. */
		assert(tsd_tcache_enabled_get(tsd) == false);
		if (config_debug) {
			assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
		}
		return;
	}
	assert(tsd_tcache_enabled_get(tsd));
	assert(tcache_small_bin_get(tcache, 0)->avail != NULL);

	tcache_destroy(tsd, tcache, true);
	if (config_debug) {
		/* Mark as torn down so the assertion above holds next time. */
		tcache_small_bin_get(tcache, 0)->avail = NULL;
	}
}
599a4bd5210SJason Evans
600d0e79aa3SJason Evans void
tcache_stats_merge(tsdn_t * tsdn,tcache_t * tcache,arena_t * arena)601b7eaed25SJason Evans tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
602a4bd5210SJason Evans unsigned i;
603a4bd5210SJason Evans
604f921d10fSJason Evans cassert(config_stats);
605f921d10fSJason Evans
606a4bd5210SJason Evans /* Merge and reset tcache stats. */
607*c5ad8142SEric van Gyzen for (i = 0; i < SC_NBINS; i++) {
6080ef50b4eSJason Evans cache_bin_t *tbin = tcache_small_bin_get(tcache, i);
609*c5ad8142SEric van Gyzen unsigned binshard;
610*c5ad8142SEric van Gyzen bin_t *bin = arena_bin_choose_lock(tsdn, arena, i, &binshard);
611a4bd5210SJason Evans bin->stats.nrequests += tbin->tstats.nrequests;
6121f0a49e8SJason Evans malloc_mutex_unlock(tsdn, &bin->lock);
613a4bd5210SJason Evans tbin->tstats.nrequests = 0;
614a4bd5210SJason Evans }
615a4bd5210SJason Evans
616a4bd5210SJason Evans for (; i < nhbins; i++) {
6170ef50b4eSJason Evans cache_bin_t *tbin = tcache_large_bin_get(tcache, i);
618*c5ad8142SEric van Gyzen arena_stats_large_flush_nrequests_add(tsdn, &arena->stats, i,
619b7eaed25SJason Evans tbin->tstats.nrequests);
620a4bd5210SJason Evans tbin->tstats.nrequests = 0;
621a4bd5210SJason Evans }
622a4bd5210SJason Evans }
623a4bd5210SJason Evans
6248244f2aaSJason Evans static bool
tcaches_create_prep(tsd_t * tsd)6258244f2aaSJason Evans tcaches_create_prep(tsd_t *tsd) {
6268244f2aaSJason Evans bool err;
6278244f2aaSJason Evans
6288244f2aaSJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
629d0e79aa3SJason Evans
630d0e79aa3SJason Evans if (tcaches == NULL) {
631b7eaed25SJason Evans tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
632b7eaed25SJason Evans * (MALLOCX_TCACHE_MAX+1), CACHELINE);
6338244f2aaSJason Evans if (tcaches == NULL) {
6348244f2aaSJason Evans err = true;
6358244f2aaSJason Evans goto label_return;
6368244f2aaSJason Evans }
637d0e79aa3SJason Evans }
638d0e79aa3SJason Evans
6398244f2aaSJason Evans if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) {
6408244f2aaSJason Evans err = true;
6418244f2aaSJason Evans goto label_return;
6428244f2aaSJason Evans }
643d0e79aa3SJason Evans
6448244f2aaSJason Evans err = false;
6458244f2aaSJason Evans label_return:
6468244f2aaSJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
6478244f2aaSJason Evans return err;
6488244f2aaSJason Evans }
6498244f2aaSJason Evans
6508244f2aaSJason Evans bool
tcaches_create(tsd_t * tsd,unsigned * r_ind)6518244f2aaSJason Evans tcaches_create(tsd_t *tsd, unsigned *r_ind) {
652b7eaed25SJason Evans witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
653b7eaed25SJason Evans
6548244f2aaSJason Evans bool err;
6558244f2aaSJason Evans
6568244f2aaSJason Evans if (tcaches_create_prep(tsd)) {
6578244f2aaSJason Evans err = true;
6588244f2aaSJason Evans goto label_return;
6598244f2aaSJason Evans }
6608244f2aaSJason Evans
661b7eaed25SJason Evans tcache_t *tcache = tcache_create_explicit(tsd);
6628244f2aaSJason Evans if (tcache == NULL) {
6638244f2aaSJason Evans err = true;
6648244f2aaSJason Evans goto label_return;
6658244f2aaSJason Evans }
6668244f2aaSJason Evans
667b7eaed25SJason Evans tcaches_t *elm;
6688244f2aaSJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
669d0e79aa3SJason Evans if (tcaches_avail != NULL) {
670d0e79aa3SJason Evans elm = tcaches_avail;
671d0e79aa3SJason Evans tcaches_avail = tcaches_avail->next;
672d0e79aa3SJason Evans elm->tcache = tcache;
673df0d881dSJason Evans *r_ind = (unsigned)(elm - tcaches);
674d0e79aa3SJason Evans } else {
675d0e79aa3SJason Evans elm = &tcaches[tcaches_past];
676d0e79aa3SJason Evans elm->tcache = tcache;
677d0e79aa3SJason Evans *r_ind = tcaches_past;
678d0e79aa3SJason Evans tcaches_past++;
679d0e79aa3SJason Evans }
6808244f2aaSJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
681d0e79aa3SJason Evans
6828244f2aaSJason Evans err = false;
6838244f2aaSJason Evans label_return:
684b7eaed25SJason Evans witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
6858244f2aaSJason Evans return err;
686d0e79aa3SJason Evans }
687d0e79aa3SJason Evans
688b7eaed25SJason Evans static tcache_t *
tcaches_elm_remove(tsd_t * tsd,tcaches_t * elm,bool allow_reinit)689*c5ad8142SEric van Gyzen tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm, bool allow_reinit) {
6908244f2aaSJason Evans malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);
691d0e79aa3SJason Evans
6928244f2aaSJason Evans if (elm->tcache == NULL) {
693b7eaed25SJason Evans return NULL;
6948244f2aaSJason Evans }
695b7eaed25SJason Evans tcache_t *tcache = elm->tcache;
696*c5ad8142SEric van Gyzen if (allow_reinit) {
697*c5ad8142SEric van Gyzen elm->tcache = TCACHES_ELM_NEED_REINIT;
698*c5ad8142SEric van Gyzen } else {
699d0e79aa3SJason Evans elm->tcache = NULL;
700*c5ad8142SEric van Gyzen }
701*c5ad8142SEric van Gyzen
702*c5ad8142SEric van Gyzen if (tcache == TCACHES_ELM_NEED_REINIT) {
703*c5ad8142SEric van Gyzen return NULL;
704*c5ad8142SEric van Gyzen }
705b7eaed25SJason Evans return tcache;
706d0e79aa3SJason Evans }
707d0e79aa3SJason Evans
708d0e79aa3SJason Evans void
tcaches_flush(tsd_t * tsd,unsigned ind)7098244f2aaSJason Evans tcaches_flush(tsd_t *tsd, unsigned ind) {
7108244f2aaSJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
711*c5ad8142SEric van Gyzen tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind], true);
7128244f2aaSJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
713b7eaed25SJason Evans if (tcache != NULL) {
714*c5ad8142SEric van Gyzen /* Destroy the tcache; recreate in tcaches_get() if needed. */
715b7eaed25SJason Evans tcache_destroy(tsd, tcache, false);
716b7eaed25SJason Evans }
717d0e79aa3SJason Evans }
718d0e79aa3SJason Evans
719d0e79aa3SJason Evans void
tcaches_destroy(tsd_t * tsd,unsigned ind)7208244f2aaSJason Evans tcaches_destroy(tsd_t *tsd, unsigned ind) {
7218244f2aaSJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
722b7eaed25SJason Evans tcaches_t *elm = &tcaches[ind];
723*c5ad8142SEric van Gyzen tcache_t *tcache = tcaches_elm_remove(tsd, elm, false);
724d0e79aa3SJason Evans elm->next = tcaches_avail;
725d0e79aa3SJason Evans tcaches_avail = elm;
7268244f2aaSJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
727b7eaed25SJason Evans if (tcache != NULL) {
728b7eaed25SJason Evans tcache_destroy(tsd, tcache, false);
729b7eaed25SJason Evans }
730d0e79aa3SJason Evans }
731d0e79aa3SJason Evans
732d0e79aa3SJason Evans bool
tcache_boot(tsdn_t * tsdn)7338244f2aaSJason Evans tcache_boot(tsdn_t *tsdn) {
734b7eaed25SJason Evans /* If necessary, clamp opt_lg_tcache_max. */
735b7eaed25SJason Evans if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
736*c5ad8142SEric van Gyzen SC_SMALL_MAXCLASS) {
737*c5ad8142SEric van Gyzen tcache_maxclass = SC_SMALL_MAXCLASS;
738b7eaed25SJason Evans } else {
7397fa7f12fSJason Evans tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
740b7eaed25SJason Evans }
741a4bd5210SJason Evans
742b7eaed25SJason Evans if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
743b7eaed25SJason Evans malloc_mutex_rank_exclusive)) {
7448244f2aaSJason Evans return true;
7458244f2aaSJason Evans }
7468244f2aaSJason Evans
747b7eaed25SJason Evans nhbins = sz_size2index(tcache_maxclass) + 1;
748a4bd5210SJason Evans
749a4bd5210SJason Evans /* Initialize tcache_bin_info. */
7500ef50b4eSJason Evans tcache_bin_info = (cache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
7510ef50b4eSJason Evans * sizeof(cache_bin_info_t), CACHELINE);
752b7eaed25SJason Evans if (tcache_bin_info == NULL) {
753b7eaed25SJason Evans return true;
754b7eaed25SJason Evans }
755a4bd5210SJason Evans stack_nelms = 0;
756b7eaed25SJason Evans unsigned i;
757*c5ad8142SEric van Gyzen for (i = 0; i < SC_NBINS; i++) {
7580ef50b4eSJason Evans if ((bin_infos[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
759d0e79aa3SJason Evans tcache_bin_info[i].ncached_max =
760d0e79aa3SJason Evans TCACHE_NSLOTS_SMALL_MIN;
7610ef50b4eSJason Evans } else if ((bin_infos[i].nregs << 1) <=
762d0e79aa3SJason Evans TCACHE_NSLOTS_SMALL_MAX) {
763a4bd5210SJason Evans tcache_bin_info[i].ncached_max =
7640ef50b4eSJason Evans (bin_infos[i].nregs << 1);
765a4bd5210SJason Evans } else {
766a4bd5210SJason Evans tcache_bin_info[i].ncached_max =
767a4bd5210SJason Evans TCACHE_NSLOTS_SMALL_MAX;
768a4bd5210SJason Evans }
769a4bd5210SJason Evans stack_nelms += tcache_bin_info[i].ncached_max;
770a4bd5210SJason Evans }
771a4bd5210SJason Evans for (; i < nhbins; i++) {
772a4bd5210SJason Evans tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
773a4bd5210SJason Evans stack_nelms += tcache_bin_info[i].ncached_max;
774a4bd5210SJason Evans }
775a4bd5210SJason Evans
776b7eaed25SJason Evans return false;
777a4bd5210SJason Evans }
7788244f2aaSJason Evans
7798244f2aaSJason Evans void
tcache_prefork(tsdn_t * tsdn)7808244f2aaSJason Evans tcache_prefork(tsdn_t *tsdn) {
7818244f2aaSJason Evans if (!config_prof && opt_tcache) {
7828244f2aaSJason Evans malloc_mutex_prefork(tsdn, &tcaches_mtx);
7838244f2aaSJason Evans }
7848244f2aaSJason Evans }
7858244f2aaSJason Evans
7868244f2aaSJason Evans void
tcache_postfork_parent(tsdn_t * tsdn)7878244f2aaSJason Evans tcache_postfork_parent(tsdn_t *tsdn) {
7888244f2aaSJason Evans if (!config_prof && opt_tcache) {
7898244f2aaSJason Evans malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
7908244f2aaSJason Evans }
7918244f2aaSJason Evans }
7928244f2aaSJason Evans
7938244f2aaSJason Evans void
tcache_postfork_child(tsdn_t * tsdn)7948244f2aaSJason Evans tcache_postfork_child(tsdn_t *tsdn) {
7958244f2aaSJason Evans if (!config_prof && opt_tcache) {
7968244f2aaSJason Evans malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
7978244f2aaSJason Evans }
7988244f2aaSJason Evans }
799