#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

bool opt_tcache = true;
ssize_t opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

/* Per-bin parameters (ncached_max); initialized in tcache_boot(). */
tcache_bin_info_t *tcache_bin_info;
static unsigned stack_nelms; /* Total stack elms per tcache. */

/* Number of cache bins per tcache (NBINS small bins + cached large bins). */
size_t nhbins;
/* Largest size class cached in a tcache; computed in tcache_boot(). */
size_t tcache_maxclass;

/* Explicit tcaches table, exposed via the tcache.{create,flush,destroy}
 * mallctls; lazily allocated by tcaches_create(). */
tcaches_t *tcaches;

/* Index of first element within tcaches that has never been used. */
static unsigned tcaches_past;

/* Head of singly linked list tracking available tcaches elements. */
static tcaches_t *tcaches_avail;

/******************************************************************************/

/* Report the usable size of the allocation that backs ptr. */
size_t tcache_salloc(const void *ptr)
{

	return (arena_salloc(ptr, false));
}

/*
 * Incremental tcache GC, triggered from the event counter: examine one cache
 * bin per call, partially flush it if it has been over-provisioned since the
 * last pass (low_water > 0), adapt its fill count, then advance next_gc_bin
 * round-robin and reset the event counter.
 */
void
tcache_event_hard(tsd_t *tsd, tcache_t *tcache)
{
	index_t binind = tcache->next_gc_bin;
	tcache_bin_t *tbin = &tcache->tbins[binind];
	tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];

	if (tbin->low_water > 0) {
		/*
		 * Flush (ceiling) 3/4 of the objects below the low water mark.
		 */
		if (binind < NBINS) {
			tcache_bin_flush_small(tsd, tcache, tbin, binind,
			    tbin->ncached - tbin->low_water + (tbin->low_water
			    >> 2));
		} else {
			tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
			    - tbin->low_water + (tbin->low_water >> 2), tcache);
		}
		/*
		 * Reduce fill count by 2X. Limit lg_fill_div such that the
		 * fill count is always at least 1.
		 */
		if ((tbin_info->ncached_max >> (tbin->lg_fill_div+1)) >= 1)
			tbin->lg_fill_div++;
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X. Make sure lg_fill_div stays
		 * greater than 0.
		 */
		if (tbin->lg_fill_div > 1)
			tbin->lg_fill_div--;
	}
	/* Start a fresh low-water observation window for this bin. */
	tbin->low_water = tbin->ncached;

	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins)
		tcache->next_gc_bin = 0;
	tcache->ev_cnt = 0;
}

/*
 * Small-allocation slow path: refill tbin from the arena (passing along any
 * accumulated profiling bytes when profiling is enabled), then retry the
 * easy allocation path.  Returns whatever tcache_alloc_easy() yields after
 * the refill -- presumably NULL if the arena could supply no regions (TODO
 * confirm against tcache_alloc_easy()).
 */
void *
tcache_alloc_small_hard(tsd_t *tsd, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, index_t binind)
{
	void *ret;

	arena_tcache_fill_small(arena, tbin, binind, config_prof ?
	    tcache->prof_accumbytes : 0);
	if (config_prof)
		tcache->prof_accumbytes = 0;
	ret = tcache_alloc_easy(tbin);

	return (ret);
}

/*
 * Flush all but rem cached objects from a small-object bin, returning each
 * object to the arena bin it was allocated from.  Each outer-loop pass locks
 * the bin of the arena owning avail[0] and frees every object belonging to
 * that arena; objects owned by other arenas are stashed and handled in a
 * later pass, so at most one bin lock is held at a time.  Thread-local stats
 * are merged into this thread's arena (or merged manually afterward if the
 * loop never visited it).
 */
void
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
    index_t binind, unsigned rem)
{
	arena_t *arena;
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < NBINS);
	assert(rem <= tbin->ncached);

	arena = arena_choose(tsd, NULL);
	assert(arena != NULL);
	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena bin associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *bin_arena = extent_node_arena_get(&chunk->node);
		arena_bin_t *bin = &bin_arena->bins[binind];

		if (config_prof && bin_arena == arena) {
			if (arena_prof_accum(arena, tcache->prof_accumbytes))
				prof_idump();
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(&bin->lock);
		if (config_stats && bin_arena == arena) {
			/* Each arena is visited at most once per flush. */
			assert(!merged_stats);
			merged_stats = true;
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (extent_node_arena_get(&chunk->node) == bin_arena) {
				size_t pageind = ((uintptr_t)ptr -
				    (uintptr_t)chunk) >> LG_PAGE;
				arena_chunk_map_bits_t *bitselm =
				    arena_bitselm_get(chunk, pageind);
				arena_dalloc_bin_junked_locked(bin_arena, chunk,
				    ptr, bitselm);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked. Stash the object, so that it can be
				 * handled in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&bin->lock);
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged. Manually do so now.
		 */
		arena_bin_t *bin = &arena->bins[binind];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nflushes++;
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&bin->lock);
	}

	/* Compact the rem survivors to the front of the avail stack. */
	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

/*
 * Large-object analogue of tcache_bin_flush_small(): flush all but rem cached
 * objects, locking one owning arena per outer-loop pass (the whole arena lock
 * rather than a bin lock) and deferring objects owned by other arenas to a
 * later pass.  Any profiling dump implied by accumulated bytes is issued only
 * after the arena lock is dropped.
 */
void
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, index_t binind,
    unsigned rem, tcache_t *tcache)
{
	arena_t *arena;
	void *ptr;
	unsigned i, nflush, ndeferred;
	bool merged_stats = false;

	assert(binind < nhbins);
	assert(rem <= tbin->ncached);

	arena = arena_choose(tsd, NULL);
	assert(arena != NULL);
	for (nflush = tbin->ncached - rem; nflush > 0; nflush = ndeferred) {
		/* Lock the arena associated with the first object. */
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
		    tbin->avail[0]);
		arena_t *locked_arena = extent_node_arena_get(&chunk->node);
		UNUSED bool idump;

		if (config_prof)
			idump = false;
		malloc_mutex_lock(&locked_arena->lock);
		if ((config_prof || config_stats) && locked_arena == arena) {
			if (config_prof) {
				idump = arena_prof_accum_locked(arena,
				    tcache->prof_accumbytes);
				tcache->prof_accumbytes = 0;
			}
			if (config_stats) {
				merged_stats = true;
				arena->stats.nrequests_large +=
				    tbin->tstats.nrequests;
				arena->stats.lstats[binind - NBINS].nrequests +=
				    tbin->tstats.nrequests;
				tbin->tstats.nrequests = 0;
			}
		}
		ndeferred = 0;
		for (i = 0; i < nflush; i++) {
			ptr = tbin->avail[i];
			assert(ptr != NULL);
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
			if (extent_node_arena_get(&chunk->node) ==
			    locked_arena) {
				arena_dalloc_large_junked_locked(locked_arena,
				    chunk, ptr);
			} else {
				/*
				 * This object was allocated via a different
				 * arena than the one that is currently locked.
				 * Stash the object, so that it can be handled
				 * in a future pass.
				 */
				tbin->avail[ndeferred] = ptr;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(&locked_arena->lock);
		/* Dump outside the lock to avoid holding it during I/O. */
		if (config_prof && idump)
			prof_idump();
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged. Manually do so now.
		 */
		malloc_mutex_lock(&arena->lock);
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		arena->stats.lstats[binind - NBINS].nrequests +=
		    tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(&arena->lock);
	}

	/* Compact the rem survivors to the front of the avail stack. */
	memmove(tbin->avail, &tbin->avail[tbin->ncached - rem],
	    rem * sizeof(void *));
	tbin->ncached = rem;
	if ((int)tbin->ncached < tbin->low_water)
		tbin->low_water = tbin->ncached;
}

/*
 * Associate tcache with arena.  Only list membership is involved, and only
 * when stats are enabled (the list exists so stats can be merged across the
 * arena's extant tcaches).
 */
void
tcache_arena_associate(tcache_t *tcache, arena_t *arena)
{

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(&arena->lock);
		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(&arena->lock);
	}
}

/* Move tcache's association from oldarena to newarena. */
void
tcache_arena_reassociate(tcache_t *tcache, arena_t *oldarena, arena_t *newarena)
{

	tcache_arena_dissociate(tcache, oldarena);
	tcache_arena_associate(tcache, newarena);
}

/*
 * Sever the tcache<->arena association: unlink from the arena's tcache list
 * (asserting membership in debug builds) and merge the tcache's stats into
 * the arena before it goes.
 */
void
tcache_arena_dissociate(tcache_t *tcache, arena_t *arena)
{

	if (config_stats) {
		/* Unlink from list of extant tcaches. */
		malloc_mutex_lock(&arena->lock);
		if (config_debug) {
			bool in_ql = false;
			tcache_t *iter;
			ql_foreach(iter, &arena->tcache_ql, link) {
				if (iter == tcache) {
					in_ql = true;
					break;
				}
			}
			assert(in_ql);
		}
		ql_remove(&arena->tcache_ql, tcache, link);
		tcache_stats_merge(tcache, arena);
		malloc_mutex_unlock(&arena->lock);
	}
}

/*
 * Slow path for obtaining this thread's tcache: returns NULL if tcaches are
 * disabled (memoizing the disabled state in TSD when possible) or if no
 * arena can be chosen; otherwise creates a fresh tcache.
 */
tcache_t *
tcache_get_hard(tsd_t *tsd)
{
	arena_t *arena;

	if (!tcache_enabled_get()) {
		if (tsd_nominal(tsd))
			tcache_enabled_set(false); /* Memoize. */
		return (NULL);
	}
	arena = arena_choose(tsd, NULL);
	if (unlikely(arena == NULL))
		return (NULL);
	return (tcache_create(tsd, arena));
}

/*
 * Allocate and initialize a tcache associated with arena.  The tcache header,
 * the tbins array, and all per-bin pointer stacks live in one cacheline-
 * aligned allocation; stack_offset walks the stack region as each bin's
 * avail pointer is assigned.  Returns NULL on allocation failure.
 */
tcache_t *
tcache_create(tsd_t *tsd, arena_t *arena)
{
	tcache_t *tcache;
	size_t size, stack_offset;
	unsigned i;

	size = offsetof(tcache_t, tbins) + (sizeof(tcache_bin_t) * nhbins);
	/* Naturally align the pointer stacks. */
	size = PTR_CEILING(size);
	stack_offset = size;
	size += stack_nelms * sizeof(void *);
	/* Avoid false cacheline sharing. */
	size = sa2u(size, CACHELINE);

	tcache = ipallocztm(tsd, size, CACHELINE, true, false, true, a0get());
	if (tcache == NULL)
		return (NULL);

	tcache_arena_associate(tcache, arena);

	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	for (i = 0; i < nhbins; i++) {
		tcache->tbins[i].lg_fill_div = 1;
		tcache->tbins[i].avail = (void **)((uintptr_t)tcache +
		    (uintptr_t)stack_offset);
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
	}

	return (tcache);
}

/*
 * Tear down a tcache: dissociate it from this thread's arena, flush every
 * bin completely (merging residual per-bin request stats), flush any
 * remaining profiling bytes, and free the tcache allocation itself.
 */
static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache)
{
	arena_t *arena;
	unsigned i;

	arena = arena_choose(tsd, NULL);
	tcache_arena_dissociate(tcache, arena);

	for (i = 0; i < NBINS; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_small(tsd, tcache, tbin, i, 0);

		if (config_stats && tbin->tstats.nrequests != 0) {
			arena_bin_t *bin = &arena->bins[i];
			malloc_mutex_lock(&bin->lock);
			bin->stats.nrequests += tbin->tstats.nrequests;
			malloc_mutex_unlock(&bin->lock);
		}
	}

	/* Continue through the large bins (i == NBINS after the loop above). */
	for (; i < nhbins; i++) {
		tcache_bin_t *tbin = &tcache->tbins[i];
		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);

		if (config_stats && tbin->tstats.nrequests != 0) {
			malloc_mutex_lock(&arena->lock);
			arena->stats.nrequests_large += tbin->tstats.nrequests;
			arena->stats.lstats[i - NBINS].nrequests +=
			    tbin->tstats.nrequests;
			malloc_mutex_unlock(&arena->lock);
		}
	}

	if (config_prof && tcache->prof_accumbytes > 0 &&
	    arena_prof_accum(arena, tcache->prof_accumbytes))
		prof_idump();

	idalloctm(tsd, tcache, false, true);
}

/*
 * TSD cleanup hook: destroy and clear this thread's tcache, if any.  No-op
 * when tcache support is compiled out.
 */
void
tcache_cleanup(tsd_t *tsd)
{
	tcache_t *tcache;

	if (!config_tcache)
		return;

	if ((tcache = tsd_tcache_get(tsd)) != NULL) {
		tcache_destroy(tsd, tcache);
		tsd_tcache_set(tsd, NULL);
	}
}

/* TSD cleanup hook for the tcache_enabled slot; nothing to release. */
void
tcache_enabled_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

/* Caller must own arena->lock. */
void
tcache_stats_merge(tcache_t *tcache, arena_t *arena)
{
	unsigned i;

	cassert(config_stats);

	/* Merge and reset tcache stats. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		tcache_bin_t *tbin = &tcache->tbins[i];
		malloc_mutex_lock(&bin->lock);
		bin->stats.nrequests += tbin->tstats.nrequests;
		malloc_mutex_unlock(&bin->lock);
		tbin->tstats.nrequests = 0;
	}

	for (; i < nhbins; i++) {
		malloc_large_stats_t *lstats = &arena->stats.lstats[i - NBINS];
		tcache_bin_t *tbin = &tcache->tbins[i];
		arena->stats.nrequests_large += tbin->tstats.nrequests;
		lstats->nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
	}
}

/*
 * Create an explicit tcache (tcache.create mallctl backend), storing its
 * index into *r_ind.  Lazily allocates the tcaches table, reuses a slot from
 * the free list when available, and otherwise appends at tcaches_past.
 * Returns true on failure (OOM or table exhausted), false on success.
 */
bool
tcaches_create(tsd_t *tsd, unsigned *r_ind)
{
	tcache_t *tcache;
	tcaches_t *elm;

	if (tcaches == NULL) {
		tcaches = base_alloc(sizeof(tcache_t *) *
		    (MALLOCX_TCACHE_MAX+1));
		if (tcaches == NULL)
			return (true);
	}

	if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX)
		return (true);
	tcache = tcache_create(tsd, a0get());
	if (tcache == NULL)
		return (true);

	if (tcaches_avail != NULL) {
		/* Reuse a previously destroyed slot. */
		elm = tcaches_avail;
		tcaches_avail = tcaches_avail->next;
		elm->tcache = tcache;
		*r_ind = elm - tcaches;
	} else {
		/* Claim the next never-used slot. */
		elm = &tcaches[tcaches_past];
		elm->tcache = tcache;
		*r_ind = tcaches_past;
		tcaches_past++;
	}

	return (false);
}

/* Destroy the tcache held by elm, if any, leaving the slot empty. */
static void
tcaches_elm_flush(tsd_t *tsd, tcaches_t *elm)
{

	if (elm->tcache == NULL)
		return;
	tcache_destroy(tsd, elm->tcache);
	elm->tcache = NULL;
}

/* tcache.flush mallctl backend: flush the explicit tcache at index ind. */
void
tcaches_flush(tsd_t *tsd, unsigned ind)
{

	tcaches_elm_flush(tsd, &tcaches[ind]);
}

/*
 * tcache.destroy mallctl backend: destroy the explicit tcache at index ind
 * and return its slot to the free list for reuse by tcaches_create().
 */
void
tcaches_destroy(tsd_t *tsd, unsigned ind)
{
	tcaches_t *elm = &tcaches[ind];
	tcaches_elm_flush(tsd, elm);
	elm->next = tcaches_avail;
	tcaches_avail = elm;
}

/*
 * One-time initialization: clamp tcache_maxclass to
 * [SMALL_MAXCLASS, arena_maxclass], derive nhbins, and build
 * tcache_bin_info (per-bin cache capacities) plus the total stack element
 * count used to size each tcache allocation.  Returns true on OOM.
 */
bool
tcache_boot(void)
{
	unsigned i;

	/*
	 * If necessary, clamp opt_lg_tcache_max, now that arena_maxclass is
	 * known.
	 */
	if (opt_lg_tcache_max < 0 || (1U << opt_lg_tcache_max) < SMALL_MAXCLASS)
		tcache_maxclass = SMALL_MAXCLASS;
	else if ((1U << opt_lg_tcache_max) > arena_maxclass)
		tcache_maxclass = arena_maxclass;
	else
		tcache_maxclass = (1U << opt_lg_tcache_max);

	nhbins = size2index(tcache_maxclass) + 1;

	/* Initialize tcache_bin_info. */
	tcache_bin_info = (tcache_bin_info_t *)base_alloc(nhbins *
	    sizeof(tcache_bin_info_t));
	if (tcache_bin_info == NULL)
		return (true);
	stack_nelms = 0;
	for (i = 0; i < NBINS; i++) {
		/* Cache 2x the bin's regs-per-run, clamped to [MIN, MAX]. */
		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MIN;
		} else if ((arena_bin_info[i].nregs << 1) <=
		    TCACHE_NSLOTS_SMALL_MAX) {
			tcache_bin_info[i].ncached_max =
			    (arena_bin_info[i].nregs << 1);
		} else {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MAX;
		}
		stack_nelms += tcache_bin_info[i].ncached_max;
	}
	for (; i < nhbins; i++) {
		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
		stack_nelms += tcache_bin_info[i].ncached_max;
	}

	return (false);
}