#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/safety_check.h"
#include "jemalloc/internal/util.h"

JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS

/******************************************************************************/
/* Data. */

/*
 * Define names for both uninitialized and initialized phases, so that
 * options and mallctl processing are straightforward.
 */
const char *percpu_arena_mode_names[] = {
	"percpu",
	"phycpu",
	"disabled",
	"percpu",
	"phycpu"
};
percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;

ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;

static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default;

const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y) \
	h,
	SMOOTHSTEP
#undef STEP
};
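
/*
 * Illustrative note (a sketch inferred from the table's use in
 * arena_decay_backlog_npages_limit() below): SMOOTHSTEP expands to
 * SMOOTHSTEP_NSTEPS rows of STEP(step, h, x, y), and only each row's h is
 * kept here: the smoothstep function evaluated at x = step / SMOOTHSTEP_NSTEPS,
 * scaled to fixed point with SMOOTHSTEP_BFP fractional bits.  The factors rise
 * from ~0.0 to 1.0, so a freshly dirtied page (backlog index
 * SMOOTHSTEP_NSTEPS-1, h ~= 1.0) still counts fully toward the number of pages
 * allowed to remain unpurged, while a page aged all the way to backlog index 0
 * (h ~= 0.0) contributes nothing and becomes purgeable.
 */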

static div_info_t arena_binind_div_info[SC_NBINS];

size_t opt_oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
size_t oversize_threshold = OVERSIZE_THRESHOLD_DEFAULT;
static unsigned huge_arena_ind;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
    size_t npages_decay_max, bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
    bool is_background_thread, bool all);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin);

/******************************************************************************/

void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena_dss_prec_get(arena)];
	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
	*ndirty += extents_npages_get(&arena->extents_dirty);
	*nmuzzy += extents_npages_get(&arena->extents_muzzy);
}

void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
    bin_stats_t *bstats, arena_stats_large_t *lstats,
    arena_stats_extents_t *estats) {
	cassert(config_stats);

	arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
	    muzzy_decay_ms, nactive, ndirty, nmuzzy);

	size_t base_allocated, base_resident, base_mapped, metadata_thp;
	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
	    &base_mapped, &metadata_thp);

	arena_stats_lock(tsdn, &arena->stats);

	arena_stats_accum_zu(&astats->mapped, base_mapped
	    + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
	arena_stats_accum_zu(&astats->retained,
	    extents_npages_get(&arena->extents_retained) << LG_PAGE);

	atomic_store_zu(&astats->extent_avail,
	    atomic_load_zu(&arena->extent_avail_cnt, ATOMIC_RELAXED),
	    ATOMIC_RELAXED);

	arena_stats_accum_u64(&astats->decay_dirty.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.npurge));
	arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.nmadvise));
	arena_stats_accum_u64(&astats->decay_dirty.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.purged));

	arena_stats_accum_u64(&astats->decay_muzzy.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.npurge));
	arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.nmadvise));
	arena_stats_accum_u64(&astats->decay_muzzy.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.purged));

	arena_stats_accum_zu(&astats->base, base_allocated);
	arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
	arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
	arena_stats_accum_zu(&astats->resident, base_resident +
	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
	    extents_npages_get(&arena->extents_dirty) +
	    extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));
	arena_stats_accum_zu(&astats->abandoned_vm, atomic_load_zu(
	    &arena->stats.abandoned_vm, ATOMIC_RELAXED));

	for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
		uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nmalloc);
		arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
		arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);

		uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].ndalloc);
		arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
		arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);

		uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nrequests);
		arena_stats_accum_u64(&lstats[i].nrequests,
		    nmalloc + nrequests);
		arena_stats_accum_u64(&astats->nrequests_large,
		    nmalloc + nrequests);

		/* nfill == nmalloc for large currently. */
		arena_stats_accum_u64(&lstats[i].nfills, nmalloc);
		arena_stats_accum_u64(&astats->nfills_large, nmalloc);

		uint64_t nflush = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nflushes);
		arena_stats_accum_u64(&lstats[i].nflushes, nflush);
		arena_stats_accum_u64(&astats->nflushes_large, nflush);

		assert(nmalloc >= ndalloc);
		assert(nmalloc - ndalloc <= SIZE_T_MAX);
		size_t curlextents = (size_t)(nmalloc - ndalloc);
		lstats[i].curlextents += curlextents;
		arena_stats_accum_zu(&astats->allocated_large,
		    curlextents * sz_index2size(SC_NBINS + i));
	}

	for (pszind_t i = 0; i < SC_NPSIZES; i++) {
		size_t dirty, muzzy, retained, dirty_bytes, muzzy_bytes,
		    retained_bytes;
		dirty = extents_nextents_get(&arena->extents_dirty, i);
		muzzy = extents_nextents_get(&arena->extents_muzzy, i);
		retained = extents_nextents_get(&arena->extents_retained, i);
		dirty_bytes = extents_nbytes_get(&arena->extents_dirty, i);
		muzzy_bytes = extents_nbytes_get(&arena->extents_muzzy, i);
		retained_bytes =
		    extents_nbytes_get(&arena->extents_retained, i);

		atomic_store_zu(&estats[i].ndirty, dirty, ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].nmuzzy, muzzy, ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].nretained, retained, ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].dirty_bytes, dirty_bytes,
		    ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].muzzy_bytes, muzzy_bytes,
		    ATOMIC_RELAXED);
		atomic_store_zu(&estats[i].retained_bytes, retained_bytes,
		    ATOMIC_RELAXED);
	}

	arena_stats_unlock(tsdn, &arena->stats);

	/* tcache_bytes counts currently cached bytes. */
	atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
	cache_bin_array_descriptor_t *descriptor;
	ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
		szind_t i = 0;
		for (; i < SC_NBINS; i++) {
			cache_bin_t *tbin = &descriptor->bins_small[i];
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		}
		for (; i < nhbins; i++) {
			cache_bin_t *tbin = &descriptor->bins_large[i];
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		}
	}
	malloc_mutex_prof_read(tsdn,
	    &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
	    &arena->tcache_ql_mtx);
	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);

#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind)				\
    malloc_mutex_lock(tsdn, &arena->mtx);				\
    malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind],		\
        &arena->mtx);							\
    malloc_mutex_unlock(tsdn, &arena->mtx);

	/* Gather per arena mutex profiling data. */
	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
	READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
	    arena_prof_mutex_extent_avail)
	READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
	    arena_prof_mutex_extents_dirty)
	READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
	    arena_prof_mutex_extents_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
	    arena_prof_mutex_extents_retained)
	READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
	    arena_prof_mutex_decay_dirty)
	READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
	    arena_prof_mutex_decay_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(base->mtx,
	    arena_prof_mutex_base)
#undef READ_ARENA_MUTEX_PROF_DATA

	nstime_copy(&astats->uptime, &arena->create_time);
	nstime_update(&astats->uptime);
	nstime_subtract(&astats->uptime, &arena->create_time);

	for (szind_t i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			bin_stats_merge(tsdn, &bstats[i],
			    &arena->bins[i].bin_shards[j]);
		}
	}
}

void
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
	    extent);
	if (arena_dirty_decay_ms_get(arena) == 0) {
		arena_decay_dirty(tsdn, arena, false, true);
	} else {
		arena_background_thread_inactivity_check(tsdn, arena, false);
	}
}

static void *
arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
	void *ret;
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	size_t regind;

	assert(extent_nfree_get(slab) > 0);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)extent_addr_get(slab) +
	    (uintptr_t)(bin_info->reg_size * regind));
	extent_nfree_dec(slab);
	return ret;
}
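
/*
 * Illustration (values hypothetical): bitmap_sfu() ("set first unset") both
 * locates the lowest-numbered free region and marks it allocated.  For a bin
 * with reg_size == 48, regind == 3 yields the region at byte offset 144 from
 * the slab base, so the bitmap and the slab's nfree count stay in sync with
 * the addresses handed out.
 */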

static void
arena_slab_reg_alloc_batch(extent_t *slab, const bin_info_t *bin_info,
    unsigned cnt, void** ptrs) {
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);

	assert(extent_nfree_get(slab) >= cnt);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

#if (! defined JEMALLOC_INTERNAL_POPCOUNTL) || (defined BITMAP_USE_TREE)
	for (unsigned i = 0; i < cnt; i++) {
		size_t regind = bitmap_sfu(slab_data->bitmap,
		    &bin_info->bitmap_info);
		*(ptrs + i) = (void *)((uintptr_t)extent_addr_get(slab) +
		    (uintptr_t)(bin_info->reg_size * regind));
	}
#else
	unsigned group = 0;
	bitmap_t g = slab_data->bitmap[group];
	unsigned i = 0;
	while (i < cnt) {
		while (g == 0) {
			g = slab_data->bitmap[++group];
		}
		size_t shift = group << LG_BITMAP_GROUP_NBITS;
		size_t pop = popcount_lu(g);
		if (pop > (cnt - i)) {
			pop = cnt - i;
		}

		/*
		 * Load from memory locations only once, outside the
		 * hot loop below.
		 */
		uintptr_t base = (uintptr_t)extent_addr_get(slab);
		uintptr_t regsize = (uintptr_t)bin_info->reg_size;
		while (pop--) {
			size_t bit = cfs_lu(&g);
			size_t regind = shift + bit;
			*(ptrs + i) = (void *)(base + regsize * regind);

			i++;
		}
		slab_data->bitmap[group] = g;
	}
#endif
	extent_nfree_sub(slab, cnt);
}
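
/*
 * Sketch of the word-at-a-time walk above (assuming the usual bitmap
 * encoding, in which a set bit denotes a free region): g caches one bitmap
 * word, and cfs_lu() ("clear first set") returns the position of g's lowest
 * set bit and clears it, giving regind = (group << LG_BITMAP_GROUP_NBITS) +
 * bit.  For example, g == 0b10110 yields bits 1, 2, then 4 on successive
 * iterations.  The word is written back once per group rather than once per
 * region, which is what makes this path cheaper than repeated bitmap_sfu()
 * calls.
 */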

#ifndef JEMALLOC_JET
static
#endif
size_t
arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
	size_t diff, regind;

	/* Freeing a pointer outside the slab can cause assertion failure. */
	assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
	    (uintptr_t)bin_infos[binind].reg_size == 0);

	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));

	/* Avoid doing division with a variable divisor. */
	regind = div_compute(&arena_binind_div_info[binind], diff);

	assert(regind < bin_infos[binind].nregs);

	return regind;
}
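
/*
 * Worked example (numbers hypothetical): arena_binind_div_info[binind]
 * caches a precomputed multiplicative inverse of the bin's reg_size, so
 * div_compute() replaces the divide with a multiply and shift.  For
 * reg_size == 48 and diff == 144 it returns regind == 3, exactly as
 * diff / 48 would, but without a hardware division on the deallocation
 * fast path.
 */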

static void
arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
	szind_t binind = extent_szind_get(slab);
	const bin_info_t *bin_info = &bin_infos[binind];
	size_t regind = arena_slab_regind(slab, binind, ptr);

	assert(extent_nfree_get(slab) < bin_info->nregs);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
	extent_nfree_inc(slab);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages) {
	atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages) {
	assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
	atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
}

static void
arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < SC_LARGE_MINCLASS) {
		usize = SC_LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;

	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].nmalloc, 1);
}

static void
arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < SC_LARGE_MINCLASS) {
		usize = SC_LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;

	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].ndalloc, 1);
}

static void
arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
    size_t usize) {
	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
	arena_large_malloc_stats_update(tsdn, arena, usize);
}

static bool
arena_may_have_muzzy(arena_t *arena) {
	return (pages_can_purge_lazy && (arena_muzzy_decay_ms_get(arena) != 0));
}
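
/*
 * arena_extent_alloc_large() below tries progressively more expensive
 * sources: first recycle a dirty extent, then (only when lazy purging can
 * have produced them) a muzzy extent, and only as a last resort map new
 * memory through the extent hooks.  Only that last case grows stats.mapped,
 * which is why mapped_add is tracked separately.
 */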

extent_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	szind_t szind = sz_size2index(usize);
	size_t mapped_add;
	bool commit = true;
	extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
	    szind, zero, &commit);
	if (extent == NULL && arena_may_have_muzzy(arena)) {
		extent = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
		    false, szind, zero, &commit);
	}
	size_t size = usize + sz_large_pad;
	if (extent == NULL) {
		extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
		    usize, sz_large_pad, alignment, false, szind, zero,
		    &commit);
		if (config_stats) {
			/*
			 * extent may be NULL on OOM, but in that case
			 * mapped_add isn't used below, so there's no need to
			 * conditionally set it to 0 here.
			 */
			mapped_add = size;
		}
	} else if (config_stats) {
		mapped_add = 0;
	}

	if (extent != NULL) {
		if (config_stats) {
			arena_stats_lock(tsdn, &arena->stats);
			arena_large_malloc_stats_update(tsdn, arena, usize);
			if (mapped_add != 0) {
				arena_stats_add_zu(tsdn, &arena->stats,
				    &arena->stats.mapped, mapped_add);
			}
			arena_stats_unlock(tsdn, &arena->stats);
		}
		arena_nactive_add(arena, size >> LG_PAGE);
	}

	return extent;
}

void
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_dalloc_stats_update(tsdn, arena,
		    extent_usize_get(extent));
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
}

void
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
	size_t usize = extent_usize_get(extent);
	size_t udiff = oldusize - usize;

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);
}

void
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
	size_t usize = extent_usize_get(extent);
	size_t udiff = usize - oldusize;

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);
}

static ssize_t
arena_decay_ms_read(arena_decay_t *decay) {
	return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
}

static void
arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
	atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
}

static void
arena_decay_deadline_init(arena_decay_t *decay) {
	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&decay->deadline, &decay->epoch);
	nstime_add(&decay->deadline, &decay->interval);
	if (arena_decay_ms_read(decay) > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
		    nstime_ns(&decay->interval)));
		nstime_add(&decay->deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
	return (nstime_compare(&decay->deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += decay->backlog[i] * h_steps[i];
	}
	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);

	return npages_limit_backlog;
}
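
/*
 * Numeric sketch (hypothetical backlog contents): with SMOOTHSTEP_BFP
 * fractional bits, a backlog entry of 1000 pages at decay factor 1.0
 * (h == 1 << SMOOTHSTEP_BFP) contributes the full 1000 pages to sum, while
 * the same entry aged to h ~= 0.5 contributes about 500.  The final
 * sum >> SMOOTHSTEP_BFP truncates, so the limit only ever rounds down to
 * whole pages.
 */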

static void
arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
	size_t npages_delta = (current_npages > decay->nunpurged) ?
	    current_npages - decay->nunpurged : 0;
	decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;

	if (config_debug) {
		if (current_npages > decay->ceil_npages) {
			decay->ceil_npages = current_npages;
		}
		size_t npages_limit = arena_decay_backlog_npages_limit(decay);
		assert(decay->ceil_npages >= npages_limit);
		if (decay->ceil_npages > npages_limit) {
			decay->ceil_npages = npages_limit;
		}
	}
}

static void
arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
    size_t current_npages) {
	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
		memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		size_t nadvance_z = (size_t)nadvance_u64;

		assert((uint64_t)nadvance_z == nadvance_u64);

		memmove(decay->backlog, &decay->backlog[nadvance_z],
		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
		if (nadvance_z > 1) {
			memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
		}
	}

	arena_decay_backlog_update_last(decay, current_npages);
}

static void
arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, size_t current_npages, size_t npages_limit,
    bool is_background_thread) {
	if (current_npages > npages_limit) {
		arena_decay_to_limit(tsdn, arena, decay, extents, false,
		    npages_limit, current_npages - npages_limit,
		    is_background_thread);
	}
}
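
/*
 * Epoch arithmetic in the helper below, with hypothetical times: given
 * interval == 50ms, an epoch starting at t0, and a wakeup at t0 + 175ms,
 * nstime_divide() yields nadvance_u64 == 3; the epoch advances to t0 + 150ms,
 * the backlog shifts by three slots, and the 25ms remainder is left for the
 * next epoch.
 */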

static void
arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
    size_t current_npages) {
	assert(arena_decay_deadline_reached(decay, time));

	nstime_t delta;
	nstime_copy(&delta, time);
	nstime_subtract(&delta, &decay->epoch);

	uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
	assert(nadvance_u64 > 0);

	/* Add nadvance_u64 decay intervals to epoch. */
	nstime_copy(&delta, &decay->interval);
	nstime_imultiply(&delta, nadvance_u64);
	nstime_add(&decay->epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(decay);

	/* Update the backlog. */
	arena_decay_backlog_update(decay, nadvance_u64, current_npages);
}

static void
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, const nstime_t *time, bool is_background_thread) {
	size_t current_npages = extents_npages_get(extents);
	arena_decay_epoch_advance_helper(decay, time, current_npages);

	size_t npages_limit = arena_decay_backlog_npages_limit(decay);
	/* We may unlock decay->mtx when try_purge(). Finish logging first. */
	decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
	    current_npages;

	if (!background_thread_enabled() || is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    current_npages, npages_limit, is_background_thread);
	}
}

static void
arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
	arena_decay_ms_write(decay, decay_ms);
	if (decay_ms > 0) {
		nstime_init(&decay->interval, (uint64_t)decay_ms *
		    KQU(1000000));
		nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&decay->epoch, 0);
	nstime_update(&decay->epoch);
	decay->jitter_state = (uint64_t)(uintptr_t)decay;
	arena_decay_deadline_init(decay);
	decay->nunpurged = 0;
	memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}
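
/*
 * Example (assuming the stock smoothstep table of 200 steps): decay_ms ==
 * 10000 divides the 10s decay window into SMOOTHSTEP_NSTEPS epochs of 50ms
 * each; every elapsed epoch ages the backlog by one slot.
 */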

static bool
arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
    arena_stats_decay_t *stats) {
	if (config_debug) {
		for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
			assert(((char *)decay)[i] == 0);
		}
		decay->ceil_npages = 0;
	}
	if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	decay->purging = false;
	arena_decay_reinit(decay, decay_ms);
	/* Memory is zeroed, so there is no need to clear stats. */
	if (config_stats) {
		decay->stats = stats;
	}
	return false;
}

static bool
arena_decay_ms_valid(ssize_t decay_ms) {
	if (decay_ms < -1) {
		return false;
	}
	if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
	    KQU(1000)) {
		return true;
	}
	return false;
}

static bool
arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread) {
	malloc_mutex_assert_owner(tsdn, &decay->mtx);

	/* Purge all or nothing if the option is disabled. */
	ssize_t decay_ms = arena_decay_ms_read(decay);
	if (decay_ms <= 0) {
		if (decay_ms == 0) {
			arena_decay_to_limit(tsdn, arena, decay, extents, false,
			    0, extents_npages_get(extents),
			    is_background_thread);
		}
		return false;
	}

	nstime_t time;
	nstime_init(&time, 0);
	nstime_update(&time);
	if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
	    > 0)) {
		/*
		 * Time went backwards.  Move the epoch back in time and
		 * generate a new deadline, with the expectation that time
		 * typically flows forward for long enough periods of time that
		 * epochs complete.  Unfortunately, this strategy is susceptible
		 * to clock jitter triggering premature epoch advances, but
		 * clock jitter estimation and compensation isn't feasible here
		 * because calls into this code are event-driven.
		 */
		nstime_copy(&decay->epoch, &time);
		arena_decay_deadline_init(decay);
	} else {
		/* Verify that time does not go backwards. */
		assert(nstime_compare(&decay->epoch, &time) <= 0);
	}

	/*
	 * If the deadline has been reached, advance to the current epoch and
	 * purge to the new limit if necessary.  Note that dirty pages created
	 * during the current epoch are not subject to purge until a future
	 * epoch, so purging only happens during epoch advances or when
	 * triggered by a background thread (as a scheduled event).
	 */
	bool advance_epoch = arena_decay_deadline_reached(decay, &time);
	if (advance_epoch) {
		arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
		    is_background_thread);
	} else if (is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    extents_npages_get(extents),
		    arena_decay_backlog_npages_limit(decay),
		    is_background_thread);
	}

	return advance_epoch;
}

static ssize_t
arena_decay_ms_get(arena_decay_t *decay) {
	return arena_decay_ms_read(decay);
}

ssize_t
arena_dirty_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_dirty);
}

ssize_t
arena_muzzy_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_muzzy);
}

static bool
arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}

	malloc_mutex_lock(tsdn, &decay->mtx);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_ms changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_reinit(decay, decay_ms);
	arena_maybe_decay(tsdn, arena, decay, extents, false);
	malloc_mutex_unlock(tsdn, &decay->mtx);

	return false;
}

bool
arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, decay_ms);
}

bool
arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, decay_ms);
}

static size_t
arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
    size_t npages_decay_max, extent_list_t *decay_extents) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Stash extents according to npages_limit. */
	size_t nstashed = 0;
	extent_t *extent;
	while (nstashed < npages_decay_max &&
	    (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
	    npages_limit)) != NULL) {
		extent_list_append(decay_extents, extent);
		nstashed += extent_size_get(extent) >> LG_PAGE;
	}
	return nstashed;
}
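
/*
 * arena_decay_stashed() below finishes what arena_stash_decayed() starts:
 * when muzzy decay is enabled, dirty extents are demoted to muzzy via a lazy
 * purge (e.g. MADV_FREE where available); otherwise, and for extents that
 * are already muzzy, the extent is unmapped outright.  Retained extents are
 * never decayed here.
 */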

static size_t
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
    bool all, extent_list_t *decay_extents, bool is_background_thread) {
	size_t nmadvise, nunmapped;
	size_t npurged;

	if (config_stats) {
		nmadvise = 0;
		nunmapped = 0;
	}
	npurged = 0;

	ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	for (extent_t *extent = extent_list_first(decay_extents); extent !=
	    NULL; extent = extent_list_first(decay_extents)) {
		if (config_stats) {
			nmadvise++;
		}
		size_t npages = extent_size_get(extent) >> LG_PAGE;
		npurged += npages;
		extent_list_remove(decay_extents, extent);
		switch (extents_state_get(extents)) {
		case extent_state_active:
			not_reached();
		case extent_state_dirty:
			if (!all && muzzy_decay_ms != 0 &&
			    !extent_purge_lazy_wrapper(tsdn, arena,
			    r_extent_hooks, extent, 0,
			    extent_size_get(extent))) {
				extents_dalloc(tsdn, arena, r_extent_hooks,
				    &arena->extents_muzzy, extent);
				arena_background_thread_inactivity_check(tsdn,
				    arena, is_background_thread);
				break;
			}
			/* Fall through. */
		case extent_state_muzzy:
			extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
			    extent);
			if (config_stats) {
				nunmapped += npages;
			}
			break;
		case extent_state_retained:
		default:
			not_reached();
		}
	}

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
		    1);
		arena_stats_add_u64(tsdn, &arena->stats,
		    &decay->stats->nmadvise, nmadvise);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
		    npurged);
		arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
		    nunmapped << LG_PAGE);
		arena_stats_unlock(tsdn, &arena->stats);
	}

	return npurged;
}

/*
 * npages_limit: Decay at most npages_decay_max pages without violating the
 * invariant: (extents_npages_get(extents) >= npages_limit).  We need an upper
 * bound on number of pages in order to prevent unbounded growth (namely in
 * stashed), otherwise unbounded new pages could be added to extents during the
 * current decay run, so that the purging thread never finishes.
 */
static void
arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
    bool is_background_thread) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 1);
	malloc_mutex_assert_owner(tsdn, &decay->mtx);

	if (decay->purging) {
		return;
	}
	decay->purging = true;
	malloc_mutex_unlock(tsdn, &decay->mtx);

	extent_hooks_t *extent_hooks = extent_hooks_get(arena);

	extent_list_t decay_extents;
	extent_list_init(&decay_extents);

	size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
	    npages_limit, npages_decay_max, &decay_extents);
	if (npurge != 0) {
		size_t npurged = arena_decay_stashed(tsdn, arena,
		    &extent_hooks, decay, extents, all, &decay_extents,
		    is_background_thread);
		assert(npurged == npurge);
	}

	malloc_mutex_lock(tsdn, &decay->mtx);
	decay->purging = false;
}
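
/*
 * A note on arena_decay_impl() below: npages_new is assigned only when
 * epoch_advanced is true and is read only under the same condition, so the
 * maybe-uninitialized warning some compilers emit for it is spurious; this
 * pattern is likely part of why this file enables
 * JEMALLOC_DIAGNOSTIC_DISABLE_SPURIOUS at the top.
 */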

static bool
arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread, bool all) {
	if (all) {
		malloc_mutex_lock(tsdn, &decay->mtx);
		arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
		    extents_npages_get(extents), is_background_thread);
		malloc_mutex_unlock(tsdn, &decay->mtx);

		return false;
	}

	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		/* No need to wait if another thread is in progress. */
		return true;
	}

	bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
	    is_background_thread);
	size_t npages_new;
	if (epoch_advanced) {
		/* Backlog is updated on epoch advance. */
		npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
	}
	malloc_mutex_unlock(tsdn, &decay->mtx);

	if (have_background_thread && background_thread_enabled() &&
	    epoch_advanced && !is_background_thread) {
		background_thread_interval_check(tsdn, arena, decay,
		    npages_new);
	}

	return false;
}

static bool
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
	return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, is_background_thread, all);
}

static bool
arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
	return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, is_background_thread, all);
}

void
arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
		return;
	}
	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}

static void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);

	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
}

static void
arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) > 0);
	extent_heap_insert(&bin->slabs_nonfull, slab);
	if (config_stats) {
		bin->stats.nonfull_slabs++;
	}
}

static void
arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
	extent_heap_remove(&bin->slabs_nonfull, slab);
	if (config_stats) {
		bin->stats.nonfull_slabs--;
	}
}

static extent_t *
arena_bin_slabs_nonfull_tryget(bin_t *bin) {
	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
	if (slab == NULL) {
		return NULL;
	}
	if (config_stats) {
		bin->stats.reslabs++;
		bin->stats.nonfull_slabs--;
	}
	return slab;
}

static void
arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) == 0);
	/*
	 * Tracking extents is required by arena_reset, which is not allowed
	 * for auto arenas.  Bypass this step to avoid touching the extent
	 * linkage (often results in cache misses) for auto arenas.
	 */
	if (arena_is_auto(arena)) {
		return;
	}
	extent_list_append(&bin->slabs_full, slab);
}

static void
arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
	if (arena_is_auto(arena)) {
		return;
	}
	extent_list_remove(&bin->slabs_full, slab);
}

static void
arena_bin_reset(tsd_t *tsd, arena_t *arena, bin_t *bin) {
	extent_t *slab;

	malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	if (bin->slabcur != NULL) {
		slab = bin->slabcur;
		bin->slabcur = NULL;
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	}
	while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	}
	for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
	    slab = extent_list_first(&bin->slabs_full)) {
		arena_bin_slabs_full_remove(arena, bin, slab);
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
	}
	if (config_stats) {
		bin->stats.curregs = 0;
		bin->stats.curslabs = 0;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
}
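
/*
 * arena_reset() backs the "arena.<i>.reset" mallctl (and the reset half of
 * "arena.<i>.destroy"): it discards every allocation in the arena while
 * leaving the arena itself usable.
 */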
The caller guarantees that 10991f0a49e8SJason Evans * no concurrent operations are happening in this arena, but there are 11001f0a49e8SJason Evans * still reasons that some locking is necessary: 11011f0a49e8SJason Evans * 11021f0a49e8SJason Evans * - Some of the functions in the transitive closure of calls assume 11031f0a49e8SJason Evans * appropriate locks are held, and in some cases these locks are 11041f0a49e8SJason Evans * temporarily dropped to avoid lock order reversal or deadlock due to 11051f0a49e8SJason Evans * reentry. 11061f0a49e8SJason Evans * - mallctl("epoch", ...) may concurrently refresh stats. While 11071f0a49e8SJason Evans * strictly speaking this is a "concurrent operation", disallowing 11081f0a49e8SJason Evans * stats refreshes would impose an inconvenient burden. 11091f0a49e8SJason Evans */ 11101f0a49e8SJason Evans 1111b7eaed25SJason Evans /* Large allocations. */ 1112b7eaed25SJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); 11131f0a49e8SJason Evans 1114b7eaed25SJason Evans for (extent_t *extent = extent_list_first(&arena->large); extent != 1115b7eaed25SJason Evans NULL; extent = extent_list_first(&arena->large)) { 1116b7eaed25SJason Evans void *ptr = extent_base_get(extent); 11171f0a49e8SJason Evans size_t usize; 11181f0a49e8SJason Evans 1119b7eaed25SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); 1120b7eaed25SJason Evans alloc_ctx_t alloc_ctx; 1121b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 1122b7eaed25SJason Evans rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 1123b7eaed25SJason Evans (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 1124*e1c167d0SJason Evans assert(alloc_ctx.szind != SC_NSIZES); 11251f0a49e8SJason Evans 1126b7eaed25SJason Evans if (config_stats || (config_prof && opt_prof)) { 1127b7eaed25SJason Evans usize = sz_index2size(alloc_ctx.szind); 1128b7eaed25SJason Evans assert(usize == isalloc(tsd_tsdn(tsd), ptr)); 1129b7eaed25SJason Evans } 1130b7eaed25SJason Evans /* Remove large allocation from prof sample set. */ 1131b7eaed25SJason Evans if (config_prof && opt_prof) { 1132b7eaed25SJason Evans prof_free(tsd, ptr, usize, &alloc_ctx); 1133b7eaed25SJason Evans } 1134b7eaed25SJason Evans large_dalloc(tsd_tsdn(tsd), extent); 1135b7eaed25SJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); 1136b7eaed25SJason Evans } 1137b7eaed25SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); 11381f0a49e8SJason Evans 11391f0a49e8SJason Evans /* Bins. */ 1140*e1c167d0SJason Evans for (unsigned i = 0; i < SC_NBINS; i++) { 1141*e1c167d0SJason Evans for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { 1142*e1c167d0SJason Evans arena_bin_reset(tsd, arena, 1143*e1c167d0SJason Evans &arena->bins[i].bin_shards[j]); 1144b7eaed25SJason Evans } 11451f0a49e8SJason Evans } 11461f0a49e8SJason Evans 1147b7eaed25SJason Evans atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); 1148a4bd5210SJason Evans } 1149a4bd5210SJason Evans 1150a4bd5210SJason Evans static void 1151b7eaed25SJason Evans arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) { 1152a4bd5210SJason Evans /* 1153b7eaed25SJason Evans * Iterate over the retained extents and destroy them. This gives the 1154b7eaed25SJason Evans * extent allocator underlying the extent hooks an opportunity to unmap 1155b7eaed25SJason Evans * all retained memory without having to keep its own metadata 1156b7eaed25SJason Evans * structures. 
In practice, virtual memory for dss-allocated extents is 1157b7eaed25SJason Evans * leaked here, so best practice is to avoid dss for arenas to be 1158b7eaed25SJason Evans * destroyed, or provide custom extent hooks that track retained 1159b7eaed25SJason Evans * dss-based extents for later reuse. 1160a4bd5210SJason Evans */ 1161b7eaed25SJason Evans extent_hooks_t *extent_hooks = extent_hooks_get(arena); 1162b7eaed25SJason Evans extent_t *extent; 1163b7eaed25SJason Evans while ((extent = extents_evict(tsdn, arena, &extent_hooks, 1164b7eaed25SJason Evans &arena->extents_retained, 0)) != NULL) { 1165b7eaed25SJason Evans extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent); 1166d0e79aa3SJason Evans } 1167a4bd5210SJason Evans } 1168a4bd5210SJason Evans 1169a4bd5210SJason Evans void 1170b7eaed25SJason Evans arena_destroy(tsd_t *tsd, arena_t *arena) { 1171b7eaed25SJason Evans assert(base_ind_get(arena->base) >= narenas_auto); 1172b7eaed25SJason Evans assert(arena_nthreads_get(arena, false) == 0); 1173b7eaed25SJason Evans assert(arena_nthreads_get(arena, true) == 0); 1174b7eaed25SJason Evans 1175b7eaed25SJason Evans /* 1176b7eaed25SJason Evans * No allocations have occurred since arena_reset() was called. 1177b7eaed25SJason Evans * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached 1178b7eaed25SJason Evans * extents, so only retained extents may remain. 1179b7eaed25SJason Evans */ 1180b7eaed25SJason Evans assert(extents_npages_get(&arena->extents_dirty) == 0); 1181b7eaed25SJason Evans assert(extents_npages_get(&arena->extents_muzzy) == 0); 1182b7eaed25SJason Evans 1183b7eaed25SJason Evans /* Deallocate retained memory. */ 1184b7eaed25SJason Evans arena_destroy_retained(tsd_tsdn(tsd), arena); 1185b7eaed25SJason Evans 1186b7eaed25SJason Evans /* 1187b7eaed25SJason Evans * Remove the arena pointer from the arenas array. We rely on the fact 1188b7eaed25SJason Evans * that there is no way for the application to get a dirty read from the 1189b7eaed25SJason Evans * arenas array unless there is an inherent race in the application 1190b7eaed25SJason Evans * involving access of an arena being concurrently destroyed. The 1191b7eaed25SJason Evans * application must synchronize knowledge of the arena's validity, so as 1192b7eaed25SJason Evans * long as we use an atomic write to update the arenas array, the 1193b7eaed25SJason Evans * application will get a clean read any time after it synchronizes 1194b7eaed25SJason Evans * knowledge that the arena is no longer valid. 1195b7eaed25SJason Evans */ 1196b7eaed25SJason Evans arena_set(base_ind_get(arena->base), NULL); 1197b7eaed25SJason Evans 1198b7eaed25SJason Evans /* 1199b7eaed25SJason Evans * Destroy the base allocator, which manages all metadata ever mapped by 1200b7eaed25SJason Evans * this arena. 
1201b7eaed25SJason Evans */ 12028b2f5aafSJason Evans base_delete(tsd_tsdn(tsd), arena->base); 1203b7eaed25SJason Evans } 1204b7eaed25SJason Evans 1205b7eaed25SJason Evans static extent_t * 1206b7eaed25SJason Evans arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, 12070ef50b4eSJason Evans extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info, 1208b7eaed25SJason Evans szind_t szind) { 1209b7eaed25SJason Evans extent_t *slab; 1210b7eaed25SJason Evans bool zero, commit; 1211b7eaed25SJason Evans 1212b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1213b7eaed25SJason Evans WITNESS_RANK_CORE, 0); 1214b7eaed25SJason Evans 1215b7eaed25SJason Evans zero = false; 1216b7eaed25SJason Evans commit = true; 1217b7eaed25SJason Evans slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL, 1218b7eaed25SJason Evans bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit); 1219b7eaed25SJason Evans 1220b7eaed25SJason Evans if (config_stats && slab != NULL) { 1221b7eaed25SJason Evans arena_stats_mapped_add(tsdn, &arena->stats, 1222b7eaed25SJason Evans bin_info->slab_size); 1223b7eaed25SJason Evans } 1224b7eaed25SJason Evans 1225b7eaed25SJason Evans return slab; 1226b7eaed25SJason Evans } 1227b7eaed25SJason Evans 1228b7eaed25SJason Evans static extent_t * 1229*e1c167d0SJason Evans arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, unsigned binshard, 12300ef50b4eSJason Evans const bin_info_t *bin_info) { 1231b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1232b7eaed25SJason Evans WITNESS_RANK_CORE, 0); 1233b7eaed25SJason Evans 1234b7eaed25SJason Evans extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; 1235b7eaed25SJason Evans szind_t szind = sz_size2index(bin_info->reg_size); 1236b7eaed25SJason Evans bool zero = false; 1237b7eaed25SJason Evans bool commit = true; 1238b7eaed25SJason Evans extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks, 1239b7eaed25SJason Evans &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true, 1240b7eaed25SJason Evans binind, &zero, &commit); 1241*e1c167d0SJason Evans if (slab == NULL && arena_may_have_muzzy(arena)) { 1242b7eaed25SJason Evans slab = extents_alloc(tsdn, arena, &extent_hooks, 1243b7eaed25SJason Evans &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE, 1244b7eaed25SJason Evans true, binind, &zero, &commit); 1245b7eaed25SJason Evans } 1246b7eaed25SJason Evans if (slab == NULL) { 1247b7eaed25SJason Evans slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks, 1248b7eaed25SJason Evans bin_info, szind); 1249b7eaed25SJason Evans if (slab == NULL) { 1250b7eaed25SJason Evans return NULL; 1251b7eaed25SJason Evans } 1252b7eaed25SJason Evans } 1253b7eaed25SJason Evans assert(extent_slab_get(slab)); 1254b7eaed25SJason Evans 1255b7eaed25SJason Evans /* Initialize slab internals. 
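 * (Illustrative editorial note, not in the original source: per the calls
 * just below, a fresh slab starts with all bin_info->nregs regions free,
 * records its owning bin shard in the extent header via
 * extent_nfree_binshard_set(), and gets a region bitmap initialized with
 * every region available.)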
*/ 1256b7eaed25SJason Evans arena_slab_data_t *slab_data = extent_slab_data_get(slab); 1257*e1c167d0SJason Evans extent_nfree_binshard_set(slab, bin_info->nregs, binshard); 1258b7eaed25SJason Evans bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false); 1259b7eaed25SJason Evans 1260b7eaed25SJason Evans arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE); 1261b7eaed25SJason Evans 1262b7eaed25SJason Evans return slab; 1263b7eaed25SJason Evans } 1264b7eaed25SJason Evans 1265b7eaed25SJason Evans static extent_t * 12660ef50b4eSJason Evans arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin, 1267*e1c167d0SJason Evans szind_t binind, unsigned binshard) { 1268b7eaed25SJason Evans extent_t *slab; 12690ef50b4eSJason Evans const bin_info_t *bin_info; 1270b7eaed25SJason Evans 1271b7eaed25SJason Evans /* Look for a usable slab. */ 1272b7eaed25SJason Evans slab = arena_bin_slabs_nonfull_tryget(bin); 1273b7eaed25SJason Evans if (slab != NULL) { 1274b7eaed25SJason Evans return slab; 1275b7eaed25SJason Evans } 1276b7eaed25SJason Evans /* No existing slabs have any space available. */ 1277b7eaed25SJason Evans 12780ef50b4eSJason Evans bin_info = &bin_infos[binind]; 1279b7eaed25SJason Evans 1280b7eaed25SJason Evans /* Allocate a new slab. */ 1281b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &bin->lock); 1282b7eaed25SJason Evans /******************************/ 1283*e1c167d0SJason Evans slab = arena_slab_alloc(tsdn, arena, binind, binshard, bin_info); 1284b7eaed25SJason Evans /********************************/ 1285b7eaed25SJason Evans malloc_mutex_lock(tsdn, &bin->lock); 1286b7eaed25SJason Evans if (slab != NULL) { 1287b7eaed25SJason Evans if (config_stats) { 1288b7eaed25SJason Evans bin->stats.nslabs++; 1289b7eaed25SJason Evans bin->stats.curslabs++; 1290b7eaed25SJason Evans } 1291b7eaed25SJason Evans return slab; 1292b7eaed25SJason Evans } 1293b7eaed25SJason Evans 1294b7eaed25SJason Evans /* 1295b7eaed25SJason Evans * arena_slab_alloc() failed, but another thread may have made 1296b7eaed25SJason Evans * sufficient memory available while this one dropped bin->lock above, 1297b7eaed25SJason Evans * so search one more time. 1298b7eaed25SJason Evans */ 1299b7eaed25SJason Evans slab = arena_bin_slabs_nonfull_tryget(bin); 1300b7eaed25SJason Evans if (slab != NULL) { 1301b7eaed25SJason Evans return slab; 1302b7eaed25SJason Evans } 1303b7eaed25SJason Evans 1304b7eaed25SJason Evans return NULL; 1305b7eaed25SJason Evans } 1306b7eaed25SJason Evans 1307b7eaed25SJason Evans /* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */ 1308b7eaed25SJason Evans static void * 13090ef50b4eSJason Evans arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin, 1310*e1c167d0SJason Evans szind_t binind, unsigned binshard) { 13110ef50b4eSJason Evans const bin_info_t *bin_info; 1312b7eaed25SJason Evans extent_t *slab; 1313b7eaed25SJason Evans 13140ef50b4eSJason Evans bin_info = &bin_infos[binind]; 1315b7eaed25SJason Evans if (!arena_is_auto(arena) && bin->slabcur != NULL) { 1316b7eaed25SJason Evans arena_bin_slabs_full_insert(arena, bin, bin->slabcur); 1317b7eaed25SJason Evans bin->slabcur = NULL; 1318b7eaed25SJason Evans } 1319*e1c167d0SJason Evans slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind, binshard); 1320b7eaed25SJason Evans if (bin->slabcur != NULL) { 1321b7eaed25SJason Evans /* 1322b7eaed25SJason Evans * Another thread updated slabcur while this one ran without the 1323b7eaed25SJason Evans * bin lock in arena_bin_nonfull_slab_get(). 
1324b7eaed25SJason Evans */ 1325b7eaed25SJason Evans if (extent_nfree_get(bin->slabcur) > 0) { 13260ef50b4eSJason Evans void *ret = arena_slab_reg_alloc(bin->slabcur, 1327b7eaed25SJason Evans bin_info); 1328b7eaed25SJason Evans if (slab != NULL) { 1329b7eaed25SJason Evans /* 1330b7eaed25SJason Evans * arena_slab_alloc() may have allocated slab, 1331b7eaed25SJason Evans * or it may have been pulled from 1332b7eaed25SJason Evans * slabs_nonfull. Therefore it is unsafe to 1333b7eaed25SJason Evans * make any assumptions about how slab has 1334b7eaed25SJason Evans * previously been used, and 1335b7eaed25SJason Evans * arena_bin_lower_slab() must be called, as if 1336b7eaed25SJason Evans * a region were just deallocated from the slab. 1337b7eaed25SJason Evans */ 1338b7eaed25SJason Evans if (extent_nfree_get(slab) == bin_info->nregs) { 1339b7eaed25SJason Evans arena_dalloc_bin_slab(tsdn, arena, slab, 1340b7eaed25SJason Evans bin); 1341b7eaed25SJason Evans } else { 1342b7eaed25SJason Evans arena_bin_lower_slab(tsdn, arena, slab, 1343b7eaed25SJason Evans bin); 1344b7eaed25SJason Evans } 1345b7eaed25SJason Evans } 1346b7eaed25SJason Evans return ret; 1347b7eaed25SJason Evans } 1348b7eaed25SJason Evans 1349b7eaed25SJason Evans arena_bin_slabs_full_insert(arena, bin, bin->slabcur); 1350b7eaed25SJason Evans bin->slabcur = NULL; 1351b7eaed25SJason Evans } 1352b7eaed25SJason Evans 1353b7eaed25SJason Evans if (slab == NULL) { 1354b7eaed25SJason Evans return NULL; 1355b7eaed25SJason Evans } 1356b7eaed25SJason Evans bin->slabcur = slab; 1357b7eaed25SJason Evans 1358b7eaed25SJason Evans assert(extent_nfree_get(bin->slabcur) > 0); 1359b7eaed25SJason Evans 13600ef50b4eSJason Evans return arena_slab_reg_alloc(slab, bin_info); 1361b7eaed25SJason Evans } 1362b7eaed25SJason Evans 1363*e1c167d0SJason Evans /* Choose a bin shard and return the locked bin. 
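 * (Illustrative editorial note, not in the original source: the shard index
 * comes from the thread's cached binshard assignment, so concurrent threads
 * spread across the bin_infos[binind].n_shards shards instead of contending
 * on a single mutex.  The bin is returned with bin->lock held; callers such
 * as arena_tcache_fill_small() and arena_malloc_small() below must pair this
 * call with malloc_mutex_unlock(tsdn, &bin->lock).)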
*/ 1364*e1c167d0SJason Evans bin_t * 1365*e1c167d0SJason Evans arena_bin_choose_lock(tsdn_t *tsdn, arena_t *arena, szind_t binind, 1366*e1c167d0SJason Evans unsigned *binshard) { 1367*e1c167d0SJason Evans bin_t *bin; 1368*e1c167d0SJason Evans if (tsdn_null(tsdn) || tsd_arena_get(tsdn_tsd(tsdn)) == NULL) { 1369*e1c167d0SJason Evans *binshard = 0; 1370*e1c167d0SJason Evans } else { 1371*e1c167d0SJason Evans *binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind]; 1372*e1c167d0SJason Evans } 1373*e1c167d0SJason Evans assert(*binshard < bin_infos[binind].n_shards); 1374*e1c167d0SJason Evans bin = &arena->bins[binind].bin_shards[*binshard]; 1375*e1c167d0SJason Evans malloc_mutex_lock(tsdn, &bin->lock); 1376*e1c167d0SJason Evans 1377*e1c167d0SJason Evans return bin; 1378*e1c167d0SJason Evans } 1379*e1c167d0SJason Evans 1380b7eaed25SJason Evans void 1381b7eaed25SJason Evans arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, 13820ef50b4eSJason Evans cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) { 1383*e1c167d0SJason Evans unsigned i, nfill, cnt; 1384a4bd5210SJason Evans 1385a4bd5210SJason Evans assert(tbin->ncached == 0); 1386a4bd5210SJason Evans 1387b7eaed25SJason Evans if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) { 13881f0a49e8SJason Evans prof_idump(tsdn); 1389b7eaed25SJason Evans } 1390*e1c167d0SJason Evans 1391*e1c167d0SJason Evans unsigned binshard; 1392*e1c167d0SJason Evans bin_t *bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard); 1393*e1c167d0SJason Evans 1394a4bd5210SJason Evans for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> 1395*e1c167d0SJason Evans tcache->lg_fill_div[binind]); i < nfill; i += cnt) { 1396b7eaed25SJason Evans extent_t *slab; 1397b7eaed25SJason Evans if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 1398b7eaed25SJason Evans 0) { 1399*e1c167d0SJason Evans unsigned tofill = nfill - i; 1400*e1c167d0SJason Evans cnt = tofill < extent_nfree_get(slab) ? 1401*e1c167d0SJason Evans tofill : extent_nfree_get(slab); 1402*e1c167d0SJason Evans arena_slab_reg_alloc_batch( 1403*e1c167d0SJason Evans slab, &bin_infos[binind], cnt, 1404*e1c167d0SJason Evans tbin->avail - nfill + i); 1405b7eaed25SJason Evans } else { 1406*e1c167d0SJason Evans cnt = 1; 1407*e1c167d0SJason Evans void *ptr = arena_bin_malloc_hard(tsdn, arena, bin, 1408*e1c167d0SJason Evans binind, binshard); 1409d0e79aa3SJason Evans /* 1410d0e79aa3SJason Evans * OOM. tbin->avail isn't yet filled down to its first 1411d0e79aa3SJason Evans * element, so the successful allocations (if any) must 1412df0d881dSJason Evans * be moved just before tbin->avail before bailing out. 1413d0e79aa3SJason Evans */ 1414*e1c167d0SJason Evans if (ptr == NULL) { 1415d0e79aa3SJason Evans if (i > 0) { 1416*e1c167d0SJason Evans memmove(tbin->avail - i, 1417*e1c167d0SJason Evans tbin->avail - nfill, 1418d0e79aa3SJason Evans i * sizeof(void *)); 1419d0e79aa3SJason Evans } 1420a4bd5210SJason Evans break; 1421d0e79aa3SJason Evans } 1422a4bd5210SJason Evans /* Insert such that low regions get used first. 
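 * (Illustrative editorial note, not in the original source, assuming the
 * usual cache_bin pop order: slots tbin->avail[-nfill .. -1] are written in
 * allocation order and the cache pops from tbin->avail[-ncached] upward, so
 * the regions allocated first, i.e. the lowest addresses within a slab, are
 * also the first handed back out.)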
*/ 1423df0d881dSJason Evans *(tbin->avail - nfill + i) = ptr; 1424a4bd5210SJason Evans } 1425*e1c167d0SJason Evans if (config_fill && unlikely(opt_junk_alloc)) { 1426*e1c167d0SJason Evans for (unsigned j = 0; j < cnt; j++) { 1427*e1c167d0SJason Evans void* ptr = *(tbin->avail - nfill + i + j); 1428*e1c167d0SJason Evans arena_alloc_junk_small(ptr, &bin_infos[binind], 1429*e1c167d0SJason Evans true); 1430*e1c167d0SJason Evans } 1431*e1c167d0SJason Evans } 1432*e1c167d0SJason Evans } 1433a4bd5210SJason Evans if (config_stats) { 1434a4bd5210SJason Evans bin->stats.nmalloc += i; 1435a4bd5210SJason Evans bin->stats.nrequests += tbin->tstats.nrequests; 1436d0e79aa3SJason Evans bin->stats.curregs += i; 1437a4bd5210SJason Evans bin->stats.nfills++; 1438a4bd5210SJason Evans tbin->tstats.nrequests = 0; 1439a4bd5210SJason Evans } 14401f0a49e8SJason Evans malloc_mutex_unlock(tsdn, &bin->lock); 1441a4bd5210SJason Evans tbin->ncached = i; 14421f0a49e8SJason Evans arena_decay_tick(tsdn, arena); 1443a4bd5210SJason Evans } 1444a4bd5210SJason Evans 1445a4bd5210SJason Evans void 14460ef50b4eSJason Evans arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) { 1447b7eaed25SJason Evans if (!zero) { 1448b7eaed25SJason Evans memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size); 1449a4bd5210SJason Evans } 1450a4bd5210SJason Evans } 1451a4bd5210SJason Evans 1452f921d10fSJason Evans static void 14530ef50b4eSJason Evans arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) { 1454b7eaed25SJason Evans memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size); 1455a4bd5210SJason Evans } 1456b7eaed25SJason Evans arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small = 1457b7eaed25SJason Evans arena_dalloc_junk_small_impl; 1458a4bd5210SJason Evans 1459df0d881dSJason Evans static void * 1460b7eaed25SJason Evans arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { 1461a4bd5210SJason Evans void *ret; 14620ef50b4eSJason Evans bin_t *bin; 1463df0d881dSJason Evans size_t usize; 1464b7eaed25SJason Evans extent_t *slab; 1465a4bd5210SJason Evans 1466*e1c167d0SJason Evans assert(binind < SC_NBINS); 1467b7eaed25SJason Evans usize = sz_index2size(binind); 1468*e1c167d0SJason Evans unsigned binshard; 1469*e1c167d0SJason Evans bin = arena_bin_choose_lock(tsdn, arena, binind, &binshard); 1470a4bd5210SJason Evans 1471b7eaed25SJason Evans if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) { 14720ef50b4eSJason Evans ret = arena_slab_reg_alloc(slab, &bin_infos[binind]); 1473b7eaed25SJason Evans } else { 1474*e1c167d0SJason Evans ret = arena_bin_malloc_hard(tsdn, arena, bin, binind, binshard); 1475b7eaed25SJason Evans } 1476a4bd5210SJason Evans 1477a4bd5210SJason Evans if (ret == NULL) { 14781f0a49e8SJason Evans malloc_mutex_unlock(tsdn, &bin->lock); 1479b7eaed25SJason Evans return NULL; 1480a4bd5210SJason Evans } 1481a4bd5210SJason Evans 1482a4bd5210SJason Evans if (config_stats) { 1483a4bd5210SJason Evans bin->stats.nmalloc++; 1484a4bd5210SJason Evans bin->stats.nrequests++; 1485d0e79aa3SJason Evans bin->stats.curregs++; 1486a4bd5210SJason Evans } 14871f0a49e8SJason Evans malloc_mutex_unlock(tsdn, &bin->lock); 1488b7eaed25SJason Evans if (config_prof && arena_prof_accum(tsdn, arena, usize)) { 14891f0a49e8SJason Evans prof_idump(tsdn); 1490b7eaed25SJason Evans } 1491a4bd5210SJason Evans 1492d0e79aa3SJason Evans if (!zero) { 1493a4bd5210SJason Evans if (config_fill) { 1494d0e79aa3SJason Evans if (unlikely(opt_junk_alloc)) { 1495a4bd5210SJason Evans 
arena_alloc_junk_small(ret, 14960ef50b4eSJason Evans &bin_infos[binind], false); 1497b7eaed25SJason Evans } else if (unlikely(opt_zero)) { 1498df0d881dSJason Evans memset(ret, 0, usize); 1499a4bd5210SJason Evans } 1500b7eaed25SJason Evans } 1501a4bd5210SJason Evans } else { 1502d0e79aa3SJason Evans if (config_fill && unlikely(opt_junk_alloc)) { 15030ef50b4eSJason Evans arena_alloc_junk_small(ret, &bin_infos[binind], 1504a4bd5210SJason Evans true); 1505a4bd5210SJason Evans } 1506df0d881dSJason Evans memset(ret, 0, usize); 1507a4bd5210SJason Evans } 1508a4bd5210SJason Evans 15091f0a49e8SJason Evans arena_decay_tick(tsdn, arena); 1510b7eaed25SJason Evans return ret; 1511a4bd5210SJason Evans } 1512a4bd5210SJason Evans 1513df0d881dSJason Evans void * 15141f0a49e8SJason Evans arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, 1515b7eaed25SJason Evans bool zero) { 15161f0a49e8SJason Evans assert(!tsdn_null(tsdn) || arena != NULL); 15171f0a49e8SJason Evans 1518b7eaed25SJason Evans if (likely(!tsdn_null(tsdn))) { 1519*e1c167d0SJason Evans arena = arena_choose_maybe_huge(tsdn_tsd(tsdn), arena, size); 1520b7eaed25SJason Evans } 1521b7eaed25SJason Evans if (unlikely(arena == NULL)) { 1522b7eaed25SJason Evans return NULL; 1523df0d881dSJason Evans } 1524df0d881dSJason Evans 1525*e1c167d0SJason Evans if (likely(size <= SC_SMALL_MAXCLASS)) { 1526b7eaed25SJason Evans return arena_malloc_small(tsdn, arena, ind, zero); 1527a4bd5210SJason Evans } 1528b7eaed25SJason Evans return large_malloc(tsdn, arena, sz_index2size(ind), zero); 1529d0e79aa3SJason Evans } 1530d0e79aa3SJason Evans 1531d0e79aa3SJason Evans void * 15321f0a49e8SJason Evans arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, 1533b7eaed25SJason Evans bool zero, tcache_t *tcache) { 1534d0e79aa3SJason Evans void *ret; 1535d0e79aa3SJason Evans 1536*e1c167d0SJason Evans if (usize <= SC_SMALL_MAXCLASS 1537*e1c167d0SJason Evans && (alignment < PAGE 1538*e1c167d0SJason Evans || (alignment == PAGE && (usize & PAGE_MASK) == 0))) { 1539b7eaed25SJason Evans /* Small; alignment doesn't require special slab placement. 
*/ 1540b7eaed25SJason Evans ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize), 1541b7eaed25SJason Evans zero, tcache, true); 1542d0e79aa3SJason Evans } else { 1543b7eaed25SJason Evans if (likely(alignment <= CACHELINE)) { 1544b7eaed25SJason Evans ret = large_malloc(tsdn, arena, usize, zero); 1545b7eaed25SJason Evans } else { 1546b7eaed25SJason Evans ret = large_palloc(tsdn, arena, usize, alignment, zero); 1547d0e79aa3SJason Evans } 1548a4bd5210SJason Evans } 1549b7eaed25SJason Evans return ret; 1550a4bd5210SJason Evans } 1551a4bd5210SJason Evans 1552a4bd5210SJason Evans void 1553*e1c167d0SJason Evans arena_prof_promote(tsdn_t *tsdn, void *ptr, size_t usize) { 15548ed34ab0SJason Evans cassert(config_prof); 1555a4bd5210SJason Evans assert(ptr != NULL); 1556*e1c167d0SJason Evans assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS); 1557*e1c167d0SJason Evans assert(usize <= SC_SMALL_MAXCLASS); 1558*e1c167d0SJason Evans 1559*e1c167d0SJason Evans if (config_opt_safety_checks) { 1560*e1c167d0SJason Evans safety_check_set_redzone(ptr, usize, SC_LARGE_MINCLASS); 1561*e1c167d0SJason Evans } 1562a4bd5210SJason Evans 1563b7eaed25SJason Evans rtree_ctx_t rtree_ctx_fallback; 1564b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 1565a4bd5210SJason Evans 1566b7eaed25SJason Evans extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, 1567b7eaed25SJason Evans (uintptr_t)ptr, true); 1568b7eaed25SJason Evans arena_t *arena = extent_arena_get(extent); 1569b7eaed25SJason Evans 1570b7eaed25SJason Evans szind_t szind = sz_size2index(usize); 1571b7eaed25SJason Evans extent_szind_set(extent, szind); 1572b7eaed25SJason Evans rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, 1573b7eaed25SJason Evans szind, false); 1574b7eaed25SJason Evans 1575b7eaed25SJason Evans prof_accum_cancel(tsdn, &arena->prof_accum, usize); 1576b7eaed25SJason Evans 1577b7eaed25SJason Evans assert(isalloc(tsdn, ptr) == usize); 1578b7eaed25SJason Evans } 1579b7eaed25SJason Evans 1580b7eaed25SJason Evans static size_t 1581b7eaed25SJason Evans arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) { 1582b7eaed25SJason Evans cassert(config_prof); 1583b7eaed25SJason Evans assert(ptr != NULL); 1584b7eaed25SJason Evans 1585*e1c167d0SJason Evans extent_szind_set(extent, SC_NBINS); 1586b7eaed25SJason Evans rtree_ctx_t rtree_ctx_fallback; 1587b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 1588b7eaed25SJason Evans rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, 1589*e1c167d0SJason Evans SC_NBINS, false); 1590b7eaed25SJason Evans 1591*e1c167d0SJason Evans assert(isalloc(tsdn, ptr) == SC_LARGE_MINCLASS); 1592b7eaed25SJason Evans 1593*e1c167d0SJason Evans return SC_LARGE_MINCLASS; 1594b7eaed25SJason Evans } 1595b7eaed25SJason Evans 1596b7eaed25SJason Evans void 1597b7eaed25SJason Evans arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, 1598b7eaed25SJason Evans bool slow_path) { 1599b7eaed25SJason Evans cassert(config_prof); 1600b7eaed25SJason Evans assert(opt_prof); 1601b7eaed25SJason Evans 1602b7eaed25SJason Evans extent_t *extent = iealloc(tsdn, ptr); 1603*e1c167d0SJason Evans size_t usize = extent_usize_get(extent); 1604*e1c167d0SJason Evans size_t bumped_usize = arena_prof_demote(tsdn, extent, ptr); 1605*e1c167d0SJason Evans if (config_opt_safety_checks && usize < SC_LARGE_MINCLASS) { 1606*e1c167d0SJason Evans /* 1607*e1c167d0SJason Evans * Currently, we only do redzoning for small 
sampled 1608*e1c167d0SJason Evans * allocations. 1609*e1c167d0SJason Evans */ 1610*e1c167d0SJason Evans assert(bumped_usize == SC_LARGE_MINCLASS); 1611*e1c167d0SJason Evans safety_check_verify_redzone(ptr, usize, bumped_usize); 1612*e1c167d0SJason Evans } 1613*e1c167d0SJason Evans if (bumped_usize <= tcache_maxclass && tcache != NULL) { 1614b7eaed25SJason Evans tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, 1615*e1c167d0SJason Evans sz_size2index(bumped_usize), slow_path); 1616b7eaed25SJason Evans } else { 1617b7eaed25SJason Evans large_dalloc(tsdn, extent); 1618b7eaed25SJason Evans } 1619a4bd5210SJason Evans } 1620a4bd5210SJason Evans 1621a4bd5210SJason Evans static void 16220ef50b4eSJason Evans arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) { 1623b7eaed25SJason Evans /* Dissociate slab from bin. */ 1624b7eaed25SJason Evans if (slab == bin->slabcur) { 1625b7eaed25SJason Evans bin->slabcur = NULL; 1626b7eaed25SJason Evans } else { 1627b7eaed25SJason Evans szind_t binind = extent_szind_get(slab); 16280ef50b4eSJason Evans const bin_info_t *bin_info = &bin_infos[binind]; 1629a4bd5210SJason Evans 1630a4bd5210SJason Evans /* 16311f0a49e8SJason Evans * The following block's conditional is necessary because if the 1632b7eaed25SJason Evans * slab only contains one region, then it never gets inserted 1633b7eaed25SJason Evans * into the non-full slabs heap. 1634a4bd5210SJason Evans */ 1635b7eaed25SJason Evans if (bin_info->nregs == 1) { 1636b7eaed25SJason Evans arena_bin_slabs_full_remove(arena, bin, slab); 1637b7eaed25SJason Evans } else { 1638b7eaed25SJason Evans arena_bin_slabs_nonfull_remove(bin, slab); 1639a4bd5210SJason Evans } 1640a4bd5210SJason Evans } 1641a4bd5210SJason Evans } 1642a4bd5210SJason Evans 1643a4bd5210SJason Evans static void 1644b7eaed25SJason Evans arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, 16450ef50b4eSJason Evans bin_t *bin) { 1646b7eaed25SJason Evans assert(slab != bin->slabcur); 1647a4bd5210SJason Evans 16481f0a49e8SJason Evans malloc_mutex_unlock(tsdn, &bin->lock); 1649a4bd5210SJason Evans /******************************/ 1650b7eaed25SJason Evans arena_slab_dalloc(tsdn, arena, slab); 1651a4bd5210SJason Evans /****************************/ 16521f0a49e8SJason Evans malloc_mutex_lock(tsdn, &bin->lock); 1653b7eaed25SJason Evans if (config_stats) { 1654b7eaed25SJason Evans bin->stats.curslabs--; 1655b7eaed25SJason Evans } 1656a4bd5210SJason Evans } 1657a4bd5210SJason Evans 1658a4bd5210SJason Evans static void 1659*e1c167d0SJason Evans arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, 16600ef50b4eSJason Evans bin_t *bin) { 1661b7eaed25SJason Evans assert(extent_nfree_get(slab) > 0); 1662a4bd5210SJason Evans 1663a4bd5210SJason Evans /* 1664b7eaed25SJason Evans * Make sure that if bin->slabcur is non-NULL, it refers to the 1665b7eaed25SJason Evans * oldest/lowest non-full slab. It is okay to NULL slabcur out rather 16667fa7f12fSJason Evans * than proactively keeping it pointing at the oldest/lowest non-full 1667b7eaed25SJason Evans * slab. 1668a4bd5210SJason Evans */ 1669b7eaed25SJason Evans if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) { 1670b7eaed25SJason Evans /* Switch slabcur. 
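 * (Illustrative editorial note, not in the original source: the freed-into
 * slab compares older/lower than the current slabcur under
 * extent_snad_comp(), so it becomes the new slabcur and the previous one is
 * filed under slabs_nonfull or slabs_full below.  Biasing allocation toward
 * old/low slabs approximates first fit, letting newer, higher slabs drain
 * and eventually be released.)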
*/ 1671b7eaed25SJason Evans if (extent_nfree_get(bin->slabcur) > 0) { 1672b7eaed25SJason Evans arena_bin_slabs_nonfull_insert(bin, bin->slabcur); 1673b7eaed25SJason Evans } else { 1674b7eaed25SJason Evans arena_bin_slabs_full_insert(arena, bin, bin->slabcur); 1675b7eaed25SJason Evans } 1676b7eaed25SJason Evans bin->slabcur = slab; 1677b7eaed25SJason Evans if (config_stats) { 1678b7eaed25SJason Evans bin->stats.reslabs++; 1679b7eaed25SJason Evans } 1680b7eaed25SJason Evans } else { 1681b7eaed25SJason Evans arena_bin_slabs_nonfull_insert(bin, slab); 1682b7eaed25SJason Evans } 1683a4bd5210SJason Evans } 1684a4bd5210SJason Evans 1685d0e79aa3SJason Evans static void 1686*e1c167d0SJason Evans arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, bin_t *bin, 1687*e1c167d0SJason Evans szind_t binind, extent_t *slab, void *ptr, bool junked) { 1688b7eaed25SJason Evans arena_slab_data_t *slab_data = extent_slab_data_get(slab); 16890ef50b4eSJason Evans const bin_info_t *bin_info = &bin_infos[binind]; 1690a4bd5210SJason Evans 1691b7eaed25SJason Evans if (!junked && config_fill && unlikely(opt_junk_free)) { 1692a4bd5210SJason Evans arena_dalloc_junk_small(ptr, bin_info); 1693b7eaed25SJason Evans } 1694a4bd5210SJason Evans 16950ef50b4eSJason Evans arena_slab_reg_dalloc(slab, slab_data, ptr); 1696b7eaed25SJason Evans unsigned nfree = extent_nfree_get(slab); 1697b7eaed25SJason Evans if (nfree == bin_info->nregs) { 1698b7eaed25SJason Evans arena_dissociate_bin_slab(arena, slab, bin); 1699b7eaed25SJason Evans arena_dalloc_bin_slab(tsdn, arena, slab, bin); 1700b7eaed25SJason Evans } else if (nfree == 1 && slab != bin->slabcur) { 1701b7eaed25SJason Evans arena_bin_slabs_full_remove(arena, bin, slab); 1702b7eaed25SJason Evans arena_bin_lower_slab(tsdn, arena, slab, bin); 1703b7eaed25SJason Evans } 1704a4bd5210SJason Evans 1705a4bd5210SJason Evans if (config_stats) { 1706a4bd5210SJason Evans bin->stats.ndalloc++; 1707d0e79aa3SJason Evans bin->stats.curregs--; 1708a4bd5210SJason Evans } 1709a4bd5210SJason Evans } 1710a4bd5210SJason Evans 1711a4bd5210SJason Evans void 1712*e1c167d0SJason Evans arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, bin_t *bin, 1713*e1c167d0SJason Evans szind_t binind, extent_t *extent, void *ptr) { 1714*e1c167d0SJason Evans arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr, 1715*e1c167d0SJason Evans true); 1716d0e79aa3SJason Evans } 1717d0e79aa3SJason Evans 1718b7eaed25SJason Evans static void 1719b7eaed25SJason Evans arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) { 1720b7eaed25SJason Evans szind_t binind = extent_szind_get(extent); 1721*e1c167d0SJason Evans unsigned binshard = extent_binshard_get(extent); 1722*e1c167d0SJason Evans bin_t *bin = &arena->bins[binind].bin_shards[binshard]; 1723e722f8f8SJason Evans 17241f0a49e8SJason Evans malloc_mutex_lock(tsdn, &bin->lock); 1725*e1c167d0SJason Evans arena_dalloc_bin_locked_impl(tsdn, arena, bin, binind, extent, ptr, 1726*e1c167d0SJason Evans false); 17271f0a49e8SJason Evans malloc_mutex_unlock(tsdn, &bin->lock); 1728e722f8f8SJason Evans } 1729e722f8f8SJason Evans 1730e722f8f8SJason Evans void 1731b7eaed25SJason Evans arena_dalloc_small(tsdn_t *tsdn, void *ptr) { 1732b7eaed25SJason Evans extent_t *extent = iealloc(tsdn, ptr); 1733b7eaed25SJason Evans arena_t *arena = extent_arena_get(extent); 1734e722f8f8SJason Evans 1735b7eaed25SJason Evans arena_dalloc_bin(tsdn, arena, extent, ptr); 17361f0a49e8SJason Evans arena_decay_tick(tsdn, arena); 1737e722f8f8SJason Evans } 
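/*
 * Illustrative editorial sketch, not part of the original source:
 * arena_ralloc_no_move() below succeeds without moving exactly when the old
 * and new usable sizes land in the same small size class (or when the large
 * path can resize in place).  Assuming the standard quantum-spaced small
 * classes ..., 32, 48, 64, ...: with oldsize == 32 and extra == 0, a request
 * of size == 24 maps to usize 32 and stays put, while size == 40 maps to
 * usize 48, a different class, so the function reports failure and the
 * caller must relocate.
 */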
1738a4bd5210SJason Evans 1739f921d10fSJason Evans bool 17401f0a49e8SJason Evans arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, 1741*e1c167d0SJason Evans size_t extra, bool zero, size_t *newsize) { 1742*e1c167d0SJason Evans bool ret; 1743df0d881dSJason Evans /* Calls with non-zero extra had to clamp extra. */ 1744*e1c167d0SJason Evans assert(extra == 0 || size + extra <= SC_LARGE_MAXCLASS); 1745df0d881dSJason Evans 1746b7eaed25SJason Evans extent_t *extent = iealloc(tsdn, ptr); 1747*e1c167d0SJason Evans if (unlikely(size > SC_LARGE_MAXCLASS)) { 1748*e1c167d0SJason Evans ret = true; 1749*e1c167d0SJason Evans goto done; 1750*e1c167d0SJason Evans } 1751*e1c167d0SJason Evans 1752b7eaed25SJason Evans size_t usize_min = sz_s2u(size); 1753b7eaed25SJason Evans size_t usize_max = sz_s2u(size + extra); 1754*e1c167d0SJason Evans if (likely(oldsize <= SC_SMALL_MAXCLASS && usize_min 1755*e1c167d0SJason Evans <= SC_SMALL_MAXCLASS)) { 1756a4bd5210SJason Evans /* 1757d0e79aa3SJason Evans * Avoid moving the allocation if the size class can be left the 1758d0e79aa3SJason Evans * same. 1759a4bd5210SJason Evans */ 17600ef50b4eSJason Evans assert(bin_infos[sz_size2index(oldsize)].reg_size == 1761536b3538SJason Evans oldsize); 1762*e1c167d0SJason Evans if ((usize_max > SC_SMALL_MAXCLASS 1763*e1c167d0SJason Evans || sz_size2index(usize_max) != sz_size2index(oldsize)) 1764*e1c167d0SJason Evans && (size > oldsize || usize_max < oldsize)) { 1765*e1c167d0SJason Evans ret = true; 1766*e1c167d0SJason Evans goto done; 1767df0d881dSJason Evans } 1768df0d881dSJason Evans 1769b7eaed25SJason Evans arena_decay_tick(tsdn, extent_arena_get(extent)); 1770*e1c167d0SJason Evans ret = false; 1771*e1c167d0SJason Evans } else if (oldsize >= SC_LARGE_MINCLASS 1772*e1c167d0SJason Evans && usize_max >= SC_LARGE_MINCLASS) { 1773*e1c167d0SJason Evans ret = large_ralloc_no_move(tsdn, extent, usize_min, usize_max, 1774b7eaed25SJason Evans zero); 1775*e1c167d0SJason Evans } else { 1776*e1c167d0SJason Evans ret = true; 1777536b3538SJason Evans } 1778*e1c167d0SJason Evans done: 1779*e1c167d0SJason Evans assert(extent == iealloc(tsdn, ptr)); 1780*e1c167d0SJason Evans *newsize = extent_usize_get(extent); 1781b7eaed25SJason Evans 1782*e1c167d0SJason Evans return ret; 1783536b3538SJason Evans } 1784536b3538SJason Evans 1785536b3538SJason Evans static void * 17861f0a49e8SJason Evans arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, 1787b7eaed25SJason Evans size_t alignment, bool zero, tcache_t *tcache) { 1788b7eaed25SJason Evans if (alignment == 0) { 1789b7eaed25SJason Evans return arena_malloc(tsdn, arena, usize, sz_size2index(usize), 1790b7eaed25SJason Evans zero, tcache, true); 1791b7eaed25SJason Evans } 1792b7eaed25SJason Evans usize = sz_sa2u(usize, alignment); 1793*e1c167d0SJason Evans if (unlikely(usize == 0 || usize > SC_LARGE_MAXCLASS)) { 1794b7eaed25SJason Evans return NULL; 1795b7eaed25SJason Evans } 1796b7eaed25SJason Evans return ipalloct(tsdn, usize, alignment, zero, tcache, arena); 1797a4bd5210SJason Evans } 1798a4bd5210SJason Evans 1799a4bd5210SJason Evans void * 1800b7eaed25SJason Evans arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, 1801*e1c167d0SJason Evans size_t size, size_t alignment, bool zero, tcache_t *tcache, 1802*e1c167d0SJason Evans hook_ralloc_args_t *hook_args) { 1803b7eaed25SJason Evans size_t usize = sz_s2u(size); 1804*e1c167d0SJason Evans if (unlikely(usize == 0 || size > SC_LARGE_MAXCLASS)) { 1805b7eaed25SJason Evans return NULL; 
1806b7eaed25SJason Evans } 1807d0e79aa3SJason Evans 1808*e1c167d0SJason Evans if (likely(usize <= SC_SMALL_MAXCLASS)) { 1809a4bd5210SJason Evans /* Try to avoid moving the allocation. */ 1810*e1c167d0SJason Evans UNUSED size_t newsize; 1811*e1c167d0SJason Evans if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero, 1812*e1c167d0SJason Evans &newsize)) { 1813*e1c167d0SJason Evans hook_invoke_expand(hook_args->is_realloc 1814*e1c167d0SJason Evans ? hook_expand_realloc : hook_expand_rallocx, 1815*e1c167d0SJason Evans ptr, oldsize, usize, (uintptr_t)ptr, 1816*e1c167d0SJason Evans hook_args->args); 1817b7eaed25SJason Evans return ptr; 1818b7eaed25SJason Evans } 1819b7eaed25SJason Evans } 1820b7eaed25SJason Evans 1821*e1c167d0SJason Evans if (oldsize >= SC_LARGE_MINCLASS 1822*e1c167d0SJason Evans && usize >= SC_LARGE_MINCLASS) { 1823*e1c167d0SJason Evans return large_ralloc(tsdn, arena, ptr, usize, 1824*e1c167d0SJason Evans alignment, zero, tcache, hook_args); 1825b7eaed25SJason Evans } 1826a4bd5210SJason Evans 1827a4bd5210SJason Evans /* 1828b7eaed25SJason Evans * size and oldsize are different enough that we need to move the 1829b7eaed25SJason Evans * object. In that case, fall back to allocating new space and copying. 1830a4bd5210SJason Evans */ 1831b7eaed25SJason Evans void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, 1832b7eaed25SJason Evans zero, tcache); 1833b7eaed25SJason Evans if (ret == NULL) { 1834b7eaed25SJason Evans return NULL; 1835b7eaed25SJason Evans } 1836a4bd5210SJason Evans 1837*e1c167d0SJason Evans hook_invoke_alloc(hook_args->is_realloc 1838*e1c167d0SJason Evans ? hook_alloc_realloc : hook_alloc_rallocx, ret, (uintptr_t)ret, 1839*e1c167d0SJason Evans hook_args->args); 1840*e1c167d0SJason Evans hook_invoke_dalloc(hook_args->is_realloc 1841*e1c167d0SJason Evans ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args); 1842*e1c167d0SJason Evans 1843d0e79aa3SJason Evans /* 1844d0e79aa3SJason Evans * Junk/zero-filling were already done by 1845d0e79aa3SJason Evans * ipalloc()/arena_malloc(). 1846d0e79aa3SJason Evans */ 1847b7eaed25SJason Evans size_t copysize = (usize < oldsize) ? 
usize : oldsize; 1848a4bd5210SJason Evans memcpy(ret, ptr, copysize); 1849b7eaed25SJason Evans isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); 1850b7eaed25SJason Evans return ret; 1851a4bd5210SJason Evans } 1852a4bd5210SJason Evans 185382872ac0SJason Evans dss_prec_t 1854b7eaed25SJason Evans arena_dss_prec_get(arena_t *arena) { 1855b7eaed25SJason Evans return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE); 185682872ac0SJason Evans } 185782872ac0SJason Evans 1858d0e79aa3SJason Evans bool 1859b7eaed25SJason Evans arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) { 1860b7eaed25SJason Evans if (!have_dss) { 1861d0e79aa3SJason Evans return (dss_prec != dss_prec_disabled); 1862b7eaed25SJason Evans } 1863b7eaed25SJason Evans atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE); 1864b7eaed25SJason Evans return false; 1865d0e79aa3SJason Evans } 1866d0e79aa3SJason Evans 1867d0e79aa3SJason Evans ssize_t 1868b7eaed25SJason Evans arena_dirty_decay_ms_default_get(void) { 1869b7eaed25SJason Evans return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED); 1870d0e79aa3SJason Evans } 1871d0e79aa3SJason Evans 1872d0e79aa3SJason Evans bool 1873b7eaed25SJason Evans arena_dirty_decay_ms_default_set(ssize_t decay_ms) { 1874b7eaed25SJason Evans if (!arena_decay_ms_valid(decay_ms)) { 1875b7eaed25SJason Evans return true; 1876b7eaed25SJason Evans } 1877b7eaed25SJason Evans atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED); 1878b7eaed25SJason Evans return false; 187982872ac0SJason Evans } 188082872ac0SJason Evans 1881df0d881dSJason Evans ssize_t 1882b7eaed25SJason Evans arena_muzzy_decay_ms_default_get(void) { 1883b7eaed25SJason Evans return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED); 1884df0d881dSJason Evans } 1885df0d881dSJason Evans 1886df0d881dSJason Evans bool 1887b7eaed25SJason Evans arena_muzzy_decay_ms_default_set(ssize_t decay_ms) { 1888b7eaed25SJason Evans if (!arena_decay_ms_valid(decay_ms)) { 1889b7eaed25SJason Evans return true; 1890df0d881dSJason Evans } 1891b7eaed25SJason Evans atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED); 1892b7eaed25SJason Evans return false; 189382872ac0SJason Evans } 189482872ac0SJason Evans 18950ef50b4eSJason Evans bool 18960ef50b4eSJason Evans arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit, 18970ef50b4eSJason Evans size_t *new_limit) { 18980ef50b4eSJason Evans assert(opt_retain); 18990ef50b4eSJason Evans 19000ef50b4eSJason Evans pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0); 19010ef50b4eSJason Evans if (new_limit != NULL) { 19020ef50b4eSJason Evans size_t limit = *new_limit; 19030ef50b4eSJason Evans /* Grow no more than the new limit. 
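 * (Illustrative editorial note, not in the original source:
 * sz_psz2ind(limit + 1) - 1 yields the largest page-size-class index whose
 * class size does not exceed limit, so the requested cap is in effect
 * rounded down to a size-class boundary before being stored in
 * retain_grow_limit.)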
*/ 1904*e1c167d0SJason Evans if ((new_ind = sz_psz2ind(limit + 1) - 1) >= SC_NPSIZES) { 19050ef50b4eSJason Evans return true; 19060ef50b4eSJason Evans } 19070ef50b4eSJason Evans } 19080ef50b4eSJason Evans 19090ef50b4eSJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx); 19100ef50b4eSJason Evans if (old_limit != NULL) { 19110ef50b4eSJason Evans *old_limit = sz_pind2sz(arena->retain_grow_limit); 19120ef50b4eSJason Evans } 19130ef50b4eSJason Evans if (new_limit != NULL) { 19140ef50b4eSJason Evans arena->retain_grow_limit = new_ind; 19150ef50b4eSJason Evans } 19160ef50b4eSJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx); 19170ef50b4eSJason Evans 19180ef50b4eSJason Evans return false; 19190ef50b4eSJason Evans } 19200ef50b4eSJason Evans 1921df0d881dSJason Evans unsigned 1922b7eaed25SJason Evans arena_nthreads_get(arena_t *arena, bool internal) { 1923b7eaed25SJason Evans return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED); 1924df0d881dSJason Evans } 1925df0d881dSJason Evans 1926df0d881dSJason Evans void 1927b7eaed25SJason Evans arena_nthreads_inc(arena_t *arena, bool internal) { 1928b7eaed25SJason Evans atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); 1929df0d881dSJason Evans } 1930df0d881dSJason Evans 1931df0d881dSJason Evans void 1932b7eaed25SJason Evans arena_nthreads_dec(arena_t *arena, bool internal) { 1933b7eaed25SJason Evans atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); 1934df0d881dSJason Evans } 1935df0d881dSJason Evans 19367fa7f12fSJason Evans size_t 1937b7eaed25SJason Evans arena_extent_sn_next(arena_t *arena) { 1938b7eaed25SJason Evans return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED); 19397fa7f12fSJason Evans } 19407fa7f12fSJason Evans 1941d0e79aa3SJason Evans arena_t * 1942b7eaed25SJason Evans arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { 1943d0e79aa3SJason Evans arena_t *arena; 1944b7eaed25SJason Evans base_t *base; 1945a4bd5210SJason Evans unsigned i; 1946a4bd5210SJason Evans 1947b7eaed25SJason Evans if (ind == 0) { 1948b7eaed25SJason Evans base = b0get(); 1949b7eaed25SJason Evans } else { 1950b7eaed25SJason Evans base = base_new(tsdn, ind, extent_hooks); 1951b7eaed25SJason Evans if (base == NULL) { 1952b7eaed25SJason Evans return NULL; 1953b7eaed25SJason Evans } 1954a4bd5210SJason Evans } 1955a4bd5210SJason Evans 1956*e1c167d0SJason Evans unsigned nbins_total = 0; 1957*e1c167d0SJason Evans for (i = 0; i < SC_NBINS; i++) { 1958*e1c167d0SJason Evans nbins_total += bin_infos[i].n_shards; 1959*e1c167d0SJason Evans } 1960*e1c167d0SJason Evans size_t arena_size = sizeof(arena_t) + sizeof(bin_t) * nbins_total; 1961*e1c167d0SJason Evans arena = (arena_t *)base_alloc(tsdn, base, arena_size, CACHELINE); 1962b7eaed25SJason Evans if (arena == NULL) { 1963b7eaed25SJason Evans goto label_error; 1964b7eaed25SJason Evans } 1965b7eaed25SJason Evans 1966b7eaed25SJason Evans atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); 1967b7eaed25SJason Evans atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); 1968b7eaed25SJason Evans arena->last_thd = NULL; 1969b7eaed25SJason Evans 1970b7eaed25SJason Evans if (config_stats) { 1971b7eaed25SJason Evans if (arena_stats_init(tsdn, &arena->stats)) { 1972b7eaed25SJason Evans goto label_error; 1973b7eaed25SJason Evans } 1974b7eaed25SJason Evans 1975b7eaed25SJason Evans ql_new(&arena->tcache_ql); 19760ef50b4eSJason Evans ql_new(&arena->cache_bin_array_descriptor_ql); 1977b7eaed25SJason Evans if 
(malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql", 1978b7eaed25SJason Evans WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) { 1979b7eaed25SJason Evans goto label_error; 1980b7eaed25SJason Evans } 1981b7eaed25SJason Evans } 1982b7eaed25SJason Evans 1983b7eaed25SJason Evans if (config_prof) { 1984b7eaed25SJason Evans if (prof_accum_init(tsdn, &arena->prof_accum)) { 1985b7eaed25SJason Evans goto label_error; 1986b7eaed25SJason Evans } 1987b7eaed25SJason Evans } 1988a4bd5210SJason Evans 1989d0e79aa3SJason Evans if (config_cache_oblivious) { 1990d0e79aa3SJason Evans /* 1991d0e79aa3SJason Evans * A nondeterministic seed based on the address of arena reduces 1992d0e79aa3SJason Evans * the likelihood of lockstep non-uniform cache index 1993d0e79aa3SJason Evans * utilization among identical concurrent processes, but at the 1994d0e79aa3SJason Evans * cost of test repeatability. For debug builds, instead use a 1995d0e79aa3SJason Evans * deterministic seed. 1996d0e79aa3SJason Evans */ 1997b7eaed25SJason Evans atomic_store_zu(&arena->offset_state, config_debug ? ind : 1998b7eaed25SJason Evans (size_t)(uintptr_t)arena, ATOMIC_RELAXED); 1999d0e79aa3SJason Evans } 2000d0e79aa3SJason Evans 2001b7eaed25SJason Evans atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED); 20021f0a49e8SJason Evans 2003b7eaed25SJason Evans atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(), 2004b7eaed25SJason Evans ATOMIC_RELAXED); 200582872ac0SJason Evans 2006b7eaed25SJason Evans atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); 20077fa7f12fSJason Evans 2008b7eaed25SJason Evans extent_list_init(&arena->large); 2009b7eaed25SJason Evans if (malloc_mutex_init(&arena->large_mtx, "arena_large", 2010b7eaed25SJason Evans WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) { 2011b7eaed25SJason Evans goto label_error; 2012b7eaed25SJason Evans } 2013a4bd5210SJason Evans 2014b7eaed25SJason Evans /* 2015b7eaed25SJason Evans * Delay coalescing for dirty extents despite the disruptive effect on 2016b7eaed25SJason Evans * memory layout for best-fit extent allocation, since cached extents 2017b7eaed25SJason Evans * are likely to be reused soon after deallocation, and the cost of 2018b7eaed25SJason Evans * merging/splitting extents is non-trivial. 2019b7eaed25SJason Evans */ 2020b7eaed25SJason Evans if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty, 2021b7eaed25SJason Evans true)) { 2022b7eaed25SJason Evans goto label_error; 2023b7eaed25SJason Evans } 2024b7eaed25SJason Evans /* 2025b7eaed25SJason Evans * Coalesce muzzy extents immediately, because operations on them are in 2026b7eaed25SJason Evans * the critical path much less often than for dirty extents. 2027b7eaed25SJason Evans */ 2028b7eaed25SJason Evans if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy, 2029b7eaed25SJason Evans false)) { 2030b7eaed25SJason Evans goto label_error; 2031b7eaed25SJason Evans } 2032b7eaed25SJason Evans /* 2033b7eaed25SJason Evans * Coalesce retained extents immediately, in part because they will 2034b7eaed25SJason Evans * never be evicted (and therefore there's no opportunity for delayed 2035b7eaed25SJason Evans * coalescing), but also because operations on retained extents are not 2036b7eaed25SJason Evans * in the critical path. 
2037b7eaed25SJason Evans */ 2038b7eaed25SJason Evans if (extents_init(tsdn, &arena->extents_retained, extent_state_retained, 2039b7eaed25SJason Evans false)) { 2040b7eaed25SJason Evans goto label_error; 2041b7eaed25SJason Evans } 2042a4bd5210SJason Evans 20430ef50b4eSJason Evans if (arena_decay_init(&arena->decay_dirty, 2044b7eaed25SJason Evans arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) { 2045b7eaed25SJason Evans goto label_error; 2046b7eaed25SJason Evans } 20470ef50b4eSJason Evans if (arena_decay_init(&arena->decay_muzzy, 2048b7eaed25SJason Evans arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) { 2049b7eaed25SJason Evans goto label_error; 2050b7eaed25SJason Evans } 2051bde95144SJason Evans 2052b7eaed25SJason Evans arena->extent_grow_next = sz_psz2ind(HUGEPAGE); 2053*e1c167d0SJason Evans arena->retain_grow_limit = sz_psz2ind(SC_LARGE_MAXCLASS); 2054b7eaed25SJason Evans if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow", 2055b7eaed25SJason Evans WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) { 2056b7eaed25SJason Evans goto label_error; 2057b7eaed25SJason Evans } 2058d0e79aa3SJason Evans 2059b7eaed25SJason Evans extent_avail_new(&arena->extent_avail); 2060b7eaed25SJason Evans if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail", 2061b7eaed25SJason Evans WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) { 2062b7eaed25SJason Evans goto label_error; 2063b7eaed25SJason Evans } 2064a4bd5210SJason Evans 2065a4bd5210SJason Evans /* Initialize bins. */ 2066*e1c167d0SJason Evans uintptr_t bin_addr = (uintptr_t)arena + sizeof(arena_t); 2067*e1c167d0SJason Evans atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE); 2068*e1c167d0SJason Evans for (i = 0; i < SC_NBINS; i++) { 2069*e1c167d0SJason Evans unsigned nshards = bin_infos[i].n_shards; 2070*e1c167d0SJason Evans arena->bins[i].bin_shards = (bin_t *)bin_addr; 2071*e1c167d0SJason Evans bin_addr += nshards * sizeof(bin_t); 2072*e1c167d0SJason Evans for (unsigned j = 0; j < nshards; j++) { 2073*e1c167d0SJason Evans bool err = bin_init(&arena->bins[i].bin_shards[j]); 20740ef50b4eSJason Evans if (err) { 2075b7eaed25SJason Evans goto label_error; 2076b7eaed25SJason Evans } 2077a4bd5210SJason Evans } 2078*e1c167d0SJason Evans } 2079*e1c167d0SJason Evans assert(bin_addr == (uintptr_t)arena + arena_size); 2080a4bd5210SJason Evans 2081b7eaed25SJason Evans arena->base = base; 2082b7eaed25SJason Evans /* Set arena before creating background threads. */ 2083b7eaed25SJason Evans arena_set(ind, arena); 2084b7eaed25SJason Evans 2085b7eaed25SJason Evans nstime_init(&arena->create_time, 0); 2086b7eaed25SJason Evans nstime_update(&arena->create_time); 2087b7eaed25SJason Evans 2088b7eaed25SJason Evans /* We don't support reentrancy for arena 0 bootstrapping. */ 2089b7eaed25SJason Evans if (ind != 0) { 2090a4bd5210SJason Evans /* 2091b7eaed25SJason Evans * If we're here, then arena 0 already exists, so bootstrapping 2092b7eaed25SJason Evans * is done enough that we should have tsd. 
2093a4bd5210SJason Evans */ 2094b7eaed25SJason Evans assert(!tsdn_null(tsdn)); 20958b2f5aafSJason Evans pre_reentrancy(tsdn_tsd(tsdn), arena); 2096*e1c167d0SJason Evans if (test_hooks_arena_new_hook) { 2097*e1c167d0SJason Evans test_hooks_arena_new_hook(); 2098a4bd5210SJason Evans } 2099b7eaed25SJason Evans post_reentrancy(tsdn_tsd(tsdn)); 2100d0e79aa3SJason Evans } 2101d0e79aa3SJason Evans 2102b7eaed25SJason Evans return arena; 21038244f2aaSJason Evans label_error: 2104b7eaed25SJason Evans if (ind != 0) { 21058b2f5aafSJason Evans base_delete(tsdn, base); 2106b7eaed25SJason Evans } 2107b7eaed25SJason Evans return NULL; 21088244f2aaSJason Evans } 21098244f2aaSJason Evans 2110*e1c167d0SJason Evans arena_t * 2111*e1c167d0SJason Evans arena_choose_huge(tsd_t *tsd) { 2112*e1c167d0SJason Evans /* huge_arena_ind can be 0 during init (will use a0). */ 2113*e1c167d0SJason Evans if (huge_arena_ind == 0) { 2114*e1c167d0SJason Evans assert(!malloc_initialized()); 2115*e1c167d0SJason Evans } 2116*e1c167d0SJason Evans 2117*e1c167d0SJason Evans arena_t *huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, false); 2118*e1c167d0SJason Evans if (huge_arena == NULL) { 2119*e1c167d0SJason Evans /* Create the huge arena on demand. */ 2120*e1c167d0SJason Evans assert(huge_arena_ind != 0); 2121*e1c167d0SJason Evans huge_arena = arena_get(tsd_tsdn(tsd), huge_arena_ind, true); 2122*e1c167d0SJason Evans if (huge_arena == NULL) { 2123*e1c167d0SJason Evans return NULL; 2124*e1c167d0SJason Evans } 2125*e1c167d0SJason Evans /* 2126*e1c167d0SJason Evans * Purge eagerly for huge allocations, because: 1) number of 2127*e1c167d0SJason Evans * huge allocations is usually small, which means ticker based 2128*e1c167d0SJason Evans * decay is not reliable; and 2) less immediate reuse is 2129*e1c167d0SJason Evans * expected for huge allocations. 2130*e1c167d0SJason Evans */ 2131*e1c167d0SJason Evans if (arena_dirty_decay_ms_default_get() > 0) { 2132*e1c167d0SJason Evans arena_dirty_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0); 2133*e1c167d0SJason Evans } 2134*e1c167d0SJason Evans if (arena_muzzy_decay_ms_default_get() > 0) { 2135*e1c167d0SJason Evans arena_muzzy_decay_ms_set(tsd_tsdn(tsd), huge_arena, 0); 2136*e1c167d0SJason Evans } 2137*e1c167d0SJason Evans } 2138*e1c167d0SJason Evans 2139*e1c167d0SJason Evans return huge_arena; 2140*e1c167d0SJason Evans } 2141*e1c167d0SJason Evans 2142*e1c167d0SJason Evans bool 2143*e1c167d0SJason Evans arena_init_huge(void) { 2144*e1c167d0SJason Evans bool huge_enabled; 2145*e1c167d0SJason Evans 2146*e1c167d0SJason Evans /* The threshold should be large size class. */ 2147*e1c167d0SJason Evans if (opt_oversize_threshold > SC_LARGE_MAXCLASS || 2148*e1c167d0SJason Evans opt_oversize_threshold < SC_LARGE_MINCLASS) { 2149*e1c167d0SJason Evans opt_oversize_threshold = 0; 2150*e1c167d0SJason Evans oversize_threshold = SC_LARGE_MAXCLASS + PAGE; 2151*e1c167d0SJason Evans huge_enabled = false; 2152*e1c167d0SJason Evans } else { 2153*e1c167d0SJason Evans /* Reserve the index for the huge arena. 
*/ 2154*e1c167d0SJason Evans huge_arena_ind = narenas_total_get(); 2155*e1c167d0SJason Evans oversize_threshold = opt_oversize_threshold; 2156*e1c167d0SJason Evans huge_enabled = true; 2157*e1c167d0SJason Evans } 2158*e1c167d0SJason Evans 2159*e1c167d0SJason Evans return huge_enabled; 2160*e1c167d0SJason Evans } 2161*e1c167d0SJason Evans 2162*e1c167d0SJason Evans bool 2163*e1c167d0SJason Evans arena_is_huge(unsigned arena_ind) { 2164*e1c167d0SJason Evans if (huge_arena_ind == 0) { 2165*e1c167d0SJason Evans return false; 2166*e1c167d0SJason Evans } 2167*e1c167d0SJason Evans return (arena_ind == huge_arena_ind); 2168*e1c167d0SJason Evans } 2169*e1c167d0SJason Evans 2170bde95144SJason Evans void 2171*e1c167d0SJason Evans arena_boot(sc_data_t *sc_data) { 2172b7eaed25SJason Evans arena_dirty_decay_ms_default_set(opt_dirty_decay_ms); 2173b7eaed25SJason Evans arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms); 2174*e1c167d0SJason Evans for (unsigned i = 0; i < SC_NBINS; i++) { 2175*e1c167d0SJason Evans sc_t *sc = &sc_data->sc[i]; 2176*e1c167d0SJason Evans div_init(&arena_binind_div_info[i], 2177*e1c167d0SJason Evans (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta)); 2178*e1c167d0SJason Evans } 2179a4bd5210SJason Evans } 2180a4bd5210SJason Evans 2181a4bd5210SJason Evans void 2182b7eaed25SJason Evans arena_prefork0(tsdn_t *tsdn, arena_t *arena) { 2183b7eaed25SJason Evans malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx); 2184b7eaed25SJason Evans malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx); 2185a4bd5210SJason Evans } 2186a4bd5210SJason Evans 2187a4bd5210SJason Evans void 2188b7eaed25SJason Evans arena_prefork1(tsdn_t *tsdn, arena_t *arena) { 2189b7eaed25SJason Evans if (config_stats) { 2190b7eaed25SJason Evans malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx); 2191b7eaed25SJason Evans } 2192a4bd5210SJason Evans } 2193a4bd5210SJason Evans 2194a4bd5210SJason Evans void 2195b7eaed25SJason Evans arena_prefork2(tsdn_t *tsdn, arena_t *arena) { 21968b2f5aafSJason Evans malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx); 21978b2f5aafSJason Evans } 21988b2f5aafSJason Evans 21998b2f5aafSJason Evans void 22008b2f5aafSJason Evans arena_prefork3(tsdn_t *tsdn, arena_t *arena) { 2201b7eaed25SJason Evans extents_prefork(tsdn, &arena->extents_dirty); 2202b7eaed25SJason Evans extents_prefork(tsdn, &arena->extents_muzzy); 2203b7eaed25SJason Evans extents_prefork(tsdn, &arena->extents_retained); 22041f0a49e8SJason Evans } 22051f0a49e8SJason Evans 22061f0a49e8SJason Evans void 22078b2f5aafSJason Evans arena_prefork4(tsdn_t *tsdn, arena_t *arena) { 2208b7eaed25SJason Evans malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx); 2209b7eaed25SJason Evans } 2210a4bd5210SJason Evans 2211b7eaed25SJason Evans void 22128b2f5aafSJason Evans arena_prefork5(tsdn_t *tsdn, arena_t *arena) { 2213b7eaed25SJason Evans base_prefork(tsdn, arena->base); 2214b7eaed25SJason Evans } 2215b7eaed25SJason Evans 2216b7eaed25SJason Evans void 22178b2f5aafSJason Evans arena_prefork6(tsdn_t *tsdn, arena_t *arena) { 2218b7eaed25SJason Evans malloc_mutex_prefork(tsdn, &arena->large_mtx); 2219b7eaed25SJason Evans } 2220b7eaed25SJason Evans 2221b7eaed25SJason Evans void 22228b2f5aafSJason Evans arena_prefork7(tsdn_t *tsdn, arena_t *arena) { 2223*e1c167d0SJason Evans for (unsigned i = 0; i < SC_NBINS; i++) { 2224*e1c167d0SJason Evans for (unsigned j = 0; j < bin_infos[i].n_shards; j++) { 2225*e1c167d0SJason Evans bin_prefork(tsdn, &arena->bins[i].bin_shards[j]); 2226*e1c167d0SJason Evans } 2227b7eaed25SJason Evans } 22281f0a49e8SJason Evans 

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	for (i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			bin_postfork_parent(tsdn,
			    &arena->bins[i].bin_shards[j]);
		}
	}
	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
	base_postfork_parent(tsdn, arena->base);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
	extents_postfork_parent(tsdn, &arena->extents_dirty);
	extents_postfork_parent(tsdn, &arena->extents_muzzy);
	extents_postfork_parent(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
	}
}
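
/*
 * Note on the surrounding functions (a minimal sketch, not jemalloc's exact
 * wiring): arena_prefork0..7, arena_postfork_parent(), and
 * arena_postfork_child() implement the standard pthread_atfork() protocol.
 * Every arena lock is acquired before fork() -- the numbered phases impose
 * one global acquisition order across all arenas -- then released in the
 * parent and reinitialized in the child, where only the forking thread
 * survives.  Reduced to a single hypothetical lock, the pattern is:
 *
 *	#include <pthread.h>
 *
 *	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
 *
 *	static void prefork(void) { pthread_mutex_lock(&lock); }
 *	static void postfork_parent(void) { pthread_mutex_unlock(&lock); }
 *	static void postfork_child(void) {
 *		// The child cannot trust inherited lock state; start fresh.
 *		pthread_mutex_init(&lock, NULL);
 *	}
 *
 *	// Registered once at startup:
 *	// pthread_atfork(prefork, postfork_parent, postfork_child);
 */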

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, false);
	}
	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, true);
	}
	if (config_stats) {
		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
		if (tcache != NULL && tcache->arena == arena) {
			ql_elm_new(tcache, link);
			ql_tail_insert(&arena->tcache_ql, tcache, link);
			cache_bin_array_descriptor_init(
			    &tcache->cache_bin_array_descriptor,
			    tcache->bins_small, tcache->bins_large);
			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
			    &tcache->cache_bin_array_descriptor, link);
		}
	}

	for (i = 0; i < SC_NBINS; i++) {
		for (unsigned j = 0; j < bin_infos[i].n_shards; j++) {
			bin_postfork_child(tsdn, &arena->bins[i].bin_shards[j]);
		}
	}
	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
	base_postfork_child(tsdn, arena->base);
	malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
	extents_postfork_child(tsdn, &arena->extents_dirty);
	extents_postfork_child(tsdn, &arena->extents_muzzy);
	extents_postfork_child(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
	}
}
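
/*
 * Usage sketch (a hypothetical test, not jemalloc code): the payoff of the
 * postfork machinery above is that allocation keeps working in the child of
 * a multi-threaded process.  POSIX only guarantees async-signal-safe calls
 * in such a child; the atfork handlers are what make a pattern like the
 * following work in practice when jemalloc is the linked malloc:
 *
 *	#include <stdlib.h>
 *	#include <sys/wait.h>
 *	#include <unistd.h>
 *
 *	int
 *	main(void) {
 *		void *p = malloc(4096);	// force allocator initialization
 *		pid_t pid = fork();	// prefork/postfork hooks run here
 *		if (pid == 0) {
 *			// Arena mutexes were reset in the child by
 *			// arena_postfork_child(); allocating here must not
 *			// deadlock on a lock held at fork() time.
 *			free(malloc(4096));
 *			_exit(0);
 *		}
 *		waitpid(pid, NULL, 0);
 *		free(p);
 *		return 0;
 *	}
 */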