#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/div.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/
/* Data. */

/*
 * Define names for both uninitialized and initialized phases, so that
 * options and mallctl processing are straightforward.
 */
const char *percpu_arena_mode_names[] = {
	"percpu",
	"phycpu",
	"disabled",
	"percpu",
	"phycpu"
};
percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;

ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;

static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default;

const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y)			\
	h,
	SMOOTHSTEP
#undef STEP
};

static div_info_t arena_binind_div_info[NBINS];
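
/*
 * Each arena_binind_div_info entry caches the precomputed magic-number state
 * that div_compute() uses to divide by the corresponding bin's region size
 * without a hardware divide.  The entries are expected to be filled in via
 * div_init() during boot (in code beyond this excerpt).
 */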

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
    size_t npages_decay_max, bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
    bool is_background_thread, bool all);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin);

/******************************************************************************/

void
arena_basic_stats_merge(UNUSED tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena_dss_prec_get(arena)];
	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
	*ndirty += extents_npages_get(&arena->extents_dirty);
	*nmuzzy += extents_npages_get(&arena->extents_muzzy);
}

void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
    bin_stats_t *bstats, arena_stats_large_t *lstats) {
	cassert(config_stats);

	arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
	    muzzy_decay_ms, nactive, ndirty, nmuzzy);

	size_t base_allocated, base_resident, base_mapped, metadata_thp;
	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
	    &base_mapped, &metadata_thp);

	arena_stats_lock(tsdn, &arena->stats);

	arena_stats_accum_zu(&astats->mapped, base_mapped
	    + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
	arena_stats_accum_zu(&astats->retained,
	    extents_npages_get(&arena->extents_retained) << LG_PAGE);

	arena_stats_accum_u64(&astats->decay_dirty.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.npurge));
	arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.nmadvise));
	arena_stats_accum_u64(&astats->decay_dirty.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.purged));

	arena_stats_accum_u64(&astats->decay_muzzy.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.npurge));
	arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.nmadvise));
	arena_stats_accum_u64(&astats->decay_muzzy.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.purged));

	arena_stats_accum_zu(&astats->base, base_allocated);
	arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
	arena_stats_accum_zu(&astats->metadata_thp, metadata_thp);
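	/*
	 * Resident memory is approximated as base metadata plus all active,
	 * dirty, and muzzy pages.  Muzzy pages may already have been reclaimed
	 * by the kernel, so this is roughly an upper-bound estimate.
	 */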
	arena_stats_accum_zu(&astats->resident, base_resident +
	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
	    extents_npages_get(&arena->extents_dirty) +
	    extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));

	for (szind_t i = 0; i < NSIZES - NBINS; i++) {
		uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nmalloc);
		arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
		arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);

		uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].ndalloc);
		arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
		arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);

		uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nrequests);
		arena_stats_accum_u64(&lstats[i].nrequests,
		    nmalloc + nrequests);
		arena_stats_accum_u64(&astats->nrequests_large,
		    nmalloc + nrequests);

		assert(nmalloc >= ndalloc);
		assert(nmalloc - ndalloc <= SIZE_T_MAX);
		size_t curlextents = (size_t)(nmalloc - ndalloc);
		lstats[i].curlextents += curlextents;
		arena_stats_accum_zu(&astats->allocated_large,
		    curlextents * sz_index2size(NBINS + i));
	}

	arena_stats_unlock(tsdn, &arena->stats);

	/* tcache_bytes counts currently cached bytes. */
	atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
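	/*
	 * Walk every tcache bound to this arena, summing ncached regions times
	 * region size over all small and large cache bins.
	 */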
	cache_bin_array_descriptor_t *descriptor;
	ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
		szind_t i = 0;
		for (; i < NBINS; i++) {
			cache_bin_t *tbin = &descriptor->bins_small[i];
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		}
		for (; i < nhbins; i++) {
			cache_bin_t *tbin = &descriptor->bins_large[i];
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		}
	}
	malloc_mutex_prof_read(tsdn,
	    &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
	    &arena->tcache_ql_mtx);
	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);

#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind)				\
    malloc_mutex_lock(tsdn, &arena->mtx);				\
    malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind],		\
        &arena->mtx);							\
    malloc_mutex_unlock(tsdn, &arena->mtx);
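
	/*
	 * The macro briefly acquires each remaining arena mutex so that its
	 * contention profiling data can be snapshotted into astats.
	 */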
	/* Gather per arena mutex profiling data. */
	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
	READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
	    arena_prof_mutex_extent_avail)
	READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
	    arena_prof_mutex_extents_dirty)
	READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
	    arena_prof_mutex_extents_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
	    arena_prof_mutex_extents_retained)
	READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
	    arena_prof_mutex_decay_dirty)
	READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
	    arena_prof_mutex_decay_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(base->mtx,
	    arena_prof_mutex_base)
#undef READ_ARENA_MUTEX_PROF_DATA

	nstime_copy(&astats->uptime, &arena->create_time);
	nstime_update(&astats->uptime);
	nstime_subtract(&astats->uptime, &arena->create_time);

	for (szind_t i = 0; i < NBINS; i++) {
		bin_stats_merge(tsdn, &bstats[i], &arena->bins[i]);
	}
}

void
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
	    extent);
	if (arena_dirty_decay_ms_get(arena) == 0) {
		arena_decay_dirty(tsdn, arena, false, true);
	} else {
		arena_background_thread_inactivity_check(tsdn, arena, false);
	}
}

static void *
arena_slab_reg_alloc(extent_t *slab, const bin_info_t *bin_info) {
	void *ret;
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	size_t regind;

	assert(extent_nfree_get(slab) > 0);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)extent_addr_get(slab) +
	    (uintptr_t)(bin_info->reg_size * regind));
	extent_nfree_dec(slab);
	return ret;
}

#ifndef JEMALLOC_JET
static
#endif
size_t
arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
	size_t diff, regind;

	/* Freeing a pointer outside the slab can cause assertion failure. */
	assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
	    (uintptr_t)bin_infos[binind].reg_size == 0);

	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));

	/* Avoid doing division with a variable divisor. */
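	/*
	 * div_compute() multiplies by a per-bin precomputed reciprocal rather
	 * than dividing by reg_size; e.g. for a hypothetical bin with
	 * reg_size == 48, diff == 96 yields regind == 2.
	 */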
	regind = div_compute(&arena_binind_div_info[binind], diff);

	assert(regind < bin_infos[binind].nregs);

	return regind;
}

static void
arena_slab_reg_dalloc(extent_t *slab, arena_slab_data_t *slab_data, void *ptr) {
	szind_t binind = extent_szind_get(slab);
	const bin_info_t *bin_info = &bin_infos[binind];
	size_t regind = arena_slab_regind(slab, binind, ptr);

	assert(extent_nfree_get(slab) < bin_info->nregs);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
	extent_nfree_inc(slab);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages) {
	atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages) {
	assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
	atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
}

static void
arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < LARGE_MINCLASS) {
		usize = LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= NBINS) ? index - NBINS : 0;

	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].nmalloc, 1);
}

static void
arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < LARGE_MINCLASS) {
		usize = LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= NBINS) ? index - NBINS : 0;

	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].ndalloc, 1);
}

static void
arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
    size_t usize) {
	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
	arena_large_malloc_stats_update(tsdn, arena, usize);
}

extent_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

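	/*
	 * Prefer recycling an existing dirty extent, then a muzzy one; map new
	 * memory (extent_alloc_wrapper()) only as a last resort, which is also
	 * the only path that grows the mapped statistic.
	 */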
	szind_t szind = sz_size2index(usize);
	size_t mapped_add;
	bool commit = true;
	extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
	    szind, zero, &commit);
	if (extent == NULL) {
		extent = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
		    false, szind, zero, &commit);
	}
	size_t size = usize + sz_large_pad;
	if (extent == NULL) {
		extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
		    usize, sz_large_pad, alignment, false, szind, zero,
		    &commit);
		if (config_stats) {
			/*
			 * extent may be NULL on OOM, but in that case
			 * mapped_add isn't used below, so there's no need to
			 * conditionally set it to 0 here.
			 */
			mapped_add = size;
		}
	} else if (config_stats) {
		mapped_add = 0;
	}

	if (extent != NULL) {
		if (config_stats) {
			arena_stats_lock(tsdn, &arena->stats);
			arena_large_malloc_stats_update(tsdn, arena, usize);
			if (mapped_add != 0) {
				arena_stats_add_zu(tsdn, &arena->stats,
				    &arena->stats.mapped, mapped_add);
			}
			arena_stats_unlock(tsdn, &arena->stats);
		}
		arena_nactive_add(arena, size >> LG_PAGE);
	}

	return extent;
}

void
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_dalloc_stats_update(tsdn, arena,
		    extent_usize_get(extent));
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
}

void
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
	size_t usize = extent_usize_get(extent);
	size_t udiff = oldusize - usize;

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);
}

void
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
	size_t usize = extent_usize_get(extent);
	size_t udiff = usize - oldusize;

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);
}

static ssize_t
arena_decay_ms_read(arena_decay_t *decay) {
	return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
}

static void
arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
	atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
}
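
/*
 * Decay overview: recently freed pages are tracked per epoch in
 * decay->backlog (one slot per smoothstep step).  Advancing an epoch shifts
 * the backlog, arena_decay_backlog_npages_limit() converts the backlog into
 * the number of pages still allowed to remain unpurged, and
 * arena_decay_to_limit() purges anything beyond that limit.
 */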

static void
arena_decay_deadline_init(arena_decay_t *decay) {
	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&decay->deadline, &decay->epoch);
	nstime_add(&decay->deadline, &decay->interval);
	if (arena_decay_ms_read(decay) > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
		    nstime_ns(&decay->interval)));
		nstime_add(&decay->deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
	return (nstime_compare(&decay->deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
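	/*
	 * I.e. npages_limit = (sum over i of backlog[i] * h_steps[i]) >>
	 * SMOOTHSTEP_BFP, with h_steps[] holding the precomputed fixed-point
	 * smoothstep factors defined above.
	 */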
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += decay->backlog[i] * h_steps[i];
	}
	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);

	return npages_limit_backlog;
}

static void
arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
	size_t npages_delta = (current_npages > decay->nunpurged) ?
	    current_npages - decay->nunpurged : 0;
	decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;

	if (config_debug) {
		if (current_npages > decay->ceil_npages) {
			decay->ceil_npages = current_npages;
		}
		size_t npages_limit = arena_decay_backlog_npages_limit(decay);
		assert(decay->ceil_npages >= npages_limit);
		if (decay->ceil_npages > npages_limit) {
			decay->ceil_npages = npages_limit;
		}
	}
}

static void
arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
    size_t current_npages) {
	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
		memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		size_t nadvance_z = (size_t)nadvance_u64;

		assert((uint64_t)nadvance_z == nadvance_u64);

		memmove(decay->backlog, &decay->backlog[nadvance_z],
		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
		if (nadvance_z > 1) {
			memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
		}
	}

	arena_decay_backlog_update_last(decay, current_npages);
}

static void
arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, size_t current_npages, size_t npages_limit,
    bool is_background_thread) {
	if (current_npages > npages_limit) {
		arena_decay_to_limit(tsdn, arena, decay, extents, false,
		    npages_limit, current_npages - npages_limit,
		    is_background_thread);
	}
}

static void
arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
    size_t current_npages) {
	assert(arena_decay_deadline_reached(decay, time));

	nstime_t delta;
	nstime_copy(&delta, time);
	nstime_subtract(&delta, &decay->epoch);

	uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
	assert(nadvance_u64 > 0);

	/* Add nadvance_u64 decay intervals to epoch. */
	nstime_copy(&delta, &decay->interval);
	nstime_imultiply(&delta, nadvance_u64);
	nstime_add(&decay->epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(decay);

	/* Update the backlog. */
	arena_decay_backlog_update(decay, nadvance_u64, current_npages);
}

static void
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, const nstime_t *time, bool is_background_thread) {
	size_t current_npages = extents_npages_get(extents);
	arena_decay_epoch_advance_helper(decay, time, current_npages);

	size_t npages_limit = arena_decay_backlog_npages_limit(decay);
	/*
	 * decay->mtx may be dropped inside arena_decay_try_purge(), so finish
	 * logging first.
	 */
	decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
	    current_npages;

	if (!background_thread_enabled() || is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    current_npages, npages_limit, is_background_thread);
	}
}

static void
arena_decay_reinit(arena_decay_t *decay, ssize_t decay_ms) {
	arena_decay_ms_write(decay, decay_ms);
	if (decay_ms > 0) {
		nstime_init(&decay->interval, (uint64_t)decay_ms *
		    KQU(1000000));
		nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
	}
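	/*
	 * The epoch interval is decay_ms in nanoseconds split evenly across
	 * the smoothstep horizon; e.g. (assuming SMOOTHSTEP_NSTEPS == 200) a
	 * decay_ms of 10000 yields 200 epochs of 50 ms each.
	 */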

	nstime_init(&decay->epoch, 0);
	nstime_update(&decay->epoch);
	decay->jitter_state = (uint64_t)(uintptr_t)decay;
	arena_decay_deadline_init(decay);
	decay->nunpurged = 0;
	memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_init(arena_decay_t *decay, ssize_t decay_ms,
    arena_stats_decay_t *stats) {
	if (config_debug) {
		for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
			assert(((char *)decay)[i] == 0);
		}
		decay->ceil_npages = 0;
	}
	if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	decay->purging = false;
	arena_decay_reinit(decay, decay_ms);
	/* Memory is zeroed, so there is no need to clear stats. */
	if (config_stats) {
		decay->stats = stats;
	}
	return false;
}
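
/*
 * A decay time of -1 disables purging entirely and 0 purges immediately;
 * otherwise any positive value up to NSTIME_SEC_MAX seconds (expressed in
 * milliseconds) is accepted.
 */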
static bool
arena_decay_ms_valid(ssize_t decay_ms) {
	if (decay_ms < -1) {
		return false;
	}
	if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
	    KQU(1000)) {
		return true;
	}
	return false;
}

static bool
arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread) {
	malloc_mutex_assert_owner(tsdn, &decay->mtx);

	/* Purge all or nothing if the option is disabled. */
	ssize_t decay_ms = arena_decay_ms_read(decay);
	if (decay_ms <= 0) {
		if (decay_ms == 0) {
			arena_decay_to_limit(tsdn, arena, decay, extents, false,
			    0, extents_npages_get(extents),
			    is_background_thread);
		}
		return false;
	}

	nstime_t time;
	nstime_init(&time, 0);
	nstime_update(&time);
	if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
	    > 0)) {
		/*
		 * Time went backwards.  Move the epoch back in time and
		 * generate a new deadline, with the expectation that time
		 * typically flows forward for long enough periods of time that
		 * epochs complete.  Unfortunately, this strategy is susceptible
		 * to clock jitter triggering premature epoch advances, but
		 * clock jitter estimation and compensation isn't feasible here
		 * because calls into this code are event-driven.
		 */
		nstime_copy(&decay->epoch, &time);
		arena_decay_deadline_init(decay);
	} else {
		/* Verify that time does not go backwards. */
		assert(nstime_compare(&decay->epoch, &time) <= 0);
	}

	/*
	 * If the deadline has been reached, advance to the current epoch and
	 * purge to the new limit if necessary.  Note that dirty pages created
	 * during the current epoch are not subject to purge until a future
	 * epoch, so purging only happens during epoch advances or when
	 * triggered by a background thread's scheduled check.
	 */
	bool advance_epoch = arena_decay_deadline_reached(decay, &time);
	if (advance_epoch) {
		arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
		    is_background_thread);
	} else if (is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    extents_npages_get(extents),
		    arena_decay_backlog_npages_limit(decay),
		    is_background_thread);
	}

	return advance_epoch;
}

static ssize_t
arena_decay_ms_get(arena_decay_t *decay) {
	return arena_decay_ms_read(decay);
}

ssize_t
arena_dirty_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_dirty);
}

ssize_t
arena_muzzy_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_muzzy);
}

static bool
arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}

	malloc_mutex_lock(tsdn, &decay->mtx);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_ms changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_reinit(decay, decay_ms);
	arena_maybe_decay(tsdn, arena, decay, extents, false);
	malloc_mutex_unlock(tsdn, &decay->mtx);

	return false;
}

bool
arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, decay_ms);
}

bool
arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, decay_ms);
}

static size_t
arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
    size_t npages_decay_max, extent_list_t *decay_extents) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Stash extents according to npages_limit. */
	size_t nstashed = 0;
	extent_t *extent;
	while (nstashed < npages_decay_max &&
	    (extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
	    npages_limit)) != NULL) {
		extent_list_append(decay_extents, extent);
		nstashed += extent_size_get(extent) >> LG_PAGE;
	}
	return nstashed;
}
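
/*
 * Purge the extents stashed by arena_stash_decayed().  Dirty extents are
 * lazily purged and demoted to the muzzy state when possible; otherwise (and
 * for already-muzzy extents) they are handed to extent_dalloc_wrapper(), which
 * may unmap or retain the underlying virtual memory.
 */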
static size_t
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
    bool all, extent_list_t *decay_extents, bool is_background_thread) {
	UNUSED size_t nmadvise, nunmapped;
	size_t npurged;

	if (config_stats) {
		nmadvise = 0;
		nunmapped = 0;
	}
	npurged = 0;

	ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	for (extent_t *extent = extent_list_first(decay_extents); extent !=
	    NULL; extent = extent_list_first(decay_extents)) {
		if (config_stats) {
			nmadvise++;
		}
		size_t npages = extent_size_get(extent) >> LG_PAGE;
		npurged += npages;
		extent_list_remove(decay_extents, extent);
		switch (extents_state_get(extents)) {
		case extent_state_active:
			not_reached();
		case extent_state_dirty:
			if (!all && muzzy_decay_ms != 0 &&
			    !extent_purge_lazy_wrapper(tsdn, arena,
			    r_extent_hooks, extent, 0,
			    extent_size_get(extent))) {
				extents_dalloc(tsdn, arena, r_extent_hooks,
				    &arena->extents_muzzy, extent);
				arena_background_thread_inactivity_check(tsdn,
				    arena, is_background_thread);
				break;
			}
			/* Fall through. */
		case extent_state_muzzy:
			extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
			    extent);
			if (config_stats) {
				nunmapped += npages;
			}
			break;
		case extent_state_retained:
		default:
			not_reached();
		}
	}

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
		    1);
		arena_stats_add_u64(tsdn, &arena->stats,
		    &decay->stats->nmadvise, nmadvise);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
		    npurged);
		arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
		    nunmapped << LG_PAGE);
		arena_stats_unlock(tsdn, &arena->stats);
	}

	return npurged;
}

/*
 * Decay at most npages_decay_max pages without violating the invariant
 * (extents_npages_get(extents) >= npages_limit).  We need an upper bound on
 * the number of pages processed per run in order to prevent unbounded growth
 * (namely of the stashed list); otherwise new pages could keep being added to
 * extents during the current decay run, and the purging thread would never
 * finish.
 */
static void
arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool all, size_t npages_limit, size_t npages_decay_max,
    bool is_background_thread) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 1);
	malloc_mutex_assert_owner(tsdn, &decay->mtx);

	if (decay->purging) {
		return;
	}
	decay->purging = true;
	malloc_mutex_unlock(tsdn, &decay->mtx);

	extent_hooks_t *extent_hooks = extent_hooks_get(arena);

	extent_list_t decay_extents;
	extent_list_init(&decay_extents);

	size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
	    npages_limit, npages_decay_max, &decay_extents);
	if (npurge != 0) {
		UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
		    &extent_hooks, decay, extents, all, &decay_extents,
		    is_background_thread);
		assert(npurged == npurge);
	}

	malloc_mutex_lock(tsdn, &decay->mtx);
	decay->purging = false;
}

static bool
arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread, bool all) {
	if (all) {
		malloc_mutex_lock(tsdn, &decay->mtx);
		arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
		    extents_npages_get(extents), is_background_thread);
		malloc_mutex_unlock(tsdn, &decay->mtx);

		return false;
	}

	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		/* No need to wait if another thread is in progress. */
		return true;
	}

	bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
	    is_background_thread);
	UNUSED size_t npages_new;
	if (epoch_advanced) {
		/* Backlog is updated on epoch advance. */
		npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
	}
	malloc_mutex_unlock(tsdn, &decay->mtx);

	if (have_background_thread && background_thread_enabled() &&
	    epoch_advanced && !is_background_thread) {
		background_thread_interval_check(tsdn, arena, decay,
		    npages_new);
	}

	return false;
}

static bool
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
	return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, is_background_thread, all);
}

static bool
arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
	return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, is_background_thread, all);
}

void
arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
		return;
	}
	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}

static void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);

	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
}

static void
arena_bin_slabs_nonfull_insert(bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) > 0);
	extent_heap_insert(&bin->slabs_nonfull, slab);
}

static void
arena_bin_slabs_nonfull_remove(bin_t *bin, extent_t *slab) {
	extent_heap_remove(&bin->slabs_nonfull, slab);
}

static extent_t *
arena_bin_slabs_nonfull_tryget(bin_t *bin) {
	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
	if (slab == NULL) {
		return NULL;
	}
	if (config_stats) {
		bin->stats.reslabs++;
	}
	return slab;
}

static void
arena_bin_slabs_full_insert(arena_t *arena, bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) == 0);
	/*
	 * Tracking extents is required by arena_reset, which is not allowed
	 * for auto arenas.  Bypass this step to avoid touching the extent
	 * linkage (often results in cache misses) for auto arenas.
	 */
	if (arena_is_auto(arena)) {
		return;
	}
	extent_list_append(&bin->slabs_full, slab);
}

static void
arena_bin_slabs_full_remove(arena_t *arena, bin_t *bin, extent_t *slab) {
	if (arena_is_auto(arena)) {
		return;
	}
	extent_list_remove(&bin->slabs_full, slab);
}

void
arena_reset(tsd_t *tsd, arena_t *arena) {
	/*
	 * Locking in this function is unintuitive.  The caller guarantees that
	 * no concurrent operations are happening in this arena, but there are
	 * still reasons that some locking is necessary:
	 *
	 * - Some of the functions in the transitive closure of calls assume
	 *   appropriate locks are held, and in some cases these locks are
	 *   temporarily dropped to avoid lock order reversal or deadlock due to
	 *   reentry.
	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
	 *   strictly speaking this is a "concurrent operation", disallowing
	 *   stats refreshes would impose an inconvenient burden.
	 */

	/* Large allocations. */
	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);

	for (extent_t *extent = extent_list_first(&arena->large); extent !=
	    NULL; extent = extent_list_first(&arena->large)) {
		void *ptr = extent_base_get(extent);
		size_t usize;

		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
		alloc_ctx_t alloc_ctx;
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
		assert(alloc_ctx.szind != NSIZES);

		if (config_stats || (config_prof && opt_prof)) {
			usize = sz_index2size(alloc_ctx.szind);
			assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		}
		/* Remove large allocation from prof sample set. */
		if (config_prof && opt_prof) {
			prof_free(tsd, ptr, usize, &alloc_ctx);
		}
		large_dalloc(tsd_tsdn(tsd), extent);
		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);

	/* Bins. */
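	/*
	 * Discard every slab in each bin: slabcur, then the nonfull heap, then
	 * the full list.  The bin lock is dropped around each slab
	 * deallocation to respect lock ordering.
	 */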
	for (unsigned i = 0; i < NBINS; i++) {
		extent_t *slab;
		bin_t *bin = &arena->bins[i];
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		if (bin->slabcur != NULL) {
			slab = bin->slabcur;
			bin->slabcur = NULL;
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
		    NULL) {
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
		    slab = extent_list_first(&bin->slabs_full)) {
			arena_bin_slabs_full_remove(arena, bin, slab);
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		if (config_stats) {
			bin->stats.curregs = 0;
			bin->stats.curslabs = 0;
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
	}

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
}

static void
arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
	/*
	 * Iterate over the retained extents and destroy them.  This gives the
	 * extent allocator underlying the extent hooks an opportunity to unmap
	 * all retained memory without having to keep its own metadata
	 * structures.  In practice, virtual memory for dss-allocated extents
	 * is leaked here, so best practice is to avoid dss for arenas to be
	 * destroyed, or provide custom extent hooks that track retained
	 * dss-based extents for later reuse.
	 */
1046a4bd5210SJason Evans */ 1047b7eaed25SJason Evans extent_hooks_t *extent_hooks = extent_hooks_get(arena); 1048b7eaed25SJason Evans extent_t *extent; 1049b7eaed25SJason Evans while ((extent = extents_evict(tsdn, arena, &extent_hooks, 1050b7eaed25SJason Evans &arena->extents_retained, 0)) != NULL) { 1051b7eaed25SJason Evans extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent); 1052d0e79aa3SJason Evans } 1053a4bd5210SJason Evans } 1054a4bd5210SJason Evans 1055a4bd5210SJason Evans void 1056b7eaed25SJason Evans arena_destroy(tsd_t *tsd, arena_t *arena) { 1057b7eaed25SJason Evans assert(base_ind_get(arena->base) >= narenas_auto); 1058b7eaed25SJason Evans assert(arena_nthreads_get(arena, false) == 0); 1059b7eaed25SJason Evans assert(arena_nthreads_get(arena, true) == 0); 1060b7eaed25SJason Evans 1061b7eaed25SJason Evans /* 1062b7eaed25SJason Evans * No allocations have occurred since arena_reset() was called. 1063b7eaed25SJason Evans * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached 1064b7eaed25SJason Evans * extents, so only retained extents may remain. 1065b7eaed25SJason Evans */ 1066b7eaed25SJason Evans assert(extents_npages_get(&arena->extents_dirty) == 0); 1067b7eaed25SJason Evans assert(extents_npages_get(&arena->extents_muzzy) == 0); 1068b7eaed25SJason Evans 1069b7eaed25SJason Evans /* Deallocate retained memory. */ 1070b7eaed25SJason Evans arena_destroy_retained(tsd_tsdn(tsd), arena); 1071b7eaed25SJason Evans 1072b7eaed25SJason Evans /* 1073b7eaed25SJason Evans * Remove the arena pointer from the arenas array. We rely on the fact 1074b7eaed25SJason Evans * that there is no way for the application to get a dirty read from the 1075b7eaed25SJason Evans * arenas array unless there is an inherent race in the application 1076b7eaed25SJason Evans * involving access of an arena being concurrently destroyed. The 1077b7eaed25SJason Evans * application must synchronize knowledge of the arena's validity, so as 1078b7eaed25SJason Evans * long as we use an atomic write to update the arenas array, the 1079b7eaed25SJason Evans * application will get a clean read any time after it synchronizes 1080b7eaed25SJason Evans * knowledge that the arena is no longer valid. 1081b7eaed25SJason Evans */ 1082b7eaed25SJason Evans arena_set(base_ind_get(arena->base), NULL); 1083b7eaed25SJason Evans 1084b7eaed25SJason Evans /* 1085b7eaed25SJason Evans * Destroy the base allocator, which manages all metadata ever mapped by 1086b7eaed25SJason Evans * this arena. 

static extent_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, const bin_info_t *bin_info,
    szind_t szind) {
	extent_t *slab;
	bool zero, commit;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	zero = false;
	commit = true;
	slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
	    bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);

	if (config_stats && slab != NULL) {
		arena_stats_mapped_add(tsdn, &arena->stats,
		    bin_info->slab_size);
	}

	return slab;
}

static extent_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    const bin_info_t *bin_info) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	szind_t szind = sz_size2index(bin_info->reg_size);
	bool zero = false;
	bool commit = true;
	extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
	    binind, &zero, &commit);
	if (slab == NULL) {
		slab = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
		    true, binind, &zero, &commit);
	}
	if (slab == NULL) {
		slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
		    bin_info, szind);
		if (slab == NULL) {
			return NULL;
		}
	}
	assert(extent_slab_get(slab));

	/* Initialize slab internals. */
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	extent_nfree_set(slab, bin_info->nregs);
	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);

	arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);

	return slab;
}
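
/*
 * Editorial note on the lookup order in arena_slab_alloc() above: dirty
 * extents are tried first because they are still committed and likely
 * cache-warm, muzzy extents second because they remain mapped but may have
 * been released back to the kernel, and arena_slab_alloc_hard() last because
 * mapping fresh pages is the only path that is guaranteed to cost system
 * calls.
 */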

static extent_t *
arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind) {
	extent_t *slab;
	const bin_info_t *bin_info;

	/* Look for a usable slab. */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	if (slab != NULL) {
		return slab;
	}
	/* No existing slabs have any space available. */

	bin_info = &bin_infos[binind];

	/* Allocate a new slab. */
	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
	/********************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (slab != NULL) {
		if (config_stats) {
			bin->stats.nslabs++;
			bin->stats.curslabs++;
		}
		return slab;
	}

	/*
	 * arena_slab_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	if (slab != NULL) {
		return slab;
	}

	return NULL;
}
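
/*
 * Editorial note: the unlock/alloc/relock dance above is a recurring pattern
 * in this file.  bin->lock must not be held across slab allocation, which
 * takes arena-level mutexes and can call into the kernel, so the bin lock is
 * dropped around the slow work and the bin's state is re-validated once the
 * lock is reacquired, since other threads may have changed it in the interim.
 */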

/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static void *
arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, bin_t *bin,
    szind_t binind) {
	const bin_info_t *bin_info;
	extent_t *slab;

	bin_info = &bin_infos[binind];
	if (!arena_is_auto(arena) && bin->slabcur != NULL) {
		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
	}
	slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
	if (bin->slabcur != NULL) {
		/*
		 * Another thread updated slabcur while this one ran without
		 * the bin lock in arena_bin_nonfull_slab_get().
		 */
		if (extent_nfree_get(bin->slabcur) > 0) {
			void *ret = arena_slab_reg_alloc(bin->slabcur,
			    bin_info);
			if (slab != NULL) {
				/*
				 * arena_slab_alloc() may have allocated slab,
				 * or it may have been pulled from
				 * slabs_nonfull. Therefore it is unsafe to
				 * make any assumptions about how slab has
				 * previously been used, and
				 * arena_bin_lower_slab() must be called, as if
				 * a region were just deallocated from the
				 * slab.
				 */
				if (extent_nfree_get(slab) ==
				    bin_info->nregs) {
					arena_dalloc_bin_slab(tsdn, arena,
					    slab, bin);
				} else {
					arena_bin_lower_slab(tsdn, arena,
					    slab, bin);
				}
			}
			return ret;
		}

		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
	}

	if (slab == NULL) {
		return NULL;
	}
	bin->slabcur = slab;

	assert(extent_nfree_get(bin->slabcur) > 0);

	return arena_slab_reg_alloc(slab, bin_info);
}

void
arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    cache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
	unsigned i, nfill;
	bin_t *bin;

	assert(tbin->ncached == 0);

	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
		prof_idump(tsdn);
	}
	bin = &arena->bins[binind];
	malloc_mutex_lock(tsdn, &bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tcache->lg_fill_div[binind]); i < nfill; i++) {
		extent_t *slab;
		void *ptr;
		if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
		    0) {
			ptr = arena_slab_reg_alloc(slab, &bin_infos[binind]);
		} else {
			ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
		}
		if (ptr == NULL) {
			/*
			 * OOM. tbin->avail isn't yet filled down to its first
			 * element, so the successful allocations (if any) must
			 * be moved just before tbin->avail before bailing out.
			 */
			if (i > 0) {
				memmove(tbin->avail - i, tbin->avail - nfill,
				    i * sizeof(void *));
			}
			break;
		}
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ptr, &bin_infos[binind], true);
		}
		/* Insert such that low regions get used first. */
		*(tbin->avail - nfill + i) = ptr;
	}
	if (config_stats) {
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.curregs += i;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	tbin->ncached = i;
	arena_decay_tick(tsdn, arena);
}

void
arena_alloc_junk_small(void *ptr, const bin_info_t *bin_info, bool zero) {
	if (!zero) {
		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
	}
}

static void
arena_dalloc_junk_small_impl(void *ptr, const bin_info_t *bin_info) {
	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
}
arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
    arena_dalloc_junk_small_impl;
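
/*
 * Illustrative sketch (not part of the original source): on a build with
 * config_fill enabled and MALLOC_CONF="junk:true", the two helpers above fill
 * fresh small regions with JEMALLOC_ALLOC_JUNK (0xa5) and freed ones with
 * JEMALLOC_FREE_JUNK (0x5a), which makes use of uninitialized or freed memory
 * easy to spot in a debugger:
 *
 *	uint8_t *p = (uint8_t *)malloc(32);
 *	assert(p[0] == 0xa5);
 *	free(p);
 *
 * After the free, the region would read as 0x5a bytes until it is reused
 * (reading it from the application is, of course, undefined behavior).
 */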

static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
	void *ret;
	bin_t *bin;
	size_t usize;
	extent_t *slab;

	assert(binind < NBINS);
	bin = &arena->bins[binind];
	usize = sz_index2size(binind);

	malloc_mutex_lock(tsdn, &bin->lock);
	if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
		ret = arena_slab_reg_alloc(slab, &bin_infos[binind]);
	} else {
		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
	}

	if (ret == NULL) {
		malloc_mutex_unlock(tsdn, &bin->lock);
		return NULL;
	}

	if (config_stats) {
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
		bin->stats.curregs++;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
		prof_idump(tsdn);
	}

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &bin_infos[binind], false);
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
	} else {
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &bin_infos[binind], true);
		}
		memset(ret, 0, usize);
	}

	arena_decay_tick(tsdn, arena);
	return ret;
}

void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero) {
	assert(!tsdn_null(tsdn) || arena != NULL);

	if (likely(!tsdn_null(tsdn))) {
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	}
	if (unlikely(arena == NULL)) {
		return NULL;
	}

	if (likely(size <= SMALL_MAXCLASS)) {
		return arena_malloc_small(tsdn, arena, ind, zero);
	}
	return large_malloc(tsdn, arena, sz_index2size(ind), zero);
}
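
/*
 * Worked example (illustrative, not part of the original source): on a
 * typical configuration with a 16-byte quantum, a 13-byte request maps to the
 * 16-byte bin, and the index/size conversions round-trip on the size class
 * boundary:
 *
 *	szind_t ind = sz_size2index(13);
 *	assert(sz_index2size(ind) == 16);
 *	assert(bin_infos[ind].reg_size == 16);
 *
 * so arena_malloc_small() always operates on the rounded-up usize of the
 * request's size class, never on the raw requested size.
 */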

void *
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache) {
	void *ret;

	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
	    && (usize & PAGE_MASK) == 0))) {
		/* Small; alignment doesn't require special slab placement. */
		ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	} else {
		if (likely(alignment <= CACHELINE)) {
			ret = large_malloc(tsdn, arena, usize, zero);
		} else {
			ret = large_palloc(tsdn, arena, usize, alignment,
			    zero);
		}
	}
	return ret;
}

void
arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
	assert(usize <= SMALL_MAXCLASS);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
	arena_t *arena = extent_arena_get(extent);

	szind_t szind = sz_size2index(usize);
	extent_szind_set(extent, szind);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, szind, false);

	prof_accum_cancel(tsdn, &arena->prof_accum, usize);

	assert(isalloc(tsdn, ptr) == usize);
}

static size_t
arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
	cassert(config_prof);
	assert(ptr != NULL);

	extent_szind_set(extent, NBINS);
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, NBINS, false);

	assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);

	return LARGE_MINCLASS;
}

void
arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    bool slow_path) {
	cassert(config_prof);
	assert(opt_prof);

	extent_t *extent = iealloc(tsdn, ptr);
	size_t usize = arena_prof_demote(tsdn, extent, ptr);
	if (usize <= tcache_maxclass) {
		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
		    sz_size2index(usize), slow_path);
	} else {
		large_dalloc(tsdn, extent);
	}
}
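
/*
 * Editorial note (illustrative): the promote/demote pair above supports heap
 * profiling of small allocations.  With a profiling-enabled build and, e.g.,
 * MALLOC_CONF="prof:true,lg_prof_sample:19", a sampled small allocation is
 * backed by a LARGE_MINCLASS extent so profiling metadata can be attached to
 * it, and arena_dalloc_promoted() demotes it again on the deallocation path
 * so the tcache/large machinery frees it correctly.
 */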

static void
arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, bin_t *bin) {
	/* Dissociate slab from bin. */
	if (slab == bin->slabcur) {
		bin->slabcur = NULL;
	} else {
		szind_t binind = extent_szind_get(slab);
		const bin_info_t *bin_info = &bin_infos[binind];

		/*
		 * The following block's conditional is necessary because if
		 * the slab only contains one region, then it never gets
		 * inserted into the non-full slabs heap.
		 */
		if (bin_info->nregs == 1) {
			arena_bin_slabs_full_remove(arena, bin, slab);
		} else {
			arena_bin_slabs_nonfull_remove(bin, slab);
		}
	}
}

static void
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin) {
	assert(slab != bin->slabcur);

	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	arena_slab_dalloc(tsdn, arena, slab);
	/****************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats) {
		bin->stats.curslabs--;
	}
}

static void
arena_bin_lower_slab(UNUSED tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    bin_t *bin) {
	assert(extent_nfree_get(slab) > 0);

	/*
	 * Make sure that if bin->slabcur is non-NULL, it refers to the
	 * oldest/lowest non-full slab. It is okay to NULL slabcur out rather
	 * than proactively keeping it pointing at the oldest/lowest non-full
	 * slab.
	 */
	if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
		/* Switch slabcur. */
		if (extent_nfree_get(bin->slabcur) > 0) {
			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
		} else {
			arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		}
		bin->slabcur = slab;
		if (config_stats) {
			bin->stats.reslabs++;
		}
	} else {
		arena_bin_slabs_nonfull_insert(bin, slab);
	}
}

static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    void *ptr, bool junked) {
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	szind_t binind = extent_szind_get(slab);
	bin_t *bin = &arena->bins[binind];
	const bin_info_t *bin_info = &bin_infos[binind];

	if (!junked && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, bin_info);
	}

	arena_slab_reg_dalloc(slab, slab_data, ptr);
	unsigned nfree = extent_nfree_get(slab);
	if (nfree == bin_info->nregs) {
		arena_dissociate_bin_slab(arena, slab, bin);
		arena_dalloc_bin_slab(tsdn, arena, slab, bin);
	} else if (nfree == 1 && slab != bin->slabcur) {
		arena_bin_slabs_full_remove(arena, bin, slab);
		arena_bin_lower_slab(tsdn, arena, slab, bin);
	}

	if (config_stats) {
		bin->stats.ndalloc++;
		bin->stats.curregs--;
	}
}

void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    void *ptr) {
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
}

static void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
	szind_t binind = extent_szind_get(extent);
	bin_t *bin = &arena->bins[binind];

	malloc_mutex_lock(tsdn, &bin->lock);
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
	malloc_mutex_unlock(tsdn, &bin->lock);
}

void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
	extent_t *extent = iealloc(tsdn, ptr);
	arena_t *arena = extent_arena_get(extent);

	arena_dalloc_bin(tsdn, arena, extent, ptr);
	arena_decay_tick(tsdn, arena);
}
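
/*
 * Editorial note: extent_snad_comp() orders slabs by (serial number,
 * address), so arena_bin_lower_slab() steers future allocations toward the
 * oldest/lowest slabs.  Keeping younger, higher-address slabs lightly used
 * gives them a chance to empty out and be returned entirely, which limits
 * fragmentation over time.
 */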

bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero) {
	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= LARGE_MAXCLASS);

	if (unlikely(size > LARGE_MAXCLASS)) {
		return true;
	}

	extent_t *extent = iealloc(tsdn, ptr);
	size_t usize_min = sz_s2u(size);
	size_t usize_max = sz_s2u(size + extra);
	if (likely(oldsize <= SMALL_MAXCLASS && usize_min <=
	    SMALL_MAXCLASS)) {
		/*
		 * Avoid moving the allocation if the size class can be left
		 * the same.
		 */
		assert(bin_infos[sz_size2index(oldsize)].reg_size ==
		    oldsize);
		if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
		    sz_size2index(oldsize)) && (size > oldsize || usize_max <
		    oldsize)) {
			return true;
		}

		arena_decay_tick(tsdn, extent_arena_get(extent));
		return false;
	} else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
		return large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
		    zero);
	}

	return true;
}

static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	if (alignment == 0) {
		return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	}
	usize = sz_sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
		return NULL;
	}
	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
}
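
/*
 * Illustrative usage sketch (not part of the original source):
 * arena_ralloc_no_move() is what backs in-place resizing via xallocx(), which
 * never moves the object and reports the usable size actually obtained:
 *
 *	void *p = mallocx(4096, 0);
 *	size_t got = xallocx(p, 8192, 0, 0);
 *
 * If got < 8192, in-place expansion failed and the caller must fall back to
 * rallocx(), which may move the object (the copying path implemented by
 * arena_ralloc() below).
 */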

void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache) {
	size_t usize = sz_s2u(size);
	if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
		return NULL;
	}

	if (likely(usize <= SMALL_MAXCLASS)) {
		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0,
		    zero)) {
			return ptr;
		}
	}

	if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
		return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize,
		    alignment, zero, tcache);
	}

	/*
	 * size and oldsize are different enough that we need to move the
	 * object. In that case, fall back to allocating new space and
	 * copying.
	 */
	void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero, tcache);
	if (ret == NULL) {
		return NULL;
	}

	/*
	 * Junk/zero-filling were already done by
	 * ipalloc()/arena_malloc().
	 */

	size_t copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
	return ret;
}

dss_prec_t
arena_dss_prec_get(arena_t *arena) {
	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
}

bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
	if (!have_dss) {
		return (dss_prec != dss_prec_disabled);
	}
	atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
	return false;
}

ssize_t
arena_dirty_decay_ms_default_get(void) {
	return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

ssize_t
arena_muzzy_decay_ms_default_get(void) {
	return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}
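
/*
 * Illustrative usage sketch (not part of the original source): the defaults
 * above seed newly created arenas and can be set at startup or at runtime,
 * e.g.:
 *
 *	MALLOC_CONF="dirty_decay_ms:10000,muzzy_decay_ms:0"
 *
 * or, affecting arenas created afterward:
 *
 *	ssize_t ms = 10000;
 *	mallctl("arenas.dirty_decay_ms", NULL, NULL, (void *)&ms,
 *	    sizeof(ms));
 *
 * A decay time of 0 purges unused pages immediately; -1 disables purging
 * entirely.
 */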

bool
arena_retain_grow_limit_get_set(tsd_t *tsd, arena_t *arena, size_t *old_limit,
    size_t *new_limit) {
	assert(opt_retain);

	pszind_t new_ind JEMALLOC_CC_SILENCE_INIT(0);
	if (new_limit != NULL) {
		size_t limit = *new_limit;
		/* Grow no more than the new limit. */
		if ((new_ind = sz_psz2ind(limit + 1) - 1) >
		    EXTENT_GROW_MAX_PIND) {
			return true;
		}
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &arena->extent_grow_mtx);
	if (old_limit != NULL) {
		*old_limit = sz_pind2sz(arena->retain_grow_limit);
	}
	if (new_limit != NULL) {
		arena->retain_grow_limit = new_ind;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->extent_grow_mtx);

	return false;
}

unsigned
arena_nthreads_get(arena_t *arena, bool internal) {
	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
}

void
arena_nthreads_inc(arena_t *arena, bool internal) {
	atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

void
arena_nthreads_dec(arena_t *arena, bool internal) {
	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

size_t
arena_extent_sn_next(arena_t *arena) {
	return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
}

arena_t *
arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;
	base_t *base;
	unsigned i;

	if (ind == 0) {
		base = b0get();
	} else {
		base = base_new(tsdn, ind, extent_hooks);
		if (base == NULL) {
			return NULL;
		}
	}

	arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
	if (arena == NULL) {
		goto label_error;
	}

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	arena->last_thd = NULL;

	if (config_stats) {
		if (arena_stats_init(tsdn, &arena->stats)) {
			goto label_error;
		}

		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
			goto label_error;
		}
	}

	if (config_prof) {
		if (prof_accum_init(tsdn, &arena->prof_accum)) {
			goto label_error;
		}
	}

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena
		 * reduces the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at
		 * the cost of test repeatability. For debug builds, instead
		 * use a deterministic seed.
		 */
		atomic_store_zu(&arena->offset_state, config_debug ? ind :
		    (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
	}

	atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);

	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
	    ATOMIC_RELAXED);

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);

	extent_list_init(&arena->large);
	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/*
	 * Delay coalescing for dirty extents despite the disruptive effect on
	 * memory layout for best-fit extent allocation, since cached extents
	 * are likely to be reused soon after deallocation, and the cost of
	 * merging/splitting extents is non-trivial.
	 */
	if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
	    true)) {
		goto label_error;
	}
	/*
	 * Coalesce muzzy extents immediately, because operations on them are
	 * in the critical path much less often than for dirty extents.
	 */
	if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
	    false)) {
		goto label_error;
	}
	/*
	 * Coalesce retained extents immediately, in part because they will
	 * never be evicted (and therefore there's no opportunity for delayed
	 * coalescing), but also because operations on retained extents are
	 * not in the critical path.
	 */
	if (extents_init(tsdn, &arena->extents_retained,
	    extent_state_retained, false)) {
		goto label_error;
	}

	if (arena_decay_init(&arena->decay_dirty,
	    arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
		goto label_error;
	}
	if (arena_decay_init(&arena->decay_muzzy,
	    arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
		goto label_error;
	}

	arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
	arena->retain_grow_limit = EXTENT_GROW_MAX_PIND;
	if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	extent_avail_new(&arena->extent_avail);
	if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
	    WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		bool err = bin_init(&arena->bins[i]);
		if (err) {
			goto label_error;
		}
	}

	arena->base = base;
	/* Set arena before creating background threads. */
	arena_set(ind, arena);

	nstime_init(&arena->create_time, 0);
	nstime_update(&arena->create_time);

	/* We don't support reentrancy for arena 0 bootstrapping. */
	if (ind != 0) {
		/*
		 * If we're here, then arena 0 already exists, so
		 * bootstrapping is done enough that we should have tsd.
		 */
		assert(!tsdn_null(tsdn));
		pre_reentrancy(tsdn_tsd(tsdn), arena);
		if (hooks_arena_new_hook) {
			hooks_arena_new_hook();
		}
		post_reentrancy(tsdn_tsd(tsdn));
	}

	return arena;
label_error:
	if (ind != 0) {
		base_delete(tsdn, base);
	}
	return NULL;
}

void
arena_boot(void) {
	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
#define REGIND_bin_yes(index, reg_size)					\
	div_init(&arena_binind_div_info[(index)], (reg_size));
#define REGIND_bin_no(index, reg_size)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs,		\
    lg_delta_lookup)							\
	REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta << lg_delta))
	SIZE_CLASSES
#undef REGIND_bin_yes
#undef REGIND_bin_no
#undef SC
}
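
/*
 * Editorial note (illustrative): arena_boot() precomputes a div_info_t per
 * small size class so that the hot pointer-to-region-index computation can
 * replace a hardware divide by reg_size with a multiply-and-shift.
 * Conceptually, for a pointer into a slab:
 *
 *	size_t diff = (size_t)((uintptr_t)ptr -
 *	    (uintptr_t)extent_addr_get(slab));
 *	size_t regind = div_compute(&arena_binind_div_info[binind], diff);
 *
 * which agrees with diff / bin_infos[binind].reg_size for all in-range
 * offsets, at a fraction of the cost.
 */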

void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
	if (config_stats) {
		malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
	extents_prefork(tsdn, &arena->extents_dirty);
	extents_prefork(tsdn, &arena->extents_muzzy);
	extents_prefork(tsdn, &arena->extents_retained);
}

void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
}

void
arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
	base_prefork(tsdn, arena->base);
}

void
arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->large_mtx);
}

void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
	for (unsigned i = 0; i < NBINS; i++) {
		bin_prefork(tsdn, &arena->bins[i]);
	}
}
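
/*
 * Editorial note: the prefork functions above are invoked in numbered order
 * by jemalloc's fork(2) handling so that every arena mutex is held across the
 * fork, in an order consistent with witness ranks.  The postfork functions
 * below then release (parent) or reinitialize (child) the same mutexes,
 * largely in reverse acquisition order.
 */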

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	for (i = 0; i < NBINS; i++) {
		bin_postfork_parent(tsdn, &arena->bins[i]);
	}
	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
	base_postfork_parent(tsdn, arena->base);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
	extents_postfork_parent(tsdn, &arena->extents_dirty);
	extents_postfork_parent(tsdn, &arena->extents_muzzy);
	extents_postfork_parent(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, false);
	}
	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, true);
	}
	if (config_stats) {
		ql_new(&arena->tcache_ql);
		ql_new(&arena->cache_bin_array_descriptor_ql);
		tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
		if (tcache != NULL && tcache->arena == arena) {
			ql_elm_new(tcache, link);
			ql_tail_insert(&arena->tcache_ql, tcache, link);
			cache_bin_array_descriptor_init(
			    &tcache->cache_bin_array_descriptor,
			    tcache->bins_small, tcache->bins_large);
			ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
			    &tcache->cache_bin_array_descriptor, link);
		}
	}

	for (i = 0; i < NBINS; i++) {
		bin_postfork_child(tsdn, &arena->bins[i]);
	}
	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
	base_postfork_child(tsdn, arena->base);
	malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
	extents_postfork_child(tsdn, &arena->extents_dirty);
	extents_postfork_child(tsdn, &arena->extents_muzzy);
	extents_postfork_child(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
	}
}