#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"

/******************************************************************************/
/* Data. */

rtree_t extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t extent_mutex_pool;

size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;

/* One bit per page-size class, plus one extra slot (SC_NPSIZES + 1 total). */
static const bitmap_info_t extents_bitmap_info =
    BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);

/*
 * Forward declarations for the default extent hook implementations and the
 * internal *_impl helpers they pair with.
 */
static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit,
    unsigned arena_ind);
static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
static bool extent_decommit_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#ifdef PAGES_CAN_PURGE_LAZY
static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
#ifdef PAGES_CAN_PURGE_FORCED
static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t size_a, size_t size_b, bool committed,
    unsigned arena_ind);
static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained);
static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
    size_t size_a, void *addr_b, size_t size_b, bool committed,
    unsigned arena_ind);
static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained);

/*
 * Default hook table; purge entries are NULL when the platform cannot purge
 * in the corresponding mode.
 */
const extent_hooks_t extent_hooks_default = {
	extent_alloc_default,
	extent_dalloc_default,
	extent_destroy_default,
	extent_commit_default,
	extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
	,
	extent_purge_lazy_default
#else
	,
	NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
	,
	extent_purge_forced_default
#else
	,
	NULL
#endif
	,
	extent_split_default,
	extent_merge_default
};

/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
static atomic_zu_t highpages;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
    size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
    bool *zero, bool *commit, bool growing_retained);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
    bool growing_retained);

/******************************************************************************/

#define ATTR_NONE /* does nothing */

/* Generate pairing-heap functions for the per-arena extent_avail tree. */
ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
    extent_esnead_comp)

#undef ATTR_NONE

/* Outcome of one attempt to lock the extent mapped by an rtree leaf. */
typedef enum {
	lock_result_success,
	lock_result_failure,
	lock_result_no_extent
} lock_result_t;

/*
 * Attempt to lock the extent currently mapped by *elm.  On success, *result
 * receives the locked extent.  lock_result_failure means the leaf->extent
 * mapping changed while the lock was being acquired (caller should retry);
 * lock_result_no_extent means there is nothing eligible to lock.
 */
static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
    extent_t **result, bool inactive_only) {
	extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
	    elm, true);

	/* Slab implies active extents and should be skipped. */
	if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
	    &extents_rtree, elm, true))) {
		return lock_result_no_extent;
	}

	/*
	 * It's possible that the extent changed out from under us, and with it
	 * the leaf->extent mapping. We have to recheck while holding the lock.
	 */
	extent_lock(tsdn, extent1);
	extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
	    &extents_rtree, elm, true);

	if (extent1 == extent2) {
		*result = extent1;
		return lock_result_success;
	} else {
		/* Mapping changed: drop the stale lock and let caller retry. */
		extent_unlock(tsdn, extent1);
		return lock_result_failure;
	}
}

/*
 * Returns a pool-locked extent_t * if there's one associated with the given
 * address, and NULL otherwise.
161b7eaed25SJason Evans */ 162b7eaed25SJason Evans static extent_t * 163*c5ad8142SEric van Gyzen extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr, 164*c5ad8142SEric van Gyzen bool inactive_only) { 165b7eaed25SJason Evans extent_t *ret = NULL; 166b7eaed25SJason Evans rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree, 167b7eaed25SJason Evans rtree_ctx, (uintptr_t)addr, false, false); 168b7eaed25SJason Evans if (elm == NULL) { 169b7eaed25SJason Evans return NULL; 170b7eaed25SJason Evans } 171b7eaed25SJason Evans lock_result_t lock_result; 172b7eaed25SJason Evans do { 173*c5ad8142SEric van Gyzen lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret, 174*c5ad8142SEric van Gyzen inactive_only); 175b7eaed25SJason Evans } while (lock_result == lock_result_failure); 176b7eaed25SJason Evans return ret; 177b7eaed25SJason Evans } 178b7eaed25SJason Evans 179b7eaed25SJason Evans extent_t * 180b7eaed25SJason Evans extent_alloc(tsdn_t *tsdn, arena_t *arena) { 181b7eaed25SJason Evans malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); 182b7eaed25SJason Evans extent_t *extent = extent_avail_first(&arena->extent_avail); 183b7eaed25SJason Evans if (extent == NULL) { 184b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); 185b7eaed25SJason Evans return base_alloc_extent(tsdn, arena->base); 186b7eaed25SJason Evans } 187b7eaed25SJason Evans extent_avail_remove(&arena->extent_avail, extent); 188*c5ad8142SEric van Gyzen atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED); 189b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); 190b7eaed25SJason Evans return extent; 191b7eaed25SJason Evans } 192b7eaed25SJason Evans 193b7eaed25SJason Evans void 194b7eaed25SJason Evans extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { 195b7eaed25SJason Evans malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); 196b7eaed25SJason Evans extent_avail_insert(&arena->extent_avail, extent); 
197*c5ad8142SEric van Gyzen atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED); 198b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); 199b7eaed25SJason Evans } 200b7eaed25SJason Evans 201b7eaed25SJason Evans extent_hooks_t * 202b7eaed25SJason Evans extent_hooks_get(arena_t *arena) { 203b7eaed25SJason Evans return base_extent_hooks_get(arena->base); 204b7eaed25SJason Evans } 205b7eaed25SJason Evans 206b7eaed25SJason Evans extent_hooks_t * 207b7eaed25SJason Evans extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) { 208b7eaed25SJason Evans background_thread_info_t *info; 209b7eaed25SJason Evans if (have_background_thread) { 210b7eaed25SJason Evans info = arena_background_thread_info_get(arena); 211b7eaed25SJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); 212b7eaed25SJason Evans } 213b7eaed25SJason Evans extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks); 214b7eaed25SJason Evans if (have_background_thread) { 215b7eaed25SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); 216b7eaed25SJason Evans } 217b7eaed25SJason Evans 218b7eaed25SJason Evans return ret; 219b7eaed25SJason Evans } 220b7eaed25SJason Evans 221b7eaed25SJason Evans static void 222b7eaed25SJason Evans extent_hooks_assure_initialized(arena_t *arena, 223b7eaed25SJason Evans extent_hooks_t **r_extent_hooks) { 224b7eaed25SJason Evans if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) { 225b7eaed25SJason Evans *r_extent_hooks = extent_hooks_get(arena); 226b7eaed25SJason Evans } 227b7eaed25SJason Evans } 228a4bd5210SJason Evans 2298244f2aaSJason Evans #ifndef JEMALLOC_JET 2308244f2aaSJason Evans static 2318244f2aaSJason Evans #endif 2328244f2aaSJason Evans size_t 2338244f2aaSJason Evans extent_size_quantize_floor(size_t size) { 2347fa7f12fSJason Evans size_t ret; 235b7eaed25SJason Evans pszind_t pind; 236d0e79aa3SJason Evans 2377fa7f12fSJason Evans assert(size > 0); 238b7eaed25SJason Evans assert((size & PAGE_MASK) == 
0); 2397fa7f12fSJason Evans 240b7eaed25SJason Evans pind = sz_psz2ind(size - sz_large_pad + 1); 241b7eaed25SJason Evans if (pind == 0) { 242b7eaed25SJason Evans /* 243b7eaed25SJason Evans * Avoid underflow. This short-circuit would also do the right 244b7eaed25SJason Evans * thing for all sizes in the range for which there are 245b7eaed25SJason Evans * PAGE-spaced size classes, but it's simplest to just handle 246b7eaed25SJason Evans * the one case that would cause erroneous results. 247b7eaed25SJason Evans */ 248b7eaed25SJason Evans return size; 249d0e79aa3SJason Evans } 250b7eaed25SJason Evans ret = sz_pind2sz(pind - 1) + sz_large_pad; 2517fa7f12fSJason Evans assert(ret <= size); 252b7eaed25SJason Evans return ret; 253a4bd5210SJason Evans } 254a4bd5210SJason Evans 255b7eaed25SJason Evans #ifndef JEMALLOC_JET 256b7eaed25SJason Evans static 257b7eaed25SJason Evans #endif 2588244f2aaSJason Evans size_t 2598244f2aaSJason Evans extent_size_quantize_ceil(size_t size) { 2608244f2aaSJason Evans size_t ret; 2618244f2aaSJason Evans 2628244f2aaSJason Evans assert(size > 0); 263*c5ad8142SEric van Gyzen assert(size - sz_large_pad <= SC_LARGE_MAXCLASS); 264b7eaed25SJason Evans assert((size & PAGE_MASK) == 0); 2658244f2aaSJason Evans 2668244f2aaSJason Evans ret = extent_size_quantize_floor(size); 2678244f2aaSJason Evans if (ret < size) { 2688244f2aaSJason Evans /* 2698244f2aaSJason Evans * Skip a quantization that may have an adequately large extent, 2708244f2aaSJason Evans * because under-sized extents may be mixed in. This only 2718244f2aaSJason Evans * happens when an unusual size is requested, i.e. for aligned 2728244f2aaSJason Evans * allocation, and is just one of several places where linear 2738244f2aaSJason Evans * search would potentially find sufficiently aligned available 2748244f2aaSJason Evans * memory somewhere lower. 
		 */
		ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
		    sz_large_pad;
	}
	return ret;
}

/* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)

/*
 * Initialize a set of extents (extents_t) holding extents in `state`.
 * Returns true on error (mutex initialization failure).
 */
bool
extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
    bool delay_coalesce) {
	if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
		extent_heap_new(&extents->heaps[i]);
	}
	/*
	 * The bitmap tracks which size-class heaps are empty (see
	 * extents_insert_locked / extents_remove_locked); all heaps start
	 * empty.
	 */
	bitmap_init(extents->bitmap, &extents_bitmap_info, true);
	extent_list_init(&extents->lru);
	atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
	extents->state = state;
	extents->delay_coalesce = delay_coalesce;
	return false;
}

extent_state_t
extents_state_get(const extents_t *extents) {
	return extents->state;
}

size_t
extents_npages_get(extents_t *extents) {
	return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
}

/* Number of extents of page-size class pind currently in the set. */
size_t
extents_nextents_get(extents_t *extents, pszind_t pind) {
	return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
}

/* Total bytes held in extents of page-size class pind in the set. */
size_t
extents_nbytes_get(extents_t *extents, pszind_t pind) {
	return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
}

/*
 * Bump per-size-class stats.  Uses load+store rather than fetch-add —
 * presumably safe because callers hold extents->mtx (both call sites below
 * are in *_locked functions); NOTE(review): confirm no unlocked writers.
 */
static void
extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) {
	size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED);
	cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED);
}

/* Inverse of extents_stats_add. */
static void
extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) {
	size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED);
	cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
	atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED);
}

/*
 * Insert extent into the set: size-class heap (keyed by quantized size),
 * empty-heap bitmap, LRU list, stats, and page accounting.  Caller must hold
 * extents->mtx, and the extent's state must already match the set's.
 */
static void
extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	/* Heap transitions empty -> nonempty: update the bitmap. */
	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_unset(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	extent_heap_insert(&extents->heaps[pind], extent);

	if (config_stats) {
		extents_stats_add(extents, pind, size);
	}

	extent_list_append(&extents->lru, extent);
	size_t npages = size >> LG_PAGE;
	/*
	 * All modifications to npages hold the mutex (as asserted above), so we
	 * don't need an atomic fetch-add; we can get by with a load followed by
	 * a store.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	atomic_store_zu(&extents->npages, cur_extents_npages + npages,
	    ATOMIC_RELAXED);
}

/* Inverse of extents_insert_locked.  Caller must hold extents->mtx. */
static void
extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	extent_heap_remove(&extents->heaps[pind], extent);

	if (config_stats) {
		extents_stats_sub(extents, pind, size);
	}

	/* Heap transitions nonempty -> empty: update the bitmap. */
	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_set(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	extent_list_remove(&extents->lru, extent);
	size_t npages = size >> LG_PAGE;
	/*
	 * As in extents_insert_locked, we hold extents->mtx and so don't need
	 * atomic operations for updating extents->npages.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	assert(cur_extents_npages >= npages);
	atomic_store_zu(&extents->npages,
	    cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}

/*
 * Find an extent with size [min_size, max_size) to satisfy the alignment
 * requirement.  For each size, try only the first extent in the heap.
4040ef50b4eSJason Evans */ 4050ef50b4eSJason Evans static extent_t * 4060ef50b4eSJason Evans extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size, 4070ef50b4eSJason Evans size_t alignment) { 4080ef50b4eSJason Evans pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size)); 4090ef50b4eSJason Evans pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size)); 4100ef50b4eSJason Evans 4110ef50b4eSJason Evans for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, 4120ef50b4eSJason Evans &extents_bitmap_info, (size_t)pind); i < pind_max; i = 4130ef50b4eSJason Evans (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, 4140ef50b4eSJason Evans (size_t)i+1)) { 415*c5ad8142SEric van Gyzen assert(i < SC_NPSIZES); 4160ef50b4eSJason Evans assert(!extent_heap_empty(&extents->heaps[i])); 4170ef50b4eSJason Evans extent_t *extent = extent_heap_first(&extents->heaps[i]); 4180ef50b4eSJason Evans uintptr_t base = (uintptr_t)extent_base_get(extent); 4190ef50b4eSJason Evans size_t candidate_size = extent_size_get(extent); 4200ef50b4eSJason Evans assert(candidate_size >= min_size); 4210ef50b4eSJason Evans 4220ef50b4eSJason Evans uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base, 4230ef50b4eSJason Evans PAGE_CEILING(alignment)); 4240ef50b4eSJason Evans if (base > next_align || base + candidate_size <= next_align) { 4250ef50b4eSJason Evans /* Overflow or not crossing the next alignment. */ 4260ef50b4eSJason Evans continue; 4270ef50b4eSJason Evans } 4280ef50b4eSJason Evans 4290ef50b4eSJason Evans size_t leadsize = next_align - base; 4300ef50b4eSJason Evans if (candidate_size - leadsize >= min_size) { 4310ef50b4eSJason Evans return extent; 4320ef50b4eSJason Evans } 4330ef50b4eSJason Evans } 4340ef50b4eSJason Evans 4350ef50b4eSJason Evans return NULL; 4360ef50b4eSJason Evans } 4370ef50b4eSJason Evans 438b7eaed25SJason Evans /* 439b7eaed25SJason Evans * Do first-fit extent selection, i.e. 
 select the oldest/lowest extent that is
 * large enough.
 */
static extent_t *
extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
	extent_t *ret = NULL;

	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));

	if (!maps_coalesce && !opt_retain) {
		/*
		 * No split / merge allowed (Windows w/o retain). Try exact fit
		 * only.
		 */
		return extent_heap_empty(&extents->heaps[pind]) ? NULL :
		    extent_heap_first(&extents->heaps[pind]);
	}

	/* Walk nonempty heaps from the smallest class that can fit upward. */
	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
	    &extents_bitmap_info, (size_t)pind);
	    i < SC_NPSIZES + 1;
	    i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)i+1)) {
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		assert(extent_size_get(extent) >= size);
		/*
		 * In order to reduce fragmentation, avoid reusing and splitting
		 * large extents for much smaller sizes.
		 *
		 * Only do check for dirty extents (delay_coalesce).
		 */
		if (extents->delay_coalesce &&
		    (sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
			break;
		}
		/* Keep the oldest/lowest candidate seen so far. */
		if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
			ret = extent;
		}
		if (i == SC_NPSIZES) {
			break;
		}
		assert(i < SC_NPSIZES);
	}

	return ret;
}

/*
 * Do first-fit extent selection, where the selection policy choice is
 * based on extents->delay_coalesce.
 */
static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t esize, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);

	/* A size this large guarantees the alignment can be carved out. */
	size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (max_size < esize) {
		return NULL;
	}

	extent_t *extent =
	    extents_first_fit_locked(tsdn, arena, extents, max_size);

	if (alignment > PAGE && extent == NULL) {
		/*
		 * max_size guarantees the alignment requirement but is rather
		 * pessimistic. Next we try to satisfy the aligned allocation
		 * with sizes in [esize, max_size).
		 */
		extent = extents_fit_alignment(extents, esize, max_size,
		    alignment);
	}

	return extent;
}

/*
 * Try to coalesce an extent from a delay-coalesce set with its neighbors.
 * Returns true if nothing was coalesced; false if the (possibly merged)
 * extent was reinserted into the set.
 */
static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent) {
	/* Temporarily mark active for the duration of the coalesce attempt. */
	extent_state_set(extent, extent_state_active);
	bool coalesced;
	extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, extent, &coalesced, false);
	extent_state_set(extent, extents_state_get(extents));

	if (!coalesced) {
		return true;
	}
	extents_insert_locked(tsdn, extents, extent);
	return false;
}

/*
 * Allocate an extent from the given set via extent_recycle(); NULL if no
 * suitable extent is available.
 */
extent_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	assert(size + pad != 0);
	assert(alignment != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
	    new_addr, size, pad, alignment, slab, szind, zero, commit, false);
	assert(extent == NULL ||
 extent_dumpable_get(extent));
	return extent;
}

/*
 * Return an extent to the given set, resetting its addr to base and clearing
 * the zeroed flag; extent_record() handles registration/coalescing.
 */
void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	assert(extent_dumpable_get(extent));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Drop any alignment offset before caching. */
	extent_addr_set(extent, extent_base_get(extent));
	extent_zeroed_set(extent, false);

	extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
}

/*
 * Remove and return the LRU extent from the set, but only while the set holds
 * more than npages_min pages; returns NULL otherwise.  For delay-coalesce
 * sets, the extent is fully coalesced before being returned.
 */
extent_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, size_t npages_min) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	malloc_mutex_lock(tsdn, &extents->mtx);

	/*
	 * Get the LRU coalesced extent, if any. If coalescing was delayed,
	 * the loop will iterate until the LRU extent is fully coalesced.
	 */
	extent_t *extent;
	while (true) {
		/* Get the LRU extent, if any. */
		extent = extent_list_first(&extents->lru);
		if (extent == NULL) {
			goto label_return;
		}
		/* Check the eviction limit. */
		size_t extents_npages = atomic_load_zu(&extents->npages,
		    ATOMIC_RELAXED);
		if (extents_npages <= npages_min) {
			extent = NULL;
			goto label_return;
		}
		extents_remove_locked(tsdn, extents, extent);
		if (!extents->delay_coalesce) {
			break;
		}
		/* Try to coalesce. */
		if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
		    rtree_ctx, extents, extent)) {
			break;
		}
		/*
		 * The LRU extent was just coalesced and the result placed in
		 * the LRU at its neighbor's position. Start over.
		 */
	}

	/*
	 * Either mark the extent active or deregister it to protect against
	 * concurrent operations.
	 */
	switch (extents_state_get(extents)) {
	case extent_state_active:
		not_reached();
	case extent_state_dirty:
	case extent_state_muzzy:
		extent_state_set(extent, extent_state_active);
		break;
	case extent_state_retained:
		extent_deregister(tsdn, extent);
		break;
	default:
		not_reached();
	}

label_return:
	malloc_mutex_unlock(tsdn, &extents->mtx);
	return extent;
}

/*
 * This can only happen when we fail to allocate a new extent struct (which
 * indicates OOM), e.g. when trying to split an existing extent.
 */
static void
extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent, bool growing_retained) {
	size_t sz = extent_size_get(extent);
	if (config_stats) {
		arena_stats_accum_zu(&arena->stats.abandoned_vm, sz);
	}
	/*
	 * Leak extent after making sure its pages have already been purged, so
	 * that this is only a virtual memory leak.
	 */
	if (extents_state_get(extents) == extent_state_dirty) {
		/* Fall back to forced purging if lazy purging fails. */
		if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
		    extent, 0, sz, growing_retained)) {
			extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
			    extent, 0, extent_size_get(extent),
			    growing_retained);
		}
	}
	/* Only the extent_t struct is recycled; the pages are abandoned. */
	extent_dalloc(tsdn, arena, extent);
}

void
extents_prefork(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_prefork(tsdn, &extents->mtx);
}

void
extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_parent(tsdn, &extents->mtx);
}

void
extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_child(tsdn, &extents->mtx);
}

/*
 * Move an active extent into the set, giving it the set's state.  Caller must
 * hold extents->mtx (extents_insert_locked asserts it).
 */
static void
extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extent_state_active);

	extent_state_set(extent, extents_state_get(extents));
	extents_insert_locked(tsdn, extents, extent);
}

static void
extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t
*extent) { 684b7eaed25SJason Evans malloc_mutex_lock(tsdn, &extents->mtx); 6850ef50b4eSJason Evans extent_deactivate_locked(tsdn, arena, extents, extent); 686b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &extents->mtx); 687b7eaed25SJason Evans } 688b7eaed25SJason Evans 689b7eaed25SJason Evans static void 690b7eaed25SJason Evans extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, 6910ef50b4eSJason Evans extent_t *extent) { 692b7eaed25SJason Evans assert(extent_arena_get(extent) == arena); 693b7eaed25SJason Evans assert(extent_state_get(extent) == extents_state_get(extents)); 694b7eaed25SJason Evans 6950ef50b4eSJason Evans extents_remove_locked(tsdn, extents, extent); 696b7eaed25SJason Evans extent_state_set(extent, extent_state_active); 697b7eaed25SJason Evans } 698b7eaed25SJason Evans 699b7eaed25SJason Evans static bool 700b7eaed25SJason Evans extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, 701b7eaed25SJason Evans const extent_t *extent, bool dependent, bool init_missing, 702b7eaed25SJason Evans rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) { 703b7eaed25SJason Evans *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, 704b7eaed25SJason Evans (uintptr_t)extent_base_get(extent), dependent, init_missing); 705b7eaed25SJason Evans if (!dependent && *r_elm_a == NULL) { 706b7eaed25SJason Evans return true; 707b7eaed25SJason Evans } 708b7eaed25SJason Evans assert(*r_elm_a != NULL); 709b7eaed25SJason Evans 710b7eaed25SJason Evans *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, 711b7eaed25SJason Evans (uintptr_t)extent_last_get(extent), dependent, init_missing); 712b7eaed25SJason Evans if (!dependent && *r_elm_b == NULL) { 713b7eaed25SJason Evans return true; 714b7eaed25SJason Evans } 715b7eaed25SJason Evans assert(*r_elm_b != NULL); 716b7eaed25SJason Evans 717b7eaed25SJason Evans return false; 718b7eaed25SJason Evans } 719b7eaed25SJason Evans 720b7eaed25SJason Evans static void 
721b7eaed25SJason Evans extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a, 722b7eaed25SJason Evans rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) { 723b7eaed25SJason Evans rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab); 724b7eaed25SJason Evans if (elm_b != NULL) { 725b7eaed25SJason Evans rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind, 726b7eaed25SJason Evans slab); 727b7eaed25SJason Evans } 728b7eaed25SJason Evans } 729b7eaed25SJason Evans 730b7eaed25SJason Evans static void 731b7eaed25SJason Evans extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent, 732b7eaed25SJason Evans szind_t szind) { 733b7eaed25SJason Evans assert(extent_slab_get(extent)); 734b7eaed25SJason Evans 735b7eaed25SJason Evans /* Register interior. */ 736b7eaed25SJason Evans for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { 737b7eaed25SJason Evans rtree_write(tsdn, &extents_rtree, rtree_ctx, 738b7eaed25SJason Evans (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << 739b7eaed25SJason Evans LG_PAGE), extent, szind, true); 740b7eaed25SJason Evans } 741b7eaed25SJason Evans } 742b7eaed25SJason Evans 743b7eaed25SJason Evans static void 744b7eaed25SJason Evans extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) { 745b7eaed25SJason Evans cassert(config_prof); 746b7eaed25SJason Evans /* prof_gdump() requirement. 
*/ 747b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 748b7eaed25SJason Evans WITNESS_RANK_CORE, 0); 749b7eaed25SJason Evans 750b7eaed25SJason Evans if (opt_prof && extent_state_get(extent) == extent_state_active) { 751b7eaed25SJason Evans size_t nadd = extent_size_get(extent) >> LG_PAGE; 752b7eaed25SJason Evans size_t cur = atomic_fetch_add_zu(&curpages, nadd, 753b7eaed25SJason Evans ATOMIC_RELAXED) + nadd; 754b7eaed25SJason Evans size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED); 755b7eaed25SJason Evans while (cur > high && !atomic_compare_exchange_weak_zu( 756b7eaed25SJason Evans &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) { 757b7eaed25SJason Evans /* 758b7eaed25SJason Evans * Don't refresh cur, because it may have decreased 759b7eaed25SJason Evans * since this thread lost the highpages update race. 760b7eaed25SJason Evans * Note that high is updated in case of CAS failure. 761b7eaed25SJason Evans */ 762b7eaed25SJason Evans } 763b7eaed25SJason Evans if (cur > high && prof_gdump_get_unlocked()) { 764b7eaed25SJason Evans prof_gdump(tsdn); 765b7eaed25SJason Evans } 766b7eaed25SJason Evans } 767b7eaed25SJason Evans } 768b7eaed25SJason Evans 769b7eaed25SJason Evans static void 770b7eaed25SJason Evans extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) { 771b7eaed25SJason Evans cassert(config_prof); 772b7eaed25SJason Evans 773b7eaed25SJason Evans if (opt_prof && extent_state_get(extent) == extent_state_active) { 774b7eaed25SJason Evans size_t nsub = extent_size_get(extent) >> LG_PAGE; 775b7eaed25SJason Evans assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub); 776b7eaed25SJason Evans atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED); 777b7eaed25SJason Evans } 778b7eaed25SJason Evans } 779b7eaed25SJason Evans 780b7eaed25SJason Evans static bool 781b7eaed25SJason Evans extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) { 782b7eaed25SJason Evans rtree_ctx_t rtree_ctx_fallback; 
783b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 784b7eaed25SJason Evans rtree_leaf_elm_t *elm_a, *elm_b; 785b7eaed25SJason Evans 786b7eaed25SJason Evans /* 787b7eaed25SJason Evans * We need to hold the lock to protect against a concurrent coalesce 788b7eaed25SJason Evans * operation that sees us in a partial state. 789b7eaed25SJason Evans */ 790b7eaed25SJason Evans extent_lock(tsdn, extent); 791b7eaed25SJason Evans 792b7eaed25SJason Evans if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true, 793b7eaed25SJason Evans &elm_a, &elm_b)) { 794*c5ad8142SEric van Gyzen extent_unlock(tsdn, extent); 795b7eaed25SJason Evans return true; 796b7eaed25SJason Evans } 797b7eaed25SJason Evans 798b7eaed25SJason Evans szind_t szind = extent_szind_get_maybe_invalid(extent); 799b7eaed25SJason Evans bool slab = extent_slab_get(extent); 800b7eaed25SJason Evans extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab); 801b7eaed25SJason Evans if (slab) { 802b7eaed25SJason Evans extent_interior_register(tsdn, rtree_ctx, extent, szind); 803b7eaed25SJason Evans } 804b7eaed25SJason Evans 805b7eaed25SJason Evans extent_unlock(tsdn, extent); 806b7eaed25SJason Evans 807b7eaed25SJason Evans if (config_prof && gdump_add) { 808b7eaed25SJason Evans extent_gdump_add(tsdn, extent); 809b7eaed25SJason Evans } 810b7eaed25SJason Evans 811b7eaed25SJason Evans return false; 812b7eaed25SJason Evans } 813b7eaed25SJason Evans 814b7eaed25SJason Evans static bool 815b7eaed25SJason Evans extent_register(tsdn_t *tsdn, extent_t *extent) { 816b7eaed25SJason Evans return extent_register_impl(tsdn, extent, true); 817b7eaed25SJason Evans } 818b7eaed25SJason Evans 819b7eaed25SJason Evans static bool 820b7eaed25SJason Evans extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) { 821b7eaed25SJason Evans return extent_register_impl(tsdn, extent, false); 822b7eaed25SJason Evans } 823b7eaed25SJason Evans 824b7eaed25SJason Evans static void 
825b7eaed25SJason Evans extent_reregister(tsdn_t *tsdn, extent_t *extent) { 826b7eaed25SJason Evans bool err = extent_register(tsdn, extent); 827b7eaed25SJason Evans assert(!err); 828b7eaed25SJason Evans } 829b7eaed25SJason Evans 8300ef50b4eSJason Evans /* 8310ef50b4eSJason Evans * Removes all pointers to the given extent from the global rtree indices for 8320ef50b4eSJason Evans * its interior. This is relevant for slab extents, for which we need to do 8330ef50b4eSJason Evans * metadata lookups at places other than the head of the extent. We deregister 8340ef50b4eSJason Evans * on the interior, then, when an extent moves from being an active slab to an 8350ef50b4eSJason Evans * inactive state. 8360ef50b4eSJason Evans */ 837b7eaed25SJason Evans static void 838b7eaed25SJason Evans extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, 839b7eaed25SJason Evans extent_t *extent) { 840b7eaed25SJason Evans size_t i; 841b7eaed25SJason Evans 842b7eaed25SJason Evans assert(extent_slab_get(extent)); 843b7eaed25SJason Evans 844b7eaed25SJason Evans for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { 845b7eaed25SJason Evans rtree_clear(tsdn, &extents_rtree, rtree_ctx, 846b7eaed25SJason Evans (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << 847b7eaed25SJason Evans LG_PAGE)); 848b7eaed25SJason Evans } 849b7eaed25SJason Evans } 850b7eaed25SJason Evans 8510ef50b4eSJason Evans /* 8520ef50b4eSJason Evans * Removes all pointers to the given extent from the global rtree. 
8530ef50b4eSJason Evans */ 854b7eaed25SJason Evans static void 8550ef50b4eSJason Evans extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) { 856b7eaed25SJason Evans rtree_ctx_t rtree_ctx_fallback; 857b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 858b7eaed25SJason Evans rtree_leaf_elm_t *elm_a, *elm_b; 859b7eaed25SJason Evans extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false, 860b7eaed25SJason Evans &elm_a, &elm_b); 861b7eaed25SJason Evans 862b7eaed25SJason Evans extent_lock(tsdn, extent); 863b7eaed25SJason Evans 864*c5ad8142SEric van Gyzen extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false); 865b7eaed25SJason Evans if (extent_slab_get(extent)) { 866b7eaed25SJason Evans extent_interior_deregister(tsdn, rtree_ctx, extent); 867b7eaed25SJason Evans extent_slab_set(extent, false); 868b7eaed25SJason Evans } 869b7eaed25SJason Evans 870b7eaed25SJason Evans extent_unlock(tsdn, extent); 871b7eaed25SJason Evans 8720ef50b4eSJason Evans if (config_prof && gdump) { 873b7eaed25SJason Evans extent_gdump_sub(tsdn, extent); 874b7eaed25SJason Evans } 875b7eaed25SJason Evans } 876b7eaed25SJason Evans 8770ef50b4eSJason Evans static void 8780ef50b4eSJason Evans extent_deregister(tsdn_t *tsdn, extent_t *extent) { 8790ef50b4eSJason Evans extent_deregister_impl(tsdn, extent, true); 8800ef50b4eSJason Evans } 8810ef50b4eSJason Evans 8820ef50b4eSJason Evans static void 8830ef50b4eSJason Evans extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) { 8840ef50b4eSJason Evans extent_deregister_impl(tsdn, extent, false); 8850ef50b4eSJason Evans } 8860ef50b4eSJason Evans 8870ef50b4eSJason Evans /* 8880ef50b4eSJason Evans * Tries to find and remove an extent from extents that can be used for the 8890ef50b4eSJason Evans * given allocation request. 
8900ef50b4eSJason Evans */ 891b7eaed25SJason Evans static extent_t * 892b7eaed25SJason Evans extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, 893b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, 894b7eaed25SJason Evans void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, 8950ef50b4eSJason Evans bool growing_retained) { 896b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 897b7eaed25SJason Evans WITNESS_RANK_CORE, growing_retained ? 1 : 0); 898b7eaed25SJason Evans assert(alignment > 0); 899b7eaed25SJason Evans if (config_debug && new_addr != NULL) { 900b7eaed25SJason Evans /* 901b7eaed25SJason Evans * Non-NULL new_addr has two use cases: 902b7eaed25SJason Evans * 903b7eaed25SJason Evans * 1) Recycle a known-extant extent, e.g. during purging. 904b7eaed25SJason Evans * 2) Perform in-place expanding reallocation. 905b7eaed25SJason Evans * 906b7eaed25SJason Evans * Regardless of use case, new_addr must either refer to a 907b7eaed25SJason Evans * non-existing extent, or to the base of an extant extent, 908b7eaed25SJason Evans * since only active slabs support interior lookups (which of 909b7eaed25SJason Evans * course cannot be recycled). 
910b7eaed25SJason Evans */ 911b7eaed25SJason Evans assert(PAGE_ADDR2BASE(new_addr) == new_addr); 912b7eaed25SJason Evans assert(pad == 0); 913b7eaed25SJason Evans assert(alignment <= PAGE); 914b7eaed25SJason Evans } 915b7eaed25SJason Evans 916b7eaed25SJason Evans size_t esize = size + pad; 917b7eaed25SJason Evans malloc_mutex_lock(tsdn, &extents->mtx); 918b7eaed25SJason Evans extent_hooks_assure_initialized(arena, r_extent_hooks); 919b7eaed25SJason Evans extent_t *extent; 920b7eaed25SJason Evans if (new_addr != NULL) { 921*c5ad8142SEric van Gyzen extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr, 922*c5ad8142SEric van Gyzen false); 923b7eaed25SJason Evans if (extent != NULL) { 924b7eaed25SJason Evans /* 925b7eaed25SJason Evans * We might null-out extent to report an error, but we 926b7eaed25SJason Evans * still need to unlock the associated mutex after. 927b7eaed25SJason Evans */ 928b7eaed25SJason Evans extent_t *unlock_extent = extent; 929b7eaed25SJason Evans assert(extent_base_get(extent) == new_addr); 930b7eaed25SJason Evans if (extent_arena_get(extent) != arena || 931b7eaed25SJason Evans extent_size_get(extent) < esize || 932b7eaed25SJason Evans extent_state_get(extent) != 933b7eaed25SJason Evans extents_state_get(extents)) { 934b7eaed25SJason Evans extent = NULL; 935b7eaed25SJason Evans } 936b7eaed25SJason Evans extent_unlock(tsdn, unlock_extent); 937b7eaed25SJason Evans } 938b7eaed25SJason Evans } else { 9390ef50b4eSJason Evans extent = extents_fit_locked(tsdn, arena, extents, esize, 9400ef50b4eSJason Evans alignment); 941b7eaed25SJason Evans } 942b7eaed25SJason Evans if (extent == NULL) { 943b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &extents->mtx); 944b7eaed25SJason Evans return NULL; 945b7eaed25SJason Evans } 946b7eaed25SJason Evans 9470ef50b4eSJason Evans extent_activate_locked(tsdn, arena, extents, extent); 948b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &extents->mtx); 949b7eaed25SJason Evans 950b7eaed25SJason Evans return extent; 
951b7eaed25SJason Evans } 952b7eaed25SJason Evans 9530ef50b4eSJason Evans /* 9540ef50b4eSJason Evans * Given an allocation request and an extent guaranteed to be able to satisfy 9550ef50b4eSJason Evans * it, this splits off lead and trail extents, leaving extent pointing to an 9560ef50b4eSJason Evans * extent satisfying the allocation. 9570ef50b4eSJason Evans * This function doesn't put lead or trail into any extents_t; it's the caller's 9580ef50b4eSJason Evans * job to ensure that they can be reused. 9590ef50b4eSJason Evans */ 9600ef50b4eSJason Evans typedef enum { 9610ef50b4eSJason Evans /* 9620ef50b4eSJason Evans * Split successfully. lead, extent, and trail, are modified to extents 9630ef50b4eSJason Evans * describing the ranges before, in, and after the given allocation. 9640ef50b4eSJason Evans */ 9650ef50b4eSJason Evans extent_split_interior_ok, 9660ef50b4eSJason Evans /* 9670ef50b4eSJason Evans * The extent can't satisfy the given allocation request. None of the 9680ef50b4eSJason Evans * input extent_t *s are touched. 9690ef50b4eSJason Evans */ 9700ef50b4eSJason Evans extent_split_interior_cant_alloc, 9710ef50b4eSJason Evans /* 9720ef50b4eSJason Evans * In a potentially invalid state. Must leak (if *to_leak is non-NULL), 9730ef50b4eSJason Evans * and salvage what's still salvageable (if *to_salvage is non-NULL). 9740ef50b4eSJason Evans * None of lead, extent, or trail are valid. 9750ef50b4eSJason Evans */ 9760ef50b4eSJason Evans extent_split_interior_error 9770ef50b4eSJason Evans } extent_split_interior_result_t; 9780ef50b4eSJason Evans 9790ef50b4eSJason Evans static extent_split_interior_result_t 9800ef50b4eSJason Evans extent_split_interior(tsdn_t *tsdn, arena_t *arena, 9810ef50b4eSJason Evans extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, 9820ef50b4eSJason Evans /* The result of splitting, in case of success. 
*/ 9830ef50b4eSJason Evans extent_t **extent, extent_t **lead, extent_t **trail, 9840ef50b4eSJason Evans /* The mess to clean up, in case of error. */ 9850ef50b4eSJason Evans extent_t **to_leak, extent_t **to_salvage, 986b7eaed25SJason Evans void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, 9870ef50b4eSJason Evans szind_t szind, bool growing_retained) { 988b7eaed25SJason Evans size_t esize = size + pad; 9890ef50b4eSJason Evans size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent), 9900ef50b4eSJason Evans PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent); 991b7eaed25SJason Evans assert(new_addr == NULL || leadsize == 0); 9920ef50b4eSJason Evans if (extent_size_get(*extent) < leadsize + esize) { 9930ef50b4eSJason Evans return extent_split_interior_cant_alloc; 9940ef50b4eSJason Evans } 9950ef50b4eSJason Evans size_t trailsize = extent_size_get(*extent) - leadsize - esize; 9960ef50b4eSJason Evans 9970ef50b4eSJason Evans *lead = NULL; 9980ef50b4eSJason Evans *trail = NULL; 9990ef50b4eSJason Evans *to_leak = NULL; 10000ef50b4eSJason Evans *to_salvage = NULL; 1001b7eaed25SJason Evans 1002b7eaed25SJason Evans /* Split the lead. */ 1003b7eaed25SJason Evans if (leadsize != 0) { 10040ef50b4eSJason Evans *lead = *extent; 10050ef50b4eSJason Evans *extent = extent_split_impl(tsdn, arena, r_extent_hooks, 1006*c5ad8142SEric van Gyzen *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind, 1007b7eaed25SJason Evans slab, growing_retained); 10080ef50b4eSJason Evans if (*extent == NULL) { 10090ef50b4eSJason Evans *to_leak = *lead; 10100ef50b4eSJason Evans *lead = NULL; 10110ef50b4eSJason Evans return extent_split_interior_error; 1012b7eaed25SJason Evans } 1013b7eaed25SJason Evans } 1014b7eaed25SJason Evans 1015b7eaed25SJason Evans /* Split the trail. 
*/ 1016b7eaed25SJason Evans if (trailsize != 0) { 10170ef50b4eSJason Evans *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent, 1018*c5ad8142SEric van Gyzen esize, szind, slab, trailsize, SC_NSIZES, false, 10190ef50b4eSJason Evans growing_retained); 10200ef50b4eSJason Evans if (*trail == NULL) { 10210ef50b4eSJason Evans *to_leak = *extent; 10220ef50b4eSJason Evans *to_salvage = *lead; 10230ef50b4eSJason Evans *lead = NULL; 10240ef50b4eSJason Evans *extent = NULL; 10250ef50b4eSJason Evans return extent_split_interior_error; 1026b7eaed25SJason Evans } 10270ef50b4eSJason Evans } 10280ef50b4eSJason Evans 10290ef50b4eSJason Evans if (leadsize == 0 && trailsize == 0) { 1030b7eaed25SJason Evans /* 1031b7eaed25SJason Evans * Splitting causes szind to be set as a side effect, but no 1032b7eaed25SJason Evans * splitting occurred. 1033b7eaed25SJason Evans */ 10340ef50b4eSJason Evans extent_szind_set(*extent, szind); 1035*c5ad8142SEric van Gyzen if (szind != SC_NSIZES) { 1036b7eaed25SJason Evans rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, 10370ef50b4eSJason Evans (uintptr_t)extent_addr_get(*extent), szind, slab); 10380ef50b4eSJason Evans if (slab && extent_size_get(*extent) > PAGE) { 1039b7eaed25SJason Evans rtree_szind_slab_update(tsdn, &extents_rtree, 1040b7eaed25SJason Evans rtree_ctx, 10410ef50b4eSJason Evans (uintptr_t)extent_past_get(*extent) - 1042b7eaed25SJason Evans (uintptr_t)PAGE, szind, slab); 1043b7eaed25SJason Evans } 1044b7eaed25SJason Evans } 1045b7eaed25SJason Evans } 1046b7eaed25SJason Evans 10470ef50b4eSJason Evans return extent_split_interior_ok; 1048b7eaed25SJason Evans } 1049b7eaed25SJason Evans 10500ef50b4eSJason Evans /* 10510ef50b4eSJason Evans * This fulfills the indicated allocation request out of the given extent (which 10520ef50b4eSJason Evans * the caller should have ensured was big enough). 
If there's any unused space 10530ef50b4eSJason Evans * before or after the resulting allocation, that space is given its own extent 10540ef50b4eSJason Evans * and put back into extents. 10550ef50b4eSJason Evans */ 10560ef50b4eSJason Evans static extent_t * 10570ef50b4eSJason Evans extent_recycle_split(tsdn_t *tsdn, arena_t *arena, 10580ef50b4eSJason Evans extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, 10590ef50b4eSJason Evans void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, 10600ef50b4eSJason Evans szind_t szind, extent_t *extent, bool growing_retained) { 10610ef50b4eSJason Evans extent_t *lead; 10620ef50b4eSJason Evans extent_t *trail; 10630ef50b4eSJason Evans extent_t *to_leak; 10640ef50b4eSJason Evans extent_t *to_salvage; 10650ef50b4eSJason Evans 10660ef50b4eSJason Evans extent_split_interior_result_t result = extent_split_interior( 10670ef50b4eSJason Evans tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail, 10680ef50b4eSJason Evans &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind, 10690ef50b4eSJason Evans growing_retained); 10700ef50b4eSJason Evans 1071*c5ad8142SEric van Gyzen if (!maps_coalesce && result != extent_split_interior_ok 1072*c5ad8142SEric van Gyzen && !opt_retain) { 1073*c5ad8142SEric van Gyzen /* 1074*c5ad8142SEric van Gyzen * Split isn't supported (implies Windows w/o retain). Avoid 1075*c5ad8142SEric van Gyzen * leaking the extents. 
1076*c5ad8142SEric van Gyzen */ 1077*c5ad8142SEric van Gyzen assert(to_leak != NULL && lead == NULL && trail == NULL); 1078*c5ad8142SEric van Gyzen extent_deactivate(tsdn, arena, extents, to_leak); 1079*c5ad8142SEric van Gyzen return NULL; 1080*c5ad8142SEric van Gyzen } 1081*c5ad8142SEric van Gyzen 10820ef50b4eSJason Evans if (result == extent_split_interior_ok) { 10830ef50b4eSJason Evans if (lead != NULL) { 10840ef50b4eSJason Evans extent_deactivate(tsdn, arena, extents, lead); 10850ef50b4eSJason Evans } 10860ef50b4eSJason Evans if (trail != NULL) { 10870ef50b4eSJason Evans extent_deactivate(tsdn, arena, extents, trail); 10880ef50b4eSJason Evans } 10890ef50b4eSJason Evans return extent; 10900ef50b4eSJason Evans } else { 10910ef50b4eSJason Evans /* 10920ef50b4eSJason Evans * We should have picked an extent that was large enough to 10930ef50b4eSJason Evans * fulfill our allocation request. 10940ef50b4eSJason Evans */ 10950ef50b4eSJason Evans assert(result == extent_split_interior_error); 10960ef50b4eSJason Evans if (to_salvage != NULL) { 10970ef50b4eSJason Evans extent_deregister(tsdn, to_salvage); 10980ef50b4eSJason Evans } 10990ef50b4eSJason Evans if (to_leak != NULL) { 11000ef50b4eSJason Evans void *leak = extent_base_get(to_leak); 11010ef50b4eSJason Evans extent_deregister_no_gdump_sub(tsdn, to_leak); 1102*c5ad8142SEric van Gyzen extents_abandon_vm(tsdn, arena, r_extent_hooks, extents, 11030ef50b4eSJason Evans to_leak, growing_retained); 1104*c5ad8142SEric van Gyzen assert(extent_lock_from_addr(tsdn, rtree_ctx, leak, 1105*c5ad8142SEric van Gyzen false) == NULL); 11060ef50b4eSJason Evans } 11070ef50b4eSJason Evans return NULL; 11080ef50b4eSJason Evans } 11090ef50b4eSJason Evans unreachable(); 11100ef50b4eSJason Evans } 11110ef50b4eSJason Evans 1112*c5ad8142SEric van Gyzen static bool 1113*c5ad8142SEric van Gyzen extent_need_manual_zero(arena_t *arena) { 1114*c5ad8142SEric van Gyzen /* 1115*c5ad8142SEric van Gyzen * Need to manually zero the extent on repopulating 
if either; 1) non 1116*c5ad8142SEric van Gyzen * default extent hooks installed (in which case the purge semantics may 1117*c5ad8142SEric van Gyzen * change); or 2) transparent huge pages enabled. 1118*c5ad8142SEric van Gyzen */ 1119*c5ad8142SEric van Gyzen return (!arena_has_default_hooks(arena) || 1120*c5ad8142SEric van Gyzen (opt_thp == thp_mode_always)); 1121*c5ad8142SEric van Gyzen } 1122*c5ad8142SEric van Gyzen 11230ef50b4eSJason Evans /* 11240ef50b4eSJason Evans * Tries to satisfy the given allocation request by reusing one of the extents 11250ef50b4eSJason Evans * in the given extents_t. 11260ef50b4eSJason Evans */ 1127b7eaed25SJason Evans static extent_t * 1128b7eaed25SJason Evans extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, 1129b7eaed25SJason Evans extents_t *extents, void *new_addr, size_t size, size_t pad, 1130b7eaed25SJason Evans size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit, 1131b7eaed25SJason Evans bool growing_retained) { 1132b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1133b7eaed25SJason Evans WITNESS_RANK_CORE, growing_retained ? 
1 : 0); 1134b7eaed25SJason Evans assert(new_addr == NULL || !slab); 1135b7eaed25SJason Evans assert(pad == 0 || !slab); 1136b7eaed25SJason Evans assert(!*zero || !slab); 1137b7eaed25SJason Evans 1138b7eaed25SJason Evans rtree_ctx_t rtree_ctx_fallback; 1139b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 1140b7eaed25SJason Evans 1141b7eaed25SJason Evans extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks, 11420ef50b4eSJason Evans rtree_ctx, extents, new_addr, size, pad, alignment, slab, 11430ef50b4eSJason Evans growing_retained); 1144b7eaed25SJason Evans if (extent == NULL) { 1145b7eaed25SJason Evans return NULL; 1146b7eaed25SJason Evans } 1147b7eaed25SJason Evans 1148b7eaed25SJason Evans extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx, 1149b7eaed25SJason Evans extents, new_addr, size, pad, alignment, slab, szind, extent, 1150b7eaed25SJason Evans growing_retained); 1151b7eaed25SJason Evans if (extent == NULL) { 1152b7eaed25SJason Evans return NULL; 1153b7eaed25SJason Evans } 1154b7eaed25SJason Evans 1155b7eaed25SJason Evans if (*commit && !extent_committed_get(extent)) { 1156b7eaed25SJason Evans if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 1157b7eaed25SJason Evans 0, extent_size_get(extent), growing_retained)) { 1158b7eaed25SJason Evans extent_record(tsdn, arena, r_extent_hooks, extents, 1159b7eaed25SJason Evans extent, growing_retained); 1160b7eaed25SJason Evans return NULL; 1161b7eaed25SJason Evans } 1162*c5ad8142SEric van Gyzen if (!extent_need_manual_zero(arena)) { 1163b7eaed25SJason Evans extent_zeroed_set(extent, true); 1164b7eaed25SJason Evans } 1165*c5ad8142SEric van Gyzen } 1166b7eaed25SJason Evans 11670ef50b4eSJason Evans if (extent_committed_get(extent)) { 11680ef50b4eSJason Evans *commit = true; 11690ef50b4eSJason Evans } 11700ef50b4eSJason Evans if (extent_zeroed_get(extent)) { 11710ef50b4eSJason Evans *zero = true; 11720ef50b4eSJason Evans } 11730ef50b4eSJason 
Evans 1174b7eaed25SJason Evans if (pad != 0) { 1175b7eaed25SJason Evans extent_addr_randomize(tsdn, extent, alignment); 1176b7eaed25SJason Evans } 1177b7eaed25SJason Evans assert(extent_state_get(extent) == extent_state_active); 1178b7eaed25SJason Evans if (slab) { 1179b7eaed25SJason Evans extent_slab_set(extent, slab); 1180b7eaed25SJason Evans extent_interior_register(tsdn, rtree_ctx, extent, szind); 1181b7eaed25SJason Evans } 1182b7eaed25SJason Evans 1183b7eaed25SJason Evans if (*zero) { 1184b7eaed25SJason Evans void *addr = extent_base_get(extent); 1185b7eaed25SJason Evans if (!extent_zeroed_get(extent)) { 1186d5aef6d6SEdward Tomasz Napierala size_t size = extent_size_get(extent); 1187*c5ad8142SEric van Gyzen if (extent_need_manual_zero(arena) || 1188*c5ad8142SEric van Gyzen pages_purge_forced(addr, size)) { 1189b7eaed25SJason Evans memset(addr, 0, size); 1190b7eaed25SJason Evans } 1191b7eaed25SJason Evans } else if (config_debug) { 1192b7eaed25SJason Evans size_t *p = (size_t *)(uintptr_t)addr; 1193d5aef6d6SEdward Tomasz Napierala /* Check the first page only. */ 1194d5aef6d6SEdward Tomasz Napierala for (size_t i = 0; i < PAGE / sizeof(size_t); i++) { 1195b7eaed25SJason Evans assert(p[i] == 0); 1196b7eaed25SJason Evans } 1197b7eaed25SJason Evans } 1198b7eaed25SJason Evans } 1199b7eaed25SJason Evans return extent; 1200b7eaed25SJason Evans } 1201b7eaed25SJason Evans 1202b7eaed25SJason Evans /* 1203b7eaed25SJason Evans * If the caller specifies (!*zero), it is still possible to receive zeroed 1204b7eaed25SJason Evans * memory, in which case *zero is toggled to true. arena_extent_alloc() takes 1205b7eaed25SJason Evans * advantage of this to avoid demanding zeroed extents, but taking advantage of 1206b7eaed25SJason Evans * them if they are returned. 
 */
/*
 * Core OS-level allocation: try the configured sources in precedence order
 * (primary dss, then mmap, then secondary dss).  Returns NULL only when every
 * strategy fails.  *zero / *commit are in-out flags forwarded to each backend.
 */
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
	void *ret;

	assert(size != 0);
	assert(alignment != 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}
	/* mmap. */
	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
	    != NULL) {
		return ret;
	}
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}

	/* All strategies for allocation failed. */
	return NULL;
}

/*
 * Default-hook allocation entry that carries tsdn through (the public hook
 * signature cannot).  Reads the arena's dss precedence and, when transparent
 * huge pages are controllable, applies the arena THP state to the new range.
 */
static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit) {
	void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
	    commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
	    ATOMIC_RELAXED));
	if (have_madvise_huge && ret) {
		pages_set_thp_state(ret, size);
	}
	return ret;
}

/*
 * The default extent_hooks_t.alloc implementation.  Recovers tsdn/arena from
 * thread-local state and the arena index, then defers to the impl above.
 * Alignment is rounded up to at least a page before use.
 */
static void *
extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	tsdn_t *tsdn;
	arena_t *arena;

	tsdn = tsdn_fetch();
	arena = arena_get(tsdn, arena_ind, false);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);

	return extent_alloc_default_impl(tsdn, arena, new_addr, size,
	    ALIGNMENT_CEILING(alignment, PAGE), zero, commit);
}

/*
 * Bracket a call into user-supplied extent hooks: bump the reentrancy level so
 * that any allocation the hook performs is detected.  Must be paired with
 * extent_hook_post_reentrancy().
 */
static void
extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
	if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
		/*
		 * The only legitimate case of customized extent hooks for a0 is
		 * hooks with no allocation activities.  One such example is to
		 * place metadata on pre-allocated resources such as huge pages.
		 * In that case, rely on reentrancy_level checks to catch
		 * infinite recursions.
		 */
		pre_reentrancy(tsd, NULL);
	} else {
		pre_reentrancy(tsd, arena);
	}
}

/* Undo extent_hook_pre_reentrancy() after the user hook returns. */
static void
extent_hook_post_reentrancy(tsdn_t *tsdn) {
	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
	post_reentrancy(tsd);
}

/*
 * If virtual memory is retained, create increasingly larger extents from which
 * to split requested extents in order to limit the total number of disjoint
 * virtual memory ranges retained by each arena.
 *
 * Called with arena->extent_grow_mtx held; this function releases it on every
 * path (both the success return and label_err).
 */
static extent_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
    bool slab, szind_t szind, bool *zero, bool *commit) {
	malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
	assert(pad == 0 || !slab);
	assert(!*zero || !slab);

	size_t esize = size + pad;
	size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size_min < esize) {
		goto label_err;
	}
	/*
	 * Find the next extent size in the series that would be large enough to
	 * satisfy this request.
	 */
	pszind_t egn_skip = 0;
	size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
	while (alloc_size < alloc_size_min) {
		egn_skip++;
		if (arena->extent_grow_next + egn_skip >=
		    sz_psz2ind(SC_LARGE_MAXCLASS)) {
			/* Outside legal range. */
			goto label_err;
		}
		alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
	}

	extent_t *extent = extent_alloc(tsdn, arena);
	if (extent == NULL) {
		goto label_err;
	}
	bool zeroed = false;
	bool committed = false;

	void *ptr;
	if (*r_extent_hooks == &extent_hooks_default) {
		ptr = extent_alloc_default_impl(tsdn, arena, NULL,
		    alloc_size, PAGE, &zeroed, &committed);
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
		    alloc_size, PAGE, &zeroed, &committed,
		    arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	/*
	 * Initialize the extent before the NULL check so that the dalloc path
	 * below always sees a fully-initialized extent_t.
	 */
	extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
	    arena_extent_sn_next(arena), extent_state_active, zeroed,
	    committed, true, EXTENT_IS_HEAD);
	if (ptr == NULL) {
		extent_dalloc(tsdn, arena, extent);
		goto label_err;
	}

	if (extent_register_no_gdump_add(tsdn, extent)) {
		extent_dalloc(tsdn, arena, extent);
		goto label_err;
	}

	if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
		*zero = true;
	}
	if (extent_committed_get(extent)) {
		*commit = true;
	}

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	/* Carve the requested span out of the (possibly larger) new extent. */
	extent_t *lead;
	extent_t *trail;
	extent_t *to_leak;
	extent_t *to_salvage;
	extent_split_interior_result_t result = extent_split_interior(
	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
	    &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
	    true);

	if (result == extent_split_interior_ok) {
		/* Return unused lead/trail portions to the retained heap. */
		if (lead != NULL) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, lead, true);
		}
		if (trail != NULL) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, trail, true);
		}
	} else {
		/*
		 * We should have allocated a sufficiently large extent; the
		 * cant_alloc case should not occur.
		 */
		assert(result == extent_split_interior_error);
		if (to_salvage != NULL) {
			if (config_prof) {
				extent_gdump_add(tsdn, to_salvage);
			}
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, to_salvage, true);
		}
		if (to_leak != NULL) {
			extent_deregister_no_gdump_sub(tsdn, to_leak);
			extents_abandon_vm(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, to_leak, true);
		}
		goto label_err;
	}

	if (*commit && !extent_committed_get(extent)) {
		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
		    extent_size_get(extent), true)) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, extent, true);
			goto label_err;
		}
		if (!extent_need_manual_zero(arena)) {
			/* A freshly committed mapping is zero-filled. */
			extent_zeroed_set(extent, true);
		}
	}

	/*
	 * Increment extent_grow_next if doing so wouldn't exceed the allowed
	 * range.
	 */
	if (arena->extent_grow_next + egn_skip + 1 <=
	    arena->retain_grow_limit) {
		arena->extent_grow_next += egn_skip + 1;
	} else {
		arena->extent_grow_next = arena->retain_grow_limit;
	}
	/* All opportunities for failure are past. */
	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);

	if (config_prof) {
		/* Adjust gdump stats now that extent is final size. */
		extent_gdump_add(tsdn, extent);
	}
	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	if (slab) {
		rtree_ctx_t rtree_ctx_fallback;
		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
		    &rtree_ctx_fallback);

		extent_slab_set(extent, true);
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}
	if (*zero && !extent_zeroed_get(extent)) {
		void *addr = extent_base_get(extent);
		size_t size = extent_size_get(extent);
		/*
		 * Prefer purging (which lets the OS supply zeroed pages) over
		 * memset; fall back to memset when purging is unavailable or
		 * the arena requires manual zeroing.
		 */
		if (extent_need_manual_zero(arena) ||
		    pages_purge_forced(addr, size)) {
			memset(addr, 0, size);
		}
	}

	return extent;
label_err:
	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
	return NULL;
}
Evans static extent_t * 1463b7eaed25SJason Evans extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, 1464b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, 1465b7eaed25SJason Evans size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { 1466b7eaed25SJason Evans assert(size != 0); 1467b7eaed25SJason Evans assert(alignment != 0); 1468b7eaed25SJason Evans 1469b7eaed25SJason Evans malloc_mutex_lock(tsdn, &arena->extent_grow_mtx); 1470b7eaed25SJason Evans 1471b7eaed25SJason Evans extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, 1472b7eaed25SJason Evans &arena->extents_retained, new_addr, size, pad, alignment, slab, 1473b7eaed25SJason Evans szind, zero, commit, true); 1474b7eaed25SJason Evans if (extent != NULL) { 1475b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); 1476b7eaed25SJason Evans if (config_prof) { 1477b7eaed25SJason Evans extent_gdump_add(tsdn, extent); 1478b7eaed25SJason Evans } 1479b7eaed25SJason Evans } else if (opt_retain && new_addr == NULL) { 1480b7eaed25SJason Evans extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size, 1481b7eaed25SJason Evans pad, alignment, slab, szind, zero, commit); 1482b7eaed25SJason Evans /* extent_grow_retained() always releases extent_grow_mtx. 
*/ 1483b7eaed25SJason Evans } else { 1484b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); 1485b7eaed25SJason Evans } 1486b7eaed25SJason Evans malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx); 1487b7eaed25SJason Evans 1488b7eaed25SJason Evans return extent; 1489b7eaed25SJason Evans } 1490b7eaed25SJason Evans 1491b7eaed25SJason Evans static extent_t * 1492b7eaed25SJason Evans extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, 1493b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, 1494b7eaed25SJason Evans size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { 1495b7eaed25SJason Evans size_t esize = size + pad; 1496b7eaed25SJason Evans extent_t *extent = extent_alloc(tsdn, arena); 1497b7eaed25SJason Evans if (extent == NULL) { 1498b7eaed25SJason Evans return NULL; 1499b7eaed25SJason Evans } 1500b7eaed25SJason Evans void *addr; 1501*c5ad8142SEric van Gyzen size_t palignment = ALIGNMENT_CEILING(alignment, PAGE); 1502b7eaed25SJason Evans if (*r_extent_hooks == &extent_hooks_default) { 1503b7eaed25SJason Evans /* Call directly to propagate tsdn. 
*/ 1504b7eaed25SJason Evans addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize, 1505*c5ad8142SEric van Gyzen palignment, zero, commit); 1506b7eaed25SJason Evans } else { 15078b2f5aafSJason Evans extent_hook_pre_reentrancy(tsdn, arena); 1508b7eaed25SJason Evans addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, 1509*c5ad8142SEric van Gyzen esize, palignment, zero, commit, arena_ind_get(arena)); 15108b2f5aafSJason Evans extent_hook_post_reentrancy(tsdn); 1511b7eaed25SJason Evans } 1512b7eaed25SJason Evans if (addr == NULL) { 1513b7eaed25SJason Evans extent_dalloc(tsdn, arena, extent); 1514b7eaed25SJason Evans return NULL; 1515b7eaed25SJason Evans } 1516b7eaed25SJason Evans extent_init(extent, arena, addr, esize, slab, szind, 15170ef50b4eSJason Evans arena_extent_sn_next(arena), extent_state_active, *zero, *commit, 1518*c5ad8142SEric van Gyzen true, EXTENT_NOT_HEAD); 1519b7eaed25SJason Evans if (pad != 0) { 1520b7eaed25SJason Evans extent_addr_randomize(tsdn, extent, alignment); 1521b7eaed25SJason Evans } 1522b7eaed25SJason Evans if (extent_register(tsdn, extent)) { 1523*c5ad8142SEric van Gyzen extent_dalloc(tsdn, arena, extent); 1524b7eaed25SJason Evans return NULL; 1525b7eaed25SJason Evans } 1526b7eaed25SJason Evans 1527b7eaed25SJason Evans return extent; 1528b7eaed25SJason Evans } 1529b7eaed25SJason Evans 1530b7eaed25SJason Evans extent_t * 1531b7eaed25SJason Evans extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, 1532b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, 1533b7eaed25SJason Evans size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { 1534b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1535b7eaed25SJason Evans WITNESS_RANK_CORE, 0); 1536b7eaed25SJason Evans 1537b7eaed25SJason Evans extent_hooks_assure_initialized(arena, r_extent_hooks); 1538b7eaed25SJason Evans 1539b7eaed25SJason Evans extent_t *extent = extent_alloc_retained(tsdn, arena, 
r_extent_hooks, 1540b7eaed25SJason Evans new_addr, size, pad, alignment, slab, szind, zero, commit); 1541b7eaed25SJason Evans if (extent == NULL) { 15420ef50b4eSJason Evans if (opt_retain && new_addr != NULL) { 15430ef50b4eSJason Evans /* 15440ef50b4eSJason Evans * When retain is enabled and new_addr is set, we do not 15450ef50b4eSJason Evans * attempt extent_alloc_wrapper_hard which does mmap 15460ef50b4eSJason Evans * that is very unlikely to succeed (unless it happens 15470ef50b4eSJason Evans * to be at the end). 15480ef50b4eSJason Evans */ 15490ef50b4eSJason Evans return NULL; 15500ef50b4eSJason Evans } 1551b7eaed25SJason Evans extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks, 1552b7eaed25SJason Evans new_addr, size, pad, alignment, slab, szind, zero, commit); 1553b7eaed25SJason Evans } 1554b7eaed25SJason Evans 15550ef50b4eSJason Evans assert(extent == NULL || extent_dumpable_get(extent)); 1556b7eaed25SJason Evans return extent; 1557b7eaed25SJason Evans } 1558b7eaed25SJason Evans 1559b7eaed25SJason Evans static bool 1560b7eaed25SJason Evans extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner, 1561b7eaed25SJason Evans const extent_t *outer) { 1562b7eaed25SJason Evans assert(extent_arena_get(inner) == arena); 1563b7eaed25SJason Evans if (extent_arena_get(outer) != arena) { 1564b7eaed25SJason Evans return false; 1565b7eaed25SJason Evans } 1566b7eaed25SJason Evans 1567b7eaed25SJason Evans assert(extent_state_get(inner) == extent_state_active); 1568b7eaed25SJason Evans if (extent_state_get(outer) != extents->state) { 1569b7eaed25SJason Evans return false; 1570b7eaed25SJason Evans } 1571b7eaed25SJason Evans 1572b7eaed25SJason Evans if (extent_committed_get(inner) != extent_committed_get(outer)) { 1573b7eaed25SJason Evans return false; 1574b7eaed25SJason Evans } 1575b7eaed25SJason Evans 1576b7eaed25SJason Evans return true; 1577b7eaed25SJason Evans } 1578b7eaed25SJason Evans 1579b7eaed25SJason Evans static bool 
1580b7eaed25SJason Evans extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, 1581b7eaed25SJason Evans extents_t *extents, extent_t *inner, extent_t *outer, bool forward, 1582b7eaed25SJason Evans bool growing_retained) { 1583b7eaed25SJason Evans assert(extent_can_coalesce(arena, extents, inner, outer)); 1584b7eaed25SJason Evans 15850ef50b4eSJason Evans extent_activate_locked(tsdn, arena, extents, outer); 1586b7eaed25SJason Evans 1587b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &extents->mtx); 1588b7eaed25SJason Evans bool err = extent_merge_impl(tsdn, arena, r_extent_hooks, 1589b7eaed25SJason Evans forward ? inner : outer, forward ? outer : inner, growing_retained); 1590b7eaed25SJason Evans malloc_mutex_lock(tsdn, &extents->mtx); 1591b7eaed25SJason Evans 1592b7eaed25SJason Evans if (err) { 15930ef50b4eSJason Evans extent_deactivate_locked(tsdn, arena, extents, outer); 1594b7eaed25SJason Evans } 1595b7eaed25SJason Evans 1596b7eaed25SJason Evans return err; 1597b7eaed25SJason Evans } 1598b7eaed25SJason Evans 1599b7eaed25SJason Evans static extent_t * 1600*c5ad8142SEric van Gyzen extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena, 1601b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, 1602*c5ad8142SEric van Gyzen extent_t *extent, bool *coalesced, bool growing_retained, 1603*c5ad8142SEric van Gyzen bool inactive_only) { 1604*c5ad8142SEric van Gyzen /* 1605*c5ad8142SEric van Gyzen * We avoid checking / locking inactive neighbors for large size 1606*c5ad8142SEric van Gyzen * classes, since they are eagerly coalesced on deallocation which can 1607*c5ad8142SEric van Gyzen * cause lock contention. 1608*c5ad8142SEric van Gyzen */ 1609b7eaed25SJason Evans /* 1610b7eaed25SJason Evans * Continue attempting to coalesce until failure, to protect against 1611b7eaed25SJason Evans * races with other threads that are thwarted by this one. 
1612b7eaed25SJason Evans */ 1613b7eaed25SJason Evans bool again; 1614b7eaed25SJason Evans do { 1615b7eaed25SJason Evans again = false; 1616b7eaed25SJason Evans 1617b7eaed25SJason Evans /* Try to coalesce forward. */ 1618b7eaed25SJason Evans extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx, 1619*c5ad8142SEric van Gyzen extent_past_get(extent), inactive_only); 1620b7eaed25SJason Evans if (next != NULL) { 1621b7eaed25SJason Evans /* 1622b7eaed25SJason Evans * extents->mtx only protects against races for 1623b7eaed25SJason Evans * like-state extents, so call extent_can_coalesce() 1624b7eaed25SJason Evans * before releasing next's pool lock. 1625b7eaed25SJason Evans */ 1626b7eaed25SJason Evans bool can_coalesce = extent_can_coalesce(arena, extents, 1627b7eaed25SJason Evans extent, next); 1628b7eaed25SJason Evans 1629b7eaed25SJason Evans extent_unlock(tsdn, next); 1630b7eaed25SJason Evans 1631b7eaed25SJason Evans if (can_coalesce && !extent_coalesce(tsdn, arena, 1632b7eaed25SJason Evans r_extent_hooks, extents, extent, next, true, 1633b7eaed25SJason Evans growing_retained)) { 1634b7eaed25SJason Evans if (extents->delay_coalesce) { 1635b7eaed25SJason Evans /* Do minimal coalescing. */ 1636b7eaed25SJason Evans *coalesced = true; 1637b7eaed25SJason Evans return extent; 1638b7eaed25SJason Evans } 1639b7eaed25SJason Evans again = true; 1640b7eaed25SJason Evans } 1641b7eaed25SJason Evans } 1642b7eaed25SJason Evans 1643b7eaed25SJason Evans /* Try to coalesce backward. 
*/ 1644b7eaed25SJason Evans extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx, 1645*c5ad8142SEric van Gyzen extent_before_get(extent), inactive_only); 1646b7eaed25SJason Evans if (prev != NULL) { 1647b7eaed25SJason Evans bool can_coalesce = extent_can_coalesce(arena, extents, 1648b7eaed25SJason Evans extent, prev); 1649b7eaed25SJason Evans extent_unlock(tsdn, prev); 1650b7eaed25SJason Evans 1651b7eaed25SJason Evans if (can_coalesce && !extent_coalesce(tsdn, arena, 1652b7eaed25SJason Evans r_extent_hooks, extents, extent, prev, false, 1653b7eaed25SJason Evans growing_retained)) { 1654b7eaed25SJason Evans extent = prev; 1655b7eaed25SJason Evans if (extents->delay_coalesce) { 1656b7eaed25SJason Evans /* Do minimal coalescing. */ 1657b7eaed25SJason Evans *coalesced = true; 1658b7eaed25SJason Evans return extent; 1659b7eaed25SJason Evans } 1660b7eaed25SJason Evans again = true; 1661b7eaed25SJason Evans } 1662b7eaed25SJason Evans } 1663b7eaed25SJason Evans } while (again); 1664b7eaed25SJason Evans 1665b7eaed25SJason Evans if (extents->delay_coalesce) { 1666b7eaed25SJason Evans *coalesced = false; 1667b7eaed25SJason Evans } 1668b7eaed25SJason Evans return extent; 1669b7eaed25SJason Evans } 1670b7eaed25SJason Evans 1671*c5ad8142SEric van Gyzen static extent_t * 1672*c5ad8142SEric van Gyzen extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, 1673*c5ad8142SEric van Gyzen extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, 1674*c5ad8142SEric van Gyzen extent_t *extent, bool *coalesced, bool growing_retained) { 1675*c5ad8142SEric van Gyzen return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx, 1676*c5ad8142SEric van Gyzen extents, extent, coalesced, growing_retained, false); 1677*c5ad8142SEric van Gyzen } 1678*c5ad8142SEric van Gyzen 1679*c5ad8142SEric van Gyzen static extent_t * 1680*c5ad8142SEric van Gyzen extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena, 1681*c5ad8142SEric van Gyzen extent_hooks_t **r_extent_hooks, 
rtree_ctx_t *rtree_ctx, extents_t *extents, 1682*c5ad8142SEric van Gyzen extent_t *extent, bool *coalesced, bool growing_retained) { 1683*c5ad8142SEric van Gyzen return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx, 1684*c5ad8142SEric van Gyzen extents, extent, coalesced, growing_retained, true); 1685*c5ad8142SEric van Gyzen } 1686*c5ad8142SEric van Gyzen 16870ef50b4eSJason Evans /* 16880ef50b4eSJason Evans * Does the metadata management portions of putting an unused extent into the 16890ef50b4eSJason Evans * given extents_t (coalesces, deregisters slab interiors, the heap operations). 16900ef50b4eSJason Evans */ 1691b7eaed25SJason Evans static void 1692b7eaed25SJason Evans extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, 1693b7eaed25SJason Evans extents_t *extents, extent_t *extent, bool growing_retained) { 1694b7eaed25SJason Evans rtree_ctx_t rtree_ctx_fallback; 1695b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 1696b7eaed25SJason Evans 1697b7eaed25SJason Evans assert((extents_state_get(extents) != extent_state_dirty && 1698b7eaed25SJason Evans extents_state_get(extents) != extent_state_muzzy) || 1699b7eaed25SJason Evans !extent_zeroed_get(extent)); 1700b7eaed25SJason Evans 1701b7eaed25SJason Evans malloc_mutex_lock(tsdn, &extents->mtx); 1702b7eaed25SJason Evans extent_hooks_assure_initialized(arena, r_extent_hooks); 1703b7eaed25SJason Evans 1704*c5ad8142SEric van Gyzen extent_szind_set(extent, SC_NSIZES); 1705b7eaed25SJason Evans if (extent_slab_get(extent)) { 1706b7eaed25SJason Evans extent_interior_deregister(tsdn, rtree_ctx, extent); 1707b7eaed25SJason Evans extent_slab_set(extent, false); 1708b7eaed25SJason Evans } 1709b7eaed25SJason Evans 1710b7eaed25SJason Evans assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, 1711b7eaed25SJason Evans (uintptr_t)extent_base_get(extent), true) == extent); 1712b7eaed25SJason Evans 1713b7eaed25SJason Evans if 
(!extents->delay_coalesce) { 1714b7eaed25SJason Evans extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, 1715b7eaed25SJason Evans rtree_ctx, extents, extent, NULL, growing_retained); 1716*c5ad8142SEric van Gyzen } else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) { 1717*c5ad8142SEric van Gyzen assert(extents == &arena->extents_dirty); 17180ef50b4eSJason Evans /* Always coalesce large extents eagerly. */ 17190ef50b4eSJason Evans bool coalesced; 17200ef50b4eSJason Evans do { 17210ef50b4eSJason Evans assert(extent_state_get(extent) == extent_state_active); 1722*c5ad8142SEric van Gyzen extent = extent_try_coalesce_large(tsdn, arena, 17230ef50b4eSJason Evans r_extent_hooks, rtree_ctx, extents, extent, 17240ef50b4eSJason Evans &coalesced, growing_retained); 1725*c5ad8142SEric van Gyzen } while (coalesced); 1726*c5ad8142SEric van Gyzen if (extent_size_get(extent) >= oversize_threshold) { 1727*c5ad8142SEric van Gyzen /* Shortcut to purge the oversize extent eagerly. */ 1728*c5ad8142SEric van Gyzen malloc_mutex_unlock(tsdn, &extents->mtx); 1729*c5ad8142SEric van Gyzen arena_decay_extent(tsdn, arena, r_extent_hooks, extent); 1730*c5ad8142SEric van Gyzen return; 1731*c5ad8142SEric van Gyzen } 1732b7eaed25SJason Evans } 17330ef50b4eSJason Evans extent_deactivate_locked(tsdn, arena, extents, extent); 1734b7eaed25SJason Evans 1735b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &extents->mtx); 1736b7eaed25SJason Evans } 1737b7eaed25SJason Evans 1738b7eaed25SJason Evans void 1739b7eaed25SJason Evans extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { 1740b7eaed25SJason Evans extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; 1741b7eaed25SJason Evans 1742b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1743b7eaed25SJason Evans WITNESS_RANK_CORE, 0); 1744b7eaed25SJason Evans 1745b7eaed25SJason Evans if (extent_register(tsdn, extent)) { 1746*c5ad8142SEric van Gyzen extent_dalloc(tsdn, arena, extent); 1747b7eaed25SJason 
Evans return; 1748b7eaed25SJason Evans } 1749b7eaed25SJason Evans extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent); 1750b7eaed25SJason Evans } 1751b7eaed25SJason Evans 1752b7eaed25SJason Evans static bool 1753*c5ad8142SEric van Gyzen extent_may_dalloc(void) { 1754*c5ad8142SEric van Gyzen /* With retain enabled, the default dalloc always fails. */ 1755*c5ad8142SEric van Gyzen return !opt_retain; 1756*c5ad8142SEric van Gyzen } 1757*c5ad8142SEric van Gyzen 1758*c5ad8142SEric van Gyzen static bool 1759b7eaed25SJason Evans extent_dalloc_default_impl(void *addr, size_t size) { 1760b7eaed25SJason Evans if (!have_dss || !extent_in_dss(addr)) { 1761b7eaed25SJason Evans return extent_dalloc_mmap(addr, size); 1762b7eaed25SJason Evans } 1763b7eaed25SJason Evans return true; 1764b7eaed25SJason Evans } 1765b7eaed25SJason Evans 1766b7eaed25SJason Evans static bool 1767b7eaed25SJason Evans extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size, 1768b7eaed25SJason Evans bool committed, unsigned arena_ind) { 1769b7eaed25SJason Evans return extent_dalloc_default_impl(addr, size); 1770b7eaed25SJason Evans } 1771b7eaed25SJason Evans 1772b7eaed25SJason Evans static bool 1773b7eaed25SJason Evans extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, 1774b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, extent_t *extent) { 1775b7eaed25SJason Evans bool err; 1776b7eaed25SJason Evans 1777b7eaed25SJason Evans assert(extent_base_get(extent) != NULL); 1778b7eaed25SJason Evans assert(extent_size_get(extent) != 0); 1779b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1780b7eaed25SJason Evans WITNESS_RANK_CORE, 0); 1781b7eaed25SJason Evans 1782b7eaed25SJason Evans extent_addr_set(extent, extent_base_get(extent)); 1783b7eaed25SJason Evans 1784b7eaed25SJason Evans extent_hooks_assure_initialized(arena, r_extent_hooks); 1785b7eaed25SJason Evans /* Try to deallocate. 
*/
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_dalloc_default_impl(extent_base_get(extent),
		    extent_size_get(extent));
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		/* A missing dalloc hook counts as a failed deallocation. */
		err = ((*r_extent_hooks)->dalloc == NULL ||
		    (*r_extent_hooks)->dalloc(*r_extent_hooks,
		    extent_base_get(extent), extent_size_get(extent),
		    extent_committed_get(extent), arena_ind_get(arena)));
		extent_hook_post_reentrancy(tsdn);
	}

	/* Release the extent_t metadata only if the pages were released. */
	if (!err) {
		extent_dalloc(tsdn, arena, extent);
	}

	return err;
}

/*
 * Deallocate an extent: first attempt to hand the pages back via the dalloc
 * hook (extent_dalloc_wrapper_try()); if that must be avoided or fails, fall
 * back to decommitting/purging the pages and recording the extent in
 * arena->extents_retained for later reuse.
 */
void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	assert(extent_dumpable_get(extent));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Avoid calling the default extent_dalloc unless have to. */
	if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) {
		/*
		 * Deregister first to avoid a race with other allocating
		 * threads, and reregister if deallocation fails.
		 */
		extent_deregister(tsdn, extent);
		if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks,
		    extent)) {
			/* Success: pages and metadata are both gone. */
			return;
		}
		extent_reregister(tsdn, extent);
	}

	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	/* Try to decommit; purge if that fails. */
	bool zeroed;
	if (!extent_committed_get(extent)) {
		/* Nothing is committed, so there is nothing to purge. */
		zeroed = true;
	} else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
	    0, extent_size_get(extent))) {
		zeroed = true;
	} else if ((*r_extent_hooks)->purge_forced != NULL &&
	    !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena))) {
		zeroed = true;
	} else if (extent_state_get(extent) == extent_state_muzzy ||
	    ((*r_extent_hooks)->purge_lazy != NULL &&
	    !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena)))) {
		/* Muzzy state / lazy purging does not imply zeroed pages. */
		zeroed = false;
	} else {
		/* All purge attempts failed (or were unavailable). */
		zeroed = false;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	extent_zeroed_set(extent, zeroed);

	if (config_prof) {
		extent_gdump_sub(tsdn, extent);
	}

	/* Retain the extent for future reuse instead of unmapping it. */
	extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
	    extent, false);
}

/* Unmap the pages, unless they belong to the DSS (which cannot shrink). */
static void
extent_destroy_default_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		pages_unmap(addr, size);
	}
}

/* Default destroy hook; see extent_destroy_default_impl(). */
static void
extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	extent_destroy_default_impl(addr, size);
}

/*
 * Permanently return an extent's pages to the system and free its metadata.
 * Unlike extent_dalloc_wrapper(), the extent is never retained.
 */
void
extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Deregister first to avoid a race with other allocating threads. */
	extent_deregister(tsdn, extent);

	extent_addr_set(extent, extent_base_get(extent));

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	/* Try to destroy; silently fail otherwise.
*/
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		extent_destroy_default_impl(extent_base_get(extent),
		    extent_size_get(extent));
	} else if ((*r_extent_hooks)->destroy != NULL) {
		extent_hook_pre_reentrancy(tsdn, arena);
		(*r_extent_hooks)->destroy(*r_extent_hooks,
		    extent_base_get(extent), extent_size_get(extent),
		    extent_committed_get(extent), arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	/* The metadata is freed whether or not a destroy hook ran. */
	extent_dalloc(tsdn, arena, extent);
}

/* Default commit hook: commit [offset, offset+length) within addr. */
static bool
extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

/*
 * Commit part of an extent via the commit hook.  Returns true on error; a
 * NULL commit hook is treated as an error.  On success the extent is marked
 * committed.
 */
static bool
extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	/* One extra witness level is tolerated while growing retained. */
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = ((*r_extent_hooks)->commit == NULL ||
	    (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
	    extent_size_get(extent), offset, length, arena_ind_get(arena)));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	extent_committed_set(extent, extent_committed_get(extent) || !err);
	return err;
}

/* Public wrapper around extent_commit_impl() (not growing retained). */
bool
extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
	    length, false);
}

/* Default decommit hook: decommit [offset, offset+length) within addr. */
static bool
extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}

/*
 * Decommit part of an extent via the decommit hook.  Returns true on error;
 * a NULL decommit hook is treated as an error.  On success the extent's
 * committed flag is cleared.
 */
bool
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	/* A missing decommit hook is treated as an error. */
	bool err = ((*r_extent_hooks)->decommit == NULL ||
	    (*r_extent_hooks)->decommit(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena)));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	/* On success (err == false), the committed flag is cleared. */
	extent_committed_set(extent, extent_committed_get(extent) && err);
	return err;
}

#ifdef PAGES_CAN_PURGE_LAZY
/* Default lazy-purge hook; offset and length must be page-aligned. */
static bool
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}
#endif

/*
 * Lazily purge part of an extent via the purge_lazy hook.  Returns true on
 * error, including when no purge_lazy hook is installed.
 */
static bool
extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->purge_lazy == NULL) {
		return true;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}

	return err;
}

/* Public wrapper around extent_purge_lazy_impl() (not growing retained). */
bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
	    offset, length, false);
}

#ifdef PAGES_CAN_PURGE_FORCED
/* Default forced-purge hook; offset and length must be page-aligned. */
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_forced((void *)((uintptr_t)addr +
	    (uintptr_t)offset), length);
}
#endif

/*
 * Forcibly purge part of an extent via the purge_forced hook.  Returns true
 * on error, including when no purge_forced hook is installed.
 */
static bool
extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->purge_forced == NULL) {
		return true;
	}
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	return err;
}

/* Public wrapper around extent_purge_forced_impl() (not growing retained). */
bool
extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
offset, length, false);
}

/* Default split hook. */
static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
	if (!maps_coalesce) {
		/*
		 * Without retain, only whole regions can be purged (required by
		 * MEM_RELEASE on Windows) -- therefore disallow splitting.  See
		 * comments in extent_head_no_merge().
		 */
		return !opt_retain;
	}

	return false;
}

/*
 * Accepts the extent to split, and the characteristics of each side of the
 * split.  The 'a' parameters go with the 'lead' of the resulting pair of
 * extents (the lower addressed portion of the split), and the 'b' parameters go
 * with the trail (the higher addressed portion).  This makes 'extent' the lead,
 * and returns the trail (except in case of error).
 */
static extent_t *
extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained) {
	assert(extent_size_get(extent) == size_a + size_b);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->split == NULL) {
		return NULL;
	}

	/* Metadata for the trail (higher addressed) portion. */
	extent_t *trail = extent_alloc(tsdn, arena);
	if (trail == NULL) {
		goto label_error_a;
	}

	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
	    size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
	    extent_state_get(extent), extent_zeroed_get(extent),
	    extent_committed_get(extent), extent_dumpable_get(extent),
	    EXTENT_NOT_HEAD);

	/* Look up the rtree leaf elements for both halves up front. */
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
	{
		/* Temporary on-stack image of what the lead will become. */
		extent_t lead;

		extent_init(&lead, arena, extent_addr_get(extent), size_a,
		    slab_a, szind_a, extent_sn_get(extent),
		    extent_state_get(extent), extent_zeroed_get(extent),
		    extent_committed_get(extent), extent_dumpable_get(extent),
		    EXTENT_NOT_HEAD);

		extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
		    true, &lead_elm_a, &lead_elm_b);
	}
	rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
	    &trail_elm_a, &trail_elm_b);

	if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
	    || trail_elm_b == NULL) {
		goto label_error_b;
	}

	extent_lock2(tsdn, extent, trail);

	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_pre_reentrancy(tsdn, arena);
	}
	bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
	    size_a + size_b, size_a, size_b, extent_committed_get(extent),
	    arena_ind_get(arena));
	if (*r_extent_hooks != &extent_hooks_default) {
		extent_hook_post_reentrancy(tsdn);
	}
	if (err) {
		goto label_error_c;
	}

	/* Shrink the original extent into the lead. */
	extent_size_set(extent, size_a);
	extent_szind_set(extent, szind_a);

	/* Publish both halves while the locks are held. */
	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
	    szind_a, slab_a);
	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
	    szind_b, slab_b);

	extent_unlock2(tsdn, extent, trail);

	return trail;
label_error_c:
	extent_unlock2(tsdn, extent, trail);
label_error_b:
	extent_dalloc(tsdn, arena, trail);
label_error_a:
	return NULL;
}

/* Public wrapper around extent_split_impl() (not growing retained). */
extent_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
	return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
	    szind_a, slab_a, size_b, szind_b, slab_b, false);
}

/* Returns true if addr_a and addr_b must not be merged. */
static bool
extent_merge_default_impl(void *addr_a, void *addr_b) {
	if (!maps_coalesce && !opt_retain) {
		return true;
	}
	/* Defer to extent_dss_mergeable() for DSS-related restrictions. */
	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
		return true;
	}

	return false;
}

/*
 * Returns true if the given extents can't be merged because of their head bit
 * settings.  Assumes the second extent has the higher address.
 */
static bool
extent_head_no_merge(extent_t *a, extent_t *b) {
	assert(extent_base_get(a) < extent_base_get(b));
	/*
	 * When coalesce is not always allowed (Windows), only merge extents
	 * from the same VirtualAlloc region under opt.retain (in which case
	 * MEM_DECOMMIT is utilized for purging).
*/
	if (maps_coalesce) {
		return false;
	}
	if (!opt_retain) {
		return true;
	}
	/* If b is a head extent, disallow the cross-region merge. */
	if (extent_is_head_get(b)) {
		/*
		 * Additionally, sn should not overflow with retain; sanity
		 * check that different regions have unique sn.
		 */
		assert(extent_sn_comp(a, b) != 0);
		return true;
	}
	assert(extent_sn_comp(a, b) == 0);

	return false;
}

/*
 * Default merge hook; when maps_coalesce is false, additionally enforces the
 * head-bit restrictions via extent_head_no_merge().
 */
static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
	if (!maps_coalesce) {
		tsdn_t *tsdn = tsdn_fetch();
		extent_t *a = iealloc(tsdn, addr_a);
		extent_t *b = iealloc(tsdn, addr_b);
		if (extent_head_no_merge(a, b)) {
			return true;
		}
	}
	return extent_merge_default_impl(addr_a, addr_b);
}

/*
 * Merge extents a and b (b has the higher address) into a single extent a,
 * releasing b's metadata.  Returns true on failure.
 */
static bool
extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
	assert(extent_base_get(a) < extent_base_get(b));

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->merge == NULL || extent_head_no_merge(a, b)) {
		return true;
	}

	bool err;
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_merge_default_impl(extent_base_get(a),
		    extent_base_get(b));
	} else {
		extent_hook_pre_reentrancy(tsdn, arena);
		err = (*r_extent_hooks)->merge(*r_extent_hooks,
		    extent_base_get(a), extent_size_get(a), extent_base_get(b),
		    extent_size_get(b), extent_committed_get(a),
		    arena_ind_get(arena));
		extent_hook_post_reentrancy(tsdn);
	}

	if (err) {
		return true;
	}

	/*
	 * The rtree writes must happen while all the relevant elements are
	 * owned, so the following code uses decomposed helper functions rather
	 * than extent_{,de}register() to do things in the right order.
	 */
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
	    &a_elm_b);
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
	    &b_elm_b);

	extent_lock2(tsdn, a, b);

	/*
	 * Clear the now-interior boundary mappings.  NOTE(review): the
	 * b_elm_b != NULL branch deliberately clears b_elm_a (b's first
	 * element); a_elm_a and b_elm_b become the merged extent's endpoints
	 * below -- presumably first/last page elms, see
	 * extent_rtree_leaf_elms_lookup().
	 */
	if (a_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
		    SC_NSIZES, false);
	}
	if (b_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
		    SC_NSIZES, false);
	} else {
		/* b occupies a single mapping; reuse it as the new endpoint. */
		b_elm_b = b_elm_a;
	}

	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
	extent_szind_set(a, SC_NSIZES);
	/* The merged extent keeps the smaller of the two serial numbers. */
	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
extent_sn_get(a) : extent_sn_get(b));
	/* Merged extent is zeroed only if both inputs were zeroed. */
	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));

	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
	    false);

	extent_unlock2(tsdn, a, b);

	extent_dalloc(tsdn, extent_arena_get(b), b);

	return false;
}

/* Public wrapper around extent_merge_impl() (not growing retained). */
bool
extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
	return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
}

/*
 * One-time initialization of the extent module: the global extents rtree and
 * the extent mutex pool, plus DSS state when available.  Returns true on
 * error.
 */
bool
extent_boot(void) {
	if (rtree_new(&extents_rtree, true)) {
		return true;
	}

	if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
	    WITNESS_RANK_EXTENT_POOL)) {
		return true;
	}

	if (have_dss) {
		extent_dss_boot();
	}

	return false;
}

/*
 * Report utilization stats for the extent containing ptr: free region count
 * (nfree), total region count (nregs), and extent size.  A non-slab extent
 * counts as a single, fully used region.
 */
void
extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
    size_t *nfree, size_t *nregs, size_t *size) {
	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);

	const extent_t *extent = iealloc(tsdn, ptr);
	if (unlikely(extent == NULL)) {
		/* No extent maps ptr; report all zeros. */
		*nfree = *nregs = *size = 0;
		return;
	}

	*size = extent_size_get(extent);
	if (!extent_slab_get(extent)) {
		*nfree = 0;
		*nregs = 1;
	} else {
		*nfree = extent_nfree_get(extent);
		*nregs = bin_infos[extent_szind_get(extent)].nregs;
		assert(*nfree <= *nregs);
		assert(*nfree * extent_usize_get(extent) <= *size);
	}
}

/*
 * Verbose variant of extent_util_stats_get(): additionally reports bin-wide
 * free/total region counts (bin_nfree/bin_nregs; zeros unless config_stats)
 * and the address of the bin's current slab (slabcur_addr).
 */
void
extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
    size_t *nfree, size_t *nregs, size_t *size,
    size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr) {
	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
	    && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);

	const extent_t *extent = iealloc(tsdn, ptr);
	if (unlikely(extent == NULL)) {
		/* No extent maps ptr; report all zeros / NULL. */
		*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
		*slabcur_addr = NULL;
		return;
	}

	*size = extent_size_get(extent);
	if (!extent_slab_get(extent)) {
		/* Non-slab extent: a single region, no bin stats. */
		*nfree = *bin_nfree = *bin_nregs = 0;
		*nregs = 1;
		*slabcur_addr = NULL;
		return;
	}

	*nfree = extent_nfree_get(extent);
	const szind_t szind = extent_szind_get(extent);
	*nregs = bin_infos[szind].nregs;
	assert(*nfree <= *nregs);
	assert(*nfree * extent_usize_get(extent) <= *size);

	const arena_t *arena = extent_arena_get(extent);
	assert(arena != NULL);
	const unsigned binshard = extent_binshard_get(extent);
	bin_t *bin = &arena->bins[szind].bin_shards[binshard];

	/* Bin stats are read under the bin lock. */
	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats) {
		*bin_nregs = *nregs * bin->stats.curslabs;
		assert(*bin_nregs >= bin->stats.curregs);
		*bin_nfree = *bin_nregs - bin->stats.curregs;
	} else {
		*bin_nfree = *bin_nregs = 0;
	}
	*slabcur_addr = extent_addr_get(bin->slabcur);
	assert(*slabcur_addr != NULL);
	malloc_mutex_unlock(tsdn, &bin->lock);
}