#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"

/******************************************************************************/
/* Data. */

rtree_t			extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t		extent_mutex_pool;

size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;

static const bitmap_info_t extents_bitmap_info =
    BITMAP_INFO_INITIALIZER(NPSIZES+1);

static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit,
    unsigned arena_ind);
static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
static bool extent_decommit_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#ifdef PAGES_CAN_PURGE_LAZY
static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
#ifdef PAGES_CAN_PURGE_FORCED
static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
#ifdef JEMALLOC_MAPS_COALESCE
static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t size_a, size_t size_b, bool committed,
    unsigned arena_ind);
#endif
static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained);
#ifdef JEMALLOC_MAPS_COALESCE
static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
    size_t size_a, void *addr_b, size_t size_b, bool committed,
    unsigned arena_ind);
#endif
static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained);

const extent_hooks_t extent_hooks_default = {
	extent_alloc_default,
	extent_dalloc_default,
	extent_destroy_default,
	extent_commit_default,
	extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
	,
	extent_purge_lazy_default
#else
	,
	NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
	,
	extent_purge_forced_default
#else
	,
	NULL
#endif
#ifdef JEMALLOC_MAPS_COALESCE
	,
	extent_split_default,
	extent_merge_default
#endif
};
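
/*
 * A brief usage sketch (illustrative only; "my_hooks" is a hypothetical
 * user-defined table): applications replace the defaults above on a per-arena
 * basis through the documented "arena.<i>.extent_hooks" mallctl, e.g.
 *
 *	extent_hooks_t *new_hooks = &my_hooks;
 *	mallctl("arena.0.extent_hooks", NULL, NULL, (void *)&new_hooks,
 *	    sizeof(extent_hooks_t *));
 */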

/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
static atomic_zu_t highpages;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
    size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
    bool *zero, bool *commit, bool growing_retained);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
    bool growing_retained);

/******************************************************************************/

ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link,
    extent_esnead_comp)

typedef enum {
	lock_result_success,
	lock_result_failure,
	lock_result_no_extent
} lock_result_t;

static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
    extent_t **result) {
	extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
	    elm, true);

	if (extent1 == NULL) {
		return lock_result_no_extent;
	}
	/*
	 * It's possible that the extent changed out from under us, and with it
	 * the leaf->extent mapping.  We have to recheck while holding the lock.
	 */
	extent_lock(tsdn, extent1);
	extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
	    &extents_rtree, elm, true);

	if (extent1 == extent2) {
		*result = extent1;
		return lock_result_success;
	} else {
		extent_unlock(tsdn, extent1);
		return lock_result_failure;
	}
}

/*
 * Returns a pool-locked extent_t * if there's one associated with the given
 * address, and NULL otherwise.
 */
static extent_t *
extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
	extent_t *ret = NULL;
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
	    rtree_ctx, (uintptr_t)addr, false, false);
	if (elm == NULL) {
		return NULL;
	}
	lock_result_t lock_result;
	do {
		lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret);
	} while (lock_result == lock_result_failure);
	return ret;
}

extent_t *
extent_alloc(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_t *extent = extent_avail_first(&arena->extent_avail);
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
		return base_alloc_extent(tsdn, arena->base);
	}
	extent_avail_remove(&arena->extent_avail, extent);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
	return extent;
}

void
extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_avail_insert(&arena->extent_avail, extent);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
}

extent_hooks_t *
extent_hooks_get(arena_t *arena) {
	return base_extent_hooks_get(arena->base);
}

extent_hooks_t *
extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
	background_thread_info_t *info;
	if (have_background_thread) {
		info = arena_background_thread_info_get(arena);
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	}
	extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
	if (have_background_thread) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
	}

	return ret;
}

static void
extent_hooks_assure_initialized(arena_t *arena,
    extent_hooks_t **r_extent_hooks) {
	if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
		*r_extent_hooks = extent_hooks_get(arena);
	}
}

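/*
 * Round a page-aligned size down (floor) or up (ceil) to the nearest page size
 * class.  Sizes stored in an extents_t heap are quantized with the floor
 * variant so that heap selection by pszind_t works.  (These are non-static
 * when JEMALLOC_JET is defined so that tests can exercise them directly.)
 */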
#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_floor(size_t size) {
	size_t ret;
	pszind_t pind;

	assert(size > 0);
	assert((size & PAGE_MASK) == 0);

	pind = sz_psz2ind(size - sz_large_pad + 1);
	if (pind == 0) {
		/*
		 * Avoid underflow.  This short-circuit would also do the right
		 * thing for all sizes in the range for which there are
		 * PAGE-spaced size classes, but it's simplest to just handle
		 * the one case that would cause erroneous results.
		 */
		return size;
	}
	ret = sz_pind2sz(pind - 1) + sz_large_pad;
	assert(ret <= size);
	return ret;
}

#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_ceil(size_t size) {
	size_t ret;

	assert(size > 0);
	assert(size - sz_large_pad <= LARGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	ret = extent_size_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large extent,
		 * because under-sized extents may be mixed in.  This only
		 * happens when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
		    sz_large_pad;
	}
	return ret;
}

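/*
 * Worked example (assuming 4 KiB pages, sz_large_pad == 0, and the usual four
 * page size classes per size doubling): 24 KiB is itself a class, so both the
 * floor and the ceil are 24 KiB; 36 KiB falls between the 32 KiB and 40 KiB
 * classes, so extent_size_quantize_floor(36 KiB) == 32 KiB and
 * extent_size_quantize_ceil(36 KiB) == 40 KiB.
 */
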
/* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)

bool
extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
    bool delay_coalesce) {
	if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	for (unsigned i = 0; i < NPSIZES+1; i++) {
		extent_heap_new(&extents->heaps[i]);
	}
	bitmap_init(extents->bitmap, &extents_bitmap_info, true);
	extent_list_init(&extents->lru);
	atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
	extents->state = state;
	extents->delay_coalesce = delay_coalesce;
	return false;
}

extent_state_t
extents_state_get(const extents_t *extents) {
	return extents->state;
}

size_t
extents_npages_get(extents_t *extents) {
	return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
}

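/*
 * extents_insert_locked and extents_remove_locked keep three views of the
 * container consistent: the per-size-class heaps (plus the bitmap of non-empty
 * heaps), the LRU list, and the cached page count in extents->npages.
 */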
static void
extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_unset(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	extent_heap_insert(&extents->heaps[pind], extent);
	extent_list_append(&extents->lru, extent);
	size_t npages = size >> LG_PAGE;
	/*
	 * All modifications to npages hold the mutex (as asserted above), so we
	 * don't need an atomic fetch-add; we can get by with a load followed by
	 * a store.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	atomic_store_zu(&extents->npages, cur_extents_npages + npages,
	    ATOMIC_RELAXED);
}

static void
extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	extent_heap_remove(&extents->heaps[pind], extent);
	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_set(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	extent_list_remove(&extents->lru, extent);
	size_t npages = size >> LG_PAGE;
	/*
	 * As in extents_insert_locked, we hold extents->mtx and so don't need
	 * atomic operations for updating extents->npages.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	assert(cur_extents_npages >= npages);
	atomic_store_zu(&extents->npages,
	    cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}

/*
 * Find an extent with size [min_size, max_size) to satisfy the alignment
 * requirement.  For each size, try only the first extent in the heap.
 */
static extent_t *
extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
    size_t alignment) {
	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
	pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));

	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
	    &extents_bitmap_info, (size_t)pind); i < pind_max; i =
	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)i+1)) {
		assert(i < NPSIZES);
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		uintptr_t base = (uintptr_t)extent_base_get(extent);
		size_t candidate_size = extent_size_get(extent);
		assert(candidate_size >= min_size);

		uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
		    PAGE_CEILING(alignment));
		if (base > next_align || base + candidate_size <= next_align) {
			/* Overflow or not crossing the next alignment. */
			continue;
		}

		size_t leadsize = next_align - base;
		if (candidate_size - leadsize >= min_size) {
			return extent;
		}
	}

	return NULL;
}

/* Do any-best-fit extent selection, i.e. select any extent that best fits. */
static extent_t *
extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
	pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)pind);
	if (i < NPSIZES+1) {
		/*
		 * In order to reduce fragmentation, avoid reusing and splitting
		 * large extents for much smaller sizes.
		 */
		if ((sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
			return NULL;
		}
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		assert(extent_size_get(extent) >= size);
		return extent;
	}

	return NULL;
}

/*
 * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
 * large enough.
 */
static extent_t *
extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
	extent_t *ret = NULL;

	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
	    &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)i+1)) {
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		assert(extent_size_get(extent) >= size);
		if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
			ret = extent;
		}
		if (i == NPSIZES) {
			break;
		}
		assert(i < NPSIZES);
	}

	return ret;
}

/*
 * Do {best,first}-fit extent selection, where the selection policy choice is
 * based on extents->delay_coalesce.  Best-fit selection requires less
 * searching, but its layout policy is less stable and may cause higher virtual
 * memory fragmentation as a side effect.
 */
static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t esize, size_t alignment) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);

	size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (max_size < esize) {
		return NULL;
	}

	extent_t *extent = extents->delay_coalesce ?
	    extents_best_fit_locked(tsdn, arena, extents, max_size) :
	    extents_first_fit_locked(tsdn, arena, extents, max_size);

	if (alignment > PAGE && extent == NULL) {
		/*
		 * max_size guarantees the alignment requirement but is rather
		 * pessimistic.  Next we try to satisfy the aligned allocation
		 * with sizes in [esize, max_size).
		 */
		extent = extents_fit_alignment(extents, esize, max_size,
		    alignment);
	}

	return extent;
}

static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent) {
	extent_state_set(extent, extent_state_active);
	bool coalesced;
	extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, extent, &coalesced, false);
	extent_state_set(extent, extents_state_get(extents));

	if (!coalesced) {
		return true;
	}
	extents_insert_locked(tsdn, extents, extent);
	return false;
}

extent_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	assert(size + pad != 0);
	assert(alignment != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
	    new_addr, size, pad, alignment, slab, szind, zero, commit, false);
	assert(extent == NULL || extent_dumpable_get(extent));
	return extent;
}

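/*
 * Returns an unused extent to the given container: the address is reset to the
 * extent base and the zeroed flag is cleared before the extent is recorded.
 */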
void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	assert(extent_dumpable_get(extent));
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_addr_set(extent, extent_base_get(extent));
	extent_zeroed_set(extent, false);

	extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
}

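/*
 * Removes and returns the LRU extent, or NULL if eviction would drop the
 * container below npages_min pages.  With delayed coalescing, the extent is
 * first coalesced as far as possible before being handed back.
 */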
extent_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, size_t npages_min) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	malloc_mutex_lock(tsdn, &extents->mtx);

	/*
	 * Get the LRU coalesced extent, if any.  If coalescing was delayed,
	 * the loop will iterate until the LRU extent is fully coalesced.
	 */
	extent_t *extent;
	while (true) {
		/* Get the LRU extent, if any. */
		extent = extent_list_first(&extents->lru);
		if (extent == NULL) {
			goto label_return;
		}
		/* Check the eviction limit. */
		size_t extents_npages = atomic_load_zu(&extents->npages,
		    ATOMIC_RELAXED);
		if (extents_npages <= npages_min) {
			extent = NULL;
			goto label_return;
		}
		extents_remove_locked(tsdn, extents, extent);
		if (!extents->delay_coalesce) {
			break;
		}
		/* Try to coalesce. */
		if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
		    rtree_ctx, extents, extent)) {
			break;
		}
		/*
		 * The LRU extent was just coalesced and the result placed in
		 * the LRU at its neighbor's position.  Start over.
		 */
	}

	/*
	 * Either mark the extent active or deregister it to protect against
	 * concurrent operations.
	 */
	switch (extents_state_get(extents)) {
	case extent_state_active:
		not_reached();
	case extent_state_dirty:
	case extent_state_muzzy:
		extent_state_set(extent, extent_state_active);
		break;
	case extent_state_retained:
		extent_deregister(tsdn, extent);
		break;
	default:
		not_reached();
	}

label_return:
	malloc_mutex_unlock(tsdn, &extents->mtx);
	return extent;
}

static void
extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent, bool growing_retained) {
	/*
	 * Leak extent after making sure its pages have already been purged, so
	 * that this is only a virtual memory leak.
	 */
	if (extents_state_get(extents) == extent_state_dirty) {
		if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
		    extent, 0, extent_size_get(extent), growing_retained)) {
			extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
			    extent, 0, extent_size_get(extent),
			    growing_retained);
		}
	}
	extent_dalloc(tsdn, arena, extent);
}

void
extents_prefork(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_prefork(tsdn, &extents->mtx);
}

void
extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_parent(tsdn, &extents->mtx);
}

void
extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_child(tsdn, &extents->mtx);
}

static void
extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extent_state_active);

	extent_state_set(extent, extents_state_get(extents));
	extents_insert_locked(tsdn, extents, extent);
}

static void
extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_deactivate_locked(tsdn, arena, extents, extent);
	malloc_mutex_unlock(tsdn, &extents->mtx);
}

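/*
 * Counterpart of extent_deactivate_locked: removes an extent from the
 * container (heaps, bitmap, LRU, npages) and marks it active.
 */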
static void
extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extents_state_get(extents));

	extents_remove_locked(tsdn, extents, extent);
	extent_state_set(extent, extent_state_active);
}

static bool
extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    const extent_t *extent, bool dependent, bool init_missing,
    rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
	*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_base_get(extent), dependent, init_missing);
	if (!dependent && *r_elm_a == NULL) {
		return true;
	}
	assert(*r_elm_a != NULL);

	*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_last_get(extent), dependent, init_missing);
	if (!dependent && *r_elm_b == NULL) {
		return true;
	}
	assert(*r_elm_b != NULL);

	return false;
}

static void
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
    rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
	rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
	if (elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
		    slab);
	}
}

static void
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
    szind_t szind) {
	assert(extent_slab_get(extent));

	/* Register interior. */
	for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_write(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE), extent, szind, true);
	}
}

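/*
 * Together with extent_gdump_sub below, this maintains curpages, the number of
 * pages in active extents; whenever curpages reaches a new high-water mark
 * (highpages) and gdump is enabled, a profile dump is triggered.
 */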
static void
extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
	cassert(config_prof);
	/* prof_gdump() requirement. */
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (opt_prof && extent_state_get(extent) == extent_state_active) {
		size_t nadd = extent_size_get(extent) >> LG_PAGE;
		size_t cur = atomic_fetch_add_zu(&curpages, nadd,
		    ATOMIC_RELAXED) + nadd;
		size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
		while (cur > high && !atomic_compare_exchange_weak_zu(
		    &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highpages update race.
			 * Note that high is updated in case of CAS failure.
			 */
		}
		if (cur > high && prof_gdump_get_unlocked()) {
			prof_gdump(tsdn);
		}
	}
}

static void
extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
	cassert(config_prof);

	if (opt_prof && extent_state_get(extent) == extent_state_active) {
		size_t nsub = extent_size_get(extent) >> LG_PAGE;
		assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
		atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
	}
}

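/*
 * Records the extent in the global rtree: the first and last pages always, and
 * every interior page as well for slab extents, so that interior pointers can
 * be mapped back to the extent.  gdump_add controls whether the newly mapped
 * pages are also reported to the gdump accounting above.
 */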
static bool
extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *elm_a, *elm_b;

	/*
	 * We need to hold the lock to protect against a concurrent coalesce
	 * operation that sees us in a partial state.
	 */
	extent_lock(tsdn, extent);

	if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
	    &elm_a, &elm_b)) {
		return true;
	}

	szind_t szind = extent_szind_get_maybe_invalid(extent);
	bool slab = extent_slab_get(extent);
	extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
	if (slab) {
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}

	extent_unlock(tsdn, extent);

	if (config_prof && gdump_add) {
		extent_gdump_add(tsdn, extent);
	}

	return false;
}

static bool
extent_register(tsdn_t *tsdn, extent_t *extent) {
	return extent_register_impl(tsdn, extent, true);
}

static bool
extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
	return extent_register_impl(tsdn, extent, false);
}

static void
extent_reregister(tsdn_t *tsdn, extent_t *extent) {
	bool err = extent_register(tsdn, extent);
	assert(!err);
}

/*
 * Removes all pointers to the given extent from the global rtree indices for
 * its interior.  This is relevant for slab extents, for which we need to do
 * metadata lookups at places other than the head of the extent.  We deregister
 * on the interior, then, when an extent moves from being an active slab to an
 * inactive state.
 */
static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    extent_t *extent) {
	size_t i;

	assert(extent_slab_get(extent));

	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_clear(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE));
	}
}

/*
 * Removes all pointers to the given extent from the global rtree.
 */
static void
extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *elm_a, *elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
	    &elm_a, &elm_b);

	extent_lock(tsdn, extent);

	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
	if (extent_slab_get(extent)) {
		extent_interior_deregister(tsdn, rtree_ctx, extent);
		extent_slab_set(extent, false);
	}

	extent_unlock(tsdn, extent);

	if (config_prof && gdump) {
		extent_gdump_sub(tsdn, extent);
	}
}

static void
extent_deregister(tsdn_t *tsdn, extent_t *extent) {
	extent_deregister_impl(tsdn, extent, true);
}

static void
extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
	extent_deregister_impl(tsdn, extent, false);
}

/*
 * Tries to find and remove an extent from extents that can be used for the
 * given allocation request.
 */
static extent_t *
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
	assert(alignment > 0);
	if (config_debug && new_addr != NULL) {
		/*
		 * Non-NULL new_addr has two use cases:
		 *
		 *   1) Recycle a known-extant extent, e.g. during purging.
		 *   2) Perform in-place expanding reallocation.
		 *
		 * Regardless of use case, new_addr must either refer to a
		 * non-existing extent, or to the base of an extant extent,
		 * since only active slabs support interior lookups (which of
		 * course cannot be recycled).
		 */
		assert(PAGE_ADDR2BASE(new_addr) == new_addr);
		assert(pad == 0);
		assert(alignment <= PAGE);
	}

	size_t esize = size + pad;
	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_hooks_assure_initialized(arena, r_extent_hooks);
	extent_t *extent;
	if (new_addr != NULL) {
		extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr);
		if (extent != NULL) {
			/*
			 * We might null-out extent to report an error, but we
			 * still need to unlock the associated mutex after.
			 */
			extent_t *unlock_extent = extent;
			assert(extent_base_get(extent) == new_addr);
			if (extent_arena_get(extent) != arena ||
			    extent_size_get(extent) < esize ||
			    extent_state_get(extent) !=
			    extents_state_get(extents)) {
				extent = NULL;
			}
			extent_unlock(tsdn, unlock_extent);
		}
	} else {
		extent = extents_fit_locked(tsdn, arena, extents, esize,
		    alignment);
	}
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &extents->mtx);
		return NULL;
	}

	extent_activate_locked(tsdn, arena, extents, extent);
	malloc_mutex_unlock(tsdn, &extents->mtx);

	return extent;
}

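/*
 * A sketch of the split geometry used below (alignment padding in front, any
 * unused space behind):
 *
 *	| lead (leadsize) | extent (size + pad) | trail (trailsize) |
 *	^ original base     ^ aligned address
 */
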
930*0ef50b4eSJason Evans */ 931*0ef50b4eSJason Evans extent_split_interior_error 932*0ef50b4eSJason Evans } extent_split_interior_result_t; 933*0ef50b4eSJason Evans 934*0ef50b4eSJason Evans static extent_split_interior_result_t 935*0ef50b4eSJason Evans extent_split_interior(tsdn_t *tsdn, arena_t *arena, 936*0ef50b4eSJason Evans extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, 937*0ef50b4eSJason Evans /* The result of splitting, in case of success. */ 938*0ef50b4eSJason Evans extent_t **extent, extent_t **lead, extent_t **trail, 939*0ef50b4eSJason Evans /* The mess to clean up, in case of error. */ 940*0ef50b4eSJason Evans extent_t **to_leak, extent_t **to_salvage, 941b7eaed25SJason Evans void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, 942*0ef50b4eSJason Evans szind_t szind, bool growing_retained) { 943b7eaed25SJason Evans size_t esize = size + pad; 944*0ef50b4eSJason Evans size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent), 945*0ef50b4eSJason Evans PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent); 946b7eaed25SJason Evans assert(new_addr == NULL || leadsize == 0); 947*0ef50b4eSJason Evans if (extent_size_get(*extent) < leadsize + esize) { 948*0ef50b4eSJason Evans return extent_split_interior_cant_alloc; 949*0ef50b4eSJason Evans } 950*0ef50b4eSJason Evans size_t trailsize = extent_size_get(*extent) - leadsize - esize; 951*0ef50b4eSJason Evans 952*0ef50b4eSJason Evans *lead = NULL; 953*0ef50b4eSJason Evans *trail = NULL; 954*0ef50b4eSJason Evans *to_leak = NULL; 955*0ef50b4eSJason Evans *to_salvage = NULL; 956b7eaed25SJason Evans 957b7eaed25SJason Evans /* Split the lead. */ 958b7eaed25SJason Evans if (leadsize != 0) { 959*0ef50b4eSJason Evans *lead = *extent; 960*0ef50b4eSJason Evans *extent = extent_split_impl(tsdn, arena, r_extent_hooks, 961*0ef50b4eSJason Evans *lead, leadsize, NSIZES, false, esize + trailsize, szind, 962b7eaed25SJason Evans slab, growing_retained); 963*0ef50b4eSJason Evans if (*extent == NULL) { 964*0ef50b4eSJason Evans *to_leak = *lead; 965*0ef50b4eSJason Evans *lead = NULL; 966*0ef50b4eSJason Evans return extent_split_interior_error; 967b7eaed25SJason Evans } 968b7eaed25SJason Evans } 969b7eaed25SJason Evans 970b7eaed25SJason Evans /* Split the trail. */ 971b7eaed25SJason Evans if (trailsize != 0) { 972*0ef50b4eSJason Evans *trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent, 973*0ef50b4eSJason Evans esize, szind, slab, trailsize, NSIZES, false, 974*0ef50b4eSJason Evans growing_retained); 975*0ef50b4eSJason Evans if (*trail == NULL) { 976*0ef50b4eSJason Evans *to_leak = *extent; 977*0ef50b4eSJason Evans *to_salvage = *lead; 978*0ef50b4eSJason Evans *lead = NULL; 979*0ef50b4eSJason Evans *extent = NULL; 980*0ef50b4eSJason Evans return extent_split_interior_error; 981b7eaed25SJason Evans } 982*0ef50b4eSJason Evans } 983*0ef50b4eSJason Evans 984*0ef50b4eSJason Evans if (leadsize == 0 && trailsize == 0) { 985b7eaed25SJason Evans /* 986b7eaed25SJason Evans * Splitting causes szind to be set as a side effect, but no 987b7eaed25SJason Evans * splitting occurred. 
988b7eaed25SJason Evans */ 989*0ef50b4eSJason Evans extent_szind_set(*extent, szind); 990b7eaed25SJason Evans if (szind != NSIZES) { 991b7eaed25SJason Evans rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, 992*0ef50b4eSJason Evans (uintptr_t)extent_addr_get(*extent), szind, slab); 993*0ef50b4eSJason Evans if (slab && extent_size_get(*extent) > PAGE) { 994b7eaed25SJason Evans rtree_szind_slab_update(tsdn, &extents_rtree, 995b7eaed25SJason Evans rtree_ctx, 996*0ef50b4eSJason Evans (uintptr_t)extent_past_get(*extent) - 997b7eaed25SJason Evans (uintptr_t)PAGE, szind, slab); 998b7eaed25SJason Evans } 999b7eaed25SJason Evans } 1000b7eaed25SJason Evans } 1001b7eaed25SJason Evans 1002*0ef50b4eSJason Evans return extent_split_interior_ok; 1003b7eaed25SJason Evans } 1004b7eaed25SJason Evans 1005*0ef50b4eSJason Evans /* 1006*0ef50b4eSJason Evans * This fulfills the indicated allocation request out of the given extent (which 1007*0ef50b4eSJason Evans * the caller should have ensured was big enough). If there's any unused space 1008*0ef50b4eSJason Evans * before or after the resulting allocation, that space is given its own extent 1009*0ef50b4eSJason Evans * and put back into extents. 1010*0ef50b4eSJason Evans */ 1011*0ef50b4eSJason Evans static extent_t * 1012*0ef50b4eSJason Evans extent_recycle_split(tsdn_t *tsdn, arena_t *arena, 1013*0ef50b4eSJason Evans extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, 1014*0ef50b4eSJason Evans void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, 1015*0ef50b4eSJason Evans szind_t szind, extent_t *extent, bool growing_retained) { 1016*0ef50b4eSJason Evans extent_t *lead; 1017*0ef50b4eSJason Evans extent_t *trail; 1018*0ef50b4eSJason Evans extent_t *to_leak; 1019*0ef50b4eSJason Evans extent_t *to_salvage; 1020*0ef50b4eSJason Evans 1021*0ef50b4eSJason Evans extent_split_interior_result_t result = extent_split_interior( 1022*0ef50b4eSJason Evans tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail, 1023*0ef50b4eSJason Evans &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind, 1024*0ef50b4eSJason Evans growing_retained); 1025*0ef50b4eSJason Evans 1026*0ef50b4eSJason Evans if (result == extent_split_interior_ok) { 1027*0ef50b4eSJason Evans if (lead != NULL) { 1028*0ef50b4eSJason Evans extent_deactivate(tsdn, arena, extents, lead); 1029*0ef50b4eSJason Evans } 1030*0ef50b4eSJason Evans if (trail != NULL) { 1031*0ef50b4eSJason Evans extent_deactivate(tsdn, arena, extents, trail); 1032*0ef50b4eSJason Evans } 1033*0ef50b4eSJason Evans return extent; 1034*0ef50b4eSJason Evans } else { 1035*0ef50b4eSJason Evans /* 1036*0ef50b4eSJason Evans * We should have picked an extent that was large enough to 1037*0ef50b4eSJason Evans * fulfill our allocation request. 
1038*0ef50b4eSJason Evans */ 1039*0ef50b4eSJason Evans assert(result == extent_split_interior_error); 1040*0ef50b4eSJason Evans if (to_salvage != NULL) { 1041*0ef50b4eSJason Evans extent_deregister(tsdn, to_salvage); 1042*0ef50b4eSJason Evans } 1043*0ef50b4eSJason Evans if (to_leak != NULL) { 1044*0ef50b4eSJason Evans void *leak = extent_base_get(to_leak); 1045*0ef50b4eSJason Evans extent_deregister_no_gdump_sub(tsdn, to_leak); 1046*0ef50b4eSJason Evans extents_leak(tsdn, arena, r_extent_hooks, extents, 1047*0ef50b4eSJason Evans to_leak, growing_retained); 1048*0ef50b4eSJason Evans assert(extent_lock_from_addr(tsdn, rtree_ctx, leak) 1049*0ef50b4eSJason Evans == NULL); 1050*0ef50b4eSJason Evans } 1051*0ef50b4eSJason Evans return NULL; 1052*0ef50b4eSJason Evans } 1053*0ef50b4eSJason Evans unreachable(); 1054*0ef50b4eSJason Evans } 1055*0ef50b4eSJason Evans 1056*0ef50b4eSJason Evans /* 1057*0ef50b4eSJason Evans * Tries to satisfy the given allocation request by reusing one of the extents 1058*0ef50b4eSJason Evans * in the given extents_t. 1059*0ef50b4eSJason Evans */ 1060b7eaed25SJason Evans static extent_t * 1061b7eaed25SJason Evans extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, 1062b7eaed25SJason Evans extents_t *extents, void *new_addr, size_t size, size_t pad, 1063b7eaed25SJason Evans size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit, 1064b7eaed25SJason Evans bool growing_retained) { 1065b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1066b7eaed25SJason Evans WITNESS_RANK_CORE, growing_retained ? 1 : 0); 1067b7eaed25SJason Evans assert(new_addr == NULL || !slab); 1068b7eaed25SJason Evans assert(pad == 0 || !slab); 1069b7eaed25SJason Evans assert(!*zero || !slab); 1070b7eaed25SJason Evans 1071b7eaed25SJason Evans rtree_ctx_t rtree_ctx_fallback; 1072b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 1073b7eaed25SJason Evans 1074b7eaed25SJason Evans extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks, 1075*0ef50b4eSJason Evans rtree_ctx, extents, new_addr, size, pad, alignment, slab, 1076*0ef50b4eSJason Evans growing_retained); 1077b7eaed25SJason Evans if (extent == NULL) { 1078b7eaed25SJason Evans return NULL; 1079b7eaed25SJason Evans } 1080b7eaed25SJason Evans 1081b7eaed25SJason Evans extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx, 1082b7eaed25SJason Evans extents, new_addr, size, pad, alignment, slab, szind, extent, 1083b7eaed25SJason Evans growing_retained); 1084b7eaed25SJason Evans if (extent == NULL) { 1085b7eaed25SJason Evans return NULL; 1086b7eaed25SJason Evans } 1087b7eaed25SJason Evans 1088b7eaed25SJason Evans if (*commit && !extent_committed_get(extent)) { 1089b7eaed25SJason Evans if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 1090b7eaed25SJason Evans 0, extent_size_get(extent), growing_retained)) { 1091b7eaed25SJason Evans extent_record(tsdn, arena, r_extent_hooks, extents, 1092b7eaed25SJason Evans extent, growing_retained); 1093b7eaed25SJason Evans return NULL; 1094b7eaed25SJason Evans } 1095b7eaed25SJason Evans extent_zeroed_set(extent, true); 1096b7eaed25SJason Evans } 1097b7eaed25SJason Evans 1098*0ef50b4eSJason Evans if (extent_committed_get(extent)) { 1099*0ef50b4eSJason Evans *commit = true; 1100*0ef50b4eSJason Evans } 1101*0ef50b4eSJason Evans if (extent_zeroed_get(extent)) { 1102*0ef50b4eSJason Evans *zero = true; 1103*0ef50b4eSJason Evans } 1104*0ef50b4eSJason Evans 1105b7eaed25SJason Evans if (pad 
!= 0) { 1106b7eaed25SJason Evans extent_addr_randomize(tsdn, extent, alignment); 1107b7eaed25SJason Evans } 1108b7eaed25SJason Evans assert(extent_state_get(extent) == extent_state_active); 1109b7eaed25SJason Evans if (slab) { 1110b7eaed25SJason Evans extent_slab_set(extent, slab); 1111b7eaed25SJason Evans extent_interior_register(tsdn, rtree_ctx, extent, szind); 1112b7eaed25SJason Evans } 1113b7eaed25SJason Evans 1114b7eaed25SJason Evans if (*zero) { 1115b7eaed25SJason Evans void *addr = extent_base_get(extent); 1116b7eaed25SJason Evans size_t size = extent_size_get(extent); 1117b7eaed25SJason Evans if (!extent_zeroed_get(extent)) { 1118b7eaed25SJason Evans if (pages_purge_forced(addr, size)) { 1119b7eaed25SJason Evans memset(addr, 0, size); 1120b7eaed25SJason Evans } 1121b7eaed25SJason Evans } else if (config_debug) { 1122b7eaed25SJason Evans size_t *p = (size_t *)(uintptr_t)addr; 1123b7eaed25SJason Evans for (size_t i = 0; i < size / sizeof(size_t); i++) { 1124b7eaed25SJason Evans assert(p[i] == 0); 1125b7eaed25SJason Evans } 1126b7eaed25SJason Evans } 1127b7eaed25SJason Evans } 1128b7eaed25SJason Evans return extent; 1129b7eaed25SJason Evans } 1130b7eaed25SJason Evans 1131b7eaed25SJason Evans /* 1132b7eaed25SJason Evans * If the caller specifies (!*zero), it is still possible to receive zeroed 1133b7eaed25SJason Evans * memory, in which case *zero is toggled to true. arena_extent_alloc() takes 1134b7eaed25SJason Evans * advantage of this to avoid demanding zeroed extents, but taking advantage of 1135b7eaed25SJason Evans * them if they are returned. 1136b7eaed25SJason Evans */ 1137b7eaed25SJason Evans static void * 1138b7eaed25SJason Evans extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, 1139b7eaed25SJason Evans size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) { 1140b7eaed25SJason Evans void *ret; 1141b7eaed25SJason Evans 1142b7eaed25SJason Evans assert(size != 0); 1143b7eaed25SJason Evans assert(alignment != 0); 1144b7eaed25SJason Evans 1145b7eaed25SJason Evans /* "primary" dss. */ 1146b7eaed25SJason Evans if (have_dss && dss_prec == dss_prec_primary && (ret = 1147b7eaed25SJason Evans extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, 1148b7eaed25SJason Evans commit)) != NULL) { 1149b7eaed25SJason Evans return ret; 1150b7eaed25SJason Evans } 1151b7eaed25SJason Evans /* mmap. */ 1152b7eaed25SJason Evans if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit)) 1153b7eaed25SJason Evans != NULL) { 1154b7eaed25SJason Evans return ret; 1155b7eaed25SJason Evans } 1156b7eaed25SJason Evans /* "secondary" dss. */ 1157b7eaed25SJason Evans if (have_dss && dss_prec == dss_prec_secondary && (ret = 1158b7eaed25SJason Evans extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, 1159b7eaed25SJason Evans commit)) != NULL) { 1160b7eaed25SJason Evans return ret; 1161b7eaed25SJason Evans } 1162b7eaed25SJason Evans 1163b7eaed25SJason Evans /* All strategies for allocation failed. 
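 * (The order attempted above is primary dss, then mmap, then secondary dss,
 * with dss participation selected by dss_prec.  Per the contract documented
 * above this function, a caller that passes zero=false must still check the
 * flag afterwards.  A hypothetical caller sketch, not part of this file:
 *
 *      bool zero = false, commit = true;
 *      void *p = extent_alloc_core(tsdn, arena, NULL, sz, PAGE, &zero,
 *          &commit, dss_prec_secondary);
 *      if (p != NULL && !zero) {
 *          memset(p, 0, sz);       // Pay for zeroing only when it was not free.
 *      }
 * )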
*/ 1164b7eaed25SJason Evans return NULL; 1165b7eaed25SJason Evans } 1166b7eaed25SJason Evans 1167b7eaed25SJason Evans static void * 1168b7eaed25SJason Evans extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, 1169b7eaed25SJason Evans size_t size, size_t alignment, bool *zero, bool *commit) { 1170*0ef50b4eSJason Evans void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero, 1171b7eaed25SJason Evans commit, (dss_prec_t)atomic_load_u(&arena->dss_prec, 1172b7eaed25SJason Evans ATOMIC_RELAXED)); 1173*0ef50b4eSJason Evans if (have_madvise_huge && ret) { 1174*0ef50b4eSJason Evans pages_set_thp_state(ret, size); 1175*0ef50b4eSJason Evans } 1176b7eaed25SJason Evans return ret; 1177b7eaed25SJason Evans } 1178b7eaed25SJason Evans 1179b7eaed25SJason Evans static void * 1180b7eaed25SJason Evans extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, 1181b7eaed25SJason Evans size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { 1182b7eaed25SJason Evans tsdn_t *tsdn; 1183b7eaed25SJason Evans arena_t *arena; 1184b7eaed25SJason Evans 1185b7eaed25SJason Evans tsdn = tsdn_fetch(); 1186b7eaed25SJason Evans arena = arena_get(tsdn, arena_ind, false); 1187b7eaed25SJason Evans /* 1188b7eaed25SJason Evans * The arena we're allocating on behalf of must have been initialized 1189b7eaed25SJason Evans * already. 1190b7eaed25SJason Evans */ 1191b7eaed25SJason Evans assert(arena != NULL); 1192b7eaed25SJason Evans 1193b7eaed25SJason Evans return extent_alloc_default_impl(tsdn, arena, new_addr, size, 1194b7eaed25SJason Evans alignment, zero, commit); 1195b7eaed25SJason Evans } 1196b7eaed25SJason Evans 11978b2f5aafSJason Evans static void 11988b2f5aafSJason Evans extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) { 11998b2f5aafSJason Evans tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); 1200*0ef50b4eSJason Evans if (arena == arena_get(tsd_tsdn(tsd), 0, false)) { 1201*0ef50b4eSJason Evans /* 1202*0ef50b4eSJason Evans * The only legitimate case of customized extent hooks for a0 is 1203*0ef50b4eSJason Evans * hooks with no allocation activities. One such example is to 1204*0ef50b4eSJason Evans * place metadata on pre-allocated resources such as huge pages. 1205*0ef50b4eSJason Evans * In that case, rely on reentrancy_level checks to catch 1206*0ef50b4eSJason Evans * infinite recursions. 1207*0ef50b4eSJason Evans */ 1208*0ef50b4eSJason Evans pre_reentrancy(tsd, NULL); 1209*0ef50b4eSJason Evans } else { 12108b2f5aafSJason Evans pre_reentrancy(tsd, arena); 12118b2f5aafSJason Evans } 1212*0ef50b4eSJason Evans } 12138b2f5aafSJason Evans 12148b2f5aafSJason Evans static void 12158b2f5aafSJason Evans extent_hook_post_reentrancy(tsdn_t *tsdn) { 12168b2f5aafSJason Evans tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); 12178b2f5aafSJason Evans post_reentrancy(tsd); 12188b2f5aafSJason Evans } 12198b2f5aafSJason Evans 1220b7eaed25SJason Evans /* 1221b7eaed25SJason Evans * If virtual memory is retained, create increasingly larger extents from which 1222b7eaed25SJason Evans * to split requested extents in order to limit the total number of disjoint 1223b7eaed25SJason Evans * virtual memory ranges retained by each arena. 
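 * (The series is driven by arena->extent_grow_next, a page-size-class index:
 * each grow maps sz_pind2sz() bytes of the first class in the series large
 * enough for the request, then advances extent_grow_next past that class,
 * clamped to arena->retain_grow_limit, so successive grows draw from ever
 * larger classes.  The search performed below is essentially the following,
 * plus a bailout once the index would pass NPSIZES:
 *
 *      pszind_t skip = 0;
 *      size_t alloc_size = sz_pind2sz(arena->extent_grow_next + skip);
 *      while (alloc_size < alloc_size_min) {
 *          skip++;
 *          alloc_size = sz_pind2sz(arena->extent_grow_next + skip);
 *      }
 * )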
1224b7eaed25SJason Evans */ 1225b7eaed25SJason Evans static extent_t * 1226b7eaed25SJason Evans extent_grow_retained(tsdn_t *tsdn, arena_t *arena, 1227b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment, 1228b7eaed25SJason Evans bool slab, szind_t szind, bool *zero, bool *commit) { 1229b7eaed25SJason Evans malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx); 1230b7eaed25SJason Evans assert(pad == 0 || !slab); 1231b7eaed25SJason Evans assert(!*zero || !slab); 1232b7eaed25SJason Evans 1233b7eaed25SJason Evans size_t esize = size + pad; 1234b7eaed25SJason Evans size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE; 1235b7eaed25SJason Evans /* Beware size_t wrap-around. */ 1236b7eaed25SJason Evans if (alloc_size_min < esize) { 1237b7eaed25SJason Evans goto label_err; 1238b7eaed25SJason Evans } 1239b7eaed25SJason Evans /* 1240b7eaed25SJason Evans * Find the next extent size in the series that would be large enough to 1241b7eaed25SJason Evans * satisfy this request. 1242b7eaed25SJason Evans */ 1243b7eaed25SJason Evans pszind_t egn_skip = 0; 1244b7eaed25SJason Evans size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); 1245b7eaed25SJason Evans while (alloc_size < alloc_size_min) { 1246b7eaed25SJason Evans egn_skip++; 1247b7eaed25SJason Evans if (arena->extent_grow_next + egn_skip == NPSIZES) { 1248b7eaed25SJason Evans /* Outside legal range. */ 1249b7eaed25SJason Evans goto label_err; 1250b7eaed25SJason Evans } 1251b7eaed25SJason Evans assert(arena->extent_grow_next + egn_skip < NPSIZES); 1252b7eaed25SJason Evans alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); 1253b7eaed25SJason Evans } 1254b7eaed25SJason Evans 1255b7eaed25SJason Evans extent_t *extent = extent_alloc(tsdn, arena); 1256b7eaed25SJason Evans if (extent == NULL) { 1257b7eaed25SJason Evans goto label_err; 1258b7eaed25SJason Evans } 1259b7eaed25SJason Evans bool zeroed = false; 1260b7eaed25SJason Evans bool committed = false; 1261b7eaed25SJason Evans 1262b7eaed25SJason Evans void *ptr; 1263b7eaed25SJason Evans if (*r_extent_hooks == &extent_hooks_default) { 1264*0ef50b4eSJason Evans ptr = extent_alloc_default_impl(tsdn, arena, NULL, 1265*0ef50b4eSJason Evans alloc_size, PAGE, &zeroed, &committed); 1266b7eaed25SJason Evans } else { 12678b2f5aafSJason Evans extent_hook_pre_reentrancy(tsdn, arena); 1268b7eaed25SJason Evans ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL, 1269b7eaed25SJason Evans alloc_size, PAGE, &zeroed, &committed, 1270b7eaed25SJason Evans arena_ind_get(arena)); 12718b2f5aafSJason Evans extent_hook_post_reentrancy(tsdn); 1272b7eaed25SJason Evans } 1273b7eaed25SJason Evans 1274b7eaed25SJason Evans extent_init(extent, arena, ptr, alloc_size, false, NSIZES, 1275b7eaed25SJason Evans arena_extent_sn_next(arena), extent_state_active, zeroed, 1276*0ef50b4eSJason Evans committed, true); 1277b7eaed25SJason Evans if (ptr == NULL) { 1278b7eaed25SJason Evans extent_dalloc(tsdn, arena, extent); 1279b7eaed25SJason Evans goto label_err; 1280b7eaed25SJason Evans } 1281*0ef50b4eSJason Evans 1282b7eaed25SJason Evans if (extent_register_no_gdump_add(tsdn, extent)) { 1283b7eaed25SJason Evans extents_leak(tsdn, arena, r_extent_hooks, 1284b7eaed25SJason Evans &arena->extents_retained, extent, true); 1285b7eaed25SJason Evans goto label_err; 1286b7eaed25SJason Evans } 1287b7eaed25SJason Evans 1288b7eaed25SJason Evans if (extent_zeroed_get(extent) && extent_committed_get(extent)) { 1289b7eaed25SJason Evans *zero = true; 1290b7eaed25SJason Evans } 
1291b7eaed25SJason Evans if (extent_committed_get(extent)) { 1292b7eaed25SJason Evans *commit = true; 1293b7eaed25SJason Evans } 1294b7eaed25SJason Evans 1295*0ef50b4eSJason Evans rtree_ctx_t rtree_ctx_fallback; 1296*0ef50b4eSJason Evans rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 1297*0ef50b4eSJason Evans 1298*0ef50b4eSJason Evans extent_t *lead; 1299*0ef50b4eSJason Evans extent_t *trail; 1300*0ef50b4eSJason Evans extent_t *to_leak; 1301*0ef50b4eSJason Evans extent_t *to_salvage; 1302*0ef50b4eSJason Evans extent_split_interior_result_t result = extent_split_interior( 1303*0ef50b4eSJason Evans tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail, 1304*0ef50b4eSJason Evans &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind, 1305b7eaed25SJason Evans true); 1306*0ef50b4eSJason Evans 1307*0ef50b4eSJason Evans if (result == extent_split_interior_ok) { 1308*0ef50b4eSJason Evans if (lead != NULL) { 1309b7eaed25SJason Evans extent_record(tsdn, arena, r_extent_hooks, 1310b7eaed25SJason Evans &arena->extents_retained, lead, true); 1311b7eaed25SJason Evans } 1312*0ef50b4eSJason Evans if (trail != NULL) { 1313b7eaed25SJason Evans extent_record(tsdn, arena, r_extent_hooks, 1314b7eaed25SJason Evans &arena->extents_retained, trail, true); 1315*0ef50b4eSJason Evans } 1316*0ef50b4eSJason Evans } else { 1317b7eaed25SJason Evans /* 1318*0ef50b4eSJason Evans * We should have allocated a sufficiently large extent; the 1319*0ef50b4eSJason Evans * cant_alloc case should not occur. 1320b7eaed25SJason Evans */ 1321*0ef50b4eSJason Evans assert(result == extent_split_interior_error); 1322*0ef50b4eSJason Evans if (to_salvage != NULL) { 1323*0ef50b4eSJason Evans if (config_prof) { 1324*0ef50b4eSJason Evans extent_gdump_add(tsdn, to_salvage); 1325b7eaed25SJason Evans } 1326*0ef50b4eSJason Evans extent_record(tsdn, arena, r_extent_hooks, 1327*0ef50b4eSJason Evans &arena->extents_retained, to_salvage, true); 1328b7eaed25SJason Evans } 1329*0ef50b4eSJason Evans if (to_leak != NULL) { 1330*0ef50b4eSJason Evans extent_deregister_no_gdump_sub(tsdn, to_leak); 1331*0ef50b4eSJason Evans extents_leak(tsdn, arena, r_extent_hooks, 1332*0ef50b4eSJason Evans &arena->extents_retained, to_leak, true); 1333*0ef50b4eSJason Evans } 1334*0ef50b4eSJason Evans goto label_err; 1335b7eaed25SJason Evans } 1336b7eaed25SJason Evans 1337b7eaed25SJason Evans if (*commit && !extent_committed_get(extent)) { 1338b7eaed25SJason Evans if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0, 1339b7eaed25SJason Evans extent_size_get(extent), true)) { 1340b7eaed25SJason Evans extent_record(tsdn, arena, r_extent_hooks, 1341b7eaed25SJason Evans &arena->extents_retained, extent, true); 1342b7eaed25SJason Evans goto label_err; 1343b7eaed25SJason Evans } 1344b7eaed25SJason Evans extent_zeroed_set(extent, true); 1345b7eaed25SJason Evans } 1346b7eaed25SJason Evans 1347b7eaed25SJason Evans /* 1348*0ef50b4eSJason Evans * Increment extent_grow_next if doing so wouldn't exceed the allowed 1349b7eaed25SJason Evans * range. 1350b7eaed25SJason Evans */ 1351*0ef50b4eSJason Evans if (arena->extent_grow_next + egn_skip + 1 <= 1352*0ef50b4eSJason Evans arena->retain_grow_limit) { 1353b7eaed25SJason Evans arena->extent_grow_next += egn_skip + 1; 1354b7eaed25SJason Evans } else { 1355*0ef50b4eSJason Evans arena->extent_grow_next = arena->retain_grow_limit; 1356b7eaed25SJason Evans } 1357b7eaed25SJason Evans /* All opportunities for failure are past. 
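 * Everything from here on is infallible bookkeeping (gdump accounting,
 * address randomization, slab registration, optional zeroing), so the grow
 * mutex can be dropped before it runs.  Note also the clamp just above:
 * extent_grow_next advances by egn_skip + 1, but never beyond
 * retain_grow_limit.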
*/ 1358b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); 1359b7eaed25SJason Evans 1360b7eaed25SJason Evans if (config_prof) { 1361b7eaed25SJason Evans /* Adjust gdump stats now that extent is final size. */ 1362b7eaed25SJason Evans extent_gdump_add(tsdn, extent); 1363b7eaed25SJason Evans } 1364b7eaed25SJason Evans if (pad != 0) { 1365b7eaed25SJason Evans extent_addr_randomize(tsdn, extent, alignment); 1366b7eaed25SJason Evans } 1367b7eaed25SJason Evans if (slab) { 1368b7eaed25SJason Evans rtree_ctx_t rtree_ctx_fallback; 1369b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, 1370b7eaed25SJason Evans &rtree_ctx_fallback); 1371b7eaed25SJason Evans 1372b7eaed25SJason Evans extent_slab_set(extent, true); 1373b7eaed25SJason Evans extent_interior_register(tsdn, rtree_ctx, extent, szind); 1374b7eaed25SJason Evans } 1375b7eaed25SJason Evans if (*zero && !extent_zeroed_get(extent)) { 1376b7eaed25SJason Evans void *addr = extent_base_get(extent); 1377b7eaed25SJason Evans size_t size = extent_size_get(extent); 1378b7eaed25SJason Evans if (pages_purge_forced(addr, size)) { 1379b7eaed25SJason Evans memset(addr, 0, size); 1380b7eaed25SJason Evans } 1381b7eaed25SJason Evans } 1382b7eaed25SJason Evans 1383b7eaed25SJason Evans return extent; 1384b7eaed25SJason Evans label_err: 1385b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); 1386b7eaed25SJason Evans return NULL; 1387b7eaed25SJason Evans } 1388b7eaed25SJason Evans 1389b7eaed25SJason Evans static extent_t * 1390b7eaed25SJason Evans extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, 1391b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, 1392b7eaed25SJason Evans size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { 1393b7eaed25SJason Evans assert(size != 0); 1394b7eaed25SJason Evans assert(alignment != 0); 1395b7eaed25SJason Evans 1396b7eaed25SJason Evans malloc_mutex_lock(tsdn, &arena->extent_grow_mtx); 1397b7eaed25SJason Evans 1398b7eaed25SJason Evans extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, 1399b7eaed25SJason Evans &arena->extents_retained, new_addr, size, pad, alignment, slab, 1400b7eaed25SJason Evans szind, zero, commit, true); 1401b7eaed25SJason Evans if (extent != NULL) { 1402b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); 1403b7eaed25SJason Evans if (config_prof) { 1404b7eaed25SJason Evans extent_gdump_add(tsdn, extent); 1405b7eaed25SJason Evans } 1406b7eaed25SJason Evans } else if (opt_retain && new_addr == NULL) { 1407b7eaed25SJason Evans extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size, 1408b7eaed25SJason Evans pad, alignment, slab, szind, zero, commit); 1409b7eaed25SJason Evans /* extent_grow_retained() always releases extent_grow_mtx. 
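 * Between the recycle-success path above, the grow path here, and the plain
 * unlock below, every branch leaves extent_grow_mtx released, which the
 * assertion that follows double-checks.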
*/ 1410b7eaed25SJason Evans } else { 1411b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); 1412b7eaed25SJason Evans } 1413b7eaed25SJason Evans malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx); 1414b7eaed25SJason Evans 1415b7eaed25SJason Evans return extent; 1416b7eaed25SJason Evans } 1417b7eaed25SJason Evans 1418b7eaed25SJason Evans static extent_t * 1419b7eaed25SJason Evans extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, 1420b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, 1421b7eaed25SJason Evans size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { 1422b7eaed25SJason Evans size_t esize = size + pad; 1423b7eaed25SJason Evans extent_t *extent = extent_alloc(tsdn, arena); 1424b7eaed25SJason Evans if (extent == NULL) { 1425b7eaed25SJason Evans return NULL; 1426b7eaed25SJason Evans } 1427b7eaed25SJason Evans void *addr; 1428b7eaed25SJason Evans if (*r_extent_hooks == &extent_hooks_default) { 1429b7eaed25SJason Evans /* Call directly to propagate tsdn. */ 1430b7eaed25SJason Evans addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize, 1431b7eaed25SJason Evans alignment, zero, commit); 1432b7eaed25SJason Evans } else { 14338b2f5aafSJason Evans extent_hook_pre_reentrancy(tsdn, arena); 1434b7eaed25SJason Evans addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, 1435b7eaed25SJason Evans esize, alignment, zero, commit, arena_ind_get(arena)); 14368b2f5aafSJason Evans extent_hook_post_reentrancy(tsdn); 1437b7eaed25SJason Evans } 1438b7eaed25SJason Evans if (addr == NULL) { 1439b7eaed25SJason Evans extent_dalloc(tsdn, arena, extent); 1440b7eaed25SJason Evans return NULL; 1441b7eaed25SJason Evans } 1442b7eaed25SJason Evans extent_init(extent, arena, addr, esize, slab, szind, 1443*0ef50b4eSJason Evans arena_extent_sn_next(arena), extent_state_active, *zero, *commit, 1444*0ef50b4eSJason Evans true); 1445b7eaed25SJason Evans if (pad != 0) { 1446b7eaed25SJason Evans extent_addr_randomize(tsdn, extent, alignment); 1447b7eaed25SJason Evans } 1448b7eaed25SJason Evans if (extent_register(tsdn, extent)) { 1449b7eaed25SJason Evans extents_leak(tsdn, arena, r_extent_hooks, 1450b7eaed25SJason Evans &arena->extents_retained, extent, false); 1451b7eaed25SJason Evans return NULL; 1452b7eaed25SJason Evans } 1453b7eaed25SJason Evans 1454b7eaed25SJason Evans return extent; 1455b7eaed25SJason Evans } 1456b7eaed25SJason Evans 1457b7eaed25SJason Evans extent_t * 1458b7eaed25SJason Evans extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, 1459b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, 1460b7eaed25SJason Evans size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { 1461b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1462b7eaed25SJason Evans WITNESS_RANK_CORE, 0); 1463b7eaed25SJason Evans 1464b7eaed25SJason Evans extent_hooks_assure_initialized(arena, r_extent_hooks); 1465b7eaed25SJason Evans 1466b7eaed25SJason Evans extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks, 1467b7eaed25SJason Evans new_addr, size, pad, alignment, slab, szind, zero, commit); 1468b7eaed25SJason Evans if (extent == NULL) { 1469*0ef50b4eSJason Evans if (opt_retain && new_addr != NULL) { 1470*0ef50b4eSJason Evans /* 1471*0ef50b4eSJason Evans * When retain is enabled and new_addr is set, we do not 1472*0ef50b4eSJason Evans * attempt extent_alloc_wrapper_hard which does mmap 1473*0ef50b4eSJason Evans * that is very 
unlikely to succeed (unless it happens 1474*0ef50b4eSJason Evans * to be at the end). 1475*0ef50b4eSJason Evans */ 1476*0ef50b4eSJason Evans return NULL; 1477*0ef50b4eSJason Evans } 1478b7eaed25SJason Evans extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks, 1479b7eaed25SJason Evans new_addr, size, pad, alignment, slab, szind, zero, commit); 1480b7eaed25SJason Evans } 1481b7eaed25SJason Evans 1482*0ef50b4eSJason Evans assert(extent == NULL || extent_dumpable_get(extent)); 1483b7eaed25SJason Evans return extent; 1484b7eaed25SJason Evans } 1485b7eaed25SJason Evans 1486b7eaed25SJason Evans static bool 1487b7eaed25SJason Evans extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner, 1488b7eaed25SJason Evans const extent_t *outer) { 1489b7eaed25SJason Evans assert(extent_arena_get(inner) == arena); 1490b7eaed25SJason Evans if (extent_arena_get(outer) != arena) { 1491b7eaed25SJason Evans return false; 1492b7eaed25SJason Evans } 1493b7eaed25SJason Evans 1494b7eaed25SJason Evans assert(extent_state_get(inner) == extent_state_active); 1495b7eaed25SJason Evans if (extent_state_get(outer) != extents->state) { 1496b7eaed25SJason Evans return false; 1497b7eaed25SJason Evans } 1498b7eaed25SJason Evans 1499b7eaed25SJason Evans if (extent_committed_get(inner) != extent_committed_get(outer)) { 1500b7eaed25SJason Evans return false; 1501b7eaed25SJason Evans } 1502b7eaed25SJason Evans 1503b7eaed25SJason Evans return true; 1504b7eaed25SJason Evans } 1505b7eaed25SJason Evans 1506b7eaed25SJason Evans static bool 1507b7eaed25SJason Evans extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, 1508b7eaed25SJason Evans extents_t *extents, extent_t *inner, extent_t *outer, bool forward, 1509b7eaed25SJason Evans bool growing_retained) { 1510b7eaed25SJason Evans assert(extent_can_coalesce(arena, extents, inner, outer)); 1511b7eaed25SJason Evans 1512*0ef50b4eSJason Evans extent_activate_locked(tsdn, arena, extents, outer); 1513b7eaed25SJason Evans 1514b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &extents->mtx); 1515b7eaed25SJason Evans bool err = extent_merge_impl(tsdn, arena, r_extent_hooks, 1516b7eaed25SJason Evans forward ? inner : outer, forward ? outer : inner, growing_retained); 1517b7eaed25SJason Evans malloc_mutex_lock(tsdn, &extents->mtx); 1518b7eaed25SJason Evans 1519b7eaed25SJason Evans if (err) { 1520*0ef50b4eSJason Evans extent_deactivate_locked(tsdn, arena, extents, outer); 1521b7eaed25SJason Evans } 1522b7eaed25SJason Evans 1523b7eaed25SJason Evans return err; 1524b7eaed25SJason Evans } 1525b7eaed25SJason Evans 1526b7eaed25SJason Evans static extent_t * 1527b7eaed25SJason Evans extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, 1528b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, 1529b7eaed25SJason Evans extent_t *extent, bool *coalesced, bool growing_retained) { 1530b7eaed25SJason Evans /* 1531b7eaed25SJason Evans * Continue attempting to coalesce until failure, to protect against 1532b7eaed25SJason Evans * races with other threads that are thwarted by this one. 1533b7eaed25SJason Evans */ 1534b7eaed25SJason Evans bool again; 1535b7eaed25SJason Evans do { 1536b7eaed25SJason Evans again = false; 1537b7eaed25SJason Evans 1538b7eaed25SJason Evans /* Try to coalesce forward. 
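 * Forward means merging with the neighbor that begins at
 * extent_past_get(extent), keeping extent's base address:
 *
 *      [ extent ][ next ]  ->  [ extent + next ]
 *
 * The backward pass further below instead merges with the neighbor that ends
 * at extent_before_get(extent) and continues from that neighbor:
 *
 *      [ prev ][ extent ]  ->  [ prev + extent ]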
*/ 1539b7eaed25SJason Evans extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx, 1540b7eaed25SJason Evans extent_past_get(extent)); 1541b7eaed25SJason Evans if (next != NULL) { 1542b7eaed25SJason Evans /* 1543b7eaed25SJason Evans * extents->mtx only protects against races for 1544b7eaed25SJason Evans * like-state extents, so call extent_can_coalesce() 1545b7eaed25SJason Evans * before releasing next's pool lock. 1546b7eaed25SJason Evans */ 1547b7eaed25SJason Evans bool can_coalesce = extent_can_coalesce(arena, extents, 1548b7eaed25SJason Evans extent, next); 1549b7eaed25SJason Evans 1550b7eaed25SJason Evans extent_unlock(tsdn, next); 1551b7eaed25SJason Evans 1552b7eaed25SJason Evans if (can_coalesce && !extent_coalesce(tsdn, arena, 1553b7eaed25SJason Evans r_extent_hooks, extents, extent, next, true, 1554b7eaed25SJason Evans growing_retained)) { 1555b7eaed25SJason Evans if (extents->delay_coalesce) { 1556b7eaed25SJason Evans /* Do minimal coalescing. */ 1557b7eaed25SJason Evans *coalesced = true; 1558b7eaed25SJason Evans return extent; 1559b7eaed25SJason Evans } 1560b7eaed25SJason Evans again = true; 1561b7eaed25SJason Evans } 1562b7eaed25SJason Evans } 1563b7eaed25SJason Evans 1564b7eaed25SJason Evans /* Try to coalesce backward. */ 1565b7eaed25SJason Evans extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx, 1566b7eaed25SJason Evans extent_before_get(extent)); 1567b7eaed25SJason Evans if (prev != NULL) { 1568b7eaed25SJason Evans bool can_coalesce = extent_can_coalesce(arena, extents, 1569b7eaed25SJason Evans extent, prev); 1570b7eaed25SJason Evans extent_unlock(tsdn, prev); 1571b7eaed25SJason Evans 1572b7eaed25SJason Evans if (can_coalesce && !extent_coalesce(tsdn, arena, 1573b7eaed25SJason Evans r_extent_hooks, extents, extent, prev, false, 1574b7eaed25SJason Evans growing_retained)) { 1575b7eaed25SJason Evans extent = prev; 1576b7eaed25SJason Evans if (extents->delay_coalesce) { 1577b7eaed25SJason Evans /* Do minimal coalescing. */ 1578b7eaed25SJason Evans *coalesced = true; 1579b7eaed25SJason Evans return extent; 1580b7eaed25SJason Evans } 1581b7eaed25SJason Evans again = true; 1582b7eaed25SJason Evans } 1583b7eaed25SJason Evans } 1584b7eaed25SJason Evans } while (again); 1585b7eaed25SJason Evans 1586b7eaed25SJason Evans if (extents->delay_coalesce) { 1587b7eaed25SJason Evans *coalesced = false; 1588b7eaed25SJason Evans } 1589b7eaed25SJason Evans return extent; 1590b7eaed25SJason Evans } 1591b7eaed25SJason Evans 1592*0ef50b4eSJason Evans /* 1593*0ef50b4eSJason Evans * Does the metadata management portions of putting an unused extent into the 1594*0ef50b4eSJason Evans * given extents_t (coalesces, deregisters slab interiors, the heap operations). 
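 * Concretely, the body below resets the extent's szind to NSIZES, strips any
 * slab-interior rtree mappings, optionally coalesces with free neighbors
 * (always for non-delayed extents_t, and eagerly for LARGE_MINCLASS-or-larger
 * extents even when coalescing is normally delayed), and finally deactivates
 * the extent into the heap, all under extents->mtx.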
1595*0ef50b4eSJason Evans */ 1596b7eaed25SJason Evans static void 1597b7eaed25SJason Evans extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, 1598b7eaed25SJason Evans extents_t *extents, extent_t *extent, bool growing_retained) { 1599b7eaed25SJason Evans rtree_ctx_t rtree_ctx_fallback; 1600b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 1601b7eaed25SJason Evans 1602b7eaed25SJason Evans assert((extents_state_get(extents) != extent_state_dirty && 1603b7eaed25SJason Evans extents_state_get(extents) != extent_state_muzzy) || 1604b7eaed25SJason Evans !extent_zeroed_get(extent)); 1605b7eaed25SJason Evans 1606b7eaed25SJason Evans malloc_mutex_lock(tsdn, &extents->mtx); 1607b7eaed25SJason Evans extent_hooks_assure_initialized(arena, r_extent_hooks); 1608b7eaed25SJason Evans 1609b7eaed25SJason Evans extent_szind_set(extent, NSIZES); 1610b7eaed25SJason Evans if (extent_slab_get(extent)) { 1611b7eaed25SJason Evans extent_interior_deregister(tsdn, rtree_ctx, extent); 1612b7eaed25SJason Evans extent_slab_set(extent, false); 1613b7eaed25SJason Evans } 1614b7eaed25SJason Evans 1615b7eaed25SJason Evans assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, 1616b7eaed25SJason Evans (uintptr_t)extent_base_get(extent), true) == extent); 1617b7eaed25SJason Evans 1618b7eaed25SJason Evans if (!extents->delay_coalesce) { 1619b7eaed25SJason Evans extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, 1620b7eaed25SJason Evans rtree_ctx, extents, extent, NULL, growing_retained); 1621*0ef50b4eSJason Evans } else if (extent_size_get(extent) >= LARGE_MINCLASS) { 1622*0ef50b4eSJason Evans /* Always coalesce large extents eagerly. */ 1623*0ef50b4eSJason Evans bool coalesced; 1624*0ef50b4eSJason Evans size_t prev_size; 1625*0ef50b4eSJason Evans do { 1626*0ef50b4eSJason Evans prev_size = extent_size_get(extent); 1627*0ef50b4eSJason Evans assert(extent_state_get(extent) == extent_state_active); 1628*0ef50b4eSJason Evans extent = extent_try_coalesce(tsdn, arena, 1629*0ef50b4eSJason Evans r_extent_hooks, rtree_ctx, extents, extent, 1630*0ef50b4eSJason Evans &coalesced, growing_retained); 1631*0ef50b4eSJason Evans } while (coalesced && 1632*0ef50b4eSJason Evans extent_size_get(extent) >= prev_size + LARGE_MINCLASS); 1633b7eaed25SJason Evans } 1634*0ef50b4eSJason Evans extent_deactivate_locked(tsdn, arena, extents, extent); 1635b7eaed25SJason Evans 1636b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &extents->mtx); 1637b7eaed25SJason Evans } 1638b7eaed25SJason Evans 1639b7eaed25SJason Evans void 1640b7eaed25SJason Evans extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { 1641b7eaed25SJason Evans extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; 1642b7eaed25SJason Evans 1643b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1644b7eaed25SJason Evans WITNESS_RANK_CORE, 0); 1645b7eaed25SJason Evans 1646b7eaed25SJason Evans if (extent_register(tsdn, extent)) { 1647b7eaed25SJason Evans extents_leak(tsdn, arena, &extent_hooks, 1648b7eaed25SJason Evans &arena->extents_retained, extent, false); 1649b7eaed25SJason Evans return; 1650b7eaed25SJason Evans } 1651b7eaed25SJason Evans extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent); 1652b7eaed25SJason Evans } 1653b7eaed25SJason Evans 1654b7eaed25SJason Evans static bool 1655b7eaed25SJason Evans extent_dalloc_default_impl(void *addr, size_t size) { 1656b7eaed25SJason Evans if (!have_dss || !extent_in_dss(addr)) { 1657b7eaed25SJason Evans return 
extent_dalloc_mmap(addr, size); 1658b7eaed25SJason Evans } 1659b7eaed25SJason Evans return true; 1660b7eaed25SJason Evans } 1661b7eaed25SJason Evans 1662b7eaed25SJason Evans static bool 1663b7eaed25SJason Evans extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size, 1664b7eaed25SJason Evans bool committed, unsigned arena_ind) { 1665b7eaed25SJason Evans return extent_dalloc_default_impl(addr, size); 1666b7eaed25SJason Evans } 1667b7eaed25SJason Evans 1668b7eaed25SJason Evans static bool 1669b7eaed25SJason Evans extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, 1670b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, extent_t *extent) { 1671b7eaed25SJason Evans bool err; 1672b7eaed25SJason Evans 1673b7eaed25SJason Evans assert(extent_base_get(extent) != NULL); 1674b7eaed25SJason Evans assert(extent_size_get(extent) != 0); 1675b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1676b7eaed25SJason Evans WITNESS_RANK_CORE, 0); 1677b7eaed25SJason Evans 1678b7eaed25SJason Evans extent_addr_set(extent, extent_base_get(extent)); 1679b7eaed25SJason Evans 1680b7eaed25SJason Evans extent_hooks_assure_initialized(arena, r_extent_hooks); 1681b7eaed25SJason Evans /* Try to deallocate. */ 1682b7eaed25SJason Evans if (*r_extent_hooks == &extent_hooks_default) { 1683b7eaed25SJason Evans /* Call directly to propagate tsdn. */ 1684b7eaed25SJason Evans err = extent_dalloc_default_impl(extent_base_get(extent), 1685b7eaed25SJason Evans extent_size_get(extent)); 1686b7eaed25SJason Evans } else { 16878b2f5aafSJason Evans extent_hook_pre_reentrancy(tsdn, arena); 1688b7eaed25SJason Evans err = ((*r_extent_hooks)->dalloc == NULL || 1689b7eaed25SJason Evans (*r_extent_hooks)->dalloc(*r_extent_hooks, 1690b7eaed25SJason Evans extent_base_get(extent), extent_size_get(extent), 1691b7eaed25SJason Evans extent_committed_get(extent), arena_ind_get(arena))); 16928b2f5aafSJason Evans extent_hook_post_reentrancy(tsdn); 1693b7eaed25SJason Evans } 1694b7eaed25SJason Evans 1695b7eaed25SJason Evans if (!err) { 1696b7eaed25SJason Evans extent_dalloc(tsdn, arena, extent); 1697b7eaed25SJason Evans } 1698b7eaed25SJason Evans 1699b7eaed25SJason Evans return err; 1700b7eaed25SJason Evans } 1701b7eaed25SJason Evans 1702b7eaed25SJason Evans void 1703b7eaed25SJason Evans extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, 1704b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, extent_t *extent) { 1705*0ef50b4eSJason Evans assert(extent_dumpable_get(extent)); 1706b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1707b7eaed25SJason Evans WITNESS_RANK_CORE, 0); 1708b7eaed25SJason Evans 1709b7eaed25SJason Evans /* 1710b7eaed25SJason Evans * Deregister first to avoid a race with other allocating threads, and 1711b7eaed25SJason Evans * reregister if deallocation fails. 1712b7eaed25SJason Evans */ 1713b7eaed25SJason Evans extent_deregister(tsdn, extent); 1714b7eaed25SJason Evans if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) { 1715b7eaed25SJason Evans return; 1716b7eaed25SJason Evans } 1717b7eaed25SJason Evans 1718b7eaed25SJason Evans extent_reregister(tsdn, extent); 17198b2f5aafSJason Evans if (*r_extent_hooks != &extent_hooks_default) { 17208b2f5aafSJason Evans extent_hook_pre_reentrancy(tsdn, arena); 17218b2f5aafSJason Evans } 1722b7eaed25SJason Evans /* Try to decommit; purge if that fails. 
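 * The cascade below also decides the extent's zeroed flag for later reuse:
 *
 *      never committed                  -> zeroed
 *      decommit succeeded               -> zeroed
 *      forced purge succeeded           -> zeroed
 *      lazy purge (or already muzzy)    -> not zeroed
 *      nothing available / all failed   -> not zeroed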
*/ 1723b7eaed25SJason Evans bool zeroed; 1724b7eaed25SJason Evans if (!extent_committed_get(extent)) { 1725b7eaed25SJason Evans zeroed = true; 1726b7eaed25SJason Evans } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent, 1727b7eaed25SJason Evans 0, extent_size_get(extent))) { 1728b7eaed25SJason Evans zeroed = true; 1729b7eaed25SJason Evans } else if ((*r_extent_hooks)->purge_forced != NULL && 1730b7eaed25SJason Evans !(*r_extent_hooks)->purge_forced(*r_extent_hooks, 1731b7eaed25SJason Evans extent_base_get(extent), extent_size_get(extent), 0, 1732b7eaed25SJason Evans extent_size_get(extent), arena_ind_get(arena))) { 1733b7eaed25SJason Evans zeroed = true; 1734b7eaed25SJason Evans } else if (extent_state_get(extent) == extent_state_muzzy || 1735b7eaed25SJason Evans ((*r_extent_hooks)->purge_lazy != NULL && 1736b7eaed25SJason Evans !(*r_extent_hooks)->purge_lazy(*r_extent_hooks, 1737b7eaed25SJason Evans extent_base_get(extent), extent_size_get(extent), 0, 1738b7eaed25SJason Evans extent_size_get(extent), arena_ind_get(arena)))) { 1739b7eaed25SJason Evans zeroed = false; 1740b7eaed25SJason Evans } else { 1741b7eaed25SJason Evans zeroed = false; 1742b7eaed25SJason Evans } 17438b2f5aafSJason Evans if (*r_extent_hooks != &extent_hooks_default) { 17448b2f5aafSJason Evans extent_hook_post_reentrancy(tsdn); 17458b2f5aafSJason Evans } 1746b7eaed25SJason Evans extent_zeroed_set(extent, zeroed); 1747b7eaed25SJason Evans 1748b7eaed25SJason Evans if (config_prof) { 1749b7eaed25SJason Evans extent_gdump_sub(tsdn, extent); 1750b7eaed25SJason Evans } 1751b7eaed25SJason Evans 1752b7eaed25SJason Evans extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, 1753b7eaed25SJason Evans extent, false); 1754b7eaed25SJason Evans } 1755b7eaed25SJason Evans 1756b7eaed25SJason Evans static void 1757b7eaed25SJason Evans extent_destroy_default_impl(void *addr, size_t size) { 1758b7eaed25SJason Evans if (!have_dss || !extent_in_dss(addr)) { 1759b7eaed25SJason Evans pages_unmap(addr, size); 1760b7eaed25SJason Evans } 1761b7eaed25SJason Evans } 1762b7eaed25SJason Evans 1763b7eaed25SJason Evans static void 1764b7eaed25SJason Evans extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, 1765b7eaed25SJason Evans bool committed, unsigned arena_ind) { 1766b7eaed25SJason Evans extent_destroy_default_impl(addr, size); 1767b7eaed25SJason Evans } 1768b7eaed25SJason Evans 1769b7eaed25SJason Evans void 1770b7eaed25SJason Evans extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, 1771b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, extent_t *extent) { 1772b7eaed25SJason Evans assert(extent_base_get(extent) != NULL); 1773b7eaed25SJason Evans assert(extent_size_get(extent) != 0); 1774b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1775b7eaed25SJason Evans WITNESS_RANK_CORE, 0); 1776b7eaed25SJason Evans 1777b7eaed25SJason Evans /* Deregister first to avoid a race with other allocating threads. */ 1778b7eaed25SJason Evans extent_deregister(tsdn, extent); 1779b7eaed25SJason Evans 1780b7eaed25SJason Evans extent_addr_set(extent, extent_base_get(extent)); 1781b7eaed25SJason Evans 1782b7eaed25SJason Evans extent_hooks_assure_initialized(arena, r_extent_hooks); 1783b7eaed25SJason Evans /* Try to destroy; silently fail otherwise. */ 1784b7eaed25SJason Evans if (*r_extent_hooks == &extent_hooks_default) { 1785b7eaed25SJason Evans /* Call directly to propagate tsdn. 
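 * (Calling the default implementation directly, rather than through the hook
 * table, follows the pattern used throughout this file: the default path
 * needs no reentrancy guard, whereas user-supplied hooks below are bracketed
 * by extent_hook_pre_reentrancy()/extent_hook_post_reentrancy().)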
*/ 1786b7eaed25SJason Evans extent_destroy_default_impl(extent_base_get(extent), 1787b7eaed25SJason Evans extent_size_get(extent)); 1788b7eaed25SJason Evans } else if ((*r_extent_hooks)->destroy != NULL) { 17898b2f5aafSJason Evans extent_hook_pre_reentrancy(tsdn, arena); 1790b7eaed25SJason Evans (*r_extent_hooks)->destroy(*r_extent_hooks, 1791b7eaed25SJason Evans extent_base_get(extent), extent_size_get(extent), 1792b7eaed25SJason Evans extent_committed_get(extent), arena_ind_get(arena)); 17938b2f5aafSJason Evans extent_hook_post_reentrancy(tsdn); 1794b7eaed25SJason Evans } 1795b7eaed25SJason Evans 1796b7eaed25SJason Evans extent_dalloc(tsdn, arena, extent); 1797b7eaed25SJason Evans } 1798b7eaed25SJason Evans 1799b7eaed25SJason Evans static bool 1800b7eaed25SJason Evans extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, 1801b7eaed25SJason Evans size_t offset, size_t length, unsigned arena_ind) { 1802b7eaed25SJason Evans return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset), 1803b7eaed25SJason Evans length); 1804b7eaed25SJason Evans } 1805b7eaed25SJason Evans 1806b7eaed25SJason Evans static bool 1807b7eaed25SJason Evans extent_commit_impl(tsdn_t *tsdn, arena_t *arena, 1808b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 1809b7eaed25SJason Evans size_t length, bool growing_retained) { 1810b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1811b7eaed25SJason Evans WITNESS_RANK_CORE, growing_retained ? 1 : 0); 1812b7eaed25SJason Evans 1813b7eaed25SJason Evans extent_hooks_assure_initialized(arena, r_extent_hooks); 18148b2f5aafSJason Evans if (*r_extent_hooks != &extent_hooks_default) { 18158b2f5aafSJason Evans extent_hook_pre_reentrancy(tsdn, arena); 18168b2f5aafSJason Evans } 1817b7eaed25SJason Evans bool err = ((*r_extent_hooks)->commit == NULL || 1818b7eaed25SJason Evans (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent), 1819b7eaed25SJason Evans extent_size_get(extent), offset, length, arena_ind_get(arena))); 18208b2f5aafSJason Evans if (*r_extent_hooks != &extent_hooks_default) { 18218b2f5aafSJason Evans extent_hook_post_reentrancy(tsdn); 18228b2f5aafSJason Evans } 1823b7eaed25SJason Evans extent_committed_set(extent, extent_committed_get(extent) || !err); 1824b7eaed25SJason Evans return err; 1825b7eaed25SJason Evans } 1826b7eaed25SJason Evans 1827b7eaed25SJason Evans bool 1828b7eaed25SJason Evans extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, 1829b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 1830b7eaed25SJason Evans size_t length) { 1831b7eaed25SJason Evans return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset, 1832b7eaed25SJason Evans length, false); 1833b7eaed25SJason Evans } 1834b7eaed25SJason Evans 1835b7eaed25SJason Evans static bool 1836b7eaed25SJason Evans extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, 1837b7eaed25SJason Evans size_t offset, size_t length, unsigned arena_ind) { 1838b7eaed25SJason Evans return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset), 1839b7eaed25SJason Evans length); 1840b7eaed25SJason Evans } 1841b7eaed25SJason Evans 1842b7eaed25SJason Evans bool 1843b7eaed25SJason Evans extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, 1844b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 1845b7eaed25SJason Evans size_t length) { 1846b7eaed25SJason Evans 
witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1847b7eaed25SJason Evans WITNESS_RANK_CORE, 0); 1848b7eaed25SJason Evans 1849b7eaed25SJason Evans extent_hooks_assure_initialized(arena, r_extent_hooks); 1850b7eaed25SJason Evans 18518b2f5aafSJason Evans if (*r_extent_hooks != &extent_hooks_default) { 18528b2f5aafSJason Evans extent_hook_pre_reentrancy(tsdn, arena); 18538b2f5aafSJason Evans } 1854b7eaed25SJason Evans bool err = ((*r_extent_hooks)->decommit == NULL || 1855b7eaed25SJason Evans (*r_extent_hooks)->decommit(*r_extent_hooks, 1856b7eaed25SJason Evans extent_base_get(extent), extent_size_get(extent), offset, length, 1857b7eaed25SJason Evans arena_ind_get(arena))); 18588b2f5aafSJason Evans if (*r_extent_hooks != &extent_hooks_default) { 18598b2f5aafSJason Evans extent_hook_post_reentrancy(tsdn); 18608b2f5aafSJason Evans } 1861b7eaed25SJason Evans extent_committed_set(extent, extent_committed_get(extent) && err); 1862b7eaed25SJason Evans return err; 1863b7eaed25SJason Evans } 1864b7eaed25SJason Evans 1865b7eaed25SJason Evans #ifdef PAGES_CAN_PURGE_LAZY 1866b7eaed25SJason Evans static bool 1867b7eaed25SJason Evans extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, 1868b7eaed25SJason Evans size_t offset, size_t length, unsigned arena_ind) { 1869b7eaed25SJason Evans assert(addr != NULL); 1870b7eaed25SJason Evans assert((offset & PAGE_MASK) == 0); 1871b7eaed25SJason Evans assert(length != 0); 1872b7eaed25SJason Evans assert((length & PAGE_MASK) == 0); 1873b7eaed25SJason Evans 1874b7eaed25SJason Evans return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset), 1875b7eaed25SJason Evans length); 1876b7eaed25SJason Evans } 1877b7eaed25SJason Evans #endif 1878b7eaed25SJason Evans 1879b7eaed25SJason Evans static bool 1880b7eaed25SJason Evans extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, 1881b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 1882b7eaed25SJason Evans size_t length, bool growing_retained) { 1883b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1884b7eaed25SJason Evans WITNESS_RANK_CORE, growing_retained ? 
1 : 0); 1885b7eaed25SJason Evans 1886b7eaed25SJason Evans extent_hooks_assure_initialized(arena, r_extent_hooks); 18878b2f5aafSJason Evans 18888b2f5aafSJason Evans if ((*r_extent_hooks)->purge_lazy == NULL) { 18898b2f5aafSJason Evans return true; 18908b2f5aafSJason Evans } 18918b2f5aafSJason Evans if (*r_extent_hooks != &extent_hooks_default) { 18928b2f5aafSJason Evans extent_hook_pre_reentrancy(tsdn, arena); 18938b2f5aafSJason Evans } 18948b2f5aafSJason Evans bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks, 1895b7eaed25SJason Evans extent_base_get(extent), extent_size_get(extent), offset, length, 18968b2f5aafSJason Evans arena_ind_get(arena)); 18978b2f5aafSJason Evans if (*r_extent_hooks != &extent_hooks_default) { 18988b2f5aafSJason Evans extent_hook_post_reentrancy(tsdn); 18998b2f5aafSJason Evans } 19008b2f5aafSJason Evans 19018b2f5aafSJason Evans return err; 1902b7eaed25SJason Evans } 1903b7eaed25SJason Evans 1904b7eaed25SJason Evans bool 1905b7eaed25SJason Evans extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, 1906b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 1907b7eaed25SJason Evans size_t length) { 1908b7eaed25SJason Evans return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent, 1909b7eaed25SJason Evans offset, length, false); 1910b7eaed25SJason Evans } 1911b7eaed25SJason Evans 1912b7eaed25SJason Evans #ifdef PAGES_CAN_PURGE_FORCED 1913b7eaed25SJason Evans static bool 1914b7eaed25SJason Evans extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr, 1915b7eaed25SJason Evans size_t size, size_t offset, size_t length, unsigned arena_ind) { 1916b7eaed25SJason Evans assert(addr != NULL); 1917b7eaed25SJason Evans assert((offset & PAGE_MASK) == 0); 1918b7eaed25SJason Evans assert(length != 0); 1919b7eaed25SJason Evans assert((length & PAGE_MASK) == 0); 1920b7eaed25SJason Evans 1921b7eaed25SJason Evans return pages_purge_forced((void *)((uintptr_t)addr + 1922b7eaed25SJason Evans (uintptr_t)offset), length); 1923b7eaed25SJason Evans } 1924b7eaed25SJason Evans #endif 1925b7eaed25SJason Evans 1926b7eaed25SJason Evans static bool 1927b7eaed25SJason Evans extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, 1928b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 1929b7eaed25SJason Evans size_t length, bool growing_retained) { 1930b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1931b7eaed25SJason Evans WITNESS_RANK_CORE, growing_retained ? 
1 : 0); 1932b7eaed25SJason Evans 1933b7eaed25SJason Evans extent_hooks_assure_initialized(arena, r_extent_hooks); 19348b2f5aafSJason Evans 19358b2f5aafSJason Evans if ((*r_extent_hooks)->purge_forced == NULL) { 19368b2f5aafSJason Evans return true; 19378b2f5aafSJason Evans } 19388b2f5aafSJason Evans if (*r_extent_hooks != &extent_hooks_default) { 19398b2f5aafSJason Evans extent_hook_pre_reentrancy(tsdn, arena); 19408b2f5aafSJason Evans } 19418b2f5aafSJason Evans bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks, 1942b7eaed25SJason Evans extent_base_get(extent), extent_size_get(extent), offset, length, 19438b2f5aafSJason Evans arena_ind_get(arena)); 19448b2f5aafSJason Evans if (*r_extent_hooks != &extent_hooks_default) { 19458b2f5aafSJason Evans extent_hook_post_reentrancy(tsdn); 19468b2f5aafSJason Evans } 19478b2f5aafSJason Evans return err; 1948b7eaed25SJason Evans } 1949b7eaed25SJason Evans 1950b7eaed25SJason Evans bool 1951b7eaed25SJason Evans extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, 1952b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 1953b7eaed25SJason Evans size_t length) { 1954b7eaed25SJason Evans return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent, 1955b7eaed25SJason Evans offset, length, false); 1956b7eaed25SJason Evans } 1957b7eaed25SJason Evans 1958b7eaed25SJason Evans #ifdef JEMALLOC_MAPS_COALESCE 1959b7eaed25SJason Evans static bool 1960b7eaed25SJason Evans extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, 1961b7eaed25SJason Evans size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { 1962b7eaed25SJason Evans return !maps_coalesce; 1963b7eaed25SJason Evans } 1964b7eaed25SJason Evans #endif 1965b7eaed25SJason Evans 1966*0ef50b4eSJason Evans /* 1967*0ef50b4eSJason Evans * Accepts the extent to split, and the characteristics of each side of the 1968*0ef50b4eSJason Evans * split. The 'a' parameters go with the 'lead' of the resulting pair of 1969*0ef50b4eSJason Evans * extents (the lower addressed portion of the split), and the 'b' parameters go 1970*0ef50b4eSJason Evans * with the trail (the higher addressed portion). This makes 'extent' the lead, 1971*0ef50b4eSJason Evans * and returns the trail (except in case of error). 1972*0ef50b4eSJason Evans */ 1973b7eaed25SJason Evans static extent_t * 1974b7eaed25SJason Evans extent_split_impl(tsdn_t *tsdn, arena_t *arena, 1975b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, 1976b7eaed25SJason Evans szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, 1977b7eaed25SJason Evans bool growing_retained) { 1978b7eaed25SJason Evans assert(extent_size_get(extent) == size_a + size_b); 1979b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1980b7eaed25SJason Evans WITNESS_RANK_CORE, growing_retained ? 
1 : 0); 1981b7eaed25SJason Evans 1982b7eaed25SJason Evans extent_hooks_assure_initialized(arena, r_extent_hooks); 1983b7eaed25SJason Evans 1984b7eaed25SJason Evans if ((*r_extent_hooks)->split == NULL) { 1985b7eaed25SJason Evans return NULL; 1986b7eaed25SJason Evans } 1987b7eaed25SJason Evans 1988b7eaed25SJason Evans extent_t *trail = extent_alloc(tsdn, arena); 1989b7eaed25SJason Evans if (trail == NULL) { 1990b7eaed25SJason Evans goto label_error_a; 1991b7eaed25SJason Evans } 1992b7eaed25SJason Evans 1993b7eaed25SJason Evans extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) + 1994b7eaed25SJason Evans size_a), size_b, slab_b, szind_b, extent_sn_get(extent), 1995b7eaed25SJason Evans extent_state_get(extent), extent_zeroed_get(extent), 1996*0ef50b4eSJason Evans extent_committed_get(extent), extent_dumpable_get(extent)); 1997b7eaed25SJason Evans 1998b7eaed25SJason Evans rtree_ctx_t rtree_ctx_fallback; 1999b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 2000b7eaed25SJason Evans rtree_leaf_elm_t *lead_elm_a, *lead_elm_b; 2001b7eaed25SJason Evans { 2002b7eaed25SJason Evans extent_t lead; 2003b7eaed25SJason Evans 2004b7eaed25SJason Evans extent_init(&lead, arena, extent_addr_get(extent), size_a, 2005b7eaed25SJason Evans slab_a, szind_a, extent_sn_get(extent), 2006b7eaed25SJason Evans extent_state_get(extent), extent_zeroed_get(extent), 2007*0ef50b4eSJason Evans extent_committed_get(extent), extent_dumpable_get(extent)); 2008b7eaed25SJason Evans 2009b7eaed25SJason Evans extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false, 2010b7eaed25SJason Evans true, &lead_elm_a, &lead_elm_b); 2011b7eaed25SJason Evans } 2012b7eaed25SJason Evans rtree_leaf_elm_t *trail_elm_a, *trail_elm_b; 2013b7eaed25SJason Evans extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true, 2014b7eaed25SJason Evans &trail_elm_a, &trail_elm_b); 2015b7eaed25SJason Evans 2016b7eaed25SJason Evans if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL 2017b7eaed25SJason Evans || trail_elm_b == NULL) { 2018b7eaed25SJason Evans goto label_error_b; 2019b7eaed25SJason Evans } 2020b7eaed25SJason Evans 2021b7eaed25SJason Evans extent_lock2(tsdn, extent, trail); 2022b7eaed25SJason Evans 20238b2f5aafSJason Evans if (*r_extent_hooks != &extent_hooks_default) { 20248b2f5aafSJason Evans extent_hook_pre_reentrancy(tsdn, arena); 20258b2f5aafSJason Evans } 20268b2f5aafSJason Evans bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent), 2027b7eaed25SJason Evans size_a + size_b, size_a, size_b, extent_committed_get(extent), 20288b2f5aafSJason Evans arena_ind_get(arena)); 20298b2f5aafSJason Evans if (*r_extent_hooks != &extent_hooks_default) { 20308b2f5aafSJason Evans extent_hook_post_reentrancy(tsdn); 20318b2f5aafSJason Evans } 20328b2f5aafSJason Evans if (err) { 2033b7eaed25SJason Evans goto label_error_c; 2034b7eaed25SJason Evans } 2035b7eaed25SJason Evans 2036b7eaed25SJason Evans extent_size_set(extent, size_a); 2037b7eaed25SJason Evans extent_szind_set(extent, szind_a); 2038b7eaed25SJason Evans 2039b7eaed25SJason Evans extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent, 2040b7eaed25SJason Evans szind_a, slab_a); 2041b7eaed25SJason Evans extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail, 2042b7eaed25SJason Evans szind_b, slab_b); 2043b7eaed25SJason Evans 2044b7eaed25SJason Evans extent_unlock2(tsdn, extent, trail); 2045b7eaed25SJason Evans 2046b7eaed25SJason Evans return trail; 2047b7eaed25SJason Evans 
label_error_c: 2048b7eaed25SJason Evans extent_unlock2(tsdn, extent, trail); 2049b7eaed25SJason Evans label_error_b: 2050b7eaed25SJason Evans extent_dalloc(tsdn, arena, trail); 2051b7eaed25SJason Evans label_error_a: 2052b7eaed25SJason Evans return NULL; 2053b7eaed25SJason Evans } 2054b7eaed25SJason Evans 2055b7eaed25SJason Evans extent_t * 2056b7eaed25SJason Evans extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, 2057b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, 2058b7eaed25SJason Evans szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) { 2059b7eaed25SJason Evans return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a, 2060b7eaed25SJason Evans szind_a, slab_a, size_b, szind_b, slab_b, false); 2061b7eaed25SJason Evans } 2062b7eaed25SJason Evans 2063b7eaed25SJason Evans static bool 2064b7eaed25SJason Evans extent_merge_default_impl(void *addr_a, void *addr_b) { 2065b7eaed25SJason Evans if (!maps_coalesce) { 2066b7eaed25SJason Evans return true; 2067b7eaed25SJason Evans } 2068b7eaed25SJason Evans if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) { 2069b7eaed25SJason Evans return true; 2070b7eaed25SJason Evans } 2071b7eaed25SJason Evans 2072b7eaed25SJason Evans return false; 2073b7eaed25SJason Evans } 2074b7eaed25SJason Evans 2075b7eaed25SJason Evans #ifdef JEMALLOC_MAPS_COALESCE 2076b7eaed25SJason Evans static bool 2077b7eaed25SJason Evans extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, 2078b7eaed25SJason Evans void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { 2079b7eaed25SJason Evans return extent_merge_default_impl(addr_a, addr_b); 2080b7eaed25SJason Evans } 2081b7eaed25SJason Evans #endif 2082b7eaed25SJason Evans 2083b7eaed25SJason Evans static bool 2084b7eaed25SJason Evans extent_merge_impl(tsdn_t *tsdn, arena_t *arena, 2085b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, 2086b7eaed25SJason Evans bool growing_retained) { 2087b7eaed25SJason Evans witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 2088b7eaed25SJason Evans WITNESS_RANK_CORE, growing_retained ? 1 : 0); 2089b7eaed25SJason Evans 2090b7eaed25SJason Evans extent_hooks_assure_initialized(arena, r_extent_hooks); 2091b7eaed25SJason Evans 2092b7eaed25SJason Evans if ((*r_extent_hooks)->merge == NULL) { 2093b7eaed25SJason Evans return true; 2094b7eaed25SJason Evans } 2095b7eaed25SJason Evans 2096b7eaed25SJason Evans bool err; 2097b7eaed25SJason Evans if (*r_extent_hooks == &extent_hooks_default) { 2098b7eaed25SJason Evans /* Call directly to propagate tsdn. 
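 * (extent_merge_default_impl() above refuses the merge when the platform
 * cannot coalesce mappings at all, or when have_dss and extent_dss_mergeable()
 * reports that the two address ranges may not be merged; a refusal simply
 * leaves the two extents separate.)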
*/ 2099b7eaed25SJason Evans err = extent_merge_default_impl(extent_base_get(a), 2100b7eaed25SJason Evans extent_base_get(b)); 2101b7eaed25SJason Evans } else { 21028b2f5aafSJason Evans extent_hook_pre_reentrancy(tsdn, arena); 2103b7eaed25SJason Evans err = (*r_extent_hooks)->merge(*r_extent_hooks, 2104b7eaed25SJason Evans extent_base_get(a), extent_size_get(a), extent_base_get(b), 2105b7eaed25SJason Evans extent_size_get(b), extent_committed_get(a), 2106b7eaed25SJason Evans arena_ind_get(arena)); 21078b2f5aafSJason Evans extent_hook_post_reentrancy(tsdn); 2108b7eaed25SJason Evans } 2109b7eaed25SJason Evans 2110b7eaed25SJason Evans if (err) { 2111b7eaed25SJason Evans return true; 2112b7eaed25SJason Evans } 2113b7eaed25SJason Evans 2114b7eaed25SJason Evans /* 2115b7eaed25SJason Evans * The rtree writes must happen while all the relevant elements are 2116b7eaed25SJason Evans * owned, so the following code uses decomposed helper functions rather 2117b7eaed25SJason Evans * than extent_{,de}register() to do things in the right order. 2118b7eaed25SJason Evans */ 2119b7eaed25SJason Evans rtree_ctx_t rtree_ctx_fallback; 2120b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 2121b7eaed25SJason Evans rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b; 2122b7eaed25SJason Evans extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a, 2123b7eaed25SJason Evans &a_elm_b); 2124b7eaed25SJason Evans extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a, 2125b7eaed25SJason Evans &b_elm_b); 2126b7eaed25SJason Evans 2127b7eaed25SJason Evans extent_lock2(tsdn, a, b); 2128b7eaed25SJason Evans 2129b7eaed25SJason Evans if (a_elm_b != NULL) { 2130b7eaed25SJason Evans rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL, 2131b7eaed25SJason Evans NSIZES, false); 2132b7eaed25SJason Evans } 2133b7eaed25SJason Evans if (b_elm_b != NULL) { 2134b7eaed25SJason Evans rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL, 2135b7eaed25SJason Evans NSIZES, false); 2136b7eaed25SJason Evans } else { 2137b7eaed25SJason Evans b_elm_b = b_elm_a; 2138b7eaed25SJason Evans } 2139b7eaed25SJason Evans 2140b7eaed25SJason Evans extent_size_set(a, extent_size_get(a) + extent_size_get(b)); 2141b7eaed25SJason Evans extent_szind_set(a, NSIZES); 2142b7eaed25SJason Evans extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ? 
2143b7eaed25SJason Evans extent_sn_get(a) : extent_sn_get(b)); 2144b7eaed25SJason Evans extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b)); 2145b7eaed25SJason Evans 2146b7eaed25SJason Evans extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false); 2147b7eaed25SJason Evans 2148b7eaed25SJason Evans extent_unlock2(tsdn, a, b); 2149b7eaed25SJason Evans 2150b7eaed25SJason Evans extent_dalloc(tsdn, extent_arena_get(b), b); 2151b7eaed25SJason Evans 2152b7eaed25SJason Evans return false; 2153b7eaed25SJason Evans } 2154b7eaed25SJason Evans 2155b7eaed25SJason Evans bool 2156b7eaed25SJason Evans extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, 2157b7eaed25SJason Evans extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) { 2158b7eaed25SJason Evans return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false); 2159b7eaed25SJason Evans } 2160b7eaed25SJason Evans 2161b7eaed25SJason Evans bool 2162b7eaed25SJason Evans extent_boot(void) { 2163b7eaed25SJason Evans if (rtree_new(&extents_rtree, true)) { 2164b7eaed25SJason Evans return true; 2165b7eaed25SJason Evans } 2166b7eaed25SJason Evans 2167b7eaed25SJason Evans if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool", 2168b7eaed25SJason Evans WITNESS_RANK_EXTENT_POOL)) { 2169b7eaed25SJason Evans return true; 2170b7eaed25SJason Evans } 2171b7eaed25SJason Evans 2172b7eaed25SJason Evans if (have_dss) { 2173b7eaed25SJason Evans extent_dss_boot(); 2174b7eaed25SJason Evans } 2175b7eaed25SJason Evans 2176b7eaed25SJason Evans return false; 2177b7eaed25SJason Evans } 2178
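/*
 * Illustrative sketch (not part of this file, hence the #if 0): a minimal
 * user-supplied extent_hooks_t of the kind the wrappers above dispatch to.
 * Only alloc and dalloc are overridden; the remaining members are left NULL,
 * which the wrappers in this file treat as "operation unsupported".  The
 * mmap-based bodies and the name example_hooks are assumptions made for the
 * example; a real alloc hook must also honor the requested alignment, which
 * is omitted here for brevity.  Such a table would typically be installed
 * through the "arena.<i>.extent_hooks" mallctl.
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <sys/mman.h>

#include <jemalloc/jemalloc.h>

static void *
example_alloc(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	void *ret = mmap(new_addr, size, PROT_READ | PROT_WRITE,
	    MAP_PRIVATE | MAP_ANON, -1, 0);
	if (ret == MAP_FAILED) {
		return NULL;
	}
	*zero = true;	/* Fresh anonymous pages are zero-filled. */
	*commit = true;	/* And backed by the kernel on demand. */
	return ret;
}

static bool
example_dalloc(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	/* Return false on success, matching the hook convention above. */
	return munmap(addr, size) != 0;
}

static extent_hooks_t example_hooks = {
	example_alloc,
	example_dalloc,
	NULL,	/* destroy */
	NULL,	/* commit */
	NULL,	/* decommit */
	NULL,	/* purge_lazy */
	NULL,	/* purge_forced */
	NULL,	/* split */
	NULL	/* merge */
};
#endif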