1 #define JEMALLOC_EXTENT_C_ 2 #include "jemalloc/internal/jemalloc_preamble.h" 3 #include "jemalloc/internal/jemalloc_internal_includes.h" 4 5 #include "jemalloc/internal/assert.h" 6 #include "jemalloc/internal/extent_dss.h" 7 #include "jemalloc/internal/extent_mmap.h" 8 #include "jemalloc/internal/ph.h" 9 #include "jemalloc/internal/rtree.h" 10 #include "jemalloc/internal/mutex.h" 11 #include "jemalloc/internal/mutex_pool.h" 12 13 /******************************************************************************/ 14 /* Data. */ 15 16 rtree_t extents_rtree; 17 /* Keyed by the address of the extent_t being protected. */ 18 mutex_pool_t extent_mutex_pool; 19 20 static const bitmap_info_t extents_bitmap_info = 21 BITMAP_INFO_INITIALIZER(NPSIZES+1); 22 23 static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, 24 size_t size, size_t alignment, bool *zero, bool *commit, 25 unsigned arena_ind); 26 static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, 27 size_t size, bool committed, unsigned arena_ind); 28 static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, 29 size_t size, bool committed, unsigned arena_ind); 30 static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr, 31 size_t size, size_t offset, size_t length, unsigned arena_ind); 32 static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena, 33 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 34 size_t length, bool growing_retained); 35 static bool extent_decommit_default(extent_hooks_t *extent_hooks, 36 void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); 37 #ifdef PAGES_CAN_PURGE_LAZY 38 static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, 39 size_t size, size_t offset, size_t length, unsigned arena_ind); 40 #endif 41 static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, 42 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 43 size_t length, bool growing_retained); 44 #ifdef PAGES_CAN_PURGE_FORCED 45 static bool extent_purge_forced_default(extent_hooks_t *extent_hooks, 46 void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind); 47 #endif 48 static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, 49 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 50 size_t length, bool growing_retained); 51 #ifdef JEMALLOC_MAPS_COALESCE 52 static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr, 53 size_t size, size_t size_a, size_t size_b, bool committed, 54 unsigned arena_ind); 55 #endif 56 static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena, 57 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, 58 szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, 59 bool growing_retained); 60 #ifdef JEMALLOC_MAPS_COALESCE 61 static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, 62 size_t size_a, void *addr_b, size_t size_b, bool committed, 63 unsigned arena_ind); 64 #endif 65 static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena, 66 extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, 67 bool growing_retained); 68 69 const extent_hooks_t extent_hooks_default = { 70 extent_alloc_default, 71 extent_dalloc_default, 72 extent_destroy_default, 73 extent_commit_default, 74 extent_decommit_default 75 #ifdef PAGES_CAN_PURGE_LAZY 76 , 77 extent_purge_lazy_default 78 #else 79 , 80 NULL 81 #endif 82 #ifdef PAGES_CAN_PURGE_FORCED 83 , 84 extent_purge_forced_default 85 #else 86 , 87 NULL 88 #endif 89 #ifdef JEMALLOC_MAPS_COALESCE 90 , 91 extent_split_default, 92 extent_merge_default 93 #endif 94 }; 95 96 /* Used exclusively for gdump triggering. */ 97 static atomic_zu_t curpages; 98 static atomic_zu_t highpages; 99 100 /******************************************************************************/ 101 /* 102 * Function prototypes for static functions that are referenced prior to 103 * definition. 104 */ 105 106 static void extent_deregister(tsdn_t *tsdn, extent_t *extent); 107 static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena, 108 extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr, 109 size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind, 110 bool *zero, bool *commit, bool growing_retained); 111 static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, 112 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, 113 extent_t *extent, bool *coalesced, bool growing_retained); 114 static void extent_record(tsdn_t *tsdn, arena_t *arena, 115 extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent, 116 bool growing_retained); 117 118 /******************************************************************************/ 119 120 rb_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, rb_link, 121 extent_esnead_comp) 122 123 typedef enum { 124 lock_result_success, 125 lock_result_failure, 126 lock_result_no_extent 127 } lock_result_t; 128 129 static lock_result_t 130 extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm, 131 extent_t **result) { 132 extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree, 133 elm, true); 134 135 if (extent1 == NULL) { 136 return lock_result_no_extent; 137 } 138 /* 139 * It's possible that the extent changed out from under us, and with it 140 * the leaf->extent mapping. We have to recheck while holding the lock. 141 */ 142 extent_lock(tsdn, extent1); 143 extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn, 144 &extents_rtree, elm, true); 145 146 if (extent1 == extent2) { 147 *result = extent1; 148 return lock_result_success; 149 } else { 150 extent_unlock(tsdn, extent1); 151 return lock_result_failure; 152 } 153 } 154 155 /* 156 * Returns a pool-locked extent_t * if there's one associated with the given 157 * address, and NULL otherwise. 158 */ 159 static extent_t * 160 extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) { 161 extent_t *ret = NULL; 162 rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree, 163 rtree_ctx, (uintptr_t)addr, false, false); 164 if (elm == NULL) { 165 return NULL; 166 } 167 lock_result_t lock_result; 168 do { 169 lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret); 170 } while (lock_result == lock_result_failure); 171 return ret; 172 } 173 174 extent_t * 175 extent_alloc(tsdn_t *tsdn, arena_t *arena) { 176 malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); 177 extent_t *extent = extent_avail_first(&arena->extent_avail); 178 if (extent == NULL) { 179 malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); 180 return base_alloc_extent(tsdn, arena->base); 181 } 182 extent_avail_remove(&arena->extent_avail, extent); 183 malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); 184 return extent; 185 } 186 187 void 188 extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { 189 malloc_mutex_lock(tsdn, &arena->extent_avail_mtx); 190 extent_avail_insert(&arena->extent_avail, extent); 191 malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx); 192 } 193 194 extent_hooks_t * 195 extent_hooks_get(arena_t *arena) { 196 return base_extent_hooks_get(arena->base); 197 } 198 199 extent_hooks_t * 200 extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) { 201 background_thread_info_t *info; 202 if (have_background_thread) { 203 info = arena_background_thread_info_get(arena); 204 malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx); 205 } 206 extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks); 207 if (have_background_thread) { 208 malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx); 209 } 210 211 return ret; 212 } 213 214 static void 215 extent_hooks_assure_initialized(arena_t *arena, 216 extent_hooks_t **r_extent_hooks) { 217 if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) { 218 *r_extent_hooks = extent_hooks_get(arena); 219 } 220 } 221 222 #ifndef JEMALLOC_JET 223 static 224 #endif 225 size_t 226 extent_size_quantize_floor(size_t size) { 227 size_t ret; 228 pszind_t pind; 229 230 assert(size > 0); 231 assert((size & PAGE_MASK) == 0); 232 233 pind = sz_psz2ind(size - sz_large_pad + 1); 234 if (pind == 0) { 235 /* 236 * Avoid underflow. This short-circuit would also do the right 237 * thing for all sizes in the range for which there are 238 * PAGE-spaced size classes, but it's simplest to just handle 239 * the one case that would cause erroneous results. 240 */ 241 return size; 242 } 243 ret = sz_pind2sz(pind - 1) + sz_large_pad; 244 assert(ret <= size); 245 return ret; 246 } 247 248 #ifndef JEMALLOC_JET 249 static 250 #endif 251 size_t 252 extent_size_quantize_ceil(size_t size) { 253 size_t ret; 254 255 assert(size > 0); 256 assert(size - sz_large_pad <= LARGE_MAXCLASS); 257 assert((size & PAGE_MASK) == 0); 258 259 ret = extent_size_quantize_floor(size); 260 if (ret < size) { 261 /* 262 * Skip a quantization that may have an adequately large extent, 263 * because under-sized extents may be mixed in. This only 264 * happens when an unusual size is requested, i.e. for aligned 265 * allocation, and is just one of several places where linear 266 * search would potentially find sufficiently aligned available 267 * memory somewhere lower. 268 */ 269 ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) + 270 sz_large_pad; 271 } 272 return ret; 273 } 274 275 /* Generate pairing heap functions. */ 276 ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp) 277 278 bool 279 extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state, 280 bool delay_coalesce) { 281 if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS, 282 malloc_mutex_rank_exclusive)) { 283 return true; 284 } 285 for (unsigned i = 0; i < NPSIZES+1; i++) { 286 extent_heap_new(&extents->heaps[i]); 287 } 288 bitmap_init(extents->bitmap, &extents_bitmap_info, true); 289 extent_list_init(&extents->lru); 290 atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED); 291 extents->state = state; 292 extents->delay_coalesce = delay_coalesce; 293 return false; 294 } 295 296 extent_state_t 297 extents_state_get(const extents_t *extents) { 298 return extents->state; 299 } 300 301 size_t 302 extents_npages_get(extents_t *extents) { 303 return atomic_load_zu(&extents->npages, ATOMIC_RELAXED); 304 } 305 306 static void 307 extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent, 308 bool preserve_lru) { 309 malloc_mutex_assert_owner(tsdn, &extents->mtx); 310 assert(extent_state_get(extent) == extents->state); 311 312 size_t size = extent_size_get(extent); 313 size_t psz = extent_size_quantize_floor(size); 314 pszind_t pind = sz_psz2ind(psz); 315 if (extent_heap_empty(&extents->heaps[pind])) { 316 bitmap_unset(extents->bitmap, &extents_bitmap_info, 317 (size_t)pind); 318 } 319 extent_heap_insert(&extents->heaps[pind], extent); 320 if (!preserve_lru) { 321 extent_list_append(&extents->lru, extent); 322 } 323 size_t npages = size >> LG_PAGE; 324 /* 325 * All modifications to npages hold the mutex (as asserted above), so we 326 * don't need an atomic fetch-add; we can get by with a load followed by 327 * a store. 328 */ 329 size_t cur_extents_npages = 330 atomic_load_zu(&extents->npages, ATOMIC_RELAXED); 331 atomic_store_zu(&extents->npages, cur_extents_npages + npages, 332 ATOMIC_RELAXED); 333 } 334 335 static void 336 extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent, 337 bool preserve_lru) { 338 malloc_mutex_assert_owner(tsdn, &extents->mtx); 339 assert(extent_state_get(extent) == extents->state); 340 341 size_t size = extent_size_get(extent); 342 size_t psz = extent_size_quantize_floor(size); 343 pszind_t pind = sz_psz2ind(psz); 344 extent_heap_remove(&extents->heaps[pind], extent); 345 if (extent_heap_empty(&extents->heaps[pind])) { 346 bitmap_set(extents->bitmap, &extents_bitmap_info, 347 (size_t)pind); 348 } 349 if (!preserve_lru) { 350 extent_list_remove(&extents->lru, extent); 351 } 352 size_t npages = size >> LG_PAGE; 353 /* 354 * As in extents_insert_locked, we hold extents->mtx and so don't need 355 * atomic operations for updating extents->npages. 356 */ 357 size_t cur_extents_npages = 358 atomic_load_zu(&extents->npages, ATOMIC_RELAXED); 359 assert(cur_extents_npages >= npages); 360 atomic_store_zu(&extents->npages, 361 cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED); 362 } 363 364 /* Do any-best-fit extent selection, i.e. select any extent that best fits. */ 365 static extent_t * 366 extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, 367 size_t size) { 368 pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); 369 pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, 370 (size_t)pind); 371 if (i < NPSIZES+1) { 372 assert(!extent_heap_empty(&extents->heaps[i])); 373 extent_t *extent = extent_heap_any(&extents->heaps[i]); 374 assert(extent_size_get(extent) >= size); 375 return extent; 376 } 377 378 return NULL; 379 } 380 381 /* 382 * Do first-fit extent selection, i.e. select the oldest/lowest extent that is 383 * large enough. 384 */ 385 static extent_t * 386 extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, 387 size_t size) { 388 extent_t *ret = NULL; 389 390 pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size)); 391 for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, 392 &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i = 393 (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info, 394 (size_t)i+1)) { 395 assert(!extent_heap_empty(&extents->heaps[i])); 396 extent_t *extent = extent_heap_first(&extents->heaps[i]); 397 assert(extent_size_get(extent) >= size); 398 if (ret == NULL || extent_snad_comp(extent, ret) < 0) { 399 ret = extent; 400 } 401 if (i == NPSIZES) { 402 break; 403 } 404 assert(i < NPSIZES); 405 } 406 407 return ret; 408 } 409 410 /* 411 * Do {best,first}-fit extent selection, where the selection policy choice is 412 * based on extents->delay_coalesce. Best-fit selection requires less 413 * searching, but its layout policy is less stable and may cause higher virtual 414 * memory fragmentation as a side effect. 415 */ 416 static extent_t * 417 extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, 418 size_t size) { 419 malloc_mutex_assert_owner(tsdn, &extents->mtx); 420 421 return extents->delay_coalesce ? extents_best_fit_locked(tsdn, arena, 422 extents, size) : extents_first_fit_locked(tsdn, arena, extents, 423 size); 424 } 425 426 static bool 427 extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena, 428 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, 429 extent_t *extent) { 430 extent_state_set(extent, extent_state_active); 431 bool coalesced; 432 extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx, 433 extents, extent, &coalesced, false); 434 extent_state_set(extent, extents_state_get(extents)); 435 436 if (!coalesced) { 437 return true; 438 } 439 extents_insert_locked(tsdn, extents, extent, true); 440 return false; 441 } 442 443 extent_t * 444 extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, 445 extents_t *extents, void *new_addr, size_t size, size_t pad, 446 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { 447 assert(size + pad != 0); 448 assert(alignment != 0); 449 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 450 WITNESS_RANK_CORE, 0); 451 452 return extent_recycle(tsdn, arena, r_extent_hooks, extents, new_addr, 453 size, pad, alignment, slab, szind, zero, commit, false); 454 } 455 456 void 457 extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, 458 extents_t *extents, extent_t *extent) { 459 assert(extent_base_get(extent) != NULL); 460 assert(extent_size_get(extent) != 0); 461 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 462 WITNESS_RANK_CORE, 0); 463 464 extent_addr_set(extent, extent_base_get(extent)); 465 extent_zeroed_set(extent, false); 466 467 extent_record(tsdn, arena, r_extent_hooks, extents, extent, false); 468 } 469 470 extent_t * 471 extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, 472 extents_t *extents, size_t npages_min) { 473 rtree_ctx_t rtree_ctx_fallback; 474 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 475 476 malloc_mutex_lock(tsdn, &extents->mtx); 477 478 /* 479 * Get the LRU coalesced extent, if any. If coalescing was delayed, 480 * the loop will iterate until the LRU extent is fully coalesced. 481 */ 482 extent_t *extent; 483 while (true) { 484 /* Get the LRU extent, if any. */ 485 extent = extent_list_first(&extents->lru); 486 if (extent == NULL) { 487 goto label_return; 488 } 489 /* Check the eviction limit. */ 490 size_t npages = extent_size_get(extent) >> LG_PAGE; 491 size_t extents_npages = atomic_load_zu(&extents->npages, 492 ATOMIC_RELAXED); 493 if (extents_npages - npages < npages_min) { 494 extent = NULL; 495 goto label_return; 496 } 497 extents_remove_locked(tsdn, extents, extent, false); 498 if (!extents->delay_coalesce) { 499 break; 500 } 501 /* Try to coalesce. */ 502 if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks, 503 rtree_ctx, extents, extent)) { 504 break; 505 } 506 /* 507 * The LRU extent was just coalesced and the result placed in 508 * the LRU at its neighbor's position. Start over. 509 */ 510 } 511 512 /* 513 * Either mark the extent active or deregister it to protect against 514 * concurrent operations. 515 */ 516 switch (extents_state_get(extents)) { 517 case extent_state_active: 518 not_reached(); 519 case extent_state_dirty: 520 case extent_state_muzzy: 521 extent_state_set(extent, extent_state_active); 522 break; 523 case extent_state_retained: 524 extent_deregister(tsdn, extent); 525 break; 526 default: 527 not_reached(); 528 } 529 530 label_return: 531 malloc_mutex_unlock(tsdn, &extents->mtx); 532 return extent; 533 } 534 535 static void 536 extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, 537 extents_t *extents, extent_t *extent, bool growing_retained) { 538 /* 539 * Leak extent after making sure its pages have already been purged, so 540 * that this is only a virtual memory leak. 541 */ 542 if (extents_state_get(extents) == extent_state_dirty) { 543 if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, 544 extent, 0, extent_size_get(extent), growing_retained)) { 545 extent_purge_forced_impl(tsdn, arena, r_extent_hooks, 546 extent, 0, extent_size_get(extent), 547 growing_retained); 548 } 549 } 550 extent_dalloc(tsdn, arena, extent); 551 } 552 553 void 554 extents_prefork(tsdn_t *tsdn, extents_t *extents) { 555 malloc_mutex_prefork(tsdn, &extents->mtx); 556 } 557 558 void 559 extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) { 560 malloc_mutex_postfork_parent(tsdn, &extents->mtx); 561 } 562 563 void 564 extents_postfork_child(tsdn_t *tsdn, extents_t *extents) { 565 malloc_mutex_postfork_child(tsdn, &extents->mtx); 566 } 567 568 static void 569 extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, 570 extent_t *extent, bool preserve_lru) { 571 assert(extent_arena_get(extent) == arena); 572 assert(extent_state_get(extent) == extent_state_active); 573 574 extent_state_set(extent, extents_state_get(extents)); 575 extents_insert_locked(tsdn, extents, extent, preserve_lru); 576 } 577 578 static void 579 extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents, 580 extent_t *extent, bool preserve_lru) { 581 malloc_mutex_lock(tsdn, &extents->mtx); 582 extent_deactivate_locked(tsdn, arena, extents, extent, preserve_lru); 583 malloc_mutex_unlock(tsdn, &extents->mtx); 584 } 585 586 static void 587 extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents, 588 extent_t *extent, bool preserve_lru) { 589 assert(extent_arena_get(extent) == arena); 590 assert(extent_state_get(extent) == extents_state_get(extents)); 591 592 extents_remove_locked(tsdn, extents, extent, preserve_lru); 593 extent_state_set(extent, extent_state_active); 594 } 595 596 static bool 597 extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, 598 const extent_t *extent, bool dependent, bool init_missing, 599 rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) { 600 *r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, 601 (uintptr_t)extent_base_get(extent), dependent, init_missing); 602 if (!dependent && *r_elm_a == NULL) { 603 return true; 604 } 605 assert(*r_elm_a != NULL); 606 607 *r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx, 608 (uintptr_t)extent_last_get(extent), dependent, init_missing); 609 if (!dependent && *r_elm_b == NULL) { 610 return true; 611 } 612 assert(*r_elm_b != NULL); 613 614 return false; 615 } 616 617 static void 618 extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a, 619 rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) { 620 rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab); 621 if (elm_b != NULL) { 622 rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind, 623 slab); 624 } 625 } 626 627 static void 628 extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent, 629 szind_t szind) { 630 assert(extent_slab_get(extent)); 631 632 /* Register interior. */ 633 for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { 634 rtree_write(tsdn, &extents_rtree, rtree_ctx, 635 (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << 636 LG_PAGE), extent, szind, true); 637 } 638 } 639 640 static void 641 extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) { 642 cassert(config_prof); 643 /* prof_gdump() requirement. */ 644 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 645 WITNESS_RANK_CORE, 0); 646 647 if (opt_prof && extent_state_get(extent) == extent_state_active) { 648 size_t nadd = extent_size_get(extent) >> LG_PAGE; 649 size_t cur = atomic_fetch_add_zu(&curpages, nadd, 650 ATOMIC_RELAXED) + nadd; 651 size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED); 652 while (cur > high && !atomic_compare_exchange_weak_zu( 653 &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) { 654 /* 655 * Don't refresh cur, because it may have decreased 656 * since this thread lost the highpages update race. 657 * Note that high is updated in case of CAS failure. 658 */ 659 } 660 if (cur > high && prof_gdump_get_unlocked()) { 661 prof_gdump(tsdn); 662 } 663 } 664 } 665 666 static void 667 extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) { 668 cassert(config_prof); 669 670 if (opt_prof && extent_state_get(extent) == extent_state_active) { 671 size_t nsub = extent_size_get(extent) >> LG_PAGE; 672 assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub); 673 atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED); 674 } 675 } 676 677 static bool 678 extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) { 679 rtree_ctx_t rtree_ctx_fallback; 680 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 681 rtree_leaf_elm_t *elm_a, *elm_b; 682 683 /* 684 * We need to hold the lock to protect against a concurrent coalesce 685 * operation that sees us in a partial state. 686 */ 687 extent_lock(tsdn, extent); 688 689 if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true, 690 &elm_a, &elm_b)) { 691 return true; 692 } 693 694 szind_t szind = extent_szind_get_maybe_invalid(extent); 695 bool slab = extent_slab_get(extent); 696 extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab); 697 if (slab) { 698 extent_interior_register(tsdn, rtree_ctx, extent, szind); 699 } 700 701 extent_unlock(tsdn, extent); 702 703 if (config_prof && gdump_add) { 704 extent_gdump_add(tsdn, extent); 705 } 706 707 return false; 708 } 709 710 static bool 711 extent_register(tsdn_t *tsdn, extent_t *extent) { 712 return extent_register_impl(tsdn, extent, true); 713 } 714 715 static bool 716 extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) { 717 return extent_register_impl(tsdn, extent, false); 718 } 719 720 static void 721 extent_reregister(tsdn_t *tsdn, extent_t *extent) { 722 bool err = extent_register(tsdn, extent); 723 assert(!err); 724 } 725 726 static void 727 extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, 728 extent_t *extent) { 729 size_t i; 730 731 assert(extent_slab_get(extent)); 732 733 for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) { 734 rtree_clear(tsdn, &extents_rtree, rtree_ctx, 735 (uintptr_t)extent_base_get(extent) + (uintptr_t)(i << 736 LG_PAGE)); 737 } 738 } 739 740 static void 741 extent_deregister(tsdn_t *tsdn, extent_t *extent) { 742 rtree_ctx_t rtree_ctx_fallback; 743 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 744 rtree_leaf_elm_t *elm_a, *elm_b; 745 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false, 746 &elm_a, &elm_b); 747 748 extent_lock(tsdn, extent); 749 750 extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false); 751 if (extent_slab_get(extent)) { 752 extent_interior_deregister(tsdn, rtree_ctx, extent); 753 extent_slab_set(extent, false); 754 } 755 756 extent_unlock(tsdn, extent); 757 758 if (config_prof) { 759 extent_gdump_sub(tsdn, extent); 760 } 761 } 762 763 static extent_t * 764 extent_recycle_extract(tsdn_t *tsdn, arena_t *arena, 765 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, 766 void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, 767 bool *zero, bool *commit, bool growing_retained) { 768 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 769 WITNESS_RANK_CORE, growing_retained ? 1 : 0); 770 assert(alignment > 0); 771 if (config_debug && new_addr != NULL) { 772 /* 773 * Non-NULL new_addr has two use cases: 774 * 775 * 1) Recycle a known-extant extent, e.g. during purging. 776 * 2) Perform in-place expanding reallocation. 777 * 778 * Regardless of use case, new_addr must either refer to a 779 * non-existing extent, or to the base of an extant extent, 780 * since only active slabs support interior lookups (which of 781 * course cannot be recycled). 782 */ 783 assert(PAGE_ADDR2BASE(new_addr) == new_addr); 784 assert(pad == 0); 785 assert(alignment <= PAGE); 786 } 787 788 size_t esize = size + pad; 789 size_t alloc_size = esize + PAGE_CEILING(alignment) - PAGE; 790 /* Beware size_t wrap-around. */ 791 if (alloc_size < esize) { 792 return NULL; 793 } 794 malloc_mutex_lock(tsdn, &extents->mtx); 795 extent_hooks_assure_initialized(arena, r_extent_hooks); 796 extent_t *extent; 797 if (new_addr != NULL) { 798 extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr); 799 if (extent != NULL) { 800 /* 801 * We might null-out extent to report an error, but we 802 * still need to unlock the associated mutex after. 803 */ 804 extent_t *unlock_extent = extent; 805 assert(extent_base_get(extent) == new_addr); 806 if (extent_arena_get(extent) != arena || 807 extent_size_get(extent) < esize || 808 extent_state_get(extent) != 809 extents_state_get(extents)) { 810 extent = NULL; 811 } 812 extent_unlock(tsdn, unlock_extent); 813 } 814 } else { 815 extent = extents_fit_locked(tsdn, arena, extents, alloc_size); 816 } 817 if (extent == NULL) { 818 malloc_mutex_unlock(tsdn, &extents->mtx); 819 return NULL; 820 } 821 822 extent_activate_locked(tsdn, arena, extents, extent, false); 823 malloc_mutex_unlock(tsdn, &extents->mtx); 824 825 if (extent_zeroed_get(extent)) { 826 *zero = true; 827 } 828 if (extent_committed_get(extent)) { 829 *commit = true; 830 } 831 832 return extent; 833 } 834 835 static extent_t * 836 extent_recycle_split(tsdn_t *tsdn, arena_t *arena, 837 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, 838 void *new_addr, size_t size, size_t pad, size_t alignment, bool slab, 839 szind_t szind, extent_t *extent, bool growing_retained) { 840 size_t esize = size + pad; 841 size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent), 842 PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent); 843 assert(new_addr == NULL || leadsize == 0); 844 assert(extent_size_get(extent) >= leadsize + esize); 845 size_t trailsize = extent_size_get(extent) - leadsize - esize; 846 847 /* Split the lead. */ 848 if (leadsize != 0) { 849 extent_t *lead = extent; 850 extent = extent_split_impl(tsdn, arena, r_extent_hooks, 851 lead, leadsize, NSIZES, false, esize + trailsize, szind, 852 slab, growing_retained); 853 if (extent == NULL) { 854 extent_deregister(tsdn, lead); 855 extents_leak(tsdn, arena, r_extent_hooks, extents, 856 lead, growing_retained); 857 return NULL; 858 } 859 extent_deactivate(tsdn, arena, extents, lead, false); 860 } 861 862 /* Split the trail. */ 863 if (trailsize != 0) { 864 extent_t *trail = extent_split_impl(tsdn, arena, 865 r_extent_hooks, extent, esize, szind, slab, trailsize, 866 NSIZES, false, growing_retained); 867 if (trail == NULL) { 868 extent_deregister(tsdn, extent); 869 extents_leak(tsdn, arena, r_extent_hooks, extents, 870 extent, growing_retained); 871 return NULL; 872 } 873 extent_deactivate(tsdn, arena, extents, trail, false); 874 } else if (leadsize == 0) { 875 /* 876 * Splitting causes szind to be set as a side effect, but no 877 * splitting occurred. 878 */ 879 extent_szind_set(extent, szind); 880 if (szind != NSIZES) { 881 rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, 882 (uintptr_t)extent_addr_get(extent), szind, slab); 883 if (slab && extent_size_get(extent) > PAGE) { 884 rtree_szind_slab_update(tsdn, &extents_rtree, 885 rtree_ctx, 886 (uintptr_t)extent_past_get(extent) - 887 (uintptr_t)PAGE, szind, slab); 888 } 889 } 890 } 891 892 return extent; 893 } 894 895 static extent_t * 896 extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, 897 extents_t *extents, void *new_addr, size_t size, size_t pad, 898 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit, 899 bool growing_retained) { 900 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 901 WITNESS_RANK_CORE, growing_retained ? 1 : 0); 902 assert(new_addr == NULL || !slab); 903 assert(pad == 0 || !slab); 904 assert(!*zero || !slab); 905 906 rtree_ctx_t rtree_ctx_fallback; 907 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 908 909 bool committed = false; 910 extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks, 911 rtree_ctx, extents, new_addr, size, pad, alignment, slab, zero, 912 &committed, growing_retained); 913 if (extent == NULL) { 914 return NULL; 915 } 916 if (committed) { 917 *commit = true; 918 } 919 920 extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx, 921 extents, new_addr, size, pad, alignment, slab, szind, extent, 922 growing_retained); 923 if (extent == NULL) { 924 return NULL; 925 } 926 927 if (*commit && !extent_committed_get(extent)) { 928 if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 929 0, extent_size_get(extent), growing_retained)) { 930 extent_record(tsdn, arena, r_extent_hooks, extents, 931 extent, growing_retained); 932 return NULL; 933 } 934 extent_zeroed_set(extent, true); 935 } 936 937 if (pad != 0) { 938 extent_addr_randomize(tsdn, extent, alignment); 939 } 940 assert(extent_state_get(extent) == extent_state_active); 941 if (slab) { 942 extent_slab_set(extent, slab); 943 extent_interior_register(tsdn, rtree_ctx, extent, szind); 944 } 945 946 if (*zero) { 947 void *addr = extent_base_get(extent); 948 size_t size = extent_size_get(extent); 949 if (!extent_zeroed_get(extent)) { 950 if (pages_purge_forced(addr, size)) { 951 memset(addr, 0, size); 952 } 953 } else if (config_debug) { 954 size_t *p = (size_t *)(uintptr_t)addr; 955 for (size_t i = 0; i < size / sizeof(size_t); i++) { 956 assert(p[i] == 0); 957 } 958 } 959 } 960 return extent; 961 } 962 963 /* 964 * If the caller specifies (!*zero), it is still possible to receive zeroed 965 * memory, in which case *zero is toggled to true. arena_extent_alloc() takes 966 * advantage of this to avoid demanding zeroed extents, but taking advantage of 967 * them if they are returned. 968 */ 969 static void * 970 extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size, 971 size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) { 972 void *ret; 973 974 assert(size != 0); 975 assert(alignment != 0); 976 977 /* "primary" dss. */ 978 if (have_dss && dss_prec == dss_prec_primary && (ret = 979 extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, 980 commit)) != NULL) { 981 return ret; 982 } 983 /* mmap. */ 984 if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit)) 985 != NULL) { 986 return ret; 987 } 988 /* "secondary" dss. */ 989 if (have_dss && dss_prec == dss_prec_secondary && (ret = 990 extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero, 991 commit)) != NULL) { 992 return ret; 993 } 994 995 /* All strategies for allocation failed. */ 996 return NULL; 997 } 998 999 static void * 1000 extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr, 1001 size_t size, size_t alignment, bool *zero, bool *commit) { 1002 void *ret; 1003 1004 ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero, 1005 commit, (dss_prec_t)atomic_load_u(&arena->dss_prec, 1006 ATOMIC_RELAXED)); 1007 return ret; 1008 } 1009 1010 static void * 1011 extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size, 1012 size_t alignment, bool *zero, bool *commit, unsigned arena_ind) { 1013 tsdn_t *tsdn; 1014 arena_t *arena; 1015 1016 tsdn = tsdn_fetch(); 1017 arena = arena_get(tsdn, arena_ind, false); 1018 /* 1019 * The arena we're allocating on behalf of must have been initialized 1020 * already. 1021 */ 1022 assert(arena != NULL); 1023 1024 return extent_alloc_default_impl(tsdn, arena, new_addr, size, 1025 alignment, zero, commit); 1026 } 1027 1028 static void 1029 extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) { 1030 tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); 1031 pre_reentrancy(tsd, arena); 1032 } 1033 1034 static void 1035 extent_hook_post_reentrancy(tsdn_t *tsdn) { 1036 tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn); 1037 post_reentrancy(tsd); 1038 } 1039 1040 /* 1041 * If virtual memory is retained, create increasingly larger extents from which 1042 * to split requested extents in order to limit the total number of disjoint 1043 * virtual memory ranges retained by each arena. 1044 */ 1045 static extent_t * 1046 extent_grow_retained(tsdn_t *tsdn, arena_t *arena, 1047 extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment, 1048 bool slab, szind_t szind, bool *zero, bool *commit) { 1049 malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx); 1050 assert(pad == 0 || !slab); 1051 assert(!*zero || !slab); 1052 1053 size_t esize = size + pad; 1054 size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE; 1055 /* Beware size_t wrap-around. */ 1056 if (alloc_size_min < esize) { 1057 goto label_err; 1058 } 1059 /* 1060 * Find the next extent size in the series that would be large enough to 1061 * satisfy this request. 1062 */ 1063 pszind_t egn_skip = 0; 1064 size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); 1065 while (alloc_size < alloc_size_min) { 1066 egn_skip++; 1067 if (arena->extent_grow_next + egn_skip == NPSIZES) { 1068 /* Outside legal range. */ 1069 goto label_err; 1070 } 1071 assert(arena->extent_grow_next + egn_skip < NPSIZES); 1072 alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip); 1073 } 1074 1075 extent_t *extent = extent_alloc(tsdn, arena); 1076 if (extent == NULL) { 1077 goto label_err; 1078 } 1079 bool zeroed = false; 1080 bool committed = false; 1081 1082 void *ptr; 1083 if (*r_extent_hooks == &extent_hooks_default) { 1084 ptr = extent_alloc_core(tsdn, arena, NULL, alloc_size, PAGE, 1085 &zeroed, &committed, (dss_prec_t)atomic_load_u( 1086 &arena->dss_prec, ATOMIC_RELAXED)); 1087 } else { 1088 extent_hook_pre_reentrancy(tsdn, arena); 1089 ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL, 1090 alloc_size, PAGE, &zeroed, &committed, 1091 arena_ind_get(arena)); 1092 extent_hook_post_reentrancy(tsdn); 1093 } 1094 1095 extent_init(extent, arena, ptr, alloc_size, false, NSIZES, 1096 arena_extent_sn_next(arena), extent_state_active, zeroed, 1097 committed); 1098 if (ptr == NULL) { 1099 extent_dalloc(tsdn, arena, extent); 1100 goto label_err; 1101 } 1102 if (extent_register_no_gdump_add(tsdn, extent)) { 1103 extents_leak(tsdn, arena, r_extent_hooks, 1104 &arena->extents_retained, extent, true); 1105 goto label_err; 1106 } 1107 1108 size_t leadsize = ALIGNMENT_CEILING((uintptr_t)ptr, 1109 PAGE_CEILING(alignment)) - (uintptr_t)ptr; 1110 assert(alloc_size >= leadsize + esize); 1111 size_t trailsize = alloc_size - leadsize - esize; 1112 if (extent_zeroed_get(extent) && extent_committed_get(extent)) { 1113 *zero = true; 1114 } 1115 if (extent_committed_get(extent)) { 1116 *commit = true; 1117 } 1118 1119 /* Split the lead. */ 1120 if (leadsize != 0) { 1121 extent_t *lead = extent; 1122 extent = extent_split_impl(tsdn, arena, r_extent_hooks, lead, 1123 leadsize, NSIZES, false, esize + trailsize, szind, slab, 1124 true); 1125 if (extent == NULL) { 1126 extent_deregister(tsdn, lead); 1127 extents_leak(tsdn, arena, r_extent_hooks, 1128 &arena->extents_retained, lead, true); 1129 goto label_err; 1130 } 1131 extent_record(tsdn, arena, r_extent_hooks, 1132 &arena->extents_retained, lead, true); 1133 } 1134 1135 /* Split the trail. */ 1136 if (trailsize != 0) { 1137 extent_t *trail = extent_split_impl(tsdn, arena, r_extent_hooks, 1138 extent, esize, szind, slab, trailsize, NSIZES, false, true); 1139 if (trail == NULL) { 1140 extent_deregister(tsdn, extent); 1141 extents_leak(tsdn, arena, r_extent_hooks, 1142 &arena->extents_retained, extent, true); 1143 goto label_err; 1144 } 1145 extent_record(tsdn, arena, r_extent_hooks, 1146 &arena->extents_retained, trail, true); 1147 } else if (leadsize == 0) { 1148 /* 1149 * Splitting causes szind to be set as a side effect, but no 1150 * splitting occurred. 1151 */ 1152 rtree_ctx_t rtree_ctx_fallback; 1153 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, 1154 &rtree_ctx_fallback); 1155 1156 extent_szind_set(extent, szind); 1157 if (szind != NSIZES) { 1158 rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, 1159 (uintptr_t)extent_addr_get(extent), szind, slab); 1160 if (slab && extent_size_get(extent) > PAGE) { 1161 rtree_szind_slab_update(tsdn, &extents_rtree, 1162 rtree_ctx, 1163 (uintptr_t)extent_past_get(extent) - 1164 (uintptr_t)PAGE, szind, slab); 1165 } 1166 } 1167 } 1168 1169 if (*commit && !extent_committed_get(extent)) { 1170 if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0, 1171 extent_size_get(extent), true)) { 1172 extent_record(tsdn, arena, r_extent_hooks, 1173 &arena->extents_retained, extent, true); 1174 goto label_err; 1175 } 1176 extent_zeroed_set(extent, true); 1177 } 1178 1179 /* 1180 * Increment extent_grow_next if doing so wouldn't exceed the legal 1181 * range. 1182 */ 1183 if (arena->extent_grow_next + egn_skip + 1 < NPSIZES) { 1184 arena->extent_grow_next += egn_skip + 1; 1185 } else { 1186 arena->extent_grow_next = NPSIZES - 1; 1187 } 1188 /* All opportunities for failure are past. */ 1189 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); 1190 1191 if (config_prof) { 1192 /* Adjust gdump stats now that extent is final size. */ 1193 extent_gdump_add(tsdn, extent); 1194 } 1195 if (pad != 0) { 1196 extent_addr_randomize(tsdn, extent, alignment); 1197 } 1198 if (slab) { 1199 rtree_ctx_t rtree_ctx_fallback; 1200 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, 1201 &rtree_ctx_fallback); 1202 1203 extent_slab_set(extent, true); 1204 extent_interior_register(tsdn, rtree_ctx, extent, szind); 1205 } 1206 if (*zero && !extent_zeroed_get(extent)) { 1207 void *addr = extent_base_get(extent); 1208 size_t size = extent_size_get(extent); 1209 if (pages_purge_forced(addr, size)) { 1210 memset(addr, 0, size); 1211 } 1212 } 1213 1214 return extent; 1215 label_err: 1216 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); 1217 return NULL; 1218 } 1219 1220 static extent_t * 1221 extent_alloc_retained(tsdn_t *tsdn, arena_t *arena, 1222 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, 1223 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { 1224 assert(size != 0); 1225 assert(alignment != 0); 1226 1227 malloc_mutex_lock(tsdn, &arena->extent_grow_mtx); 1228 1229 extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, 1230 &arena->extents_retained, new_addr, size, pad, alignment, slab, 1231 szind, zero, commit, true); 1232 if (extent != NULL) { 1233 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); 1234 if (config_prof) { 1235 extent_gdump_add(tsdn, extent); 1236 } 1237 } else if (opt_retain && new_addr == NULL) { 1238 extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size, 1239 pad, alignment, slab, szind, zero, commit); 1240 /* extent_grow_retained() always releases extent_grow_mtx. */ 1241 } else { 1242 malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx); 1243 } 1244 malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx); 1245 1246 return extent; 1247 } 1248 1249 static extent_t * 1250 extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena, 1251 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, 1252 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { 1253 size_t esize = size + pad; 1254 extent_t *extent = extent_alloc(tsdn, arena); 1255 if (extent == NULL) { 1256 return NULL; 1257 } 1258 void *addr; 1259 if (*r_extent_hooks == &extent_hooks_default) { 1260 /* Call directly to propagate tsdn. */ 1261 addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize, 1262 alignment, zero, commit); 1263 } else { 1264 extent_hook_pre_reentrancy(tsdn, arena); 1265 addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr, 1266 esize, alignment, zero, commit, arena_ind_get(arena)); 1267 extent_hook_post_reentrancy(tsdn); 1268 } 1269 if (addr == NULL) { 1270 extent_dalloc(tsdn, arena, extent); 1271 return NULL; 1272 } 1273 extent_init(extent, arena, addr, esize, slab, szind, 1274 arena_extent_sn_next(arena), extent_state_active, zero, commit); 1275 if (pad != 0) { 1276 extent_addr_randomize(tsdn, extent, alignment); 1277 } 1278 if (extent_register(tsdn, extent)) { 1279 extents_leak(tsdn, arena, r_extent_hooks, 1280 &arena->extents_retained, extent, false); 1281 return NULL; 1282 } 1283 1284 return extent; 1285 } 1286 1287 extent_t * 1288 extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena, 1289 extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad, 1290 size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) { 1291 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1292 WITNESS_RANK_CORE, 0); 1293 1294 extent_hooks_assure_initialized(arena, r_extent_hooks); 1295 1296 extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks, 1297 new_addr, size, pad, alignment, slab, szind, zero, commit); 1298 if (extent == NULL) { 1299 extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks, 1300 new_addr, size, pad, alignment, slab, szind, zero, commit); 1301 } 1302 1303 return extent; 1304 } 1305 1306 static bool 1307 extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner, 1308 const extent_t *outer) { 1309 assert(extent_arena_get(inner) == arena); 1310 if (extent_arena_get(outer) != arena) { 1311 return false; 1312 } 1313 1314 assert(extent_state_get(inner) == extent_state_active); 1315 if (extent_state_get(outer) != extents->state) { 1316 return false; 1317 } 1318 1319 if (extent_committed_get(inner) != extent_committed_get(outer)) { 1320 return false; 1321 } 1322 1323 return true; 1324 } 1325 1326 static bool 1327 extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, 1328 extents_t *extents, extent_t *inner, extent_t *outer, bool forward, 1329 bool growing_retained) { 1330 assert(extent_can_coalesce(arena, extents, inner, outer)); 1331 1332 if (forward && extents->delay_coalesce) { 1333 /* 1334 * The extent that remains after coalescing must occupy the 1335 * outer extent's position in the LRU. For forward coalescing, 1336 * swap the inner extent into the LRU. 1337 */ 1338 extent_list_replace(&extents->lru, outer, inner); 1339 } 1340 extent_activate_locked(tsdn, arena, extents, outer, 1341 extents->delay_coalesce); 1342 1343 malloc_mutex_unlock(tsdn, &extents->mtx); 1344 bool err = extent_merge_impl(tsdn, arena, r_extent_hooks, 1345 forward ? inner : outer, forward ? outer : inner, growing_retained); 1346 malloc_mutex_lock(tsdn, &extents->mtx); 1347 1348 if (err) { 1349 if (forward && extents->delay_coalesce) { 1350 extent_list_replace(&extents->lru, inner, outer); 1351 } 1352 extent_deactivate_locked(tsdn, arena, extents, outer, 1353 extents->delay_coalesce); 1354 } 1355 1356 return err; 1357 } 1358 1359 static extent_t * 1360 extent_try_coalesce(tsdn_t *tsdn, arena_t *arena, 1361 extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents, 1362 extent_t *extent, bool *coalesced, bool growing_retained) { 1363 /* 1364 * Continue attempting to coalesce until failure, to protect against 1365 * races with other threads that are thwarted by this one. 1366 */ 1367 bool again; 1368 do { 1369 again = false; 1370 1371 /* Try to coalesce forward. */ 1372 extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx, 1373 extent_past_get(extent)); 1374 if (next != NULL) { 1375 /* 1376 * extents->mtx only protects against races for 1377 * like-state extents, so call extent_can_coalesce() 1378 * before releasing next's pool lock. 1379 */ 1380 bool can_coalesce = extent_can_coalesce(arena, extents, 1381 extent, next); 1382 1383 extent_unlock(tsdn, next); 1384 1385 if (can_coalesce && !extent_coalesce(tsdn, arena, 1386 r_extent_hooks, extents, extent, next, true, 1387 growing_retained)) { 1388 if (extents->delay_coalesce) { 1389 /* Do minimal coalescing. */ 1390 *coalesced = true; 1391 return extent; 1392 } 1393 again = true; 1394 } 1395 } 1396 1397 /* Try to coalesce backward. */ 1398 extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx, 1399 extent_before_get(extent)); 1400 if (prev != NULL) { 1401 bool can_coalesce = extent_can_coalesce(arena, extents, 1402 extent, prev); 1403 extent_unlock(tsdn, prev); 1404 1405 if (can_coalesce && !extent_coalesce(tsdn, arena, 1406 r_extent_hooks, extents, extent, prev, false, 1407 growing_retained)) { 1408 extent = prev; 1409 if (extents->delay_coalesce) { 1410 /* Do minimal coalescing. */ 1411 *coalesced = true; 1412 return extent; 1413 } 1414 again = true; 1415 } 1416 } 1417 } while (again); 1418 1419 if (extents->delay_coalesce) { 1420 *coalesced = false; 1421 } 1422 return extent; 1423 } 1424 1425 static void 1426 extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks, 1427 extents_t *extents, extent_t *extent, bool growing_retained) { 1428 rtree_ctx_t rtree_ctx_fallback; 1429 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 1430 1431 assert((extents_state_get(extents) != extent_state_dirty && 1432 extents_state_get(extents) != extent_state_muzzy) || 1433 !extent_zeroed_get(extent)); 1434 1435 malloc_mutex_lock(tsdn, &extents->mtx); 1436 extent_hooks_assure_initialized(arena, r_extent_hooks); 1437 1438 extent_szind_set(extent, NSIZES); 1439 if (extent_slab_get(extent)) { 1440 extent_interior_deregister(tsdn, rtree_ctx, extent); 1441 extent_slab_set(extent, false); 1442 } 1443 1444 assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, 1445 (uintptr_t)extent_base_get(extent), true) == extent); 1446 1447 if (!extents->delay_coalesce) { 1448 extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, 1449 rtree_ctx, extents, extent, NULL, growing_retained); 1450 } 1451 1452 extent_deactivate_locked(tsdn, arena, extents, extent, false); 1453 1454 malloc_mutex_unlock(tsdn, &extents->mtx); 1455 } 1456 1457 void 1458 extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { 1459 extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; 1460 1461 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1462 WITNESS_RANK_CORE, 0); 1463 1464 if (extent_register(tsdn, extent)) { 1465 extents_leak(tsdn, arena, &extent_hooks, 1466 &arena->extents_retained, extent, false); 1467 return; 1468 } 1469 extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent); 1470 } 1471 1472 static bool 1473 extent_dalloc_default_impl(void *addr, size_t size) { 1474 if (!have_dss || !extent_in_dss(addr)) { 1475 return extent_dalloc_mmap(addr, size); 1476 } 1477 return true; 1478 } 1479 1480 static bool 1481 extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size, 1482 bool committed, unsigned arena_ind) { 1483 return extent_dalloc_default_impl(addr, size); 1484 } 1485 1486 static bool 1487 extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena, 1488 extent_hooks_t **r_extent_hooks, extent_t *extent) { 1489 bool err; 1490 1491 assert(extent_base_get(extent) != NULL); 1492 assert(extent_size_get(extent) != 0); 1493 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1494 WITNESS_RANK_CORE, 0); 1495 1496 extent_addr_set(extent, extent_base_get(extent)); 1497 1498 extent_hooks_assure_initialized(arena, r_extent_hooks); 1499 /* Try to deallocate. */ 1500 if (*r_extent_hooks == &extent_hooks_default) { 1501 /* Call directly to propagate tsdn. */ 1502 err = extent_dalloc_default_impl(extent_base_get(extent), 1503 extent_size_get(extent)); 1504 } else { 1505 extent_hook_pre_reentrancy(tsdn, arena); 1506 err = ((*r_extent_hooks)->dalloc == NULL || 1507 (*r_extent_hooks)->dalloc(*r_extent_hooks, 1508 extent_base_get(extent), extent_size_get(extent), 1509 extent_committed_get(extent), arena_ind_get(arena))); 1510 extent_hook_post_reentrancy(tsdn); 1511 } 1512 1513 if (!err) { 1514 extent_dalloc(tsdn, arena, extent); 1515 } 1516 1517 return err; 1518 } 1519 1520 void 1521 extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena, 1522 extent_hooks_t **r_extent_hooks, extent_t *extent) { 1523 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1524 WITNESS_RANK_CORE, 0); 1525 1526 /* 1527 * Deregister first to avoid a race with other allocating threads, and 1528 * reregister if deallocation fails. 1529 */ 1530 extent_deregister(tsdn, extent); 1531 if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) { 1532 return; 1533 } 1534 1535 extent_reregister(tsdn, extent); 1536 if (*r_extent_hooks != &extent_hooks_default) { 1537 extent_hook_pre_reentrancy(tsdn, arena); 1538 } 1539 /* Try to decommit; purge if that fails. */ 1540 bool zeroed; 1541 if (!extent_committed_get(extent)) { 1542 zeroed = true; 1543 } else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent, 1544 0, extent_size_get(extent))) { 1545 zeroed = true; 1546 } else if ((*r_extent_hooks)->purge_forced != NULL && 1547 !(*r_extent_hooks)->purge_forced(*r_extent_hooks, 1548 extent_base_get(extent), extent_size_get(extent), 0, 1549 extent_size_get(extent), arena_ind_get(arena))) { 1550 zeroed = true; 1551 } else if (extent_state_get(extent) == extent_state_muzzy || 1552 ((*r_extent_hooks)->purge_lazy != NULL && 1553 !(*r_extent_hooks)->purge_lazy(*r_extent_hooks, 1554 extent_base_get(extent), extent_size_get(extent), 0, 1555 extent_size_get(extent), arena_ind_get(arena)))) { 1556 zeroed = false; 1557 } else { 1558 zeroed = false; 1559 } 1560 if (*r_extent_hooks != &extent_hooks_default) { 1561 extent_hook_post_reentrancy(tsdn); 1562 } 1563 extent_zeroed_set(extent, zeroed); 1564 1565 if (config_prof) { 1566 extent_gdump_sub(tsdn, extent); 1567 } 1568 1569 extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained, 1570 extent, false); 1571 } 1572 1573 static void 1574 extent_destroy_default_impl(void *addr, size_t size) { 1575 if (!have_dss || !extent_in_dss(addr)) { 1576 pages_unmap(addr, size); 1577 } 1578 } 1579 1580 static void 1581 extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, 1582 bool committed, unsigned arena_ind) { 1583 extent_destroy_default_impl(addr, size); 1584 } 1585 1586 void 1587 extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena, 1588 extent_hooks_t **r_extent_hooks, extent_t *extent) { 1589 assert(extent_base_get(extent) != NULL); 1590 assert(extent_size_get(extent) != 0); 1591 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1592 WITNESS_RANK_CORE, 0); 1593 1594 /* Deregister first to avoid a race with other allocating threads. */ 1595 extent_deregister(tsdn, extent); 1596 1597 extent_addr_set(extent, extent_base_get(extent)); 1598 1599 extent_hooks_assure_initialized(arena, r_extent_hooks); 1600 /* Try to destroy; silently fail otherwise. */ 1601 if (*r_extent_hooks == &extent_hooks_default) { 1602 /* Call directly to propagate tsdn. */ 1603 extent_destroy_default_impl(extent_base_get(extent), 1604 extent_size_get(extent)); 1605 } else if ((*r_extent_hooks)->destroy != NULL) { 1606 extent_hook_pre_reentrancy(tsdn, arena); 1607 (*r_extent_hooks)->destroy(*r_extent_hooks, 1608 extent_base_get(extent), extent_size_get(extent), 1609 extent_committed_get(extent), arena_ind_get(arena)); 1610 extent_hook_post_reentrancy(tsdn); 1611 } 1612 1613 extent_dalloc(tsdn, arena, extent); 1614 } 1615 1616 static bool 1617 extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, 1618 size_t offset, size_t length, unsigned arena_ind) { 1619 return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset), 1620 length); 1621 } 1622 1623 static bool 1624 extent_commit_impl(tsdn_t *tsdn, arena_t *arena, 1625 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 1626 size_t length, bool growing_retained) { 1627 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1628 WITNESS_RANK_CORE, growing_retained ? 1 : 0); 1629 1630 extent_hooks_assure_initialized(arena, r_extent_hooks); 1631 if (*r_extent_hooks != &extent_hooks_default) { 1632 extent_hook_pre_reentrancy(tsdn, arena); 1633 } 1634 bool err = ((*r_extent_hooks)->commit == NULL || 1635 (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent), 1636 extent_size_get(extent), offset, length, arena_ind_get(arena))); 1637 if (*r_extent_hooks != &extent_hooks_default) { 1638 extent_hook_post_reentrancy(tsdn); 1639 } 1640 extent_committed_set(extent, extent_committed_get(extent) || !err); 1641 return err; 1642 } 1643 1644 bool 1645 extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena, 1646 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 1647 size_t length) { 1648 return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset, 1649 length, false); 1650 } 1651 1652 static bool 1653 extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size, 1654 size_t offset, size_t length, unsigned arena_ind) { 1655 return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset), 1656 length); 1657 } 1658 1659 bool 1660 extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena, 1661 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 1662 size_t length) { 1663 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1664 WITNESS_RANK_CORE, 0); 1665 1666 extent_hooks_assure_initialized(arena, r_extent_hooks); 1667 1668 if (*r_extent_hooks != &extent_hooks_default) { 1669 extent_hook_pre_reentrancy(tsdn, arena); 1670 } 1671 bool err = ((*r_extent_hooks)->decommit == NULL || 1672 (*r_extent_hooks)->decommit(*r_extent_hooks, 1673 extent_base_get(extent), extent_size_get(extent), offset, length, 1674 arena_ind_get(arena))); 1675 if (*r_extent_hooks != &extent_hooks_default) { 1676 extent_hook_post_reentrancy(tsdn); 1677 } 1678 extent_committed_set(extent, extent_committed_get(extent) && err); 1679 return err; 1680 } 1681 1682 #ifdef PAGES_CAN_PURGE_LAZY 1683 static bool 1684 extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size, 1685 size_t offset, size_t length, unsigned arena_ind) { 1686 assert(addr != NULL); 1687 assert((offset & PAGE_MASK) == 0); 1688 assert(length != 0); 1689 assert((length & PAGE_MASK) == 0); 1690 1691 return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset), 1692 length); 1693 } 1694 #endif 1695 1696 static bool 1697 extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena, 1698 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 1699 size_t length, bool growing_retained) { 1700 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1701 WITNESS_RANK_CORE, growing_retained ? 1 : 0); 1702 1703 extent_hooks_assure_initialized(arena, r_extent_hooks); 1704 1705 if ((*r_extent_hooks)->purge_lazy == NULL) { 1706 return true; 1707 } 1708 if (*r_extent_hooks != &extent_hooks_default) { 1709 extent_hook_pre_reentrancy(tsdn, arena); 1710 } 1711 bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks, 1712 extent_base_get(extent), extent_size_get(extent), offset, length, 1713 arena_ind_get(arena)); 1714 if (*r_extent_hooks != &extent_hooks_default) { 1715 extent_hook_post_reentrancy(tsdn); 1716 } 1717 1718 return err; 1719 } 1720 1721 bool 1722 extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena, 1723 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 1724 size_t length) { 1725 return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent, 1726 offset, length, false); 1727 } 1728 1729 #ifdef PAGES_CAN_PURGE_FORCED 1730 static bool 1731 extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr, 1732 size_t size, size_t offset, size_t length, unsigned arena_ind) { 1733 assert(addr != NULL); 1734 assert((offset & PAGE_MASK) == 0); 1735 assert(length != 0); 1736 assert((length & PAGE_MASK) == 0); 1737 1738 return pages_purge_forced((void *)((uintptr_t)addr + 1739 (uintptr_t)offset), length); 1740 } 1741 #endif 1742 1743 static bool 1744 extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena, 1745 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 1746 size_t length, bool growing_retained) { 1747 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1748 WITNESS_RANK_CORE, growing_retained ? 1 : 0); 1749 1750 extent_hooks_assure_initialized(arena, r_extent_hooks); 1751 1752 if ((*r_extent_hooks)->purge_forced == NULL) { 1753 return true; 1754 } 1755 if (*r_extent_hooks != &extent_hooks_default) { 1756 extent_hook_pre_reentrancy(tsdn, arena); 1757 } 1758 bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks, 1759 extent_base_get(extent), extent_size_get(extent), offset, length, 1760 arena_ind_get(arena)); 1761 if (*r_extent_hooks != &extent_hooks_default) { 1762 extent_hook_post_reentrancy(tsdn); 1763 } 1764 return err; 1765 } 1766 1767 bool 1768 extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena, 1769 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset, 1770 size_t length) { 1771 return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent, 1772 offset, length, false); 1773 } 1774 1775 #ifdef JEMALLOC_MAPS_COALESCE 1776 static bool 1777 extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size, 1778 size_t size_a, size_t size_b, bool committed, unsigned arena_ind) { 1779 return !maps_coalesce; 1780 } 1781 #endif 1782 1783 static extent_t * 1784 extent_split_impl(tsdn_t *tsdn, arena_t *arena, 1785 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, 1786 szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b, 1787 bool growing_retained) { 1788 assert(extent_size_get(extent) == size_a + size_b); 1789 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1790 WITNESS_RANK_CORE, growing_retained ? 1 : 0); 1791 1792 extent_hooks_assure_initialized(arena, r_extent_hooks); 1793 1794 if ((*r_extent_hooks)->split == NULL) { 1795 return NULL; 1796 } 1797 1798 extent_t *trail = extent_alloc(tsdn, arena); 1799 if (trail == NULL) { 1800 goto label_error_a; 1801 } 1802 1803 extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) + 1804 size_a), size_b, slab_b, szind_b, extent_sn_get(extent), 1805 extent_state_get(extent), extent_zeroed_get(extent), 1806 extent_committed_get(extent)); 1807 1808 rtree_ctx_t rtree_ctx_fallback; 1809 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 1810 rtree_leaf_elm_t *lead_elm_a, *lead_elm_b; 1811 { 1812 extent_t lead; 1813 1814 extent_init(&lead, arena, extent_addr_get(extent), size_a, 1815 slab_a, szind_a, extent_sn_get(extent), 1816 extent_state_get(extent), extent_zeroed_get(extent), 1817 extent_committed_get(extent)); 1818 1819 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false, 1820 true, &lead_elm_a, &lead_elm_b); 1821 } 1822 rtree_leaf_elm_t *trail_elm_a, *trail_elm_b; 1823 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true, 1824 &trail_elm_a, &trail_elm_b); 1825 1826 if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL 1827 || trail_elm_b == NULL) { 1828 goto label_error_b; 1829 } 1830 1831 extent_lock2(tsdn, extent, trail); 1832 1833 if (*r_extent_hooks != &extent_hooks_default) { 1834 extent_hook_pre_reentrancy(tsdn, arena); 1835 } 1836 bool err = (*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent), 1837 size_a + size_b, size_a, size_b, extent_committed_get(extent), 1838 arena_ind_get(arena)); 1839 if (*r_extent_hooks != &extent_hooks_default) { 1840 extent_hook_post_reentrancy(tsdn); 1841 } 1842 if (err) { 1843 goto label_error_c; 1844 } 1845 1846 extent_size_set(extent, size_a); 1847 extent_szind_set(extent, szind_a); 1848 1849 extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent, 1850 szind_a, slab_a); 1851 extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail, 1852 szind_b, slab_b); 1853 1854 extent_unlock2(tsdn, extent, trail); 1855 1856 return trail; 1857 label_error_c: 1858 extent_unlock2(tsdn, extent, trail); 1859 label_error_b: 1860 extent_dalloc(tsdn, arena, trail); 1861 label_error_a: 1862 return NULL; 1863 } 1864 1865 extent_t * 1866 extent_split_wrapper(tsdn_t *tsdn, arena_t *arena, 1867 extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a, 1868 szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) { 1869 return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a, 1870 szind_a, slab_a, size_b, szind_b, slab_b, false); 1871 } 1872 1873 static bool 1874 extent_merge_default_impl(void *addr_a, void *addr_b) { 1875 if (!maps_coalesce) { 1876 return true; 1877 } 1878 if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) { 1879 return true; 1880 } 1881 1882 return false; 1883 } 1884 1885 #ifdef JEMALLOC_MAPS_COALESCE 1886 static bool 1887 extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a, 1888 void *addr_b, size_t size_b, bool committed, unsigned arena_ind) { 1889 return extent_merge_default_impl(addr_a, addr_b); 1890 } 1891 #endif 1892 1893 static bool 1894 extent_merge_impl(tsdn_t *tsdn, arena_t *arena, 1895 extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b, 1896 bool growing_retained) { 1897 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1898 WITNESS_RANK_CORE, growing_retained ? 1 : 0); 1899 1900 extent_hooks_assure_initialized(arena, r_extent_hooks); 1901 1902 if ((*r_extent_hooks)->merge == NULL) { 1903 return true; 1904 } 1905 1906 bool err; 1907 if (*r_extent_hooks == &extent_hooks_default) { 1908 /* Call directly to propagate tsdn. */ 1909 err = extent_merge_default_impl(extent_base_get(a), 1910 extent_base_get(b)); 1911 } else { 1912 extent_hook_pre_reentrancy(tsdn, arena); 1913 err = (*r_extent_hooks)->merge(*r_extent_hooks, 1914 extent_base_get(a), extent_size_get(a), extent_base_get(b), 1915 extent_size_get(b), extent_committed_get(a), 1916 arena_ind_get(arena)); 1917 extent_hook_post_reentrancy(tsdn); 1918 } 1919 1920 if (err) { 1921 return true; 1922 } 1923 1924 /* 1925 * The rtree writes must happen while all the relevant elements are 1926 * owned, so the following code uses decomposed helper functions rather 1927 * than extent_{,de}register() to do things in the right order. 1928 */ 1929 rtree_ctx_t rtree_ctx_fallback; 1930 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 1931 rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b; 1932 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a, 1933 &a_elm_b); 1934 extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a, 1935 &b_elm_b); 1936 1937 extent_lock2(tsdn, a, b); 1938 1939 if (a_elm_b != NULL) { 1940 rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL, 1941 NSIZES, false); 1942 } 1943 if (b_elm_b != NULL) { 1944 rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL, 1945 NSIZES, false); 1946 } else { 1947 b_elm_b = b_elm_a; 1948 } 1949 1950 extent_size_set(a, extent_size_get(a) + extent_size_get(b)); 1951 extent_szind_set(a, NSIZES); 1952 extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ? 1953 extent_sn_get(a) : extent_sn_get(b)); 1954 extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b)); 1955 1956 extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false); 1957 1958 extent_unlock2(tsdn, a, b); 1959 1960 extent_dalloc(tsdn, extent_arena_get(b), b); 1961 1962 return false; 1963 } 1964 1965 bool 1966 extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena, 1967 extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) { 1968 return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false); 1969 } 1970 1971 bool 1972 extent_boot(void) { 1973 if (rtree_new(&extents_rtree, true)) { 1974 return true; 1975 } 1976 1977 if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool", 1978 WITNESS_RANK_EXTENT_POOL)) { 1979 return true; 1980 } 1981 1982 if (have_dss) { 1983 extent_dss_boot(); 1984 } 1985 1986 return false; 1987 } 1988