xref: /freebsd/contrib/jemalloc/src/extent.c (revision c99b67a7947ea215f9c1d44ec022680e98920cd1)
#define JEMALLOC_EXTENT_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/ph.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/mutex_pool.h"

/******************************************************************************/
/* Data. */

rtree_t		extents_rtree;
/* Keyed by the address of the extent_t being protected. */
mutex_pool_t	extent_mutex_pool;

static const bitmap_info_t extents_bitmap_info =
    BITMAP_INFO_INITIALIZER(NPSIZES+1);

static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit,
    unsigned arena_ind);
static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, bool committed, unsigned arena_ind);
static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
static bool extent_decommit_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#ifdef PAGES_CAN_PURGE_LAZY
static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
#ifdef PAGES_CAN_PURGE_FORCED
static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
    void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
#endif
static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained);
#ifdef JEMALLOC_MAPS_COALESCE
static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t size_a, size_t size_b, bool committed,
    unsigned arena_ind);
#endif
static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained);
#ifdef JEMALLOC_MAPS_COALESCE
static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
    size_t size_a, void *addr_b, size_t size_b, bool committed,
    unsigned arena_ind);
#endif
static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained);

const extent_hooks_t	extent_hooks_default = {
	extent_alloc_default,
	extent_dalloc_default,
	extent_destroy_default,
	extent_commit_default,
	extent_decommit_default
#ifdef PAGES_CAN_PURGE_LAZY
	,
	extent_purge_lazy_default
#else
	,
	NULL
#endif
#ifdef PAGES_CAN_PURGE_FORCED
	,
	extent_purge_forced_default
#else
	,
	NULL
#endif
#ifdef JEMALLOC_MAPS_COALESCE
	,
	extent_split_default,
	extent_merge_default
#endif
};
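
/*
 * Illustrative sketch (not compiled as part of this file): an application
 * can override these defaults per arena via the documented
 * "arena.<i>.extent_hooks" mallctl.  Hooks other than alloc may be NULL,
 * in which case the corresponding operation is reported as failed and
 * jemalloc falls back accordingly; e.g. a NULL dalloc causes virtual
 * memory to be retained.  my_alloc/my_commit/my_decommit below are
 * hypothetical functions matching the hook signatures declared above:
 *
 *	static extent_hooks_t my_hooks = {
 *		my_alloc, NULL, NULL, my_commit, my_decommit,
 *		NULL, NULL, NULL, NULL
 *	};
 *	extent_hooks_t *hooks = &my_hooks;
 *	mallctl("arena.0.extent_hooks", NULL, NULL, (void *)&hooks,
 *	    sizeof(hooks));
 */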

/* Used exclusively for gdump triggering. */
static atomic_zu_t curpages;
static atomic_zu_t highpages;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
    size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
    bool *zero, bool *commit, bool growing_retained);
static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained);
static void extent_record(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
    bool growing_retained);

/******************************************************************************/
ph_gen(UNUSED, extent_avail_, extent_tree_t, extent_t, ph_link,
    extent_esnead_comp)

typedef enum {
	lock_result_success,
	lock_result_failure,
	lock_result_no_extent
} lock_result_t;

static lock_result_t
extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
    extent_t **result) {
	extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
	    elm, true);

	if (extent1 == NULL) {
		return lock_result_no_extent;
	}
	/*
	 * It's possible that the extent changed out from under us, and with it
	 * the leaf->extent mapping.  We have to recheck while holding the lock.
	 */
	extent_lock(tsdn, extent1);
	extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
	    &extents_rtree, elm, true);

	if (extent1 == extent2) {
		*result = extent1;
		return lock_result_success;
	} else {
		extent_unlock(tsdn, extent1);
		return lock_result_failure;
	}
}

/*
 * Returns a pool-locked extent_t * if there's one associated with the given
 * address, and NULL otherwise.
 */
static extent_t *
extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr) {
	extent_t *ret = NULL;
	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
	    rtree_ctx, (uintptr_t)addr, false, false);
	if (elm == NULL) {
		return NULL;
	}
	lock_result_t lock_result;
	do {
		lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret);
	} while (lock_result == lock_result_failure);
	return ret;
}

extent_t *
extent_alloc(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_t *extent = extent_avail_first(&arena->extent_avail);
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
		return base_alloc_extent(tsdn, arena->base);
	}
	extent_avail_remove(&arena->extent_avail, extent);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
	return extent;
}

void
extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
	extent_avail_insert(&arena->extent_avail, extent);
	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
}

extent_hooks_t *
extent_hooks_get(arena_t *arena) {
	return base_extent_hooks_get(arena->base);
}

extent_hooks_t *
extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
	background_thread_info_t *info;
	if (have_background_thread) {
		info = arena_background_thread_info_get(arena);
		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
	}
	extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
	if (have_background_thread) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
	}

	return ret;
}

static void
extent_hooks_assure_initialized(arena_t *arena,
    extent_hooks_t **r_extent_hooks) {
	if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
		*r_extent_hooks = extent_hooks_get(arena);
	}
}

#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_floor(size_t size) {
	size_t ret;
	pszind_t pind;

	assert(size > 0);
	assert((size & PAGE_MASK) == 0);

	pind = sz_psz2ind(size - sz_large_pad + 1);
	if (pind == 0) {
		/*
		 * Avoid underflow.  This short-circuit would also do the right
		 * thing for all sizes in the range for which there are
		 * PAGE-spaced size classes, but it's simplest to just handle
		 * the one case that would cause erroneous results.
		 */
		return size;
	}
	ret = sz_pind2sz(pind - 1) + sz_large_pad;
	assert(ret <= size);
	return ret;
}

#ifndef JEMALLOC_JET
static
#endif
size_t
extent_size_quantize_ceil(size_t size) {
	size_t ret;

	assert(size > 0);
	assert(size - sz_large_pad <= LARGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	ret = extent_size_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large extent,
		 * because under-sized extents may be mixed in.  This only
		 * happens when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
		    sz_large_pad;
	}
	return ret;
}
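
/*
 * Worked example for the two quantization functions above, assuming 4 KiB
 * pages, sz_large_pad == 0, and the usual psz series in which 128 KiB and
 * 160 KiB are adjacent size classes: a 144 KiB extent quantizes down to
 * 128 KiB and up to 160 KiB, while a size that is itself a size class
 * (e.g. 128 KiB) maps to itself in both directions.
 */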

/* Generate pairing heap functions. */
ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)

bool
extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
    bool delay_coalesce) {
	if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	for (unsigned i = 0; i < NPSIZES+1; i++) {
		extent_heap_new(&extents->heaps[i]);
	}
	bitmap_init(extents->bitmap, &extents_bitmap_info, true);
	extent_list_init(&extents->lru);
	atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
	extents->state = state;
	extents->delay_coalesce = delay_coalesce;
	return false;
}

extent_state_t
extents_state_get(const extents_t *extents) {
	return extents->state;
}

size_t
extents_npages_get(extents_t *extents) {
	return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
}

static void
extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
    bool preserve_lru) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_unset(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	extent_heap_insert(&extents->heaps[pind], extent);
	if (!preserve_lru) {
		extent_list_append(&extents->lru, extent);
	}
	size_t npages = size >> LG_PAGE;
	/*
	 * All modifications to npages hold the mutex (as asserted above), so we
	 * don't need an atomic fetch-add; we can get by with a load followed by
	 * a store.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	atomic_store_zu(&extents->npages, cur_extents_npages + npages,
	    ATOMIC_RELAXED);
}

static void
extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent,
    bool preserve_lru) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);
	assert(extent_state_get(extent) == extents->state);

	size_t size = extent_size_get(extent);
	size_t psz = extent_size_quantize_floor(size);
	pszind_t pind = sz_psz2ind(psz);
	extent_heap_remove(&extents->heaps[pind], extent);
	if (extent_heap_empty(&extents->heaps[pind])) {
		bitmap_set(extents->bitmap, &extents_bitmap_info,
		    (size_t)pind);
	}
	if (!preserve_lru) {
		extent_list_remove(&extents->lru, extent);
	}
	size_t npages = size >> LG_PAGE;
	/*
	 * As in extents_insert_locked, we hold extents->mtx and so don't need
	 * atomic operations for updating extents->npages.
	 */
	size_t cur_extents_npages =
	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
	assert(cur_extents_npages >= npages);
	atomic_store_zu(&extents->npages,
	    cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
}

/* Do any-best-fit extent selection, i.e. select any extent that best fits. */
static extent_t *
extents_best_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
	pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)pind);
	if (i < NPSIZES+1) {
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_any(&extents->heaps[i]);
		assert(extent_size_get(extent) >= size);
		return extent;
	}

	return NULL;
}

/*
 * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
 * large enough.
 */
static extent_t *
extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
	extent_t *ret = NULL;

	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
	    &extents_bitmap_info, (size_t)pind); i < NPSIZES+1; i =
	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
	    (size_t)i+1)) {
		assert(!extent_heap_empty(&extents->heaps[i]));
		extent_t *extent = extent_heap_first(&extents->heaps[i]);
		assert(extent_size_get(extent) >= size);
		if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
			ret = extent;
		}
		if (i == NPSIZES) {
			break;
		}
		assert(i < NPSIZES);
	}

	return ret;
}

/*
 * Do {best,first}-fit extent selection, where the selection policy choice is
 * based on extents->delay_coalesce.  Best-fit selection requires less
 * searching, but its layout policy is less stable and may cause higher virtual
 * memory fragmentation as a side effect.
 */
static extent_t *
extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    size_t size) {
	malloc_mutex_assert_owner(tsdn, &extents->mtx);

	return extents->delay_coalesce ? extents_best_fit_locked(tsdn, arena,
	    extents, size) : extents_first_fit_locked(tsdn, arena, extents,
	    size);
}
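
/*
 * Illustration of the policy difference: given free extents of 5, 7, and 7
 * pages and a 6-page request, best-fit probes only the first nonempty heap
 * at or above the request's size class and returns whichever 7-page extent
 * that heap yields, whereas first-fit additionally orders the candidates
 * and returns the oldest/lowest 7-page extent per extent_snad_comp().
 */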

static bool
extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent) {
	extent_state_set(extent, extent_state_active);
	bool coalesced;
	extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, extent, &coalesced, false);
	extent_state_set(extent, extents_state_get(extents));

	if (!coalesced) {
		return true;
	}
	extents_insert_locked(tsdn, extents, extent, true);
	return false;
}

extent_t *
extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	assert(size + pad != 0);
	assert(alignment != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	return extent_recycle(tsdn, arena, r_extent_hooks, extents, new_addr,
	    size, pad, alignment, slab, szind, zero, commit, false);
}

void
extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent) {
	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_addr_set(extent, extent_base_get(extent));
	extent_zeroed_set(extent, false);

	extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
}

extent_t *
extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, size_t npages_min) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	malloc_mutex_lock(tsdn, &extents->mtx);

	/*
	 * Get the LRU coalesced extent, if any.  If coalescing was delayed,
	 * the loop will iterate until the LRU extent is fully coalesced.
	 */
	extent_t *extent;
	while (true) {
		/* Get the LRU extent, if any. */
		extent = extent_list_first(&extents->lru);
		if (extent == NULL) {
			goto label_return;
		}
		/* Check the eviction limit. */
		size_t npages = extent_size_get(extent) >> LG_PAGE;
		size_t extents_npages = atomic_load_zu(&extents->npages,
		    ATOMIC_RELAXED);
		if (extents_npages - npages < npages_min) {
			extent = NULL;
			goto label_return;
		}
		extents_remove_locked(tsdn, extents, extent, false);
		if (!extents->delay_coalesce) {
			break;
		}
		/* Try to coalesce. */
		if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
		    rtree_ctx, extents, extent)) {
			break;
		}
		/*
		 * The LRU extent was just coalesced and the result placed in
		 * the LRU at its neighbor's position.  Start over.
		 */
	}

	/*
	 * Either mark the extent active or deregister it to protect against
	 * concurrent operations.
	 */
	switch (extents_state_get(extents)) {
	case extent_state_active:
		not_reached();
	case extent_state_dirty:
	case extent_state_muzzy:
		extent_state_set(extent, extent_state_active);
		break;
	case extent_state_retained:
		extent_deregister(tsdn, extent);
		break;
	default:
		not_reached();
	}

label_return:
	malloc_mutex_unlock(tsdn, &extents->mtx);
	return extent;
}
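
/*
 * npages_min example for extents_evict(): with npages_min == 16 and 20
 * pages currently in extents, a 4-page LRU extent can be evicted
 * (20 - 4 >= 16), but an 8-page one cannot (20 - 8 < 16); in the latter
 * case NULL is returned and the extent stays in place.
 */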

static void
extents_leak(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent, bool growing_retained) {
	/*
	 * Leak extent after making sure its pages have already been purged, so
	 * that this is only a virtual memory leak.
	 */
	if (extents_state_get(extents) == extent_state_dirty) {
		if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
		    extent, 0, extent_size_get(extent), growing_retained)) {
			extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
			    extent, 0, extent_size_get(extent),
			    growing_retained);
		}
	}
	extent_dalloc(tsdn, arena, extent);
}

void
extents_prefork(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_prefork(tsdn, &extents->mtx);
}

void
extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_parent(tsdn, &extents->mtx);
}

void
extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
	malloc_mutex_postfork_child(tsdn, &extents->mtx);
}

static void
extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent, bool preserve_lru) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extent_state_active);

	extent_state_set(extent, extents_state_get(extents));
	extents_insert_locked(tsdn, extents, extent, preserve_lru);
}

static void
extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent, bool preserve_lru) {
	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_deactivate_locked(tsdn, arena, extents, extent, preserve_lru);
	malloc_mutex_unlock(tsdn, &extents->mtx);
}

static void
extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
    extent_t *extent, bool preserve_lru) {
	assert(extent_arena_get(extent) == arena);
	assert(extent_state_get(extent) == extents_state_get(extents));

	extents_remove_locked(tsdn, extents, extent, preserve_lru);
	extent_state_set(extent, extent_state_active);
}

static bool
extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    const extent_t *extent, bool dependent, bool init_missing,
    rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
	*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_base_get(extent), dependent, init_missing);
	if (!dependent && *r_elm_a == NULL) {
		return true;
	}
	assert(*r_elm_a != NULL);

	*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_last_get(extent), dependent, init_missing);
	if (!dependent && *r_elm_b == NULL) {
		return true;
	}
	assert(*r_elm_b != NULL);

	return false;
}

static void
extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
    rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
	rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
	if (elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
		    slab);
	}
}

static void
extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
    szind_t szind) {
	assert(extent_slab_get(extent));

	/* Register interior. */
	for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_write(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE), extent, szind, true);
	}
}

static void
extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
	cassert(config_prof);
	/* prof_gdump() requirement. */
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (opt_prof && extent_state_get(extent) == extent_state_active) {
		size_t nadd = extent_size_get(extent) >> LG_PAGE;
		size_t cur = atomic_fetch_add_zu(&curpages, nadd,
		    ATOMIC_RELAXED) + nadd;
		size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
		while (cur > high && !atomic_compare_exchange_weak_zu(
		    &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
			/*
			 * Don't refresh cur, because it may have decreased
			 * since this thread lost the highpages update race.
			 * Note that high is updated in case of CAS failure.
			 */
		}
		if (cur > high && prof_gdump_get_unlocked()) {
			prof_gdump(tsdn);
		}
	}
}

static void
extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
	cassert(config_prof);

	if (opt_prof && extent_state_get(extent) == extent_state_active) {
		size_t nsub = extent_size_get(extent) >> LG_PAGE;
		assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
		atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
	}
}

static bool
extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *elm_a, *elm_b;

	/*
	 * We need to hold the lock to protect against a concurrent coalesce
	 * operation that sees us in a partial state.
	 */
	extent_lock(tsdn, extent);

	if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
	    &elm_a, &elm_b)) {
		extent_unlock(tsdn, extent);
		return true;
	}

	szind_t szind = extent_szind_get_maybe_invalid(extent);
	bool slab = extent_slab_get(extent);
	extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
	if (slab) {
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}

	extent_unlock(tsdn, extent);

	if (config_prof && gdump_add) {
		extent_gdump_add(tsdn, extent);
	}

	return false;
}

static bool
extent_register(tsdn_t *tsdn, extent_t *extent) {
	return extent_register_impl(tsdn, extent, true);
}

static bool
extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
	return extent_register_impl(tsdn, extent, false);
}

static void
extent_reregister(tsdn_t *tsdn, extent_t *extent) {
	bool err = extent_register(tsdn, extent);
	assert(!err);
}

static void
extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
    extent_t *extent) {
	size_t i;

	assert(extent_slab_get(extent));

	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
		rtree_clear(tsdn, &extents_rtree, rtree_ctx,
		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
		    LG_PAGE));
	}
}

static void
extent_deregister(tsdn_t *tsdn, extent_t *extent) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *elm_a, *elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
	    &elm_a, &elm_b);

	extent_lock(tsdn, extent);

	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, NSIZES, false);
	if (extent_slab_get(extent)) {
		extent_interior_deregister(tsdn, rtree_ctx, extent);
		extent_slab_set(extent, false);
	}

	extent_unlock(tsdn, extent);

	if (config_prof) {
		extent_gdump_sub(tsdn, extent);
	}
}

static extent_t *
extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    bool *zero, bool *commit, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
	assert(alignment > 0);
	if (config_debug && new_addr != NULL) {
		/*
		 * Non-NULL new_addr has two use cases:
		 *
		 *   1) Recycle a known-extant extent, e.g. during purging.
		 *   2) Perform in-place expanding reallocation.
		 *
		 * Regardless of use case, new_addr must either refer to a
		 * non-existing extent, or to the base of an extant extent,
		 * since only active slabs support interior lookups (which of
		 * course cannot be recycled).
		 */
		assert(PAGE_ADDR2BASE(new_addr) == new_addr);
		assert(pad == 0);
		assert(alignment <= PAGE);
	}

	size_t esize = size + pad;
	size_t alloc_size = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size < esize) {
		return NULL;
	}
	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_hooks_assure_initialized(arena, r_extent_hooks);
	extent_t *extent;
	if (new_addr != NULL) {
		extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr);
		if (extent != NULL) {
			/*
			 * We might null-out extent to report an error, but we
			 * still need to unlock the associated mutex after.
			 */
			extent_t *unlock_extent = extent;
			assert(extent_base_get(extent) == new_addr);
			if (extent_arena_get(extent) != arena ||
			    extent_size_get(extent) < esize ||
			    extent_state_get(extent) !=
			    extents_state_get(extents)) {
				extent = NULL;
			}
			extent_unlock(tsdn, unlock_extent);
		}
	} else {
		extent = extents_fit_locked(tsdn, arena, extents, alloc_size);
	}
	if (extent == NULL) {
		malloc_mutex_unlock(tsdn, &extents->mtx);
		return NULL;
	}

	extent_activate_locked(tsdn, arena, extents, extent, false);
	malloc_mutex_unlock(tsdn, &extents->mtx);

	if (extent_zeroed_get(extent)) {
		*zero = true;
	}
	if (extent_committed_get(extent)) {
		*commit = true;
	}

	return extent;
}

static extent_t *
extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
    szind_t szind, extent_t *extent, bool growing_retained) {
	size_t esize = size + pad;
	size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(extent),
	    PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(extent);
	assert(new_addr == NULL || leadsize == 0);
	assert(extent_size_get(extent) >= leadsize + esize);
	size_t trailsize = extent_size_get(extent) - leadsize - esize;

	/* Split the lead. */
	if (leadsize != 0) {
		extent_t *lead = extent;
		extent = extent_split_impl(tsdn, arena, r_extent_hooks,
		    lead, leadsize, NSIZES, false, esize + trailsize, szind,
		    slab, growing_retained);
		if (extent == NULL) {
			extent_deregister(tsdn, lead);
			extents_leak(tsdn, arena, r_extent_hooks, extents,
			    lead, growing_retained);
			return NULL;
		}
		extent_deactivate(tsdn, arena, extents, lead, false);
	}

	/* Split the trail. */
	if (trailsize != 0) {
		extent_t *trail = extent_split_impl(tsdn, arena,
		    r_extent_hooks, extent, esize, szind, slab, trailsize,
		    NSIZES, false, growing_retained);
		if (trail == NULL) {
			extent_deregister(tsdn, extent);
			extents_leak(tsdn, arena, r_extent_hooks, extents,
			    extent, growing_retained);
			return NULL;
		}
		extent_deactivate(tsdn, arena, extents, trail, false);
	} else if (leadsize == 0) {
		/*
		 * Splitting causes szind to be set as a side effect, but no
		 * splitting occurred.
		 */
		extent_szind_set(extent, szind);
		if (szind != NSIZES) {
			rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
			    (uintptr_t)extent_addr_get(extent), szind, slab);
			if (slab && extent_size_get(extent) > PAGE) {
				rtree_szind_slab_update(tsdn, &extents_rtree,
				    rtree_ctx,
				    (uintptr_t)extent_past_get(extent) -
				    (uintptr_t)PAGE, szind, slab);
			}
		}
	}

	return extent;
}
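
/*
 * Example of the lead/trail trimming above: recycling a 7-page extent that
 * starts at an odd page to satisfy a 4-page request with 2-page alignment
 * splits off a 1-page lead (returned to extents), yields the 4-page result
 * at the following page, and returns the remaining 2-page trail to extents
 * as well.
 */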

static extent_t *
extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
	assert(new_addr == NULL || !slab);
	assert(pad == 0 || !slab);
	assert(!*zero || !slab);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	bool committed = false;
	extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
	    rtree_ctx, extents, new_addr, size, pad, alignment, slab, zero,
	    &committed, growing_retained);
	if (extent == NULL) {
		return NULL;
	}
	if (committed) {
		*commit = true;
	}

	extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
	    extents, new_addr, size, pad, alignment, slab, szind, extent,
	    growing_retained);
	if (extent == NULL) {
		return NULL;
	}

	if (*commit && !extent_committed_get(extent)) {
		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
		    0, extent_size_get(extent), growing_retained)) {
			extent_record(tsdn, arena, r_extent_hooks, extents,
			    extent, growing_retained);
			return NULL;
		}
		extent_zeroed_set(extent, true);
	}

	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	assert(extent_state_get(extent) == extent_state_active);
	if (slab) {
		extent_slab_set(extent, slab);
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}

	if (*zero) {
		void *addr = extent_base_get(extent);
		size_t size = extent_size_get(extent);
		if (!extent_zeroed_get(extent)) {
			if (pages_purge_forced(addr, size)) {
				memset(addr, 0, size);
			}
		} else if (config_debug) {
			size_t *p = (size_t *)(uintptr_t)addr;
			for (size_t i = 0; i < size / sizeof(size_t); i++) {
				assert(p[i] == 0);
			}
		}
	}
	return extent;
}

/*
 * If the caller specifies (!*zero), it is still possible to receive zeroed
 * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
 * advantage of this to avoid demanding zeroed extents, but taking advantage of
 * them if they are returned.
 */
static void *
extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
	void *ret;

	assert(size != 0);
	assert(alignment != 0);

	/* "primary" dss. */
	if (have_dss && dss_prec == dss_prec_primary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}
	/* mmap. */
	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
	    != NULL) {
		return ret;
	}
	/* "secondary" dss. */
	if (have_dss && dss_prec == dss_prec_secondary && (ret =
	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
	    commit)) != NULL) {
		return ret;
	}

	/* All strategies for allocation failed. */
	return NULL;
}
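
/*
 * Sketch of the *zero protocol documented above (illustrative only; the
 * variable names are hypothetical): a caller that can exploit, but does not
 * require, zeroed memory passes *zero == false and rechecks it after the
 * call:
 *
 *	bool zero = false, commit = true;
 *	void *ptr = extent_alloc_core(tsdn, arena, NULL, size, PAGE, &zero,
 *	    &commit, dss_prec_disabled);
 *	if (ptr != NULL && want_zeroed && !zero) {
 *		memset(ptr, 0, size);
 *	}
 */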

static void *
extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
    size_t size, size_t alignment, bool *zero, bool *commit) {
	void *ret;

	ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
	    commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
	    ATOMIC_RELAXED));
	return ret;
}

static void *
extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
    size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
	tsdn_t *tsdn;
	arena_t *arena;

	tsdn = tsdn_fetch();
	arena = arena_get(tsdn, arena_ind, false);
	/*
	 * The arena we're allocating on behalf of must have been initialized
	 * already.
	 */
	assert(arena != NULL);

	return extent_alloc_default_impl(tsdn, arena, new_addr, size,
	    alignment, zero, commit);
}

/*
 * If virtual memory is retained, create increasingly larger extents from which
 * to split requested extents in order to limit the total number of disjoint
 * virtual memory ranges retained by each arena.
 */
static extent_t *
extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
    bool slab, szind_t szind, bool *zero, bool *commit) {
	malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
	assert(pad == 0 || !slab);
	assert(!*zero || !slab);

	size_t esize = size + pad;
	size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
	/* Beware size_t wrap-around. */
	if (alloc_size_min < esize) {
		goto label_err;
	}
	/*
	 * Find the next extent size in the series that would be large enough to
	 * satisfy this request.
	 */
	pszind_t egn_skip = 0;
	size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
	while (alloc_size < alloc_size_min) {
		egn_skip++;
		if (arena->extent_grow_next + egn_skip == NPSIZES) {
			/* Outside legal range. */
			goto label_err;
		}
		assert(arena->extent_grow_next + egn_skip < NPSIZES);
		alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
	}
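
	/*
	 * Worked example (values are illustrative): if extent_grow_next
	 * currently indexes the 2 MiB size class and alloc_size_min is
	 * 4.5 MiB, the loop advances egn_skip until alloc_size reaches the
	 * first size class >= 4.5 MiB (5 MiB in the usual series).  On
	 * success, extent_grow_next is advanced past the class used (see
	 * below), so successive growth attempts start from strictly larger
	 * classes, yielding a geometric series of retained extents.
	 */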

	extent_t *extent = extent_alloc(tsdn, arena);
	if (extent == NULL) {
		goto label_err;
	}
	bool zeroed = false;
	bool committed = false;

	void *ptr;
	if (*r_extent_hooks == &extent_hooks_default) {
		ptr = extent_alloc_core(tsdn, arena, NULL, alloc_size, PAGE,
		    &zeroed, &committed, (dss_prec_t)atomic_load_u(
		    &arena->dss_prec, ATOMIC_RELAXED));
	} else {
		ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
		    alloc_size, PAGE, &zeroed, &committed,
		    arena_ind_get(arena));
	}

	extent_init(extent, arena, ptr, alloc_size, false, NSIZES,
	    arena_extent_sn_next(arena), extent_state_active, zeroed,
	    committed);
	if (ptr == NULL) {
		extent_dalloc(tsdn, arena, extent);
		goto label_err;
	}
	if (extent_register_no_gdump_add(tsdn, extent)) {
		extents_leak(tsdn, arena, r_extent_hooks,
		    &arena->extents_retained, extent, true);
		goto label_err;
	}

	size_t leadsize = ALIGNMENT_CEILING((uintptr_t)ptr,
	    PAGE_CEILING(alignment)) - (uintptr_t)ptr;
	assert(alloc_size >= leadsize + esize);
	size_t trailsize = alloc_size - leadsize - esize;
	if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
		*zero = true;
	}
	if (extent_committed_get(extent)) {
		*commit = true;
	}

	/* Split the lead. */
	if (leadsize != 0) {
		extent_t *lead = extent;
		extent = extent_split_impl(tsdn, arena, r_extent_hooks, lead,
		    leadsize, NSIZES, false, esize + trailsize, szind, slab,
		    true);
		if (extent == NULL) {
			extent_deregister(tsdn, lead);
			extents_leak(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, lead, true);
			goto label_err;
		}
		extent_record(tsdn, arena, r_extent_hooks,
		    &arena->extents_retained, lead, true);
	}

	/* Split the trail. */
	if (trailsize != 0) {
		extent_t *trail = extent_split_impl(tsdn, arena, r_extent_hooks,
		    extent, esize, szind, slab, trailsize, NSIZES, false, true);
		if (trail == NULL) {
			extent_deregister(tsdn, extent);
			extents_leak(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, extent, true);
			goto label_err;
		}
		extent_record(tsdn, arena, r_extent_hooks,
		    &arena->extents_retained, trail, true);
	} else if (leadsize == 0) {
		/*
		 * Splitting causes szind to be set as a side effect, but no
		 * splitting occurred.
		 */
		rtree_ctx_t rtree_ctx_fallback;
		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
		    &rtree_ctx_fallback);

		extent_szind_set(extent, szind);
		if (szind != NSIZES) {
			rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
			    (uintptr_t)extent_addr_get(extent), szind, slab);
			if (slab && extent_size_get(extent) > PAGE) {
				rtree_szind_slab_update(tsdn, &extents_rtree,
				    rtree_ctx,
				    (uintptr_t)extent_past_get(extent) -
				    (uintptr_t)PAGE, szind, slab);
			}
		}
	}

	if (*commit && !extent_committed_get(extent)) {
		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
		    extent_size_get(extent), true)) {
			extent_record(tsdn, arena, r_extent_hooks,
			    &arena->extents_retained, extent, true);
			goto label_err;
		}
		extent_zeroed_set(extent, true);
	}

	/*
	 * Increment extent_grow_next if doing so wouldn't exceed the legal
	 * range.
	 */
	if (arena->extent_grow_next + egn_skip + 1 < NPSIZES) {
		arena->extent_grow_next += egn_skip + 1;
	} else {
		arena->extent_grow_next = NPSIZES - 1;
	}
	/* All opportunities for failure are past. */
	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);

	if (config_prof) {
		/* Adjust gdump stats now that extent is final size. */
		extent_gdump_add(tsdn, extent);
	}
	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	if (slab) {
		rtree_ctx_t rtree_ctx_fallback;
		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
		    &rtree_ctx_fallback);

		extent_slab_set(extent, true);
		extent_interior_register(tsdn, rtree_ctx, extent, szind);
	}
	if (*zero && !extent_zeroed_get(extent)) {
		void *addr = extent_base_get(extent);
		size_t size = extent_size_get(extent);
		if (pages_purge_forced(addr, size)) {
			memset(addr, 0, size);
		}
	}

	return extent;
label_err:
	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
	return NULL;
}

static extent_t *
extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	assert(size != 0);
	assert(alignment != 0);

	malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);

	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
	    &arena->extents_retained, new_addr, size, pad, alignment, slab,
	    szind, zero, commit, true);
	if (extent != NULL) {
		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
		if (config_prof) {
			extent_gdump_add(tsdn, extent);
		}
	} else if (opt_retain && new_addr == NULL) {
		extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
		    pad, alignment, slab, szind, zero, commit);
		/* extent_grow_retained() always releases extent_grow_mtx. */
	} else {
		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
	}
	malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);

	return extent;
}

static extent_t *
extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	size_t esize = size + pad;
	extent_t *extent = extent_alloc(tsdn, arena);
	if (extent == NULL) {
		return NULL;
	}
	void *addr;
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
		    alignment, zero, commit);
	} else {
		addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
		    esize, alignment, zero, commit, arena_ind_get(arena));
	}
	if (addr == NULL) {
		extent_dalloc(tsdn, arena, extent);
		return NULL;
	}
	extent_init(extent, arena, addr, esize, slab, szind,
	    arena_extent_sn_next(arena), extent_state_active, *zero, *commit);
	if (pad != 0) {
		extent_addr_randomize(tsdn, extent, alignment);
	}
	if (extent_register(tsdn, extent)) {
		extents_leak(tsdn, arena, r_extent_hooks,
		    &arena->extents_retained, extent, false);
		return NULL;
	}

	return extent;
}

extent_t *
extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
    size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
	    new_addr, size, pad, alignment, slab, szind, zero, commit);
	if (extent == NULL) {
		extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
		    new_addr, size, pad, alignment, slab, szind, zero, commit);
	}

	return extent;
}

static bool
extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
    const extent_t *outer) {
	assert(extent_arena_get(inner) == arena);
	if (extent_arena_get(outer) != arena) {
		return false;
	}

	assert(extent_state_get(inner) == extent_state_active);
	if (extent_state_get(outer) != extents->state) {
		return false;
	}

	if (extent_committed_get(inner) != extent_committed_get(outer)) {
		return false;
	}

	return true;
}

static bool
extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
    bool growing_retained) {
	assert(extent_can_coalesce(arena, extents, inner, outer));

	if (forward && extents->delay_coalesce) {
		/*
		 * The extent that remains after coalescing must occupy the
		 * outer extent's position in the LRU.  For forward coalescing,
		 * swap the inner extent into the LRU.
		 */
		extent_list_replace(&extents->lru, outer, inner);
	}
	extent_activate_locked(tsdn, arena, extents, outer,
	    extents->delay_coalesce);

	malloc_mutex_unlock(tsdn, &extents->mtx);
	bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
	    forward ? inner : outer, forward ? outer : inner, growing_retained);
	malloc_mutex_lock(tsdn, &extents->mtx);

	if (err) {
		if (forward && extents->delay_coalesce) {
			extent_list_replace(&extents->lru, inner, outer);
		}
		extent_deactivate_locked(tsdn, arena, extents, outer,
		    extents->delay_coalesce);
	}

	return err;
}

static extent_t *
extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
    extent_t *extent, bool *coalesced, bool growing_retained) {
	/*
	 * Continue attempting to coalesce until failure, to protect against
	 * races with other threads that are thwarted by this one.
	 */
	bool again;
	do {
		again = false;

		/* Try to coalesce forward. */
		extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
		    extent_past_get(extent));
		if (next != NULL) {
			/*
			 * extents->mtx only protects against races for
			 * like-state extents, so call extent_can_coalesce()
			 * before releasing next's pool lock.
			 */
			bool can_coalesce = extent_can_coalesce(arena, extents,
			    extent, next);

			extent_unlock(tsdn, next);

			if (can_coalesce && !extent_coalesce(tsdn, arena,
			    r_extent_hooks, extents, extent, next, true,
			    growing_retained)) {
				if (extents->delay_coalesce) {
					/* Do minimal coalescing. */
					*coalesced = true;
					return extent;
				}
				again = true;
			}
		}

		/* Try to coalesce backward. */
		extent_t *prev = extent_lock_from_addr(tsdn, rtree_ctx,
		    extent_before_get(extent));
		if (prev != NULL) {
			bool can_coalesce = extent_can_coalesce(arena, extents,
			    extent, prev);
			extent_unlock(tsdn, prev);

			if (can_coalesce && !extent_coalesce(tsdn, arena,
			    r_extent_hooks, extents, extent, prev, false,
			    growing_retained)) {
				extent = prev;
				if (extents->delay_coalesce) {
					/* Do minimal coalescing. */
					*coalesced = true;
					return extent;
				}
				again = true;
			}
		}
	} while (again);

	if (extents->delay_coalesce) {
		*coalesced = false;
	}
	return extent;
}

static void
extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
    extents_t *extents, extent_t *extent, bool growing_retained) {
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	assert((extents_state_get(extents) != extent_state_dirty &&
	    extents_state_get(extents) != extent_state_muzzy) ||
	    !extent_zeroed_get(extent));

	malloc_mutex_lock(tsdn, &extents->mtx);
	extent_hooks_assure_initialized(arena, r_extent_hooks);

	extent_szind_set(extent, NSIZES);
	if (extent_slab_get(extent)) {
		extent_interior_deregister(tsdn, rtree_ctx, extent);
		extent_slab_set(extent, false);
	}

	assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)extent_base_get(extent), true) == extent);

	if (!extents->delay_coalesce) {
		extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
		    rtree_ctx, extents, extent, NULL, growing_retained);
	}

	extent_deactivate_locked(tsdn, arena, extents, extent, false);

	malloc_mutex_unlock(tsdn, &extents->mtx);
}

void
extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	if (extent_register(tsdn, extent)) {
		extents_leak(tsdn, arena, &extent_hooks,
		    &arena->extents_retained, extent, false);
		return;
	}
	extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
}

static bool
extent_dalloc_default_impl(void *addr, size_t size) {
	if (!have_dss || !extent_in_dss(addr)) {
		return extent_dalloc_mmap(addr, size);
	}
	return true;
}

static bool
extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    bool committed, unsigned arena_ind) {
	return extent_dalloc_default_impl(addr, size);
}

static bool
extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	bool err;

	assert(extent_base_get(extent) != NULL);
	assert(extent_size_get(extent) != 0);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_addr_set(extent, extent_base_get(extent));

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	/* Try to deallocate. */
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_dalloc_default_impl(extent_base_get(extent),
		    extent_size_get(extent));
	} else {
		err = ((*r_extent_hooks)->dalloc == NULL ||
		    (*r_extent_hooks)->dalloc(*r_extent_hooks,
		    extent_base_get(extent), extent_size_get(extent),
		    extent_committed_get(extent), arena_ind_get(arena)));
	}

	if (!err) {
		extent_dalloc(tsdn, arena, extent);
	}

	return err;
}

void
extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/*
	 * Deregister first to avoid a race with other allocating threads, and
	 * reregister if deallocation fails.
	 */
	extent_deregister(tsdn, extent);
	if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks, extent)) {
		return;
	}

	extent_reregister(tsdn, extent);
	/* Try to decommit; purge if that fails. */
	bool zeroed;
	if (!extent_committed_get(extent)) {
		zeroed = true;
	} else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
	    0, extent_size_get(extent))) {
		zeroed = true;
	} else if ((*r_extent_hooks)->purge_forced != NULL &&
	    !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena))) {
		zeroed = true;
	} else if (extent_state_get(extent) == extent_state_muzzy ||
	    ((*r_extent_hooks)->purge_lazy != NULL &&
	    !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), 0,
	    extent_size_get(extent), arena_ind_get(arena)))) {
		zeroed = false;
	} else {
		zeroed = false;
	}
	extent_zeroed_set(extent, zeroed);

	if (config_prof) {
		extent_gdump_sub(tsdn, extent);
	}

	extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
	    extent, false);
}
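
/*
 * Summary of the fallback ladder above: an extent that was never committed,
 * or that is successfully decommitted or force-purged, is known to read as
 * zeroes when next touched, so zeroed is set to true; lazy purging and the
 * muzzy state make no such guarantee, so zeroed is set to false before the
 * extent is recorded as retained.
 */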
1548 
1549 static void
1550 extent_destroy_default_impl(void *addr, size_t size) {
1551 	if (!have_dss || !extent_in_dss(addr)) {
1552 		pages_unmap(addr, size);
1553 	}
1554 }
1555 
1556 static void
1557 extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1558     bool committed, unsigned arena_ind) {
1559 	extent_destroy_default_impl(addr, size);
1560 }
1561 
1562 void
1563 extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
1564     extent_hooks_t **r_extent_hooks, extent_t *extent) {
1565 	assert(extent_base_get(extent) != NULL);
1566 	assert(extent_size_get(extent) != 0);
1567 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1568 	    WITNESS_RANK_CORE, 0);
1569 
1570 	/* Deregister first to avoid a race with other allocating threads. */
1571 	extent_deregister(tsdn, extent);
1572 
1573 	extent_addr_set(extent, extent_base_get(extent));
1574 
1575 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1576 	/* Try to destroy; silently fail otherwise. */
1577 	if (*r_extent_hooks == &extent_hooks_default) {
1578 		/* Call directly to propagate tsdn. */
1579 		extent_destroy_default_impl(extent_base_get(extent),
1580 		    extent_size_get(extent));
1581 	} else if ((*r_extent_hooks)->destroy != NULL) {
1582 		(*r_extent_hooks)->destroy(*r_extent_hooks,
1583 		    extent_base_get(extent), extent_size_get(extent),
1584 		    extent_committed_get(extent), arena_ind_get(arena));
1585 	}
1586 
1587 	extent_dalloc(tsdn, arena, extent);
1588 }
1589 
1590 static bool
1591 extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1592     size_t offset, size_t length, unsigned arena_ind) {
1593 	return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
1594 	    length);
1595 }
1596 
1597 static bool
1598 extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
1599     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1600     size_t length, bool growing_retained) {
1601 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1602 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1603 
1604 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1605 	bool err = ((*r_extent_hooks)->commit == NULL ||
1606 	    (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
1607 	    extent_size_get(extent), offset, length, arena_ind_get(arena)));
1608 	extent_committed_set(extent, extent_committed_get(extent) || !err);
1609 	return err;
1610 }
1611 
1612 bool
1613 extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
1614     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1615     size_t length) {
1616 	return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
1617 	    length, false);
1618 }
1619 
1620 static bool
1621 extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1622     size_t offset, size_t length, unsigned arena_ind) {
1623 	return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
1624 	    length);
1625 }
1626 
bool
extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	bool err = ((*r_extent_hooks)->decommit == NULL ||
	    (*r_extent_hooks)->decommit(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena)));
	extent_committed_set(extent, extent_committed_get(extent) && err);
	return err;
}

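/*
 * The purge defaults are only compiled in on platforms where the corresponding
 * pages_purge_*() primitive is available (e.g. madvise(MADV_FREE) for lazy
 * purging); elsewhere the default hook table leaves the slot NULL.
 */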
#ifdef PAGES_CAN_PURGE_LAZY
static bool
extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
	    length);
}
#endif

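/*
 * Lazily purge a page range: the kernel may discard the pages' contents at its
 * convenience, so the contents must be assumed undefined afterwards.  Returns
 * true on error or if no purge_lazy hook is installed.
 */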
static bool
extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	return ((*r_extent_hooks)->purge_lazy == NULL ||
	    (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena)));
}

bool
extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
	    offset, length, false);
}

#ifdef PAGES_CAN_PURGE_FORCED
static bool
extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
    size_t size, size_t offset, size_t length, unsigned arena_ind) {
	assert(addr != NULL);
	assert((offset & PAGE_MASK) == 0);
	assert(length != 0);
	assert((length & PAGE_MASK) == 0);

	return pages_purge_forced((void *)((uintptr_t)addr +
	    (uintptr_t)offset), length);
}
#endif

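/*
 * Forcibly purge a page range: on success the pages' contents are discarded
 * immediately, and subsequent reads observe zeroes.  Returns true on error or
 * if no purge_forced hook is installed.
 */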
static bool
extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length, bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);
	return ((*r_extent_hooks)->purge_forced == NULL ||
	    (*r_extent_hooks)->purge_forced(*r_extent_hooks,
	    extent_base_get(extent), extent_size_get(extent), offset, length,
	    arena_ind_get(arena)));
}

bool
extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
    size_t length) {
	return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
	    offset, length, false);
}

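/*
 * The default split hook performs no work of its own; a split merely requires
 * that the two halves remain independently mappable, so it succeeds exactly
 * when the platform coalesces adjacent mappings.
 */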
#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
    size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
	return !maps_coalesce;
}
#endif

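/*
 * Split an extent into a lead of size_a and a trail of size_b.  The trail
 * extent is allocated first, the rtree mappings for both halves are looked up,
 * the pair is locked, and only then is the split hook invoked, so that a
 * failure at any step can be unwound without leaving a partially registered
 * extent behind.  Returns the trail extent on success, NULL on failure.
 */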
static extent_t *
extent_split_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
    bool growing_retained) {
	assert(extent_size_get(extent) == size_a + size_b);
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->split == NULL) {
		return NULL;
	}

	extent_t *trail = extent_alloc(tsdn, arena);
	if (trail == NULL) {
		goto label_error_a;
	}

	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
	    size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
	    extent_state_get(extent), extent_zeroed_get(extent),
	    extent_committed_get(extent));

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
	{
		extent_t lead;

		extent_init(&lead, arena, extent_addr_get(extent), size_a,
		    slab_a, szind_a, extent_sn_get(extent),
		    extent_state_get(extent), extent_zeroed_get(extent),
		    extent_committed_get(extent));

		extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
		    true, &lead_elm_a, &lead_elm_b);
	}
	rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
	    &trail_elm_a, &trail_elm_b);

	if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
	    || trail_elm_b == NULL) {
		goto label_error_b;
	}

	extent_lock2(tsdn, extent, trail);

	if ((*r_extent_hooks)->split(*r_extent_hooks, extent_base_get(extent),
	    size_a + size_b, size_a, size_b, extent_committed_get(extent),
	    arena_ind_get(arena))) {
		goto label_error_c;
	}

	extent_size_set(extent, size_a);
	extent_szind_set(extent, szind_a);

	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
	    szind_a, slab_a);
	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
	    szind_b, slab_b);

	extent_unlock2(tsdn, extent, trail);

	return trail;
label_error_c:
	extent_unlock2(tsdn, extent, trail);
label_error_b:
	extent_dalloc(tsdn, arena, trail);
label_error_a:
	return NULL;
}

extent_t *
extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
    szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
	return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
	    szind_a, slab_a, size_b, szind_b, slab_b, false);
}

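/*
 * Merging is impossible if the platform cannot coalesce adjacent mappings, or
 * if the two addresses straddle the DSS boundary.
 */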
static bool
extent_merge_default_impl(void *addr_a, void *addr_b) {
	if (!maps_coalesce) {
		return true;
	}
	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
		return true;
	}

	return false;
}

#ifdef JEMALLOC_MAPS_COALESCE
static bool
extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
    void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
	return extent_merge_default_impl(addr_a, addr_b);
}
#endif

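/*
 * Merge extents a and b into a single extent rooted at a.  On success the
 * rtree mappings for the interior boundary are erased while both extents are
 * locked, a absorbs b's size (keeping the smaller serial number), and b's
 * extent_t is released.  Returns true on failure.
 */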
static bool
extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
    bool growing_retained) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);

	extent_hooks_assure_initialized(arena, r_extent_hooks);

	if ((*r_extent_hooks)->merge == NULL) {
		return true;
	}

	bool err;
	if (*r_extent_hooks == &extent_hooks_default) {
		/* Call directly to propagate tsdn. */
		err = extent_merge_default_impl(extent_base_get(a),
		    extent_base_get(b));
	} else {
		err = (*r_extent_hooks)->merge(*r_extent_hooks,
		    extent_base_get(a), extent_size_get(a), extent_base_get(b),
		    extent_size_get(b), extent_committed_get(a),
		    arena_ind_get(arena));
	}

	if (err) {
		return true;
	}

	/*
	 * The rtree writes must happen while all the relevant elements are
	 * owned, so the following code uses decomposed helper functions rather
	 * than extent_{,de}register() to do things in the right order.
	 */
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
	    &a_elm_b);
	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
	    &b_elm_b);

	extent_lock2(tsdn, a, b);

	if (a_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
		    NSIZES, false);
	}
	if (b_elm_b != NULL) {
		rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
		    NSIZES, false);
	} else {
		b_elm_b = b_elm_a;
	}

	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
	extent_szind_set(a, NSIZES);
	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
	    extent_sn_get(a) : extent_sn_get(b));
	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));

	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, NSIZES, false);

	extent_unlock2(tsdn, a, b);

	extent_dalloc(tsdn, extent_arena_get(b), b);

	return false;
}

bool
extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
	return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
}

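/*
 * One-time initialization of global extent state: the extents rtree, the
 * extent mutex pool, and (when available) the DSS allocator.
 */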
bool
extent_boot(void) {
	if (rtree_new(&extents_rtree, true)) {
		return true;
	}

	if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
	    WITNESS_RANK_EXTENT_POOL)) {
		return true;
	}

	if (have_dss) {
		extent_dss_boot();
	}

	return false;
}