xref: /freebsd/contrib/jemalloc/src/extent.c (revision b62ae61446ee19ab524fea4a066f585cbd7aa727)
1 #define JEMALLOC_EXTENT_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4 
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/extent_dss.h"
7 #include "jemalloc/internal/extent_mmap.h"
8 #include "jemalloc/internal/ph.h"
9 #include "jemalloc/internal/rtree.h"
10 #include "jemalloc/internal/mutex.h"
11 #include "jemalloc/internal/mutex_pool.h"
12 
13 /******************************************************************************/
14 /* Data. */
15 
16 rtree_t		extents_rtree;
17 /* Keyed by the address of the extent_t being protected. */
18 mutex_pool_t	extent_mutex_pool;
19 
20 size_t opt_lg_extent_max_active_fit = LG_EXTENT_MAX_ACTIVE_FIT_DEFAULT;
21 
22 static const bitmap_info_t extents_bitmap_info =
23     BITMAP_INFO_INITIALIZER(SC_NPSIZES+1);
24 
25 static void *extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr,
26     size_t size, size_t alignment, bool *zero, bool *commit,
27     unsigned arena_ind);
28 static bool extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr,
29     size_t size, bool committed, unsigned arena_ind);
30 static void extent_destroy_default(extent_hooks_t *extent_hooks, void *addr,
31     size_t size, bool committed, unsigned arena_ind);
32 static bool extent_commit_default(extent_hooks_t *extent_hooks, void *addr,
33     size_t size, size_t offset, size_t length, unsigned arena_ind);
34 static bool extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
35     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
36     size_t length, bool growing_retained);
37 static bool extent_decommit_default(extent_hooks_t *extent_hooks,
38     void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
39 #ifdef PAGES_CAN_PURGE_LAZY
40 static bool extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr,
41     size_t size, size_t offset, size_t length, unsigned arena_ind);
42 #endif
43 static bool extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
44     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
45     size_t length, bool growing_retained);
46 #ifdef PAGES_CAN_PURGE_FORCED
47 static bool extent_purge_forced_default(extent_hooks_t *extent_hooks,
48     void *addr, size_t size, size_t offset, size_t length, unsigned arena_ind);
49 #endif
50 static bool extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
51     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
52     size_t length, bool growing_retained);
53 static bool extent_split_default(extent_hooks_t *extent_hooks, void *addr,
54     size_t size, size_t size_a, size_t size_b, bool committed,
55     unsigned arena_ind);
56 static extent_t *extent_split_impl(tsdn_t *tsdn, arena_t *arena,
57     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
58     szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
59     bool growing_retained);
60 static bool extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a,
61     size_t size_a, void *addr_b, size_t size_b, bool committed,
62     unsigned arena_ind);
63 static bool extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
64     extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
65     bool growing_retained);
66 
67 const extent_hooks_t	extent_hooks_default = {
68 	extent_alloc_default,
69 	extent_dalloc_default,
70 	extent_destroy_default,
71 	extent_commit_default,
72 	extent_decommit_default
73 #ifdef PAGES_CAN_PURGE_LAZY
74 	,
75 	extent_purge_lazy_default
76 #else
77 	,
78 	NULL
79 #endif
80 #ifdef PAGES_CAN_PURGE_FORCED
81 	,
82 	extent_purge_forced_default
83 #else
84 	,
85 	NULL
86 #endif
87 	,
88 	extent_split_default,
89 	extent_merge_default
90 };
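
/*
 * Illustrative sketch (not authoritative): custom hooks can be installed per
 * arena through the "arena.<i>.extent_hooks" mallctl.  Assuming an arena
 * index arena_ind and a fully populated user-supplied table my_hooks:
 *
 *	extent_hooks_t *new_hooks = &my_hooks;
 *	extent_hooks_t *old_hooks;
 *	size_t sz = sizeof(old_hooks);
 *	char cmd[64];
 *	snprintf(cmd, sizeof(cmd), "arena.%u.extent_hooks", arena_ind);
 *	mallctl(cmd, (void *)&old_hooks, &sz, (void *)&new_hooks,
 *	    sizeof(new_hooks));
 *
 * NULL entries in the table above (the purge hooks on platforms that cannot
 * purge) cause jemalloc to treat the corresponding operation as unsupported.
 */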
91 
92 /* Used exclusively for gdump triggering. */
93 static atomic_zu_t curpages;
94 static atomic_zu_t highpages;
95 
96 /******************************************************************************/
97 /*
98  * Function prototypes for static functions that are referenced prior to
99  * definition.
100  */
101 
102 static void extent_deregister(tsdn_t *tsdn, extent_t *extent);
103 static extent_t *extent_recycle(tsdn_t *tsdn, arena_t *arena,
104     extent_hooks_t **r_extent_hooks, extents_t *extents, void *new_addr,
105     size_t usize, size_t pad, size_t alignment, bool slab, szind_t szind,
106     bool *zero, bool *commit, bool growing_retained);
107 static extent_t *extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
108     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
109     extent_t *extent, bool *coalesced, bool growing_retained);
110 static void extent_record(tsdn_t *tsdn, arena_t *arena,
111     extent_hooks_t **r_extent_hooks, extents_t *extents, extent_t *extent,
112     bool growing_retained);
113 
114 /******************************************************************************/
115 
116 #define ATTR_NONE /* does nothing */
117 
118 ph_gen(ATTR_NONE, extent_avail_, extent_tree_t, extent_t, ph_link,
119     extent_esnead_comp)
120 
121 #undef ATTR_NONE
122 
123 typedef enum {
124 	lock_result_success,
125 	lock_result_failure,
126 	lock_result_no_extent
127 } lock_result_t;
128 
129 static lock_result_t
130 extent_rtree_leaf_elm_try_lock(tsdn_t *tsdn, rtree_leaf_elm_t *elm,
131     extent_t **result, bool inactive_only) {
132 	extent_t *extent1 = rtree_leaf_elm_extent_read(tsdn, &extents_rtree,
133 	    elm, true);
134 
135 	/* Slab implies active extents and should be skipped. */
136 	if (extent1 == NULL || (inactive_only && rtree_leaf_elm_slab_read(tsdn,
137 	    &extents_rtree, elm, true))) {
138 		return lock_result_no_extent;
139 	}
140 
141 	/*
142 	 * It's possible that the extent changed out from under us, and with it
143 	 * the leaf->extent mapping.  We have to recheck while holding the lock.
144 	 */
145 	extent_lock(tsdn, extent1);
146 	extent_t *extent2 = rtree_leaf_elm_extent_read(tsdn,
147 	    &extents_rtree, elm, true);
148 
149 	if (extent1 == extent2) {
150 		*result = extent1;
151 		return lock_result_success;
152 	} else {
153 		extent_unlock(tsdn, extent1);
154 		return lock_result_failure;
155 	}
156 }
157 
158 /*
159  * Returns a pool-locked extent_t * if there's one associated with the given
160  * address, and NULL otherwise.
161  */
162 static extent_t *
163 extent_lock_from_addr(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, void *addr,
164     bool inactive_only) {
165 	extent_t *ret = NULL;
166 	rtree_leaf_elm_t *elm = rtree_leaf_elm_lookup(tsdn, &extents_rtree,
167 	    rtree_ctx, (uintptr_t)addr, false, false);
168 	if (elm == NULL) {
169 		return NULL;
170 	}
171 	lock_result_t lock_result;
172 	do {
173 		lock_result = extent_rtree_leaf_elm_try_lock(tsdn, elm, &ret,
174 		    inactive_only);
175 	} while (lock_result == lock_result_failure);
176 	return ret;
177 }
178 
179 extent_t *
180 extent_alloc(tsdn_t *tsdn, arena_t *arena) {
181 	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
182 	extent_t *extent = extent_avail_first(&arena->extent_avail);
183 	if (extent == NULL) {
184 		malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
185 		return base_alloc_extent(tsdn, arena->base);
186 	}
187 	extent_avail_remove(&arena->extent_avail, extent);
188 	atomic_fetch_sub_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
189 	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
190 	return extent;
191 }
192 
193 void
194 extent_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
195 	malloc_mutex_lock(tsdn, &arena->extent_avail_mtx);
196 	extent_avail_insert(&arena->extent_avail, extent);
197 	atomic_fetch_add_zu(&arena->extent_avail_cnt, 1, ATOMIC_RELAXED);
198 	malloc_mutex_unlock(tsdn, &arena->extent_avail_mtx);
199 }
200 
201 extent_hooks_t *
202 extent_hooks_get(arena_t *arena) {
203 	return base_extent_hooks_get(arena->base);
204 }
205 
206 extent_hooks_t *
207 extent_hooks_set(tsd_t *tsd, arena_t *arena, extent_hooks_t *extent_hooks) {
208 	background_thread_info_t *info;
209 	if (have_background_thread) {
210 		info = arena_background_thread_info_get(arena);
211 		malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
212 	}
213 	extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
214 	if (have_background_thread) {
215 		malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
216 	}
217 
218 	return ret;
219 }
220 
221 static void
222 extent_hooks_assure_initialized(arena_t *arena,
223     extent_hooks_t **r_extent_hooks) {
224 	if (*r_extent_hooks == EXTENT_HOOKS_INITIALIZER) {
225 		*r_extent_hooks = extent_hooks_get(arena);
226 	}
227 }
228 
229 #ifndef JEMALLOC_JET
230 static
231 #endif
232 size_t
233 extent_size_quantize_floor(size_t size) {
234 	size_t ret;
235 	pszind_t pind;
236 
237 	assert(size > 0);
238 	assert((size & PAGE_MASK) == 0);
239 
240 	pind = sz_psz2ind(size - sz_large_pad + 1);
241 	if (pind == 0) {
242 		/*
243 		 * Avoid underflow.  This short-circuit would also do the right
244 		 * thing for all sizes in the range for which there are
245 		 * PAGE-spaced size classes, but it's simplest to just handle
246 		 * the one case that would cause erroneous results.
247 		 */
248 		return size;
249 	}
250 	ret = sz_pind2sz(pind - 1) + sz_large_pad;
251 	assert(ret <= size);
252 	return ret;
253 }
254 
255 #ifndef JEMALLOC_JET
256 static
257 #endif
258 size_t
259 extent_size_quantize_ceil(size_t size) {
260 	size_t ret;
261 
262 	assert(size > 0);
263 	assert(size - sz_large_pad <= SC_LARGE_MAXCLASS);
264 	assert((size & PAGE_MASK) == 0);
265 
266 	ret = extent_size_quantize_floor(size);
267 	if (ret < size) {
268 		/*
269 		 * Skip a quantization that may have an adequately large extent,
270 		 * because under-sized extents may be mixed in.  This only
271 		 * happens when an unusual size is requested, i.e. for aligned
272 		 * allocation, and is just one of several places where linear
273 		 * search would potentially find sufficiently aligned available
274 		 * memory somewhere lower.
275 		 */
276 		ret = sz_pind2sz(sz_psz2ind(ret - sz_large_pad + 1)) +
277 		    sz_large_pad;
278 	}
279 	return ret;
280 }
281 
282 /* Generate pairing heap functions. */
283 ph_gen(, extent_heap_, extent_heap_t, extent_t, ph_link, extent_snad_comp)
284 
285 bool
286 extents_init(tsdn_t *tsdn, extents_t *extents, extent_state_t state,
287     bool delay_coalesce) {
288 	if (malloc_mutex_init(&extents->mtx, "extents", WITNESS_RANK_EXTENTS,
289 	    malloc_mutex_rank_exclusive)) {
290 		return true;
291 	}
292 	for (unsigned i = 0; i < SC_NPSIZES + 1; i++) {
293 		extent_heap_new(&extents->heaps[i]);
294 	}
295 	bitmap_init(extents->bitmap, &extents_bitmap_info, true);
296 	extent_list_init(&extents->lru);
297 	atomic_store_zu(&extents->npages, 0, ATOMIC_RELAXED);
298 	extents->state = state;
299 	extents->delay_coalesce = delay_coalesce;
300 	return false;
301 }
302 
303 extent_state_t
304 extents_state_get(const extents_t *extents) {
305 	return extents->state;
306 }
307 
308 size_t
309 extents_npages_get(extents_t *extents) {
310 	return atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
311 }
312 
313 size_t
314 extents_nextents_get(extents_t *extents, pszind_t pind) {
315 	return atomic_load_zu(&extents->nextents[pind], ATOMIC_RELAXED);
316 }
317 
318 size_t
319 extents_nbytes_get(extents_t *extents, pszind_t pind) {
320 	return atomic_load_zu(&extents->nbytes[pind], ATOMIC_RELAXED);
321 }
322 
323 static void
324 extents_stats_add(extents_t *extent, pszind_t pind, size_t sz) {
325 	size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
326 	atomic_store_zu(&extent->nextents[pind], cur + 1, ATOMIC_RELAXED);
327 	cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
328 	atomic_store_zu(&extent->nbytes[pind], cur + sz, ATOMIC_RELAXED);
329 }
330 
331 static void
332 extents_stats_sub(extents_t *extent, pszind_t pind, size_t sz) {
333 	size_t cur = atomic_load_zu(&extent->nextents[pind], ATOMIC_RELAXED);
334 	atomic_store_zu(&extent->nextents[pind], cur - 1, ATOMIC_RELAXED);
335 	cur = atomic_load_zu(&extent->nbytes[pind], ATOMIC_RELAXED);
336 	atomic_store_zu(&extent->nbytes[pind], cur - sz, ATOMIC_RELAXED);
337 }
338 
339 static void
340 extents_insert_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
341 	malloc_mutex_assert_owner(tsdn, &extents->mtx);
342 	assert(extent_state_get(extent) == extents->state);
343 
344 	size_t size = extent_size_get(extent);
345 	size_t psz = extent_size_quantize_floor(size);
346 	pszind_t pind = sz_psz2ind(psz);
347 	if (extent_heap_empty(&extents->heaps[pind])) {
348 		bitmap_unset(extents->bitmap, &extents_bitmap_info,
349 		    (size_t)pind);
350 	}
351 	extent_heap_insert(&extents->heaps[pind], extent);
352 
353 	if (config_stats) {
354 		extents_stats_add(extents, pind, size);
355 	}
356 
357 	extent_list_append(&extents->lru, extent);
358 	size_t npages = size >> LG_PAGE;
359 	/*
360 	 * All modifications to npages hold the mutex (as asserted above), so we
361 	 * don't need an atomic fetch-add; we can get by with a load followed by
362 	 * a store.
363 	 */
364 	size_t cur_extents_npages =
365 	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
366 	atomic_store_zu(&extents->npages, cur_extents_npages + npages,
367 	    ATOMIC_RELAXED);
368 }
369 
370 static void
371 extents_remove_locked(tsdn_t *tsdn, extents_t *extents, extent_t *extent) {
372 	malloc_mutex_assert_owner(tsdn, &extents->mtx);
373 	assert(extent_state_get(extent) == extents->state);
374 
375 	size_t size = extent_size_get(extent);
376 	size_t psz = extent_size_quantize_floor(size);
377 	pszind_t pind = sz_psz2ind(psz);
378 	extent_heap_remove(&extents->heaps[pind], extent);
379 
380 	if (config_stats) {
381 		extents_stats_sub(extents, pind, size);
382 	}
383 
384 	if (extent_heap_empty(&extents->heaps[pind])) {
385 		bitmap_set(extents->bitmap, &extents_bitmap_info,
386 		    (size_t)pind);
387 	}
388 	extent_list_remove(&extents->lru, extent);
389 	size_t npages = size >> LG_PAGE;
390 	/*
391 	 * As in extents_insert_locked, we hold extents->mtx and so don't need
392 	 * atomic operations for updating extents->npages.
393 	 */
394 	size_t cur_extents_npages =
395 	    atomic_load_zu(&extents->npages, ATOMIC_RELAXED);
396 	assert(cur_extents_npages >= npages);
397 	atomic_store_zu(&extents->npages,
398 	    cur_extents_npages - (size >> LG_PAGE), ATOMIC_RELAXED);
399 }
400 
401 /*
402  * Find an extent with size [min_size, max_size) to satisfy the alignment
403  * requirement.  For each size, try only the first extent in the heap.
404  */
405 static extent_t *
406 extents_fit_alignment(extents_t *extents, size_t min_size, size_t max_size,
407     size_t alignment) {
408         pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(min_size));
409         pszind_t pind_max = sz_psz2ind(extent_size_quantize_ceil(max_size));
410 
411 	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
412 	    &extents_bitmap_info, (size_t)pind); i < pind_max; i =
413 	    (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
414 	    (size_t)i+1)) {
415 		assert(i < SC_NPSIZES);
416 		assert(!extent_heap_empty(&extents->heaps[i]));
417 		extent_t *extent = extent_heap_first(&extents->heaps[i]);
418 		uintptr_t base = (uintptr_t)extent_base_get(extent);
419 		size_t candidate_size = extent_size_get(extent);
420 		assert(candidate_size >= min_size);
421 
422 		uintptr_t next_align = ALIGNMENT_CEILING((uintptr_t)base,
423 		    PAGE_CEILING(alignment));
424 		if (base > next_align || base + candidate_size <= next_align) {
425 			/* Overflow or not crossing the next alignment. */
426 			continue;
427 		}
428 
429 		size_t leadsize = next_align - base;
430 		if (candidate_size - leadsize >= min_size) {
431 			return extent;
432 		}
433 	}
434 
435 	return NULL;
436 }
437 
438 /*
439  * Do first-fit extent selection, i.e. select the oldest/lowest extent that is
440  * large enough.
441  */
442 static extent_t *
443 extents_first_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
444     size_t size) {
445 	extent_t *ret = NULL;
446 
447 	pszind_t pind = sz_psz2ind(extent_size_quantize_ceil(size));
448 
449 	if (!maps_coalesce && !opt_retain) {
450 		/*
451 		 * No split / merge allowed (Windows w/o retain). Try exact fit
452 		 * only.
453 		 */
454 		return extent_heap_empty(&extents->heaps[pind]) ? NULL :
455 		    extent_heap_first(&extents->heaps[pind]);
456 	}
457 
458 	for (pszind_t i = (pszind_t)bitmap_ffu(extents->bitmap,
459 	    &extents_bitmap_info, (size_t)pind);
460 	    i < SC_NPSIZES + 1;
461 	    i = (pszind_t)bitmap_ffu(extents->bitmap, &extents_bitmap_info,
462 	    (size_t)i+1)) {
463 		assert(!extent_heap_empty(&extents->heaps[i]));
464 		extent_t *extent = extent_heap_first(&extents->heaps[i]);
465 		assert(extent_size_get(extent) >= size);
466 		/*
467 		 * In order to reduce fragmentation, avoid reusing and splitting
468 		 * large extents for much smaller sizes.
469 		 *
470 		 * Only do this check for dirty extents (delay_coalesce).
471 		 */
472 		if (extents->delay_coalesce &&
473 		    (sz_pind2sz(i) >> opt_lg_extent_max_active_fit) > size) {
474 			break;
475 		}
476 		if (ret == NULL || extent_snad_comp(extent, ret) < 0) {
477 			ret = extent;
478 		}
479 		if (i == SC_NPSIZES) {
480 			break;
481 		}
482 		assert(i < SC_NPSIZES);
483 	}
484 
485 	return ret;
486 }
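
/*
 * Illustrative note: with delay_coalesce (dirty extents), the check above
 * stops the search once a candidate's size class is more than
 * (1 << opt_lg_extent_max_active_fit) times the request; e.g. with the shift
 * set to 6, a 4 KiB request will not carve up an idle 1 MiB dirty extent,
 * trading some reuse for less fragmentation of large extents.
 */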
487 
488 /*
489  * Do first-fit extent selection, where the selection policy choice is
490  * based on extents->delay_coalesce.
491  */
492 static extent_t *
493 extents_fit_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
494     size_t esize, size_t alignment) {
495 	malloc_mutex_assert_owner(tsdn, &extents->mtx);
496 
497 	size_t max_size = esize + PAGE_CEILING(alignment) - PAGE;
498 	/* Beware size_t wrap-around. */
499 	if (max_size < esize) {
500 		return NULL;
501 	}
502 
503 	extent_t *extent =
504 	    extents_first_fit_locked(tsdn, arena, extents, max_size);
505 
506 	if (alignment > PAGE && extent == NULL) {
507 		/*
508 		 * max_size guarantees the alignment requirement but is rather
509 		 * pessimistic.  Next we try to satisfy the aligned allocation
510 		 * with sizes in [esize, max_size).
511 		 */
512 		extent = extents_fit_alignment(extents, esize, max_size,
513 		    alignment);
514 	}
515 
516 	return extent;
517 }
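
/*
 * Worked example (assuming 4 KiB pages): for esize = 16 KiB and alignment =
 * 64 KiB, max_size = 16 KiB + 64 KiB - 4 KiB = 76 KiB.  Any page-aligned
 * extent of at least 76 KiB necessarily contains a 64 KiB-aligned 16 KiB
 * range, so the first-fit pass above can search by size alone; the
 * extents_fit_alignment() fallback then probes sizes in [esize, max_size)
 * when no such pessimistically sized extent is available.
 */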
518 
519 static bool
520 extent_try_delayed_coalesce(tsdn_t *tsdn, arena_t *arena,
521     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
522     extent_t *extent) {
523 	extent_state_set(extent, extent_state_active);
524 	bool coalesced;
525 	extent = extent_try_coalesce(tsdn, arena, r_extent_hooks, rtree_ctx,
526 	    extents, extent, &coalesced, false);
527 	extent_state_set(extent, extents_state_get(extents));
528 
529 	if (!coalesced) {
530 		return true;
531 	}
532 	extents_insert_locked(tsdn, extents, extent);
533 	return false;
534 }
535 
536 extent_t *
537 extents_alloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
538     extents_t *extents, void *new_addr, size_t size, size_t pad,
539     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
540 	assert(size + pad != 0);
541 	assert(alignment != 0);
542 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
543 	    WITNESS_RANK_CORE, 0);
544 
545 	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks, extents,
546 	    new_addr, size, pad, alignment, slab, szind, zero, commit, false);
547 	assert(extent == NULL || extent_dumpable_get(extent));
548 	return extent;
549 }
550 
551 void
552 extents_dalloc(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
553     extents_t *extents, extent_t *extent) {
554 	assert(extent_base_get(extent) != NULL);
555 	assert(extent_size_get(extent) != 0);
556 	assert(extent_dumpable_get(extent));
557 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
558 	    WITNESS_RANK_CORE, 0);
559 
560 	extent_addr_set(extent, extent_base_get(extent));
561 	extent_zeroed_set(extent, false);
562 
563 	extent_record(tsdn, arena, r_extent_hooks, extents, extent, false);
564 }
565 
566 extent_t *
567 extents_evict(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
568     extents_t *extents, size_t npages_min) {
569 	rtree_ctx_t rtree_ctx_fallback;
570 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
571 
572 	malloc_mutex_lock(tsdn, &extents->mtx);
573 
574 	/*
575 	 * Get the LRU coalesced extent, if any.  If coalescing was delayed,
576 	 * the loop will iterate until the LRU extent is fully coalesced.
577 	 */
578 	extent_t *extent;
579 	while (true) {
580 		/* Get the LRU extent, if any. */
581 		extent = extent_list_first(&extents->lru);
582 		if (extent == NULL) {
583 			goto label_return;
584 		}
585 		/* Check the eviction limit. */
586 		size_t extents_npages = atomic_load_zu(&extents->npages,
587 		    ATOMIC_RELAXED);
588 		if (extents_npages <= npages_min) {
589 			extent = NULL;
590 			goto label_return;
591 		}
592 		extents_remove_locked(tsdn, extents, extent);
593 		if (!extents->delay_coalesce) {
594 			break;
595 		}
596 		/* Try to coalesce. */
597 		if (extent_try_delayed_coalesce(tsdn, arena, r_extent_hooks,
598 		    rtree_ctx, extents, extent)) {
599 			break;
600 		}
601 		/*
602 		 * The LRU extent was just coalesced and the result placed in
603 		 * the LRU at its neighbor's position.  Start over.
604 		 */
605 	}
606 
607 	/*
608 	 * Either mark the extent active or deregister it to protect against
609 	 * concurrent operations.
610 	 */
611 	switch (extents_state_get(extents)) {
612 	case extent_state_active:
613 		not_reached();
614 	case extent_state_dirty:
615 	case extent_state_muzzy:
616 		extent_state_set(extent, extent_state_active);
617 		break;
618 	case extent_state_retained:
619 		extent_deregister(tsdn, extent);
620 		break;
621 	default:
622 		not_reached();
623 	}
624 
625 label_return:
626 	malloc_mutex_unlock(tsdn, &extents->mtx);
627 	return extent;
628 }
629 
630 /*
631  * This can only happen when we fail to allocate a new extent struct (which
632  * indicates OOM), e.g. when trying to split an existing extent.
633  */
634 static void
635 extents_abandon_vm(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
636     extents_t *extents, extent_t *extent, bool growing_retained) {
637 	size_t sz = extent_size_get(extent);
638 	if (config_stats) {
639 		arena_stats_accum_zu(&arena->stats.abandoned_vm, sz);
640 	}
641 	/*
642 	 * Leak extent after making sure its pages have already been purged, so
643 	 * that this is only a virtual memory leak.
644 	 */
645 	if (extents_state_get(extents) == extent_state_dirty) {
646 		if (extent_purge_lazy_impl(tsdn, arena, r_extent_hooks,
647 		    extent, 0, sz, growing_retained)) {
648 			extent_purge_forced_impl(tsdn, arena, r_extent_hooks,
649 			    extent, 0, extent_size_get(extent),
650 			    growing_retained);
651 		}
652 	}
653 	extent_dalloc(tsdn, arena, extent);
654 }
655 
656 void
657 extents_prefork(tsdn_t *tsdn, extents_t *extents) {
658 	malloc_mutex_prefork(tsdn, &extents->mtx);
659 }
660 
661 void
662 extents_postfork_parent(tsdn_t *tsdn, extents_t *extents) {
663 	malloc_mutex_postfork_parent(tsdn, &extents->mtx);
664 }
665 
666 void
667 extents_postfork_child(tsdn_t *tsdn, extents_t *extents) {
668 	malloc_mutex_postfork_child(tsdn, &extents->mtx);
669 }
670 
671 static void
672 extent_deactivate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
673     extent_t *extent) {
674 	assert(extent_arena_get(extent) == arena);
675 	assert(extent_state_get(extent) == extent_state_active);
676 
677 	extent_state_set(extent, extents_state_get(extents));
678 	extents_insert_locked(tsdn, extents, extent);
679 }
680 
681 static void
682 extent_deactivate(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
683     extent_t *extent) {
684 	malloc_mutex_lock(tsdn, &extents->mtx);
685 	extent_deactivate_locked(tsdn, arena, extents, extent);
686 	malloc_mutex_unlock(tsdn, &extents->mtx);
687 }
688 
689 static void
690 extent_activate_locked(tsdn_t *tsdn, arena_t *arena, extents_t *extents,
691     extent_t *extent) {
692 	assert(extent_arena_get(extent) == arena);
693 	assert(extent_state_get(extent) == extents_state_get(extents));
694 
695 	extents_remove_locked(tsdn, extents, extent);
696 	extent_state_set(extent, extent_state_active);
697 }
698 
699 static bool
700 extent_rtree_leaf_elms_lookup(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
701     const extent_t *extent, bool dependent, bool init_missing,
702     rtree_leaf_elm_t **r_elm_a, rtree_leaf_elm_t **r_elm_b) {
703 	*r_elm_a = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
704 	    (uintptr_t)extent_base_get(extent), dependent, init_missing);
705 	if (!dependent && *r_elm_a == NULL) {
706 		return true;
707 	}
708 	assert(*r_elm_a != NULL);
709 
710 	*r_elm_b = rtree_leaf_elm_lookup(tsdn, &extents_rtree, rtree_ctx,
711 	    (uintptr_t)extent_last_get(extent), dependent, init_missing);
712 	if (!dependent && *r_elm_b == NULL) {
713 		return true;
714 	}
715 	assert(*r_elm_b != NULL);
716 
717 	return false;
718 }
719 
720 static void
721 extent_rtree_write_acquired(tsdn_t *tsdn, rtree_leaf_elm_t *elm_a,
722     rtree_leaf_elm_t *elm_b, extent_t *extent, szind_t szind, bool slab) {
723 	rtree_leaf_elm_write(tsdn, &extents_rtree, elm_a, extent, szind, slab);
724 	if (elm_b != NULL) {
725 		rtree_leaf_elm_write(tsdn, &extents_rtree, elm_b, extent, szind,
726 		    slab);
727 	}
728 }
729 
730 static void
731 extent_interior_register(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx, extent_t *extent,
732     szind_t szind) {
733 	assert(extent_slab_get(extent));
734 
735 	/* Register interior. */
736 	for (size_t i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
737 		rtree_write(tsdn, &extents_rtree, rtree_ctx,
738 		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
739 		    LG_PAGE), extent, szind, true);
740 	}
741 }
742 
743 static void
744 extent_gdump_add(tsdn_t *tsdn, const extent_t *extent) {
745 	cassert(config_prof);
746 	/* prof_gdump() requirement. */
747 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
748 	    WITNESS_RANK_CORE, 0);
749 
750 	if (opt_prof && extent_state_get(extent) == extent_state_active) {
751 		size_t nadd = extent_size_get(extent) >> LG_PAGE;
752 		size_t cur = atomic_fetch_add_zu(&curpages, nadd,
753 		    ATOMIC_RELAXED) + nadd;
754 		size_t high = atomic_load_zu(&highpages, ATOMIC_RELAXED);
755 		while (cur > high && !atomic_compare_exchange_weak_zu(
756 		    &highpages, &high, cur, ATOMIC_RELAXED, ATOMIC_RELAXED)) {
757 			/*
758 			 * Don't refresh cur, because it may have decreased
759 			 * since this thread lost the highpages update race.
760 			 * Note that high is updated in case of CAS failure.
761 			 */
762 		}
763 		if (cur > high && prof_gdump_get_unlocked()) {
764 			prof_gdump(tsdn);
765 		}
766 	}
767 }
768 
769 static void
770 extent_gdump_sub(tsdn_t *tsdn, const extent_t *extent) {
771 	cassert(config_prof);
772 
773 	if (opt_prof && extent_state_get(extent) == extent_state_active) {
774 		size_t nsub = extent_size_get(extent) >> LG_PAGE;
775 		assert(atomic_load_zu(&curpages, ATOMIC_RELAXED) >= nsub);
776 		atomic_fetch_sub_zu(&curpages, nsub, ATOMIC_RELAXED);
777 	}
778 }
779 
780 static bool
781 extent_register_impl(tsdn_t *tsdn, extent_t *extent, bool gdump_add) {
782 	rtree_ctx_t rtree_ctx_fallback;
783 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
784 	rtree_leaf_elm_t *elm_a, *elm_b;
785 
786 	/*
787 	 * We need to hold the lock to protect against a concurrent coalesce
788 	 * operation that sees us in a partial state.
789 	 */
790 	extent_lock(tsdn, extent);
791 
792 	if (extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, false, true,
793 	    &elm_a, &elm_b)) {
794 		extent_unlock(tsdn, extent);
795 		return true;
796 	}
797 
798 	szind_t szind = extent_szind_get_maybe_invalid(extent);
799 	bool slab = extent_slab_get(extent);
800 	extent_rtree_write_acquired(tsdn, elm_a, elm_b, extent, szind, slab);
801 	if (slab) {
802 		extent_interior_register(tsdn, rtree_ctx, extent, szind);
803 	}
804 
805 	extent_unlock(tsdn, extent);
806 
807 	if (config_prof && gdump_add) {
808 		extent_gdump_add(tsdn, extent);
809 	}
810 
811 	return false;
812 }
813 
814 static bool
815 extent_register(tsdn_t *tsdn, extent_t *extent) {
816 	return extent_register_impl(tsdn, extent, true);
817 }
818 
819 static bool
820 extent_register_no_gdump_add(tsdn_t *tsdn, extent_t *extent) {
821 	return extent_register_impl(tsdn, extent, false);
822 }
823 
824 static void
825 extent_reregister(tsdn_t *tsdn, extent_t *extent) {
826 	bool err = extent_register(tsdn, extent);
827 	assert(!err);
828 }
829 
830 /*
831  * Removes all pointers to the given extent from the global rtree indices for
832  * its interior.  This is relevant for slab extents, for which we need to do
833  * metadata lookups at places other than the head of the extent.  We deregister
834  * on the interior, then, when an extent moves from being an active slab to an
835  * inactive state.
836  */
837 static void
838 extent_interior_deregister(tsdn_t *tsdn, rtree_ctx_t *rtree_ctx,
839     extent_t *extent) {
840 	size_t i;
841 
842 	assert(extent_slab_get(extent));
843 
844 	for (i = 1; i < (extent_size_get(extent) >> LG_PAGE) - 1; i++) {
845 		rtree_clear(tsdn, &extents_rtree, rtree_ctx,
846 		    (uintptr_t)extent_base_get(extent) + (uintptr_t)(i <<
847 		    LG_PAGE));
848 	}
849 }
850 
851 /*
852  * Removes all pointers to the given extent from the global rtree.
853  */
854 static void
855 extent_deregister_impl(tsdn_t *tsdn, extent_t *extent, bool gdump) {
856 	rtree_ctx_t rtree_ctx_fallback;
857 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
858 	rtree_leaf_elm_t *elm_a, *elm_b;
859 	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, extent, true, false,
860 	    &elm_a, &elm_b);
861 
862 	extent_lock(tsdn, extent);
863 
864 	extent_rtree_write_acquired(tsdn, elm_a, elm_b, NULL, SC_NSIZES, false);
865 	if (extent_slab_get(extent)) {
866 		extent_interior_deregister(tsdn, rtree_ctx, extent);
867 		extent_slab_set(extent, false);
868 	}
869 
870 	extent_unlock(tsdn, extent);
871 
872 	if (config_prof && gdump) {
873 		extent_gdump_sub(tsdn, extent);
874 	}
875 }
876 
877 static void
878 extent_deregister(tsdn_t *tsdn, extent_t *extent) {
879 	extent_deregister_impl(tsdn, extent, true);
880 }
881 
882 static void
883 extent_deregister_no_gdump_sub(tsdn_t *tsdn, extent_t *extent) {
884 	extent_deregister_impl(tsdn, extent, false);
885 }
886 
887 /*
888  * Tries to find and remove an extent from extents that can be used for the
889  * given allocation request.
890  */
891 static extent_t *
892 extent_recycle_extract(tsdn_t *tsdn, arena_t *arena,
893     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
894     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
895     bool growing_retained) {
896 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
897 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
898 	assert(alignment > 0);
899 	if (config_debug && new_addr != NULL) {
900 		/*
901 		 * Non-NULL new_addr has two use cases:
902 		 *
903 		 *   1) Recycle a known-extant extent, e.g. during purging.
904 		 *   2) Perform in-place expanding reallocation.
905 		 *
906 		 * Regardless of use case, new_addr must either refer to a
907 		 * non-existing extent, or to the base of an extant extent,
908 		 * since only active slabs support interior lookups (which of
909 		 * course cannot be recycled).
910 		 */
911 		assert(PAGE_ADDR2BASE(new_addr) == new_addr);
912 		assert(pad == 0);
913 		assert(alignment <= PAGE);
914 	}
915 
916 	size_t esize = size + pad;
917 	malloc_mutex_lock(tsdn, &extents->mtx);
918 	extent_hooks_assure_initialized(arena, r_extent_hooks);
919 	extent_t *extent;
920 	if (new_addr != NULL) {
921 		extent = extent_lock_from_addr(tsdn, rtree_ctx, new_addr,
922 		    false);
923 		if (extent != NULL) {
924 			/*
925 			 * We might null-out extent to report an error, but we
926 			 * still need to unlock the associated mutex after.
927 			 */
928 			extent_t *unlock_extent = extent;
929 			assert(extent_base_get(extent) == new_addr);
930 			if (extent_arena_get(extent) != arena ||
931 			    extent_size_get(extent) < esize ||
932 			    extent_state_get(extent) !=
933 			    extents_state_get(extents)) {
934 				extent = NULL;
935 			}
936 			extent_unlock(tsdn, unlock_extent);
937 		}
938 	} else {
939 		extent = extents_fit_locked(tsdn, arena, extents, esize,
940 		    alignment);
941 	}
942 	if (extent == NULL) {
943 		malloc_mutex_unlock(tsdn, &extents->mtx);
944 		return NULL;
945 	}
946 
947 	extent_activate_locked(tsdn, arena, extents, extent);
948 	malloc_mutex_unlock(tsdn, &extents->mtx);
949 
950 	return extent;
951 }
952 
953 /*
954  * Given an allocation request and an extent guaranteed to be able to satisfy
955  * it, this splits off lead and trail extents, leaving extent pointing to an
956  * extent satisfying the allocation.
957  * This function doesn't put lead or trail into any extents_t; it's the caller's
958  * job to ensure that they can be reused.
959  */
960 typedef enum {
961 	/*
962 	 * Split successfully.  lead, extent, and trail, are modified to extents
963 	 * describing the ranges before, in, and after the given allocation.
964 	 */
965 	extent_split_interior_ok,
966 	/*
967 	 * The extent can't satisfy the given allocation request.  None of the
968 	 * input extent_t *s are touched.
969 	 */
970 	extent_split_interior_cant_alloc,
971 	/*
972 	 * In a potentially invalid state.  Must leak (if *to_leak is non-NULL),
973 	 * and salvage what's still salvageable (if *to_salvage is non-NULL).
974 	 * None of lead, extent, or trail are valid.
975 	 */
976 	extent_split_interior_error
977 } extent_split_interior_result_t;
978 
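/*
 * Layout sketch (informal): on success the original extent is carved as
 *
 *	[ lead (optional) | extent (size + pad) | trail (optional) ]
 *
 * where lead absorbs the bytes needed to reach the requested alignment and
 * trail holds whatever remains past the allocation.  The caller is
 * responsible for re-homing lead/trail (or to_leak/to_salvage on error).
 */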
979 static extent_split_interior_result_t
980 extent_split_interior(tsdn_t *tsdn, arena_t *arena,
981     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx,
982     /* The result of splitting, in case of success. */
983     extent_t **extent, extent_t **lead, extent_t **trail,
984     /* The mess to clean up, in case of error. */
985     extent_t **to_leak, extent_t **to_salvage,
986     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
987     szind_t szind, bool growing_retained) {
988 	size_t esize = size + pad;
989 	size_t leadsize = ALIGNMENT_CEILING((uintptr_t)extent_base_get(*extent),
990 	    PAGE_CEILING(alignment)) - (uintptr_t)extent_base_get(*extent);
991 	assert(new_addr == NULL || leadsize == 0);
992 	if (extent_size_get(*extent) < leadsize + esize) {
993 		return extent_split_interior_cant_alloc;
994 	}
995 	size_t trailsize = extent_size_get(*extent) - leadsize - esize;
996 
997 	*lead = NULL;
998 	*trail = NULL;
999 	*to_leak = NULL;
1000 	*to_salvage = NULL;
1001 
1002 	/* Split the lead. */
1003 	if (leadsize != 0) {
1004 		*lead = *extent;
1005 		*extent = extent_split_impl(tsdn, arena, r_extent_hooks,
1006 		    *lead, leadsize, SC_NSIZES, false, esize + trailsize, szind,
1007 		    slab, growing_retained);
1008 		if (*extent == NULL) {
1009 			*to_leak = *lead;
1010 			*lead = NULL;
1011 			return extent_split_interior_error;
1012 		}
1013 	}
1014 
1015 	/* Split the trail. */
1016 	if (trailsize != 0) {
1017 		*trail = extent_split_impl(tsdn, arena, r_extent_hooks, *extent,
1018 		    esize, szind, slab, trailsize, SC_NSIZES, false,
1019 		    growing_retained);
1020 		if (*trail == NULL) {
1021 			*to_leak = *extent;
1022 			*to_salvage = *lead;
1023 			*lead = NULL;
1024 			*extent = NULL;
1025 			return extent_split_interior_error;
1026 		}
1027 	}
1028 
1029 	if (leadsize == 0 && trailsize == 0) {
1030 		/*
1031 		 * Splitting causes szind to be set as a side effect, but no
1032 		 * splitting occurred.
1033 		 */
1034 		extent_szind_set(*extent, szind);
1035 		if (szind != SC_NSIZES) {
1036 			rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx,
1037 			    (uintptr_t)extent_addr_get(*extent), szind, slab);
1038 			if (slab && extent_size_get(*extent) > PAGE) {
1039 				rtree_szind_slab_update(tsdn, &extents_rtree,
1040 				    rtree_ctx,
1041 				    (uintptr_t)extent_past_get(*extent) -
1042 				    (uintptr_t)PAGE, szind, slab);
1043 			}
1044 		}
1045 	}
1046 
1047 	return extent_split_interior_ok;
1048 }
1049 
1050 /*
1051  * This fulfills the indicated allocation request out of the given extent (which
1052  * the caller should have ensured was big enough).  If there's any unused space
1053  * before or after the resulting allocation, that space is given its own extent
1054  * and put back into extents.
1055  */
1056 static extent_t *
1057 extent_recycle_split(tsdn_t *tsdn, arena_t *arena,
1058     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1059     void *new_addr, size_t size, size_t pad, size_t alignment, bool slab,
1060     szind_t szind, extent_t *extent, bool growing_retained) {
1061 	extent_t *lead;
1062 	extent_t *trail;
1063 	extent_t *to_leak;
1064 	extent_t *to_salvage;
1065 
1066 	extent_split_interior_result_t result = extent_split_interior(
1067 	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1068 	    &to_leak, &to_salvage, new_addr, size, pad, alignment, slab, szind,
1069 	    growing_retained);
1070 
1071 	if (!maps_coalesce && result != extent_split_interior_ok
1072 	    && !opt_retain) {
1073 		/*
1074 		 * Split isn't supported (implies Windows w/o retain).  Avoid
1075 		 * leaking the extents.
1076 		 */
1077 		assert(to_leak != NULL && lead == NULL && trail == NULL);
1078 		extent_deactivate(tsdn, arena, extents, to_leak);
1079 		return NULL;
1080 	}
1081 
1082 	if (result == extent_split_interior_ok) {
1083 		if (lead != NULL) {
1084 			extent_deactivate(tsdn, arena, extents, lead);
1085 		}
1086 		if (trail != NULL) {
1087 			extent_deactivate(tsdn, arena, extents, trail);
1088 		}
1089 		return extent;
1090 	} else {
1091 		/*
1092 		 * We should have picked an extent that was large enough to
1093 		 * fulfill our allocation request.
1094 		 */
1095 		assert(result == extent_split_interior_error);
1096 		if (to_salvage != NULL) {
1097 			extent_deregister(tsdn, to_salvage);
1098 		}
1099 		if (to_leak != NULL) {
1100 			void *leak = extent_base_get(to_leak);
1101 			extent_deregister_no_gdump_sub(tsdn, to_leak);
1102 			extents_abandon_vm(tsdn, arena, r_extent_hooks, extents,
1103 			    to_leak, growing_retained);
1104 			assert(extent_lock_from_addr(tsdn, rtree_ctx, leak,
1105 			    false) == NULL);
1106 		}
1107 		return NULL;
1108 	}
1109 	unreachable();
1110 }
1111 
1112 static bool
1113 extent_need_manual_zero(arena_t *arena) {
1114 	/*
1115 	 * Need to manually zero the extent on repopulating if either: 1) non-
1116 	 * default extent hooks are installed (in which case the purge semantics
1117 	 * may change); or 2) transparent huge pages are enabled.
1118 	 */
1119 	return (!arena_has_default_hooks(arena) ||
1120 		(opt_thp == thp_mode_always));
1121 }
1122 
1123 /*
1124  * Tries to satisfy the given allocation request by reusing one of the extents
1125  * in the given extents_t.
1126  */
1127 static extent_t *
1128 extent_recycle(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1129     extents_t *extents, void *new_addr, size_t size, size_t pad,
1130     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit,
1131     bool growing_retained) {
1132 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1133 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1134 	assert(new_addr == NULL || !slab);
1135 	assert(pad == 0 || !slab);
1136 	assert(!*zero || !slab);
1137 
1138 	rtree_ctx_t rtree_ctx_fallback;
1139 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1140 
1141 	extent_t *extent = extent_recycle_extract(tsdn, arena, r_extent_hooks,
1142 	    rtree_ctx, extents, new_addr, size, pad, alignment, slab,
1143 	    growing_retained);
1144 	if (extent == NULL) {
1145 		return NULL;
1146 	}
1147 
1148 	extent = extent_recycle_split(tsdn, arena, r_extent_hooks, rtree_ctx,
1149 	    extents, new_addr, size, pad, alignment, slab, szind, extent,
1150 	    growing_retained);
1151 	if (extent == NULL) {
1152 		return NULL;
1153 	}
1154 
1155 	if (*commit && !extent_committed_get(extent)) {
1156 		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent,
1157 		    0, extent_size_get(extent), growing_retained)) {
1158 			extent_record(tsdn, arena, r_extent_hooks, extents,
1159 			    extent, growing_retained);
1160 			return NULL;
1161 		}
1162 		if (!extent_need_manual_zero(arena)) {
1163 			extent_zeroed_set(extent, true);
1164 		}
1165 	}
1166 
1167 	if (extent_committed_get(extent)) {
1168 		*commit = true;
1169 	}
1170 	if (extent_zeroed_get(extent)) {
1171 		*zero = true;
1172 	}
1173 
1174 	if (pad != 0) {
1175 		extent_addr_randomize(tsdn, extent, alignment);
1176 	}
1177 	assert(extent_state_get(extent) == extent_state_active);
1178 	if (slab) {
1179 		extent_slab_set(extent, slab);
1180 		extent_interior_register(tsdn, rtree_ctx, extent, szind);
1181 	}
1182 
1183 	if (*zero) {
1184 		void *addr = extent_base_get(extent);
1185 		if (!extent_zeroed_get(extent)) {
1186 			size_t size = extent_size_get(extent);
1187 			if (extent_need_manual_zero(arena) ||
1188 			    pages_purge_forced(addr, size)) {
1189 				memset(addr, 0, size);
1190 			}
1191 		} else if (config_debug) {
1192 			size_t *p = (size_t *)(uintptr_t)addr;
1193 			/* Check the first page only. */
1194 			for (size_t i = 0; i < PAGE / sizeof(size_t); i++) {
1195 				assert(p[i] == 0);
1196 			}
1197 		}
1198 	}
1199 	return extent;
1200 }
1201 
1202 /*
1203  * If the caller specifies (!*zero), it is still possible to receive zeroed
1204  * memory, in which case *zero is toggled to true.  arena_extent_alloc() takes
1205  * advantage of this to avoid demanding zeroed extents, but taking advantage of
1206  * them if they are returned.
1207  */
1208 static void *
1209 extent_alloc_core(tsdn_t *tsdn, arena_t *arena, void *new_addr, size_t size,
1210     size_t alignment, bool *zero, bool *commit, dss_prec_t dss_prec) {
1211 	void *ret;
1212 
1213 	assert(size != 0);
1214 	assert(alignment != 0);
1215 
1216 	/* "primary" dss. */
1217 	if (have_dss && dss_prec == dss_prec_primary && (ret =
1218 	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1219 	    commit)) != NULL) {
1220 		return ret;
1221 	}
1222 	/* mmap. */
1223 	if ((ret = extent_alloc_mmap(new_addr, size, alignment, zero, commit))
1224 	    != NULL) {
1225 		return ret;
1226 	}
1227 	/* "secondary" dss. */
1228 	if (have_dss && dss_prec == dss_prec_secondary && (ret =
1229 	    extent_alloc_dss(tsdn, arena, new_addr, size, alignment, zero,
1230 	    commit)) != NULL) {
1231 		return ret;
1232 	}
1233 
1234 	/* All strategies for allocation failed. */
1235 	return NULL;
1236 }
1237 
1238 static void *
1239 extent_alloc_default_impl(tsdn_t *tsdn, arena_t *arena, void *new_addr,
1240     size_t size, size_t alignment, bool *zero, bool *commit) {
1241 	void *ret = extent_alloc_core(tsdn, arena, new_addr, size, alignment, zero,
1242 	    commit, (dss_prec_t)atomic_load_u(&arena->dss_prec,
1243 	    ATOMIC_RELAXED));
1244 	if (have_madvise_huge && ret) {
1245 		pages_set_thp_state(ret, size);
1246 	}
1247 	return ret;
1248 }
1249 
1250 static void *
1251 extent_alloc_default(extent_hooks_t *extent_hooks, void *new_addr, size_t size,
1252     size_t alignment, bool *zero, bool *commit, unsigned arena_ind) {
1253 	tsdn_t *tsdn;
1254 	arena_t *arena;
1255 
1256 	tsdn = tsdn_fetch();
1257 	arena = arena_get(tsdn, arena_ind, false);
1258 	/*
1259 	 * The arena we're allocating on behalf of must have been initialized
1260 	 * already.
1261 	 */
1262 	assert(arena != NULL);
1263 
1264 	return extent_alloc_default_impl(tsdn, arena, new_addr, size,
1265 	    ALIGNMENT_CEILING(alignment, PAGE), zero, commit);
1266 }
1267 
1268 static void
1269 extent_hook_pre_reentrancy(tsdn_t *tsdn, arena_t *arena) {
1270 	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1271 	if (arena == arena_get(tsd_tsdn(tsd), 0, false)) {
1272 		/*
1273 		 * The only legitimate case of customized extent hooks for a0 is
1274 		 * hooks with no allocation activities.  One such example is to
1275 		 * place metadata on pre-allocated resources such as huge pages.
1276 		 * In that case, rely on reentrancy_level checks to catch
1277 		 * infinite recursions.
1278 		 */
1279 		pre_reentrancy(tsd, NULL);
1280 	} else {
1281 		pre_reentrancy(tsd, arena);
1282 	}
1283 }
1284 
1285 static void
1286 extent_hook_post_reentrancy(tsdn_t *tsdn) {
1287 	tsd_t *tsd = tsdn_null(tsdn) ? tsd_fetch() : tsdn_tsd(tsdn);
1288 	post_reentrancy(tsd);
1289 }
1290 
1291 /*
1292  * If virtual memory is retained, create increasingly larger extents from which
1293  * to split requested extents in order to limit the total number of disjoint
1294  * virtual memory ranges retained by each arena.
1295  */
1296 static extent_t *
1297 extent_grow_retained(tsdn_t *tsdn, arena_t *arena,
1298     extent_hooks_t **r_extent_hooks, size_t size, size_t pad, size_t alignment,
1299     bool slab, szind_t szind, bool *zero, bool *commit) {
1300 	malloc_mutex_assert_owner(tsdn, &arena->extent_grow_mtx);
1301 	assert(pad == 0 || !slab);
1302 	assert(!*zero || !slab);
1303 
1304 	size_t esize = size + pad;
1305 	size_t alloc_size_min = esize + PAGE_CEILING(alignment) - PAGE;
1306 	/* Beware size_t wrap-around. */
1307 	if (alloc_size_min < esize) {
1308 		goto label_err;
1309 	}
1310 	/*
1311 	 * Find the next extent size in the series that would be large enough to
1312 	 * satisfy this request.
1313 	 */
1314 	pszind_t egn_skip = 0;
1315 	size_t alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
1316 	while (alloc_size < alloc_size_min) {
1317 		egn_skip++;
1318 		if (arena->extent_grow_next + egn_skip >=
1319 		    sz_psz2ind(SC_LARGE_MAXCLASS)) {
1320 			/* Outside legal range. */
1321 			goto label_err;
1322 		}
1323 		alloc_size = sz_pind2sz(arena->extent_grow_next + egn_skip);
1324 	}
1325 
1326 	extent_t *extent = extent_alloc(tsdn, arena);
1327 	if (extent == NULL) {
1328 		goto label_err;
1329 	}
1330 	bool zeroed = false;
1331 	bool committed = false;
1332 
1333 	void *ptr;
1334 	if (*r_extent_hooks == &extent_hooks_default) {
1335 		ptr = extent_alloc_default_impl(tsdn, arena, NULL,
1336 		    alloc_size, PAGE, &zeroed, &committed);
1337 	} else {
1338 		extent_hook_pre_reentrancy(tsdn, arena);
1339 		ptr = (*r_extent_hooks)->alloc(*r_extent_hooks, NULL,
1340 		    alloc_size, PAGE, &zeroed, &committed,
1341 		    arena_ind_get(arena));
1342 		extent_hook_post_reentrancy(tsdn);
1343 	}
1344 
1345 	extent_init(extent, arena, ptr, alloc_size, false, SC_NSIZES,
1346 	    arena_extent_sn_next(arena), extent_state_active, zeroed,
1347 	    committed, true, EXTENT_IS_HEAD);
1348 	if (ptr == NULL) {
1349 		extent_dalloc(tsdn, arena, extent);
1350 		goto label_err;
1351 	}
1352 
1353 	if (extent_register_no_gdump_add(tsdn, extent)) {
1354 		extent_dalloc(tsdn, arena, extent);
1355 		goto label_err;
1356 	}
1357 
1358 	if (extent_zeroed_get(extent) && extent_committed_get(extent)) {
1359 		*zero = true;
1360 	}
1361 	if (extent_committed_get(extent)) {
1362 		*commit = true;
1363 	}
1364 
1365 	rtree_ctx_t rtree_ctx_fallback;
1366 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1367 
1368 	extent_t *lead;
1369 	extent_t *trail;
1370 	extent_t *to_leak;
1371 	extent_t *to_salvage;
1372 	extent_split_interior_result_t result = extent_split_interior(
1373 	    tsdn, arena, r_extent_hooks, rtree_ctx, &extent, &lead, &trail,
1374 	    &to_leak, &to_salvage, NULL, size, pad, alignment, slab, szind,
1375 	    true);
1376 
1377 	if (result == extent_split_interior_ok) {
1378 		if (lead != NULL) {
1379 			extent_record(tsdn, arena, r_extent_hooks,
1380 			    &arena->extents_retained, lead, true);
1381 		}
1382 		if (trail != NULL) {
1383 			extent_record(tsdn, arena, r_extent_hooks,
1384 			    &arena->extents_retained, trail, true);
1385 		}
1386 	} else {
1387 		/*
1388 		 * We should have allocated a sufficiently large extent; the
1389 		 * cant_alloc case should not occur.
1390 		 */
1391 		assert(result == extent_split_interior_error);
1392 		if (to_salvage != NULL) {
1393 			if (config_prof) {
1394 				extent_gdump_add(tsdn, to_salvage);
1395 			}
1396 			extent_record(tsdn, arena, r_extent_hooks,
1397 			    &arena->extents_retained, to_salvage, true);
1398 		}
1399 		if (to_leak != NULL) {
1400 			extent_deregister_no_gdump_sub(tsdn, to_leak);
1401 			extents_abandon_vm(tsdn, arena, r_extent_hooks,
1402 			    &arena->extents_retained, to_leak, true);
1403 		}
1404 		goto label_err;
1405 	}
1406 
1407 	if (*commit && !extent_committed_get(extent)) {
1408 		if (extent_commit_impl(tsdn, arena, r_extent_hooks, extent, 0,
1409 		    extent_size_get(extent), true)) {
1410 			extent_record(tsdn, arena, r_extent_hooks,
1411 			    &arena->extents_retained, extent, true);
1412 			goto label_err;
1413 		}
1414 		if (!extent_need_manual_zero(arena)) {
1415 			extent_zeroed_set(extent, true);
1416 		}
1417 	}
1418 
1419 	/*
1420 	 * Increment extent_grow_next if doing so wouldn't exceed the allowed
1421 	 * range.
1422 	 */
1423 	if (arena->extent_grow_next + egn_skip + 1 <=
1424 	    arena->retain_grow_limit) {
1425 		arena->extent_grow_next += egn_skip + 1;
1426 	} else {
1427 		arena->extent_grow_next = arena->retain_grow_limit;
1428 	}
1429 	/* All opportunities for failure are past. */
1430 	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1431 
1432 	if (config_prof) {
1433 		/* Adjust gdump stats now that extent is final size. */
1434 		extent_gdump_add(tsdn, extent);
1435 	}
1436 	if (pad != 0) {
1437 		extent_addr_randomize(tsdn, extent, alignment);
1438 	}
1439 	if (slab) {
1440 		rtree_ctx_t rtree_ctx_fallback;
1441 		rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn,
1442 		    &rtree_ctx_fallback);
1443 
1444 		extent_slab_set(extent, true);
1445 		extent_interior_register(tsdn, rtree_ctx, extent, szind);
1446 	}
1447 	if (*zero && !extent_zeroed_get(extent)) {
1448 		void *addr = extent_base_get(extent);
1449 		size_t size = extent_size_get(extent);
1450 		if (extent_need_manual_zero(arena) ||
1451 		    pages_purge_forced(addr, size)) {
1452 			memset(addr, 0, size);
1453 		}
1454 	}
1455 
1456 	return extent;
1457 label_err:
1458 	malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1459 	return NULL;
1460 }
1461 
1462 static extent_t *
1463 extent_alloc_retained(tsdn_t *tsdn, arena_t *arena,
1464     extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1465     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1466 	assert(size != 0);
1467 	assert(alignment != 0);
1468 
1469 	malloc_mutex_lock(tsdn, &arena->extent_grow_mtx);
1470 
1471 	extent_t *extent = extent_recycle(tsdn, arena, r_extent_hooks,
1472 	    &arena->extents_retained, new_addr, size, pad, alignment, slab,
1473 	    szind, zero, commit, true);
1474 	if (extent != NULL) {
1475 		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1476 		if (config_prof) {
1477 			extent_gdump_add(tsdn, extent);
1478 		}
1479 	} else if (opt_retain && new_addr == NULL) {
1480 		extent = extent_grow_retained(tsdn, arena, r_extent_hooks, size,
1481 		    pad, alignment, slab, szind, zero, commit);
1482 		/* extent_grow_retained() always releases extent_grow_mtx. */
1483 	} else {
1484 		malloc_mutex_unlock(tsdn, &arena->extent_grow_mtx);
1485 	}
1486 	malloc_mutex_assert_not_owner(tsdn, &arena->extent_grow_mtx);
1487 
1488 	return extent;
1489 }
1490 
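/*
 * Allocate a fresh mapping through the alloc hook (or the default
 * implementation, called directly so that tsdn is propagated), then
 * initialize and register the resulting extent.  Returns NULL on any failure.
 */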
1491 static extent_t *
1492 extent_alloc_wrapper_hard(tsdn_t *tsdn, arena_t *arena,
1493     extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1494     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1495 	size_t esize = size + pad;
1496 	extent_t *extent = extent_alloc(tsdn, arena);
1497 	if (extent == NULL) {
1498 		return NULL;
1499 	}
1500 	void *addr;
1501 	size_t palignment = ALIGNMENT_CEILING(alignment, PAGE);
1502 	if (*r_extent_hooks == &extent_hooks_default) {
1503 		/* Call directly to propagate tsdn. */
1504 		addr = extent_alloc_default_impl(tsdn, arena, new_addr, esize,
1505 		    palignment, zero, commit);
1506 	} else {
1507 		extent_hook_pre_reentrancy(tsdn, arena);
1508 		addr = (*r_extent_hooks)->alloc(*r_extent_hooks, new_addr,
1509 		    esize, palignment, zero, commit, arena_ind_get(arena));
1510 		extent_hook_post_reentrancy(tsdn);
1511 	}
1512 	if (addr == NULL) {
1513 		extent_dalloc(tsdn, arena, extent);
1514 		return NULL;
1515 	}
1516 	extent_init(extent, arena, addr, esize, slab, szind,
1517 	    arena_extent_sn_next(arena), extent_state_active, *zero, *commit,
1518 	    true, EXTENT_NOT_HEAD);
1519 	if (pad != 0) {
1520 		extent_addr_randomize(tsdn, extent, alignment);
1521 	}
1522 	if (extent_register(tsdn, extent)) {
1523 		extent_dalloc(tsdn, arena, extent);
1524 		return NULL;
1525 	}
1526 
1527 	return extent;
1528 }
1529 
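/*
 * Top-level extent allocation: prefer retained virtual memory, and fall back
 * to a fresh mapping unless opt.retain is enabled with a fixed new_addr, in
 * which case a fresh mmap is very unlikely to satisfy the request.
 */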
1530 extent_t *
1531 extent_alloc_wrapper(tsdn_t *tsdn, arena_t *arena,
1532     extent_hooks_t **r_extent_hooks, void *new_addr, size_t size, size_t pad,
1533     size_t alignment, bool slab, szind_t szind, bool *zero, bool *commit) {
1534 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1535 	    WITNESS_RANK_CORE, 0);
1536 
1537 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1538 
1539 	extent_t *extent = extent_alloc_retained(tsdn, arena, r_extent_hooks,
1540 	    new_addr, size, pad, alignment, slab, szind, zero, commit);
1541 	if (extent == NULL) {
1542 		if (opt_retain && new_addr != NULL) {
1543 			/*
1544 			 * When retain is enabled and new_addr is set, we do not
1545 			 * attempt extent_alloc_wrapper_hard, whose mmap is very
1546 			 * unlikely to return new_addr (unless new_addr happens
1547 			 * to be at the end of an existing mapping).
1548 			 */
1549 			return NULL;
1550 		}
1551 		extent = extent_alloc_wrapper_hard(tsdn, arena, r_extent_hooks,
1552 		    new_addr, size, pad, alignment, slab, szind, zero, commit);
1553 	}
1554 
1555 	assert(extent == NULL || extent_dumpable_get(extent));
1556 	return extent;
1557 }
1558 
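/*
 * Two extents can be coalesced only if they belong to the same arena, the
 * outer (neighboring) extent is in the state tracked by the given extents_t,
 * and both extents have the same commit status.
 */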
1559 static bool
1560 extent_can_coalesce(arena_t *arena, extents_t *extents, const extent_t *inner,
1561     const extent_t *outer) {
1562 	assert(extent_arena_get(inner) == arena);
1563 	if (extent_arena_get(outer) != arena) {
1564 		return false;
1565 	}
1566 
1567 	assert(extent_state_get(inner) == extent_state_active);
1568 	if (extent_state_get(outer) != extents->state) {
1569 		return false;
1570 	}
1571 
1572 	if (extent_committed_get(inner) != extent_committed_get(outer)) {
1573 		return false;
1574 	}
1575 
1576 	return true;
1577 }
1578 
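/*
 * Coalesce inner with its neighbor outer: activate outer, drop extents->mtx
 * around the merge, and deactivate outer again if the merge fails.  Returns
 * true on error.
 */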
1579 static bool
1580 extent_coalesce(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1581     extents_t *extents, extent_t *inner, extent_t *outer, bool forward,
1582     bool growing_retained) {
1583 	assert(extent_can_coalesce(arena, extents, inner, outer));
1584 
1585 	extent_activate_locked(tsdn, arena, extents, outer);
1586 
1587 	malloc_mutex_unlock(tsdn, &extents->mtx);
1588 	bool err = extent_merge_impl(tsdn, arena, r_extent_hooks,
1589 	    forward ? inner : outer, forward ? outer : inner, growing_retained);
1590 	malloc_mutex_lock(tsdn, &extents->mtx);
1591 
1592 	if (err) {
1593 		extent_deactivate_locked(tsdn, arena, extents, outer);
1594 	}
1595 
1596 	return err;
1597 }
1598 
1599 static extent_t *
1600 extent_try_coalesce_impl(tsdn_t *tsdn, arena_t *arena,
1601     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1602     extent_t *extent, bool *coalesced, bool growing_retained,
1603     bool inactive_only) {
1604 	/*
1605 	 * For large size classes, callers pass inactive_only to avoid checking
1606 	 * / locking active neighbors, since large extents are eagerly coalesced
1607 	 * on deallocation and that lock contention would be costly.
1608 	 */
1609 	/*
1610 	 * Continue attempting to coalesce until failure, to protect against
1611 	 * races with other threads that are thwarted by this one.
1612 	 */
1613 	bool again;
1614 	do {
1615 		again = false;
1616 
1617 		/* Try to coalesce forward. */
1618 		extent_t *next = extent_lock_from_addr(tsdn, rtree_ctx,
1619 		    extent_past_get(extent), inactive_only);
1620 		if (next != NULL) {
1621 			/*
1622 			 * extents->mtx only protects against races for
1623 			 * like-state extents, so call extent_can_coalesce()
1624 			 * before releasing next's pool lock.
1625 			 */
1626 			bool can_coalesce = extent_can_coalesce(arena, extents,
1627 			    extent, next);
1628 
1629 			extent_unlock(tsdn, next);
1630 
1631 			if (can_coalesce && !extent_coalesce(tsdn, arena,
1632 			    r_extent_hooks, extents, extent, next, true,
1633 			    growing_retained)) {
1634 				if (extents->delay_coalesce) {
1635 					/* Do minimal coalescing. */
1636 					*coalesced = true;
1637 					return extent;
1638 				}
1639 				again = true;
1640 			}
1641 		}
1642 
1643 		/* Try to coalesce backward. */
1644 		extent_t *prev = NULL;
1645 		if (extent_before_get(extent) != NULL) {
1646 			prev = extent_lock_from_addr(tsdn, rtree_ctx,
1647 			    extent_before_get(extent), inactive_only);
1648 		}
1649 		if (prev != NULL) {
1650 			bool can_coalesce = extent_can_coalesce(arena, extents,
1651 			    extent, prev);
1652 			extent_unlock(tsdn, prev);
1653 
1654 			if (can_coalesce && !extent_coalesce(tsdn, arena,
1655 			    r_extent_hooks, extents, extent, prev, false,
1656 			    growing_retained)) {
1657 				extent = prev;
1658 				if (extents->delay_coalesce) {
1659 					/* Do minimal coalescing. */
1660 					*coalesced = true;
1661 					return extent;
1662 				}
1663 				again = true;
1664 			}
1665 		}
1666 	} while (again);
1667 
1668 	if (extents->delay_coalesce) {
1669 		*coalesced = false;
1670 	}
1671 	return extent;
1672 }
1673 
1674 static extent_t *
1675 extent_try_coalesce(tsdn_t *tsdn, arena_t *arena,
1676     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1677     extent_t *extent, bool *coalesced, bool growing_retained) {
1678 	return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
1679 	    extents, extent, coalesced, growing_retained, false);
1680 }
1681 
1682 static extent_t *
1683 extent_try_coalesce_large(tsdn_t *tsdn, arena_t *arena,
1684     extent_hooks_t **r_extent_hooks, rtree_ctx_t *rtree_ctx, extents_t *extents,
1685     extent_t *extent, bool *coalesced, bool growing_retained) {
1686 	return extent_try_coalesce_impl(tsdn, arena, r_extent_hooks, rtree_ctx,
1687 	    extents, extent, coalesced, growing_retained, true);
1688 }
1689 
1690 /*
1691  * Does the metadata management portion of putting an unused extent into the
1692  * given extents_t (coalescing, deregistering slab interiors, heap operations).
1693  */
1694 static void
1695 extent_record(tsdn_t *tsdn, arena_t *arena, extent_hooks_t **r_extent_hooks,
1696     extents_t *extents, extent_t *extent, bool growing_retained) {
1697 	rtree_ctx_t rtree_ctx_fallback;
1698 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
1699 
1700 	assert((extents_state_get(extents) != extent_state_dirty &&
1701 	    extents_state_get(extents) != extent_state_muzzy) ||
1702 	    !extent_zeroed_get(extent));
1703 
1704 	malloc_mutex_lock(tsdn, &extents->mtx);
1705 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1706 
1707 	extent_szind_set(extent, SC_NSIZES);
1708 	if (extent_slab_get(extent)) {
1709 		extent_interior_deregister(tsdn, rtree_ctx, extent);
1710 		extent_slab_set(extent, false);
1711 	}
1712 
1713 	assert(rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
1714 	    (uintptr_t)extent_base_get(extent), true) == extent);
1715 
1716 	if (!extents->delay_coalesce) {
1717 		extent = extent_try_coalesce(tsdn, arena, r_extent_hooks,
1718 		    rtree_ctx, extents, extent, NULL, growing_retained);
1719 	} else if (extent_size_get(extent) >= SC_LARGE_MINCLASS) {
1720 		assert(extents == &arena->extents_dirty);
1721 		/* Always coalesce large extents eagerly. */
1722 		bool coalesced;
1723 		do {
1724 			assert(extent_state_get(extent) == extent_state_active);
1725 			extent = extent_try_coalesce_large(tsdn, arena,
1726 			    r_extent_hooks, rtree_ctx, extents, extent,
1727 			    &coalesced, growing_retained);
1728 		} while (coalesced);
1729 		if (extent_size_get(extent) >= oversize_threshold) {
1730 			/* Shortcut to purge the oversize extent eagerly. */
1731 			malloc_mutex_unlock(tsdn, &extents->mtx);
1732 			arena_decay_extent(tsdn, arena, r_extent_hooks, extent);
1733 			return;
1734 		}
1735 	}
1736 	extent_deactivate_locked(tsdn, arena, extents, extent);
1737 
1738 	malloc_mutex_unlock(tsdn, &extents->mtx);
1739 }
1740 
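/*
 * Dispose of a gap extent: register it so that it can later be looked up and
 * coalesced, then send it through extent_dalloc_wrapper(); if registration
 * fails, just free the extent_t metadata.
 */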
1741 void
1742 extent_dalloc_gap(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
1743 	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
1744 
1745 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1746 	    WITNESS_RANK_CORE, 0);
1747 
1748 	if (extent_register(tsdn, extent)) {
1749 		extent_dalloc(tsdn, arena, extent);
1750 		return;
1751 	}
1752 	extent_dalloc_wrapper(tsdn, arena, &extent_hooks, extent);
1753 }
1754 
1755 static bool
1756 extent_may_dalloc(void) {
1757 	/* With retain enabled, the default dalloc always fails. */
1758 	return !opt_retain;
1759 }
1760 
1761 static bool
1762 extent_dalloc_default_impl(void *addr, size_t size) {
1763 	if (!have_dss || !extent_in_dss(addr)) {
1764 		return extent_dalloc_mmap(addr, size);
1765 	}
1766 	return true;
1767 }
1768 
1769 static bool
1770 extent_dalloc_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1771     bool committed, unsigned arena_ind) {
1772 	return extent_dalloc_default_impl(addr, size);
1773 }
1774 
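/*
 * Try to return the extent's pages to the system via the dalloc hook (or the
 * default implementation).  On success the extent_t is freed as well; on
 * failure the caller keeps ownership.  Returns true on error.
 */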
1775 static bool
1776 extent_dalloc_wrapper_try(tsdn_t *tsdn, arena_t *arena,
1777     extent_hooks_t **r_extent_hooks, extent_t *extent) {
1778 	bool err;
1779 
1780 	assert(extent_base_get(extent) != NULL);
1781 	assert(extent_size_get(extent) != 0);
1782 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1783 	    WITNESS_RANK_CORE, 0);
1784 
1785 	extent_addr_set(extent, extent_base_get(extent));
1786 
1787 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1788 	/* Try to deallocate. */
1789 	if (*r_extent_hooks == &extent_hooks_default) {
1790 		/* Call directly to propagate tsdn. */
1791 		err = extent_dalloc_default_impl(extent_base_get(extent),
1792 		    extent_size_get(extent));
1793 	} else {
1794 		extent_hook_pre_reentrancy(tsdn, arena);
1795 		err = ((*r_extent_hooks)->dalloc == NULL ||
1796 		    (*r_extent_hooks)->dalloc(*r_extent_hooks,
1797 		    extent_base_get(extent), extent_size_get(extent),
1798 		    extent_committed_get(extent), arena_ind_get(arena)));
1799 		extent_hook_post_reentrancy(tsdn);
1800 	}
1801 
1802 	if (!err) {
1803 		extent_dalloc(tsdn, arena, extent);
1804 	}
1805 
1806 	return err;
1807 }
1808 
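/*
 * Fully deallocate an extent.  Unless the default hooks are in use and
 * opt.retain forbids unmapping, deregister and attempt
 * extent_dalloc_wrapper_try(), reregistering if that fails.  Extents that
 * could not be unmapped are decommitted or purged as far as the hooks allow
 * and recorded in extents_retained for reuse.
 */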
1809 void
1810 extent_dalloc_wrapper(tsdn_t *tsdn, arena_t *arena,
1811     extent_hooks_t **r_extent_hooks, extent_t *extent) {
1812 	assert(extent_dumpable_get(extent));
1813 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1814 	    WITNESS_RANK_CORE, 0);
1815 
1816 	/* Avoid calling the default extent_dalloc unless we have to. */
1817 	if (*r_extent_hooks != &extent_hooks_default || extent_may_dalloc()) {
1818 		/*
1819 		 * Deregister first to avoid a race with other allocating
1820 		 * threads, and reregister if deallocation fails.
1821 		 */
1822 		extent_deregister(tsdn, extent);
1823 		if (!extent_dalloc_wrapper_try(tsdn, arena, r_extent_hooks,
1824 		    extent)) {
1825 			return;
1826 		}
1827 		extent_reregister(tsdn, extent);
1828 	}
1829 
1830 	if (*r_extent_hooks != &extent_hooks_default) {
1831 		extent_hook_pre_reentrancy(tsdn, arena);
1832 	}
1833 	/* Try to decommit; purge if that fails. */
1834 	bool zeroed;
1835 	if (!extent_committed_get(extent)) {
1836 		zeroed = true;
1837 	} else if (!extent_decommit_wrapper(tsdn, arena, r_extent_hooks, extent,
1838 	    0, extent_size_get(extent))) {
1839 		zeroed = true;
1840 	} else if ((*r_extent_hooks)->purge_forced != NULL &&
1841 	    !(*r_extent_hooks)->purge_forced(*r_extent_hooks,
1842 	    extent_base_get(extent), extent_size_get(extent), 0,
1843 	    extent_size_get(extent), arena_ind_get(arena))) {
1844 		zeroed = true;
1845 	} else if (extent_state_get(extent) == extent_state_muzzy ||
1846 	    ((*r_extent_hooks)->purge_lazy != NULL &&
1847 	    !(*r_extent_hooks)->purge_lazy(*r_extent_hooks,
1848 	    extent_base_get(extent), extent_size_get(extent), 0,
1849 	    extent_size_get(extent), arena_ind_get(arena)))) {
1850 		zeroed = false;
1851 	} else {
1852 		zeroed = false;
1853 	}
1854 	if (*r_extent_hooks != &extent_hooks_default) {
1855 		extent_hook_post_reentrancy(tsdn);
1856 	}
1857 	extent_zeroed_set(extent, zeroed);
1858 
1859 	if (config_prof) {
1860 		extent_gdump_sub(tsdn, extent);
1861 	}
1862 
1863 	extent_record(tsdn, arena, r_extent_hooks, &arena->extents_retained,
1864 	    extent, false);
1865 }
1866 
1867 static void
1868 extent_destroy_default_impl(void *addr, size_t size) {
1869 	if (!have_dss || !extent_in_dss(addr)) {
1870 		pages_unmap(addr, size);
1871 	}
1872 }
1873 
1874 static void
1875 extent_destroy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1876     bool committed, unsigned arena_ind) {
1877 	extent_destroy_default_impl(addr, size);
1878 }
1879 
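/*
 * Permanently destroy an extent: deregister it, release its pages via the
 * destroy hook (or the default implementation, which unmaps non-DSS memory),
 * and free the extent_t.
 */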
1880 void
1881 extent_destroy_wrapper(tsdn_t *tsdn, arena_t *arena,
1882     extent_hooks_t **r_extent_hooks, extent_t *extent) {
1883 	assert(extent_base_get(extent) != NULL);
1884 	assert(extent_size_get(extent) != 0);
1885 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1886 	    WITNESS_RANK_CORE, 0);
1887 
1888 	/* Deregister first to avoid a race with other allocating threads. */
1889 	extent_deregister(tsdn, extent);
1890 
1891 	extent_addr_set(extent, extent_base_get(extent));
1892 
1893 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1894 	/* Try to destroy; silently fail otherwise. */
1895 	if (*r_extent_hooks == &extent_hooks_default) {
1896 		/* Call directly to propagate tsdn. */
1897 		extent_destroy_default_impl(extent_base_get(extent),
1898 		    extent_size_get(extent));
1899 	} else if ((*r_extent_hooks)->destroy != NULL) {
1900 		extent_hook_pre_reentrancy(tsdn, arena);
1901 		(*r_extent_hooks)->destroy(*r_extent_hooks,
1902 		    extent_base_get(extent), extent_size_get(extent),
1903 		    extent_committed_get(extent), arena_ind_get(arena));
1904 		extent_hook_post_reentrancy(tsdn);
1905 	}
1906 
1907 	extent_dalloc(tsdn, arena, extent);
1908 }
1909 
1910 static bool
1911 extent_commit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1912     size_t offset, size_t length, unsigned arena_ind) {
1913 	return pages_commit((void *)((uintptr_t)addr + (uintptr_t)offset),
1914 	    length);
1915 }
1916 
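/*
 * Commit [offset, offset + length) within the extent via the commit hook; a
 * missing hook counts as failure.  On success the extent is marked committed.
 * Returns true on error.
 */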
1917 static bool
1918 extent_commit_impl(tsdn_t *tsdn, arena_t *arena,
1919     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1920     size_t length, bool growing_retained) {
1921 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1922 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1923 
1924 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1925 	if (*r_extent_hooks != &extent_hooks_default) {
1926 		extent_hook_pre_reentrancy(tsdn, arena);
1927 	}
1928 	bool err = ((*r_extent_hooks)->commit == NULL ||
1929 	    (*r_extent_hooks)->commit(*r_extent_hooks, extent_base_get(extent),
1930 	    extent_size_get(extent), offset, length, arena_ind_get(arena)));
1931 	if (*r_extent_hooks != &extent_hooks_default) {
1932 		extent_hook_post_reentrancy(tsdn);
1933 	}
1934 	extent_committed_set(extent, extent_committed_get(extent) || !err);
1935 	return err;
1936 }
1937 
1938 bool
1939 extent_commit_wrapper(tsdn_t *tsdn, arena_t *arena,
1940     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1941     size_t length) {
1942 	return extent_commit_impl(tsdn, arena, r_extent_hooks, extent, offset,
1943 	    length, false);
1944 }
1945 
1946 static bool
1947 extent_decommit_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1948     size_t offset, size_t length, unsigned arena_ind) {
1949 	return pages_decommit((void *)((uintptr_t)addr + (uintptr_t)offset),
1950 	    length);
1951 }
1952 
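/*
 * Decommit [offset, offset + length) within the extent via the decommit hook;
 * a missing hook counts as failure.  On success the extent is marked
 * uncommitted.  Returns true on error.
 */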
1953 bool
1954 extent_decommit_wrapper(tsdn_t *tsdn, arena_t *arena,
1955     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1956     size_t length) {
1957 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1958 	    WITNESS_RANK_CORE, 0);
1959 
1960 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1961 
1962 	if (*r_extent_hooks != &extent_hooks_default) {
1963 		extent_hook_pre_reentrancy(tsdn, arena);
1964 	}
1965 	bool err = ((*r_extent_hooks)->decommit == NULL ||
1966 	    (*r_extent_hooks)->decommit(*r_extent_hooks,
1967 	    extent_base_get(extent), extent_size_get(extent), offset, length,
1968 	    arena_ind_get(arena)));
1969 	if (*r_extent_hooks != &extent_hooks_default) {
1970 		extent_hook_post_reentrancy(tsdn);
1971 	}
1972 	extent_committed_set(extent, extent_committed_get(extent) && err);
1973 	return err;
1974 }
1975 
1976 #ifdef PAGES_CAN_PURGE_LAZY
1977 static bool
1978 extent_purge_lazy_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
1979     size_t offset, size_t length, unsigned arena_ind) {
1980 	assert(addr != NULL);
1981 	assert((offset & PAGE_MASK) == 0);
1982 	assert(length != 0);
1983 	assert((length & PAGE_MASK) == 0);
1984 
1985 	return pages_purge_lazy((void *)((uintptr_t)addr + (uintptr_t)offset),
1986 	    length);
1987 }
1988 #endif
1989 
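/*
 * Lazily purge [offset, offset + length) within the extent via the purge_lazy
 * hook; a missing hook counts as failure.  Returns true on error.
 */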
1990 static bool
1991 extent_purge_lazy_impl(tsdn_t *tsdn, arena_t *arena,
1992     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
1993     size_t length, bool growing_retained) {
1994 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
1995 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
1996 
1997 	extent_hooks_assure_initialized(arena, r_extent_hooks);
1998 
1999 	if ((*r_extent_hooks)->purge_lazy == NULL) {
2000 		return true;
2001 	}
2002 	if (*r_extent_hooks != &extent_hooks_default) {
2003 		extent_hook_pre_reentrancy(tsdn, arena);
2004 	}
2005 	bool err = (*r_extent_hooks)->purge_lazy(*r_extent_hooks,
2006 	    extent_base_get(extent), extent_size_get(extent), offset, length,
2007 	    arena_ind_get(arena));
2008 	if (*r_extent_hooks != &extent_hooks_default) {
2009 		extent_hook_post_reentrancy(tsdn);
2010 	}
2011 
2012 	return err;
2013 }
2014 
2015 bool
2016 extent_purge_lazy_wrapper(tsdn_t *tsdn, arena_t *arena,
2017     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
2018     size_t length) {
2019 	return extent_purge_lazy_impl(tsdn, arena, r_extent_hooks, extent,
2020 	    offset, length, false);
2021 }
2022 
2023 #ifdef PAGES_CAN_PURGE_FORCED
2024 static bool
2025 extent_purge_forced_default(extent_hooks_t *extent_hooks, void *addr,
2026     size_t size, size_t offset, size_t length, unsigned arena_ind) {
2027 	assert(addr != NULL);
2028 	assert((offset & PAGE_MASK) == 0);
2029 	assert(length != 0);
2030 	assert((length & PAGE_MASK) == 0);
2031 
2032 	return pages_purge_forced((void *)((uintptr_t)addr +
2033 	    (uintptr_t)offset), length);
2034 }
2035 #endif
2036 
2037 static bool
2038 extent_purge_forced_impl(tsdn_t *tsdn, arena_t *arena,
2039     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
2040     size_t length, bool growing_retained) {
2041 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
2042 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
2043 
2044 	extent_hooks_assure_initialized(arena, r_extent_hooks);
2045 
2046 	if ((*r_extent_hooks)->purge_forced == NULL) {
2047 		return true;
2048 	}
2049 	if (*r_extent_hooks != &extent_hooks_default) {
2050 		extent_hook_pre_reentrancy(tsdn, arena);
2051 	}
2052 	bool err = (*r_extent_hooks)->purge_forced(*r_extent_hooks,
2053 	    extent_base_get(extent), extent_size_get(extent), offset, length,
2054 	    arena_ind_get(arena));
2055 	if (*r_extent_hooks != &extent_hooks_default) {
2056 		extent_hook_post_reentrancy(tsdn);
2057 	}
2058 	return err;
2059 }
2060 
2061 bool
2062 extent_purge_forced_wrapper(tsdn_t *tsdn, arena_t *arena,
2063     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t offset,
2064     size_t length) {
2065 	return extent_purge_forced_impl(tsdn, arena, r_extent_hooks, extent,
2066 	    offset, length, false);
2067 }
2068 
2069 static bool
2070 extent_split_default(extent_hooks_t *extent_hooks, void *addr, size_t size,
2071     size_t size_a, size_t size_b, bool committed, unsigned arena_ind) {
2072 	if (!maps_coalesce) {
2073 		/*
2074 		 * Without retain, only whole regions can be purged (required by
2075 		 * MEM_RELEASE on Windows) -- therefore disallow splitting.  See
2076 		 * comments in extent_head_no_merge().
2077 		 */
2078 		return !opt_retain;
2079 	}
2080 
2081 	return false;
2082 }
2083 
2084 /*
2085  * Accepts the extent to split, and the characteristics of each side of the
2086  * split.  The 'a' parameters go with the 'lead' of the resulting pair of
2087  * extents (the lower addressed portion of the split), and the 'b' parameters go
2088  * with the trail (the higher addressed portion).  This makes 'extent' the lead,
2089  * and returns the trail (except in case of error).
2090  */
2091 static extent_t *
2092 extent_split_impl(tsdn_t *tsdn, arena_t *arena,
2093     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
2094     szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b,
2095     bool growing_retained) {
2096 	assert(extent_size_get(extent) == size_a + size_b);
2097 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
2098 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
2099 
2100 	extent_hooks_assure_initialized(arena, r_extent_hooks);
2101 
2102 	if ((*r_extent_hooks)->split == NULL) {
2103 		return NULL;
2104 	}
2105 
2106 	extent_t *trail = extent_alloc(tsdn, arena);
2107 	if (trail == NULL) {
2108 		goto label_error_a;
2109 	}
2110 
2111 	extent_init(trail, arena, (void *)((uintptr_t)extent_base_get(extent) +
2112 	    size_a), size_b, slab_b, szind_b, extent_sn_get(extent),
2113 	    extent_state_get(extent), extent_zeroed_get(extent),
2114 	    extent_committed_get(extent), extent_dumpable_get(extent),
2115 	    EXTENT_NOT_HEAD);
2116 
2117 	rtree_ctx_t rtree_ctx_fallback;
2118 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
2119 	rtree_leaf_elm_t *lead_elm_a, *lead_elm_b;
2120 	{
2121 		extent_t lead;
2122 
2123 		extent_init(&lead, arena, extent_addr_get(extent), size_a,
2124 		    slab_a, szind_a, extent_sn_get(extent),
2125 		    extent_state_get(extent), extent_zeroed_get(extent),
2126 		    extent_committed_get(extent), extent_dumpable_get(extent),
2127 		    EXTENT_NOT_HEAD);
2128 
2129 		extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, &lead, false,
2130 		    true, &lead_elm_a, &lead_elm_b);
2131 	}
2132 	rtree_leaf_elm_t *trail_elm_a, *trail_elm_b;
2133 	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, trail, false, true,
2134 	    &trail_elm_a, &trail_elm_b);
2135 
2136 	if (lead_elm_a == NULL || lead_elm_b == NULL || trail_elm_a == NULL
2137 	    || trail_elm_b == NULL) {
2138 		goto label_error_b;
2139 	}
2140 
2141 	extent_lock2(tsdn, extent, trail);
2142 
2143 	if (*r_extent_hooks != &extent_hooks_default) {
2144 		extent_hook_pre_reentrancy(tsdn, arena);
2145 	}
2146 	bool err = (*r_extent_hooks)->split(*r_extent_hooks,
2147 	    extent_base_get(extent), size_a + size_b, size_a, size_b,
2148 	    extent_committed_get(extent), arena_ind_get(arena));
2149 	if (*r_extent_hooks != &extent_hooks_default) {
2150 		extent_hook_post_reentrancy(tsdn);
2151 	}
2152 	if (err) {
2153 		goto label_error_c;
2154 	}
2155 
2156 	extent_size_set(extent, size_a);
2157 	extent_szind_set(extent, szind_a);
2158 
2159 	extent_rtree_write_acquired(tsdn, lead_elm_a, lead_elm_b, extent,
2160 	    szind_a, slab_a);
2161 	extent_rtree_write_acquired(tsdn, trail_elm_a, trail_elm_b, trail,
2162 	    szind_b, slab_b);
2163 
2164 	extent_unlock2(tsdn, extent, trail);
2165 
2166 	return trail;
2167 label_error_c:
2168 	extent_unlock2(tsdn, extent, trail);
2169 label_error_b:
2170 	extent_dalloc(tsdn, arena, trail);
2171 label_error_a:
2172 	return NULL;
2173 }
2174 
2175 extent_t *
2176 extent_split_wrapper(tsdn_t *tsdn, arena_t *arena,
2177     extent_hooks_t **r_extent_hooks, extent_t *extent, size_t size_a,
2178     szind_t szind_a, bool slab_a, size_t size_b, szind_t szind_b, bool slab_b) {
2179 	return extent_split_impl(tsdn, arena, r_extent_hooks, extent, size_a,
2180 	    szind_a, slab_a, size_b, szind_b, slab_b, false);
2181 }
2182 
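/*
 * The default merge is refused when mappings cannot coalesce (unless
 * opt.retain is enabled), or when DSS is in use and the two addresses are not
 * DSS-mergeable.
 */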
2183 static bool
2184 extent_merge_default_impl(void *addr_a, void *addr_b) {
2185 	if (!maps_coalesce && !opt_retain) {
2186 		return true;
2187 	}
2188 	if (have_dss && !extent_dss_mergeable(addr_a, addr_b)) {
2189 		return true;
2190 	}
2191 
2192 	return false;
2193 }
2194 
2195 /*
2196  * Returns true if the given extents can't be merged because of their head bit
2197  * settings.  Assumes the second extent has the higher address.
2198  */
2199 static bool
2200 extent_head_no_merge(extent_t *a, extent_t *b) {
2201 	assert(extent_base_get(a) < extent_base_get(b));
2202 	/*
2203 	 * When coalesce is not always allowed (Windows), only merge extents
2204 	 * from the same VirtualAlloc region under opt.retain (in which case
2205 	 * MEM_DECOMMIT is utilized for purging).
2206 	 */
2207 	if (maps_coalesce) {
2208 		return false;
2209 	}
2210 	if (!opt_retain) {
2211 		return true;
2212 	}
2213 	/* If b is a head extent, disallow the cross-region merge. */
2214 	if (extent_is_head_get(b)) {
2215 		/*
2216 		 * Additionally, sn should not overflow with retain; sanity
2217 		 * check that different regions have unique sn.
2218 		 */
2219 		assert(extent_sn_comp(a, b) != 0);
2220 		return true;
2221 	}
2222 	assert(extent_sn_comp(a, b) == 0);
2223 
2224 	return false;
2225 }
2226 
2227 static bool
2228 extent_merge_default(extent_hooks_t *extent_hooks, void *addr_a, size_t size_a,
2229     void *addr_b, size_t size_b, bool committed, unsigned arena_ind) {
2230 	if (!maps_coalesce) {
2231 		tsdn_t *tsdn = tsdn_fetch();
2232 		extent_t *a = iealloc(tsdn, addr_a);
2233 		extent_t *b = iealloc(tsdn, addr_b);
2234 		if (extent_head_no_merge(a, b)) {
2235 			return true;
2236 		}
2237 	}
2238 	return extent_merge_default_impl(addr_a, addr_b);
2239 }
2240 
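/*
 * Merge b into a (a must have the lower address).  The merge hook is consulted
 * first; on success the interior rtree mappings are cleared, a's size, szind,
 * sn, and zeroed state are updated while both extents are locked, and b's
 * extent_t is freed.  Returns true on error.
 */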
2241 static bool
2242 extent_merge_impl(tsdn_t *tsdn, arena_t *arena,
2243     extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b,
2244     bool growing_retained) {
2245 	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
2246 	    WITNESS_RANK_CORE, growing_retained ? 1 : 0);
2247 	assert(extent_base_get(a) < extent_base_get(b));
2248 
2249 	extent_hooks_assure_initialized(arena, r_extent_hooks);
2250 
2251 	if ((*r_extent_hooks)->merge == NULL || extent_head_no_merge(a, b)) {
2252 		return true;
2253 	}
2254 
2255 	bool err;
2256 	if (*r_extent_hooks == &extent_hooks_default) {
2257 		/* Call directly to propagate tsdn. */
2258 		err = extent_merge_default_impl(extent_base_get(a),
2259 		    extent_base_get(b));
2260 	} else {
2261 		extent_hook_pre_reentrancy(tsdn, arena);
2262 		err = (*r_extent_hooks)->merge(*r_extent_hooks,
2263 		    extent_base_get(a), extent_size_get(a), extent_base_get(b),
2264 		    extent_size_get(b), extent_committed_get(a),
2265 		    arena_ind_get(arena));
2266 		extent_hook_post_reentrancy(tsdn);
2267 	}
2268 
2269 	if (err) {
2270 		return true;
2271 	}
2272 
2273 	/*
2274 	 * The rtree writes must happen while all the relevant elements are
2275 	 * owned, so the following code uses decomposed helper functions rather
2276 	 * than extent_{,de}register() to do things in the right order.
2277 	 */
2278 	rtree_ctx_t rtree_ctx_fallback;
2279 	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
2280 	rtree_leaf_elm_t *a_elm_a, *a_elm_b, *b_elm_a, *b_elm_b;
2281 	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, a, true, false, &a_elm_a,
2282 	    &a_elm_b);
2283 	extent_rtree_leaf_elms_lookup(tsdn, rtree_ctx, b, true, false, &b_elm_a,
2284 	    &b_elm_b);
2285 
2286 	extent_lock2(tsdn, a, b);
2287 
2288 	if (a_elm_b != NULL) {
2289 		rtree_leaf_elm_write(tsdn, &extents_rtree, a_elm_b, NULL,
2290 		    SC_NSIZES, false);
2291 	}
2292 	if (b_elm_b != NULL) {
2293 		rtree_leaf_elm_write(tsdn, &extents_rtree, b_elm_a, NULL,
2294 		    SC_NSIZES, false);
2295 	} else {
2296 		b_elm_b = b_elm_a;
2297 	}
2298 
2299 	extent_size_set(a, extent_size_get(a) + extent_size_get(b));
2300 	extent_szind_set(a, SC_NSIZES);
2301 	extent_sn_set(a, (extent_sn_get(a) < extent_sn_get(b)) ?
2302 	    extent_sn_get(a) : extent_sn_get(b));
2303 	extent_zeroed_set(a, extent_zeroed_get(a) && extent_zeroed_get(b));
2304 
2305 	extent_rtree_write_acquired(tsdn, a_elm_a, b_elm_b, a, SC_NSIZES,
2306 	    false);
2307 
2308 	extent_unlock2(tsdn, a, b);
2309 
2310 	extent_dalloc(tsdn, extent_arena_get(b), b);
2311 
2312 	return false;
2313 }
2314 
2315 bool
2316 extent_merge_wrapper(tsdn_t *tsdn, arena_t *arena,
2317     extent_hooks_t **r_extent_hooks, extent_t *a, extent_t *b) {
2318 	return extent_merge_impl(tsdn, arena, r_extent_hooks, a, b, false);
2319 }
2320 
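/*
 * Initialize global extent state: the extents rtree, the extent mutex pool,
 * and (when available) the DSS subsystem.  Returns true on error.
 */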
2321 bool
2322 extent_boot(void) {
2323 	if (rtree_new(&extents_rtree, true)) {
2324 		return true;
2325 	}
2326 
2327 	if (mutex_pool_init(&extent_mutex_pool, "extent_mutex_pool",
2328 	    WITNESS_RANK_EXTENT_POOL)) {
2329 		return true;
2330 	}
2331 
2332 	if (have_dss) {
2333 		extent_dss_boot();
2334 	}
2335 
2336 	return false;
2337 }
2338 
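/*
 * Report utilization statistics for the extent containing ptr: its total
 * size, and, for slab extents, the numbers of free and total regions.
 */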
2339 void
2340 extent_util_stats_get(tsdn_t *tsdn, const void *ptr,
2341     size_t *nfree, size_t *nregs, size_t *size) {
2342 	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL);
2343 
2344 	const extent_t *extent = iealloc(tsdn, ptr);
2345 	if (unlikely(extent == NULL)) {
2346 		*nfree = *nregs = *size = 0;
2347 		return;
2348 	}
2349 
2350 	*size = extent_size_get(extent);
2351 	if (!extent_slab_get(extent)) {
2352 		*nfree = 0;
2353 		*nregs = 1;
2354 	} else {
2355 		*nfree = extent_nfree_get(extent);
2356 		*nregs = bin_infos[extent_szind_get(extent)].nregs;
2357 		assert(*nfree <= *nregs);
2358 		assert(*nfree * extent_usize_get(extent) <= *size);
2359 	}
2360 }
2361 
2362 void
2363 extent_util_stats_verbose_get(tsdn_t *tsdn, const void *ptr,
2364     size_t *nfree, size_t *nregs, size_t *size,
2365     size_t *bin_nfree, size_t *bin_nregs, void **slabcur_addr) {
2366 	assert(ptr != NULL && nfree != NULL && nregs != NULL && size != NULL
2367 	    && bin_nfree != NULL && bin_nregs != NULL && slabcur_addr != NULL);
2368 
2369 	const extent_t *extent = iealloc(tsdn, ptr);
2370 	if (unlikely(extent == NULL)) {
2371 		*nfree = *nregs = *size = *bin_nfree = *bin_nregs = 0;
2372 		*slabcur_addr = NULL;
2373 		return;
2374 	}
2375 
2376 	*size = extent_size_get(extent);
2377 	if (!extent_slab_get(extent)) {
2378 		*nfree = *bin_nfree = *bin_nregs = 0;
2379 		*nregs = 1;
2380 		*slabcur_addr = NULL;
2381 		return;
2382 	}
2383 
2384 	*nfree = extent_nfree_get(extent);
2385 	const szind_t szind = extent_szind_get(extent);
2386 	*nregs = bin_infos[szind].nregs;
2387 	assert(*nfree <= *nregs);
2388 	assert(*nfree * extent_usize_get(extent) <= *size);
2389 
2390 	const arena_t *arena = extent_arena_get(extent);
2391 	assert(arena != NULL);
2392 	const unsigned binshard = extent_binshard_get(extent);
2393 	bin_t *bin = &arena->bins[szind].bin_shards[binshard];
2394 
2395 	malloc_mutex_lock(tsdn, &bin->lock);
2396 	if (config_stats) {
2397 		*bin_nregs = *nregs * bin->stats.curslabs;
2398 		assert(*bin_nregs >= bin->stats.curregs);
2399 		*bin_nfree = *bin_nregs - bin->stats.curregs;
2400 	} else {
2401 		*bin_nfree = *bin_nregs = 0;
2402 	}
2403 	*slabcur_addr = extent_addr_get(bin->slabcur);
2404 	assert(*slabcur_addr != NULL);
2405 	malloc_mutex_unlock(tsdn, &bin->lock);
2406 }
2407