xref: /freebsd/contrib/jemalloc/src/arena.c (revision 5e386598a6d77973b93c073080f0cc574edda9e2)
1 #define	JEMALLOC_ARENA_C_
2 #include "jemalloc/internal/jemalloc_internal.h"
3 
4 /******************************************************************************/
5 /* Data. */
6 
7 bool		opt_thp = true;
8 static bool	thp_initially_huge;
9 purge_mode_t	opt_purge = PURGE_DEFAULT;
10 const char	*purge_mode_names[] = {
11 	"ratio",
12 	"decay",
13 	"N/A"
14 };
15 ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
16 static ssize_t	lg_dirty_mult_default;
17 ssize_t		opt_decay_time = DECAY_TIME_DEFAULT;
18 static ssize_t	decay_time_default;
19 
20 arena_bin_info_t	arena_bin_info[NBINS];
21 
22 size_t		map_bias;
23 size_t		map_misc_offset;
24 size_t		arena_maxrun; /* Max run size for arenas. */
25 size_t		large_maxclass; /* Max large size class. */
26 unsigned	nlclasses; /* Number of large size classes. */
27 unsigned	nhclasses; /* Number of huge size classes. */
28 
29 /******************************************************************************/
30 /*
31  * Function prototypes for static functions that are referenced prior to
32  * definition.
33  */
34 
35 static void	arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena,
36     arena_chunk_t *chunk);
37 static void	arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
38     size_t ndirty_limit);
39 static void	arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
40     bool dirty, bool cleaned, bool decommitted);
41 static void	arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
42     arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
43 static void	arena_bin_lower_run(arena_t *arena, arena_run_t *run,
44     arena_bin_t *bin);
45 
46 /******************************************************************************/
47 
48 JEMALLOC_INLINE_C size_t
49 arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
50 {
51 	arena_chunk_t *chunk;
52 	size_t pageind, mapbits;
53 
54 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
55 	pageind = arena_miscelm_to_pageind(miscelm);
56 	mapbits = arena_mapbits_get(chunk, pageind);
57 	return (arena_mapbits_size_decode(mapbits));
58 }
59 
60 JEMALLOC_INLINE_C const extent_node_t *
61 arena_miscelm_extent_get(const arena_chunk_map_misc_t *miscelm)
62 {
63 	arena_chunk_t *chunk;
64 
65 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
66 	return (&chunk->node);
67 }
68 
69 JEMALLOC_INLINE_C int
70 arena_sn_comp(const arena_chunk_map_misc_t *a, const arena_chunk_map_misc_t *b)
71 {
72 	size_t a_sn, b_sn;
73 
74 	assert(a != NULL);
75 	assert(b != NULL);
76 
77 	a_sn = extent_node_sn_get(arena_miscelm_extent_get(a));
78 	b_sn = extent_node_sn_get(arena_miscelm_extent_get(b));
79 
80 	return ((a_sn > b_sn) - (a_sn < b_sn));
81 }
82 
83 JEMALLOC_INLINE_C int
84 arena_ad_comp(const arena_chunk_map_misc_t *a,
85     const arena_chunk_map_misc_t *b)
86 {
87 	uintptr_t a_miscelm = (uintptr_t)a;
88 	uintptr_t b_miscelm = (uintptr_t)b;
89 
90 	assert(a != NULL);
91 	assert(b != NULL);
92 
93 	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
94 }
95 
96 JEMALLOC_INLINE_C int
97 arena_snad_comp(const arena_chunk_map_misc_t *a,
98     const arena_chunk_map_misc_t *b)
99 {
100 	int ret;
101 
102 	assert(a != NULL);
103 	assert(b != NULL);
104 
105 	ret = arena_sn_comp(a, b);
106 	if (ret != 0)
107 		return (ret);
108 
109 	ret = arena_ad_comp(a, b);
110 	return (ret);
111 }
112 
113 /* Generate pairing heap functions. */
114 ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
115     ph_link, arena_snad_comp)
116 
117 #ifdef JEMALLOC_JET
118 #undef run_quantize_floor
119 #define	run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
120 #endif
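/*
 * Quantize a run size down to the nearest run size class.  Illustrative
 * example (assuming 4 KiB pages and large_pad == 0): page-spaced sizes such
 * as 20 KiB quantize to themselves, whereas 36 KiB lies between the 32 KiB
 * and 40 KiB classes and floors to 32 KiB.
 */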
121 static size_t
122 run_quantize_floor(size_t size)
123 {
124 	size_t ret;
125 	pszind_t pind;
126 
127 	assert(size > 0);
128 	assert(size <= HUGE_MAXCLASS);
129 	assert((size & PAGE_MASK) == 0);
130 
134 	pind = psz2ind(size - large_pad + 1);
135 	if (pind == 0) {
136 		/*
137 		 * Avoid underflow.  This short-circuit would also do the right
138 		 * thing for all sizes in the range for which there are
139 		 * PAGE-spaced size classes, but it's simplest to just handle
140 		 * the one case that would cause erroneous results.
141 		 */
142 		return (size);
143 	}
144 	ret = pind2sz(pind - 1) + large_pad;
145 	assert(ret <= size);
146 	return (ret);
147 }
148 #ifdef JEMALLOC_JET
149 #undef run_quantize_floor
150 #define	run_quantize_floor JEMALLOC_N(run_quantize_floor)
151 run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
152 #endif
153 
154 #ifdef JEMALLOC_JET
155 #undef run_quantize_ceil
156 #define	run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
157 #endif
158 static size_t
159 run_quantize_ceil(size_t size)
160 {
161 	size_t ret;
162 
163 	assert(size > 0);
164 	assert(size <= HUGE_MAXCLASS);
165 	assert((size & PAGE_MASK) == 0);
166 
167 	ret = run_quantize_floor(size);
168 	if (ret < size) {
169 		/*
170 		 * Skip a quantization that may have an adequately large run,
171 		 * because under-sized runs may be mixed in.  This only happens
172 		 * when an unusual size is requested, i.e. for aligned
173 		 * allocation, and is just one of several places where linear
174 		 * search would potentially find sufficiently aligned available
175 		 * memory somewhere lower.
176 		 */
177 		ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
178 	}
179 	return (ret);
180 }
181 #ifdef JEMALLOC_JET
182 #undef run_quantize_ceil
183 #define	run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
184 run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
185 #endif
186 
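/*
 * runs_avail is an array of pairing heaps, one per quantized run size class.
 * Runs within a heap are ordered by (chunk serial number, address) via
 * arena_snad_comp(), so first-best-fit allocation prefers runs in older
 * chunks, breaking ties by address.
 */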
187 static void
188 arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
189     size_t npages)
190 {
191 	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
192 	    arena_miscelm_get_const(chunk, pageind))));
193 	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
194 	    LG_PAGE));
195 	assert((npages << LG_PAGE) < chunksize);
196 	assert(pind2sz(pind) <= chunksize);
197 	arena_run_heap_insert(&arena->runs_avail[pind],
198 	    arena_miscelm_get_mutable(chunk, pageind));
199 }
200 
201 static void
202 arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
203     size_t npages)
204 {
205 	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
206 	    arena_miscelm_get_const(chunk, pageind))));
207 	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
208 	    LG_PAGE));
209 	assert((npages << LG_PAGE) < chunksize);
210 	assert(pind2sz(pind) <= chunksize);
211 	arena_run_heap_remove(&arena->runs_avail[pind],
212 	    arena_miscelm_get_mutable(chunk, pageind));
213 }
214 
215 static void
216 arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
217     size_t npages)
218 {
219 	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
220 	    pageind);
221 
222 	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
223 	    LG_PAGE));
224 	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
225 	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
226 	    CHUNK_MAP_DIRTY);
227 
228 	qr_new(&miscelm->rd, rd_link);
229 	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
230 	arena->ndirty += npages;
231 }
232 
233 static void
234 arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
235     size_t npages)
236 {
237 	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
238 	    pageind);
239 
240 	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
241 	    LG_PAGE));
242 	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
243 	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
244 	    CHUNK_MAP_DIRTY);
245 
246 	qr_remove(&miscelm->rd, rd_link);
247 	assert(arena->ndirty >= npages);
248 	arena->ndirty -= npages;
249 }
250 
251 static size_t
252 arena_chunk_dirty_npages(const extent_node_t *node)
253 {
254 
255 	return (extent_node_size_get(node) >> LG_PAGE);
256 }
257 
258 void
259 arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
260 {
261 
262 	if (cache) {
263 		extent_node_dirty_linkage_init(node);
264 		extent_node_dirty_insert(node, &arena->runs_dirty,
265 		    &arena->chunks_cache);
266 		arena->ndirty += arena_chunk_dirty_npages(node);
267 	}
268 }
269 
270 void
271 arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
272 {
273 
274 	if (dirty) {
275 		extent_node_dirty_remove(node);
276 		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
277 		arena->ndirty -= arena_chunk_dirty_npages(node);
278 	}
279 }
280 
281 JEMALLOC_INLINE_C void *
282 arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
283 {
284 	void *ret;
285 	size_t regind;
286 	arena_chunk_map_misc_t *miscelm;
287 	void *rpages;
288 
289 	assert(run->nfree > 0);
290 	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
291 
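	/*
	 * bitmap_sfu() finds and sets the lowest clear bit, so regions are
	 * handed out lowest-address first.  Illustrative example: with
	 * reg_interval == 32 and reg0_offset == 0, regind 2 yields
	 * rpages + 64.
	 */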
292 	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
293 	miscelm = arena_run_to_miscelm(run);
294 	rpages = arena_miscelm_to_rpages(miscelm);
295 	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
296 	    (uintptr_t)(bin_info->reg_interval * regind));
297 	run->nfree--;
298 	return (ret);
299 }
300 
301 JEMALLOC_INLINE_C void
302 arena_run_reg_dalloc(arena_run_t *run, void *ptr)
303 {
304 	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
305 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
306 	size_t mapbits = arena_mapbits_get(chunk, pageind);
307 	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
308 	arena_bin_info_t *bin_info = &arena_bin_info[binind];
309 	size_t regind = arena_run_regind(run, bin_info, ptr);
310 
311 	assert(run->nfree < bin_info->nregs);
312 	/* Freeing an interior pointer can cause assertion failure. */
313 	assert(((uintptr_t)ptr -
314 	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
315 	    (uintptr_t)bin_info->reg0_offset)) %
316 	    (uintptr_t)bin_info->reg_interval == 0);
317 	assert((uintptr_t)ptr >=
318 	    (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
319 	    (uintptr_t)bin_info->reg0_offset);
320 	/* Freeing an unallocated pointer can cause assertion failure. */
321 	assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));
322 
323 	bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
324 	run->nfree++;
325 }
326 
327 JEMALLOC_INLINE_C void
328 arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
329 {
330 
331 	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
332 	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
333 	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
334 	    (npages << LG_PAGE));
335 }
336 
337 JEMALLOC_INLINE_C void
338 arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
339 {
340 
341 	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
342 	    << LG_PAGE)), PAGE);
343 }
344 
345 JEMALLOC_INLINE_C void
346 arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
347 {
348 	size_t i;
349 	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
350 
351 	arena_run_page_mark_zeroed(chunk, run_ind);
352 	for (i = 0; i < PAGE / sizeof(size_t); i++)
353 		assert(p[i] == 0);
354 }
355 
356 static void
357 arena_nactive_add(arena_t *arena, size_t add_pages)
358 {
359 
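	/*
	 * stats_cactive is maintained at chunk granularity, so it is only
	 * adjusted when the updated nactive crosses a chunk boundary.
	 */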
360 	if (config_stats) {
361 		size_t cactive_add = CHUNK_CEILING((arena->nactive +
362 		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
363 		    LG_PAGE);
364 		if (cactive_add != 0)
365 			stats_cactive_add(cactive_add);
366 	}
367 	arena->nactive += add_pages;
368 }
369 
370 static void
371 arena_nactive_sub(arena_t *arena, size_t sub_pages)
372 {
373 
374 	if (config_stats) {
375 		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
376 		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
377 		if (cactive_sub != 0)
378 			stats_cactive_sub(cactive_sub);
379 	}
380 	arena->nactive -= sub_pages;
381 }
382 
383 static void
384 arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
385     size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
386 {
387 	size_t total_pages, rem_pages;
388 
389 	assert(flag_dirty == 0 || flag_decommitted == 0);
390 
391 	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
392 	    LG_PAGE;
393 	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
394 	    flag_dirty);
395 	assert(need_pages <= total_pages);
396 	rem_pages = total_pages - need_pages;
397 
398 	arena_avail_remove(arena, chunk, run_ind, total_pages);
399 	if (flag_dirty != 0)
400 		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
401 	arena_nactive_add(arena, need_pages);
402 
403 	/* Keep track of trailing unused pages for later use. */
404 	if (rem_pages > 0) {
405 		size_t flags = flag_dirty | flag_decommitted;
406 		size_t flag_unzeroed_mask = (flags == 0) ?  CHUNK_MAP_UNZEROED :
407 		    0;
408 
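		/*
		 * Preserve the per-page unzeroed flags only when the trailer
		 * is neither dirty nor decommitted; otherwise the dirty or
		 * decommitted flag alone describes its state.
		 */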
409 		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
410 		    (rem_pages << LG_PAGE), flags |
411 		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
412 		    flag_unzeroed_mask));
413 		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
414 		    (rem_pages << LG_PAGE), flags |
415 		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
416 		    flag_unzeroed_mask));
417 		if (flag_dirty != 0) {
418 			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
419 			    rem_pages);
420 		}
421 		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
422 	}
423 }
424 
425 static bool
426 arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
427     bool remove, bool zero)
428 {
429 	arena_chunk_t *chunk;
430 	arena_chunk_map_misc_t *miscelm;
431 	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
432 	size_t flag_unzeroed_mask;
433 
434 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
435 	miscelm = arena_run_to_miscelm(run);
436 	run_ind = arena_miscelm_to_pageind(miscelm);
437 	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
438 	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
439 	need_pages = (size >> LG_PAGE);
440 	assert(need_pages > 0);
441 
442 	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
443 	    run_ind << LG_PAGE, size, arena->ind))
444 		return (true);
445 
446 	if (remove) {
447 		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
448 		    flag_decommitted, need_pages);
449 	}
450 
451 	if (zero) {
452 		if (flag_decommitted != 0) {
453 			/* The run is untouched, and therefore zeroed. */
454 			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
455 			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
456 			    (need_pages << LG_PAGE));
457 		} else if (flag_dirty != 0) {
458 			/* The run is dirty, so all pages must be zeroed. */
459 			arena_run_zero(chunk, run_ind, need_pages);
460 		} else {
461 			/*
462 			 * The run is clean, so some pages may be zeroed (i.e.
463 			 * never before touched).
464 			 */
465 			size_t i;
466 			for (i = 0; i < need_pages; i++) {
467 				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
468 				    != 0)
469 					arena_run_zero(chunk, run_ind+i, 1);
470 				else if (config_debug) {
471 					arena_run_page_validate_zeroed(chunk,
472 					    run_ind+i);
473 				} else {
474 					arena_run_page_mark_zeroed(chunk,
475 					    run_ind+i);
476 				}
477 			}
478 		}
479 	} else {
480 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
481 		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
482 	}
483 
484 	/*
485 	 * Set the last element first, in case the run only contains one page
486 	 * (i.e. both statements set the same element).
487 	 */
488 	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
489 	    CHUNK_MAP_UNZEROED : 0;
490 	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
491 	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
492 	    run_ind+need_pages-1)));
493 	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
494 	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
495 	return (false);
496 }
497 
498 static bool
499 arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
500 {
501 
502 	return (arena_run_split_large_helper(arena, run, size, true, zero));
503 }
504 
505 static bool
506 arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
507 {
508 
509 	return (arena_run_split_large_helper(arena, run, size, false, zero));
510 }
511 
512 static bool
513 arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
514     szind_t binind)
515 {
516 	arena_chunk_t *chunk;
517 	arena_chunk_map_misc_t *miscelm;
518 	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
519 
520 	assert(binind != BININD_INVALID);
521 
522 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
523 	miscelm = arena_run_to_miscelm(run);
524 	run_ind = arena_miscelm_to_pageind(miscelm);
525 	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
526 	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
527 	need_pages = (size >> LG_PAGE);
528 	assert(need_pages > 0);
529 
530 	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
531 	    run_ind << LG_PAGE, size, arena->ind))
532 		return (true);
533 
534 	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
535 	    flag_decommitted, need_pages);
536 
537 	for (i = 0; i < need_pages; i++) {
538 		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
539 		    run_ind+i);
540 		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
541 		    flag_unzeroed);
542 		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
543 			arena_run_page_validate_zeroed(chunk, run_ind+i);
544 	}
545 	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
546 	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
547 	return (false);
548 }
549 
550 static arena_chunk_t *
551 arena_chunk_init_spare(arena_t *arena)
552 {
553 	arena_chunk_t *chunk;
554 
555 	assert(arena->spare != NULL);
556 
557 	chunk = arena->spare;
558 	arena->spare = NULL;
559 
560 	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
561 	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
562 	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
563 	    arena_maxrun);
564 	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
565 	    arena_maxrun);
566 	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
567 	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
568 
569 	return (chunk);
570 }
571 
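/*
 * chunk_register() reports via *gdump whether a prof gdump should be
 * triggered; callers defer prof_gdump() until arena->lock is not held (see
 * the witness depth assertions in the callers below).
 */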
572 static bool
573 arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, size_t sn, bool zero,
574     bool *gdump)
575 {
576 
577 	/*
578 	 * The extent node notion of "committed" doesn't directly apply to
579 	 * arena chunks.  Arbitrarily mark them as committed.  The commit state
580 	 * of runs is tracked individually, and upon chunk deallocation the
581 	 * entire chunk is in a consistent commit state.
582 	 */
583 	extent_node_init(&chunk->node, arena, chunk, chunksize, sn, zero, true);
584 	extent_node_achunk_set(&chunk->node, true);
585 	return (chunk_register(chunk, &chunk->node, gdump));
586 }
587 
588 static arena_chunk_t *
589 arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
590     chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
591 {
592 	arena_chunk_t *chunk;
593 	size_t sn;
594 
595 	malloc_mutex_unlock(tsdn, &arena->lock);
596 	/* prof_gdump() requirement. */
597 	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 0);
598 
599 	chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
600 	    NULL, chunksize, chunksize, &sn, zero, commit);
601 	if (chunk != NULL && !*commit) {
602 		/* Commit header. */
603 		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
604 		    LG_PAGE, arena->ind)) {
605 			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
606 			    (void *)chunk, chunksize, sn, *zero, *commit);
607 			chunk = NULL;
608 		}
609 	}
610 	if (chunk != NULL) {
611 		bool gdump;
612 		if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) {
613 			if (!*commit) {
614 				/* Undo commit of header. */
615 				chunk_hooks->decommit(chunk, chunksize, 0,
616 				    map_bias << LG_PAGE, arena->ind);
617 			}
618 			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
619 			    (void *)chunk, chunksize, sn, *zero, *commit);
620 			chunk = NULL;
621 		}
622 		if (config_prof && opt_prof && gdump)
623 			prof_gdump(tsdn);
624 	}
625 
626 	malloc_mutex_lock(tsdn, &arena->lock);
627 	return (chunk);
628 }
629 
630 static arena_chunk_t *
631 arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
632     bool *commit)
633 {
634 	arena_chunk_t *chunk;
635 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
636 	size_t sn;
637 
638 	/* prof_gdump() requirement. */
639 	witness_assert_depth_to_rank(tsdn, WITNESS_RANK_CORE, 1);
640 	malloc_mutex_assert_owner(tsdn, &arena->lock);
641 
642 	chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
643 	    chunksize, &sn, zero, commit, true);
644 	if (chunk != NULL) {
645 		bool gdump;
646 		if (arena_chunk_register(arena, chunk, sn, *zero, &gdump)) {
647 			chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
648 			    chunksize, sn, true);
649 			return (NULL);
650 		}
651 		if (config_prof && opt_prof && gdump) {
652 			malloc_mutex_unlock(tsdn, &arena->lock);
653 			prof_gdump(tsdn);
654 			malloc_mutex_lock(tsdn, &arena->lock);
655 		}
656 	}
657 	if (chunk == NULL) {
658 		chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
659 		    &chunk_hooks, zero, commit);
660 	}
661 
662 	if (config_stats && chunk != NULL) {
663 		arena->stats.mapped += chunksize;
664 		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
665 	}
666 
667 	return (chunk);
668 }
669 
670 static arena_chunk_t *
671 arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
672 {
673 	arena_chunk_t *chunk;
674 	bool zero, commit;
675 	size_t flag_unzeroed, flag_decommitted, i;
676 
677 	assert(arena->spare == NULL);
678 
679 	zero = false;
680 	commit = false;
681 	chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
682 	if (chunk == NULL)
683 		return (NULL);
684 
685 	if (config_thp && opt_thp) {
686 		chunk->hugepage = thp_initially_huge;
687 	}
688 
689 	/*
690 	 * Initialize the map to contain one maximal free untouched run.  Mark
691 	 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
692 	 * or decommitted chunk.
693 	 */
694 	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
695 	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
696 	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
697 	    flag_unzeroed | flag_decommitted);
698 	/*
699 	 * The internal page map entries only need to be initialized if the
700 	 * chunk is not zeroed; a zeroed chunk's entries are already zero.
701 	 */
702 	if (!zero) {
703 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
704 		    (void *)arena_bitselm_get_const(chunk, map_bias+1),
705 		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
706 		    chunk_npages-1) -
707 		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
708 		for (i = map_bias+1; i < chunk_npages-1; i++)
709 			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
710 	} else {
711 		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
712 		    *)arena_bitselm_get_const(chunk, map_bias+1),
713 		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
714 		    chunk_npages-1) -
715 		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
716 		if (config_debug) {
717 			for (i = map_bias+1; i < chunk_npages-1; i++) {
718 				assert(arena_mapbits_unzeroed_get(chunk, i) ==
719 				    flag_unzeroed);
720 			}
721 		}
722 	}
723 	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
724 	    flag_unzeroed);
725 
726 	return (chunk);
727 }
728 
729 static arena_chunk_t *
730 arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
731 {
732 	arena_chunk_t *chunk;
733 
734 	if (arena->spare != NULL)
735 		chunk = arena_chunk_init_spare(arena);
736 	else {
737 		chunk = arena_chunk_init_hard(tsdn, arena);
738 		if (chunk == NULL)
739 			return (NULL);
740 	}
741 
742 	ql_elm_new(&chunk->node, ql_link);
743 	ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
744 	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
745 
746 	return (chunk);
747 }
748 
749 static void
750 arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
751 {
752 	size_t sn;
753 	UNUSED bool hugepage JEMALLOC_CC_SILENCE_INIT(false);
754 	bool committed;
755 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
756 
757 	chunk_deregister(chunk, &chunk->node);
758 
759 	sn = extent_node_sn_get(&chunk->node);
760 	if (config_thp && opt_thp) {
761 		hugepage = chunk->hugepage;
762 	}
763 	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
764 	if (!committed) {
765 		/*
766 		 * Decommit the header.  Mark the chunk as decommitted even if
767 		 * header decommit fails, since treating a partially committed
768 		 * chunk as committed has a high potential for causing later
769 		 * access of decommitted memory.
770 		 */
771 		chunk_hooks = chunk_hooks_get(tsdn, arena);
772 		chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
773 		    arena->ind);
774 	}
775 	if (config_thp && opt_thp && hugepage != thp_initially_huge) {
776 		/*
777 		 * Convert chunk back to initial THP state, so that all
778 		 * subsequent chunk allocations start out in a consistent state.
779 		 */
780 		if (thp_initially_huge) {
781 			pages_huge(chunk, chunksize);
782 		} else {
783 			pages_nohuge(chunk, chunksize);
784 		}
785 	}
786 
787 	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
788 	    sn, committed);
789 
790 	if (config_stats) {
791 		arena->stats.mapped -= chunksize;
792 		arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
793 	}
794 }
795 
796 static void
797 arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
798 {
799 
800 	assert(arena->spare != spare);
801 
802 	if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
803 		arena_run_dirty_remove(arena, spare, map_bias,
804 		    chunk_npages-map_bias);
805 	}
806 
807 	arena_chunk_discard(tsdn, arena, spare);
808 }
809 
810 static void
811 arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
812 {
813 	arena_chunk_t *spare;
814 
815 	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
816 	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
817 	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
818 	    arena_maxrun);
819 	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
820 	    arena_maxrun);
821 	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
822 	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
823 	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
824 	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));
825 
826 	/* Remove run from runs_avail, so that the arena does not use it. */
827 	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
828 
829 	ql_remove(&arena->achunks, &chunk->node, ql_link);
830 	spare = arena->spare;
831 	arena->spare = chunk;
832 	if (spare != NULL)
833 		arena_spare_discard(tsdn, arena, spare);
834 }
835 
836 static void
837 arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
838 {
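	/*
	 * Huge size classes follow the NBINS small and nlclasses large
	 * classes, so subtracting both yields a zero-based index into hstats.
	 */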
839 	szind_t index = size2index(usize) - nlclasses - NBINS;
840 
841 	cassert(config_stats);
842 
843 	arena->stats.nmalloc_huge++;
844 	arena->stats.allocated_huge += usize;
845 	arena->stats.hstats[index].nmalloc++;
846 	arena->stats.hstats[index].curhchunks++;
847 }
848 
849 static void
850 arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
851 {
852 	szind_t index = size2index(usize) - nlclasses - NBINS;
853 
854 	cassert(config_stats);
855 
856 	arena->stats.nmalloc_huge--;
857 	arena->stats.allocated_huge -= usize;
858 	arena->stats.hstats[index].nmalloc--;
859 	arena->stats.hstats[index].curhchunks--;
860 }
861 
862 static void
863 arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
864 {
865 	szind_t index = size2index(usize) - nlclasses - NBINS;
866 
867 	cassert(config_stats);
868 
869 	arena->stats.ndalloc_huge++;
870 	arena->stats.allocated_huge -= usize;
871 	arena->stats.hstats[index].ndalloc++;
872 	arena->stats.hstats[index].curhchunks--;
873 }
874 
875 static void
876 arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
877 {
878 	szind_t index = size2index(usize) - nlclasses - NBINS;
879 
880 	cassert(config_stats);
881 
882 	arena->stats.ndalloc_huge++;
883 	arena->stats.hstats[index].ndalloc--;
884 }
885 
886 static void
887 arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
888 {
889 	szind_t index = size2index(usize) - nlclasses - NBINS;
890 
891 	cassert(config_stats);
892 
893 	arena->stats.ndalloc_huge--;
894 	arena->stats.allocated_huge += usize;
895 	arena->stats.hstats[index].ndalloc--;
896 	arena->stats.hstats[index].curhchunks++;
897 }
898 
899 static void
900 arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
901 {
902 
903 	arena_huge_dalloc_stats_update(arena, oldsize);
904 	arena_huge_malloc_stats_update(arena, usize);
905 }
906 
907 static void
908 arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
909     size_t usize)
910 {
911 
912 	arena_huge_dalloc_stats_update_undo(arena, oldsize);
913 	arena_huge_malloc_stats_update_undo(arena, usize);
914 }
915 
916 extent_node_t *
917 arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
918 {
919 	extent_node_t *node;
920 
921 	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
922 	node = ql_last(&arena->node_cache, ql_link);
923 	if (node == NULL) {
924 		malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
925 		return (base_alloc(tsdn, sizeof(extent_node_t)));
926 	}
927 	ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
928 	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
929 	return (node);
930 }
931 
932 void
933 arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
934 {
935 
936 	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
937 	ql_elm_new(node, ql_link);
938 	ql_tail_insert(&arena->node_cache, node, ql_link);
939 	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
940 }
941 
942 static void *
943 arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
944     chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, size_t *sn,
945     bool *zero, size_t csize)
946 {
947 	void *ret;
948 	bool commit = true;
949 
950 	ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
951 	    alignment, sn, zero, &commit);
952 	if (ret == NULL) {
953 		/* Revert optimistic stats updates. */
954 		malloc_mutex_lock(tsdn, &arena->lock);
955 		if (config_stats) {
956 			arena_huge_malloc_stats_update_undo(arena, usize);
957 			arena->stats.mapped -= usize;
958 		}
959 		arena_nactive_sub(arena, usize >> LG_PAGE);
960 		malloc_mutex_unlock(tsdn, &arena->lock);
961 	}
962 
963 	return (ret);
964 }
965 
966 void *
967 arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
968     size_t alignment, size_t *sn, bool *zero)
969 {
970 	void *ret;
971 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
972 	size_t csize = CHUNK_CEILING(usize);
973 	bool commit = true;
974 
975 	malloc_mutex_lock(tsdn, &arena->lock);
976 
977 	/* Optimistically update stats. */
978 	if (config_stats) {
979 		arena_huge_malloc_stats_update(arena, usize);
980 		arena->stats.mapped += usize;
981 	}
982 	arena_nactive_add(arena, usize >> LG_PAGE);
983 
984 	ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
985 	    alignment, sn, zero, &commit, true);
986 	malloc_mutex_unlock(tsdn, &arena->lock);
987 	if (ret == NULL) {
988 		ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
989 		    usize, alignment, sn, zero, csize);
990 	}
991 
992 	return (ret);
993 }
994 
995 void
996 arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize,
997     size_t sn)
998 {
999 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
1000 	size_t csize;
1001 
1002 	csize = CHUNK_CEILING(usize);
1003 	malloc_mutex_lock(tsdn, &arena->lock);
1004 	if (config_stats) {
1005 		arena_huge_dalloc_stats_update(arena, usize);
1006 		arena->stats.mapped -= usize;
1007 	}
1008 	arena_nactive_sub(arena, usize >> LG_PAGE);
1009 
1010 	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, sn, true);
1011 	malloc_mutex_unlock(tsdn, &arena->lock);
1012 }
1013 
1014 void
1015 arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
1016     size_t oldsize, size_t usize)
1017 {
1018 
1019 	assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
1020 	assert(oldsize != usize);
1021 
1022 	malloc_mutex_lock(tsdn, &arena->lock);
1023 	if (config_stats)
1024 		arena_huge_ralloc_stats_update(arena, oldsize, usize);
1025 	if (oldsize < usize)
1026 		arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
1027 	else
1028 		arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
1029 	malloc_mutex_unlock(tsdn, &arena->lock);
1030 }
1031 
1032 void
1033 arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
1034     size_t oldsize, size_t usize, size_t sn)
1035 {
1036 	size_t udiff = oldsize - usize;
1037 	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
1038 
1039 	malloc_mutex_lock(tsdn, &arena->lock);
1040 	if (config_stats) {
1041 		arena_huge_ralloc_stats_update(arena, oldsize, usize);
1042 		if (cdiff != 0)
1043 			arena->stats.mapped -= cdiff;
1044 	}
1045 	arena_nactive_sub(arena, udiff >> LG_PAGE);
1046 
1047 	if (cdiff != 0) {
1048 		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
1049 		void *nchunk = (void *)((uintptr_t)chunk +
1050 		    CHUNK_CEILING(usize));
1051 
1052 		chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
1053 		    sn, true);
1054 	}
1055 	malloc_mutex_unlock(tsdn, &arena->lock);
1056 }
1057 
1058 static bool
1059 arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
1060     chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
1061     size_t *sn, bool *zero, void *nchunk, size_t udiff, size_t cdiff)
1062 {
1063 	bool err;
1064 	bool commit = true;
1065 
1066 	err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
1067 	    chunksize, sn, zero, &commit) == NULL);
1068 	if (err) {
1069 		/* Revert optimistic stats updates. */
1070 		malloc_mutex_lock(tsdn, &arena->lock);
1071 		if (config_stats) {
1072 			arena_huge_ralloc_stats_update_undo(arena, oldsize,
1073 			    usize);
1074 			arena->stats.mapped -= cdiff;
1075 		}
1076 		arena_nactive_sub(arena, udiff >> LG_PAGE);
1077 		malloc_mutex_unlock(tsdn, &arena->lock);
1078 	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
1079 	    cdiff, true, arena->ind)) {
1080 		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
1081 		    *sn, *zero, true);
1082 		err = true;
1083 	}
1084 	return (err);
1085 }
1086 
1087 bool
1088 arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
1089     size_t oldsize, size_t usize, bool *zero)
1090 {
1091 	bool err;
1092 	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
1093 	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
1094 	size_t udiff = usize - oldsize;
1095 	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
1096 	size_t sn;
1097 	bool commit = true;
1098 
1099 	malloc_mutex_lock(tsdn, &arena->lock);
1100 
1101 	/* Optimistically update stats. */
1102 	if (config_stats) {
1103 		arena_huge_ralloc_stats_update(arena, oldsize, usize);
1104 		arena->stats.mapped += cdiff;
1105 	}
1106 	arena_nactive_add(arena, udiff >> LG_PAGE);
1107 
1108 	err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
1109 	    chunksize, &sn, zero, &commit, true) == NULL);
1110 	malloc_mutex_unlock(tsdn, &arena->lock);
1111 	if (err) {
1112 		err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
1113 		    &chunk_hooks, chunk, oldsize, usize, &sn, zero, nchunk,
1114 		    udiff, cdiff);
1115 	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
1116 	    cdiff, true, arena->ind)) {
1117 		chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
1118 		    sn, *zero, true);
1119 		err = true;
1120 	}
1121 
1122 	return (err);
1123 }
1124 
1125 /*
1126  * Do first-best-fit run selection, i.e. select the lowest run that best fits.
1127  * Run sizes are indexed, so not all candidate runs are necessarily exactly the
1128  * same size.
1129  */
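/*
 * Illustrative example (assuming 4 KiB pages and large_pad == 0): a 36 KiB
 * request is ceil-quantized to the 40 KiB class, and the heaps for 40 KiB,
 * 48 KiB, ... up to chunksize are scanned in order; the first non-empty heap
 * supplies its lowest (serial number, address) run.
 */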
1130 static arena_run_t *
1131 arena_run_first_best_fit(arena_t *arena, size_t size)
1132 {
1133 	pszind_t pind, i;
1134 
1135 	pind = psz2ind(run_quantize_ceil(size));
1136 
1137 	for (i = pind; pind2sz(i) <= chunksize; i++) {
1138 		arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
1139 		    &arena->runs_avail[i]);
1140 		if (miscelm != NULL)
1141 			return (&miscelm->run);
1142 	}
1143 
1144 	return (NULL);
1145 }
1146 
1147 static arena_run_t *
1148 arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
1149 {
1150 	arena_run_t *run = arena_run_first_best_fit(arena, size);
1151 	if (run != NULL) {
1152 		if (arena_run_split_large(arena, run, size, zero))
1153 			run = NULL;
1154 	}
1155 	return (run);
1156 }
1157 
1158 static arena_run_t *
1159 arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
1160 {
1161 	arena_chunk_t *chunk;
1162 	arena_run_t *run;
1163 
1164 	assert(size <= arena_maxrun);
1165 	assert(size == PAGE_CEILING(size));
1166 
1167 	/* Search the arena's chunks for the lowest best fit. */
1168 	run = arena_run_alloc_large_helper(arena, size, zero);
1169 	if (run != NULL)
1170 		return (run);
1171 
1172 	/*
1173 	 * No usable runs.  Create a new chunk from which to allocate the run.
1174 	 */
1175 	chunk = arena_chunk_alloc(tsdn, arena);
1176 	if (chunk != NULL) {
1177 		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
1178 		if (arena_run_split_large(arena, run, size, zero))
1179 			run = NULL;
1180 		return (run);
1181 	}
1182 
1183 	/*
1184 	 * arena_chunk_alloc() failed, but another thread may have made
1185 	 * sufficient memory available while this one dropped arena->lock in
1186 	 * arena_chunk_alloc(), so search one more time.
1187 	 */
1188 	return (arena_run_alloc_large_helper(arena, size, zero));
1189 }
1190 
1191 static arena_run_t *
1192 arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
1193 {
1194 	arena_run_t *run = arena_run_first_best_fit(arena, size);
1195 	if (run != NULL) {
1196 		if (arena_run_split_small(arena, run, size, binind))
1197 			run = NULL;
1198 	}
1199 	return (run);
1200 }
1201 
1202 static arena_run_t *
1203 arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
1204 {
1205 	arena_chunk_t *chunk;
1206 	arena_run_t *run;
1207 
1208 	assert(size <= arena_maxrun);
1209 	assert(size == PAGE_CEILING(size));
1210 	assert(binind != BININD_INVALID);
1211 
1212 	/* Search the arena's chunks for the lowest best fit. */
1213 	run = arena_run_alloc_small_helper(arena, size, binind);
1214 	if (run != NULL)
1215 		return (run);
1216 
1217 	/*
1218 	 * No usable runs.  Create a new chunk from which to allocate the run.
1219 	 */
1220 	chunk = arena_chunk_alloc(tsdn, arena);
1221 	if (chunk != NULL) {
1222 		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
1223 		if (arena_run_split_small(arena, run, size, binind))
1224 			run = NULL;
1225 		return (run);
1226 	}
1227 
1228 	/*
1229 	 * arena_chunk_alloc() failed, but another thread may have made
1230 	 * sufficient memory available while this one dropped arena->lock in
1231 	 * arena_chunk_alloc(), so search one more time.
1232 	 */
1233 	return (arena_run_alloc_small_helper(arena, size, binind));
1234 }
1235 
1236 static bool
1237 arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
1238 {
1239 
1240 	return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
1241 	    << 3));
1242 }
1243 
1244 ssize_t
1245 arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
1246 {
1247 	ssize_t lg_dirty_mult;
1248 
1249 	malloc_mutex_lock(tsdn, &arena->lock);
1250 	lg_dirty_mult = arena->lg_dirty_mult;
1251 	malloc_mutex_unlock(tsdn, &arena->lock);
1252 
1253 	return (lg_dirty_mult);
1254 }
1255 
1256 bool
1257 arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
1258 {
1259 
1260 	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
1261 		return (true);
1262 
1263 	malloc_mutex_lock(tsdn, &arena->lock);
1264 	arena->lg_dirty_mult = lg_dirty_mult;
1265 	arena_maybe_purge(tsdn, arena);
1266 	malloc_mutex_unlock(tsdn, &arena->lock);
1267 
1268 	return (false);
1269 }
1270 
1271 static void
1272 arena_decay_deadline_init(arena_t *arena)
1273 {
1274 
1275 	assert(opt_purge == purge_mode_decay);
1276 
1277 	/*
1278 	 * Generate a new deadline that is uniformly random within the next
1279 	 * epoch after the current one.
1280 	 */
1281 	nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
1282 	nstime_add(&arena->decay.deadline, &arena->decay.interval);
1283 	if (arena->decay.time > 0) {
1284 		nstime_t jitter;
1285 
1286 		nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state,
1287 		    nstime_ns(&arena->decay.interval)));
1288 		nstime_add(&arena->decay.deadline, &jitter);
1289 	}
1290 }
1291 
1292 static bool
1293 arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
1294 {
1295 
1296 	assert(opt_purge == purge_mode_decay);
1297 
1298 	return (nstime_compare(&arena->decay.deadline, time) <= 0);
1299 }
1300 
1301 static size_t
1302 arena_decay_backlog_npages_limit(const arena_t *arena)
1303 {
1304 	static const uint64_t h_steps[] = {
1305 #define	STEP(step, h, x, y) \
1306 		h,
1307 		SMOOTHSTEP
1308 #undef STEP
1309 	};
1310 	uint64_t sum;
1311 	size_t npages_limit_backlog;
1312 	unsigned i;
1313 
1314 	assert(opt_purge == purge_mode_decay);
1315 
1316 	/*
1317 	 * For each element of decay_backlog, multiply by the corresponding
1318 	 * fixed-point smoothstep decay factor.  Sum the products, then divide
1319 	 * to round down to the nearest whole number of pages.
1320 	 */
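	/*
	 * backlog[SMOOTHSTEP_NSTEPS-1] holds the pages dirtied during the most
	 * recent epoch and is weighted by nearly 1.0, while backlog[0] (the
	 * oldest epoch still in the window) is weighted by nearly 0.0, so
	 * dirty pages stop counting toward the limit, and thus become
	 * purgeable, gradually over roughly decay.time.
	 */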
1321 	sum = 0;
1322 	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
1323 		sum += arena->decay.backlog[i] * h_steps[i];
1324 	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
1325 
1326 	return (npages_limit_backlog);
1327 }
1328 
1329 static void
1330 arena_decay_backlog_update_last(arena_t *arena)
1331 {
1332 	size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
1333 	    arena->ndirty - arena->decay.ndirty : 0;
1334 	arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
1335 }
1336 
1337 static void
1338 arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
1339 {
1340 
1341 	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
1342 		memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
1343 		    sizeof(size_t));
1344 	} else {
1345 		size_t nadvance_z = (size_t)nadvance_u64;
1346 
1347 		assert((uint64_t)nadvance_z == nadvance_u64);
1348 
1349 		memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
1350 		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
1351 		if (nadvance_z > 1) {
1352 			memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
1353 			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
1354 		}
1355 	}
1356 
1357 	arena_decay_backlog_update_last(arena);
1358 }
1359 
1360 static void
1361 arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
1362 {
1363 	uint64_t nadvance_u64;
1364 	nstime_t delta;
1365 
1366 	assert(opt_purge == purge_mode_decay);
1367 	assert(arena_decay_deadline_reached(arena, time));
1368 
1369 	nstime_copy(&delta, time);
1370 	nstime_subtract(&delta, &arena->decay.epoch);
1371 	nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
1372 	assert(nadvance_u64 > 0);
1373 
1374 	/* Add nadvance_u64 decay intervals to epoch. */
1375 	nstime_copy(&delta, &arena->decay.interval);
1376 	nstime_imultiply(&delta, nadvance_u64);
1377 	nstime_add(&arena->decay.epoch, &delta);
1378 
1379 	/* Set a new deadline. */
1380 	arena_decay_deadline_init(arena);
1381 
1382 	/* Update the backlog. */
1383 	arena_decay_backlog_update(arena, nadvance_u64);
1384 }
1385 
1386 static void
1387 arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
1388 {
1389 	size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
1390 
1391 	if (arena->ndirty > ndirty_limit)
1392 		arena_purge_to_limit(tsdn, arena, ndirty_limit);
1393 	arena->decay.ndirty = arena->ndirty;
1394 }
1395 
1396 static void
1397 arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
1398 {
1399 
1400 	arena_decay_epoch_advance_helper(arena, time);
1401 	arena_decay_epoch_advance_purge(tsdn, arena);
1402 }
1403 
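/*
 * The decay window is divided into SMOOTHSTEP_NSTEPS equal epochs; for
 * example, a 10 s decay time with 200 smoothstep steps yields a 50 ms epoch
 * interval.
 */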
1404 static void
1405 arena_decay_init(arena_t *arena, ssize_t decay_time)
1406 {
1407 
1408 	arena->decay.time = decay_time;
1409 	if (decay_time > 0) {
1410 		nstime_init2(&arena->decay.interval, decay_time, 0);
1411 		nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
1412 	}
1413 
1414 	nstime_init(&arena->decay.epoch, 0);
1415 	nstime_update(&arena->decay.epoch);
1416 	arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
1417 	arena_decay_deadline_init(arena);
1418 	arena->decay.ndirty = arena->ndirty;
1419 	memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
1420 }
1421 
1422 static bool
1423 arena_decay_time_valid(ssize_t decay_time)
1424 {
1425 
1426 	if (decay_time < -1)
1427 		return (false);
1428 	if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
1429 		return (true);
1430 	return (false);
1431 }
1432 
1433 ssize_t
1434 arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
1435 {
1436 	ssize_t decay_time;
1437 
1438 	malloc_mutex_lock(tsdn, &arena->lock);
1439 	decay_time = arena->decay.time;
1440 	malloc_mutex_unlock(tsdn, &arena->lock);
1441 
1442 	return (decay_time);
1443 }
1444 
1445 bool
1446 arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
1447 {
1448 
1449 	if (!arena_decay_time_valid(decay_time))
1450 		return (true);
1451 
1452 	malloc_mutex_lock(tsdn, &arena->lock);
1453 	/*
1454 	 * Restart decay backlog from scratch, which may cause many dirty pages
1455 	 * to be immediately purged.  It would conceptually be possible to map
1456 	 * the old backlog onto the new backlog, but there is no justification
1457 	 * for such complexity since decay_time changes are intended to be
1458 	 * infrequent, either between the {-1, 0, >0} states, or a one-time
1459 	 * arbitrary change during initial arena configuration.
1460 	 */
1461 	arena_decay_init(arena, decay_time);
1462 	arena_maybe_purge(tsdn, arena);
1463 	malloc_mutex_unlock(tsdn, &arena->lock);
1464 
1465 	return (false);
1466 }
1467 
1468 static void
1469 arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
1470 {
1471 
1472 	assert(opt_purge == purge_mode_ratio);
1473 
1474 	/* Don't purge if the option is disabled. */
1475 	if (arena->lg_dirty_mult < 0)
1476 		return;
1477 
1478 	/*
1479 	 * Iterate, since preventing recursive purging could otherwise leave too
1480 	 * many dirty pages.
1481 	 */
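	/*
	 * The threshold is nactive >> lg_dirty_mult, floored at one chunk's
	 * worth of pages; e.g. with lg_dirty_mult == 3, purging keeps dirty
	 * pages below roughly 1/8 of active pages.
	 */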
1482 	while (true) {
1483 		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
1484 		if (threshold < chunk_npages)
1485 			threshold = chunk_npages;
1486 		/*
1487 		 * Don't purge unless the number of purgeable pages exceeds the
1488 		 * threshold.
1489 		 */
1490 		if (arena->ndirty <= threshold)
1491 			return;
1492 		arena_purge_to_limit(tsdn, arena, threshold);
1493 	}
1494 }
1495 
1496 static void
1497 arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
1498 {
1499 	nstime_t time;
1500 
1501 	assert(opt_purge == purge_mode_decay);
1502 
1503 	/* Purge all or nothing if the option is disabled. */
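	/* (decay.time == -1 means never purge; 0 means purge immediately.) */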
1504 	if (arena->decay.time <= 0) {
1505 		if (arena->decay.time == 0)
1506 			arena_purge_to_limit(tsdn, arena, 0);
1507 		return;
1508 	}
1509 
1510 	nstime_init(&time, 0);
1511 	nstime_update(&time);
1512 	if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
1513 	    &time) > 0)) {
1514 		/*
1515 		 * Time went backwards.  Move the epoch back in time and
1516 		 * generate a new deadline, with the expectation that time
1517 		 * typically flows forward for long enough periods of time that
1518 		 * epochs complete.  Unfortunately, this strategy is susceptible
1519 		 * to clock jitter triggering premature epoch advances, but
1520 		 * clock jitter estimation and compensation isn't feasible here
1521 		 * because calls into this code are event-driven.
1522 		 */
1523 		nstime_copy(&arena->decay.epoch, &time);
1524 		arena_decay_deadline_init(arena);
1525 	} else {
1526 		/* Verify that time does not go backwards. */
1527 		assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
1528 	}
1529 
1530 	/*
1531 	 * If the deadline has been reached, advance to the current epoch and
1532 	 * purge to the new limit if necessary.  Note that dirty pages created
1533 	 * during the current epoch are not subject to purge until a future
1534 	 * epoch, so as a result purging only happens during epoch advances.
1535 	 */
1536 	if (arena_decay_deadline_reached(arena, &time))
1537 		arena_decay_epoch_advance(tsdn, arena, &time);
1538 }
1539 
1540 void
1541 arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
1542 {
1543 
1544 	/* Don't recursively purge. */
1545 	if (arena->purging)
1546 		return;
1547 
1548 	if (opt_purge == purge_mode_ratio)
1549 		arena_maybe_purge_ratio(tsdn, arena);
1550 	else
1551 		arena_maybe_purge_decay(tsdn, arena);
1552 }
1553 
1554 static size_t
1555 arena_dirty_count(arena_t *arena)
1556 {
1557 	size_t ndirty = 0;
1558 	arena_runs_dirty_link_t *rdelm;
1559 	extent_node_t *chunkselm;
1560 
1561 	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
1562 	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
1563 	    rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
1564 		size_t npages;
1565 
1566 		if (rdelm == &chunkselm->rd) {
1567 			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
1568 			chunkselm = qr_next(chunkselm, cc_link);
1569 		} else {
1570 			arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
1571 			    rdelm);
1572 			arena_chunk_map_misc_t *miscelm =
1573 			    arena_rd_to_miscelm(rdelm);
1574 			size_t pageind = arena_miscelm_to_pageind(miscelm);
1575 			assert(arena_mapbits_allocated_get(chunk, pageind) ==
1576 			    0);
1577 			assert(arena_mapbits_large_get(chunk, pageind) == 0);
1578 			assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
1579 			npages = arena_mapbits_unallocated_size_get(chunk,
1580 			    pageind) >> LG_PAGE;
1581 		}
1582 		ndirty += npages;
1583 	}
1584 
1585 	return (ndirty);
1586 }
1587 
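/*
 * Purging is split into three phases: arena_stash_dirty() (with arena->lock
 * held) temporarily allocates the dirty runs and cached chunks to be purged
 * and links them onto the caller-provided sentinel lists,
 * arena_purge_stashed() drops arena->lock while purging or decommitting the
 * stashed memory, and arena_unstash_purged() returns it to the arena and
 * chunk layers.
 */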
1588 static size_t
1589 arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
1590     size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
1591     extent_node_t *purge_chunks_sentinel)
1592 {
1593 	arena_runs_dirty_link_t *rdelm, *rdelm_next;
1594 	extent_node_t *chunkselm;
1595 	size_t nstashed = 0;
1596 
1597 	/* Stash runs/chunks according to ndirty_limit. */
1598 	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
1599 	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
1600 	    rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
1601 		size_t npages;
1602 		rdelm_next = qr_next(rdelm, rd_link);
1603 
1604 		if (rdelm == &chunkselm->rd) {
1605 			extent_node_t *chunkselm_next;
1606 			size_t sn;
1607 			bool zero, commit;
1608 			UNUSED void *chunk;
1609 
1610 			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
1611 			if (opt_purge == purge_mode_decay && arena->ndirty -
1612 			    (nstashed + npages) < ndirty_limit)
1613 				break;
1614 
1615 			chunkselm_next = qr_next(chunkselm, cc_link);
1616 			/*
1617 			 * Allocate.  chunkselm remains valid due to the
1618 			 * dalloc_node=false argument to chunk_alloc_cache().
1619 			 */
1620 			zero = false;
1621 			commit = false;
1622 			chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
1623 			    extent_node_addr_get(chunkselm),
1624 			    extent_node_size_get(chunkselm), chunksize, &sn,
1625 			    &zero, &commit, false);
1626 			assert(chunk == extent_node_addr_get(chunkselm));
1627 			assert(zero == extent_node_zeroed_get(chunkselm));
1628 			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
1629 			    purge_chunks_sentinel);
1630 			assert(npages == (extent_node_size_get(chunkselm) >>
1631 			    LG_PAGE));
1632 			chunkselm = chunkselm_next;
1633 		} else {
1634 			arena_chunk_t *chunk =
1635 			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1636 			arena_chunk_map_misc_t *miscelm =
1637 			    arena_rd_to_miscelm(rdelm);
1638 			size_t pageind = arena_miscelm_to_pageind(miscelm);
1639 			arena_run_t *run = &miscelm->run;
1640 			size_t run_size =
1641 			    arena_mapbits_unallocated_size_get(chunk, pageind);
1642 
1643 			npages = run_size >> LG_PAGE;
1644 			if (opt_purge == purge_mode_decay && arena->ndirty -
1645 			    (nstashed + npages) < ndirty_limit)
1646 				break;
1647 
1648 			assert(pageind + npages <= chunk_npages);
1649 			assert(arena_mapbits_dirty_get(chunk, pageind) ==
1650 			    arena_mapbits_dirty_get(chunk, pageind+npages-1));
1651 
1652 			/*
1653 			 * If purging the spare chunk's run, make it available
1654 			 * prior to allocation.
1655 			 */
1656 			if (chunk == arena->spare)
1657 				arena_chunk_alloc(tsdn, arena);
1658 
1659 			/* Temporarily allocate the free dirty run. */
1660 			arena_run_split_large(arena, run, run_size, false);
1661 			/* Stash. */
1662 			if (false)
1663 				qr_new(rdelm, rd_link); /* Redundant. */
1664 			else {
1665 				assert(qr_next(rdelm, rd_link) == rdelm);
1666 				assert(qr_prev(rdelm, rd_link) == rdelm);
1667 			}
1668 			qr_meld(purge_runs_sentinel, rdelm, rd_link);
1669 		}
1670 
1671 		nstashed += npages;
1672 		if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
1673 		    ndirty_limit)
1674 			break;
1675 	}
1676 
1677 	return (nstashed);
1678 }
1679 
1680 static size_t
1681 arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
1682     arena_runs_dirty_link_t *purge_runs_sentinel,
1683     extent_node_t *purge_chunks_sentinel)
1684 {
1685 	size_t npurged, nmadvise;
1686 	arena_runs_dirty_link_t *rdelm;
1687 	extent_node_t *chunkselm;
1688 
1689 	if (config_stats)
1690 		nmadvise = 0;
1691 	npurged = 0;
1692 
1693 	malloc_mutex_unlock(tsdn, &arena->lock);
1694 	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
1695 	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
1696 	    rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
1697 		size_t npages;
1698 
1699 		if (rdelm == &chunkselm->rd) {
1700 			/*
1701 			 * Don't actually purge the chunk here because 1)
1702 			 * chunkselm is embedded in the chunk and must remain
1703 			 * valid, and 2) we deallocate the chunk in
1704 			 * arena_unstash_purged(), where it is destroyed,
1705 			 * decommitted, or purged, depending on chunk
1706 			 * deallocation policy.
1707 			 */
1708 			size_t size = extent_node_size_get(chunkselm);
1709 			npages = size >> LG_PAGE;
1710 			chunkselm = qr_next(chunkselm, cc_link);
1711 		} else {
1712 			size_t pageind, run_size, flag_unzeroed, flags, i;
1713 			bool decommitted;
1714 			arena_chunk_t *chunk =
1715 			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1716 			arena_chunk_map_misc_t *miscelm =
1717 			    arena_rd_to_miscelm(rdelm);
1718 			pageind = arena_miscelm_to_pageind(miscelm);
1719 			run_size = arena_mapbits_large_size_get(chunk, pageind);
1720 			npages = run_size >> LG_PAGE;
1721 
1722 			/*
1723 			 * If this is the first run purged within chunk, mark
1724 			 * the chunk as non-THP-capable.  This will prevent all
1725 			 * use of THPs for this chunk until the chunk as a whole
1726 			 * is deallocated.
1727 			 */
1728 			if (config_thp && opt_thp && chunk->hugepage) {
1729 				chunk->hugepage = pages_nohuge(chunk,
1730 				    chunksize);
1731 			}
1732 
1733 			assert(pageind + npages <= chunk_npages);
1734 			assert(!arena_mapbits_decommitted_get(chunk, pageind));
1735 			assert(!arena_mapbits_decommitted_get(chunk,
1736 			    pageind+npages-1));
1737 			decommitted = !chunk_hooks->decommit(chunk, chunksize,
1738 			    pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
1739 			if (decommitted) {
1740 				flag_unzeroed = 0;
1741 				flags = CHUNK_MAP_DECOMMITTED;
1742 			} else {
1743 				flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
1744 				    chunk_hooks, chunk, chunksize, pageind <<
1745 				    LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
1746 				flags = flag_unzeroed;
1747 			}
1748 			arena_mapbits_large_set(chunk, pageind+npages-1, 0,
1749 			    flags);
1750 			arena_mapbits_large_set(chunk, pageind, run_size,
1751 			    flags);
1752 
1753 			/*
1754 			 * Set the unzeroed flag for internal pages, now that
1755 			 * chunk_purge_wrapper() has returned whether the pages
1756 			 * were zeroed as a side effect of purging.  This chunk
1757 			 * map modification is safe even though the arena mutex
1758 			 * isn't currently owned by this thread, because the run
1759 			 * is marked as allocated, thus protecting it from being
1760 			 * modified by any other thread.  As long as these
1761 			 * writes don't perturb the first and last elements'
1762 			 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
1763 			 */
1764 			for (i = 1; i < npages-1; i++) {
1765 				arena_mapbits_internal_set(chunk, pageind+i,
1766 				    flag_unzeroed);
1767 			}
1768 		}
1769 
1770 		npurged += npages;
1771 		if (config_stats)
1772 			nmadvise++;
1773 	}
1774 	malloc_mutex_lock(tsdn, &arena->lock);
1775 
1776 	if (config_stats) {
1777 		arena->stats.nmadvise += nmadvise;
1778 		arena->stats.purged += npurged;
1779 	}
1780 
1781 	return (npurged);
1782 }
1783 
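/*
 * Deallocate the stashed runs/chunks now that purging is complete.  Runs are
 * returned via arena_run_dalloc(), and cached chunks are handed to
 * chunk_dalloc_wrapper(), which destroys, decommits, or purges them according
 * to the chunk deallocation policy.
 */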
1784 static void
1785 arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
1786     arena_runs_dirty_link_t *purge_runs_sentinel,
1787     extent_node_t *purge_chunks_sentinel)
1788 {
1789 	arena_runs_dirty_link_t *rdelm, *rdelm_next;
1790 	extent_node_t *chunkselm;
1791 
1792 	/* Deallocate chunks/runs. */
1793 	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
1794 	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
1795 	    rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
1796 		rdelm_next = qr_next(rdelm, rd_link);
1797 		if (rdelm == &chunkselm->rd) {
1798 			extent_node_t *chunkselm_next = qr_next(chunkselm,
1799 			    cc_link);
1800 			void *addr = extent_node_addr_get(chunkselm);
1801 			size_t size = extent_node_size_get(chunkselm);
1802 			size_t sn = extent_node_sn_get(chunkselm);
1803 			bool zeroed = extent_node_zeroed_get(chunkselm);
1804 			bool committed = extent_node_committed_get(chunkselm);
1805 			extent_node_dirty_remove(chunkselm);
1806 			arena_node_dalloc(tsdn, arena, chunkselm);
1807 			chunkselm = chunkselm_next;
1808 			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
1809 			    size, sn, zeroed, committed);
1810 		} else {
1811 			arena_chunk_t *chunk =
1812 			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1813 			arena_chunk_map_misc_t *miscelm =
1814 			    arena_rd_to_miscelm(rdelm);
1815 			size_t pageind = arena_miscelm_to_pageind(miscelm);
1816 			bool decommitted = (arena_mapbits_decommitted_get(chunk,
1817 			    pageind) != 0);
1818 			arena_run_t *run = &miscelm->run;
1819 			qr_remove(rdelm, rd_link);
1820 			arena_run_dalloc(tsdn, arena, run, false, true,
1821 			    decommitted);
1822 		}
1823 	}
1824 }
1825 
1826 /*
1827  * NB: ndirty_limit is interpreted differently depending on opt_purge:
1828  *   - purge_mode_ratio: Purge as few dirty runs/chunks as possible to reach
1829  *                       the desired state:
1830  *                       (arena->ndirty <= ndirty_limit)
1831  *   - purge_mode_decay: Purge as many dirty runs/chunks as possible without
1832  *                       violating the invariant:
1833  *                       (arena->ndirty >= ndirty_limit)
1834  */
1835 static void
1836 arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
1837 {
1838 	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
1839 	size_t npurge, npurged;
1840 	arena_runs_dirty_link_t purge_runs_sentinel;
1841 	extent_node_t purge_chunks_sentinel;
1842 
1843 	arena->purging = true;
1844 
1845 	/*
1846 	 * Calls to arena_dirty_count() are disabled even for debug builds
1847 	 * because overhead grows nonlinearly as memory usage increases.
1848 	 */
1849 	if (false && config_debug) {
1850 		size_t ndirty = arena_dirty_count(arena);
1851 		assert(ndirty == arena->ndirty);
1852 	}
1853 	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
1854 	    arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
1855 
1856 	qr_new(&purge_runs_sentinel, rd_link);
1857 	extent_node_dirty_linkage_init(&purge_chunks_sentinel);
1858 
1859 	npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
1860 	    &purge_runs_sentinel, &purge_chunks_sentinel);
1861 	if (npurge == 0)
1862 		goto label_return;
1863 	npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
1864 	    &purge_runs_sentinel, &purge_chunks_sentinel);
1865 	assert(npurged == npurge);
1866 	arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
1867 	    &purge_chunks_sentinel);
1868 
1869 	if (config_stats)
1870 		arena->stats.npurge++;
1871 
1872 label_return:
1873 	arena->purging = false;
1874 }
1875 
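/*
 * Purge dirty pages for the arena.  If all is true, purge every dirty page
 * (ndirty_limit == 0); otherwise defer to arena_maybe_purge(), which purges
 * only if the configured dirty page threshold has been exceeded.
 */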
1876 void
1877 arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
1878 {
1879 
1880 	malloc_mutex_lock(tsdn, &arena->lock);
1881 	if (all)
1882 		arena_purge_to_limit(tsdn, arena, 0);
1883 	else
1884 		arena_maybe_purge(tsdn, arena);
1885 	malloc_mutex_unlock(tsdn, &arena->lock);
1886 }
1887 
1888 static void
1889 arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
1890 {
1891 	size_t pageind, npages;
1892 
1893 	cassert(config_prof);
1894 	assert(opt_prof);
1895 
1896 	/*
1897 	 * Iterate over the allocated runs and remove profiled allocations from
1898 	 * the sample set.
1899 	 */
1900 	for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
1901 		if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
1902 			if (arena_mapbits_large_get(chunk, pageind) != 0) {
1903 				void *ptr = (void *)((uintptr_t)chunk + (pageind
1904 				    << LG_PAGE));
1905 				size_t usize = isalloc(tsd_tsdn(tsd), ptr,
1906 				    config_prof);
1907 
1908 				prof_free(tsd, ptr, usize);
1909 				npages = arena_mapbits_large_size_get(chunk,
1910 				    pageind) >> LG_PAGE;
1911 			} else {
1912 				/* Skip small run. */
1913 				size_t binind = arena_mapbits_binind_get(chunk,
1914 				    pageind);
1915 				arena_bin_info_t *bin_info =
1916 				    &arena_bin_info[binind];
1917 				npages = bin_info->run_size >> LG_PAGE;
1918 			}
1919 		} else {
1920 			/* Skip unallocated run. */
1921 			npages = arena_mapbits_unallocated_size_get(chunk,
1922 			    pageind) >> LG_PAGE;
1923 		}
1924 		assert(pageind + npages <= chunk_npages);
1925 	}
1926 }
1927 
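/*
 * Discard all of the arena's extant allocations (small, large, and huge),
 * returning the arena to a pristine state.  The caller must guarantee that no
 * other threads are operating on the arena; see the locking discussion below.
 */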
1928 void
1929 arena_reset(tsd_t *tsd, arena_t *arena)
1930 {
1931 	unsigned i;
1932 	extent_node_t *node;
1933 
1934 	/*
1935 	 * Locking in this function is unintuitive.  The caller guarantees that
1936 	 * no concurrent operations are happening in this arena, but there are
1937 	 * still reasons that some locking is necessary:
1938 	 *
1939 	 * - Some of the functions in the transitive closure of calls assume
1940 	 *   appropriate locks are held, and in some cases these locks are
1941 	 *   temporarily dropped to avoid lock order reversal or deadlock due to
1942 	 *   reentry.
1943 	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
1944 	 *   strictly speaking this is a "concurrent operation", disallowing
1945 	 *   stats refreshes would impose an inconvenient burden.
1946 	 */
1947 
1948 	/* Remove large allocations from prof sample set. */
1949 	if (config_prof && opt_prof) {
1950 		ql_foreach(node, &arena->achunks, ql_link) {
1951 			arena_achunk_prof_reset(tsd, arena,
1952 			    extent_node_addr_get(node));
1953 		}
1954 	}
1955 
1956 	/* Reset curruns for large size classes. */
1957 	if (config_stats) {
1958 		for (i = 0; i < nlclasses; i++)
1959 			arena->stats.lstats[i].curruns = 0;
1960 	}
1961 
1962 	/* Huge allocations. */
1963 	malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
1964 	for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
1965 	    ql_last(&arena->huge, ql_link)) {
1966 		void *ptr = extent_node_addr_get(node);
1967 		size_t usize;
1968 
1969 		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
1970 		if (config_stats || (config_prof && opt_prof))
1971 			usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
1972 		/* Remove huge allocation from prof sample set. */
1973 		if (config_prof && opt_prof)
1974 			prof_free(tsd, ptr, usize);
1975 		huge_dalloc(tsd_tsdn(tsd), ptr);
1976 		malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
1977 		/* Cancel out unwanted effects on stats. */
1978 		if (config_stats)
1979 			arena_huge_reset_stats_cancel(arena, usize);
1980 	}
1981 	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
1982 
1983 	malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
1984 
1985 	/* Bins. */
1986 	for (i = 0; i < NBINS; i++) {
1987 		arena_bin_t *bin = &arena->bins[i];
1988 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
1989 		bin->runcur = NULL;
1990 		arena_run_heap_new(&bin->runs);
1991 		if (config_stats) {
1992 			bin->stats.curregs = 0;
1993 			bin->stats.curruns = 0;
1994 		}
1995 		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
1996 	}
1997 
1998 	/*
1999 	 * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
2000 	 * chains directly correspond.
2001 	 */
2002 	qr_new(&arena->runs_dirty, rd_link);
2003 	for (node = qr_next(&arena->chunks_cache, cc_link);
2004 	    node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
2005 		qr_new(&node->rd, rd_link);
2006 		qr_meld(&arena->runs_dirty, &node->rd, rd_link);
2007 	}
2008 
2009 	/* Arena chunks. */
2010 	for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
2011 	    ql_last(&arena->achunks, ql_link)) {
2012 		ql_remove(&arena->achunks, node, ql_link);
2013 		arena_chunk_discard(tsd_tsdn(tsd), arena,
2014 		    extent_node_addr_get(node));
2015 	}
2016 
2017 	/* Spare. */
2018 	if (arena->spare != NULL) {
2019 		arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
2020 		arena->spare = NULL;
2021 	}
2022 
2023 	assert(!arena->purging);
2024 	arena->nactive = 0;
2025 
2026 	for (i = 0; i < NPSIZES; i++)
2027 		arena_run_heap_new(&arena->runs_avail[i]);
2028 
2029 	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
2030 }
2031 
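/*
 * Coalesce the unallocated run beginning at *p_run_ind with adjacent
 * unallocated runs whose dirty/decommitted flags match, updating *p_size,
 * *p_run_ind, and *p_run_pages in place.
 */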
2032 static void
2033 arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
2034     size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
2035     size_t flag_decommitted)
2036 {
2037 	size_t size = *p_size;
2038 	size_t run_ind = *p_run_ind;
2039 	size_t run_pages = *p_run_pages;
2040 
2041 	/* Try to coalesce forward. */
2042 	if (run_ind + run_pages < chunk_npages &&
2043 	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
2044 	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
2045 	    arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
2046 	    flag_decommitted) {
2047 		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
2048 		    run_ind+run_pages);
2049 		size_t nrun_pages = nrun_size >> LG_PAGE;
2050 
2051 		/*
2052 		 * Remove successor from runs_avail; the coalesced run is
2053 		 * inserted later.
2054 		 */
2055 		assert(arena_mapbits_unallocated_size_get(chunk,
2056 		    run_ind+run_pages+nrun_pages-1) == nrun_size);
2057 		assert(arena_mapbits_dirty_get(chunk,
2058 		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
2059 		assert(arena_mapbits_decommitted_get(chunk,
2060 		    run_ind+run_pages+nrun_pages-1) == flag_decommitted);
2061 		arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
2062 
2063 		/*
2064 		 * If the successor is dirty, remove it from the set of dirty
2065 		 * pages.
2066 		 */
2067 		if (flag_dirty != 0) {
2068 			arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
2069 			    nrun_pages);
2070 		}
2071 
2072 		size += nrun_size;
2073 		run_pages += nrun_pages;
2074 
2075 		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2076 		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2077 		    size);
2078 	}
2079 
2080 	/* Try to coalesce backward. */
2081 	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
2082 	    run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
2083 	    flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
2084 	    flag_decommitted) {
2085 		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
2086 		    run_ind-1);
2087 		size_t prun_pages = prun_size >> LG_PAGE;
2088 
2089 		run_ind -= prun_pages;
2090 
2091 		/*
2092 		 * Remove predecessor from runs_avail; the coalesced run is
2093 		 * inserted later.
2094 		 */
2095 		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2096 		    prun_size);
2097 		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
2098 		assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2099 		    flag_decommitted);
2100 		arena_avail_remove(arena, chunk, run_ind, prun_pages);
2101 
2102 		/*
2103 		 * If the predecessor is dirty, remove it from the set of dirty
2104 		 * pages.
2105 		 */
2106 		if (flag_dirty != 0) {
2107 			arena_run_dirty_remove(arena, chunk, run_ind,
2108 			    prun_pages);
2109 		}
2110 
2111 		size += prun_size;
2112 		run_pages += prun_pages;
2113 
2114 		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2115 		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2116 		    size);
2117 	}
2118 
2119 	*p_size = size;
2120 	*p_run_ind = run_ind;
2121 	*p_run_pages = run_pages;
2122 }
2123 
2124 static size_t
2125 arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2126     size_t run_ind)
2127 {
2128 	size_t size;
2129 
2130 	assert(run_ind >= map_bias);
2131 	assert(run_ind < chunk_npages);
2132 
2133 	if (arena_mapbits_large_get(chunk, run_ind) != 0) {
2134 		size = arena_mapbits_large_size_get(chunk, run_ind);
2135 		assert(size == PAGE || arena_mapbits_large_size_get(chunk,
2136 		    run_ind+(size>>LG_PAGE)-1) == 0);
2137 	} else {
2138 		arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
2139 		size = bin_info->run_size;
2140 	}
2141 
2142 	return (size);
2143 }
2144 
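/*
 * Return a run to the arena: mark its pages as unallocated, coalesce with
 * neighboring free runs, insert the result into runs_avail, and deallocate
 * the enclosing chunk if it becomes completely unused.
 */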
2145 static void
2146 arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
2147     bool cleaned, bool decommitted)
2148 {
2149 	arena_chunk_t *chunk;
2150 	arena_chunk_map_misc_t *miscelm;
2151 	size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
2152 
2153 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
2154 	miscelm = arena_run_to_miscelm(run);
2155 	run_ind = arena_miscelm_to_pageind(miscelm);
2156 	assert(run_ind >= map_bias);
2157 	assert(run_ind < chunk_npages);
2158 	size = arena_run_size_get(arena, chunk, run, run_ind);
2159 	run_pages = (size >> LG_PAGE);
2160 	arena_nactive_sub(arena, run_pages);
2161 
2162 	/*
2163 	 * The run is dirty if the caller claims to have dirtied it, as well as
2164 	 * if it was already dirty before being allocated and the caller
2165 	 * doesn't claim to have cleaned it.
2166 	 */
2167 	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2168 	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
2169 	if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
2170 	    != 0)
2171 		dirty = true;
2172 	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
2173 	flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
2174 
2175 	/* Mark pages as unallocated in the chunk map. */
2176 	if (dirty || decommitted) {
2177 		size_t flags = flag_dirty | flag_decommitted;
2178 		arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
2179 		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
2180 		    flags);
2181 	} else {
2182 		arena_mapbits_unallocated_set(chunk, run_ind, size,
2183 		    arena_mapbits_unzeroed_get(chunk, run_ind));
2184 		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
2185 		    arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
2186 	}
2187 
2188 	arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
2189 	    flag_dirty, flag_decommitted);
2190 
2191 	/* Insert into runs_avail, now that coalescing is complete. */
2192 	assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2193 	    arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
2194 	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2195 	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
2196 	assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2197 	    arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
2198 	arena_avail_insert(arena, chunk, run_ind, run_pages);
2199 
2200 	if (dirty)
2201 		arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
2202 
2203 	/* Deallocate chunk if it is now completely unused. */
2204 	if (size == arena_maxrun) {
2205 		assert(run_ind == map_bias);
2206 		assert(run_pages == (arena_maxrun >> LG_PAGE));
2207 		arena_chunk_dalloc(tsdn, arena, chunk);
2208 	}
2209 
2210 	/*
2211 	 * It is okay to do dirty page processing here even if the chunk was
2212 	 * deallocated above, since in that case it is the spare.  Waiting
2213 	 * until after possible chunk deallocation to do dirty processing
2214 	 * allows for an old spare to be fully deallocated, thus decreasing the
2215 	 * chances of spuriously crossing the dirty page purging threshold.
2216 	 */
2217 	if (dirty)
2218 		arena_maybe_purge(tsdn, arena);
2219 }
2220 
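/*
 * Split the leading (oldsize - newsize) bytes off of a large run and return
 * them to the arena, leaving a trailing run of newsize bytes in use.
 */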
2221 static void
2222 arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2223     arena_run_t *run, size_t oldsize, size_t newsize)
2224 {
2225 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2226 	size_t pageind = arena_miscelm_to_pageind(miscelm);
2227 	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
2228 	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
2229 	size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2230 	size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2231 	    CHUNK_MAP_UNZEROED : 0;
2232 
2233 	assert(oldsize > newsize);
2234 
2235 	/*
2236 	 * Update the chunk map so that arena_run_dalloc() can treat the
2237 	 * leading run as separately allocated.  Set the last element of each
2238 	 * run first, in case of single-page runs.
2239 	 */
2240 	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
2241 	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2242 	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2243 	    pageind+head_npages-1)));
2244 	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
2245 	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
2246 
2247 	if (config_debug) {
2248 		UNUSED size_t tail_npages = newsize >> LG_PAGE;
2249 		assert(arena_mapbits_large_size_get(chunk,
2250 		    pageind+head_npages+tail_npages-1) == 0);
2251 		assert(arena_mapbits_dirty_get(chunk,
2252 		    pageind+head_npages+tail_npages-1) == flag_dirty);
2253 	}
2254 	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
2255 	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2256 	    pageind+head_npages)));
2257 
2258 	arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
2259 	    0));
2260 }
2261 
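/*
 * Split the trailing (oldsize - newsize) bytes off of a large run and return
 * them to the arena (dirty if requested), leaving a leading run of newsize
 * bytes in use.
 */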
2262 static void
2263 arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2264     arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
2265 {
2266 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2267 	size_t pageind = arena_miscelm_to_pageind(miscelm);
2268 	size_t head_npages = newsize >> LG_PAGE;
2269 	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
2270 	size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2271 	size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2272 	    CHUNK_MAP_UNZEROED : 0;
2273 	arena_chunk_map_misc_t *tail_miscelm;
2274 	arena_run_t *tail_run;
2275 
2276 	assert(oldsize > newsize);
2277 
2278 	/*
2279 	 * Update the chunk map so that arena_run_dalloc() can treat the
2280 	 * trailing run as separately allocated.  Set the last element of each
2281 	 * run first, in case of single-page runs.
2282 	 */
2283 	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
2284 	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2285 	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2286 	    pageind+head_npages-1)));
2287 	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
2288 	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
2289 
2290 	if (config_debug) {
2291 		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
2292 		assert(arena_mapbits_large_size_get(chunk,
2293 		    pageind+head_npages+tail_npages-1) == 0);
2294 		assert(arena_mapbits_dirty_get(chunk,
2295 		    pageind+head_npages+tail_npages-1) == flag_dirty);
2296 	}
2297 	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
2298 	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2299 	    pageind+head_npages)));
2300 
2301 	tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
2302 	tail_run = &tail_miscelm->run;
2303 	arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
2304 	    != 0));
2305 }
2306 
2307 static void
2308 arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
2309 {
2310 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2311 
2312 	arena_run_heap_insert(&bin->runs, miscelm);
2313 }
2314 
2315 static arena_run_t *
2316 arena_bin_nonfull_run_tryget(arena_bin_t *bin)
2317 {
2318 	arena_chunk_map_misc_t *miscelm;
2319 
2320 	miscelm = arena_run_heap_remove_first(&bin->runs);
2321 	if (miscelm == NULL)
2322 		return (NULL);
2323 	if (config_stats)
2324 		bin->stats.reruns++;
2325 
2326 	return (&miscelm->run);
2327 }
2328 
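/*
 * Get a run with at least one free region for the bin, preferring an existing
 * non-full run and falling back to allocating a new run.  bin->lock is
 * dropped while arena->lock is held for run allocation; returns NULL on OOM.
 */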
2329 static arena_run_t *
2330 arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
2331 {
2332 	arena_run_t *run;
2333 	szind_t binind;
2334 	arena_bin_info_t *bin_info;
2335 
2336 	/* Look for a usable run. */
2337 	run = arena_bin_nonfull_run_tryget(bin);
2338 	if (run != NULL)
2339 		return (run);
2340 	/* No existing runs have any space available. */
2341 
2342 	binind = arena_bin_index(arena, bin);
2343 	bin_info = &arena_bin_info[binind];
2344 
2345 	/* Allocate a new run. */
2346 	malloc_mutex_unlock(tsdn, &bin->lock);
2347 	/******************************/
2348 	malloc_mutex_lock(tsdn, &arena->lock);
2349 	run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
2350 	if (run != NULL) {
2351 		/* Initialize run internals. */
2352 		run->binind = binind;
2353 		run->nfree = bin_info->nregs;
2354 		bitmap_init(run->bitmap, &bin_info->bitmap_info);
2355 	}
2356 	malloc_mutex_unlock(tsdn, &arena->lock);
2357 	/********************************/
2358 	malloc_mutex_lock(tsdn, &bin->lock);
2359 	if (run != NULL) {
2360 		if (config_stats) {
2361 			bin->stats.nruns++;
2362 			bin->stats.curruns++;
2363 		}
2364 		return (run);
2365 	}
2366 
2367 	/*
2368 	 * arena_run_alloc_small() failed, but another thread may have made
2369 	 * sufficient memory available while this one dropped bin->lock above,
2370 	 * so search one more time.
2371 	 */
2372 	run = arena_bin_nonfull_run_tryget(bin);
2373 	if (run != NULL)
2374 		return (run);
2375 
2376 	return (NULL);
2377 }
2378 
2379 /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
2380 static void *
2381 arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
2382 {
2383 	szind_t binind;
2384 	arena_bin_info_t *bin_info;
2385 	arena_run_t *run;
2386 
2387 	binind = arena_bin_index(arena, bin);
2388 	bin_info = &arena_bin_info[binind];
2389 	bin->runcur = NULL;
2390 	run = arena_bin_nonfull_run_get(tsdn, arena, bin);
2391 	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
2392 		/*
2393 		 * Another thread updated runcur while this one ran without the
2394 		 * bin lock in arena_bin_nonfull_run_get().
2395 		 */
2396 		void *ret;
2397 		assert(bin->runcur->nfree > 0);
2398 		ret = arena_run_reg_alloc(bin->runcur, bin_info);
2399 		if (run != NULL) {
2400 			arena_chunk_t *chunk;
2401 
2402 			/*
2403 			 * arena_run_alloc_small() may have allocated run, or
2404 			 * it may have pulled run from the bin's run heap.
2405 			 * Therefore it is unsafe to make any assumptions about
2406 			 * how run has previously been used, and
2407 			 * arena_bin_lower_run() must be called, as if a region
2408 			 * were just deallocated from the run.
2409 			 */
2410 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
2411 			if (run->nfree == bin_info->nregs) {
2412 				arena_dalloc_bin_run(tsdn, arena, chunk, run,
2413 				    bin);
2414 			} else
2415 				arena_bin_lower_run(arena, run, bin);
2416 		}
2417 		return (ret);
2418 	}
2419 
2420 	if (run == NULL)
2421 		return (NULL);
2422 
2423 	bin->runcur = run;
2424 
2425 	assert(bin->runcur->nfree > 0);
2426 
2427 	return (arena_run_reg_alloc(bin->runcur, bin_info));
2428 }
2429 
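/*
 * Fill tbin with up to (ncached_max >> lg_fill_div) regions of size class
 * binind on behalf of the tcache, stopping early on OOM.
 */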
2430 void
2431 arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
2432     szind_t binind, uint64_t prof_accumbytes)
2433 {
2434 	unsigned i, nfill;
2435 	arena_bin_t *bin;
2436 
2437 	assert(tbin->ncached == 0);
2438 
2439 	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
2440 		prof_idump(tsdn);
2441 	bin = &arena->bins[binind];
2442 	malloc_mutex_lock(tsdn, &bin->lock);
2443 	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
2444 	    tbin->lg_fill_div); i < nfill; i++) {
2445 		arena_run_t *run;
2446 		void *ptr;
2447 		if ((run = bin->runcur) != NULL && run->nfree > 0)
2448 			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
2449 		else
2450 			ptr = arena_bin_malloc_hard(tsdn, arena, bin);
2451 		if (ptr == NULL) {
2452 			/*
2453 			 * OOM.  tbin->avail isn't yet filled down to its first
2454 			 * element, so the successful allocations (if any) must
2455 			 * be moved just before tbin->avail before bailing out.
2456 			 */
2457 			if (i > 0) {
2458 				memmove(tbin->avail - i, tbin->avail - nfill,
2459 				    i * sizeof(void *));
2460 			}
2461 			break;
2462 		}
2463 		if (config_fill && unlikely(opt_junk_alloc)) {
2464 			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
2465 			    true);
2466 		}
2467 		/* Insert such that low regions get used first. */
2468 		*(tbin->avail - nfill + i) = ptr;
2469 	}
2470 	if (config_stats) {
2471 		bin->stats.nmalloc += i;
2472 		bin->stats.nrequests += tbin->tstats.nrequests;
2473 		bin->stats.curregs += i;
2474 		bin->stats.nfills++;
2475 		tbin->tstats.nrequests = 0;
2476 	}
2477 	malloc_mutex_unlock(tsdn, &bin->lock);
2478 	tbin->ncached = i;
2479 	arena_decay_tick(tsdn, arena);
2480 }
2481 
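/*
 * Junk-fill a newly allocated small region.  If zero is true, only the
 * redzones are junk-filled (the region itself will subsequently be zeroed);
 * otherwise the entire region interval, redzones included, is junk-filled.
 */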
2482 void
2483 arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
2484 {
2485 
2486 	size_t redzone_size = bin_info->redzone_size;
2487 
2488 	if (zero) {
2489 		memset((void *)((uintptr_t)ptr - redzone_size),
2490 		    JEMALLOC_ALLOC_JUNK, redzone_size);
2491 		memset((void *)((uintptr_t)ptr + bin_info->reg_size),
2492 		    JEMALLOC_ALLOC_JUNK, redzone_size);
2493 	} else {
2494 		memset((void *)((uintptr_t)ptr - redzone_size),
2495 		    JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
2496 	}
2497 }
2498 
2499 #ifdef JEMALLOC_JET
2500 #undef arena_redzone_corruption
2501 #define	arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
2502 #endif
2503 static void
2504 arena_redzone_corruption(void *ptr, size_t usize, bool after,
2505     size_t offset, uint8_t byte)
2506 {
2507 
2508 	malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
2509 	    "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
2510 	    after ? "after" : "before", ptr, usize, byte);
2511 }
2512 #ifdef JEMALLOC_JET
2513 #undef arena_redzone_corruption
2514 #define	arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
2515 arena_redzone_corruption_t *arena_redzone_corruption =
2516     JEMALLOC_N(n_arena_redzone_corruption);
2517 #endif
2518 
2519 static void
2520 arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
2521 {
2522 	bool error = false;
2523 
2524 	if (opt_junk_alloc) {
2525 		size_t size = bin_info->reg_size;
2526 		size_t redzone_size = bin_info->redzone_size;
2527 		size_t i;
2528 
2529 		for (i = 1; i <= redzone_size; i++) {
2530 			uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
2531 			if (*byte != JEMALLOC_ALLOC_JUNK) {
2532 				error = true;
2533 				arena_redzone_corruption(ptr, size, false, i,
2534 				    *byte);
2535 				if (reset)
2536 					*byte = JEMALLOC_ALLOC_JUNK;
2537 			}
2538 		}
2539 		for (i = 0; i < redzone_size; i++) {
2540 			uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
2541 			if (*byte != JEMALLOC_ALLOC_JUNK) {
2542 				error = true;
2543 				arena_redzone_corruption(ptr, size, true, i,
2544 				    *byte);
2545 				if (reset)
2546 					*byte = JEMALLOC_ALLOC_JUNK;
2547 			}
2548 		}
2549 	}
2550 
2551 	if (opt_abort && error)
2552 		abort();
2553 }
2554 
2555 #ifdef JEMALLOC_JET
2556 #undef arena_dalloc_junk_small
2557 #define	arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
2558 #endif
2559 void
2560 arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
2561 {
2562 	size_t redzone_size = bin_info->redzone_size;
2563 
2564 	arena_redzones_validate(ptr, bin_info, false);
2565 	memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
2566 	    bin_info->reg_interval);
2567 }
2568 #ifdef JEMALLOC_JET
2569 #undef arena_dalloc_junk_small
2570 #define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
2571 arena_dalloc_junk_small_t *arena_dalloc_junk_small =
2572     JEMALLOC_N(n_arena_dalloc_junk_small);
2573 #endif
2574 
2575 void
2576 arena_quarantine_junk_small(void *ptr, size_t usize)
2577 {
2578 	szind_t binind;
2579 	arena_bin_info_t *bin_info;
2580 	cassert(config_fill);
2581 	assert(opt_junk_free);
2582 	assert(opt_quarantine);
2583 	assert(usize <= SMALL_MAXCLASS);
2584 
2585 	binind = size2index(usize);
2586 	bin_info = &arena_bin_info[binind];
2587 	arena_redzones_validate(ptr, bin_info, true);
2588 }
2589 
2590 static void *
2591 arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
2592 {
2593 	void *ret;
2594 	arena_bin_t *bin;
2595 	size_t usize;
2596 	arena_run_t *run;
2597 
2598 	assert(binind < NBINS);
2599 	bin = &arena->bins[binind];
2600 	usize = index2size(binind);
2601 
2602 	malloc_mutex_lock(tsdn, &bin->lock);
2603 	if ((run = bin->runcur) != NULL && run->nfree > 0)
2604 		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
2605 	else
2606 		ret = arena_bin_malloc_hard(tsdn, arena, bin);
2607 
2608 	if (ret == NULL) {
2609 		malloc_mutex_unlock(tsdn, &bin->lock);
2610 		return (NULL);
2611 	}
2612 
2613 	if (config_stats) {
2614 		bin->stats.nmalloc++;
2615 		bin->stats.nrequests++;
2616 		bin->stats.curregs++;
2617 	}
2618 	malloc_mutex_unlock(tsdn, &bin->lock);
2619 	if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
2620 		prof_idump(tsdn);
2621 
2622 	if (!zero) {
2623 		if (config_fill) {
2624 			if (unlikely(opt_junk_alloc)) {
2625 				arena_alloc_junk_small(ret,
2626 				    &arena_bin_info[binind], false);
2627 			} else if (unlikely(opt_zero))
2628 				memset(ret, 0, usize);
2629 		}
2630 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2631 	} else {
2632 		if (config_fill && unlikely(opt_junk_alloc)) {
2633 			arena_alloc_junk_small(ret, &arena_bin_info[binind],
2634 			    true);
2635 		}
2636 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2637 		memset(ret, 0, usize);
2638 	}
2639 
2640 	arena_decay_tick(tsdn, arena);
2641 	return (ret);
2642 }
2643 
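/*
 * Allocate a large run of size class binind.  When cache-oblivious allocation
 * is enabled, the returned pointer is offset from the run base by a random
 * cacheline-aligned amount within the first page.
 */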
2644 void *
2645 arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
2646 {
2647 	void *ret;
2648 	size_t usize;
2649 	uintptr_t random_offset;
2650 	arena_run_t *run;
2651 	arena_chunk_map_misc_t *miscelm;
2652 	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
2653 
2654 	/* Large allocation. */
2655 	usize = index2size(binind);
2656 	malloc_mutex_lock(tsdn, &arena->lock);
2657 	if (config_cache_oblivious) {
2658 		uint64_t r;
2659 
2660 		/*
2661 		 * Compute a uniformly distributed offset within the first page
2662 		 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
2663 		 * for 4 KiB pages and 64-byte cachelines.
2664 		 */
2665 		r = prng_lg_range_zu(&arena->offset_state, LG_PAGE -
2666 		    LG_CACHELINE, false);
2667 		random_offset = ((uintptr_t)r) << LG_CACHELINE;
2668 	} else
2669 		random_offset = 0;
2670 	run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
2671 	if (run == NULL) {
2672 		malloc_mutex_unlock(tsdn, &arena->lock);
2673 		return (NULL);
2674 	}
2675 	miscelm = arena_run_to_miscelm(run);
2676 	ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
2677 	    random_offset);
2678 	if (config_stats) {
2679 		szind_t index = binind - NBINS;
2680 
2681 		arena->stats.nmalloc_large++;
2682 		arena->stats.nrequests_large++;
2683 		arena->stats.allocated_large += usize;
2684 		arena->stats.lstats[index].nmalloc++;
2685 		arena->stats.lstats[index].nrequests++;
2686 		arena->stats.lstats[index].curruns++;
2687 	}
2688 	if (config_prof)
2689 		idump = arena_prof_accum_locked(arena, usize);
2690 	malloc_mutex_unlock(tsdn, &arena->lock);
2691 	if (config_prof && idump)
2692 		prof_idump(tsdn);
2693 
2694 	if (!zero) {
2695 		if (config_fill) {
2696 			if (unlikely(opt_junk_alloc))
2697 				memset(ret, JEMALLOC_ALLOC_JUNK, usize);
2698 			else if (unlikely(opt_zero))
2699 				memset(ret, 0, usize);
2700 		}
2701 	}
2702 
2703 	arena_decay_tick(tsdn, arena);
2704 	return (ret);
2705 }
2706 
2707 void *
2708 arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
2709     bool zero)
2710 {
2711 
2712 	assert(!tsdn_null(tsdn) || arena != NULL);
2713 
2714 	if (likely(!tsdn_null(tsdn)))
2715 		arena = arena_choose(tsdn_tsd(tsdn), arena);
2716 	if (unlikely(arena == NULL))
2717 		return (NULL);
2718 
2719 	if (likely(size <= SMALL_MAXCLASS))
2720 		return (arena_malloc_small(tsdn, arena, ind, zero));
2721 	if (likely(size <= large_maxclass))
2722 		return (arena_malloc_large(tsdn, arena, ind, zero));
2723 	assert(index2size(ind) >= chunksize);
2724 	return (huge_malloc(tsdn, arena, index2size(ind), zero));
2725 }
2726 
2727 /* Only handles large allocations that require more than page alignment. */
2728 static void *
2729 arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
2730     bool zero)
2731 {
2732 	void *ret;
2733 	size_t alloc_size, leadsize, trailsize;
2734 	arena_run_t *run;
2735 	arena_chunk_t *chunk;
2736 	arena_chunk_map_misc_t *miscelm;
2737 	void *rpages;
2738 
2739 	assert(!tsdn_null(tsdn) || arena != NULL);
2740 	assert(usize == PAGE_CEILING(usize));
2741 
2742 	if (likely(!tsdn_null(tsdn)))
2743 		arena = arena_choose(tsdn_tsd(tsdn), arena);
2744 	if (unlikely(arena == NULL))
2745 		return (NULL);
2746 
2747 	alignment = PAGE_CEILING(alignment);
2748 	alloc_size = usize + large_pad + alignment - PAGE;
2749 
2750 	malloc_mutex_lock(tsdn, &arena->lock);
2751 	run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
2752 	if (run == NULL) {
2753 		malloc_mutex_unlock(tsdn, &arena->lock);
2754 		return (NULL);
2755 	}
2756 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
2757 	miscelm = arena_run_to_miscelm(run);
2758 	rpages = arena_miscelm_to_rpages(miscelm);
2759 
2760 	leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
2761 	    (uintptr_t)rpages;
2762 	assert(alloc_size >= leadsize + usize);
2763 	trailsize = alloc_size - leadsize - usize - large_pad;
2764 	if (leadsize != 0) {
2765 		arena_chunk_map_misc_t *head_miscelm = miscelm;
2766 		arena_run_t *head_run = run;
2767 
2768 		miscelm = arena_miscelm_get_mutable(chunk,
2769 		    arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
2770 		    LG_PAGE));
2771 		run = &miscelm->run;
2772 
2773 		arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
2774 		    alloc_size - leadsize);
2775 	}
2776 	if (trailsize != 0) {
2777 		arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
2778 		    trailsize, usize + large_pad, false);
2779 	}
2780 	if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
2781 		size_t run_ind =
2782 		    arena_miscelm_to_pageind(arena_run_to_miscelm(run));
2783 		bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
2784 		bool decommitted = (arena_mapbits_decommitted_get(chunk,
2785 		    run_ind) != 0);
2786 
2787 		assert(decommitted); /* Cause of OOM. */
2788 		arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
2789 		malloc_mutex_unlock(tsdn, &arena->lock);
2790 		return (NULL);
2791 	}
2792 	ret = arena_miscelm_to_rpages(miscelm);
2793 
2794 	if (config_stats) {
2795 		szind_t index = size2index(usize) - NBINS;
2796 
2797 		arena->stats.nmalloc_large++;
2798 		arena->stats.nrequests_large++;
2799 		arena->stats.allocated_large += usize;
2800 		arena->stats.lstats[index].nmalloc++;
2801 		arena->stats.lstats[index].nrequests++;
2802 		arena->stats.lstats[index].curruns++;
2803 	}
2804 	malloc_mutex_unlock(tsdn, &arena->lock);
2805 
2806 	if (config_fill && !zero) {
2807 		if (unlikely(opt_junk_alloc))
2808 			memset(ret, JEMALLOC_ALLOC_JUNK, usize);
2809 		else if (unlikely(opt_zero))
2810 			memset(ret, 0, usize);
2811 	}
2812 	arena_decay_tick(tsdn, arena);
2813 	return (ret);
2814 }
2815 
2816 void *
2817 arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
2818     bool zero, tcache_t *tcache)
2819 {
2820 	void *ret;
2821 
2822 	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
2823 	    && (usize & PAGE_MASK) == 0))) {
2824 		/* Small; alignment doesn't require special run placement. */
2825 		ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
2826 		    tcache, true);
2827 	} else if (usize <= large_maxclass && alignment <= PAGE) {
2828 		/*
2829 		 * Large; alignment doesn't require special run placement.
2830 		 * However, the cached pointer may be at a random offset from
2831 		 * the base of the run, so do some bit manipulation to retrieve
2832 		 * the base.
2833 		 */
2834 		ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
2835 		    tcache, true);
2836 		if (config_cache_oblivious)
2837 			ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
2838 	} else {
2839 		if (likely(usize <= large_maxclass)) {
2840 			ret = arena_palloc_large(tsdn, arena, usize, alignment,
2841 			    zero);
2842 		} else if (likely(alignment <= chunksize))
2843 			ret = huge_malloc(tsdn, arena, usize, zero);
2844 		else {
2845 			ret = huge_palloc(tsdn, arena, usize, alignment, zero);
2846 		}
2847 	}
2848 	return (ret);
2849 }
2850 
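/*
 * Record the original (small) size class of an allocation that was promoted
 * to LARGE_MINCLASS for profiling, so that demoted size queries report the
 * requested size class rather than LARGE_MINCLASS.
 */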
2851 void
2852 arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
2853 {
2854 	arena_chunk_t *chunk;
2855 	size_t pageind;
2856 	szind_t binind;
2857 
2858 	cassert(config_prof);
2859 	assert(ptr != NULL);
2860 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
2861 	assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
2862 	assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
2863 	assert(size <= SMALL_MAXCLASS);
2864 
2865 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
2866 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2867 	binind = size2index(size);
2868 	assert(binind < NBINS);
2869 	arena_mapbits_large_binind_set(chunk, pageind, binind);
2870 
2871 	assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
2872 	assert(isalloc(tsdn, ptr, true) == size);
2873 }
2874 
2875 static void
2876 arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
2877     arena_bin_t *bin)
2878 {
2879 
2880 	/* Dissociate run from bin. */
2881 	if (run == bin->runcur)
2882 		bin->runcur = NULL;
2883 	else {
2884 		szind_t binind = arena_bin_index(extent_node_arena_get(
2885 		    &chunk->node), bin);
2886 		arena_bin_info_t *bin_info = &arena_bin_info[binind];
2887 
2888 		/*
2889 		 * The following block's conditional is necessary because if the
2890 		 * run only contains one region, then it never gets inserted
2891 		 * into the non-full runs heap.
2892 		 */
2893 		if (bin_info->nregs != 1) {
2894 			arena_chunk_map_misc_t *miscelm =
2895 			    arena_run_to_miscelm(run);
2896 
2897 			arena_run_heap_remove(&bin->runs, miscelm);
2898 		}
2899 	}
2900 }
2901 
2902 static void
2903 arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2904     arena_run_t *run, arena_bin_t *bin)
2905 {
2906 
2907 	assert(run != bin->runcur);
2908 
2909 	malloc_mutex_unlock(tsdn, &bin->lock);
2910 	/******************************/
2911 	malloc_mutex_lock(tsdn, &arena->lock);
2912 	arena_run_dalloc(tsdn, arena, run, true, false, false);
2913 	malloc_mutex_unlock(tsdn, &arena->lock);
2914 	/****************************/
2915 	malloc_mutex_lock(tsdn, &bin->lock);
2916 	if (config_stats)
2917 		bin->stats.curruns--;
2918 }
2919 
2920 static void
2921 arena_bin_lower_run(arena_t *arena, arena_run_t *run, arena_bin_t *bin)
2922 {
2923 
2924 	/*
2925 	 * Make sure that if bin->runcur is non-NULL, it refers to the
2926 	 * oldest/lowest non-full run.  It is okay to NULL runcur out rather
2927 	 * than proactively keeping it pointing at the oldest/lowest non-full
2928 	 * run.
2929 	 */
2930 	if (bin->runcur != NULL &&
2931 	    arena_snad_comp(arena_run_to_miscelm(bin->runcur),
2932 	    arena_run_to_miscelm(run)) > 0) {
2933 		/* Switch runcur. */
2934 		if (bin->runcur->nfree > 0)
2935 			arena_bin_runs_insert(bin, bin->runcur);
2936 		bin->runcur = run;
2937 		if (config_stats)
2938 			bin->stats.reruns++;
2939 	} else
2940 		arena_bin_runs_insert(bin, run);
2941 }
2942 
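/*
 * Deallocate a small region back into its run, with bin->lock held.  If the
 * run becomes empty it is returned to the arena; if it just transitioned from
 * full to non-full it is made available via arena_bin_lower_run().
 */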
2943 static void
2944 arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2945     void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
2946 {
2947 	size_t pageind, rpages_ind;
2948 	arena_run_t *run;
2949 	arena_bin_t *bin;
2950 	arena_bin_info_t *bin_info;
2951 	szind_t binind;
2952 
2953 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2954 	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
2955 	run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
2956 	binind = run->binind;
2957 	bin = &arena->bins[binind];
2958 	bin_info = &arena_bin_info[binind];
2959 
2960 	if (!junked && config_fill && unlikely(opt_junk_free))
2961 		arena_dalloc_junk_small(ptr, bin_info);
2962 
2963 	arena_run_reg_dalloc(run, ptr);
2964 	if (run->nfree == bin_info->nregs) {
2965 		arena_dissociate_bin_run(chunk, run, bin);
2966 		arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
2967 	} else if (run->nfree == 1 && run != bin->runcur)
2968 		arena_bin_lower_run(arena, run, bin);
2969 
2970 	if (config_stats) {
2971 		bin->stats.ndalloc++;
2972 		bin->stats.curregs--;
2973 	}
2974 }
2975 
2976 void
2977 arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
2978     arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
2979 {
2980 
2981 	arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
2982 }
2983 
2984 void
2985 arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
2986     size_t pageind, arena_chunk_map_bits_t *bitselm)
2987 {
2988 	arena_run_t *run;
2989 	arena_bin_t *bin;
2990 	size_t rpages_ind;
2991 
2992 	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
2993 	run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
2994 	bin = &arena->bins[run->binind];
2995 	malloc_mutex_lock(tsdn, &bin->lock);
2996 	arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
2997 	malloc_mutex_unlock(tsdn, &bin->lock);
2998 }
2999 
3000 void
3001 arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
3002     void *ptr, size_t pageind)
3003 {
3004 	arena_chunk_map_bits_t *bitselm;
3005 
3006 	if (config_debug) {
3007 		/* arena_ptr_small_binind_get() does extra sanity checking. */
3008 		assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
3009 		    pageind)) != BININD_INVALID);
3010 	}
3011 	bitselm = arena_bitselm_get_mutable(chunk, pageind);
3012 	arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
3013 	arena_decay_tick(tsdn, arena);
3014 }
3015 
3016 #ifdef JEMALLOC_JET
3017 #undef arena_dalloc_junk_large
3018 #define	arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
3019 #endif
3020 void
3021 arena_dalloc_junk_large(void *ptr, size_t usize)
3022 {
3023 
3024 	if (config_fill && unlikely(opt_junk_free))
3025 		memset(ptr, JEMALLOC_FREE_JUNK, usize);
3026 }
3027 #ifdef JEMALLOC_JET
3028 #undef arena_dalloc_junk_large
3029 #define	arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
3030 arena_dalloc_junk_large_t *arena_dalloc_junk_large =
3031     JEMALLOC_N(n_arena_dalloc_junk_large);
3032 #endif
3033 
3034 static void
3035 arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
3036     arena_chunk_t *chunk, void *ptr, bool junked)
3037 {
3038 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
3039 	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
3040 	    pageind);
3041 	arena_run_t *run = &miscelm->run;
3042 
3043 	if (config_fill || config_stats) {
3044 		size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
3045 		    large_pad;
3046 
3047 		if (!junked)
3048 			arena_dalloc_junk_large(ptr, usize);
3049 		if (config_stats) {
3050 			szind_t index = size2index(usize) - NBINS;
3051 
3052 			arena->stats.ndalloc_large++;
3053 			arena->stats.allocated_large -= usize;
3054 			arena->stats.lstats[index].ndalloc++;
3055 			arena->stats.lstats[index].curruns--;
3056 		}
3057 	}
3058 
3059 	arena_run_dalloc(tsdn, arena, run, true, false, false);
3060 }
3061 
3062 void
3063 arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
3064     arena_chunk_t *chunk, void *ptr)
3065 {
3066 
3067 	arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
3068 }
3069 
3070 void
3071 arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
3072     void *ptr)
3073 {
3074 
3075 	malloc_mutex_lock(tsdn, &arena->lock);
3076 	arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
3077 	malloc_mutex_unlock(tsdn, &arena->lock);
3078 	arena_decay_tick(tsdn, arena);
3079 }
3080 
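/*
 * Shrink a large allocation in place by trimming its trailing pages and
 * updating the large allocation stats accordingly.
 */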
3081 static void
3082 arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
3083     void *ptr, size_t oldsize, size_t size)
3084 {
3085 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
3086 	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
3087 	    pageind);
3088 	arena_run_t *run = &miscelm->run;
3089 
3090 	assert(size < oldsize);
3091 
3092 	/*
3093 	 * Shrink the run, and make trailing pages available for other
3094 	 * allocations.
3095 	 */
3096 	malloc_mutex_lock(tsdn, &arena->lock);
3097 	arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
3098 	    large_pad, true);
3099 	if (config_stats) {
3100 		szind_t oldindex = size2index(oldsize) - NBINS;
3101 		szind_t index = size2index(size) - NBINS;
3102 
3103 		arena->stats.ndalloc_large++;
3104 		arena->stats.allocated_large -= oldsize;
3105 		arena->stats.lstats[oldindex].ndalloc++;
3106 		arena->stats.lstats[oldindex].curruns--;
3107 
3108 		arena->stats.nmalloc_large++;
3109 		arena->stats.nrequests_large++;
3110 		arena->stats.allocated_large += size;
3111 		arena->stats.lstats[index].nmalloc++;
3112 		arena->stats.lstats[index].nrequests++;
3113 		arena->stats.lstats[index].curruns++;
3114 	}
3115 	malloc_mutex_unlock(tsdn, &arena->lock);
3116 }
3117 
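/*
 * Try to grow a large allocation in place by splitting the immediately
 * following unallocated run.  Returns false on success, true on failure.
 */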
3118 static bool
3119 arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
3120     void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
3121 {
3122 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
3123 	size_t npages = (oldsize + large_pad) >> LG_PAGE;
3124 	size_t followsize;
3125 
3126 	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
3127 	    large_pad);
3128 
3129 	/* Try to extend the run. */
3130 	malloc_mutex_lock(tsdn, &arena->lock);
3131 	if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
3132 	    pageind+npages) != 0)
3133 		goto label_fail;
3134 	followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
3135 	if (oldsize + followsize >= usize_min) {
3136 		/*
3137 		 * The next run is available and sufficiently large.  Split the
3138 		 * following run, then merge the first part with the existing
3139 		 * allocation.
3140 		 */
3141 		arena_run_t *run;
3142 		size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
3143 
3144 		usize = usize_max;
3145 		while (oldsize + followsize < usize)
3146 			usize = index2size(size2index(usize)-1);
3147 		assert(usize >= usize_min);
3148 		assert(usize >= oldsize);
3149 		splitsize = usize - oldsize;
3150 		if (splitsize == 0)
3151 			goto label_fail;
3152 
3153 		run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
3154 		if (arena_run_split_large(arena, run, splitsize, zero))
3155 			goto label_fail;
3156 
3157 		if (config_cache_oblivious && zero) {
3158 			/*
3159 			 * Zero the trailing bytes of the original allocation's
3160 			 * last page, since they are in an indeterminate state.
3161 			 * There will always be trailing bytes, because ptr's
3162 			 * offset from the beginning of the run is a multiple of
3163 			 * CACHELINE in [0 .. PAGE).
3164 			 */
3165 			void *zbase = (void *)((uintptr_t)ptr + oldsize);
3166 			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
3167 			    PAGE));
3168 			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
3169 			assert(nzero > 0);
3170 			memset(zbase, 0, nzero);
3171 		}
3172 
3173 		size = oldsize + splitsize;
3174 		npages = (size + large_pad) >> LG_PAGE;
3175 
3176 		/*
3177 		 * Mark the extended run as dirty if either portion of the run
3178 		 * was dirty before allocation.  This is rather pedantic,
3179 		 * because there's not actually any sequence of events that
3180 		 * could cause the resulting run to be passed to
3181 		 * arena_run_dalloc() with the dirty argument set to false
3182 		 * (which is when dirty flag consistency would really matter).
3183 		 */
3184 		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
3185 		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
3186 		flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
3187 		arena_mapbits_large_set(chunk, pageind, size + large_pad,
3188 		    flag_dirty | (flag_unzeroed_mask &
3189 		    arena_mapbits_unzeroed_get(chunk, pageind)));
3190 		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
3191 		    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
3192 		    pageind+npages-1)));
3193 
3194 		if (config_stats) {
3195 			szind_t oldindex = size2index(oldsize) - NBINS;
3196 			szind_t index = size2index(size) - NBINS;
3197 
3198 			arena->stats.ndalloc_large++;
3199 			arena->stats.allocated_large -= oldsize;
3200 			arena->stats.lstats[oldindex].ndalloc++;
3201 			arena->stats.lstats[oldindex].curruns--;
3202 
3203 			arena->stats.nmalloc_large++;
3204 			arena->stats.nrequests_large++;
3205 			arena->stats.allocated_large += size;
3206 			arena->stats.lstats[index].nmalloc++;
3207 			arena->stats.lstats[index].nrequests++;
3208 			arena->stats.lstats[index].curruns++;
3209 		}
3210 		malloc_mutex_unlock(tsdn, &arena->lock);
3211 		return (false);
3212 	}
3213 label_fail:
3214 	malloc_mutex_unlock(tsdn, &arena->lock);
3215 	return (true);
3216 }
3217 
3218 #ifdef JEMALLOC_JET
3219 #undef arena_ralloc_junk_large
3220 #define	arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
3221 #endif
3222 static void
3223 arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
3224 {
3225 
3226 	if (config_fill && unlikely(opt_junk_free)) {
3227 		memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
3228 		    old_usize - usize);
3229 	}
3230 }
3231 #ifdef JEMALLOC_JET
3232 #undef arena_ralloc_junk_large
3233 #define	arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
3234 arena_ralloc_junk_large_t *arena_ralloc_junk_large =
3235     JEMALLOC_N(n_arena_ralloc_junk_large);
3236 #endif
3237 
3238 /*
3239  * Try to resize a large allocation in place, in order to avoid copying.
3240  * Growing will always fail if the run following the allocation is in use.
3241  */
3242 static bool
3243 arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
3244     size_t usize_max, bool zero)
3245 {
3246 	arena_chunk_t *chunk;
3247 	arena_t *arena;
3248 
3249 	if (oldsize == usize_max) {
3250 		/* Current size class is compatible and maximal. */
3251 		return (false);
3252 	}
3253 
3254 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3255 	arena = extent_node_arena_get(&chunk->node);
3256 
3257 	if (oldsize < usize_max) {
3258 		bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
3259 		    oldsize, usize_min, usize_max, zero);
3260 		if (config_fill && !ret && !zero) {
3261 			if (unlikely(opt_junk_alloc)) {
3262 				memset((void *)((uintptr_t)ptr + oldsize),
3263 				    JEMALLOC_ALLOC_JUNK,
3264 				    isalloc(tsdn, ptr, config_prof) - oldsize);
3265 			} else if (unlikely(opt_zero)) {
3266 				memset((void *)((uintptr_t)ptr + oldsize), 0,
3267 				    isalloc(tsdn, ptr, config_prof) - oldsize);
3268 			}
3269 		}
3270 		return (ret);
3271 	}
3272 
3273 	assert(oldsize > usize_max);
3274 	/* Fill before shrinking in order to avoid a race. */
3275 	arena_ralloc_junk_large(ptr, oldsize, usize_max);
3276 	arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
3277 	return (false);
3278 }
3279 
3280 bool
3281 arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
3282     size_t extra, bool zero)
3283 {
3284 	size_t usize_min, usize_max;
3285 
3286 	/* Calls with non-zero extra had to clamp extra. */
3287 	assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
3288 
3289 	if (unlikely(size > HUGE_MAXCLASS))
3290 		return (true);
3291 
3292 	usize_min = s2u(size);
3293 	usize_max = s2u(size + extra);
3294 	if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
3295 		arena_chunk_t *chunk;
3296 
3297 		/*
3298 		 * Avoid moving the allocation if the size class can be left the
3299 		 * same.
3300 		 */
3301 		if (oldsize <= SMALL_MAXCLASS) {
3302 			assert(arena_bin_info[size2index(oldsize)].reg_size ==
3303 			    oldsize);
3304 			if ((usize_max > SMALL_MAXCLASS ||
3305 			    size2index(usize_max) != size2index(oldsize)) &&
3306 			    (size > oldsize || usize_max < oldsize))
3307 				return (true);
3308 		} else {
3309 			if (usize_max <= SMALL_MAXCLASS)
3310 				return (true);
3311 			if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
3312 			    usize_max, zero))
3313 				return (true);
3314 		}
3315 
3316 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3317 		arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
3318 		return (false);
3319 	} else {
3320 		return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
3321 		    usize_max, zero));
3322 	}
3323 }
3324 
3325 static void *
3326 arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
3327     size_t alignment, bool zero, tcache_t *tcache)
3328 {
3329 
3330 	if (alignment == 0)
3331 		return (arena_malloc(tsdn, arena, usize, size2index(usize),
3332 		    zero, tcache, true));
3333 	usize = sa2u(usize, alignment);
3334 	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
3335 		return (NULL);
3336 	return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
3337 }
3338 
3339 void *
3340 arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
3341     size_t alignment, bool zero, tcache_t *tcache)
3342 {
3343 	void *ret;
3344 	size_t usize;
3345 
3346 	usize = s2u(size);
3347 	if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
3348 		return (NULL);
3349 
3350 	if (likely(usize <= large_maxclass)) {
3351 		size_t copysize;
3352 
3353 		/* Try to avoid moving the allocation. */
3354 		if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
3355 		    zero))
3356 			return (ptr);
3357 
3358 		/*
3359 		 * size and oldsize are different enough that we need to move
3360 		 * the object.  In that case, fall back to allocating new space
3361 		 * and copying.
3362 		 */
3363 		ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
3364 		    alignment, zero, tcache);
3365 		if (ret == NULL)
3366 			return (NULL);
3367 
3368 		/*
3369 		 * Junk/zero-filling were already done by
3370 		 * ipalloc()/arena_malloc().
3371 		 */
3372 
3373 		copysize = (usize < oldsize) ? usize : oldsize;
3374 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
3375 		memcpy(ret, ptr, copysize);
3376 		isqalloc(tsd, ptr, oldsize, tcache, true);
3377 	} else {
3378 		ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
3379 		    zero, tcache);
3380 	}
3381 	return (ret);
3382 }
3383 
3384 dss_prec_t
3385 arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
3386 {
3387 	dss_prec_t ret;
3388 
3389 	malloc_mutex_lock(tsdn, &arena->lock);
3390 	ret = arena->dss_prec;
3391 	malloc_mutex_unlock(tsdn, &arena->lock);
3392 	return (ret);
3393 }
3394 
3395 bool
3396 arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
3397 {
3398 
3399 	if (!have_dss)
3400 		return (dss_prec != dss_prec_disabled);
3401 	malloc_mutex_lock(tsdn, &arena->lock);
3402 	arena->dss_prec = dss_prec;
3403 	malloc_mutex_unlock(tsdn, &arena->lock);
3404 	return (false);
3405 }
3406 
3407 ssize_t
3408 arena_lg_dirty_mult_default_get(void)
3409 {
3410 
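	/*
	 * Only size_t atomics are available, so the (signed) default value is
	 * round-tripped through size_t casts here and in the setter below.
	 */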
3411 	return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
3412 }
3413 
3414 bool
3415 arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
3416 {
3417 
3418 	if (opt_purge != purge_mode_ratio)
3419 		return (true);
3420 	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
3421 		return (true);
3422 	atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
3423 	return (false);
3424 }
3425 
3426 ssize_t
3427 arena_decay_time_default_get(void)
3428 {
3429 
3430 	return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
3431 }
3432 
3433 bool
3434 arena_decay_time_default_set(ssize_t decay_time)
3435 {
3436 
3437 	if (opt_purge != purge_mode_decay)
3438 		return (true);
3439 	if (!arena_decay_time_valid(decay_time))
3440 		return (true);
3441 	atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
3442 	return (false);
3443 }
3444 
3445 static void
3446 arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
3447     const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3448     size_t *nactive, size_t *ndirty)
3449 {
3450 
3451 	*nthreads += arena_nthreads_get(arena, false);
3452 	*dss = dss_prec_names[arena->dss_prec];
3453 	*lg_dirty_mult = arena->lg_dirty_mult;
3454 	*decay_time = arena->decay.time;
3455 	*nactive += arena->nactive;
3456 	*ndirty += arena->ndirty;
3457 }
3458 
3459 void
3460 arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
3461     const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3462     size_t *nactive, size_t *ndirty)
3463 {
3464 
3465 	malloc_mutex_lock(tsdn, &arena->lock);
3466 	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3467 	    decay_time, nactive, ndirty);
3468 	malloc_mutex_unlock(tsdn, &arena->lock);
3469 }
3470 
3471 void
3472 arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
3473     const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3474     size_t *nactive, size_t *ndirty, arena_stats_t *astats,
3475     malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
3476     malloc_huge_stats_t *hstats)
3477 {
3478 	unsigned i;
3479 
3480 	cassert(config_stats);
3481 
3482 	malloc_mutex_lock(tsdn, &arena->lock);
3483 	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3484 	    decay_time, nactive, ndirty);
3485 
3486 	astats->mapped += arena->stats.mapped;
3487 	astats->retained += arena->stats.retained;
3488 	astats->npurge += arena->stats.npurge;
3489 	astats->nmadvise += arena->stats.nmadvise;
3490 	astats->purged += arena->stats.purged;
3491 	astats->metadata_mapped += arena->stats.metadata_mapped;
3492 	astats->metadata_allocated += arena_metadata_allocated_get(arena);
3493 	astats->allocated_large += arena->stats.allocated_large;
3494 	astats->nmalloc_large += arena->stats.nmalloc_large;
3495 	astats->ndalloc_large += arena->stats.ndalloc_large;
3496 	astats->nrequests_large += arena->stats.nrequests_large;
3497 	astats->allocated_huge += arena->stats.allocated_huge;
3498 	astats->nmalloc_huge += arena->stats.nmalloc_huge;
3499 	astats->ndalloc_huge += arena->stats.ndalloc_huge;
3500 
3501 	for (i = 0; i < nlclasses; i++) {
3502 		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
3503 		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
3504 		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
3505 		lstats[i].curruns += arena->stats.lstats[i].curruns;
3506 	}
3507 
3508 	for (i = 0; i < nhclasses; i++) {
3509 		hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
3510 		hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
3511 		hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
3512 	}
3513 	malloc_mutex_unlock(tsdn, &arena->lock);
3514 
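	/* Bin stats are protected by per-bin locks rather than arena->lock. */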
3515 	for (i = 0; i < NBINS; i++) {
3516 		arena_bin_t *bin = &arena->bins[i];
3517 
3518 		malloc_mutex_lock(tsdn, &bin->lock);
3519 		bstats[i].nmalloc += bin->stats.nmalloc;
3520 		bstats[i].ndalloc += bin->stats.ndalloc;
3521 		bstats[i].nrequests += bin->stats.nrequests;
3522 		bstats[i].curregs += bin->stats.curregs;
3523 		if (config_tcache) {
3524 			bstats[i].nfills += bin->stats.nfills;
3525 			bstats[i].nflushes += bin->stats.nflushes;
3526 		}
3527 		bstats[i].nruns += bin->stats.nruns;
3528 		bstats[i].reruns += bin->stats.reruns;
3529 		bstats[i].curruns += bin->stats.curruns;
3530 		malloc_mutex_unlock(tsdn, &bin->lock);
3531 	}
3532 }
3533 
3534 unsigned
3535 arena_nthreads_get(arena_t *arena, bool internal)
3536 {
3537 
3538 	return (atomic_read_u(&arena->nthreads[internal]));
3539 }
3540 
3541 void
3542 arena_nthreads_inc(arena_t *arena, bool internal)
3543 {
3544 
3545 	atomic_add_u(&arena->nthreads[internal], 1);
3546 }
3547 
3548 void
3549 arena_nthreads_dec(arena_t *arena, bool internal)
3550 {
3551 
3552 	atomic_sub_u(&arena->nthreads[internal], 1);
3553 }
3554 
3555 size_t
3556 arena_extent_sn_next(arena_t *arena)
3557 {
3558 
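	/*
	 * Fetch-and-increment: return the pre-increment value so that each
	 * caller receives a unique, monotonically increasing serial number.
	 */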
3559 	return (atomic_add_z(&arena->extent_sn_next, 1) - 1);
3560 }
3561 
3562 arena_t *
3563 arena_new(tsdn_t *tsdn, unsigned ind)
3564 {
3565 	arena_t *arena;
3566 	unsigned i;
3567 
3568 	/*
3569 	 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
3570 	 * because there is no way to clean up if base_alloc() OOMs.
3571 	 */
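	/*
	 * Layout of the stats-enabled allocation (illustrative):
	 *
	 *   arena_t            (padded to a cacheline boundary)
	 *   lstats[nlclasses]  (padded to a quantum boundary)
	 *   hstats[nhclasses]
	 */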
3572 	if (config_stats) {
3573 		arena = (arena_t *)base_alloc(tsdn,
3574 		    CACHELINE_CEILING(sizeof(arena_t)) +
3575 		    QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)))
3576 		    + (nhclasses * sizeof(malloc_huge_stats_t)));
3577 	} else
3578 		arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
3579 	if (arena == NULL)
3580 		return (NULL);
3581 
3582 	arena->ind = ind;
3583 	arena->nthreads[0] = arena->nthreads[1] = 0;
3584 	if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
3585 		return (NULL);
3586 
3587 	if (config_stats) {
3588 		memset(&arena->stats, 0, sizeof(arena_stats_t));
3589 		arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
3590 		    + CACHELINE_CEILING(sizeof(arena_t)));
3591 		memset(arena->stats.lstats, 0, nlclasses *
3592 		    sizeof(malloc_large_stats_t));
3593 		arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
3594 		    + CACHELINE_CEILING(sizeof(arena_t)) +
3595 		    QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
3596 		memset(arena->stats.hstats, 0, nhclasses *
3597 		    sizeof(malloc_huge_stats_t));
3598 		if (config_tcache)
3599 			ql_new(&arena->tcache_ql);
3600 	}
3601 
3602 	if (config_prof)
3603 		arena->prof_accumbytes = 0;
3604 
3605 	if (config_cache_oblivious) {
3606 		/*
3607 		 * A nondeterministic seed based on the address of arena reduces
3608 		 * the likelihood of lockstep non-uniform cache index
3609 		 * utilization among identical concurrent processes, but at the
3610 		 * cost of test repeatability.  For debug builds, instead use a
3611 		 * deterministic seed.
3612 		 */
3613 		arena->offset_state = config_debug ? ind :
3614 		    (size_t)(uintptr_t)arena;
3615 	}
3616 
3617 	arena->dss_prec = chunk_dss_prec_get();
3618 
3619 	ql_new(&arena->achunks);
3620 
3621 	arena->extent_sn_next = 0;
3622 
3623 	arena->spare = NULL;
3624 
3625 	arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
3626 	arena->purging = false;
3627 	arena->nactive = 0;
3628 	arena->ndirty = 0;
3629 
3630 	for (i = 0; i < NPSIZES; i++)
3631 		arena_run_heap_new(&arena->runs_avail[i]);
3632 
3633 	qr_new(&arena->runs_dirty, rd_link);
3634 	qr_new(&arena->chunks_cache, cc_link);
3635 
3636 	if (opt_purge == purge_mode_decay)
3637 		arena_decay_init(arena, arena_decay_time_default_get());
3638 
3639 	ql_new(&arena->huge);
3640 	if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
3641 	    WITNESS_RANK_ARENA_HUGE))
3642 		return (NULL);
3643 
3644 	extent_tree_szsnad_new(&arena->chunks_szsnad_cached);
3645 	extent_tree_ad_new(&arena->chunks_ad_cached);
3646 	extent_tree_szsnad_new(&arena->chunks_szsnad_retained);
3647 	extent_tree_ad_new(&arena->chunks_ad_retained);
3648 	if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
3649 	    WITNESS_RANK_ARENA_CHUNKS))
3650 		return (NULL);
3651 	ql_new(&arena->node_cache);
3652 	if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
3653 	    WITNESS_RANK_ARENA_NODE_CACHE))
3654 		return (NULL);
3655 
3656 	arena->chunk_hooks = chunk_hooks_default;
3657 
3658 	/* Initialize bins. */
3659 	for (i = 0; i < NBINS; i++) {
3660 		arena_bin_t *bin = &arena->bins[i];
3661 		if (malloc_mutex_init(&bin->lock, "arena_bin",
3662 		    WITNESS_RANK_ARENA_BIN))
3663 			return (NULL);
3664 		bin->runcur = NULL;
3665 		arena_run_heap_new(&bin->runs);
3666 		if (config_stats)
3667 			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
3668 	}
3669 
3670 	return (arena);
3671 }
3672 
3673 /*
3674  * Calculate bin_info->run_size such that it meets the following constraints:
3675  *
3676  *   *) bin_info->run_size <= arena_maxrun
3677  *   *) bin_info->nregs <= RUN_MAXREGS
3678  *
3679  * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
3680  * these settings are all interdependent.
3681  */
3682 static void
3683 bin_info_run_size_calc(arena_bin_info_t *bin_info)
3684 {
3685 	size_t pad_size;
3686 	size_t try_run_size, perfect_run_size, actual_run_size;
3687 	uint32_t try_nregs, perfect_nregs, actual_nregs;
3688 
3689 	/*
3690 	 * Determine redzone size based on minimum alignment and minimum
3691 	 * redzone size.  Add padding to the end of the run if it is needed to
3692 	 * align the regions.  The padding allows each redzone to be half the
3693 	 * minimum alignment; without the padding, each redzone would have to
3694 	 * be twice as large in order to maintain alignment.
3695 	 */
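	/*
	 * For example, assuming a 16-byte minimum redzone: a 48-byte class has
	 * 16-byte minimum alignment, so each region gets 16-byte redzones and
	 * no trailing pad (80-byte interval), whereas a 64-byte class gets
	 * 32-byte redzones plus 32 bytes of trailing pad so that regions
	 * remain 64-byte aligned.
	 */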
3696 	if (config_fill && unlikely(opt_redzone)) {
3697 		size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
3698 		if (align_min <= REDZONE_MINSIZE) {
3699 			bin_info->redzone_size = REDZONE_MINSIZE;
3700 			pad_size = 0;
3701 		} else {
3702 			bin_info->redzone_size = align_min >> 1;
3703 			pad_size = bin_info->redzone_size;
3704 		}
3705 	} else {
3706 		bin_info->redzone_size = 0;
3707 		pad_size = 0;
3708 	}
3709 	bin_info->reg_interval = bin_info->reg_size +
3710 	    (bin_info->redzone_size << 1);
3711 
3712 	/*
3713 	 * Compute run size under ideal conditions (no redzones, no limit on run
3714 	 * size).
3715 	 */
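	/*
	 * For example (assuming 4 KiB pages), a 96-byte class converges on a
	 * 12 KiB perfect run holding 128 regions, since 12288 is the smallest
	 * page multiple that is evenly divisible by 96.
	 */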
3716 	try_run_size = PAGE;
3717 	try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
3718 	do {
3719 		perfect_run_size = try_run_size;
3720 		perfect_nregs = try_nregs;
3721 
3722 		try_run_size += PAGE;
3723 		try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
3724 	} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
3725 	assert(perfect_nregs <= RUN_MAXREGS);
3726 
3727 	actual_run_size = perfect_run_size;
3728 	actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3729 	    bin_info->reg_interval);
3730 
3731 	/*
3732 	 * Redzones can require enough padding that not even a single region can
3733 	 * fit within the number of pages that would normally be dedicated to a
3734 	 * run for this size class.  Increase the run size until at least one
3735 	 * region fits.
3736 	 */
3737 	while (actual_nregs == 0) {
3738 		assert(config_fill && unlikely(opt_redzone));
3739 
3740 		actual_run_size += PAGE;
3741 		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3742 		    bin_info->reg_interval);
3743 	}
3744 
3745 	/*
3746 	 * Make sure that the run will fit within an arena chunk.
3747 	 */
3748 	while (actual_run_size > arena_maxrun) {
3749 		actual_run_size -= PAGE;
3750 		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3751 		    bin_info->reg_interval);
3752 	}
3753 	assert(actual_nregs > 0);
3754 	assert(actual_run_size == s2u(actual_run_size));
3755 
3756 	/* Copy final settings. */
3757 	bin_info->run_size = actual_run_size;
3758 	bin_info->nregs = actual_nregs;
3759 	bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
3760 	    bin_info->reg_interval) - pad_size + bin_info->redzone_size);
3761 
3762 	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
3763 	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
3764 }
3765 
3766 static void
3767 bin_info_init(void)
3768 {
3769 	arena_bin_info_t *bin_info;
3770 
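	/*
	 * X-macro expansion: for each small ("bin yes") entry in SIZE_CLASSES,
	 * initialize the corresponding arena_bin_info element.  For example
	 * (assuming LG_QUANTUM == 4), the 16-byte class expands roughly to:
	 *
	 *   bin_info = &arena_bin_info[1];
	 *   bin_info->reg_size = 16;
	 *   bin_info_run_size_calc(bin_info);
	 *   bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
	 */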
3771 #define	BIN_INFO_INIT_bin_yes(index, size)				\
3772 	bin_info = &arena_bin_info[index];				\
3773 	bin_info->reg_size = size;					\
3774 	bin_info_run_size_calc(bin_info);				\
3775 	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
3776 #define	BIN_INFO_INIT_bin_no(index, size)
3777 #define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup)	\
3778 	BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
3779 	SIZE_CLASSES
3780 #undef BIN_INFO_INIT_bin_yes
3781 #undef BIN_INFO_INIT_bin_no
3782 #undef SC
3783 }
3784 
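/*
 * Determine whether transparent huge pages are enabled system-wide ("always")
 * by reading /sys/kernel/mm/transparent_hugepage/enabled; only in that case
 * are new mappings initially backed by huge pages.
 */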
3785 static void
3786 init_thp_initially_huge(void) {
3787 	int fd;
3788 	char buf[sizeof("[always] madvise never\n")];
3789 	ssize_t nread;
3790 	static const char *enabled_states[] = {
3791 		"[always] madvise never\n",
3792 		"always [madvise] never\n",
3793 		"always madvise [never]\n"
3794 	};
3795 	static const bool thp_initially_huge_states[] = {
3796 		true,
3797 		false,
3798 		false
3799 	};
3800 	unsigned i;
3801 
3802 	if (config_debug) {
3803 		for (i = 0; i < sizeof(enabled_states)/sizeof(const char *);
3804 		    i++) {
3805 			assert(sizeof(buf) > strlen(enabled_states[i]));
3806 		}
3807 	}
3808 	assert(sizeof(enabled_states)/sizeof(const char *) ==
3809 	    sizeof(thp_initially_huge_states)/sizeof(bool));
3810 
3811 #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_open)
3812 	fd = (int)syscall(SYS_open,
3813 	    "/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
3814 #else
3815 	fd = open("/sys/kernel/mm/transparent_hugepage/enabled", O_RDONLY);
3816 #endif
3817 	if (fd == -1) {
3818 		goto label_error;
3819 	}
3820 
3821 #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_read)
3822 	nread = (ssize_t)syscall(SYS_read, fd, &buf, sizeof(buf));
3823 #else
3824 	nread = read(fd, &buf, sizeof(buf));
3825 #endif
3826 
3827 #if defined(JEMALLOC_USE_SYSCALL) && defined(SYS_close)
3828 	syscall(SYS_close, fd);
3829 #else
3830 	close(fd);
3831 #endif
3832 
3833 	if (nread < 1) {
3834 		goto label_error;
3835 	}
3836 	for (i = 0; i < sizeof(enabled_states)/sizeof(const char *);
3837 	    i++) {
3838 		if (strncmp(buf, enabled_states[i], (size_t)nread) == 0) {
3839 			thp_initially_huge = thp_initially_huge_states[i];
3840 			return;
3841 		}
3842 	}
3843 
3844 label_error:
3845 	thp_initially_huge = false;
3846 }
3847 
3848 void
3849 arena_boot(void)
3850 {
3851 	unsigned i;
3852 
3853 	if (config_thp && opt_thp) {
3854 		init_thp_initially_huge();
3855 	}
3856 
3857 	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
3858 	arena_decay_time_default_set(opt_decay_time);
3859 
3860 	/*
3861 	 * Compute the header size such that it is large enough to contain the
3862 	 * page map.  The page map is biased to omit entries for the header
3863 	 * itself, so some iteration is necessary to compute the map bias.
3864 	 *
3865 	 * 1) Compute safe header_size and map_bias values that include enough
3866 	 *    space for an unbiased page map.
3867 	 * 2) Refine map_bias based on (1) to omit the header pages in the page
3868 	 *    map.  The resulting map_bias may be one too small.
3869 	 * 3) Refine map_bias based on (2).  The result will be >= the result
3870 	 *    from (2), and will always be correct.
3871 	 */
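	/*
	 * Hypothetical illustration: with 512-page chunks, a 4 KiB fixed
	 * header, and 104 bytes of map entries per page, pass (1) computes
	 * (4096 + 104*512 + PAGE_MASK) >> LG_PAGE = 14 pages; passes (2) and
	 * (3) recompute with only (512 - 14) pages of map entries and still
	 * round up to 14, so map_bias converges at 14.
	 */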
3872 	map_bias = 0;
3873 	for (i = 0; i < 3; i++) {
3874 		size_t header_size = offsetof(arena_chunk_t, map_bits) +
3875 		    ((sizeof(arena_chunk_map_bits_t) +
3876 		    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
3877 		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
3878 	}
3879 	assert(map_bias > 0);
3880 
3881 	map_misc_offset = offsetof(arena_chunk_t, map_bits) +
3882 	    sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
3883 
3884 	arena_maxrun = chunksize - (map_bias << LG_PAGE);
3885 	assert(arena_maxrun > 0);
3886 	large_maxclass = index2size(size2index(chunksize)-1);
3887 	assert(large_maxclass > 0);
3888 	assert(large_maxclass + large_pad <= arena_maxrun);
3889 	nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
3890 	nhclasses = NSIZES - nlclasses - NBINS;
3891 
3892 	bin_info_init();
3893 }
3894 
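/*
 * The prefork functions acquire the arena mutexes in a fixed order (arena
 * lock, chunks, node cache, then the per-bin and huge mutexes); the postfork
 * functions release them in the reverse order.
 */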
3895 void
3896 arena_prefork0(tsdn_t *tsdn, arena_t *arena)
3897 {
3898 
3899 	malloc_mutex_prefork(tsdn, &arena->lock);
3900 }
3901 
3902 void
3903 arena_prefork1(tsdn_t *tsdn, arena_t *arena)
3904 {
3905 
3906 	malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
3907 }
3908 
3909 void
3910 arena_prefork2(tsdn_t *tsdn, arena_t *arena)
3911 {
3912 
3913 	malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
3914 }
3915 
3916 void
3917 arena_prefork3(tsdn_t *tsdn, arena_t *arena)
3918 {
3919 	unsigned i;
3920 
3921 	for (i = 0; i < NBINS; i++)
3922 		malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
3923 	malloc_mutex_prefork(tsdn, &arena->huge_mtx);
3924 }
3925 
3926 void
3927 arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
3928 {
3929 	unsigned i;
3930 
3931 	malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
3932 	for (i = 0; i < NBINS; i++)
3933 		malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
3934 	malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
3935 	malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
3936 	malloc_mutex_postfork_parent(tsdn, &arena->lock);
3937 }
3938 
3939 void
3940 arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
3941 {
3942 	unsigned i;
3943 
3944 	malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
3945 	for (i = 0; i < NBINS; i++)
3946 		malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
3947 	malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
3948 	malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
3949 	malloc_mutex_postfork_child(tsdn, &arena->lock);
3950 }
3951