xref: /freebsd/contrib/jemalloc/src/arena.c (revision bde951447fbed7c2669f80b5c7b6d3d16bb144f8)
1a4bd5210SJason Evans #define	JEMALLOC_ARENA_C_
2a4bd5210SJason Evans #include "jemalloc/internal/jemalloc_internal.h"
3a4bd5210SJason Evans 
4a4bd5210SJason Evans /******************************************************************************/
5a4bd5210SJason Evans /* Data. */
6a4bd5210SJason Evans 
7df0d881dSJason Evans purge_mode_t	opt_purge = PURGE_DEFAULT;
8df0d881dSJason Evans const char	*purge_mode_names[] = {
9df0d881dSJason Evans 	"ratio",
10df0d881dSJason Evans 	"decay",
11df0d881dSJason Evans 	"N/A"
12df0d881dSJason Evans };
13a4bd5210SJason Evans ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
14d0e79aa3SJason Evans static ssize_t	lg_dirty_mult_default;
15df0d881dSJason Evans ssize_t		opt_decay_time = DECAY_TIME_DEFAULT;
16df0d881dSJason Evans static ssize_t	decay_time_default;
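
/*
 * Editorial note (not from the original author): "ratio" purging keeps the
 * number of dirty pages below roughly nactive >> opt_lg_dirty_mult (e.g. a
 * value of 3 allows about one dirty page per eight active pages), whereas
 * "decay" purging instead releases unused dirty pages gradually over
 * approximately opt_decay_time seconds.
 */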
17df0d881dSJason Evans 
18a4bd5210SJason Evans arena_bin_info_t	arena_bin_info[NBINS];
19a4bd5210SJason Evans 
20d0e79aa3SJason Evans size_t		map_bias;
21d0e79aa3SJason Evans size_t		map_misc_offset;
22d0e79aa3SJason Evans size_t		arena_maxrun; /* Max run size for arenas. */
23536b3538SJason Evans size_t		large_maxclass; /* Max large size class. */
24d0e79aa3SJason Evans unsigned	nlclasses; /* Number of large size classes. */
25d0e79aa3SJason Evans unsigned	nhclasses; /* Number of huge size classes. */
26a4bd5210SJason Evans 
27a4bd5210SJason Evans /******************************************************************************/
28f921d10fSJason Evans /*
29f921d10fSJason Evans  * Function prototypes for static functions that are referenced prior to
30f921d10fSJason Evans  * definition.
31f921d10fSJason Evans  */
32a4bd5210SJason Evans 
33*bde95144SJason Evans static void	arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena,
34*bde95144SJason Evans     arena_chunk_t *chunk);
351f0a49e8SJason Evans static void	arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
361f0a49e8SJason Evans     size_t ndirty_limit);
371f0a49e8SJason Evans static void	arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
381f0a49e8SJason Evans     bool dirty, bool cleaned, bool decommitted);
391f0a49e8SJason Evans static void	arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
401f0a49e8SJason Evans     arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
41a4bd5210SJason Evans static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
42a4bd5210SJason Evans     arena_run_t *run, arena_bin_t *bin);
43a4bd5210SJason Evans 
44a4bd5210SJason Evans /******************************************************************************/
45a4bd5210SJason Evans 
46d0e79aa3SJason Evans JEMALLOC_INLINE_C size_t
47df0d881dSJason Evans arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
48d0e79aa3SJason Evans {
49d0e79aa3SJason Evans 	arena_chunk_t *chunk;
50d0e79aa3SJason Evans 	size_t pageind, mapbits;
51d0e79aa3SJason Evans 
52d0e79aa3SJason Evans 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
53d0e79aa3SJason Evans 	pageind = arena_miscelm_to_pageind(miscelm);
54d0e79aa3SJason Evans 	mapbits = arena_mapbits_get(chunk, pageind);
55536b3538SJason Evans 	return (arena_mapbits_size_decode(mapbits));
56d0e79aa3SJason Evans }
57d0e79aa3SJason Evans 
58d0e79aa3SJason Evans JEMALLOC_INLINE_C int
59df0d881dSJason Evans arena_run_addr_comp(const arena_chunk_map_misc_t *a,
60df0d881dSJason Evans     const arena_chunk_map_misc_t *b)
61d0e79aa3SJason Evans {
62d0e79aa3SJason Evans 	uintptr_t a_miscelm = (uintptr_t)a;
63d0e79aa3SJason Evans 	uintptr_t b_miscelm = (uintptr_t)b;
64a4bd5210SJason Evans 
65a4bd5210SJason Evans 	assert(a != NULL);
66a4bd5210SJason Evans 	assert(b != NULL);
67a4bd5210SJason Evans 
68d0e79aa3SJason Evans 	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
69a4bd5210SJason Evans }
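
/*
 * Editorial note: the (a_miscelm > b_miscelm) - (a_miscelm < b_miscelm)
 * idiom yields -1, 0, or 1 directly, avoiding the truncation/overflow
 * hazards of returning a pointer difference, so the pairing heap generated
 * below orders runs strictly by ascending address.
 */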
70a4bd5210SJason Evans 
711f0a49e8SJason Evans /* Generate pairing heap functions. */
721f0a49e8SJason Evans ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
731f0a49e8SJason Evans     ph_link, arena_run_addr_comp)
74a4bd5210SJason Evans 
75df0d881dSJason Evans #ifdef JEMALLOC_JET
76df0d881dSJason Evans #undef run_quantize_floor
771f0a49e8SJason Evans #define	run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
78df0d881dSJason Evans #endif
79df0d881dSJason Evans static size_t
80df0d881dSJason Evans run_quantize_floor(size_t size)
81a4bd5210SJason Evans {
82df0d881dSJason Evans 	size_t ret;
83*bde95144SJason Evans 	pszind_t pind;
84a4bd5210SJason Evans 
85df0d881dSJason Evans 	assert(size > 0);
86*bde95144SJason Evans 	assert(size <= HUGE_MAXCLASS);
87df0d881dSJason Evans 	assert((size & PAGE_MASK) == 0);
88a4bd5210SJason Evans 
92*bde95144SJason Evans 	pind = psz2ind(size - large_pad + 1);
93*bde95144SJason Evans 	if (pind == 0) {
94*bde95144SJason Evans 		/*
95*bde95144SJason Evans 		 * Avoid underflow.  This short-circuit would also do the right
96*bde95144SJason Evans 		 * thing for all sizes in the range for which there are
97*bde95144SJason Evans 		 * PAGE-spaced size classes, but it's simplest to just handle
98*bde95144SJason Evans 		 * the one case that would cause erroneous results.
99*bde95144SJason Evans 		 */
100*bde95144SJason Evans 		return (size);
101*bde95144SJason Evans 	}
102*bde95144SJason Evans 	ret = pind2sz(pind - 1) + large_pad;
103*bde95144SJason Evans 	assert(ret <= size);
104a4bd5210SJason Evans 	return (ret);
105a4bd5210SJason Evans }
106df0d881dSJason Evans #ifdef JEMALLOC_JET
107df0d881dSJason Evans #undef run_quantize_floor
108df0d881dSJason Evans #define	run_quantize_floor JEMALLOC_N(run_quantize_floor)
1091f0a49e8SJason Evans run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
110df0d881dSJason Evans #endif
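
/*
 * Usage sketch (editorial addition): run_quantize_floor(size) maps a
 * page-aligned run size down to the largest quantized run size that does
 * not exceed it.  arena_avail_insert()/arena_avail_remove() below use
 * psz2ind(run_quantize_floor(...)) to pick the runs_avail heap a free run
 * is filed under, so a run is always indexed by a class it can satisfy.
 */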
111a4bd5210SJason Evans 
112df0d881dSJason Evans #ifdef JEMALLOC_JET
113df0d881dSJason Evans #undef run_quantize_ceil
1141f0a49e8SJason Evans #define	run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
115df0d881dSJason Evans #endif
116df0d881dSJason Evans static size_t
117df0d881dSJason Evans run_quantize_ceil(size_t size)
118df0d881dSJason Evans {
119df0d881dSJason Evans 	size_t ret;
120df0d881dSJason Evans 
121df0d881dSJason Evans 	assert(size > 0);
122*bde95144SJason Evans 	assert(size <= HUGE_MAXCLASS);
123df0d881dSJason Evans 	assert((size & PAGE_MASK) == 0);
124df0d881dSJason Evans 
125*bde95144SJason Evans 	ret = run_quantize_floor(size);
126*bde95144SJason Evans 	if (ret < size) {
127*bde95144SJason Evans 		/*
128*bde95144SJason Evans 		 * Skip a quantization that may have an adequately large run,
129*bde95144SJason Evans 		 * because under-sized runs may be mixed in.  This only happens
130*bde95144SJason Evans 		 * when an unusual size is requested, i.e. for aligned
131*bde95144SJason Evans 		 * allocation, and is just one of several places where linear
132*bde95144SJason Evans 		 * search would potentially find sufficiently aligned available
133*bde95144SJason Evans 		 * memory somewhere lower.
134*bde95144SJason Evans 		 */
135*bde95144SJason Evans 		ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
136*bde95144SJason Evans 	}
137df0d881dSJason Evans 	return (ret);
138df0d881dSJason Evans }
139df0d881dSJason Evans #ifdef JEMALLOC_JET
140df0d881dSJason Evans #undef run_quantize_ceil
141df0d881dSJason Evans #define	run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
1421f0a49e8SJason Evans run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
143df0d881dSJason Evans #endif
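
/*
 * Editorial note: run_quantize_ceil(size) is the dual operation, rounding a
 * request up to a quantized size that is >= size.  arena_run_first_best_fit()
 * uses psz2ind(run_quantize_ceil(size)) as its starting heap index, which
 * guarantees that any run found there or in a higher class is large enough.
 */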
144df0d881dSJason Evans 
14582872ac0SJason Evans static void
14682872ac0SJason Evans arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
147d0e79aa3SJason Evans     size_t npages)
14882872ac0SJason Evans {
149*bde95144SJason Evans 	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
1501f0a49e8SJason Evans 	    arena_miscelm_get_const(chunk, pageind))));
15182872ac0SJason Evans 	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
15282872ac0SJason Evans 	    LG_PAGE));
153*bde95144SJason Evans 	assert((npages << LG_PAGE) < chunksize);
154*bde95144SJason Evans 	assert(pind2sz(pind) <= chunksize);
155*bde95144SJason Evans 	arena_run_heap_insert(&arena->runs_avail[pind],
1561f0a49e8SJason Evans 	    arena_miscelm_get_mutable(chunk, pageind));
15782872ac0SJason Evans }
15882872ac0SJason Evans 
15982872ac0SJason Evans static void
16082872ac0SJason Evans arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
161d0e79aa3SJason Evans     size_t npages)
16282872ac0SJason Evans {
163*bde95144SJason Evans 	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
1641f0a49e8SJason Evans 	    arena_miscelm_get_const(chunk, pageind))));
16582872ac0SJason Evans 	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
16682872ac0SJason Evans 	    LG_PAGE));
167*bde95144SJason Evans 	assert((npages << LG_PAGE) < chunksize);
168*bde95144SJason Evans 	assert(pind2sz(pind) <= chunksize);
169*bde95144SJason Evans 	arena_run_heap_remove(&arena->runs_avail[pind],
1701f0a49e8SJason Evans 	    arena_miscelm_get_mutable(chunk, pageind));
17182872ac0SJason Evans }
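
/*
 * Editorial note: runs_avail is an array of pairing heaps, one per page size
 * class (pszind_t), each ordered by run address via arena_run_addr_comp
 * above.  Floor-quantizing the run size here keeps every free run in a heap
 * whose class it can fully satisfy, and the asserts bound both the run and
 * its class by the chunk size.
 */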
17282872ac0SJason Evans 
173d0e79aa3SJason Evans static void
174d0e79aa3SJason Evans arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
175d0e79aa3SJason Evans     size_t npages)
176d0e79aa3SJason Evans {
1771f0a49e8SJason Evans 	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
1781f0a49e8SJason Evans 	    pageind);
179d0e79aa3SJason Evans 
180d0e79aa3SJason Evans 	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
181d0e79aa3SJason Evans 	    LG_PAGE));
182d0e79aa3SJason Evans 	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
183d0e79aa3SJason Evans 	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
184d0e79aa3SJason Evans 	    CHUNK_MAP_DIRTY);
185d0e79aa3SJason Evans 
186d0e79aa3SJason Evans 	qr_new(&miscelm->rd, rd_link);
187d0e79aa3SJason Evans 	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
188d0e79aa3SJason Evans 	arena->ndirty += npages;
189d0e79aa3SJason Evans }
190d0e79aa3SJason Evans 
191d0e79aa3SJason Evans static void
192d0e79aa3SJason Evans arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
193d0e79aa3SJason Evans     size_t npages)
194d0e79aa3SJason Evans {
1951f0a49e8SJason Evans 	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
1961f0a49e8SJason Evans 	    pageind);
197d0e79aa3SJason Evans 
198d0e79aa3SJason Evans 	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
199d0e79aa3SJason Evans 	    LG_PAGE));
200d0e79aa3SJason Evans 	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
201d0e79aa3SJason Evans 	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
202d0e79aa3SJason Evans 	    CHUNK_MAP_DIRTY);
203d0e79aa3SJason Evans 
204d0e79aa3SJason Evans 	qr_remove(&miscelm->rd, rd_link);
205d0e79aa3SJason Evans 	assert(arena->ndirty >= npages);
206d0e79aa3SJason Evans 	arena->ndirty -= npages;
207d0e79aa3SJason Evans }
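
/*
 * Editorial note: runs_dirty is a circular ring (qr) of dirty runs threaded
 * through the rd field of each run's map_misc element, with arena->ndirty
 * counting the dirty pages it covers.  This ring is what the purge path
 * (arena_purge_to_limit(), declared above) drains when ndirty must be
 * brought back under its limit.
 */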
208d0e79aa3SJason Evans 
209d0e79aa3SJason Evans static size_t
210d0e79aa3SJason Evans arena_chunk_dirty_npages(const extent_node_t *node)
211d0e79aa3SJason Evans {
212d0e79aa3SJason Evans 
213d0e79aa3SJason Evans 	return (extent_node_size_get(node) >> LG_PAGE);
214d0e79aa3SJason Evans }
215d0e79aa3SJason Evans 
216d0e79aa3SJason Evans void
217d0e79aa3SJason Evans arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
218d0e79aa3SJason Evans {
219d0e79aa3SJason Evans 
220d0e79aa3SJason Evans 	if (cache) {
221d0e79aa3SJason Evans 		extent_node_dirty_linkage_init(node);
222d0e79aa3SJason Evans 		extent_node_dirty_insert(node, &arena->runs_dirty,
223d0e79aa3SJason Evans 		    &arena->chunks_cache);
224d0e79aa3SJason Evans 		arena->ndirty += arena_chunk_dirty_npages(node);
225d0e79aa3SJason Evans 	}
226d0e79aa3SJason Evans }
227d0e79aa3SJason Evans 
228d0e79aa3SJason Evans void
229d0e79aa3SJason Evans arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
230d0e79aa3SJason Evans {
231d0e79aa3SJason Evans 
232d0e79aa3SJason Evans 	if (dirty) {
233d0e79aa3SJason Evans 		extent_node_dirty_remove(node);
234d0e79aa3SJason Evans 		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
235d0e79aa3SJason Evans 		arena->ndirty -= arena_chunk_dirty_npages(node);
236d0e79aa3SJason Evans 	}
237d0e79aa3SJason Evans }
238d0e79aa3SJason Evans 
239d0e79aa3SJason Evans JEMALLOC_INLINE_C void *
240a4bd5210SJason Evans arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
241a4bd5210SJason Evans {
242a4bd5210SJason Evans 	void *ret;
243df0d881dSJason Evans 	size_t regind;
244d0e79aa3SJason Evans 	arena_chunk_map_misc_t *miscelm;
245d0e79aa3SJason Evans 	void *rpages;
246a4bd5210SJason Evans 
247a4bd5210SJason Evans 	assert(run->nfree > 0);
248d0e79aa3SJason Evans 	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));
249a4bd5210SJason Evans 
250df0d881dSJason Evans 	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
251d0e79aa3SJason Evans 	miscelm = arena_run_to_miscelm(run);
252d0e79aa3SJason Evans 	rpages = arena_miscelm_to_rpages(miscelm);
253d0e79aa3SJason Evans 	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
254a4bd5210SJason Evans 	    (uintptr_t)(bin_info->reg_interval * regind));
255a4bd5210SJason Evans 	run->nfree--;
256a4bd5210SJason Evans 	return (ret);
257a4bd5210SJason Evans }
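
/*
 * Worked example (hypothetical numbers, editorial addition): bitmap_sfu()
 * returns the index of the first free region and marks it allocated, and the
 * region address is rpages + reg0_offset + reg_interval * regind.  For a bin
 * with reg_interval == 32 and reg0_offset == 0, region index 5 would
 * therefore start 160 bytes into the run's usable pages.
 */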
258a4bd5210SJason Evans 
259d0e79aa3SJason Evans JEMALLOC_INLINE_C void
260a4bd5210SJason Evans arena_run_reg_dalloc(arena_run_t *run, void *ptr)
261a4bd5210SJason Evans {
262a4bd5210SJason Evans 	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
263e722f8f8SJason Evans 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
264e722f8f8SJason Evans 	size_t mapbits = arena_mapbits_get(chunk, pageind);
265536b3538SJason Evans 	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
266a4bd5210SJason Evans 	arena_bin_info_t *bin_info = &arena_bin_info[binind];
267df0d881dSJason Evans 	size_t regind = arena_run_regind(run, bin_info, ptr);
268a4bd5210SJason Evans 
269a4bd5210SJason Evans 	assert(run->nfree < bin_info->nregs);
270a4bd5210SJason Evans 	/* Freeing an interior pointer can cause assertion failure. */
271d0e79aa3SJason Evans 	assert(((uintptr_t)ptr -
272d0e79aa3SJason Evans 	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
273a4bd5210SJason Evans 	    (uintptr_t)bin_info->reg0_offset)) %
274a4bd5210SJason Evans 	    (uintptr_t)bin_info->reg_interval == 0);
275d0e79aa3SJason Evans 	assert((uintptr_t)ptr >=
276d0e79aa3SJason Evans 	    (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
277a4bd5210SJason Evans 	    (uintptr_t)bin_info->reg0_offset);
278a4bd5210SJason Evans 	/* Freeing an unallocated pointer can cause assertion failure. */
279d0e79aa3SJason Evans 	assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));
280a4bd5210SJason Evans 
281d0e79aa3SJason Evans 	bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
282a4bd5210SJason Evans 	run->nfree++;
283a4bd5210SJason Evans }
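
/*
 * Editorial note: deallocation recovers the region index from the pointer
 * via arena_run_regind() and clears the corresponding bitmap bit.  The two
 * commented asserts above catch the common misuse cases: freeing an interior
 * pointer (not on a reg_interval boundary) and freeing a region that was
 * never allocated (including double frees).
 */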
284a4bd5210SJason Evans 
285d0e79aa3SJason Evans JEMALLOC_INLINE_C void
28688ad2f8dSJason Evans arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
28788ad2f8dSJason Evans {
28888ad2f8dSJason Evans 
289d0e79aa3SJason Evans 	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
290d0e79aa3SJason Evans 	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
29188ad2f8dSJason Evans 	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
29288ad2f8dSJason Evans 	    (npages << LG_PAGE));
29388ad2f8dSJason Evans }
29488ad2f8dSJason Evans 
295d0e79aa3SJason Evans JEMALLOC_INLINE_C void
2962b06b201SJason Evans arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
2972b06b201SJason Evans {
2982b06b201SJason Evans 
299d0e79aa3SJason Evans 	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
300d0e79aa3SJason Evans 	    << LG_PAGE)), PAGE);
3012b06b201SJason Evans }
3022b06b201SJason Evans 
303d0e79aa3SJason Evans JEMALLOC_INLINE_C void
30488ad2f8dSJason Evans arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
305a4bd5210SJason Evans {
306a4bd5210SJason Evans 	size_t i;
307a4bd5210SJason Evans 	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));
308a4bd5210SJason Evans 
3092b06b201SJason Evans 	arena_run_page_mark_zeroed(chunk, run_ind);
310a4bd5210SJason Evans 	for (i = 0; i < PAGE / sizeof(size_t); i++)
311a4bd5210SJason Evans 		assert(p[i] == 0);
312a4bd5210SJason Evans }
313a4bd5210SJason Evans 
314a4bd5210SJason Evans static void
315df0d881dSJason Evans arena_nactive_add(arena_t *arena, size_t add_pages)
316a4bd5210SJason Evans {
317a4bd5210SJason Evans 
318f921d10fSJason Evans 	if (config_stats) {
319df0d881dSJason Evans 		size_t cactive_add = CHUNK_CEILING((arena->nactive +
320df0d881dSJason Evans 		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
321d0e79aa3SJason Evans 		    LG_PAGE);
322df0d881dSJason Evans 		if (cactive_add != 0)
323df0d881dSJason Evans 			stats_cactive_add(cactive_add);
324f921d10fSJason Evans 	}
325df0d881dSJason Evans 	arena->nactive += add_pages;
326df0d881dSJason Evans }
327df0d881dSJason Evans 
328df0d881dSJason Evans static void
329df0d881dSJason Evans arena_nactive_sub(arena_t *arena, size_t sub_pages)
330df0d881dSJason Evans {
331df0d881dSJason Evans 
332df0d881dSJason Evans 	if (config_stats) {
333df0d881dSJason Evans 		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
334df0d881dSJason Evans 		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
335df0d881dSJason Evans 		if (cactive_sub != 0)
336df0d881dSJason Evans 			stats_cactive_sub(cactive_sub);
337df0d881dSJason Evans 	}
338df0d881dSJason Evans 	arena->nactive -= sub_pages;
339f921d10fSJason Evans }
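
/*
 * Editorial note: the cactive statistic is maintained at chunk granularity,
 * which is why both helpers compare CHUNK_CEILING() of the active page count
 * before and after the update.  For example, growing nactive by a few pages
 * that do not cross a chunk boundary leaves cactive unchanged.
 */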
340e722f8f8SJason Evans 
341f921d10fSJason Evans static void
342f921d10fSJason Evans arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
343d0e79aa3SJason Evans     size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
344f921d10fSJason Evans {
345f921d10fSJason Evans 	size_t total_pages, rem_pages;
346f921d10fSJason Evans 
347d0e79aa3SJason Evans 	assert(flag_dirty == 0 || flag_decommitted == 0);
348d0e79aa3SJason Evans 
349e722f8f8SJason Evans 	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
350a4bd5210SJason Evans 	    LG_PAGE;
351e722f8f8SJason Evans 	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
352e722f8f8SJason Evans 	    flag_dirty);
353a4bd5210SJason Evans 	assert(need_pages <= total_pages);
354a4bd5210SJason Evans 	rem_pages = total_pages - need_pages;
355a4bd5210SJason Evans 
356d0e79aa3SJason Evans 	arena_avail_remove(arena, chunk, run_ind, total_pages);
357d0e79aa3SJason Evans 	if (flag_dirty != 0)
358d0e79aa3SJason Evans 		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
359df0d881dSJason Evans 	arena_nactive_add(arena, need_pages);
360a4bd5210SJason Evans 
361a4bd5210SJason Evans 	/* Keep track of trailing unused pages for later use. */
362a4bd5210SJason Evans 	if (rem_pages > 0) {
363d0e79aa3SJason Evans 		size_t flags = flag_dirty | flag_decommitted;
364d0e79aa3SJason Evans 		size_t flag_unzeroed_mask = (flags == 0) ?  CHUNK_MAP_UNZEROED :
365d0e79aa3SJason Evans 		    0;
366d0e79aa3SJason Evans 
367e722f8f8SJason Evans 		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
368d0e79aa3SJason Evans 		    (rem_pages << LG_PAGE), flags |
369d0e79aa3SJason Evans 		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
370d0e79aa3SJason Evans 		    flag_unzeroed_mask));
371d0e79aa3SJason Evans 		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
372d0e79aa3SJason Evans 		    (rem_pages << LG_PAGE), flags |
373d0e79aa3SJason Evans 		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
374d0e79aa3SJason Evans 		    flag_unzeroed_mask));
375d0e79aa3SJason Evans 		if (flag_dirty != 0) {
376d0e79aa3SJason Evans 			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
377d0e79aa3SJason Evans 			    rem_pages);
378a4bd5210SJason Evans 		}
379d0e79aa3SJason Evans 		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
380a4bd5210SJason Evans 	}
381f921d10fSJason Evans }
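
/*
 * Editorial note: arena_run_split_remove() carves need_pages off the front
 * of an unallocated run: the whole run is pulled out of runs_avail (and
 * runs_dirty, if dirty), nactive is credited, and any trailing rem_pages are
 * re-registered as a smaller unallocated run carrying the same
 * dirty/decommitted flags.
 */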
382a4bd5210SJason Evans 
383d0e79aa3SJason Evans static bool
384f921d10fSJason Evans arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
385f921d10fSJason Evans     bool remove, bool zero)
386f921d10fSJason Evans {
387f921d10fSJason Evans 	arena_chunk_t *chunk;
388d0e79aa3SJason Evans 	arena_chunk_map_misc_t *miscelm;
389536b3538SJason Evans 	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
390d0e79aa3SJason Evans 	size_t flag_unzeroed_mask;
391f921d10fSJason Evans 
392f921d10fSJason Evans 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
393d0e79aa3SJason Evans 	miscelm = arena_run_to_miscelm(run);
394d0e79aa3SJason Evans 	run_ind = arena_miscelm_to_pageind(miscelm);
395f921d10fSJason Evans 	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
396d0e79aa3SJason Evans 	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
397f921d10fSJason Evans 	need_pages = (size >> LG_PAGE);
398f921d10fSJason Evans 	assert(need_pages > 0);
399f921d10fSJason Evans 
400d0e79aa3SJason Evans 	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
401d0e79aa3SJason Evans 	    run_ind << LG_PAGE, size, arena->ind))
402d0e79aa3SJason Evans 		return (true);
403d0e79aa3SJason Evans 
404f921d10fSJason Evans 	if (remove) {
405f921d10fSJason Evans 		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
406d0e79aa3SJason Evans 		    flag_decommitted, need_pages);
407f921d10fSJason Evans 	}
408f921d10fSJason Evans 
409a4bd5210SJason Evans 	if (zero) {
410d0e79aa3SJason Evans 		if (flag_decommitted != 0) {
411d0e79aa3SJason Evans 			/* The run is untouched, and therefore zeroed. */
412d0e79aa3SJason Evans 			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
413d0e79aa3SJason Evans 			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
414d0e79aa3SJason Evans 			    (need_pages << LG_PAGE));
415d0e79aa3SJason Evans 		} else if (flag_dirty != 0) {
416d0e79aa3SJason Evans 			/* The run is dirty, so all pages must be zeroed. */
417d0e79aa3SJason Evans 			arena_run_zero(chunk, run_ind, need_pages);
418d0e79aa3SJason Evans 		} else {
419a4bd5210SJason Evans 			/*
420f921d10fSJason Evans 			 * The run is clean, so some pages may be zeroed (i.e.
421f921d10fSJason Evans 			 * never before touched).
422a4bd5210SJason Evans 			 */
423536b3538SJason Evans 			size_t i;
424a4bd5210SJason Evans 			for (i = 0; i < need_pages; i++) {
425f921d10fSJason Evans 				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
426f921d10fSJason Evans 				    != 0)
427f921d10fSJason Evans 					arena_run_zero(chunk, run_ind+i, 1);
428f921d10fSJason Evans 				else if (config_debug) {
429f921d10fSJason Evans 					arena_run_page_validate_zeroed(chunk,
430f921d10fSJason Evans 					    run_ind+i);
4312b06b201SJason Evans 				} else {
432f921d10fSJason Evans 					arena_run_page_mark_zeroed(chunk,
433f921d10fSJason Evans 					    run_ind+i);
434a4bd5210SJason Evans 				}
435a4bd5210SJason Evans 			}
436a4bd5210SJason Evans 		}
4372b06b201SJason Evans 	} else {
438d0e79aa3SJason Evans 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
4392b06b201SJason Evans 		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
440a4bd5210SJason Evans 	}
441a4bd5210SJason Evans 
442a4bd5210SJason Evans 	/*
443f921d10fSJason Evans 	 * Set the last element first, in case the run only contains one page
444f921d10fSJason Evans 	 * (i.e. both statements set the same element).
445a4bd5210SJason Evans 	 */
446d0e79aa3SJason Evans 	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
447d0e79aa3SJason Evans 	    CHUNK_MAP_UNZEROED : 0;
448d0e79aa3SJason Evans 	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
449d0e79aa3SJason Evans 	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
450d0e79aa3SJason Evans 	    run_ind+need_pages-1)));
451d0e79aa3SJason Evans 	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
452d0e79aa3SJason Evans 	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
453d0e79aa3SJason Evans 	return (false);
454f921d10fSJason Evans }
455f921d10fSJason Evans 
456d0e79aa3SJason Evans static bool
457f921d10fSJason Evans arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
458f921d10fSJason Evans {
459f921d10fSJason Evans 
460d0e79aa3SJason Evans 	return (arena_run_split_large_helper(arena, run, size, true, zero));
461f921d10fSJason Evans }
462f921d10fSJason Evans 
463d0e79aa3SJason Evans static bool
464f921d10fSJason Evans arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
465f921d10fSJason Evans {
466f921d10fSJason Evans 
467d0e79aa3SJason Evans 	return (arena_run_split_large_helper(arena, run, size, false, zero));
468f921d10fSJason Evans }
469f921d10fSJason Evans 
470d0e79aa3SJason Evans static bool
471f921d10fSJason Evans arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
472536b3538SJason Evans     szind_t binind)
473f921d10fSJason Evans {
474f921d10fSJason Evans 	arena_chunk_t *chunk;
475d0e79aa3SJason Evans 	arena_chunk_map_misc_t *miscelm;
476d0e79aa3SJason Evans 	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;
477f921d10fSJason Evans 
478f921d10fSJason Evans 	assert(binind != BININD_INVALID);
479f921d10fSJason Evans 
480f921d10fSJason Evans 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
481d0e79aa3SJason Evans 	miscelm = arena_run_to_miscelm(run);
482d0e79aa3SJason Evans 	run_ind = arena_miscelm_to_pageind(miscelm);
483f921d10fSJason Evans 	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
484d0e79aa3SJason Evans 	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
485f921d10fSJason Evans 	need_pages = (size >> LG_PAGE);
486f921d10fSJason Evans 	assert(need_pages > 0);
487f921d10fSJason Evans 
488d0e79aa3SJason Evans 	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
489d0e79aa3SJason Evans 	    run_ind << LG_PAGE, size, arena->ind))
490d0e79aa3SJason Evans 		return (true);
491f921d10fSJason Evans 
492d0e79aa3SJason Evans 	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
493d0e79aa3SJason Evans 	    flag_decommitted, need_pages);
494d0e79aa3SJason Evans 
495d0e79aa3SJason Evans 	for (i = 0; i < need_pages; i++) {
496d0e79aa3SJason Evans 		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
497d0e79aa3SJason Evans 		    run_ind+i);
498d0e79aa3SJason Evans 		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
499d0e79aa3SJason Evans 		    flag_unzeroed);
500d0e79aa3SJason Evans 		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
501f921d10fSJason Evans 			arena_run_page_validate_zeroed(chunk, run_ind+i);
50288ad2f8dSJason Evans 	}
503d0e79aa3SJason Evans 	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
5042b06b201SJason Evans 	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
505d0e79aa3SJason Evans 	return (false);
506a4bd5210SJason Evans }
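
/*
 * Editorial note: unlike large runs, each page of a small run records its
 * offset within the run (the third argument to arena_mapbits_small_set()),
 * so a pointer into any page of the run can be traced back to the run's
 * first page when it is freed.
 */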
507a4bd5210SJason Evans 
508a4bd5210SJason Evans static arena_chunk_t *
509f921d10fSJason Evans arena_chunk_init_spare(arena_t *arena)
510a4bd5210SJason Evans {
511a4bd5210SJason Evans 	arena_chunk_t *chunk;
512a4bd5210SJason Evans 
513f921d10fSJason Evans 	assert(arena->spare != NULL);
514f921d10fSJason Evans 
515a4bd5210SJason Evans 	chunk = arena->spare;
516a4bd5210SJason Evans 	arena->spare = NULL;
517a4bd5210SJason Evans 
51835dad073SJason Evans 	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
51935dad073SJason Evans 	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
520e722f8f8SJason Evans 	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
521d0e79aa3SJason Evans 	    arena_maxrun);
522f921d10fSJason Evans 	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
523d0e79aa3SJason Evans 	    arena_maxrun);
524e722f8f8SJason Evans 	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
525e722f8f8SJason Evans 	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
526f921d10fSJason Evans 
527f921d10fSJason Evans 	return (chunk);
528f921d10fSJason Evans }
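
/*
 * Editorial note: the arena retains at most one spare chunk so that a
 * workload oscillating around a chunk boundary does not repeatedly allocate
 * and deallocate chunks.  This helper reuses the spare; arena_chunk_dalloc()
 * below installs a new spare and discards the previous one via
 * arena_spare_discard().
 */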
529f921d10fSJason Evans 
530d0e79aa3SJason Evans static bool
5311f0a49e8SJason Evans arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
5321f0a49e8SJason Evans     bool zero)
533d0e79aa3SJason Evans {
534d0e79aa3SJason Evans 
535d0e79aa3SJason Evans 	/*
536d0e79aa3SJason Evans 	 * The extent node notion of "committed" doesn't directly apply to
537d0e79aa3SJason Evans 	 * arena chunks.  Arbitrarily mark them as committed.  The commit state
538d0e79aa3SJason Evans 	 * of runs is tracked individually, and upon chunk deallocation the
539d0e79aa3SJason Evans 	 * entire chunk is in a consistent commit state.
540d0e79aa3SJason Evans 	 */
541d0e79aa3SJason Evans 	extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
542d0e79aa3SJason Evans 	extent_node_achunk_set(&chunk->node, true);
5431f0a49e8SJason Evans 	return (chunk_register(tsdn, chunk, &chunk->node));
544d0e79aa3SJason Evans }
545d0e79aa3SJason Evans 
546d0e79aa3SJason Evans static arena_chunk_t *
5471f0a49e8SJason Evans arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
5481f0a49e8SJason Evans     chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
549d0e79aa3SJason Evans {
550d0e79aa3SJason Evans 	arena_chunk_t *chunk;
551d0e79aa3SJason Evans 
5521f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
553d0e79aa3SJason Evans 
5541f0a49e8SJason Evans 	chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
5551f0a49e8SJason Evans 	    NULL, chunksize, chunksize, zero, commit);
556d0e79aa3SJason Evans 	if (chunk != NULL && !*commit) {
557d0e79aa3SJason Evans 		/* Commit header. */
558d0e79aa3SJason Evans 		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
559d0e79aa3SJason Evans 		    LG_PAGE, arena->ind)) {
5601f0a49e8SJason Evans 			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
5611f0a49e8SJason Evans 			    (void *)chunk, chunksize, *zero, *commit);
562d0e79aa3SJason Evans 			chunk = NULL;
563d0e79aa3SJason Evans 		}
564d0e79aa3SJason Evans 	}
5651f0a49e8SJason Evans 	if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
566d0e79aa3SJason Evans 		if (!*commit) {
567d0e79aa3SJason Evans 			/* Undo commit of header. */
568d0e79aa3SJason Evans 			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
569d0e79aa3SJason Evans 			    LG_PAGE, arena->ind);
570d0e79aa3SJason Evans 		}
5711f0a49e8SJason Evans 		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
5721f0a49e8SJason Evans 		    chunksize, *zero, *commit);
573d0e79aa3SJason Evans 		chunk = NULL;
574d0e79aa3SJason Evans 	}
575d0e79aa3SJason Evans 
5761f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
577d0e79aa3SJason Evans 	return (chunk);
578d0e79aa3SJason Evans }
579d0e79aa3SJason Evans 
580d0e79aa3SJason Evans static arena_chunk_t *
5811f0a49e8SJason Evans arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
5821f0a49e8SJason Evans     bool *commit)
583d0e79aa3SJason Evans {
584d0e79aa3SJason Evans 	arena_chunk_t *chunk;
585d0e79aa3SJason Evans 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
586d0e79aa3SJason Evans 
5871f0a49e8SJason Evans 	chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
588*bde95144SJason Evans 	    chunksize, zero, commit, true);
589d0e79aa3SJason Evans 	if (chunk != NULL) {
5901f0a49e8SJason Evans 		if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
5911f0a49e8SJason Evans 			chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
592d0e79aa3SJason Evans 			    chunksize, true);
593d0e79aa3SJason Evans 			return (NULL);
594d0e79aa3SJason Evans 		}
595d0e79aa3SJason Evans 	}
596d0e79aa3SJason Evans 	if (chunk == NULL) {
5971f0a49e8SJason Evans 		chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
5981f0a49e8SJason Evans 		    &chunk_hooks, zero, commit);
599d0e79aa3SJason Evans 	}
600d0e79aa3SJason Evans 
601d0e79aa3SJason Evans 	if (config_stats && chunk != NULL) {
602d0e79aa3SJason Evans 		arena->stats.mapped += chunksize;
603d0e79aa3SJason Evans 		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
604d0e79aa3SJason Evans 	}
605d0e79aa3SJason Evans 
606d0e79aa3SJason Evans 	return (chunk);
607d0e79aa3SJason Evans }
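
/*
 * Editorial note: chunk allocation is two-staged.  The fast path asks
 * chunk_alloc_cache() for a previously cached chunk while arena->lock is
 * held; only on a miss does the hard path above drop the lock and call the
 * chunk hooks, handing the chunk back via chunk_dalloc_wrapper() if the
 * header commit or chunk registration fails.
 */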
608d0e79aa3SJason Evans 
609f921d10fSJason Evans static arena_chunk_t *
6101f0a49e8SJason Evans arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
611f921d10fSJason Evans {
612f921d10fSJason Evans 	arena_chunk_t *chunk;
613d0e79aa3SJason Evans 	bool zero, commit;
614d0e79aa3SJason Evans 	size_t flag_unzeroed, flag_decommitted, i;
615f921d10fSJason Evans 
616f921d10fSJason Evans 	assert(arena->spare == NULL);
617a4bd5210SJason Evans 
618a4bd5210SJason Evans 	zero = false;
619d0e79aa3SJason Evans 	commit = false;
6201f0a49e8SJason Evans 	chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
621a4bd5210SJason Evans 	if (chunk == NULL)
622a4bd5210SJason Evans 		return (NULL);
62382872ac0SJason Evans 
624a4bd5210SJason Evans 	/*
625f921d10fSJason Evans 	 * Initialize the map to contain one maximal free untouched run.  Mark
6261f0a49e8SJason Evans 	 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
6271f0a49e8SJason Evans 	 * or decommitted chunk.
628a4bd5210SJason Evans 	 */
629d0e79aa3SJason Evans 	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
630d0e79aa3SJason Evans 	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
631d0e79aa3SJason Evans 	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
632d0e79aa3SJason Evans 	    flag_unzeroed | flag_decommitted);
633a4bd5210SJason Evans 	/*
634f921d10fSJason Evans 	 * There is no need to initialize the internal page map entries unless
635f921d10fSJason Evans 	 * the chunk is not zeroed.
636a4bd5210SJason Evans 	 */
637d0e79aa3SJason Evans 	if (!zero) {
638d0e79aa3SJason Evans 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
6391f0a49e8SJason Evans 		    (void *)arena_bitselm_get_const(chunk, map_bias+1),
6401f0a49e8SJason Evans 		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
6411f0a49e8SJason Evans 		    chunk_npages-1) -
6421f0a49e8SJason Evans 		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
643a4bd5210SJason Evans 		for (i = map_bias+1; i < chunk_npages-1; i++)
644d0e79aa3SJason Evans 			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
6452b06b201SJason Evans 	} else {
646d0e79aa3SJason Evans 		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
6471f0a49e8SJason Evans 		    *)arena_bitselm_get_const(chunk, map_bias+1),
6481f0a49e8SJason Evans 		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
6491f0a49e8SJason Evans 		    chunk_npages-1) -
6501f0a49e8SJason Evans 		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
6512b06b201SJason Evans 		if (config_debug) {
652e722f8f8SJason Evans 			for (i = map_bias+1; i < chunk_npages-1; i++) {
653f921d10fSJason Evans 				assert(arena_mapbits_unzeroed_get(chunk, i) ==
654d0e79aa3SJason Evans 				    flag_unzeroed);
6552b06b201SJason Evans 			}
656a4bd5210SJason Evans 		}
657e722f8f8SJason Evans 	}
658d0e79aa3SJason Evans 	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
659d0e79aa3SJason Evans 	    flag_unzeroed);
660f921d10fSJason Evans 
661f921d10fSJason Evans 	return (chunk);
662a4bd5210SJason Evans }
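
/*
 * Editorial note: the first map_bias pages of every arena chunk hold the
 * chunk header and page map, so the single maximal run created here spans
 * pages [map_bias, chunk_npages) and has size arena_maxrun.  Page map
 * entries only need explicit initialization when the chunk was not returned
 * zeroed.
 */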
663a4bd5210SJason Evans 
664f921d10fSJason Evans static arena_chunk_t *
6651f0a49e8SJason Evans arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
666f921d10fSJason Evans {
667f921d10fSJason Evans 	arena_chunk_t *chunk;
668f921d10fSJason Evans 
669f921d10fSJason Evans 	if (arena->spare != NULL)
670f921d10fSJason Evans 		chunk = arena_chunk_init_spare(arena);
6712fff27f8SJason Evans 	else {
6721f0a49e8SJason Evans 		chunk = arena_chunk_init_hard(tsdn, arena);
6732fff27f8SJason Evans 		if (chunk == NULL)
6742fff27f8SJason Evans 			return (NULL);
6752fff27f8SJason Evans 	}
676f921d10fSJason Evans 
6771f0a49e8SJason Evans 	ql_elm_new(&chunk->node, ql_link);
6781f0a49e8SJason Evans 	ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
679d0e79aa3SJason Evans 	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);
68082872ac0SJason Evans 
681a4bd5210SJason Evans 	return (chunk);
682a4bd5210SJason Evans }
683a4bd5210SJason Evans 
684a4bd5210SJason Evans static void
6851f0a49e8SJason Evans arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
686a4bd5210SJason Evans {
6871f0a49e8SJason Evans 	bool committed;
6881f0a49e8SJason Evans 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
6891f0a49e8SJason Evans 
6901f0a49e8SJason Evans 	chunk_deregister(chunk, &chunk->node);
6911f0a49e8SJason Evans 
6921f0a49e8SJason Evans 	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
6931f0a49e8SJason Evans 	if (!committed) {
6941f0a49e8SJason Evans 		/*
6951f0a49e8SJason Evans 		 * Decommit the header.  Mark the chunk as decommitted even if
6961f0a49e8SJason Evans 		 * header decommit fails, since treating a partially committed
6971f0a49e8SJason Evans 		 * chunk as committed has a high potential for causing later
6981f0a49e8SJason Evans 		 * access of decommitted memory.
6991f0a49e8SJason Evans 		 */
7001f0a49e8SJason Evans 		chunk_hooks = chunk_hooks_get(tsdn, arena);
7011f0a49e8SJason Evans 		chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
7021f0a49e8SJason Evans 		    arena->ind);
7031f0a49e8SJason Evans 	}
7041f0a49e8SJason Evans 
7051f0a49e8SJason Evans 	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
7061f0a49e8SJason Evans 	    committed);
7071f0a49e8SJason Evans 
7081f0a49e8SJason Evans 	if (config_stats) {
7091f0a49e8SJason Evans 		arena->stats.mapped -= chunksize;
7101f0a49e8SJason Evans 		arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
7111f0a49e8SJason Evans 	}
7121f0a49e8SJason Evans }
7131f0a49e8SJason Evans 
7141f0a49e8SJason Evans static void
7151f0a49e8SJason Evans arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
7161f0a49e8SJason Evans {
7171f0a49e8SJason Evans 
7181f0a49e8SJason Evans 	assert(arena->spare != spare);
7191f0a49e8SJason Evans 
7201f0a49e8SJason Evans 	if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
7211f0a49e8SJason Evans 		arena_run_dirty_remove(arena, spare, map_bias,
7221f0a49e8SJason Evans 		    chunk_npages-map_bias);
7231f0a49e8SJason Evans 	}
7241f0a49e8SJason Evans 
7251f0a49e8SJason Evans 	arena_chunk_discard(tsdn, arena, spare);
7261f0a49e8SJason Evans }
7271f0a49e8SJason Evans 
7281f0a49e8SJason Evans static void
7291f0a49e8SJason Evans arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
7301f0a49e8SJason Evans {
7311f0a49e8SJason Evans 	arena_chunk_t *spare;
732d0e79aa3SJason Evans 
73335dad073SJason Evans 	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
73435dad073SJason Evans 	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
73535dad073SJason Evans 	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
736d0e79aa3SJason Evans 	    arena_maxrun);
73735dad073SJason Evans 	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
738d0e79aa3SJason Evans 	    arena_maxrun);
73935dad073SJason Evans 	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
74035dad073SJason Evans 	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
741d0e79aa3SJason Evans 	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
742d0e79aa3SJason Evans 	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));
74335dad073SJason Evans 
744df0d881dSJason Evans 	/* Remove run from runs_avail, so that the arena does not use it. */
745d0e79aa3SJason Evans 	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);
746a4bd5210SJason Evans 
7471f0a49e8SJason Evans 	ql_remove(&arena->achunks, &chunk->node, ql_link);
7481f0a49e8SJason Evans 	spare = arena->spare;
749a4bd5210SJason Evans 	arena->spare = chunk;
7501f0a49e8SJason Evans 	if (spare != NULL)
7511f0a49e8SJason Evans 		arena_spare_discard(tsdn, arena, spare);
752a4bd5210SJason Evans }
753a4bd5210SJason Evans 
754d0e79aa3SJason Evans static void
755d0e79aa3SJason Evans arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
756d0e79aa3SJason Evans {
757536b3538SJason Evans 	szind_t index = size2index(usize) - nlclasses - NBINS;
758d0e79aa3SJason Evans 
759d0e79aa3SJason Evans 	cassert(config_stats);
760d0e79aa3SJason Evans 
761d0e79aa3SJason Evans 	arena->stats.nmalloc_huge++;
762d0e79aa3SJason Evans 	arena->stats.allocated_huge += usize;
763d0e79aa3SJason Evans 	arena->stats.hstats[index].nmalloc++;
764d0e79aa3SJason Evans 	arena->stats.hstats[index].curhchunks++;
765d0e79aa3SJason Evans }
766d0e79aa3SJason Evans 
767d0e79aa3SJason Evans static void
768d0e79aa3SJason Evans arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
769d0e79aa3SJason Evans {
770536b3538SJason Evans 	szind_t index = size2index(usize) - nlclasses - NBINS;
771d0e79aa3SJason Evans 
772d0e79aa3SJason Evans 	cassert(config_stats);
773d0e79aa3SJason Evans 
774d0e79aa3SJason Evans 	arena->stats.nmalloc_huge--;
775d0e79aa3SJason Evans 	arena->stats.allocated_huge -= usize;
776d0e79aa3SJason Evans 	arena->stats.hstats[index].nmalloc--;
777d0e79aa3SJason Evans 	arena->stats.hstats[index].curhchunks--;
778d0e79aa3SJason Evans }
779d0e79aa3SJason Evans 
780d0e79aa3SJason Evans static void
781d0e79aa3SJason Evans arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
782d0e79aa3SJason Evans {
783536b3538SJason Evans 	szind_t index = size2index(usize) - nlclasses - NBINS;
784d0e79aa3SJason Evans 
785d0e79aa3SJason Evans 	cassert(config_stats);
786d0e79aa3SJason Evans 
787d0e79aa3SJason Evans 	arena->stats.ndalloc_huge++;
788d0e79aa3SJason Evans 	arena->stats.allocated_huge -= usize;
789d0e79aa3SJason Evans 	arena->stats.hstats[index].ndalloc++;
790d0e79aa3SJason Evans 	arena->stats.hstats[index].curhchunks--;
791d0e79aa3SJason Evans }
792d0e79aa3SJason Evans 
793d0e79aa3SJason Evans static void
7941f0a49e8SJason Evans arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
7951f0a49e8SJason Evans {
7961f0a49e8SJason Evans 	szind_t index = size2index(usize) - nlclasses - NBINS;
7971f0a49e8SJason Evans 
7981f0a49e8SJason Evans 	cassert(config_stats);
7991f0a49e8SJason Evans 
8001f0a49e8SJason Evans 	arena->stats.ndalloc_huge++;
8011f0a49e8SJason Evans 	arena->stats.hstats[index].ndalloc--;
8021f0a49e8SJason Evans }
8031f0a49e8SJason Evans 
8041f0a49e8SJason Evans static void
805d0e79aa3SJason Evans arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
806d0e79aa3SJason Evans {
807536b3538SJason Evans 	szind_t index = size2index(usize) - nlclasses - NBINS;
808d0e79aa3SJason Evans 
809d0e79aa3SJason Evans 	cassert(config_stats);
810d0e79aa3SJason Evans 
811d0e79aa3SJason Evans 	arena->stats.ndalloc_huge--;
812d0e79aa3SJason Evans 	arena->stats.allocated_huge += usize;
813d0e79aa3SJason Evans 	arena->stats.hstats[index].ndalloc--;
814d0e79aa3SJason Evans 	arena->stats.hstats[index].curhchunks++;
815d0e79aa3SJason Evans }
816d0e79aa3SJason Evans 
817d0e79aa3SJason Evans static void
818d0e79aa3SJason Evans arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
819d0e79aa3SJason Evans {
820d0e79aa3SJason Evans 
821d0e79aa3SJason Evans 	arena_huge_dalloc_stats_update(arena, oldsize);
822d0e79aa3SJason Evans 	arena_huge_malloc_stats_update(arena, usize);
823d0e79aa3SJason Evans }
824d0e79aa3SJason Evans 
825d0e79aa3SJason Evans static void
826d0e79aa3SJason Evans arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
827d0e79aa3SJason Evans     size_t usize)
828d0e79aa3SJason Evans {
829d0e79aa3SJason Evans 
830d0e79aa3SJason Evans 	arena_huge_dalloc_stats_update_undo(arena, oldsize);
831d0e79aa3SJason Evans 	arena_huge_malloc_stats_update_undo(arena, usize);
832d0e79aa3SJason Evans }
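
/*
 * Editorial note: hstats[] is indexed per huge size class, hence the index
 * computed above subtracts the NBINS small classes and nlclasses large
 * classes from size2index(usize).  The *_undo variants exist because these
 * stats are updated optimistically before the underlying chunk operation and
 * must be rolled back if it fails.
 */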
833d0e79aa3SJason Evans 
834d0e79aa3SJason Evans extent_node_t *
8351f0a49e8SJason Evans arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
836d0e79aa3SJason Evans {
837d0e79aa3SJason Evans 	extent_node_t *node;
838d0e79aa3SJason Evans 
8391f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
840d0e79aa3SJason Evans 	node = ql_last(&arena->node_cache, ql_link);
841d0e79aa3SJason Evans 	if (node == NULL) {
8421f0a49e8SJason Evans 		malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
8431f0a49e8SJason Evans 		return (base_alloc(tsdn, sizeof(extent_node_t)));
844d0e79aa3SJason Evans 	}
845d0e79aa3SJason Evans 	ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
8461f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
847d0e79aa3SJason Evans 	return (node);
848d0e79aa3SJason Evans }
849d0e79aa3SJason Evans 
850d0e79aa3SJason Evans void
8511f0a49e8SJason Evans arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
852d0e79aa3SJason Evans {
853d0e79aa3SJason Evans 
8541f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
855d0e79aa3SJason Evans 	ql_elm_new(node, ql_link);
856d0e79aa3SJason Evans 	ql_tail_insert(&arena->node_cache, node, ql_link);
8571f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
858d0e79aa3SJason Evans }
859d0e79aa3SJason Evans 
860d0e79aa3SJason Evans static void *
8611f0a49e8SJason Evans arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
8621f0a49e8SJason Evans     chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
8631f0a49e8SJason Evans     size_t csize)
864d0e79aa3SJason Evans {
865d0e79aa3SJason Evans 	void *ret;
866d0e79aa3SJason Evans 	bool commit = true;
867d0e79aa3SJason Evans 
8681f0a49e8SJason Evans 	ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
8691f0a49e8SJason Evans 	    alignment, zero, &commit);
870d0e79aa3SJason Evans 	if (ret == NULL) {
871d0e79aa3SJason Evans 		/* Revert optimistic stats updates. */
8721f0a49e8SJason Evans 		malloc_mutex_lock(tsdn, &arena->lock);
873d0e79aa3SJason Evans 		if (config_stats) {
874d0e79aa3SJason Evans 			arena_huge_malloc_stats_update_undo(arena, usize);
875d0e79aa3SJason Evans 			arena->stats.mapped -= usize;
876d0e79aa3SJason Evans 		}
877df0d881dSJason Evans 		arena_nactive_sub(arena, usize >> LG_PAGE);
8781f0a49e8SJason Evans 		malloc_mutex_unlock(tsdn, &arena->lock);
879d0e79aa3SJason Evans 	}
880d0e79aa3SJason Evans 
881d0e79aa3SJason Evans 	return (ret);
882d0e79aa3SJason Evans }
883d0e79aa3SJason Evans 
884d0e79aa3SJason Evans void *
8851f0a49e8SJason Evans arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
8861f0a49e8SJason Evans     size_t alignment, bool *zero)
887d0e79aa3SJason Evans {
888d0e79aa3SJason Evans 	void *ret;
889d0e79aa3SJason Evans 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
890d0e79aa3SJason Evans 	size_t csize = CHUNK_CEILING(usize);
891*bde95144SJason Evans 	bool commit = true;
892d0e79aa3SJason Evans 
8931f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
894d0e79aa3SJason Evans 
895d0e79aa3SJason Evans 	/* Optimistically update stats. */
896d0e79aa3SJason Evans 	if (config_stats) {
897d0e79aa3SJason Evans 		arena_huge_malloc_stats_update(arena, usize);
898d0e79aa3SJason Evans 		arena->stats.mapped += usize;
899d0e79aa3SJason Evans 	}
900df0d881dSJason Evans 	arena_nactive_add(arena, usize >> LG_PAGE);
901d0e79aa3SJason Evans 
9021f0a49e8SJason Evans 	ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
903*bde95144SJason Evans 	    alignment, zero, &commit, true);
9041f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
905d0e79aa3SJason Evans 	if (ret == NULL) {
9061f0a49e8SJason Evans 		ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
9071f0a49e8SJason Evans 		    usize, alignment, zero, csize);
908d0e79aa3SJason Evans 	}
909d0e79aa3SJason Evans 
910d0e79aa3SJason Evans 	return (ret);
911d0e79aa3SJason Evans }
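
/*
 * Editorial note: stats and nactive are updated optimistically while
 * arena->lock is held, before the chunk is actually obtained.  If the
 * cached-chunk lookup misses, arena_chunk_alloc_huge_hard() retries via the
 * chunk hooks without the lock and reverts the optimistic updates on
 * failure.
 */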
912d0e79aa3SJason Evans 
913d0e79aa3SJason Evans void
9141f0a49e8SJason Evans arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
915d0e79aa3SJason Evans {
916d0e79aa3SJason Evans 	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
917d0e79aa3SJason Evans 	size_t csize;
918d0e79aa3SJason Evans 
919d0e79aa3SJason Evans 	csize = CHUNK_CEILING(usize);
9201f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
921d0e79aa3SJason Evans 	if (config_stats) {
922d0e79aa3SJason Evans 		arena_huge_dalloc_stats_update(arena, usize);
923d0e79aa3SJason Evans 		arena->stats.mapped -= usize;
924d0e79aa3SJason Evans 	}
925df0d881dSJason Evans 	arena_nactive_sub(arena, usize >> LG_PAGE);
926d0e79aa3SJason Evans 
9271f0a49e8SJason Evans 	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
9281f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
929d0e79aa3SJason Evans }
930d0e79aa3SJason Evans 
931d0e79aa3SJason Evans void
9321f0a49e8SJason Evans arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
9331f0a49e8SJason Evans     size_t oldsize, size_t usize)
934d0e79aa3SJason Evans {
935d0e79aa3SJason Evans 
936d0e79aa3SJason Evans 	assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
937d0e79aa3SJason Evans 	assert(oldsize != usize);
938d0e79aa3SJason Evans 
9391f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
940d0e79aa3SJason Evans 	if (config_stats)
941d0e79aa3SJason Evans 		arena_huge_ralloc_stats_update(arena, oldsize, usize);
942df0d881dSJason Evans 	if (oldsize < usize)
943df0d881dSJason Evans 		arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
944df0d881dSJason Evans 	else
945df0d881dSJason Evans 		arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
9461f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
947d0e79aa3SJason Evans }
948d0e79aa3SJason Evans 
949d0e79aa3SJason Evans void
9501f0a49e8SJason Evans arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
9511f0a49e8SJason Evans     size_t oldsize, size_t usize)
952d0e79aa3SJason Evans {
953d0e79aa3SJason Evans 	size_t udiff = oldsize - usize;
954d0e79aa3SJason Evans 	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);
955d0e79aa3SJason Evans 
9561f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
957d0e79aa3SJason Evans 	if (config_stats) {
958d0e79aa3SJason Evans 		arena_huge_ralloc_stats_update(arena, oldsize, usize);
959df0d881dSJason Evans 		if (cdiff != 0)
960d0e79aa3SJason Evans 			arena->stats.mapped -= cdiff;
961d0e79aa3SJason Evans 	}
962df0d881dSJason Evans 	arena_nactive_sub(arena, udiff >> LG_PAGE);
963d0e79aa3SJason Evans 
964d0e79aa3SJason Evans 	if (cdiff != 0) {
965d0e79aa3SJason Evans 		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
966d0e79aa3SJason Evans 		void *nchunk = (void *)((uintptr_t)chunk +
967d0e79aa3SJason Evans 		    CHUNK_CEILING(usize));
968d0e79aa3SJason Evans 
9691f0a49e8SJason Evans 		chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
9701f0a49e8SJason Evans 		    true);
971d0e79aa3SJason Evans 	}
9721f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
973d0e79aa3SJason Evans }
974d0e79aa3SJason Evans 
975d0e79aa3SJason Evans static bool
9761f0a49e8SJason Evans arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
9771f0a49e8SJason Evans     chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
9781f0a49e8SJason Evans     bool *zero, void *nchunk, size_t udiff, size_t cdiff)
979d0e79aa3SJason Evans {
980d0e79aa3SJason Evans 	bool err;
981d0e79aa3SJason Evans 	bool commit = true;
982d0e79aa3SJason Evans 
9831f0a49e8SJason Evans 	err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
9841f0a49e8SJason Evans 	    chunksize, zero, &commit) == NULL);
985d0e79aa3SJason Evans 	if (err) {
986d0e79aa3SJason Evans 		/* Revert optimistic stats updates. */
9871f0a49e8SJason Evans 		malloc_mutex_lock(tsdn, &arena->lock);
988d0e79aa3SJason Evans 		if (config_stats) {
989d0e79aa3SJason Evans 			arena_huge_ralloc_stats_update_undo(arena, oldsize,
990d0e79aa3SJason Evans 			    usize);
991d0e79aa3SJason Evans 			arena->stats.mapped -= cdiff;
992d0e79aa3SJason Evans 		}
993df0d881dSJason Evans 		arena_nactive_sub(arena, udiff >> LG_PAGE);
9941f0a49e8SJason Evans 		malloc_mutex_unlock(tsdn, &arena->lock);
995d0e79aa3SJason Evans 	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
996d0e79aa3SJason Evans 	    cdiff, true, arena->ind)) {
9971f0a49e8SJason Evans 		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
9981f0a49e8SJason Evans 		    *zero, true);
999d0e79aa3SJason Evans 		err = true;
1000d0e79aa3SJason Evans 	}
1001d0e79aa3SJason Evans 	return (err);
1002d0e79aa3SJason Evans }
1003d0e79aa3SJason Evans 
1004d0e79aa3SJason Evans bool
10051f0a49e8SJason Evans arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
10061f0a49e8SJason Evans     size_t oldsize, size_t usize, bool *zero)
1007d0e79aa3SJason Evans {
1008d0e79aa3SJason Evans 	bool err;
10091f0a49e8SJason Evans 	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
1010d0e79aa3SJason Evans 	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
1011d0e79aa3SJason Evans 	size_t udiff = usize - oldsize;
1012d0e79aa3SJason Evans 	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
1013*bde95144SJason Evans 	bool commit = true;
1014d0e79aa3SJason Evans 
10151f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
1016d0e79aa3SJason Evans 
1017d0e79aa3SJason Evans 	/* Optimistically update stats. */
1018d0e79aa3SJason Evans 	if (config_stats) {
1019d0e79aa3SJason Evans 		arena_huge_ralloc_stats_update(arena, oldsize, usize);
1020d0e79aa3SJason Evans 		arena->stats.mapped += cdiff;
1021d0e79aa3SJason Evans 	}
1022df0d881dSJason Evans 	arena_nactive_add(arena, udiff >> LG_PAGE);
1023d0e79aa3SJason Evans 
10241f0a49e8SJason Evans 	err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
1025*bde95144SJason Evans 	    chunksize, zero, &commit, true) == NULL);
10261f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
1027d0e79aa3SJason Evans 	if (err) {
10281f0a49e8SJason Evans 		err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
10291f0a49e8SJason Evans 		    &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
1030d0e79aa3SJason Evans 		    cdiff);
1031d0e79aa3SJason Evans 	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
1032d0e79aa3SJason Evans 	    cdiff, true, arena->ind)) {
10331f0a49e8SJason Evans 		chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
10341f0a49e8SJason Evans 		    *zero, true);
1035d0e79aa3SJason Evans 		err = true;
1036d0e79aa3SJason Evans 	}
1037d0e79aa3SJason Evans 
1038d0e79aa3SJason Evans 	return (err);
1039d0e79aa3SJason Evans }
1040d0e79aa3SJason Evans 
1041d0e79aa3SJason Evans /*
1042d0e79aa3SJason Evans  * Do first-best-fit run selection, i.e. select the lowest run that best fits.
1043df0d881dSJason Evans  * Run sizes are indexed, so not all candidate runs are necessarily exactly the
1044df0d881dSJason Evans  * same size.
1045d0e79aa3SJason Evans  */
1046d0e79aa3SJason Evans static arena_run_t *
1047d0e79aa3SJason Evans arena_run_first_best_fit(arena_t *arena, size_t size)
1048d0e79aa3SJason Evans {
1049*bde95144SJason Evans 	pszind_t pind, i;
1050df0d881dSJason Evans 
1051*bde95144SJason Evans 	pind = psz2ind(run_quantize_ceil(size));
1052*bde95144SJason Evans 
1053*bde95144SJason Evans 	for (i = pind; pind2sz(i) <= chunksize; i++) {
10541f0a49e8SJason Evans 		arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
1055*bde95144SJason Evans 		    &arena->runs_avail[i]);
1056df0d881dSJason Evans 		if (miscelm != NULL)
1057d0e79aa3SJason Evans 			return (&miscelm->run);
1058d0e79aa3SJason Evans 	}
1059d0e79aa3SJason Evans 
1060df0d881dSJason Evans 	return (NULL);
1061df0d881dSJason Evans }
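
/*
 * Editorial note: "first best fit" starts at the heap for the smallest
 * quantized size that can satisfy the request and scans toward larger
 * classes; within each heap, the pairing heap ordered by
 * arena_run_addr_comp yields the lowest-addressed run first, which tends to
 * keep allocations packed toward lower addresses.
 */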
1062df0d881dSJason Evans 
1063a4bd5210SJason Evans static arena_run_t *
1064f921d10fSJason Evans arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
1065a4bd5210SJason Evans {
1066*bde95144SJason Evans 	arena_run_t *run = arena_run_first_best_fit(arena, size);
1067d0e79aa3SJason Evans 	if (run != NULL) {
1068d0e79aa3SJason Evans 		if (arena_run_split_large(arena, run, size, zero))
1069d0e79aa3SJason Evans 			run = NULL;
1070a4bd5210SJason Evans 	}
1071d0e79aa3SJason Evans 	return (run);
107235dad073SJason Evans }
107335dad073SJason Evans 
107435dad073SJason Evans static arena_run_t *
10751f0a49e8SJason Evans arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
107635dad073SJason Evans {
107735dad073SJason Evans 	arena_chunk_t *chunk;
107835dad073SJason Evans 	arena_run_t *run;
107935dad073SJason Evans 
1080d0e79aa3SJason Evans 	assert(size <= arena_maxrun);
1081d0e79aa3SJason Evans 	assert(size == PAGE_CEILING(size));
108235dad073SJason Evans 
108335dad073SJason Evans 	/* Search the arena's chunks for the lowest best fit. */
1084f921d10fSJason Evans 	run = arena_run_alloc_large_helper(arena, size, zero);
108535dad073SJason Evans 	if (run != NULL)
108635dad073SJason Evans 		return (run);
108735dad073SJason Evans 
1088a4bd5210SJason Evans 	/*
1089a4bd5210SJason Evans 	 * No usable runs.  Create a new chunk from which to allocate the run.
1090a4bd5210SJason Evans 	 */
10911f0a49e8SJason Evans 	chunk = arena_chunk_alloc(tsdn, arena);
1092a4bd5210SJason Evans 	if (chunk != NULL) {
10931f0a49e8SJason Evans 		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
1094d0e79aa3SJason Evans 		if (arena_run_split_large(arena, run, size, zero))
1095d0e79aa3SJason Evans 			run = NULL;
1096a4bd5210SJason Evans 		return (run);
1097a4bd5210SJason Evans 	}
1098a4bd5210SJason Evans 
1099a4bd5210SJason Evans 	/*
1100a4bd5210SJason Evans 	 * arena_chunk_alloc() failed, but another thread may have made
1101a4bd5210SJason Evans 	 * sufficient memory available while this one dropped arena->lock in
1102a4bd5210SJason Evans 	 * arena_chunk_alloc(), so search one more time.
1103a4bd5210SJason Evans 	 */
1104f921d10fSJason Evans 	return (arena_run_alloc_large_helper(arena, size, zero));
1105f921d10fSJason Evans }
1106f921d10fSJason Evans 
1107f921d10fSJason Evans static arena_run_t *
1108536b3538SJason Evans arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
1109f921d10fSJason Evans {
1110d0e79aa3SJason Evans 	arena_run_t *run = arena_run_first_best_fit(arena, size);
1111d0e79aa3SJason Evans 	if (run != NULL) {
1112d0e79aa3SJason Evans 		if (arena_run_split_small(arena, run, size, binind))
1113d0e79aa3SJason Evans 			run = NULL;
1114d0e79aa3SJason Evans 	}
1115f921d10fSJason Evans 	return (run);
1116f921d10fSJason Evans }
1117f921d10fSJason Evans 
1118f921d10fSJason Evans static arena_run_t *
11191f0a49e8SJason Evans arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
1120f921d10fSJason Evans {
1121f921d10fSJason Evans 	arena_chunk_t *chunk;
1122f921d10fSJason Evans 	arena_run_t *run;
1123f921d10fSJason Evans 
1124d0e79aa3SJason Evans 	assert(size <= arena_maxrun);
1125d0e79aa3SJason Evans 	assert(size == PAGE_CEILING(size));
1126f921d10fSJason Evans 	assert(binind != BININD_INVALID);
1127f921d10fSJason Evans 
1128f921d10fSJason Evans 	/* Search the arena's chunks for the lowest best fit. */
1129f921d10fSJason Evans 	run = arena_run_alloc_small_helper(arena, size, binind);
1130f921d10fSJason Evans 	if (run != NULL)
1131f921d10fSJason Evans 		return (run);
1132f921d10fSJason Evans 
1133f921d10fSJason Evans 	/*
1134f921d10fSJason Evans 	 * No usable runs.  Create a new chunk from which to allocate the run.
1135f921d10fSJason Evans 	 */
11361f0a49e8SJason Evans 	chunk = arena_chunk_alloc(tsdn, arena);
1137f921d10fSJason Evans 	if (chunk != NULL) {
11381f0a49e8SJason Evans 		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
1139d0e79aa3SJason Evans 		if (arena_run_split_small(arena, run, size, binind))
1140d0e79aa3SJason Evans 			run = NULL;
1141f921d10fSJason Evans 		return (run);
1142f921d10fSJason Evans 	}
1143f921d10fSJason Evans 
1144f921d10fSJason Evans 	/*
1145f921d10fSJason Evans 	 * arena_chunk_alloc() failed, but another thread may have made
1146f921d10fSJason Evans 	 * sufficient memory available while this one dropped arena->lock in
1147f921d10fSJason Evans 	 * arena_chunk_alloc(), so search one more time.
1148f921d10fSJason Evans 	 */
1149f921d10fSJason Evans 	return (arena_run_alloc_small_helper(arena, size, binind));
1150a4bd5210SJason Evans }
1151a4bd5210SJason Evans 
1152d0e79aa3SJason Evans static bool
1153d0e79aa3SJason Evans arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
1154d0e79aa3SJason Evans {
1155d0e79aa3SJason Evans 
1156d0e79aa3SJason Evans 	return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
1157d0e79aa3SJason Evans 	    << 3));
1158d0e79aa3SJason Evans }
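/*
 * For context: lg_dirty_mult expresses the tolerated active:dirty page ratio
 * as a power of two.  In arena_maybe_purge_ratio() below, purging starts once
 * ndirty exceeds (nactive >> lg_dirty_mult); e.g. a value of 3 tolerates
 * roughly one dirty page per eight active pages.  A value of -1 disables
 * ratio-based purging, and the upper bound above merely keeps the shift
 * within the bit width of size_t.
 */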
1159d0e79aa3SJason Evans 
1160d0e79aa3SJason Evans ssize_t
11611f0a49e8SJason Evans arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
1162d0e79aa3SJason Evans {
1163d0e79aa3SJason Evans 	ssize_t lg_dirty_mult;
1164d0e79aa3SJason Evans 
11651f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
1166d0e79aa3SJason Evans 	lg_dirty_mult = arena->lg_dirty_mult;
11671f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
1168d0e79aa3SJason Evans 
1169d0e79aa3SJason Evans 	return (lg_dirty_mult);
1170d0e79aa3SJason Evans }
1171d0e79aa3SJason Evans 
1172d0e79aa3SJason Evans bool
11731f0a49e8SJason Evans arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
1174d0e79aa3SJason Evans {
1175d0e79aa3SJason Evans 
1176d0e79aa3SJason Evans 	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
1177d0e79aa3SJason Evans 		return (true);
1178d0e79aa3SJason Evans 
11791f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
1180d0e79aa3SJason Evans 	arena->lg_dirty_mult = lg_dirty_mult;
11811f0a49e8SJason Evans 	arena_maybe_purge(tsdn, arena);
11821f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
1183d0e79aa3SJason Evans 
1184d0e79aa3SJason Evans 	return (false);
1185d0e79aa3SJason Evans }
1186d0e79aa3SJason Evans 
1187df0d881dSJason Evans static void
1188df0d881dSJason Evans arena_decay_deadline_init(arena_t *arena)
1189a4bd5210SJason Evans {
1190a4bd5210SJason Evans 
1191df0d881dSJason Evans 	assert(opt_purge == purge_mode_decay);
1192df0d881dSJason Evans 
1193df0d881dSJason Evans 	/*
1194df0d881dSJason Evans 	 * Generate a new deadline that is uniformly random within the next
1195df0d881dSJason Evans 	 * epoch after the current one.
1196df0d881dSJason Evans 	 */
1197*bde95144SJason Evans 	nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
1198*bde95144SJason Evans 	nstime_add(&arena->decay.deadline, &arena->decay.interval);
1199*bde95144SJason Evans 	if (arena->decay.time > 0) {
1200df0d881dSJason Evans 		nstime_t jitter;
1201df0d881dSJason Evans 
1202*bde95144SJason Evans 		nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state,
1203*bde95144SJason Evans 		    nstime_ns(&arena->decay.interval)));
1204*bde95144SJason Evans 		nstime_add(&arena->decay.deadline, &jitter);
1205df0d881dSJason Evans 	}
1206df0d881dSJason Evans }
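/*
 * Example with illustrative numbers: given a decay time of 10 seconds,
 * decay.interval is (10 s / SMOOTHSTEP_NSTEPS), and each deadline is
 * decay.epoch + decay.interval plus a uniformly random jitter in
 * [0, interval) nanoseconds, so deadlines do not land on exact interval
 * boundaries.
 */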
1207df0d881dSJason Evans 
1208df0d881dSJason Evans static bool
1209df0d881dSJason Evans arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
1210df0d881dSJason Evans {
1211df0d881dSJason Evans 
1212df0d881dSJason Evans 	assert(opt_purge == purge_mode_decay);
1213df0d881dSJason Evans 
1214*bde95144SJason Evans 	return (nstime_compare(&arena->decay.deadline, time) <= 0);
1215df0d881dSJason Evans }
1216df0d881dSJason Evans 
1217df0d881dSJason Evans static size_t
1218df0d881dSJason Evans arena_decay_backlog_npages_limit(const arena_t *arena)
1219df0d881dSJason Evans {
1220df0d881dSJason Evans 	static const uint64_t h_steps[] = {
1221df0d881dSJason Evans #define	STEP(step, h, x, y) \
1222df0d881dSJason Evans 		h,
1223df0d881dSJason Evans 		SMOOTHSTEP
1224df0d881dSJason Evans #undef STEP
1225df0d881dSJason Evans 	};
1226df0d881dSJason Evans 	uint64_t sum;
1227df0d881dSJason Evans 	size_t npages_limit_backlog;
1228df0d881dSJason Evans 	unsigned i;
1229df0d881dSJason Evans 
1230df0d881dSJason Evans 	assert(opt_purge == purge_mode_decay);
1231df0d881dSJason Evans 
1232df0d881dSJason Evans 	/*
1233df0d881dSJason Evans 	 * For each element of decay.backlog, multiply by the corresponding
1234df0d881dSJason Evans 	 * fixed-point smoothstep decay factor.  Sum the products, then divide
1235df0d881dSJason Evans 	 * to round down to the nearest whole number of pages.
1236df0d881dSJason Evans 	 */
1237df0d881dSJason Evans 	sum = 0;
1238df0d881dSJason Evans 	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
1239*bde95144SJason Evans 		sum += arena->decay.backlog[i] * h_steps[i];
12401f0a49e8SJason Evans 	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);
1241df0d881dSJason Evans 
1242df0d881dSJason Evans 	return (npages_limit_backlog);
1243df0d881dSJason Evans }
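/*
 * Stated as a formula, the limit computed above is a fixed-point dot product:
 *
 *   limit = (sum over i of decay.backlog[i] * h_steps[i]) >> SMOOTHSTEP_BFP
 *
 * where decay.backlog[0] holds the dirty-page delta of the oldest tracked
 * epoch and decay.backlog[SMOOTHSTEP_NSTEPS-1] the newest.  Because the
 * h_steps[] table ramps from roughly 0 up to 1.0 in fixed point, recently
 * dirtied pages count almost fully toward the number of pages that must be
 * retained, while pages from old epochs have mostly decayed away and may be
 * purged.
 */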
1244df0d881dSJason Evans 
1245df0d881dSJason Evans static void
1246*bde95144SJason Evans arena_decay_backlog_update_last(arena_t *arena)
1247df0d881dSJason Evans {
1248*bde95144SJason Evans 	size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
1249*bde95144SJason Evans 	    arena->ndirty - arena->decay.ndirty : 0;
1250*bde95144SJason Evans 	arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
1251*bde95144SJason Evans }
1252df0d881dSJason Evans 
1253*bde95144SJason Evans static void
1254*bde95144SJason Evans arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
1255*bde95144SJason Evans {
1256df0d881dSJason Evans 
12571f0a49e8SJason Evans 	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
1258*bde95144SJason Evans 		memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
1259df0d881dSJason Evans 		    sizeof(size_t));
1260df0d881dSJason Evans 	} else {
12611f0a49e8SJason Evans 		size_t nadvance_z = (size_t)nadvance_u64;
12621f0a49e8SJason Evans 
12631f0a49e8SJason Evans 		assert((uint64_t)nadvance_z == nadvance_u64);
12641f0a49e8SJason Evans 
1265*bde95144SJason Evans 		memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
12661f0a49e8SJason Evans 		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
12671f0a49e8SJason Evans 		if (nadvance_z > 1) {
1268*bde95144SJason Evans 			memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
12691f0a49e8SJason Evans 			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
1270df0d881dSJason Evans 		}
1271df0d881dSJason Evans 	}
1272*bde95144SJason Evans 
1273*bde95144SJason Evans 	arena_decay_backlog_update_last(arena);
1274df0d881dSJason Evans }
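/*
 * Example: if three whole decay intervals elapsed since the last update
 * (nadvance_u64 == 3), the three oldest backlog slots slide off the front
 * via memmove(), the two slots corresponding to the skipped, idle intervals
 * are zeroed, and the newest slot is refreshed with the current dirty-page
 * delta.  Advancing by SMOOTHSTEP_NSTEPS or more intervals clears every slot
 * except the newest, which is refreshed as usual.
 */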
1275df0d881dSJason Evans 
1276*bde95144SJason Evans static void
1277*bde95144SJason Evans arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
1278df0d881dSJason Evans {
1279*bde95144SJason Evans 	uint64_t nadvance_u64;
1280*bde95144SJason Evans 	nstime_t delta;
1281df0d881dSJason Evans 
1282df0d881dSJason Evans 	assert(opt_purge == purge_mode_decay);
1283*bde95144SJason Evans 	assert(arena_decay_deadline_reached(arena, time));
1284df0d881dSJason Evans 
1285*bde95144SJason Evans 	nstime_copy(&delta, time);
1286*bde95144SJason Evans 	nstime_subtract(&delta, &arena->decay.epoch);
1287*bde95144SJason Evans 	nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
1288*bde95144SJason Evans 	assert(nadvance_u64 > 0);
1289df0d881dSJason Evans 
1290*bde95144SJason Evans 	/* Add nadvance_u64 decay intervals to epoch. */
1291*bde95144SJason Evans 	nstime_copy(&delta, &arena->decay.interval);
1292*bde95144SJason Evans 	nstime_imultiply(&delta, nadvance_u64);
1293*bde95144SJason Evans 	nstime_add(&arena->decay.epoch, &delta);
1294df0d881dSJason Evans 
1295*bde95144SJason Evans 	/* Set a new deadline. */
1296*bde95144SJason Evans 	arena_decay_deadline_init(arena);
1297*bde95144SJason Evans 
1298*bde95144SJason Evans 	/* Update the backlog. */
1299*bde95144SJason Evans 	arena_decay_backlog_update(arena, nadvance_u64);
1300*bde95144SJason Evans }
1301*bde95144SJason Evans 
1302*bde95144SJason Evans static void
1303*bde95144SJason Evans arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
1304*bde95144SJason Evans {
1305*bde95144SJason Evans 	size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);
1306*bde95144SJason Evans 
1307*bde95144SJason Evans 	if (arena->ndirty > ndirty_limit)
1308*bde95144SJason Evans 		arena_purge_to_limit(tsdn, arena, ndirty_limit);
1309*bde95144SJason Evans 	arena->decay.ndirty = arena->ndirty;
1310*bde95144SJason Evans }
1311*bde95144SJason Evans 
1312*bde95144SJason Evans static void
1313*bde95144SJason Evans arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
1314*bde95144SJason Evans {
1315*bde95144SJason Evans 
1316*bde95144SJason Evans 	arena_decay_epoch_advance_helper(arena, time);
1317*bde95144SJason Evans 	arena_decay_epoch_advance_purge(tsdn, arena);
1318df0d881dSJason Evans }
1319df0d881dSJason Evans 
1320df0d881dSJason Evans static void
1321df0d881dSJason Evans arena_decay_init(arena_t *arena, ssize_t decay_time)
1322df0d881dSJason Evans {
1323df0d881dSJason Evans 
1324*bde95144SJason Evans 	arena->decay.time = decay_time;
1325df0d881dSJason Evans 	if (decay_time > 0) {
1326*bde95144SJason Evans 		nstime_init2(&arena->decay.interval, decay_time, 0);
1327*bde95144SJason Evans 		nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
1328df0d881dSJason Evans 	}
1329df0d881dSJason Evans 
1330*bde95144SJason Evans 	nstime_init(&arena->decay.epoch, 0);
1331*bde95144SJason Evans 	nstime_update(&arena->decay.epoch);
1332*bde95144SJason Evans 	arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
1333df0d881dSJason Evans 	arena_decay_deadline_init(arena);
1334*bde95144SJason Evans 	arena->decay.ndirty = arena->ndirty;
1335*bde95144SJason Evans 	memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
1336df0d881dSJason Evans }
1337df0d881dSJason Evans 
1338df0d881dSJason Evans static bool
1339df0d881dSJason Evans arena_decay_time_valid(ssize_t decay_time)
1340df0d881dSJason Evans {
1341df0d881dSJason Evans 
1342cbc3697dSJason Evans 	if (decay_time < -1)
1343cbc3697dSJason Evans 		return (false);
1344cbc3697dSJason Evans 	if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
1345cbc3697dSJason Evans 		return (true);
1346cbc3697dSJason Evans 	return (false);
1347df0d881dSJason Evans }
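/*
 * Accepted decay times: -1 leaves dirty pages unpurged, 0 purges them
 * immediately, and any positive number of seconds up to NSTIME_SEC_MAX
 * spreads purging across roughly that many seconds (see
 * arena_maybe_purge_decay()).
 */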
1348df0d881dSJason Evans 
1349df0d881dSJason Evans ssize_t
13501f0a49e8SJason Evans arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
1351df0d881dSJason Evans {
1352df0d881dSJason Evans 	ssize_t decay_time;
1353df0d881dSJason Evans 
13541f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
1355*bde95144SJason Evans 	decay_time = arena->decay.time;
13561f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
1357df0d881dSJason Evans 
1358df0d881dSJason Evans 	return (decay_time);
1359df0d881dSJason Evans }
1360df0d881dSJason Evans 
1361df0d881dSJason Evans bool
13621f0a49e8SJason Evans arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
1363df0d881dSJason Evans {
1364df0d881dSJason Evans 
1365df0d881dSJason Evans 	if (!arena_decay_time_valid(decay_time))
1366df0d881dSJason Evans 		return (true);
1367df0d881dSJason Evans 
13681f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
1369df0d881dSJason Evans 	/*
1370df0d881dSJason Evans 	 * Restart decay backlog from scratch, which may cause many dirty pages
1371df0d881dSJason Evans 	 * to be immediately purged.  It would conceptually be possible to map
1372df0d881dSJason Evans 	 * the old backlog onto the new backlog, but there is no justification
1373df0d881dSJason Evans 	 * for such complexity since decay_time changes are intended to be
1374df0d881dSJason Evans 	 * infrequent, either between the {-1, 0, >0} states, or a one-time
1375df0d881dSJason Evans 	 * arbitrary change during initial arena configuration.
1376df0d881dSJason Evans 	 */
1377df0d881dSJason Evans 	arena_decay_init(arena, decay_time);
13781f0a49e8SJason Evans 	arena_maybe_purge(tsdn, arena);
13791f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
1380df0d881dSJason Evans 
1381df0d881dSJason Evans 	return (false);
1382df0d881dSJason Evans }
1383df0d881dSJason Evans 
1384df0d881dSJason Evans static void
13851f0a49e8SJason Evans arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
1386df0d881dSJason Evans {
1387df0d881dSJason Evans 
1388df0d881dSJason Evans 	assert(opt_purge == purge_mode_ratio);
1389df0d881dSJason Evans 
139082872ac0SJason Evans 	/* Don't purge if the option is disabled. */
1391d0e79aa3SJason Evans 	if (arena->lg_dirty_mult < 0)
139282872ac0SJason Evans 		return;
1393df0d881dSJason Evans 
1394d0e79aa3SJason Evans 	/*
1395d0e79aa3SJason Evans 	 * Iterate, since preventing recursive purging could otherwise leave too
1396d0e79aa3SJason Evans 	 * many dirty pages.
1397d0e79aa3SJason Evans 	 */
1398d0e79aa3SJason Evans 	while (true) {
1399d0e79aa3SJason Evans 		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
1400d0e79aa3SJason Evans 		if (threshold < chunk_npages)
1401d0e79aa3SJason Evans 			threshold = chunk_npages;
140282872ac0SJason Evans 		/*
140382872ac0SJason Evans 		 * Don't purge unless the number of purgeable pages exceeds the
140482872ac0SJason Evans 		 * threshold.
140582872ac0SJason Evans 		 */
1406d0e79aa3SJason Evans 		if (arena->ndirty <= threshold)
140782872ac0SJason Evans 			return;
14081f0a49e8SJason Evans 		arena_purge_to_limit(tsdn, arena, threshold);
1409a4bd5210SJason Evans 	}
1410f921d10fSJason Evans }
1411f921d10fSJason Evans 
1412df0d881dSJason Evans static void
14131f0a49e8SJason Evans arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
1414df0d881dSJason Evans {
1415df0d881dSJason Evans 	nstime_t time;
1416df0d881dSJason Evans 
1417df0d881dSJason Evans 	assert(opt_purge == purge_mode_decay);
1418df0d881dSJason Evans 
1419df0d881dSJason Evans 	/* Purge all (time 0) or nothing (time -1) if decay is disabled. */
1420*bde95144SJason Evans 	if (arena->decay.time <= 0) {
1421*bde95144SJason Evans 		if (arena->decay.time == 0)
14221f0a49e8SJason Evans 			arena_purge_to_limit(tsdn, arena, 0);
1423df0d881dSJason Evans 		return;
1424df0d881dSJason Evans 	}
1425df0d881dSJason Evans 
1426*bde95144SJason Evans 	nstime_init(&time, 0);
1427*bde95144SJason Evans 	nstime_update(&time);
1428*bde95144SJason Evans 	if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
1429*bde95144SJason Evans 	    &time) > 0)) {
1430*bde95144SJason Evans 		/*
1431*bde95144SJason Evans 		 * Time went backwards.  Move the epoch back in time and
1432*bde95144SJason Evans 		 * generate a new deadline, with the expectation that time
1433*bde95144SJason Evans 		 * typically flows forward for long enough periods of time that
1434*bde95144SJason Evans 		 * epochs complete.  Unfortunately, this strategy is susceptible
1435*bde95144SJason Evans 		 * to clock jitter triggering premature epoch advances, but
1436*bde95144SJason Evans 		 * clock jitter estimation and compensation isn't feasible here
1437*bde95144SJason Evans 		 * because calls into this code are event-driven.
1438*bde95144SJason Evans 		 */
1439*bde95144SJason Evans 		nstime_copy(&arena->decay.epoch, &time);
1440*bde95144SJason Evans 		arena_decay_deadline_init(arena);
1441*bde95144SJason Evans 	} else {
1442*bde95144SJason Evans 		/* Verify that time does not go backwards. */
1443*bde95144SJason Evans 		assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
1444df0d881dSJason Evans 	}
1445df0d881dSJason Evans 
1446df0d881dSJason Evans 	/*
1447*bde95144SJason Evans 	 * If the deadline has been reached, advance to the current epoch and
1448*bde95144SJason Evans 	 * purge to the new limit if necessary.  Note that dirty pages created
1449*bde95144SJason Evans 	 * during the current epoch are not subject to purge until a future
1450*bde95144SJason Evans 	 * epoch, so as a result purging only happens during epoch advances.
1451df0d881dSJason Evans 	 */
1452*bde95144SJason Evans 	if (arena_decay_deadline_reached(arena, &time))
1453*bde95144SJason Evans 		arena_decay_epoch_advance(tsdn, arena, &time);
1454df0d881dSJason Evans }
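/*
 * Net effect: with a positive decay time, pages dirtied during a given epoch
 * become eligible for purging gradually, following the smoothstep curve, over
 * roughly decay.time seconds, so a burst of deallocation is purged
 * incrementally rather than all at once.
 */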
1455df0d881dSJason Evans 
1456df0d881dSJason Evans void
14571f0a49e8SJason Evans arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
1458df0d881dSJason Evans {
1459df0d881dSJason Evans 
1460df0d881dSJason Evans 	/* Don't recursively purge. */
1461df0d881dSJason Evans 	if (arena->purging)
1462df0d881dSJason Evans 		return;
1463df0d881dSJason Evans 
1464df0d881dSJason Evans 	if (opt_purge == purge_mode_ratio)
14651f0a49e8SJason Evans 		arena_maybe_purge_ratio(tsdn, arena);
1466df0d881dSJason Evans 	else
14671f0a49e8SJason Evans 		arena_maybe_purge_decay(tsdn, arena);
1468df0d881dSJason Evans }
1469df0d881dSJason Evans 
1470f921d10fSJason Evans static size_t
1471d0e79aa3SJason Evans arena_dirty_count(arena_t *arena)
1472f921d10fSJason Evans {
1473d0e79aa3SJason Evans 	size_t ndirty = 0;
1474d0e79aa3SJason Evans 	arena_runs_dirty_link_t *rdelm;
1475d0e79aa3SJason Evans 	extent_node_t *chunkselm;
1476d0e79aa3SJason Evans 
1477d0e79aa3SJason Evans 	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
1478d0e79aa3SJason Evans 	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
1479d0e79aa3SJason Evans 	    rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
1480d0e79aa3SJason Evans 		size_t npages;
1481d0e79aa3SJason Evans 
1482d0e79aa3SJason Evans 		if (rdelm == &chunkselm->rd) {
1483d0e79aa3SJason Evans 			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
1484d0e79aa3SJason Evans 			chunkselm = qr_next(chunkselm, cc_link);
1485d0e79aa3SJason Evans 		} else {
1486d0e79aa3SJason Evans 			arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
1487d0e79aa3SJason Evans 			    rdelm);
1488d0e79aa3SJason Evans 			arena_chunk_map_misc_t *miscelm =
1489d0e79aa3SJason Evans 			    arena_rd_to_miscelm(rdelm);
1490d0e79aa3SJason Evans 			size_t pageind = arena_miscelm_to_pageind(miscelm);
1491d0e79aa3SJason Evans 			assert(arena_mapbits_allocated_get(chunk, pageind) ==
1492d0e79aa3SJason Evans 			    0);
1493d0e79aa3SJason Evans 			assert(arena_mapbits_large_get(chunk, pageind) == 0);
1494d0e79aa3SJason Evans 			assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
1495d0e79aa3SJason Evans 			npages = arena_mapbits_unallocated_size_get(chunk,
1496d0e79aa3SJason Evans 			    pageind) >> LG_PAGE;
1497d0e79aa3SJason Evans 		}
1498d0e79aa3SJason Evans 		ndirty += npages;
1499d0e79aa3SJason Evans 	}
1500d0e79aa3SJason Evans 
1501d0e79aa3SJason Evans 	return (ndirty);
1502d0e79aa3SJason Evans }
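/*
 * Note on the traversal above (and in arena_stash_dirty() below): the
 * runs_dirty ring threads through both dirty runs and cached chunks, with
 * each cached chunk's embedded rd linkage acting as a marker.  Advancing
 * chunkselm in lockstep is what lets the loop decide, via the test
 * rdelm == &chunkselm->rd, whether the current element describes an entire
 * cached chunk or a dirty run within an arena chunk.
 */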
1503d0e79aa3SJason Evans 
1504d0e79aa3SJason Evans static size_t
15051f0a49e8SJason Evans arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
1506df0d881dSJason Evans     size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
1507d0e79aa3SJason Evans     extent_node_t *purge_chunks_sentinel)
1508f921d10fSJason Evans {
1509d0e79aa3SJason Evans 	arena_runs_dirty_link_t *rdelm, *rdelm_next;
1510d0e79aa3SJason Evans 	extent_node_t *chunkselm;
1511d0e79aa3SJason Evans 	size_t nstashed = 0;
1512f921d10fSJason Evans 
1513df0d881dSJason Evans 	/* Stash runs/chunks according to ndirty_limit. */
1514d0e79aa3SJason Evans 	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
1515d0e79aa3SJason Evans 	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
1516d0e79aa3SJason Evans 	    rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
1517d0e79aa3SJason Evans 		size_t npages;
1518d0e79aa3SJason Evans 		rdelm_next = qr_next(rdelm, rd_link);
1519d0e79aa3SJason Evans 
1520d0e79aa3SJason Evans 		if (rdelm == &chunkselm->rd) {
1521d0e79aa3SJason Evans 			extent_node_t *chunkselm_next;
1522*bde95144SJason Evans 			bool zero, commit;
1523d0e79aa3SJason Evans 			UNUSED void *chunk;
1524d0e79aa3SJason Evans 
1525df0d881dSJason Evans 			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
1526df0d881dSJason Evans 			if (opt_purge == purge_mode_decay && arena->ndirty -
1527df0d881dSJason Evans 			    (nstashed + npages) < ndirty_limit)
1528df0d881dSJason Evans 				break;
1529df0d881dSJason Evans 
1530d0e79aa3SJason Evans 			chunkselm_next = qr_next(chunkselm, cc_link);
1531f921d10fSJason Evans 			/*
1532d0e79aa3SJason Evans 			 * Allocate.  chunkselm remains valid due to the
1533d0e79aa3SJason Evans 			 * dalloc_node=false argument to chunk_alloc_cache().
1534f921d10fSJason Evans 			 */
1535d0e79aa3SJason Evans 			zero = false;
1536*bde95144SJason Evans 			commit = false;
15371f0a49e8SJason Evans 			chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
1538d0e79aa3SJason Evans 			    extent_node_addr_get(chunkselm),
1539d0e79aa3SJason Evans 			    extent_node_size_get(chunkselm), chunksize, &zero,
1540*bde95144SJason Evans 			    &commit, false);
1541d0e79aa3SJason Evans 			assert(chunk == extent_node_addr_get(chunkselm));
1542d0e79aa3SJason Evans 			assert(zero == extent_node_zeroed_get(chunkselm));
1543d0e79aa3SJason Evans 			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
1544d0e79aa3SJason Evans 			    purge_chunks_sentinel);
1545df0d881dSJason Evans 			assert(npages == (extent_node_size_get(chunkselm) >>
1546df0d881dSJason Evans 			    LG_PAGE));
1547d0e79aa3SJason Evans 			chunkselm = chunkselm_next;
1548d0e79aa3SJason Evans 		} else {
1549d0e79aa3SJason Evans 			arena_chunk_t *chunk =
1550d0e79aa3SJason Evans 			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1551d0e79aa3SJason Evans 			arena_chunk_map_misc_t *miscelm =
1552d0e79aa3SJason Evans 			    arena_rd_to_miscelm(rdelm);
1553d0e79aa3SJason Evans 			size_t pageind = arena_miscelm_to_pageind(miscelm);
1554d0e79aa3SJason Evans 			arena_run_t *run = &miscelm->run;
1555f921d10fSJason Evans 			size_t run_size =
1556f921d10fSJason Evans 			    arena_mapbits_unallocated_size_get(chunk, pageind);
1557f921d10fSJason Evans 
1558f921d10fSJason Evans 			npages = run_size >> LG_PAGE;
1559df0d881dSJason Evans 			if (opt_purge == purge_mode_decay && arena->ndirty -
1560df0d881dSJason Evans 			    (nstashed + npages) < ndirty_limit)
1561df0d881dSJason Evans 				break;
1562d0e79aa3SJason Evans 
1563f921d10fSJason Evans 			assert(pageind + npages <= chunk_npages);
1564f921d10fSJason Evans 			assert(arena_mapbits_dirty_get(chunk, pageind) ==
1565f921d10fSJason Evans 			    arena_mapbits_dirty_get(chunk, pageind+npages-1));
1566f921d10fSJason Evans 
1567d0e79aa3SJason Evans 			/*
1568d0e79aa3SJason Evans 			 * If purging the spare chunk's run, make it available
1569d0e79aa3SJason Evans 			 * prior to allocation.
1570d0e79aa3SJason Evans 			 */
1571d0e79aa3SJason Evans 			if (chunk == arena->spare)
15721f0a49e8SJason Evans 				arena_chunk_alloc(tsdn, arena);
1573f921d10fSJason Evans 
1574d0e79aa3SJason Evans 			/* Temporarily allocate the free dirty run. */
1575d0e79aa3SJason Evans 			arena_run_split_large(arena, run, run_size, false);
1576d0e79aa3SJason Evans 			/* Stash. */
1577d0e79aa3SJason Evans 			if (false)
1578d0e79aa3SJason Evans 				qr_new(rdelm, rd_link); /* Redundant. */
1579d0e79aa3SJason Evans 			else {
1580d0e79aa3SJason Evans 				assert(qr_next(rdelm, rd_link) == rdelm);
1581d0e79aa3SJason Evans 				assert(qr_prev(rdelm, rd_link) == rdelm);
1582f921d10fSJason Evans 			}
1583d0e79aa3SJason Evans 			qr_meld(purge_runs_sentinel, rdelm, rd_link);
1584d0e79aa3SJason Evans 		}
1585f921d10fSJason Evans 
1586d0e79aa3SJason Evans 		nstashed += npages;
1587df0d881dSJason Evans 		if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
1588df0d881dSJason Evans 		    ndirty_limit)
1589d0e79aa3SJason Evans 			break;
1590f921d10fSJason Evans 	}
1591d0e79aa3SJason Evans 
1592d0e79aa3SJason Evans 	return (nstashed);
1593f921d10fSJason Evans }
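/*
 * The two early-break checks above implement the asymmetric meaning of
 * ndirty_limit (see the block comment above arena_purge_to_limit()): in
 * decay mode, stashing stops before taking a run/chunk that would drop
 * ndirty below the limit, whereas in ratio mode it stops only once enough
 * has been gathered to bring ndirty down to the limit.
 */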
1594f921d10fSJason Evans 
1595f921d10fSJason Evans static size_t
15961f0a49e8SJason Evans arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
1597d0e79aa3SJason Evans     arena_runs_dirty_link_t *purge_runs_sentinel,
1598d0e79aa3SJason Evans     extent_node_t *purge_chunks_sentinel)
1599f921d10fSJason Evans {
1600d0e79aa3SJason Evans 	size_t npurged, nmadvise;
1601d0e79aa3SJason Evans 	arena_runs_dirty_link_t *rdelm;
1602d0e79aa3SJason Evans 	extent_node_t *chunkselm;
1603f921d10fSJason Evans 
1604f921d10fSJason Evans 	if (config_stats)
1605f921d10fSJason Evans 		nmadvise = 0;
1606f921d10fSJason Evans 	npurged = 0;
1607f921d10fSJason Evans 
16081f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
1609d0e79aa3SJason Evans 	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
1610d0e79aa3SJason Evans 	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
1611d0e79aa3SJason Evans 	    rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
1612d0e79aa3SJason Evans 		size_t npages;
1613d0e79aa3SJason Evans 
1614d0e79aa3SJason Evans 		if (rdelm == &chunkselm->rd) {
1615f921d10fSJason Evans 			/*
1616d0e79aa3SJason Evans 			 * Don't actually purge the chunk here because 1)
1617d0e79aa3SJason Evans 			 * chunkselm is embedded in the chunk and must remain
1618d0e79aa3SJason Evans 			 * valid, and 2) we deallocate the chunk in
1619d0e79aa3SJason Evans 			 * arena_unstash_purged(), where it is destroyed,
1620d0e79aa3SJason Evans 			 * decommitted, or purged, depending on chunk
1621d0e79aa3SJason Evans 			 * deallocation policy.
1622d0e79aa3SJason Evans 			 */
1623d0e79aa3SJason Evans 			size_t size = extent_node_size_get(chunkselm);
1624d0e79aa3SJason Evans 			npages = size >> LG_PAGE;
1625d0e79aa3SJason Evans 			chunkselm = qr_next(chunkselm, cc_link);
1626d0e79aa3SJason Evans 		} else {
1627d0e79aa3SJason Evans 			size_t pageind, run_size, flag_unzeroed, flags, i;
1628d0e79aa3SJason Evans 			bool decommitted;
1629d0e79aa3SJason Evans 			arena_chunk_t *chunk =
1630d0e79aa3SJason Evans 			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1631d0e79aa3SJason Evans 			arena_chunk_map_misc_t *miscelm =
1632d0e79aa3SJason Evans 			    arena_rd_to_miscelm(rdelm);
1633d0e79aa3SJason Evans 			pageind = arena_miscelm_to_pageind(miscelm);
1634d0e79aa3SJason Evans 			run_size = arena_mapbits_large_size_get(chunk, pageind);
1635d0e79aa3SJason Evans 			npages = run_size >> LG_PAGE;
1636d0e79aa3SJason Evans 
1637d0e79aa3SJason Evans 			assert(pageind + npages <= chunk_npages);
1638d0e79aa3SJason Evans 			assert(!arena_mapbits_decommitted_get(chunk, pageind));
1639d0e79aa3SJason Evans 			assert(!arena_mapbits_decommitted_get(chunk,
1640d0e79aa3SJason Evans 			    pageind+npages-1));
1641d0e79aa3SJason Evans 			decommitted = !chunk_hooks->decommit(chunk, chunksize,
1642d0e79aa3SJason Evans 			    pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
1643d0e79aa3SJason Evans 			if (decommitted) {
1644d0e79aa3SJason Evans 				flag_unzeroed = 0;
1645d0e79aa3SJason Evans 				flags = CHUNK_MAP_DECOMMITTED;
1646d0e79aa3SJason Evans 			} else {
16471f0a49e8SJason Evans 				flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
1648d0e79aa3SJason Evans 				    chunk_hooks, chunk, chunksize, pageind <<
1649d0e79aa3SJason Evans 				    LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
1650d0e79aa3SJason Evans 				flags = flag_unzeroed;
1651d0e79aa3SJason Evans 			}
1652d0e79aa3SJason Evans 			arena_mapbits_large_set(chunk, pageind+npages-1, 0,
1653d0e79aa3SJason Evans 			    flags);
1654d0e79aa3SJason Evans 			arena_mapbits_large_set(chunk, pageind, run_size,
1655d0e79aa3SJason Evans 			    flags);
1656d0e79aa3SJason Evans 
1657d0e79aa3SJason Evans 			/*
1658d0e79aa3SJason Evans 			 * Set the unzeroed flag for internal pages, now that
1659d0e79aa3SJason Evans 			 * chunk_purge_wrapper() has returned whether the pages
1660d0e79aa3SJason Evans 			 * were zeroed as a side effect of purging.  This chunk
1661d0e79aa3SJason Evans 			 * map modification is safe even though the arena mutex
1662d0e79aa3SJason Evans 			 * isn't currently owned by this thread, because the run
1663d0e79aa3SJason Evans 			 * is marked as allocated, thus protecting it from being
1664d0e79aa3SJason Evans 			 * modified by any other thread.  As long as these
1665f921d10fSJason Evans 			 * writes don't perturb the first and last elements'
1666f921d10fSJason Evans 			 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
1667f921d10fSJason Evans 			 */
1668d0e79aa3SJason Evans 			for (i = 1; i < npages-1; i++) {
1669d0e79aa3SJason Evans 				arena_mapbits_internal_set(chunk, pageind+i,
1670f921d10fSJason Evans 				    flag_unzeroed);
1671f921d10fSJason Evans 			}
1672d0e79aa3SJason Evans 		}
1673d0e79aa3SJason Evans 
1674f921d10fSJason Evans 		npurged += npages;
1675f921d10fSJason Evans 		if (config_stats)
1676f921d10fSJason Evans 			nmadvise++;
1677f921d10fSJason Evans 	}
16781f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
1679d0e79aa3SJason Evans 
1680d0e79aa3SJason Evans 	if (config_stats) {
1681f921d10fSJason Evans 		arena->stats.nmadvise += nmadvise;
1682d0e79aa3SJason Evans 		arena->stats.purged += npurged;
1683d0e79aa3SJason Evans 	}
1684f921d10fSJason Evans 
1685f921d10fSJason Evans 	return (npurged);
1686f921d10fSJason Evans }
1687f921d10fSJason Evans 
1688f921d10fSJason Evans static void
16891f0a49e8SJason Evans arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
1690d0e79aa3SJason Evans     arena_runs_dirty_link_t *purge_runs_sentinel,
1691d0e79aa3SJason Evans     extent_node_t *purge_chunks_sentinel)
1692f921d10fSJason Evans {
1693d0e79aa3SJason Evans 	arena_runs_dirty_link_t *rdelm, *rdelm_next;
1694d0e79aa3SJason Evans 	extent_node_t *chunkselm;
1695f921d10fSJason Evans 
1696d0e79aa3SJason Evans 	/* Deallocate chunks/runs. */
1697d0e79aa3SJason Evans 	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
1698d0e79aa3SJason Evans 	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
1699d0e79aa3SJason Evans 	    rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
1700d0e79aa3SJason Evans 		rdelm_next = qr_next(rdelm, rd_link);
1701d0e79aa3SJason Evans 		if (rdelm == &chunkselm->rd) {
1702d0e79aa3SJason Evans 			extent_node_t *chunkselm_next = qr_next(chunkselm,
1703d0e79aa3SJason Evans 			    cc_link);
1704d0e79aa3SJason Evans 			void *addr = extent_node_addr_get(chunkselm);
1705d0e79aa3SJason Evans 			size_t size = extent_node_size_get(chunkselm);
1706d0e79aa3SJason Evans 			bool zeroed = extent_node_zeroed_get(chunkselm);
1707d0e79aa3SJason Evans 			bool committed = extent_node_committed_get(chunkselm);
1708d0e79aa3SJason Evans 			extent_node_dirty_remove(chunkselm);
17091f0a49e8SJason Evans 			arena_node_dalloc(tsdn, arena, chunkselm);
1710d0e79aa3SJason Evans 			chunkselm = chunkselm_next;
17111f0a49e8SJason Evans 			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
17121f0a49e8SJason Evans 			    size, zeroed, committed);
1713d0e79aa3SJason Evans 		} else {
1714d0e79aa3SJason Evans 			arena_chunk_t *chunk =
1715d0e79aa3SJason Evans 			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
1716d0e79aa3SJason Evans 			arena_chunk_map_misc_t *miscelm =
1717d0e79aa3SJason Evans 			    arena_rd_to_miscelm(rdelm);
1718d0e79aa3SJason Evans 			size_t pageind = arena_miscelm_to_pageind(miscelm);
1719d0e79aa3SJason Evans 			bool decommitted = (arena_mapbits_decommitted_get(chunk,
1720d0e79aa3SJason Evans 			    pageind) != 0);
1721d0e79aa3SJason Evans 			arena_run_t *run = &miscelm->run;
1722d0e79aa3SJason Evans 			qr_remove(rdelm, rd_link);
17231f0a49e8SJason Evans 			arena_run_dalloc(tsdn, arena, run, false, true,
17241f0a49e8SJason Evans 			    decommitted);
1725f921d10fSJason Evans 		}
1726f921d10fSJason Evans 	}
172782872ac0SJason Evans }
172882872ac0SJason Evans 
1729df0d881dSJason Evans /*
1730df0d881dSJason Evans  * NB: ndirty_limit is interpreted differently depending on opt_purge:
1731df0d881dSJason Evans  *   - purge_mode_ratio: Purge as few dirty runs/chunks as possible to reach the
1732df0d881dSJason Evans  *                       desired state:
1733df0d881dSJason Evans  *                       (arena->ndirty <= ndirty_limit)
1734df0d881dSJason Evans  *   - purge_mode_decay: Purge as many dirty runs/chunks as possible without
1735df0d881dSJason Evans  *                       violating the invariant:
1736df0d881dSJason Evans  *                       (arena->ndirty >= ndirty_limit)
1737df0d881dSJason Evans  */
1738a4bd5210SJason Evans static void
17391f0a49e8SJason Evans arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
1740a4bd5210SJason Evans {
17411f0a49e8SJason Evans 	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
1742df0d881dSJason Evans 	size_t npurge, npurged;
1743d0e79aa3SJason Evans 	arena_runs_dirty_link_t purge_runs_sentinel;
1744d0e79aa3SJason Evans 	extent_node_t purge_chunks_sentinel;
1745a4bd5210SJason Evans 
1746d0e79aa3SJason Evans 	arena->purging = true;
1747d0e79aa3SJason Evans 
1748d0e79aa3SJason Evans 	/*
1749d0e79aa3SJason Evans 	 * Calls to arena_dirty_count() are disabled even for debug builds
1750d0e79aa3SJason Evans 	 * because overhead grows nonlinearly as memory usage increases.
1751d0e79aa3SJason Evans 	 */
1752d0e79aa3SJason Evans 	if (false && config_debug) {
1753d0e79aa3SJason Evans 		size_t ndirty = arena_dirty_count(arena);
1754a4bd5210SJason Evans 		assert(ndirty == arena->ndirty);
1755a4bd5210SJason Evans 	}
1756df0d881dSJason Evans 	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
1757df0d881dSJason Evans 	    arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);
1758df0d881dSJason Evans 
1759df0d881dSJason Evans 	qr_new(&purge_runs_sentinel, rd_link);
1760df0d881dSJason Evans 	extent_node_dirty_linkage_init(&purge_chunks_sentinel);
1761df0d881dSJason Evans 
17621f0a49e8SJason Evans 	npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
1763df0d881dSJason Evans 	    &purge_runs_sentinel, &purge_chunks_sentinel);
1764df0d881dSJason Evans 	if (npurge == 0)
1765df0d881dSJason Evans 		goto label_return;
17661f0a49e8SJason Evans 	npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
17671f0a49e8SJason Evans 	    &purge_runs_sentinel, &purge_chunks_sentinel);
1768df0d881dSJason Evans 	assert(npurged == npurge);
17691f0a49e8SJason Evans 	arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
1770df0d881dSJason Evans 	    &purge_chunks_sentinel);
1771a4bd5210SJason Evans 
1772a4bd5210SJason Evans 	if (config_stats)
1773a4bd5210SJason Evans 		arena->stats.npurge++;
1774a4bd5210SJason Evans 
1775df0d881dSJason Evans label_return:
1776d0e79aa3SJason Evans 	arena->purging = false;
1777a4bd5210SJason Evans }
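/*
 * Purging thus proceeds in three phases: arena_stash_dirty() pulls dirty
 * runs/chunks off the dirty lists and marks them allocated,
 * arena_purge_stashed() temporarily drops arena->lock while it issues the
 * actual purge/decommit calls, and arena_unstash_purged() releases the
 * stashed memory back to the arena (or deallocates whole chunks).
 */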
1778a4bd5210SJason Evans 
1779a4bd5210SJason Evans void
17801f0a49e8SJason Evans arena_purge(tsdn_t *tsdn, arena_t *arena, bool all)
1781a4bd5210SJason Evans {
1782a4bd5210SJason Evans 
17831f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
1784df0d881dSJason Evans 	if (all)
17851f0a49e8SJason Evans 		arena_purge_to_limit(tsdn, arena, 0);
1786df0d881dSJason Evans 	else
17871f0a49e8SJason Evans 		arena_maybe_purge(tsdn, arena);
17881f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
17891f0a49e8SJason Evans }
17901f0a49e8SJason Evans 
17911f0a49e8SJason Evans static void
17921f0a49e8SJason Evans arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk)
17931f0a49e8SJason Evans {
17941f0a49e8SJason Evans 	size_t pageind, npages;
17951f0a49e8SJason Evans 
17961f0a49e8SJason Evans 	cassert(config_prof);
17971f0a49e8SJason Evans 	assert(opt_prof);
17981f0a49e8SJason Evans 
17991f0a49e8SJason Evans 	/*
18001f0a49e8SJason Evans 	 * Iterate over the allocated runs and remove profiled allocations from
18011f0a49e8SJason Evans 	 * the sample set.
18021f0a49e8SJason Evans 	 */
18031f0a49e8SJason Evans 	for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
18041f0a49e8SJason Evans 		if (arena_mapbits_allocated_get(chunk, pageind) != 0) {
18051f0a49e8SJason Evans 			if (arena_mapbits_large_get(chunk, pageind) != 0) {
18061f0a49e8SJason Evans 				void *ptr = (void *)((uintptr_t)chunk + (pageind
18071f0a49e8SJason Evans 				    << LG_PAGE));
18081f0a49e8SJason Evans 				size_t usize = isalloc(tsd_tsdn(tsd), ptr,
18091f0a49e8SJason Evans 				    config_prof);
18101f0a49e8SJason Evans 
18111f0a49e8SJason Evans 				prof_free(tsd, ptr, usize);
18121f0a49e8SJason Evans 				npages = arena_mapbits_large_size_get(chunk,
18131f0a49e8SJason Evans 				    pageind) >> LG_PAGE;
18141f0a49e8SJason Evans 			} else {
18151f0a49e8SJason Evans 				/* Skip small run. */
18161f0a49e8SJason Evans 				size_t binind = arena_mapbits_binind_get(chunk,
18171f0a49e8SJason Evans 				    pageind);
18181f0a49e8SJason Evans 				arena_bin_info_t *bin_info =
18191f0a49e8SJason Evans 				    &arena_bin_info[binind];
18201f0a49e8SJason Evans 				npages = bin_info->run_size >> LG_PAGE;
18211f0a49e8SJason Evans 			}
18221f0a49e8SJason Evans 		} else {
18231f0a49e8SJason Evans 			/* Skip unallocated run. */
18241f0a49e8SJason Evans 			npages = arena_mapbits_unallocated_size_get(chunk,
18251f0a49e8SJason Evans 			    pageind) >> LG_PAGE;
18261f0a49e8SJason Evans 		}
18271f0a49e8SJason Evans 		assert(pageind + npages <= chunk_npages);
18281f0a49e8SJason Evans 	}
18291f0a49e8SJason Evans }
18301f0a49e8SJason Evans 
18311f0a49e8SJason Evans void
18321f0a49e8SJason Evans arena_reset(tsd_t *tsd, arena_t *arena)
18331f0a49e8SJason Evans {
18341f0a49e8SJason Evans 	unsigned i;
18351f0a49e8SJason Evans 	extent_node_t *node;
18361f0a49e8SJason Evans 
18371f0a49e8SJason Evans 	/*
18381f0a49e8SJason Evans 	 * Locking in this function is unintuitive.  The caller guarantees that
18391f0a49e8SJason Evans 	 * no concurrent operations are happening in this arena, but there are
18401f0a49e8SJason Evans 	 * still reasons that some locking is necessary:
18411f0a49e8SJason Evans 	 *
18421f0a49e8SJason Evans 	 * - Some of the functions in the transitive closure of calls assume
18431f0a49e8SJason Evans 	 *   appropriate locks are held, and in some cases these locks are
18441f0a49e8SJason Evans 	 *   temporarily dropped to avoid lock order reversal or deadlock due to
18451f0a49e8SJason Evans 	 *   reentry.
18461f0a49e8SJason Evans 	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
18471f0a49e8SJason Evans 	 *   strictly speaking this is a "concurrent operation", disallowing
18481f0a49e8SJason Evans 	 *   stats refreshes would impose an inconvenient burden.
18491f0a49e8SJason Evans 	 */
18501f0a49e8SJason Evans 
18511f0a49e8SJason Evans 	/* Remove large allocations from prof sample set. */
18521f0a49e8SJason Evans 	if (config_prof && opt_prof) {
18531f0a49e8SJason Evans 		ql_foreach(node, &arena->achunks, ql_link) {
18541f0a49e8SJason Evans 			arena_achunk_prof_reset(tsd, arena,
18551f0a49e8SJason Evans 			    extent_node_addr_get(node));
18561f0a49e8SJason Evans 		}
18571f0a49e8SJason Evans 	}
18581f0a49e8SJason Evans 
18591f0a49e8SJason Evans 	/* Reset curruns for large size classes. */
18601f0a49e8SJason Evans 	if (config_stats) {
18611f0a49e8SJason Evans 		for (i = 0; i < nlclasses; i++)
18621f0a49e8SJason Evans 			arena->stats.lstats[i].curruns = 0;
18631f0a49e8SJason Evans 	}
18641f0a49e8SJason Evans 
18651f0a49e8SJason Evans 	/* Huge allocations. */
18661f0a49e8SJason Evans 	malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
18671f0a49e8SJason Evans 	for (node = ql_last(&arena->huge, ql_link); node != NULL; node =
18681f0a49e8SJason Evans 	    ql_last(&arena->huge, ql_link)) {
18691f0a49e8SJason Evans 		void *ptr = extent_node_addr_get(node);
18701f0a49e8SJason Evans 		size_t usize;
18711f0a49e8SJason Evans 
18721f0a49e8SJason Evans 		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
18731f0a49e8SJason Evans 		if (config_stats || (config_prof && opt_prof))
18741f0a49e8SJason Evans 			usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
18751f0a49e8SJason Evans 		/* Remove huge allocation from prof sample set. */
18761f0a49e8SJason Evans 		if (config_prof && opt_prof)
18771f0a49e8SJason Evans 			prof_free(tsd, ptr, usize);
18781f0a49e8SJason Evans 		huge_dalloc(tsd_tsdn(tsd), ptr);
18791f0a49e8SJason Evans 		malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx);
18801f0a49e8SJason Evans 		/* Cancel out unwanted effects on stats. */
18811f0a49e8SJason Evans 		if (config_stats)
18821f0a49e8SJason Evans 			arena_huge_reset_stats_cancel(arena, usize);
18831f0a49e8SJason Evans 	}
18841f0a49e8SJason Evans 	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx);
18851f0a49e8SJason Evans 
18861f0a49e8SJason Evans 	malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock);
18871f0a49e8SJason Evans 
18881f0a49e8SJason Evans 	/* Bins. */
18891f0a49e8SJason Evans 	for (i = 0; i < NBINS; i++) {
18901f0a49e8SJason Evans 		arena_bin_t *bin = &arena->bins[i];
18911f0a49e8SJason Evans 		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
18921f0a49e8SJason Evans 		bin->runcur = NULL;
18931f0a49e8SJason Evans 		arena_run_heap_new(&bin->runs);
18941f0a49e8SJason Evans 		if (config_stats) {
18951f0a49e8SJason Evans 			bin->stats.curregs = 0;
18961f0a49e8SJason Evans 			bin->stats.curruns = 0;
18971f0a49e8SJason Evans 		}
18981f0a49e8SJason Evans 		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
18991f0a49e8SJason Evans 	}
19001f0a49e8SJason Evans 
19011f0a49e8SJason Evans 	/*
19021f0a49e8SJason Evans 	 * Re-initialize runs_dirty such that the chunks_cache and runs_dirty
19031f0a49e8SJason Evans 	 * chains directly correspond.
19041f0a49e8SJason Evans 	 */
19051f0a49e8SJason Evans 	qr_new(&arena->runs_dirty, rd_link);
19061f0a49e8SJason Evans 	for (node = qr_next(&arena->chunks_cache, cc_link);
19071f0a49e8SJason Evans 	    node != &arena->chunks_cache; node = qr_next(node, cc_link)) {
19081f0a49e8SJason Evans 		qr_new(&node->rd, rd_link);
19091f0a49e8SJason Evans 		qr_meld(&arena->runs_dirty, &node->rd, rd_link);
19101f0a49e8SJason Evans 	}
19111f0a49e8SJason Evans 
19121f0a49e8SJason Evans 	/* Arena chunks. */
19131f0a49e8SJason Evans 	for (node = ql_last(&arena->achunks, ql_link); node != NULL; node =
19141f0a49e8SJason Evans 	    ql_last(&arena->achunks, ql_link)) {
19151f0a49e8SJason Evans 		ql_remove(&arena->achunks, node, ql_link);
19161f0a49e8SJason Evans 		arena_chunk_discard(tsd_tsdn(tsd), arena,
19171f0a49e8SJason Evans 		    extent_node_addr_get(node));
19181f0a49e8SJason Evans 	}
19191f0a49e8SJason Evans 
19201f0a49e8SJason Evans 	/* Spare. */
19211f0a49e8SJason Evans 	if (arena->spare != NULL) {
19221f0a49e8SJason Evans 		arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare);
19231f0a49e8SJason Evans 		arena->spare = NULL;
19241f0a49e8SJason Evans 	}
19251f0a49e8SJason Evans 
19261f0a49e8SJason Evans 	assert(!arena->purging);
19271f0a49e8SJason Evans 	arena->nactive = 0;
19281f0a49e8SJason Evans 
1929*bde95144SJason Evans 	for (i = 0; i < NPSIZES; i++)
19301f0a49e8SJason Evans 		arena_run_heap_new(&arena->runs_avail[i]);
19311f0a49e8SJason Evans 
19321f0a49e8SJason Evans 	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock);
1933a4bd5210SJason Evans }
1934a4bd5210SJason Evans 
1935a4bd5210SJason Evans static void
1936f921d10fSJason Evans arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
1937d0e79aa3SJason Evans     size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
1938d0e79aa3SJason Evans     size_t flag_decommitted)
1939a4bd5210SJason Evans {
1940f921d10fSJason Evans 	size_t size = *p_size;
1941f921d10fSJason Evans 	size_t run_ind = *p_run_ind;
1942f921d10fSJason Evans 	size_t run_pages = *p_run_pages;
1943a4bd5210SJason Evans 
1944a4bd5210SJason Evans 	/* Try to coalesce forward. */
1945a4bd5210SJason Evans 	if (run_ind + run_pages < chunk_npages &&
1946e722f8f8SJason Evans 	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
1947d0e79aa3SJason Evans 	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
1948d0e79aa3SJason Evans 	    arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
1949d0e79aa3SJason Evans 	    flag_decommitted) {
1950e722f8f8SJason Evans 		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
1951e722f8f8SJason Evans 		    run_ind+run_pages);
1952a4bd5210SJason Evans 		size_t nrun_pages = nrun_size >> LG_PAGE;
1953a4bd5210SJason Evans 
1954a4bd5210SJason Evans 		/*
1955a4bd5210SJason Evans 		 * Remove successor from runs_avail; the coalesced run is
1956a4bd5210SJason Evans 		 * inserted later.
1957a4bd5210SJason Evans 		 */
1958e722f8f8SJason Evans 		assert(arena_mapbits_unallocated_size_get(chunk,
1959e722f8f8SJason Evans 		    run_ind+run_pages+nrun_pages-1) == nrun_size);
1960e722f8f8SJason Evans 		assert(arena_mapbits_dirty_get(chunk,
1961e722f8f8SJason Evans 		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
1962d0e79aa3SJason Evans 		assert(arena_mapbits_decommitted_get(chunk,
1963d0e79aa3SJason Evans 		    run_ind+run_pages+nrun_pages-1) == flag_decommitted);
1964d0e79aa3SJason Evans 		arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);
1965d0e79aa3SJason Evans 
1966d0e79aa3SJason Evans 		/*
1967d0e79aa3SJason Evans 		 * If the successor is dirty, remove it from the set of dirty
1968d0e79aa3SJason Evans 		 * pages.
1969d0e79aa3SJason Evans 		 */
1970d0e79aa3SJason Evans 		if (flag_dirty != 0) {
1971d0e79aa3SJason Evans 			arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
1972d0e79aa3SJason Evans 			    nrun_pages);
1973d0e79aa3SJason Evans 		}
1974a4bd5210SJason Evans 
1975a4bd5210SJason Evans 		size += nrun_size;
1976a4bd5210SJason Evans 		run_pages += nrun_pages;
1977a4bd5210SJason Evans 
1978e722f8f8SJason Evans 		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
1979e722f8f8SJason Evans 		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
1980e722f8f8SJason Evans 		    size);
1981a4bd5210SJason Evans 	}
1982a4bd5210SJason Evans 
1983a4bd5210SJason Evans 	/* Try to coalesce backward. */
1984f921d10fSJason Evans 	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
1985f921d10fSJason Evans 	    run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
1986d0e79aa3SJason Evans 	    flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
1987d0e79aa3SJason Evans 	    flag_decommitted) {
1988e722f8f8SJason Evans 		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
1989e722f8f8SJason Evans 		    run_ind-1);
1990a4bd5210SJason Evans 		size_t prun_pages = prun_size >> LG_PAGE;
1991a4bd5210SJason Evans 
1992a4bd5210SJason Evans 		run_ind -= prun_pages;
1993a4bd5210SJason Evans 
1994a4bd5210SJason Evans 		/*
1995a4bd5210SJason Evans 		 * Remove predecessor from runs_avail; the coalesced run is
1996a4bd5210SJason Evans 		 * inserted later.
1997a4bd5210SJason Evans 		 */
1998e722f8f8SJason Evans 		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
1999e722f8f8SJason Evans 		    prun_size);
2000e722f8f8SJason Evans 		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
2001d0e79aa3SJason Evans 		assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2002d0e79aa3SJason Evans 		    flag_decommitted);
2003d0e79aa3SJason Evans 		arena_avail_remove(arena, chunk, run_ind, prun_pages);
2004d0e79aa3SJason Evans 
2005d0e79aa3SJason Evans 		/*
2006d0e79aa3SJason Evans 		 * If the predecessor is dirty, remove it from the set of dirty
2007d0e79aa3SJason Evans 		 * pages.
2008d0e79aa3SJason Evans 		 */
2009d0e79aa3SJason Evans 		if (flag_dirty != 0) {
2010d0e79aa3SJason Evans 			arena_run_dirty_remove(arena, chunk, run_ind,
2011d0e79aa3SJason Evans 			    prun_pages);
2012d0e79aa3SJason Evans 		}
2013a4bd5210SJason Evans 
2014a4bd5210SJason Evans 		size += prun_size;
2015a4bd5210SJason Evans 		run_pages += prun_pages;
2016a4bd5210SJason Evans 
2017e722f8f8SJason Evans 		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
2018e722f8f8SJason Evans 		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
2019e722f8f8SJason Evans 		    size);
2020a4bd5210SJason Evans 	}
2021a4bd5210SJason Evans 
2022f921d10fSJason Evans 	*p_size = size;
2023f921d10fSJason Evans 	*p_run_ind = run_ind;
2024f921d10fSJason Evans 	*p_run_pages = run_pages;
2025f921d10fSJason Evans }
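/*
 * Coalescing example (page indices are illustrative): if pages [10..13) are
 * being freed while [13..20) and [5..10) are already free with matching
 * dirty/decommitted flags, both neighbors are removed from runs_avail and
 * the run grows to [5..20); the caller then reinserts the single combined
 * run.  Neighbors whose dirty or decommitted flags differ are left unmerged
 * so that per-run purging state remains accurate.
 */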
2026f921d10fSJason Evans 
2027d0e79aa3SJason Evans static size_t
2028d0e79aa3SJason Evans arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2029d0e79aa3SJason Evans     size_t run_ind)
2030f921d10fSJason Evans {
2031d0e79aa3SJason Evans 	size_t size;
2032f921d10fSJason Evans 
2033f921d10fSJason Evans 	assert(run_ind >= map_bias);
2034f921d10fSJason Evans 	assert(run_ind < chunk_npages);
2035d0e79aa3SJason Evans 
2036f921d10fSJason Evans 	if (arena_mapbits_large_get(chunk, run_ind) != 0) {
2037f921d10fSJason Evans 		size = arena_mapbits_large_size_get(chunk, run_ind);
2038d0e79aa3SJason Evans 		assert(size == PAGE || arena_mapbits_large_size_get(chunk,
2039f921d10fSJason Evans 		    run_ind+(size>>LG_PAGE)-1) == 0);
2040f921d10fSJason Evans 	} else {
2041d0e79aa3SJason Evans 		arena_bin_info_t *bin_info = &arena_bin_info[run->binind];
2042f921d10fSJason Evans 		size = bin_info->run_size;
2043f921d10fSJason Evans 	}
2044d0e79aa3SJason Evans 
2045d0e79aa3SJason Evans 	return (size);
2046d0e79aa3SJason Evans }
2047d0e79aa3SJason Evans 
2048d0e79aa3SJason Evans static void
20491f0a49e8SJason Evans arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty,
20501f0a49e8SJason Evans     bool cleaned, bool decommitted)
2051d0e79aa3SJason Evans {
2052d0e79aa3SJason Evans 	arena_chunk_t *chunk;
2053d0e79aa3SJason Evans 	arena_chunk_map_misc_t *miscelm;
2054d0e79aa3SJason Evans 	size_t size, run_ind, run_pages, flag_dirty, flag_decommitted;
2055d0e79aa3SJason Evans 
2056d0e79aa3SJason Evans 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
2057d0e79aa3SJason Evans 	miscelm = arena_run_to_miscelm(run);
2058d0e79aa3SJason Evans 	run_ind = arena_miscelm_to_pageind(miscelm);
2059d0e79aa3SJason Evans 	assert(run_ind >= map_bias);
2060d0e79aa3SJason Evans 	assert(run_ind < chunk_npages);
2061d0e79aa3SJason Evans 	size = arena_run_size_get(arena, chunk, run, run_ind);
2062f921d10fSJason Evans 	run_pages = (size >> LG_PAGE);
2063df0d881dSJason Evans 	arena_nactive_sub(arena, run_pages);
2064f921d10fSJason Evans 
2065f921d10fSJason Evans 	/*
2066f921d10fSJason Evans 	 * The run is dirty if the caller claims to have dirtied it, or if it
2067f921d10fSJason Evans 	 * was already dirty before being allocated and the caller doesn't
2068f921d10fSJason Evans 	 * claim to have cleaned it.
2069f921d10fSJason Evans 	 */
2070f921d10fSJason Evans 	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2071f921d10fSJason Evans 	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
2072d0e79aa3SJason Evans 	if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind)
2073d0e79aa3SJason Evans 	    != 0)
2074f921d10fSJason Evans 		dirty = true;
2075f921d10fSJason Evans 	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
2076d0e79aa3SJason Evans 	flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0;
2077f921d10fSJason Evans 
2078f921d10fSJason Evans 	/* Mark pages as unallocated in the chunk map. */
2079d0e79aa3SJason Evans 	if (dirty || decommitted) {
2080d0e79aa3SJason Evans 		size_t flags = flag_dirty | flag_decommitted;
2081d0e79aa3SJason Evans 		arena_mapbits_unallocated_set(chunk, run_ind, size, flags);
2082f921d10fSJason Evans 		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
2083d0e79aa3SJason Evans 		    flags);
2084f921d10fSJason Evans 	} else {
2085f921d10fSJason Evans 		arena_mapbits_unallocated_set(chunk, run_ind, size,
2086f921d10fSJason Evans 		    arena_mapbits_unzeroed_get(chunk, run_ind));
2087f921d10fSJason Evans 		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
2088f921d10fSJason Evans 		    arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
2089f921d10fSJason Evans 	}
2090f921d10fSJason Evans 
2091f921d10fSJason Evans 	arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
2092d0e79aa3SJason Evans 	    flag_dirty, flag_decommitted);
2093f921d10fSJason Evans 
2094a4bd5210SJason Evans 	/* Insert into runs_avail, now that coalescing is complete. */
2095e722f8f8SJason Evans 	assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
2096e722f8f8SJason Evans 	    arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
2097e722f8f8SJason Evans 	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
2098e722f8f8SJason Evans 	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
2099d0e79aa3SJason Evans 	assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
2100d0e79aa3SJason Evans 	    arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1));
2101d0e79aa3SJason Evans 	arena_avail_insert(arena, chunk, run_ind, run_pages);
2102d0e79aa3SJason Evans 
2103d0e79aa3SJason Evans 	if (dirty)
2104d0e79aa3SJason Evans 		arena_run_dirty_insert(arena, chunk, run_ind, run_pages);
2105a4bd5210SJason Evans 
2106e722f8f8SJason Evans 	/* Deallocate chunk if it is now completely unused. */
2107d0e79aa3SJason Evans 	if (size == arena_maxrun) {
2108e722f8f8SJason Evans 		assert(run_ind == map_bias);
2109d0e79aa3SJason Evans 		assert(run_pages == (arena_maxrun >> LG_PAGE));
21101f0a49e8SJason Evans 		arena_chunk_dalloc(tsdn, arena, chunk);
2111e722f8f8SJason Evans 	}
2112a4bd5210SJason Evans 
2113a4bd5210SJason Evans 	/*
2114a4bd5210SJason Evans 	 * It is okay to do dirty page processing here even if the chunk was
2115a4bd5210SJason Evans 	 * deallocated above, since in that case it is the spare.  Waiting
2116a4bd5210SJason Evans 	 * until after possible chunk deallocation to do dirty processing
2117a4bd5210SJason Evans 	 * allows for an old spare to be fully deallocated, thus decreasing the
2118a4bd5210SJason Evans 	 * chances of spuriously crossing the dirty page purging threshold.
2119a4bd5210SJason Evans 	 */
2120a4bd5210SJason Evans 	if (dirty)
21211f0a49e8SJason Evans 		arena_maybe_purge(tsdn, arena);
2122a4bd5210SJason Evans }
2123a4bd5210SJason Evans 
2124a4bd5210SJason Evans static void
21251f0a49e8SJason Evans arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
21261f0a49e8SJason Evans     arena_run_t *run, size_t oldsize, size_t newsize)
2127a4bd5210SJason Evans {
2128d0e79aa3SJason Evans 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2129d0e79aa3SJason Evans 	size_t pageind = arena_miscelm_to_pageind(miscelm);
2130a4bd5210SJason Evans 	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
2131e722f8f8SJason Evans 	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
2132d0e79aa3SJason Evans 	size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2133d0e79aa3SJason Evans 	size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2134d0e79aa3SJason Evans 	    CHUNK_MAP_UNZEROED : 0;
2135a4bd5210SJason Evans 
2136a4bd5210SJason Evans 	assert(oldsize > newsize);
2137a4bd5210SJason Evans 
2138a4bd5210SJason Evans 	/*
2139a4bd5210SJason Evans 	 * Update the chunk map so that arena_run_dalloc() can treat the
2140a4bd5210SJason Evans 	 * leading run as separately allocated.  Set the last element of each
2141a4bd5210SJason Evans 	 * run first, in case of single-page runs.
2142a4bd5210SJason Evans 	 */
2143e722f8f8SJason Evans 	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
2144d0e79aa3SJason Evans 	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2145d0e79aa3SJason Evans 	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2146d0e79aa3SJason Evans 	    pageind+head_npages-1)));
2147d0e79aa3SJason Evans 	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
2148d0e79aa3SJason Evans 	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
2149a4bd5210SJason Evans 
2150a4bd5210SJason Evans 	if (config_debug) {
2151a4bd5210SJason Evans 		UNUSED size_t tail_npages = newsize >> LG_PAGE;
2152e722f8f8SJason Evans 		assert(arena_mapbits_large_size_get(chunk,
2153e722f8f8SJason Evans 		    pageind+head_npages+tail_npages-1) == 0);
2154e722f8f8SJason Evans 		assert(arena_mapbits_dirty_get(chunk,
2155e722f8f8SJason Evans 		    pageind+head_npages+tail_npages-1) == flag_dirty);
2156a4bd5210SJason Evans 	}
215735dad073SJason Evans 	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
2158d0e79aa3SJason Evans 	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2159d0e79aa3SJason Evans 	    pageind+head_npages)));
2160a4bd5210SJason Evans 
21611f0a49e8SJason Evans 	arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted !=
21621f0a49e8SJason Evans 	    0));
2163a4bd5210SJason Evans }
2164a4bd5210SJason Evans 
2165a4bd5210SJason Evans static void
21661f0a49e8SJason Evans arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
21671f0a49e8SJason Evans     arena_run_t *run, size_t oldsize, size_t newsize, bool dirty)
2168a4bd5210SJason Evans {
2169d0e79aa3SJason Evans 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2170d0e79aa3SJason Evans 	size_t pageind = arena_miscelm_to_pageind(miscelm);
2171a4bd5210SJason Evans 	size_t head_npages = newsize >> LG_PAGE;
2172e722f8f8SJason Evans 	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);
2173d0e79aa3SJason Evans 	size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind);
2174d0e79aa3SJason Evans 	size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
2175d0e79aa3SJason Evans 	    CHUNK_MAP_UNZEROED : 0;
2176d0e79aa3SJason Evans 	arena_chunk_map_misc_t *tail_miscelm;
2177d0e79aa3SJason Evans 	arena_run_t *tail_run;
2178a4bd5210SJason Evans 
2179a4bd5210SJason Evans 	assert(oldsize > newsize);
2180a4bd5210SJason Evans 
2181a4bd5210SJason Evans 	/*
2182a4bd5210SJason Evans 	 * Update the chunk map so that arena_run_dalloc() can treat the
2183a4bd5210SJason Evans 	 * trailing run as separately allocated.  Set the last element of each
2184a4bd5210SJason Evans 	 * run first, in case of single-page runs.
2185a4bd5210SJason Evans 	 */
2186e722f8f8SJason Evans 	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
2187d0e79aa3SJason Evans 	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
2188d0e79aa3SJason Evans 	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2189d0e79aa3SJason Evans 	    pageind+head_npages-1)));
2190d0e79aa3SJason Evans 	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
2191d0e79aa3SJason Evans 	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind)));
2192a4bd5210SJason Evans 
2193e722f8f8SJason Evans 	if (config_debug) {
2194e722f8f8SJason Evans 		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
2195e722f8f8SJason Evans 		assert(arena_mapbits_large_size_get(chunk,
2196e722f8f8SJason Evans 		    pageind+head_npages+tail_npages-1) == 0);
2197e722f8f8SJason Evans 		assert(arena_mapbits_dirty_get(chunk,
2198e722f8f8SJason Evans 		    pageind+head_npages+tail_npages-1) == flag_dirty);
2199e722f8f8SJason Evans 	}
2200e722f8f8SJason Evans 	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
2201d0e79aa3SJason Evans 	    flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
2202d0e79aa3SJason Evans 	    pageind+head_npages)));
2203a4bd5210SJason Evans 
22041f0a49e8SJason Evans 	tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages);
2205d0e79aa3SJason Evans 	tail_run = &tail_miscelm->run;
22061f0a49e8SJason Evans 	arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted
22071f0a49e8SJason Evans 	    != 0));
2208a4bd5210SJason Evans }
2209a4bd5210SJason Evans 
2210a4bd5210SJason Evans static void
2211a4bd5210SJason Evans arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
2212a4bd5210SJason Evans {
2213d0e79aa3SJason Evans 	arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run);
2214a4bd5210SJason Evans 
22151f0a49e8SJason Evans 	arena_run_heap_insert(&bin->runs, miscelm);
2216a4bd5210SJason Evans }
2217a4bd5210SJason Evans 
2218a4bd5210SJason Evans static arena_run_t *
2219a4bd5210SJason Evans arena_bin_nonfull_run_tryget(arena_bin_t *bin)
2220a4bd5210SJason Evans {
22211f0a49e8SJason Evans 	arena_chunk_map_misc_t *miscelm;
22221f0a49e8SJason Evans 
22231f0a49e8SJason Evans 	miscelm = arena_run_heap_remove_first(&bin->runs);
22241f0a49e8SJason Evans 	if (miscelm == NULL)
22251f0a49e8SJason Evans 		return (NULL);
2226a4bd5210SJason Evans 	if (config_stats)
2227a4bd5210SJason Evans 		bin->stats.reruns++;
22281f0a49e8SJason Evans 
22291f0a49e8SJason Evans 	return (&miscelm->run);
2230a4bd5210SJason Evans }
2231a4bd5210SJason Evans 
2232a4bd5210SJason Evans static arena_run_t *
22331f0a49e8SJason Evans arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
2234a4bd5210SJason Evans {
2235a4bd5210SJason Evans 	arena_run_t *run;
2236536b3538SJason Evans 	szind_t binind;
2237a4bd5210SJason Evans 	arena_bin_info_t *bin_info;
2238a4bd5210SJason Evans 
2239a4bd5210SJason Evans 	/* Look for a usable run. */
2240a4bd5210SJason Evans 	run = arena_bin_nonfull_run_tryget(bin);
2241a4bd5210SJason Evans 	if (run != NULL)
2242a4bd5210SJason Evans 		return (run);
2243a4bd5210SJason Evans 	/* No existing runs have any space available. */
2244a4bd5210SJason Evans 
2245a4bd5210SJason Evans 	binind = arena_bin_index(arena, bin);
2246a4bd5210SJason Evans 	bin_info = &arena_bin_info[binind];
2247a4bd5210SJason Evans 
2248a4bd5210SJason Evans 	/* Allocate a new run. */
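	/*
	 * bin->lock is dropped around the run allocation below (which
	 * requires arena->lock), so another thread may install a usable run
	 * or update bin->runcur in the meantime; the caller rechecks runcur,
	 * and this function retries the non-full run heap before giving up.
	 */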
22491f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &bin->lock);
2250a4bd5210SJason Evans 	/******************************/
22511f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
22521f0a49e8SJason Evans 	run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind);
2253a4bd5210SJason Evans 	if (run != NULL) {
2254a4bd5210SJason Evans 		/* Initialize run internals. */
2255d0e79aa3SJason Evans 		run->binind = binind;
2256a4bd5210SJason Evans 		run->nfree = bin_info->nregs;
2257d0e79aa3SJason Evans 		bitmap_init(run->bitmap, &bin_info->bitmap_info);
2258a4bd5210SJason Evans 	}
22591f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
2260a4bd5210SJason Evans 	/********************************/
22611f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &bin->lock);
2262a4bd5210SJason Evans 	if (run != NULL) {
2263a4bd5210SJason Evans 		if (config_stats) {
2264a4bd5210SJason Evans 			bin->stats.nruns++;
2265a4bd5210SJason Evans 			bin->stats.curruns++;
2266a4bd5210SJason Evans 		}
2267a4bd5210SJason Evans 		return (run);
2268a4bd5210SJason Evans 	}
2269a4bd5210SJason Evans 
2270a4bd5210SJason Evans 	/*
2271f921d10fSJason Evans 	 * arena_run_alloc_small() failed, but another thread may have made
2272a4bd5210SJason Evans 	 * sufficient memory available while this one dropped bin->lock above,
2273a4bd5210SJason Evans 	 * so search one more time.
2274a4bd5210SJason Evans 	 */
2275a4bd5210SJason Evans 	run = arena_bin_nonfull_run_tryget(bin);
2276a4bd5210SJason Evans 	if (run != NULL)
2277a4bd5210SJason Evans 		return (run);
2278a4bd5210SJason Evans 
2279a4bd5210SJason Evans 	return (NULL);
2280a4bd5210SJason Evans }
2281a4bd5210SJason Evans 
2282a4bd5210SJason Evans /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
2283a4bd5210SJason Evans static void *
22841f0a49e8SJason Evans arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin)
2285a4bd5210SJason Evans {
2286536b3538SJason Evans 	szind_t binind;
2287a4bd5210SJason Evans 	arena_bin_info_t *bin_info;
2288a4bd5210SJason Evans 	arena_run_t *run;
2289a4bd5210SJason Evans 
2290a4bd5210SJason Evans 	binind = arena_bin_index(arena, bin);
2291a4bd5210SJason Evans 	bin_info = &arena_bin_info[binind];
2292a4bd5210SJason Evans 	bin->runcur = NULL;
22931f0a49e8SJason Evans 	run = arena_bin_nonfull_run_get(tsdn, arena, bin);
2294a4bd5210SJason Evans 	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
2295a4bd5210SJason Evans 		/*
2296a4bd5210SJason Evans 		 * Another thread updated runcur while this one ran without the
2297a4bd5210SJason Evans 		 * bin lock in arena_bin_nonfull_run_get().
2298a4bd5210SJason Evans 		 */
2299536b3538SJason Evans 		void *ret;
2300a4bd5210SJason Evans 		assert(bin->runcur->nfree > 0);
2301a4bd5210SJason Evans 		ret = arena_run_reg_alloc(bin->runcur, bin_info);
2302a4bd5210SJason Evans 		if (run != NULL) {
2303a4bd5210SJason Evans 			arena_chunk_t *chunk;
2304a4bd5210SJason Evans 
2305a4bd5210SJason Evans 			/*
2306f921d10fSJason Evans 			 * arena_run_alloc_small() may have allocated run, or
2307f921d10fSJason Evans 			 * it may have pulled run from the bin's run heap.

2308f921d10fSJason Evans 			 * Therefore it is unsafe to make any assumptions about
2309f921d10fSJason Evans 			 * how run has previously been used, and
2310f921d10fSJason Evans 			 * arena_bin_lower_run() must be called, as if a region
2311f921d10fSJason Evans 			 * were just deallocated from the run.
2312a4bd5210SJason Evans 			 */
2313a4bd5210SJason Evans 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
23141f0a49e8SJason Evans 			if (run->nfree == bin_info->nregs) {
23151f0a49e8SJason Evans 				arena_dalloc_bin_run(tsdn, arena, chunk, run,
23161f0a49e8SJason Evans 				    bin);
23171f0a49e8SJason Evans 			} else
2318a4bd5210SJason Evans 				arena_bin_lower_run(arena, chunk, run, bin);
2319a4bd5210SJason Evans 		}
2320a4bd5210SJason Evans 		return (ret);
2321a4bd5210SJason Evans 	}
2322a4bd5210SJason Evans 
2323a4bd5210SJason Evans 	if (run == NULL)
2324a4bd5210SJason Evans 		return (NULL);
2325a4bd5210SJason Evans 
2326a4bd5210SJason Evans 	bin->runcur = run;
2327a4bd5210SJason Evans 
2328a4bd5210SJason Evans 	assert(bin->runcur->nfree > 0);
2329a4bd5210SJason Evans 
2330a4bd5210SJason Evans 	return (arena_run_reg_alloc(bin->runcur, bin_info));
2331a4bd5210SJason Evans }
2332a4bd5210SJason Evans 
2333a4bd5210SJason Evans void
23341f0a49e8SJason Evans arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin,
2335df0d881dSJason Evans     szind_t binind, uint64_t prof_accumbytes)
2336a4bd5210SJason Evans {
2337a4bd5210SJason Evans 	unsigned i, nfill;
2338a4bd5210SJason Evans 	arena_bin_t *bin;
2339a4bd5210SJason Evans 
2340a4bd5210SJason Evans 	assert(tbin->ncached == 0);
2341a4bd5210SJason Evans 
23421f0a49e8SJason Evans 	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes))
23431f0a49e8SJason Evans 		prof_idump(tsdn);
2344a4bd5210SJason Evans 	bin = &arena->bins[binind];
23451f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &bin->lock);
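	/*
	 * Fill ncached_max >> lg_fill_div slots.  Objects are written at
	 * negative offsets from tbin->avail, lowest address first, so that
	 * low regions are handed out first.
	 */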
2346a4bd5210SJason Evans 	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
2347a4bd5210SJason Evans 	    tbin->lg_fill_div); i < nfill; i++) {
2348536b3538SJason Evans 		arena_run_t *run;
2349536b3538SJason Evans 		void *ptr;
2350a4bd5210SJason Evans 		if ((run = bin->runcur) != NULL && run->nfree > 0)
2351a4bd5210SJason Evans 			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
2352a4bd5210SJason Evans 		else
23531f0a49e8SJason Evans 			ptr = arena_bin_malloc_hard(tsdn, arena, bin);
2354d0e79aa3SJason Evans 		if (ptr == NULL) {
2355d0e79aa3SJason Evans 			/*
2356d0e79aa3SJason Evans 			 * OOM.  tbin->avail isn't yet filled down to its first
2357d0e79aa3SJason Evans 			 * element, so the successful allocations (if any) must
2358df0d881dSJason Evans 			 * be moved just before tbin->avail before bailing out.
2359d0e79aa3SJason Evans 			 */
2360d0e79aa3SJason Evans 			if (i > 0) {
2361df0d881dSJason Evans 				memmove(tbin->avail - i, tbin->avail - nfill,
2362d0e79aa3SJason Evans 				    i * sizeof(void *));
2363d0e79aa3SJason Evans 			}
2364a4bd5210SJason Evans 			break;
2365d0e79aa3SJason Evans 		}
2366d0e79aa3SJason Evans 		if (config_fill && unlikely(opt_junk_alloc)) {
2367a4bd5210SJason Evans 			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
2368a4bd5210SJason Evans 			    true);
2369a4bd5210SJason Evans 		}
2370a4bd5210SJason Evans 		/* Insert such that low regions get used first. */
2371df0d881dSJason Evans 		*(tbin->avail - nfill + i) = ptr;
2372a4bd5210SJason Evans 	}
2373a4bd5210SJason Evans 	if (config_stats) {
2374a4bd5210SJason Evans 		bin->stats.nmalloc += i;
2375a4bd5210SJason Evans 		bin->stats.nrequests += tbin->tstats.nrequests;
2376d0e79aa3SJason Evans 		bin->stats.curregs += i;
2377a4bd5210SJason Evans 		bin->stats.nfills++;
2378a4bd5210SJason Evans 		tbin->tstats.nrequests = 0;
2379a4bd5210SJason Evans 	}
23801f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &bin->lock);
2381a4bd5210SJason Evans 	tbin->ncached = i;
23821f0a49e8SJason Evans 	arena_decay_tick(tsdn, arena);
2383a4bd5210SJason Evans }
2384a4bd5210SJason Evans 
2385a4bd5210SJason Evans void
2386a4bd5210SJason Evans arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
2387a4bd5210SJason Evans {
2388a4bd5210SJason Evans 
2389a4bd5210SJason Evans 	size_t redzone_size = bin_info->redzone_size;
23901f0a49e8SJason Evans 
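	/*
	 * If the region is to be zeroed, junk-fill only the leading and
	 * trailing redzones; otherwise junk-fill the whole reg_interval,
	 * starting at the leading redzone.
	 */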
23911f0a49e8SJason Evans 	if (zero) {
23921f0a49e8SJason Evans 		memset((void *)((uintptr_t)ptr - redzone_size),
23931f0a49e8SJason Evans 		    JEMALLOC_ALLOC_JUNK, redzone_size);
23941f0a49e8SJason Evans 		memset((void *)((uintptr_t)ptr + bin_info->reg_size),
23951f0a49e8SJason Evans 		    JEMALLOC_ALLOC_JUNK, redzone_size);
2396a4bd5210SJason Evans 	} else {
23971f0a49e8SJason Evans 		memset((void *)((uintptr_t)ptr - redzone_size),
23981f0a49e8SJason Evans 		    JEMALLOC_ALLOC_JUNK, bin_info->reg_interval);
2399a4bd5210SJason Evans 	}
2400a4bd5210SJason Evans }
2401a4bd5210SJason Evans 
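/*
 * Under JEMALLOC_JET the following functions are compiled under alternate
 * names and re-exposed through function pointers, allowing the test harness
 * to interpose its own implementations.
 */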
2402f921d10fSJason Evans #ifdef JEMALLOC_JET
2403f921d10fSJason Evans #undef arena_redzone_corruption
24041f0a49e8SJason Evans #define	arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption)
2405f921d10fSJason Evans #endif
2406f921d10fSJason Evans static void
2407f921d10fSJason Evans arena_redzone_corruption(void *ptr, size_t usize, bool after,
2408f921d10fSJason Evans     size_t offset, uint8_t byte)
2409f921d10fSJason Evans {
2410f921d10fSJason Evans 
2411f921d10fSJason Evans 	malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
2412f921d10fSJason Evans 	    "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
2413f921d10fSJason Evans 	    after ? "after" : "before", ptr, usize, byte);
2414f921d10fSJason Evans }
2415f921d10fSJason Evans #ifdef JEMALLOC_JET
2416f921d10fSJason Evans #undef arena_redzone_corruption
2417f921d10fSJason Evans #define	arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
2418f921d10fSJason Evans arena_redzone_corruption_t *arena_redzone_corruption =
24191f0a49e8SJason Evans     JEMALLOC_N(n_arena_redzone_corruption);
2420f921d10fSJason Evans #endif
2421f921d10fSJason Evans 
2422f921d10fSJason Evans static void
2423f921d10fSJason Evans arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
2424a4bd5210SJason Evans {
2425a4bd5210SJason Evans 	bool error = false;
2426a4bd5210SJason Evans 
2427d0e79aa3SJason Evans 	if (opt_junk_alloc) {
2428536b3538SJason Evans 		size_t size = bin_info->reg_size;
2429536b3538SJason Evans 		size_t redzone_size = bin_info->redzone_size;
2430536b3538SJason Evans 		size_t i;
2431536b3538SJason Evans 
2432a4bd5210SJason Evans 		for (i = 1; i <= redzone_size; i++) {
2433f921d10fSJason Evans 			uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
24341f0a49e8SJason Evans 			if (*byte != JEMALLOC_ALLOC_JUNK) {
2435a4bd5210SJason Evans 				error = true;
2436d0e79aa3SJason Evans 				arena_redzone_corruption(ptr, size, false, i,
2437d0e79aa3SJason Evans 				    *byte);
2438f921d10fSJason Evans 				if (reset)
24391f0a49e8SJason Evans 					*byte = JEMALLOC_ALLOC_JUNK;
2440a4bd5210SJason Evans 			}
2441a4bd5210SJason Evans 		}
2442a4bd5210SJason Evans 		for (i = 0; i < redzone_size; i++) {
2443f921d10fSJason Evans 			uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
24441f0a49e8SJason Evans 			if (*byte != JEMALLOC_ALLOC_JUNK) {
2445a4bd5210SJason Evans 				error = true;
2446d0e79aa3SJason Evans 				arena_redzone_corruption(ptr, size, true, i,
2447d0e79aa3SJason Evans 				    *byte);
2448f921d10fSJason Evans 				if (reset)
24491f0a49e8SJason Evans 					*byte = JEMALLOC_ALLOC_JUNK;
2450a4bd5210SJason Evans 			}
2451a4bd5210SJason Evans 		}
2452d0e79aa3SJason Evans 	}
2453d0e79aa3SJason Evans 
2454a4bd5210SJason Evans 	if (opt_abort && error)
2455a4bd5210SJason Evans 		abort();
2456f921d10fSJason Evans }
2457a4bd5210SJason Evans 
2458f921d10fSJason Evans #ifdef JEMALLOC_JET
2459f921d10fSJason Evans #undef arena_dalloc_junk_small
24601f0a49e8SJason Evans #define	arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small)
2461f921d10fSJason Evans #endif
2462f921d10fSJason Evans void
2463f921d10fSJason Evans arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
2464f921d10fSJason Evans {
2465f921d10fSJason Evans 	size_t redzone_size = bin_info->redzone_size;
2466f921d10fSJason Evans 
2467f921d10fSJason Evans 	arena_redzones_validate(ptr, bin_info, false);
24681f0a49e8SJason Evans 	memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK,
2469a4bd5210SJason Evans 	    bin_info->reg_interval);
2470a4bd5210SJason Evans }
2471f921d10fSJason Evans #ifdef JEMALLOC_JET
2472f921d10fSJason Evans #undef arena_dalloc_junk_small
2473f921d10fSJason Evans #define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
2474f921d10fSJason Evans arena_dalloc_junk_small_t *arena_dalloc_junk_small =
24751f0a49e8SJason Evans     JEMALLOC_N(n_arena_dalloc_junk_small);
2476f921d10fSJason Evans #endif
2477f921d10fSJason Evans 
2478f921d10fSJason Evans void
2479f921d10fSJason Evans arena_quarantine_junk_small(void *ptr, size_t usize)
2480f921d10fSJason Evans {
2481536b3538SJason Evans 	szind_t binind;
2482f921d10fSJason Evans 	arena_bin_info_t *bin_info;
2483f921d10fSJason Evans 	cassert(config_fill);
2484d0e79aa3SJason Evans 	assert(opt_junk_free);
2485f921d10fSJason Evans 	assert(opt_quarantine);
2486f921d10fSJason Evans 	assert(usize <= SMALL_MAXCLASS);
2487f921d10fSJason Evans 
2488d0e79aa3SJason Evans 	binind = size2index(usize);
2489f921d10fSJason Evans 	bin_info = &arena_bin_info[binind];
2490f921d10fSJason Evans 	arena_redzones_validate(ptr, bin_info, true);
2491f921d10fSJason Evans }
2492a4bd5210SJason Evans 
2493df0d881dSJason Evans static void *
24941f0a49e8SJason Evans arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
2495a4bd5210SJason Evans {
2496a4bd5210SJason Evans 	void *ret;
2497a4bd5210SJason Evans 	arena_bin_t *bin;
2498df0d881dSJason Evans 	size_t usize;
2499a4bd5210SJason Evans 	arena_run_t *run;
2500a4bd5210SJason Evans 
2501a4bd5210SJason Evans 	assert(binind < NBINS);
2502a4bd5210SJason Evans 	bin = &arena->bins[binind];
2503df0d881dSJason Evans 	usize = index2size(binind);
2504a4bd5210SJason Evans 
25051f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &bin->lock);
2506a4bd5210SJason Evans 	if ((run = bin->runcur) != NULL && run->nfree > 0)
2507a4bd5210SJason Evans 		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
2508a4bd5210SJason Evans 	else
25091f0a49e8SJason Evans 		ret = arena_bin_malloc_hard(tsdn, arena, bin);
2510a4bd5210SJason Evans 
2511a4bd5210SJason Evans 	if (ret == NULL) {
25121f0a49e8SJason Evans 		malloc_mutex_unlock(tsdn, &bin->lock);
2513a4bd5210SJason Evans 		return (NULL);
2514a4bd5210SJason Evans 	}
2515a4bd5210SJason Evans 
2516a4bd5210SJason Evans 	if (config_stats) {
2517a4bd5210SJason Evans 		bin->stats.nmalloc++;
2518a4bd5210SJason Evans 		bin->stats.nrequests++;
2519d0e79aa3SJason Evans 		bin->stats.curregs++;
2520a4bd5210SJason Evans 	}
25211f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &bin->lock);
25221f0a49e8SJason Evans 	if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize))
25231f0a49e8SJason Evans 		prof_idump(tsdn);
2524a4bd5210SJason Evans 
2525d0e79aa3SJason Evans 	if (!zero) {
2526a4bd5210SJason Evans 		if (config_fill) {
2527d0e79aa3SJason Evans 			if (unlikely(opt_junk_alloc)) {
2528a4bd5210SJason Evans 				arena_alloc_junk_small(ret,
2529a4bd5210SJason Evans 				    &arena_bin_info[binind], false);
2530d0e79aa3SJason Evans 			} else if (unlikely(opt_zero))
2531df0d881dSJason Evans 				memset(ret, 0, usize);
2532a4bd5210SJason Evans 		}
2533df0d881dSJason Evans 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2534a4bd5210SJason Evans 	} else {
2535d0e79aa3SJason Evans 		if (config_fill && unlikely(opt_junk_alloc)) {
2536a4bd5210SJason Evans 			arena_alloc_junk_small(ret, &arena_bin_info[binind],
2537a4bd5210SJason Evans 			    true);
2538a4bd5210SJason Evans 		}
2539df0d881dSJason Evans 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize);
2540df0d881dSJason Evans 		memset(ret, 0, usize);
2541a4bd5210SJason Evans 	}
2542a4bd5210SJason Evans 
25431f0a49e8SJason Evans 	arena_decay_tick(tsdn, arena);
2544a4bd5210SJason Evans 	return (ret);
2545a4bd5210SJason Evans }
2546a4bd5210SJason Evans 
2547a4bd5210SJason Evans void *
25481f0a49e8SJason Evans arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero)
2549a4bd5210SJason Evans {
2550a4bd5210SJason Evans 	void *ret;
2551d0e79aa3SJason Evans 	size_t usize;
2552d0e79aa3SJason Evans 	uintptr_t random_offset;
2553d0e79aa3SJason Evans 	arena_run_t *run;
2554d0e79aa3SJason Evans 	arena_chunk_map_misc_t *miscelm;
25551f0a49e8SJason Evans 	UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false);
2556a4bd5210SJason Evans 
2557a4bd5210SJason Evans 	/* Large allocation. */
2558df0d881dSJason Evans 	usize = index2size(binind);
25591f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
2560d0e79aa3SJason Evans 	if (config_cache_oblivious) {
2561d0e79aa3SJason Evans 		uint64_t r;
2562d0e79aa3SJason Evans 
2563d0e79aa3SJason Evans 		/*
2564d0e79aa3SJason Evans 		 * Compute a uniformly distributed offset within the first page
2565d0e79aa3SJason Evans 		 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64
2566d0e79aa3SJason Evans 		 * for 4 KiB pages and 64-byte cachelines.
2567d0e79aa3SJason Evans 		 */
2568*bde95144SJason Evans 		r = prng_lg_range_zu(&arena->offset_state, LG_PAGE -
2569*bde95144SJason Evans 		    LG_CACHELINE, false);
2570d0e79aa3SJason Evans 		random_offset = ((uintptr_t)r) << LG_CACHELINE;
2571d0e79aa3SJason Evans 	} else
2572d0e79aa3SJason Evans 		random_offset = 0;
25731f0a49e8SJason Evans 	run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero);
2574d0e79aa3SJason Evans 	if (run == NULL) {
25751f0a49e8SJason Evans 		malloc_mutex_unlock(tsdn, &arena->lock);
2576a4bd5210SJason Evans 		return (NULL);
2577a4bd5210SJason Evans 	}
2578d0e79aa3SJason Evans 	miscelm = arena_run_to_miscelm(run);
2579d0e79aa3SJason Evans 	ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) +
2580d0e79aa3SJason Evans 	    random_offset);
2581a4bd5210SJason Evans 	if (config_stats) {
2582df0d881dSJason Evans 		szind_t index = binind - NBINS;
2583d0e79aa3SJason Evans 
2584a4bd5210SJason Evans 		arena->stats.nmalloc_large++;
2585a4bd5210SJason Evans 		arena->stats.nrequests_large++;
2586d0e79aa3SJason Evans 		arena->stats.allocated_large += usize;
2587d0e79aa3SJason Evans 		arena->stats.lstats[index].nmalloc++;
2588d0e79aa3SJason Evans 		arena->stats.lstats[index].nrequests++;
2589d0e79aa3SJason Evans 		arena->stats.lstats[index].curruns++;
2590a4bd5210SJason Evans 	}
2591a4bd5210SJason Evans 	if (config_prof)
2592d0e79aa3SJason Evans 		idump = arena_prof_accum_locked(arena, usize);
25931f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
2594f8ca2db1SJason Evans 	if (config_prof && idump)
25951f0a49e8SJason Evans 		prof_idump(tsdn);
2596a4bd5210SJason Evans 
2597d0e79aa3SJason Evans 	if (!zero) {
2598a4bd5210SJason Evans 		if (config_fill) {
2599d0e79aa3SJason Evans 			if (unlikely(opt_junk_alloc))
26001f0a49e8SJason Evans 				memset(ret, JEMALLOC_ALLOC_JUNK, usize);
2601d0e79aa3SJason Evans 			else if (unlikely(opt_zero))
2602d0e79aa3SJason Evans 				memset(ret, 0, usize);
2603a4bd5210SJason Evans 		}
2604a4bd5210SJason Evans 	}
2605a4bd5210SJason Evans 
26061f0a49e8SJason Evans 	arena_decay_tick(tsdn, arena);
2607a4bd5210SJason Evans 	return (ret);
2608a4bd5210SJason Evans }
2609a4bd5210SJason Evans 
2610df0d881dSJason Evans void *
26111f0a49e8SJason Evans arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
26121f0a49e8SJason Evans     bool zero)
2613df0d881dSJason Evans {
2614df0d881dSJason Evans 
26151f0a49e8SJason Evans 	assert(!tsdn_null(tsdn) || arena != NULL);
26161f0a49e8SJason Evans 
26171f0a49e8SJason Evans 	if (likely(!tsdn_null(tsdn)))
26181f0a49e8SJason Evans 		arena = arena_choose(tsdn_tsd(tsdn), arena);
2619df0d881dSJason Evans 	if (unlikely(arena == NULL))
2620df0d881dSJason Evans 		return (NULL);
2621df0d881dSJason Evans 
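	/* Dispatch on size class: small run, large run, or huge allocation. */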
2622df0d881dSJason Evans 	if (likely(size <= SMALL_MAXCLASS))
26231f0a49e8SJason Evans 		return (arena_malloc_small(tsdn, arena, ind, zero));
2624df0d881dSJason Evans 	if (likely(size <= large_maxclass))
26251f0a49e8SJason Evans 		return (arena_malloc_large(tsdn, arena, ind, zero));
26261f0a49e8SJason Evans 	return (huge_malloc(tsdn, arena, index2size(ind), zero));
2627df0d881dSJason Evans }
2628df0d881dSJason Evans 
2629a4bd5210SJason Evans /* Only handles large allocations that require more than page alignment. */
2630d0e79aa3SJason Evans static void *
26311f0a49e8SJason Evans arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
2632d0e79aa3SJason Evans     bool zero)
2633a4bd5210SJason Evans {
2634a4bd5210SJason Evans 	void *ret;
2635a4bd5210SJason Evans 	size_t alloc_size, leadsize, trailsize;
2636a4bd5210SJason Evans 	arena_run_t *run;
2637a4bd5210SJason Evans 	arena_chunk_t *chunk;
2638d0e79aa3SJason Evans 	arena_chunk_map_misc_t *miscelm;
2639d0e79aa3SJason Evans 	void *rpages;
2640a4bd5210SJason Evans 
26411f0a49e8SJason Evans 	assert(!tsdn_null(tsdn) || arena != NULL);
2642d0e79aa3SJason Evans 	assert(usize == PAGE_CEILING(usize));
2643d0e79aa3SJason Evans 
26441f0a49e8SJason Evans 	if (likely(!tsdn_null(tsdn)))
26451f0a49e8SJason Evans 		arena = arena_choose(tsdn_tsd(tsdn), arena);
2646d0e79aa3SJason Evans 	if (unlikely(arena == NULL))
2647d0e79aa3SJason Evans 		return (NULL);
2648a4bd5210SJason Evans 
2649a4bd5210SJason Evans 	alignment = PAGE_CEILING(alignment);
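	/*
	 * Over-allocate by (alignment - PAGE) so that a suitably aligned
	 * range of usize + large_pad bytes is guaranteed to exist within the
	 * run; the unaligned head and any unused tail are trimmed off below.
	 */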
265062b2691eSJason Evans 	alloc_size = usize + large_pad + alignment - PAGE;
2651a4bd5210SJason Evans 
26521f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
26531f0a49e8SJason Evans 	run = arena_run_alloc_large(tsdn, arena, alloc_size, false);
2654a4bd5210SJason Evans 	if (run == NULL) {
26551f0a49e8SJason Evans 		malloc_mutex_unlock(tsdn, &arena->lock);
2656a4bd5210SJason Evans 		return (NULL);
2657a4bd5210SJason Evans 	}
2658a4bd5210SJason Evans 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
2659d0e79aa3SJason Evans 	miscelm = arena_run_to_miscelm(run);
2660d0e79aa3SJason Evans 	rpages = arena_miscelm_to_rpages(miscelm);
2661a4bd5210SJason Evans 
2662d0e79aa3SJason Evans 	leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) -
2663d0e79aa3SJason Evans 	    (uintptr_t)rpages;
2664d0e79aa3SJason Evans 	assert(alloc_size >= leadsize + usize);
2665d0e79aa3SJason Evans 	trailsize = alloc_size - leadsize - usize - large_pad;
2666a4bd5210SJason Evans 	if (leadsize != 0) {
2667d0e79aa3SJason Evans 		arena_chunk_map_misc_t *head_miscelm = miscelm;
2668d0e79aa3SJason Evans 		arena_run_t *head_run = run;
2669d0e79aa3SJason Evans 
26701f0a49e8SJason Evans 		miscelm = arena_miscelm_get_mutable(chunk,
2671d0e79aa3SJason Evans 		    arena_miscelm_to_pageind(head_miscelm) + (leadsize >>
2672d0e79aa3SJason Evans 		    LG_PAGE));
2673d0e79aa3SJason Evans 		run = &miscelm->run;
2674d0e79aa3SJason Evans 
26751f0a49e8SJason Evans 		arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size,
2676d0e79aa3SJason Evans 		    alloc_size - leadsize);
2677a4bd5210SJason Evans 	}
2678a4bd5210SJason Evans 	if (trailsize != 0) {
26791f0a49e8SJason Evans 		arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad +
2680d0e79aa3SJason Evans 		    trailsize, usize + large_pad, false);
2681a4bd5210SJason Evans 	}
2682d0e79aa3SJason Evans 	if (arena_run_init_large(arena, run, usize + large_pad, zero)) {
2683d0e79aa3SJason Evans 		size_t run_ind =
2684d0e79aa3SJason Evans 		    arena_miscelm_to_pageind(arena_run_to_miscelm(run));
2685d0e79aa3SJason Evans 		bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0);
2686d0e79aa3SJason Evans 		bool decommitted = (arena_mapbits_decommitted_get(chunk,
2687d0e79aa3SJason Evans 		    run_ind) != 0);
2688d0e79aa3SJason Evans 
2689d0e79aa3SJason Evans 		assert(decommitted); /* Cause of OOM. */
26901f0a49e8SJason Evans 		arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted);
26911f0a49e8SJason Evans 		malloc_mutex_unlock(tsdn, &arena->lock);
2692d0e79aa3SJason Evans 		return (NULL);
2693d0e79aa3SJason Evans 	}
2694d0e79aa3SJason Evans 	ret = arena_miscelm_to_rpages(miscelm);
2695a4bd5210SJason Evans 
2696a4bd5210SJason Evans 	if (config_stats) {
2697536b3538SJason Evans 		szind_t index = size2index(usize) - NBINS;
2698d0e79aa3SJason Evans 
2699a4bd5210SJason Evans 		arena->stats.nmalloc_large++;
2700a4bd5210SJason Evans 		arena->stats.nrequests_large++;
2701d0e79aa3SJason Evans 		arena->stats.allocated_large += usize;
2702d0e79aa3SJason Evans 		arena->stats.lstats[index].nmalloc++;
2703d0e79aa3SJason Evans 		arena->stats.lstats[index].nrequests++;
2704d0e79aa3SJason Evans 		arena->stats.lstats[index].curruns++;
2705a4bd5210SJason Evans 	}
27061f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
2707a4bd5210SJason Evans 
2708d0e79aa3SJason Evans 	if (config_fill && !zero) {
2709d0e79aa3SJason Evans 		if (unlikely(opt_junk_alloc))
27101f0a49e8SJason Evans 			memset(ret, JEMALLOC_ALLOC_JUNK, usize);
2711d0e79aa3SJason Evans 		else if (unlikely(opt_zero))
2712d0e79aa3SJason Evans 			memset(ret, 0, usize);
2713d0e79aa3SJason Evans 	}
27141f0a49e8SJason Evans 	arena_decay_tick(tsdn, arena);
2715d0e79aa3SJason Evans 	return (ret);
2716d0e79aa3SJason Evans }
2717d0e79aa3SJason Evans 
2718d0e79aa3SJason Evans void *
27191f0a49e8SJason Evans arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
2720d0e79aa3SJason Evans     bool zero, tcache_t *tcache)
2721d0e79aa3SJason Evans {
2722d0e79aa3SJason Evans 	void *ret;
2723d0e79aa3SJason Evans 
2724d0e79aa3SJason Evans 	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
2725d0e79aa3SJason Evans 	    && (usize & PAGE_MASK) == 0))) {
2726d0e79aa3SJason Evans 		/* Small; alignment doesn't require special run placement. */
27271f0a49e8SJason Evans 		ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
2728df0d881dSJason Evans 		    tcache, true);
2729536b3538SJason Evans 	} else if (usize <= large_maxclass && alignment <= PAGE) {
2730d0e79aa3SJason Evans 		/*
2731d0e79aa3SJason Evans 		 * Large; alignment doesn't require special run placement.
2732d0e79aa3SJason Evans 		 * However, the cached pointer may be at a random offset from
2733d0e79aa3SJason Evans 		 * the base of the run, so do some bit manipulation to retrieve
2734d0e79aa3SJason Evans 		 * the base.
2735d0e79aa3SJason Evans 		 */
27361f0a49e8SJason Evans 		ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero,
2737df0d881dSJason Evans 		    tcache, true);
2738d0e79aa3SJason Evans 		if (config_cache_oblivious)
2739d0e79aa3SJason Evans 			ret = (void *)((uintptr_t)ret & ~PAGE_MASK);
2740d0e79aa3SJason Evans 	} else {
2741536b3538SJason Evans 		if (likely(usize <= large_maxclass)) {
27421f0a49e8SJason Evans 			ret = arena_palloc_large(tsdn, arena, usize, alignment,
2743d0e79aa3SJason Evans 			    zero);
2744d0e79aa3SJason Evans 		} else if (likely(alignment <= chunksize))
27451f0a49e8SJason Evans 			ret = huge_malloc(tsdn, arena, usize, zero);
2746d0e79aa3SJason Evans 		else {
27471f0a49e8SJason Evans 			ret = huge_palloc(tsdn, arena, usize, alignment, zero);
2748d0e79aa3SJason Evans 		}
2749a4bd5210SJason Evans 	}
2750a4bd5210SJason Evans 	return (ret);
2751a4bd5210SJason Evans }
2752a4bd5210SJason Evans 
2753a4bd5210SJason Evans void
27541f0a49e8SJason Evans arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size)
2755a4bd5210SJason Evans {
2756a4bd5210SJason Evans 	arena_chunk_t *chunk;
2757d0e79aa3SJason Evans 	size_t pageind;
2758536b3538SJason Evans 	szind_t binind;
2759a4bd5210SJason Evans 
27608ed34ab0SJason Evans 	cassert(config_prof);
2761a4bd5210SJason Evans 	assert(ptr != NULL);
2762a4bd5210SJason Evans 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
27631f0a49e8SJason Evans 	assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
27641f0a49e8SJason Evans 	assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS);
2765a4bd5210SJason Evans 	assert(size <= SMALL_MAXCLASS);
2766a4bd5210SJason Evans 
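	/*
	 * The allocation remains backed by a LARGE_MINCLASS run; recording
	 * the small size class in the chunk map makes demoting size queries
	 * (isalloc(..., true)) report the requested small size instead.
	 */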
2767a4bd5210SJason Evans 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
2768a4bd5210SJason Evans 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2769d0e79aa3SJason Evans 	binind = size2index(size);
2770a4bd5210SJason Evans 	assert(binind < NBINS);
2771e722f8f8SJason Evans 	arena_mapbits_large_binind_set(chunk, pageind, binind);
2772a4bd5210SJason Evans 
27731f0a49e8SJason Evans 	assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS);
27741f0a49e8SJason Evans 	assert(isalloc(tsdn, ptr, true) == size);
2775a4bd5210SJason Evans }
2776a4bd5210SJason Evans 
2777a4bd5210SJason Evans static void
2778a4bd5210SJason Evans arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
2779a4bd5210SJason Evans     arena_bin_t *bin)
2780a4bd5210SJason Evans {
2781a4bd5210SJason Evans 
2782a4bd5210SJason Evans 	/* Dissociate run from bin. */
2783a4bd5210SJason Evans 	if (run == bin->runcur)
2784a4bd5210SJason Evans 		bin->runcur = NULL;
2785a4bd5210SJason Evans 	else {
2786536b3538SJason Evans 		szind_t binind = arena_bin_index(extent_node_arena_get(
2787d0e79aa3SJason Evans 		    &chunk->node), bin);
2788a4bd5210SJason Evans 		arena_bin_info_t *bin_info = &arena_bin_info[binind];
2789a4bd5210SJason Evans 
2790a4bd5210SJason Evans 		/*
27911f0a49e8SJason Evans 		 * The following block's conditional is necessary because if the
27921f0a49e8SJason Evans 		 * run only contains one region, then it never gets inserted
27931f0a49e8SJason Evans 		 * into the non-full runs heap.
2794a4bd5210SJason Evans 		 */
27951f0a49e8SJason Evans 		if (bin_info->nregs != 1) {
27961f0a49e8SJason Evans 			arena_chunk_map_misc_t *miscelm =
27971f0a49e8SJason Evans 			    arena_run_to_miscelm(run);
27981f0a49e8SJason Evans 
27991f0a49e8SJason Evans 			arena_run_heap_remove(&bin->runs, miscelm);
2800a4bd5210SJason Evans 		}
2801a4bd5210SJason Evans 	}
2802a4bd5210SJason Evans }
2803a4bd5210SJason Evans 
2804a4bd5210SJason Evans static void
28051f0a49e8SJason Evans arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
28061f0a49e8SJason Evans     arena_run_t *run, arena_bin_t *bin)
2807a4bd5210SJason Evans {
2808a4bd5210SJason Evans 
2809a4bd5210SJason Evans 	assert(run != bin->runcur);
2810a4bd5210SJason Evans 
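	/*
	 * Returning the run to the arena requires arena->lock, so drop
	 * bin->lock around that operation, mirroring the locking pattern in
	 * arena_bin_nonfull_run_get().
	 */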
28111f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &bin->lock);
2812a4bd5210SJason Evans 	/******************************/
28131f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
28141f0a49e8SJason Evans 	arena_run_dalloc(tsdn, arena, run, true, false, false);
28151f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
2816a4bd5210SJason Evans 	/****************************/
28171f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &bin->lock);
2818a4bd5210SJason Evans 	if (config_stats)
2819a4bd5210SJason Evans 		bin->stats.curruns--;
2820a4bd5210SJason Evans }
2821a4bd5210SJason Evans 
2822a4bd5210SJason Evans static void
2823a4bd5210SJason Evans arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
2824a4bd5210SJason Evans     arena_bin_t *bin)
2825a4bd5210SJason Evans {
2826a4bd5210SJason Evans 
2827a4bd5210SJason Evans 	/*
2828a4bd5210SJason Evans 	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
2829a4bd5210SJason Evans 	 * non-full run.  It is okay to NULL runcur out rather than proactively
2830a4bd5210SJason Evans 	 * keeping it pointing at the lowest non-full run.
2831a4bd5210SJason Evans 	 */
2832a4bd5210SJason Evans 	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
2833a4bd5210SJason Evans 		/* Switch runcur. */
2834a4bd5210SJason Evans 		if (bin->runcur->nfree > 0)
2835a4bd5210SJason Evans 			arena_bin_runs_insert(bin, bin->runcur);
2836a4bd5210SJason Evans 		bin->runcur = run;
2837a4bd5210SJason Evans 		if (config_stats)
2838a4bd5210SJason Evans 			bin->stats.reruns++;
2839a4bd5210SJason Evans 	} else
2840a4bd5210SJason Evans 		arena_bin_runs_insert(bin, run);
2841a4bd5210SJason Evans }
2842a4bd5210SJason Evans 
2843d0e79aa3SJason Evans static void
28441f0a49e8SJason Evans arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
28451f0a49e8SJason Evans     void *ptr, arena_chunk_map_bits_t *bitselm, bool junked)
2846a4bd5210SJason Evans {
2847d0e79aa3SJason Evans 	size_t pageind, rpages_ind;
2848a4bd5210SJason Evans 	arena_run_t *run;
2849a4bd5210SJason Evans 	arena_bin_t *bin;
2850e722f8f8SJason Evans 	arena_bin_info_t *bin_info;
2851536b3538SJason Evans 	szind_t binind;
2852a4bd5210SJason Evans 
2853a4bd5210SJason Evans 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2854d0e79aa3SJason Evans 	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
28551f0a49e8SJason Evans 	run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
2856d0e79aa3SJason Evans 	binind = run->binind;
2857d0e79aa3SJason Evans 	bin = &arena->bins[binind];
2858e722f8f8SJason Evans 	bin_info = &arena_bin_info[binind];
2859a4bd5210SJason Evans 
2860d0e79aa3SJason Evans 	if (!junked && config_fill && unlikely(opt_junk_free))
2861a4bd5210SJason Evans 		arena_dalloc_junk_small(ptr, bin_info);
2862a4bd5210SJason Evans 
2863a4bd5210SJason Evans 	arena_run_reg_dalloc(run, ptr);
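	/*
	 * If the run is now completely free, return it to the arena;
	 * otherwise, if this deallocation made a previously full run
	 * non-full, make it available to future small allocations again.
	 */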
2864a4bd5210SJason Evans 	if (run->nfree == bin_info->nregs) {
2865a4bd5210SJason Evans 		arena_dissociate_bin_run(chunk, run, bin);
28661f0a49e8SJason Evans 		arena_dalloc_bin_run(tsdn, arena, chunk, run, bin);
2867a4bd5210SJason Evans 	} else if (run->nfree == 1 && run != bin->runcur)
2868a4bd5210SJason Evans 		arena_bin_lower_run(arena, chunk, run, bin);
2869a4bd5210SJason Evans 
2870a4bd5210SJason Evans 	if (config_stats) {
2871a4bd5210SJason Evans 		bin->stats.ndalloc++;
2872d0e79aa3SJason Evans 		bin->stats.curregs--;
2873a4bd5210SJason Evans 	}
2874a4bd5210SJason Evans }
2875a4bd5210SJason Evans 
2876a4bd5210SJason Evans void
28771f0a49e8SJason Evans arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena,
28781f0a49e8SJason Evans     arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm)
2879d0e79aa3SJason Evans {
2880d0e79aa3SJason Evans 
28811f0a49e8SJason Evans 	arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true);
2882d0e79aa3SJason Evans }
2883d0e79aa3SJason Evans 
2884d0e79aa3SJason Evans void
28851f0a49e8SJason Evans arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr,
2886d0e79aa3SJason Evans     size_t pageind, arena_chunk_map_bits_t *bitselm)
2887e722f8f8SJason Evans {
2888e722f8f8SJason Evans 	arena_run_t *run;
2889e722f8f8SJason Evans 	arena_bin_t *bin;
2890d0e79aa3SJason Evans 	size_t rpages_ind;
2891e722f8f8SJason Evans 
2892d0e79aa3SJason Evans 	rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind);
28931f0a49e8SJason Evans 	run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run;
2894d0e79aa3SJason Evans 	bin = &arena->bins[run->binind];
28951f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &bin->lock);
28961f0a49e8SJason Evans 	arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false);
28971f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &bin->lock);
2898e722f8f8SJason Evans }
2899e722f8f8SJason Evans 
2900e722f8f8SJason Evans void
29011f0a49e8SJason Evans arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
29021f0a49e8SJason Evans     void *ptr, size_t pageind)
2903e722f8f8SJason Evans {
2904d0e79aa3SJason Evans 	arena_chunk_map_bits_t *bitselm;
2905e722f8f8SJason Evans 
2906e722f8f8SJason Evans 	if (config_debug) {
2907e722f8f8SJason Evans 		/* arena_ptr_small_binind_get() does extra sanity checking. */
2908e722f8f8SJason Evans 		assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
2909e722f8f8SJason Evans 		    pageind)) != BININD_INVALID);
2910e722f8f8SJason Evans 	}
29111f0a49e8SJason Evans 	bitselm = arena_bitselm_get_mutable(chunk, pageind);
29121f0a49e8SJason Evans 	arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm);
29131f0a49e8SJason Evans 	arena_decay_tick(tsdn, arena);
2914e722f8f8SJason Evans }
2915a4bd5210SJason Evans 
2916f921d10fSJason Evans #ifdef JEMALLOC_JET
2917f921d10fSJason Evans #undef arena_dalloc_junk_large
29181f0a49e8SJason Evans #define	arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large)
2919f921d10fSJason Evans #endif
2920d0e79aa3SJason Evans void
2921f921d10fSJason Evans arena_dalloc_junk_large(void *ptr, size_t usize)
2922f921d10fSJason Evans {
2923f921d10fSJason Evans 
2924d0e79aa3SJason Evans 	if (config_fill && unlikely(opt_junk_free))
29251f0a49e8SJason Evans 		memset(ptr, JEMALLOC_FREE_JUNK, usize);
2926f921d10fSJason Evans }
2927f921d10fSJason Evans #ifdef JEMALLOC_JET
2928f921d10fSJason Evans #undef arena_dalloc_junk_large
2929f921d10fSJason Evans #define	arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
2930f921d10fSJason Evans arena_dalloc_junk_large_t *arena_dalloc_junk_large =
29311f0a49e8SJason Evans     JEMALLOC_N(n_arena_dalloc_junk_large);
2932f921d10fSJason Evans #endif
2933f921d10fSJason Evans 
2934536b3538SJason Evans static void
29351f0a49e8SJason Evans arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena,
29361f0a49e8SJason Evans     arena_chunk_t *chunk, void *ptr, bool junked)
2937a4bd5210SJason Evans {
2938d0e79aa3SJason Evans 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
29391f0a49e8SJason Evans 	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
29401f0a49e8SJason Evans 	    pageind);
2941d0e79aa3SJason Evans 	arena_run_t *run = &miscelm->run;
2942a4bd5210SJason Evans 
2943a4bd5210SJason Evans 	if (config_fill || config_stats) {
2944d0e79aa3SJason Evans 		size_t usize = arena_mapbits_large_size_get(chunk, pageind) -
2945d0e79aa3SJason Evans 		    large_pad;
2946a4bd5210SJason Evans 
2947d0e79aa3SJason Evans 		if (!junked)
2948f921d10fSJason Evans 			arena_dalloc_junk_large(ptr, usize);
2949a4bd5210SJason Evans 		if (config_stats) {
2950536b3538SJason Evans 			szind_t index = size2index(usize) - NBINS;
2951d0e79aa3SJason Evans 
2952a4bd5210SJason Evans 			arena->stats.ndalloc_large++;
2953f921d10fSJason Evans 			arena->stats.allocated_large -= usize;
2954d0e79aa3SJason Evans 			arena->stats.lstats[index].ndalloc++;
2955d0e79aa3SJason Evans 			arena->stats.lstats[index].curruns--;
2956a4bd5210SJason Evans 		}
2957a4bd5210SJason Evans 	}
2958a4bd5210SJason Evans 
29591f0a49e8SJason Evans 	arena_run_dalloc(tsdn, arena, run, true, false, false);
2960d0e79aa3SJason Evans }
2961d0e79aa3SJason Evans 
2962d0e79aa3SJason Evans void
29631f0a49e8SJason Evans arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena,
29641f0a49e8SJason Evans     arena_chunk_t *chunk, void *ptr)
29651f0a49e8SJason Evans {
29661f0a49e8SJason Evans 
29671f0a49e8SJason Evans 	arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true);
29681f0a49e8SJason Evans }
29691f0a49e8SJason Evans 
29701f0a49e8SJason Evans void
29711f0a49e8SJason Evans arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
2972d0e79aa3SJason Evans     void *ptr)
2973d0e79aa3SJason Evans {
2974d0e79aa3SJason Evans 
29751f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
29761f0a49e8SJason Evans 	arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false);
29771f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
29781f0a49e8SJason Evans 	arena_decay_tick(tsdn, arena);
2979e722f8f8SJason Evans }
2980e722f8f8SJason Evans 
2981a4bd5210SJason Evans static void
29821f0a49e8SJason Evans arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
29831f0a49e8SJason Evans     void *ptr, size_t oldsize, size_t size)
2984a4bd5210SJason Evans {
2985d0e79aa3SJason Evans 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
29861f0a49e8SJason Evans 	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
29871f0a49e8SJason Evans 	    pageind);
2988d0e79aa3SJason Evans 	arena_run_t *run = &miscelm->run;
2989a4bd5210SJason Evans 
2990a4bd5210SJason Evans 	assert(size < oldsize);
2991a4bd5210SJason Evans 
2992a4bd5210SJason Evans 	/*
2993a4bd5210SJason Evans 	 * Shrink the run, and make trailing pages available for other
2994a4bd5210SJason Evans 	 * allocations.
2995a4bd5210SJason Evans 	 */
29961f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
29971f0a49e8SJason Evans 	arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size +
2998d0e79aa3SJason Evans 	    large_pad, true);
2999a4bd5210SJason Evans 	if (config_stats) {
3000536b3538SJason Evans 		szind_t oldindex = size2index(oldsize) - NBINS;
3001536b3538SJason Evans 		szind_t index = size2index(size) - NBINS;
3002d0e79aa3SJason Evans 
3003a4bd5210SJason Evans 		arena->stats.ndalloc_large++;
3004a4bd5210SJason Evans 		arena->stats.allocated_large -= oldsize;
3005d0e79aa3SJason Evans 		arena->stats.lstats[oldindex].ndalloc++;
3006d0e79aa3SJason Evans 		arena->stats.lstats[oldindex].curruns--;
3007a4bd5210SJason Evans 
3008a4bd5210SJason Evans 		arena->stats.nmalloc_large++;
3009a4bd5210SJason Evans 		arena->stats.nrequests_large++;
3010a4bd5210SJason Evans 		arena->stats.allocated_large += size;
3011d0e79aa3SJason Evans 		arena->stats.lstats[index].nmalloc++;
3012d0e79aa3SJason Evans 		arena->stats.lstats[index].nrequests++;
3013d0e79aa3SJason Evans 		arena->stats.lstats[index].curruns++;
3014a4bd5210SJason Evans 	}
30151f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
3016a4bd5210SJason Evans }
3017a4bd5210SJason Evans 
3018a4bd5210SJason Evans static bool
30191f0a49e8SJason Evans arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
30201f0a49e8SJason Evans     void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero)
3021a4bd5210SJason Evans {
3022a4bd5210SJason Evans 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
3023d0e79aa3SJason Evans 	size_t npages = (oldsize + large_pad) >> LG_PAGE;
3024a4bd5210SJason Evans 	size_t followsize;
3025a4bd5210SJason Evans 
3026d0e79aa3SJason Evans 	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) -
3027d0e79aa3SJason Evans 	    large_pad);
3028a4bd5210SJason Evans 
3029a4bd5210SJason Evans 	/* Try to extend the run. */
30301f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
3031536b3538SJason Evans 	if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk,
3032536b3538SJason Evans 	    pageind+npages) != 0)
3033536b3538SJason Evans 		goto label_fail;
3034536b3538SJason Evans 	followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages);
3035536b3538SJason Evans 	if (oldsize + followsize >= usize_min) {
3036a4bd5210SJason Evans 		/*
3037a4bd5210SJason Evans 		 * The next run is available and sufficiently large.  Split the
3038a4bd5210SJason Evans 		 * following run, then merge the first part with the existing
3039a4bd5210SJason Evans 		 * allocation.
3040a4bd5210SJason Evans 		 */
3041d0e79aa3SJason Evans 		arena_run_t *run;
3042536b3538SJason Evans 		size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask;
3043d0e79aa3SJason Evans 
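		/*
		 * Starting from usize_max, walk down size classes until one
		 * fits within the existing run plus the following free run.
		 */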
3044536b3538SJason Evans 		usize = usize_max;
3045d0e79aa3SJason Evans 		while (oldsize + followsize < usize)
3046d0e79aa3SJason Evans 			usize = index2size(size2index(usize)-1);
3047d0e79aa3SJason Evans 		assert(usize >= usize_min);
3048536b3538SJason Evans 		assert(usize >= oldsize);
3049d0e79aa3SJason Evans 		splitsize = usize - oldsize;
3050536b3538SJason Evans 		if (splitsize == 0)
3051536b3538SJason Evans 			goto label_fail;
3052d0e79aa3SJason Evans 
30531f0a49e8SJason Evans 		run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run;
3054536b3538SJason Evans 		if (arena_run_split_large(arena, run, splitsize, zero))
3055536b3538SJason Evans 			goto label_fail;
3056a4bd5210SJason Evans 
3057ba4f5cc0SJason Evans 		if (config_cache_oblivious && zero) {
3058ba4f5cc0SJason Evans 			/*
3059ba4f5cc0SJason Evans 			 * Zero the trailing bytes of the original allocation's
3060ba4f5cc0SJason Evans 			 * last page, since they are in an indeterminate state.
3061ba4f5cc0SJason Evans 			 * There will always be trailing bytes, because ptr's
3062ba4f5cc0SJason Evans 			 * offset from the beginning of the run is a multiple of
3063ba4f5cc0SJason Evans 			 * CACHELINE in [0 .. PAGE).
3064ba4f5cc0SJason Evans 			 */
3065ba4f5cc0SJason Evans 			void *zbase = (void *)((uintptr_t)ptr + oldsize);
3066ba4f5cc0SJason Evans 			void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase +
3067ba4f5cc0SJason Evans 			    PAGE));
3068ba4f5cc0SJason Evans 			size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase;
3069ba4f5cc0SJason Evans 			assert(nzero > 0);
3070ba4f5cc0SJason Evans 			memset(zbase, 0, nzero);
3071ba4f5cc0SJason Evans 		}
3072ba4f5cc0SJason Evans 
3073a4bd5210SJason Evans 		size = oldsize + splitsize;
3074d0e79aa3SJason Evans 		npages = (size + large_pad) >> LG_PAGE;
3075a4bd5210SJason Evans 
3076a4bd5210SJason Evans 		/*
3077a4bd5210SJason Evans 		 * Mark the extended run as dirty if either portion of the run
3078a4bd5210SJason Evans 		 * was dirty before allocation.  This is rather pedantic,
3079a4bd5210SJason Evans 		 * because there's not actually any sequence of events that
3080a4bd5210SJason Evans 		 * could cause the resulting run to be passed to
3081a4bd5210SJason Evans 		 * arena_run_dalloc() with the dirty argument set to false
3082a4bd5210SJason Evans 		 * (which is when dirty flag consistency would really matter).
3083a4bd5210SJason Evans 		 */
3084e722f8f8SJason Evans 		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
3085e722f8f8SJason Evans 		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
3086d0e79aa3SJason Evans 		flag_unzeroed_mask = flag_dirty == 0 ? CHUNK_MAP_UNZEROED : 0;
3087d0e79aa3SJason Evans 		arena_mapbits_large_set(chunk, pageind, size + large_pad,
3088d0e79aa3SJason Evans 		    flag_dirty | (flag_unzeroed_mask &
3089d0e79aa3SJason Evans 		    arena_mapbits_unzeroed_get(chunk, pageind)));
3090d0e79aa3SJason Evans 		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty |
3091d0e79aa3SJason Evans 		    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
3092d0e79aa3SJason Evans 		    pageind+npages-1)));
3093a4bd5210SJason Evans 
3094a4bd5210SJason Evans 		if (config_stats) {
3095536b3538SJason Evans 			szind_t oldindex = size2index(oldsize) - NBINS;
3096536b3538SJason Evans 			szind_t index = size2index(size) - NBINS;
3097d0e79aa3SJason Evans 
3098a4bd5210SJason Evans 			arena->stats.ndalloc_large++;
3099a4bd5210SJason Evans 			arena->stats.allocated_large -= oldsize;
3100d0e79aa3SJason Evans 			arena->stats.lstats[oldindex].ndalloc++;
3101d0e79aa3SJason Evans 			arena->stats.lstats[oldindex].curruns--;
3102a4bd5210SJason Evans 
3103a4bd5210SJason Evans 			arena->stats.nmalloc_large++;
3104a4bd5210SJason Evans 			arena->stats.nrequests_large++;
3105a4bd5210SJason Evans 			arena->stats.allocated_large += size;
3106d0e79aa3SJason Evans 			arena->stats.lstats[index].nmalloc++;
3107d0e79aa3SJason Evans 			arena->stats.lstats[index].nrequests++;
3108d0e79aa3SJason Evans 			arena->stats.lstats[index].curruns++;
3109a4bd5210SJason Evans 		}
31101f0a49e8SJason Evans 		malloc_mutex_unlock(tsdn, &arena->lock);
3111a4bd5210SJason Evans 		return (false);
3112a4bd5210SJason Evans 	}
3113536b3538SJason Evans label_fail:
31141f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
3115a4bd5210SJason Evans 	return (true);
3116a4bd5210SJason Evans }
3117a4bd5210SJason Evans 
3118f921d10fSJason Evans #ifdef JEMALLOC_JET
3119f921d10fSJason Evans #undef arena_ralloc_junk_large
31201f0a49e8SJason Evans #define	arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large)
3121f921d10fSJason Evans #endif
3122f921d10fSJason Evans static void
3123f921d10fSJason Evans arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
3124f921d10fSJason Evans {
3125f921d10fSJason Evans 
3126d0e79aa3SJason Evans 	if (config_fill && unlikely(opt_junk_free)) {
31271f0a49e8SJason Evans 		memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK,
3128f921d10fSJason Evans 		    old_usize - usize);
3129f921d10fSJason Evans 	}
3130f921d10fSJason Evans }
3131f921d10fSJason Evans #ifdef JEMALLOC_JET
3132f921d10fSJason Evans #undef arena_ralloc_junk_large
3133f921d10fSJason Evans #define	arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
3134f921d10fSJason Evans arena_ralloc_junk_large_t *arena_ralloc_junk_large =
31351f0a49e8SJason Evans     JEMALLOC_N(n_arena_ralloc_junk_large);
3136f921d10fSJason Evans #endif
3137f921d10fSJason Evans 
3138a4bd5210SJason Evans /*
3139a4bd5210SJason Evans  * Try to resize a large allocation, in order to avoid copying.  This will
3140a4bd5210SJason Evans  * always fail when growing an object if the following run is already in use.
3141a4bd5210SJason Evans  */
3142a4bd5210SJason Evans static bool
31431f0a49e8SJason Evans arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min,
3144536b3538SJason Evans     size_t usize_max, bool zero)
3145a4bd5210SJason Evans {
3146a4bd5210SJason Evans 	arena_chunk_t *chunk;
3147a4bd5210SJason Evans 	arena_t *arena;
3148a4bd5210SJason Evans 
3149536b3538SJason Evans 	if (oldsize == usize_max) {
3150536b3538SJason Evans 		/* Current size class is compatible and maximal. */
3151536b3538SJason Evans 		return (false);
3152536b3538SJason Evans 	}
3153536b3538SJason Evans 
3154a4bd5210SJason Evans 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
3155d0e79aa3SJason Evans 	arena = extent_node_arena_get(&chunk->node);
3156a4bd5210SJason Evans 
3157536b3538SJason Evans 	if (oldsize < usize_max) {
31581f0a49e8SJason Evans 		bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr,
31591f0a49e8SJason Evans 		    oldsize, usize_min, usize_max, zero);
3160d0e79aa3SJason Evans 		if (config_fill && !ret && !zero) {
3161d0e79aa3SJason Evans 			if (unlikely(opt_junk_alloc)) {
31621f0a49e8SJason Evans 				memset((void *)((uintptr_t)ptr + oldsize),
31631f0a49e8SJason Evans 				    JEMALLOC_ALLOC_JUNK,
31641f0a49e8SJason Evans 				    isalloc(tsdn, ptr, config_prof) - oldsize);
3165d0e79aa3SJason Evans 			} else if (unlikely(opt_zero)) {
3166536b3538SJason Evans 				memset((void *)((uintptr_t)ptr + oldsize), 0,
31671f0a49e8SJason Evans 				    isalloc(tsdn, ptr, config_prof) - oldsize);
3168f921d10fSJason Evans 			}
3169a4bd5210SJason Evans 		}
3170a4bd5210SJason Evans 		return (ret);
3171a4bd5210SJason Evans 	}
3172536b3538SJason Evans 
3173536b3538SJason Evans 	assert(oldsize > usize_max);
3174536b3538SJason Evans 	/* Fill before shrinking, in order to avoid a race. */
3175536b3538SJason Evans 	arena_ralloc_junk_large(ptr, oldsize, usize_max);
31761f0a49e8SJason Evans 	arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max);
3177536b3538SJason Evans 	return (false);
3178a4bd5210SJason Evans }
3179a4bd5210SJason Evans 
3180f921d10fSJason Evans bool
31811f0a49e8SJason Evans arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
3182df0d881dSJason Evans     size_t extra, bool zero)
3183a4bd5210SJason Evans {
3184536b3538SJason Evans 	size_t usize_min, usize_max;
3185a4bd5210SJason Evans 
3186df0d881dSJason Evans 	/* Callers passing non-zero extra must have already clamped it. */
3187df0d881dSJason Evans 	assert(extra == 0 || size + extra <= HUGE_MAXCLASS);
3188df0d881dSJason Evans 
3189df0d881dSJason Evans 	if (unlikely(size > HUGE_MAXCLASS))
3190df0d881dSJason Evans 		return (true);
3191df0d881dSJason Evans 
3192536b3538SJason Evans 	usize_min = s2u(size);
3193536b3538SJason Evans 	usize_max = s2u(size + extra);
3194536b3538SJason Evans 	if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) {
3195df0d881dSJason Evans 		arena_chunk_t *chunk;
3196df0d881dSJason Evans 
3197a4bd5210SJason Evans 		/*
3198d0e79aa3SJason Evans 		 * Avoid moving the allocation if the size class can be left the
3199d0e79aa3SJason Evans 		 * same.
3200a4bd5210SJason Evans 		 */
3201a4bd5210SJason Evans 		if (oldsize <= SMALL_MAXCLASS) {
3202536b3538SJason Evans 			assert(arena_bin_info[size2index(oldsize)].reg_size ==
3203536b3538SJason Evans 			    oldsize);
3204df0d881dSJason Evans 			if ((usize_max > SMALL_MAXCLASS ||
3205df0d881dSJason Evans 			    size2index(usize_max) != size2index(oldsize)) &&
3206df0d881dSJason Evans 			    (size > oldsize || usize_max < oldsize))
3207f921d10fSJason Evans 				return (true);
3208536b3538SJason Evans 		} else {
3209df0d881dSJason Evans 			if (usize_max <= SMALL_MAXCLASS)
3210df0d881dSJason Evans 				return (true);
32111f0a49e8SJason Evans 			if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min,
3212df0d881dSJason Evans 			    usize_max, zero))
3213df0d881dSJason Evans 				return (true);
3214df0d881dSJason Evans 		}
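		/*
		 * Worked example for the small-class branch above
		 * (hypothetical sizes, assuming the default size classes):
		 * with oldsize == 32 and size == 20, usize_max == s2u(20) ==
		 * 32, so the size class is unchanged and the allocation stays
		 * in place; with oldsize == 32 and size == 40, usize_max ==
		 * s2u(40) == 48 maps to a different bin and size > oldsize,
		 * so true is returned and the caller must move the object.
		 */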
3215df0d881dSJason Evans 
3216df0d881dSJason Evans 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
32171f0a49e8SJason Evans 		arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node));
3218df0d881dSJason Evans 		return (false);
3219df0d881dSJason Evans 	} else {
32201f0a49e8SJason Evans 		return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min,
3221df0d881dSJason Evans 		    usize_max, zero));
3222536b3538SJason Evans 	}
3223536b3538SJason Evans }
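
/*
 * Illustrative caller-side sketch (added for exposition; not part of the
 * original source): a resize-in-place path such as ixalloc() is expected to
 * use arena_ralloc_no_move() roughly as follows, with hypothetical sizes:
 *
 *	if (!arena_ralloc_no_move(tsdn, ptr, old_usize, new_size, 0, false))
 *		... resized in place; ptr remains valid ...
 *	else
 *		... fall back to arena_ralloc(), which allocates and copies ...
 */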
3224536b3538SJason Evans 
3225536b3538SJason Evans static void *
32261f0a49e8SJason Evans arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
3227536b3538SJason Evans     size_t alignment, bool zero, tcache_t *tcache)
3228536b3538SJason Evans {
3229536b3538SJason Evans 
3230536b3538SJason Evans 	if (alignment == 0)
32311f0a49e8SJason Evans 		return (arena_malloc(tsdn, arena, usize, size2index(usize),
32321f0a49e8SJason Evans 		    zero, tcache, true));
3233536b3538SJason Evans 	usize = sa2u(usize, alignment);
3234df0d881dSJason Evans 	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
3235536b3538SJason Evans 		return (NULL);
32361f0a49e8SJason Evans 	return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
3237a4bd5210SJason Evans }
3238a4bd5210SJason Evans 
3239a4bd5210SJason Evans void *
3240d0e79aa3SJason Evans arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size,
3241536b3538SJason Evans     size_t alignment, bool zero, tcache_t *tcache)
3242a4bd5210SJason Evans {
3243a4bd5210SJason Evans 	void *ret;
3244536b3538SJason Evans 	size_t usize;
3245d0e79aa3SJason Evans 
3246536b3538SJason Evans 	usize = s2u(size);
3247df0d881dSJason Evans 	if (unlikely(usize == 0 || size > HUGE_MAXCLASS))
3248536b3538SJason Evans 		return (NULL);
3249536b3538SJason Evans 
3250536b3538SJason Evans 	if (likely(usize <= large_maxclass)) {
3251a4bd5210SJason Evans 		size_t copysize;
3252a4bd5210SJason Evans 
3253a4bd5210SJason Evans 		/* Try to avoid moving the allocation. */
32541f0a49e8SJason Evans 		if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0,
32551f0a49e8SJason Evans 		    zero))
3256f921d10fSJason Evans 			return (ptr);
3257a4bd5210SJason Evans 
3258a4bd5210SJason Evans 		/*
3259d0e79aa3SJason Evans 		 * size and oldsize are different enough that we need to move
3260d0e79aa3SJason Evans 		 * the object.  In that case, fall back to allocating new space
3261d0e79aa3SJason Evans 		 * and copying.
3262a4bd5210SJason Evans 		 */
32631f0a49e8SJason Evans 		ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize,
32641f0a49e8SJason Evans 		    alignment, zero, tcache);
3265a4bd5210SJason Evans 		if (ret == NULL)
3266a4bd5210SJason Evans 			return (NULL);
3267a4bd5210SJason Evans 
3268d0e79aa3SJason Evans 		/*
3269d0e79aa3SJason Evans 		 * Junk/zero filling was already done by
3270d0e79aa3SJason Evans 		 * ipalloc()/arena_malloc().
3271d0e79aa3SJason Evans 		 */
3272a4bd5210SJason Evans 
3273536b3538SJason Evans 		copysize = (usize < oldsize) ? usize : oldsize;
3274d0e79aa3SJason Evans 		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
3275a4bd5210SJason Evans 		memcpy(ret, ptr, copysize);
32761f0a49e8SJason Evans 		isqalloc(tsd, ptr, oldsize, tcache, true);
3277d0e79aa3SJason Evans 	} else {
3278536b3538SJason Evans 		ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment,
3279536b3538SJason Evans 		    zero, tcache);
3280d0e79aa3SJason Evans 	}
3281a4bd5210SJason Evans 	return (ret);
3282a4bd5210SJason Evans }
3283a4bd5210SJason Evans 
328482872ac0SJason Evans dss_prec_t
32851f0a49e8SJason Evans arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena)
328682872ac0SJason Evans {
328782872ac0SJason Evans 	dss_prec_t ret;
328882872ac0SJason Evans 
32891f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
329082872ac0SJason Evans 	ret = arena->dss_prec;
32911f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
329282872ac0SJason Evans 	return (ret);
329382872ac0SJason Evans }
329482872ac0SJason Evans 
3295d0e79aa3SJason Evans bool
32961f0a49e8SJason Evans arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec)
329782872ac0SJason Evans {
329882872ac0SJason Evans 
3299d0e79aa3SJason Evans 	if (!have_dss)
3300d0e79aa3SJason Evans 		return (dss_prec != dss_prec_disabled);
33011f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
330282872ac0SJason Evans 	arena->dss_prec = dss_prec;
33031f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
3304d0e79aa3SJason Evans 	return (false);
3305d0e79aa3SJason Evans }
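
/*
 * Illustrative usage (added for exposition): these accessors back the
 * "arena.<i>.dss" mallctl.  A hypothetical caller switching arena 0 to prefer
 * sbrk()-based allocation might do:
 *
 *	const char *dss = "primary";
 *	mallctl("arena.0.dss", NULL, NULL, (void *)&dss, sizeof(dss));
 */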
3306d0e79aa3SJason Evans 
3307d0e79aa3SJason Evans ssize_t
3308d0e79aa3SJason Evans arena_lg_dirty_mult_default_get(void)
3309d0e79aa3SJason Evans {
3310d0e79aa3SJason Evans 
3311d0e79aa3SJason Evans 	return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default));
3312d0e79aa3SJason Evans }
3313d0e79aa3SJason Evans 
3314d0e79aa3SJason Evans bool
3315d0e79aa3SJason Evans arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult)
3316d0e79aa3SJason Evans {
3317d0e79aa3SJason Evans 
3318df0d881dSJason Evans 	if (opt_purge != purge_mode_ratio)
3319df0d881dSJason Evans 		return (true);
3320d0e79aa3SJason Evans 	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
3321d0e79aa3SJason Evans 		return (true);
3322d0e79aa3SJason Evans 	atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult);
3323d0e79aa3SJason Evans 	return (false);
332482872ac0SJason Evans }
332582872ac0SJason Evans 
3326df0d881dSJason Evans ssize_t
3327df0d881dSJason Evans arena_decay_time_default_get(void)
3328df0d881dSJason Evans {
3329df0d881dSJason Evans 
3330df0d881dSJason Evans 	return ((ssize_t)atomic_read_z((size_t *)&decay_time_default));
3331df0d881dSJason Evans }
3332df0d881dSJason Evans 
3333df0d881dSJason Evans bool
3334df0d881dSJason Evans arena_decay_time_default_set(ssize_t decay_time)
3335df0d881dSJason Evans {
3336df0d881dSJason Evans 
3337df0d881dSJason Evans 	if (opt_purge != purge_mode_decay)
3338df0d881dSJason Evans 		return (true);
3339df0d881dSJason Evans 	if (!arena_decay_time_valid(decay_time))
3340df0d881dSJason Evans 		return (true);
3341df0d881dSJason Evans 	atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time);
3342df0d881dSJason Evans 	return (false);
3343df0d881dSJason Evans }
3344df0d881dSJason Evans 
3345df0d881dSJason Evans static void
3346df0d881dSJason Evans arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads,
3347df0d881dSJason Evans     const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
3348df0d881dSJason Evans     size_t *nactive, size_t *ndirty)
3349df0d881dSJason Evans {
3350df0d881dSJason Evans 
33511f0a49e8SJason Evans 	*nthreads += arena_nthreads_get(arena, false);
3352df0d881dSJason Evans 	*dss = dss_prec_names[arena->dss_prec];
3353df0d881dSJason Evans 	*lg_dirty_mult = arena->lg_dirty_mult;
3354*bde95144SJason Evans 	*decay_time = arena->decay.time;
3355df0d881dSJason Evans 	*nactive += arena->nactive;
3356df0d881dSJason Evans 	*ndirty += arena->ndirty;
3357df0d881dSJason Evans }
3358df0d881dSJason Evans 
335982872ac0SJason Evans void
33601f0a49e8SJason Evans arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
33611f0a49e8SJason Evans     const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
33621f0a49e8SJason Evans     size_t *nactive, size_t *ndirty)
3363df0d881dSJason Evans {
3364df0d881dSJason Evans 
33651f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
3366df0d881dSJason Evans 	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3367df0d881dSJason Evans 	    decay_time, nactive, ndirty);
33681f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
3369df0d881dSJason Evans }
3370df0d881dSJason Evans 
3371df0d881dSJason Evans void
33721f0a49e8SJason Evans arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
33731f0a49e8SJason Evans     const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time,
33741f0a49e8SJason Evans     size_t *nactive, size_t *ndirty, arena_stats_t *astats,
33751f0a49e8SJason Evans     malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats,
33761f0a49e8SJason Evans     malloc_huge_stats_t *hstats)
337782872ac0SJason Evans {
337882872ac0SJason Evans 	unsigned i;
337982872ac0SJason Evans 
3380df0d881dSJason Evans 	cassert(config_stats);
3381df0d881dSJason Evans 
33821f0a49e8SJason Evans 	malloc_mutex_lock(tsdn, &arena->lock);
3383df0d881dSJason Evans 	arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult,
3384df0d881dSJason Evans 	    decay_time, nactive, ndirty);
338582872ac0SJason Evans 
338682872ac0SJason Evans 	astats->mapped += arena->stats.mapped;
33871f0a49e8SJason Evans 	astats->retained += arena->stats.retained;
338882872ac0SJason Evans 	astats->npurge += arena->stats.npurge;
338982872ac0SJason Evans 	astats->nmadvise += arena->stats.nmadvise;
339082872ac0SJason Evans 	astats->purged += arena->stats.purged;
3391d0e79aa3SJason Evans 	astats->metadata_mapped += arena->stats.metadata_mapped;
3392d0e79aa3SJason Evans 	astats->metadata_allocated += arena_metadata_allocated_get(arena);
339382872ac0SJason Evans 	astats->allocated_large += arena->stats.allocated_large;
339482872ac0SJason Evans 	astats->nmalloc_large += arena->stats.nmalloc_large;
339582872ac0SJason Evans 	astats->ndalloc_large += arena->stats.ndalloc_large;
339682872ac0SJason Evans 	astats->nrequests_large += arena->stats.nrequests_large;
3397d0e79aa3SJason Evans 	astats->allocated_huge += arena->stats.allocated_huge;
3398d0e79aa3SJason Evans 	astats->nmalloc_huge += arena->stats.nmalloc_huge;
3399d0e79aa3SJason Evans 	astats->ndalloc_huge += arena->stats.ndalloc_huge;
340082872ac0SJason Evans 
340182872ac0SJason Evans 	for (i = 0; i < nlclasses; i++) {
340282872ac0SJason Evans 		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
340382872ac0SJason Evans 		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
340482872ac0SJason Evans 		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
340582872ac0SJason Evans 		lstats[i].curruns += arena->stats.lstats[i].curruns;
340682872ac0SJason Evans 	}
3407d0e79aa3SJason Evans 
3408d0e79aa3SJason Evans 	for (i = 0; i < nhclasses; i++) {
3409d0e79aa3SJason Evans 		hstats[i].nmalloc += arena->stats.hstats[i].nmalloc;
3410d0e79aa3SJason Evans 		hstats[i].ndalloc += arena->stats.hstats[i].ndalloc;
3411d0e79aa3SJason Evans 		hstats[i].curhchunks += arena->stats.hstats[i].curhchunks;
3412d0e79aa3SJason Evans 	}
34131f0a49e8SJason Evans 	malloc_mutex_unlock(tsdn, &arena->lock);
341482872ac0SJason Evans 
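	/*
	 * Bin stats are protected by per-bin locks, so arena->lock need not be
	 * held while merging them.
	 */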
341582872ac0SJason Evans 	for (i = 0; i < NBINS; i++) {
341682872ac0SJason Evans 		arena_bin_t *bin = &arena->bins[i];
341782872ac0SJason Evans 
34181f0a49e8SJason Evans 		malloc_mutex_lock(tsdn, &bin->lock);
341982872ac0SJason Evans 		bstats[i].nmalloc += bin->stats.nmalloc;
342082872ac0SJason Evans 		bstats[i].ndalloc += bin->stats.ndalloc;
342182872ac0SJason Evans 		bstats[i].nrequests += bin->stats.nrequests;
3422d0e79aa3SJason Evans 		bstats[i].curregs += bin->stats.curregs;
342382872ac0SJason Evans 		if (config_tcache) {
342482872ac0SJason Evans 			bstats[i].nfills += bin->stats.nfills;
342582872ac0SJason Evans 			bstats[i].nflushes += bin->stats.nflushes;
342682872ac0SJason Evans 		}
342782872ac0SJason Evans 		bstats[i].nruns += bin->stats.nruns;
342882872ac0SJason Evans 		bstats[i].reruns += bin->stats.reruns;
342982872ac0SJason Evans 		bstats[i].curruns += bin->stats.curruns;
34301f0a49e8SJason Evans 		malloc_mutex_unlock(tsdn, &bin->lock);
343182872ac0SJason Evans 	}
343282872ac0SJason Evans }
343382872ac0SJason Evans 
3434df0d881dSJason Evans unsigned
34351f0a49e8SJason Evans arena_nthreads_get(arena_t *arena, bool internal)
3436df0d881dSJason Evans {
3437df0d881dSJason Evans 
34381f0a49e8SJason Evans 	return (atomic_read_u(&arena->nthreads[internal]));
3439df0d881dSJason Evans }
3440df0d881dSJason Evans 
3441df0d881dSJason Evans void
34421f0a49e8SJason Evans arena_nthreads_inc(arena_t *arena, bool internal)
3443df0d881dSJason Evans {
3444df0d881dSJason Evans 
34451f0a49e8SJason Evans 	atomic_add_u(&arena->nthreads[internal], 1);
3446df0d881dSJason Evans }
3447df0d881dSJason Evans 
3448df0d881dSJason Evans void
34491f0a49e8SJason Evans arena_nthreads_dec(arena_t *arena, bool internal)
3450df0d881dSJason Evans {
3451df0d881dSJason Evans 
34521f0a49e8SJason Evans 	atomic_sub_u(&arena->nthreads[internal], 1);
3453df0d881dSJason Evans }
3454df0d881dSJason Evans 
3455d0e79aa3SJason Evans arena_t *
34561f0a49e8SJason Evans arena_new(tsdn_t *tsdn, unsigned ind)
3457a4bd5210SJason Evans {
3458d0e79aa3SJason Evans 	arena_t *arena;
3459a4bd5210SJason Evans 	unsigned i;
3460a4bd5210SJason Evans 
3461d0e79aa3SJason Evans 	/*
3462d0e79aa3SJason Evans 	 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly
3463d0e79aa3SJason Evans 	 * because there is no way to clean up if base_alloc() OOMs.
3464d0e79aa3SJason Evans 	 */
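	/*
	 * Resulting layout when config_stats is enabled (computed below):
	 *
	 *	arena_t					(padded to a cacheline)
	 *	malloc_large_stats_t lstats[nlclasses]	(padded to a quantum)
	 *	malloc_huge_stats_t hstats[nhclasses]
	 */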
3465d0e79aa3SJason Evans 	if (config_stats) {
34661f0a49e8SJason Evans 		arena = (arena_t *)base_alloc(tsdn,
3467*bde95144SJason Evans 		    CACHELINE_CEILING(sizeof(arena_t)) +
3468*bde95144SJason Evans 		    QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t)))
3469*bde95144SJason Evans 		    + (nhclasses * sizeof(malloc_huge_stats_t)));
3470d0e79aa3SJason Evans 	} else
3471*bde95144SJason Evans 		arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t));
3472d0e79aa3SJason Evans 	if (arena == NULL)
3473d0e79aa3SJason Evans 		return (NULL);
3474d0e79aa3SJason Evans 
3475a4bd5210SJason Evans 	arena->ind = ind;
34761f0a49e8SJason Evans 	arena->nthreads[0] = arena->nthreads[1] = 0;
34771f0a49e8SJason Evans 	if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA))
3478d0e79aa3SJason Evans 		return (NULL);
3479a4bd5210SJason Evans 
3480a4bd5210SJason Evans 	if (config_stats) {
3481a4bd5210SJason Evans 		memset(&arena->stats, 0, sizeof(arena_stats_t));
3482d0e79aa3SJason Evans 		arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena
3483*bde95144SJason Evans 		    + CACHELINE_CEILING(sizeof(arena_t)));
3484a4bd5210SJason Evans 		memset(arena->stats.lstats, 0, nlclasses *
3485a4bd5210SJason Evans 		    sizeof(malloc_large_stats_t));
3486d0e79aa3SJason Evans 		arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena
3487*bde95144SJason Evans 		    + CACHELINE_CEILING(sizeof(arena_t)) +
3488d0e79aa3SJason Evans 		    QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t)));
3489d0e79aa3SJason Evans 		memset(arena->stats.hstats, 0, nhclasses *
3490d0e79aa3SJason Evans 		    sizeof(malloc_huge_stats_t));
3491a4bd5210SJason Evans 		if (config_tcache)
3492a4bd5210SJason Evans 			ql_new(&arena->tcache_ql);
3493a4bd5210SJason Evans 	}
3494a4bd5210SJason Evans 
3495a4bd5210SJason Evans 	if (config_prof)
3496a4bd5210SJason Evans 		arena->prof_accumbytes = 0;
3497a4bd5210SJason Evans 
3498d0e79aa3SJason Evans 	if (config_cache_oblivious) {
3499d0e79aa3SJason Evans 		/*
3500d0e79aa3SJason Evans 		 * A nondeterministic seed based on the address of arena reduces
3501d0e79aa3SJason Evans 		 * the likelihood of lockstep non-uniform cache index
3502d0e79aa3SJason Evans 		 * utilization among identical concurrent processes, but at the
3503d0e79aa3SJason Evans 		 * cost of test repeatability.  For debug builds, instead use a
3504d0e79aa3SJason Evans 		 * deterministic seed.
3505d0e79aa3SJason Evans 		 */
3506d0e79aa3SJason Evans 		arena->offset_state = config_debug ? ind :
3507*bde95144SJason Evans 		    (size_t)(uintptr_t)arena;
3508d0e79aa3SJason Evans 	}
3509d0e79aa3SJason Evans 
3510*bde95144SJason Evans 	arena->dss_prec = chunk_dss_prec_get();
35111f0a49e8SJason Evans 
35121f0a49e8SJason Evans 	ql_new(&arena->achunks);
351382872ac0SJason Evans 
3514a4bd5210SJason Evans 	arena->spare = NULL;
3515a4bd5210SJason Evans 
3516d0e79aa3SJason Evans 	arena->lg_dirty_mult = arena_lg_dirty_mult_default_get();
3517d0e79aa3SJason Evans 	arena->purging = false;
3518a4bd5210SJason Evans 	arena->nactive = 0;
3519a4bd5210SJason Evans 	arena->ndirty = 0;
3520a4bd5210SJason Evans 
3521*bde95144SJason Evans 	for (i = 0; i < NPSIZES; i++)
35221f0a49e8SJason Evans 		arena_run_heap_new(&arena->runs_avail[i]);
3523*bde95144SJason Evans 
3524d0e79aa3SJason Evans 	qr_new(&arena->runs_dirty, rd_link);
3525d0e79aa3SJason Evans 	qr_new(&arena->chunks_cache, cc_link);
3526d0e79aa3SJason Evans 
3527df0d881dSJason Evans 	if (opt_purge == purge_mode_decay)
3528df0d881dSJason Evans 		arena_decay_init(arena, arena_decay_time_default_get());
3529df0d881dSJason Evans 
3530d0e79aa3SJason Evans 	ql_new(&arena->huge);
35311f0a49e8SJason Evans 	if (malloc_mutex_init(&arena->huge_mtx, "arena_huge",
35321f0a49e8SJason Evans 	    WITNESS_RANK_ARENA_HUGE))
3533d0e79aa3SJason Evans 		return (NULL);
3534d0e79aa3SJason Evans 
3535d0e79aa3SJason Evans 	extent_tree_szad_new(&arena->chunks_szad_cached);
3536d0e79aa3SJason Evans 	extent_tree_ad_new(&arena->chunks_ad_cached);
3537d0e79aa3SJason Evans 	extent_tree_szad_new(&arena->chunks_szad_retained);
3538d0e79aa3SJason Evans 	extent_tree_ad_new(&arena->chunks_ad_retained);
35391f0a49e8SJason Evans 	if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks",
35401f0a49e8SJason Evans 	    WITNESS_RANK_ARENA_CHUNKS))
3541d0e79aa3SJason Evans 		return (NULL);
3542d0e79aa3SJason Evans 	ql_new(&arena->node_cache);
35431f0a49e8SJason Evans 	if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache",
35441f0a49e8SJason Evans 	    WITNESS_RANK_ARENA_NODE_CACHE))
3545d0e79aa3SJason Evans 		return (NULL);
3546d0e79aa3SJason Evans 
3547d0e79aa3SJason Evans 	arena->chunk_hooks = chunk_hooks_default;
3548a4bd5210SJason Evans 
3549a4bd5210SJason Evans 	/* Initialize bins. */
3550a4bd5210SJason Evans 	for (i = 0; i < NBINS; i++) {
35511f0a49e8SJason Evans 		arena_bin_t *bin = &arena->bins[i];
35521f0a49e8SJason Evans 		if (malloc_mutex_init(&bin->lock, "arena_bin",
35531f0a49e8SJason Evans 		    WITNESS_RANK_ARENA_BIN))
3554d0e79aa3SJason Evans 			return (NULL);
3555a4bd5210SJason Evans 		bin->runcur = NULL;
35561f0a49e8SJason Evans 		arena_run_heap_new(&bin->runs);
3557a4bd5210SJason Evans 		if (config_stats)
3558a4bd5210SJason Evans 			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
3559a4bd5210SJason Evans 	}
3560a4bd5210SJason Evans 
3561d0e79aa3SJason Evans 	return (arena);
3562a4bd5210SJason Evans }
3563a4bd5210SJason Evans 
3564a4bd5210SJason Evans /*
3565a4bd5210SJason Evans  * Calculate bin_info->run_size such that it meets the following constraints:
3566a4bd5210SJason Evans  *
3567d0e79aa3SJason Evans  *   *) bin_info->run_size <= arena_maxrun
3568a4bd5210SJason Evans  *   *) bin_info->nregs <= RUN_MAXREGS
3569a4bd5210SJason Evans  *
3570d0e79aa3SJason Evans  * bin_info->nregs and bin_info->reg0_offset are also calculated here, since
3571d0e79aa3SJason Evans  * these settings are all interdependent.
3572a4bd5210SJason Evans  */
3573d0e79aa3SJason Evans static void
3574d0e79aa3SJason Evans bin_info_run_size_calc(arena_bin_info_t *bin_info)
3575a4bd5210SJason Evans {
3576a4bd5210SJason Evans 	size_t pad_size;
3577d0e79aa3SJason Evans 	size_t try_run_size, perfect_run_size, actual_run_size;
3578d0e79aa3SJason Evans 	uint32_t try_nregs, perfect_nregs, actual_nregs;
3579a4bd5210SJason Evans 
3580a4bd5210SJason Evans 	/*
3581a4bd5210SJason Evans 	 * Determine redzone size based on minimum alignment and minimum
3582a4bd5210SJason Evans 	 * redzone size.  Add padding to the end of the run if it is needed to
3583a4bd5210SJason Evans 	 * align the regions.  The padding allows each redzone to be half the
3584a4bd5210SJason Evans 	 * minimum alignment; without the padding, each redzone would have to
3585a4bd5210SJason Evans 	 * be twice as large in order to maintain alignment.
3586a4bd5210SJason Evans 	 */
3587d0e79aa3SJason Evans 	if (config_fill && unlikely(opt_redzone)) {
3588df0d881dSJason Evans 		size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1);
3589a4bd5210SJason Evans 		if (align_min <= REDZONE_MINSIZE) {
3590a4bd5210SJason Evans 			bin_info->redzone_size = REDZONE_MINSIZE;
3591a4bd5210SJason Evans 			pad_size = 0;
3592a4bd5210SJason Evans 		} else {
3593a4bd5210SJason Evans 			bin_info->redzone_size = align_min >> 1;
3594a4bd5210SJason Evans 			pad_size = bin_info->redzone_size;
3595a4bd5210SJason Evans 		}
3596a4bd5210SJason Evans 	} else {
3597a4bd5210SJason Evans 		bin_info->redzone_size = 0;
3598a4bd5210SJason Evans 		pad_size = 0;
3599a4bd5210SJason Evans 	}
3600a4bd5210SJason Evans 	bin_info->reg_interval = bin_info->reg_size +
3601a4bd5210SJason Evans 	    (bin_info->redzone_size << 1);
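	/*
	 * Worked example (hypothetical, assuming REDZONE_MINSIZE == 16 and
	 * opt_redzone enabled): for reg_size == 192 the minimum alignment is
	 * 64, so redzone_size == 32, pad_size == 32, and reg_interval == 192 +
	 * 2*32 == 256.
	 */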
3602a4bd5210SJason Evans 
3603a4bd5210SJason Evans 	/*
3604d0e79aa3SJason Evans 	 * Compute run size under ideal conditions (no redzones, no limit on run
3605d0e79aa3SJason Evans 	 * size).
3606a4bd5210SJason Evans 	 */
3607d0e79aa3SJason Evans 	try_run_size = PAGE;
3608df0d881dSJason Evans 	try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
3609a4bd5210SJason Evans 	do {
3610d0e79aa3SJason Evans 		perfect_run_size = try_run_size;
3611d0e79aa3SJason Evans 		perfect_nregs = try_nregs;
3612a4bd5210SJason Evans 
3613a4bd5210SJason Evans 		try_run_size += PAGE;
3614df0d881dSJason Evans 		try_nregs = (uint32_t)(try_run_size / bin_info->reg_size);
3615d0e79aa3SJason Evans 	} while (perfect_run_size != perfect_nregs * bin_info->reg_size);
3616d0e79aa3SJason Evans 	assert(perfect_nregs <= RUN_MAXREGS);
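	/*
	 * Worked example (hypothetical, assuming 4 KiB pages): for reg_size ==
	 * 80, the loop above stops at the smallest multiple of PAGE that is
	 * also a multiple of reg_size, i.e. perfect_run_size == 20480 (5
	 * pages) and perfect_nregs == 256.
	 */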
3617a4bd5210SJason Evans 
3618d0e79aa3SJason Evans 	actual_run_size = perfect_run_size;
3619df0d881dSJason Evans 	actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3620df0d881dSJason Evans 	    bin_info->reg_interval);
3621d0e79aa3SJason Evans 
3622d0e79aa3SJason Evans 	/*
3623d0e79aa3SJason Evans 	 * Redzones can require enough padding that not even a single region can
3624d0e79aa3SJason Evans 	 * fit within the number of pages that would normally be dedicated to a
3625d0e79aa3SJason Evans 	 * run for this size class.  Increase the run size until at least one
3626d0e79aa3SJason Evans 	 * region fits.
3627d0e79aa3SJason Evans 	 */
3628d0e79aa3SJason Evans 	while (actual_nregs == 0) {
3629d0e79aa3SJason Evans 		assert(config_fill && unlikely(opt_redzone));
3630d0e79aa3SJason Evans 
3631d0e79aa3SJason Evans 		actual_run_size += PAGE;
3632df0d881dSJason Evans 		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3633df0d881dSJason Evans 		    bin_info->reg_interval);
3634d0e79aa3SJason Evans 	}
3635d0e79aa3SJason Evans 
3636d0e79aa3SJason Evans 	/*
3637d0e79aa3SJason Evans 	 * Make sure that the run will fit within an arena chunk.
3638d0e79aa3SJason Evans 	 */
3639d0e79aa3SJason Evans 	while (actual_run_size > arena_maxrun) {
3640d0e79aa3SJason Evans 		actual_run_size -= PAGE;
3641df0d881dSJason Evans 		actual_nregs = (uint32_t)((actual_run_size - pad_size) /
3642df0d881dSJason Evans 		    bin_info->reg_interval);
3643d0e79aa3SJason Evans 	}
3644d0e79aa3SJason Evans 	assert(actual_nregs > 0);
3645d0e79aa3SJason Evans 	assert(actual_run_size == s2u(actual_run_size));
3646a4bd5210SJason Evans 
3647a4bd5210SJason Evans 	/* Copy final settings. */
3648d0e79aa3SJason Evans 	bin_info->run_size = actual_run_size;
3649d0e79aa3SJason Evans 	bin_info->nregs = actual_nregs;
3650df0d881dSJason Evans 	bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs *
3651df0d881dSJason Evans 	    bin_info->reg_interval) - pad_size + bin_info->redzone_size);
3652d0e79aa3SJason Evans 
3653a4bd5210SJason Evans 	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
3654a4bd5210SJason Evans 	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
3655a4bd5210SJason Evans }
3656a4bd5210SJason Evans 
3657a4bd5210SJason Evans static void
3658a4bd5210SJason Evans bin_info_init(void)
3659a4bd5210SJason Evans {
3660a4bd5210SJason Evans 	arena_bin_info_t *bin_info;
3661a4bd5210SJason Evans 
3662d0e79aa3SJason Evans #define	BIN_INFO_INIT_bin_yes(index, size)				\
3663d0e79aa3SJason Evans 	bin_info = &arena_bin_info[index];				\
3664a4bd5210SJason Evans 	bin_info->reg_size = size;					\
3665d0e79aa3SJason Evans 	bin_info_run_size_calc(bin_info);				\
3666a4bd5210SJason Evans 	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
3667d0e79aa3SJason Evans #define	BIN_INFO_INIT_bin_no(index, size)
3668*bde95144SJason Evans #define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup)	\
3669d0e79aa3SJason Evans 	BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
3670a4bd5210SJason Evans 	SIZE_CLASSES
3671d0e79aa3SJason Evans #undef BIN_INFO_INIT_bin_yes
3672d0e79aa3SJason Evans #undef BIN_INFO_INIT_bin_no
3673d0e79aa3SJason Evans #undef SC
3674a4bd5210SJason Evans }
3675a4bd5210SJason Evans 
3676*bde95144SJason Evans void
3677a4bd5210SJason Evans arena_boot(void)
3678a4bd5210SJason Evans {
3679a4bd5210SJason Evans 	unsigned i;
3680a4bd5210SJason Evans 
3681d0e79aa3SJason Evans 	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
3682df0d881dSJason Evans 	arena_decay_time_default_set(opt_decay_time);
3683d0e79aa3SJason Evans 
3684a4bd5210SJason Evans 	/*
3685a4bd5210SJason Evans 	 * Compute the header size such that it is large enough to contain the
3686a4bd5210SJason Evans 	 * page map.  The page map is biased to omit entries for the header
3687a4bd5210SJason Evans 	 * itself, so some iteration is necessary to compute the map bias.
3688a4bd5210SJason Evans 	 *
3689a4bd5210SJason Evans 	 * 1) Compute safe header_size and map_bias values that include enough
3690a4bd5210SJason Evans 	 *    space for an unbiased page map.
3691a4bd5210SJason Evans 	 * 2) Refine map_bias based on (1) to omit the header pages in the page
3692a4bd5210SJason Evans 	 *    map.  The resulting map_bias may be one too small.
3693a4bd5210SJason Evans 	 * 3) Refine map_bias based on (2).  The result will be >= the result
3694a4bd5210SJason Evans 	 *    from (2), and will always be correct.
3695a4bd5210SJason Evans 	 */
3696a4bd5210SJason Evans 	map_bias = 0;
3697a4bd5210SJason Evans 	for (i = 0; i < 3; i++) {
3698536b3538SJason Evans 		size_t header_size = offsetof(arena_chunk_t, map_bits) +
3699d0e79aa3SJason Evans 		    ((sizeof(arena_chunk_map_bits_t) +
3700d0e79aa3SJason Evans 		    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
3701d0e79aa3SJason Evans 		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
3702a4bd5210SJason Evans 	}
3703a4bd5210SJason Evans 	assert(map_bias > 0);
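	/*
	 * In effect the loop above iterates the fixed-point equation
	 *
	 *	map_bias = ceil(header_size(map_bias) / PAGE)
	 *
	 * and, per the three-step argument above, three iterations always
	 * reach a correct fixed point.
	 */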
3704a4bd5210SJason Evans 
3705d0e79aa3SJason Evans 	map_misc_offset = offsetof(arena_chunk_t, map_bits) +
3706d0e79aa3SJason Evans 	    sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);
3707d0e79aa3SJason Evans 
3708d0e79aa3SJason Evans 	arena_maxrun = chunksize - (map_bias << LG_PAGE);
3709d0e79aa3SJason Evans 	assert(arena_maxrun > 0);
3710536b3538SJason Evans 	large_maxclass = index2size(size2index(chunksize)-1);
3711536b3538SJason Evans 	if (large_maxclass > arena_maxrun) {
3712d0e79aa3SJason Evans 		/*
3713d0e79aa3SJason Evans 		 * For small chunk sizes it's possible for there to be fewer
3714d0e79aa3SJason Evans 		 * non-header pages available than are necessary to serve the
3715d0e79aa3SJason Evans 		 * size classes just below chunksize.
3716d0e79aa3SJason Evans 		 */
3717536b3538SJason Evans 		large_maxclass = arena_maxrun;
3718d0e79aa3SJason Evans 	}
3719536b3538SJason Evans 	assert(large_maxclass > 0);
3720536b3538SJason Evans 	nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
3721d0e79aa3SJason Evans 	nhclasses = NSIZES - nlclasses - NBINS;
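	/*
	 * Illustrative values (assuming the common 4 KiB page / 2 MiB chunk
	 * configuration): large_maxclass is the size class just below
	 * chunksize, i.e. 1792 KiB, which fits within arena_maxrun (chunksize
	 * minus the header pages).
	 */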
3722a4bd5210SJason Evans 
3723a4bd5210SJason Evans 	bin_info_init();
3724a4bd5210SJason Evans }
3725a4bd5210SJason Evans 
3726a4bd5210SJason Evans void
37271f0a49e8SJason Evans arena_prefork0(tsdn_t *tsdn, arena_t *arena)
3728a4bd5210SJason Evans {
3729a4bd5210SJason Evans 
37301f0a49e8SJason Evans 	malloc_mutex_prefork(tsdn, &arena->lock);
3731a4bd5210SJason Evans }
3732a4bd5210SJason Evans 
3733a4bd5210SJason Evans void
37341f0a49e8SJason Evans arena_prefork1(tsdn_t *tsdn, arena_t *arena)
3735a4bd5210SJason Evans {
3736a4bd5210SJason Evans 
37371f0a49e8SJason Evans 	malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
3738a4bd5210SJason Evans }
3739a4bd5210SJason Evans 
3740a4bd5210SJason Evans void
37411f0a49e8SJason Evans arena_prefork2(tsdn_t *tsdn, arena_t *arena)
37421f0a49e8SJason Evans {
37431f0a49e8SJason Evans 
37441f0a49e8SJason Evans 	malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
37451f0a49e8SJason Evans }
37461f0a49e8SJason Evans 
37471f0a49e8SJason Evans void
37481f0a49e8SJason Evans arena_prefork3(tsdn_t *tsdn, arena_t *arena)
3749a4bd5210SJason Evans {
3750a4bd5210SJason Evans 	unsigned i;
3751a4bd5210SJason Evans 
3752a4bd5210SJason Evans 	for (i = 0; i < NBINS; i++)
37531f0a49e8SJason Evans 		malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
37541f0a49e8SJason Evans 	malloc_mutex_prefork(tsdn, &arena->huge_mtx);
37551f0a49e8SJason Evans }
37561f0a49e8SJason Evans 
37571f0a49e8SJason Evans void
37581f0a49e8SJason Evans arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
37591f0a49e8SJason Evans {
37601f0a49e8SJason Evans 	unsigned i;
37611f0a49e8SJason Evans 
37621f0a49e8SJason Evans 	malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
37631f0a49e8SJason Evans 	for (i = 0; i < NBINS; i++)
37641f0a49e8SJason Evans 		malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
37651f0a49e8SJason Evans 	malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
37661f0a49e8SJason Evans 	malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
37671f0a49e8SJason Evans 	malloc_mutex_postfork_parent(tsdn, &arena->lock);
37681f0a49e8SJason Evans }
37691f0a49e8SJason Evans 
37701f0a49e8SJason Evans void
37711f0a49e8SJason Evans arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
37721f0a49e8SJason Evans {
37731f0a49e8SJason Evans 	unsigned i;
37741f0a49e8SJason Evans 
37751f0a49e8SJason Evans 	malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
37761f0a49e8SJason Evans 	for (i = 0; i < NBINS; i++)
37771f0a49e8SJason Evans 		malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
37781f0a49e8SJason Evans 	malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
37791f0a49e8SJason Evans 	malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
37801f0a49e8SJason Evans 	malloc_mutex_postfork_child(tsdn, &arena->lock);
3781a4bd5210SJason Evans }
3782