xref: /freebsd/contrib/jemalloc/src/arena.c (revision e722f8f8ac59f5a163a06f2538f46efa42aedfb9)
#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
arena_bin_info_t	arena_bin_info[NBINS];

JEMALLOC_ALIGNED(CACHELINE)
const uint8_t	small_size2bin[] = {
#define	S2B_8(i)	i,
#define	S2B_16(i)	S2B_8(i) S2B_8(i)
#define	S2B_32(i)	S2B_16(i) S2B_16(i)
#define	S2B_64(i)	S2B_32(i) S2B_32(i)
#define	S2B_128(i)	S2B_64(i) S2B_64(i)
#define	S2B_256(i)	S2B_128(i) S2B_128(i)
#define	S2B_512(i)	S2B_256(i) S2B_256(i)
#define	S2B_1024(i)	S2B_512(i) S2B_512(i)
#define	S2B_2048(i)	S2B_1024(i) S2B_1024(i)
#define	S2B_4096(i)	S2B_2048(i) S2B_2048(i)
#define	S2B_8192(i)	S2B_4096(i) S2B_4096(i)
#define	SIZE_CLASS(bin, delta, size)					\
	S2B_##delta(bin)
	SIZE_CLASSES
#undef S2B_8
#undef S2B_16
#undef S2B_32
#undef S2B_64
#undef S2B_128
#undef S2B_256
#undef S2B_512
#undef S2B_1024
#undef S2B_2048
#undef S2B_4096
#undef S2B_8192
#undef SIZE_CLASS
};

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
    bool large, size_t binind, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
    size_t binind, bool zero);
static void	arena_purge(arena_t *arena, bool all);
static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
static void	arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize);
static void	arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t	*arena_bin_runs_first(arena_bin_t *bin);
static void	arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
static void	arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void	*arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static void	arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin);
static void	arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size);
static bool	arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool	arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
static size_t	bin_info_run_size_calc(arena_bin_info_t *bin_info,
    size_t min_run_size);
static void	bin_info_init(void);

/******************************************************************************/

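/*
 * Compare two run map elements by address, so that each bin's tree of
 * non-full runs (bin->runs) stays ordered by memory address.
 */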
static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	uintptr_t a_mapelm = (uintptr_t)a;
	uintptr_t b_mapelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
    u.rb_link, arena_run_comp)

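/*
 * Compare available runs primarily by size and secondarily by address.
 * CHUNK_MAP_KEY search keys compare as the lowest possible address, so that
 * an nsearch() for a key of a given size lands on the lowest-addressed run
 * that is at least that large.
 */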
static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	int ret;
	size_t a_size = a->bits & ~PAGE_MASK;
	size_t b_size = b->bits & ~PAGE_MASK;

	assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits &
	    CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY));

	ret = (a_size > b_size) - (a_size < b_size);
	if (ret == 0) {
		uintptr_t a_mapelm, b_mapelm;

		if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
			a_mapelm = (uintptr_t)a;
		else {
			/*
			 * Treat keys as though they are lower than anything
			 * else.
			 */
			a_mapelm = 0;
		}
		b_mapelm = (uintptr_t)b;

		ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
	}

	return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
    u.rb_link, arena_avail_comp)

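/*
 * Allocate one region from a non-full small run: find the first free region
 * via the run's allocation bitmap and compute its address from reg0_offset
 * plus a multiple of the bin's region interval.
 */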
static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	unsigned regind;
	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
	    (uintptr_t)bin_info->bitmap_offset);

	assert(run->nfree > 0);
	assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);

	regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	if (regind == run->nextind)
		run->nextind++;
	assert(regind < run->nextind);
	return (ret);
}

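/*
 * Return a region to its small run: recover the region index from the
 * pointer, clear the corresponding bit in the run's bitmap, and bump the
 * run's free-region count.
 */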
static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	unsigned regind = arena_run_regind(run, bin_info, ptr);
	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
	    (uintptr_t)bin_info->bitmap_offset);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - ((uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >= (uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

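/* Debug-only check that every word of the given page is zero. */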
static inline void
arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

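/*
 * Carve a run of size bytes out of an unallocated run within a chunk:
 * remove the containing run from the appropriate runs_avail_* tree, record
 * any trailing pages as a new unallocated run, update dirty-page accounting,
 * and write large or small (binind-tagged) map entries for the pages that
 * were split off, zeroing them first if requested.
 */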
static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
    size_t binind, bool zero)
{
	arena_chunk_t *chunk;
	size_t run_ind, total_pages, need_pages, rem_pages, i;
	size_t flag_dirty;
	arena_avail_tree_t *runs_avail;

	assert((large && binind == BININD_INVALID) || (large == false && binind
	    != BININD_INVALID));

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
	    &arena->runs_avail_clean;
	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, run_ind));
	if (config_stats) {
		/*
		 * Update stats_cactive if nactive is crossing a chunk
		 * multiple.
		 */
		size_t cactive_diff = CHUNK_CEILING((arena->nactive +
		    need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_diff != 0)
			stats_cactive_add(cactive_diff);
	}
	arena->nactive += need_pages;

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		if (flag_dirty != 0) {
			arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
			    (rem_pages << LG_PAGE), CHUNK_MAP_DIRTY);
			arena_mapbits_unallocated_set(chunk,
			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
			    CHUNK_MAP_DIRTY);
		} else {
			arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
			    (rem_pages << LG_PAGE),
			    arena_mapbits_unzeroed_get(chunk,
			    run_ind+need_pages));
			arena_mapbits_unallocated_set(chunk,
			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
			    arena_mapbits_unzeroed_get(chunk,
			    run_ind+total_pages-1));
		}
		arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
		    run_ind+need_pages));
	}

	/* Update dirty page accounting. */
	if (flag_dirty != 0) {
		chunk->ndirty -= need_pages;
		arena->ndirty -= need_pages;
	}

	/*
	 * Update the page map separately for large vs. small runs, since it is
	 * possible to avoid iteration for large mallocs.
	 */
	if (large) {
		if (zero) {
			if (flag_dirty == 0) {
				/*
				 * The run is clean, so some pages may be
				 * zeroed (i.e. never before touched).
				 */
				for (i = 0; i < need_pages; i++) {
					if (arena_mapbits_unzeroed_get(chunk,
					    run_ind+i) != 0) {
						VALGRIND_MAKE_MEM_UNDEFINED(
						    (void *)((uintptr_t)
						    chunk + ((run_ind+i) <<
						    LG_PAGE)), PAGE);
						memset((void *)((uintptr_t)
						    chunk + ((run_ind+i) <<
						    LG_PAGE)), 0, PAGE);
					} else if (config_debug) {
						VALGRIND_MAKE_MEM_DEFINED(
						    (void *)((uintptr_t)
						    chunk + ((run_ind+i) <<
						    LG_PAGE)), PAGE);
						arena_chunk_validate_zeroed(
						    chunk, run_ind+i);
					}
				}
			} else {
				/*
				 * The run is dirty, so all pages must be
				 * zeroed.
				 */
				VALGRIND_MAKE_MEM_UNDEFINED((void
				    *)((uintptr_t)chunk + (run_ind <<
				    LG_PAGE)), (need_pages << LG_PAGE));
				memset((void *)((uintptr_t)chunk + (run_ind <<
				    LG_PAGE)), 0, (need_pages << LG_PAGE));
			}
		}

		/*
		 * Set the last element first, in case the run only contains one
		 * page (i.e. both statements set the same element).
		 */
		arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0,
		    flag_dirty);
		arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
	} else {
		assert(zero == false);
		/*
		 * Propagate the dirty and unzeroed flags to the allocated
		 * small run, so that arena_dalloc_bin_run() has the ability to
		 * conditionally trim clean pages.
		 */
		arena_mapbits_small_set(chunk, run_ind, 0, binind,
		    arena_mapbits_unzeroed_get(chunk, run_ind) | flag_dirty);
		/*
		 * The first page will always be dirtied during small run
		 * initialization, so a validation failure here would not
		 * actually cause an observable failure.
		 */
		if (config_debug && flag_dirty == 0 &&
		    arena_mapbits_unzeroed_get(chunk, run_ind) == 0)
			arena_chunk_validate_zeroed(chunk, run_ind);
		for (i = 1; i < need_pages - 1; i++) {
			arena_mapbits_small_set(chunk, run_ind+i, i,
			    binind, arena_mapbits_unzeroed_get(chunk,
			    run_ind+i));
			if (config_debug && flag_dirty == 0 &&
			    arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
				arena_chunk_validate_zeroed(chunk, run_ind+i);
		}
		arena_mapbits_small_set(chunk, run_ind+need_pages-1,
		    need_pages-1, binind, arena_mapbits_unzeroed_get(chunk,
		    run_ind+need_pages-1) | flag_dirty);
		if (config_debug && flag_dirty == 0 &&
		    arena_mapbits_unzeroed_get(chunk, run_ind+need_pages-1) ==
		    0) {
			arena_chunk_validate_zeroed(chunk,
			    run_ind+need_pages-1);
		}
	}
}

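/*
 * Obtain a chunk for the arena: reuse the cached spare chunk if there is
 * one, otherwise map a new chunk, initialize its header, and describe it in
 * the page map as a single maximal unallocated run.  Either way, the chunk's
 * run is inserted into the appropriate runs_avail_* tree.
 */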
static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
	arena_chunk_t *chunk;
	size_t i;

	if (arena->spare != NULL) {
		arena_avail_tree_t *runs_avail;

		chunk = arena->spare;
		arena->spare = NULL;

		/* Insert the run into the appropriate runs_avail_* tree. */
		if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
			runs_avail = &arena->runs_avail_clean;
		else
			runs_avail = &arena->runs_avail_dirty;
		assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
		    arena_maxclass);
		assert(arena_mapbits_unallocated_size_get(chunk,
		    chunk_npages-1) == arena_maxclass);
		assert(arena_mapbits_dirty_get(chunk, map_bias) ==
		    arena_mapbits_dirty_get(chunk, chunk_npages-1));
		arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk,
		    map_bias));
	} else {
		bool zero;
		size_t unzeroed;

		zero = false;
		malloc_mutex_unlock(&arena->lock);
		chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
		    false, &zero);
		malloc_mutex_lock(&arena->lock);
		if (chunk == NULL)
			return (NULL);
		if (config_stats)
			arena->stats.mapped += chunksize;

		chunk->arena = arena;
		ql_elm_new(chunk, link_dirty);
		chunk->dirtied = false;

		/*
		 * Claim that no pages are in use, since the header is merely
		 * overhead.
		 */
		chunk->ndirty = 0;

		/*
		 * Initialize the map to contain one maximal free untouched run.
		 * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
		 * chunk.
		 */
		unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
		arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
		    unzeroed);
		/*
		 * There is no need to initialize the internal page map entries
		 * unless the chunk is not zeroed.
		 */
		if (zero == false) {
			for (i = map_bias+1; i < chunk_npages-1; i++)
				arena_mapbits_unzeroed_set(chunk, i, unzeroed);
		} else if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    unzeroed);
			}
		}
		arena_mapbits_unallocated_set(chunk, chunk_npages-1,
		    arena_maxclass, unzeroed);

		/* Insert the run into the runs_avail_clean tree. */
		arena_avail_tree_insert(&arena->runs_avail_clean,
		    arena_mapp_get(chunk, map_bias));
	}

	return (chunk);
}

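/*
 * Retire a completely unused chunk: remove its run from the runs_avail_*
 * tree and stash the chunk as the arena's spare.  If a spare already
 * exists, the old spare is unmapped instead.
 */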
static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
	arena_avail_tree_t *runs_avail;

	/*
	 * Remove run from the appropriate runs_avail_* tree, so that the arena
	 * does not use it.
	 */
	if (arena_mapbits_dirty_get(chunk, map_bias) == 0)
		runs_avail = &arena->runs_avail_clean;
	else
		runs_avail = &arena->runs_avail_dirty;
	arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk, map_bias));

	if (arena->spare != NULL) {
		arena_chunk_t *spare = arena->spare;

		arena->spare = chunk;
		if (spare->dirtied) {
			ql_remove(&chunk->arena->chunks_dirty, spare,
			    link_dirty);
			arena->ndirty -= spare->ndirty;
		}
		malloc_mutex_unlock(&arena->lock);
		chunk_dealloc((void *)spare, chunksize, true);
		malloc_mutex_lock(&arena->lock);
		if (config_stats)
			arena->stats.mapped -= chunksize;
	} else
		arena->spare = chunk;
}

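/*
 * Allocate a page run of the requested size, preferring the lowest
 * best-fit run among dirty runs, then clean runs.  If no run fits,
 * allocate a new chunk and split the run from it; if chunk allocation
 * fails, search once more, since another thread may have freed memory
 * while arena->lock was dropped.
 */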
static arena_run_t *
arena_run_alloc(arena_t *arena, size_t size, bool large, size_t binind,
    bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;
	arena_chunk_map_t *mapelm, key;

	assert(size <= arena_maxclass);
	assert((size & PAGE_MASK) == 0);
	assert((large && binind == BININD_INVALID) || (large == false && binind
	    != BININD_INVALID));

	/* Search the arena's chunks for the lowest best fit. */
	key.bits = size | CHUNK_MAP_KEY;
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = (((uintptr_t)mapelm -
		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
		    + map_bias;

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split(arena, run, size, large, binind, zero);
		return (run);
	}
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = (((uintptr_t)mapelm -
		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
		    + map_bias;

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split(arena, run, size, large, binind, zero);
		return (run);
	}

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
		arena_run_split(arena, run, size, large, binind, zero);
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = (((uintptr_t)mapelm -
		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
		    + map_bias;

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split(arena, run, size, large, binind, zero);
		return (run);
	}
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = (((uintptr_t)mapelm -
		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
		    + map_bias;

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split(arena, run, size, large, binind, zero);
		return (run);
	}

	return (NULL);
}

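/*
 * Trigger purging once the number of dirty pages not already committed to
 * purging (ndirty - npurgatory) exceeds both chunk_npages and
 * nactive >> opt_lg_dirty_mult.
 */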
static inline void
arena_maybe_purge(arena_t *arena)
{

	/* Enforce opt_lg_dirty_mult. */
	if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory &&
	    (arena->ndirty - arena->npurgatory) > chunk_npages &&
	    (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
	    arena->npurgatory))
		arena_purge(arena, false);
}

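/*
 * Purge all dirty pages in a chunk: temporarily mark each free dirty run as
 * a large allocation so it cannot be reused, drop arena->lock while calling
 * pages_purge() on each such run, then deallocate the runs so they coalesce
 * back into the available trees.
 */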
static inline void
arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
{
	ql_head(arena_chunk_map_t) mapelms;
	arena_chunk_map_t *mapelm;
	size_t pageind, flag_unzeroed;
	size_t ndirty;
	size_t nmadvise;

	ql_new(&mapelms);

	flag_unzeroed =
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
   /*
    * madvise(..., MADV_DONTNEED) results in zero-filled pages for anonymous
    * mappings, but not for file-backed mappings.
    */
	    0
#else
	    CHUNK_MAP_UNZEROED
#endif
	    ;

	/*
	 * If chunk is the spare, temporarily re-allocate it, 1) so that its
	 * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
	 * completely discarded by another thread while arena->lock is dropped
	 * by this thread.  Note that the arena_run_dalloc() call will
	 * implicitly deallocate the chunk, so no explicit action is required
	 * in this function to deallocate the chunk.
	 *
	 * Note that once a chunk contains dirty pages, it cannot again contain
	 * a single run unless 1) it is a dirty run, or 2) this function purges
	 * dirty pages and causes the transition to a single clean run.  Thus
	 * (chunk == arena->spare) is possible, but it is not possible for
	 * this function to be called on the spare unless it contains a dirty
	 * run.
	 */
	if (chunk == arena->spare) {
		assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
		arena_chunk_alloc(arena);
	}

	/* Temporarily allocate all free dirty runs within chunk. */
	for (pageind = map_bias; pageind < chunk_npages;) {
		mapelm = arena_mapp_get(chunk, pageind);
		if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
			size_t npages;

			npages = arena_mapbits_unallocated_size_get(chunk,
			    pageind) >> LG_PAGE;
			assert(pageind + npages <= chunk_npages);
			if (arena_mapbits_dirty_get(chunk, pageind)) {
				size_t i;

				arena_avail_tree_remove(
				    &arena->runs_avail_dirty, mapelm);

				arena_mapbits_large_set(chunk, pageind,
				    (npages << LG_PAGE), flag_unzeroed);
				/*
				 * Update internal elements in the page map, so
				 * that CHUNK_MAP_UNZEROED is properly set.
				 */
				for (i = 1; i < npages - 1; i++) {
					arena_mapbits_unzeroed_set(chunk,
					    pageind+i, flag_unzeroed);
				}
				if (npages > 1) {
					arena_mapbits_large_set(chunk,
					    pageind+npages-1, 0, flag_unzeroed);
				}

				if (config_stats) {
					/*
					 * Update stats_cactive if nactive is
					 * crossing a chunk multiple.
					 */
					size_t cactive_diff =
					    CHUNK_CEILING((arena->nactive +
					    npages) << LG_PAGE) -
					    CHUNK_CEILING(arena->nactive <<
					    LG_PAGE);
					if (cactive_diff != 0)
						stats_cactive_add(cactive_diff);
				}
				arena->nactive += npages;
				/* Append to list for later processing. */
				ql_elm_new(mapelm, u.ql_link);
				ql_tail_insert(&mapelms, mapelm, u.ql_link);
			}

			pageind += npages;
		} else {
			/* Skip allocated run. */
			if (arena_mapbits_large_get(chunk, pageind))
				pageind += arena_mapbits_large_size_get(chunk,
				    pageind) >> LG_PAGE;
			else {
				size_t binind;
				arena_bin_info_t *bin_info;
				arena_run_t *run = (arena_run_t *)((uintptr_t)
				    chunk + (uintptr_t)(pageind << LG_PAGE));

				assert(arena_mapbits_small_runind_get(chunk,
				    pageind) == 0);
				binind = arena_bin_index(arena, run->bin);
				bin_info = &arena_bin_info[binind];
				pageind += bin_info->run_size >> LG_PAGE;
			}
		}
	}
	assert(pageind == chunk_npages);

	if (config_debug)
		ndirty = chunk->ndirty;
	if (config_stats)
		arena->stats.purged += chunk->ndirty;
	arena->ndirty -= chunk->ndirty;
	chunk->ndirty = 0;
	ql_remove(&arena->chunks_dirty, chunk, link_dirty);
	chunk->dirtied = false;

	malloc_mutex_unlock(&arena->lock);
	if (config_stats)
		nmadvise = 0;
	ql_foreach(mapelm, &mapelms, u.ql_link) {
		size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t)) + map_bias;
		size_t npages = arena_mapbits_large_size_get(chunk, pageind) >>
		    LG_PAGE;

		assert(pageind + npages <= chunk_npages);
		assert(ndirty >= npages);
		if (config_debug)
			ndirty -= npages;

		pages_purge((void *)((uintptr_t)chunk + (pageind << LG_PAGE)),
		    (npages << LG_PAGE));
		if (config_stats)
			nmadvise++;
	}
	assert(ndirty == 0);
	malloc_mutex_lock(&arena->lock);
	if (config_stats)
		arena->stats.nmadvise += nmadvise;

	/* Deallocate runs. */
	for (mapelm = ql_first(&mapelms); mapelm != NULL;
	    mapelm = ql_first(&mapelms)) {
		size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t)) + map_bias;
		arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
		    (uintptr_t)(pageind << LG_PAGE));

		ql_remove(&mapelms, mapelm, u.ql_link);
		arena_run_dalloc(arena, run, false);
	}
}

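/*
 * Purge enough dirty pages to bring the arena back under the
 * opt_lg_dirty_mult threshold (or all dirty pages if requested).  The
 * number of pages this thread commits to purging is added to
 * arena->npurgatory so that concurrent callers do not race to purge the
 * same pages.
 */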
static void
arena_purge(arena_t *arena, bool all)
{
	arena_chunk_t *chunk;
	size_t npurgatory;
	if (config_debug) {
		size_t ndirty = 0;

		ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
		    assert(chunk->dirtied);
		    ndirty += chunk->ndirty;
		}
		assert(ndirty == arena->ndirty);
	}
	assert(arena->ndirty > arena->npurgatory || all);
	assert(arena->ndirty - arena->npurgatory > chunk_npages || all);
	assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
	    arena->npurgatory) || all);

	if (config_stats)
		arena->stats.npurge++;

	/*
	 * Compute the minimum number of pages that this thread should try to
	 * purge, and add the result to arena->npurgatory.  This will keep
	 * multiple threads from racing to reduce ndirty below the threshold.
	 */
	npurgatory = arena->ndirty - arena->npurgatory;
	if (all == false) {
		assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult);
		npurgatory -= arena->nactive >> opt_lg_dirty_mult;
	}
	arena->npurgatory += npurgatory;

	while (npurgatory > 0) {
		/* Get next chunk with dirty pages. */
		chunk = ql_first(&arena->chunks_dirty);
		if (chunk == NULL) {
			/*
			 * This thread was unable to purge as many pages as
			 * originally intended, due to races with other threads
			 * that either did some of the purging work, or re-used
			 * dirty pages.
			 */
			arena->npurgatory -= npurgatory;
			return;
		}
		while (chunk->ndirty == 0) {
			ql_remove(&arena->chunks_dirty, chunk, link_dirty);
			chunk->dirtied = false;
			chunk = ql_first(&arena->chunks_dirty);
			if (chunk == NULL) {
				/* Same logic as for above. */
				arena->npurgatory -= npurgatory;
				return;
			}
		}

		if (chunk->ndirty > npurgatory) {
			/*
			 * This thread will, at a minimum, purge all the dirty
			 * pages in chunk, so set npurgatory to reflect this
			 * thread's commitment to purge the pages.  This tends
			 * to reduce the chances of the following scenario:
			 *
			 * 1) This thread sets arena->npurgatory such that
			 *    (arena->ndirty - arena->npurgatory) is at the
			 *    threshold.
			 * 2) This thread drops arena->lock.
			 * 3) Another thread causes one or more pages to be
			 *    dirtied, and immediately determines that it must
			 *    purge dirty pages.
			 *
			 * If this scenario *does* play out, that's okay,
			 * because all of the purging work being done really
			 * needs to happen.
			 */
			arena->npurgatory += chunk->ndirty - npurgatory;
			npurgatory = chunk->ndirty;
		}

		arena->npurgatory -= chunk->ndirty;
		npurgatory -= chunk->ndirty;
		arena_chunk_purge(arena, chunk);
	}
}

void
arena_purge_all(arena_t *arena)
{

	malloc_mutex_lock(&arena->lock);
	arena_purge(arena, true);
	malloc_mutex_unlock(&arena->lock);
}

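/*
 * Return a run to the arena: mark its pages unallocated (dirty if the
 * caller dirtied them or they were already dirty), coalesce with adjacent
 * unallocated runs of the same dirtiness, reinsert the result into the
 * proper runs_avail_* tree, and deallocate the chunk if it has become one
 * maximal free run.  Dirty runs may trigger purging.
 */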
static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
{
	arena_chunk_t *chunk;
	size_t size, run_ind, run_pages, flag_dirty;
	arena_avail_tree_t *runs_avail;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	assert(run_ind >= map_bias);
	assert(run_ind < chunk_npages);
	if (arena_mapbits_large_get(chunk, run_ind) != 0) {
		size = arena_mapbits_large_size_get(chunk, run_ind);
		assert(size == PAGE ||
		    arena_mapbits_large_size_get(chunk,
		    run_ind+(size>>LG_PAGE)-1) == 0);
	} else {
		size_t binind = arena_bin_index(arena, run->bin);
		arena_bin_info_t *bin_info = &arena_bin_info[binind];
		size = bin_info->run_size;
	}
	run_pages = (size >> LG_PAGE);
	if (config_stats) {
		/*
		 * Update stats_cactive if nactive is crossing a chunk
		 * multiple.
		 */
		size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
		if (cactive_diff != 0)
			stats_cactive_sub(cactive_diff);
	}
	arena->nactive -= run_pages;

	/*
	 * The run is dirty if the caller claims to have dirtied it, as well as
	 * if it was already dirty before being allocated.
	 */
	if (arena_mapbits_dirty_get(chunk, run_ind) != 0)
		dirty = true;
	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
	runs_avail = dirty ? &arena->runs_avail_dirty :
	    &arena->runs_avail_clean;

	/* Mark pages as unallocated in the chunk map. */
	if (dirty) {
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		    CHUNK_MAP_DIRTY);
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    CHUNK_MAP_DIRTY);

		chunk->ndirty += run_pages;
		arena->ndirty += run_pages;
	} else {
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind));
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
	}

	/* Try to coalesce forward. */
	if (run_ind + run_pages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages);
		size_t nrun_pages = nrun_size >> LG_PAGE;

		/*
		 * Remove successor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == nrun_size);
		assert(arena_mapbits_dirty_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
		arena_avail_tree_remove(runs_avail,
		    arena_mapp_get(chunk, run_ind+run_pages));

		size += nrun_size;
		run_pages += nrun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	/* Try to coalesce backward. */
	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, run_ind-1)
	    == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == flag_dirty) {
		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind-1);
		size_t prun_pages = prun_size >> LG_PAGE;

		run_ind -= prun_pages;

		/*
		 * Remove predecessor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
		    prun_size);
		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
		arena_avail_tree_remove(runs_avail, arena_mapp_get(chunk,
		    run_ind));

		size += prun_size;
		run_pages += prun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	/* Insert into runs_avail, now that coalescing is complete. */
	assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
	    arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	arena_avail_tree_insert(runs_avail, arena_mapp_get(chunk, run_ind));

	if (dirty) {
		/*
		 * Insert into chunks_dirty before potentially calling
		 * arena_chunk_dealloc(), so that chunks_dirty and
		 * arena->ndirty are consistent.
		 */
		if (chunk->dirtied == false) {
			ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
			chunk->dirtied = true;
		}
	}

	/* Deallocate chunk if it is now completely unused. */
	if (size == arena_maxclass) {
		assert(run_ind == map_bias);
		assert(run_pages == (arena_maxclass >> LG_PAGE));
		assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
		assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
		    arena_maxclass);
		arena_chunk_dealloc(arena, chunk);
	}

	/*
	 * It is okay to do dirty page processing here even if the chunk was
	 * deallocated above, since in that case it is the spare.  Waiting
	 * until after possible chunk deallocation to do dirty processing
	 * allows for an old spare to be fully deallocated, thus decreasing the
	 * chances of spuriously crossing the dirty page purging threshold.
	 */
	if (dirty)
		arena_maybe_purge(arena);
}

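/*
 * Split the leading (oldsize - newsize) bytes off of a large run and free
 * them, leaving a run of newsize bytes at the higher address.
 */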
static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize)
{
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * leading run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
	    arena_mapbits_unzeroed_get(chunk, pageind+head_npages-1));
	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty |
	    arena_mapbits_unzeroed_get(chunk, pageind));

	if (config_debug) {
		UNUSED size_t tail_npages = newsize >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, newsize, flag_dirty
	    | arena_mapbits_unzeroed_get(chunk, pageind+head_npages));

	arena_run_dalloc(arena, run, false);
}

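/*
 * Split the trailing (oldsize - newsize) bytes off of a large run and free
 * them, leaving a run of newsize bytes at the original address.
 */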
static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize, bool dirty)
{
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	size_t head_npages = newsize >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * trailing run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty |
	    arena_mapbits_unzeroed_get(chunk, pageind+head_npages-1));
	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty |
	    arena_mapbits_unzeroed_get(chunk, pageind));

	if (config_debug) {
		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
	    flag_dirty | arena_mapbits_unzeroed_get(chunk,
	    pageind+head_npages));

	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
	    dirty);
}

1031a4bd5210SJason Evans static arena_run_t *
1032a4bd5210SJason Evans arena_bin_runs_first(arena_bin_t *bin)
1033a4bd5210SJason Evans {
1034a4bd5210SJason Evans 	arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
1035a4bd5210SJason Evans 	if (mapelm != NULL) {
1036a4bd5210SJason Evans 		arena_chunk_t *chunk;
1037a4bd5210SJason Evans 		size_t pageind;
1038*e722f8f8SJason Evans 		arena_run_t *run;
1039a4bd5210SJason Evans 
1040a4bd5210SJason Evans 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
1041a4bd5210SJason Evans 		pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
1042a4bd5210SJason Evans 		    sizeof(arena_chunk_map_t))) + map_bias;
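		/*
		 * The map element only identifies some page within the run;
		 * subtracting that page's run index yields the run's first
		 * page, and therefore the address of the run header.
		 */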
1043*e722f8f8SJason Evans 		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1044*e722f8f8SJason Evans 		    arena_mapbits_small_runind_get(chunk, pageind)) <<
1045a4bd5210SJason Evans 		    LG_PAGE));
1046a4bd5210SJason Evans 		return (run);
1047a4bd5210SJason Evans 	}
1048a4bd5210SJason Evans 
1049a4bd5210SJason Evans 	return (NULL);
1050a4bd5210SJason Evans }
1051a4bd5210SJason Evans 
1052a4bd5210SJason Evans static void
1053a4bd5210SJason Evans arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
1054a4bd5210SJason Evans {
1055a4bd5210SJason Evans 	arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
1056a4bd5210SJason Evans 	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
1057*e722f8f8SJason Evans 	arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
1058a4bd5210SJason Evans 
1059a4bd5210SJason Evans 	assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);
1060a4bd5210SJason Evans 
1061a4bd5210SJason Evans 	arena_run_tree_insert(&bin->runs, mapelm);
1062a4bd5210SJason Evans }
1063a4bd5210SJason Evans 
1064a4bd5210SJason Evans static void
1065a4bd5210SJason Evans arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
1066a4bd5210SJason Evans {
1067a4bd5210SJason Evans 	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1068a4bd5210SJason Evans 	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
1069*e722f8f8SJason Evans 	arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
1070a4bd5210SJason Evans 
1071a4bd5210SJason Evans 	assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);
1072a4bd5210SJason Evans 
1073a4bd5210SJason Evans 	arena_run_tree_remove(&bin->runs, mapelm);
1074a4bd5210SJason Evans }
1075a4bd5210SJason Evans 
1076a4bd5210SJason Evans static arena_run_t *
1077a4bd5210SJason Evans arena_bin_nonfull_run_tryget(arena_bin_t *bin)
1078a4bd5210SJason Evans {
1079a4bd5210SJason Evans 	arena_run_t *run = arena_bin_runs_first(bin);
1080a4bd5210SJason Evans 	if (run != NULL) {
1081a4bd5210SJason Evans 		arena_bin_runs_remove(bin, run);
1082a4bd5210SJason Evans 		if (config_stats)
1083a4bd5210SJason Evans 			bin->stats.reruns++;
1084a4bd5210SJason Evans 	}
1085a4bd5210SJason Evans 	return (run);
1086a4bd5210SJason Evans }
1087a4bd5210SJason Evans 
1088a4bd5210SJason Evans static arena_run_t *
1089a4bd5210SJason Evans arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
1090a4bd5210SJason Evans {
1091a4bd5210SJason Evans 	arena_run_t *run;
1092a4bd5210SJason Evans 	size_t binind;
1093a4bd5210SJason Evans 	arena_bin_info_t *bin_info;
1094a4bd5210SJason Evans 
1095a4bd5210SJason Evans 	/* Look for a usable run. */
1096a4bd5210SJason Evans 	run = arena_bin_nonfull_run_tryget(bin);
1097a4bd5210SJason Evans 	if (run != NULL)
1098a4bd5210SJason Evans 		return (run);
1099a4bd5210SJason Evans 	/* No existing runs have any space available. */
1100a4bd5210SJason Evans 
1101a4bd5210SJason Evans 	binind = arena_bin_index(arena, bin);
1102a4bd5210SJason Evans 	bin_info = &arena_bin_info[binind];
1103a4bd5210SJason Evans 
1104a4bd5210SJason Evans 	/* Allocate a new run. */
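	/*
	 * bin->lock is dropped across the run allocation (the asterisk
	 * banners mark the unlocked region), so other threads may refill the
	 * bin in the meantime; that case is handled after re-locking below.
	 */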
1105a4bd5210SJason Evans 	malloc_mutex_unlock(&bin->lock);
1106a4bd5210SJason Evans 	/******************************/
1107a4bd5210SJason Evans 	malloc_mutex_lock(&arena->lock);
1108*e722f8f8SJason Evans 	run = arena_run_alloc(arena, bin_info->run_size, false, binind, false);
1109a4bd5210SJason Evans 	if (run != NULL) {
1110a4bd5210SJason Evans 		bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
1111a4bd5210SJason Evans 		    (uintptr_t)bin_info->bitmap_offset);
1112a4bd5210SJason Evans 
1113a4bd5210SJason Evans 		/* Initialize run internals. */
1114*e722f8f8SJason Evans 		VALGRIND_MAKE_MEM_UNDEFINED(run, bin_info->reg0_offset -
1115*e722f8f8SJason Evans 		    bin_info->redzone_size);
1116a4bd5210SJason Evans 		run->bin = bin;
1117a4bd5210SJason Evans 		run->nextind = 0;
1118a4bd5210SJason Evans 		run->nfree = bin_info->nregs;
1119a4bd5210SJason Evans 		bitmap_init(bitmap, &bin_info->bitmap_info);
1120a4bd5210SJason Evans 	}
1121a4bd5210SJason Evans 	malloc_mutex_unlock(&arena->lock);
1122a4bd5210SJason Evans 	/********************************/
1123a4bd5210SJason Evans 	malloc_mutex_lock(&bin->lock);
1124a4bd5210SJason Evans 	if (run != NULL) {
1125a4bd5210SJason Evans 		if (config_stats) {
1126a4bd5210SJason Evans 			bin->stats.nruns++;
1127a4bd5210SJason Evans 			bin->stats.curruns++;
1128a4bd5210SJason Evans 		}
1129a4bd5210SJason Evans 		return (run);
1130a4bd5210SJason Evans 	}
1131a4bd5210SJason Evans 
1132a4bd5210SJason Evans 	/*
1133a4bd5210SJason Evans 	 * arena_run_alloc() failed, but another thread may have made
1134a4bd5210SJason Evans 	 * sufficient memory available while this one dropped bin->lock above,
1135a4bd5210SJason Evans 	 * so search one more time.
1136a4bd5210SJason Evans 	 */
1137a4bd5210SJason Evans 	run = arena_bin_nonfull_run_tryget(bin);
1138a4bd5210SJason Evans 	if (run != NULL)
1139a4bd5210SJason Evans 		return (run);
1140a4bd5210SJason Evans 
1141a4bd5210SJason Evans 	return (NULL);
1142a4bd5210SJason Evans }
1143a4bd5210SJason Evans 
1144a4bd5210SJason Evans /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
1145a4bd5210SJason Evans static void *
1146a4bd5210SJason Evans arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
1147a4bd5210SJason Evans {
1148a4bd5210SJason Evans 	void *ret;
1149a4bd5210SJason Evans 	size_t binind;
1150a4bd5210SJason Evans 	arena_bin_info_t *bin_info;
1151a4bd5210SJason Evans 	arena_run_t *run;
1152a4bd5210SJason Evans 
1153a4bd5210SJason Evans 	binind = arena_bin_index(arena, bin);
1154a4bd5210SJason Evans 	bin_info = &arena_bin_info[binind];
1155a4bd5210SJason Evans 	bin->runcur = NULL;
1156a4bd5210SJason Evans 	run = arena_bin_nonfull_run_get(arena, bin);
1157a4bd5210SJason Evans 	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
1158a4bd5210SJason Evans 		/*
1159a4bd5210SJason Evans 		 * Another thread updated runcur while this one ran without the
1160a4bd5210SJason Evans 		 * bin lock in arena_bin_nonfull_run_get().
1161a4bd5210SJason Evans 		 */
1162a4bd5210SJason Evans 		assert(bin->runcur->nfree > 0);
1163a4bd5210SJason Evans 		ret = arena_run_reg_alloc(bin->runcur, bin_info);
1164a4bd5210SJason Evans 		if (run != NULL) {
1165a4bd5210SJason Evans 			arena_chunk_t *chunk;
1166a4bd5210SJason Evans 
1167a4bd5210SJason Evans 			/*
1168a4bd5210SJason Evans 			 * arena_run_alloc() may have allocated run, or it may
1169a4bd5210SJason Evans 			 * have pulled run from the bin's run tree.  Therefore
1170a4bd5210SJason Evans 			 * it is unsafe to make any assumptions about how run
1171a4bd5210SJason Evans 			 * has previously been used, and arena_bin_lower_run()
1172a4bd5210SJason Evans 			 * must be called, as if a region were just deallocated
1173a4bd5210SJason Evans 			 * from the run.
1174a4bd5210SJason Evans 			 */
1175a4bd5210SJason Evans 			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1176a4bd5210SJason Evans 			if (run->nfree == bin_info->nregs)
1177a4bd5210SJason Evans 				arena_dalloc_bin_run(arena, chunk, run, bin);
1178a4bd5210SJason Evans 			else
1179a4bd5210SJason Evans 				arena_bin_lower_run(arena, chunk, run, bin);
1180a4bd5210SJason Evans 		}
1181a4bd5210SJason Evans 		return (ret);
1182a4bd5210SJason Evans 	}
1183a4bd5210SJason Evans 
1184a4bd5210SJason Evans 	if (run == NULL)
1185a4bd5210SJason Evans 		return (NULL);
1186a4bd5210SJason Evans 
1187a4bd5210SJason Evans 	bin->runcur = run;
1188a4bd5210SJason Evans 
1189a4bd5210SJason Evans 	assert(bin->runcur->nfree > 0);
1190a4bd5210SJason Evans 
1191a4bd5210SJason Evans 	return (arena_run_reg_alloc(bin->runcur, bin_info));
1192a4bd5210SJason Evans }
1193a4bd5210SJason Evans 
1194a4bd5210SJason Evans void
1195a4bd5210SJason Evans arena_prof_accum(arena_t *arena, uint64_t accumbytes)
1196a4bd5210SJason Evans {
1197a4bd5210SJason Evans 
11988ed34ab0SJason Evans 	cassert(config_prof);
11998ed34ab0SJason Evans 
12008ed34ab0SJason Evans 	if (config_prof && prof_interval != 0) {
1201a4bd5210SJason Evans 		arena->prof_accumbytes += accumbytes;
1202a4bd5210SJason Evans 		if (arena->prof_accumbytes >= prof_interval) {
1203a4bd5210SJason Evans 			prof_idump();
1204a4bd5210SJason Evans 			arena->prof_accumbytes -= prof_interval;
1205a4bd5210SJason Evans 		}
1206a4bd5210SJason Evans 	}
1207a4bd5210SJason Evans }
1208a4bd5210SJason Evans 
1209a4bd5210SJason Evans void
1210a4bd5210SJason Evans arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
1211a4bd5210SJason Evans     uint64_t prof_accumbytes)
1212a4bd5210SJason Evans {
1213a4bd5210SJason Evans 	unsigned i, nfill;
1214a4bd5210SJason Evans 	arena_bin_t *bin;
1215a4bd5210SJason Evans 	arena_run_t *run;
1216a4bd5210SJason Evans 	void *ptr;
1217a4bd5210SJason Evans 
1218a4bd5210SJason Evans 	assert(tbin->ncached == 0);
1219a4bd5210SJason Evans 
1220a4bd5210SJason Evans 	if (config_prof) {
1221a4bd5210SJason Evans 		malloc_mutex_lock(&arena->lock);
1222a4bd5210SJason Evans 		arena_prof_accum(arena, prof_accumbytes);
1223a4bd5210SJason Evans 		malloc_mutex_unlock(&arena->lock);
1224a4bd5210SJason Evans 	}
1225a4bd5210SJason Evans 	bin = &arena->bins[binind];
1226a4bd5210SJason Evans 	malloc_mutex_lock(&bin->lock);
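	/*
	 * Request ncached_max >> lg_fill_div regions (e.g., half of
	 * ncached_max when lg_fill_div is 1); the loop may stop short if the
	 * bin cannot supply that many regions.
	 */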
1227a4bd5210SJason Evans 	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
1228a4bd5210SJason Evans 	    tbin->lg_fill_div); i < nfill; i++) {
1229a4bd5210SJason Evans 		if ((run = bin->runcur) != NULL && run->nfree > 0)
1230a4bd5210SJason Evans 			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
1231a4bd5210SJason Evans 		else
1232a4bd5210SJason Evans 			ptr = arena_bin_malloc_hard(arena, bin);
1233a4bd5210SJason Evans 		if (ptr == NULL)
1234a4bd5210SJason Evans 			break;
1235a4bd5210SJason Evans 		if (config_fill && opt_junk) {
1236a4bd5210SJason Evans 			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
1237a4bd5210SJason Evans 			    true);
1238a4bd5210SJason Evans 		}
1239a4bd5210SJason Evans 		/* Insert such that low regions get used first. */
1240a4bd5210SJason Evans 		tbin->avail[nfill - 1 - i] = ptr;
1241a4bd5210SJason Evans 	}
1242a4bd5210SJason Evans 	if (config_stats) {
1243a4bd5210SJason Evans 		bin->stats.allocated += i * arena_bin_info[binind].reg_size;
1244a4bd5210SJason Evans 		bin->stats.nmalloc += i;
1245a4bd5210SJason Evans 		bin->stats.nrequests += tbin->tstats.nrequests;
1246a4bd5210SJason Evans 		bin->stats.nfills++;
1247a4bd5210SJason Evans 		tbin->tstats.nrequests = 0;
1248a4bd5210SJason Evans 	}
1249a4bd5210SJason Evans 	malloc_mutex_unlock(&bin->lock);
1250a4bd5210SJason Evans 	tbin->ncached = i;
1251a4bd5210SJason Evans }
1252a4bd5210SJason Evans 
1253a4bd5210SJason Evans void
1254a4bd5210SJason Evans arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
1255a4bd5210SJason Evans {
1256a4bd5210SJason Evans 
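	/*
	 * For zeroed allocations only the redzones are junk-filled, so the
	 * region itself stays zeroed; otherwise the entire reg_interval
	 * (region plus both redzones) is filled with 0xa5.
	 */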
1257a4bd5210SJason Evans 	if (zero) {
1258a4bd5210SJason Evans 		size_t redzone_size = bin_info->redzone_size;
1259a4bd5210SJason Evans 		memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
1260a4bd5210SJason Evans 		    redzone_size);
1261a4bd5210SJason Evans 		memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
1262a4bd5210SJason Evans 		    redzone_size);
1263a4bd5210SJason Evans 	} else {
1264a4bd5210SJason Evans 		memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
1265a4bd5210SJason Evans 		    bin_info->reg_interval);
1266a4bd5210SJason Evans 	}
1267a4bd5210SJason Evans }
1268a4bd5210SJason Evans 
1269a4bd5210SJason Evans void
1270a4bd5210SJason Evans arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
1271a4bd5210SJason Evans {
1272a4bd5210SJason Evans 	size_t size = bin_info->reg_size;
1273a4bd5210SJason Evans 	size_t redzone_size = bin_info->redzone_size;
1274a4bd5210SJason Evans 	size_t i;
1275a4bd5210SJason Evans 	bool error = false;
1276a4bd5210SJason Evans 
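	/*
	 * Check the leading redzone (the bytes immediately before the
	 * region), then the trailing redzone; any byte that is not 0xa5
	 * indicates that the application wrote outside its region.
	 */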
1277a4bd5210SJason Evans 	for (i = 1; i <= redzone_size; i++) {
1278a4bd5210SJason Evans 		unsigned byte;
1279a4bd5210SJason Evans 		if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
1280a4bd5210SJason Evans 			error = true;
1281a4bd5210SJason Evans 			malloc_printf("<jemalloc>: Corrupt redzone "
1282a4bd5210SJason Evans 			    "%zu byte%s before %p (size %zu), byte=%#x\n", i,
1283a4bd5210SJason Evans 			    (i == 1) ? "" : "s", ptr, size, byte);
1284a4bd5210SJason Evans 		}
1285a4bd5210SJason Evans 	}
1286a4bd5210SJason Evans 	for (i = 0; i < redzone_size; i++) {
1287a4bd5210SJason Evans 		unsigned byte;
1288a4bd5210SJason Evans 		if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
1289a4bd5210SJason Evans 			error = true;
1290a4bd5210SJason Evans 			malloc_printf("<jemalloc>: Corrupt redzone "
1291a4bd5210SJason Evans 			    "%zu byte%s after end of %p (size %zu), byte=%#x\n",
1292a4bd5210SJason Evans 			    i, (i == 1) ? "" : "s", ptr, size, byte);
1293a4bd5210SJason Evans 		}
1294a4bd5210SJason Evans 	}
1295a4bd5210SJason Evans 	if (opt_abort && error)
1296a4bd5210SJason Evans 		abort();
1297a4bd5210SJason Evans 
1298a4bd5210SJason Evans 	memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
1299a4bd5210SJason Evans 	    bin_info->reg_interval);
1300a4bd5210SJason Evans }
1301a4bd5210SJason Evans 
1302a4bd5210SJason Evans void *
1303a4bd5210SJason Evans arena_malloc_small(arena_t *arena, size_t size, bool zero)
1304a4bd5210SJason Evans {
1305a4bd5210SJason Evans 	void *ret;
1306a4bd5210SJason Evans 	arena_bin_t *bin;
1307a4bd5210SJason Evans 	arena_run_t *run;
1308a4bd5210SJason Evans 	size_t binind;
1309a4bd5210SJason Evans 
1310a4bd5210SJason Evans 	binind = SMALL_SIZE2BIN(size);
1311a4bd5210SJason Evans 	assert(binind < NBINS);
1312a4bd5210SJason Evans 	bin = &arena->bins[binind];
1313a4bd5210SJason Evans 	size = arena_bin_info[binind].reg_size;
1314a4bd5210SJason Evans 
1315a4bd5210SJason Evans 	malloc_mutex_lock(&bin->lock);
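	/*
	 * Fast path: take a region from the current run if one is free;
	 * otherwise arena_bin_malloc_hard() refills bin->runcur.
	 */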
1316a4bd5210SJason Evans 	if ((run = bin->runcur) != NULL && run->nfree > 0)
1317a4bd5210SJason Evans 		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
1318a4bd5210SJason Evans 	else
1319a4bd5210SJason Evans 		ret = arena_bin_malloc_hard(arena, bin);
1320a4bd5210SJason Evans 
1321a4bd5210SJason Evans 	if (ret == NULL) {
1322a4bd5210SJason Evans 		malloc_mutex_unlock(&bin->lock);
1323a4bd5210SJason Evans 		return (NULL);
1324a4bd5210SJason Evans 	}
1325a4bd5210SJason Evans 
1326a4bd5210SJason Evans 	if (config_stats) {
1327a4bd5210SJason Evans 		bin->stats.allocated += size;
1328a4bd5210SJason Evans 		bin->stats.nmalloc++;
1329a4bd5210SJason Evans 		bin->stats.nrequests++;
1330a4bd5210SJason Evans 	}
1331a4bd5210SJason Evans 	malloc_mutex_unlock(&bin->lock);
1332a4bd5210SJason Evans 	if (config_prof && isthreaded == false) {
1333a4bd5210SJason Evans 		malloc_mutex_lock(&arena->lock);
1334a4bd5210SJason Evans 		arena_prof_accum(arena, size);
1335a4bd5210SJason Evans 		malloc_mutex_unlock(&arena->lock);
1336a4bd5210SJason Evans 	}
1337a4bd5210SJason Evans 
1338a4bd5210SJason Evans 	if (zero == false) {
1339a4bd5210SJason Evans 		if (config_fill) {
1340a4bd5210SJason Evans 			if (opt_junk) {
1341a4bd5210SJason Evans 				arena_alloc_junk_small(ret,
1342a4bd5210SJason Evans 				    &arena_bin_info[binind], false);
1343a4bd5210SJason Evans 			} else if (opt_zero)
1344a4bd5210SJason Evans 				memset(ret, 0, size);
1345a4bd5210SJason Evans 		}
1346a4bd5210SJason Evans 	} else {
1347a4bd5210SJason Evans 		if (config_fill && opt_junk) {
1348a4bd5210SJason Evans 			arena_alloc_junk_small(ret, &arena_bin_info[binind],
1349a4bd5210SJason Evans 			    true);
1350a4bd5210SJason Evans 		}
1351a4bd5210SJason Evans 		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
1352a4bd5210SJason Evans 		memset(ret, 0, size);
1353a4bd5210SJason Evans 	}
1354a4bd5210SJason Evans 
1355a4bd5210SJason Evans 	return (ret);
1356a4bd5210SJason Evans }
1357a4bd5210SJason Evans 
1358a4bd5210SJason Evans void *
1359a4bd5210SJason Evans arena_malloc_large(arena_t *arena, size_t size, bool zero)
1360a4bd5210SJason Evans {
1361a4bd5210SJason Evans 	void *ret;
1362a4bd5210SJason Evans 
1363a4bd5210SJason Evans 	/* Large allocation. */
1364a4bd5210SJason Evans 	size = PAGE_CEILING(size);
1365a4bd5210SJason Evans 	malloc_mutex_lock(&arena->lock);
1366*e722f8f8SJason Evans 	ret = (void *)arena_run_alloc(arena, size, true, BININD_INVALID, zero);
1367a4bd5210SJason Evans 	if (ret == NULL) {
1368a4bd5210SJason Evans 		malloc_mutex_unlock(&arena->lock);
1369a4bd5210SJason Evans 		return (NULL);
1370a4bd5210SJason Evans 	}
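	/* lstats[] is indexed by the size class's page count minus one. */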
1371a4bd5210SJason Evans 	if (config_stats) {
1372a4bd5210SJason Evans 		arena->stats.nmalloc_large++;
1373a4bd5210SJason Evans 		arena->stats.nrequests_large++;
1374a4bd5210SJason Evans 		arena->stats.allocated_large += size;
1375a4bd5210SJason Evans 		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1376a4bd5210SJason Evans 		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1377a4bd5210SJason Evans 		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1378a4bd5210SJason Evans 	}
1379a4bd5210SJason Evans 	if (config_prof)
1380a4bd5210SJason Evans 		arena_prof_accum(arena, size);
1381a4bd5210SJason Evans 	malloc_mutex_unlock(&arena->lock);
1382a4bd5210SJason Evans 
1383a4bd5210SJason Evans 	if (zero == false) {
1384a4bd5210SJason Evans 		if (config_fill) {
1385a4bd5210SJason Evans 			if (opt_junk)
1386a4bd5210SJason Evans 				memset(ret, 0xa5, size);
1387a4bd5210SJason Evans 			else if (opt_zero)
1388a4bd5210SJason Evans 				memset(ret, 0, size);
1389a4bd5210SJason Evans 		}
1390a4bd5210SJason Evans 	}
1391a4bd5210SJason Evans 
1392a4bd5210SJason Evans 	return (ret);
1393a4bd5210SJason Evans }
1394a4bd5210SJason Evans 
1395a4bd5210SJason Evans /* Only handles large allocations that require more than page alignment. */
1396a4bd5210SJason Evans void *
1397a4bd5210SJason Evans arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
1398a4bd5210SJason Evans {
1399a4bd5210SJason Evans 	void *ret;
1400a4bd5210SJason Evans 	size_t alloc_size, leadsize, trailsize;
1401a4bd5210SJason Evans 	arena_run_t *run;
1402a4bd5210SJason Evans 	arena_chunk_t *chunk;
1403a4bd5210SJason Evans 
1404a4bd5210SJason Evans 	assert((size & PAGE_MASK) == 0);
1405a4bd5210SJason Evans 
1406a4bd5210SJason Evans 	alignment = PAGE_CEILING(alignment);
1407a4bd5210SJason Evans 	alloc_size = size + alignment - PAGE;
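	/*
	 * Over-allocate so that an aligned address is guaranteed to fall
	 * within the run, then trim the excess.  Runs are page-aligned, so
	 * at most alignment - PAGE leading bytes must be discarded.  For
	 * example (assuming 4 KiB pages), size == 16 KiB and alignment ==
	 * 16 KiB give alloc_size == 28 KiB.
	 */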
1408a4bd5210SJason Evans 
1409a4bd5210SJason Evans 	malloc_mutex_lock(&arena->lock);
1410*e722f8f8SJason Evans 	run = arena_run_alloc(arena, alloc_size, true, BININD_INVALID, zero);
1411a4bd5210SJason Evans 	if (run == NULL) {
1412a4bd5210SJason Evans 		malloc_mutex_unlock(&arena->lock);
1413a4bd5210SJason Evans 		return (NULL);
1414a4bd5210SJason Evans 	}
1415a4bd5210SJason Evans 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1416a4bd5210SJason Evans 
1417a4bd5210SJason Evans 	leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
1418a4bd5210SJason Evans 	    (uintptr_t)run;
1419a4bd5210SJason Evans 	assert(alloc_size >= leadsize + size);
1420a4bd5210SJason Evans 	trailsize = alloc_size - leadsize - size;
1421a4bd5210SJason Evans 	ret = (void *)((uintptr_t)run + leadsize);
1422a4bd5210SJason Evans 	if (leadsize != 0) {
1423a4bd5210SJason Evans 		arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
1424a4bd5210SJason Evans 		    leadsize);
1425a4bd5210SJason Evans 	}
1426a4bd5210SJason Evans 	if (trailsize != 0) {
1427a4bd5210SJason Evans 		arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
1428a4bd5210SJason Evans 		    false);
1429a4bd5210SJason Evans 	}
1430a4bd5210SJason Evans 
1431a4bd5210SJason Evans 	if (config_stats) {
1432a4bd5210SJason Evans 		arena->stats.nmalloc_large++;
1433a4bd5210SJason Evans 		arena->stats.nrequests_large++;
1434a4bd5210SJason Evans 		arena->stats.allocated_large += size;
1435a4bd5210SJason Evans 		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1436a4bd5210SJason Evans 		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1437a4bd5210SJason Evans 		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1438a4bd5210SJason Evans 	}
1439a4bd5210SJason Evans 	malloc_mutex_unlock(&arena->lock);
1440a4bd5210SJason Evans 
1441a4bd5210SJason Evans 	if (config_fill && zero == false) {
1442a4bd5210SJason Evans 		if (opt_junk)
1443a4bd5210SJason Evans 			memset(ret, 0xa5, size);
1444a4bd5210SJason Evans 		else if (opt_zero)
1445a4bd5210SJason Evans 			memset(ret, 0, size);
1446a4bd5210SJason Evans 	}
1447a4bd5210SJason Evans 	return (ret);
1448a4bd5210SJason Evans }
1449a4bd5210SJason Evans 
1450a4bd5210SJason Evans void
1451a4bd5210SJason Evans arena_prof_promoted(const void *ptr, size_t size)
1452a4bd5210SJason Evans {
1453a4bd5210SJason Evans 	arena_chunk_t *chunk;
1454a4bd5210SJason Evans 	size_t pageind, binind;
1455a4bd5210SJason Evans 
14568ed34ab0SJason Evans 	cassert(config_prof);
1457a4bd5210SJason Evans 	assert(ptr != NULL);
1458a4bd5210SJason Evans 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
1459a4bd5210SJason Evans 	assert(isalloc(ptr, false) == PAGE);
1460a4bd5210SJason Evans 	assert(isalloc(ptr, true) == PAGE);
1461a4bd5210SJason Evans 	assert(size <= SMALL_MAXCLASS);
1462a4bd5210SJason Evans 
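	/*
	 * Record the original small size class in the chunk map so that
	 * size queries which account for demotion (isalloc(ptr, true)) can
	 * report the requested size rather than the promoted page size.
	 */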
1463a4bd5210SJason Evans 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1464a4bd5210SJason Evans 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1465a4bd5210SJason Evans 	binind = SMALL_SIZE2BIN(size);
1466a4bd5210SJason Evans 	assert(binind < NBINS);
1467*e722f8f8SJason Evans 	arena_mapbits_large_binind_set(chunk, pageind, binind);
1468a4bd5210SJason Evans 
1469a4bd5210SJason Evans 	assert(isalloc(ptr, false) == PAGE);
1470a4bd5210SJason Evans 	assert(isalloc(ptr, true) == size);
1471a4bd5210SJason Evans }
1472a4bd5210SJason Evans 
1473a4bd5210SJason Evans static void
1474a4bd5210SJason Evans arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
1475a4bd5210SJason Evans     arena_bin_t *bin)
1476a4bd5210SJason Evans {
1477a4bd5210SJason Evans 
1478a4bd5210SJason Evans 	/* Dissociate run from bin. */
1479a4bd5210SJason Evans 	if (run == bin->runcur)
1480a4bd5210SJason Evans 		bin->runcur = NULL;
1481a4bd5210SJason Evans 	else {
1482a4bd5210SJason Evans 		size_t binind = arena_bin_index(chunk->arena, bin);
1483a4bd5210SJason Evans 		arena_bin_info_t *bin_info = &arena_bin_info[binind];
1484a4bd5210SJason Evans 
1485a4bd5210SJason Evans 		if (bin_info->nregs != 1) {
1486a4bd5210SJason Evans 			/*
1487a4bd5210SJason Evans 			 * This block's conditional is necessary because if the
1488a4bd5210SJason Evans 			 * run only contains one region, then it never gets
1489a4bd5210SJason Evans 			 * inserted into the non-full runs tree.
1490a4bd5210SJason Evans 			 */
1491a4bd5210SJason Evans 			arena_bin_runs_remove(bin, run);
1492a4bd5210SJason Evans 		}
1493a4bd5210SJason Evans 	}
1494a4bd5210SJason Evans }
1495a4bd5210SJason Evans 
1496a4bd5210SJason Evans static void
1497a4bd5210SJason Evans arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1498a4bd5210SJason Evans     arena_bin_t *bin)
1499a4bd5210SJason Evans {
1500a4bd5210SJason Evans 	size_t binind;
1501a4bd5210SJason Evans 	arena_bin_info_t *bin_info;
1502a4bd5210SJason Evans 	size_t npages, run_ind, past;
1503a4bd5210SJason Evans 
1504a4bd5210SJason Evans 	assert(run != bin->runcur);
1505*e722f8f8SJason Evans 	assert(arena_run_tree_search(&bin->runs,
1506*e722f8f8SJason Evans 	    arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
1507*e722f8f8SJason Evans 	    == NULL);
1508a4bd5210SJason Evans 
1509a4bd5210SJason Evans 	binind = arena_bin_index(chunk->arena, run->bin);
1510a4bd5210SJason Evans 	bin_info = &arena_bin_info[binind];
1511a4bd5210SJason Evans 
1512a4bd5210SJason Evans 	malloc_mutex_unlock(&bin->lock);
1513a4bd5210SJason Evans 	/******************************/
1514a4bd5210SJason Evans 	npages = bin_info->run_size >> LG_PAGE;
1515a4bd5210SJason Evans 	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
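	/*
	 * past is the first page beyond the highest region ever handed out
	 * from this run (run->nextind); any pages at or beyond it were never
	 * written to and can be trimmed as clean.
	 */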
1516a4bd5210SJason Evans 	past = (size_t)(PAGE_CEILING((uintptr_t)run +
1517a4bd5210SJason Evans 	    (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
1518a4bd5210SJason Evans 	    bin_info->reg_interval - bin_info->redzone_size) -
1519a4bd5210SJason Evans 	    (uintptr_t)chunk) >> LG_PAGE);
1520a4bd5210SJason Evans 	malloc_mutex_lock(&arena->lock);
1521a4bd5210SJason Evans 
1522a4bd5210SJason Evans 	/*
1523a4bd5210SJason Evans 	 * If the run was originally clean, and some pages were never touched,
1524a4bd5210SJason Evans 	 * trim the clean pages before deallocating the dirty portion of the
1525a4bd5210SJason Evans 	 * run.
1526a4bd5210SJason Evans 	 */
1527*e722f8f8SJason Evans 	if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
1528*e722f8f8SJason Evans 	    npages) {
1529a4bd5210SJason Evans 		/*
1530a4bd5210SJason Evans 		 * Trim clean pages.  Convert to large run beforehand.  Set the
1531a4bd5210SJason Evans 		 * last map element first, in case this is a one-page run.
1532a4bd5210SJason Evans 		 */
1533*e722f8f8SJason Evans 		arena_mapbits_large_set(chunk, run_ind+npages-1, 0,
1534*e722f8f8SJason Evans 		    arena_mapbits_unzeroed_get(chunk, run_ind+npages-1));
1535*e722f8f8SJason Evans 		arena_mapbits_large_set(chunk, run_ind, bin_info->run_size,
1536*e722f8f8SJason Evans 		    arena_mapbits_unzeroed_get(chunk, run_ind));
1537a4bd5210SJason Evans 		arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
1538a4bd5210SJason Evans 		    ((past - run_ind) << LG_PAGE), false);
1539a4bd5210SJason Evans 		/* npages = past - run_ind; */
1540a4bd5210SJason Evans 	}
1541a4bd5210SJason Evans 	arena_run_dalloc(arena, run, true);
1542a4bd5210SJason Evans 	malloc_mutex_unlock(&arena->lock);
1543a4bd5210SJason Evans 	/****************************/
1544a4bd5210SJason Evans 	malloc_mutex_lock(&bin->lock);
1545a4bd5210SJason Evans 	if (config_stats)
1546a4bd5210SJason Evans 		bin->stats.curruns--;
1547a4bd5210SJason Evans }
1548a4bd5210SJason Evans 
1549a4bd5210SJason Evans static void
1550a4bd5210SJason Evans arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1551a4bd5210SJason Evans     arena_bin_t *bin)
1552a4bd5210SJason Evans {
1553a4bd5210SJason Evans 
1554a4bd5210SJason Evans 	/*
1555a4bd5210SJason Evans 	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
1556a4bd5210SJason Evans 	 * non-full run.  It is okay to NULL runcur out rather than proactively
1557a4bd5210SJason Evans 	 * keeping it pointing at the lowest non-full run.
1558a4bd5210SJason Evans 	 */
1559a4bd5210SJason Evans 	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
1560a4bd5210SJason Evans 		/* Switch runcur. */
1561a4bd5210SJason Evans 		if (bin->runcur->nfree > 0)
1562a4bd5210SJason Evans 			arena_bin_runs_insert(bin, bin->runcur);
1563a4bd5210SJason Evans 		bin->runcur = run;
1564a4bd5210SJason Evans 		if (config_stats)
1565a4bd5210SJason Evans 			bin->stats.reruns++;
1566a4bd5210SJason Evans 	} else
1567a4bd5210SJason Evans 		arena_bin_runs_insert(bin, run);
1568a4bd5210SJason Evans }
1569a4bd5210SJason Evans 
1570a4bd5210SJason Evans void
1571*e722f8f8SJason Evans arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1572a4bd5210SJason Evans     arena_chunk_map_t *mapelm)
1573a4bd5210SJason Evans {
1574a4bd5210SJason Evans 	size_t pageind;
1575a4bd5210SJason Evans 	arena_run_t *run;
1576a4bd5210SJason Evans 	arena_bin_t *bin;
1577*e722f8f8SJason Evans 	arena_bin_info_t *bin_info;
1578*e722f8f8SJason Evans 	size_t size, binind;
1579a4bd5210SJason Evans 
1580a4bd5210SJason Evans 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1581a4bd5210SJason Evans 	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1582*e722f8f8SJason Evans 	    arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
1583a4bd5210SJason Evans 	bin = run->bin;
1584*e722f8f8SJason Evans 	binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
1585*e722f8f8SJason Evans 	bin_info = &arena_bin_info[binind];
1586a4bd5210SJason Evans 	if (config_fill || config_stats)
1587a4bd5210SJason Evans 		size = bin_info->reg_size;
1588a4bd5210SJason Evans 
1589a4bd5210SJason Evans 	if (config_fill && opt_junk)
1590a4bd5210SJason Evans 		arena_dalloc_junk_small(ptr, bin_info);
1591a4bd5210SJason Evans 
1592a4bd5210SJason Evans 	arena_run_reg_dalloc(run, ptr);
1593a4bd5210SJason Evans 	if (run->nfree == bin_info->nregs) {
1594a4bd5210SJason Evans 		arena_dissociate_bin_run(chunk, run, bin);
1595a4bd5210SJason Evans 		arena_dalloc_bin_run(arena, chunk, run, bin);
1596a4bd5210SJason Evans 	} else if (run->nfree == 1 && run != bin->runcur)
1597a4bd5210SJason Evans 		arena_bin_lower_run(arena, chunk, run, bin);
1598a4bd5210SJason Evans 
1599a4bd5210SJason Evans 	if (config_stats) {
1600a4bd5210SJason Evans 		bin->stats.allocated -= size;
1601a4bd5210SJason Evans 		bin->stats.ndalloc++;
1602a4bd5210SJason Evans 	}
1603a4bd5210SJason Evans }
1604a4bd5210SJason Evans 
1605a4bd5210SJason Evans void
1606*e722f8f8SJason Evans arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1607*e722f8f8SJason Evans     size_t pageind, arena_chunk_map_t *mapelm)
1608*e722f8f8SJason Evans {
1609*e722f8f8SJason Evans 	arena_run_t *run;
1610*e722f8f8SJason Evans 	arena_bin_t *bin;
1611*e722f8f8SJason Evans 
1612*e722f8f8SJason Evans 	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1613*e722f8f8SJason Evans 	    arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
1614*e722f8f8SJason Evans 	bin = run->bin;
1615*e722f8f8SJason Evans 	malloc_mutex_lock(&bin->lock);
1616*e722f8f8SJason Evans 	arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
1617*e722f8f8SJason Evans 	malloc_mutex_unlock(&bin->lock);
1618*e722f8f8SJason Evans }
1619*e722f8f8SJason Evans 
1620*e722f8f8SJason Evans void
1621*e722f8f8SJason Evans arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1622*e722f8f8SJason Evans     size_t pageind)
1623*e722f8f8SJason Evans {
1624*e722f8f8SJason Evans 	arena_chunk_map_t *mapelm;
1625*e722f8f8SJason Evans 
1626*e722f8f8SJason Evans 	if (config_debug) {
1627*e722f8f8SJason Evans 		/* arena_ptr_small_binind_get() does extra sanity checking. */
1628*e722f8f8SJason Evans 		assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
1629*e722f8f8SJason Evans 		    pageind)) != BININD_INVALID);
1630*e722f8f8SJason Evans 	}
1631*e722f8f8SJason Evans 	mapelm = arena_mapp_get(chunk, pageind);
1632*e722f8f8SJason Evans 	arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
1633*e722f8f8SJason Evans }
1634*e722f8f8SJason Evans void
1635a4bd5210SJason Evans arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
1636a4bd5210SJason Evans     arena_stats_t *astats, malloc_bin_stats_t *bstats,
1637a4bd5210SJason Evans     malloc_large_stats_t *lstats)
1638a4bd5210SJason Evans {
1639a4bd5210SJason Evans 	unsigned i;
1640a4bd5210SJason Evans 
1641a4bd5210SJason Evans 	malloc_mutex_lock(&arena->lock);
1642a4bd5210SJason Evans 	*nactive += arena->nactive;
1643a4bd5210SJason Evans 	*ndirty += arena->ndirty;
1644a4bd5210SJason Evans 
1645a4bd5210SJason Evans 	astats->mapped += arena->stats.mapped;
1646a4bd5210SJason Evans 	astats->npurge += arena->stats.npurge;
1647a4bd5210SJason Evans 	astats->nmadvise += arena->stats.nmadvise;
1648a4bd5210SJason Evans 	astats->purged += arena->stats.purged;
1649a4bd5210SJason Evans 	astats->allocated_large += arena->stats.allocated_large;
1650a4bd5210SJason Evans 	astats->nmalloc_large += arena->stats.nmalloc_large;
1651a4bd5210SJason Evans 	astats->ndalloc_large += arena->stats.ndalloc_large;
1652a4bd5210SJason Evans 	astats->nrequests_large += arena->stats.nrequests_large;
1653a4bd5210SJason Evans 
1654a4bd5210SJason Evans 	for (i = 0; i < nlclasses; i++) {
1655a4bd5210SJason Evans 		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
1656a4bd5210SJason Evans 		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
1657a4bd5210SJason Evans 		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
1658a4bd5210SJason Evans 		lstats[i].curruns += arena->stats.lstats[i].curruns;
1659a4bd5210SJason Evans 	}
1660a4bd5210SJason Evans 	malloc_mutex_unlock(&arena->lock);
1661a4bd5210SJason Evans 
1662a4bd5210SJason Evans 	for (i = 0; i < NBINS; i++) {
1663a4bd5210SJason Evans 		arena_bin_t *bin = &arena->bins[i];
1664a4bd5210SJason Evans 
1665a4bd5210SJason Evans 		malloc_mutex_lock(&bin->lock);
1666a4bd5210SJason Evans 		bstats[i].allocated += bin->stats.allocated;
1667a4bd5210SJason Evans 		bstats[i].nmalloc += bin->stats.nmalloc;
1668a4bd5210SJason Evans 		bstats[i].ndalloc += bin->stats.ndalloc;
1669a4bd5210SJason Evans 		bstats[i].nrequests += bin->stats.nrequests;
1670a4bd5210SJason Evans 		if (config_tcache) {
1671a4bd5210SJason Evans 			bstats[i].nfills += bin->stats.nfills;
1672a4bd5210SJason Evans 			bstats[i].nflushes += bin->stats.nflushes;
1673a4bd5210SJason Evans 		}
1674a4bd5210SJason Evans 		bstats[i].nruns += bin->stats.nruns;
1675a4bd5210SJason Evans 		bstats[i].reruns += bin->stats.reruns;
1676a4bd5210SJason Evans 		bstats[i].curruns += bin->stats.curruns;
1677a4bd5210SJason Evans 		malloc_mutex_unlock(&bin->lock);
1678a4bd5210SJason Evans 	}
1679a4bd5210SJason Evans }
1680a4bd5210SJason Evans 
1681a4bd5210SJason Evans void
1682*e722f8f8SJason Evans arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1683a4bd5210SJason Evans {
1684a4bd5210SJason Evans 
1685a4bd5210SJason Evans 	if (config_fill || config_stats) {
1686a4bd5210SJason Evans 		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1687*e722f8f8SJason Evans 		size_t size = arena_mapbits_large_size_get(chunk, pageind);
1688a4bd5210SJason Evans 
1689a4bd5210SJason Evans 		if (config_fill && opt_junk)
1690a4bd5210SJason Evans 			memset(ptr, 0x5a, size);
1691a4bd5210SJason Evans 		if (config_stats) {
1692a4bd5210SJason Evans 			arena->stats.ndalloc_large++;
1693a4bd5210SJason Evans 			arena->stats.allocated_large -= size;
1694a4bd5210SJason Evans 			arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
1695a4bd5210SJason Evans 			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
1696a4bd5210SJason Evans 		}
1697a4bd5210SJason Evans 	}
1698a4bd5210SJason Evans 
1699a4bd5210SJason Evans 	arena_run_dalloc(arena, (arena_run_t *)ptr, true);
1700a4bd5210SJason Evans }
1701a4bd5210SJason Evans 
1702*e722f8f8SJason Evans void
1703*e722f8f8SJason Evans arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1704*e722f8f8SJason Evans {
1705*e722f8f8SJason Evans 
1706*e722f8f8SJason Evans 	malloc_mutex_lock(&arena->lock);
1707*e722f8f8SJason Evans 	arena_dalloc_large_locked(arena, chunk, ptr);
1708*e722f8f8SJason Evans 	malloc_mutex_unlock(&arena->lock);
1709*e722f8f8SJason Evans }
1710*e722f8f8SJason Evans 
1711a4bd5210SJason Evans static void
1712a4bd5210SJason Evans arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1713a4bd5210SJason Evans     size_t oldsize, size_t size)
1714a4bd5210SJason Evans {
1715a4bd5210SJason Evans 
1716a4bd5210SJason Evans 	assert(size < oldsize);
1717a4bd5210SJason Evans 
1718a4bd5210SJason Evans 	/*
1719a4bd5210SJason Evans 	 * Shrink the run, and make trailing pages available for other
1720a4bd5210SJason Evans 	 * allocations.
1721a4bd5210SJason Evans 	 */
1722a4bd5210SJason Evans 	malloc_mutex_lock(&arena->lock);
1723a4bd5210SJason Evans 	arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
1724a4bd5210SJason Evans 	    true);
1725a4bd5210SJason Evans 	if (config_stats) {
1726a4bd5210SJason Evans 		arena->stats.ndalloc_large++;
1727a4bd5210SJason Evans 		arena->stats.allocated_large -= oldsize;
1728a4bd5210SJason Evans 		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1729a4bd5210SJason Evans 		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
1730a4bd5210SJason Evans 
1731a4bd5210SJason Evans 		arena->stats.nmalloc_large++;
1732a4bd5210SJason Evans 		arena->stats.nrequests_large++;
1733a4bd5210SJason Evans 		arena->stats.allocated_large += size;
1734a4bd5210SJason Evans 		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1735a4bd5210SJason Evans 		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1736a4bd5210SJason Evans 		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1737a4bd5210SJason Evans 	}
1738a4bd5210SJason Evans 	malloc_mutex_unlock(&arena->lock);
1739a4bd5210SJason Evans }
1740a4bd5210SJason Evans 
1741a4bd5210SJason Evans static bool
1742a4bd5210SJason Evans arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1743a4bd5210SJason Evans     size_t oldsize, size_t size, size_t extra, bool zero)
1744a4bd5210SJason Evans {
1745a4bd5210SJason Evans 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1746a4bd5210SJason Evans 	size_t npages = oldsize >> LG_PAGE;
1747a4bd5210SJason Evans 	size_t followsize;
1748a4bd5210SJason Evans 
1749*e722f8f8SJason Evans 	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
1750a4bd5210SJason Evans 
1751a4bd5210SJason Evans 	/* Try to extend the run. */
1752a4bd5210SJason Evans 	assert(size + extra > oldsize);
1753a4bd5210SJason Evans 	malloc_mutex_lock(&arena->lock);
1754a4bd5210SJason Evans 	if (pageind + npages < chunk_npages &&
1755*e722f8f8SJason Evans 	    arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
1756*e722f8f8SJason Evans 	    (followsize = arena_mapbits_unallocated_size_get(chunk,
1757*e722f8f8SJason Evans 	    pageind+npages)) >= size - oldsize) {
1758a4bd5210SJason Evans 		/*
1759a4bd5210SJason Evans 		 * The next run is available and sufficiently large.  Split the
1760a4bd5210SJason Evans 		 * following run, then merge the first part with the existing
1761a4bd5210SJason Evans 		 * allocation.
1762a4bd5210SJason Evans 		 */
1763a4bd5210SJason Evans 		size_t flag_dirty;
1764a4bd5210SJason Evans 		size_t splitsize = (oldsize + followsize <= size + extra)
1765a4bd5210SJason Evans 		    ? followsize : size + extra - oldsize;
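		/*
		 * splitsize is how much of the following free run to take:
		 * all of it if oldsize + followsize still fits within
		 * size + extra, otherwise just enough to reach size + extra.
		 */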
1766a4bd5210SJason Evans 		arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
1767*e722f8f8SJason Evans 		    ((pageind+npages) << LG_PAGE)), splitsize, true,
1768*e722f8f8SJason Evans 		    BININD_INVALID, zero);
1769a4bd5210SJason Evans 
1770a4bd5210SJason Evans 		size = oldsize + splitsize;
1771a4bd5210SJason Evans 		npages = size >> LG_PAGE;
1772a4bd5210SJason Evans 
1773a4bd5210SJason Evans 		/*
1774a4bd5210SJason Evans 		 * Mark the extended run as dirty if either portion of the run
1775a4bd5210SJason Evans 		 * was dirty before allocation.  This is rather pedantic,
1776a4bd5210SJason Evans 		 * because there's not actually any sequence of events that
1777a4bd5210SJason Evans 		 * could cause the resulting run to be passed to
1778a4bd5210SJason Evans 		 * arena_run_dalloc() with the dirty argument set to false
1779a4bd5210SJason Evans 		 * (which is when dirty flag consistency would really matter).
1780a4bd5210SJason Evans 		 */
1781*e722f8f8SJason Evans 		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
1782*e722f8f8SJason Evans 		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
1783*e722f8f8SJason Evans 		arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
1784*e722f8f8SJason Evans 		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
1785a4bd5210SJason Evans 
1786a4bd5210SJason Evans 		if (config_stats) {
1787a4bd5210SJason Evans 			arena->stats.ndalloc_large++;
1788a4bd5210SJason Evans 			arena->stats.allocated_large -= oldsize;
1789*e722f8f8SJason Evans 			arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1790*e722f8f8SJason Evans 			arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
1791a4bd5210SJason Evans 
1792a4bd5210SJason Evans 			arena->stats.nmalloc_large++;
1793a4bd5210SJason Evans 			arena->stats.nrequests_large++;
1794a4bd5210SJason Evans 			arena->stats.allocated_large += size;
1795a4bd5210SJason Evans 			arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1796*e722f8f8SJason Evans 			arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1797a4bd5210SJason Evans 			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1798a4bd5210SJason Evans 		}
1799a4bd5210SJason Evans 		malloc_mutex_unlock(&arena->lock);
1800a4bd5210SJason Evans 		return (false);
1801a4bd5210SJason Evans 	}
1802a4bd5210SJason Evans 	malloc_mutex_unlock(&arena->lock);
1803a4bd5210SJason Evans 
1804a4bd5210SJason Evans 	return (true);
1805a4bd5210SJason Evans }
1806a4bd5210SJason Evans 
1807a4bd5210SJason Evans /*
1808a4bd5210SJason Evans  * Try to resize a large allocation, in order to avoid copying.  This will
1809a4bd5210SJason Evans  * always fail if growing an object and the following run is already in use.
1810a4bd5210SJason Evans  */
1811a4bd5210SJason Evans static bool
1812a4bd5210SJason Evans arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
1813a4bd5210SJason Evans     bool zero)
1814a4bd5210SJason Evans {
1815a4bd5210SJason Evans 	size_t psize;
1816a4bd5210SJason Evans 
1817a4bd5210SJason Evans 	psize = PAGE_CEILING(size + extra);
1818a4bd5210SJason Evans 	if (psize == oldsize) {
1819a4bd5210SJason Evans 		/* Same size class. */
1820a4bd5210SJason Evans 		if (config_fill && opt_junk && size < oldsize) {
1821a4bd5210SJason Evans 			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
1822a4bd5210SJason Evans 			    size);
1823a4bd5210SJason Evans 		}
1824a4bd5210SJason Evans 		return (false);
1825a4bd5210SJason Evans 	} else {
1826a4bd5210SJason Evans 		arena_chunk_t *chunk;
1827a4bd5210SJason Evans 		arena_t *arena;
1828a4bd5210SJason Evans 
1829a4bd5210SJason Evans 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1830a4bd5210SJason Evans 		arena = chunk->arena;
1831a4bd5210SJason Evans 
1832a4bd5210SJason Evans 		if (psize < oldsize) {
1833a4bd5210SJason Evans 			/* Fill before shrinking in order to avoid a race. */
1834a4bd5210SJason Evans 			if (config_fill && opt_junk) {
1835a4bd5210SJason Evans 				memset((void *)((uintptr_t)ptr + size), 0x5a,
1836a4bd5210SJason Evans 				    oldsize - size);
1837a4bd5210SJason Evans 			}
1838a4bd5210SJason Evans 			arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
1839a4bd5210SJason Evans 			    psize);
1840a4bd5210SJason Evans 			return (false);
1841a4bd5210SJason Evans 		} else {
1842a4bd5210SJason Evans 			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
1843a4bd5210SJason Evans 			    oldsize, PAGE_CEILING(size),
1844a4bd5210SJason Evans 			    psize - PAGE_CEILING(size), zero);
1845a4bd5210SJason Evans 			if (config_fill && ret == false && zero == false &&
1846a4bd5210SJason Evans 			    opt_zero) {
1847a4bd5210SJason Evans 				memset((void *)((uintptr_t)ptr + oldsize), 0,
1848a4bd5210SJason Evans 				    size - oldsize);
1849a4bd5210SJason Evans 			}
1850a4bd5210SJason Evans 			return (ret);
1851a4bd5210SJason Evans 		}
1852a4bd5210SJason Evans 	}
1853a4bd5210SJason Evans }
1854a4bd5210SJason Evans 
1855a4bd5210SJason Evans void *
1856a4bd5210SJason Evans arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
1857a4bd5210SJason Evans     bool zero)
1858a4bd5210SJason Evans {
1859a4bd5210SJason Evans 
1860a4bd5210SJason Evans 	/*
1861a4bd5210SJason Evans 	 * Avoid moving the allocation if the size class can be left the same.
1862a4bd5210SJason Evans 	 */
1863a4bd5210SJason Evans 	if (oldsize <= arena_maxclass) {
1864a4bd5210SJason Evans 		if (oldsize <= SMALL_MAXCLASS) {
1865a4bd5210SJason Evans 			assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
1866a4bd5210SJason Evans 			    == oldsize);
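			/*
			 * Stay in place if size + extra maps to the same
			 * small size class as oldsize, or if this is a
			 * shrink request whose extra reaches back up to at
			 * least oldsize.
			 */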
1867a4bd5210SJason Evans 			if ((size + extra <= SMALL_MAXCLASS &&
1868a4bd5210SJason Evans 			    SMALL_SIZE2BIN(size + extra) ==
1869a4bd5210SJason Evans 			    SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
1870a4bd5210SJason Evans 			    size + extra >= oldsize)) {
1871a4bd5210SJason Evans 				if (config_fill && opt_junk && size < oldsize) {
1872a4bd5210SJason Evans 					memset((void *)((uintptr_t)ptr + size),
1873a4bd5210SJason Evans 					    0x5a, oldsize - size);
1874a4bd5210SJason Evans 				}
1875a4bd5210SJason Evans 				return (ptr);
1876a4bd5210SJason Evans 			}
1877a4bd5210SJason Evans 		} else {
1878a4bd5210SJason Evans 			assert(size <= arena_maxclass);
1879a4bd5210SJason Evans 			if (size + extra > SMALL_MAXCLASS) {
1880a4bd5210SJason Evans 				if (arena_ralloc_large(ptr, oldsize, size,
1881a4bd5210SJason Evans 				    extra, zero) == false)
1882a4bd5210SJason Evans 					return (ptr);
1883a4bd5210SJason Evans 			}
1884a4bd5210SJason Evans 		}
1885a4bd5210SJason Evans 	}
1886a4bd5210SJason Evans 
1887a4bd5210SJason Evans 	/* Reallocation would require a move. */
1888a4bd5210SJason Evans 	return (NULL);
1889a4bd5210SJason Evans }
1890a4bd5210SJason Evans 
1891a4bd5210SJason Evans void *
1892a4bd5210SJason Evans arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
1893a4bd5210SJason Evans     size_t alignment, bool zero, bool try_tcache)
1894a4bd5210SJason Evans {
1895a4bd5210SJason Evans 	void *ret;
1896a4bd5210SJason Evans 	size_t copysize;
1897a4bd5210SJason Evans 
1898a4bd5210SJason Evans 	/* Try to avoid moving the allocation. */
1899a4bd5210SJason Evans 	ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
1900a4bd5210SJason Evans 	if (ret != NULL)
1901a4bd5210SJason Evans 		return (ret);
1902a4bd5210SJason Evans 
1903a4bd5210SJason Evans 	/*
1904a4bd5210SJason Evans 	 * size and oldsize are different enough that we need to move the
1905a4bd5210SJason Evans 	 * object.  In that case, fall back to allocating new space and
1906a4bd5210SJason Evans 	 * copying.
1907a4bd5210SJason Evans 	 */
1908a4bd5210SJason Evans 	if (alignment != 0) {
1909a4bd5210SJason Evans 		size_t usize = sa2u(size + extra, alignment);
1910a4bd5210SJason Evans 		if (usize == 0)
1911a4bd5210SJason Evans 			return (NULL);
1912a4bd5210SJason Evans 		ret = ipalloc(usize, alignment, zero);
1913a4bd5210SJason Evans 	} else
1914a4bd5210SJason Evans 		ret = arena_malloc(NULL, size + extra, zero, try_tcache);
1915a4bd5210SJason Evans 
1916a4bd5210SJason Evans 	if (ret == NULL) {
1917a4bd5210SJason Evans 		if (extra == 0)
1918a4bd5210SJason Evans 			return (NULL);
1919a4bd5210SJason Evans 		/* Try again, this time without extra. */
1920a4bd5210SJason Evans 		if (alignment != 0) {
1921a4bd5210SJason Evans 			size_t usize = sa2u(size, alignment);
1922a4bd5210SJason Evans 			if (usize == 0)
1923a4bd5210SJason Evans 				return (NULL);
1924a4bd5210SJason Evans 			ret = ipalloc(usize, alignment, zero);
1925a4bd5210SJason Evans 		} else
1926a4bd5210SJason Evans 			ret = arena_malloc(NULL, size, zero, try_tcache);
1927a4bd5210SJason Evans 
1928a4bd5210SJason Evans 		if (ret == NULL)
1929a4bd5210SJason Evans 			return (NULL);
1930a4bd5210SJason Evans 	}
1931a4bd5210SJason Evans 
1932a4bd5210SJason Evans 	/* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
1933a4bd5210SJason Evans 
1934a4bd5210SJason Evans 	/*
1935a4bd5210SJason Evans 	 * Copy at most size bytes (not size+extra), since the caller has no
1936a4bd5210SJason Evans 	 * expectation that the extra bytes will be reliably preserved.
1937a4bd5210SJason Evans 	 */
1938a4bd5210SJason Evans 	copysize = (size < oldsize) ? size : oldsize;
1939*e722f8f8SJason Evans 	VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
1940a4bd5210SJason Evans 	memcpy(ret, ptr, copysize);
1941a4bd5210SJason Evans 	iqalloc(ptr);
1942a4bd5210SJason Evans 	return (ret);
1943a4bd5210SJason Evans }
1944a4bd5210SJason Evans 
1945a4bd5210SJason Evans bool
1946a4bd5210SJason Evans arena_new(arena_t *arena, unsigned ind)
1947a4bd5210SJason Evans {
1948a4bd5210SJason Evans 	unsigned i;
1949a4bd5210SJason Evans 	arena_bin_t *bin;
1950a4bd5210SJason Evans 
1951a4bd5210SJason Evans 	arena->ind = ind;
1952a4bd5210SJason Evans 	arena->nthreads = 0;
1953a4bd5210SJason Evans 
1954a4bd5210SJason Evans 	if (malloc_mutex_init(&arena->lock))
1955a4bd5210SJason Evans 		return (true);
1956a4bd5210SJason Evans 
1957a4bd5210SJason Evans 	if (config_stats) {
1958a4bd5210SJason Evans 		memset(&arena->stats, 0, sizeof(arena_stats_t));
1959a4bd5210SJason Evans 		arena->stats.lstats =
1960a4bd5210SJason Evans 		    (malloc_large_stats_t *)base_alloc(nlclasses *
1961a4bd5210SJason Evans 		    sizeof(malloc_large_stats_t));
1962a4bd5210SJason Evans 		if (arena->stats.lstats == NULL)
1963a4bd5210SJason Evans 			return (true);
1964a4bd5210SJason Evans 		memset(arena->stats.lstats, 0, nlclasses *
1965a4bd5210SJason Evans 		    sizeof(malloc_large_stats_t));
1966a4bd5210SJason Evans 		if (config_tcache)
1967a4bd5210SJason Evans 			ql_new(&arena->tcache_ql);
1968a4bd5210SJason Evans 	}
1969a4bd5210SJason Evans 
1970a4bd5210SJason Evans 	if (config_prof)
1971a4bd5210SJason Evans 		arena->prof_accumbytes = 0;
1972a4bd5210SJason Evans 
1973a4bd5210SJason Evans 	/* Initialize chunks. */
1974a4bd5210SJason Evans 	ql_new(&arena->chunks_dirty);
1975a4bd5210SJason Evans 	arena->spare = NULL;
1976a4bd5210SJason Evans 
1977a4bd5210SJason Evans 	arena->nactive = 0;
1978a4bd5210SJason Evans 	arena->ndirty = 0;
1979a4bd5210SJason Evans 	arena->npurgatory = 0;
1980a4bd5210SJason Evans 
1981a4bd5210SJason Evans 	arena_avail_tree_new(&arena->runs_avail_clean);
1982a4bd5210SJason Evans 	arena_avail_tree_new(&arena->runs_avail_dirty);
1983a4bd5210SJason Evans 
1984a4bd5210SJason Evans 	/* Initialize bins. */
1985a4bd5210SJason Evans 	for (i = 0; i < NBINS; i++) {
1986a4bd5210SJason Evans 		bin = &arena->bins[i];
1987a4bd5210SJason Evans 		if (malloc_mutex_init(&bin->lock))
1988a4bd5210SJason Evans 			return (true);
1989a4bd5210SJason Evans 		bin->runcur = NULL;
1990a4bd5210SJason Evans 		arena_run_tree_new(&bin->runs);
1991a4bd5210SJason Evans 		if (config_stats)
1992a4bd5210SJason Evans 			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
1993a4bd5210SJason Evans 	}
1994a4bd5210SJason Evans 
1995a4bd5210SJason Evans 	return (false);
1996a4bd5210SJason Evans }
1997a4bd5210SJason Evans 
1998a4bd5210SJason Evans /*
1999a4bd5210SJason Evans  * Calculate bin_info->run_size such that it meets the following constraints:
2000a4bd5210SJason Evans  *
2001a4bd5210SJason Evans  *   *) bin_info->run_size >= min_run_size
2002a4bd5210SJason Evans  *   *) bin_info->run_size <= arena_maxclass
2003a4bd5210SJason Evans  *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
2004a4bd5210SJason Evans  *   *) bin_info->nregs <= RUN_MAXREGS
2005a4bd5210SJason Evans  *
2006a4bd5210SJason Evans  * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
2007a4bd5210SJason Evans  * calculated here, since these settings are all interdependent.
2008a4bd5210SJason Evans  */
2009a4bd5210SJason Evans static size_t
2010a4bd5210SJason Evans bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
2011a4bd5210SJason Evans {
2012a4bd5210SJason Evans 	size_t pad_size;
2013a4bd5210SJason Evans 	size_t try_run_size, good_run_size;
2014a4bd5210SJason Evans 	uint32_t try_nregs, good_nregs;
2015a4bd5210SJason Evans 	uint32_t try_hdr_size, good_hdr_size;
2016a4bd5210SJason Evans 	uint32_t try_bitmap_offset, good_bitmap_offset;
2017a4bd5210SJason Evans 	uint32_t try_ctx0_offset, good_ctx0_offset;
2018a4bd5210SJason Evans 	uint32_t try_redzone0_offset, good_redzone0_offset;
2019a4bd5210SJason Evans 
2020a4bd5210SJason Evans 	assert(min_run_size >= PAGE);
2021a4bd5210SJason Evans 	assert(min_run_size <= arena_maxclass);
2022a4bd5210SJason Evans 
2023a4bd5210SJason Evans 	/*
2024a4bd5210SJason Evans 	 * Determine redzone size based on minimum alignment and minimum
2025a4bd5210SJason Evans 	 * redzone size.  Add padding to the end of the run if it is needed to
2026a4bd5210SJason Evans 	 * align the regions.  The padding allows each redzone to be half the
2027a4bd5210SJason Evans 	 * minimum alignment; without the padding, each redzone would have to
2028a4bd5210SJason Evans 	 * be twice as large in order to maintain alignment.
2029a4bd5210SJason Evans 	 */
2030a4bd5210SJason Evans 	if (config_fill && opt_redzone) {
2031a4bd5210SJason Evans 		size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
2032a4bd5210SJason Evans 		if (align_min <= REDZONE_MINSIZE) {
2033a4bd5210SJason Evans 			bin_info->redzone_size = REDZONE_MINSIZE;
2034a4bd5210SJason Evans 			pad_size = 0;
2035a4bd5210SJason Evans 		} else {
2036a4bd5210SJason Evans 			bin_info->redzone_size = align_min >> 1;
2037a4bd5210SJason Evans 			pad_size = bin_info->redzone_size;
2038a4bd5210SJason Evans 		}
2039a4bd5210SJason Evans 	} else {
2040a4bd5210SJason Evans 		bin_info->redzone_size = 0;
2041a4bd5210SJason Evans 		pad_size = 0;
2042a4bd5210SJason Evans 	}
2043a4bd5210SJason Evans 	bin_info->reg_interval = bin_info->reg_size +
2044a4bd5210SJason Evans 	    (bin_info->redzone_size << 1);
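	/*
	 * reg_interval is the stride between regions: the usable region
	 * plus one redzone on each side (zero when redzones are disabled).
	 */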
2045a4bd5210SJason Evans 
2046a4bd5210SJason Evans 	/*
2047a4bd5210SJason Evans 	 * Calculate known-valid settings before entering the run_size
2048a4bd5210SJason Evans 	 * expansion loop, so that the first part of the loop always copies
2049a4bd5210SJason Evans 	 * valid settings.
2050a4bd5210SJason Evans 	 *
2051a4bd5210SJason Evans 	 * The do..while loop iteratively reduces the number of regions until
2052a4bd5210SJason Evans 	 * the run header and the regions no longer overlap.  A closed formula
2053a4bd5210SJason Evans 	 * would be quite messy, since there is an interdependency between the
2054a4bd5210SJason Evans 	 * header's mask length and the number of regions.
2055a4bd5210SJason Evans 	 */
2056a4bd5210SJason Evans 	try_run_size = min_run_size;
2057a4bd5210SJason Evans 	try_nregs = ((try_run_size - sizeof(arena_run_t)) /
2058a4bd5210SJason Evans 	    bin_info->reg_interval)
2059a4bd5210SJason Evans 	    + 1; /* Counter-act try_nregs-- in loop. */
2060a4bd5210SJason Evans 	if (try_nregs > RUN_MAXREGS) {
2061a4bd5210SJason Evans 		try_nregs = RUN_MAXREGS
2062a4bd5210SJason Evans 		    + 1; /* Counter-act try_nregs-- in loop. */
2063a4bd5210SJason Evans 	}
2064a4bd5210SJason Evans 	do {
2065a4bd5210SJason Evans 		try_nregs--;
2066a4bd5210SJason Evans 		try_hdr_size = sizeof(arena_run_t);
2067a4bd5210SJason Evans 		/* Pad to a long boundary. */
2068a4bd5210SJason Evans 		try_hdr_size = LONG_CEILING(try_hdr_size);
2069a4bd5210SJason Evans 		try_bitmap_offset = try_hdr_size;
2070a4bd5210SJason Evans 		/* Add space for bitmap. */
2071a4bd5210SJason Evans 		try_hdr_size += bitmap_size(try_nregs);
2072a4bd5210SJason Evans 		if (config_prof && opt_prof && prof_promote == false) {
2073a4bd5210SJason Evans 			/* Pad to a quantum boundary. */
2074a4bd5210SJason Evans 			try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2075a4bd5210SJason Evans 			try_ctx0_offset = try_hdr_size;
2076a4bd5210SJason Evans 			/* Add space for one (prof_ctx_t *) per region. */
2077a4bd5210SJason Evans 			try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
2078a4bd5210SJason Evans 		} else
2079a4bd5210SJason Evans 			try_ctx0_offset = 0;
2080a4bd5210SJason Evans 		try_redzone0_offset = try_run_size - (try_nregs *
2081a4bd5210SJason Evans 		    bin_info->reg_interval) - pad_size;
2082a4bd5210SJason Evans 	} while (try_hdr_size > try_redzone0_offset);
2083a4bd5210SJason Evans 
2084a4bd5210SJason Evans 	/* run_size expansion loop. */
2085a4bd5210SJason Evans 	do {
2086a4bd5210SJason Evans 		/*
2087a4bd5210SJason Evans 		 * Copy valid settings before trying more aggressive settings.
2088a4bd5210SJason Evans 		 */
2089a4bd5210SJason Evans 		good_run_size = try_run_size;
2090a4bd5210SJason Evans 		good_nregs = try_nregs;
2091a4bd5210SJason Evans 		good_hdr_size = try_hdr_size;
2092a4bd5210SJason Evans 		good_bitmap_offset = try_bitmap_offset;
2093a4bd5210SJason Evans 		good_ctx0_offset = try_ctx0_offset;
2094a4bd5210SJason Evans 		good_redzone0_offset = try_redzone0_offset;
2095a4bd5210SJason Evans 
2096a4bd5210SJason Evans 		/* Try more aggressive settings. */
2097a4bd5210SJason Evans 		try_run_size += PAGE;
2098a4bd5210SJason Evans 		try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
2099a4bd5210SJason Evans 		    bin_info->reg_interval)
2100a4bd5210SJason Evans 		    + 1; /* Counter-act try_nregs-- in loop. */
2101a4bd5210SJason Evans 		if (try_nregs > RUN_MAXREGS) {
2102a4bd5210SJason Evans 			try_nregs = RUN_MAXREGS
2103a4bd5210SJason Evans 			    + 1; /* Counter-act try_nregs-- in loop. */
2104a4bd5210SJason Evans 		}
2105a4bd5210SJason Evans 		do {
2106a4bd5210SJason Evans 			try_nregs--;
2107a4bd5210SJason Evans 			try_hdr_size = sizeof(arena_run_t);
2108a4bd5210SJason Evans 			/* Pad to a long boundary. */
2109a4bd5210SJason Evans 			try_hdr_size = LONG_CEILING(try_hdr_size);
2110a4bd5210SJason Evans 			try_bitmap_offset = try_hdr_size;
2111a4bd5210SJason Evans 			/* Add space for bitmap. */
2112a4bd5210SJason Evans 			try_hdr_size += bitmap_size(try_nregs);
2113a4bd5210SJason Evans 			if (config_prof && opt_prof && prof_promote == false) {
2114a4bd5210SJason Evans 				/* Pad to a quantum boundary. */
2115a4bd5210SJason Evans 				try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2116a4bd5210SJason Evans 				try_ctx0_offset = try_hdr_size;
2117a4bd5210SJason Evans 				/*
2118a4bd5210SJason Evans 				 * Add space for one (prof_ctx_t *) per region.
2119a4bd5210SJason Evans 				 */
2120a4bd5210SJason Evans 				try_hdr_size += try_nregs *
2121a4bd5210SJason Evans 				    sizeof(prof_ctx_t *);
2122a4bd5210SJason Evans 			}
2123a4bd5210SJason Evans 			try_redzone0_offset = try_run_size - (try_nregs *
2124a4bd5210SJason Evans 			    bin_info->reg_interval) - pad_size;
2125a4bd5210SJason Evans 		} while (try_hdr_size > try_redzone0_offset);
2126a4bd5210SJason Evans 	} while (try_run_size <= arena_maxclass
2128a4bd5210SJason Evans 	    && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
2129a4bd5210SJason Evans 	    RUN_MAX_OVRHD_RELAX
2130a4bd5210SJason Evans 	    && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
2131a4bd5210SJason Evans 	    && try_nregs < RUN_MAXREGS);
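	/*
	 * The loop above keeps growing the run by one page while all of the
	 * following hold: the run still fits within a chunk, the overhead
	 * limit has not been relaxed for this (large) region interval, the
	 * unused space at the head of the run still exceeds the tolerated
	 * overhead, and more regions could still be packed in.  The overhead
	 * test is fixed point: (try_redzone0_offset << RUN_BFP) >
	 * RUN_MAX_OVRHD * try_run_size is the same as try_redzone0_offset /
	 * try_run_size > RUN_MAX_OVRHD / 2^RUN_BFP.  As an illustration only,
	 * with the commonly used definitions RUN_BFP == 12 and RUN_MAX_OVRHD
	 * == 0x3d, expansion stops once less than roughly 1.5% of the run is
	 * spent on header and padding.
	 */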
2132a4bd5210SJason Evans 
2133a4bd5210SJason Evans 	assert(good_hdr_size <= good_redzone0_offset);
2134a4bd5210SJason Evans 
2135a4bd5210SJason Evans 	/* Copy final settings. */
2136a4bd5210SJason Evans 	bin_info->run_size = good_run_size;
2137a4bd5210SJason Evans 	bin_info->nregs = good_nregs;
2138a4bd5210SJason Evans 	bin_info->bitmap_offset = good_bitmap_offset;
2139a4bd5210SJason Evans 	bin_info->ctx0_offset = good_ctx0_offset;
2140a4bd5210SJason Evans 	bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
2141a4bd5210SJason Evans 
2142a4bd5210SJason Evans 	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
2143a4bd5210SJason Evans 	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
2144a4bd5210SJason Evans 
2145a4bd5210SJason Evans 	return (good_run_size);
2146a4bd5210SJason Evans }
2147a4bd5210SJason Evans 
2148a4bd5210SJason Evans static void
2149a4bd5210SJason Evans bin_info_init(void)
2150a4bd5210SJason Evans {
2151a4bd5210SJason Evans 	arena_bin_info_t *bin_info;
2152a4bd5210SJason Evans 	size_t prev_run_size = PAGE;
2153a4bd5210SJason Evans 
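	/*
	 * SIZE_CLASSES expands SIZE_CLASS() once per small size class, so
	 * the block below fills in reg_size, the run layout, and the bitmap
	 * metadata for every element of arena_bin_info[].  prev_run_size is
	 * threaded through the calls so that each bin starts its run size
	 * search from the previous bin's result instead of from a single
	 * page.
	 */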
2154a4bd5210SJason Evans #define	SIZE_CLASS(bin, delta, size)					\
2155a4bd5210SJason Evans 	bin_info = &arena_bin_info[bin];				\
2156a4bd5210SJason Evans 	bin_info->reg_size = size;					\
2157a4bd5210SJason Evans 	prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
2158a4bd5210SJason Evans 	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
2159a4bd5210SJason Evans 	SIZE_CLASSES
2160a4bd5210SJason Evans #undef SIZE_CLASS
2161a4bd5210SJason Evans }
2162a4bd5210SJason Evans 
2163a4bd5210SJason Evans void
2164a4bd5210SJason Evans arena_boot(void)
2165a4bd5210SJason Evans {
2166a4bd5210SJason Evans 	size_t header_size;
2167a4bd5210SJason Evans 	unsigned i;
2168a4bd5210SJason Evans 
2169a4bd5210SJason Evans 	/*
2170a4bd5210SJason Evans 	 * Compute the header size such that it is large enough to contain the
2171a4bd5210SJason Evans 	 * page map.  The page map is biased to omit entries for the header
2172a4bd5210SJason Evans 	 * itself, so some iteration is necessary to compute the map bias.
2173a4bd5210SJason Evans 	 *
2174a4bd5210SJason Evans 	 * 1) Compute safe header_size and map_bias values that include enough
2175a4bd5210SJason Evans 	 *    space for an unbiased page map.
2176a4bd5210SJason Evans 	 * 2) Refine map_bias based on (1) to omit the header pages in the page
2177a4bd5210SJason Evans 	 *    map.  The resulting map_bias may be one too small.
2178a4bd5210SJason Evans 	 * 3) Refine map_bias based on (2).  The result will be >= the result
2179a4bd5210SJason Evans 	 *    from (2), and will always be correct.
2180a4bd5210SJason Evans 	 */
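	/*
	 * Illustrative numbers only (hypothetical sizes; the real values
	 * depend on the build): assume 4 KiB pages, chunk_npages == 512, a
	 * 128 byte header before the map, and 32 byte map entries.  Pass 1
	 * (map_bias == 0): 128 + 32*512 == 16512 bytes -> map_bias == 5.
	 * Pass 2: 128 + 32*507 == 16352 bytes -> map_bias == 4.  Pass 3:
	 * 128 + 32*508 == 16384 bytes, exactly four pages -> map_bias == 4,
	 * the fixed point.
	 */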
2181a4bd5210SJason Evans 	map_bias = 0;
2182a4bd5210SJason Evans 	for (i = 0; i < 3; i++) {
2183a4bd5210SJason Evans 		header_size = offsetof(arena_chunk_t, map) +
2184a4bd5210SJason Evans 		    (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
2185a4bd5210SJason Evans 		map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
2186a4bd5210SJason Evans 		    != 0);
2187a4bd5210SJason Evans 	}
2188a4bd5210SJason Evans 	assert(map_bias > 0);
2189a4bd5210SJason Evans 
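	/*
	 * arena_maxclass is the largest request that can be carved out of an
	 * arena chunk once the map_bias header pages are excluded; larger
	 * requests are handled by the huge allocation path instead.
	 */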
2190a4bd5210SJason Evans 	arena_maxclass = chunksize - (map_bias << LG_PAGE);
2191a4bd5210SJason Evans 
2192a4bd5210SJason Evans 	bin_info_init();
2193a4bd5210SJason Evans }
2194a4bd5210SJason Evans 
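/*
 * Fork handling: arena_prefork() acquires the arena lock and then every bin
 * lock, so that no arena state is mid-update at the instant fork() snapshots
 * the address space.  arena_postfork_parent() and arena_postfork_child()
 * visit the same locks in the reverse order, releasing them in the parent and
 * restoring them to a usable state in the child.  These hooks are intended to
 * be driven from the allocator's pthread_atfork() handlers.
 */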
2195a4bd5210SJason Evans void
2196a4bd5210SJason Evans arena_prefork(arena_t *arena)
2197a4bd5210SJason Evans {
2198a4bd5210SJason Evans 	unsigned i;
2199a4bd5210SJason Evans 
2200a4bd5210SJason Evans 	malloc_mutex_prefork(&arena->lock);
2201a4bd5210SJason Evans 	for (i = 0; i < NBINS; i++)
2202a4bd5210SJason Evans 		malloc_mutex_prefork(&arena->bins[i].lock);
2203a4bd5210SJason Evans }
2204a4bd5210SJason Evans 
2205a4bd5210SJason Evans void
2206a4bd5210SJason Evans arena_postfork_parent(arena_t *arena)
2207a4bd5210SJason Evans {
2208a4bd5210SJason Evans 	unsigned i;
2209a4bd5210SJason Evans 
2210a4bd5210SJason Evans 	for (i = 0; i < NBINS; i++)
2211a4bd5210SJason Evans 		malloc_mutex_postfork_parent(&arena->bins[i].lock);
2212a4bd5210SJason Evans 	malloc_mutex_postfork_parent(&arena->lock);
2213a4bd5210SJason Evans }
2214a4bd5210SJason Evans 
2215a4bd5210SJason Evans void
2216a4bd5210SJason Evans arena_postfork_child(arena_t *arena)
2217a4bd5210SJason Evans {
2218a4bd5210SJason Evans 	unsigned i;
2219a4bd5210SJason Evans 
2220a4bd5210SJason Evans 	for (i = 0; i < NBINS; i++)
2221a4bd5210SJason Evans 		malloc_mutex_postfork_child(&arena->bins[i].lock);
2222a4bd5210SJason Evans 	malloc_mutex_postfork_child(&arena->lock);
2223a4bd5210SJason Evans }
2224