/* xref: /freebsd/contrib/jemalloc/src/arena.c (revision 6486b015fc84e96725fef22b0e3363351399ae83) */
#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
arena_bin_info_t	arena_bin_info[NBINS];

JEMALLOC_ATTR(aligned(CACHELINE))
const uint8_t	small_size2bin[] = {
#define	S2B_8(i)	i,
#define	S2B_16(i)	S2B_8(i) S2B_8(i)
#define	S2B_32(i)	S2B_16(i) S2B_16(i)
#define	S2B_64(i)	S2B_32(i) S2B_32(i)
#define	S2B_128(i)	S2B_64(i) S2B_64(i)
#define	S2B_256(i)	S2B_128(i) S2B_128(i)
#define	S2B_512(i)	S2B_256(i) S2B_256(i)
#define	S2B_1024(i)	S2B_512(i) S2B_512(i)
#define	S2B_2048(i)	S2B_1024(i) S2B_1024(i)
#define	S2B_4096(i)	S2B_2048(i) S2B_2048(i)
#define	S2B_8192(i)	S2B_4096(i) S2B_4096(i)
#define	SIZE_CLASS(bin, delta, size)					\
	S2B_##delta(bin)
	SIZE_CLASSES
#undef S2B_8
#undef S2B_16
#undef S2B_32
#undef S2B_64
#undef S2B_128
#undef S2B_256
#undef S2B_512
#undef S2B_1024
#undef S2B_2048
#undef S2B_4096
#undef S2B_8192
#undef SIZE_CLASS
};
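
/*
 * Editorial sketch of how the table above is consumed (assuming
 * LG_TINY_MIN == 3, so that each table entry covers an 8-byte granule, and
 * the usual SMALL_SIZE2BIN() definition from jemalloc_internal.h):
 *
 *	size_t binind = small_size2bin[(size - 1) >> LG_TINY_MIN];
 *
 * Each SIZE_CLASS(bin, delta, size) expansion emits delta/8 copies of the
 * bin index via S2B_##delta(bin), so every small size maps to its bin with
 * a single table lookup.
 */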

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	arena_run_split(arena_t *arena, arena_run_t *run, size_t size,
    bool large, bool zero);
static arena_chunk_t *arena_chunk_alloc(arena_t *arena);
static void	arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk);
static arena_run_t *arena_run_alloc(arena_t *arena, size_t size, bool large,
    bool zero);
static void	arena_purge(arena_t *arena, bool all);
static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty);
static void	arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize);
static void	arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, size_t oldsize, size_t newsize, bool dirty);
static arena_run_t	*arena_bin_runs_first(arena_bin_t *bin);
static void	arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run);
static void	arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run);
static arena_run_t *arena_bin_nonfull_run_tryget(arena_bin_t *bin);
static arena_run_t *arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin);
static void	*arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin);
static void	arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin);
static void	arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size);
static bool	arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk,
    void *ptr, size_t oldsize, size_t size, size_t extra, bool zero);
static bool	arena_ralloc_large(void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero);
static size_t	bin_info_run_size_calc(arena_bin_info_t *bin_info,
    size_t min_run_size);
static void	bin_info_init(void);

/******************************************************************************/

static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	uintptr_t a_mapelm = (uintptr_t)a;
	uintptr_t b_mapelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
    u.rb_link, arena_run_comp)

static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	int ret;
	size_t a_size = a->bits & ~PAGE_MASK;
	size_t b_size = b->bits & ~PAGE_MASK;

	assert((a->bits & CHUNK_MAP_KEY) == CHUNK_MAP_KEY || (a->bits &
	    CHUNK_MAP_DIRTY) == (b->bits & CHUNK_MAP_DIRTY));

	ret = (a_size > b_size) - (a_size < b_size);
	if (ret == 0) {
		uintptr_t a_mapelm, b_mapelm;

		if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
			a_mapelm = (uintptr_t)a;
		else {
			/*
			 * Treat keys as though they are lower than anything
			 * else.
			 */
			a_mapelm = 0;
		}
		b_mapelm = (uintptr_t)b;

		ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
	}

	return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
    u.rb_link, arena_avail_comp)

static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	unsigned regind;
	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
	    (uintptr_t)bin_info->bitmap_offset);

	assert(run->nfree > 0);
	assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);

	regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	if (regind == run->nextind)
		run->nextind++;
	assert(regind < run->nextind);
	return (ret);
}
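
/*
 * Worked example for the region addressing above, with hypothetical values
 * reg0_offset == 32 and reg_interval == 64: if bitmap_sfu() hands back
 * regind == 3 for a run at address R, the returned region is
 *
 *	R + 32 + 64 * 3 == R + 224
 *
 * bitmap_sfu() ("set first unset") both locates the lowest free region
 * index and marks it allocated, so no separate bitmap_set() call is needed
 * here.
 */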

static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t binind = arena_bin_index(chunk->arena, run->bin);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	unsigned regind = arena_run_regind(run, bin_info, ptr);
	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
	    (uintptr_t)bin_info->bitmap_offset);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - ((uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >= (uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

static inline void
arena_chunk_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

static void
arena_run_split(arena_t *arena, arena_run_t *run, size_t size, bool large,
    bool zero)
{
	arena_chunk_t *chunk;
	size_t run_ind, total_pages, need_pages, rem_pages, i;
	size_t flag_dirty;
	arena_avail_tree_t *runs_avail;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	flag_dirty = chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY;
	runs_avail = (flag_dirty != 0) ? &arena->runs_avail_dirty :
	    &arena->runs_avail_clean;
	total_pages = (chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) >>
	    LG_PAGE;
	assert((chunk->map[run_ind+total_pages-1-map_bias].bits &
	    CHUNK_MAP_DIRTY) == flag_dirty);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_tree_remove(runs_avail, &chunk->map[run_ind-map_bias]);
	if (config_stats) {
		/*
		 * Update stats_cactive if nactive is crossing a chunk
		 * multiple.
		 */
		size_t cactive_diff = CHUNK_CEILING((arena->nactive +
		    need_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_diff != 0)
			stats_cactive_add(cactive_diff);
	}
	arena->nactive += need_pages;

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		if (flag_dirty != 0) {
			chunk->map[run_ind+need_pages-map_bias].bits =
			    (rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY;
			chunk->map[run_ind+total_pages-1-map_bias].bits =
			    (rem_pages << LG_PAGE) | CHUNK_MAP_DIRTY;
		} else {
			chunk->map[run_ind+need_pages-map_bias].bits =
			    (rem_pages << LG_PAGE) |
			    (chunk->map[run_ind+need_pages-map_bias].bits &
			    CHUNK_MAP_UNZEROED);
			chunk->map[run_ind+total_pages-1-map_bias].bits =
			    (rem_pages << LG_PAGE) |
			    (chunk->map[run_ind+total_pages-1-map_bias].bits &
			    CHUNK_MAP_UNZEROED);
		}
		arena_avail_tree_insert(runs_avail,
		    &chunk->map[run_ind+need_pages-map_bias]);
	}

	/* Update dirty page accounting. */
	if (flag_dirty != 0) {
		chunk->ndirty -= need_pages;
		arena->ndirty -= need_pages;
	}

	/*
	 * Update the page map separately for large vs. small runs, since it is
	 * possible to avoid iteration for large mallocs.
	 */
	if (large) {
		if (zero) {
			if (flag_dirty == 0) {
				/*
				 * The run is clean, so some pages may be
				 * zeroed (i.e. never before touched).
				 */
				for (i = 0; i < need_pages; i++) {
					if ((chunk->map[run_ind+i-map_bias].bits
					    & CHUNK_MAP_UNZEROED) != 0) {
						VALGRIND_MAKE_MEM_UNDEFINED(
						    (void *)((uintptr_t)
						    chunk + ((run_ind+i) <<
						    LG_PAGE)), PAGE);
						memset((void *)((uintptr_t)
						    chunk + ((run_ind+i) <<
						    LG_PAGE)), 0, PAGE);
					} else if (config_debug) {
						VALGRIND_MAKE_MEM_DEFINED(
						    (void *)((uintptr_t)
						    chunk + ((run_ind+i) <<
						    LG_PAGE)), PAGE);
						arena_chunk_validate_zeroed(
						    chunk, run_ind+i);
					}
				}
			} else {
				/*
				 * The run is dirty, so all pages must be
				 * zeroed.
				 */
				VALGRIND_MAKE_MEM_UNDEFINED((void
				    *)((uintptr_t)chunk + (run_ind <<
				    LG_PAGE)), (need_pages << LG_PAGE));
				memset((void *)((uintptr_t)chunk + (run_ind <<
				    LG_PAGE)), 0, (need_pages << LG_PAGE));
			}
		}

		/*
		 * Set the last element first, in case the run only contains one
		 * page (i.e. both statements set the same element).
		 */
		chunk->map[run_ind+need_pages-1-map_bias].bits =
		    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED | flag_dirty;
		chunk->map[run_ind-map_bias].bits = size | flag_dirty |
		    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
	} else {
		assert(zero == false);
		/*
		 * Propagate the dirty and unzeroed flags to the allocated
		 * small run, so that arena_dalloc_bin_run() has the ability to
		 * conditionally trim clean pages.
		 */
		chunk->map[run_ind-map_bias].bits =
		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED) |
		    CHUNK_MAP_ALLOCATED | flag_dirty;
		/*
		 * The first page will always be dirtied during small run
		 * initialization, so a validation failure here would not
		 * actually cause an observable failure.
		 */
		if (config_debug && flag_dirty == 0 &&
		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED)
		    == 0)
			arena_chunk_validate_zeroed(chunk, run_ind);
		for (i = 1; i < need_pages - 1; i++) {
			chunk->map[run_ind+i-map_bias].bits = (i << LG_PAGE)
			    | (chunk->map[run_ind+i-map_bias].bits &
			    CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED;
			if (config_debug && flag_dirty == 0 &&
			    (chunk->map[run_ind+i-map_bias].bits &
			    CHUNK_MAP_UNZEROED) == 0)
				arena_chunk_validate_zeroed(chunk, run_ind+i);
		}
		chunk->map[run_ind+need_pages-1-map_bias].bits = ((need_pages
		    - 1) << LG_PAGE) |
		    (chunk->map[run_ind+need_pages-1-map_bias].bits &
		    CHUNK_MAP_UNZEROED) | CHUNK_MAP_ALLOCATED | flag_dirty;
		if (config_debug && flag_dirty == 0 &&
		    (chunk->map[run_ind+need_pages-1-map_bias].bits &
		    CHUNK_MAP_UNZEROED) == 0) {
			arena_chunk_validate_zeroed(chunk,
			    run_ind+need_pages-1);
		}
	}
}
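
/*
 * Simplified view of the map encoding produced above (the authoritative bit
 * definitions live in the arena header).  For a large run, only the first
 * and last elements carry state:
 *
 *	map[first] : run size | flag_dirty | LARGE | ALLOCATED
 *	map[last]  :            flag_dirty | LARGE | ALLOCATED
 *
 * For a small run, each interior element instead records its page offset
 * within the run (i << LG_PAGE), which is what later lets
 * arena_dalloc_bin() walk from an interior page back to the run header.
 */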

static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
	arena_chunk_t *chunk;
	size_t i;

	if (arena->spare != NULL) {
		arena_avail_tree_t *runs_avail;

		chunk = arena->spare;
		arena->spare = NULL;

		/* Insert the run into the appropriate runs_avail_* tree. */
		if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
			runs_avail = &arena->runs_avail_clean;
		else
			runs_avail = &arena->runs_avail_dirty;
		assert((chunk->map[0].bits & ~PAGE_MASK) == arena_maxclass);
		assert((chunk->map[chunk_npages-1-map_bias].bits & ~PAGE_MASK)
		    == arena_maxclass);
		assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) ==
		    (chunk->map[chunk_npages-1-map_bias].bits &
		    CHUNK_MAP_DIRTY));
		arena_avail_tree_insert(runs_avail, &chunk->map[0]);
	} else {
		bool zero;
		size_t unzeroed;

		zero = false;
		malloc_mutex_unlock(&arena->lock);
		chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize,
		    false, &zero);
		malloc_mutex_lock(&arena->lock);
		if (chunk == NULL)
			return (NULL);
		if (config_stats)
			arena->stats.mapped += chunksize;

		chunk->arena = arena;
		ql_elm_new(chunk, link_dirty);
		chunk->dirtied = false;

		/*
		 * Claim that no pages are in use, since the header is merely
		 * overhead.
		 */
		chunk->ndirty = 0;

		/*
		 * Initialize the map to contain one maximal free untouched run.
		 * Mark the pages as zeroed iff chunk_alloc() returned a zeroed
		 * chunk.
		 */
		unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
		chunk->map[0].bits = arena_maxclass | unzeroed;
		/*
		 * There is no need to initialize the internal page map entries
		 * unless the chunk is not zeroed.
		 */
		if (zero == false) {
			for (i = map_bias+1; i < chunk_npages-1; i++)
				chunk->map[i-map_bias].bits = unzeroed;
		} else if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++)
				assert(chunk->map[i-map_bias].bits == unzeroed);
		}
		chunk->map[chunk_npages-1-map_bias].bits = arena_maxclass |
		    unzeroed;

		/* Insert the run into the runs_avail_clean tree. */
		arena_avail_tree_insert(&arena->runs_avail_clean,
		    &chunk->map[0]);
	}

	return (chunk);
}
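
/*
 * Note on map_bias, used throughout this file: the first map_bias pages of
 * every chunk hold the arena_chunk_t header itself, so chunk->map[] has no
 * elements for them and page index p maps to chunk->map[p - map_bias].
 * For example, with a hypothetical map_bias of 3, the first usable run page
 * is page 3 and its map element is chunk->map[0].
 */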

static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
	arena_avail_tree_t *runs_avail;

	/*
	 * Remove run from the appropriate runs_avail_* tree, so that the arena
	 * does not use it.
	 */
	if ((chunk->map[0].bits & CHUNK_MAP_DIRTY) == 0)
		runs_avail = &arena->runs_avail_clean;
	else
		runs_avail = &arena->runs_avail_dirty;
	arena_avail_tree_remove(runs_avail, &chunk->map[0]);

	if (arena->spare != NULL) {
		arena_chunk_t *spare = arena->spare;

		arena->spare = chunk;
		if (spare->dirtied) {
			ql_remove(&chunk->arena->chunks_dirty, spare,
			    link_dirty);
			arena->ndirty -= spare->ndirty;
		}
		malloc_mutex_unlock(&arena->lock);
		chunk_dealloc((void *)spare, chunksize, true);
		malloc_mutex_lock(&arena->lock);
		if (config_stats)
			arena->stats.mapped -= chunksize;
	} else
		arena->spare = chunk;
}

static arena_run_t *
arena_run_alloc(arena_t *arena, size_t size, bool large, bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;
	arena_chunk_map_t *mapelm, key;

	assert(size <= arena_maxclass);
	assert((size & PAGE_MASK) == 0);

	/* Search the arena's chunks for the lowest best fit. */
	key.bits = size | CHUNK_MAP_KEY;
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = (((uintptr_t)mapelm -
		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
		    + map_bias;

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split(arena, run, size, large, zero);
		return (run);
	}
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = (((uintptr_t)mapelm -
		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
		    + map_bias;

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split(arena, run, size, large, zero);
		return (run);
	}

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
		arena_run_split(arena, run, size, large, zero);
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_dirty, &key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = (((uintptr_t)mapelm -
		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
		    + map_bias;

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split(arena, run, size, large, zero);
		return (run);
	}
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail_clean, &key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = (((uintptr_t)mapelm -
		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
		    + map_bias;

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split(arena, run, size, large, zero);
		return (run);
	}

	return (NULL);
}
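
/*
 * The searches above exploit arena_avail_comp()'s ordering: key.bits =
 * size | CHUNK_MAP_KEY builds a dummy map element whose size field is the
 * request, and CHUNK_MAP_KEY makes it compare lower than any real element
 * of equal size, so arena_avail_tree_nsearch() returns the lowest-addressed
 * run among the smallest runs that still fit (lowest best fit).
 */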

static inline void
arena_maybe_purge(arena_t *arena)
{

	/* Enforce opt_lg_dirty_mult. */
	if (opt_lg_dirty_mult >= 0 && arena->ndirty > arena->npurgatory &&
	    (arena->ndirty - arena->npurgatory) > chunk_npages &&
	    (arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
	    arena->npurgatory))
		arena_purge(arena, false);
}
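
/*
 * Worked example of the threshold above, assuming the default
 * LG_DIRTY_MULT_DEFAULT of 3 (i.e. purge once dirty pages exceed 1/8 of
 * active pages): with nactive == 4096 and npurgatory == 0, arena_purge()
 * is triggered once ndirty exceeds both chunk_npages and 4096 >> 3 == 512.
 */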

static inline void
arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk)
{
	ql_head(arena_chunk_map_t) mapelms;
	arena_chunk_map_t *mapelm;
	size_t pageind, flag_unzeroed;
	size_t ndirty;
	size_t nmadvise;

	ql_new(&mapelms);

	flag_unzeroed =
#ifdef JEMALLOC_PURGE_MADVISE_DONTNEED
	/*
	 * madvise(..., MADV_DONTNEED) results in zero-filled pages for
	 * anonymous mappings, but not for file-backed mappings.
	 */
	    0
#else
	    CHUNK_MAP_UNZEROED
#endif
	    ;

	/*
	 * If chunk is the spare, temporarily re-allocate it, 1) so that its
	 * run is reinserted into runs_avail_dirty, and 2) so that it cannot be
	 * completely discarded by another thread while arena->lock is dropped
	 * by this thread.  Note that the arena_run_dalloc() call will
	 * implicitly deallocate the chunk, so no explicit action is required
	 * in this function to deallocate the chunk.
	 *
	 * Note that once a chunk contains dirty pages, it cannot again contain
	 * a single run unless 1) it is a dirty run, or 2) this function purges
	 * dirty pages and causes the transition to a single clean run.  Thus
	 * (chunk == arena->spare) is possible, but it is not possible for
	 * this function to be called on the spare unless it contains a dirty
	 * run.
	 */
	if (chunk == arena->spare) {
		assert((chunk->map[0].bits & CHUNK_MAP_DIRTY) != 0);
		arena_chunk_alloc(arena);
	}

	/* Temporarily allocate all free dirty runs within chunk. */
	for (pageind = map_bias; pageind < chunk_npages;) {
		mapelm = &chunk->map[pageind-map_bias];
		if ((mapelm->bits & CHUNK_MAP_ALLOCATED) == 0) {
			size_t npages;

			npages = mapelm->bits >> LG_PAGE;
			assert(pageind + npages <= chunk_npages);
			if (mapelm->bits & CHUNK_MAP_DIRTY) {
				size_t i;

				arena_avail_tree_remove(
				    &arena->runs_avail_dirty, mapelm);

				mapelm->bits = (npages << LG_PAGE) |
				    flag_unzeroed | CHUNK_MAP_LARGE |
				    CHUNK_MAP_ALLOCATED;
				/*
				 * Update internal elements in the page map, so
				 * that CHUNK_MAP_UNZEROED is properly set.
				 */
				for (i = 1; i < npages - 1; i++) {
					chunk->map[pageind+i-map_bias].bits =
					    flag_unzeroed;
				}
				if (npages > 1) {
					chunk->map[
					    pageind+npages-1-map_bias].bits =
					    flag_unzeroed | CHUNK_MAP_LARGE |
					    CHUNK_MAP_ALLOCATED;
				}

				if (config_stats) {
					/*
					 * Update stats_cactive if nactive is
					 * crossing a chunk multiple.
					 */
					size_t cactive_diff =
					    CHUNK_CEILING((arena->nactive +
					    npages) << LG_PAGE) -
					    CHUNK_CEILING(arena->nactive <<
					    LG_PAGE);
					if (cactive_diff != 0)
						stats_cactive_add(cactive_diff);
				}
				arena->nactive += npages;
				/* Append to list for later processing. */
				ql_elm_new(mapelm, u.ql_link);
				ql_tail_insert(&mapelms, mapelm, u.ql_link);
			}

			pageind += npages;
		} else {
			/* Skip allocated run. */
			if (mapelm->bits & CHUNK_MAP_LARGE)
				pageind += mapelm->bits >> LG_PAGE;
			else {
				arena_run_t *run = (arena_run_t *)((uintptr_t)
				    chunk + (uintptr_t)(pageind << LG_PAGE));

				assert((mapelm->bits >> LG_PAGE) == 0);
				size_t binind = arena_bin_index(arena,
				    run->bin);
				arena_bin_info_t *bin_info =
				    &arena_bin_info[binind];
				pageind += bin_info->run_size >> LG_PAGE;
			}
		}
	}
	assert(pageind == chunk_npages);

	if (config_debug)
		ndirty = chunk->ndirty;
	if (config_stats)
		arena->stats.purged += chunk->ndirty;
	arena->ndirty -= chunk->ndirty;
	chunk->ndirty = 0;
	ql_remove(&arena->chunks_dirty, chunk, link_dirty);
	chunk->dirtied = false;

	malloc_mutex_unlock(&arena->lock);
	if (config_stats)
		nmadvise = 0;
	ql_foreach(mapelm, &mapelms, u.ql_link) {
		size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t)) + map_bias;
		size_t npages = mapelm->bits >> LG_PAGE;

		assert(pageind + npages <= chunk_npages);
		assert(ndirty >= npages);
		if (config_debug)
			ndirty -= npages;

		pages_purge((void *)((uintptr_t)chunk + (pageind << LG_PAGE)),
		    (npages << LG_PAGE));
		if (config_stats)
			nmadvise++;
	}
	assert(ndirty == 0);
	malloc_mutex_lock(&arena->lock);
	if (config_stats)
		arena->stats.nmadvise += nmadvise;

	/* Deallocate runs. */
	for (mapelm = ql_first(&mapelms); mapelm != NULL;
	    mapelm = ql_first(&mapelms)) {
		size_t pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t)) + map_bias;
		arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
		    (uintptr_t)(pageind << LG_PAGE));

		ql_remove(&mapelms, mapelm, u.ql_link);
		arena_run_dalloc(arena, run, false);
	}
}
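
/*
 * Summary of the function above: purging runs in three phases so that
 * arena->lock need not be held across the (potentially slow) purge calls:
 * 1) with the lock held, transiently mark each free dirty run as a large
 *    allocation and queue it on mapelms; 2) with the lock dropped,
 *    pages_purge() each queued run; 3) with the lock re-acquired,
 *    arena_run_dalloc() each run, which re-coalesces and may deallocate
 *    the chunk.
 */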

static void
arena_purge(arena_t *arena, bool all)
{
	arena_chunk_t *chunk;
	size_t npurgatory;

	if (config_debug) {
		size_t ndirty = 0;

		ql_foreach(chunk, &arena->chunks_dirty, link_dirty) {
			assert(chunk->dirtied);
			ndirty += chunk->ndirty;
		}
		assert(ndirty == arena->ndirty);
	}
	assert(arena->ndirty > arena->npurgatory || all);
	assert(arena->ndirty - arena->npurgatory > chunk_npages || all);
	assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
	    arena->npurgatory) || all);

	if (config_stats)
		arena->stats.npurge++;

	/*
	 * Compute the minimum number of pages that this thread should try to
	 * purge, and add the result to arena->npurgatory.  This will keep
	 * multiple threads from racing to reduce ndirty below the threshold.
	 */
	npurgatory = arena->ndirty - arena->npurgatory;
	if (all == false) {
		assert(npurgatory >= arena->nactive >> opt_lg_dirty_mult);
		npurgatory -= arena->nactive >> opt_lg_dirty_mult;
	}
	arena->npurgatory += npurgatory;

	while (npurgatory > 0) {
		/* Get next chunk with dirty pages. */
		chunk = ql_first(&arena->chunks_dirty);
		if (chunk == NULL) {
			/*
			 * This thread was unable to purge as many pages as
			 * originally intended, due to races with other threads
			 * that either did some of the purging work, or re-used
			 * dirty pages.
			 */
			arena->npurgatory -= npurgatory;
			return;
		}
		while (chunk->ndirty == 0) {
			ql_remove(&arena->chunks_dirty, chunk, link_dirty);
			chunk->dirtied = false;
			chunk = ql_first(&arena->chunks_dirty);
			if (chunk == NULL) {
				/* Same logic as above. */
				arena->npurgatory -= npurgatory;
				return;
			}
		}

		if (chunk->ndirty > npurgatory) {
			/*
			 * This thread will, at a minimum, purge all the dirty
			 * pages in chunk, so set npurgatory to reflect this
			 * thread's commitment to purge the pages.  This tends
			 * to reduce the chances of the following scenario:
			 *
			 * 1) This thread sets arena->npurgatory such that
			 *    (arena->ndirty - arena->npurgatory) is at the
			 *    threshold.
			 * 2) This thread drops arena->lock.
			 * 3) Another thread causes one or more pages to be
			 *    dirtied, and immediately determines that it must
			 *    purge dirty pages.
			 *
			 * If this scenario *does* play out, that's okay,
			 * because all of the purging work being done really
			 * needs to happen.
			 */
			arena->npurgatory += chunk->ndirty - npurgatory;
			npurgatory = chunk->ndirty;
		}

		arena->npurgatory -= chunk->ndirty;
		npurgatory -= chunk->ndirty;
		arena_chunk_purge(arena, chunk);
	}
}

void
arena_purge_all(arena_t *arena)
{

	malloc_mutex_lock(&arena->lock);
	arena_purge(arena, true);
	malloc_mutex_unlock(&arena->lock);
}

static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty)
{
	arena_chunk_t *chunk;
	size_t size, run_ind, run_pages, flag_dirty;
	arena_avail_tree_t *runs_avail;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	assert(run_ind >= map_bias);
	assert(run_ind < chunk_npages);
	if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_LARGE) != 0) {
		size = chunk->map[run_ind-map_bias].bits & ~PAGE_MASK;
		assert(size == PAGE ||
		    (chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
		    ~PAGE_MASK) == 0);
		assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
		    CHUNK_MAP_LARGE) != 0);
		assert((chunk->map[run_ind+(size>>LG_PAGE)-1-map_bias].bits &
		    CHUNK_MAP_ALLOCATED) != 0);
	} else {
		size_t binind = arena_bin_index(arena, run->bin);
		arena_bin_info_t *bin_info = &arena_bin_info[binind];
		size = bin_info->run_size;
	}
	run_pages = (size >> LG_PAGE);
	if (config_stats) {
		/*
		 * Update stats_cactive if nactive is crossing a chunk
		 * multiple.
		 */
		size_t cactive_diff = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - run_pages) << LG_PAGE);
		if (cactive_diff != 0)
			stats_cactive_sub(cactive_diff);
	}
	arena->nactive -= run_pages;

	/*
	 * The run is dirty if the caller claims to have dirtied it, as well as
	 * if it was already dirty before being allocated.
	 */
	if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) != 0)
		dirty = true;
	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;
	runs_avail = dirty ? &arena->runs_avail_dirty :
	    &arena->runs_avail_clean;

	/* Mark pages as unallocated in the chunk map. */
	if (dirty) {
		chunk->map[run_ind-map_bias].bits = size | CHUNK_MAP_DIRTY;
		chunk->map[run_ind+run_pages-1-map_bias].bits = size |
		    CHUNK_MAP_DIRTY;

		chunk->ndirty += run_pages;
		arena->ndirty += run_pages;
	} else {
		chunk->map[run_ind-map_bias].bits = size |
		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_UNZEROED);
		chunk->map[run_ind+run_pages-1-map_bias].bits = size |
		    (chunk->map[run_ind+run_pages-1-map_bias].bits &
		    CHUNK_MAP_UNZEROED);
	}

	/* Try to coalesce forward. */
	if (run_ind + run_pages < chunk_npages &&
	    (chunk->map[run_ind+run_pages-map_bias].bits & CHUNK_MAP_ALLOCATED)
	    == 0 && (chunk->map[run_ind+run_pages-map_bias].bits &
	    CHUNK_MAP_DIRTY) == flag_dirty) {
		size_t nrun_size = chunk->map[run_ind+run_pages-map_bias].bits &
		    ~PAGE_MASK;
		size_t nrun_pages = nrun_size >> LG_PAGE;

		/*
		 * Remove successor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
		    & ~PAGE_MASK) == nrun_size);
		assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
		    & CHUNK_MAP_ALLOCATED) == 0);
		assert((chunk->map[run_ind+run_pages+nrun_pages-1-map_bias].bits
		    & CHUNK_MAP_DIRTY) == flag_dirty);
		arena_avail_tree_remove(runs_avail,
		    &chunk->map[run_ind+run_pages-map_bias]);

		size += nrun_size;
		run_pages += nrun_pages;

		chunk->map[run_ind-map_bias].bits = size |
		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
		chunk->map[run_ind+run_pages-1-map_bias].bits = size |
		    (chunk->map[run_ind+run_pages-1-map_bias].bits &
		    CHUNK_MAP_FLAGS_MASK);
	}

	/* Try to coalesce backward. */
	if (run_ind > map_bias && (chunk->map[run_ind-1-map_bias].bits &
	    CHUNK_MAP_ALLOCATED) == 0 && (chunk->map[run_ind-1-map_bias].bits &
	    CHUNK_MAP_DIRTY) == flag_dirty) {
		size_t prun_size = chunk->map[run_ind-1-map_bias].bits &
		    ~PAGE_MASK;
		size_t prun_pages = prun_size >> LG_PAGE;

		run_ind -= prun_pages;

		/*
		 * Remove predecessor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK)
		    == prun_size);
		assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_ALLOCATED)
		    == 0);
		assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY)
		    == flag_dirty);
		arena_avail_tree_remove(runs_avail,
		    &chunk->map[run_ind-map_bias]);

		size += prun_size;
		run_pages += prun_pages;

		chunk->map[run_ind-map_bias].bits = size |
		    (chunk->map[run_ind-map_bias].bits & CHUNK_MAP_FLAGS_MASK);
		chunk->map[run_ind+run_pages-1-map_bias].bits = size |
		    (chunk->map[run_ind+run_pages-1-map_bias].bits &
		    CHUNK_MAP_FLAGS_MASK);
	}

	/* Insert into runs_avail, now that coalescing is complete. */
	assert((chunk->map[run_ind-map_bias].bits & ~PAGE_MASK) ==
	    (chunk->map[run_ind+run_pages-1-map_bias].bits & ~PAGE_MASK));
	assert((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) ==
	    (chunk->map[run_ind+run_pages-1-map_bias].bits & CHUNK_MAP_DIRTY));
	arena_avail_tree_insert(runs_avail, &chunk->map[run_ind-map_bias]);

	if (dirty) {
		/*
		 * Insert into chunks_dirty before potentially calling
		 * arena_chunk_dealloc(), so that chunks_dirty and
		 * arena->ndirty are consistent.
		 */
		if (chunk->dirtied == false) {
			ql_tail_insert(&arena->chunks_dirty, chunk, link_dirty);
			chunk->dirtied = true;
		}
	}

	/*
	 * Deallocate chunk if it is now completely unused.  The bit
	 * manipulation checks whether the first run is unallocated and extends
	 * to the end of the chunk.
	 */
	if ((chunk->map[0].bits & (~PAGE_MASK | CHUNK_MAP_ALLOCATED)) ==
	    arena_maxclass)
		arena_chunk_dealloc(arena, chunk);

	/*
	 * It is okay to do dirty page processing here even if the chunk was
	 * deallocated above, since in that case it is the spare.  Waiting
	 * until after possible chunk deallocation to do dirty processing
	 * allows for an old spare to be fully deallocated, thus decreasing the
	 * chances of spuriously crossing the dirty page purging threshold.
	 */
	if (dirty)
		arena_maybe_purge(arena);
}
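
/*
 * Coalescing example for the logic above: if pages [10..11] are freed
 * while [8..9] and [12..15] are already free runs of matching dirtiness,
 * the forward pass merges [10..11] with [12..15], the backward pass then
 * merges [8..9] in, and a single 8-page run is inserted whose header and
 * footer map elements both record the combined size.
 */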

static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize)
{
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
	size_t flag_dirty = chunk->map[pageind-map_bias].bits & CHUNK_MAP_DIRTY;

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * leading run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
	assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
	chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
	    (chunk->map[pageind+head_npages-1-map_bias].bits &
	    CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
	chunk->map[pageind-map_bias].bits = (oldsize - newsize)
	    | flag_dirty | (chunk->map[pageind-map_bias].bits &
	    CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

	if (config_debug) {
		UNUSED size_t tail_npages = newsize >> LG_PAGE;
		assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
		    .bits & ~PAGE_MASK) == 0);
		assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
		    .bits & CHUNK_MAP_DIRTY) == flag_dirty);
		assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
		    .bits & CHUNK_MAP_LARGE) != 0);
		assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias]
		    .bits & CHUNK_MAP_ALLOCATED) != 0);
	}
	chunk->map[pageind+head_npages-map_bias].bits = newsize | flag_dirty |
	    (chunk->map[pageind+head_npages-map_bias].bits &
	    CHUNK_MAP_FLAGS_MASK) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

	arena_run_dalloc(arena, run, false);
}

static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize, bool dirty)
{
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	size_t head_npages = newsize >> LG_PAGE;
	size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
	size_t flag_dirty = chunk->map[pageind-map_bias].bits &
	    CHUNK_MAP_DIRTY;

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * trailing run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_LARGE) != 0);
	assert((chunk->map[pageind-map_bias].bits & CHUNK_MAP_ALLOCATED) != 0);
	chunk->map[pageind+head_npages-1-map_bias].bits = flag_dirty |
	    (chunk->map[pageind+head_npages-1-map_bias].bits &
	    CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
	chunk->map[pageind-map_bias].bits = newsize | flag_dirty |
	    (chunk->map[pageind-map_bias].bits & CHUNK_MAP_UNZEROED) |
	    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

	assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
	    ~PAGE_MASK) == 0);
	assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
	    CHUNK_MAP_LARGE) != 0);
	assert((chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
	    CHUNK_MAP_ALLOCATED) != 0);
	chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits =
	    flag_dirty |
	    (chunk->map[pageind+head_npages+tail_npages-1-map_bias].bits &
	    CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
	chunk->map[pageind+head_npages-map_bias].bits = (oldsize - newsize) |
	    flag_dirty | (chunk->map[pageind+head_npages-map_bias].bits &
	    CHUNK_MAP_UNZEROED) | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;

	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
	    dirty);
}

static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
	arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
	if (mapelm != NULL) {
		arena_chunk_t *chunk;
		size_t pageind;

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
		pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t))) + map_bias;
		arena_run_t *run = (arena_run_t *)((uintptr_t)chunk +
		    (uintptr_t)((pageind - (mapelm->bits >> LG_PAGE)) <<
		    LG_PAGE));
		return (run);
	}

	return (NULL);
}

static void
arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];

	assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);

	arena_run_tree_insert(&bin->runs, mapelm);
}

static void
arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_t *mapelm = &chunk->map[pageind-map_bias];

	assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);

	arena_run_tree_remove(&bin->runs, mapelm);
}

static arena_run_t *
arena_bin_nonfull_run_tryget(arena_bin_t *bin)
{
	arena_run_t *run = arena_bin_runs_first(bin);
	if (run != NULL) {
		arena_bin_runs_remove(bin, run);
		if (config_stats)
			bin->stats.reruns++;
	}
	return (run);
}

static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
	arena_run_t *run;
	size_t binind;
	arena_bin_info_t *bin_info;

	/* Look for a usable run. */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);
	/* No existing runs have any space available. */

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];

	/* Allocate a new run. */
	malloc_mutex_unlock(&bin->lock);
	/******************************/
	malloc_mutex_lock(&arena->lock);
	run = arena_run_alloc(arena, bin_info->run_size, false, false);
	if (run != NULL) {
		bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
		    (uintptr_t)bin_info->bitmap_offset);

		/* Initialize run internals. */
		run->bin = bin;
		run->nextind = 0;
		run->nfree = bin_info->nregs;
		bitmap_init(bitmap, &bin_info->bitmap_info);
	}
	malloc_mutex_unlock(&arena->lock);
	/********************************/
	malloc_mutex_lock(&bin->lock);
	if (run != NULL) {
		if (config_stats) {
			bin->stats.nruns++;
			bin->stats.curruns++;
		}
		return (run);
	}

	/*
	 * arena_run_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);

	return (NULL);
}

/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
	void *ret;
	size_t binind;
	arena_bin_info_t *bin_info;
	arena_run_t *run;

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];
	bin->runcur = NULL;
	run = arena_bin_nonfull_run_get(arena, bin);
	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
		/*
		 * Another thread updated runcur while this one ran without the
		 * bin lock in arena_bin_nonfull_run_get().
		 */
		assert(bin->runcur->nfree > 0);
		ret = arena_run_reg_alloc(bin->runcur, bin_info);
		if (run != NULL) {
			arena_chunk_t *chunk;

			/*
			 * arena_run_alloc() may have allocated run, or it may
			 * have pulled run from the bin's run tree.  Therefore
			 * it is unsafe to make any assumptions about how run
			 * has previously been used, and arena_bin_lower_run()
			 * must be called, as if a region were just deallocated
			 * from the run.
			 */
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
			if (run->nfree == bin_info->nregs)
				arena_dalloc_bin_run(arena, chunk, run, bin);
			else
				arena_bin_lower_run(arena, chunk, run, bin);
		}
		return (ret);
	}

	if (run == NULL)
		return (NULL);

	bin->runcur = run;

	assert(bin->runcur->nfree > 0);

	return (arena_run_reg_alloc(bin->runcur, bin_info));
}

void
arena_prof_accum(arena_t *arena, uint64_t accumbytes)
{

	cassert(config_prof);

	if (config_prof && prof_interval != 0) {
		arena->prof_accumbytes += accumbytes;
		if (arena->prof_accumbytes >= prof_interval) {
			prof_idump();
			arena->prof_accumbytes -= prof_interval;
		}
	}
}

void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
    uint64_t prof_accumbytes)
{
	unsigned i, nfill;
	arena_bin_t *bin;
	arena_run_t *run;
	void *ptr;

	assert(tbin->ncached == 0);

	if (config_prof) {
		malloc_mutex_lock(&arena->lock);
		arena_prof_accum(arena, prof_accumbytes);
		malloc_mutex_unlock(&arena->lock);
	}
	bin = &arena->bins[binind];
	malloc_mutex_lock(&bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tbin->lg_fill_div); i < nfill; i++) {
		if ((run = bin->runcur) != NULL && run->nfree > 0)
			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
		else
			ptr = arena_bin_malloc_hard(arena, bin);
		if (ptr == NULL)
			break;
		if (config_fill && opt_junk) {
			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
			    true);
		}
		/* Insert such that low regions get used first. */
		tbin->avail[nfill - 1 - i] = ptr;
	}
	if (config_stats) {
		bin->stats.allocated += i * arena_bin_info[binind].reg_size;
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	}
	malloc_mutex_unlock(&bin->lock);
	tbin->ncached = i;
}

void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{

	if (zero) {
		size_t redzone_size = bin_info->redzone_size;
		memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
		    redzone_size);
		memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
		    redzone_size);
	} else {
		memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
		    bin_info->reg_interval);
	}
}
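
/*
 * Region layout assumed by the junk routines above and below, with all
 * sizes taken from arena_bin_info_t:
 *
 *	|<---------- reg_interval ---------->|
 *	| redzone |     region     | redzone |
 *	          ^ptr
 *
 * ptr addresses the start of the region, so the leading redzone sits at
 * ptr - redzone_size and the trailing one at ptr + reg_size.  Writing 0xa5
 * into the redzones at allocation time is what allows
 * arena_dalloc_junk_small() to detect writes that strayed past either end.
 */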

void
arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
{
	size_t size = bin_info->reg_size;
	size_t redzone_size = bin_info->redzone_size;
	size_t i;
	bool error = false;

	for (i = 1; i <= redzone_size; i++) {
		unsigned byte;
		if ((byte = *(uint8_t *)((uintptr_t)ptr - i)) != 0xa5) {
			error = true;
			malloc_printf("<jemalloc>: Corrupt redzone "
			    "%zu byte%s before %p (size %zu), byte=%#x\n", i,
			    (i == 1) ? "" : "s", ptr, size, byte);
		}
	}
	for (i = 0; i < redzone_size; i++) {
		unsigned byte;
		if ((byte = *(uint8_t *)((uintptr_t)ptr + size + i)) != 0xa5) {
			error = true;
			malloc_printf("<jemalloc>: Corrupt redzone "
			    "%zu byte%s after end of %p (size %zu), byte=%#x\n",
			    i, (i == 1) ? "" : "s", ptr, size, byte);
		}
	}
	if (opt_abort && error)
		abort();

	memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
	    bin_info->reg_interval);
}

void *
arena_malloc_small(arena_t *arena, size_t size, bool zero)
{
	void *ret;
	arena_bin_t *bin;
	arena_run_t *run;
	size_t binind;

	binind = SMALL_SIZE2BIN(size);
	assert(binind < NBINS);
	bin = &arena->bins[binind];
	size = arena_bin_info[binind].reg_size;

	malloc_mutex_lock(&bin->lock);
	if ((run = bin->runcur) != NULL && run->nfree > 0)
		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
	else
		ret = arena_bin_malloc_hard(arena, bin);

	if (ret == NULL) {
		malloc_mutex_unlock(&bin->lock);
		return (NULL);
	}

	if (config_stats) {
		bin->stats.allocated += size;
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
	}
	malloc_mutex_unlock(&bin->lock);
	if (config_prof && isthreaded == false) {
		malloc_mutex_lock(&arena->lock);
		arena_prof_accum(arena, size);
		malloc_mutex_unlock(&arena->lock);
	}

	if (zero == false) {
		if (config_fill) {
			if (opt_junk) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (opt_zero)
				memset(ret, 0, size);
		}
	} else {
		if (config_fill && opt_junk) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
		memset(ret, 0, size);
	}

	return (ret);
}

void *
arena_malloc_large(arena_t *arena, size_t size, bool zero)
{
	void *ret;

	/* Large allocation. */
	size = PAGE_CEILING(size);
	malloc_mutex_lock(&arena->lock);
	ret = (void *)arena_run_alloc(arena, size, true, zero);
	if (ret == NULL) {
		malloc_mutex_unlock(&arena->lock);
		return (NULL);
	}
	if (config_stats) {
		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += size;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
	}
	if (config_prof)
		arena_prof_accum(arena, size);
	malloc_mutex_unlock(&arena->lock);

	if (zero == false) {
		if (config_fill) {
			if (opt_junk)
				memset(ret, 0xa5, size);
			else if (opt_zero)
				memset(ret, 0, size);
		}
	}

	return (ret);
}

/* Only handles large allocations that require more than page alignment. */
void *
arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
{
	void *ret;
	size_t alloc_size, leadsize, trailsize;
	arena_run_t *run;
	arena_chunk_t *chunk;

	assert((size & PAGE_MASK) == 0);

	alignment = PAGE_CEILING(alignment);
	alloc_size = size + alignment - PAGE;

	malloc_mutex_lock(&arena->lock);
	run = arena_run_alloc(arena, alloc_size, true, zero);
	if (run == NULL) {
		malloc_mutex_unlock(&arena->lock);
		return (NULL);
	}
	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);

	leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
	    (uintptr_t)run;
	assert(alloc_size >= leadsize + size);
	trailsize = alloc_size - leadsize - size;
	ret = (void *)((uintptr_t)run + leadsize);
	if (leadsize != 0) {
		arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
		    leadsize);
	}
	if (trailsize != 0) {
		arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
		    false);
	}

	if (config_stats) {
		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += size;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
	}
	malloc_mutex_unlock(&arena->lock);

	if (config_fill && zero == false) {
		if (opt_junk)
			memset(ret, 0xa5, size);
		else if (opt_zero)
			memset(ret, 0, size);
	}
	return (ret);
}
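
/*
 * Worked example for the trimming above (4 KiB pages assumed): a request
 * for size == 16 pages with alignment == 8 pages allocates alloc_size ==
 * 16 + 8 - 1 == 23 pages.  If the run happens to begin 3 pages past an
 * 8-page boundary, then leadsize == 5 pages is trimmed from the head, ret
 * is properly aligned, and trailsize == 23 - 5 - 16 == 2 pages is trimmed
 * from the tail.
 */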

void
arena_prof_promoted(const void *ptr, size_t size)
{
	arena_chunk_t *chunk;
	size_t pageind, binind;

	cassert(config_prof);
	assert(ptr != NULL);
	assert(CHUNK_ADDR2BASE(ptr) != ptr);
	assert(isalloc(ptr, false) == PAGE);
	assert(isalloc(ptr, true) == PAGE);
	assert(size <= SMALL_MAXCLASS);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	binind = SMALL_SIZE2BIN(size);
	assert(binind < NBINS);
	chunk->map[pageind-map_bias].bits = (chunk->map[pageind-map_bias].bits &
	    ~CHUNK_MAP_CLASS_MASK) | ((binind+1) << CHUNK_MAP_CLASS_SHIFT);

	assert(isalloc(ptr, false) == PAGE);
	assert(isalloc(ptr, true) == size);
}
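
/*
 * The CHUNK_MAP_CLASS bits written above store binind+1 rather than binind
 * so that a value of zero can mean "not a promoted small allocation";
 * isalloc(ptr, true) decodes the stored class back to the original small
 * size, while the run itself remains a one-page large allocation.
 */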
1491 
1492 static void
1493 arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
1494     arena_bin_t *bin)
1495 {
1496 
1497 	/* Dissociate run from bin. */
1498 	if (run == bin->runcur)
1499 		bin->runcur = NULL;
1500 	else {
1501 		size_t binind = arena_bin_index(chunk->arena, bin);
1502 		arena_bin_info_t *bin_info = &arena_bin_info[binind];
1503 
1504 		if (bin_info->nregs != 1) {
1505 			/*
1506 			 * This block's conditional is necessary because if the
1507 			 * run only contains one region, then it never gets
1508 			 * inserted into the non-full runs tree.
1509 			 */
1510 			arena_bin_runs_remove(bin, run);
1511 		}
1512 	}
1513 }
1514 
1515 static void
1516 arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1517     arena_bin_t *bin)
1518 {
1519 	size_t binind;
1520 	arena_bin_info_t *bin_info;
1521 	size_t npages, run_ind, past;
1522 
1523 	assert(run != bin->runcur);
1524 	assert(arena_run_tree_search(&bin->runs, &chunk->map[
1525 	    (((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)-map_bias]) == NULL);
1526 
1527 	binind = arena_bin_index(chunk->arena, run->bin);
1528 	bin_info = &arena_bin_info[binind];
1529 
1530 	malloc_mutex_unlock(&bin->lock);
1531 	/******************************/
1532 	npages = bin_info->run_size >> LG_PAGE;
1533 	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
1534 	past = (size_t)(PAGE_CEILING((uintptr_t)run +
1535 	    (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
1536 	    bin_info->reg_interval - bin_info->redzone_size) -
1537 	    (uintptr_t)chunk) >> LG_PAGE);
1538 	malloc_mutex_lock(&arena->lock);
1539 
1540 	/*
1541 	 * If the run was originally clean, and some pages were never touched,
1542 	 * trim the clean pages before deallocating the dirty portion of the
1543 	 * run.
1544 	 */
1545 	if ((chunk->map[run_ind-map_bias].bits & CHUNK_MAP_DIRTY) == 0 && past
1546 	    - run_ind < npages) {
1547 		/*
1548 		 * Trim clean pages.  Convert to large run beforehand.  Set the
1549 		 * last map element first, in case this is a one-page run.
1550 		 */
1551 		chunk->map[run_ind+npages-1-map_bias].bits = CHUNK_MAP_LARGE |
1552 		    (chunk->map[run_ind+npages-1-map_bias].bits &
1553 		    CHUNK_MAP_FLAGS_MASK);
1554 		chunk->map[run_ind-map_bias].bits = bin_info->run_size |
1555 		    CHUNK_MAP_LARGE | (chunk->map[run_ind-map_bias].bits &
1556 		    CHUNK_MAP_FLAGS_MASK);
1557 		arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
1558 		    ((past - run_ind) << LG_PAGE), false);
1559 		/* npages = past - run_ind; */
1560 	}
1561 	arena_run_dalloc(arena, run, true);
1562 	malloc_mutex_unlock(&arena->lock);
1563 	/****************************/
1564 	malloc_mutex_lock(&bin->lock);
1565 	if (config_stats)
1566 		bin->stats.curruns--;
1567 }
1568 
1569 static void
1570 arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1571     arena_bin_t *bin)
1572 {
1573 
1574 	/*
1575 	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
1576 	 * non-full run.  It is okay to NULL runcur out rather than proactively
1577 	 * keeping it pointing at the lowest non-full run.
1578 	 */
1579 	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
1580 		/* Switch runcur. */
1581 		if (bin->runcur->nfree > 0)
1582 			arena_bin_runs_insert(bin, bin->runcur);
1583 		bin->runcur = run;
1584 		if (config_stats)
1585 			bin->stats.reruns++;
1586 	} else
1587 		arena_bin_runs_insert(bin, run);
1588 }
1589 
1590 void
1591 arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1592     arena_chunk_map_t *mapelm)
1593 {
1594 	size_t pageind;
1595 	arena_run_t *run;
1596 	arena_bin_t *bin;
1597 	size_t size;
1598 
1599 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1600 	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1601 	    (mapelm->bits >> LG_PAGE)) << LG_PAGE));
1602 	bin = run->bin;
1603 	size_t binind = arena_bin_index(arena, bin);
1604 	arena_bin_info_t *bin_info = &arena_bin_info[binind];
1605 	if (config_fill || config_stats)
1606 		size = bin_info->reg_size;
1607 
1608 	if (config_fill && opt_junk)
1609 		arena_dalloc_junk_small(ptr, bin_info);
1610 
1611 	arena_run_reg_dalloc(run, ptr);
1612 	if (run->nfree == bin_info->nregs) {
1613 		arena_dissociate_bin_run(chunk, run, bin);
1614 		arena_dalloc_bin_run(arena, chunk, run, bin);
1615 	} else if (run->nfree == 1 && run != bin->runcur)
1616 		arena_bin_lower_run(arena, chunk, run, bin);
1617 
1618 	if (config_stats) {
1619 		bin->stats.allocated -= size;
1620 		bin->stats.ndalloc++;
1621 	}
1622 }
1623 
1624 void
1625 arena_stats_merge(arena_t *arena, size_t *nactive, size_t *ndirty,
1626     arena_stats_t *astats, malloc_bin_stats_t *bstats,
1627     malloc_large_stats_t *lstats)
1628 {
1629 	unsigned i;
1630 
1631 	malloc_mutex_lock(&arena->lock);
1632 	*nactive += arena->nactive;
1633 	*ndirty += arena->ndirty;
1634 
1635 	astats->mapped += arena->stats.mapped;
1636 	astats->npurge += arena->stats.npurge;
1637 	astats->nmadvise += arena->stats.nmadvise;
1638 	astats->purged += arena->stats.purged;
1639 	astats->allocated_large += arena->stats.allocated_large;
1640 	astats->nmalloc_large += arena->stats.nmalloc_large;
1641 	astats->ndalloc_large += arena->stats.ndalloc_large;
1642 	astats->nrequests_large += arena->stats.nrequests_large;
1643 
1644 	for (i = 0; i < nlclasses; i++) {
1645 		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
1646 		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
1647 		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
1648 		lstats[i].curruns += arena->stats.lstats[i].curruns;
1649 	}
1650 	malloc_mutex_unlock(&arena->lock);
1651 
1652 	for (i = 0; i < NBINS; i++) {
1653 		arena_bin_t *bin = &arena->bins[i];
1654 
1655 		malloc_mutex_lock(&bin->lock);
1656 		bstats[i].allocated += bin->stats.allocated;
1657 		bstats[i].nmalloc += bin->stats.nmalloc;
1658 		bstats[i].ndalloc += bin->stats.ndalloc;
1659 		bstats[i].nrequests += bin->stats.nrequests;
1660 		if (config_tcache) {
1661 			bstats[i].nfills += bin->stats.nfills;
1662 			bstats[i].nflushes += bin->stats.nflushes;
1663 		}
1664 		bstats[i].nruns += bin->stats.nruns;
1665 		bstats[i].reruns += bin->stats.reruns;
1666 		bstats[i].curruns += bin->stats.curruns;
1667 		malloc_mutex_unlock(&bin->lock);
1668 	}
1669 }
1670 
1671 void
1672 arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1673 {
1674 
1675 	if (config_fill || config_stats) {
1676 		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1677 		size_t size = chunk->map[pageind-map_bias].bits & ~PAGE_MASK;
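		/*
		 * For a large run, the size is stored in the bits above
		 * PAGE_MASK of the first page's map entry, with CHUNK_MAP_*
		 * flags in the low bits; e.g. with 4 KiB pages, a 5-page
		 * run decodes to size == 0x5000 here.
		 */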
1678 
1679 		if (config_fill && opt_junk)
1680 			memset(ptr, 0x5a, size);
1681 		if (config_stats) {
1682 			arena->stats.ndalloc_large++;
1683 			arena->stats.allocated_large -= size;
1684 			arena->stats.lstats[(size >> LG_PAGE) - 1].ndalloc++;
1685 			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns--;
1686 		}
1687 	}
1688 
1689 	arena_run_dalloc(arena, (arena_run_t *)ptr, true);
1690 }
1691 
1692 static void
1693 arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1694     size_t oldsize, size_t size)
1695 {
1696 
1697 	assert(size < oldsize);
1698 
1699 	/*
1700 	 * Shrink the run, and make trailing pages available for other
1701 	 * allocations.
1702 	 */
1703 	malloc_mutex_lock(&arena->lock);
1704 	arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
1705 	    true);
1706 	if (config_stats) {
1707 		arena->stats.ndalloc_large++;
1708 		arena->stats.allocated_large -= oldsize;
1709 		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1710 		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
1711 
1712 		arena->stats.nmalloc_large++;
1713 		arena->stats.nrequests_large++;
1714 		arena->stats.allocated_large += size;
1715 		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1716 		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1717 		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1718 	}
1719 	malloc_mutex_unlock(&arena->lock);
1720 }
1721 
1722 static bool
1723 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1724     size_t oldsize, size_t size, size_t extra, bool zero)
1725 {
1726 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1727 	size_t npages = oldsize >> LG_PAGE;
1728 	size_t followsize;
1729 
1730 	assert(oldsize == (chunk->map[pageind-map_bias].bits & ~PAGE_MASK));
1731 
1732 	/* Try to extend the run. */
1733 	assert(size + extra > oldsize);
1734 	malloc_mutex_lock(&arena->lock);
1735 	if (pageind + npages < chunk_npages &&
1736 	    (chunk->map[pageind+npages-map_bias].bits
1737 	    & CHUNK_MAP_ALLOCATED) == 0 && (followsize =
1738 	    chunk->map[pageind+npages-map_bias].bits & ~PAGE_MASK) >= size -
1739 	    oldsize) {
1740 		/*
1741 		 * The next run is available and sufficiently large.  Split the
1742 		 * following run, then merge the first part with the existing
1743 		 * allocation.
1744 		 */
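		/*
		 * E.g. oldsize == 2 pages, size + extra == 4 pages, and a
		 * free 4-page run following: oldsize + followsize (6 pages)
		 * exceeds size + extra, so splitsize is size + extra -
		 * oldsize == 2 pages and only the first half of the
		 * following run is consumed.
		 */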
1745 		size_t flag_dirty;
1746 		size_t splitsize = (oldsize + followsize <= size + extra)
1747 		    ? followsize : size + extra - oldsize;
1748 		arena_run_split(arena, (arena_run_t *)((uintptr_t)chunk +
1749 		    ((pageind+npages) << LG_PAGE)), splitsize, true, zero);
1750 
1751 		size = oldsize + splitsize;
1752 		npages = size >> LG_PAGE;
1753 
1754 		/*
1755 		 * Mark the extended run as dirty if either portion of the run
1756 		 * was dirty before allocation.  This is rather pedantic,
1757 		 * because there's not actually any sequence of events that
1758 		 * could cause the resulting run to be passed to
1759 		 * arena_run_dalloc() with the dirty argument set to false
1760 		 * (which is when dirty flag consistency would really matter).
1761 		 */
1762 		flag_dirty = (chunk->map[pageind-map_bias].bits &
1763 		    CHUNK_MAP_DIRTY) |
1764 		    (chunk->map[pageind+npages-1-map_bias].bits &
1765 		    CHUNK_MAP_DIRTY);
1766 		chunk->map[pageind-map_bias].bits = size | flag_dirty
1767 		    | CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1768 		chunk->map[pageind+npages-1-map_bias].bits = flag_dirty |
1769 		    CHUNK_MAP_LARGE | CHUNK_MAP_ALLOCATED;
1770 
1771 		if (config_stats) {
1772 			arena->stats.ndalloc_large++;
1773 			arena->stats.allocated_large -= oldsize;
1774 			arena->stats.lstats[(oldsize >> LG_PAGE)
1775 			    - 1].ndalloc++;
1776 			arena->stats.lstats[(oldsize >> LG_PAGE)
1777 			    - 1].curruns--;
1778 
1779 			arena->stats.nmalloc_large++;
1780 			arena->stats.nrequests_large++;
1781 			arena->stats.allocated_large += size;
1782 			arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1783 			arena->stats.lstats[(size >> LG_PAGE)
1784 			    - 1].nrequests++;
1785 			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1786 		}
1787 		malloc_mutex_unlock(&arena->lock);
1788 		return (false);
1789 	}
1790 	malloc_mutex_unlock(&arena->lock);
1791 
1792 	return (true);
1793 }
1794 
1795 /*
1796  * Try to resize a large allocation, in order to avoid copying.  Growing
1797  * always fails if the run following the allocation is already in use.
1798  */
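/*
 * E.g. with 4 KiB pages, oldsize == 8192, size == 5000, and extra == 0:
 * psize == PAGE_CEILING(5000) == 8192 == oldsize, so the same-size-class
 * case below succeeds in place, junk-filling the abandoned tail when
 * opt_junk is set.
 */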
1799 static bool
1800 arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
1801     bool zero)
1802 {
1803 	size_t psize;
1804 
1805 	psize = PAGE_CEILING(size + extra);
1806 	if (psize == oldsize) {
1807 		/* Same size class. */
1808 		if (config_fill && opt_junk && size < oldsize) {
1809 			memset((void *)((uintptr_t)ptr + size), 0x5a, oldsize -
1810 			    size);
1811 		}
1812 		return (false);
1813 	} else {
1814 		arena_chunk_t *chunk;
1815 		arena_t *arena;
1816 
1817 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1818 		arena = chunk->arena;
1819 
1820 		if (psize < oldsize) {
1821 			/* Fill before shrinking in order to avoid a race. */
1822 			if (config_fill && opt_junk) {
1823 				memset((void *)((uintptr_t)ptr + size), 0x5a,
1824 				    oldsize - size);
1825 			}
1826 			arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
1827 			    psize);
1828 			return (false);
1829 		} else {
1830 			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
1831 			    oldsize, PAGE_CEILING(size),
1832 			    psize - PAGE_CEILING(size), zero);
1833 			if (config_fill && ret == false && zero == false &&
1834 			    opt_zero) {
1835 				memset((void *)((uintptr_t)ptr + oldsize), 0,
1836 				    size - oldsize);
1837 			}
1838 			return (ret);
1839 		}
1840 	}
1841 }
1842 
1843 void *
1844 arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
1845     bool zero)
1846 {
1847 
1848 	/*
1849 	 * Avoid moving the allocation if the size class can be left the same.
1850 	 */
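	/*
	 * E.g. for oldsize == 32 (a small bin), size == 28, extra == 8:
	 * size <= oldsize and size + extra >= oldsize both hold, so the
	 * region is reused even though size + extra == 36 can map to a
	 * different bin.
	 */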
1851 	if (oldsize <= arena_maxclass) {
1852 		if (oldsize <= SMALL_MAXCLASS) {
1853 			assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
1854 			    == oldsize);
1855 			if ((size + extra <= SMALL_MAXCLASS &&
1856 			    SMALL_SIZE2BIN(size + extra) ==
1857 			    SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
1858 			    size + extra >= oldsize)) {
1859 				if (config_fill && opt_junk && size < oldsize) {
1860 					memset((void *)((uintptr_t)ptr + size),
1861 					    0x5a, oldsize - size);
1862 				}
1863 				return (ptr);
1864 			}
1865 		} else {
1866 			assert(size <= arena_maxclass);
1867 			if (size + extra > SMALL_MAXCLASS) {
1868 				if (arena_ralloc_large(ptr, oldsize, size,
1869 				    extra, zero) == false)
1870 					return (ptr);
1871 			}
1872 		}
1873 	}
1874 
1875 	/* Reallocation would require a move. */
1876 	return (NULL);
1877 }
1878 
1879 void *
1880 arena_ralloc(void *ptr, size_t oldsize, size_t size, size_t extra,
1881     size_t alignment, bool zero, bool try_tcache)
1882 {
1883 	void *ret;
1884 	size_t copysize;
1885 
1886 	/* Try to avoid moving the allocation. */
1887 	ret = arena_ralloc_no_move(ptr, oldsize, size, extra, zero);
1888 	if (ret != NULL)
1889 		return (ret);
1890 
1891 	/*
1892 	 * size and oldsize are different enough that we need to move the
1893 	 * object.  In that case, fall back to allocating new space and
1894 	 * copying.
1895 	 */
1896 	if (alignment != 0) {
1897 		size_t usize = sa2u(size + extra, alignment);
1898 		if (usize == 0)
1899 			return (NULL);
1900 		ret = ipalloc(usize, alignment, zero);
1901 	} else
1902 		ret = arena_malloc(NULL, size + extra, zero, try_tcache);
1903 
1904 	if (ret == NULL) {
1905 		if (extra == 0)
1906 			return (NULL);
1907 		/* Try again, this time without extra. */
1908 		if (alignment != 0) {
1909 			size_t usize = sa2u(size, alignment);
1910 			if (usize == 0)
1911 				return (NULL);
1912 			ret = ipalloc(usize, alignment, zero);
1913 		} else
1914 			ret = arena_malloc(NULL, size, zero, try_tcache);
1915 
1916 		if (ret == NULL)
1917 			return (NULL);
1918 	}
1919 
1920 	/* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */
1921 
1922 	/*
1923 	 * Copy at most size bytes (not size+extra), since the caller has no
1924 	 * expectation that the extra bytes will be reliably preserved.
1925 	 */
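	/*
	 * E.g. oldsize == 4096 grown to size == 8192: copysize == 4096.
	 * Shrunk to size == 2048: copysize == 2048.
	 */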
1926 	copysize = (size < oldsize) ? size : oldsize;
1927 	memcpy(ret, ptr, copysize);
1928 	iqalloc(ptr);
1929 	return (ret);
1930 }
1931 
1932 bool
1933 arena_new(arena_t *arena, unsigned ind)
1934 {
1935 	unsigned i;
1936 	arena_bin_t *bin;
1937 
1938 	arena->ind = ind;
1939 	arena->nthreads = 0;
1940 
1941 	if (malloc_mutex_init(&arena->lock))
1942 		return (true);
1943 
1944 	if (config_stats) {
1945 		memset(&arena->stats, 0, sizeof(arena_stats_t));
1946 		arena->stats.lstats =
1947 		    (malloc_large_stats_t *)base_alloc(nlclasses *
1948 		    sizeof(malloc_large_stats_t));
1949 		if (arena->stats.lstats == NULL)
1950 			return (true);
1951 		memset(arena->stats.lstats, 0, nlclasses *
1952 		    sizeof(malloc_large_stats_t));
1953 		if (config_tcache)
1954 			ql_new(&arena->tcache_ql);
1955 	}
1956 
1957 	if (config_prof)
1958 		arena->prof_accumbytes = 0;
1959 
1960 	/* Initialize chunks. */
1961 	ql_new(&arena->chunks_dirty);
1962 	arena->spare = NULL;
1963 
1964 	arena->nactive = 0;
1965 	arena->ndirty = 0;
1966 	arena->npurgatory = 0;
1967 
1968 	arena_avail_tree_new(&arena->runs_avail_clean);
1969 	arena_avail_tree_new(&arena->runs_avail_dirty);
1970 
1971 	/* Initialize bins. */
1972 	for (i = 0; i < NBINS; i++) {
1973 		bin = &arena->bins[i];
1974 		if (malloc_mutex_init(&bin->lock))
1975 			return (true);
1976 		bin->runcur = NULL;
1977 		arena_run_tree_new(&bin->runs);
1978 		if (config_stats)
1979 			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
1980 	}
1981 
1982 	return (false);
1983 }
1984 
1985 /*
1986  * Calculate bin_info->run_size such that it meets the following constraints:
1987  *
1988  *   *) bin_info->run_size >= min_run_size
1989  *   *) bin_info->run_size <= arena_maxclass
1990  *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
1991  *   *) bin_info->nregs <= RUN_MAXREGS
1992  *
1993  * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
1994  * calculated here, since these settings are all interdependent.
1995  */
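/*
 * Rough illustration with hypothetical numbers (PAGE == 4096, reg_interval
 * == 64, header plus bitmap totaling 48 bytes, no redzone padding): a
 * one-page run fits floor((4096 - 48) / 64) == 63 regions with 64 bytes of
 * header space to spare, and the expansion loop below then grows run_size
 * one page at a time while the header overhead fraction still exceeds
 * RUN_MAX_OVRHD and the other limits permit.
 */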
1996 static size_t
1997 bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
1998 {
1999 	size_t pad_size;
2000 	size_t try_run_size, good_run_size;
2001 	uint32_t try_nregs, good_nregs;
2002 	uint32_t try_hdr_size, good_hdr_size;
2003 	uint32_t try_bitmap_offset, good_bitmap_offset;
2004 	uint32_t try_ctx0_offset, good_ctx0_offset;
2005 	uint32_t try_redzone0_offset, good_redzone0_offset;
2006 
2007 	assert(min_run_size >= PAGE);
2008 	assert(min_run_size <= arena_maxclass);
2009 
2010 	/*
2011 	 * Determine redzone size based on minimum alignment and minimum
2012 	 * redzone size.  Add padding to the end of the run if it is needed to
2013 	 * align the regions.  The padding allows each redzone to be half the
2014 	 * minimum alignment; without the padding, each redzone would have to
2015 	 * be twice as large in order to maintain alignment.
2016 	 */
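	/*
	 * E.g. reg_size == 96: the minimum alignment is 32 (its lowest set
	 * bit), so assuming REDZONE_MINSIZE == 16, each redzone is 32 >> 1
	 * == 16 bytes, pad_size == 16, and reg_interval == 96 + 32 == 128.
	 */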
2017 	if (config_fill && opt_redzone) {
2018 		size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
2019 		if (align_min <= REDZONE_MINSIZE) {
2020 			bin_info->redzone_size = REDZONE_MINSIZE;
2021 			pad_size = 0;
2022 		} else {
2023 			bin_info->redzone_size = align_min >> 1;
2024 			pad_size = bin_info->redzone_size;
2025 		}
2026 	} else {
2027 		bin_info->redzone_size = 0;
2028 		pad_size = 0;
2029 	}
2030 	bin_info->reg_interval = bin_info->reg_size +
2031 	    (bin_info->redzone_size << 1);
2032 
2033 	/*
2034 	 * Calculate known-valid settings before entering the run_size
2035 	 * expansion loop, so that the first part of the loop always copies
2036 	 * valid settings.
2037 	 *
2038 	 * The do..while loop iteratively reduces the number of regions until
2039 	 * the run header and the regions no longer overlap.  A closed formula
2040 	 * would be quite messy, since there is an interdependency between the
2041 	 * header's mask length and the number of regions.
2042 	 */
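	/*
	 * E.g. with reg_interval == 64 and a hypothetical 40-byte
	 * arena_run_t: the initial estimate for a one-page run is (4096 -
	 * 40) / 64 + 1 == 64, and the loop below decrements try_nregs
	 * until the header plus bitmap (plus prof_ctx pointers, when
	 * profiling) fits below the first redzone.
	 */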
2043 	try_run_size = min_run_size;
2044 	try_nregs = ((try_run_size - sizeof(arena_run_t)) /
2045 	    bin_info->reg_interval)
2046 	    + 1; /* Counteract try_nregs-- in loop. */
2047 	if (try_nregs > RUN_MAXREGS) {
2048 		try_nregs = RUN_MAXREGS
2049 		    + 1; /* Counteract try_nregs-- in loop. */
2050 	}
2051 	do {
2052 		try_nregs--;
2053 		try_hdr_size = sizeof(arena_run_t);
2054 		/* Pad to a long boundary. */
2055 		try_hdr_size = LONG_CEILING(try_hdr_size);
2056 		try_bitmap_offset = try_hdr_size;
2057 		/* Add space for bitmap. */
2058 		try_hdr_size += bitmap_size(try_nregs);
2059 		if (config_prof && opt_prof && prof_promote == false) {
2060 			/* Pad to a quantum boundary. */
2061 			try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2062 			try_ctx0_offset = try_hdr_size;
2063 			/* Add space for one (prof_ctx_t *) per region. */
2064 			try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
2065 		} else
2066 			try_ctx0_offset = 0;
2067 		try_redzone0_offset = try_run_size - (try_nregs *
2068 		    bin_info->reg_interval) - pad_size;
2069 	} while (try_hdr_size > try_redzone0_offset);
2070 
2071 	/* run_size expansion loop. */
2072 	do {
2073 		/*
2074 		 * Copy valid settings before trying more aggressive settings.
2075 		 */
2076 		good_run_size = try_run_size;
2077 		good_nregs = try_nregs;
2078 		good_hdr_size = try_hdr_size;
2079 		good_bitmap_offset = try_bitmap_offset;
2080 		good_ctx0_offset = try_ctx0_offset;
2081 		good_redzone0_offset = try_redzone0_offset;
2082 
2083 		/* Try more aggressive settings. */
2084 		try_run_size += PAGE;
2085 		try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
2086 		    bin_info->reg_interval)
2087 		    + 1; /* Counteract try_nregs-- in loop. */
2088 		if (try_nregs > RUN_MAXREGS) {
2089 			try_nregs = RUN_MAXREGS
2090 			    + 1; /* Counteract try_nregs-- in loop. */
2091 		}
2092 		do {
2093 			try_nregs--;
2094 			try_hdr_size = sizeof(arena_run_t);
2095 			/* Pad to a long boundary. */
2096 			try_hdr_size = LONG_CEILING(try_hdr_size);
2097 			try_bitmap_offset = try_hdr_size;
2098 			/* Add space for bitmap. */
2099 			try_hdr_size += bitmap_size(try_nregs);
2100 			if (config_prof && opt_prof && prof_promote == false) {
2101 				/* Pad to a quantum boundary. */
2102 				try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2103 				try_ctx0_offset = try_hdr_size;
2104 				/*
2105 				 * Add space for one (prof_ctx_t *) per region.
2106 				 */
2107 				try_hdr_size += try_nregs *
2108 				    sizeof(prof_ctx_t *);
2109 			}
2110 			try_redzone0_offset = try_run_size - (try_nregs *
2111 			    bin_info->reg_interval) - pad_size;
2112 		} while (try_hdr_size > try_redzone0_offset);
2113 	} while (try_run_size <= arena_maxclass
2115 	    && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
2116 	    RUN_MAX_OVRHD_RELAX
2117 	    && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
2118 	    && try_nregs < RUN_MAXREGS);
2119 
2120 	assert(good_hdr_size <= good_redzone0_offset);
2121 
2122 	/* Copy final settings. */
2123 	bin_info->run_size = good_run_size;
2124 	bin_info->nregs = good_nregs;
2125 	bin_info->bitmap_offset = good_bitmap_offset;
2126 	bin_info->ctx0_offset = good_ctx0_offset;
2127 	bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
2128 
2129 	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
2130 	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
2131 
2132 	return (good_run_size);
2133 }
2134 
2135 static void
2136 bin_info_init(void)
2137 {
2138 	arena_bin_info_t *bin_info;
2139 	size_t prev_run_size = PAGE;
2140 
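	/*
	 * Each SIZE_CLASS(bin, delta, size) entry of SIZE_CLASSES expands
	 * to the four statements in the macro below; e.g. a first class
	 * (0, 8, 8) sets arena_bin_info[0].reg_size to 8, computes its run
	 * size, and initializes its bitmap info.  Threading prev_run_size
	 * through keeps run sizes monotonically non-decreasing across
	 * bins.
	 */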
2141 #define	SIZE_CLASS(bin, delta, size)					\
2142 	bin_info = &arena_bin_info[bin];				\
2143 	bin_info->reg_size = size;					\
2144 	prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
2145 	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
2146 	SIZE_CLASSES
2147 #undef SIZE_CLASS
2148 }
2149 
2150 void
2151 arena_boot(void)
2152 {
2153 	size_t header_size;
2154 	unsigned i;
2155 
2156 	/*
2157 	 * Compute the header size such that it is large enough to contain the
2158 	 * page map.  The page map is biased to omit entries for the header
2159 	 * itself, so some iteration is necessary to compute the map bias.
2160 	 *
2161 	 * 1) Compute safe header_size and map_bias values that include enough
2162 	 *    space for an unbiased page map.
2163 	 * 2) Refine map_bias based on (1) to omit the header pages in the page
2164 	 *    map.  The resulting map_bias may be one too small.
2165 	 * 3) Refine map_bias based on (2).  The result will be >= the result
2166 	 *    from (2), and will always be correct.
2167 	 */
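	/*
	 * Worked example with hypothetical sizes (PAGE == 4096,
	 * chunk_npages == 1024, offsetof(arena_chunk_t, map) == 112,
	 * sizeof(arena_chunk_map_t) == 48): pass 1 sizes the header for
	 * all 1024 map entries (49264 bytes), giving map_bias == 13; pass
	 * 2 sizes it for 1011 entries (48640 bytes), giving 12; pass 3
	 * sizes it for 1012 entries (48688 bytes) and confirms 12.
	 */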
2168 	map_bias = 0;
2169 	for (i = 0; i < 3; i++) {
2170 		header_size = offsetof(arena_chunk_t, map) +
2171 		    (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
2172 		map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
2173 		    != 0);
2174 	}
2175 	assert(map_bias > 0);
2176 
2177 	arena_maxclass = chunksize - (map_bias << LG_PAGE);
2178 
2179 	bin_info_init();
2180 }
2181 
2182 void
2183 arena_prefork(arena_t *arena)
2184 {
2185 	unsigned i;
2186 
2187 	malloc_mutex_prefork(&arena->lock);
2188 	for (i = 0; i < NBINS; i++)
2189 		malloc_mutex_prefork(&arena->bins[i].lock);
2190 }
2191 
2192 void
2193 arena_postfork_parent(arena_t *arena)
2194 {
2195 	unsigned i;
2196 
2197 	for (i = 0; i < NBINS; i++)
2198 		malloc_mutex_postfork_parent(&arena->bins[i].lock);
2199 	malloc_mutex_postfork_parent(&arena->lock);
2200 }
2201 
2202 void
2203 arena_postfork_child(arena_t *arena)
2204 {
2205 	unsigned i;
2206 
2207 	for (i = 0; i < NBINS; i++)
2208 		malloc_mutex_postfork_child(&arena->bins[i].lock);
2209 	malloc_mutex_postfork_child(&arena->lock);
2210 }
2211