xref: /freebsd/contrib/jemalloc/src/arena.c (revision d8b88105c2ccf7686552516877f541efb54fb6c8)
#define	JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t		opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
arena_bin_info_t	arena_bin_info[NBINS];

JEMALLOC_ALIGNED(CACHELINE)
const uint8_t	small_size2bin[] = {
#define	S2B_8(i)	i,
#define	S2B_16(i)	S2B_8(i) S2B_8(i)
#define	S2B_32(i)	S2B_16(i) S2B_16(i)
#define	S2B_64(i)	S2B_32(i) S2B_32(i)
#define	S2B_128(i)	S2B_64(i) S2B_64(i)
#define	S2B_256(i)	S2B_128(i) S2B_128(i)
#define	S2B_512(i)	S2B_256(i) S2B_256(i)
#define	S2B_1024(i)	S2B_512(i) S2B_512(i)
#define	S2B_2048(i)	S2B_1024(i) S2B_1024(i)
#define	S2B_4096(i)	S2B_2048(i) S2B_2048(i)
#define	S2B_8192(i)	S2B_4096(i) S2B_4096(i)
#define	SIZE_CLASS(bin, delta, size)					\
	S2B_##delta(bin)
	SIZE_CLASSES
#undef S2B_8
#undef S2B_16
#undef S2B_32
#undef S2B_64
#undef S2B_128
#undef S2B_256
#undef S2B_512
#undef S2B_1024
#undef S2B_2048
#undef S2B_4096
#undef S2B_8192
#undef SIZE_CLASS
};
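
/*
 * Each S2B_<delta>(i) macro above emits delta/8 copies of bin index i (one
 * table entry per 8 bytes), so e.g. a SIZE_CLASS entry whose delta is 64
 * expands to S2B_64(bin), i.e. eight consecutive entries for that bin.  The
 * result is a dense size-to-bin lookup table with 8-byte granularity.
 */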

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void	arena_purge(arena_t *arena, bool all);
static void	arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned);
static void	arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void	arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	uintptr_t a_mapelm = (uintptr_t)a;
	uintptr_t b_mapelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
}
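
/*
 * Note: the (a > b) - (a < b) idiom used above yields -1, 0, or 1, which is
 * exactly the three-way result rb_gen() expects from a comparator; e.g.
 * equal pointers give 0 - 0 == 0.
 */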

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
    u.rb_link, arena_run_comp)

static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	int ret;
	size_t a_size = a->bits & ~PAGE_MASK;
	size_t b_size = b->bits & ~PAGE_MASK;

	ret = (a_size > b_size) - (a_size < b_size);
	if (ret == 0) {
		uintptr_t a_mapelm, b_mapelm;

		if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
			a_mapelm = (uintptr_t)a;
		else {
			/*
			 * Treat keys as though they are lower than anything
			 * else.
			 */
			a_mapelm = 0;
		}
		b_mapelm = (uintptr_t)b;

		ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
	}

	return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
    u.rb_link, arena_avail_comp)

static inline int
arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
{

	assert(a != NULL);
	assert(b != NULL);

	/*
	 * Short-circuit for self comparison.  The following comparison code
	 * would come to the same result, but at the cost of executing the slow
	 * path.
	 */
	if (a == b)
		return (0);

	/*
	 * Order such that chunks with higher fragmentation are "less than"
	 * those with lower fragmentation -- purging order is from "least" to
	 * "greatest".  Fragmentation is measured as:
	 *
	 *     mean current avail run size
	 *   --------------------------------
	 *   mean defragmented avail run size
	 *
	 *            navail
	 *         -----------
	 *         nruns_avail           nruns_avail-nruns_adjac
	 * = ========================= = -----------------------
	 *            navail                  nruns_avail
	 *    -----------------------
	 *    nruns_avail-nruns_adjac
	 *
	 * The following code multiplies away the denominator prior to
	 * comparison, in order to avoid division.
	 */
	{
		size_t a_val = (a->nruns_avail - a->nruns_adjac) *
		    b->nruns_avail;
		size_t b_val = (b->nruns_avail - b->nruns_adjac) *
		    a->nruns_avail;

		if (a_val < b_val)
			return (1);
		if (a_val > b_val)
			return (-1);
	}
	/*
	 * Break ties by chunk address.  For fragmented chunks, report lower
	 * addresses as "lower", so that fragmentation reduction happens first
	 * at lower addresses.  However, use the opposite ordering for
	 * unfragmented chunks, in order to increase the chances of
	 * re-allocating dirty runs.
	 */
	{
		uintptr_t a_chunk = (uintptr_t)a;
		uintptr_t b_chunk = (uintptr_t)b;
		int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk));
		if (a->nruns_adjac == 0) {
			assert(b->nruns_adjac == 0);
			ret = -ret;
		}
		return (ret);
	}
}
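
/*
 * Worked example of the cross-multiplication above: for fragmentation
 * ratios 2/4 (a) and 3/4 (b), a_val = 2*4 = 8 and b_val = 3*4 = 12, so
 * a_val < b_val exactly when a's ratio is the smaller of the two -- the
 * fractions are compared without performing any division.
 */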

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t,
    dirty_link, arena_chunk_dirty_comp)

static inline bool
arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind)
{
	bool ret;

	if (pageind-1 < map_bias)
		ret = false;
	else {
		ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0);
		assert(ret == false || arena_mapbits_dirty_get(chunk,
		    pageind-1) != arena_mapbits_dirty_get(chunk, pageind));
	}
	return (ret);
}

static inline bool
arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages)
{
	bool ret;

	if (pageind+npages == chunk_npages)
		ret = false;
	else {
		assert(pageind+npages < chunk_npages);
		ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0);
		assert(ret == false || arena_mapbits_dirty_get(chunk, pageind)
		    != arena_mapbits_dirty_get(chunk, pageind+npages));
	}
	return (ret);
}

static inline bool
arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages)
{

	return (arena_avail_adjac_pred(chunk, pageind) ||
	    arena_avail_adjac_succ(chunk, pageind, npages));
}

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
{

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));

	/*
	 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
	 * removed and reinserted even if the run to be inserted is clean.
	 */
	if (chunk->ndirty != 0)
		arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);

	if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
		chunk->nruns_adjac++;
	if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
		chunk->nruns_adjac++;
	chunk->nruns_avail++;
	assert(chunk->nruns_avail > chunk->nruns_adjac);

	if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
		arena->ndirty += npages;
		chunk->ndirty += npages;
	}
	if (chunk->ndirty != 0)
		arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);

	arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk,
	    pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ)
{

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));

	/*
	 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be
	 * removed and reinserted even if the run to be removed is clean.
	 */
	if (chunk->ndirty != 0)
		arena_chunk_dirty_remove(&arena->chunks_dirty, chunk);

	if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind))
		chunk->nruns_adjac--;
	if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages))
		chunk->nruns_adjac--;
	chunk->nruns_avail--;
	assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail
	    == 0 && chunk->nruns_adjac == 0));

	if (arena_mapbits_dirty_get(chunk, pageind) != 0) {
		arena->ndirty -= npages;
		chunk->ndirty -= npages;
	}
	if (chunk->ndirty != 0)
		arena_chunk_dirty_insert(&arena->chunks_dirty, chunk);

	arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk,
	    pageind));
}

static inline void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	unsigned regind;
	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
	    (uintptr_t)bin_info->bitmap_offset);

	assert(run->nfree > 0);
	assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false);

	regind = bitmap_sfu(bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	if (regind == run->nextind)
		run->nextind++;
	assert(regind < run->nextind);
	return (ret);
}
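
/*
 * Region addressing example (illustrative values): with reg0_offset == 64
 * and reg_interval == 96, regind 3 maps to run + 64 + 3*96 == run + 352.
 * bitmap_sfu() finds the lowest unset (free) bit, marks it set, and returns
 * its index, so low-addressed regions are handed out first.
 */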

static inline void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	size_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	unsigned regind = arena_run_regind(run, bin_info, ptr);
	bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
	    (uintptr_t)bin_info->bitmap_offset);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - ((uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >= (uintptr_t)run +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

static inline void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind <<
	    LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

static inline void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind <<
	    LG_PAGE)), PAGE);
}

static inline void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

static void
arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages)
{

	if (config_stats) {
		ssize_t cactive_diff = CHUNK_CEILING((arena->nactive +
		    add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive -
		    sub_pages) << LG_PAGE);
		if (cactive_diff != 0)
			stats_cactive_add(cactive_diff);
	}
}
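
/*
 * cactive is maintained at chunk granularity: the diff above is nonzero
 * only when the update moves nactive across a CHUNK_CEILING() boundary.
 * E.g., with 4 KiB pages and 4 MiB chunks, growing nactive from 1023 to
 * 1025 pages raises CHUNK_CEILING(nactive << LG_PAGE) from 4 MiB to 8 MiB.
 */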

static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t need_pages)
{
	size_t total_pages, rem_pages;

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages, true, true);
	arena_cactive_update(arena, need_pages, 0);
	arena->nactive += need_pages;

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		if (flag_dirty != 0) {
			arena_mapbits_unallocated_set(chunk,
			    run_ind+need_pages, (rem_pages << LG_PAGE),
			    flag_dirty);
			arena_mapbits_unallocated_set(chunk,
			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
			    flag_dirty);
		} else {
			arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
			    (rem_pages << LG_PAGE),
			    arena_mapbits_unzeroed_get(chunk,
			    run_ind+need_pages));
			arena_mapbits_unallocated_set(chunk,
			    run_ind+total_pages-1, (rem_pages << LG_PAGE),
			    arena_mapbits_unzeroed_get(chunk,
			    run_ind+total_pages-1));
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages,
		    false, true);
	}
}

static void
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	size_t flag_dirty, run_ind, need_pages, i;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    need_pages);
	}

	if (zero) {
		if (flag_dirty == 0) {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		} else {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		}
	} else {
		VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty);
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty);
}

static void
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	arena_run_split_large_helper(arena, run, size, true, zero);
}

static void
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	arena_run_split_large_helper(arena, run, size, false, zero);
}
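
/*
 * The two wrappers above differ only in the remove flag:
 * arena_run_split_large() takes the run out of runs_avail, whereas
 * arena_run_init_large() is used on a run that was already removed (e.g.
 * the aligned remainder after trimming in arena_palloc()) and only
 * (re)initializes its chunk map entries.
 */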

static void
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    size_t binind)
{
	arena_chunk_t *chunk;
	size_t flag_dirty, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages);

	/*
	 * Propagate the dirty and unzeroed flags to the allocated small run,
	 * so that arena_dalloc_bin_run() has the ability to conditionally trim
	 * clean pages.
	 */
	arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty);
	/*
	 * The first page will always be dirtied during small run
	 * initialization, so a validation failure here would not actually
	 * cause an observable failure.
	 */
	if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
	    run_ind) == 0)
		arena_run_page_validate_zeroed(chunk, run_ind);
	for (i = 1; i < need_pages - 1; i++) {
		arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0);
		if (config_debug && flag_dirty == 0 &&
		    arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1,
	    binind, flag_dirty);
	if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1) == 0)
		arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1);
	VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxclass);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxclass);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero;
	size_t unzeroed, i;

	assert(arena->spare == NULL);

	zero = false;
	malloc_mutex_unlock(&arena->lock);
	chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false,
	    &zero, arena->dss_prec);
	malloc_mutex_lock(&arena->lock);
	if (chunk == NULL)
		return (NULL);
	if (config_stats)
		arena->stats.mapped += chunksize;

	chunk->arena = arena;

	/*
	 * Claim that no pages are in use, since the header is merely overhead.
	 */
	chunk->ndirty = 0;

	chunk->nruns_avail = 0;
	chunk->nruns_adjac = 0;

	/*
	 * Initialize the map to contain one maximal free untouched run.  Mark
	 * the pages as zeroed iff chunk_alloc() returned a zeroed chunk.
	 */
	unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass,
	    unzeroed);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (zero == false) {
		VALGRIND_MAKE_MEM_UNDEFINED((void *)arena_mapp_get(chunk,
		    map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
		    chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
		    map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_unzeroed_set(chunk, i, unzeroed);
	} else {
		VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk,
		    map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk,
		    chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk,
		    map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass,
	    unzeroed);

	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(arena);
		if (chunk == NULL)
			return (NULL);
	}

	/* Insert the run into the runs_avail tree. */
	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias,
	    false, false);

	return (chunk);
}

static void
arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk)
{
	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxclass);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxclass);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	/*
	 * Remove run from the runs_avail tree, so that the arena does not use
	 * it.
	 */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias,
	    false, false);

	if (arena->spare != NULL) {
		arena_chunk_t *spare = arena->spare;

		arena->spare = chunk;
		malloc_mutex_unlock(&arena->lock);
		chunk_dealloc((void *)spare, chunksize, true);
		malloc_mutex_lock(&arena->lock);
		if (config_stats)
			arena->stats.mapped -= chunksize;
	} else
		arena->spare = chunk;
}
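
/*
 * Caching exactly one spare chunk per arena provides hysteresis: a workload
 * that repeatedly allocates and deallocates its last chunk-sized run avoids
 * a chunk_alloc()/chunk_dealloc() round trip on every iteration.
 */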

static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
	arena_run_t *run;
	arena_chunk_map_t *mapelm, key;

	key.bits = size | CHUNK_MAP_KEY;
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = (((uintptr_t)mapelm -
		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
		    + map_bias;

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split_large(arena, run, size, zero);
		return (run);
	}

	return (NULL);
}

static arena_run_t *
arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxclass);
	assert((size & PAGE_MASK) == 0);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_large_helper(arena, size, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
		arena_run_split_large(arena, run, size, zero);
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind)
{
	arena_run_t *run;
	arena_chunk_map_t *mapelm, key;

	key.bits = size | CHUNK_MAP_KEY;
	mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key);
	if (mapelm != NULL) {
		arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm);
		size_t pageind = (((uintptr_t)mapelm -
		    (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t))
		    + map_bias;

		run = (arena_run_t *)((uintptr_t)run_chunk + (pageind <<
		    LG_PAGE));
		arena_run_split_small(arena, run, size, binind);
		return (run);
	}

	return (NULL);
}

static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, size_t binind)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxclass);
	assert((size & PAGE_MASK) == 0);
	assert(binind != BININD_INVALID);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_small_helper(arena, size, binind);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE));
		arena_run_split_small(arena, run, size, binind);
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_small_helper(arena, size, binind));
}

static inline void
arena_maybe_purge(arena_t *arena)
{
	size_t npurgeable, threshold;

	/* Don't purge if the option is disabled. */
	if (opt_lg_dirty_mult < 0)
		return;
	/* Don't purge if all dirty pages are already being purged. */
	if (arena->ndirty <= arena->npurgatory)
		return;
	npurgeable = arena->ndirty - arena->npurgatory;
	threshold = (arena->nactive >> opt_lg_dirty_mult);
	/*
	 * Don't purge unless the number of purgeable pages exceeds the
	 * threshold.
	 */
	if (npurgeable <= threshold)
		return;

	arena_purge(arena, false);
}
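
/*
 * Threshold example: with opt_lg_dirty_mult == 3 (the usual default),
 * purging begins once the number of purgeable dirty pages exceeds
 * nactive/8, i.e. an 8:1 active:dirty page ratio is maintained.
 */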

static arena_chunk_t *
chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg)
{
	size_t *ndirty = (size_t *)arg;

	assert(chunk->ndirty != 0);
	*ndirty += chunk->ndirty;
	return (NULL);
}

static size_t
arena_compute_npurgatory(arena_t *arena, bool all)
{
	size_t npurgatory, npurgeable;

	/*
	 * Compute the minimum number of pages that this thread should try to
	 * purge.
	 */
	npurgeable = arena->ndirty - arena->npurgatory;

	if (all == false) {
		size_t threshold = (arena->nactive >> opt_lg_dirty_mult);

		npurgatory = npurgeable - threshold;
	} else
		npurgatory = npurgeable;

	return (npurgatory);
}

static void
arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all,
    arena_chunk_mapelms_t *mapelms)
{
	size_t pageind, npages;

	/*
	 * Temporarily allocate free dirty runs within chunk.  If all is false,
	 * only operate on dirty runs that are fragments; otherwise operate on
	 * all dirty runs.
	 */
	for (pageind = map_bias; pageind < chunk_npages; pageind += npages) {
		arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);
		if (arena_mapbits_allocated_get(chunk, pageind) == 0) {
			size_t run_size =
			    arena_mapbits_unallocated_size_get(chunk, pageind);

			npages = run_size >> LG_PAGE;
			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));

			if (arena_mapbits_dirty_get(chunk, pageind) != 0 &&
			    (all || arena_avail_adjac(chunk, pageind,
			    npages))) {
				arena_run_t *run = (arena_run_t *)((uintptr_t)
				    chunk + (uintptr_t)(pageind << LG_PAGE));

				arena_run_split_large(arena, run, run_size,
				    false);
				/* Append to list for later processing. */
				ql_elm_new(mapelm, u.ql_link);
				ql_tail_insert(mapelms, mapelm, u.ql_link);
			}
		} else {
			/* Skip run. */
			if (arena_mapbits_large_get(chunk, pageind) != 0) {
				npages = arena_mapbits_large_size_get(chunk,
				    pageind) >> LG_PAGE;
			} else {
				size_t binind;
				arena_bin_info_t *bin_info;
				arena_run_t *run = (arena_run_t *)((uintptr_t)
				    chunk + (uintptr_t)(pageind << LG_PAGE));

				assert(arena_mapbits_small_runind_get(chunk,
				    pageind) == 0);
				binind = arena_bin_index(arena, run->bin);
				bin_info = &arena_bin_info[binind];
				npages = bin_info->run_size >> LG_PAGE;
			}
		}
	}
	assert(pageind == chunk_npages);
	assert(chunk->ndirty == 0 || all == false);
	assert(chunk->nruns_adjac == 0);
}

static size_t
arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk,
    arena_chunk_mapelms_t *mapelms)
{
	size_t npurged, pageind, npages, nmadvise;
	arena_chunk_map_t *mapelm;

	malloc_mutex_unlock(&arena->lock);
	if (config_stats)
		nmadvise = 0;
	npurged = 0;
	ql_foreach(mapelm, mapelms, u.ql_link) {
		bool unzeroed;
		size_t flag_unzeroed, i;

		pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t)) + map_bias;
		npages = arena_mapbits_large_size_get(chunk, pageind) >>
		    LG_PAGE;
		assert(pageind + npages <= chunk_npages);
		unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind <<
		    LG_PAGE)), (npages << LG_PAGE));
		flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0;
		/*
		 * Set the unzeroed flag for all pages, now that pages_purge()
		 * has returned whether the pages were zeroed as a side effect
		 * of purging.  This chunk map modification is safe even though
		 * the arena mutex isn't currently owned by this thread,
		 * because the run is marked as allocated, thus protecting it
		 * from being modified by any other thread.  As long as these
		 * writes don't perturb the first and last elements'
		 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
		 */
		for (i = 0; i < npages; i++) {
			arena_mapbits_unzeroed_set(chunk, pageind+i,
			    flag_unzeroed);
		}
		npurged += npages;
		if (config_stats)
			nmadvise++;
	}
	malloc_mutex_lock(&arena->lock);
	if (config_stats)
		arena->stats.nmadvise += nmadvise;

	return (npurged);
}
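
/*
 * pages_purge() is typically an madvise() call (MADV_FREE on FreeBSD,
 * MADV_DONTNEED on Linux); its return value reports whether the pages may
 * still hold their old (unzeroed) contents afterward, which is why the
 * unzeroed flags are rewritten above.
 */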

static void
arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk,
    arena_chunk_mapelms_t *mapelms)
{
	arena_chunk_map_t *mapelm;
	size_t pageind;

	/* Deallocate runs. */
	for (mapelm = ql_first(mapelms); mapelm != NULL;
	    mapelm = ql_first(mapelms)) {
		arena_run_t *run;

		pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t)) + map_bias;
		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind <<
		    LG_PAGE));
		ql_remove(mapelms, mapelm, u.ql_link);
		arena_run_dalloc(arena, run, false, true);
	}
}

static inline size_t
arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all)
{
	size_t npurged;
	arena_chunk_mapelms_t mapelms;

	ql_new(&mapelms);

	/*
	 * If chunk is the spare, temporarily re-allocate it, 1) so that its
	 * run is reinserted into runs_avail, and 2) so that it cannot be
	 * completely discarded by another thread while arena->lock is dropped
	 * by this thread.  Note that the arena_run_dalloc() call will
	 * implicitly deallocate the chunk, so no explicit action is required
	 * in this function to deallocate the chunk.
	 *
	 * Note that once a chunk contains dirty pages, it cannot again contain
	 * a single run unless 1) it is a dirty run, or 2) this function purges
	 * dirty pages and causes the transition to a single clean run.  Thus
	 * (chunk == arena->spare) is possible, but it is not possible for
	 * this function to be called on the spare unless it contains a dirty
	 * run.
	 */
	if (chunk == arena->spare) {
		assert(arena_mapbits_dirty_get(chunk, map_bias) != 0);
		assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0);

		arena_chunk_alloc(arena);
	}

	if (config_stats)
		arena->stats.purged += chunk->ndirty;

	/*
	 * Operate on all dirty runs if there is no clean/dirty run
	 * fragmentation.
	 */
	if (chunk->nruns_adjac == 0)
		all = true;

	arena_chunk_stash_dirty(arena, chunk, all, &mapelms);
	npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms);
	arena_chunk_unstash_purged(arena, chunk, &mapelms);

	return (npurged);
}

static void
arena_purge(arena_t *arena, bool all)
{
	arena_chunk_t *chunk;
	size_t npurgatory;
	if (config_debug) {
		size_t ndirty = 0;

		arena_chunk_dirty_iter(&arena->chunks_dirty, NULL,
		    chunks_dirty_iter_cb, (void *)&ndirty);
		assert(ndirty == arena->ndirty);
	}
	assert(arena->ndirty > arena->npurgatory || all);
	assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty -
	    arena->npurgatory) || all);

	if (config_stats)
		arena->stats.npurge++;

	/*
	 * Add the minimum number of pages this thread should try to purge to
	 * arena->npurgatory.  This will keep multiple threads from racing to
	 * reduce ndirty below the threshold.
	 */
	npurgatory = arena_compute_npurgatory(arena, all);
	arena->npurgatory += npurgatory;

	while (npurgatory > 0) {
		size_t npurgeable, npurged, nunpurged;

		/* Get next chunk with dirty pages. */
		chunk = arena_chunk_dirty_first(&arena->chunks_dirty);
		if (chunk == NULL) {
			/*
			 * This thread was unable to purge as many pages as
			 * originally intended, due to races with other threads
			 * that either did some of the purging work, or re-used
			 * dirty pages.
			 */
			arena->npurgatory -= npurgatory;
			return;
		}
		npurgeable = chunk->ndirty;
		assert(npurgeable != 0);

		if (npurgeable > npurgatory && chunk->nruns_adjac == 0) {
			/*
			 * This thread will purge all the dirty pages in chunk,
			 * so set npurgatory to reflect this thread's intent to
			 * purge the pages.  This tends to reduce the chances
			 * of the following scenario:
			 *
			 * 1) This thread sets arena->npurgatory such that
			 *    (arena->ndirty - arena->npurgatory) is at the
			 *    threshold.
			 * 2) This thread drops arena->lock.
			 * 3) Another thread causes one or more pages to be
			 *    dirtied, and immediately determines that it must
			 *    purge dirty pages.
			 *
			 * If this scenario *does* play out, that's okay,
			 * because all of the purging work being done really
			 * needs to happen.
			 */
			arena->npurgatory += npurgeable - npurgatory;
			npurgatory = npurgeable;
		}

		/*
		 * Keep track of how many pages are purgeable, versus how many
		 * actually get purged, and adjust counters accordingly.
		 */
		arena->npurgatory -= npurgeable;
		npurgatory -= npurgeable;
		npurged = arena_chunk_purge(arena, chunk, all);
		nunpurged = npurgeable - npurged;
		arena->npurgatory += nunpurged;
		npurgatory += nunpurged;
	}
}

void
arena_purge_all(arena_t *arena)
{

	malloc_mutex_lock(&arena->lock);
	arena_purge(arena, true);
	malloc_mutex_unlock(&arena->lock);
}

static void
arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
    size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty)
{
	size_t size = *p_size;
	size_t run_ind = *p_run_ind;
	size_t run_pages = *p_run_pages;

	/* Try to coalesce forward. */
	if (run_ind + run_pages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) {
		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages);
		size_t nrun_pages = nrun_size >> LG_PAGE;

		/*
		 * Remove successor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == nrun_size);
		assert(arena_mapbits_dirty_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
		arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages,
		    false, true);

		size += nrun_size;
		run_pages += nrun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	/* Try to coalesce backward. */
	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
	    run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
	    flag_dirty) {
		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind-1);
		size_t prun_pages = prun_size >> LG_PAGE;

		run_ind -= prun_pages;

		/*
		 * Remove predecessor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
		    prun_size);
		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
		arena_avail_remove(arena, chunk, run_ind, prun_pages, true,
		    false);

		size += prun_size;
		run_pages += prun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	*p_size = size;
	*p_run_ind = run_ind;
	*p_run_pages = run_pages;
}
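
/*
 * Coalescing example: freeing a 4-page run at pages 10..13 while an
 * available run with the same dirty state spans pages 14..17 removes the
 * successor from runs_avail and yields a single 8-page run at pages 10..17;
 * the merged run is inserted into runs_avail by the caller.
 */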

static void
arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned)
{
	arena_chunk_t *chunk;
	size_t size, run_ind, run_pages, flag_dirty;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
	assert(run_ind >= map_bias);
	assert(run_ind < chunk_npages);
	if (arena_mapbits_large_get(chunk, run_ind) != 0) {
		size = arena_mapbits_large_size_get(chunk, run_ind);
		assert(size == PAGE ||
		    arena_mapbits_large_size_get(chunk,
		    run_ind+(size>>LG_PAGE)-1) == 0);
	} else {
		size_t binind = arena_bin_index(arena, run->bin);
		arena_bin_info_t *bin_info = &arena_bin_info[binind];
		size = bin_info->run_size;
	}
	run_pages = (size >> LG_PAGE);
	arena_cactive_update(arena, 0, run_pages);
	arena->nactive -= run_pages;

	/*
	 * The run is dirty if the caller claims to have dirtied it, as well as
	 * if it was already dirty before being allocated and the caller
	 * doesn't claim to have cleaned it.
	 */
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0)
		dirty = true;
	flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0;

	/* Mark pages as unallocated in the chunk map. */
	if (dirty) {
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		    CHUNK_MAP_DIRTY);
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    CHUNK_MAP_DIRTY);
	} else {
		arena_mapbits_unallocated_set(chunk, run_ind, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind));
		arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size,
		    arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1));
	}

	arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages,
	    flag_dirty);

	/* Insert into runs_avail, now that coalescing is complete. */
	assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
	    arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1));
	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages-1));
	arena_avail_insert(arena, chunk, run_ind, run_pages, true, true);

	/* Deallocate chunk if it is now completely unused. */
	if (size == arena_maxclass) {
		assert(run_ind == map_bias);
		assert(run_pages == (arena_maxclass >> LG_PAGE));
		arena_chunk_dealloc(arena, chunk);
	}

	/*
	 * It is okay to do dirty page processing here even if the chunk was
	 * deallocated above, since in that case it is the spare.  Waiting
	 * until after possible chunk deallocation to do dirty processing
	 * allows for an old spare to be fully deallocated, thus decreasing the
	 * chances of spuriously crossing the dirty page purging threshold.
	 */
	if (dirty)
		arena_maybe_purge(arena);
}

static void
arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize)
{
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	size_t head_npages = (oldsize - newsize) >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * leading run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
	arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty);

	if (config_debug) {
		UNUSED size_t tail_npages = newsize >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, newsize,
	    flag_dirty);

	arena_run_dalloc(arena, run, false, false);
}

static void
arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    size_t oldsize, size_t newsize, bool dirty)
{
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	size_t head_npages = newsize >> LG_PAGE;
	size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind);

	assert(oldsize > newsize);

	/*
	 * Update the chunk map so that arena_run_dalloc() can treat the
	 * trailing run as separately allocated.  Set the last element of each
	 * run first, in case of single-page runs.
	 */
	assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize);
	arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty);
	arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty);

	if (config_debug) {
		UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE;
		assert(arena_mapbits_large_size_get(chunk,
		    pageind+head_npages+tail_npages-1) == 0);
		assert(arena_mapbits_dirty_get(chunk,
		    pageind+head_npages+tail_npages-1) == flag_dirty);
	}
	arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize,
	    flag_dirty);

	arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize),
	    dirty, false);
}

static arena_run_t *
arena_bin_runs_first(arena_bin_t *bin)
{
	arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs);
	if (mapelm != NULL) {
		arena_chunk_t *chunk;
		size_t pageind;
		arena_run_t *run;

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm);
		pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) /
		    sizeof(arena_chunk_map_t))) + map_bias;
		run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
		    arena_mapbits_small_runind_get(chunk, pageind)) <<
		    LG_PAGE));
		return (run);
	}

	return (NULL);
}

static void
arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_t *chunk = CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

	assert(arena_run_tree_search(&bin->runs, mapelm) == NULL);

	arena_run_tree_insert(&bin->runs, mapelm);
}

static void
arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE;
	arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind);

	assert(arena_run_tree_search(&bin->runs, mapelm) != NULL);

	arena_run_tree_remove(&bin->runs, mapelm);
}

static arena_run_t *
arena_bin_nonfull_run_tryget(arena_bin_t *bin)
{
	arena_run_t *run = arena_bin_runs_first(bin);
	if (run != NULL) {
		arena_bin_runs_remove(bin, run);
		if (config_stats)
			bin->stats.reruns++;
	}
	return (run);
}

static arena_run_t *
arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin)
{
	arena_run_t *run;
	size_t binind;
	arena_bin_info_t *bin_info;

	/* Look for a usable run. */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);
	/* No existing runs have any space available. */

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];

	/* Allocate a new run. */
	malloc_mutex_unlock(&bin->lock);
	/******************************/
	malloc_mutex_lock(&arena->lock);
	run = arena_run_alloc_small(arena, bin_info->run_size, binind);
	if (run != NULL) {
		bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run +
		    (uintptr_t)bin_info->bitmap_offset);

		/* Initialize run internals. */
		run->bin = bin;
		run->nextind = 0;
		run->nfree = bin_info->nregs;
		bitmap_init(bitmap, &bin_info->bitmap_info);
	}
	malloc_mutex_unlock(&arena->lock);
	/********************************/
	malloc_mutex_lock(&bin->lock);
	if (run != NULL) {
		if (config_stats) {
			bin->stats.nruns++;
			bin->stats.curruns++;
		}
		return (run);
	}

	/*
	 * arena_run_alloc_small() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	run = arena_bin_nonfull_run_tryget(bin);
	if (run != NULL)
		return (run);

	return (NULL);
}
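
/*
 * Lock discipline: bin->lock is dropped before acquiring arena->lock above,
 * so runcur and the run tree can change concurrently while the run is being
 * allocated; callers such as arena_bin_malloc_hard() must therefore
 * re-check bin state after this function returns.
 */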

/* Re-fill bin->runcur, then call arena_run_reg_alloc(). */
static void *
arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin)
{
	void *ret;
	size_t binind;
	arena_bin_info_t *bin_info;
	arena_run_t *run;

	binind = arena_bin_index(arena, bin);
	bin_info = &arena_bin_info[binind];
	bin->runcur = NULL;
	run = arena_bin_nonfull_run_get(arena, bin);
	if (bin->runcur != NULL && bin->runcur->nfree > 0) {
		/*
		 * Another thread updated runcur while this one ran without the
		 * bin lock in arena_bin_nonfull_run_get().
		 */
		assert(bin->runcur->nfree > 0);
		ret = arena_run_reg_alloc(bin->runcur, bin_info);
		if (run != NULL) {
			arena_chunk_t *chunk;

			/*
			 * arena_run_alloc_small() may have allocated run, or
			 * it may have pulled run from the bin's run tree.
			 * Therefore it is unsafe to make any assumptions about
			 * how run has previously been used, and
			 * arena_bin_lower_run() must be called, as if a region
			 * were just deallocated from the run.
			 */
			chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
			if (run->nfree == bin_info->nregs)
				arena_dalloc_bin_run(arena, chunk, run, bin);
			else
				arena_bin_lower_run(arena, chunk, run, bin);
		}
		return (ret);
	}

	if (run == NULL)
		return (NULL);

	bin->runcur = run;

	assert(bin->runcur->nfree > 0);

	return (arena_run_reg_alloc(bin->runcur, bin_info));
}

void
arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind,
    uint64_t prof_accumbytes)
{
	unsigned i, nfill;
	arena_bin_t *bin;
	arena_run_t *run;
	void *ptr;

	assert(tbin->ncached == 0);

	if (config_prof && arena_prof_accum(arena, prof_accumbytes))
		prof_idump();
	bin = &arena->bins[binind];
	malloc_mutex_lock(&bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tbin->lg_fill_div); i < nfill; i++) {
		if ((run = bin->runcur) != NULL && run->nfree > 0)
			ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]);
		else
			ptr = arena_bin_malloc_hard(arena, bin);
		if (ptr == NULL)
			break;
		if (config_fill && opt_junk) {
			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
			    true);
		}
		/* Insert such that low regions get used first. */
		tbin->avail[nfill - 1 - i] = ptr;
	}
	if (config_stats) {
		bin->stats.allocated += i * arena_bin_info[binind].reg_size;
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	}
	malloc_mutex_unlock(&bin->lock);
	tbin->ncached = i;
}
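
/*
 * Fill-count arithmetic: nfill == ncached_max >> lg_fill_div, so e.g. a bin
 * with ncached_max == 64 and lg_fill_div == 1 refills 32 regions per cache
 * miss.  Storing into avail[nfill - 1 - i] reverses the order so that the
 * lowest-addressed regions (allocated first) are also popped first.
 */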

void
arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero)
{

	if (zero) {
		size_t redzone_size = bin_info->redzone_size;
		memset((void *)((uintptr_t)ptr - redzone_size), 0xa5,
		    redzone_size);
		memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5,
		    redzone_size);
	} else {
		memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5,
		    bin_info->reg_interval);
	}
}
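
/*
 * Junk bytes: 0xa5 marks freshly allocated memory and its redzones, while
 * arena_dalloc_junk_small() below uses 0x5a for freed memory, so the two
 * states are distinguishable when inspecting memory in a debugger.
 */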
1514 
1515 #ifdef JEMALLOC_JET
1516 #undef arena_redzone_corruption
1517 #define	arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl)
1518 #endif
1519 static void
1520 arena_redzone_corruption(void *ptr, size_t usize, bool after,
1521     size_t offset, uint8_t byte)
1522 {
1523 
1524 	malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p "
1525 	    "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s",
1526 	    after ? "after" : "before", ptr, usize, byte);
1527 }
1528 #ifdef JEMALLOC_JET
1529 #undef arena_redzone_corruption
1530 #define	arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption)
1531 arena_redzone_corruption_t *arena_redzone_corruption =
1532     JEMALLOC_N(arena_redzone_corruption_impl);
1533 #endif
1534 
1535 static void
1536 arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset)
1537 {
1538 	size_t size = bin_info->reg_size;
1539 	size_t redzone_size = bin_info->redzone_size;
1540 	size_t i;
1541 	bool error = false;
1542 
1543 	for (i = 1; i <= redzone_size; i++) {
1544 		uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i);
1545 		if (*byte != 0xa5) {
1546 			error = true;
1547 			arena_redzone_corruption(ptr, size, false, i, *byte);
1548 			if (reset)
1549 				*byte = 0xa5;
1550 		}
1551 	}
1552 	for (i = 0; i < redzone_size; i++) {
1553 		uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i);
1554 		if (*byte != 0xa5) {
1555 			error = true;
1556 			arena_redzone_corruption(ptr, size, true, i, *byte);
1557 			if (reset)
1558 				*byte = 0xa5;
1559 		}
1560 	}
1561 	if (opt_abort && error)
1562 		abort();
1563 }
1564 
1565 #ifdef JEMALLOC_JET
1566 #undef arena_dalloc_junk_small
1567 #define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl)
1568 #endif
1569 void
1570 arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info)
1571 {
1572 	size_t redzone_size = bin_info->redzone_size;
1573 
1574 	arena_redzones_validate(ptr, bin_info, false);
1575 	memset((void *)((uintptr_t)ptr - redzone_size), 0x5a,
1576 	    bin_info->reg_interval);
1577 }
1578 #ifdef JEMALLOC_JET
1579 #undef arena_dalloc_junk_small
1580 #define	arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small)
1581 arena_dalloc_junk_small_t *arena_dalloc_junk_small =
1582     JEMALLOC_N(arena_dalloc_junk_small_impl);
1583 #endif
1584 
1585 void
1586 arena_quarantine_junk_small(void *ptr, size_t usize)
1587 {
1588 	size_t binind;
1589 	arena_bin_info_t *bin_info;
1590 	cassert(config_fill);
1591 	assert(opt_junk);
1592 	assert(opt_quarantine);
1593 	assert(usize <= SMALL_MAXCLASS);
1594 
1595 	binind = SMALL_SIZE2BIN(usize);
1596 	bin_info = &arena_bin_info[binind];
1597 	arena_redzones_validate(ptr, bin_info, true);
1598 }
1599 
1600 void *
1601 arena_malloc_small(arena_t *arena, size_t size, bool zero)
1602 {
1603 	void *ret;
1604 	arena_bin_t *bin;
1605 	arena_run_t *run;
1606 	size_t binind;
1607 
1608 	binind = SMALL_SIZE2BIN(size);
1609 	assert(binind < NBINS);
1610 	bin = &arena->bins[binind];
1611 	size = arena_bin_info[binind].reg_size;
1612 
1613 	malloc_mutex_lock(&bin->lock);
1614 	if ((run = bin->runcur) != NULL && run->nfree > 0)
1615 		ret = arena_run_reg_alloc(run, &arena_bin_info[binind]);
1616 	else
1617 		ret = arena_bin_malloc_hard(arena, bin);
1618 
1619 	if (ret == NULL) {
1620 		malloc_mutex_unlock(&bin->lock);
1621 		return (NULL);
1622 	}
1623 
1624 	if (config_stats) {
1625 		bin->stats.allocated += size;
1626 		bin->stats.nmalloc++;
1627 		bin->stats.nrequests++;
1628 	}
1629 	malloc_mutex_unlock(&bin->lock);
1630 	if (config_prof && isthreaded == false && arena_prof_accum(arena, size))
1631 		prof_idump();
1632 
1633 	if (zero == false) {
1634 		if (config_fill) {
1635 			if (opt_junk) {
1636 				arena_alloc_junk_small(ret,
1637 				    &arena_bin_info[binind], false);
1638 			} else if (opt_zero)
1639 				memset(ret, 0, size);
1640 		}
1641 		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
1642 	} else {
1643 		if (config_fill && opt_junk) {
1644 			arena_alloc_junk_small(ret, &arena_bin_info[binind],
1645 			    true);
1646 		}
1647 		VALGRIND_MAKE_MEM_UNDEFINED(ret, size);
1648 		memset(ret, 0, size);
1649 	}
1650 
1651 	return (ret);
1652 }
1653 
1654 void *
1655 arena_malloc_large(arena_t *arena, size_t size, bool zero)
1656 {
1657 	void *ret;
1658 	UNUSED bool idump;
1659 
1660 	/* Large allocation. */
1661 	size = PAGE_CEILING(size);
1662 	malloc_mutex_lock(&arena->lock);
1663 	ret = (void *)arena_run_alloc_large(arena, size, zero);
1664 	if (ret == NULL) {
1665 		malloc_mutex_unlock(&arena->lock);
1666 		return (NULL);
1667 	}
1668 	if (config_stats) {
1669 		arena->stats.nmalloc_large++;
1670 		arena->stats.nrequests_large++;
1671 		arena->stats.allocated_large += size;
1672 		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1673 		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1674 		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1675 	}
1676 	if (config_prof)
1677 		idump = arena_prof_accum_locked(arena, size);
1678 	malloc_mutex_unlock(&arena->lock);
1679 	if (config_prof && idump)
1680 		prof_idump();
1681 
1682 	if (zero == false) {
1683 		if (config_fill) {
1684 			if (opt_junk)
1685 				memset(ret, 0xa5, size);
1686 			else if (opt_zero)
1687 				memset(ret, 0, size);
1688 		}
1689 	}
1690 
1691 	return (ret);
1692 }
1693 
1694 /* Only handles large allocations that require more than page alignment. */
1695 void *
1696 arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero)
1697 {
1698 	void *ret;
1699 	size_t alloc_size, leadsize, trailsize;
1700 	arena_run_t *run;
1701 	arena_chunk_t *chunk;
1702 
1703 	assert((size & PAGE_MASK) == 0);
1704 
1705 	alignment = PAGE_CEILING(alignment);
1706 	alloc_size = size + alignment - PAGE;
1707 
1708 	malloc_mutex_lock(&arena->lock);
1709 	run = arena_run_alloc_large(arena, alloc_size, false);
1710 	if (run == NULL) {
1711 		malloc_mutex_unlock(&arena->lock);
1712 		return (NULL);
1713 	}
1714 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
1715 
1716 	leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) -
1717 	    (uintptr_t)run;
1718 	assert(alloc_size >= leadsize + size);
1719 	trailsize = alloc_size - leadsize - size;
1720 	ret = (void *)((uintptr_t)run + leadsize);
1721 	if (leadsize != 0) {
1722 		arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size -
1723 		    leadsize);
1724 	}
1725 	if (trailsize != 0) {
1726 		arena_run_trim_tail(arena, chunk, ret, size + trailsize, size,
1727 		    false);
1728 	}
1729 	arena_run_init_large(arena, (arena_run_t *)ret, size, zero);
1730 
1731 	if (config_stats) {
1732 		arena->stats.nmalloc_large++;
1733 		arena->stats.nrequests_large++;
1734 		arena->stats.allocated_large += size;
1735 		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
1736 		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
1737 		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
1738 	}
1739 	malloc_mutex_unlock(&arena->lock);
1740 
1741 	if (config_fill && zero == false) {
1742 		if (opt_junk)
1743 			memset(ret, 0xa5, size);
1744 		else if (opt_zero)
1745 			memset(ret, 0, size);
1746 	}
1747 	return (ret);
1748 }
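
/*
 * Sketch of the trimming arithmetic above, assuming 4 KiB pages: for
 * size == 8192 and alignment == 16384, alloc_size is
 * 8192 + 16384 - 4096 == 20480 (five pages).  If the oversized run
 * happens to start 4096 bytes past a 16384-byte boundary, then
 * leadsize == 12288 and trailsize == 0, so only the head is trimmed;
 * had the run started on the boundary, leadsize would be 0 and a
 * 12288-byte tail would be trimmed instead.
 */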
1749 
1750 void
1751 arena_prof_promoted(const void *ptr, size_t size)
1752 {
1753 	arena_chunk_t *chunk;
1754 	size_t pageind, binind;
1755 
1756 	cassert(config_prof);
1757 	assert(ptr != NULL);
1758 	assert(CHUNK_ADDR2BASE(ptr) != ptr);
1759 	assert(isalloc(ptr, false) == PAGE);
1760 	assert(isalloc(ptr, true) == PAGE);
1761 	assert(size <= SMALL_MAXCLASS);
1762 
1763 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1764 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1765 	binind = SMALL_SIZE2BIN(size);
1766 	assert(binind < NBINS);
1767 	arena_mapbits_large_binind_set(chunk, pageind, binind);
1768 
1769 	assert(isalloc(ptr, false) == PAGE);
1770 	assert(isalloc(ptr, true) == size);
1771 }
1772 
1773 static void
1774 arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run,
1775     arena_bin_t *bin)
1776 {
1777 
1778 	/* Dissociate run from bin. */
1779 	if (run == bin->runcur)
1780 		bin->runcur = NULL;
1781 	else {
1782 		size_t binind = arena_bin_index(chunk->arena, bin);
1783 		arena_bin_info_t *bin_info = &arena_bin_info[binind];
1784 
1785 		if (bin_info->nregs != 1) {
1786 			/*
1787 			 * This block's conditional is necessary because if the
1788 			 * run only contains one region, then it never gets
1789 			 * inserted into the non-full runs tree.
1790 			 */
1791 			arena_bin_runs_remove(bin, run);
1792 		}
1793 	}
1794 }
1795 
1796 static void
1797 arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1798     arena_bin_t *bin)
1799 {
1800 	size_t binind;
1801 	arena_bin_info_t *bin_info;
1802 	size_t npages, run_ind, past;
1803 
1804 	assert(run != bin->runcur);
1805 	assert(arena_run_tree_search(&bin->runs,
1806 	    arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE))
1807 	    == NULL);
1808 
1809 	binind = arena_bin_index(chunk->arena, run->bin);
1810 	bin_info = &arena_bin_info[binind];
1811 
1812 	malloc_mutex_unlock(&bin->lock);
1813 	/******************************/
1814 	npages = bin_info->run_size >> LG_PAGE;
1815 	run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE);
1816 	past = (size_t)(PAGE_CEILING((uintptr_t)run +
1817 	    (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind *
1818 	    bin_info->reg_interval - bin_info->redzone_size) -
1819 	    (uintptr_t)chunk) >> LG_PAGE);
1820 	malloc_mutex_lock(&arena->lock);
1821 
1822 	/*
1823 	 * If the run was originally clean, and some pages were never touched,
1824 	 * trim the clean pages before deallocating the dirty portion of the
1825 	 * run.
1826 	 */
1827 	assert(arena_mapbits_dirty_get(chunk, run_ind) ==
1828 	    arena_mapbits_dirty_get(chunk, run_ind+npages-1));
1829 	if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind <
1830 	    npages) {
1831 		/* Trim clean pages.  Convert to large run beforehand. */
1832 		assert(npages > 0);
1833 		arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0);
1834 		arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0);
1835 		arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE),
1836 		    ((past - run_ind) << LG_PAGE), false);
1837 		/* npages = past - run_ind; */
1838 	}
1839 	arena_run_dalloc(arena, run, true, false);
1840 	malloc_mutex_unlock(&arena->lock);
1841 	/****************************/
1842 	malloc_mutex_lock(&bin->lock);
1843 	if (config_stats)
1844 		bin->stats.curruns--;
1845 }
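
/*
 * Concrete example of the clean-page trimming above (4 KiB pages): for
 * a clean four-page run at run_ind == 10 whose regions were only ever
 * allocated within its first two pages, past == 12 while
 * run_ind + npages == 14, so the two never-touched trailing pages are
 * returned as clean and only the first two pages are deallocated as
 * dirty.
 */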
1846 
1847 static void
1848 arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
1849     arena_bin_t *bin)
1850 {
1851 
1852 	/*
1853 	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
1854 	 * non-full run.  It is okay to NULL runcur out rather than proactively
1855 	 * keeping it pointing at the lowest non-full run.
1856 	 */
1857 	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
1858 		/* Switch runcur. */
1859 		if (bin->runcur->nfree > 0)
1860 			arena_bin_runs_insert(bin, bin->runcur);
1861 		bin->runcur = run;
1862 		if (config_stats)
1863 			bin->stats.reruns++;
1864 	} else
1865 		arena_bin_runs_insert(bin, run);
1866 }
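
/*
 * Illustrative scenario: if runcur sits at chunk offset 0x5000 and a
 * deallocation makes the run at offset 0x3000 non-full, the lower run
 * becomes runcur and the old one goes back into bin->runs.  Preferring
 * the lowest-addressed non-full run concentrates allocation at the low
 * end of the address space, which tends to let higher runs drain and
 * eventually be discarded.
 */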
1867 
1868 void
1869 arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1870     arena_chunk_map_t *mapelm)
1871 {
1872 	size_t pageind;
1873 	arena_run_t *run;
1874 	arena_bin_t *bin;
1875 	arena_bin_info_t *bin_info;
1876 	size_t size, binind;
1877 
1878 	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1879 	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1880 	    arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
1881 	bin = run->bin;
1882 	binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
1883 	bin_info = &arena_bin_info[binind];
1884 	if (config_fill || config_stats)
1885 		size = bin_info->reg_size;
1886 
1887 	if (config_fill && opt_junk)
1888 		arena_dalloc_junk_small(ptr, bin_info);
1889 
1890 	arena_run_reg_dalloc(run, ptr);
1891 	if (run->nfree == bin_info->nregs) {
1892 		arena_dissociate_bin_run(chunk, run, bin);
1893 		arena_dalloc_bin_run(arena, chunk, run, bin);
1894 	} else if (run->nfree == 1 && run != bin->runcur)
1895 		arena_bin_lower_run(arena, chunk, run, bin);
1896 
1897 	if (config_stats) {
1898 		bin->stats.allocated -= size;
1899 		bin->stats.ndalloc++;
1900 	}
1901 }
1902 
1903 void
1904 arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1905     size_t pageind, arena_chunk_map_t *mapelm)
1906 {
1907 	arena_run_t *run;
1908 	arena_bin_t *bin;
1909 
1910 	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
1911 	    arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
1912 	bin = run->bin;
1913 	malloc_mutex_lock(&bin->lock);
1914 	arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
1915 	malloc_mutex_unlock(&bin->lock);
1916 }
1917 
1918 void
1919 arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1920     size_t pageind)
1921 {
1922 	arena_chunk_map_t *mapelm;
1923 
1924 	if (config_debug) {
1925 		/* arena_ptr_small_binind_get() does extra sanity checking. */
1926 		assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
1927 		    pageind)) != BININD_INVALID);
1928 	}
1929 	mapelm = arena_mapp_get(chunk, pageind);
1930 	arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
1931 }
1932 
1933 #ifdef JEMALLOC_JET
1934 #undef arena_dalloc_junk_large
1935 #define	arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
1936 #endif
1937 static void
1938 arena_dalloc_junk_large(void *ptr, size_t usize)
1939 {
1940 
1941 	if (config_fill && opt_junk)
1942 		memset(ptr, 0x5a, usize);
1943 }
1944 #ifdef JEMALLOC_JET
1945 #undef arena_dalloc_junk_large
1946 #define	arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
1947 arena_dalloc_junk_large_t *arena_dalloc_junk_large =
1948     JEMALLOC_N(arena_dalloc_junk_large_impl);
1949 #endif
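
/*
 * Under JEMALLOC_JET, the preprocessor dance above compiles the
 * function under an *_impl name and publishes a like-typed function
 * pointer in its place, so a test can interpose on the junk-fill path.
 * A hypothetical test hook might look like:
 *
 *	static void
 *	hook(void *ptr, size_t usize)
 *	{
 *		record_junking(ptr, usize);	(hypothetical helper)
 *		memset(ptr, 0x5a, usize);
 *	}
 *	...
 *	arena_dalloc_junk_large = hook;
 */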
1950 
1951 void
1952 arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1953 {
1954 
1955 	if (config_fill || config_stats) {
1956 		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
1957 		size_t usize = arena_mapbits_large_size_get(chunk, pageind);
1958 
1959 		arena_dalloc_junk_large(ptr, usize);
1960 		if (config_stats) {
1961 			arena->stats.ndalloc_large++;
1962 			arena->stats.allocated_large -= usize;
1963 			arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++;
1964 			arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--;
1965 		}
1966 	}
1967 
1968 	arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
1969 }
1970 
1971 void
1972 arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
1973 {
1974 
1975 	malloc_mutex_lock(&arena->lock);
1976 	arena_dalloc_large_locked(arena, chunk, ptr);
1977 	malloc_mutex_unlock(&arena->lock);
1978 }
1979 
1980 static void
1981 arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
1982     size_t oldsize, size_t size)
1983 {
1984 
1985 	assert(size < oldsize);
1986 
1987 	/*
1988 	 * Shrink the run, and make trailing pages available for other
1989 	 * allocations.
1990 	 */
1991 	malloc_mutex_lock(&arena->lock);
1992 	arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
1993 	    true);
1994 	if (config_stats) {
1995 		arena->stats.ndalloc_large++;
1996 		arena->stats.allocated_large -= oldsize;
1997 		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
1998 		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
1999 
2000 		arena->stats.nmalloc_large++;
2001 		arena->stats.nrequests_large++;
2002 		arena->stats.allocated_large += size;
2003 		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
2004 		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
2005 		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
2006 	}
2007 	malloc_mutex_unlock(&arena->lock);
2008 }
2009 
2010 static bool
2011 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
2012     size_t oldsize, size_t size, size_t extra, bool zero)
2013 {
2014 	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
2015 	size_t npages = oldsize >> LG_PAGE;
2016 	size_t followsize;
2017 
2018 	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));
2019 
2020 	/* Try to extend the run. */
2021 	assert(size + extra > oldsize);
2022 	malloc_mutex_lock(&arena->lock);
2023 	if (pageind + npages < chunk_npages &&
2024 	    arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
2025 	    (followsize = arena_mapbits_unallocated_size_get(chunk,
2026 	    pageind+npages)) >= size - oldsize) {
2027 		/*
2028 		 * The next run is available and sufficiently large.  Split the
2029 		 * following run, then merge the first part with the existing
2030 		 * allocation.
2031 		 */
2032 		size_t flag_dirty;
2033 		size_t splitsize = (oldsize + followsize <= size + extra)
2034 		    ? followsize : size + extra - oldsize;
2035 		arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk +
2036 		    ((pageind+npages) << LG_PAGE)), splitsize, zero);
2037 
2038 		size = oldsize + splitsize;
2039 		npages = size >> LG_PAGE;
2040 
2041 		/*
2042 		 * Mark the extended run as dirty if either portion of the run
2043 		 * was dirty before allocation.  This is rather pedantic,
2044 		 * because there's not actually any sequence of events that
2045 		 * could cause the resulting run to be passed to
2046 		 * arena_run_dalloc() with the dirty argument set to false
2047 		 * (which is when dirty flag consistency would really matter).
2048 		 */
2049 		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
2050 		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
2051 		arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
2052 		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);
2053 
2054 		if (config_stats) {
2055 			arena->stats.ndalloc_large++;
2056 			arena->stats.allocated_large -= oldsize;
2057 			arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
2058 			arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;
2059 
2060 			arena->stats.nmalloc_large++;
2061 			arena->stats.nrequests_large++;
2062 			arena->stats.allocated_large += size;
2063 			arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
2064 			arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
2065 			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
2066 		}
2067 		malloc_mutex_unlock(&arena->lock);
2068 		return (false);
2069 	}
2070 	malloc_mutex_unlock(&arena->lock);
2071 
2072 	return (true);
2073 }
2074 
2075 #ifdef JEMALLOC_JET
2076 #undef arena_ralloc_junk_large
2077 #define	arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
2078 #endif
2079 static void
2080 arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
2081 {
2082 
2083 	if (config_fill && opt_junk) {
2084 		memset((void *)((uintptr_t)ptr + usize), 0x5a,
2085 		    old_usize - usize);
2086 	}
2087 }
2088 #ifdef JEMALLOC_JET
2089 #undef arena_ralloc_junk_large
2090 #define	arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
2091 arena_ralloc_junk_large_t *arena_ralloc_junk_large =
2092     JEMALLOC_N(arena_ralloc_junk_large_impl);
2093 #endif
2094 
2095 /*
2096  * Try to resize a large allocation in place, in order to avoid copying.
2097  * Growing always fails if the run that follows it is already in use.
2098  */
2099 static bool
2100 arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
2101     bool zero)
2102 {
2103 	size_t psize;
2104 
2105 	psize = PAGE_CEILING(size + extra);
2106 	if (psize == oldsize) {
2107 		/* Same size class. */
2108 		return (false);
2109 	} else {
2110 		arena_chunk_t *chunk;
2111 		arena_t *arena;
2112 
2113 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
2114 		arena = chunk->arena;
2115 
2116 		if (psize < oldsize) {
2117 			/* Fill before shrinking in order to avoid a race. */
2118 			arena_ralloc_junk_large(ptr, oldsize, psize);
2119 			arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
2120 			    psize);
2121 			return (false);
2122 		} else {
2123 			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
2124 			    oldsize, PAGE_CEILING(size),
2125 			    psize - PAGE_CEILING(size), zero);
2126 			if (config_fill && ret == false && zero == false) {
2127 				if (opt_junk) {
2128 					memset((void *)((uintptr_t)ptr +
2129 					    oldsize), 0xa5, isalloc(ptr,
2130 					    config_prof) - oldsize);
2131 				} else if (opt_zero) {
2132 					memset((void *)((uintptr_t)ptr +
2133 					    oldsize), 0, isalloc(ptr,
2134 					    config_prof) - oldsize);
2135 				}
2136 			}
2137 			return (ret);
2138 		}
2139 	}
2140 }
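
/*
 * Example (4 KiB pages): growing a one-page allocation to two pages
 * succeeds in place only if the page immediately following the run is
 * unallocated, in which case arena_ralloc_large_grow() splits it off
 * and merges it into the allocation; shrinking from two pages to one
 * always succeeds, junking the tail before the trim so that no stale
 * bytes remain once the trailing page is recycled.
 */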
2141 
2142 bool
2143 arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
2144     bool zero)
2145 {
2146 
2147 	/*
2148 	 * Avoid moving the allocation if the size class can be left the same.
2149 	 */
2150 	if (oldsize <= arena_maxclass) {
2151 		if (oldsize <= SMALL_MAXCLASS) {
2152 			assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
2153 			    == oldsize);
2154 			if ((size + extra <= SMALL_MAXCLASS &&
2155 			    SMALL_SIZE2BIN(size + extra) ==
2156 			    SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
2157 			    size + extra >= oldsize))
2158 				return (false);
2159 		} else {
2160 			assert(size <= arena_maxclass);
2161 			if (size + extra > SMALL_MAXCLASS) {
2162 				if (arena_ralloc_large(ptr, oldsize, size,
2163 				    extra, zero) == false)
2164 					return (false);
2165 			}
2166 		}
2167 	}
2168 
2169 	/* Reallocation would require a move. */
2170 	return (true);
2171 }
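
/*
 * For instance, with the default size classes (requests of 33..48
 * bytes all map to the 48-byte bin), shrinking a 48-byte allocation to
 * 40 bytes stays in the same bin:
 *
 *	if (arena_ralloc_no_move(ptr, 48, 40, 0, false) == false)
 *		... reuse ptr; no move was needed ...
 *
 * whereas growing it to 64 bytes changes the size class, so the call
 * returns true and the caller must move the object.
 */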
2172 
2173 void *
2174 arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
2175     size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
2176     bool try_tcache_dalloc)
2177 {
2178 	void *ret;
2179 	size_t copysize;
2180 
2181 	/* Try to avoid moving the allocation. */
2182 	if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false)
2183 		return (ptr);
2184 
2185 	/*
2186 	 * size and oldsize are different enough that we need to move the
2187 	 * object.  In that case, fall back to allocating new space and
2188 	 * copying.
2189 	 */
2190 	if (alignment != 0) {
2191 		size_t usize = sa2u(size + extra, alignment);
2192 		if (usize == 0)
2193 			return (NULL);
2194 		ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
2195 	} else
2196 		ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);
2197 
2198 	if (ret == NULL) {
2199 		if (extra == 0)
2200 			return (NULL);
2201 		/* Try again, this time without extra. */
2202 		if (alignment != 0) {
2203 			size_t usize = sa2u(size, alignment);
2204 			if (usize == 0)
2205 				return (NULL);
2206 			ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
2207 			    arena);
2208 		} else
2209 			ret = arena_malloc(arena, size, zero, try_tcache_alloc);
2210 
2211 		if (ret == NULL)
2212 			return (NULL);
2213 	}
2214 
2215 	/* Junk/zero filling was already done by ipalloct()/arena_malloc(). */
2216 
2217 	/*
2218 	 * Copy at most size bytes (not size+extra), since the caller has no
2219 	 * expectation that the extra bytes will be reliably preserved.
2220 	 */
2221 	copysize = (size < oldsize) ? size : oldsize;
2222 	VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
2223 	memcpy(ret, ptr, copysize);
2224 	iqalloct(ptr, try_tcache_dalloc);
2225 	return (ret);
2226 }
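
/*
 * A minimal caller sketch (arena selection and tcache flags assumed
 * appropriate for the calling context):
 *
 *	void *q = arena_ralloc(arena, p, isalloc(p, config_prof),
 *	    newsize, 0, 0, false, true, true);
 *	if (q == NULL) {
 *		... allocation failed; p is still valid ...
 *	} else {
 *		... p was resized in place or freed; use q ...
 *	}
 */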
2227 
2228 dss_prec_t
2229 arena_dss_prec_get(arena_t *arena)
2230 {
2231 	dss_prec_t ret;
2232 
2233 	malloc_mutex_lock(&arena->lock);
2234 	ret = arena->dss_prec;
2235 	malloc_mutex_unlock(&arena->lock);
2236 	return (ret);
2237 }
2238 
2239 void
2240 arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
2241 {
2242 
2243 	malloc_mutex_lock(&arena->lock);
2244 	arena->dss_prec = dss_prec;
2245 	malloc_mutex_unlock(&arena->lock);
2246 }
2247 
2248 void
2249 arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
2250     size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
2251     malloc_large_stats_t *lstats)
2252 {
2253 	unsigned i;
2254 
2255 	malloc_mutex_lock(&arena->lock);
2256 	*dss = dss_prec_names[arena->dss_prec];
2257 	*nactive += arena->nactive;
2258 	*ndirty += arena->ndirty;
2259 
2260 	astats->mapped += arena->stats.mapped;
2261 	astats->npurge += arena->stats.npurge;
2262 	astats->nmadvise += arena->stats.nmadvise;
2263 	astats->purged += arena->stats.purged;
2264 	astats->allocated_large += arena->stats.allocated_large;
2265 	astats->nmalloc_large += arena->stats.nmalloc_large;
2266 	astats->ndalloc_large += arena->stats.ndalloc_large;
2267 	astats->nrequests_large += arena->stats.nrequests_large;
2268 
2269 	for (i = 0; i < nlclasses; i++) {
2270 		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
2271 		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
2272 		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
2273 		lstats[i].curruns += arena->stats.lstats[i].curruns;
2274 	}
2275 	malloc_mutex_unlock(&arena->lock);
2276 
2277 	for (i = 0; i < NBINS; i++) {
2278 		arena_bin_t *bin = &arena->bins[i];
2279 
2280 		malloc_mutex_lock(&bin->lock);
2281 		bstats[i].allocated += bin->stats.allocated;
2282 		bstats[i].nmalloc += bin->stats.nmalloc;
2283 		bstats[i].ndalloc += bin->stats.ndalloc;
2284 		bstats[i].nrequests += bin->stats.nrequests;
2285 		if (config_tcache) {
2286 			bstats[i].nfills += bin->stats.nfills;
2287 			bstats[i].nflushes += bin->stats.nflushes;
2288 		}
2289 		bstats[i].nruns += bin->stats.nruns;
2290 		bstats[i].reruns += bin->stats.reruns;
2291 		bstats[i].curruns += bin->stats.curruns;
2292 		malloc_mutex_unlock(&bin->lock);
2293 	}
2294 }
2295 
2296 bool
2297 arena_new(arena_t *arena, unsigned ind)
2298 {
2299 	unsigned i;
2300 	arena_bin_t *bin;
2301 
2302 	arena->ind = ind;
2303 	arena->nthreads = 0;
2304 
2305 	if (malloc_mutex_init(&arena->lock))
2306 		return (true);
2307 
2308 	if (config_stats) {
2309 		memset(&arena->stats, 0, sizeof(arena_stats_t));
2310 		arena->stats.lstats =
2311 		    (malloc_large_stats_t *)base_alloc(nlclasses *
2312 		    sizeof(malloc_large_stats_t));
2313 		if (arena->stats.lstats == NULL)
2314 			return (true);
2315 		memset(arena->stats.lstats, 0, nlclasses *
2316 		    sizeof(malloc_large_stats_t));
2317 		if (config_tcache)
2318 			ql_new(&arena->tcache_ql);
2319 	}
2320 
2321 	if (config_prof)
2322 		arena->prof_accumbytes = 0;
2323 
2324 	arena->dss_prec = chunk_dss_prec_get();
2325 
2326 	/* Initialize chunks. */
2327 	arena_chunk_dirty_new(&arena->chunks_dirty);
2328 	arena->spare = NULL;
2329 
2330 	arena->nactive = 0;
2331 	arena->ndirty = 0;
2332 	arena->npurgatory = 0;
2333 
2334 	arena_avail_tree_new(&arena->runs_avail);
2335 
2336 	/* Initialize bins. */
2337 	for (i = 0; i < NBINS; i++) {
2338 		bin = &arena->bins[i];
2339 		if (malloc_mutex_init(&bin->lock))
2340 			return (true);
2341 		bin->runcur = NULL;
2342 		arena_run_tree_new(&bin->runs);
2343 		if (config_stats)
2344 			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
2345 	}
2346 
2347 	return (false);
2348 }
2349 
2350 /*
2351  * Calculate bin_info->run_size such that it meets the following constraints:
2352  *
2353  *   *) bin_info->run_size >= min_run_size
2354  *   *) bin_info->run_size <= arena_maxclass
2355  *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
2356  *   *) bin_info->nregs <= RUN_MAXREGS
2357  *
2358  * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
2359  * calculated here, since these settings are all interdependent.
2360  */
2361 static size_t
2362 bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
2363 {
2364 	size_t pad_size;
2365 	size_t try_run_size, good_run_size;
2366 	uint32_t try_nregs, good_nregs;
2367 	uint32_t try_hdr_size, good_hdr_size;
2368 	uint32_t try_bitmap_offset, good_bitmap_offset;
2369 	uint32_t try_ctx0_offset, good_ctx0_offset;
2370 	uint32_t try_redzone0_offset, good_redzone0_offset;
2371 
2372 	assert(min_run_size >= PAGE);
2373 	assert(min_run_size <= arena_maxclass);
2374 
2375 	/*
2376 	 * Determine redzone size based on minimum alignment and minimum
2377 	 * redzone size.  Add padding to the end of the run if it is needed to
2378 	 * align the regions.  The padding allows each redzone to be half the
2379 	 * minimum alignment; without the padding, each redzone would have to
2380 	 * be twice as large in order to maintain alignment.
2381 	 */
2382 	if (config_fill && opt_redzone) {
2383 		size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
2384 		if (align_min <= REDZONE_MINSIZE) {
2385 			bin_info->redzone_size = REDZONE_MINSIZE;
2386 			pad_size = 0;
2387 		} else {
2388 			bin_info->redzone_size = align_min >> 1;
2389 			pad_size = bin_info->redzone_size;
2390 		}
2391 	} else {
2392 		bin_info->redzone_size = 0;
2393 		pad_size = 0;
2394 	}
2395 	bin_info->reg_interval = bin_info->reg_size +
2396 	    (bin_info->redzone_size << 1);
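
	/*
	 * Worked example: for reg_size == 96, ffs() returns 6, so
	 * align_min == 32; that exceeds REDZONE_MINSIZE (16 by
	 * default), so each region gets a 16-byte redzone on each side
	 * plus 16 bytes of trailing pad, and reg_interval == 128.
	 */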
2397 
2398 	/*
2399 	 * Calculate known-valid settings before entering the run_size
2400 	 * expansion loop, so that the first part of the loop always copies
2401 	 * valid settings.
2402 	 *
2403 	 * The do..while loop iteratively reduces the number of regions until
2404 	 * the run header and the regions no longer overlap.  A closed formula
2405 	 * would be quite messy, since there is an interdependency between the
2406 	 * header's bitmap size and the number of regions.
2407 	 */
2408 	try_run_size = min_run_size;
2409 	try_nregs = ((try_run_size - sizeof(arena_run_t)) /
2410 	    bin_info->reg_interval)
2411 	    + 1; /* Counteract try_nregs-- in loop. */
2412 	if (try_nregs > RUN_MAXREGS) {
2413 		try_nregs = RUN_MAXREGS
2414 		    + 1; /* Counteract try_nregs-- in loop. */
2415 	}
2416 	do {
2417 		try_nregs--;
2418 		try_hdr_size = sizeof(arena_run_t);
2419 		/* Pad to a long boundary. */
2420 		try_hdr_size = LONG_CEILING(try_hdr_size);
2421 		try_bitmap_offset = try_hdr_size;
2422 		/* Add space for bitmap. */
2423 		try_hdr_size += bitmap_size(try_nregs);
2424 		if (config_prof && opt_prof && prof_promote == false) {
2425 			/* Pad to a quantum boundary. */
2426 			try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2427 			try_ctx0_offset = try_hdr_size;
2428 			/* Add space for one (prof_ctx_t *) per region. */
2429 			try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
2430 		} else
2431 			try_ctx0_offset = 0;
2432 		try_redzone0_offset = try_run_size - (try_nregs *
2433 		    bin_info->reg_interval) - pad_size;
2434 	} while (try_hdr_size > try_redzone0_offset);
2435 
2436 	/* run_size expansion loop. */
2437 	do {
2438 		/*
2439 		 * Copy valid settings before trying more aggressive settings.
2440 		 */
2441 		good_run_size = try_run_size;
2442 		good_nregs = try_nregs;
2443 		good_hdr_size = try_hdr_size;
2444 		good_bitmap_offset = try_bitmap_offset;
2445 		good_ctx0_offset = try_ctx0_offset;
2446 		good_redzone0_offset = try_redzone0_offset;
2447 
2448 		/* Try more aggressive settings. */
2449 		try_run_size += PAGE;
2450 		try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
2451 		    bin_info->reg_interval)
2452 		    + 1; /* Counteract try_nregs-- in loop. */
2453 		if (try_nregs > RUN_MAXREGS) {
2454 			try_nregs = RUN_MAXREGS
2455 			    + 1; /* Counteract try_nregs-- in loop. */
2456 		}
2457 		do {
2458 			try_nregs--;
2459 			try_hdr_size = sizeof(arena_run_t);
2460 			/* Pad to a long boundary. */
2461 			try_hdr_size = LONG_CEILING(try_hdr_size);
2462 			try_bitmap_offset = try_hdr_size;
2463 			/* Add space for bitmap. */
2464 			try_hdr_size += bitmap_size(try_nregs);
2465 			if (config_prof && opt_prof && prof_promote == false) {
2466 				/* Pad to a quantum boundary. */
2467 				try_hdr_size = QUANTUM_CEILING(try_hdr_size);
2468 				try_ctx0_offset = try_hdr_size;
2469 				/*
2470 				 * Add space for one (prof_ctx_t *) per region.
2471 				 */
2472 				try_hdr_size += try_nregs *
2473 				    sizeof(prof_ctx_t *);
2474 			}
2475 			try_redzone0_offset = try_run_size - (try_nregs *
2476 			    bin_info->reg_interval) - pad_size;
2477 		} while (try_hdr_size > try_redzone0_offset);
2478 	} while (try_run_size <= arena_maxclass
2480 	    && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
2481 	    RUN_MAX_OVRHD_RELAX
2482 	    && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
2483 	    && try_nregs < RUN_MAXREGS);
2484 
2485 	assert(good_hdr_size <= good_redzone0_offset);
2486 
2487 	/* Copy final settings. */
2488 	bin_info->run_size = good_run_size;
2489 	bin_info->nregs = good_nregs;
2490 	bin_info->bitmap_offset = good_bitmap_offset;
2491 	bin_info->ctx0_offset = good_ctx0_offset;
2492 	bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;
2493 
2494 	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
2495 	    * bin_info->reg_interval) + pad_size == bin_info->run_size);
2496 
2497 	return (good_run_size);
2498 }
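
/*
 * Reading the loop guard above in fixed-point terms: the test
 * (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
 * compares the fraction of the run consumed by header and pad,
 * try_redzone0_offset / try_run_size, against RUN_MAX_OVRHD / 2^RUN_BFP,
 * so pages keep being added while the overhead fraction exceeds that
 * target; the RUN_MAX_OVRHD_RELAX term waives the target entirely for
 * sufficiently small region intervals.
 */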
2499 
2500 static void
2501 bin_info_init(void)
2502 {
2503 	arena_bin_info_t *bin_info;
2504 	size_t prev_run_size = PAGE;
2505 
2506 #define	SIZE_CLASS(bin, delta, size)					\
2507 	bin_info = &arena_bin_info[bin];				\
2508 	bin_info->reg_size = size;					\
2509 	prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
2510 	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
2511 	SIZE_CLASSES
2512 #undef SIZE_CLASS
2513 }
2514 
2515 void
2516 arena_boot(void)
2517 {
2518 	size_t header_size;
2519 	unsigned i;
2520 
2521 	/*
2522 	 * Compute the header size such that it is large enough to contain the
2523 	 * page map.  The page map is biased to omit entries for the header
2524 	 * itself, so some iteration is necessary to compute the map bias.
2525 	 *
2526 	 * 1) Compute safe header_size and map_bias values that include enough
2527 	 *    space for an unbiased page map.
2528 	 * 2) Refine map_bias based on (1) to omit the header pages in the page
2529 	 *    map.  The resulting map_bias may be one too small.
2530 	 * 3) Refine map_bias based on (2).  The result will be >= the result
2531 	 *    from (2), and will always be correct.
2532 	 */
2533 	map_bias = 0;
2534 	for (i = 0; i < 3; i++) {
2535 		header_size = offsetof(arena_chunk_t, map) +
2536 		    (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
2537 		map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
2538 		    != 0);
2539 	}
2540 	assert(map_bias > 0);
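
	/*
	 * Worked example, using illustrative values of 4 KiB pages, a
	 * 4 MiB chunk (1024 pages), 32-byte map entries, and a 64-byte
	 * fixed header: pass 1 sizes the header for all 1024 entries
	 * (64 + 32768 bytes -> 9 pages); pass 2 omits the 9 header
	 * pages' entries (64 + 1015*32 == 32544 bytes -> 8 pages);
	 * pass 3 re-checks with 1016 entries (32576 bytes -> still 8
	 * pages), so map_bias converges to 8.
	 */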
2541 
2542 	arena_maxclass = chunksize - (map_bias << LG_PAGE);
2543 
2544 	bin_info_init();
2545 }
2546 
2547 void
2548 arena_prefork(arena_t *arena)
2549 {
2550 	unsigned i;
2551 
2552 	malloc_mutex_prefork(&arena->lock);
2553 	for (i = 0; i < NBINS; i++)
2554 		malloc_mutex_prefork(&arena->bins[i].lock);
2555 }
2556 
2557 void
2558 arena_postfork_parent(arena_t *arena)
2559 {
2560 	unsigned i;
2561 
2562 	for (i = 0; i < NBINS; i++)
2563 		malloc_mutex_postfork_parent(&arena->bins[i].lock);
2564 	malloc_mutex_postfork_parent(&arena->lock);
2565 }
2566 
2567 void
2568 arena_postfork_child(arena_t *arena)
2569 {
2570 	unsigned i;
2571 
2572 	for (i = 0; i < NBINS; i++)
2573 		malloc_mutex_postfork_child(&arena->bins[i].lock);
2574 	malloc_mutex_postfork_child(&arena->lock);
2575 }
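
/*
 * The three functions above are intended to bracket fork(2); a sketch
 * of the expected call sequence (driver code is hypothetical):
 *
 *	arena_prefork(arena);
 *	pid = fork();
 *	if (pid == 0)
 *		arena_postfork_child(arena);
 *	else
 *		arena_postfork_parent(arena);
 *
 * Both postfork paths release the bin locks before the arena lock, so
 * the arena lock, acquired first in arena_prefork(), is released last.
 */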
2576