1 #define JEMALLOC_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4 
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/atomic.h"
7 #include "jemalloc/internal/ctl.h"
8 #include "jemalloc/internal/extent_dss.h"
9 #include "jemalloc/internal/extent_mmap.h"
10 #include "jemalloc/internal/jemalloc_internal_types.h"
11 #include "jemalloc/internal/malloc_io.h"
12 #include "jemalloc/internal/mutex.h"
13 #include "jemalloc/internal/rtree.h"
14 #include "jemalloc/internal/size_classes.h"
15 #include "jemalloc/internal/spin.h"
16 #include "jemalloc/internal/sz.h"
17 #include "jemalloc/internal/ticker.h"
18 #include "jemalloc/internal/util.h"
19 
20 /******************************************************************************/
21 /* Data. */
22 
23 /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
24 const char	*__malloc_options_1_0 = NULL;
25 __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
26 
27 /* Runtime configuration options. */
28 const char	*je_malloc_conf
29 #ifndef _WIN32
30     JEMALLOC_ATTR(weak)
31 #endif
32     ;
33 bool	opt_abort =
34 #ifdef JEMALLOC_DEBUG
35     true
36 #else
37     false
38 #endif
39     ;
40 bool	opt_abort_conf =
41 #ifdef JEMALLOC_DEBUG
42     true
43 #else
44     false
45 #endif
46     ;
47 const char	*opt_junk =
48 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
49     "true"
50 #else
51     "false"
52 #endif
53     ;
54 bool	opt_junk_alloc =
55 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
56     true
57 #else
58     false
59 #endif
60     ;
61 bool	opt_junk_free =
62 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
63     true
64 #else
65     false
66 #endif
67     ;
68 
69 bool	opt_utrace = false;
70 bool	opt_xmalloc = false;
71 bool	opt_zero = false;
72 unsigned	opt_narenas = 0;
73 
74 unsigned	ncpus;
75 
76 /* Protects arenas initialization. */
77 malloc_mutex_t arenas_lock;
78 /*
79  * Arenas that are used to service external requests.  Not all elements of the
80  * arenas array are necessarily used; arenas are created lazily as needed.
81  *
82  * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
83  * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
84  * takes some action to create them and allocate from them.
85  *
86  * Each element of the array points to an arena_t.
87  */
88 JEMALLOC_ALIGNED(CACHELINE)
89 atomic_p_t		arenas[MALLOCX_ARENA_LIMIT];
90 static atomic_u_t	narenas_total; /* Use narenas_total_*(). */
91 static arena_t		*a0; /* arenas[0]; read-only after initialization. */
92 unsigned		narenas_auto; /* Read-only after initialization. */
93 
94 typedef enum {
95 	malloc_init_uninitialized	= 3,
96 	malloc_init_a0_initialized	= 2,
97 	malloc_init_recursible		= 1,
98 	malloc_init_initialized		= 0 /* Common case --> jnz. */
99 } malloc_init_t;
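/*
 * Note: the states are numbered in reverse so that the common case,
 * malloc_init_initialized, is 0; malloc_initialized() below then reduces to
 * a compare-against-zero, which is what the "Common case --> jnz" remark
 * above refers to.
 */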
100 static malloc_init_t	malloc_init_state = malloc_init_uninitialized;
101 
102 /* False should be the common case.  Set to true to trigger initialization. */
103 bool			malloc_slow = true;
104 
105 /* When malloc_slow is true, set the corresponding bits for sanity check. */
106 enum {
107 	flag_opt_junk_alloc	= (1U),
108 	flag_opt_junk_free	= (1U << 1),
109 	flag_opt_zero		= (1U << 2),
110 	flag_opt_utrace		= (1U << 3),
111 	flag_opt_xmalloc	= (1U << 4)
112 };
113 static uint8_t	malloc_slow_flags;
114 
115 #ifdef JEMALLOC_THREADED_INIT
116 /* Used to let the initializing thread recursively allocate. */
117 #  define NO_INITIALIZER	((unsigned long)0)
118 #  define INITIALIZER		pthread_self()
119 #  define IS_INITIALIZER	(malloc_initializer == pthread_self())
120 static pthread_t		malloc_initializer = NO_INITIALIZER;
121 #else
122 #  define NO_INITIALIZER	false
123 #  define INITIALIZER		true
124 #  define IS_INITIALIZER	malloc_initializer
125 static bool			malloc_initializer = NO_INITIALIZER;
126 #endif
127 
128 /* Used to avoid initialization races. */
129 #ifdef _WIN32
130 #if _WIN32_WINNT >= 0x0600
131 static malloc_mutex_t	init_lock = SRWLOCK_INIT;
132 #else
133 static malloc_mutex_t	init_lock;
134 static bool init_lock_initialized = false;
135 
136 JEMALLOC_ATTR(constructor)
137 static void WINAPI
138 _init_init_lock(void) {
139 	/*
140 	 * If another constructor in the same binary is using mallctl to e.g.
141 	 * set up extent hooks, it may end up running before this one, and
142 	 * malloc_init_hard will crash trying to lock the uninitialized lock. So
143 	 * we force an initialization of the lock in malloc_init_hard as well.
144 	 * We don't worry about atomicity of the accesses to the
145 	 * init_lock_initialized boolean, since it really only matters early in
146 	 * the process creation, before any separate thread normally starts
147 	 * doing anything.
148 	 */
149 	if (!init_lock_initialized) {
150 		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
151 		    malloc_mutex_rank_exclusive);
152 	}
153 	init_lock_initialized = true;
154 }
155 
156 #ifdef _MSC_VER
157 #  pragma section(".CRT$XCU", read)
158 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
159 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
160 #endif
161 #endif
162 #else
163 static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
164 #endif
165 
166 typedef struct {
167 	void	*p;	/* Input pointer (as in realloc(p, s)). */
168 	size_t	s;	/* Request size. */
169 	void	*r;	/* Result pointer. */
170 } malloc_utrace_t;
171 
172 #ifdef JEMALLOC_UTRACE
173 #  define UTRACE(a, b, c) do {						\
174 	if (unlikely(opt_utrace)) {					\
175 		int utrace_serrno = errno;				\
176 		malloc_utrace_t ut;					\
177 		ut.p = (a);						\
178 		ut.s = (b);						\
179 		ut.r = (c);						\
180 		utrace(&ut, sizeof(ut));				\
181 		errno = utrace_serrno;					\
182 	}								\
183 } while (0)
184 #else
185 #  define UTRACE(a, b, c)
186 #endif
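/*
 * Usage note: UTRACE records follow the realloc(p, s) = r convention from
 * malloc_utrace_t above.  For example, the allocation paths below emit
 * UTRACE(0, size, allocation) on success and UTRACE(NULL, size, NULL) on
 * failure, so a trace consumer can infer the call type and outcome from
 * which fields are null.
 */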
187 
188 /* Whether we have encountered any invalid config options. */
189 static bool had_conf_error = false;
190 
191 /******************************************************************************/
192 /*
193  * Function prototypes for static functions that are referenced prior to
194  * definition.
195  */
196 
197 static bool	malloc_init_hard_a0(void);
198 static bool	malloc_init_hard(void);
199 
200 /******************************************************************************/
201 /*
202  * Begin miscellaneous support functions.
203  */
204 
205 bool
206 malloc_initialized(void) {
207 	return (malloc_init_state == malloc_init_initialized);
208 }
209 
210 JEMALLOC_ALWAYS_INLINE bool
211 malloc_init_a0(void) {
212 	if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
213 		return malloc_init_hard_a0();
214 	}
215 	return false;
216 }
217 
218 JEMALLOC_ALWAYS_INLINE bool
219 malloc_init(void) {
220 	if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
221 		return true;
222 	}
223 	return false;
224 }
225 
226 /*
227  * The a0*() functions are used instead of i{d,}alloc() in situations that
228  * cannot tolerate TLS variable access.
229  */
230 
231 static void *
232 a0ialloc(size_t size, bool zero, bool is_internal) {
233 	if (unlikely(malloc_init_a0())) {
234 		return NULL;
235 	}
236 
237 	return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
238 	    is_internal, arena_get(TSDN_NULL, 0, true), true);
239 }
240 
241 static void
242 a0idalloc(void *ptr, bool is_internal) {
243 	idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
244 }
245 
246 void *
247 a0malloc(size_t size) {
248 	return a0ialloc(size, false, true);
249 }
250 
251 void
252 a0dalloc(void *ptr) {
253 	a0idalloc(ptr, true);
254 }
255 
256 /*
257  * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
258  * situations that cannot tolerate TLS variable access (TLS allocation and very
259  * early internal data structure initialization).
260  */
261 
262 void *
263 bootstrap_malloc(size_t size) {
264 	if (unlikely(size == 0)) {
265 		size = 1;
266 	}
267 
268 	return a0ialloc(size, false, false);
269 }
270 
271 void *
272 bootstrap_calloc(size_t num, size_t size) {
273 	size_t num_size;
274 
275 	num_size = num * size;
276 	if (unlikely(num_size == 0)) {
277 		assert(num == 0 || size == 0);
278 		num_size = 1;
279 	}
280 
281 	return a0ialloc(num_size, true, false);
282 }
283 
284 void
285 bootstrap_free(void *ptr) {
286 	if (unlikely(ptr == NULL)) {
287 		return;
288 	}
289 
290 	a0idalloc(ptr, false);
291 }
292 
293 void
294 arena_set(unsigned ind, arena_t *arena) {
295 	atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
296 }
297 
298 static void
299 narenas_total_set(unsigned narenas) {
300 	atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
301 }
302 
303 static void
304 narenas_total_inc(void) {
305 	atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
306 }
307 
308 unsigned
309 narenas_total_get(void) {
310 	return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
311 }
312 
313 /* Create a new arena and insert it into the arenas array at index ind. */
314 static arena_t *
315 arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
316 	arena_t *arena;
317 
318 	assert(ind <= narenas_total_get());
319 	if (ind >= MALLOCX_ARENA_LIMIT) {
320 		return NULL;
321 	}
322 	if (ind == narenas_total_get()) {
323 		narenas_total_inc();
324 	}
325 
326 	/*
327 	 * Another thread may have already initialized arenas[ind] if it's an
328 	 * auto arena.
329 	 */
330 	arena = arena_get(tsdn, ind, false);
331 	if (arena != NULL) {
332 		assert(ind < narenas_auto);
333 		return arena;
334 	}
335 
336 	/* Actually initialize the arena. */
337 	arena = arena_new(tsdn, ind, extent_hooks);
338 
339 	return arena;
340 }
341 
342 static void
343 arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
344 	if (ind == 0) {
345 		return;
346 	}
347 	if (have_background_thread) {
348 		bool err;
349 		malloc_mutex_lock(tsdn, &background_thread_lock);
350 		err = background_thread_create(tsdn_tsd(tsdn), ind);
351 		malloc_mutex_unlock(tsdn, &background_thread_lock);
352 		if (err) {
353 			malloc_printf("<jemalloc>: error in background thread "
354 				      "creation for arena %u. Abort.\n", ind);
355 			abort();
356 		}
357 	}
358 }
359 
360 arena_t *
361 arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
362 	arena_t *arena;
363 
364 	malloc_mutex_lock(tsdn, &arenas_lock);
365 	arena = arena_init_locked(tsdn, ind, extent_hooks);
366 	malloc_mutex_unlock(tsdn, &arenas_lock);
367 
368 	arena_new_create_background_thread(tsdn, ind);
369 
370 	return arena;
371 }
372 
373 static void
374 arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
375 	arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
376 	arena_nthreads_inc(arena, internal);
377 
378 	if (internal) {
379 		tsd_iarena_set(tsd, arena);
380 	} else {
381 		tsd_arena_set(tsd, arena);
382 	}
383 }
384 
385 void
386 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
387 	arena_t *oldarena, *newarena;
388 
389 	oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
390 	newarena = arena_get(tsd_tsdn(tsd), newind, false);
391 	arena_nthreads_dec(oldarena, false);
392 	arena_nthreads_inc(newarena, false);
393 	tsd_arena_set(tsd, newarena);
394 }
395 
396 static void
397 arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
398 	arena_t *arena;
399 
400 	arena = arena_get(tsd_tsdn(tsd), ind, false);
401 	arena_nthreads_dec(arena, internal);
402 
403 	if (internal) {
404 		tsd_iarena_set(tsd, NULL);
405 	} else {
406 		tsd_arena_set(tsd, NULL);
407 	}
408 }
409 
410 arena_tdata_t *
411 arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
412 	arena_tdata_t *tdata, *arenas_tdata_old;
413 	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
414 	unsigned narenas_tdata_old, i;
415 	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
416 	unsigned narenas_actual = narenas_total_get();
417 
418 	/*
419 	 * Dissociate old tdata array (and set up for deallocation upon return)
420 	 * if it's too small.
421 	 */
422 	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
423 		arenas_tdata_old = arenas_tdata;
424 		narenas_tdata_old = narenas_tdata;
425 		arenas_tdata = NULL;
426 		narenas_tdata = 0;
427 		tsd_arenas_tdata_set(tsd, arenas_tdata);
428 		tsd_narenas_tdata_set(tsd, narenas_tdata);
429 	} else {
430 		arenas_tdata_old = NULL;
431 		narenas_tdata_old = 0;
432 	}
433 
434 	/* Allocate tdata array if it's missing. */
435 	if (arenas_tdata == NULL) {
436 		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
437 		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
438 
439 		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
440 			*arenas_tdata_bypassp = true;
441 			arenas_tdata = (arena_tdata_t *)a0malloc(
442 			    sizeof(arena_tdata_t) * narenas_tdata);
443 			*arenas_tdata_bypassp = false;
444 		}
445 		if (arenas_tdata == NULL) {
446 			tdata = NULL;
447 			goto label_return;
448 		}
449 		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
450 		tsd_arenas_tdata_set(tsd, arenas_tdata);
451 		tsd_narenas_tdata_set(tsd, narenas_tdata);
452 	}
453 
454 	/*
455 	 * Copy to tdata array.  It's possible that the actual number of arenas
456 	 * has increased since narenas_total_get() was called above, but that
457 	 * causes no correctness issues unless two threads concurrently execute
458 	 * the arenas.create mallctl, which we trust mallctl synchronization to
459 	 * prevent.
460 	 */
461 
462 	/* Copy/initialize tickers. */
463 	for (i = 0; i < narenas_actual; i++) {
464 		if (i < narenas_tdata_old) {
465 			ticker_copy(&arenas_tdata[i].decay_ticker,
466 			    &arenas_tdata_old[i].decay_ticker);
467 		} else {
468 			ticker_init(&arenas_tdata[i].decay_ticker,
469 			    DECAY_NTICKS_PER_UPDATE);
470 		}
471 	}
472 	if (narenas_tdata > narenas_actual) {
473 		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
474 		    * (narenas_tdata - narenas_actual));
475 	}
476 
477 	/* Read the refreshed tdata array. */
478 	tdata = &arenas_tdata[ind];
479 label_return:
480 	if (arenas_tdata_old != NULL) {
481 		a0dalloc(arenas_tdata_old);
482 	}
483 	return tdata;
484 }
485 
486 /* Slow path, called only by arena_choose(). */
487 arena_t *
488 arena_choose_hard(tsd_t *tsd, bool internal) {
489 	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
490 
491 	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
492 		unsigned choose = percpu_arena_choose();
493 		ret = arena_get(tsd_tsdn(tsd), choose, true);
494 		assert(ret != NULL);
495 		arena_bind(tsd, arena_ind_get(ret), false);
496 		arena_bind(tsd, arena_ind_get(ret), true);
497 
498 		return ret;
499 	}
500 
501 	if (narenas_auto > 1) {
502 		unsigned i, j, choose[2], first_null;
503 		bool is_new_arena[2];
504 
505 		/*
506 		 * Determine binding for both non-internal and internal
507 		 * allocation.
508 		 *
509 		 *   choose[0]: For application allocation.
510 		 *   choose[1]: For internal metadata allocation.
511 		 */
512 
513 		for (j = 0; j < 2; j++) {
514 			choose[j] = 0;
515 			is_new_arena[j] = false;
516 		}
517 
518 		first_null = narenas_auto;
519 		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
520 		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
521 		for (i = 1; i < narenas_auto; i++) {
522 			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
523 				/*
524 				 * Choose the first arena that has the lowest
525 				 * number of threads assigned to it.
526 				 */
527 				for (j = 0; j < 2; j++) {
528 					if (arena_nthreads_get(arena_get(
529 					    tsd_tsdn(tsd), i, false), !!j) <
530 					    arena_nthreads_get(arena_get(
531 					    tsd_tsdn(tsd), choose[j], false),
532 					    !!j)) {
533 						choose[j] = i;
534 					}
535 				}
536 			} else if (first_null == narenas_auto) {
537 				/*
538 				 * Record the index of the first uninitialized
539 				 * arena, in case all extant arenas are in use.
540 				 *
541 				 * NB: It is possible for there to be
542 				 * discontinuities in terms of initialized
543 				 * versus uninitialized arenas, due to the
544 				 * "thread.arena" mallctl.
545 				 */
546 				first_null = i;
547 			}
548 		}
549 
550 		for (j = 0; j < 2; j++) {
551 			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
552 			    choose[j], false), !!j) == 0 || first_null ==
553 			    narenas_auto) {
554 				/*
555 				 * Use an unloaded arena, or the least loaded
556 				 * arena if all arenas are already initialized.
557 				 */
558 				if (!!j == internal) {
559 					ret = arena_get(tsd_tsdn(tsd),
560 					    choose[j], false);
561 				}
562 			} else {
563 				arena_t *arena;
564 
565 				/* Initialize a new arena. */
566 				choose[j] = first_null;
567 				arena = arena_init_locked(tsd_tsdn(tsd),
568 				    choose[j],
569 				    (extent_hooks_t *)&extent_hooks_default);
570 				if (arena == NULL) {
571 					malloc_mutex_unlock(tsd_tsdn(tsd),
572 					    &arenas_lock);
573 					return NULL;
574 				}
575 				is_new_arena[j] = true;
576 				if (!!j == internal) {
577 					ret = arena;
578 				}
579 			}
580 			arena_bind(tsd, choose[j], !!j);
581 		}
582 		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
583 
584 		for (j = 0; j < 2; j++) {
585 			if (is_new_arena[j]) {
586 				assert(choose[j] > 0);
587 				arena_new_create_background_thread(
588 				    tsd_tsdn(tsd), choose[j]);
589 			}
590 		}
591 
592 	} else {
593 		ret = arena_get(tsd_tsdn(tsd), 0, false);
594 		arena_bind(tsd, 0, false);
595 		arena_bind(tsd, 0, true);
596 	}
597 
598 	return ret;
599 }
600 
601 void
602 iarena_cleanup(tsd_t *tsd) {
603 	arena_t *iarena;
604 
605 	iarena = tsd_iarena_get(tsd);
606 	if (iarena != NULL) {
607 		arena_unbind(tsd, arena_ind_get(iarena), true);
608 	}
609 }
610 
611 void
612 arena_cleanup(tsd_t *tsd) {
613 	arena_t *arena;
614 
615 	arena = tsd_arena_get(tsd);
616 	if (arena != NULL) {
617 		arena_unbind(tsd, arena_ind_get(arena), false);
618 	}
619 }
620 
621 void
622 arenas_tdata_cleanup(tsd_t *tsd) {
623 	arena_tdata_t *arenas_tdata;
624 
625 	/* Prevent tsd->arenas_tdata from being (re)created. */
626 	*tsd_arenas_tdata_bypassp_get(tsd) = true;
627 
628 	arenas_tdata = tsd_arenas_tdata_get(tsd);
629 	if (arenas_tdata != NULL) {
630 		tsd_arenas_tdata_set(tsd, NULL);
631 		a0dalloc(arenas_tdata);
632 	}
633 }
634 
635 static void
636 stats_print_atexit(void) {
637 	if (config_stats) {
638 		tsdn_t *tsdn;
639 		unsigned narenas, i;
640 
641 		tsdn = tsdn_fetch();
642 
643 		/*
644 		 * Merge stats from extant threads.  This is racy, since
645 		 * individual threads do not lock when recording tcache stats
646 		 * events.  As a consequence, the final stats may be slightly
647 		 * out of date by the time they are reported, if other threads
648 		 * continue to allocate.
649 		 */
650 		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
651 			arena_t *arena = arena_get(tsdn, i, false);
652 			if (arena != NULL) {
653 				tcache_t *tcache;
654 
655 				malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
656 				ql_foreach(tcache, &arena->tcache_ql, link) {
657 					tcache_stats_merge(tsdn, tcache, arena);
658 				}
659 				malloc_mutex_unlock(tsdn,
660 				    &arena->tcache_ql_mtx);
661 			}
662 		}
663 	}
664 	je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
665 }
666 
667 /*
668  * Ensure that we don't hold any locks upon entry to or exit from allocator
669  * code (in a "broad" sense that doesn't count a reentrant allocation as an
670  * entrance or exit).
671  */
672 JEMALLOC_ALWAYS_INLINE void
673 check_entry_exit_locking(tsdn_t *tsdn) {
674 	if (!config_debug) {
675 		return;
676 	}
677 	if (tsdn_null(tsdn)) {
678 		return;
679 	}
680 	tsd_t *tsd = tsdn_tsd(tsdn);
681 	/*
682 	 * It's possible we hold locks at entry/exit if we're in a nested
683 	 * allocation.
684 	 */
685 	int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
686 	if (reentrancy_level != 0) {
687 		return;
688 	}
689 	witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
690 }
691 
692 /*
693  * End miscellaneous support functions.
694  */
695 /******************************************************************************/
696 /*
697  * Begin initialization functions.
698  */
699 
700 static char *
701 jemalloc_secure_getenv(const char *name) {
702 #ifdef JEMALLOC_HAVE_SECURE_GETENV
703 	return secure_getenv(name);
704 #else
705 #  ifdef JEMALLOC_HAVE_ISSETUGID
706 	if (issetugid() != 0) {
707 		return NULL;
708 	}
709 #  endif
710 	return getenv(name);
711 #endif
712 }
713 
714 static unsigned
715 malloc_ncpus(void) {
716 	long result;
717 
718 #ifdef _WIN32
719 	SYSTEM_INFO si;
720 	GetSystemInfo(&si);
721 	result = si.dwNumberOfProcessors;
722 #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
723 	/*
724 	 * glibc >= 2.6 has the CPU_COUNT macro.
725 	 *
726 	 * glibc's sysconf() uses isspace().  glibc allocates for the first time
727 	 * *before* setting up the isspace tables.  Therefore we need a
728 	 * different method to get the number of CPUs.
729 	 */
730 	{
731 		cpu_set_t set;
732 
733 		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
734 		result = CPU_COUNT(&set);
735 	}
736 #else
737 	result = sysconf(_SC_NPROCESSORS_ONLN);
738 #endif
739 	return ((result == -1) ? 1 : (unsigned)result);
740 }
741 
742 static void
743 init_opt_stats_print_opts(const char *v, size_t vlen) {
744 	size_t opts_len = strlen(opt_stats_print_opts);
745 	assert(opts_len <= stats_print_tot_num_options);
746 
747 	for (size_t i = 0; i < vlen; i++) {
748 		switch (v[i]) {
749 #define OPTION(o, v, d, s) case o: break;
750 			STATS_PRINT_OPTIONS
751 #undef OPTION
752 		default: continue;
753 		}
754 
755 		if (strchr(opt_stats_print_opts, v[i]) != NULL) {
756 			/* Ignore repeated. */
757 			continue;
758 		}
759 
760 		opt_stats_print_opts[opts_len++] = v[i];
761 		opt_stats_print_opts[opts_len] = '\0';
762 		assert(opts_len <= stats_print_tot_num_options);
763 	}
764 	assert(opts_len == strlen(opt_stats_print_opts));
765 }
766 
767 static bool
768 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
769     char const **v_p, size_t *vlen_p) {
770 	bool accept;
771 	const char *opts = *opts_p;
772 
773 	*k_p = opts;
774 
775 	for (accept = false; !accept;) {
776 		switch (*opts) {
777 		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
778 		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
779 		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
780 		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
781 		case 'Y': case 'Z':
782 		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
783 		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
784 		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
785 		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
786 		case 'y': case 'z':
787 		case '0': case '1': case '2': case '3': case '4': case '5':
788 		case '6': case '7': case '8': case '9':
789 		case '_':
790 			opts++;
791 			break;
792 		case ':':
793 			opts++;
794 			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
795 			*v_p = opts;
796 			accept = true;
797 			break;
798 		case '\0':
799 			if (opts != *opts_p) {
800 				malloc_write("<jemalloc>: Conf string ends "
801 				    "with key\n");
802 			}
803 			return true;
804 		default:
805 			malloc_write("<jemalloc>: Malformed conf string\n");
806 			return true;
807 		}
808 	}
809 
810 	for (accept = false; !accept;) {
811 		switch (*opts) {
812 		case ',':
813 			opts++;
814 			/*
815 			 * Look ahead one character here, because the next time
816 			 * this function is called, it will assume that end of
817 			 * input has been cleanly reached if no input remains,
818 			 * but we have optimistically already consumed the
819 			 * comma if one exists.
820 			 */
821 			if (*opts == '\0') {
822 				malloc_write("<jemalloc>: Conf string ends "
823 				    "with comma\n");
824 			}
825 			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
826 			accept = true;
827 			break;
828 		case '\0':
829 			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
830 			accept = true;
831 			break;
832 		default:
833 			opts++;
834 			break;
835 		}
836 	}
837 
838 	*opts_p = opts;
839 	return false;
840 }
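/*
 * Illustrative example: with MALLOC_CONF="abort:true,narenas:8", successive
 * calls yield (k="abort", v="true") and then (k="narenas", v="8").  Keys may
 * contain only [A-Za-z0-9_]; a stray character or a key with no ':' ends
 * parsing with a diagnostic, while a trailing comma only draws a warning.
 */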
841 
842 static void
843 malloc_abort_invalid_conf(void) {
844 	assert(opt_abort_conf);
845 	malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
846 	    "value (see above).\n");
847 	abort();
848 }
849 
850 static void
851 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
852     size_t vlen) {
853 	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
854 	    (int)vlen, v);
855 	had_conf_error = true;
856 	if (opt_abort_conf) {
857 		malloc_abort_invalid_conf();
858 	}
859 }
860 
861 static void
862 malloc_slow_flag_init(void) {
863 	/*
864 	 * Combine the runtime options into malloc_slow for fast path.  Called
865 	 * after processing all the options.
866 	 */
867 	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
868 	    | (opt_junk_free ? flag_opt_junk_free : 0)
869 	    | (opt_zero ? flag_opt_zero : 0)
870 	    | (opt_utrace ? flag_opt_utrace : 0)
871 	    | (opt_xmalloc ? flag_opt_xmalloc : 0);
872 
873 	malloc_slow = (malloc_slow_flags != 0);
874 }
875 
876 static void
877 malloc_conf_init(void) {
878 	unsigned i;
879 	char buf[PATH_MAX + 1];
880 	const char *opts, *k, *v;
881 	size_t klen, vlen;
882 
883 	for (i = 0; i < 4; i++) {
884 		/* Get runtime configuration. */
885 		switch (i) {
886 		case 0:
887 			opts = config_malloc_conf;
888 			break;
889 		case 1:
890 			if (je_malloc_conf != NULL) {
891 				/*
892 				 * Use options that were compiled into the
893 				 * program.
894 				 */
895 				opts = je_malloc_conf;
896 			} else {
897 				/* No configuration specified. */
898 				buf[0] = '\0';
899 				opts = buf;
900 			}
901 			break;
902 		case 2: {
903 			ssize_t linklen = 0;
904 #ifndef _WIN32
905 			int saved_errno = errno;
906 			const char *linkname =
907 #  ifdef JEMALLOC_PREFIX
908 			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
909 #  else
910 			    "/etc/malloc.conf"
911 #  endif
912 			    ;
913 
914 			/*
915 			 * Try to use the contents of the "/etc/malloc.conf"
916 			 * symbolic link's name.
917 			 */
918 			linklen = readlink(linkname, buf, sizeof(buf) - 1);
919 			if (linklen == -1) {
920 				/* No configuration specified. */
921 				linklen = 0;
922 				/* Restore errno. */
923 				set_errno(saved_errno);
924 			}
925 #endif
926 			buf[linklen] = '\0';
927 			opts = buf;
928 			break;
929 		} case 3: {
930 			const char *envname =
931 #ifdef JEMALLOC_PREFIX
932 			    JEMALLOC_CPREFIX"MALLOC_CONF"
933 #else
934 			    "MALLOC_CONF"
935 #endif
936 			    ;
937 
938 			if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
939 				/*
940 				 * Do nothing; opts is already initialized to
941 				 * the value of the MALLOC_CONF environment
942 				 * variable.
943 				 */
944 			} else {
945 				/* No configuration specified. */
946 				buf[0] = '\0';
947 				opts = buf;
948 			}
949 			break;
950 		} default:
951 			not_reached();
952 			buf[0] = '\0';
953 			opts = buf;
954 		}
955 
956 		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
957 		    &vlen)) {
958 #define CONF_MATCH(n)							\
959 	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
960 #define CONF_MATCH_VALUE(n)						\
961 	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
962 #define CONF_HANDLE_BOOL(o, n)						\
963 			if (CONF_MATCH(n)) {				\
964 				if (CONF_MATCH_VALUE("true")) {		\
965 					o = true;			\
966 				} else if (CONF_MATCH_VALUE("false")) {	\
967 					o = false;			\
968 				} else {				\
969 					malloc_conf_error(		\
970 					    "Invalid conf value",	\
971 					    k, klen, v, vlen);		\
972 				}					\
973 				continue;				\
974 			}
975 #define CONF_MIN_no(um, min)	false
976 #define CONF_MIN_yes(um, min)	((um) < (min))
977 #define CONF_MAX_no(um, max)	false
978 #define CONF_MAX_yes(um, max)	((um) > (max))
979 #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip)	\
980 			if (CONF_MATCH(n)) {				\
981 				uintmax_t um;				\
982 				char *end;				\
983 									\
984 				set_errno(0);				\
985 				um = malloc_strtoumax(v, &end, 0);	\
986 				if (get_errno() != 0 || (uintptr_t)end -\
987 				    (uintptr_t)v != vlen) {		\
988 					malloc_conf_error(		\
989 					    "Invalid conf value",	\
990 					    k, klen, v, vlen);		\
991 				} else if (clip) {			\
992 					if (CONF_MIN_##check_min(um,	\
993 					    (t)(min))) {		\
994 						o = (t)(min);		\
995 					} else if (			\
996 					    CONF_MAX_##check_max(um,	\
997 					    (t)(max))) {		\
998 						o = (t)(max);		\
999 					} else {			\
1000 						o = (t)um;		\
1001 					}				\
1002 				} else {				\
1003 					if (CONF_MIN_##check_min(um,	\
1004 					    (t)(min)) ||		\
1005 					    CONF_MAX_##check_max(um,	\
1006 					    (t)(max))) {		\
1007 						malloc_conf_error(	\
1008 						    "Out-of-range "	\
1009 						    "conf value",	\
1010 						    k, klen, v, vlen);	\
1011 					} else {			\
1012 						o = (t)um;		\
1013 					}				\
1014 				}					\
1015 				continue;				\
1016 			}
1017 #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max,	\
1018     clip)								\
1019 			CONF_HANDLE_T_U(unsigned, o, n, min, max,	\
1020 			    check_min, check_max, clip)
1021 #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip)	\
1022 			CONF_HANDLE_T_U(size_t, o, n, min, max,		\
1023 			    check_min, check_max, clip)
1024 #define CONF_HANDLE_SSIZE_T(o, n, min, max)				\
1025 			if (CONF_MATCH(n)) {				\
1026 				long l;					\
1027 				char *end;				\
1028 									\
1029 				set_errno(0);				\
1030 				l = strtol(v, &end, 0);			\
1031 				if (get_errno() != 0 || (uintptr_t)end -\
1032 				    (uintptr_t)v != vlen) {		\
1033 					malloc_conf_error(		\
1034 					    "Invalid conf value",	\
1035 					    k, klen, v, vlen);		\
1036 				} else if (l < (ssize_t)(min) || l >	\
1037 				    (ssize_t)(max)) {			\
1038 					malloc_conf_error(		\
1039 					    "Out-of-range conf value",	\
1040 					    k, klen, v, vlen);		\
1041 				} else {				\
1042 					o = l;				\
1043 				}					\
1044 				continue;				\
1045 			}
1046 #define CONF_HANDLE_CHAR_P(o, n, d)					\
1047 			if (CONF_MATCH(n)) {				\
1048 				size_t cpylen = (vlen <=		\
1049 				    sizeof(o)-1) ? vlen :		\
1050 				    sizeof(o)-1;			\
1051 				strncpy(o, v, cpylen);			\
1052 				o[cpylen] = '\0';			\
1053 				continue;				\
1054 			}
1055 
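			/*
			 * Example expansion: CONF_HANDLE_BOOL(opt_abort,
			 * "abort") below matches the key "abort", sets
			 * opt_abort from a literal "true" or "false" value,
			 * reports "Invalid conf value" otherwise, and then
			 * continues with the next key:value pair.
			 */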
1056 			CONF_HANDLE_BOOL(opt_abort, "abort")
1057 			CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
1058 			if (opt_abort_conf && had_conf_error) {
1059 				malloc_abort_invalid_conf();
1060 			}
1061 			CONF_HANDLE_BOOL(opt_retain, "retain")
1062 			if (strncmp("dss", k, klen) == 0) {
1063 				int i;
1064 				bool match = false;
1065 				for (i = 0; i < dss_prec_limit; i++) {
1066 					if (strncmp(dss_prec_names[i], v, vlen)
1067 					    == 0) {
1068 						if (extent_dss_prec_set(i)) {
1069 							malloc_conf_error(
1070 							    "Error setting dss",
1071 							    k, klen, v, vlen);
1072 						} else {
1073 							opt_dss =
1074 							    dss_prec_names[i];
1075 							match = true;
1076 							break;
1077 						}
1078 					}
1079 				}
1080 				if (!match) {
1081 					malloc_conf_error("Invalid conf value",
1082 					    k, klen, v, vlen);
1083 				}
1084 				continue;
1085 			}
1086 			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1087 			    UINT_MAX, yes, no, false)
1088 			CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
1089 			    "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1090 			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1091 			    SSIZE_MAX);
1092 			CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
1093 			    "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1094 			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1095 			    SSIZE_MAX);
1096 			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
1097 			if (CONF_MATCH("stats_print_opts")) {
1098 				init_opt_stats_print_opts(v, vlen);
1099 				continue;
1100 			}
1101 			if (config_fill) {
1102 				if (CONF_MATCH("junk")) {
1103 					if (CONF_MATCH_VALUE("true")) {
1104 						opt_junk = "true";
1105 						opt_junk_alloc = opt_junk_free =
1106 						    true;
1107 					} else if (CONF_MATCH_VALUE("false")) {
1108 						opt_junk = "false";
1109 						opt_junk_alloc = opt_junk_free =
1110 						    false;
1111 					} else if (CONF_MATCH_VALUE("alloc")) {
1112 						opt_junk = "alloc";
1113 						opt_junk_alloc = true;
1114 						opt_junk_free = false;
1115 					} else if (CONF_MATCH_VALUE("free")) {
1116 						opt_junk = "free";
1117 						opt_junk_alloc = false;
1118 						opt_junk_free = true;
1119 					} else {
1120 						malloc_conf_error(
1121 						    "Invalid conf value", k,
1122 						    klen, v, vlen);
1123 					}
1124 					continue;
1125 				}
1126 				CONF_HANDLE_BOOL(opt_zero, "zero")
1127 			}
1128 			if (config_utrace) {
1129 				CONF_HANDLE_BOOL(opt_utrace, "utrace")
1130 			}
1131 			if (config_xmalloc) {
1132 				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
1133 			}
1134 			CONF_HANDLE_BOOL(opt_tcache, "tcache")
1135 			CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
1136 			    -1, (sizeof(size_t) << 3) - 1)
1137 			if (strncmp("percpu_arena", k, klen) == 0) {
1138 				int i;
1139 				bool match = false;
1140 				for (i = percpu_arena_mode_names_base; i <
1141 				    percpu_arena_mode_names_limit; i++) {
1142 					if (strncmp(percpu_arena_mode_names[i],
1143 					    v, vlen) == 0) {
1144 						if (!have_percpu_arena) {
1145 							malloc_conf_error(
1146 							    "No getcpu support",
1147 							    k, klen, v, vlen);
1148 						}
1149 						opt_percpu_arena = i;
1150 						match = true;
1151 						break;
1152 					}
1153 				}
1154 				if (!match) {
1155 					malloc_conf_error("Invalid conf value",
1156 					    k, klen, v, vlen);
1157 				}
1158 				continue;
1159 			}
1160 			CONF_HANDLE_BOOL(opt_background_thread,
1161 			    "background_thread");
1162 			if (config_prof) {
1163 				CONF_HANDLE_BOOL(opt_prof, "prof")
1164 				CONF_HANDLE_CHAR_P(opt_prof_prefix,
1165 				    "prof_prefix", "jeprof")
1166 				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
1167 				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1168 				    "prof_thread_active_init")
1169 				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1170 				    "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
1171 				    - 1, no, yes, true)
1172 				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
1173 				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1174 				    "lg_prof_interval", -1,
1175 				    (sizeof(uint64_t) << 3) - 1)
1176 				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
1177 				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
1178 				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
1179 			}
1180 			malloc_conf_error("Invalid conf pair", k, klen, v,
1181 			    vlen);
1182 #undef CONF_MATCH
1183 #undef CONF_MATCH_VALUE
1184 #undef CONF_HANDLE_BOOL
1185 #undef CONF_MIN_no
1186 #undef CONF_MIN_yes
1187 #undef CONF_MAX_no
1188 #undef CONF_MAX_yes
1189 #undef CONF_HANDLE_T_U
1190 #undef CONF_HANDLE_UNSIGNED
1191 #undef CONF_HANDLE_SIZE_T
1192 #undef CONF_HANDLE_SSIZE_T
1193 #undef CONF_HANDLE_CHAR_P
1194 		}
1195 	}
1196 }
1197 
1198 static bool
1199 malloc_init_hard_needed(void) {
1200 	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1201 	    malloc_init_recursible)) {
1202 		/*
1203 		 * Another thread initialized the allocator before this one
1204 		 * acquired init_lock, or this thread is the initializing
1205 		 * thread, and it is recursively allocating.
1206 		 */
1207 		return false;
1208 	}
1209 #ifdef JEMALLOC_THREADED_INIT
1210 	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1211 		/* Busy-wait until the initializing thread completes. */
1212 		spin_t spinner = SPIN_INITIALIZER;
1213 		do {
1214 			malloc_mutex_unlock(TSDN_NULL, &init_lock);
1215 			spin_adaptive(&spinner);
1216 			malloc_mutex_lock(TSDN_NULL, &init_lock);
1217 		} while (!malloc_initialized());
1218 		return false;
1219 	}
1220 #endif
1221 	return true;
1222 }
1223 
1224 static bool
1225 malloc_init_hard_a0_locked(void) {
1226 	malloc_initializer = INITIALIZER;
1227 
1228 	if (config_prof) {
1229 		prof_boot0();
1230 	}
1231 	malloc_conf_init();
1232 	if (opt_stats_print) {
1233 		/* Print statistics at exit. */
1234 		if (atexit(stats_print_atexit) != 0) {
1235 			malloc_write("<jemalloc>: Error in atexit()\n");
1236 			if (opt_abort) {
1237 				abort();
1238 			}
1239 		}
1240 	}
1241 	if (pages_boot()) {
1242 		return true;
1243 	}
1244 	if (base_boot(TSDN_NULL)) {
1245 		return true;
1246 	}
1247 	if (extent_boot()) {
1248 		return true;
1249 	}
1250 	if (ctl_boot()) {
1251 		return true;
1252 	}
1253 	if (config_prof) {
1254 		prof_boot1();
1255 	}
1256 	arena_boot();
1257 	if (tcache_boot(TSDN_NULL)) {
1258 		return true;
1259 	}
1260 	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
1261 	    malloc_mutex_rank_exclusive)) {
1262 		return true;
1263 	}
1264 	/*
1265 	 * Create enough scaffolding to allow recursive allocation in
1266 	 * malloc_ncpus().
1267 	 */
1268 	narenas_auto = 1;
1269 	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1270 	/*
1271 	 * Initialize one arena here.  The rest are lazily created in
1272 	 * arena_choose_hard().
1273 	 */
1274 	if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
1275 	    == NULL) {
1276 		return true;
1277 	}
1278 	a0 = arena_get(TSDN_NULL, 0, false);
1279 	malloc_init_state = malloc_init_a0_initialized;
1280 
1281 	return false;
1282 }
1283 
1284 static bool
1285 malloc_init_hard_a0(void) {
1286 	bool ret;
1287 
1288 	malloc_mutex_lock(TSDN_NULL, &init_lock);
1289 	ret = malloc_init_hard_a0_locked();
1290 	malloc_mutex_unlock(TSDN_NULL, &init_lock);
1291 	return ret;
1292 }
1293 
1294 /* Initialize data structures which may trigger recursive allocation. */
1295 static bool
1296 malloc_init_hard_recursible(void) {
1297 	malloc_init_state = malloc_init_recursible;
1298 
1299 	ncpus = malloc_ncpus();
1300 
1301 #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
1302     && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
1303     !defined(__native_client__))
1304 	/* LinuxThreads' pthread_atfork() allocates. */
1305 	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1306 	    jemalloc_postfork_child) != 0) {
1307 		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1308 		if (opt_abort) {
1309 			abort();
1310 		}
1311 		return true;
1312 	}
1313 #endif
1314 
1315 	if (background_thread_boot0()) {
1316 		return true;
1317 	}
1318 
1319 	return false;
1320 }
1321 
1322 static unsigned
1323 malloc_narenas_default(void) {
1324 	assert(ncpus > 0);
1325 	/*
1326 	 * For SMP systems, create more than one arena per CPU by
1327 	 * default.
1328 	 */
1329 	if (ncpus > 1) {
1330 		return ncpus << 2;
1331 	} else {
1332 		return 1;
1333 	}
1334 }
1335 
1336 static percpu_arena_mode_t
1337 percpu_arena_as_initialized(percpu_arena_mode_t mode) {
1338 	assert(!malloc_initialized());
1339 	assert(mode <= percpu_arena_disabled);
1340 
1341 	if (mode != percpu_arena_disabled) {
1342 		mode += percpu_arena_mode_enabled_base;
1343 	}
1344 
1345 	return mode;
1346 }
1347 
1348 static bool
1349 malloc_init_narenas(void) {
1350 	assert(ncpus > 0);
1351 
1352 	if (opt_percpu_arena != percpu_arena_disabled) {
1353 		if (!have_percpu_arena || malloc_getcpu() < 0) {
1354 			opt_percpu_arena = percpu_arena_disabled;
1355 			malloc_printf("<jemalloc>: perCPU arena getcpu() not "
1356 			    "available. Setting narenas to %u.\n", opt_narenas ?
1357 			    opt_narenas : malloc_narenas_default());
1358 			if (opt_abort) {
1359 				abort();
1360 			}
1361 		} else {
1362 			if (ncpus >= MALLOCX_ARENA_LIMIT) {
1363 				malloc_printf("<jemalloc>: narenas w/ percpu "
1364 				    "arena beyond limit (%u)\n", ncpus);
1365 				if (opt_abort) {
1366 					abort();
1367 				}
1368 				return true;
1369 			}
1370 			/* NB: opt_percpu_arena isn't fully initialized yet. */
1371 			if (percpu_arena_as_initialized(opt_percpu_arena) ==
1372 			    per_phycpu_arena && ncpus % 2 != 0) {
1373 				malloc_printf("<jemalloc>: invalid "
1374 				    "configuration -- per physical CPU arena "
1375 				    "with odd number (%u) of CPUs (no hyper "
1376 				    "threading?).\n", ncpus);
1377 				if (opt_abort)
1378 					abort();
1379 			}
1380 			unsigned n = percpu_arena_ind_limit(
1381 			    percpu_arena_as_initialized(opt_percpu_arena));
1382 			if (opt_narenas < n) {
1383 				/*
1384 				 * If narenas is specified with percpu_arena
1385 				 * enabled, actual narenas is set as the greater
1386 				 * of the two. percpu_arena_choose will be free
1387 				 * to use any of the arenas based on CPU
1388 				 * id. This is conservative (at a small cost)
1389 				 * but ensures correctness.
1390 				 *
1391 				 * If for some reason the ncpus determined at
1392 				 * boot is not the actual number (e.g. because
1393 				 * of affinity setting from numactl), reserving
1394 				 * narenas this way provides a workaround for
1395 				 * percpu_arena.
1396 				 */
1397 				opt_narenas = n;
1398 			}
1399 		}
1400 	}
1401 	if (opt_narenas == 0) {
1402 		opt_narenas = malloc_narenas_default();
1403 	}
1404 	assert(opt_narenas > 0);
1405 
1406 	narenas_auto = opt_narenas;
1407 	/*
1408 	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
1409 	 */
1410 	if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
1411 		narenas_auto = MALLOCX_ARENA_LIMIT - 1;
1412 		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1413 		    narenas_auto);
1414 	}
1415 	narenas_total_set(narenas_auto);
1416 
1417 	return false;
1418 }
1419 
1420 static void
1421 malloc_init_percpu(void) {
1422 	opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
1423 }
1424 
1425 static bool
1426 malloc_init_hard_finish(void) {
1427 	if (malloc_mutex_boot()) {
1428 		return true;
1429 	}
1430 
1431 	malloc_init_state = malloc_init_initialized;
1432 	malloc_slow_flag_init();
1433 
1434 	return false;
1435 }
1436 
1437 static void
1438 malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
1439 	malloc_mutex_assert_owner(tsdn, &init_lock);
1440 	malloc_mutex_unlock(tsdn, &init_lock);
1441 	if (reentrancy_set) {
1442 		assert(!tsdn_null(tsdn));
1443 		tsd_t *tsd = tsdn_tsd(tsdn);
1444 		assert(tsd_reentrancy_level_get(tsd) > 0);
1445 		post_reentrancy(tsd);
1446 	}
1447 }
1448 
1449 static bool
1450 malloc_init_hard(void) {
1451 	tsd_t *tsd;
1452 
1453 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1454 	_init_init_lock();
1455 #endif
1456 	malloc_mutex_lock(TSDN_NULL, &init_lock);
1457 
1458 #define UNLOCK_RETURN(tsdn, ret, reentrancy)		\
1459 	malloc_init_hard_cleanup(tsdn, reentrancy);	\
1460 	return ret;
1461 
1462 	if (!malloc_init_hard_needed()) {
1463 		UNLOCK_RETURN(TSDN_NULL, false, false)
1464 	}
1465 
1466 	if (malloc_init_state != malloc_init_a0_initialized &&
1467 	    malloc_init_hard_a0_locked()) {
1468 		UNLOCK_RETURN(TSDN_NULL, true, false)
1469 	}
1470 
1471 	malloc_mutex_unlock(TSDN_NULL, &init_lock);
1472 	/* Recursive allocation relies on functional tsd. */
1473 	tsd = malloc_tsd_boot0();
1474 	if (tsd == NULL) {
1475 		return true;
1476 	}
1477 	if (malloc_init_hard_recursible()) {
1478 		return true;
1479 	}
1480 
1481 	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
1482 	/* Set reentrancy level to 1 during init. */
1483 	pre_reentrancy(tsd, NULL);
1484 	/* Initialize narenas before prof_boot2 (for allocation). */
1485 	if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
1486 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1487 	}
1488 	if (config_prof && prof_boot2(tsd)) {
1489 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1490 	}
1491 
1492 	malloc_init_percpu();
1493 
1494 	if (malloc_init_hard_finish()) {
1495 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1496 	}
1497 	post_reentrancy(tsd);
1498 	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
1499 
1500 	malloc_tsd_boot1();
1501 	/* Update TSD after tsd_boot1. */
1502 	tsd = tsd_fetch();
1503 	if (opt_background_thread) {
1504 		assert(have_background_thread);
1505 		/*
1506 		 * Need to finish init & unlock first before creating background
1507 		 * threads (pthread_create depends on malloc).
1508 		 */
1509 		malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
1510 		bool err = background_thread_create(tsd, 0);
1511 		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
1512 		if (err) {
1513 			return true;
1514 		}
1515 	}
1516 #undef UNLOCK_RETURN
1517 	return false;
1518 }
1519 
1520 /*
1521  * End initialization functions.
1522  */
1523 /******************************************************************************/
1524 /*
1525  * Begin allocation-path internal functions and data structures.
1526  */
1527 
1528 /*
1529  * Settings determined by the documented behavior of the allocation functions.
1530  */
1531 typedef struct static_opts_s static_opts_t;
1532 struct static_opts_s {
1533 	/* Whether or not allocation size may overflow. */
1534 	bool may_overflow;
1535 	/* Whether or not allocations of size 0 should be treated as size 1. */
1536 	bool bump_empty_alloc;
1537 	/*
1538 	 * Whether to assert that allocations are not of size 0 (after any
1539 	 * bumping).
1540 	 */
1541 	bool assert_nonempty_alloc;
1542 
1543 	/*
1544 	 * Whether or not to modify the 'result' argument to malloc in case of
1545 	 * error.
1546 	 */
1547 	bool null_out_result_on_error;
1548 	/* Whether to set errno when we encounter an error condition. */
1549 	bool set_errno_on_error;
1550 
1551 	/*
1552 	 * The minimum valid alignment for functions requesting aligned storage.
1553 	 */
1554 	size_t min_alignment;
1555 
1556 	/* The error string to use if we oom. */
1557 	const char *oom_string;
1558 	/* The error string to use if the passed-in alignment is invalid. */
1559 	const char *invalid_alignment_string;
1560 
1561 	/*
1562 	 * False if we're configured to skip some time-consuming operations.
1563 	 *
1564 	 * This isn't really a malloc "behavior", but it acts as a useful
1565 	 * summary of several other static (or at least, static after program
1566 	 * initialization) options.
1567 	 */
1568 	bool slow;
1569 };
1570 
1571 JEMALLOC_ALWAYS_INLINE void
1572 static_opts_init(static_opts_t *static_opts) {
1573 	static_opts->may_overflow = false;
1574 	static_opts->bump_empty_alloc = false;
1575 	static_opts->assert_nonempty_alloc = false;
1576 	static_opts->null_out_result_on_error = false;
1577 	static_opts->set_errno_on_error = false;
1578 	static_opts->min_alignment = 0;
1579 	static_opts->oom_string = "";
1580 	static_opts->invalid_alignment_string = "";
1581 	static_opts->slow = false;
1582 }
1583 
1584 /*
1585  * These correspond to the macros in jemalloc/jemalloc_macros.h.  Broadly, we
1586  * should have one constant here per magic value there.  Note however that the
1587  * representations need not be related.
1588  */
1589 #define TCACHE_IND_NONE ((unsigned)-1)
1590 #define TCACHE_IND_AUTOMATIC ((unsigned)-2)
1591 #define ARENA_IND_AUTOMATIC ((unsigned)-1)
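/*
 * These sentinels are consumed below: dynamic_opts_init() leaves tcache_ind
 * and arena_ind at their AUTOMATIC values, and the reentrant slow path in
 * imalloc_body() forces TCACHE_IND_NONE and arena 0, since arena 0 is known
 * to be initialized.
 */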
1592 
1593 typedef struct dynamic_opts_s dynamic_opts_t;
1594 struct dynamic_opts_s {
1595 	void **result;
1596 	size_t num_items;
1597 	size_t item_size;
1598 	size_t alignment;
1599 	bool zero;
1600 	unsigned tcache_ind;
1601 	unsigned arena_ind;
1602 };
1603 
1604 JEMALLOC_ALWAYS_INLINE void
1605 dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
1606 	dynamic_opts->result = NULL;
1607 	dynamic_opts->num_items = 0;
1608 	dynamic_opts->item_size = 0;
1609 	dynamic_opts->alignment = 0;
1610 	dynamic_opts->zero = false;
1611 	dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
1612 	dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
1613 }
1614 
1615 /* ind is ignored if dopts->alignment > 0. */
1616 JEMALLOC_ALWAYS_INLINE void *
1617 imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1618     size_t size, size_t usize, szind_t ind) {
1619 	tcache_t *tcache;
1620 	arena_t *arena;
1621 
1622 	/* Fill in the tcache. */
1623 	if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
1624 		if (likely(!sopts->slow)) {
1625 			/* Getting tcache ptr unconditionally. */
1626 			tcache = tsd_tcachep_get(tsd);
1627 			assert(tcache == tcache_get(tsd));
1628 		} else {
1629 			tcache = tcache_get(tsd);
1630 		}
1631 	} else if (dopts->tcache_ind == TCACHE_IND_NONE) {
1632 		tcache = NULL;
1633 	} else {
1634 		tcache = tcaches_get(tsd, dopts->tcache_ind);
1635 	}
1636 
1637 	/* Fill in the arena. */
1638 	if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
1639 		/*
1640 		 * In case of automatic arena management, we defer arena
1641 		 * computation until as late as we can, hoping to fill the
1642 		 * allocation out of the tcache.
1643 		 */
1644 		arena = NULL;
1645 	} else {
1646 		arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
1647 	}
1648 
1649 	if (unlikely(dopts->alignment != 0)) {
1650 		return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
1651 		    dopts->zero, tcache, arena);
1652 	}
1653 
1654 	return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
1655 	    arena, sopts->slow);
1656 }
1657 
1658 JEMALLOC_ALWAYS_INLINE void *
1659 imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1660     size_t usize, szind_t ind) {
1661 	void *ret;
1662 
1663 	/*
1664 	 * For small allocations, sampling bumps the usize.  If so, we allocate
1665 	 * from the ind_large bucket.
1666 	 */
1667 	szind_t ind_large;
1668 	size_t bumped_usize = usize;
1669 
1670 	if (usize <= SMALL_MAXCLASS) {
1671 		assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
1672 		    sz_sa2u(LARGE_MINCLASS, dopts->alignment))
1673 		    == LARGE_MINCLASS);
1674 		ind_large = sz_size2index(LARGE_MINCLASS);
1675 		bumped_usize = sz_s2u(LARGE_MINCLASS);
1676 		ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
1677 		    bumped_usize, ind_large);
1678 		if (unlikely(ret == NULL)) {
1679 			return NULL;
1680 		}
1681 		arena_prof_promote(tsd_tsdn(tsd), ret, usize);
1682 	} else {
1683 		ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
1684 	}
1685 
1686 	return ret;
1687 }
1688 
1689 /*
1690  * Returns true if the allocation will overflow, and false otherwise.  Sets
1691  * *size to the product either way.
1692  */
1693 JEMALLOC_ALWAYS_INLINE bool
1694 compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
1695     size_t *size) {
1696 	/*
1697 	 * This function is just num_items * item_size, except that we may have
1698 	 * to check for overflow.
1699 	 */
1700 
1701 	if (!may_overflow) {
1702 		assert(dopts->num_items == 1);
1703 		*size = dopts->item_size;
1704 		return false;
1705 	}
1706 
1707 	/* A size_t with its high-half bits all set to 1. */
1708 	const static size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
1709 
1710 	*size = dopts->item_size * dopts->num_items;
1711 
1712 	if (unlikely(*size == 0)) {
1713 		return (dopts->num_items != 0 && dopts->item_size != 0);
1714 	}
1715 
1716 	/*
1717 	 * We got a non-zero size, but we don't know if we overflowed to get
1718 	 * there.  To avoid having to do a divide, we'll be clever and note that
1719 	 * if both A and B can be represented in N/2 bits, then their product
1720 	 * can be represented in N bits (without the possibility of overflow).
1721 	 */
1722 	if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
1723 		return false;
1724 	}
1725 	if (likely(*size / dopts->item_size == dopts->num_items)) {
1726 		return false;
1727 	}
1728 	return true;
1729 }
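/*
 * Worked example of the check above, assuming a 64-bit size_t: high_bits is
 * 0xffffffff00000000, so if both num_items and item_size fit in 32 bits the
 * product cannot wrap and the cheap mask test suffices; only when either
 * operand has a high bit set do we fall back to the division-based check.
 */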
1730 
1731 JEMALLOC_ALWAYS_INLINE int
1732 imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
1733 	/* Where the actual allocated memory will live. */
1734 	void *allocation = NULL;
1735 	/* Filled in by compute_size_with_overflow below. */
1736 	size_t size = 0;
1737 	/*
1738 	 * For unaligned allocations, we need only ind.  For aligned
1739 	 * allocations, or in case of stats or profiling we need usize.
1740 	 *
1741 	 * These are actually dead stores, in that their values are reset before
1742 	 * any branch on their value is taken.  Sometimes though, it's
1743 	 * convenient to pass them as arguments before this point.  To avoid
1744 	 * undefined behavior then, we initialize them with dummy stores.
1745 	 */
1746 	szind_t ind = 0;
1747 	size_t usize = 0;
1748 
1749 	/* Reentrancy is only checked on slow path. */
1750 	int8_t reentrancy_level;
1751 
1752 	/* Compute the amount of memory the user wants. */
1753 	if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
1754 	    &size))) {
1755 		goto label_oom;
1756 	}
1757 
1758 	/* Validate the user input. */
1759 	if (sopts->bump_empty_alloc) {
1760 		if (unlikely(size == 0)) {
1761 			size = 1;
1762 		}
1763 	}
1764 
1765 	if (sopts->assert_nonempty_alloc) {
1766 		assert (size != 0);
1767 	}
1768 
1769 	if (unlikely(dopts->alignment < sopts->min_alignment
1770 	    || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
1771 		goto label_invalid_alignment;
1772 	}
1773 
1774 	/* This is the beginning of the "core" algorithm. */
1775 
1776 	if (dopts->alignment == 0) {
1777 		ind = sz_size2index(size);
1778 		if (unlikely(ind >= NSIZES)) {
1779 			goto label_oom;
1780 		}
1781 		if (config_stats || (config_prof && opt_prof)) {
1782 			usize = sz_index2size(ind);
1783 			assert(usize > 0 && usize <= LARGE_MAXCLASS);
1784 		}
1785 	} else {
1786 		usize = sz_sa2u(size, dopts->alignment);
1787 		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
1788 			goto label_oom;
1789 		}
1790 	}
1791 
1792 	check_entry_exit_locking(tsd_tsdn(tsd));
1793 
1794 	/*
1795 	 * If we need to handle reentrancy, we can do it out of a
1796 	 * known-initialized arena (i.e. arena 0).
1797 	 */
1798 	reentrancy_level = tsd_reentrancy_level_get(tsd);
1799 	if (sopts->slow && unlikely(reentrancy_level > 0)) {
1800 		/*
1801 		 * We should never specify particular arenas or tcaches from
1802 		 * within our internal allocations.
1803 		 */
1804 		assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
1805 		    dopts->tcache_ind == TCACHE_IND_NONE);
1806 		assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
1807 		dopts->tcache_ind = TCACHE_IND_NONE;
1808 		/* We know that arena 0 has already been initialized. */
1809 		dopts->arena_ind = 0;
1810 	}
1811 
1812 	/* If profiling is on, get our profiling context. */
1813 	if (config_prof && opt_prof) {
1814 		/*
1815 		 * Note that if we're going down this path, usize must have been
1816 		 * initialized in the previous if statement.
1817 		 */
1818 		prof_tctx_t *tctx = prof_alloc_prep(
1819 		    tsd, usize, prof_active_get_unlocked(), true);
1820 
1821 		alloc_ctx_t alloc_ctx;
1822 		if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
1823 			alloc_ctx.slab = (usize <= SMALL_MAXCLASS);
1824 			allocation = imalloc_no_sample(
1825 			    sopts, dopts, tsd, usize, usize, ind);
1826 		} else if ((uintptr_t)tctx > (uintptr_t)1U) {
1827 			/*
1828 			 * Note that ind might still be 0 here.  This is fine;
1829 			 * imalloc_sample ignores ind if dopts->alignment > 0.
1830 			 */
1831 			allocation = imalloc_sample(
1832 			    sopts, dopts, tsd, usize, ind);
1833 			alloc_ctx.slab = false;
1834 		} else {
1835 			allocation = NULL;
1836 		}
1837 
1838 		if (unlikely(allocation == NULL)) {
1839 			prof_alloc_rollback(tsd, tctx, true);
1840 			goto label_oom;
1841 		}
1842 		prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
1843 	} else {
1844 		/*
1845 		 * If dopts->alignment > 0, then ind is still 0, but usize was
1846 		 * computed in the previous if statement.  Down the positive
1847 		 * alignment path, imalloc_no_sample ignores ind and size
1848 		 * (relying only on usize).
1849 		 */
1850 		allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
1851 		    ind);
1852 		if (unlikely(allocation == NULL)) {
1853 			goto label_oom;
1854 		}
1855 	}
1856 
1857 	/*
1858 	 * Allocation has been done at this point.  We still have some
1859 	 * post-allocation work to do though.
1860 	 */
1861 	assert(dopts->alignment == 0
1862 	    || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
1863 
1864 	if (config_stats) {
1865 		assert(usize == isalloc(tsd_tsdn(tsd), allocation));
1866 		*tsd_thread_allocatedp_get(tsd) += usize;
1867 	}
1868 
1869 	if (sopts->slow) {
1870 		UTRACE(0, size, allocation);
1871 	}
1872 
1873 	/* Success! */
1874 	check_entry_exit_locking(tsd_tsdn(tsd));
1875 	*dopts->result = allocation;
1876 	return 0;
1877 
1878 label_oom:
1879 	if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
1880 		malloc_write(sopts->oom_string);
1881 		abort();
1882 	}
1883 
1884 	if (sopts->slow) {
1885 		UTRACE(NULL, size, NULL);
1886 	}
1887 
1888 	check_entry_exit_locking(tsd_tsdn(tsd));
1889 
1890 	if (sopts->set_errno_on_error) {
1891 		set_errno(ENOMEM);
1892 	}
1893 
1894 	if (sopts->null_out_result_on_error) {
1895 		*dopts->result = NULL;
1896 	}
1897 
1898 	return ENOMEM;
1899 
1900 	/*
1901 	 * This label is only jumped to by one goto; we move it out of line
1902 	 * anyway to avoid obscuring the non-error paths, and for symmetry with
1903 	 * the oom case.
1904 	 */
1905 label_invalid_alignment:
1906 	if (config_xmalloc && unlikely(opt_xmalloc)) {
1907 		malloc_write(sopts->invalid_alignment_string);
1908 		abort();
1909 	}
1910 
1911 	if (sopts->set_errno_on_error) {
1912 		set_errno(EINVAL);
1913 	}
1914 
1915 	if (sopts->slow) {
1916 		UTRACE(NULL, size, NULL);
1917 	}
1918 
1919 	check_entry_exit_locking(tsd_tsdn(tsd));
1920 
1921 	if (sopts->null_out_result_on_error) {
1922 		*dopts->result = NULL;
1923 	}
1924 
1925 	return EINVAL;
1926 }
1927 
1928 /* Returns the errno-style error code of the allocation. */
1929 JEMALLOC_ALWAYS_INLINE int
1930 imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
1931 	if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
1932 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1933 			malloc_write(sopts->oom_string);
1934 			abort();
1935 		}
1936 		UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
1937 		set_errno(ENOMEM);
1938 		*dopts->result = NULL;
1939 
1940 		return ENOMEM;
1941 	}
1942 
1943 	/* We always need the tsd.  Let's grab it right away. */
1944 	tsd_t *tsd = tsd_fetch();
1945 	assert(tsd);
1946 	if (likely(tsd_fast(tsd))) {
1947 		/* Fast and common path. */
1948 		tsd_assert_fast(tsd);
1949 		sopts->slow = false;
1950 		return imalloc_body(sopts, dopts, tsd);
1951 	} else {
1952 		sopts->slow = true;
1953 		return imalloc_body(sopts, dopts, tsd);
1954 	}
1955 }
1956 /******************************************************************************/
1957 /*
1958  * Begin malloc(3)-compatible functions.
1959  */
1960 
1961 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1962 void JEMALLOC_NOTHROW *
1963 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
1964 je_malloc(size_t size) {
1965 	void *ret;
1966 	static_opts_t sopts;
1967 	dynamic_opts_t dopts;
1968 
1969 	static_opts_init(&sopts);
1970 	dynamic_opts_init(&dopts);
1971 
1972 	sopts.bump_empty_alloc = true;
1973 	sopts.null_out_result_on_error = true;
1974 	sopts.set_errno_on_error = true;
1975 	sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
1976 
1977 	dopts.result = &ret;
1978 	dopts.num_items = 1;
1979 	dopts.item_size = size;
1980 
1981 	imalloc(&sopts, &dopts);
1982 
1983 	return ret;
1984 }
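
/*
 * Usage sketch (illustrative only; on builds where jemalloc provides the
 * system allocator, as on FreeBSD, je_malloc() is visible as malloc(3)).  The
 * wrapper above preserves the standard contract: NULL with errno == ENOMEM on
 * failure, or an abort() when the xmalloc option is enabled:
 *
 *	#include <stdlib.h>
 *	#include <string.h>
 *
 *	char *buf = malloc(128);
 *	if (buf == NULL) {
 *		abort();
 *	}
 *	memset(buf, 0, 128);
 *	free(buf);
 */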
1985 
1986 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
1987 JEMALLOC_ATTR(nonnull(1))
1988 je_posix_memalign(void **memptr, size_t alignment, size_t size) {
1989 	int ret;
1990 	static_opts_t sopts;
1991 	dynamic_opts_t dopts;
1992 
1993 	static_opts_init(&sopts);
1994 	dynamic_opts_init(&dopts);
1995 
1996 	sopts.bump_empty_alloc = true;
1997 	sopts.min_alignment = sizeof(void *);
1998 	sopts.oom_string =
1999 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2000 	sopts.invalid_alignment_string =
2001 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2002 
2003 	dopts.result = memptr;
2004 	dopts.num_items = 1;
2005 	dopts.item_size = size;
2006 	dopts.alignment = alignment;
2007 
2008 	ret = imalloc(&sopts, &dopts);
2009 	return ret;
2010 }
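
/*
 * Usage sketch (illustrative): posix_memalign() reports failure through its
 * return value rather than by setting errno (note that set_errno_on_error is
 * not requested above), and the alignment must be a power of two no smaller
 * than sizeof(void *) (sopts.min_alignment):
 *
 *	#include <stdlib.h>
 *
 *	void *buf;
 *	int error = posix_memalign(&buf, 64, 1024);
 *	if (error == 0) {
 *		free(buf);
 *	}
 *
 * error is EINVAL for an unacceptable alignment and ENOMEM when memory is
 * exhausted.
 */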
2011 
2012 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2013 void JEMALLOC_NOTHROW *
2014 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
2015 je_aligned_alloc(size_t alignment, size_t size) {
2016 	void *ret;
2017 
2018 	static_opts_t sopts;
2019 	dynamic_opts_t dopts;
2020 
2021 	static_opts_init(&sopts);
2022 	dynamic_opts_init(&dopts);
2023 
2024 	sopts.bump_empty_alloc = true;
2025 	sopts.null_out_result_on_error = true;
2026 	sopts.set_errno_on_error = true;
2027 	sopts.min_alignment = 1;
2028 	sopts.oom_string =
2029 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2030 	sopts.invalid_alignment_string =
2031 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2032 
2033 	dopts.result = &ret;
2034 	dopts.num_items = 1;
2035 	dopts.item_size = size;
2036 	dopts.alignment = alignment;
2037 
2038 	imalloc(&sopts, &dopts);
2039 	return ret;
2040 }
2041 
2042 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2043 void JEMALLOC_NOTHROW *
2044 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
2045 je_calloc(size_t num, size_t size) {
2046 	void *ret;
2047 	static_opts_t sopts;
2048 	dynamic_opts_t dopts;
2049 
2050 	static_opts_init(&sopts);
2051 	dynamic_opts_init(&dopts);
2052 
2053 	sopts.may_overflow = true;
2054 	sopts.bump_empty_alloc = true;
2055 	sopts.null_out_result_on_error = true;
2056 	sopts.set_errno_on_error = true;
2057 	sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
2058 
2059 	dopts.result = &ret;
2060 	dopts.num_items = num;
2061 	dopts.item_size = size;
2062 	dopts.zero = true;
2063 
2064 	imalloc(&sopts, &dopts);
2065 
2066 	return ret;
2067 }
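
/*
 * Usage sketch (illustrative): sopts.may_overflow above makes the num * size
 * product overflow-checked, and dopts.zero guarantees zero-filled memory,
 * giving the usual calloc(3) semantics:
 *
 *	#include <stdlib.h>
 *
 *	int *arr = calloc(1000, sizeof(int));
 *	if (arr == NULL) {
 *		abort();
 *	}
 *	free(arr);
 *
 * On success every element reads as zero; an overflowing num * size request
 * fails with NULL and errno == ENOMEM rather than returning a truncated
 * buffer.
 */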
2068 
2069 static void *
2070 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2071     prof_tctx_t *tctx) {
2072 	void *p;
2073 
2074 	if (tctx == NULL) {
2075 		return NULL;
2076 	}
2077 	if (usize <= SMALL_MAXCLASS) {
2078 		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
2079 		if (p == NULL) {
2080 			return NULL;
2081 		}
2082 		arena_prof_promote(tsd_tsdn(tsd), p, usize);
2083 	} else {
2084 		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2085 	}
2086 
2087 	return p;
2088 }
2089 
2090 JEMALLOC_ALWAYS_INLINE void *
2091 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2092     alloc_ctx_t *alloc_ctx) {
2093 	void *p;
2094 	bool prof_active;
2095 	prof_tctx_t *old_tctx, *tctx;
2096 
2097 	prof_active = prof_active_get_unlocked();
2098 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2099 	tctx = prof_alloc_prep(tsd, usize, prof_active, true);
2100 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2101 		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
2102 	} else {
2103 		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2104 	}
2105 	if (unlikely(p == NULL)) {
2106 		prof_alloc_rollback(tsd, tctx, true);
2107 		return NULL;
2108 	}
2109 	prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
2110 	    old_tctx);
2111 
2112 	return p;
2113 }
2114 
2115 JEMALLOC_ALWAYS_INLINE void
2116 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
2117 	if (!slow_path) {
2118 		tsd_assert_fast(tsd);
2119 	}
2120 	check_entry_exit_locking(tsd_tsdn(tsd));
2121 	if (tsd_reentrancy_level_get(tsd) != 0) {
2122 		assert(slow_path);
2123 	}
2124 
2125 	assert(ptr != NULL);
2126 	assert(malloc_initialized() || IS_INITIALIZER);
2127 
2128 	alloc_ctx_t alloc_ctx;
2129 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2130 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2131 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2132 	assert(alloc_ctx.szind != NSIZES);
2133 
2134 	size_t usize;
2135 	if (config_prof && opt_prof) {
2136 		usize = sz_index2size(alloc_ctx.szind);
2137 		prof_free(tsd, ptr, usize, &alloc_ctx);
2138 	} else if (config_stats) {
2139 		usize = sz_index2size(alloc_ctx.szind);
2140 	}
2141 	if (config_stats) {
2142 		*tsd_thread_deallocatedp_get(tsd) += usize;
2143 	}
2144 
2145 	if (likely(!slow_path)) {
2146 		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2147 		    false);
2148 	} else {
2149 		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2150 		    true);
2151 	}
2152 }
2153 
2154 JEMALLOC_ALWAYS_INLINE void
2155 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
2156 	if (!slow_path) {
2157 		tsd_assert_fast(tsd);
2158 	}
2159 	check_entry_exit_locking(tsd_tsdn(tsd));
2160 	if (tsd_reentrancy_level_get(tsd) != 0) {
2161 		assert(slow_path);
2162 	}
2163 
2164 	assert(ptr != NULL);
2165 	assert(malloc_initialized() || IS_INITIALIZER);
2166 
2167 	alloc_ctx_t alloc_ctx, *ctx;
2168 	if (config_prof && opt_prof) {
2169 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2170 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2171 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2172 		assert(alloc_ctx.szind == sz_size2index(usize));
2173 		ctx = &alloc_ctx;
2174 		prof_free(tsd, ptr, usize, ctx);
2175 	} else {
2176 		ctx = NULL;
2177 	}
2178 
2179 	if (config_stats) {
2180 		*tsd_thread_deallocatedp_get(tsd) += usize;
2181 	}
2182 
2183 	if (likely(!slow_path)) {
2184 		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
2185 	} else {
2186 		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
2187 	}
2188 }
2189 
2190 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2191 void JEMALLOC_NOTHROW *
2192 JEMALLOC_ALLOC_SIZE(2)
2193 je_realloc(void *ptr, size_t size) {
2194 	void *ret;
2195 	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
2196 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
2197 	size_t old_usize = 0;
2198 
2199 	if (unlikely(size == 0)) {
2200 		if (ptr != NULL) {
2201 			/* realloc(ptr, 0) is equivalent to free(ptr). */
2202 			UTRACE(ptr, 0, 0);
2203 			tcache_t *tcache;
2204 			tsd_t *tsd = tsd_fetch();
2205 			if (tsd_reentrancy_level_get(tsd) == 0) {
2206 				tcache = tcache_get(tsd);
2207 			} else {
2208 				tcache = NULL;
2209 			}
2210 			ifree(tsd, ptr, tcache, true);
2211 			return NULL;
2212 		}
2213 		size = 1;
2214 	}
2215 
2216 	if (likely(ptr != NULL)) {
2217 		assert(malloc_initialized() || IS_INITIALIZER);
2218 		tsd_t *tsd = tsd_fetch();
2219 
2220 		check_entry_exit_locking(tsd_tsdn(tsd));
2221 
2222 		alloc_ctx_t alloc_ctx;
2223 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2224 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2225 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2226 		assert(alloc_ctx.szind != NSIZES);
2227 		old_usize = sz_index2size(alloc_ctx.szind);
2228 		assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2229 		if (config_prof && opt_prof) {
2230 			usize = sz_s2u(size);
2231 			ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
2232 			    NULL : irealloc_prof(tsd, ptr, old_usize, usize,
2233 			    &alloc_ctx);
2234 		} else {
2235 			if (config_stats) {
2236 				usize = sz_s2u(size);
2237 			}
2238 			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
2239 		}
2240 		tsdn = tsd_tsdn(tsd);
2241 	} else {
2242 		/* realloc(NULL, size) is equivalent to malloc(size). */
2243 		return je_malloc(size);
2244 	}
2245 
2246 	if (unlikely(ret == NULL)) {
2247 		if (config_xmalloc && unlikely(opt_xmalloc)) {
2248 			malloc_write("<jemalloc>: Error in realloc(): "
2249 			    "out of memory\n");
2250 			abort();
2251 		}
2252 		set_errno(ENOMEM);
2253 	}
2254 	if (config_stats && likely(ret != NULL)) {
2255 		tsd_t *tsd;
2256 
2257 		assert(usize == isalloc(tsdn, ret));
2258 		tsd = tsdn_tsd(tsdn);
2259 		*tsd_thread_allocatedp_get(tsd) += usize;
2260 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2261 	}
2262 	UTRACE(ptr, size, ret);
2263 	check_entry_exit_locking(tsdn);
2264 	return ret;
2265 }
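
/*
 * Behavior sketch (illustrative) of the special cases handled above:
 *
 *	void *p = realloc(NULL, 100);	(equivalent to malloc(100))
 *	p = realloc(p, 200);		(may move; contents preserved up to the
 *					 smaller of the old and new sizes)
 *	(void)realloc(p, 0);		(equivalent to free(p); returns NULL)
 *
 * A NULL return from realloc(p, n) with n > 0 leaves p valid and sets errno
 * to ENOMEM.
 */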
2266 
2267 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2268 je_free(void *ptr) {
2269 	UTRACE(ptr, 0, 0);
2270 	if (likely(ptr != NULL)) {
2271 		/*
2272 		 * We avoid setting up tsd fully (e.g. tcache, arena binding)
2273 		 * based only on free() calls -- other activities trigger the
2274 		 * minimal-to-full transition.  This is because free() may
2275 		 * happen during thread shutdown, after TLS deallocation: if a
2276 		 * thread never performed any malloc activity until then, a
2277 		 * fully-set-up tsd won't be destroyed properly.
2278 		 */
2279 		tsd_t *tsd = tsd_fetch_min();
2280 		check_entry_exit_locking(tsd_tsdn(tsd));
2281 
2282 		tcache_t *tcache;
2283 		if (likely(tsd_fast(tsd))) {
2284 			tsd_assert_fast(tsd);
2285 			/* Unconditionally get tcache ptr on fast path. */
2286 			tcache = tsd_tcachep_get(tsd);
2287 			ifree(tsd, ptr, tcache, false);
2288 		} else {
2289 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2290 				tcache = tcache_get(tsd);
2291 			} else {
2292 				tcache = NULL;
2293 			}
2294 			ifree(tsd, ptr, tcache, true);
2295 		}
2296 		check_entry_exit_locking(tsd_tsdn(tsd));
2297 	}
2298 }
2299 
2300 /*
2301  * End malloc(3)-compatible functions.
2302  */
2303 /******************************************************************************/
2304 /*
2305  * Begin non-standard override functions.
2306  */
2307 
2308 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
2309 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2310 void JEMALLOC_NOTHROW *
2311 JEMALLOC_ATTR(malloc)
2312 je_memalign(size_t alignment, size_t size) {
2313 	void *ret;
2314 	static_opts_t sopts;
2315 	dynamic_opts_t dopts;
2316 
2317 	static_opts_init(&sopts);
2318 	dynamic_opts_init(&dopts);
2319 
2320 	sopts.bump_empty_alloc = true;
2321 	sopts.min_alignment = 1;
2322 	sopts.oom_string =
2323 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2324 	sopts.invalid_alignment_string =
2325 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2326 	sopts.null_out_result_on_error = true;
2327 
2328 	dopts.result = &ret;
2329 	dopts.num_items = 1;
2330 	dopts.item_size = size;
2331 	dopts.alignment = alignment;
2332 
2333 	imalloc(&sopts, &dopts);
2334 	return ret;
2335 }
2336 #endif
2337 
2338 #ifdef JEMALLOC_OVERRIDE_VALLOC
2339 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2340 void JEMALLOC_NOTHROW *
2341 JEMALLOC_ATTR(malloc)
2342 je_valloc(size_t size) {
2343 	void *ret;
2344 
2345 	static_opts_t sopts;
2346 	dynamic_opts_t dopts;
2347 
2348 	static_opts_init(&sopts);
2349 	dynamic_opts_init(&dopts);
2350 
2351 	sopts.bump_empty_alloc = true;
2352 	sopts.null_out_result_on_error = true;
2353 	sopts.min_alignment = PAGE;
2354 	sopts.oom_string =
2355 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2356 	sopts.invalid_alignment_string =
2357 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2358 
2359 	dopts.result = &ret;
2360 	dopts.num_items = 1;
2361 	dopts.item_size = size;
2362 	dopts.alignment = PAGE;
2363 
2364 	imalloc(&sopts, &dopts);
2365 
2366 	return ret;
2367 }
2368 #endif
2369 
2370 #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
2371 /*
2372  * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
2373  * glibc provides the RTLD_DEEPBIND flag for dlopen(3), which can make it
2374  * possible to inconsistently reference libc's malloc(3)-compatible functions
2375  *
2376  * These definitions interpose hooks in glibc.  The functions are actually
2377  * passed an extra argument for the caller return address, which will be
2378  * ignored.
2379  */
2380 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
2381 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
2382 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
2383 #  ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
2384 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
2385     je_memalign;
2386 #  endif
2387 
2388 #  ifdef CPU_COUNT
2389 /*
2390  * To enable static linking with glibc, the libc-specific malloc interface
2391  * must also be implemented, so that none of glibc's malloc.o functions are
2392  * added to the link.
2393  */
2394 #    define ALIAS(je_fn)	__attribute__((alias (#je_fn), used))
2395 /* To force macro expansion of je_ prefix before stringification. */
2396 #    define PREALIAS(je_fn)	ALIAS(je_fn)
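/*
 * Expansion sketch (illustrative; the exact alias target depends on how the
 * je_ prefix is mapped for this build): with je_malloc mapped to the public
 * symbol "malloc", the __libc_malloc declaration below expands to roughly
 *
 *	void *__libc_malloc(size_t size)
 *	    __attribute__((alias ("malloc"), used));
 *
 * so the glibc-internal entry point resolves to jemalloc's implementation and
 * glibc's own malloc.o never enters the static link.
 */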
2397 #    ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
2398 void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
2399 #    endif
2400 #    ifdef JEMALLOC_OVERRIDE___LIBC_FREE
2401 void __libc_free(void *ptr) PREALIAS(je_free);
2402 #    endif
2403 #    ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
2404 void *__libc_malloc(size_t size) PREALIAS(je_malloc);
2405 #    endif
2406 #    ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
2407 void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
2408 #    endif
2409 #    ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
2410 void *__libc_realloc(void *ptr, size_t size) PREALIAS(je_realloc);
2411 #    endif
2412 #    ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
2413 void *__libc_valloc(size_t size) PREALIAS(je_valloc);
2414 #    endif
2415 #    ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
2416 int __posix_memalign(void **r, size_t a, size_t s) PREALIAS(je_posix_memalign);
2417 #    endif
2418 #    undef PREALIAS
2419 #    undef ALIAS
2420 #  endif
2421 #endif
2422 
2423 /*
2424  * End non-standard override functions.
2425  */
2426 /******************************************************************************/
2427 /*
2428  * Begin non-standard functions.
2429  */
2430 
2431 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2432 void JEMALLOC_NOTHROW *
2433 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2434 je_mallocx(size_t size, int flags) {
2435 	void *ret;
2436 	static_opts_t sopts;
2437 	dynamic_opts_t dopts;
2438 
2439 	static_opts_init(&sopts);
2440 	dynamic_opts_init(&dopts);
2441 
2442 	sopts.assert_nonempty_alloc = true;
2443 	sopts.null_out_result_on_error = true;
2444 	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
2445 
2446 	dopts.result = &ret;
2447 	dopts.num_items = 1;
2448 	dopts.item_size = size;
2449 	if (unlikely(flags != 0)) {
2450 		if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
2451 			dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2452 		}
2453 
2454 		dopts.zero = MALLOCX_ZERO_GET(flags);
2455 
2456 		if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2457 			if ((flags & MALLOCX_TCACHE_MASK)
2458 			    == MALLOCX_TCACHE_NONE) {
2459 				dopts.tcache_ind = TCACHE_IND_NONE;
2460 			} else {
2461 				dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
2462 			}
2463 		} else {
2464 			dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
2465 		}
2466 
2467 		if ((flags & MALLOCX_ARENA_MASK) != 0) {
2468 			dopts.arena_ind = MALLOCX_ARENA_GET(flags);
		}
2469 	}
2470 
2471 	imalloc(&sopts, &dopts);
2472 	return ret;
2473 }
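
/*
 * Usage sketch (illustrative; the declarations live in <malloc_np.h> on
 * FreeBSD, or <jemalloc/jemalloc.h> for standalone builds): the flags decoded
 * above compose with bitwise OR, e.g. a page-aligned, zero-filled allocation
 * that bypasses the thread cache:
 *
 *	void *p = mallocx(4096,
 *	    MALLOCX_ALIGN(4096) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
 *	if (p == NULL) {
 *		abort();
 *	}
 *	dallocx(p, MALLOCX_TCACHE_NONE);
 *
 * Note that mallocx() returns NULL on failure without touching errno
 * (set_errno_on_error is not requested above).
 */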
2474 
2475 static void *
2476 irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
2477     size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2478     prof_tctx_t *tctx) {
2479 	void *p;
2480 
2481 	if (tctx == NULL) {
2482 		return NULL;
2483 	}
2484 	if (usize <= SMALL_MAXCLASS) {
2485 		p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS,
2486 		    alignment, zero, tcache, arena);
2487 		if (p == NULL) {
2488 			return NULL;
2489 		}
2490 		arena_prof_promote(tsdn, p, usize);
2491 	} else {
2492 		p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
2493 		    tcache, arena);
2494 	}
2495 
2496 	return p;
2497 }
2498 
2499 JEMALLOC_ALWAYS_INLINE void *
2500 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2501     size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2502     arena_t *arena, alloc_ctx_t *alloc_ctx) {
2503 	void *p;
2504 	bool prof_active;
2505 	prof_tctx_t *old_tctx, *tctx;
2506 
2507 	prof_active = prof_active_get_unlocked();
2508 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2509 	tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
2510 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2511 		p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
2512 		    *usize, alignment, zero, tcache, arena, tctx);
2513 	} else {
2514 		p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
2515 		    zero, tcache, arena);
2516 	}
2517 	if (unlikely(p == NULL)) {
2518 		prof_alloc_rollback(tsd, tctx, false);
2519 		return NULL;
2520 	}
2521 
2522 	if (p == old_ptr && alignment != 0) {
2523 		/*
2524 		 * The allocation did not move, so it is possible that the size
2525 		 * class is smaller than would guarantee the requested
2526 		 * alignment, and that the alignment constraint was
2527 		 * serendipitously satisfied.  Additionally, old_usize may not
2528 		 * be the same as the current usize because of in-place large
2529 		 * reallocation.  Therefore, query the actual value of usize.
2530 		 */
2531 		*usize = isalloc(tsd_tsdn(tsd), p);
2532 	}
2533 	prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
2534 	    old_usize, old_tctx);
2535 
2536 	return p;
2537 }
2538 
2539 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2540 void JEMALLOC_NOTHROW *
2541 JEMALLOC_ALLOC_SIZE(2)
2542 je_rallocx(void *ptr, size_t size, int flags) {
2543 	void *p;
2544 	tsd_t *tsd;
2545 	size_t usize;
2546 	size_t old_usize;
2547 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2548 	bool zero = flags & MALLOCX_ZERO;
2549 	arena_t *arena;
2550 	tcache_t *tcache;
2551 
2552 	assert(ptr != NULL);
2553 	assert(size != 0);
2554 	assert(malloc_initialized() || IS_INITIALIZER);
2555 	tsd = tsd_fetch();
2556 	check_entry_exit_locking(tsd_tsdn(tsd));
2557 
2558 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2559 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2560 		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
2561 		if (unlikely(arena == NULL)) {
2562 			goto label_oom;
2563 		}
2564 	} else {
2565 		arena = NULL;
2566 	}
2567 
2568 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2569 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2570 			tcache = NULL;
2571 		} else {
2572 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2573 		}
2574 	} else {
2575 		tcache = tcache_get(tsd);
2576 	}
2577 
2578 	alloc_ctx_t alloc_ctx;
2579 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2580 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2581 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2582 	assert(alloc_ctx.szind != NSIZES);
2583 	old_usize = sz_index2size(alloc_ctx.szind);
2584 	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2585 	if (config_prof && opt_prof) {
2586 		usize = (alignment == 0) ?
2587 		    sz_s2u(size) : sz_sa2u(size, alignment);
2588 		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
2589 			goto label_oom;
2590 		}
2591 		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2592 		    zero, tcache, arena, &alloc_ctx);
2593 		if (unlikely(p == NULL)) {
2594 			goto label_oom;
2595 		}
2596 	} else {
2597 		p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
2598 		    zero, tcache, arena);
2599 		if (unlikely(p == NULL)) {
2600 			goto label_oom;
2601 		}
2602 		if (config_stats) {
2603 			usize = isalloc(tsd_tsdn(tsd), p);
2604 		}
2605 	}
2606 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2607 
2608 	if (config_stats) {
2609 		*tsd_thread_allocatedp_get(tsd) += usize;
2610 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2611 	}
2612 	UTRACE(ptr, size, p);
2613 	check_entry_exit_locking(tsd_tsdn(tsd));
2614 	return p;
2615 label_oom:
2616 	if (config_xmalloc && unlikely(opt_xmalloc)) {
2617 		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2618 		abort();
2619 	}
2620 	UTRACE(ptr, size, 0);
2621 	check_entry_exit_locking(tsd_tsdn(tsd));
2622 	return NULL;
2623 }
2624 
2625 JEMALLOC_ALWAYS_INLINE size_t
2626 ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2627     size_t extra, size_t alignment, bool zero) {
2628 	size_t usize;
2629 
2630 	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) {
2631 		return old_usize;
2632 	}
2633 	usize = isalloc(tsdn, ptr);
2634 
2635 	return usize;
2636 }
2637 
2638 static size_t
2639 ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2640     size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
2641 	size_t usize;
2642 
2643 	if (tctx == NULL) {
2644 		return old_usize;
2645 	}
2646 	usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
2647 	    zero);
2648 
2649 	return usize;
2650 }
2651 
2652 JEMALLOC_ALWAYS_INLINE size_t
2653 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2654     size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
2655 	size_t usize_max, usize;
2656 	bool prof_active;
2657 	prof_tctx_t *old_tctx, *tctx;
2658 
2659 	prof_active = prof_active_get_unlocked();
2660 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
2661 	/*
2662 	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2663 	 * Therefore, compute its maximum possible value and use that in
2664 	 * prof_alloc_prep() to decide whether to capture a backtrace.
2665 	 * prof_realloc() will use the actual usize to decide whether to sample.
2666 	 */
2667 	if (alignment == 0) {
2668 		usize_max = sz_s2u(size+extra);
2669 		assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
2670 	} else {
2671 		usize_max = sz_sa2u(size+extra, alignment);
2672 		if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
2673 			/*
2674 			 * usize_max is out of range, and chances are that
2675 			 * allocation will fail, but use the maximum possible
2676 			 * value and carry on with prof_alloc_prep(), just in
2677 			 * case allocation succeeds.
2678 			 */
2679 			usize_max = LARGE_MAXCLASS;
2680 		}
2681 	}
2682 	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
2683 
2684 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2685 		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
2686 		    size, extra, alignment, zero, tctx);
2687 	} else {
2688 		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2689 		    extra, alignment, zero);
2690 	}
2691 	if (usize == old_usize) {
2692 		prof_alloc_rollback(tsd, tctx, false);
2693 		return usize;
2694 	}
2695 	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
2696 	    old_tctx);
2697 
2698 	return usize;
2699 }
2700 
2701 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2702 je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
2703 	tsd_t *tsd;
2704 	size_t usize, old_usize;
2705 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2706 	bool zero = flags & MALLOCX_ZERO;
2707 
2708 	assert(ptr != NULL);
2709 	assert(size != 0);
2710 	assert(SIZE_T_MAX - size >= extra);
2711 	assert(malloc_initialized() || IS_INITIALIZER);
2712 	tsd = tsd_fetch();
2713 	check_entry_exit_locking(tsd_tsdn(tsd));
2714 
2715 	alloc_ctx_t alloc_ctx;
2716 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2717 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2718 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2719 	assert(alloc_ctx.szind != NSIZES);
2720 	old_usize = sz_index2size(alloc_ctx.szind);
2721 	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2722 	/*
2723 	 * The API explicitly absolves itself of protecting against (size +
2724 	 * extra) numerical overflow, but we may need to clamp extra to avoid
2725 	 * exceeding LARGE_MAXCLASS.
2726 	 *
2727 	 * Ordinarily, size limit checking is handled deeper down, but here we
2728 	 * have to check as part of (size + extra) clamping, since we need the
2729 	 * clamped value in the above helper functions.
2730 	 */
2731 	if (unlikely(size > LARGE_MAXCLASS)) {
2732 		usize = old_usize;
2733 		goto label_not_resized;
2734 	}
2735 	if (unlikely(LARGE_MAXCLASS - size < extra)) {
2736 		extra = LARGE_MAXCLASS - size;
2737 	}
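
	/*
	 * Worked example of the clamp (illustrative): with
	 * size == LARGE_MAXCLASS - 100 and extra == 200,
	 * LARGE_MAXCLASS - size is 100 < extra, so extra is reduced to 100
	 * and size + extra tops out at exactly LARGE_MAXCLASS instead of
	 * exceeding it.
	 */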
2738 
2739 	if (config_prof && opt_prof) {
2740 		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2741 		    alignment, zero, &alloc_ctx);
2742 	} else {
2743 		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2744 		    extra, alignment, zero);
2745 	}
2746 	if (unlikely(usize == old_usize)) {
2747 		goto label_not_resized;
2748 	}
2749 
2750 	if (config_stats) {
2751 		*tsd_thread_allocatedp_get(tsd) += usize;
2752 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2753 	}
2754 label_not_resized:
2755 	UTRACE(ptr, size, ptr);
2756 	check_entry_exit_locking(tsd_tsdn(tsd));
2757 	return usize;
2758 }
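
/*
 * Usage sketch (illustrative): xallocx() only ever resizes in place and
 * returns the resulting usable size, so failure is detected by comparing
 * against the previous usable size rather than by checking for NULL:
 *
 *	size_t oldsz = sallocx(p, 0);
 *	size_t newsz = xallocx(p, oldsz + 1, 0, 0);
 *	if (newsz == oldsz) {
 *		(the allocation could not be grown in place)
 *	}
 */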
2759 
2760 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2761 JEMALLOC_ATTR(pure)
2762 je_sallocx(const void *ptr, int flags) {
2763 	size_t usize;
2764 	tsdn_t *tsdn;
2765 
2766 	assert(malloc_initialized() || IS_INITIALIZER);
2767 	assert(ptr != NULL);
2768 
2769 	tsdn = tsdn_fetch();
2770 	check_entry_exit_locking(tsdn);
2771 
2772 	if (config_debug || force_ivsalloc) {
2773 		usize = ivsalloc(tsdn, ptr);
2774 		assert(force_ivsalloc || usize != 0);
2775 	} else {
2776 		usize = isalloc(tsdn, ptr);
2777 	}
2778 
2779 	check_entry_exit_locking(tsdn);
2780 	return usize;
2781 }
2782 
2783 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2784 je_dallocx(void *ptr, int flags) {
2785 	assert(ptr != NULL);
2786 	assert(malloc_initialized() || IS_INITIALIZER);
2787 
2788 	tsd_t *tsd = tsd_fetch();
2789 	bool fast = tsd_fast(tsd);
2790 	check_entry_exit_locking(tsd_tsdn(tsd));
2791 
2792 	tcache_t *tcache;
2793 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2794 		/* Not allowed to be reentrant and specify a custom tcache. */
2795 		assert(tsd_reentrancy_level_get(tsd) == 0);
2796 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2797 			tcache = NULL;
2798 		} else {
2799 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2800 		}
2801 	} else {
2802 		if (likely(fast)) {
2803 			tcache = tsd_tcachep_get(tsd);
2804 			assert(tcache == tcache_get(tsd));
2805 		} else {
2806 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2807 				tcache = tcache_get(tsd);
2808 			} else {
2809 				tcache = NULL;
2810 			}
2811 		}
2812 	}
2813 
2814 	UTRACE(ptr, 0, 0);
2815 	if (likely(fast)) {
2816 		tsd_assert_fast(tsd);
2817 		ifree(tsd, ptr, tcache, false);
2818 	} else {
2819 		ifree(tsd, ptr, tcache, true);
2820 	}
2821 	check_entry_exit_locking(tsd_tsdn(tsd));
2822 }
2823 
2824 JEMALLOC_ALWAYS_INLINE size_t
2825 inallocx(tsdn_t *tsdn, size_t size, int flags) {
2826 	check_entry_exit_locking(tsdn);
2827 
2828 	size_t usize;
2829 	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
2830 		usize = sz_s2u(size);
2831 	} else {
2832 		usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2833 	}
2834 	check_entry_exit_locking(tsdn);
2835 	return usize;
2836 }
2837 
2838 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2839 je_sdallocx(void *ptr, size_t size, int flags) {
2840 	assert(ptr != NULL);
2841 	assert(malloc_initialized() || IS_INITIALIZER);
2842 
2843 	tsd_t *tsd = tsd_fetch();
2844 	bool fast = tsd_fast(tsd);
2845 	size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
2846 	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
2847 	check_entry_exit_locking(tsd_tsdn(tsd));
2848 
2849 	tcache_t *tcache;
2850 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2851 		/* Not allowed to be reentrant and specify a custom tcache. */
2852 		assert(tsd_reentrancy_level_get(tsd) == 0);
2853 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2854 			tcache = NULL;
2855 		} else {
2856 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2857 		}
2858 	} else {
2859 		if (likely(fast)) {
2860 			tcache = tsd_tcachep_get(tsd);
2861 			assert(tcache == tcache_get(tsd));
2862 		} else {
2863 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2864 				tcache = tcache_get(tsd);
2865 			} else {
2866 				tcache = NULL;
2867 			}
2868 		}
2869 	}
2870 
2871 	UTRACE(ptr, 0, 0);
2872 	if (likely(fast)) {
2873 		tsd_assert_fast(tsd);
2874 		isfree(tsd, ptr, usize, tcache, false);
2875 	} else {
2876 		isfree(tsd, ptr, usize, tcache, true);
2877 	}
2878 	check_entry_exit_locking(tsd_tsdn(tsd));
2879 }
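
/*
 * Usage sketch (illustrative): sdallocx() takes the size back from the
 * caller, and the assert above cross-checks it against isalloc().  Passing
 * the size originally requested (with the same flags) satisfies that
 * contract:
 *
 *	void *p = mallocx(len, 0);
 *	...
 *	sdallocx(p, len, 0);
 */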
2880 
2881 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2882 JEMALLOC_ATTR(pure)
2883 je_nallocx(size_t size, int flags) {
2884 	size_t usize;
2885 	tsdn_t *tsdn;
2886 
2887 	assert(size != 0);
2888 
2889 	if (unlikely(malloc_init())) {
2890 		return 0;
2891 	}
2892 
2893 	tsdn = tsdn_fetch();
2894 	check_entry_exit_locking(tsdn);
2895 
2896 	usize = inallocx(tsdn, size, flags);
2897 	if (unlikely(usize > LARGE_MAXCLASS)) {
2898 		return 0;
2899 	}
2900 
2901 	check_entry_exit_locking(tsdn);
2902 	return usize;
2903 }
2904 
2905 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2906 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
2907     size_t newlen) {
2908 	int ret;
2909 	tsd_t *tsd;
2910 
2911 	if (unlikely(malloc_init())) {
2912 		return EAGAIN;
2913 	}
2914 
2915 	tsd = tsd_fetch();
2916 	check_entry_exit_locking(tsd_tsdn(tsd));
2917 	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
2918 	check_entry_exit_locking(tsd_tsdn(tsd));
2919 	return ret;
2920 }
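
/*
 * Usage sketch (illustrative): reading a scalar via the mallctl namespace,
 * e.g. the current arena limit:
 *
 *	unsigned narenas;
 *	size_t sz = sizeof(narenas);
 *	if (mallctl("arenas.narenas", &narenas, &sz, NULL, 0) == 0) {
 *		(narenas is now valid)
 *	}
 *
 * A non-zero return is an errno-style code (e.g. EAGAIN above when the
 * allocator could not be initialized).
 */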
2921 
2922 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2923 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
2924 	int ret;
2925 
2926 	if (unlikely(malloc_init())) {
2927 		return EAGAIN;
2928 	}
2929 
2930 	tsd_t *tsd = tsd_fetch();
2931 	check_entry_exit_locking(tsd_tsdn(tsd));
2932 	ret = ctl_nametomib(tsd, name, mibp, miblenp);
2933 	check_entry_exit_locking(tsd_tsdn(tsd));
2934 	return ret;
2935 }
2936 
2937 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2938 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
2939     void *newp, size_t newlen) {
2940 	int ret;
2941 	tsd_t *tsd;
2942 
2943 	if (unlikely(malloc_init())) {
2944 		return EAGAIN;
2945 	}
2946 
2947 	tsd = tsd_fetch();
2948 	check_entry_exit_locking(tsd_tsdn(tsd));
2949 	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
2950 	check_entry_exit_locking(tsd_tsdn(tsd));
2951 	return ret;
2952 }
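
/*
 * Usage sketch (illustrative): the nametomib/bymib pair avoids re-parsing a
 * control name that is queried repeatedly, e.g. polling "thread.allocated":
 *
 *	size_t mib[2];
 *	size_t miblen = 2;
 *	uint64_t allocated;
 *	size_t sz = sizeof(allocated);
 *
 *	mallctlnametomib("thread.allocated", mib, &miblen);
 *	(then, on each poll:)
 *	mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0);
 */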
2953 
2954 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2955 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
2956     const char *opts) {
2957 	tsdn_t *tsdn;
2958 
2959 	tsdn = tsdn_fetch();
2960 	check_entry_exit_locking(tsdn);
2961 	stats_print(write_cb, cbopaque, opts);
2962 	check_entry_exit_locking(tsdn);
2963 }
2964 
2965 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2966 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
2967 	size_t ret;
2968 	tsdn_t *tsdn;
2969 
2970 	assert(malloc_initialized() || IS_INITIALIZER);
2971 
2972 	tsdn = tsdn_fetch();
2973 	check_entry_exit_locking(tsdn);
2974 
2975 	if (unlikely(ptr == NULL)) {
2976 		ret = 0;
2977 	} else {
2978 		if (config_debug || force_ivsalloc) {
2979 			ret = ivsalloc(tsdn, ptr);
2980 			assert(force_ivsalloc || ret != 0);
2981 		} else {
2982 			ret = isalloc(tsdn, ptr);
2983 		}
2984 	}
2985 
2986 	check_entry_exit_locking(tsdn);
2987 	return ret;
2988 }
2989 
2990 /*
2991  * End non-standard functions.
2992  */
2993 /******************************************************************************/
2994 /*
2995  * Begin compatibility functions.
2996  */
2997 
2998 #define	ALLOCM_LG_ALIGN(la)	(la)
2999 #define	ALLOCM_ALIGN(a)		(ffsl(a)-1)
3000 #define	ALLOCM_ZERO		((int)0x40)
3001 #define	ALLOCM_NO_MOVE		((int)0x80)
3002 
3003 #define	ALLOCM_SUCCESS		0
3004 #define	ALLOCM_ERR_OOM		1
3005 #define	ALLOCM_ERR_NOT_MOVED	2
3006 
3007 int
3008 je_allocm(void **ptr, size_t *rsize, size_t size, int flags) {
3009 	assert(ptr != NULL);
3010 
3011 	void *p = je_mallocx(size, flags);
3012 	if (p == NULL) {
3013 		return ALLOCM_ERR_OOM;
3014 	}
3015 	if (rsize != NULL) {
3016 		*rsize = isalloc(tsdn_fetch(), p);
3017 	}
3018 	*ptr = p;
3019 	return ALLOCM_SUCCESS;
3020 }
3021 
3022 int
3023 je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) {
3024 	assert(ptr != NULL);
3025 	assert(*ptr != NULL);
3026 	assert(size != 0);
3027 	assert(SIZE_T_MAX - size >= extra);
3028 
3029 	int ret;
3030 	bool no_move = flags & ALLOCM_NO_MOVE;
3031 
3032 	if (no_move) {
3033 		size_t usize = je_xallocx(*ptr, size, extra, flags);
3034 		ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
3035 		if (rsize != NULL) {
3036 			*rsize = usize;
3037 		}
3038 	} else {
3039 		void *p = je_rallocx(*ptr, size+extra, flags);
3040 		if (p != NULL) {
3041 			*ptr = p;
3042 			ret = ALLOCM_SUCCESS;
3043 		} else {
3044 			ret = ALLOCM_ERR_OOM;
3045 		}
3046 		if (rsize != NULL) {
3047 			*rsize = isalloc(tsdn_fetch(), *ptr);
3048 		}
3049 	}
3050 	return ret;
3051 }
3052 
3053 int
3054 je_sallocm(const void *ptr, size_t *rsize, int flags) {
3055 	assert(rsize != NULL);
3056 	*rsize = je_sallocx(ptr, flags);
3057 	return ALLOCM_SUCCESS;
3058 }
3059 
3060 int
3061 je_dallocm(void *ptr, int flags) {
3062 	je_dallocx(ptr, flags);
3063 	return ALLOCM_SUCCESS;
3064 }
3065 
3066 int
3067 je_nallocm(size_t *rsize, size_t size, int flags) {
3068 	size_t usize = je_nallocx(size, flags);
3069 	if (usize == 0) {
3070 		return ALLOCM_ERR_OOM;
3071 	}
3072 	if (rsize != NULL) {
3073 		*rsize = usize;
3074 	}
3075 	return ALLOCM_SUCCESS;
3076 }
3077 
3078 #undef ALLOCM_LG_ALIGN
3079 #undef ALLOCM_ALIGN
3080 #undef ALLOCM_ZERO
3081 #undef ALLOCM_NO_MOVE
3082 
3083 #undef ALLOCM_SUCCESS
3084 #undef ALLOCM_ERR_OOM
3085 #undef ALLOCM_ERR_NOT_MOVED
3086 
3087 /*
3088  * End compatibility functions.
3089  */
3090 /******************************************************************************/
3091 /*
3092  * The following functions are used by threading libraries for protection of
3093  * malloc during fork().
3094  */
3095 
3096 /*
3097  * If an application creates a thread before doing any allocation in the main
3098  * thread, then calls fork(2) in the main thread followed by memory allocation
3099  * in the child process, a race can occur that results in deadlock within the
3100  * child: the main thread may have forked while the created thread had
3101  * partially initialized the allocator.  Ordinarily jemalloc prevents
3102  * fork/malloc races via the following functions it registers during
3103  * initialization using pthread_atfork(), but of course that does no good if
3104  * the allocator isn't fully initialized at fork time.  The following library
3105  * constructor is a partial solution to this problem.  It may still be possible
3106  * to trigger the deadlock described above, but doing so would involve forking
3107  * via a library constructor that runs before jemalloc's runs.
3108  */
3109 #ifndef JEMALLOC_JET
3110 JEMALLOC_ATTR(constructor)
3111 static void
3112 jemalloc_constructor(void) {
3113 	malloc_init();
3114 }
3115 #endif
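
/*
 * Registration sketch (illustrative; the actual registration happens during
 * allocator initialization via malloc_init(), not here): on configurations
 * without JEMALLOC_MUTEX_INIT_CB the handlers below are wired up roughly as
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * whereas FreeBSD's threading library invokes the _malloc_prefork() and
 * _malloc_postfork() entry points directly.  Either way, the allocator
 * mutexes are acquired before fork(), released in the parent afterward, and
 * reinitialized in the child.
 */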
3116 
3117 #ifndef JEMALLOC_MUTEX_INIT_CB
3118 void
3119 jemalloc_prefork(void)
3120 #else
3121 JEMALLOC_EXPORT void
3122 _malloc_prefork(void)
3123 #endif
3124 {
3125 	tsd_t *tsd;
3126 	unsigned i, j, narenas;
3127 	arena_t *arena;
3128 
3129 #ifdef JEMALLOC_MUTEX_INIT_CB
3130 	if (!malloc_initialized()) {
3131 		return;
3132 	}
3133 #endif
3134 	assert(malloc_initialized());
3135 
3136 	tsd = tsd_fetch();
3137 
3138 	narenas = narenas_total_get();
3139 
3140 	witness_prefork(tsd_witness_tsdp_get(tsd));
3141 	/* Acquire all mutexes in a safe order. */
3142 	ctl_prefork(tsd_tsdn(tsd));
3143 	tcache_prefork(tsd_tsdn(tsd));
3144 	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
3145 	if (have_background_thread) {
3146 		background_thread_prefork0(tsd_tsdn(tsd));
3147 	}
3148 	prof_prefork0(tsd_tsdn(tsd));
3149 	if (have_background_thread) {
3150 		background_thread_prefork1(tsd_tsdn(tsd));
3151 	}
3152 	/* Break arena prefork into stages to preserve lock order. */
3153 	for (i = 0; i < 8; i++) {
3154 		for (j = 0; j < narenas; j++) {
3155 			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
3156 			    NULL) {
3157 				switch (i) {
3158 				case 0:
3159 					arena_prefork0(tsd_tsdn(tsd), arena);
3160 					break;
3161 				case 1:
3162 					arena_prefork1(tsd_tsdn(tsd), arena);
3163 					break;
3164 				case 2:
3165 					arena_prefork2(tsd_tsdn(tsd), arena);
3166 					break;
3167 				case 3:
3168 					arena_prefork3(tsd_tsdn(tsd), arena);
3169 					break;
3170 				case 4:
3171 					arena_prefork4(tsd_tsdn(tsd), arena);
3172 					break;
3173 				case 5:
3174 					arena_prefork5(tsd_tsdn(tsd), arena);
3175 					break;
3176 				case 6:
3177 					arena_prefork6(tsd_tsdn(tsd), arena);
3178 					break;
3179 				case 7:
3180 					arena_prefork7(tsd_tsdn(tsd), arena);
3181 					break;
3182 				default: not_reached();
3183 				}
3184 			}
3185 		}
3186 	}
3187 	prof_prefork1(tsd_tsdn(tsd));
3188 }
3189 
3190 #ifndef JEMALLOC_MUTEX_INIT_CB
3191 void
3192 jemalloc_postfork_parent(void)
3193 #else
3194 JEMALLOC_EXPORT void
3195 _malloc_postfork(void)
3196 #endif
3197 {
3198 	tsd_t *tsd;
3199 	unsigned i, narenas;
3200 
3201 #ifdef JEMALLOC_MUTEX_INIT_CB
3202 	if (!malloc_initialized()) {
3203 		return;
3204 	}
3205 #endif
3206 	assert(malloc_initialized());
3207 
3208 	tsd = tsd_fetch();
3209 
3210 	witness_postfork_parent(tsd_witness_tsdp_get(tsd));
3211 	/* Release all mutexes, now that fork() has completed. */
3212 	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3213 		arena_t *arena;
3214 
3215 		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3216 			arena_postfork_parent(tsd_tsdn(tsd), arena);
3217 		}
3218 	}
3219 	prof_postfork_parent(tsd_tsdn(tsd));
3220 	if (have_background_thread) {
3221 		background_thread_postfork_parent(tsd_tsdn(tsd));
3222 	}
3223 	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
3224 	tcache_postfork_parent(tsd_tsdn(tsd));
3225 	ctl_postfork_parent(tsd_tsdn(tsd));
3226 }
3227 
3228 void
3229 jemalloc_postfork_child(void) {
3230 	tsd_t *tsd;
3231 	unsigned i, narenas;
3232 
3233 	assert(malloc_initialized());
3234 
3235 	tsd = tsd_fetch();
3236 
3237 	witness_postfork_child(tsd_witness_tsdp_get(tsd));
3238 	/* Release all mutexes, now that fork() has completed. */
3239 	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3240 		arena_t *arena;
3241 
3242 		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3243 			arena_postfork_child(tsd_tsdn(tsd), arena);
3244 		}
3245 	}
3246 	prof_postfork_child(tsd_tsdn(tsd));
3247 	if (have_background_thread) {
3248 		background_thread_postfork_child(tsd_tsdn(tsd));
3249 	}
3250 	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
3251 	tcache_postfork_child(tsd_tsdn(tsd));
3252 	ctl_postfork_child(tsd_tsdn(tsd));
3253 }
3254 
3255 void
3256 _malloc_first_thread(void)
3257 {
3258 
3259 	(void)malloc_mutex_first_thread();
3260 }
3261 
3262 /******************************************************************************/
3263