xref: /freebsd/contrib/jemalloc/src/jemalloc.c (revision 9fc5c47fa5c7fa58d61245f0408611943e613164)
1 #define	JEMALLOC_C_
2 #include "jemalloc/internal/jemalloc_internal.h"
3 
4 /******************************************************************************/
5 /* Data. */
6 
7 /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
8 const char	*__malloc_options_1_0 = NULL;
9 __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
10 
11 /* Runtime configuration options. */
12 const char	*je_malloc_conf JEMALLOC_ATTR(weak);
13 bool	opt_abort =
14 #ifdef JEMALLOC_DEBUG
15     true
16 #else
17     false
18 #endif
19     ;
20 const char	*opt_junk =
21 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
22     "true"
23 #else
24     "false"
25 #endif
26     ;
27 bool	opt_junk_alloc =
28 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
29     true
30 #else
31     false
32 #endif
33     ;
34 bool	opt_junk_free =
35 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
36     true
37 #else
38     false
39 #endif
40     ;
41 
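/*
 * The three opt_junk* variables above encode one logical option: opt_junk
 * holds the canonical string form ("true", "false", "alloc", or "free"),
 * while opt_junk_alloc/opt_junk_free are the derived booleans consumed on
 * the allocation and deallocation paths.  malloc_conf_init() keeps all
 * three in sync when the "junk" conf option is parsed.
 */
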
42 size_t	opt_quarantine = ZU(0);
43 bool	opt_redzone = false;
44 bool	opt_utrace = false;
45 bool	opt_xmalloc = false;
46 bool	opt_zero = false;
47 size_t	opt_narenas = 0;
48 
49 /* Initialized to true if the process is running inside Valgrind. */
50 bool	in_valgrind;
51 
52 unsigned	ncpus;
53 
54 /* Protects arenas initialization (arenas, narenas_total). */
55 static malloc_mutex_t	arenas_lock;
56 /*
57  * Arenas that are used to service external requests.  Not all elements of the
58  * arenas array are necessarily used; arenas are created lazily as needed.
59  *
60  * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
61  * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
62  * takes some action to create them and allocate from them.
63  */
64 static arena_t		**arenas;
65 static unsigned		narenas_total;
66 static arena_t		*a0; /* arenas[0]; read-only after initialization. */
67 static unsigned		narenas_auto; /* Read-only after initialization. */
68 
69 typedef enum {
70 	malloc_init_uninitialized	= 3,
71 	malloc_init_a0_initialized	= 2,
72 	malloc_init_recursible		= 1,
73 	malloc_init_initialized		= 0 /* Common case --> jnz. */
74 } malloc_init_t;
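/*
 * The states above are ordered so that malloc_init_initialized is zero: the
 * hot-path test (malloc_init_state == malloc_init_initialized) then reduces
 * to a compare against zero, which is what the "jnz" note refers to.
 */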
75 static malloc_init_t	malloc_init_state = malloc_init_uninitialized;
76 
77 JEMALLOC_ALIGNED(CACHELINE)
78 const size_t	index2size_tab[NSIZES] = {
79 #define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
80 	((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
81 	SIZE_CLASSES
82 #undef SC
83 };
84 
85 JEMALLOC_ALIGNED(CACHELINE)
86 const uint8_t	size2index_tab[] = {
87 #if LG_TINY_MIN == 0
88 #warning "Dangerous LG_TINY_MIN"
89 #define	S2B_0(i)	i,
90 #elif LG_TINY_MIN == 1
91 #warning "Dangerous LG_TINY_MIN"
92 #define	S2B_1(i)	i,
93 #elif LG_TINY_MIN == 2
94 #warning "Dangerous LG_TINY_MIN"
95 #define	S2B_2(i)	i,
96 #elif LG_TINY_MIN == 3
97 #define	S2B_3(i)	i,
98 #elif LG_TINY_MIN == 4
99 #define	S2B_4(i)	i,
100 #elif LG_TINY_MIN == 5
101 #define	S2B_5(i)	i,
102 #elif LG_TINY_MIN == 6
103 #define	S2B_6(i)	i,
104 #elif LG_TINY_MIN == 7
105 #define	S2B_7(i)	i,
106 #elif LG_TINY_MIN == 8
107 #define	S2B_8(i)	i,
108 #elif LG_TINY_MIN == 9
109 #define	S2B_9(i)	i,
110 #elif LG_TINY_MIN == 10
111 #define	S2B_10(i)	i,
112 #elif LG_TINY_MIN == 11
113 #define	S2B_11(i)	i,
114 #else
115 #error "Unsupported LG_TINY_MIN"
116 #endif
117 #if LG_TINY_MIN < 1
118 #define	S2B_1(i)	S2B_0(i) S2B_0(i)
119 #endif
120 #if LG_TINY_MIN < 2
121 #define	S2B_2(i)	S2B_1(i) S2B_1(i)
122 #endif
123 #if LG_TINY_MIN < 3
124 #define	S2B_3(i)	S2B_2(i) S2B_2(i)
125 #endif
126 #if LG_TINY_MIN < 4
127 #define	S2B_4(i)	S2B_3(i) S2B_3(i)
128 #endif
129 #if LG_TINY_MIN < 5
130 #define	S2B_5(i)	S2B_4(i) S2B_4(i)
131 #endif
132 #if LG_TINY_MIN < 6
133 #define	S2B_6(i)	S2B_5(i) S2B_5(i)
134 #endif
135 #if LG_TINY_MIN < 7
136 #define	S2B_7(i)	S2B_6(i) S2B_6(i)
137 #endif
138 #if LG_TINY_MIN < 8
139 #define	S2B_8(i)	S2B_7(i) S2B_7(i)
140 #endif
141 #if LG_TINY_MIN < 9
142 #define	S2B_9(i)	S2B_8(i) S2B_8(i)
143 #endif
144 #if LG_TINY_MIN < 10
145 #define	S2B_10(i)	S2B_9(i) S2B_9(i)
146 #endif
147 #if LG_TINY_MIN < 11
148 #define	S2B_11(i)	S2B_10(i) S2B_10(i)
149 #endif
150 #define	S2B_no(i)
151 #define	SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
152 	S2B_##lg_delta_lookup(index)
153 	SIZE_CLASSES
154 #undef S2B_3
155 #undef S2B_4
156 #undef S2B_5
157 #undef S2B_6
158 #undef S2B_7
159 #undef S2B_8
160 #undef S2B_9
161 #undef S2B_10
162 #undef S2B_11
163 #undef S2B_no
164 #undef SC
165 };
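/*
 * How the table above expands: for lg_delta_lookup == n (with n >=
 * LG_TINY_MIN), S2B_##n(index) emits 2^(n - LG_TINY_MIN) copies of
 * "index,", so each lookup-eligible size class contributes one entry per
 * 2^LG_TINY_MIN-byte quantum it spans.  S2B_no() emits nothing for size
 * classes that are too large for table lookup and are instead resolved
 * arithmetically.
 */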
166 
167 #ifdef JEMALLOC_THREADED_INIT
168 /* Used to let the initializing thread recursively allocate. */
169 #  define NO_INITIALIZER	((unsigned long)0)
170 #  define INITIALIZER		pthread_self()
171 #  define IS_INITIALIZER	(malloc_initializer == pthread_self())
172 static pthread_t		malloc_initializer = NO_INITIALIZER;
173 #else
174 #  define NO_INITIALIZER	false
175 #  define INITIALIZER		true
176 #  define IS_INITIALIZER	malloc_initializer
177 static bool			malloc_initializer = NO_INITIALIZER;
178 #endif
179 
180 /* Used to avoid initialization races. */
181 #ifdef _WIN32
182 #if _WIN32_WINNT >= 0x0600
183 static malloc_mutex_t	init_lock = SRWLOCK_INIT;
184 #else
185 static malloc_mutex_t	init_lock;
186 
187 JEMALLOC_ATTR(constructor)
188 static void WINAPI
189 _init_init_lock(void)
190 {
191 
192 	malloc_mutex_init(&init_lock);
193 }
194 
195 #ifdef _MSC_VER
196 #  pragma section(".CRT$XCU", read)
197 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
198 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
199 #endif
200 #endif
201 #else
202 static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
203 #endif
204 
205 typedef struct {
206 	void	*p;	/* Input pointer (as in realloc(p, s)). */
207 	size_t	s;	/* Request size. */
208 	void	*r;	/* Result pointer. */
209 } malloc_utrace_t;
210 
211 #ifdef JEMALLOC_UTRACE
212 #  define UTRACE(a, b, c) do {						\
213 	if (unlikely(opt_utrace)) {					\
214 		int utrace_serrno = errno;				\
215 		malloc_utrace_t ut;					\
216 		ut.p = (a);						\
217 		ut.s = (b);						\
218 		ut.r = (c);						\
219 		utrace(&ut, sizeof(ut));				\
220 		errno = utrace_serrno;					\
221 	}								\
222 } while (0)
223 #else
224 #  define UTRACE(a, b, c)
225 #endif
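/*
 * Note that UTRACE() saves and restores errno around utrace(2), so emitting
 * a trace record cannot perturb the errno value that malloc(3) et al.
 * report to the caller.  On FreeBSD the records can be inspected with
 * ktrace(1)/kdump(1).
 */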
226 
227 /******************************************************************************/
228 /*
229  * Function prototypes for static functions that are referenced prior to
230  * definition.
231  */
232 
233 static bool	malloc_init_hard_a0(void);
234 static bool	malloc_init_hard(void);
235 
236 /******************************************************************************/
237 /*
238  * Begin miscellaneous support functions.
239  */
240 
241 JEMALLOC_ALWAYS_INLINE_C bool
242 malloc_initialized(void)
243 {
244 
245 	return (malloc_init_state == malloc_init_initialized);
246 }
247 
248 JEMALLOC_ALWAYS_INLINE_C void
249 malloc_thread_init(void)
250 {
251 
252 	/*
253 	 * TSD initialization can't be safely done as a side effect of
254 	 * deallocation, because it is possible for a thread to do nothing but
255 	 * deallocate its TLS data via free(), in which case writing to TLS
256 	 * would cause write-after-free memory corruption.  The quarantine
257 	 * facility *only* gets used as a side effect of deallocation, so make
258 	 * a best effort attempt at initializing its TSD by hooking all
259 	 * allocation events.
260 	 */
261 	if (config_fill && unlikely(opt_quarantine))
262 		quarantine_alloc_hook();
263 }
264 
265 JEMALLOC_ALWAYS_INLINE_C bool
266 malloc_init_a0(void)
267 {
268 
269 	if (unlikely(malloc_init_state == malloc_init_uninitialized))
270 		return (malloc_init_hard_a0());
271 	return (false);
272 }
273 
274 JEMALLOC_ALWAYS_INLINE_C bool
275 malloc_init(void)
276 {
277 
278 	if (unlikely(!malloc_initialized()) && malloc_init_hard())
279 		return (true);
280 	malloc_thread_init();
281 
282 	return (false);
283 }
284 
285 /*
286  * The a0*() functions are used instead of i[mcd]alloc() in situations that
287  * cannot tolerate TLS variable access.
288  */
289 
290 arena_t *
291 a0get(void)
292 {
293 
294 	assert(a0 != NULL);
295 	return (a0);
296 }
297 
298 static void *
299 a0ialloc(size_t size, bool zero, bool is_metadata)
300 {
301 
302 	if (unlikely(malloc_init_a0()))
303 		return (NULL);
304 
305 	return (iallocztm(NULL, size, zero, false, is_metadata, a0get()));
306 }
307 
308 static void
309 a0idalloc(void *ptr, bool is_metadata)
310 {
311 
312 	idalloctm(NULL, ptr, false, is_metadata);
313 }
314 
315 void *
316 a0malloc(size_t size)
317 {
318 
319 	return (a0ialloc(size, false, true));
320 }
321 
322 void
323 a0dalloc(void *ptr)
324 {
325 
326 	a0idalloc(ptr, true);
327 }
328 
329 /*
330  * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
331  * situations that cannot tolerate TLS variable access (TLS allocation and very
332  * early internal data structure initialization).
333  */
334 
335 void *
336 bootstrap_malloc(size_t size)
337 {
338 
339 	if (unlikely(size == 0))
340 		size = 1;
341 
342 	return (a0ialloc(size, false, false));
343 }
344 
345 void *
346 bootstrap_calloc(size_t num, size_t size)
347 {
348 	size_t num_size;
349 
350 	num_size = num * size;
351 	if (unlikely(num_size == 0)) {
352 		assert(num == 0 || size == 0);
353 		num_size = 1;
354 	}
355 
356 	return (a0ialloc(num_size, true, false));
357 }
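/*
 * Unlike je_calloc(), bootstrap_calloc() performs no overflow check on
 * num * size; it is reachable only from the libc bootstrap paths described
 * above, which are trusted to pass sane operands.
 */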
358 
359 void
360 bootstrap_free(void *ptr)
361 {
362 
363 	if (unlikely(ptr == NULL))
364 		return;
365 
366 	a0idalloc(ptr, false);
367 }
368 
369 /* Create a new arena and insert it into the arenas array at index ind. */
370 static arena_t *
371 arena_init_locked(unsigned ind)
372 {
373 	arena_t *arena;
374 
375 	/* Expand arenas if necessary. */
376 	assert(ind <= narenas_total);
377 	if (ind > MALLOCX_ARENA_MAX)
378 		return (NULL);
379 	if (ind == narenas_total) {
380 		unsigned narenas_new = narenas_total + 1;
381 		arena_t **arenas_new =
382 		    (arena_t **)a0malloc(CACHELINE_CEILING(narenas_new *
383 		    sizeof(arena_t *)));
384 		if (arenas_new == NULL)
385 			return (NULL);
386 		memcpy(arenas_new, arenas, narenas_total * sizeof(arena_t *));
387 		arenas_new[ind] = NULL;
388 		/*
389 		 * Deallocate only if arenas came from a0malloc() (not
390 		 * base_alloc()).
391 		 */
392 		if (narenas_total != narenas_auto)
393 			a0dalloc(arenas);
394 		arenas = arenas_new;
395 		narenas_total = narenas_new;
396 	}
397 
398 	/*
399 	 * Another thread may have already initialized arenas[ind] if it's an
400 	 * auto arena.
401 	 */
402 	arena = arenas[ind];
403 	if (arena != NULL) {
404 		assert(ind < narenas_auto);
405 		return (arena);
406 	}
407 
408 	/* Actually initialize the arena. */
409 	arena = arenas[ind] = arena_new(ind);
410 	return (arena);
411 }
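/*
 * Note on the reallocation above: the arenas array grows by exactly one
 * slot per new arena, which is acceptable because manual arena creation
 * (the "arenas.extend" mallctl) is rare and the array holds only pointers.
 * The conditional a0dalloc() is an ownership test: while narenas_total ==
 * narenas_auto the array is either the bootstrap &a0 or base_alloc()ed and
 * must not be freed; any grown copy came from a0malloc().
 */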
412 
413 arena_t *
414 arena_init(unsigned ind)
415 {
416 	arena_t *arena;
417 
418 	malloc_mutex_lock(&arenas_lock);
419 	arena = arena_init_locked(ind);
420 	malloc_mutex_unlock(&arenas_lock);
421 	return (arena);
422 }
423 
424 unsigned
425 narenas_total_get(void)
426 {
427 	unsigned narenas;
428 
429 	malloc_mutex_lock(&arenas_lock);
430 	narenas = narenas_total;
431 	malloc_mutex_unlock(&arenas_lock);
432 
433 	return (narenas);
434 }
435 
436 static void
437 arena_bind_locked(tsd_t *tsd, unsigned ind)
438 {
439 	arena_t *arena;
440 
441 	arena = arenas[ind];
442 	arena->nthreads++;
443 
444 	if (tsd_nominal(tsd))
445 		tsd_arena_set(tsd, arena);
446 }
447 
448 static void
449 arena_bind(tsd_t *tsd, unsigned ind)
450 {
451 
452 	malloc_mutex_lock(&arenas_lock);
453 	arena_bind_locked(tsd, ind);
454 	malloc_mutex_unlock(&arenas_lock);
455 }
456 
457 void
458 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
459 {
460 	arena_t *oldarena, *newarena;
461 
462 	malloc_mutex_lock(&arenas_lock);
463 	oldarena = arenas[oldind];
464 	newarena = arenas[newind];
465 	oldarena->nthreads--;
466 	newarena->nthreads++;
467 	malloc_mutex_unlock(&arenas_lock);
468 	tsd_arena_set(tsd, newarena);
469 }
470 
471 unsigned
472 arena_nbound(unsigned ind)
473 {
474 	unsigned nthreads;
475 
476 	malloc_mutex_lock(&arenas_lock);
477 	nthreads = arenas[ind]->nthreads;
478 	malloc_mutex_unlock(&arenas_lock);
479 	return (nthreads);
480 }
481 
482 static void
483 arena_unbind(tsd_t *tsd, unsigned ind)
484 {
485 	arena_t *arena;
486 
487 	malloc_mutex_lock(&arenas_lock);
488 	arena = arenas[ind];
489 	arena->nthreads--;
490 	malloc_mutex_unlock(&arenas_lock);
491 	tsd_arena_set(tsd, NULL);
492 }
493 
494 arena_t *
495 arena_get_hard(tsd_t *tsd, unsigned ind, bool init_if_missing)
496 {
497 	arena_t *arena;
498 	arena_t **arenas_cache = tsd_arenas_cache_get(tsd);
499 	unsigned narenas_cache = tsd_narenas_cache_get(tsd);
500 	unsigned narenas_actual = narenas_total_get();
501 
502 	/* Deallocate old cache if it's too small. */
503 	if (arenas_cache != NULL && narenas_cache < narenas_actual) {
504 		a0dalloc(arenas_cache);
505 		arenas_cache = NULL;
506 		narenas_cache = 0;
507 		tsd_arenas_cache_set(tsd, arenas_cache);
508 		tsd_narenas_cache_set(tsd, narenas_cache);
509 	}
510 
511 	/* Allocate cache if it's missing. */
512 	if (arenas_cache == NULL) {
513 		bool *arenas_cache_bypassp = tsd_arenas_cache_bypassp_get(tsd);
514 		assert(ind < narenas_actual || !init_if_missing);
515 		narenas_cache = (ind < narenas_actual) ? narenas_actual : ind+1;
516 
517 		if (!*arenas_cache_bypassp) {
518 			*arenas_cache_bypassp = true;
519 			arenas_cache = (arena_t **)a0malloc(sizeof(arena_t *) *
520 			    narenas_cache);
521 			*arenas_cache_bypassp = false;
522 		} else
523 			arenas_cache = NULL;
524 		if (arenas_cache == NULL) {
525 			/*
526 			 * This function must always tell the truth, even if
527 			 * it's slow, so don't let OOM or recursive allocation
528 			 * avoidance (note arenas_cache_bypass check) get in the
529 			 * way.
530 			 */
531 			if (ind >= narenas_actual)
532 				return (NULL);
533 			malloc_mutex_lock(&arenas_lock);
534 			arena = arenas[ind];
535 			malloc_mutex_unlock(&arenas_lock);
536 			return (arena);
537 		}
538 		tsd_arenas_cache_set(tsd, arenas_cache);
539 		tsd_narenas_cache_set(tsd, narenas_cache);
540 	}
541 
542 	/*
543 	 * Copy to cache.  It's possible that the actual number of arenas has
544 	 * increased since narenas_total_get() was called above, but that causes
545 	 * no correctness issues unless two threads concurrently execute the
546 	 * arenas.extend mallctl, which we trust mallctl synchronization to
547 	 * prevent.
548 	 */
549 	malloc_mutex_lock(&arenas_lock);
550 	memcpy(arenas_cache, arenas, sizeof(arena_t *) * narenas_actual);
551 	malloc_mutex_unlock(&arenas_lock);
552 	if (narenas_cache > narenas_actual) {
553 		memset(&arenas_cache[narenas_actual], 0, sizeof(arena_t *) *
554 		    (narenas_cache - narenas_actual));
555 	}
556 
557 	/* Read the refreshed cache, and init the arena if necessary. */
558 	arena = arenas_cache[ind];
559 	if (init_if_missing && arena == NULL)
560 		arena = arenas_cache[ind] = arena_init(ind);
561 	return (arena);
562 }
563 
564 /* Slow path, called only by arena_choose(). */
565 arena_t *
566 arena_choose_hard(tsd_t *tsd)
567 {
568 	arena_t *ret;
569 
570 	if (narenas_auto > 1) {
571 		unsigned i, choose, first_null;
572 
573 		choose = 0;
574 		first_null = narenas_auto;
575 		malloc_mutex_lock(&arenas_lock);
576 		assert(a0get() != NULL);
577 		for (i = 1; i < narenas_auto; i++) {
578 			if (arenas[i] != NULL) {
579 				/*
580 				 * Choose the first arena that has the lowest
581 				 * number of threads assigned to it.
582 				 */
583 				if (arenas[i]->nthreads <
584 				    arenas[choose]->nthreads)
585 					choose = i;
586 			} else if (first_null == narenas_auto) {
587 				/*
588 				 * Record the index of the first uninitialized
589 				 * arena, in case all extant arenas are in use.
590 				 *
591 				 * NB: It is possible for there to be
592 				 * discontinuities in terms of initialized
593 				 * versus uninitialized arenas, due to the
594 				 * "thread.arena" mallctl.
595 				 */
596 				first_null = i;
597 			}
598 		}
599 
600 		if (arenas[choose]->nthreads == 0
601 		    || first_null == narenas_auto) {
602 			/*
603 			 * Use an unloaded arena, or the least loaded arena if
604 			 * all arenas are already initialized.
605 			 */
606 			ret = arenas[choose];
607 		} else {
608 			/* Initialize a new arena. */
609 			choose = first_null;
610 			ret = arena_init_locked(choose);
611 			if (ret == NULL) {
612 				malloc_mutex_unlock(&arenas_lock);
613 				return (NULL);
614 			}
615 		}
616 		arena_bind_locked(tsd, choose);
617 		malloc_mutex_unlock(&arenas_lock);
618 	} else {
619 		ret = a0get();
620 		arena_bind(tsd, 0);
621 	}
622 
623 	return (ret);
624 }
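/*
 * Assignment policy implemented above: if the least-loaded initialized
 * arena is idle (zero bound threads), or if every automatic arena already
 * exists, reuse the least-loaded arena; otherwise initialize the first
 * unused slot rather than share a loaded arena between threads.
 */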
625 
626 void
627 thread_allocated_cleanup(tsd_t *tsd)
628 {
629 
630 	/* Do nothing. */
631 }
632 
633 void
634 thread_deallocated_cleanup(tsd_t *tsd)
635 {
636 
637 	/* Do nothing. */
638 }
639 
640 void
641 arena_cleanup(tsd_t *tsd)
642 {
643 	arena_t *arena;
644 
645 	arena = tsd_arena_get(tsd);
646 	if (arena != NULL)
647 		arena_unbind(tsd, arena->ind);
648 }
649 
650 void
651 arenas_cache_cleanup(tsd_t *tsd)
652 {
653 	arena_t **arenas_cache;
654 
655 	arenas_cache = tsd_arenas_cache_get(tsd);
656 	if (arenas_cache != NULL)
657 		a0dalloc(arenas_cache);
658 }
659 
660 void
661 narenas_cache_cleanup(tsd_t *tsd)
662 {
663 
664 	/* Do nothing. */
665 }
666 
667 void
668 arenas_cache_bypass_cleanup(tsd_t *tsd)
669 {
670 
671 	/* Do nothing. */
672 }
673 
674 static void
675 stats_print_atexit(void)
676 {
677 
678 	if (config_tcache && config_stats) {
679 		unsigned narenas, i;
680 
681 		/*
682 		 * Merge stats from extant threads.  This is racy, since
683 		 * individual threads do not lock when recording tcache stats
684 		 * events.  As a consequence, the final stats may be slightly
685 		 * out of date by the time they are reported, if other threads
686 		 * continue to allocate.
687 		 */
688 		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
689 			arena_t *arena = arenas[i];
690 			if (arena != NULL) {
691 				tcache_t *tcache;
692 
693 				/*
694 				 * tcache_stats_merge() locks bins, so if any
695 				 * code is introduced that acquires both arena
696 				 * and bin locks in the opposite order,
697 				 * deadlocks may result.
698 				 */
699 				malloc_mutex_lock(&arena->lock);
700 				ql_foreach(tcache, &arena->tcache_ql, link) {
701 					tcache_stats_merge(tcache, arena);
702 				}
703 				malloc_mutex_unlock(&arena->lock);
704 			}
705 		}
706 	}
707 	je_malloc_stats_print(NULL, NULL, NULL);
708 }
709 
710 /*
711  * End miscellaneous support functions.
712  */
713 /******************************************************************************/
714 /*
715  * Begin initialization functions.
716  */
717 
718 #ifndef JEMALLOC_HAVE_SECURE_GETENV
719 static char *
720 secure_getenv(const char *name)
721 {
722 
723 #  ifdef JEMALLOC_HAVE_ISSETUGID
724 	if (issetugid() != 0)
725 		return (NULL);
726 #  endif
727 	return (getenv(name));
728 }
729 #endif
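/*
 * The fallback above mirrors glibc's secure_getenv(3): where issetugid(2)
 * is available, a set-user-ID or set-group-ID process ignores the
 * environment, so MALLOC_CONF cannot be used to influence privileged
 * programs.
 */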
730 
731 static unsigned
732 malloc_ncpus(void)
733 {
734 	long result;
735 
736 #ifdef _WIN32
737 	SYSTEM_INFO si;
738 	GetSystemInfo(&si);
739 	result = si.dwNumberOfProcessors;
740 #else
741 	result = sysconf(_SC_NPROCESSORS_ONLN);
742 #endif
743 	return ((result == -1) ? 1 : (unsigned)result);
744 }
745 
746 static bool
747 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
748     char const **v_p, size_t *vlen_p)
749 {
750 	bool accept;
751 	const char *opts = *opts_p;
752 
753 	*k_p = opts;
754 
755 	for (accept = false; !accept;) {
756 		switch (*opts) {
757 		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
758 		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
759 		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
760 		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
761 		case 'Y': case 'Z':
762 		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
763 		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
764 		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
765 		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
766 		case 'y': case 'z':
767 		case '0': case '1': case '2': case '3': case '4': case '5':
768 		case '6': case '7': case '8': case '9':
769 		case '_':
770 			opts++;
771 			break;
772 		case ':':
773 			opts++;
774 			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
775 			*v_p = opts;
776 			accept = true;
777 			break;
778 		case '\0':
779 			if (opts != *opts_p) {
780 				malloc_write("<jemalloc>: Conf string ends "
781 				    "with key\n");
782 			}
783 			return (true);
784 		default:
785 			malloc_write("<jemalloc>: Malformed conf string\n");
786 			return (true);
787 		}
788 	}
789 
790 	for (accept = false; !accept;) {
791 		switch (*opts) {
792 		case ',':
793 			opts++;
794 			/*
795 			 * Look ahead one character here, because the next time
796 			 * this function is called, it will assume that end of
797 			 * input has been cleanly reached if no input remains,
798 			 * but we have optimistically already consumed the
799 			 * comma if one exists.
800 			 */
801 			if (*opts == '\0') {
802 				malloc_write("<jemalloc>: Conf string ends "
803 				    "with comma\n");
804 			}
805 			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
806 			accept = true;
807 			break;
808 		case '\0':
809 			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
810 			accept = true;
811 			break;
812 		default:
813 			opts++;
814 			break;
815 		}
816 	}
817 
818 	*opts_p = opts;
819 	return (false);
820 }
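/*
 * Summary of the grammar accepted above: a conf string is a comma-separated
 * list of <key>:<value> pairs, where keys match [A-Za-z0-9_]+.  For
 * example, parsing "abort:true,narenas:4" yields (k = "abort", klen = 5,
 * v = "true", vlen = 4) on the first call and (k = "narenas", klen = 7,
 * v = "4", vlen = 1) on the second.
 */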
821 
822 static void
823 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
824     size_t vlen)
825 {
826 
827 	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
828 	    (int)vlen, v);
829 }
830 
831 static void
832 malloc_conf_init(void)
833 {
834 	unsigned i;
835 	char buf[PATH_MAX + 1];
836 	const char *opts, *k, *v;
837 	size_t klen, vlen;
838 
839 	/*
840 	 * Automatically configure valgrind before processing options.  The
841 	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
842 	 */
843 	if (config_valgrind) {
844 		in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
845 		if (config_fill && unlikely(in_valgrind)) {
846 			opt_junk = "false";
847 			opt_junk_alloc = false;
848 			opt_junk_free = false;
849 			assert(!opt_zero);
850 			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
851 			opt_redzone = true;
852 		}
853 		if (config_tcache && unlikely(in_valgrind))
854 			opt_tcache = false;
855 	}
856 
857 	for (i = 0; i < 3; i++) {
858 		/* Get runtime configuration. */
859 		switch (i) {
860 		case 0:
861 			if (je_malloc_conf != NULL) {
862 				/*
863 				 * Use options that were compiled into the
864 				 * program.
865 				 */
866 				opts = je_malloc_conf;
867 			} else {
868 				/* No configuration specified. */
869 				buf[0] = '\0';
870 				opts = buf;
871 			}
872 			break;
873 		case 1: {
874 			int linklen = 0;
875 #ifndef _WIN32
876 			int saved_errno = errno;
877 			const char *linkname =
878 #  ifdef JEMALLOC_PREFIX
879 			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
880 #  else
881 			    "/etc/malloc.conf"
882 #  endif
883 			    ;
884 
885 			/*
886 			 * Try to use the contents of the "/etc/malloc.conf"
887 			 * symbolic link's name.
888 			 */
889 			linklen = readlink(linkname, buf, sizeof(buf) - 1);
890 			if (linklen == -1) {
891 				/* No configuration specified. */
892 				linklen = 0;
893 				/* Restore errno. */
894 				set_errno(saved_errno);
895 			}
896 #endif
897 			buf[linklen] = '\0';
898 			opts = buf;
899 			break;
900 		} case 2: {
901 			const char *envname =
902 #ifdef JEMALLOC_PREFIX
903 			    JEMALLOC_CPREFIX"MALLOC_CONF"
904 #else
905 			    "MALLOC_CONF"
906 #endif
907 			    ;
908 
909 			if ((opts = secure_getenv(envname)) != NULL) {
910 				/*
911 				 * Do nothing; opts is already initialized to
912 				 * the value of the MALLOC_CONF environment
913 				 * variable.
914 				 */
915 			} else {
916 				/* No configuration specified. */
917 				buf[0] = '\0';
918 				opts = buf;
919 			}
920 			break;
921 		} default:
922 			not_reached();
923 			buf[0] = '\0';
924 			opts = buf;
925 		}
926 
927 		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
928 		    &vlen)) {
929 #define	CONF_MATCH(n)							\
930 	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
931 #define	CONF_MATCH_VALUE(n)						\
932 	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
933 #define	CONF_HANDLE_BOOL(o, n, cont)					\
934 			if (CONF_MATCH(n)) {				\
935 				if (CONF_MATCH_VALUE("true"))		\
936 					o = true;			\
937 				else if (CONF_MATCH_VALUE("false"))	\
938 					o = false;			\
939 				else {					\
940 					malloc_conf_error(		\
941 					    "Invalid conf value",	\
942 					    k, klen, v, vlen);		\
943 				}					\
944 				if (cont)				\
945 					continue;			\
946 			}
947 #define	CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
948 			if (CONF_MATCH(n)) {				\
949 				uintmax_t um;				\
950 				char *end;				\
951 									\
952 				set_errno(0);				\
953 				um = malloc_strtoumax(v, &end, 0);	\
954 				if (get_errno() != 0 || (uintptr_t)end -\
955 				    (uintptr_t)v != vlen) {		\
956 					malloc_conf_error(		\
957 					    "Invalid conf value",	\
958 					    k, klen, v, vlen);		\
959 				} else if (clip) {			\
960 					if ((min) != 0 && um < (min))	\
961 						o = (min);		\
962 					else if (um > (max))		\
963 						o = (max);		\
964 					else				\
965 						o = um;			\
966 				} else {				\
967 					if (((min) != 0 && um < (min))	\
968 					    || um > (max)) {		\
969 						malloc_conf_error(	\
970 						    "Out-of-range "	\
971 						    "conf value",	\
972 						    k, klen, v, vlen);	\
973 					} else				\
974 						o = um;			\
975 				}					\
976 				continue;				\
977 			}
978 #define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
979 			if (CONF_MATCH(n)) {				\
980 				long l;					\
981 				char *end;				\
982 									\
983 				set_errno(0);				\
984 				l = strtol(v, &end, 0);			\
985 				if (get_errno() != 0 || (uintptr_t)end -\
986 				    (uintptr_t)v != vlen) {		\
987 					malloc_conf_error(		\
988 					    "Invalid conf value",	\
989 					    k, klen, v, vlen);		\
990 				} else if (l < (ssize_t)(min) || l >	\
991 				    (ssize_t)(max)) {			\
992 					malloc_conf_error(		\
993 					    "Out-of-range conf value",	\
994 					    k, klen, v, vlen);		\
995 				} else					\
996 					o = l;				\
997 				continue;				\
998 			}
999 #define	CONF_HANDLE_CHAR_P(o, n, d)					\
1000 			if (CONF_MATCH(n)) {				\
1001 				size_t cpylen = (vlen <=		\
1002 				    sizeof(o)-1) ? vlen :		\
1003 				    sizeof(o)-1;			\
1004 				strncpy(o, v, cpylen);			\
1005 				o[cpylen] = '\0';			\
1006 				continue;				\
1007 			}
1008 
1009 			CONF_HANDLE_BOOL(opt_abort, "abort", true)
1010 			/*
1011 			 * Chunks always require at least one header page,
1012 			 * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
1013 			 * possibly an additional page in the presence of
1014 			 * redzones.  In order to simplify options processing,
1015 			 * use a conservative bound that accommodates all these
1016 			 * constraints.
1017 			 */
1018 			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
1019 			    LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
1020 			    (sizeof(size_t) << 3) - 1, true)
1021 			if (strncmp("dss", k, klen) == 0) {
1022 				int i;
1023 				bool match = false;
1024 				for (i = 0; i < dss_prec_limit; i++) {
1025 					if (strncmp(dss_prec_names[i], v, vlen)
1026 					    == 0) {
1027 						if (chunk_dss_prec_set(i)) {
1028 							malloc_conf_error(
1029 							    "Error setting dss",
1030 							    k, klen, v, vlen);
1031 						} else {
1032 							opt_dss =
1033 							    dss_prec_names[i];
1034 							match = true;
1035 							break;
1036 						}
1037 					}
1038 				}
1039 				if (!match) {
1040 					malloc_conf_error("Invalid conf value",
1041 					    k, klen, v, vlen);
1042 				}
1043 				continue;
1044 			}
1045 			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
1046 			    SIZE_T_MAX, false)
1047 			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
1048 			    -1, (sizeof(size_t) << 3) - 1)
1049 			CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
1050 			if (config_fill) {
1051 				if (CONF_MATCH("junk")) {
1052 					if (CONF_MATCH_VALUE("true")) {
1053 						opt_junk = "true";
1054 						opt_junk_alloc = opt_junk_free =
1055 						    true;
1056 					} else if (CONF_MATCH_VALUE("false")) {
1057 						opt_junk = "false";
1058 						opt_junk_alloc = opt_junk_free =
1059 						    false;
1060 					} else if (CONF_MATCH_VALUE("alloc")) {
1061 						opt_junk = "alloc";
1062 						opt_junk_alloc = true;
1063 						opt_junk_free = false;
1064 					} else if (CONF_MATCH_VALUE("free")) {
1065 						opt_junk = "free";
1066 						opt_junk_alloc = false;
1067 						opt_junk_free = true;
1068 					} else {
1069 						malloc_conf_error(
1070 						    "Invalid conf value", k,
1071 						    klen, v, vlen);
1072 					}
1073 					continue;
1074 				}
1075 				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
1076 				    0, SIZE_T_MAX, false)
1077 				CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
1078 				CONF_HANDLE_BOOL(opt_zero, "zero", true)
1079 			}
1080 			if (config_utrace) {
1081 				CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
1082 			}
1083 			if (config_xmalloc) {
1084 				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
1085 			}
1086 			if (config_tcache) {
1087 				CONF_HANDLE_BOOL(opt_tcache, "tcache",
1088 				    !config_valgrind || !in_valgrind)
1089 				if (CONF_MATCH("tcache")) {
1090 					assert(config_valgrind && in_valgrind);
1091 					if (opt_tcache) {
1092 						opt_tcache = false;
1093 						malloc_conf_error(
1094 						"tcache cannot be enabled "
1095 						"while running inside Valgrind",
1096 						k, klen, v, vlen);
1097 					}
1098 					continue;
1099 				}
1100 				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
1101 				    "lg_tcache_max", -1,
1102 				    (sizeof(size_t) << 3) - 1)
1103 			}
1104 			if (config_prof) {
1105 				CONF_HANDLE_BOOL(opt_prof, "prof", true)
1106 				CONF_HANDLE_CHAR_P(opt_prof_prefix,
1107 				    "prof_prefix", "jeprof")
1108 				CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
1109 				    true)
1110 				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1111 				    "prof_thread_active_init", true)
1112 				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1113 				    "lg_prof_sample", 0,
1114 				    (sizeof(uint64_t) << 3) - 1, true)
1115 				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
1116 				    true)
1117 				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1118 				    "lg_prof_interval", -1,
1119 				    (sizeof(uint64_t) << 3) - 1)
1120 				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
1121 				    true)
1122 				CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
1123 				    true)
1124 				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
1125 				    true)
1126 			}
1127 			malloc_conf_error("Invalid conf pair", k, klen, v,
1128 			    vlen);
1129 #undef CONF_MATCH
1130 #undef CONF_HANDLE_BOOL
1131 #undef CONF_HANDLE_SIZE_T
1132 #undef CONF_HANDLE_SSIZE_T
1133 #undef CONF_HANDLE_CHAR_P
1134 		}
1135 	}
1136 }
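/*
 * The three passes above are processed in order -- (0) the compiled-in
 * je_malloc_conf string, (1) the name of the /etc/malloc.conf symbolic
 * link target, and (2) the MALLOC_CONF environment variable -- so when a
 * key appears in several sources, the last pass wins.  A minimal usage
 * sketch, assuming a default (unprefixed) build:
 *
 *	ln -s 'lg_chunk:22,narenas:2' /etc/malloc.conf
 *	MALLOC_CONF=narenas:4 ./a.out	# narenas becomes 4, lg_chunk stays 22
 */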
1137 
1138 /* init_lock must be held. */
1139 static bool
1140 malloc_init_hard_needed(void)
1141 {
1142 
1143 	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1144 	    malloc_init_recursible)) {
1145 		/*
1146 		 * Another thread initialized the allocator before this one
1147 		 * acquired init_lock, or this thread is the initializing
1148 		 * thread, and it is recursively allocating.
1149 		 */
1150 		return (false);
1151 	}
1152 #ifdef JEMALLOC_THREADED_INIT
1153 	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1154 		/* Busy-wait until the initializing thread completes. */
1155 		do {
1156 			malloc_mutex_unlock(&init_lock);
1157 			CPU_SPINWAIT;
1158 			malloc_mutex_lock(&init_lock);
1159 		} while (!malloc_initialized());
1160 		return (false);
1161 	}
1162 #endif
1163 	return (true);
1164 }
1165 
1166 /* init_lock must be held. */
1167 static bool
1168 malloc_init_hard_a0_locked(void)
1169 {
1170 
1171 	malloc_initializer = INITIALIZER;
1172 
1173 	if (config_prof)
1174 		prof_boot0();
1175 	malloc_conf_init();
1176 	if (opt_stats_print) {
1177 		/* Print statistics at exit. */
1178 		if (atexit(stats_print_atexit) != 0) {
1179 			malloc_write("<jemalloc>: Error in atexit()\n");
1180 			if (opt_abort)
1181 				abort();
1182 		}
1183 	}
1184 	if (base_boot())
1185 		return (true);
1186 	if (chunk_boot())
1187 		return (true);
1188 	if (ctl_boot())
1189 		return (true);
1190 	if (config_prof)
1191 		prof_boot1();
1192 	if (arena_boot())
1193 		return (true);
1194 	if (config_tcache && tcache_boot())
1195 		return (true);
1196 	if (malloc_mutex_init(&arenas_lock))
1197 		return (true);
1198 	/*
1199 	 * Create enough scaffolding to allow recursive allocation in
1200 	 * malloc_ncpus().
1201 	 */
1202 	narenas_total = narenas_auto = 1;
1203 	arenas = &a0;
1204 	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1205 	/*
1206 	 * Initialize one arena here.  The rest are lazily created in
1207 	 * arena_choose_hard().
1208 	 */
1209 	if (arena_init(0) == NULL)
1210 		return (true);
1211 	malloc_init_state = malloc_init_a0_initialized;
1212 	return (false);
1213 }
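/*
 * Bootstrap ordering above: base, chunk, and ctl come up before any arena
 * exists, and arenas temporarily aliases &a0 so that exactly one slot is
 * addressable; storing arena 0 through arenas[0] is what initializes a0
 * itself.  malloc_init_hard_finish() later installs the real array.
 */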
1214 
1215 static bool
1216 malloc_init_hard_a0(void)
1217 {
1218 	bool ret;
1219 
1220 	malloc_mutex_lock(&init_lock);
1221 	ret = malloc_init_hard_a0_locked();
1222 	malloc_mutex_unlock(&init_lock);
1223 	return (ret);
1224 }
1225 
1226 /*
1227  * Initialize data structures which may trigger recursive allocation.
1228  *
1229  * init_lock must be held.
1230  */
1231 static void
1232 malloc_init_hard_recursible(void)
1233 {
1234 
1235 	malloc_init_state = malloc_init_recursible;
1236 	malloc_mutex_unlock(&init_lock);
1237 
1238 	ncpus = malloc_ncpus();
1239 
1240 #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
1241     && !defined(_WIN32) && !defined(__native_client__))
1242 	/* LinuxThreads's pthread_atfork() allocates. */
1243 	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1244 	    jemalloc_postfork_child) != 0) {
1245 		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1246 		if (opt_abort)
1247 			abort();
1248 	}
1249 #endif
1250 	malloc_mutex_lock(&init_lock);
1251 }
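/*
 * Dropping init_lock above is deliberate: with the state set to
 * malloc_init_recursible, any allocation triggered by malloc_ncpus() or
 * pthread_atfork() re-enters malloc_init_hard(), is recognized via
 * IS_INITIALIZER in malloc_init_hard_needed(), and returns immediately
 * rather than deadlocking.
 */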
1252 
1253 /* init_lock must be held. */
1254 static bool
1255 malloc_init_hard_finish(void)
1256 {
1257 
1258 	if (mutex_boot())
1259 		return (true);
1260 
1261 	if (opt_narenas == 0) {
1262 		/*
1263 		 * For SMP systems, create more than one arena per CPU by
1264 		 * default.
1265 		 */
1266 		if (ncpus > 1)
1267 			opt_narenas = ncpus << 2;
1268 		else
1269 			opt_narenas = 1;
1270 	}
1271 	narenas_auto = opt_narenas;
1272 	/*
1273 	 * Make sure that the arenas array can be allocated.  In practice, this
1274 	 * limit is enough to allow the allocator to function, but the ctl
1275 	 * machinery will fail to allocate memory at far lower limits.
1276 	 */
1277 	if (narenas_auto > chunksize / sizeof(arena_t *)) {
1278 		narenas_auto = chunksize / sizeof(arena_t *);
1279 		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
1280 		    narenas_auto);
1281 	}
1282 	narenas_total = narenas_auto;
1283 
1284 	/* Allocate and initialize arenas. */
1285 	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
1286 	if (arenas == NULL)
1287 		return (true);
1288 	/*
1289 	 * Zero the array.  In practice, this should always be pre-zeroed,
1290 	 * since it was just mmap()ed, but let's be sure.
1291 	 */
1292 	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
1293 	/* Copy the pointer to the one arena that was already initialized. */
1294 	arenas[0] = a0;
1295 
1296 	malloc_init_state = malloc_init_initialized;
1297 	return (false);
1298 }
1299 
1300 static bool
1301 malloc_init_hard(void)
1302 {
1303 
1304 	malloc_mutex_lock(&init_lock);
1305 	if (!malloc_init_hard_needed()) {
1306 		malloc_mutex_unlock(&init_lock);
1307 		return (false);
1308 	}
1309 
1310 	if (malloc_init_state != malloc_init_a0_initialized &&
1311 	    malloc_init_hard_a0_locked()) {
1312 		malloc_mutex_unlock(&init_lock);
1313 		return (true);
1314 	}
1315 	if (malloc_tsd_boot0()) {
1316 		malloc_mutex_unlock(&init_lock);
1317 		return (true);
1318 	}
1319 	if (config_prof && prof_boot2()) {
1320 		malloc_mutex_unlock(&init_lock);
1321 		return (true);
1322 	}
1323 
1324 	malloc_init_hard_recursible();
1325 
1326 	if (malloc_init_hard_finish()) {
1327 		malloc_mutex_unlock(&init_lock);
1328 		return (true);
1329 	}
1330 
1331 	malloc_mutex_unlock(&init_lock);
1332 	malloc_tsd_boot1();
1333 	return (false);
1334 }
1335 
1336 /*
1337  * End initialization functions.
1338  */
1339 /******************************************************************************/
1340 /*
1341  * Begin malloc(3)-compatible functions.
1342  */
1343 
1344 static void *
1345 imalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
1346 {
1347 	void *p;
1348 
1349 	if (tctx == NULL)
1350 		return (NULL);
1351 	if (usize <= SMALL_MAXCLASS) {
1352 		p = imalloc(tsd, LARGE_MINCLASS);
1353 		if (p == NULL)
1354 			return (NULL);
1355 		arena_prof_promoted(p, usize);
1356 	} else
1357 		p = imalloc(tsd, usize);
1358 
1359 	return (p);
1360 }
1361 
1362 JEMALLOC_ALWAYS_INLINE_C void *
1363 imalloc_prof(tsd_t *tsd, size_t usize)
1364 {
1365 	void *p;
1366 	prof_tctx_t *tctx;
1367 
1368 	tctx = prof_alloc_prep(tsd, usize, true);
1369 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1370 		p = imalloc_prof_sample(tsd, usize, tctx);
1371 	else
1372 		p = imalloc(tsd, usize);
1373 	if (unlikely(p == NULL)) {
1374 		prof_alloc_rollback(tsd, tctx, true);
1375 		return (NULL);
1376 	}
1377 	prof_malloc(p, usize, tctx);
1378 
1379 	return (p);
1380 }
1381 
1382 JEMALLOC_ALWAYS_INLINE_C void *
1383 imalloc_body(size_t size, tsd_t **tsd, size_t *usize)
1384 {
1385 
1386 	if (unlikely(malloc_init()))
1387 		return (NULL);
1388 	*tsd = tsd_fetch();
1389 
1390 	if (config_prof && opt_prof) {
1391 		*usize = s2u(size);
1392 		if (unlikely(*usize == 0))
1393 			return (NULL);
1394 		return (imalloc_prof(*tsd, *usize));
1395 	}
1396 
1397 	if (config_stats || (config_valgrind && unlikely(in_valgrind)))
1398 		*usize = s2u(size);
1399 	return (imalloc(*tsd, size));
1400 }
1401 
1402 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1403 void JEMALLOC_NOTHROW *
1404 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
1405 je_malloc(size_t size)
1406 {
1407 	void *ret;
1408 	tsd_t *tsd;
1409 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1410 
1411 	if (size == 0)
1412 		size = 1;
1413 
1414 	ret = imalloc_body(size, &tsd, &usize);
1415 	if (unlikely(ret == NULL)) {
1416 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1417 			malloc_write("<jemalloc>: Error in malloc(): "
1418 			    "out of memory\n");
1419 			abort();
1420 		}
1421 		set_errno(ENOMEM);
1422 	}
1423 	if (config_stats && likely(ret != NULL)) {
1424 		assert(usize == isalloc(ret, config_prof));
1425 		*tsd_thread_allocatedp_get(tsd) += usize;
1426 	}
1427 	UTRACE(0, size, ret);
1428 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
1429 	return (ret);
1430 }
1431 
1432 static void *
1433 imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
1434     prof_tctx_t *tctx)
1435 {
1436 	void *p;
1437 
1438 	if (tctx == NULL)
1439 		return (NULL);
1440 	if (usize <= SMALL_MAXCLASS) {
1441 		assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
1442 		p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
1443 		if (p == NULL)
1444 			return (NULL);
1445 		arena_prof_promoted(p, usize);
1446 	} else
1447 		p = ipalloc(tsd, usize, alignment, false);
1448 
1449 	return (p);
1450 }
1451 
1452 JEMALLOC_ALWAYS_INLINE_C void *
1453 imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
1454 {
1455 	void *p;
1456 	prof_tctx_t *tctx;
1457 
1458 	tctx = prof_alloc_prep(tsd, usize, true);
1459 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1460 		p = imemalign_prof_sample(tsd, alignment, usize, tctx);
1461 	else
1462 		p = ipalloc(tsd, usize, alignment, false);
1463 	if (unlikely(p == NULL)) {
1464 		prof_alloc_rollback(tsd, tctx, true);
1465 		return (NULL);
1466 	}
1467 	prof_malloc(p, usize, tctx);
1468 
1469 	return (p);
1470 }
1471 
1472 JEMALLOC_ATTR(nonnull(1))
1473 static int
1474 imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
1475 {
1476 	int ret;
1477 	tsd_t *tsd;
1478 	size_t usize;
1479 	void *result;
1480 
1481 	assert(min_alignment != 0);
1482 
1483 	if (unlikely(malloc_init())) {
1484 		result = NULL;
1485 		goto label_oom;
1486 	}
1487 	tsd = tsd_fetch();
1488 	if (size == 0)
1489 		size = 1;
1490 
1491 	/* Make sure that alignment is a large enough power of 2. */
1492 	if (unlikely(((alignment - 1) & alignment) != 0
1493 	    || (alignment < min_alignment))) {
1494 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1495 			malloc_write("<jemalloc>: Error allocating "
1496 			    "aligned memory: invalid alignment\n");
1497 			abort();
1498 		}
1499 		result = NULL;
1500 		ret = EINVAL;
1501 		goto label_return;
1502 	}
1503 
1504 	usize = sa2u(size, alignment);
1505 	if (unlikely(usize == 0)) {
1506 		result = NULL;
1507 		goto label_oom;
1508 	}
1509 
1510 	if (config_prof && opt_prof)
1511 		result = imemalign_prof(tsd, alignment, usize);
1512 	else
1513 		result = ipalloc(tsd, usize, alignment, false);
1514 	if (unlikely(result == NULL))
1515 		goto label_oom;
1516 	assert(((uintptr_t)result & (alignment - 1)) == ZU(0));
1517 
1518 	*memptr = result;
1519 	ret = 0;
1520 label_return:
1521 	if (config_stats && likely(result != NULL)) {
1522 		assert(usize == isalloc(result, config_prof));
1523 		*tsd_thread_allocatedp_get(tsd) += usize;
1524 	}
1525 	UTRACE(0, size, result);
1526 	return (ret);
1527 label_oom:
1528 	assert(result == NULL);
1529 	if (config_xmalloc && unlikely(opt_xmalloc)) {
1530 		malloc_write("<jemalloc>: Error allocating aligned memory: "
1531 		    "out of memory\n");
1532 		abort();
1533 	}
1534 	ret = ENOMEM;
1535 	goto label_return;
1536 }
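/*
 * The min_alignment parameter encodes each caller's contract:
 * je_posix_memalign() passes sizeof(void *) as POSIX requires, while
 * je_aligned_alloc(), je_memalign(), and je_valloc() pass 1 and rely only
 * on the power-of-two check.
 */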
1537 
1538 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
1539 JEMALLOC_ATTR(nonnull(1))
1540 je_posix_memalign(void **memptr, size_t alignment, size_t size)
1541 {
1542 	int ret = imemalign(memptr, alignment, size, sizeof(void *));
1543 	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
1544 	    config_prof), false);
1545 	return (ret);
1546 }
1547 
1548 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1549 void JEMALLOC_NOTHROW *
1550 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
1551 je_aligned_alloc(size_t alignment, size_t size)
1552 {
1553 	void *ret;
1554 	int err;
1555 
1556 	if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
1557 		ret = NULL;
1558 		set_errno(err);
1559 	}
1560 	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
1561 	    false);
1562 	return (ret);
1563 }
1564 
1565 static void *
1566 icalloc_prof_sample(tsd_t *tsd, size_t usize, prof_tctx_t *tctx)
1567 {
1568 	void *p;
1569 
1570 	if (tctx == NULL)
1571 		return (NULL);
1572 	if (usize <= SMALL_MAXCLASS) {
1573 		p = icalloc(tsd, LARGE_MINCLASS);
1574 		if (p == NULL)
1575 			return (NULL);
1576 		arena_prof_promoted(p, usize);
1577 	} else
1578 		p = icalloc(tsd, usize);
1579 
1580 	return (p);
1581 }
1582 
1583 JEMALLOC_ALWAYS_INLINE_C void *
1584 icalloc_prof(tsd_t *tsd, size_t usize)
1585 {
1586 	void *p;
1587 	prof_tctx_t *tctx;
1588 
1589 	tctx = prof_alloc_prep(tsd, usize, true);
1590 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1591 		p = icalloc_prof_sample(tsd, usize, tctx);
1592 	else
1593 		p = icalloc(tsd, usize);
1594 	if (unlikely(p == NULL)) {
1595 		prof_alloc_rollback(tsd, tctx, true);
1596 		return (NULL);
1597 	}
1598 	prof_malloc(p, usize, tctx);
1599 
1600 	return (p);
1601 }
1602 
1603 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1604 void JEMALLOC_NOTHROW *
1605 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
1606 je_calloc(size_t num, size_t size)
1607 {
1608 	void *ret;
1609 	tsd_t *tsd;
1610 	size_t num_size;
1611 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1612 
1613 	if (unlikely(malloc_init())) {
1614 		num_size = 0;
1615 		ret = NULL;
1616 		goto label_return;
1617 	}
1618 	tsd = tsd_fetch();
1619 
1620 	num_size = num * size;
1621 	if (unlikely(num_size == 0)) {
1622 		if (num == 0 || size == 0)
1623 			num_size = 1;
1624 		else {
1625 			ret = NULL;
1626 			goto label_return;
1627 		}
1628 	/*
1629 	 * Try to avoid division here.  We know that it isn't possible to
1630 	 * overflow during multiplication if neither operand uses any of the
1631 	 * most significant half of the bits in a size_t.
1632 	 */
1633 	} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
1634 	    2))) && (num_size / size != num))) {
1635 		/* size_t overflow. */
1636 		ret = NULL;
1637 		goto label_return;
1638 	}
1639 
1640 	if (config_prof && opt_prof) {
1641 		usize = s2u(num_size);
1642 		if (unlikely(usize == 0)) {
1643 			ret = NULL;
1644 			goto label_return;
1645 		}
1646 		ret = icalloc_prof(tsd, usize);
1647 	} else {
1648 		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
1649 			usize = s2u(num_size);
1650 		ret = icalloc(tsd, num_size);
1651 	}
1652 
1653 label_return:
1654 	if (unlikely(ret == NULL)) {
1655 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1656 			malloc_write("<jemalloc>: Error in calloc(): out of "
1657 			    "memory\n");
1658 			abort();
1659 		}
1660 		set_errno(ENOMEM);
1661 	}
1662 	if (config_stats && likely(ret != NULL)) {
1663 		assert(usize == isalloc(ret, config_prof));
1664 		*tsd_thread_allocatedp_get(tsd) += usize;
1665 	}
1666 	UTRACE(0, num_size, ret);
1667 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
1668 	return (ret);
1669 }
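/*
 * Worked example for the overflow shortcut in je_calloc() above: on a
 * 64-bit system, (SIZE_T_MAX << (sizeof(size_t) << 2)) masks the upper 32
 * bits of each operand.  If neither num nor size has any of those bits
 * set, both are < 2^32, so num * size < 2^64 and cannot overflow; only
 * otherwise is the num_size / size != num division performed.
 */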
1670 
1671 static void *
1672 irealloc_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t usize,
1673     prof_tctx_t *tctx)
1674 {
1675 	void *p;
1676 
1677 	if (tctx == NULL)
1678 		return (NULL);
1679 	if (usize <= SMALL_MAXCLASS) {
1680 		p = iralloc(tsd, oldptr, old_usize, LARGE_MINCLASS, 0, false);
1681 		if (p == NULL)
1682 			return (NULL);
1683 		arena_prof_promoted(p, usize);
1684 	} else
1685 		p = iralloc(tsd, oldptr, old_usize, usize, 0, false);
1686 
1687 	return (p);
1688 }
1689 
1690 JEMALLOC_ALWAYS_INLINE_C void *
1691 irealloc_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t usize)
1692 {
1693 	void *p;
1694 	prof_tctx_t *old_tctx, *tctx;
1695 
1696 	old_tctx = prof_tctx_get(oldptr);
1697 	tctx = prof_alloc_prep(tsd, usize, true);
1698 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
1699 		p = irealloc_prof_sample(tsd, oldptr, old_usize, usize, tctx);
1700 	else
1701 		p = iralloc(tsd, oldptr, old_usize, usize, 0, false);
1702 	if (p == NULL)
1703 		return (NULL);
1704 	prof_realloc(tsd, p, usize, tctx, true, old_usize, old_tctx);
1705 
1706 	return (p);
1707 }
1708 
1709 JEMALLOC_INLINE_C void
1710 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache)
1711 {
1712 	size_t usize;
1713 	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1714 
1715 	assert(ptr != NULL);
1716 	assert(malloc_initialized() || IS_INITIALIZER);
1717 
1718 	if (config_prof && opt_prof) {
1719 		usize = isalloc(ptr, config_prof);
1720 		prof_free(tsd, ptr, usize);
1721 	} else if (config_stats || config_valgrind)
1722 		usize = isalloc(ptr, config_prof);
1723 	if (config_stats)
1724 		*tsd_thread_deallocatedp_get(tsd) += usize;
1725 	if (config_valgrind && unlikely(in_valgrind))
1726 		rzsize = p2rz(ptr);
1727 	iqalloc(tsd, ptr, tcache);
1728 	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1729 }
1730 
1731 JEMALLOC_INLINE_C void
1732 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
1733 {
1734 	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1735 
1736 	assert(ptr != NULL);
1737 	assert(malloc_initialized() || IS_INITIALIZER);
1738 
1739 	if (config_prof && opt_prof)
1740 		prof_free(tsd, ptr, usize);
1741 	if (config_stats)
1742 		*tsd_thread_deallocatedp_get(tsd) += usize;
1743 	if (config_valgrind && unlikely(in_valgrind))
1744 		rzsize = p2rz(ptr);
1745 	isqalloc(tsd, ptr, usize, tcache);
1746 	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1747 }
1748 
1749 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1750 void JEMALLOC_NOTHROW *
1751 JEMALLOC_ALLOC_SIZE(2)
1752 je_realloc(void *ptr, size_t size)
1753 {
1754 	void *ret;
1755 	tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
1756 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1757 	size_t old_usize = 0;
1758 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1759 
1760 	if (unlikely(size == 0)) {
1761 		if (ptr != NULL) {
1762 			/* realloc(ptr, 0) is equivalent to free(ptr). */
1763 			UTRACE(ptr, 0, 0);
1764 			tsd = tsd_fetch();
1765 			ifree(tsd, ptr, tcache_get(tsd, false));
1766 			return (NULL);
1767 		}
1768 		size = 1;
1769 	}
1770 
1771 	if (likely(ptr != NULL)) {
1772 		assert(malloc_initialized() || IS_INITIALIZER);
1773 		malloc_thread_init();
1774 		tsd = tsd_fetch();
1775 
1776 		old_usize = isalloc(ptr, config_prof);
1777 		if (config_valgrind && unlikely(in_valgrind))
1778 			old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);
1779 
1780 		if (config_prof && opt_prof) {
1781 			usize = s2u(size);
1782 			ret = unlikely(usize == 0) ? NULL : irealloc_prof(tsd,
1783 			    ptr, old_usize, usize);
1784 		} else {
1785 			if (config_stats || (config_valgrind &&
1786 			    unlikely(in_valgrind)))
1787 				usize = s2u(size);
1788 			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
1789 		}
1790 	} else {
1791 		/* realloc(NULL, size) is equivalent to malloc(size). */
1792 		ret = imalloc_body(size, &tsd, &usize);
1793 	}
1794 
1795 	if (unlikely(ret == NULL)) {
1796 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1797 			malloc_write("<jemalloc>: Error in realloc(): "
1798 			    "out of memory\n");
1799 			abort();
1800 		}
1801 		set_errno(ENOMEM);
1802 	}
1803 	if (config_stats && likely(ret != NULL)) {
1804 		assert(usize == isalloc(ret, config_prof));
1805 		*tsd_thread_allocatedp_get(tsd) += usize;
1806 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
1807 	}
1808 	UTRACE(ptr, size, ret);
1809 	JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
1810 	    old_rzsize, true, false);
1811 	return (ret);
1812 }
1813 
1814 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
1815 je_free(void *ptr)
1816 {
1817 
1818 	UTRACE(ptr, 0, 0);
1819 	if (likely(ptr != NULL)) {
1820 		tsd_t *tsd = tsd_fetch();
1821 		ifree(tsd, ptr, tcache_get(tsd, false));
1822 	}
1823 }
1824 
1825 /*
1826  * End malloc(3)-compatible functions.
1827  */
1828 /******************************************************************************/
1829 /*
1830  * Begin non-standard override functions.
1831  */
1832 
1833 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
1834 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1835 void JEMALLOC_NOTHROW *
1836 JEMALLOC_ATTR(malloc)
1837 je_memalign(size_t alignment, size_t size)
1838 {
1839 	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1840 	if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
1841 		ret = NULL;
1842 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1843 	return (ret);
1844 }
1845 #endif
1846 
1847 #ifdef JEMALLOC_OVERRIDE_VALLOC
1848 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
1849 void JEMALLOC_NOTHROW *
1850 JEMALLOC_ATTR(malloc)
1851 je_valloc(size_t size)
1852 {
1853 	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1854 	if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
1855 		ret = NULL;
1856 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1857 	return (ret);
1858 }
1859 #endif
1860 
1861 /*
1862  * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1863  * #define je_malloc malloc
1864  */
1865 #define	malloc_is_malloc 1
1866 #define	is_malloc_(a) malloc_is_ ## a
1867 #define	is_malloc(a) is_malloc_(a)
1868 
1869 #if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
1870 /*
1871  * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1872  * to inconsistently reference libc's malloc(3)-compatible functions
1873  * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1874  *
1875  * These definitions interpose hooks in glibc.  The functions are actually
1876  * passed an extra argument for the caller return address, which will be
1877  * ignored.
1878  */
1879 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
1880 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
1881 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
1882 # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
1883 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
1884     je_memalign;
1885 # endif
1886 #endif
1887 
1888 /*
1889  * End non-standard override functions.
1890  */
1891 /******************************************************************************/
1892 /*
1893  * Begin non-standard functions.
1894  */
1895 
1896 JEMALLOC_ALWAYS_INLINE_C bool
1897 imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
1898     size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
1899 {
1900 
1901 	if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
1902 		*alignment = 0;
1903 		*usize = s2u(size);
1904 	} else {
1905 		*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
1906 		*usize = sa2u(size, *alignment);
1907 	}
1908 	*zero = MALLOCX_ZERO_GET(flags);
1909 	if ((flags & MALLOCX_TCACHE_MASK) != 0) {
1910 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
1911 			*tcache = NULL;
1912 		else
1913 			*tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
1914 	} else
1915 		*tcache = tcache_get(tsd, true);
1916 	if ((flags & MALLOCX_ARENA_MASK) != 0) {
1917 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
1918 		*arena = arena_get(tsd, arena_ind, true, true);
1919 		if (unlikely(*arena == NULL))
1920 			return (true);
1921 	} else
1922 		*arena = NULL;
1923 	return (false);
1924 }
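/*
 * A minimal usage sketch (assuming an unprefixed build exporting the public
 * MALLOCX_* macros): the flags argument packs every decoded field, e.g.
 *
 *	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO |
 *	    MALLOCX_TCACHE_NONE);
 *
 * decodes to *alignment == 64, *zero == true, *tcache == NULL, and
 * *arena == NULL (i.e. use the thread's automatic arena choice).
 */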
1925 
1926 JEMALLOC_ALWAYS_INLINE_C bool
1927 imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
1928     size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
1929 {
1930 
1931 	if (likely(flags == 0)) {
1932 		*usize = s2u(size);
1933 		assert(*usize != 0);
1934 		*alignment = 0;
1935 		*zero = false;
1936 		*tcache = tcache_get(tsd, true);
1937 		*arena = NULL;
1938 		return (false);
1939 	} else {
1940 		return (imallocx_flags_decode_hard(tsd, size, flags, usize,
1941 		    alignment, zero, tcache, arena));
1942 	}
1943 }
1944 
1945 JEMALLOC_ALWAYS_INLINE_C void *
1946 imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
1947     tcache_t *tcache, arena_t *arena)
1948 {
1949 
1950 	if (alignment != 0)
1951 		return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
1952 	if (zero)
1953 		return (icalloct(tsd, usize, tcache, arena));
1954 	return (imalloct(tsd, usize, tcache, arena));
1955 }
1956 
1957 JEMALLOC_ALWAYS_INLINE_C void *
1958 imallocx_maybe_flags(tsd_t *tsd, size_t size, int flags, size_t usize,
1959     size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
1960 {
1961 
1962 	if (likely(flags == 0))
1963 		return (imalloc(tsd, size));
1964 	return (imallocx_flags(tsd, usize, alignment, zero, tcache, arena));
1965 }
1966 
1967 static void *
1968 imallocx_prof_sample(tsd_t *tsd, size_t size, int flags, size_t usize,
1969     size_t alignment, bool zero, tcache_t *tcache, arena_t *arena)
1970 {
1971 	void *p;
1972 
1973 	if (usize <= SMALL_MAXCLASS) {
1974 		assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
1975 		    sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
1976 		p = imallocx_maybe_flags(tsd, LARGE_MINCLASS, flags,
1977 		    LARGE_MINCLASS, alignment, zero, tcache, arena);
1978 		if (p == NULL)
1979 			return (NULL);
1980 		arena_prof_promoted(p, usize);
1981 	} else {
1982 		p = imallocx_maybe_flags(tsd, size, flags, usize, alignment,
1983 		    zero, tcache, arena);
1984 	}
1985 
1986 	return (p);
1987 }
1988 
1989 JEMALLOC_ALWAYS_INLINE_C void *
1990 imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
1991 {
1992 	void *p;
1993 	size_t alignment;
1994 	bool zero;
1995 	tcache_t *tcache;
1996 	arena_t *arena;
1997 	prof_tctx_t *tctx;
1998 
1999 	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
2000 	    &zero, &tcache, &arena)))
2001 		return (NULL);
2002 	tctx = prof_alloc_prep(tsd, *usize, true);
2003 	if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
2004 		p = imallocx_maybe_flags(tsd, size, flags, *usize, alignment,
2005 		    zero, tcache, arena);
2006 	} else if ((uintptr_t)tctx > (uintptr_t)1U) {
2007 		p = imallocx_prof_sample(tsd, size, flags, *usize, alignment,
2008 		    zero, tcache, arena);
2009 	} else
2010 		p = NULL;
2011 	if (unlikely(p == NULL)) {
2012 		prof_alloc_rollback(tsd, tctx, true);
2013 		return (NULL);
2014 	}
2015 	prof_malloc(p, *usize, tctx);
2016 
2017 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2018 	return (p);
2019 }
2020 
2021 JEMALLOC_ALWAYS_INLINE_C void *
2022 imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
2023 {
2024 	void *p;
2025 	size_t alignment;
2026 	bool zero;
2027 	tcache_t *tcache;
2028 	arena_t *arena;
2029 
2030 	if (likely(flags == 0)) {
2031 		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
2032 			*usize = s2u(size);
2033 		return (imalloc(tsd, size));
2034 	}
2035 
2036 	if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
2037 	    &alignment, &zero, &tcache, &arena)))
2038 		return (NULL);
2039 	p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
2040 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2041 	return (p);
2042 }
2043 
2044 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2045 void JEMALLOC_NOTHROW *
2046 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2047 je_mallocx(size_t size, int flags)
2048 {
2049 	tsd_t *tsd;
2050 	void *p;
2051 	size_t usize;
2052 
2053 	assert(size != 0);
2054 
2055 	if (unlikely(malloc_init()))
2056 		goto label_oom;
2057 	tsd = tsd_fetch();
2058 
2059 	if (config_prof && opt_prof)
2060 		p = imallocx_prof(tsd, size, flags, &usize);
2061 	else
2062 		p = imallocx_no_prof(tsd, size, flags, &usize);
2063 	if (unlikely(p == NULL))
2064 		goto label_oom;
2065 
2066 	if (config_stats) {
2067 		assert(usize == isalloc(p, config_prof));
2068 		*tsd_thread_allocatedp_get(tsd) += usize;
2069 	}
2070 	UTRACE(0, size, p);
2071 	JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
2072 	return (p);
2073 label_oom:
2074 	if (config_xmalloc && unlikely(opt_xmalloc)) {
2075 		malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
2076 		abort();
2077 	}
2078 	UTRACE(0, size, 0);
2079 	return (NULL);
2080 }
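
/*
 * Example use of the function above (a sketch assuming the public names
 * from FreeBSD's <malloc_np.h>, where je_mallocx() is exported as
 * mallocx()):
 *
 *	#include <malloc_np.h>
 *
 *	void *p = mallocx(4096, MALLOCX_ALIGN(4096) | MALLOCX_ZERO);
 *	if (p == NULL)
 *		abort();
 *	dallocx(p, 0);
 *
 * mallocx() returns NULL only on OOM; with opt_xmalloc enabled the OOM
 * path aborts instead.
 */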
2081 
2082 static void *
2083 irallocx_prof_sample(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
2084     size_t alignment, size_t usize, bool zero, tcache_t *tcache, arena_t *arena,
2085     prof_tctx_t *tctx)
2086 {
2087 	void *p;
2088 
2089 	if (tctx == NULL)
2090 		return (NULL);
2091 	if (usize <= SMALL_MAXCLASS) {
2092 		p = iralloct(tsd, oldptr, old_usize, LARGE_MINCLASS, alignment,
2093 		    zero, tcache, arena);
2094 		if (p == NULL)
2095 			return (NULL);
2096 		arena_prof_promoted(p, usize);
2097 	} else {
2098 		p = iralloct(tsd, oldptr, old_usize, size, alignment, zero,
2099 		    tcache, arena);
2100 	}
2101 
2102 	return (p);
2103 }
2104 
2105 JEMALLOC_ALWAYS_INLINE_C void *
2106 irallocx_prof(tsd_t *tsd, void *oldptr, size_t old_usize, size_t size,
2107     size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2108     arena_t *arena)
2109 {
2110 	void *p;
2111 	prof_tctx_t *old_tctx, *tctx;
2112 
2113 	old_tctx = prof_tctx_get(oldptr);
2114 	tctx = prof_alloc_prep(tsd, *usize, false);
2115 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2116 		p = irallocx_prof_sample(tsd, oldptr, old_usize, size,
2117 		    alignment, *usize, zero, tcache, arena, tctx);
2118 	} else {
2119 		p = iralloct(tsd, oldptr, old_usize, size, alignment, zero,
2120 		    tcache, arena);
2121 	}
2122 	if (unlikely(p == NULL)) {
2123 		prof_alloc_rollback(tsd, tctx, false);
2124 		return (NULL);
2125 	}
2126 
2127 	if (p == oldptr && alignment != 0) {
2128 		/*
2129 		 * The allocation did not move, so it is possible that the size
2130 		 * class is smaller than would guarantee the requested
2131 		 * alignment, and that the alignment constraint was
2132 		 * serendipitously satisfied.  Additionally, old_usize may not
2133 		 * be the same as the current usize because of in-place large
2134 		 * reallocation.  Therefore, query the actual value of usize.
2135 		 */
2136 		*usize = isalloc(p, config_prof);
2137 	}
2138 	prof_realloc(tsd, p, *usize, tctx, false, old_usize, old_tctx);
2139 
2140 	return (p);
2141 }
2142 
2143 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2144 void JEMALLOC_NOTHROW *
2145 JEMALLOC_ALLOC_SIZE(2)
2146 je_rallocx(void *ptr, size_t size, int flags)
2147 {
2148 	void *p;
2149 	tsd_t *tsd;
2150 	size_t usize;
2151 	size_t old_usize;
2152 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2153 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2154 	bool zero = flags & MALLOCX_ZERO;
2155 	arena_t *arena;
2156 	tcache_t *tcache;
2157 
2158 	assert(ptr != NULL);
2159 	assert(size != 0);
2160 	assert(malloc_initialized() || IS_INITIALIZER);
2161 	malloc_thread_init();
2162 	tsd = tsd_fetch();
2163 
2164 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2165 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2166 		arena = arena_get(tsd, arena_ind, true, true);
2167 		if (unlikely(arena == NULL))
2168 			goto label_oom;
2169 	} else
2170 		arena = NULL;
2171 
2172 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2173 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2174 			tcache = NULL;
2175 		else
2176 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2177 	} else
2178 		tcache = tcache_get(tsd, true);
2179 
2180 	old_usize = isalloc(ptr, config_prof);
2181 	if (config_valgrind && unlikely(in_valgrind))
2182 		old_rzsize = u2rz(old_usize);
2183 
2184 	if (config_prof && opt_prof) {
2185 		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
2186 		assert(usize != 0);
2187 		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2188 		    zero, tcache, arena);
2189 		if (unlikely(p == NULL))
2190 			goto label_oom;
2191 	} else {
2192 		p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
2193 		    tcache, arena);
2194 		if (unlikely(p == NULL))
2195 			goto label_oom;
2196 		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
2197 			usize = isalloc(p, config_prof);
2198 	}
2199 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2200 
2201 	if (config_stats) {
2202 		*tsd_thread_allocatedp_get(tsd) += usize;
2203 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2204 	}
2205 	UTRACE(ptr, size, p);
2206 	JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
2207 	    old_rzsize, false, zero);
2208 	return (p);
2209 label_oom:
2210 	if (config_xmalloc && unlikely(opt_xmalloc)) {
2211 		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2212 		abort();
2213 	}
2214 	UTRACE(ptr, size, 0);
2215 	return (NULL);
2216 }
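
/*
 * Example (sketch): grow an existing allocation while preserving 64-byte
 * alignment.  Unlike xallocx() below, rallocx() may move the object; on
 * failure it returns NULL and leaves the old pointer valid.
 *
 *	void *q = rallocx(p, 2 * sallocx(p, 0), MALLOCX_ALIGN(64));
 *	if (q != NULL)
 *		p = q;
 */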
2217 
2218 JEMALLOC_ALWAYS_INLINE_C size_t
2219 ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
2220     size_t alignment, bool zero)
2221 {
2222 	size_t usize;
2223 
2224 	if (ixalloc(ptr, old_usize, size, extra, alignment, zero))
2225 		return (old_usize);
2226 	usize = isalloc(ptr, config_prof);
2227 
2228 	return (usize);
2229 }
2230 
2231 static size_t
2232 ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
2233     size_t alignment, size_t max_usize, bool zero, prof_tctx_t *tctx)
2234 {
2235 	size_t usize;
2236 
2237 	if (tctx == NULL)
2238 		return (old_usize);
2239 	/* Use minimum usize to determine whether promotion may happen. */
2240 	if (((alignment == 0) ? s2u(size) : sa2u(size, alignment)) <=
2241 	    SMALL_MAXCLASS) {
2242 		if (ixalloc(ptr, old_usize, SMALL_MAXCLASS+1,
2243 		    (SMALL_MAXCLASS+1 >= size+extra) ? 0 : size+extra -
2244 		    (SMALL_MAXCLASS+1), alignment, zero))
2245 			return (old_usize);
2246 		usize = isalloc(ptr, config_prof);
2247 		if (max_usize < LARGE_MINCLASS)
2248 			arena_prof_promoted(ptr, usize);
2249 	} else {
2250 		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
2251 		    zero);
2252 	}
2253 
2254 	return (usize);
2255 }
2256 
2257 JEMALLOC_ALWAYS_INLINE_C size_t
2258 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2259     size_t extra, size_t alignment, bool zero)
2260 {
2261 	size_t max_usize, usize;
2262 	prof_tctx_t *old_tctx, *tctx;
2263 
2264 	old_tctx = prof_tctx_get(ptr);
2265 	/*
2266 	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2267 	 * Therefore, compute its maximum possible value and use that in
2268 	 * prof_alloc_prep() to decide whether to capture a backtrace.
2269 	 * prof_realloc() will use the actual usize to decide whether to sample.
2270 	 */
2271 	max_usize = (alignment == 0) ? s2u(size+extra) : sa2u(size+extra,
2272 	    alignment);
2273 	tctx = prof_alloc_prep(tsd, max_usize, false);
2274 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2275 		usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
2276 		    alignment, max_usize, zero, tctx);
2277 	} else {
2278 		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
2279 		    zero);
2280 	}
2281 	if (unlikely(usize == old_usize)) {
2282 		prof_alloc_rollback(tsd, tctx, false);
2283 		return (usize);
2284 	}
2285 	prof_realloc(tsd, ptr, usize, tctx, false, old_usize, old_tctx);
2286 
2287 	return (usize);
2288 }
2289 
2290 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2291 je_xallocx(void *ptr, size_t size, size_t extra, int flags)
2292 {
2293 	tsd_t *tsd;
2294 	size_t usize, old_usize;
2295 	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
2296 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2297 	bool zero = flags & MALLOCX_ZERO;
2298 
2299 	assert(ptr != NULL);
2300 	assert(size != 0);
2301 	assert(SIZE_T_MAX - size >= extra);
2302 	assert(malloc_initialized() || IS_INITIALIZER);
2303 	malloc_thread_init();
2304 	tsd = tsd_fetch();
2305 
2306 	old_usize = isalloc(ptr, config_prof);
2307 	if (config_valgrind && unlikely(in_valgrind))
2308 		old_rzsize = u2rz(old_usize);
2309 
2310 	if (config_prof && opt_prof) {
2311 		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2312 		    alignment, zero);
2313 	} else {
2314 		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
2315 		    zero);
2316 	}
2317 	if (unlikely(usize == old_usize))
2318 		goto label_not_resized;
2319 
2320 	if (config_stats) {
2321 		*tsd_thread_allocatedp_get(tsd) += usize;
2322 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2323 	}
2324 	JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
2325 	    old_rzsize, false, zero);
2326 label_not_resized:
2327 	UTRACE(ptr, size, ptr);
2328 	return (usize);
2329 }
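
/*
 * Example (sketch): attempt an in-place resize to at least new_size
 * bytes, opportunistically taking up to extra additional bytes.
 * xallocx() never moves the object; it returns the resulting usable
 * size, which the caller compares against the target:
 *
 *	size_t usize = xallocx(p, new_size, extra, 0);
 *	if (usize < new_size)
 *		... could not resize in place; fall back to rallocx() ...
 */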
2330 
2331 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2332 JEMALLOC_ATTR(pure)
2333 je_sallocx(const void *ptr, int flags)
2334 {
2335 	size_t usize;
2336 
2337 	assert(malloc_initialized() || IS_INITIALIZER);
2338 	malloc_thread_init();
2339 
2340 	if (config_ivsalloc)
2341 		usize = ivsalloc(ptr, config_prof);
2342 	else
2343 		usize = isalloc(ptr, config_prof);
2344 
2345 	return (usize);
2346 }
2347 
2348 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2349 je_dallocx(void *ptr, int flags)
2350 {
2351 	tsd_t *tsd;
2352 	tcache_t *tcache;
2353 
2354 	assert(ptr != NULL);
2355 	assert(malloc_initialized() || IS_INITIALIZER);
2356 
2357 	tsd = tsd_fetch();
2358 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2359 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2360 			tcache = NULL;
2361 		else
2362 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2363 	} else
2364 		tcache = tcache_get(tsd, false);
2365 
2366 	UTRACE(ptr, 0, 0);
2367 	ifree(tsd, ptr, tcache);
2368 }
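
/*
 * Example (sketch): deallocate while bypassing the thread cache, which
 * exercises the MALLOCX_TCACHE_NONE branch above:
 *
 *	dallocx(p, MALLOCX_TCACHE_NONE);
 */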
2369 
2370 JEMALLOC_ALWAYS_INLINE_C size_t
2371 inallocx(size_t size, int flags)
2372 {
2373 	size_t usize;
2374 
2375 	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
2376 		usize = s2u(size);
2377 	else
2378 		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2379 	assert(usize != 0);
2380 	return (usize);
2381 }
2382 
2383 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2384 je_sdallocx(void *ptr, size_t size, int flags)
2385 {
2386 	tsd_t *tsd;
2387 	tcache_t *tcache;
2388 	size_t usize;
2389 
2390 	assert(ptr != NULL);
2391 	assert(malloc_initialized() || IS_INITIALIZER);
2392 	usize = inallocx(size, flags);
2393 	assert(usize == isalloc(ptr, config_prof));
2394 
2395 	tsd = tsd_fetch();
2396 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2397 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
2398 			tcache = NULL;
2399 		else
2400 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2401 	} else
2402 		tcache = tcache_get(tsd, false);
2403 
2404 	UTRACE(ptr, 0, 0);
2405 	isfree(tsd, ptr, usize, tcache);
2406 }
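
/*
 * Example (sketch): sized deallocation.  The size/flags pair passed to
 * sdallocx() must map to the allocation's actual usable size (the
 * inallocx() assertion above checks this); nallocx() computes that size
 * without touching the heap:
 *
 *	assert(nallocx(sz, flags) == sallocx(p, flags));
 *	sdallocx(p, sz, flags);
 */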
2407 
2408 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2409 JEMALLOC_ATTR(pure)
2410 je_nallocx(size_t size, int flags)
2411 {
2412 
2413 	assert(size != 0);
2414 
2415 	if (unlikely(malloc_init()))
2416 		return (0);
2417 
2418 	return (inallocx(size, flags));
2419 }
2420 
2421 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2422 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
2423     size_t newlen)
2424 {
2425 
2426 	if (unlikely(malloc_init()))
2427 		return (EAGAIN);
2428 
2429 	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
2430 }
2431 
2432 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2433 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
2434 {
2435 
2436 	if (unlikely(malloc_init()))
2437 		return (EAGAIN);
2438 
2439 	return (ctl_nametomib(name, mibp, miblenp));
2440 }
2441 
2442 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2443 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
2444   void *newp, size_t newlen)
2445 {
2446 
2447 	if (unlikely(malloc_init()))
2448 		return (EAGAIN);
2449 
2450 	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
2451 }
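
/*
 * Example (sketch): the three entry points above are typically used
 * together; translate a name to a MIB once, then repeat the query via
 * the cheaper mallctlbymib() path:
 *
 *	size_t mib[2], miblen = 2;
 *	size_t allocated, len = sizeof(allocated);
 *	if (mallctlnametomib("stats.allocated", mib, &miblen) == 0)
 *		mallctlbymib(mib, miblen, &allocated, &len, NULL, 0);
 *
 * (Statistics are snapshots; writing to the "epoch" control refreshes
 * them.)
 */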
2452 
2453 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2454 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
2455     const char *opts)
2456 {
2457 
2458 	stats_print(write_cb, cbopaque, opts);
2459 }
2460 
2461 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2462 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
2463 {
2464 	size_t ret;
2465 
2466 	assert(malloc_initialized() || IS_INITIALIZER);
2467 	malloc_thread_init();
2468 
2469 	if (config_ivsalloc)
2470 		ret = ivsalloc(ptr, config_prof);
2471 	else
2472 		ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);
2473 
2474 	return (ret);
2475 }
2476 
2477 /*
2478  * End non-standard functions.
2479  */
2480 /******************************************************************************/
2481 /*
2482  * Begin compatibility functions.
2483  */
2484 
2485 #define	ALLOCM_LG_ALIGN(la)	(la)
2486 #define	ALLOCM_ALIGN(a)		(ffsl(a)-1)
2487 #define	ALLOCM_ZERO		((int)0x40)
2488 #define	ALLOCM_NO_MOVE		((int)0x80)
2489 
2490 #define	ALLOCM_SUCCESS		0
2491 #define	ALLOCM_ERR_OOM		1
2492 #define	ALLOCM_ERR_NOT_MOVED	2
2493 
2494 int
2495 je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
2496 {
2497 	void *p;
2498 
2499 	assert(ptr != NULL);
2500 
2501 	p = je_mallocx(size, flags);
2502 	if (p == NULL)
2503 		return (ALLOCM_ERR_OOM);
2504 	if (rsize != NULL)
2505 		*rsize = isalloc(p, config_prof);
2506 	*ptr = p;
2507 	return (ALLOCM_SUCCESS);
2508 }
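
/*
 * Example (sketch): per the definition above, the legacy call
 *
 *	void *p;
 *	size_t rsize;
 *	allocm(&p, &rsize, 4096, ALLOCM_ALIGN(64) | ALLOCM_ZERO);
 *
 * behaves like p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO)
 * followed by rsize = sallocx(p, 0), with a NULL result mapped to
 * ALLOCM_ERR_OOM.
 */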
2509 
2510 int
2511 je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
2512 {
2513 	int ret;
2514 	bool no_move = flags & ALLOCM_NO_MOVE;
2515 
2516 	assert(ptr != NULL);
2517 	assert(*ptr != NULL);
2518 	assert(size != 0);
2519 	assert(SIZE_T_MAX - size >= extra);
2520 
2521 	if (no_move) {
2522 		size_t usize = je_xallocx(*ptr, size, extra, flags);
2523 		ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
2524 		if (rsize != NULL)
2525 			*rsize = usize;
2526 	} else {
2527 		void *p = je_rallocx(*ptr, size+extra, flags);
2528 		if (p != NULL) {
2529 			*ptr = p;
2530 			ret = ALLOCM_SUCCESS;
2531 		} else
2532 			ret = ALLOCM_ERR_OOM;
2533 		if (rsize != NULL)
2534 			*rsize = isalloc(*ptr, config_prof);
2535 	}
2536 	return (ret);
2537 }
2538 
2539 int
2540 je_sallocm(const void *ptr, size_t *rsize, int flags)
2541 {
2542 
2543 	assert(rsize != NULL);
2544 	*rsize = je_sallocx(ptr, flags);
2545 	return (ALLOCM_SUCCESS);
2546 }
2547 
2548 int
2549 je_dallocm(void *ptr, int flags)
2550 {
2551 
2552 	je_dallocx(ptr, flags);
2553 	return (ALLOCM_SUCCESS);
2554 }
2555 
2556 int
2557 je_nallocm(size_t *rsize, size_t size, int flags)
2558 {
2559 	size_t usize;
2560 
2561 	usize = je_nallocx(size, flags);
2562 	if (usize == 0)
2563 		return (ALLOCM_ERR_OOM);
2564 	if (rsize != NULL)
2565 		*rsize = usize;
2566 	return (ALLOCM_SUCCESS);
2567 }
2568 
2569 #undef ALLOCM_LG_ALIGN
2570 #undef ALLOCM_ALIGN
2571 #undef ALLOCM_ZERO
2572 #undef ALLOCM_NO_MOVE
2573 
2574 #undef ALLOCM_SUCCESS
2575 #undef ALLOCM_ERR_OOM
2576 #undef ALLOCM_ERR_NOT_MOVED
2577 
2578 /*
2579  * End compatibility functions.
2580  */
2581 /******************************************************************************/
2582 /*
2583  * The following functions are used by threading libraries for protection of
2584  * malloc during fork().
2585  */
2586 
2587 /*
2588  * If an application creates a thread before doing any allocation in the main
2589  * thread, then calls fork(2) in the main thread followed by memory allocation
2590  * in the child process, a race can occur that results in deadlock within the
2591  * child: the main thread may have forked while the created thread had
2592  * partially initialized the allocator.  Ordinarily jemalloc prevents
2593  * fork/malloc races via the following functions it registers during
2594  * initialization using pthread_atfork(), but of course that does no good if
2595  * the allocator isn't fully initialized at fork time.  The following library
2596  * constructor is a partial solution to this problem.  It may still be possible
2597  * to trigger the deadlock described above, but doing so would involve forking
2598  * via a library constructor that runs before jemalloc's own constructor runs.
2599  */
2600 JEMALLOC_ATTR(constructor)
2601 static void
2602 jemalloc_constructor(void)
2603 {
2604 
2605 	malloc_init();
2606 }
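
/*
 * As noted above, initialization registers these handlers with
 * pthread_atfork() when JEMALLOC_MUTEX_INIT_CB is not in use; roughly
 * (a sketch; the actual call happens during malloc_init()):
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 */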
2607 
2608 #ifndef JEMALLOC_MUTEX_INIT_CB
2609 void
2610 jemalloc_prefork(void)
2611 #else
2612 JEMALLOC_EXPORT void
2613 _malloc_prefork(void)
2614 #endif
2615 {
2616 	unsigned i;
2617 
2618 #ifdef JEMALLOC_MUTEX_INIT_CB
2619 	if (!malloc_initialized())
2620 		return;
2621 #endif
2622 	assert(malloc_initialized());
2623 
2624 	/* Acquire all mutexes in a safe order. */
2625 	ctl_prefork();
2626 	prof_prefork();
2627 	malloc_mutex_prefork(&arenas_lock);
2628 	for (i = 0; i < narenas_total; i++) {
2629 		if (arenas[i] != NULL)
2630 			arena_prefork(arenas[i]);
2631 	}
2632 	chunk_prefork();
2633 	base_prefork();
2634 }
2635 
2636 #ifndef JEMALLOC_MUTEX_INIT_CB
2637 void
2638 jemalloc_postfork_parent(void)
2639 #else
2640 JEMALLOC_EXPORT void
2641 _malloc_postfork(void)
2642 #endif
2643 {
2644 	unsigned i;
2645 
2646 #ifdef JEMALLOC_MUTEX_INIT_CB
2647 	if (!malloc_initialized())
2648 		return;
2649 #endif
2650 	assert(malloc_initialized());
2651 
2652 	/* Release all mutexes, now that fork() has completed. */
2653 	base_postfork_parent();
2654 	chunk_postfork_parent();
2655 	for (i = 0; i < narenas_total; i++) {
2656 		if (arenas[i] != NULL)
2657 			arena_postfork_parent(arenas[i]);
2658 	}
2659 	malloc_mutex_postfork_parent(&arenas_lock);
2660 	prof_postfork_parent();
2661 	ctl_postfork_parent();
2662 }
2663 
2664 void
2665 jemalloc_postfork_child(void)
2666 {
2667 	unsigned i;
2668 
2669 	assert(malloc_initialized());
2670 
2671 	/* Release all mutexes, now that fork() has completed. */
2672 	base_postfork_child();
2673 	chunk_postfork_child();
2674 	for (i = 0; i < narenas_total; i++) {
2675 		if (arenas[i] != NULL)
2676 			arena_postfork_child(arenas[i]);
2677 	}
2678 	malloc_mutex_postfork_child(&arenas_lock);
2679 	prof_postfork_child();
2680 	ctl_postfork_child();
2681 }
2682 
2683 void
2684 _malloc_first_thread(void)
2685 {
2686 
2687 	(void)malloc_mutex_first_thread();
2688 }
2689 
2690 /******************************************************************************/
2691