/* xref: /freebsd/contrib/jemalloc/src/jemalloc.c (revision e39e854e27f53a784c3982cbeb68f4ad1cfd9162) */
#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
const char	*__malloc_options_1_0 = NULL;
__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);

/* Runtime configuration options. */
const char	*je_malloc_conf;
#ifdef JEMALLOC_DEBUG
bool	opt_abort = true;
#  ifdef JEMALLOC_FILL
bool	opt_junk = true;
#  else
bool	opt_junk = false;
#  endif
#else
bool	opt_abort = false;
bool	opt_junk = false;
#endif
size_t	opt_quarantine = ZU(0);
bool	opt_redzone = false;
bool	opt_utrace = false;
bool	opt_valgrind = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

unsigned	ncpus;

malloc_mutex_t		arenas_lock;
arena_t			**arenas;
unsigned		narenas;

/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER	((unsigned long)0)
#  define INITIALIZER		pthread_self()
#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t		malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER	false
#  define INITIALIZER		true
#  define IS_INITIALIZER	malloc_initializer
static bool			malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t	init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif

#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
#  define UTRACE(a, b, c) do {						\
	if (opt_utrace) {						\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
	}								\
} while (0)
#else
#  define UTRACE(a, b, c)
#endif
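/*
 * Illustrative usage only: with utrace support compiled in
 * (JEMALLOC_UTRACE) and enabled at run time, each allocator entry point
 * emits one malloc_utrace_t record ({p, s, r}) via utrace(2).  On FreeBSD,
 * something like
 *
 *	MALLOC_CONF=utrace:true ktrace -t u ./a.out && kdump
 *
 * captures and displays the records; the exact tracing commands are a
 * sketch and vary by system.
 */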

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0 || first_null == narenas) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}
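/*
 * Worked example of the policy above (illustrative): with narenas == 4 and
 * per-arena thread counts {3, 1, 2, <slot 3 uninitialized>}, the scan
 * settles on choose == 1 (fewest threads) and first_null == 3.  Because
 * arenas[1]->nthreads != 0 and an uninitialized slot exists, the slot at
 * index 3 is populated via arenas_extend(3) and assigned to the calling
 * thread.
 */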

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0; i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}
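/*
 * Illustrative usage only: this atexit hook is registered when the
 * "stats_print" option is set, e.g.
 *
 *	MALLOC_CONF=stats_print:true ./a.out
 *
 * An application can also request the same report at any time by calling
 * je_malloc_stats_print(NULL, NULL, NULL) (malloc_stats_print() under the
 * public names).
 */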

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
	if (result == -1) {
		/* Error; arbitrarily assume one CPU. */
		result = 1;
	}
#endif
	ret = (unsigned)result;

	return (ret);
}

void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

static inline bool
malloc_init(void)
{

	if (malloc_initialized == false)
		return (malloc_init_hard());

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
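/*
 * Worked example (illustrative): given the conf string
 *
 *	"abort:true,lg_chunk:22"
 *
 * the first call to malloc_conf_next() yields k = "abort" (klen == 5) and
 * v = "true" (vlen == 4), consuming the trailing comma; the second call
 * yields k = "lg_chunk" (klen == 8) and v = "22" (vlen == 2), leaving
 * *opts_p at the terminating '\0' so that the caller's loop ends.
 */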

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
#ifndef _WIN32
			int linklen;
			const char *linkname =
#  ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#  else
			    "/etc/malloc.conf"
#  endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else
#endif
			{
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if (issetugid() == 0 && (opts = getenv(envname)) !=
			    NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL_HIT(o, n, hit)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				hit = true;				\
			} else						\
				hit = false;
#define	CONF_HANDLE_BOOL(o, n) {					\
			bool hit;					\
			CONF_HANDLE_BOOL_HIT(o, n, hit);		\
			if (hit)					\
				continue;				\
}
#define	CONF_HANDLE_SIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				uintmax_t um;				\
				char *end;				\
									\
				set_errno(0);				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (um < min || um > max) {	\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = um;				\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				set_errno(0);				\
				l = strtol(v, &end, 0);			\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(opt_abort, "abort")
			/*
			 * Chunks always require at least one header page, plus
			 * one data page in the absence of redzones, or three
			 * pages in the presence of redzones.  In order to
			 * simplify options processing, fix the limit based on
			 * config_fill.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
			    SIZE_T_MAX)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, "junk")
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX)
				CONF_HANDLE_BOOL(opt_redzone, "redzone")
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_valgrind) {
				bool hit;
				CONF_HANDLE_BOOL_HIT(opt_valgrind,
				    "valgrind", hit)
				if (config_fill && opt_valgrind && hit) {
					opt_junk = false;
					opt_zero = false;
					if (opt_quarantine == 0) {
						opt_quarantine =
						    JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
					}
					opt_redzone = true;
				}
				if (hit)
					continue;
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache")
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL_HIT
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
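/*
 * Illustrative summary of the loop above: three sources are consulted in
 * order, with later sources overriding earlier ones: (0) the compiled-in
 * je_malloc_conf string, (1) the name (not the contents) of the
 * /etc/malloc.conf symbolic link, and (2) the MALLOC_CONF environment
 * variable, which is ignored for set{u,g}id programs.  For example,
 *
 *	ln -s 'lg_chunk:24,abort:true' /etc/malloc.conf
 *
 * configures every jemalloc-linked program on a system, while
 *
 *	MALLOC_CONF=abort:false ./a.out
 *
 * overrides the abort setting for a single run.  (With JEMALLOC_PREFIX
 * defined, the link and variable names gain the configured prefix.)
 */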

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32))
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_fill && quarantine_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas > chunksize / sizeof(arena_t *)) {
		narenas = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%u)\n",
		    narenas);
	}

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}
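/*
 * Illustrative example of the narenas default above: on an 8-CPU system
 * with no explicit "narenas" setting, opt_narenas becomes 8 << 2 == 32,
 * so up to 32 arenas may be created lazily as threads first allocate and
 * reach choose_arena_hard().
 */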

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		ret = NULL;
		goto label_oom;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_oom;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(size);
		ret = imalloc(size);
	}

label_oom:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_ATTR(noinline)
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto label_return;
		}

		usize = sa2u(size, alignment);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto label_return;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = ENOMEM;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto label_return;
	}

	*memptr = result;
	ret = 0;

label_return:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	UTRACE(0, size, result);
	return (ret);
}

int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret = imemalign(memptr, alignment, size, sizeof(void *));
	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
	    config_prof), false);
	return (ret);
}

void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		set_errno(err);
	}
	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
	    false);
	return (ret);
}

void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_return;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

label_return:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, num_size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
	return (ret);
}
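/*
 * Worked example of je_calloc()'s overflow screen (illustrative), on a
 * 64-bit system: SIZE_T_MAX << 32 masks the upper halves of num and size.
 * If both operands fit in 32 bits, their product fits in 64 bits and
 * cannot have wrapped, so the division is skipped entirely.  With
 * num == 2^32 + 1 and size == 2^32, the mask is nonzero and the product
 * wraps to 2^32, so num_size / size == 1 != num exposes the overflow.
 */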

void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_size = 0;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			if (config_prof) {
				old_size = isalloc(ptr, true);
				if (config_valgrind && opt_valgrind)
					old_rzsize = p2rz(ptr);
			} else if (config_stats) {
				old_size = isalloc(ptr, false);
				if (config_valgrind && opt_valgrind)
					old_rzsize = u2rz(old_size);
			} else if (config_valgrind && opt_valgrind) {
				old_size = isalloc(ptr, false);
				old_rzsize = u2rz(old_size);
			}
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			iqalloc(ptr);
			ret = NULL;
			goto label_return;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof) {
			old_size = isalloc(ptr, true);
			if (config_valgrind && opt_valgrind)
				old_rzsize = p2rz(ptr);
		} else if (config_stats) {
			old_size = isalloc(ptr, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(ptr, false);
			old_rzsize = u2rz(old_size);
		}
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto label_oom;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats || (config_valgrind && opt_valgrind))
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

label_oom:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			set_errno(ENOMEM);
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats || (config_valgrind &&
				    opt_valgrind))
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			set_errno(ENOMEM);
		}
	}

label_return:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret, config_prof));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
	return (ret);
}

void
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (ptr != NULL) {
		size_t usize;
		size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof && opt_prof) {
			usize = isalloc(ptr, config_prof);
			prof_free(ptr, usize);
		} else if (config_stats || config_valgrind)
			usize = isalloc(ptr, config_prof);
		if (config_stats)
			thread_allocated_tsd_get()->deallocated += usize;
		if (config_valgrind && opt_valgrind)
			rzsize = p2rz(ptr);
		iqalloc(ptr);
		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	if (imemalign(&ret, alignment, size, 1) != 0)
		ret = NULL;
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	if (imemalign(&ret, PAGE, size, 1) != 0)
		ret = NULL;
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)
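/*
 * Expansion walkthrough (illustrative): when jemalloc_defs.h contains
 * "#define je_malloc malloc", is_malloc(je_malloc) expands via
 * is_malloc_(malloc) to malloc_is_malloc, which is defined above as 1.
 * Without that define it expands to malloc_is_je_malloc, an undefined
 * identifier that evaluates to 0 in the preprocessor conditional below.
 */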

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (* const __free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(* const __malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(* const __realloc_hook)(void *ptr, size_t size) =
    je_realloc;
JEMALLOC_EXPORT void *(* const __memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

size_t
je_malloc_usable_size(const void *ptr)
{
	size_t ret;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		ret = ivsalloc(ptr, config_prof);
	else
		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

	return (ret);
}

void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
  void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}
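/*
 * Illustrative usage only (not part of this file): reading a statistic
 * through the mallctl*() interfaces above.  "stats.allocated" is assumed
 * to be a valid statistics name when stats support is compiled in.
 *
 *	size_t allocated, sz = sizeof(allocated);
 *	if (je_mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
 *		malloc_printf("allocated: %zu\n", allocated);
 *
 * For repeated queries, the name can be translated once and reused:
 *
 *	size_t mib[2], miblen = sizeof(mib) / sizeof(size_t);
 *	if (je_mallctlnametomib("stats.allocated", mib, &miblen) == 0)
 *		je_mallctlbymib(mib, miblen, &allocated, &sz, NULL, 0);
 */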

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

JEMALLOC_INLINE void *
iallocm(size_t usize, size_t alignment, bool zero)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
	    alignment)));

	if (alignment != 0)
		return (ipalloc(usize, alignment, zero));
	else if (zero)
		return (icalloc(usize));
	else
		return (imalloc(usize));
}

int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto label_oom;

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		goto label_oom;

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero);
			if (p == NULL)
				goto label_oom;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero);
			if (p == NULL)
				goto label_oom;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero);
		if (p == NULL)
			goto label_oom;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, p);
	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
	return (ALLOCM_SUCCESS);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	UTRACE(0, size, 0);
	return (ALLOCM_ERR_OOM);
}
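/*
 * Illustrative usage only: allocating 4096 zeroed bytes with 64-byte
 * alignment through the experimental API.  ALLOCM_ALIGN() and ALLOCM_ZERO
 * are assumed to come from jemalloc's experimental public header.
 *
 *	void *p;
 *	size_t rsize;
 *	if (allocm(&p, &rsize, 4096, ALLOCM_ALIGN(64) | ALLOCM_ZERO) !=
 *	    ALLOCM_SUCCESS)
 *		abort();
 *
 * On success, rsize reports the usable size (>= 4096).
 */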

int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);

	p = *ptr;
	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p, true);
		if (config_valgrind && opt_valgrind)
			old_rzsize = p2rz(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
		    <= SMALL_MAXCLASS) {
			q = iralloc(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move);
			if (q == NULL)
				goto label_err;
			if (max_usize < PAGE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q, config_prof);
		} else {
			q = iralloc(p, size, extra, alignment, zero, no_move);
			if (q == NULL)
				goto label_err;
			usize = isalloc(q, config_prof);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats) {
			old_size = isalloc(p, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(p, false);
			old_rzsize = u2rz(old_size);
		}
		q = iralloc(p, size, extra, alignment, zero, no_move);
		if (q == NULL)
			goto label_err;
		if (config_stats)
			usize = isalloc(q, config_prof);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q, config_prof);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(p, size, q);
	JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
	return (ALLOCM_SUCCESS);
label_err:
	if (no_move) {
		UTRACE(p, size, q);
		return (ALLOCM_ERR_NOT_MOVED);
	}
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	UTRACE(p, size, 0);
	return (ALLOCM_ERR_OOM);
}
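/*
 * Illustrative usage only: attempting an in-place resize before falling
 * back to a moving reallocation.  With ALLOCM_NO_MOVE, rallocm() either
 * resizes *ptr in place to at least size bytes (ideally size+extra) or
 * returns ALLOCM_ERR_NOT_MOVED; it never relocates the allocation.
 *
 *	size_t rsize;
 *	if (rallocm(&p, &rsize, 8192, 4096, ALLOCM_NO_MOVE) !=
 *	    ALLOCM_SUCCESS) {
 *		if (rallocm(&p, &rsize, 8192, 0, 0) != ALLOCM_SUCCESS)
 *			abort();
 *	}
 */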

int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || IS_INITIALIZER);

	if (config_ivsalloc)
		sz = ivsalloc(ptr, config_prof);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr, config_prof);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

int
je_dallocm(void *ptr, int flags)
{
	size_t usize;
	size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	UTRACE(ptr, 0, 0);
	if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_prof && opt_prof) {
		if (config_stats == false && config_valgrind == false)
			usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && opt_valgrind)
		rzsize = p2rz(ptr);
	iqalloc(ptr);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);

	return (ALLOCM_SUCCESS);
}

int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Acquire all mutexes in a safe order. */
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	base_prefork();
	huge_prefork();
	chunk_dss_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_parent();
	huge_postfork_parent();
	base_postfork_parent();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
}

void
jemalloc_postfork_child(void)
{
	unsigned i;

	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	chunk_dss_postfork_child();
	huge_postfork_child();
	base_postfork_child();
	for (i = 0; i < narenas; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
}

/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

static void *
a0alloc(size_t size, bool zero)
{

	if (malloc_init())
		return (NULL);

	if (size == 0)
		size = 1;

	if (size <= arena_maxclass)
		return (arena_malloc(arenas[0], size, zero, false));
	else
		return (huge_malloc(size, zero));
}

void *
a0malloc(size_t size)
{

	return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

	return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
	arena_chunk_t *chunk;

	if (ptr == NULL)
		return;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, false);
	else
		huge_dalloc(ptr, true);
}

/******************************************************************************/