/*
 * xref: /freebsd/contrib/jemalloc/src/jemalloc.c
 * (revision cc16dea626cf2fc80cde667ac4798065108e596c)
 */
#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
const char	*__malloc_options_1_0 = NULL;
__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);

/* Runtime configuration options. */
const char	*je_malloc_conf;
bool	opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
bool	opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
size_t	opt_quarantine = ZU(0);
bool	opt_redzone = false;
bool	opt_utrace = false;
bool	opt_valgrind = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

unsigned	ncpus;

malloc_mutex_t		arenas_lock;
arena_t			**arenas;
unsigned		narenas_total;
unsigned		narenas_auto;

/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER	((unsigned long)0)
#  define INITIALIZER		pthread_self()
#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t		malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER	false
#  define INITIALIZER		true
#  define IS_INITIALIZER	malloc_initializer
static bool			malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t	init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif

#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
#  define UTRACE(a, b, c) do {						\
	if (opt_utrace) {						\
		int utrace_serrno = errno;				\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
		errno = utrace_serrno;					\
	}								\
} while (0)
#else
#  define UTRACE(a, b, c)
#endif
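/*
 * Illustrative note (not part of the original source): with
 * JEMALLOC_UTRACE defined and opt_utrace enabled, UTRACE(NULL, 16, p)
 * submits a malloc_utrace_t record {NULL, 16, p} describing
 * "malloc(16) returned p" via utrace(2); on FreeBSD such records can
 * typically be captured with ktrace(1) and inspected with kdump(1).
 */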

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas_auto > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas_auto;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0
		    || first_null == narenas_auto) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}
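/*
 * Worked example (illustrative, not from the original source): with
 * narenas_auto == 4, all slots initialized, and per-arena thread counts
 * {2, 1, 3, 1}, the scan above settles on choose == 1 (the first arena
 * with the lowest count).  Since first_null == narenas_auto, arenas[1]
 * is reused and its nthreads becomes 2.  Had arenas[3] instead been
 * NULL, first_null would be 3 and a new arena would be created via
 * arenas_extend(3).
 */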

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned narenas, i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else
		ret = (unsigned)result;

	return (ret);
}

void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

static JEMALLOC_ATTR(always_inline) void
malloc_thread_init(void)
{

	/*
	 * TSD initialization can't be safely done as a side effect of
	 * deallocation, because it is possible for a thread to do nothing but
	 * deallocate its TLS data via free(), in which case writing to TLS
	 * would cause write-after-free memory corruption.  The quarantine
	 * facility *only* gets used as a side effect of deallocation, so make
	 * a best effort attempt at initializing its TSD by hooking all
	 * allocation events.
	 */
	if (config_fill && opt_quarantine)
		quarantine_alloc_hook();
}

static JEMALLOC_ATTR(always_inline) bool
malloc_init(void)
{

	if (malloc_initialized == false && malloc_init_hard())
		return (true);
	malloc_thread_init();

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
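/*
 * Example (illustrative): given opts == "junk:true,quarantine:4096", the
 * first call yields k = "junk" (klen 4) and v = "true" (vlen 4) and
 * advances *opts_p past the comma; the second call yields
 * k = "quarantine" (klen 10) and v = "4096" (vlen 4) and leaves *opts_p
 * at the terminating NUL, so the caller's parsing loop ends.
 */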

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	/*
	 * Automatically configure valgrind before processing options.  The
	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
	 */
	if (config_valgrind) {
		opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
		if (config_fill && opt_valgrind) {
			opt_junk = false;
			assert(opt_zero == false);
			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
			opt_redzone = true;
		}
		if (config_tcache && opt_valgrind)
			opt_tcache = false;
	}

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
#ifndef _WIN32
			int linklen;
			const char *linkname =
#  ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#  else
			    "/etc/malloc.conf"
#  endif
			    ;

			if ((linklen = readlink(linkname, buf,
			    sizeof(buf) - 1)) != -1) {
				/*
				 * Use the contents of the "/etc/malloc.conf"
				 * symbolic link's name.
				 */
				buf[linklen] = '\0';
				opts = buf;
			} else
#endif
			{
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if (issetugid() == 0 && (opts = getenv(envname)) !=
			    NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

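		/*
		 * Illustrative note: the three passes are parsed in order of
		 * increasing priority -- the compiled-in je_malloc_conf
		 * string first, then the /etc/malloc.conf symlink target,
		 * then the MALLOC_CONF environment variable -- so a setting
		 * such as MALLOC_CONF="narenas:2" overrides the same key from
		 * the earlier sources simply because it is parsed last.
		 */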
		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(o, n)						\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				uintmax_t um;				\
				char *end;				\
									\
				set_errno(0);				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (clip) {			\
					if (um < min)			\
						o = min;		\
					else if (um > max)		\
						o = max;		\
					else				\
						o = um;			\
				} else {				\
					if (um < min || um > max) {	\
						malloc_conf_error(	\
						    "Out-of-range "	\
						    "conf value",	\
						    k, klen, v, vlen);	\
					} else				\
						o = um;			\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				set_errno(0);				\
				l = strtol(v, &end, 0);			\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

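			/*
			 * Illustrative note: with clip == true,
			 * CONF_HANDLE_SIZE_T silently clamps out-of-range
			 * values instead of rejecting them.  For example,
			 * "lg_chunk:10" on a 4 KiB page system
			 * (LG_PAGE == 12) is clamped up to the computed
			 * minimum below rather than producing a conf error.
			 */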
			CONF_HANDLE_BOOL(opt_abort, "abort")
			/*
			 * Chunks always require at least one header page, plus
			 * one data page in the absence of redzones, or three
			 * pages in the presence of redzones.  In order to
			 * simplify options processing, fix the limit based on
			 * config_fill.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
			    true)
			if (strncmp("dss", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < dss_prec_limit; i++) {
					if (strncmp(dss_prec_names[i], v, vlen)
					    == 0) {
						if (chunk_dss_prec_set(i)) {
							malloc_conf_error(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
							    dss_prec_names[i];
							match = true;
							break;
						}
					}
				}
				if (match == false) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
			    SIZE_T_MAX, false)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, "junk")
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX, false)
				CONF_HANDLE_BOOL(opt_redzone, "redzone")
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_valgrind) {
				CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache")
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32))
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas_total = narenas_auto = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_fill && quarantine_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
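	/*
	 * Illustrative example: on an 8-CPU system with no "narenas"
	 * setting, opt_narenas becomes 8 << 2 == 32, so up to 32 arenas may
	 * be created as threads are assigned by choose_arena_hard().
	 */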
	narenas_auto = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas_auto > chunksize / sizeof(arena_t *)) {
		narenas_auto = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas_auto);
	}
	narenas_total = narenas_auto;

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		ret = NULL;
		goto label_oom;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_oom;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(size);
		ret = imalloc(size);
	}

label_oom:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_NOINLINE
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto label_return;
		}

		usize = sa2u(size, alignment);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto label_return;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto label_return;
	}

	*memptr = result;
	ret = 0;

label_return:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	UTRACE(0, size, result);
	return (ret);
}

int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret = imemalign(memptr, alignment, size, sizeof(void *));
	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
	    config_prof), false);
	return (ret);
}

void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		set_errno(err);
	}
	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
	    false);
	return (ret);
}

void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}
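	/*
	 * Worked example of the shortcut above (illustrative): on a 64-bit
	 * system, SIZE_T_MAX << 32 masks the high half of a size_t, so when
	 * both num and size fit in 32 bits the mask test is 0 and the
	 * division is skipped entirely; the division only runs when an
	 * operand is large enough for overflow to be possible.
	 */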

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_return;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

label_return:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, num_size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
	return (ret);
}

void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_size = 0;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			assert(malloc_initialized || IS_INITIALIZER);
			if (config_prof) {
				old_size = isalloc(ptr, true);
				if (config_valgrind && opt_valgrind)
					old_rzsize = p2rz(ptr);
			} else if (config_stats) {
				old_size = isalloc(ptr, false);
				if (config_valgrind && opt_valgrind)
					old_rzsize = u2rz(old_size);
			} else if (config_valgrind && opt_valgrind) {
				old_size = isalloc(ptr, false);
				old_rzsize = u2rz(old_size);
			}
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			iqalloc(ptr);
			ret = NULL;
			goto label_return;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);
		malloc_thread_init();

		if (config_prof) {
			old_size = isalloc(ptr, true);
			if (config_valgrind && opt_valgrind)
				old_rzsize = p2rz(ptr);
		} else if (config_stats) {
			old_size = isalloc(ptr, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(ptr, false);
			old_rzsize = u2rz(old_size);
		}
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto label_oom;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats || (config_valgrind && opt_valgrind))
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

label_oom:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			set_errno(ENOMEM);
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats || (config_valgrind &&
				    opt_valgrind))
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			set_errno(ENOMEM);
		}
	}

label_return:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret, config_prof));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
	return (ret);
}

void
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (ptr != NULL) {
		size_t usize;
		size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof && opt_prof) {
			usize = isalloc(ptr, config_prof);
			prof_free(ptr, usize);
		} else if (config_stats || config_valgrind)
			usize = isalloc(ptr, config_prof);
		if (config_stats)
			thread_allocated_tsd_get()->deallocated += usize;
		if (config_valgrind && opt_valgrind)
			rzsize = p2rz(ptr);
		iqalloc(ptr);
		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)
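/*
 * Expansion sketch (illustrative): if jemalloc_defs.h contains
 * "#define je_malloc malloc", then is_malloc(je_malloc) expands through
 * is_malloc_(malloc) to malloc_is_malloc, which is defined above as 1;
 * otherwise it expands to the undefined token malloc_is_je_malloc, which
 * the preprocessor conditional below evaluates as 0.
 */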

#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

size_t
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
	size_t ret;

	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		ret = ivsalloc(ptr, config_prof);
	else
		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;

	return (ret);
}

void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

int
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

int
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

int
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
  void *newp, size_t newlen)
{

	if (malloc_init())
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin experimental functions.
 */
#ifdef JEMALLOC_EXPERIMENTAL

static JEMALLOC_ATTR(always_inline) void *
iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena)
{

	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
	    alignment)));

	if (alignment != 0)
		return (ipallocx(usize, alignment, zero, try_tcache, arena));
	else if (zero)
		return (icallocx(usize, try_tcache, arena));
	else
		return (imallocx(usize, try_tcache, arena));
}

int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;
	size_t usize;
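	/*
	 * Flag decoding below (illustrative note): the low
	 * ALLOCM_LG_ALIGN_MASK bits hold lg(alignment), so e.g.
	 * ALLOCM_LG_ALIGN(4) requests 16-byte alignment; when the field is
	 * 0, (ZU(1) << 0) & (SIZE_T_MAX-1) == 0, meaning "no alignment
	 * constraint".  The arena index, if any, is stored biased by 1 in
	 * bits 8 and up, hence the "- 1" when decoding arena_ind.
	 */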
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	arena_t *arena;
	bool try_tcache;

	assert(ptr != NULL);
	assert(size != 0);

	if (malloc_init())
		goto label_oom;

	if (arena_ind != UINT_MAX) {
		arena = arenas[arena_ind];
		try_tcache = false;
	} else {
		arena = NULL;
		try_tcache = true;
	}

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		goto label_oom;

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			size_t usize_promoted = (alignment == 0) ?
			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
			    alignment);
			assert(usize_promoted != 0);
			p = iallocm(usize_promoted, alignment, zero,
			    try_tcache, arena);
			if (p == NULL)
				goto label_oom;
			arena_prof_promoted(p, usize);
		} else {
			p = iallocm(usize, alignment, zero, try_tcache, arena);
			if (p == NULL)
				goto label_oom;
		}
		prof_malloc(p, usize, cnt);
	} else {
		p = iallocm(usize, alignment, zero, try_tcache, arena);
		if (p == NULL)
			goto label_oom;
	}
	if (rsize != NULL)
		*rsize = usize;

	*ptr = p;
	if (config_stats) {
		assert(usize == isalloc(p, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, p);
	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
	return (ALLOCM_SUCCESS);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in allocm(): "
		    "out of memory\n");
		abort();
	}
	*ptr = NULL;
	UTRACE(0, size, 0);
	return (ALLOCM_ERR_OOM);
}
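/*
 * Usage sketch for the experimental API (illustrative, assuming the
 * ALLOCM_* macros from the public jemalloc header):
 *
 *	void *p;
 *	size_t usize;
 *	if (allocm(&p, &usize, 4096, ALLOCM_ZERO | ALLOCM_LG_ALIGN(12)) ==
 *	    ALLOCM_SUCCESS) {
 *		// p is zeroed and 4096-byte aligned; usize >= 4096.
 *		dallocm(p, 0);
 *	}
 */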

int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	void *p, *q;
	size_t usize;
	size_t old_size;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & ALLOCM_ZERO;
	bool no_move = flags & ALLOCM_NO_MOVE;
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	bool try_tcache_alloc, try_tcache_dalloc;
	arena_t *arena;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (arena_ind != UINT_MAX) {
		arena_chunk_t *chunk;
		try_tcache_alloc = true;
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
		try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
		    arenas[arena_ind]);
		arena = arenas[arena_ind];
	} else {
		try_tcache_alloc = true;
		try_tcache_dalloc = true;
		arena = NULL;
	}

	p = *ptr;
	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		/*
		 * usize isn't knowable before iralloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment);
		prof_ctx_t *old_ctx = prof_ctx_get(p);
		old_size = isalloc(p, true);
		if (config_valgrind && opt_valgrind)
			old_rzsize = p2rz(p);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		if (cnt == NULL)
			goto label_oom;
		/*
		 * Use minimum usize to determine whether promotion may happen.
		 */
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
		    && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
		    <= SMALL_MAXCLASS) {
			q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
			    alignment, zero, no_move, try_tcache_alloc,
			    try_tcache_dalloc, arena);
			if (q == NULL)
				goto label_err;
			if (max_usize < PAGE) {
				usize = max_usize;
				arena_prof_promoted(q, usize);
			} else
				usize = isalloc(q, config_prof);
		} else {
			q = irallocx(p, size, extra, alignment, zero, no_move,
			    try_tcache_alloc, try_tcache_dalloc, arena);
			if (q == NULL)
				goto label_err;
			usize = isalloc(q, config_prof);
		}
		prof_realloc(q, usize, cnt, old_size, old_ctx);
		if (rsize != NULL)
			*rsize = usize;
	} else {
		if (config_stats) {
			old_size = isalloc(p, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(p, false);
			old_rzsize = u2rz(old_size);
		}
		q = irallocx(p, size, extra, alignment, zero, no_move,
		    try_tcache_alloc, try_tcache_dalloc, arena);
		if (q == NULL)
			goto label_err;
		if (config_stats)
			usize = isalloc(q, config_prof);
		if (rsize != NULL) {
			if (config_stats == false)
				usize = isalloc(q, config_prof);
			*rsize = usize;
		}
	}

	*ptr = q;
	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(p, size, q);
	JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
	return (ALLOCM_SUCCESS);
label_err:
	if (no_move) {
		UTRACE(p, size, q);
		return (ALLOCM_ERR_NOT_MOVED);
	}
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocm(): "
		    "out of memory\n");
		abort();
	}
	UTRACE(p, size, 0);
	return (ALLOCM_ERR_OOM);
}

int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{
	size_t sz;

	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		sz = ivsalloc(ptr, config_prof);
	else {
		assert(ptr != NULL);
		sz = isalloc(ptr, config_prof);
	}
	assert(rsize != NULL);
	*rsize = sz;

	return (ALLOCM_SUCCESS);
}

int
je_dallocm(void *ptr, int flags)
{
	size_t usize;
	size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	bool try_tcache;

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	if (arena_ind != UINT_MAX) {
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		try_tcache = (chunk == ptr || chunk->arena !=
		    arenas[arena_ind]);
	} else
		try_tcache = true;

	UTRACE(ptr, 0, 0);
	if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_prof && opt_prof) {
		if (config_stats == false && config_valgrind == false)
			usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	if (config_valgrind && opt_valgrind)
		rzsize = p2rz(ptr);
	iqallocx(ptr, try_tcache);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);

	return (ALLOCM_SUCCESS);
}

int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;
	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));

	assert(size != 0);

	if (malloc_init())
		return (ALLOCM_ERR_OOM);

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);

	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}
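/*
 * Illustrative note: nallocm() reports the usable size that an allocm()
 * call with the same size and flags would produce, without allocating.
 * For example, nallocm(&usize, 100, 0) stores the size class a 100-byte
 * request rounds up to (112 under the default quantum-spaced small size
 * classes here), which is exactly what s2u() computes.
 */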

#endif
/*
 * End experimental functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be
 * possible to trigger the deadlock described above, but doing so would
 * involve forking via a library constructor that runs before jemalloc's
 * runs.
 */
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

	malloc_init();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Acquire all mutexes in a safe order. */
	ctl_prefork();
	prof_prefork();
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	chunk_prefork();
	base_prefork();
	huge_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	huge_postfork_parent();
	base_postfork_parent();
	chunk_postfork_parent();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
	prof_postfork_parent();
	ctl_postfork_parent();
}

void
jemalloc_postfork_child(void)
{
	unsigned i;

	assert(malloc_initialized);

	/* Release all mutexes, now that fork() has completed. */
	huge_postfork_child();
	base_postfork_child();
	chunk_postfork_child();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
	prof_postfork_child();
	ctl_postfork_child();
}

/******************************************************************************/
/*
 * The following functions are used for TLS allocation/deallocation in static
 * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
 * is that these avoid accessing TLS variables.
 */

static void *
a0alloc(size_t size, bool zero)
{

	if (malloc_init())
		return (NULL);

	if (size == 0)
		size = 1;

	if (size <= arena_maxclass)
		return (arena_malloc(arenas[0], size, zero, false));
	else
		return (huge_malloc(size, zero));
}

void *
a0malloc(size_t size)
{

	return (a0alloc(size, false));
}

void *
a0calloc(size_t num, size_t size)
{

	return (a0alloc(num * size, true));
}

void
a0free(void *ptr)
{
	arena_chunk_t *chunk;

	if (ptr == NULL)
		return;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
	if (chunk != ptr)
		arena_dalloc(chunk->arena, chunk, ptr, false);
	else
		huge_dalloc(ptr, true);
}

/******************************************************************************/
1874