xref: /freebsd/contrib/jemalloc/src/jemalloc.c (revision 88ad2f8dccdd7a04f3687ff79efd55e68912a611)
1a4bd5210SJason Evans #define	JEMALLOC_C_
2a4bd5210SJason Evans #include "jemalloc/internal/jemalloc_internal.h"
3a4bd5210SJason Evans 
4a4bd5210SJason Evans /******************************************************************************/
5a4bd5210SJason Evans /* Data. */
6a4bd5210SJason Evans 
7a4bd5210SJason Evans malloc_tsd_data(, arenas, arena_t *, NULL)
8a4bd5210SJason Evans malloc_tsd_data(, thread_allocated, thread_allocated_t,
9a4bd5210SJason Evans     THREAD_ALLOCATED_INITIALIZER)
10a4bd5210SJason Evans 
114fdb8d2aSDimitry Andric /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
124fdb8d2aSDimitry Andric const char	*__malloc_options_1_0 = NULL;
13a4bd5210SJason Evans __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
14a4bd5210SJason Evans 
15a4bd5210SJason Evans /* Runtime configuration options. */
16e722f8f8SJason Evans const char	*je_malloc_conf;
17*88ad2f8dSJason Evans bool	opt_abort =
18a4bd5210SJason Evans #ifdef JEMALLOC_DEBUG
19*88ad2f8dSJason Evans     true
20a4bd5210SJason Evans #else
21*88ad2f8dSJason Evans     false
22a4bd5210SJason Evans #endif
23*88ad2f8dSJason Evans     ;
24*88ad2f8dSJason Evans bool	opt_junk =
25*88ad2f8dSJason Evans #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
26*88ad2f8dSJason Evans     true
27a4bd5210SJason Evans #else
28*88ad2f8dSJason Evans     false
29a4bd5210SJason Evans #endif
30*88ad2f8dSJason Evans     ;
31a4bd5210SJason Evans size_t	opt_quarantine = ZU(0);
32a4bd5210SJason Evans bool	opt_redzone = false;
33a4bd5210SJason Evans bool	opt_utrace = false;
34a4bd5210SJason Evans bool	opt_valgrind = false;
35a4bd5210SJason Evans bool	opt_xmalloc = false;
36a4bd5210SJason Evans bool	opt_zero = false;
37a4bd5210SJason Evans size_t	opt_narenas = 0;
38a4bd5210SJason Evans 
39a4bd5210SJason Evans unsigned	ncpus;
40a4bd5210SJason Evans 
41a4bd5210SJason Evans malloc_mutex_t		arenas_lock;
42a4bd5210SJason Evans arena_t			**arenas;
4382872ac0SJason Evans unsigned		narenas_total;
4482872ac0SJason Evans unsigned		narenas_auto;
45a4bd5210SJason Evans 
46a4bd5210SJason Evans /* Set to true once the allocator has been initialized. */
47a4bd5210SJason Evans static bool		malloc_initialized = false;
48a4bd5210SJason Evans 
49a4bd5210SJason Evans #ifdef JEMALLOC_THREADED_INIT
50a4bd5210SJason Evans /* Used to let the initializing thread recursively allocate. */
51a4bd5210SJason Evans #  define NO_INITIALIZER	((unsigned long)0)
52a4bd5210SJason Evans #  define INITIALIZER		pthread_self()
53a4bd5210SJason Evans #  define IS_INITIALIZER	(malloc_initializer == pthread_self())
54a4bd5210SJason Evans static pthread_t		malloc_initializer = NO_INITIALIZER;
55a4bd5210SJason Evans #else
56a4bd5210SJason Evans #  define NO_INITIALIZER	false
57a4bd5210SJason Evans #  define INITIALIZER		true
58a4bd5210SJason Evans #  define IS_INITIALIZER	malloc_initializer
59a4bd5210SJason Evans static bool			malloc_initializer = NO_INITIALIZER;
60a4bd5210SJason Evans #endif
61a4bd5210SJason Evans 
62a4bd5210SJason Evans /* Used to avoid initialization races. */
63e722f8f8SJason Evans #ifdef _WIN32
64e722f8f8SJason Evans static malloc_mutex_t	init_lock;
65e722f8f8SJason Evans 
66e722f8f8SJason Evans JEMALLOC_ATTR(constructor)
67e722f8f8SJason Evans static void WINAPI
68e722f8f8SJason Evans _init_init_lock(void)
69e722f8f8SJason Evans {
70e722f8f8SJason Evans 
71e722f8f8SJason Evans 	malloc_mutex_init(&init_lock);
72e722f8f8SJason Evans }
73e722f8f8SJason Evans 
74e722f8f8SJason Evans #ifdef _MSC_VER
75e722f8f8SJason Evans #  pragma section(".CRT$XCU", read)
76e722f8f8SJason Evans JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
77e722f8f8SJason Evans static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
78e722f8f8SJason Evans #endif
79e722f8f8SJason Evans 
80e722f8f8SJason Evans #else
81a4bd5210SJason Evans static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
82e722f8f8SJason Evans #endif
83a4bd5210SJason Evans 
84a4bd5210SJason Evans typedef struct {
85a4bd5210SJason Evans 	void	*p;	/* Input pointer (as in realloc(p, s)). */
86a4bd5210SJason Evans 	size_t	s;	/* Request size. */
87a4bd5210SJason Evans 	void	*r;	/* Result pointer. */
88a4bd5210SJason Evans } malloc_utrace_t;
89a4bd5210SJason Evans 
90a4bd5210SJason Evans #ifdef JEMALLOC_UTRACE
91a4bd5210SJason Evans #  define UTRACE(a, b, c) do {						\
92a4bd5210SJason Evans 	if (opt_utrace) {						\
93*88ad2f8dSJason Evans 		int utrace_serrno = errno;				\
94a4bd5210SJason Evans 		malloc_utrace_t ut;					\
95a4bd5210SJason Evans 		ut.p = (a);						\
96a4bd5210SJason Evans 		ut.s = (b);						\
97a4bd5210SJason Evans 		ut.r = (c);						\
98a4bd5210SJason Evans 		utrace(&ut, sizeof(ut));				\
99*88ad2f8dSJason Evans 		errno = utrace_serrno;					\
100a4bd5210SJason Evans 	}								\
101a4bd5210SJason Evans } while (0)
102a4bd5210SJason Evans #else
103a4bd5210SJason Evans #  define UTRACE(a, b, c)
104a4bd5210SJason Evans #endif
105a4bd5210SJason Evans 
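/*
 * Illustrative sketch (hypothetical code, not part of jemalloc): UTRACE()
 * saves errno before calling utrace(2) and restores it afterwards, so tracing
 * a failed allocation cannot clobber the ENOMEM the caller is about to
 * inspect.  The helper below shows the same save/restore pattern as
 * stand-alone code; utrace(2) is FreeBSD-specific, and records emitted this
 * way can be captured with ktrace(1) and inspected with kdump(1).
 */
#if 0
#include <sys/types.h>
#include <sys/ktrace.h>
#include <errno.h>

typedef struct {
	void	*p;	/* Input pointer, as in the struct above. */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} example_utrace_t;

static void
example_trace(void *p, size_t s, void *r)
{
	int serrno = errno;		/* Preserve caller-visible errno. */
	example_utrace_t ut = { p, s, r };

	(void)utrace(&ut, sizeof(ut));
	errno = serrno;			/* Tracing must not perturb errno. */
}
#endif
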
106a4bd5210SJason Evans /******************************************************************************/
107a4bd5210SJason Evans /* Function prototypes for non-inline static functions. */
108a4bd5210SJason Evans 
109a4bd5210SJason Evans static void	stats_print_atexit(void);
110a4bd5210SJason Evans static unsigned	malloc_ncpus(void);
111a4bd5210SJason Evans static bool	malloc_conf_next(char const **opts_p, char const **k_p,
112a4bd5210SJason Evans     size_t *klen_p, char const **v_p, size_t *vlen_p);
113a4bd5210SJason Evans static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
114a4bd5210SJason Evans     const char *v, size_t vlen);
115a4bd5210SJason Evans static void	malloc_conf_init(void);
116a4bd5210SJason Evans static bool	malloc_init_hard(void);
117a4bd5210SJason Evans static int	imemalign(void **memptr, size_t alignment, size_t size,
118a4bd5210SJason Evans     size_t min_alignment);
119a4bd5210SJason Evans 
120a4bd5210SJason Evans /******************************************************************************/
121a4bd5210SJason Evans /*
122a4bd5210SJason Evans  * Begin miscellaneous support functions.
123a4bd5210SJason Evans  */
124a4bd5210SJason Evans 
125a4bd5210SJason Evans /* Create a new arena and insert it into the arenas array at index ind. */
126a4bd5210SJason Evans arena_t *
127a4bd5210SJason Evans arenas_extend(unsigned ind)
128a4bd5210SJason Evans {
129a4bd5210SJason Evans 	arena_t *ret;
130a4bd5210SJason Evans 
131a4bd5210SJason Evans 	ret = (arena_t *)base_alloc(sizeof(arena_t));
132a4bd5210SJason Evans 	if (ret != NULL && arena_new(ret, ind) == false) {
133a4bd5210SJason Evans 		arenas[ind] = ret;
134a4bd5210SJason Evans 		return (ret);
135a4bd5210SJason Evans 	}
136a4bd5210SJason Evans 	/* Only reached if there is an OOM error. */
137a4bd5210SJason Evans 
138a4bd5210SJason Evans 	/*
139a4bd5210SJason Evans 	 * OOM here is quite inconvenient to propagate, since dealing with it
140a4bd5210SJason Evans 	 * would require a check for failure in the fast path.  Instead, punt
141a4bd5210SJason Evans 	 * by using arenas[0].  In practice, this is an extremely unlikely
142a4bd5210SJason Evans 	 * failure.
143a4bd5210SJason Evans 	 */
144a4bd5210SJason Evans 	malloc_write("<jemalloc>: Error initializing arena\n");
145a4bd5210SJason Evans 	if (opt_abort)
146a4bd5210SJason Evans 		abort();
147a4bd5210SJason Evans 
148a4bd5210SJason Evans 	return (arenas[0]);
149a4bd5210SJason Evans }
150a4bd5210SJason Evans 
151a4bd5210SJason Evans /* Slow path, called only by choose_arena(). */
152a4bd5210SJason Evans arena_t *
153a4bd5210SJason Evans choose_arena_hard(void)
154a4bd5210SJason Evans {
155a4bd5210SJason Evans 	arena_t *ret;
156a4bd5210SJason Evans 
15782872ac0SJason Evans 	if (narenas_auto > 1) {
158a4bd5210SJason Evans 		unsigned i, choose, first_null;
159a4bd5210SJason Evans 
160a4bd5210SJason Evans 		choose = 0;
16182872ac0SJason Evans 		first_null = narenas_auto;
162a4bd5210SJason Evans 		malloc_mutex_lock(&arenas_lock);
163a4bd5210SJason Evans 		assert(arenas[0] != NULL);
16482872ac0SJason Evans 		for (i = 1; i < narenas_auto; i++) {
165a4bd5210SJason Evans 			if (arenas[i] != NULL) {
166a4bd5210SJason Evans 				/*
167a4bd5210SJason Evans 				 * Choose the first arena that has the lowest
168a4bd5210SJason Evans 				 * number of threads assigned to it.
169a4bd5210SJason Evans 				 */
170a4bd5210SJason Evans 				if (arenas[i]->nthreads <
171a4bd5210SJason Evans 				    arenas[choose]->nthreads)
172a4bd5210SJason Evans 					choose = i;
17382872ac0SJason Evans 			} else if (first_null == narenas_auto) {
174a4bd5210SJason Evans 				/*
175a4bd5210SJason Evans 				 * Record the index of the first uninitialized
176a4bd5210SJason Evans 				 * arena, in case all extant arenas are in use.
177a4bd5210SJason Evans 				 *
178a4bd5210SJason Evans 				 * NB: It is possible for there to be
179a4bd5210SJason Evans 				 * discontinuities in terms of initialized
180a4bd5210SJason Evans 				 * versus uninitialized arenas, due to the
181a4bd5210SJason Evans 				 * "thread.arena" mallctl.
182a4bd5210SJason Evans 				 */
183a4bd5210SJason Evans 				first_null = i;
184a4bd5210SJason Evans 			}
185a4bd5210SJason Evans 		}
186a4bd5210SJason Evans 
18782872ac0SJason Evans 		if (arenas[choose]->nthreads == 0
18882872ac0SJason Evans 		    || first_null == narenas_auto) {
189a4bd5210SJason Evans 			/*
190a4bd5210SJason Evans 			 * Use an unloaded arena, or the least loaded arena if
191a4bd5210SJason Evans 			 * all arenas are already initialized.
192a4bd5210SJason Evans 			 */
193a4bd5210SJason Evans 			ret = arenas[choose];
194a4bd5210SJason Evans 		} else {
195a4bd5210SJason Evans 			/* Initialize a new arena. */
196a4bd5210SJason Evans 			ret = arenas_extend(first_null);
197a4bd5210SJason Evans 		}
198a4bd5210SJason Evans 		ret->nthreads++;
199a4bd5210SJason Evans 		malloc_mutex_unlock(&arenas_lock);
200a4bd5210SJason Evans 	} else {
201a4bd5210SJason Evans 		ret = arenas[0];
202a4bd5210SJason Evans 		malloc_mutex_lock(&arenas_lock);
203a4bd5210SJason Evans 		ret->nthreads++;
204a4bd5210SJason Evans 		malloc_mutex_unlock(&arenas_lock);
205a4bd5210SJason Evans 	}
206a4bd5210SJason Evans 
207a4bd5210SJason Evans 	arenas_tsd_set(&ret);
208a4bd5210SJason Evans 
209a4bd5210SJason Evans 	return (ret);
210a4bd5210SJason Evans }
211a4bd5210SJason Evans 
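/*
 * Illustrative sketch (hypothetical code, not part of jemalloc): the policy
 * above amounts to "take an already-initialized arena if one is completely
 * idle or if every slot is initialized, preferring the least-loaded one;
 * otherwise initialize the first empty slot".  The stand-alone function below
 * reproduces that scan over a plain array of per-slot thread counts, where -1
 * marks an uninitialized slot and slot 0 is assumed initialized, mirroring
 * arenas[0].
 */
#if 0
#include <stddef.h>

static size_t
pick_slot(const int *nthreads, size_t n)
{
	size_t i, choose = 0, first_null = n;

	for (i = 1; i < n; i++) {
		if (nthreads[i] >= 0) {
			if (nthreads[i] < nthreads[choose])
				choose = i;	/* Track least-loaded arena. */
		} else if (first_null == n)
			first_null = i;		/* First uninitialized slot. */
	}
	if (nthreads[choose] == 0 || first_null == n)
		return (choose);	/* Idle arena, or nothing left to create. */
	return (first_null);		/* Otherwise create a new arena here. */
}
#endif
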
212a4bd5210SJason Evans static void
213a4bd5210SJason Evans stats_print_atexit(void)
214a4bd5210SJason Evans {
215a4bd5210SJason Evans 
216a4bd5210SJason Evans 	if (config_tcache && config_stats) {
21782872ac0SJason Evans 		unsigned narenas, i;
218a4bd5210SJason Evans 
219a4bd5210SJason Evans 		/*
220a4bd5210SJason Evans 		 * Merge stats from extant threads.  This is racy, since
221a4bd5210SJason Evans 		 * individual threads do not lock when recording tcache stats
222a4bd5210SJason Evans 		 * events.  As a consequence, the final stats may be slightly
223a4bd5210SJason Evans 		 * out of date by the time they are reported, if other threads
224a4bd5210SJason Evans 		 * continue to allocate.
225a4bd5210SJason Evans 		 */
22682872ac0SJason Evans 		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
227a4bd5210SJason Evans 			arena_t *arena = arenas[i];
228a4bd5210SJason Evans 			if (arena != NULL) {
229a4bd5210SJason Evans 				tcache_t *tcache;
230a4bd5210SJason Evans 
231a4bd5210SJason Evans 				/*
232a4bd5210SJason Evans 				 * tcache_stats_merge() locks bins, so if any
233a4bd5210SJason Evans 				 * code is introduced that acquires both arena
234a4bd5210SJason Evans 				 * and bin locks in the opposite order,
235a4bd5210SJason Evans 				 * deadlocks may result.
236a4bd5210SJason Evans 				 */
237a4bd5210SJason Evans 				malloc_mutex_lock(&arena->lock);
238a4bd5210SJason Evans 				ql_foreach(tcache, &arena->tcache_ql, link) {
239a4bd5210SJason Evans 					tcache_stats_merge(tcache, arena);
240a4bd5210SJason Evans 				}
241a4bd5210SJason Evans 				malloc_mutex_unlock(&arena->lock);
242a4bd5210SJason Evans 			}
243a4bd5210SJason Evans 		}
244a4bd5210SJason Evans 	}
245a4bd5210SJason Evans 	je_malloc_stats_print(NULL, NULL, NULL);
246a4bd5210SJason Evans }
247a4bd5210SJason Evans 
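/*
 * Usage sketch (illustrative, not part of jemalloc): the atexit hook above is
 * only installed when the "stats_print" option is set, e.g. by running a
 * program with MALLOC_CONF=stats_print:true.  The same report can be
 * requested explicitly through the public entry point; the prototype is
 * written out by hand here, and the unprefixed name assumes a build without
 * JEMALLOC_PREFIX.
 */
#if 0
#include <stdlib.h>

void	malloc_stats_print(void (*write_cb)(void *, const char *),
    void *cbopaque, const char *opts);

int
main(void)
{
	void *p = malloc(4096);

	malloc_stats_print(NULL, NULL, NULL);	/* Report goes to stderr. */
	free(p);
	return (0);
}
#endif
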
248a4bd5210SJason Evans /*
249a4bd5210SJason Evans  * End miscellaneous support functions.
250a4bd5210SJason Evans  */
251a4bd5210SJason Evans /******************************************************************************/
252a4bd5210SJason Evans /*
253a4bd5210SJason Evans  * Begin initialization functions.
254a4bd5210SJason Evans  */
255a4bd5210SJason Evans 
256a4bd5210SJason Evans static unsigned
257a4bd5210SJason Evans malloc_ncpus(void)
258a4bd5210SJason Evans {
259a4bd5210SJason Evans 	unsigned ret;
260a4bd5210SJason Evans 	long result;
261a4bd5210SJason Evans 
262e722f8f8SJason Evans #ifdef _WIN32
263e722f8f8SJason Evans 	SYSTEM_INFO si;
264e722f8f8SJason Evans 	GetSystemInfo(&si);
265e722f8f8SJason Evans 	result = si.dwNumberOfProcessors;
266e722f8f8SJason Evans #else
267a4bd5210SJason Evans 	result = sysconf(_SC_NPROCESSORS_ONLN);
26882872ac0SJason Evans #endif
269a4bd5210SJason Evans 	if (result == -1) {
270a4bd5210SJason Evans 		/* Error. */
271a4bd5210SJason Evans 		ret = 1;
27282872ac0SJason Evans 	} else {
273a4bd5210SJason Evans 		ret = (unsigned)result;
27482872ac0SJason Evans 	}
275a4bd5210SJason Evans 
276a4bd5210SJason Evans 	return (ret);
277a4bd5210SJason Evans }
278a4bd5210SJason Evans 
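/*
 * Illustrative sketch: the same "query the online CPU count, fall back to 1
 * on error" probe as a stand-alone program.  Only the POSIX branch is shown;
 * the _WIN32 GetSystemInfo() path above is analogous.  The CPU count feeds
 * the default arena count chosen later in malloc_init_hard().
 */
#if 0
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	long result = sysconf(_SC_NPROCESSORS_ONLN);
	unsigned ncpus = (result == -1) ? 1 : (unsigned)result;

	printf("ncpus: %u (default narenas would be %u)\n", ncpus,
	    ncpus > 1 ? ncpus << 2 : 1);
	return (0);
}
#endif
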
279a4bd5210SJason Evans void
280a4bd5210SJason Evans arenas_cleanup(void *arg)
281a4bd5210SJason Evans {
282a4bd5210SJason Evans 	arena_t *arena = *(arena_t **)arg;
283a4bd5210SJason Evans 
284a4bd5210SJason Evans 	malloc_mutex_lock(&arenas_lock);
285a4bd5210SJason Evans 	arena->nthreads--;
286a4bd5210SJason Evans 	malloc_mutex_unlock(&arenas_lock);
287a4bd5210SJason Evans }
288a4bd5210SJason Evans 
289*88ad2f8dSJason Evans static JEMALLOC_ATTR(always_inline) bool
290a4bd5210SJason Evans malloc_init(void)
291a4bd5210SJason Evans {
292a4bd5210SJason Evans 
293a4bd5210SJason Evans 	if (malloc_initialized == false)
294a4bd5210SJason Evans 		return (malloc_init_hard());
295a4bd5210SJason Evans 
296a4bd5210SJason Evans 	return (false);
297a4bd5210SJason Evans }
298a4bd5210SJason Evans 
299a4bd5210SJason Evans static bool
300a4bd5210SJason Evans malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
301a4bd5210SJason Evans     char const **v_p, size_t *vlen_p)
302a4bd5210SJason Evans {
303a4bd5210SJason Evans 	bool accept;
304a4bd5210SJason Evans 	const char *opts = *opts_p;
305a4bd5210SJason Evans 
306a4bd5210SJason Evans 	*k_p = opts;
307a4bd5210SJason Evans 
308a4bd5210SJason Evans 	for (accept = false; accept == false;) {
309a4bd5210SJason Evans 		switch (*opts) {
310a4bd5210SJason Evans 		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
311a4bd5210SJason Evans 		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
312a4bd5210SJason Evans 		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
313a4bd5210SJason Evans 		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
314a4bd5210SJason Evans 		case 'Y': case 'Z':
315a4bd5210SJason Evans 		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
316a4bd5210SJason Evans 		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
317a4bd5210SJason Evans 		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
318a4bd5210SJason Evans 		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
319a4bd5210SJason Evans 		case 'y': case 'z':
320a4bd5210SJason Evans 		case '0': case '1': case '2': case '3': case '4': case '5':
321a4bd5210SJason Evans 		case '6': case '7': case '8': case '9':
322a4bd5210SJason Evans 		case '_':
323a4bd5210SJason Evans 			opts++;
324a4bd5210SJason Evans 			break;
325a4bd5210SJason Evans 		case ':':
326a4bd5210SJason Evans 			opts++;
327a4bd5210SJason Evans 			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
328a4bd5210SJason Evans 			*v_p = opts;
329a4bd5210SJason Evans 			accept = true;
330a4bd5210SJason Evans 			break;
331a4bd5210SJason Evans 		case '\0':
332a4bd5210SJason Evans 			if (opts != *opts_p) {
333a4bd5210SJason Evans 				malloc_write("<jemalloc>: Conf string ends "
334a4bd5210SJason Evans 				    "with key\n");
335a4bd5210SJason Evans 			}
336a4bd5210SJason Evans 			return (true);
337a4bd5210SJason Evans 		default:
338a4bd5210SJason Evans 			malloc_write("<jemalloc>: Malformed conf string\n");
339a4bd5210SJason Evans 			return (true);
340a4bd5210SJason Evans 		}
341a4bd5210SJason Evans 	}
342a4bd5210SJason Evans 
343a4bd5210SJason Evans 	for (accept = false; accept == false;) {
344a4bd5210SJason Evans 		switch (*opts) {
345a4bd5210SJason Evans 		case ',':
346a4bd5210SJason Evans 			opts++;
347a4bd5210SJason Evans 			/*
348a4bd5210SJason Evans 			 * Look ahead one character here, because the next time
349a4bd5210SJason Evans 			 * this function is called, it will assume that end of
350a4bd5210SJason Evans 			 * input has been cleanly reached if no input remains,
351a4bd5210SJason Evans 			 * but we have optimistically already consumed the
352a4bd5210SJason Evans 			 * comma if one exists.
353a4bd5210SJason Evans 			 */
354a4bd5210SJason Evans 			if (*opts == '\0') {
355a4bd5210SJason Evans 				malloc_write("<jemalloc>: Conf string ends "
356a4bd5210SJason Evans 				    "with comma\n");
357a4bd5210SJason Evans 			}
358a4bd5210SJason Evans 			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
359a4bd5210SJason Evans 			accept = true;
360a4bd5210SJason Evans 			break;
361a4bd5210SJason Evans 		case '\0':
362a4bd5210SJason Evans 			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
363a4bd5210SJason Evans 			accept = true;
364a4bd5210SJason Evans 			break;
365a4bd5210SJason Evans 		default:
366a4bd5210SJason Evans 			opts++;
367a4bd5210SJason Evans 			break;
368a4bd5210SJason Evans 		}
369a4bd5210SJason Evans 	}
370a4bd5210SJason Evans 
371a4bd5210SJason Evans 	*opts_p = opts;
372a4bd5210SJason Evans 	return (false);
373a4bd5210SJason Evans }
374a4bd5210SJason Evans 
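/*
 * Illustrative sketch (hypothetical helper, not the jemalloc parser): the
 * option string accepted above is a comma-separated list of "key:value"
 * pairs with [A-Za-z0-9_] keys, e.g. "abort:true,lg_chunk:22,narenas:4".
 * The loop below splits such a string the same way, yielding pointer/length
 * pairs so that no allocation is needed while the allocator is still
 * bootstrapping.
 */
#if 0
#include <stdio.h>
#include <string.h>

int
main(void)
{
	const char *opts = "abort:true,lg_chunk:22,narenas:4";
	const char *p = opts;

	while (*p != '\0') {
		const char *colon = strchr(p, ':');
		const char *comma;
		size_t klen, vlen;

		if (colon == NULL)
			break;		/* Malformed: key without value. */
		comma = strchr(colon + 1, ',');
		klen = (size_t)(colon - p);
		vlen = (comma != NULL) ? (size_t)(comma - colon - 1) :
		    strlen(colon + 1);
		printf("%.*s = %.*s\n", (int)klen, p, (int)vlen, colon + 1);
		p = (comma != NULL) ? comma + 1 : colon + 1 + vlen;
	}
	return (0);
}
#endif
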
375a4bd5210SJason Evans static void
376a4bd5210SJason Evans malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
377a4bd5210SJason Evans     size_t vlen)
378a4bd5210SJason Evans {
379a4bd5210SJason Evans 
380a4bd5210SJason Evans 	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
381a4bd5210SJason Evans 	    (int)vlen, v);
382a4bd5210SJason Evans }
383a4bd5210SJason Evans 
384a4bd5210SJason Evans static void
385a4bd5210SJason Evans malloc_conf_init(void)
386a4bd5210SJason Evans {
387a4bd5210SJason Evans 	unsigned i;
388a4bd5210SJason Evans 	char buf[PATH_MAX + 1];
389a4bd5210SJason Evans 	const char *opts, *k, *v;
390a4bd5210SJason Evans 	size_t klen, vlen;
391a4bd5210SJason Evans 
39282872ac0SJason Evans 	/*
39382872ac0SJason Evans 	 * Automatically configure valgrind before processing options.  The
39482872ac0SJason Evans 	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
39582872ac0SJason Evans 	 */
39682872ac0SJason Evans 	if (config_valgrind) {
39782872ac0SJason Evans 		opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
39882872ac0SJason Evans 		if (config_fill && opt_valgrind) {
39982872ac0SJason Evans 			opt_junk = false;
40082872ac0SJason Evans 			assert(opt_zero == false);
40182872ac0SJason Evans 			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
40282872ac0SJason Evans 			opt_redzone = true;
40382872ac0SJason Evans 		}
40482872ac0SJason Evans 		if (config_tcache && opt_valgrind)
40582872ac0SJason Evans 			opt_tcache = false;
40682872ac0SJason Evans 	}
40782872ac0SJason Evans 
408a4bd5210SJason Evans 	for (i = 0; i < 3; i++) {
409a4bd5210SJason Evans 		/* Get runtime configuration. */
410a4bd5210SJason Evans 		switch (i) {
411a4bd5210SJason Evans 		case 0:
412a4bd5210SJason Evans 			if (je_malloc_conf != NULL) {
413a4bd5210SJason Evans 				/*
414a4bd5210SJason Evans 				 * Use options that were compiled into the
415a4bd5210SJason Evans 				 * program.
416a4bd5210SJason Evans 				 */
417a4bd5210SJason Evans 				opts = je_malloc_conf;
418a4bd5210SJason Evans 			} else {
419a4bd5210SJason Evans 				/* No configuration specified. */
420a4bd5210SJason Evans 				buf[0] = '\0';
421a4bd5210SJason Evans 				opts = buf;
422a4bd5210SJason Evans 			}
423a4bd5210SJason Evans 			break;
424a4bd5210SJason Evans 		case 1: {
425e722f8f8SJason Evans #ifndef _WIN32
426a4bd5210SJason Evans 			int linklen;
427a4bd5210SJason Evans 			const char *linkname =
428a4bd5210SJason Evans #  ifdef JEMALLOC_PREFIX
429a4bd5210SJason Evans 			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
430a4bd5210SJason Evans #  else
431a4bd5210SJason Evans 			    "/etc/malloc.conf"
432a4bd5210SJason Evans #  endif
433a4bd5210SJason Evans 			    ;
434a4bd5210SJason Evans 
435a4bd5210SJason Evans 			if ((linklen = readlink(linkname, buf,
436a4bd5210SJason Evans 			    sizeof(buf) - 1)) != -1) {
437a4bd5210SJason Evans 				/*
438a4bd5210SJason Evans 				 * Use the contents of the "/etc/malloc.conf"
439a4bd5210SJason Evans 				 * symbolic link's name.
440a4bd5210SJason Evans 				 */
441a4bd5210SJason Evans 				buf[linklen] = '\0';
442a4bd5210SJason Evans 				opts = buf;
443e722f8f8SJason Evans 			} else
444e722f8f8SJason Evans #endif
445e722f8f8SJason Evans 			{
446a4bd5210SJason Evans 				/* No configuration specified. */
447a4bd5210SJason Evans 				buf[0] = '\0';
448a4bd5210SJason Evans 				opts = buf;
449a4bd5210SJason Evans 			}
450a4bd5210SJason Evans 			break;
451a4bd5210SJason Evans 		} case 2: {
452a4bd5210SJason Evans 			const char *envname =
453a4bd5210SJason Evans #ifdef JEMALLOC_PREFIX
454a4bd5210SJason Evans 			    JEMALLOC_CPREFIX"MALLOC_CONF"
455a4bd5210SJason Evans #else
456a4bd5210SJason Evans 			    "MALLOC_CONF"
457a4bd5210SJason Evans #endif
458a4bd5210SJason Evans 			    ;
459a4bd5210SJason Evans 
460a4bd5210SJason Evans 			if (issetugid() == 0 && (opts = getenv(envname)) !=
461a4bd5210SJason Evans 			    NULL) {
462a4bd5210SJason Evans 				/*
463a4bd5210SJason Evans 				 * Do nothing; opts is already initialized to
464a4bd5210SJason Evans 				 * the value of the MALLOC_CONF environment
465a4bd5210SJason Evans 				 * variable.
466a4bd5210SJason Evans 				 */
467a4bd5210SJason Evans 			} else {
468a4bd5210SJason Evans 				/* No configuration specified. */
469a4bd5210SJason Evans 				buf[0] = '\0';
470a4bd5210SJason Evans 				opts = buf;
471a4bd5210SJason Evans 			}
472a4bd5210SJason Evans 			break;
473a4bd5210SJason Evans 		} default:
474a4bd5210SJason Evans 			/* NOTREACHED */
475a4bd5210SJason Evans 			assert(false);
476a4bd5210SJason Evans 			buf[0] = '\0';
477a4bd5210SJason Evans 			opts = buf;
478a4bd5210SJason Evans 		}
479a4bd5210SJason Evans 
480a4bd5210SJason Evans 		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
481a4bd5210SJason Evans 		    &vlen) == false) {
482*88ad2f8dSJason Evans #define	CONF_HANDLE_BOOL(o, n)						\
4838ed34ab0SJason Evans 			if (sizeof(n)-1 == klen && strncmp(n, k,	\
484a4bd5210SJason Evans 			    klen) == 0) {				\
485a4bd5210SJason Evans 				if (strncmp("true", v, vlen) == 0 &&	\
486a4bd5210SJason Evans 				    vlen == sizeof("true")-1)		\
487a4bd5210SJason Evans 					o = true;			\
488a4bd5210SJason Evans 				else if (strncmp("false", v, vlen) ==	\
489a4bd5210SJason Evans 				    0 && vlen == sizeof("false")-1)	\
490a4bd5210SJason Evans 					o = false;			\
491a4bd5210SJason Evans 				else {					\
492a4bd5210SJason Evans 					malloc_conf_error(		\
493a4bd5210SJason Evans 					    "Invalid conf value",	\
494a4bd5210SJason Evans 					    k, klen, v, vlen);		\
495a4bd5210SJason Evans 				}					\
496a4bd5210SJason Evans 				continue;				\
497a4bd5210SJason Evans 			}
498*88ad2f8dSJason Evans #define	CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
4998ed34ab0SJason Evans 			if (sizeof(n)-1 == klen && strncmp(n, k,	\
500a4bd5210SJason Evans 			    klen) == 0) {				\
501a4bd5210SJason Evans 				uintmax_t um;				\
502a4bd5210SJason Evans 				char *end;				\
503a4bd5210SJason Evans 									\
504e722f8f8SJason Evans 				set_errno(0);				\
505a4bd5210SJason Evans 				um = malloc_strtoumax(v, &end, 0);	\
506e722f8f8SJason Evans 				if (get_errno() != 0 || (uintptr_t)end -\
507a4bd5210SJason Evans 				    (uintptr_t)v != vlen) {		\
508a4bd5210SJason Evans 					malloc_conf_error(		\
509a4bd5210SJason Evans 					    "Invalid conf value",	\
510a4bd5210SJason Evans 					    k, klen, v, vlen);		\
511*88ad2f8dSJason Evans 				} else if (clip) {			\
512*88ad2f8dSJason Evans 					if (um < min)			\
513*88ad2f8dSJason Evans 						o = min;		\
514*88ad2f8dSJason Evans 					else if (um > max)		\
515*88ad2f8dSJason Evans 						o = max;		\
516*88ad2f8dSJason Evans 					else				\
517*88ad2f8dSJason Evans 						o = um;			\
518*88ad2f8dSJason Evans 				} else {				\
519*88ad2f8dSJason Evans 					if (um < min || um > max) {	\
520a4bd5210SJason Evans 						malloc_conf_error(	\
521*88ad2f8dSJason Evans 						    "Out-of-range "	\
522*88ad2f8dSJason Evans 						    "conf value",	\
523a4bd5210SJason Evans 						    k, klen, v, vlen);	\
524a4bd5210SJason Evans 					} else				\
525a4bd5210SJason Evans 						o = um;			\
526*88ad2f8dSJason Evans 				}					\
527a4bd5210SJason Evans 				continue;				\
528a4bd5210SJason Evans 			}
529a4bd5210SJason Evans #define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
5308ed34ab0SJason Evans 			if (sizeof(n)-1 == klen && strncmp(n, k,	\
531a4bd5210SJason Evans 			    klen) == 0) {				\
532a4bd5210SJason Evans 				long l;					\
533a4bd5210SJason Evans 				char *end;				\
534a4bd5210SJason Evans 									\
535e722f8f8SJason Evans 				set_errno(0);				\
536a4bd5210SJason Evans 				l = strtol(v, &end, 0);			\
537e722f8f8SJason Evans 				if (get_errno() != 0 || (uintptr_t)end -\
538a4bd5210SJason Evans 				    (uintptr_t)v != vlen) {		\
539a4bd5210SJason Evans 					malloc_conf_error(		\
540a4bd5210SJason Evans 					    "Invalid conf value",	\
541a4bd5210SJason Evans 					    k, klen, v, vlen);		\
542a4bd5210SJason Evans 				} else if (l < (ssize_t)min || l >	\
543a4bd5210SJason Evans 				    (ssize_t)max) {			\
544a4bd5210SJason Evans 					malloc_conf_error(		\
545a4bd5210SJason Evans 					    "Out-of-range conf value",	\
546a4bd5210SJason Evans 					    k, klen, v, vlen);		\
547a4bd5210SJason Evans 				} else					\
548a4bd5210SJason Evans 					o = l;				\
549a4bd5210SJason Evans 				continue;				\
550a4bd5210SJason Evans 			}
551a4bd5210SJason Evans #define	CONF_HANDLE_CHAR_P(o, n, d)					\
5528ed34ab0SJason Evans 			if (sizeof(n)-1 == klen && strncmp(n, k,	\
553a4bd5210SJason Evans 			    klen) == 0) {				\
554a4bd5210SJason Evans 				size_t cpylen = (vlen <=		\
555a4bd5210SJason Evans 				    sizeof(o)-1) ? vlen :		\
556a4bd5210SJason Evans 				    sizeof(o)-1;			\
557a4bd5210SJason Evans 				strncpy(o, v, cpylen);			\
558a4bd5210SJason Evans 				o[cpylen] = '\0';			\
559a4bd5210SJason Evans 				continue;				\
560a4bd5210SJason Evans 			}
561a4bd5210SJason Evans 
5628ed34ab0SJason Evans 			CONF_HANDLE_BOOL(opt_abort, "abort")
563a4bd5210SJason Evans 			/*
564a4bd5210SJason Evans 			 * Chunks always require at least one header page, plus
565a4bd5210SJason Evans 			 * one data page in the absence of redzones, or three
566a4bd5210SJason Evans 			 * pages in the presence of redzones.  In order to
567a4bd5210SJason Evans 			 * simplify options processing, fix the limit based on
568a4bd5210SJason Evans 			 * config_fill.
569a4bd5210SJason Evans 			 */
5708ed34ab0SJason Evans 			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
571*88ad2f8dSJason Evans 			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
572*88ad2f8dSJason Evans 			    true)
57382872ac0SJason Evans 			if (strncmp("dss", k, klen) == 0) {
57482872ac0SJason Evans 				int i;
57582872ac0SJason Evans 				bool match = false;
57682872ac0SJason Evans 				for (i = 0; i < dss_prec_limit; i++) {
57782872ac0SJason Evans 					if (strncmp(dss_prec_names[i], v, vlen)
57882872ac0SJason Evans 					    == 0) {
57982872ac0SJason Evans 						if (chunk_dss_prec_set(i)) {
58082872ac0SJason Evans 							malloc_conf_error(
58182872ac0SJason Evans 							    "Error setting dss",
58282872ac0SJason Evans 							    k, klen, v, vlen);
58382872ac0SJason Evans 						} else {
58482872ac0SJason Evans 							opt_dss =
58582872ac0SJason Evans 							    dss_prec_names[i];
58682872ac0SJason Evans 							match = true;
58782872ac0SJason Evans 							break;
58882872ac0SJason Evans 						}
58982872ac0SJason Evans 					}
59082872ac0SJason Evans 				}
59182872ac0SJason Evans 				if (match == false) {
59282872ac0SJason Evans 					malloc_conf_error("Invalid conf value",
59382872ac0SJason Evans 					    k, klen, v, vlen);
59482872ac0SJason Evans 				}
59582872ac0SJason Evans 				continue;
59682872ac0SJason Evans 			}
5978ed34ab0SJason Evans 			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
598*88ad2f8dSJason Evans 			    SIZE_T_MAX, false)
5998ed34ab0SJason Evans 			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
600a4bd5210SJason Evans 			    -1, (sizeof(size_t) << 3) - 1)
6018ed34ab0SJason Evans 			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
602a4bd5210SJason Evans 			if (config_fill) {
6038ed34ab0SJason Evans 				CONF_HANDLE_BOOL(opt_junk, "junk")
6048ed34ab0SJason Evans 				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
605*88ad2f8dSJason Evans 				    0, SIZE_T_MAX, false)
6068ed34ab0SJason Evans 				CONF_HANDLE_BOOL(opt_redzone, "redzone")
6078ed34ab0SJason Evans 				CONF_HANDLE_BOOL(opt_zero, "zero")
608a4bd5210SJason Evans 			}
609a4bd5210SJason Evans 			if (config_utrace) {
6108ed34ab0SJason Evans 				CONF_HANDLE_BOOL(opt_utrace, "utrace")
611a4bd5210SJason Evans 			}
612a4bd5210SJason Evans 			if (config_valgrind) {
61382872ac0SJason Evans 				CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
614a4bd5210SJason Evans 			}
615a4bd5210SJason Evans 			if (config_xmalloc) {
6168ed34ab0SJason Evans 				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
617a4bd5210SJason Evans 			}
618a4bd5210SJason Evans 			if (config_tcache) {
6198ed34ab0SJason Evans 				CONF_HANDLE_BOOL(opt_tcache, "tcache")
620a4bd5210SJason Evans 				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
6218ed34ab0SJason Evans 				    "lg_tcache_max", -1,
622a4bd5210SJason Evans 				    (sizeof(size_t) << 3) - 1)
623a4bd5210SJason Evans 			}
624a4bd5210SJason Evans 			if (config_prof) {
6258ed34ab0SJason Evans 				CONF_HANDLE_BOOL(opt_prof, "prof")
6268ed34ab0SJason Evans 				CONF_HANDLE_CHAR_P(opt_prof_prefix,
6278ed34ab0SJason Evans 				    "prof_prefix", "jeprof")
6288ed34ab0SJason Evans 				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
629a4bd5210SJason Evans 				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
6308ed34ab0SJason Evans 				    "lg_prof_sample", 0,
631a4bd5210SJason Evans 				    (sizeof(uint64_t) << 3) - 1)
6328ed34ab0SJason Evans 				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
633a4bd5210SJason Evans 				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
6348ed34ab0SJason Evans 				    "lg_prof_interval", -1,
635a4bd5210SJason Evans 				    (sizeof(uint64_t) << 3) - 1)
6368ed34ab0SJason Evans 				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
6378ed34ab0SJason Evans 				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
6388ed34ab0SJason Evans 				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
639a4bd5210SJason Evans 			}
640a4bd5210SJason Evans 			malloc_conf_error("Invalid conf pair", k, klen, v,
641a4bd5210SJason Evans 			    vlen);
642a4bd5210SJason Evans #undef CONF_HANDLE_BOOL
643a4bd5210SJason Evans #undef CONF_HANDLE_SIZE_T
644a4bd5210SJason Evans #undef CONF_HANDLE_SSIZE_T
645a4bd5210SJason Evans #undef CONF_HANDLE_CHAR_P
646a4bd5210SJason Evans 		}
647a4bd5210SJason Evans 	}
648a4bd5210SJason Evans }
649a4bd5210SJason Evans 
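/*
 * Usage sketch (illustrative): options are read in the order compiled-in
 * string, /etc/malloc.conf symlink target, then the MALLOC_CONF environment
 * variable, so later sources win for a given key.  Assuming an unprefixed
 * build (no JEMALLOC_PREFIX), a program can bake in defaults by defining the
 * public configuration string:
 */
#if 0
#include <stdlib.h>

/* Picked up by malloc_conf_init() as the case-0 source above. */
const char *malloc_conf = "narenas:2,lg_chunk:22";

int
main(void)
{
	/* Running as MALLOC_CONF=abort:true ./a.out would add/override keys. */
	void *p = malloc(1);

	free(p);
	return (0);
}
#endif
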
650a4bd5210SJason Evans static bool
651a4bd5210SJason Evans malloc_init_hard(void)
652a4bd5210SJason Evans {
653a4bd5210SJason Evans 	arena_t *init_arenas[1];
654a4bd5210SJason Evans 
655a4bd5210SJason Evans 	malloc_mutex_lock(&init_lock);
656a4bd5210SJason Evans 	if (malloc_initialized || IS_INITIALIZER) {
657a4bd5210SJason Evans 		/*
658a4bd5210SJason Evans 		 * Another thread initialized the allocator before this one
659a4bd5210SJason Evans 		 * acquired init_lock, or this thread is the initializing
660a4bd5210SJason Evans 		 * thread, and it is recursively allocating.
661a4bd5210SJason Evans 		 */
662a4bd5210SJason Evans 		malloc_mutex_unlock(&init_lock);
663a4bd5210SJason Evans 		return (false);
664a4bd5210SJason Evans 	}
665a4bd5210SJason Evans #ifdef JEMALLOC_THREADED_INIT
666a4bd5210SJason Evans 	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
667a4bd5210SJason Evans 		/* Busy-wait until the initializing thread completes. */
668a4bd5210SJason Evans 		do {
669a4bd5210SJason Evans 			malloc_mutex_unlock(&init_lock);
670a4bd5210SJason Evans 			CPU_SPINWAIT;
671a4bd5210SJason Evans 			malloc_mutex_lock(&init_lock);
672a4bd5210SJason Evans 		} while (malloc_initialized == false);
673a4bd5210SJason Evans 		malloc_mutex_unlock(&init_lock);
674a4bd5210SJason Evans 		return (false);
675a4bd5210SJason Evans 	}
676a4bd5210SJason Evans #endif
677a4bd5210SJason Evans 	malloc_initializer = INITIALIZER;
678a4bd5210SJason Evans 
679a4bd5210SJason Evans 	malloc_tsd_boot();
680a4bd5210SJason Evans 	if (config_prof)
681a4bd5210SJason Evans 		prof_boot0();
682a4bd5210SJason Evans 
683a4bd5210SJason Evans 	malloc_conf_init();
684a4bd5210SJason Evans 
685e722f8f8SJason Evans #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
686e722f8f8SJason Evans     && !defined(_WIN32))
687a4bd5210SJason Evans 	/* Register fork handlers. */
688a4bd5210SJason Evans 	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
689a4bd5210SJason Evans 	    jemalloc_postfork_child) != 0) {
690a4bd5210SJason Evans 		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
691a4bd5210SJason Evans 		if (opt_abort)
692a4bd5210SJason Evans 			abort();
693a4bd5210SJason Evans 	}
694a4bd5210SJason Evans #endif
695a4bd5210SJason Evans 
696a4bd5210SJason Evans 	if (opt_stats_print) {
697a4bd5210SJason Evans 		/* Print statistics at exit. */
698a4bd5210SJason Evans 		if (atexit(stats_print_atexit) != 0) {
699a4bd5210SJason Evans 			malloc_write("<jemalloc>: Error in atexit()\n");
700a4bd5210SJason Evans 			if (opt_abort)
701a4bd5210SJason Evans 				abort();
702a4bd5210SJason Evans 		}
703a4bd5210SJason Evans 	}
704a4bd5210SJason Evans 
705a4bd5210SJason Evans 	if (base_boot()) {
706a4bd5210SJason Evans 		malloc_mutex_unlock(&init_lock);
707a4bd5210SJason Evans 		return (true);
708a4bd5210SJason Evans 	}
709a4bd5210SJason Evans 
7104bcb1430SJason Evans 	if (chunk_boot()) {
711a4bd5210SJason Evans 		malloc_mutex_unlock(&init_lock);
712a4bd5210SJason Evans 		return (true);
713a4bd5210SJason Evans 	}
714a4bd5210SJason Evans 
715a4bd5210SJason Evans 	if (ctl_boot()) {
716a4bd5210SJason Evans 		malloc_mutex_unlock(&init_lock);
717a4bd5210SJason Evans 		return (true);
718a4bd5210SJason Evans 	}
719a4bd5210SJason Evans 
720a4bd5210SJason Evans 	if (config_prof)
721a4bd5210SJason Evans 		prof_boot1();
722a4bd5210SJason Evans 
723a4bd5210SJason Evans 	arena_boot();
724a4bd5210SJason Evans 
725a4bd5210SJason Evans 	if (config_tcache && tcache_boot0()) {
726a4bd5210SJason Evans 		malloc_mutex_unlock(&init_lock);
727a4bd5210SJason Evans 		return (true);
728a4bd5210SJason Evans 	}
729a4bd5210SJason Evans 
730a4bd5210SJason Evans 	if (huge_boot()) {
731a4bd5210SJason Evans 		malloc_mutex_unlock(&init_lock);
732a4bd5210SJason Evans 		return (true);
733a4bd5210SJason Evans 	}
734a4bd5210SJason Evans 
735a4bd5210SJason Evans 	if (malloc_mutex_init(&arenas_lock))
736a4bd5210SJason Evans 		return (true);
737a4bd5210SJason Evans 
738a4bd5210SJason Evans 	/*
739a4bd5210SJason Evans 	 * Create enough scaffolding to allow recursive allocation in
740a4bd5210SJason Evans 	 * malloc_ncpus().
741a4bd5210SJason Evans 	 */
74282872ac0SJason Evans 	narenas_total = narenas_auto = 1;
743a4bd5210SJason Evans 	arenas = init_arenas;
74482872ac0SJason Evans 	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
745a4bd5210SJason Evans 
746a4bd5210SJason Evans 	/*
747a4bd5210SJason Evans 	 * Initialize one arena here.  The rest are lazily created in
748a4bd5210SJason Evans 	 * choose_arena_hard().
749a4bd5210SJason Evans 	 */
750a4bd5210SJason Evans 	arenas_extend(0);
751a4bd5210SJason Evans 	if (arenas[0] == NULL) {
752a4bd5210SJason Evans 		malloc_mutex_unlock(&init_lock);
753a4bd5210SJason Evans 		return (true);
754a4bd5210SJason Evans 	}
755a4bd5210SJason Evans 
756a4bd5210SJason Evans 	/* Initialize allocation counters before any allocations can occur. */
757a4bd5210SJason Evans 	if (config_stats && thread_allocated_tsd_boot()) {
758a4bd5210SJason Evans 		malloc_mutex_unlock(&init_lock);
759a4bd5210SJason Evans 		return (true);
760a4bd5210SJason Evans 	}
761a4bd5210SJason Evans 
762a4bd5210SJason Evans 	if (arenas_tsd_boot()) {
763a4bd5210SJason Evans 		malloc_mutex_unlock(&init_lock);
764a4bd5210SJason Evans 		return (true);
765a4bd5210SJason Evans 	}
766a4bd5210SJason Evans 
767a4bd5210SJason Evans 	if (config_tcache && tcache_boot1()) {
768a4bd5210SJason Evans 		malloc_mutex_unlock(&init_lock);
769a4bd5210SJason Evans 		return (true);
770a4bd5210SJason Evans 	}
771a4bd5210SJason Evans 
772a4bd5210SJason Evans 	if (config_fill && quarantine_boot()) {
773a4bd5210SJason Evans 		malloc_mutex_unlock(&init_lock);
774a4bd5210SJason Evans 		return (true);
775a4bd5210SJason Evans 	}
776a4bd5210SJason Evans 
777a4bd5210SJason Evans 	if (config_prof && prof_boot2()) {
778a4bd5210SJason Evans 		malloc_mutex_unlock(&init_lock);
779a4bd5210SJason Evans 		return (true);
780a4bd5210SJason Evans 	}
781a4bd5210SJason Evans 
782a4bd5210SJason Evans 	/* Get number of CPUs. */
783a4bd5210SJason Evans 	malloc_mutex_unlock(&init_lock);
784a4bd5210SJason Evans 	ncpus = malloc_ncpus();
785a4bd5210SJason Evans 	malloc_mutex_lock(&init_lock);
786a4bd5210SJason Evans 
787a4bd5210SJason Evans 	if (mutex_boot()) {
788a4bd5210SJason Evans 		malloc_mutex_unlock(&init_lock);
789a4bd5210SJason Evans 		return (true);
790a4bd5210SJason Evans 	}
791a4bd5210SJason Evans 
792a4bd5210SJason Evans 	if (opt_narenas == 0) {
793a4bd5210SJason Evans 		/*
794a4bd5210SJason Evans 		 * For SMP systems, create more than one arena per CPU by
795a4bd5210SJason Evans 		 * default.
796a4bd5210SJason Evans 		 */
797a4bd5210SJason Evans 		if (ncpus > 1)
798a4bd5210SJason Evans 			opt_narenas = ncpus << 2;
799a4bd5210SJason Evans 		else
800a4bd5210SJason Evans 			opt_narenas = 1;
801a4bd5210SJason Evans 	}
80282872ac0SJason Evans 	narenas_auto = opt_narenas;
803a4bd5210SJason Evans 	/*
804a4bd5210SJason Evans 	 * Make sure that the arenas array can be allocated.  In practice, this
805a4bd5210SJason Evans 	 * limit is enough to allow the allocator to function, but the ctl
806a4bd5210SJason Evans 	 * machinery will fail to allocate memory at far lower limits.
807a4bd5210SJason Evans 	 */
80882872ac0SJason Evans 	if (narenas_auto > chunksize / sizeof(arena_t *)) {
80982872ac0SJason Evans 		narenas_auto = chunksize / sizeof(arena_t *);
810a4bd5210SJason Evans 		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
81182872ac0SJason Evans 		    narenas_auto);
812a4bd5210SJason Evans 	}
81382872ac0SJason Evans 	narenas_total = narenas_auto;
814a4bd5210SJason Evans 
815a4bd5210SJason Evans 	/* Allocate and initialize arenas. */
81682872ac0SJason Evans 	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
817a4bd5210SJason Evans 	if (arenas == NULL) {
818a4bd5210SJason Evans 		malloc_mutex_unlock(&init_lock);
819a4bd5210SJason Evans 		return (true);
820a4bd5210SJason Evans 	}
821a4bd5210SJason Evans 	/*
822a4bd5210SJason Evans 	 * Zero the array.  In practice, this should always be pre-zeroed,
823a4bd5210SJason Evans 	 * since it was just mmap()ed, but let's be sure.
824a4bd5210SJason Evans 	 */
82582872ac0SJason Evans 	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
826a4bd5210SJason Evans 	/* Copy the pointer to the one arena that was already initialized. */
827a4bd5210SJason Evans 	arenas[0] = init_arenas[0];
828a4bd5210SJason Evans 
829a4bd5210SJason Evans 	malloc_initialized = true;
830a4bd5210SJason Evans 	malloc_mutex_unlock(&init_lock);
831a4bd5210SJason Evans 	return (false);
832a4bd5210SJason Evans }
833a4bd5210SJason Evans 
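/*
 * Illustrative sketch (simplified, hypothetical): the bootstrap protocol
 * above is a mutex-protected "initialized" flag plus an initializer identity,
 * so the initializing thread may recurse into the allocator while every other
 * thread busy-waits.  A minimal pthread version of the same shape:
 */
#if 0
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static pthread_mutex_t init_lock = PTHREAD_MUTEX_INITIALIZER;
static bool initialized = false;
static bool have_initializer = false;
static pthread_t initializer;

static bool
init_once(void)
{
	pthread_mutex_lock(&init_lock);
	if (initialized || (have_initializer &&
	    pthread_equal(initializer, pthread_self()))) {
		/* Done already, or we are the initializer recursing. */
		pthread_mutex_unlock(&init_lock);
		return (false);
	}
	if (have_initializer) {
		/* Somebody else is initializing; spin until it finishes. */
		do {
			pthread_mutex_unlock(&init_lock);
			sched_yield();
			pthread_mutex_lock(&init_lock);
		} while (!initialized);
		pthread_mutex_unlock(&init_lock);
		return (false);
	}
	initializer = pthread_self();
	have_initializer = true;
	/* ...one-time setup goes here; it may call back into init_once()... */
	initialized = true;
	pthread_mutex_unlock(&init_lock);
	return (false);
}
#endif
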
834a4bd5210SJason Evans /*
835a4bd5210SJason Evans  * End initialization functions.
836a4bd5210SJason Evans  */
837a4bd5210SJason Evans /******************************************************************************/
838a4bd5210SJason Evans /*
839a4bd5210SJason Evans  * Begin malloc(3)-compatible functions.
840a4bd5210SJason Evans  */
841a4bd5210SJason Evans 
842a4bd5210SJason Evans void *
843a4bd5210SJason Evans je_malloc(size_t size)
844a4bd5210SJason Evans {
845a4bd5210SJason Evans 	void *ret;
846e722f8f8SJason Evans 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
847a4bd5210SJason Evans 	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
848a4bd5210SJason Evans 
849a4bd5210SJason Evans 	if (malloc_init()) {
850a4bd5210SJason Evans 		ret = NULL;
851a4bd5210SJason Evans 		goto label_oom;
852a4bd5210SJason Evans 	}
853a4bd5210SJason Evans 
854a4bd5210SJason Evans 	if (size == 0)
855a4bd5210SJason Evans 		size = 1;
856a4bd5210SJason Evans 
857a4bd5210SJason Evans 	if (config_prof && opt_prof) {
858a4bd5210SJason Evans 		usize = s2u(size);
859a4bd5210SJason Evans 		PROF_ALLOC_PREP(1, usize, cnt);
860a4bd5210SJason Evans 		if (cnt == NULL) {
861a4bd5210SJason Evans 			ret = NULL;
862a4bd5210SJason Evans 			goto label_oom;
863a4bd5210SJason Evans 		}
864a4bd5210SJason Evans 		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
865a4bd5210SJason Evans 		    SMALL_MAXCLASS) {
866a4bd5210SJason Evans 			ret = imalloc(SMALL_MAXCLASS+1);
867a4bd5210SJason Evans 			if (ret != NULL)
868a4bd5210SJason Evans 				arena_prof_promoted(ret, usize);
869a4bd5210SJason Evans 		} else
870a4bd5210SJason Evans 			ret = imalloc(size);
871a4bd5210SJason Evans 	} else {
872a4bd5210SJason Evans 		if (config_stats || (config_valgrind && opt_valgrind))
873a4bd5210SJason Evans 			usize = s2u(size);
874a4bd5210SJason Evans 		ret = imalloc(size);
875a4bd5210SJason Evans 	}
876a4bd5210SJason Evans 
877a4bd5210SJason Evans label_oom:
878a4bd5210SJason Evans 	if (ret == NULL) {
879a4bd5210SJason Evans 		if (config_xmalloc && opt_xmalloc) {
880a4bd5210SJason Evans 			malloc_write("<jemalloc>: Error in malloc(): "
881a4bd5210SJason Evans 			    "out of memory\n");
882a4bd5210SJason Evans 			abort();
883a4bd5210SJason Evans 		}
884e722f8f8SJason Evans 		set_errno(ENOMEM);
885a4bd5210SJason Evans 	}
886a4bd5210SJason Evans 	if (config_prof && opt_prof && ret != NULL)
887a4bd5210SJason Evans 		prof_malloc(ret, usize, cnt);
888a4bd5210SJason Evans 	if (config_stats && ret != NULL) {
889a4bd5210SJason Evans 		assert(usize == isalloc(ret, config_prof));
890a4bd5210SJason Evans 		thread_allocated_tsd_get()->allocated += usize;
891a4bd5210SJason Evans 	}
892a4bd5210SJason Evans 	UTRACE(0, size, ret);
893a4bd5210SJason Evans 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
894a4bd5210SJason Evans 	return (ret);
895a4bd5210SJason Evans }
896a4bd5210SJason Evans 
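/*
 * Usage sketch (illustrative): as implemented above, malloc(0) is turned into
 * a 1-byte request, so it returns a unique, freeable pointer, and an
 * allocation failure sets errno to ENOMEM (or aborts when the "xmalloc"
 * option is enabled).
 */
#if 0
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	void *p = malloc(0);
	void *q = malloc((size_t)-1);	/* Far too large; expected to fail. */

	printf("malloc(0) -> %p\n", p);
	if (q == NULL)
		printf("huge request failed, errno=%d (ENOMEM=%d)\n", errno,
		    ENOMEM);
	free(p);
	return (0);
}
#endif
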
897a4bd5210SJason Evans JEMALLOC_ATTR(nonnull(1))
898a4bd5210SJason Evans #ifdef JEMALLOC_PROF
899a4bd5210SJason Evans /*
900a4bd5210SJason Evans  * Avoid any uncertainty as to how many backtrace frames to ignore in
901a4bd5210SJason Evans  * PROF_ALLOC_PREP().
902a4bd5210SJason Evans  */
903*88ad2f8dSJason Evans JEMALLOC_NOINLINE
904a4bd5210SJason Evans #endif
905a4bd5210SJason Evans static int
906a4bd5210SJason Evans imemalign(void **memptr, size_t alignment, size_t size,
907a4bd5210SJason Evans     size_t min_alignment)
908a4bd5210SJason Evans {
909a4bd5210SJason Evans 	int ret;
910a4bd5210SJason Evans 	size_t usize;
911a4bd5210SJason Evans 	void *result;
912a4bd5210SJason Evans 	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
913a4bd5210SJason Evans 
914a4bd5210SJason Evans 	assert(min_alignment != 0);
915a4bd5210SJason Evans 
916a4bd5210SJason Evans 	if (malloc_init())
917a4bd5210SJason Evans 		result = NULL;
918a4bd5210SJason Evans 	else {
919a4bd5210SJason Evans 		if (size == 0)
920a4bd5210SJason Evans 			size = 1;
921a4bd5210SJason Evans 
922a4bd5210SJason Evans 		/* Make sure that alignment is a large enough power of 2. */
923a4bd5210SJason Evans 		if (((alignment - 1) & alignment) != 0
924a4bd5210SJason Evans 		    || (alignment < min_alignment)) {
925a4bd5210SJason Evans 			if (config_xmalloc && opt_xmalloc) {
926a4bd5210SJason Evans 				malloc_write("<jemalloc>: Error allocating "
927a4bd5210SJason Evans 				    "aligned memory: invalid alignment\n");
928a4bd5210SJason Evans 				abort();
929a4bd5210SJason Evans 			}
930a4bd5210SJason Evans 			result = NULL;
931a4bd5210SJason Evans 			ret = EINVAL;
932a4bd5210SJason Evans 			goto label_return;
933a4bd5210SJason Evans 		}
934a4bd5210SJason Evans 
935a4bd5210SJason Evans 		usize = sa2u(size, alignment);
936a4bd5210SJason Evans 		if (usize == 0) {
937a4bd5210SJason Evans 			result = NULL;
938a4bd5210SJason Evans 			ret = ENOMEM;
939a4bd5210SJason Evans 			goto label_return;
940a4bd5210SJason Evans 		}
941a4bd5210SJason Evans 
942a4bd5210SJason Evans 		if (config_prof && opt_prof) {
943a4bd5210SJason Evans 			PROF_ALLOC_PREP(2, usize, cnt);
944a4bd5210SJason Evans 			if (cnt == NULL) {
945a4bd5210SJason Evans 				result = NULL;
946a4bd5210SJason Evans 				ret = EINVAL;
947a4bd5210SJason Evans 			} else {
948a4bd5210SJason Evans 				if (prof_promote && (uintptr_t)cnt !=
949a4bd5210SJason Evans 				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
950a4bd5210SJason Evans 					assert(sa2u(SMALL_MAXCLASS+1,
951a4bd5210SJason Evans 					    alignment) != 0);
952a4bd5210SJason Evans 					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
953a4bd5210SJason Evans 					    alignment), alignment, false);
954a4bd5210SJason Evans 					if (result != NULL) {
955a4bd5210SJason Evans 						arena_prof_promoted(result,
956a4bd5210SJason Evans 						    usize);
957a4bd5210SJason Evans 					}
958a4bd5210SJason Evans 				} else {
959a4bd5210SJason Evans 					result = ipalloc(usize, alignment,
960a4bd5210SJason Evans 					    false);
961a4bd5210SJason Evans 				}
962a4bd5210SJason Evans 			}
963a4bd5210SJason Evans 		} else
964a4bd5210SJason Evans 			result = ipalloc(usize, alignment, false);
965a4bd5210SJason Evans 	}
966a4bd5210SJason Evans 
967a4bd5210SJason Evans 	if (result == NULL) {
968a4bd5210SJason Evans 		if (config_xmalloc && opt_xmalloc) {
969a4bd5210SJason Evans 			malloc_write("<jemalloc>: Error allocating aligned "
970a4bd5210SJason Evans 			    "memory: out of memory\n");
971a4bd5210SJason Evans 			abort();
972a4bd5210SJason Evans 		}
973a4bd5210SJason Evans 		ret = ENOMEM;
974a4bd5210SJason Evans 		goto label_return;
975a4bd5210SJason Evans 	}
976a4bd5210SJason Evans 
977a4bd5210SJason Evans 	*memptr = result;
978a4bd5210SJason Evans 	ret = 0;
979a4bd5210SJason Evans 
980a4bd5210SJason Evans label_return:
981a4bd5210SJason Evans 	if (config_stats && result != NULL) {
982a4bd5210SJason Evans 		assert(usize == isalloc(result, config_prof));
983a4bd5210SJason Evans 		thread_allocated_tsd_get()->allocated += usize;
984a4bd5210SJason Evans 	}
985a4bd5210SJason Evans 	if (config_prof && opt_prof && result != NULL)
986a4bd5210SJason Evans 		prof_malloc(result, usize, cnt);
987a4bd5210SJason Evans 	UTRACE(0, size, result);
988a4bd5210SJason Evans 	return (ret);
989a4bd5210SJason Evans }
990a4bd5210SJason Evans 
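/*
 * Illustrative sketch: the alignment validation above relies on the standard
 * power-of-two test -- a power of two has a single bit set, so x & (x - 1) is
 * zero exactly for powers of two (and for zero, which the min_alignment floor
 * already rejects).  Stand-alone version of the same check:
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

static bool
valid_alignment(size_t alignment, size_t min_alignment)
{
	return (((alignment - 1) & alignment) == 0 &&
	    alignment >= min_alignment);
}

int
main(void)
{
	printf("%d %d %d\n",
	    valid_alignment(64, sizeof(void *)),	/* 1: power of two. */
	    valid_alignment(48, sizeof(void *)),	/* 0: not a power of two. */
	    valid_alignment(2, sizeof(void *)));	/* 0: below the minimum. */
	return (0);
}
#endif
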
991a4bd5210SJason Evans int
992a4bd5210SJason Evans je_posix_memalign(void **memptr, size_t alignment, size_t size)
993a4bd5210SJason Evans {
994a4bd5210SJason Evans 	int ret = imemalign(memptr, alignment, size, sizeof(void *));
995a4bd5210SJason Evans 	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
996a4bd5210SJason Evans 	    config_prof), false);
997a4bd5210SJason Evans 	return (ret);
998a4bd5210SJason Evans }
999a4bd5210SJason Evans 
1000a4bd5210SJason Evans void *
1001a4bd5210SJason Evans je_aligned_alloc(size_t alignment, size_t size)
1002a4bd5210SJason Evans {
1003a4bd5210SJason Evans 	void *ret;
1004a4bd5210SJason Evans 	int err;
1005a4bd5210SJason Evans 
1006a4bd5210SJason Evans 	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
1007a4bd5210SJason Evans 		ret = NULL;
1008e722f8f8SJason Evans 		set_errno(err);
1009a4bd5210SJason Evans 	}
1010a4bd5210SJason Evans 	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
1011a4bd5210SJason Evans 	    false);
1012a4bd5210SJason Evans 	return (ret);
1013a4bd5210SJason Evans }
1014a4bd5210SJason Evans 
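/*
 * Usage sketch (illustrative): note the differing error conventions
 * implemented above -- posix_memalign() returns the error code and leaves
 * errno alone, while aligned_alloc() returns NULL and stores the code into
 * errno via set_errno().
 */
#if 0
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	void *p = NULL;
	int err = posix_memalign(&p, 3, 64);	/* 3 is not a power of two. */
	void *q = aligned_alloc(3, 64);		/* Same invalid alignment. */

	printf("posix_memalign: %d, aligned_alloc: %p errno=%d\n", err, q,
	    errno);
	free(p);
	free(q);
	return (0);
}
#endif
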
1015a4bd5210SJason Evans void *
1016a4bd5210SJason Evans je_calloc(size_t num, size_t size)
1017a4bd5210SJason Evans {
1018a4bd5210SJason Evans 	void *ret;
1019a4bd5210SJason Evans 	size_t num_size;
1020e722f8f8SJason Evans 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1021a4bd5210SJason Evans 	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
1022a4bd5210SJason Evans 
1023a4bd5210SJason Evans 	if (malloc_init()) {
1024a4bd5210SJason Evans 		num_size = 0;
1025a4bd5210SJason Evans 		ret = NULL;
1026a4bd5210SJason Evans 		goto label_return;
1027a4bd5210SJason Evans 	}
1028a4bd5210SJason Evans 
1029a4bd5210SJason Evans 	num_size = num * size;
1030a4bd5210SJason Evans 	if (num_size == 0) {
1031a4bd5210SJason Evans 		if (num == 0 || size == 0)
1032a4bd5210SJason Evans 			num_size = 1;
1033a4bd5210SJason Evans 		else {
1034a4bd5210SJason Evans 			ret = NULL;
1035a4bd5210SJason Evans 			goto label_return;
1036a4bd5210SJason Evans 		}
1037a4bd5210SJason Evans 	/*
1038a4bd5210SJason Evans 	 * Try to avoid division here.  We know that it isn't possible to
1039a4bd5210SJason Evans 	 * overflow during multiplication if neither operand uses any of the
1040a4bd5210SJason Evans 	 * most significant half of the bits in a size_t.
1041a4bd5210SJason Evans 	 */
1042a4bd5210SJason Evans 	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
1043a4bd5210SJason Evans 	    && (num_size / size != num)) {
1044a4bd5210SJason Evans 		/* size_t overflow. */
1045a4bd5210SJason Evans 		ret = NULL;
1046a4bd5210SJason Evans 		goto label_return;
1047a4bd5210SJason Evans 	}
1048a4bd5210SJason Evans 
1049a4bd5210SJason Evans 	if (config_prof && opt_prof) {
1050a4bd5210SJason Evans 		usize = s2u(num_size);
1051a4bd5210SJason Evans 		PROF_ALLOC_PREP(1, usize, cnt);
1052a4bd5210SJason Evans 		if (cnt == NULL) {
1053a4bd5210SJason Evans 			ret = NULL;
1054a4bd5210SJason Evans 			goto label_return;
1055a4bd5210SJason Evans 		}
1056a4bd5210SJason Evans 		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
1057a4bd5210SJason Evans 		    <= SMALL_MAXCLASS) {
1058a4bd5210SJason Evans 			ret = icalloc(SMALL_MAXCLASS+1);
1059a4bd5210SJason Evans 			if (ret != NULL)
1060a4bd5210SJason Evans 				arena_prof_promoted(ret, usize);
1061a4bd5210SJason Evans 		} else
1062a4bd5210SJason Evans 			ret = icalloc(num_size);
1063a4bd5210SJason Evans 	} else {
1064a4bd5210SJason Evans 		if (config_stats || (config_valgrind && opt_valgrind))
1065a4bd5210SJason Evans 			usize = s2u(num_size);
1066a4bd5210SJason Evans 		ret = icalloc(num_size);
1067a4bd5210SJason Evans 	}
1068a4bd5210SJason Evans 
1069a4bd5210SJason Evans label_return:
1070a4bd5210SJason Evans 	if (ret == NULL) {
1071a4bd5210SJason Evans 		if (config_xmalloc && opt_xmalloc) {
1072a4bd5210SJason Evans 			malloc_write("<jemalloc>: Error in calloc(): out of "
1073a4bd5210SJason Evans 			    "memory\n");
1074a4bd5210SJason Evans 			abort();
1075a4bd5210SJason Evans 		}
1076e722f8f8SJason Evans 		set_errno(ENOMEM);
1077a4bd5210SJason Evans 	}
1078a4bd5210SJason Evans 
1079a4bd5210SJason Evans 	if (config_prof && opt_prof && ret != NULL)
1080a4bd5210SJason Evans 		prof_malloc(ret, usize, cnt);
1081a4bd5210SJason Evans 	if (config_stats && ret != NULL) {
1082a4bd5210SJason Evans 		assert(usize == isalloc(ret, config_prof));
1083a4bd5210SJason Evans 		thread_allocated_tsd_get()->allocated += usize;
1084a4bd5210SJason Evans 	}
1085a4bd5210SJason Evans 	UTRACE(0, num_size, ret);
1086a4bd5210SJason Evans 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
1087a4bd5210SJason Evans 	return (ret);
1088a4bd5210SJason Evans }
1089a4bd5210SJason Evans 
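/*
 * Illustrative sketch: the overflow guard above first checks whether either
 * operand uses the top half of its bits (if neither does, num * size cannot
 * overflow a size_t), and only falls back to the division test when that
 * cheap filter is inconclusive.  Stand-alone version of the same check:
 */
#if 0
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static bool
mul_overflows(size_t num, size_t size, size_t *prod)
{
	*prod = num * size;
	if (((num | size) & (SIZE_MAX << (sizeof(size_t) << 2))) == 0)
		return (false);		/* Both operands are "small". */
	return (size != 0 && *prod / size != num);	/* Exact check. */
}

int
main(void)
{
	size_t prod;

	printf("%d\n", mul_overflows(1000, 1000, &prod));	/* 0 */
	printf("%d\n", mul_overflows(SIZE_MAX / 2, 3, &prod));	/* 1 */
	return (0);
}
#endif
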
1090a4bd5210SJason Evans void *
1091a4bd5210SJason Evans je_realloc(void *ptr, size_t size)
1092a4bd5210SJason Evans {
1093a4bd5210SJason Evans 	void *ret;
1094e722f8f8SJason Evans 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
1095a4bd5210SJason Evans 	size_t old_size = 0;
1096a4bd5210SJason Evans 	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1097a4bd5210SJason Evans 	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
1098a4bd5210SJason Evans 	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);
1099a4bd5210SJason Evans 
1100a4bd5210SJason Evans 	if (size == 0) {
1101a4bd5210SJason Evans 		if (ptr != NULL) {
1102a4bd5210SJason Evans 			/* realloc(ptr, 0) is equivalent to free(ptr). */
1103a4bd5210SJason Evans 			if (config_prof) {
1104a4bd5210SJason Evans 				old_size = isalloc(ptr, true);
1105a4bd5210SJason Evans 				if (config_valgrind && opt_valgrind)
1106a4bd5210SJason Evans 					old_rzsize = p2rz(ptr);
1107a4bd5210SJason Evans 			} else if (config_stats) {
1108a4bd5210SJason Evans 				old_size = isalloc(ptr, false);
1109a4bd5210SJason Evans 				if (config_valgrind && opt_valgrind)
1110a4bd5210SJason Evans 					old_rzsize = u2rz(old_size);
1111a4bd5210SJason Evans 			} else if (config_valgrind && opt_valgrind) {
1112a4bd5210SJason Evans 				old_size = isalloc(ptr, false);
1113a4bd5210SJason Evans 				old_rzsize = u2rz(old_size);
1114a4bd5210SJason Evans 			}
1115a4bd5210SJason Evans 			if (config_prof && opt_prof) {
1116a4bd5210SJason Evans 				old_ctx = prof_ctx_get(ptr);
1117a4bd5210SJason Evans 				cnt = NULL;
1118a4bd5210SJason Evans 			}
1119a4bd5210SJason Evans 			iqalloc(ptr);
1120a4bd5210SJason Evans 			ret = NULL;
1121a4bd5210SJason Evans 			goto label_return;
1122a4bd5210SJason Evans 		} else
1123a4bd5210SJason Evans 			size = 1;
1124a4bd5210SJason Evans 	}
1125a4bd5210SJason Evans 
1126a4bd5210SJason Evans 	if (ptr != NULL) {
1127a4bd5210SJason Evans 		assert(malloc_initialized || IS_INITIALIZER);
1128a4bd5210SJason Evans 
1129a4bd5210SJason Evans 		if (config_prof) {
1130a4bd5210SJason Evans 			old_size = isalloc(ptr, true);
1131a4bd5210SJason Evans 			if (config_valgrind && opt_valgrind)
1132a4bd5210SJason Evans 				old_rzsize = p2rz(ptr);
1133a4bd5210SJason Evans 		} else if (config_stats) {
1134a4bd5210SJason Evans 			old_size = isalloc(ptr, false);
1135a4bd5210SJason Evans 			if (config_valgrind && opt_valgrind)
1136a4bd5210SJason Evans 				old_rzsize = u2rz(old_size);
1137a4bd5210SJason Evans 		} else if (config_valgrind && opt_valgrind) {
1138a4bd5210SJason Evans 			old_size = isalloc(ptr, false);
1139a4bd5210SJason Evans 			old_rzsize = u2rz(old_size);
1140a4bd5210SJason Evans 		}
1141a4bd5210SJason Evans 		if (config_prof && opt_prof) {
1142a4bd5210SJason Evans 			usize = s2u(size);
1143a4bd5210SJason Evans 			old_ctx = prof_ctx_get(ptr);
1144a4bd5210SJason Evans 			PROF_ALLOC_PREP(1, usize, cnt);
1145a4bd5210SJason Evans 			if (cnt == NULL) {
1146a4bd5210SJason Evans 				old_ctx = NULL;
1147a4bd5210SJason Evans 				ret = NULL;
1148a4bd5210SJason Evans 				goto label_oom;
1149a4bd5210SJason Evans 			}
1150a4bd5210SJason Evans 			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
1151a4bd5210SJason Evans 			    usize <= SMALL_MAXCLASS) {
1152a4bd5210SJason Evans 				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
1153a4bd5210SJason Evans 				    false, false);
1154a4bd5210SJason Evans 				if (ret != NULL)
1155a4bd5210SJason Evans 					arena_prof_promoted(ret, usize);
1156a4bd5210SJason Evans 				else
1157a4bd5210SJason Evans 					old_ctx = NULL;
1158a4bd5210SJason Evans 			} else {
1159a4bd5210SJason Evans 				ret = iralloc(ptr, size, 0, 0, false, false);
1160a4bd5210SJason Evans 				if (ret == NULL)
1161a4bd5210SJason Evans 					old_ctx = NULL;
1162a4bd5210SJason Evans 			}
1163a4bd5210SJason Evans 		} else {
1164a4bd5210SJason Evans 			if (config_stats || (config_valgrind && opt_valgrind))
1165a4bd5210SJason Evans 				usize = s2u(size);
1166a4bd5210SJason Evans 			ret = iralloc(ptr, size, 0, 0, false, false);
1167a4bd5210SJason Evans 		}
1168a4bd5210SJason Evans 
1169a4bd5210SJason Evans label_oom:
1170a4bd5210SJason Evans 		if (ret == NULL) {
1171a4bd5210SJason Evans 			if (config_xmalloc && opt_xmalloc) {
1172a4bd5210SJason Evans 				malloc_write("<jemalloc>: Error in realloc(): "
1173a4bd5210SJason Evans 				    "out of memory\n");
1174a4bd5210SJason Evans 				abort();
1175a4bd5210SJason Evans 			}
1176e722f8f8SJason Evans 			set_errno(ENOMEM);
1177a4bd5210SJason Evans 		}
1178a4bd5210SJason Evans 	} else {
1179a4bd5210SJason Evans 		/* realloc(NULL, size) is equivalent to malloc(size). */
1180a4bd5210SJason Evans 		if (config_prof && opt_prof)
1181a4bd5210SJason Evans 			old_ctx = NULL;
1182a4bd5210SJason Evans 		if (malloc_init()) {
1183a4bd5210SJason Evans 			if (config_prof && opt_prof)
1184a4bd5210SJason Evans 				cnt = NULL;
1185a4bd5210SJason Evans 			ret = NULL;
1186a4bd5210SJason Evans 		} else {
1187a4bd5210SJason Evans 			if (config_prof && opt_prof) {
1188a4bd5210SJason Evans 				usize = s2u(size);
1189a4bd5210SJason Evans 				PROF_ALLOC_PREP(1, usize, cnt);
1190a4bd5210SJason Evans 				if (cnt == NULL)
1191a4bd5210SJason Evans 					ret = NULL;
1192a4bd5210SJason Evans 				else {
1193a4bd5210SJason Evans 					if (prof_promote && (uintptr_t)cnt !=
1194a4bd5210SJason Evans 					    (uintptr_t)1U && usize <=
1195a4bd5210SJason Evans 					    SMALL_MAXCLASS) {
1196a4bd5210SJason Evans 						ret = imalloc(SMALL_MAXCLASS+1);
1197a4bd5210SJason Evans 						if (ret != NULL) {
1198a4bd5210SJason Evans 							arena_prof_promoted(ret,
1199a4bd5210SJason Evans 							    usize);
1200a4bd5210SJason Evans 						}
1201a4bd5210SJason Evans 					} else
1202a4bd5210SJason Evans 						ret = imalloc(size);
1203a4bd5210SJason Evans 				}
1204a4bd5210SJason Evans 			} else {
1205a4bd5210SJason Evans 				if (config_stats || (config_valgrind &&
1206a4bd5210SJason Evans 				    opt_valgrind))
1207a4bd5210SJason Evans 					usize = s2u(size);
1208a4bd5210SJason Evans 				ret = imalloc(size);
1209a4bd5210SJason Evans 			}
1210a4bd5210SJason Evans 		}
1211a4bd5210SJason Evans 
1212a4bd5210SJason Evans 		if (ret == NULL) {
1213a4bd5210SJason Evans 			if (config_xmalloc && opt_xmalloc) {
1214a4bd5210SJason Evans 				malloc_write("<jemalloc>: Error in realloc(): "
1215a4bd5210SJason Evans 				    "out of memory\n");
1216a4bd5210SJason Evans 				abort();
1217a4bd5210SJason Evans 			}
1218e722f8f8SJason Evans 			set_errno(ENOMEM);
1219a4bd5210SJason Evans 		}
1220a4bd5210SJason Evans 	}
1221a4bd5210SJason Evans 
1222a4bd5210SJason Evans label_return:
1223a4bd5210SJason Evans 	if (config_prof && opt_prof)
1224a4bd5210SJason Evans 		prof_realloc(ret, usize, cnt, old_size, old_ctx);
1225a4bd5210SJason Evans 	if (config_stats && ret != NULL) {
1226a4bd5210SJason Evans 		thread_allocated_t *ta;
1227a4bd5210SJason Evans 		assert(usize == isalloc(ret, config_prof));
1228a4bd5210SJason Evans 		ta = thread_allocated_tsd_get();
1229a4bd5210SJason Evans 		ta->allocated += usize;
1230a4bd5210SJason Evans 		ta->deallocated += old_size;
1231a4bd5210SJason Evans 	}
1232a4bd5210SJason Evans 	UTRACE(ptr, size, ret);
1233a4bd5210SJason Evans 	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
1234a4bd5210SJason Evans 	return (ret);
1235a4bd5210SJason Evans }
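/*
 * Editor's note -- illustrative sketch, not part of the original source: the
 * two special cases je_realloc() handles above, as seen from application
 * code.  Assumes the public malloc(3) names are mapped onto the je_*
 * definitions (as jemalloc's name mangling normally arranges) and that
 * <stdlib.h> is included; example_realloc_semantics is a hypothetical name.
 */
#if 0
static void
example_realloc_semantics(void)
{
	void *p;

	p = realloc(NULL, 64);	/* Equivalent to malloc(64). */
	p = realloc(p, 128);	/* Ordinary resize; the object may move. */
	p = realloc(p, 0);	/* Frees the object; this implementation
				 * returns NULL. */
}
#endif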
1236a4bd5210SJason Evans 
1237a4bd5210SJason Evans void
1238a4bd5210SJason Evans je_free(void *ptr)
1239a4bd5210SJason Evans {
1240a4bd5210SJason Evans 
1241a4bd5210SJason Evans 	UTRACE(ptr, 0, 0);
1242a4bd5210SJason Evans 	if (ptr != NULL) {
1243a4bd5210SJason Evans 		size_t usize;
1244a4bd5210SJason Evans 		size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1245a4bd5210SJason Evans 
1246a4bd5210SJason Evans 		assert(malloc_initialized || IS_INITIALIZER);
1247a4bd5210SJason Evans 
1248a4bd5210SJason Evans 		if (config_prof && opt_prof) {
1249a4bd5210SJason Evans 			usize = isalloc(ptr, config_prof);
1250a4bd5210SJason Evans 			prof_free(ptr, usize);
1251a4bd5210SJason Evans 		} else if (config_stats || config_valgrind)
1252a4bd5210SJason Evans 			usize = isalloc(ptr, config_prof);
1253a4bd5210SJason Evans 		if (config_stats)
1254a4bd5210SJason Evans 			thread_allocated_tsd_get()->deallocated += usize;
1255a4bd5210SJason Evans 		if (config_valgrind && opt_valgrind)
1256a4bd5210SJason Evans 			rzsize = p2rz(ptr);
1257a4bd5210SJason Evans 		iqalloc(ptr);
1258a4bd5210SJason Evans 		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1259a4bd5210SJason Evans 	}
1260a4bd5210SJason Evans }
1261a4bd5210SJason Evans 
1262a4bd5210SJason Evans /*
1263a4bd5210SJason Evans  * End malloc(3)-compatible functions.
1264a4bd5210SJason Evans  */
1265a4bd5210SJason Evans /******************************************************************************/
1266a4bd5210SJason Evans /*
1267a4bd5210SJason Evans  * Begin non-standard override functions.
1268a4bd5210SJason Evans  */
1269a4bd5210SJason Evans 
1270a4bd5210SJason Evans #ifdef JEMALLOC_OVERRIDE_MEMALIGN
1271a4bd5210SJason Evans void *
1272a4bd5210SJason Evans je_memalign(size_t alignment, size_t size)
1273a4bd5210SJason Evans {
1274a4bd5210SJason Evans 	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1275a4bd5210SJason Evans 	imemalign(&ret, alignment, size, 1);
1276a4bd5210SJason Evans 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1277a4bd5210SJason Evans 	return (ret);
1278a4bd5210SJason Evans }
1279a4bd5210SJason Evans #endif
1280a4bd5210SJason Evans 
1281a4bd5210SJason Evans #ifdef JEMALLOC_OVERRIDE_VALLOC
1282a4bd5210SJason Evans void *
1283a4bd5210SJason Evans je_valloc(size_t size)
1284a4bd5210SJason Evans {
1285a4bd5210SJason Evans 	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
1286a4bd5210SJason Evans 	imemalign(&ret, PAGE, size, 1);
1287a4bd5210SJason Evans 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
1288a4bd5210SJason Evans 	return (ret);
1289a4bd5210SJason Evans }
1290a4bd5210SJason Evans #endif
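/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * legacy aligned-allocation usage served by the overrides above when
 * JEMALLOC_OVERRIDE_MEMALIGN/JEMALLOC_OVERRIDE_VALLOC are defined.  Assumes
 * the platform headers declare memalign() and valloc();
 * example_legacy_aligned is a hypothetical name.
 */
#if 0
static void
example_legacy_aligned(void)
{
	void *a = memalign(64, 1024);	/* 64-byte alignment, 1024 bytes. */
	void *b = valloc(100);		/* Page-aligned (PAGE above). */

	free(a);
	free(b);
}
#endif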
1291a4bd5210SJason Evans 
1292a4bd5210SJason Evans /*
1293a4bd5210SJason Evans  * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
1294a4bd5210SJason Evans  * #define je_malloc malloc
1295a4bd5210SJason Evans  */
1296a4bd5210SJason Evans #define	malloc_is_malloc 1
1297a4bd5210SJason Evans #define	is_malloc_(a) malloc_is_ ## a
1298a4bd5210SJason Evans #define	is_malloc(a) is_malloc_(a)
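/*
 * Editor's note -- worked expansion, not part of the original source: when
 * jemalloc_defs.h contains "#define je_malloc malloc", the test below
 * expands as
 *
 *	is_malloc(je_malloc) -> is_malloc_(malloc) -> malloc_is_malloc -> 1
 *
 * so the #if condition holds.  Without that define it expands to the unknown
 * identifier malloc_is_je_malloc, which the preprocessor evaluates as 0 in
 * the #if expression, and the glibc hooks below are not emitted.
 */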
1299a4bd5210SJason Evans 
1300a4bd5210SJason Evans #if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
1301a4bd5210SJason Evans /*
1302a4bd5210SJason Evans  * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
1303a4bd5210SJason Evans  * to inconsistently reference libc's malloc(3)-compatible functions
1304a4bd5210SJason Evans  * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
1305a4bd5210SJason Evans  *
1306a4bd5210SJason Evans  * These definitions interpose hooks in glibc.  The functions are actually
1307a4bd5210SJason Evans  * passed an extra argument for the caller return address, which will be
1308a4bd5210SJason Evans  * ignored.
1309a4bd5210SJason Evans  */
131082872ac0SJason Evans JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
131182872ac0SJason Evans JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
131282872ac0SJason Evans JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
131382872ac0SJason Evans JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
1314e722f8f8SJason Evans     je_memalign;
1315a4bd5210SJason Evans #endif
1316a4bd5210SJason Evans 
1317a4bd5210SJason Evans /*
1318a4bd5210SJason Evans  * End non-standard override functions.
1319a4bd5210SJason Evans  */
1320a4bd5210SJason Evans /******************************************************************************/
1321a4bd5210SJason Evans /*
1322a4bd5210SJason Evans  * Begin non-standard functions.
1323a4bd5210SJason Evans  */
1324a4bd5210SJason Evans 
1325a4bd5210SJason Evans size_t
132682872ac0SJason Evans je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
1327a4bd5210SJason Evans {
1328a4bd5210SJason Evans 	size_t ret;
1329a4bd5210SJason Evans 
1330a4bd5210SJason Evans 	assert(malloc_initialized || IS_INITIALIZER);
1331a4bd5210SJason Evans 
1332a4bd5210SJason Evans 	if (config_ivsalloc)
1333a4bd5210SJason Evans 		ret = ivsalloc(ptr, config_prof);
1334a4bd5210SJason Evans 	else
1335a4bd5210SJason Evans 		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
1336a4bd5210SJason Evans 
1337a4bd5210SJason Evans 	return (ret);
1338a4bd5210SJason Evans }
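/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * typical malloc_usable_size() usage.  Assumes the public name maps onto
 * je_malloc_usable_size() and that <stdlib.h>, <string.h>, and the header
 * declaring malloc_usable_size() (on FreeBSD, <malloc_np.h>) are included;
 * example_usable_size is a hypothetical name.
 */
#if 0
static void
example_usable_size(void)
{
	void *p = malloc(100);

	if (p != NULL) {
		/* Usable size is >= the request due to size-class rounding. */
		size_t usable = malloc_usable_size(p);

		memset(p, 0, usable);	/* Safe: the whole usable size is ours. */
		free(p);
	}
}
#endif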
1339a4bd5210SJason Evans 
1340a4bd5210SJason Evans void
1341a4bd5210SJason Evans je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
1342a4bd5210SJason Evans     const char *opts)
1343a4bd5210SJason Evans {
1344a4bd5210SJason Evans 
1345a4bd5210SJason Evans 	stats_print(write_cb, cbopaque, opts);
1346a4bd5210SJason Evans }
1347a4bd5210SJason Evans 
1348a4bd5210SJason Evans int
1349a4bd5210SJason Evans je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
1350a4bd5210SJason Evans     size_t newlen)
1351a4bd5210SJason Evans {
1352a4bd5210SJason Evans 
1353a4bd5210SJason Evans 	if (malloc_init())
1354a4bd5210SJason Evans 		return (EAGAIN);
1355a4bd5210SJason Evans 
1356a4bd5210SJason Evans 	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
1357a4bd5210SJason Evans }
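/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * reading and writing controls through mallctl().  Assumes the public name
 * maps onto je_mallctl(); "stats.allocated" is only meaningful when jemalloc
 * is built with statistics support.  example_mallctl is a hypothetical name.
 */
#if 0
static void
example_mallctl(void)
{
	const char *ver;
	uint64_t epoch = 1;
	size_t allocated, sz;

	/* Read a string-valued control. */
	sz = sizeof(ver);
	mallctl("version", &ver, &sz, NULL, 0);

	/* Write "epoch" to refresh cached statistics, then read one. */
	sz = sizeof(epoch);
	mallctl("epoch", &epoch, &sz, &epoch, sz);
	sz = sizeof(allocated);
	mallctl("stats.allocated", &allocated, &sz, NULL, 0);
}
#endif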
1358a4bd5210SJason Evans 
1359a4bd5210SJason Evans int
1360a4bd5210SJason Evans je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
1361a4bd5210SJason Evans {
1362a4bd5210SJason Evans 
1363a4bd5210SJason Evans 	if (malloc_init())
1364a4bd5210SJason Evans 		return (EAGAIN);
1365a4bd5210SJason Evans 
1366a4bd5210SJason Evans 	return (ctl_nametomib(name, mibp, miblenp));
1367a4bd5210SJason Evans }
1368a4bd5210SJason Evans 
1369a4bd5210SJason Evans int
1370a4bd5210SJason Evans je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1371a4bd5210SJason Evans   void *newp, size_t newlen)
1372a4bd5210SJason Evans {
1373a4bd5210SJason Evans 
1374a4bd5210SJason Evans 	if (malloc_init())
1375a4bd5210SJason Evans 		return (EAGAIN);
1376a4bd5210SJason Evans 
1377a4bd5210SJason Evans 	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
1378a4bd5210SJason Evans }
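/*
 * Editor's note -- illustrative sketch, not part of the original source: the
 * nametomib/bymib pattern for controls that are queried repeatedly, adapted
 * from jemalloc's documentation.  "arenas.bin.<i>.size" translates to a
 * 4-component MIB whose third component is the bin index.
 * example_bin_sizes is a hypothetical name.
 */
#if 0
static void
example_bin_sizes(void)
{
	unsigned nbins, i;
	size_t mib[4];
	size_t len, miblen;

	len = sizeof(nbins);
	mallctl("arenas.nbins", &nbins, &len, NULL, 0);

	/* Translate the name once; patch the index component in the loop. */
	miblen = sizeof(mib) / sizeof(mib[0]);
	mallctlnametomib("arenas.bin.0.size", mib, &miblen);
	for (i = 0; i < nbins; i++) {
		size_t bin_size;

		mib[2] = i;
		len = sizeof(bin_size);
		mallctlbymib(mib, miblen, &bin_size, &len, NULL, 0);
		/* Use bin_size... */
	}
}
#endif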
1379a4bd5210SJason Evans 
1380a4bd5210SJason Evans /*
1381a4bd5210SJason Evans  * End non-standard functions.
1382a4bd5210SJason Evans  */
1383a4bd5210SJason Evans /******************************************************************************/
1384a4bd5210SJason Evans /*
1385a4bd5210SJason Evans  * Begin experimental functions.
1386a4bd5210SJason Evans  */
1387a4bd5210SJason Evans #ifdef JEMALLOC_EXPERIMENTAL
1388a4bd5210SJason Evans 
1389*88ad2f8dSJason Evans static JEMALLOC_ATTR(always_inline) void *
139082872ac0SJason Evans iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache,
139182872ac0SJason Evans     arena_t *arena)
1392a4bd5210SJason Evans {
1393a4bd5210SJason Evans 
1394a4bd5210SJason Evans 	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
1395a4bd5210SJason Evans 	    alignment)));
1396a4bd5210SJason Evans 
1397a4bd5210SJason Evans 	if (alignment != 0)
139882872ac0SJason Evans 		return (ipallocx(usize, alignment, zero, try_tcache, arena));
1399a4bd5210SJason Evans 	else if (zero)
140082872ac0SJason Evans 		return (icallocx(usize, try_tcache, arena));
1401a4bd5210SJason Evans 	else
140282872ac0SJason Evans 		return (imallocx(usize, try_tcache, arena));
1403a4bd5210SJason Evans }
1404a4bd5210SJason Evans 
1405a4bd5210SJason Evans int
1406a4bd5210SJason Evans je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
1407a4bd5210SJason Evans {
1408a4bd5210SJason Evans 	void *p;
1409a4bd5210SJason Evans 	size_t usize;
1410a4bd5210SJason Evans 	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1411a4bd5210SJason Evans 	    & (SIZE_T_MAX-1));
1412a4bd5210SJason Evans 	bool zero = flags & ALLOCM_ZERO;
141382872ac0SJason Evans 	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
141482872ac0SJason Evans 	arena_t *arena;
141582872ac0SJason Evans 	bool try_tcache;
1416a4bd5210SJason Evans 
1417a4bd5210SJason Evans 	assert(ptr != NULL);
1418a4bd5210SJason Evans 	assert(size != 0);
1419a4bd5210SJason Evans 
1420a4bd5210SJason Evans 	if (malloc_init())
1421a4bd5210SJason Evans 		goto label_oom;
1422a4bd5210SJason Evans 
142382872ac0SJason Evans 	if (arena_ind != UINT_MAX) {
142482872ac0SJason Evans 		arena = arenas[arena_ind];
142582872ac0SJason Evans 		try_tcache = false;
142682872ac0SJason Evans 	} else {
142782872ac0SJason Evans 		arena = NULL;
142882872ac0SJason Evans 		try_tcache = true;
142982872ac0SJason Evans 	}
143082872ac0SJason Evans 
1431a4bd5210SJason Evans 	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
1432a4bd5210SJason Evans 	if (usize == 0)
1433a4bd5210SJason Evans 		goto label_oom;
1434a4bd5210SJason Evans 
1435a4bd5210SJason Evans 	if (config_prof && opt_prof) {
1436e722f8f8SJason Evans 		prof_thr_cnt_t *cnt;
1437e722f8f8SJason Evans 
1438a4bd5210SJason Evans 		PROF_ALLOC_PREP(1, usize, cnt);
1439a4bd5210SJason Evans 		if (cnt == NULL)
1440a4bd5210SJason Evans 			goto label_oom;
1441a4bd5210SJason Evans 		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
1442a4bd5210SJason Evans 		    SMALL_MAXCLASS) {
1443a4bd5210SJason Evans 			size_t usize_promoted = (alignment == 0) ?
1444a4bd5210SJason Evans 			    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1,
1445a4bd5210SJason Evans 			    alignment);
1446a4bd5210SJason Evans 			assert(usize_promoted != 0);
144782872ac0SJason Evans 			p = iallocm(usize_promoted, alignment, zero,
144882872ac0SJason Evans 			    try_tcache, arena);
1449a4bd5210SJason Evans 			if (p == NULL)
1450a4bd5210SJason Evans 				goto label_oom;
1451a4bd5210SJason Evans 			arena_prof_promoted(p, usize);
1452a4bd5210SJason Evans 		} else {
145382872ac0SJason Evans 			p = iallocm(usize, alignment, zero, try_tcache, arena);
1454a4bd5210SJason Evans 			if (p == NULL)
1455a4bd5210SJason Evans 				goto label_oom;
1456a4bd5210SJason Evans 		}
1457a4bd5210SJason Evans 		prof_malloc(p, usize, cnt);
1458a4bd5210SJason Evans 	} else {
145982872ac0SJason Evans 		p = iallocm(usize, alignment, zero, try_tcache, arena);
1460a4bd5210SJason Evans 		if (p == NULL)
1461a4bd5210SJason Evans 			goto label_oom;
1462a4bd5210SJason Evans 	}
1463a4bd5210SJason Evans 	if (rsize != NULL)
1464a4bd5210SJason Evans 		*rsize = usize;
1465a4bd5210SJason Evans 
1466a4bd5210SJason Evans 	*ptr = p;
1467a4bd5210SJason Evans 	if (config_stats) {
1468a4bd5210SJason Evans 		assert(usize == isalloc(p, config_prof));
1469a4bd5210SJason Evans 		thread_allocated_tsd_get()->allocated += usize;
1470a4bd5210SJason Evans 	}
1471a4bd5210SJason Evans 	UTRACE(0, size, p);
1472a4bd5210SJason Evans 	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
1473a4bd5210SJason Evans 	return (ALLOCM_SUCCESS);
1474a4bd5210SJason Evans label_oom:
1475a4bd5210SJason Evans 	if (config_xmalloc && opt_xmalloc) {
1476a4bd5210SJason Evans 		malloc_write("<jemalloc>: Error in allocm(): "
1477a4bd5210SJason Evans 		    "out of memory\n");
1478a4bd5210SJason Evans 		abort();
1479a4bd5210SJason Evans 	}
1480a4bd5210SJason Evans 	*ptr = NULL;
1481a4bd5210SJason Evans 	UTRACE(0, size, 0);
1482a4bd5210SJason Evans 	return (ALLOCM_ERR_OOM);
1483a4bd5210SJason Evans }
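/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * allocm() usage with the flag macros from the experimental API (this
 * interface was later superseded by the *allocx() functions).  Assumes the
 * public names map onto the je_* definitions; example_allocm is a
 * hypothetical name.
 */
#if 0
static void
example_allocm(void)
{
	void *p;
	size_t rsize;

	/* 4 KiB request, zero-filled, aligned to 1 << 12 bytes. */
	if (allocm(&p, &rsize, 4096, ALLOCM_LG_ALIGN(12) | ALLOCM_ZERO) ==
	    ALLOCM_SUCCESS) {
		/* rsize holds the usable size actually provided. */
		dallocm(p, 0);
	}
}
#endif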
1484a4bd5210SJason Evans 
1485a4bd5210SJason Evans int
1486a4bd5210SJason Evans je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
1487a4bd5210SJason Evans {
1488a4bd5210SJason Evans 	void *p, *q;
1489a4bd5210SJason Evans 	size_t usize;
1490a4bd5210SJason Evans 	size_t old_size;
1491a4bd5210SJason Evans 	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
1492a4bd5210SJason Evans 	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1493a4bd5210SJason Evans 	    & (SIZE_T_MAX-1));
1494a4bd5210SJason Evans 	bool zero = flags & ALLOCM_ZERO;
1495a4bd5210SJason Evans 	bool no_move = flags & ALLOCM_NO_MOVE;
149682872ac0SJason Evans 	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
149782872ac0SJason Evans 	bool try_tcache_alloc, try_tcache_dalloc;
149882872ac0SJason Evans 	arena_t *arena;
1499a4bd5210SJason Evans 
1500a4bd5210SJason Evans 	assert(ptr != NULL);
1501a4bd5210SJason Evans 	assert(*ptr != NULL);
1502a4bd5210SJason Evans 	assert(size != 0);
1503a4bd5210SJason Evans 	assert(SIZE_T_MAX - size >= extra);
1504a4bd5210SJason Evans 	assert(malloc_initialized || IS_INITIALIZER);
1505a4bd5210SJason Evans 
150682872ac0SJason Evans 	if (arena_ind != UINT_MAX) {
150782872ac0SJason Evans 		arena_chunk_t *chunk;
150882872ac0SJason Evans 		try_tcache_alloc = true;
150982872ac0SJason Evans 		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr);
151082872ac0SJason Evans 		try_tcache_dalloc = (chunk == *ptr || chunk->arena !=
151182872ac0SJason Evans 		    arenas[arena_ind]);
151282872ac0SJason Evans 		arena = arenas[arena_ind];
151382872ac0SJason Evans 	} else {
151482872ac0SJason Evans 		try_tcache_alloc = true;
151582872ac0SJason Evans 		try_tcache_dalloc = true;
151682872ac0SJason Evans 		arena = NULL;
151782872ac0SJason Evans 	}
151882872ac0SJason Evans 
1519a4bd5210SJason Evans 	p = *ptr;
1520a4bd5210SJason Evans 	if (config_prof && opt_prof) {
1521e722f8f8SJason Evans 		prof_thr_cnt_t *cnt;
1522e722f8f8SJason Evans 
1523a4bd5210SJason Evans 		/*
1524a4bd5210SJason Evans 		 * usize isn't knowable before iralloc() returns when extra is
1525a4bd5210SJason Evans 		 * non-zero.  Therefore, compute its maximum possible value and
1526a4bd5210SJason Evans 		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
1527a4bd5210SJason Evans 		 * backtrace.  prof_realloc() will use the actual usize to
1528a4bd5210SJason Evans 		 * decide whether to sample.
1529a4bd5210SJason Evans 		 */
1530a4bd5210SJason Evans 		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
1531a4bd5210SJason Evans 		    sa2u(size+extra, alignment);
1532a4bd5210SJason Evans 		prof_ctx_t *old_ctx = prof_ctx_get(p);
1533a4bd5210SJason Evans 		old_size = isalloc(p, true);
1534a4bd5210SJason Evans 		if (config_valgrind && opt_valgrind)
1535a4bd5210SJason Evans 			old_rzsize = p2rz(p);
1536a4bd5210SJason Evans 		PROF_ALLOC_PREP(1, max_usize, cnt);
1537a4bd5210SJason Evans 		if (cnt == NULL)
1538a4bd5210SJason Evans 			goto label_oom;
1539a4bd5210SJason Evans 		/*
1540a4bd5210SJason Evans 		 * Use minimum usize to determine whether promotion may happen.
1541a4bd5210SJason Evans 		 */
1542a4bd5210SJason Evans 		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U
1543a4bd5210SJason Evans 		    && ((alignment == 0) ? s2u(size) : sa2u(size, alignment))
1544a4bd5210SJason Evans 		    <= SMALL_MAXCLASS) {
154582872ac0SJason Evans 			q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
1546a4bd5210SJason Evans 			    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
154782872ac0SJason Evans 			    alignment, zero, no_move, try_tcache_alloc,
154882872ac0SJason Evans 			    try_tcache_dalloc, arena);
1549a4bd5210SJason Evans 			if (q == NULL)
1550a4bd5210SJason Evans 				goto label_err;
1551a4bd5210SJason Evans 			if (max_usize < PAGE) {
1552a4bd5210SJason Evans 				usize = max_usize;
1553a4bd5210SJason Evans 				arena_prof_promoted(q, usize);
1554a4bd5210SJason Evans 			} else
1555a4bd5210SJason Evans 				usize = isalloc(q, config_prof);
1556a4bd5210SJason Evans 		} else {
155782872ac0SJason Evans 			q = irallocx(p, size, extra, alignment, zero, no_move,
155882872ac0SJason Evans 			    try_tcache_alloc, try_tcache_dalloc, arena);
1559a4bd5210SJason Evans 			if (q == NULL)
1560a4bd5210SJason Evans 				goto label_err;
1561a4bd5210SJason Evans 			usize = isalloc(q, config_prof);
1562a4bd5210SJason Evans 		}
1563a4bd5210SJason Evans 		prof_realloc(q, usize, cnt, old_size, old_ctx);
1564a4bd5210SJason Evans 		if (rsize != NULL)
1565a4bd5210SJason Evans 			*rsize = usize;
1566a4bd5210SJason Evans 	} else {
1567a4bd5210SJason Evans 		if (config_stats) {
1568a4bd5210SJason Evans 			old_size = isalloc(p, false);
1569a4bd5210SJason Evans 			if (config_valgrind && opt_valgrind)
1570a4bd5210SJason Evans 				old_rzsize = u2rz(old_size);
1571a4bd5210SJason Evans 		} else if (config_valgrind && opt_valgrind) {
1572a4bd5210SJason Evans 			old_size = isalloc(p, false);
1573a4bd5210SJason Evans 			old_rzsize = u2rz(old_size);
1574a4bd5210SJason Evans 		}
157582872ac0SJason Evans 		q = irallocx(p, size, extra, alignment, zero, no_move,
157682872ac0SJason Evans 		    try_tcache_alloc, try_tcache_dalloc, arena);
1577a4bd5210SJason Evans 		if (q == NULL)
1578a4bd5210SJason Evans 			goto label_err;
1579a4bd5210SJason Evans 		if (config_stats)
1580a4bd5210SJason Evans 			usize = isalloc(q, config_prof);
1581a4bd5210SJason Evans 		if (rsize != NULL) {
1582a4bd5210SJason Evans 			if (config_stats == false)
1583a4bd5210SJason Evans 				usize = isalloc(q, config_prof);
1584a4bd5210SJason Evans 			*rsize = usize;
1585a4bd5210SJason Evans 		}
1586a4bd5210SJason Evans 	}
1587a4bd5210SJason Evans 
1588a4bd5210SJason Evans 	*ptr = q;
1589a4bd5210SJason Evans 	if (config_stats) {
1590a4bd5210SJason Evans 		thread_allocated_t *ta;
1591a4bd5210SJason Evans 		ta = thread_allocated_tsd_get();
1592a4bd5210SJason Evans 		ta->allocated += usize;
1593a4bd5210SJason Evans 		ta->deallocated += old_size;
1594a4bd5210SJason Evans 	}
1595a4bd5210SJason Evans 	UTRACE(p, size, q);
1596a4bd5210SJason Evans 	JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero);
1597a4bd5210SJason Evans 	return (ALLOCM_SUCCESS);
1598a4bd5210SJason Evans label_err:
1599a4bd5210SJason Evans 	if (no_move) {
1600a4bd5210SJason Evans 		UTRACE(p, size, q);
1601a4bd5210SJason Evans 		return (ALLOCM_ERR_NOT_MOVED);
1602a4bd5210SJason Evans 	}
1603a4bd5210SJason Evans label_oom:
1604a4bd5210SJason Evans 	if (config_xmalloc && opt_xmalloc) {
1605a4bd5210SJason Evans 		malloc_write("<jemalloc>: Error in rallocm(): "
1606a4bd5210SJason Evans 		    "out of memory\n");
1607a4bd5210SJason Evans 		abort();
1608a4bd5210SJason Evans 	}
1609a4bd5210SJason Evans 	UTRACE(p, size, 0);
1610a4bd5210SJason Evans 	return (ALLOCM_ERR_OOM);
1611a4bd5210SJason Evans }
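/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * using ALLOCM_NO_MOVE to attempt an in-place resize before falling back to
 * a moving reallocation.  On failure rallocm() leaves the original object
 * intact, as the error paths above show.  example_rallocm is a hypothetical
 * name.
 */
#if 0
static void
example_rallocm(void)
{
	void *p;
	size_t rsize;

	if (allocm(&p, &rsize, 100, 0) != ALLOCM_SUCCESS)
		return;
	/* Try to grow to 4 KiB in place first. */
	if (rallocm(&p, &rsize, 4096, 0, ALLOCM_NO_MOVE) ==
	    ALLOCM_ERR_NOT_MOVED) {
		/* Could not resize in place; allow the object to move. */
		(void)rallocm(&p, &rsize, 4096, 0, 0);
	}
	dallocm(p, 0);
}
#endif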
1612a4bd5210SJason Evans 
1613a4bd5210SJason Evans int
1614a4bd5210SJason Evans je_sallocm(const void *ptr, size_t *rsize, int flags)
1615a4bd5210SJason Evans {
1616a4bd5210SJason Evans 	size_t sz;
1617a4bd5210SJason Evans 
1618a4bd5210SJason Evans 	assert(malloc_initialized || IS_INITIALIZER);
1619a4bd5210SJason Evans 
1620a4bd5210SJason Evans 	if (config_ivsalloc)
1621a4bd5210SJason Evans 		sz = ivsalloc(ptr, config_prof);
1622a4bd5210SJason Evans 	else {
1623a4bd5210SJason Evans 		assert(ptr != NULL);
1624a4bd5210SJason Evans 		sz = isalloc(ptr, config_prof);
1625a4bd5210SJason Evans 	}
1626a4bd5210SJason Evans 	assert(rsize != NULL);
1627a4bd5210SJason Evans 	*rsize = sz;
1628a4bd5210SJason Evans 
1629a4bd5210SJason Evans 	return (ALLOCM_SUCCESS);
1630a4bd5210SJason Evans }
1631a4bd5210SJason Evans 
1632a4bd5210SJason Evans int
1633a4bd5210SJason Evans je_dallocm(void *ptr, int flags)
1634a4bd5210SJason Evans {
1635a4bd5210SJason Evans 	size_t usize;
1636a4bd5210SJason Evans 	size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
163782872ac0SJason Evans 	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
163882872ac0SJason Evans 	bool try_tcache;
1639a4bd5210SJason Evans 
1640a4bd5210SJason Evans 	assert(ptr != NULL);
1641a4bd5210SJason Evans 	assert(malloc_initialized || IS_INITIALIZER);
1642a4bd5210SJason Evans 
164382872ac0SJason Evans 	if (arena_ind != UINT_MAX) {
164482872ac0SJason Evans 		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
164582872ac0SJason Evans 		try_tcache = (chunk == ptr || chunk->arena !=
164682872ac0SJason Evans 		    arenas[arena_ind]);
164782872ac0SJason Evans 	} else
164882872ac0SJason Evans 		try_tcache = true;
164982872ac0SJason Evans 
1650a4bd5210SJason Evans 	UTRACE(ptr, 0, 0);
1651a4bd5210SJason Evans 	if (config_stats || config_valgrind)
1652a4bd5210SJason Evans 		usize = isalloc(ptr, config_prof);
1653a4bd5210SJason Evans 	if (config_prof && opt_prof) {
1654a4bd5210SJason Evans 		if (config_stats == false && config_valgrind == false)
1655a4bd5210SJason Evans 			usize = isalloc(ptr, config_prof);
1656a4bd5210SJason Evans 		prof_free(ptr, usize);
1657a4bd5210SJason Evans 	}
1658a4bd5210SJason Evans 	if (config_stats)
1659a4bd5210SJason Evans 		thread_allocated_tsd_get()->deallocated += usize;
1660a4bd5210SJason Evans 	if (config_valgrind && opt_valgrind)
1661a4bd5210SJason Evans 		rzsize = p2rz(ptr);
166282872ac0SJason Evans 	iqallocx(ptr, try_tcache);
1663a4bd5210SJason Evans 	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1664a4bd5210SJason Evans 
1665a4bd5210SJason Evans 	return (ALLOCM_SUCCESS);
1666a4bd5210SJason Evans }
1667a4bd5210SJason Evans 
1668a4bd5210SJason Evans int
1669a4bd5210SJason Evans je_nallocm(size_t *rsize, size_t size, int flags)
1670a4bd5210SJason Evans {
1671a4bd5210SJason Evans 	size_t usize;
1672a4bd5210SJason Evans 	size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK)
1673a4bd5210SJason Evans 	    & (SIZE_T_MAX-1));
1674a4bd5210SJason Evans 
1675a4bd5210SJason Evans 	assert(size != 0);
1676a4bd5210SJason Evans 
1677a4bd5210SJason Evans 	if (malloc_init())
1678a4bd5210SJason Evans 		return (ALLOCM_ERR_OOM);
1679a4bd5210SJason Evans 
1680a4bd5210SJason Evans 	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
1681a4bd5210SJason Evans 	if (usize == 0)
1682a4bd5210SJason Evans 		return (ALLOCM_ERR_OOM);
1683a4bd5210SJason Evans 
1684a4bd5210SJason Evans 	if (rsize != NULL)
1685a4bd5210SJason Evans 		*rsize = usize;
1686a4bd5210SJason Evans 	return (ALLOCM_SUCCESS);
1687a4bd5210SJason Evans }
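/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * nallocm() predicts the usable size a request would receive without
 * allocating, and sallocm() reports the usable size of an existing object;
 * the two agree for the same request.  Assumes <assert.h>;
 * example_size_queries is a hypothetical name.
 */
#if 0
static void
example_size_queries(void)
{
	void *p;
	size_t predicted, actual;

	if (nallocm(&predicted, 100, 0) != ALLOCM_SUCCESS)
		return;
	if (allocm(&p, NULL, 100, 0) == ALLOCM_SUCCESS) {
		sallocm(p, &actual, 0);
		assert(actual == predicted);
		dallocm(p, 0);
	}
}
#endif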
1688a4bd5210SJason Evans 
1689a4bd5210SJason Evans #endif
1690a4bd5210SJason Evans /*
1691a4bd5210SJason Evans  * End experimental functions.
1692a4bd5210SJason Evans  */
1693a4bd5210SJason Evans /******************************************************************************/
1694a4bd5210SJason Evans /*
1695a4bd5210SJason Evans  * The following functions are used by threading libraries for protection of
1696a4bd5210SJason Evans  * malloc during fork().
1697a4bd5210SJason Evans  */
1698a4bd5210SJason Evans 
169982872ac0SJason Evans /*
170082872ac0SJason Evans  * If an application creates a thread before doing any allocation in the main
170182872ac0SJason Evans  * thread, then calls fork(2) in the main thread followed by memory allocation
170282872ac0SJason Evans  * in the child process, a race can occur that results in deadlock within the
170382872ac0SJason Evans  * child: the main thread may have forked while the created thread had
170482872ac0SJason Evans  * partially initialized the allocator.  Ordinarily jemalloc prevents
170582872ac0SJason Evans  * fork/malloc races via the following functions it registers during
170682872ac0SJason Evans  * initialization using pthread_atfork(), but of course that does no good if
170782872ac0SJason Evans  * the allocator isn't fully initialized at fork time.  The following library
170882872ac0SJason Evans  * constructor is a partial solution to this problem.  It may still possible to
170982872ac0SJason Evans  * constructor is a partial solution to this problem.  It may still be possible to
171082872ac0SJason Evans  * a library constructor that runs before jemalloc's runs.
171182872ac0SJason Evans  */
171282872ac0SJason Evans JEMALLOC_ATTR(constructor)
171382872ac0SJason Evans static void
171482872ac0SJason Evans jemalloc_constructor(void)
171582872ac0SJason Evans {
171682872ac0SJason Evans 
171782872ac0SJason Evans 	malloc_init();
171882872ac0SJason Evans }
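/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * roughly how the prefork/postfork handlers below are wired up when
 * JEMALLOC_MUTEX_INIT_CB is not defined (the registration itself lives in
 * jemalloc's initialization code, not here).  On FreeBSD the threading
 * library instead calls the exported _malloc_prefork()/_malloc_postfork()
 * wrappers around fork(2).  Assumes <pthread.h>;
 * example_register_fork_handlers is a hypothetical name.
 */
#if 0
static void
example_register_fork_handlers(void)
{

	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child);
}
#endif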
171982872ac0SJason Evans 
1720a4bd5210SJason Evans #ifndef JEMALLOC_MUTEX_INIT_CB
1721a4bd5210SJason Evans void
1722a4bd5210SJason Evans jemalloc_prefork(void)
1723a4bd5210SJason Evans #else
1724e722f8f8SJason Evans JEMALLOC_EXPORT void
1725a4bd5210SJason Evans _malloc_prefork(void)
1726a4bd5210SJason Evans #endif
1727a4bd5210SJason Evans {
1728a4bd5210SJason Evans 	unsigned i;
1729a4bd5210SJason Evans 
173035dad073SJason Evans #ifdef JEMALLOC_MUTEX_INIT_CB
173135dad073SJason Evans 	if (malloc_initialized == false)
173235dad073SJason Evans 		return;
173335dad073SJason Evans #endif
173435dad073SJason Evans 	assert(malloc_initialized);
173535dad073SJason Evans 
1736a4bd5210SJason Evans 	/* Acquire all mutexes in a safe order. */
173782872ac0SJason Evans 	ctl_prefork();
1738a4bd5210SJason Evans 	malloc_mutex_prefork(&arenas_lock);
173982872ac0SJason Evans 	for (i = 0; i < narenas_total; i++) {
1740a4bd5210SJason Evans 		if (arenas[i] != NULL)
1741a4bd5210SJason Evans 			arena_prefork(arenas[i]);
1742a4bd5210SJason Evans 	}
174382872ac0SJason Evans 	prof_prefork();
174482872ac0SJason Evans 	chunk_prefork();
1745a4bd5210SJason Evans 	base_prefork();
1746a4bd5210SJason Evans 	huge_prefork();
1747a4bd5210SJason Evans }
1748a4bd5210SJason Evans 
1749a4bd5210SJason Evans #ifndef JEMALLOC_MUTEX_INIT_CB
1750a4bd5210SJason Evans void
1751a4bd5210SJason Evans jemalloc_postfork_parent(void)
1752a4bd5210SJason Evans #else
1753e722f8f8SJason Evans JEMALLOC_EXPORT void
1754a4bd5210SJason Evans _malloc_postfork(void)
1755a4bd5210SJason Evans #endif
1756a4bd5210SJason Evans {
1757a4bd5210SJason Evans 	unsigned i;
1758a4bd5210SJason Evans 
175935dad073SJason Evans #ifdef JEMALLOC_MUTEX_INIT_CB
176035dad073SJason Evans 	if (malloc_initialized == false)
176135dad073SJason Evans 		return;
176235dad073SJason Evans #endif
176335dad073SJason Evans 	assert(malloc_initialized);
176435dad073SJason Evans 
1765a4bd5210SJason Evans 	/* Release all mutexes, now that fork() has completed. */
1766a4bd5210SJason Evans 	huge_postfork_parent();
1767a4bd5210SJason Evans 	base_postfork_parent();
176882872ac0SJason Evans 	chunk_postfork_parent();
176982872ac0SJason Evans 	prof_postfork_parent();
177082872ac0SJason Evans 	for (i = 0; i < narenas_total; i++) {
1771a4bd5210SJason Evans 		if (arenas[i] != NULL)
1772a4bd5210SJason Evans 			arena_postfork_parent(arenas[i]);
1773a4bd5210SJason Evans 	}
1774a4bd5210SJason Evans 	malloc_mutex_postfork_parent(&arenas_lock);
177582872ac0SJason Evans 	ctl_postfork_parent();
1776a4bd5210SJason Evans }
1777a4bd5210SJason Evans 
1778a4bd5210SJason Evans void
1779a4bd5210SJason Evans jemalloc_postfork_child(void)
1780a4bd5210SJason Evans {
1781a4bd5210SJason Evans 	unsigned i;
1782a4bd5210SJason Evans 
178335dad073SJason Evans 	assert(malloc_initialized);
178435dad073SJason Evans 
1785a4bd5210SJason Evans 	/* Release all mutexes, now that fork() has completed. */
1786a4bd5210SJason Evans 	huge_postfork_child();
1787a4bd5210SJason Evans 	base_postfork_child();
178882872ac0SJason Evans 	chunk_postfork_child();
178982872ac0SJason Evans 	prof_postfork_child();
179082872ac0SJason Evans 	for (i = 0; i < narenas_total; i++) {
1791a4bd5210SJason Evans 		if (arenas[i] != NULL)
1792a4bd5210SJason Evans 			arena_postfork_child(arenas[i]);
1793a4bd5210SJason Evans 	}
1794a4bd5210SJason Evans 	malloc_mutex_postfork_child(&arenas_lock);
179582872ac0SJason Evans 	ctl_postfork_child();
1796a4bd5210SJason Evans }
1797a4bd5210SJason Evans 
1798a4bd5210SJason Evans /******************************************************************************/
1799a4bd5210SJason Evans /*
1800a4bd5210SJason Evans  * The following functions are used for TLS allocation/deallocation in static
1801a4bd5210SJason Evans  * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
1802a4bd5210SJason Evans  * is that these avoid accessing TLS variables.
1803a4bd5210SJason Evans  */
1804a4bd5210SJason Evans 
1805a4bd5210SJason Evans static void *
1806a4bd5210SJason Evans a0alloc(size_t size, bool zero)
1807a4bd5210SJason Evans {
1808a4bd5210SJason Evans 
1809a4bd5210SJason Evans 	if (malloc_init())
1810a4bd5210SJason Evans 		return (NULL);
1811a4bd5210SJason Evans 
1812a4bd5210SJason Evans 	if (size == 0)
1813a4bd5210SJason Evans 		size = 1;
1814a4bd5210SJason Evans 
1815a4bd5210SJason Evans 	if (size <= arena_maxclass)
1816a4bd5210SJason Evans 		return (arena_malloc(arenas[0], size, zero, false));
1817a4bd5210SJason Evans 	else
1818a4bd5210SJason Evans 		return (huge_malloc(size, zero));
1819a4bd5210SJason Evans }
1820a4bd5210SJason Evans 
1821a4bd5210SJason Evans void *
1822a4bd5210SJason Evans a0malloc(size_t size)
1823a4bd5210SJason Evans {
1824a4bd5210SJason Evans 
1825a4bd5210SJason Evans 	return (a0alloc(size, false));
1826a4bd5210SJason Evans }
1827a4bd5210SJason Evans 
1828a4bd5210SJason Evans void *
1829a4bd5210SJason Evans a0calloc(size_t num, size_t size)
1830a4bd5210SJason Evans {
1831a4bd5210SJason Evans 
1832a4bd5210SJason Evans 	return (a0alloc(num * size, true));
1833a4bd5210SJason Evans }
1834a4bd5210SJason Evans 
1835a4bd5210SJason Evans void
1836a4bd5210SJason Evans a0free(void *ptr)
1837a4bd5210SJason Evans {
1838a4bd5210SJason Evans 	arena_chunk_t *chunk;
1839a4bd5210SJason Evans 
1840a4bd5210SJason Evans 	if (ptr == NULL)
1841a4bd5210SJason Evans 		return;
1842a4bd5210SJason Evans 
1843a4bd5210SJason Evans 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
1844a4bd5210SJason Evans 	if (chunk != ptr)
1845a4bd5210SJason Evans 		arena_dalloc(chunk->arena, chunk, ptr, false);
1846a4bd5210SJason Evans 	else
1847a4bd5210SJason Evans 		huge_dalloc(ptr, true);
1848a4bd5210SJason Evans }
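/*
 * Editor's note -- illustrative sketch, not part of the original source: the
 * a0*() functions behave like malloc/calloc/free but are always served from
 * arenas[0] (or the huge allocator) without a thread cache, so they can be
 * used where thread-specific data must not be touched.  example_a0 is a
 * hypothetical name.
 */
#if 0
static void
example_a0(void)
{
	char *buf = a0malloc(128);
	int *vec = a0calloc(16, sizeof(int));	/* Zero-filled. */

	a0free(vec);
	a0free(buf);
}
#endif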
1849a4bd5210SJason Evans 
1850a4bd5210SJason Evans /******************************************************************************/
1851