xref: /freebsd/contrib/jemalloc/src/jemalloc.c (revision 8495e8b1e9e13fb707296a486acf7538dbfa12b2)
1a4bd5210SJason Evans #define	JEMALLOC_C_
2a4bd5210SJason Evans #include "jemalloc/internal/jemalloc_internal.h"
3a4bd5210SJason Evans 
4a4bd5210SJason Evans /******************************************************************************/
5a4bd5210SJason Evans /* Data. */
6a4bd5210SJason Evans 
/* Thread-specific data: the calling thread's arena and allocation counters. */
malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
const char	*__malloc_options_1_0 = NULL;
__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);

/* Runtime configuration options. */
const char	*je_malloc_conf;
/* "abort" option: abort on error conditions; defaults on in debug builds. */
bool	opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
/* "junk" option: defaults on only when both debug and fill are built in. */
bool	opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
size_t	opt_quarantine = ZU(0);	/* "quarantine" option; 0 disables it. */
bool	opt_redzone = false;	/* "redzone" option. */
bool	opt_utrace = false;	/* "utrace" option: trace via utrace(2). */
bool	opt_valgrind = false;	/* "valgrind" option; auto-set when detected. */
bool	opt_xmalloc = false;	/* "xmalloc" option. */
bool	opt_zero = false;	/* "zero" option. */
size_t	opt_narenas = 0;	/* "narenas" option; 0 means automatic. */

unsigned	ncpus;

/* arenas_lock protects the arenas array and per-arena thread counts. */
malloc_mutex_t		arenas_lock;
arena_t			**arenas;
unsigned		narenas_total;
unsigned		narenas_auto;
45a4bd5210SJason Evans 
/* Set to true once the allocator has been initialized. */
static bool		malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER	((unsigned long)0)
#  define INITIALIZER		pthread_self()
#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t		malloc_initializer = NO_INITIALIZER;
#else
/*
 * Without threaded init, a plain flag suffices to detect recursive
 * initialization by the (single) bootstrapping thread.
 */
#  define NO_INITIALIZER	false
#  define INITIALIZER		true
#  define IS_INITIALIZER	malloc_initializer
static bool			malloc_initializer = NO_INITIALIZER;
#endif
61a4bd5210SJason Evans 
/* Used to avoid initialization races. */
#ifdef _WIN32
/*
 * No static mutex initializer is available here, so init_lock must be set
 * up at load time by a constructor, before any allocation can occur.
 */
static malloc_mutex_t	init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
/*
 * MSVC lacks __attribute__((constructor)); placing a function pointer in
 * the .CRT$XCU section makes the CRT invoke it during startup.
 */
#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif

#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif
83a4bd5210SJason Evans 
/* Record of one allocation event, as reported through utrace(2). */
typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
/*
 * Emit a utrace(2) record when the "utrace" option is enabled.  errno is
 * saved and restored so that tracing is invisible to callers that inspect
 * errno after allocator calls.
 */
#  define UTRACE(a, b, c) do {						\
	if (opt_utrace) {						\
		int utrace_serrno = errno;				\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
		errno = utrace_serrno;					\
	}								\
} while (0)
#else
#  define UTRACE(a, b, c)
#endif
105a4bd5210SJason Evans 
106a4bd5210SJason Evans /******************************************************************************/
107f921d10fSJason Evans /*
108f921d10fSJason Evans  * Function prototypes for static functions that are referenced prior to
109f921d10fSJason Evans  * definition.
110f921d10fSJason Evans  */
111a4bd5210SJason Evans 
112a4bd5210SJason Evans static bool	malloc_init_hard(void);
113a4bd5210SJason Evans 
114a4bd5210SJason Evans /******************************************************************************/
115a4bd5210SJason Evans /*
116a4bd5210SJason Evans  * Begin miscellaneous support functions.
117a4bd5210SJason Evans  */
118a4bd5210SJason Evans 
119a4bd5210SJason Evans /* Create a new arena and insert it into the arenas array at index ind. */
120a4bd5210SJason Evans arena_t *
121a4bd5210SJason Evans arenas_extend(unsigned ind)
122a4bd5210SJason Evans {
123a4bd5210SJason Evans 	arena_t *ret;
124a4bd5210SJason Evans 
125a4bd5210SJason Evans 	ret = (arena_t *)base_alloc(sizeof(arena_t));
126a4bd5210SJason Evans 	if (ret != NULL && arena_new(ret, ind) == false) {
127a4bd5210SJason Evans 		arenas[ind] = ret;
128a4bd5210SJason Evans 		return (ret);
129a4bd5210SJason Evans 	}
130a4bd5210SJason Evans 	/* Only reached if there is an OOM error. */
131a4bd5210SJason Evans 
132a4bd5210SJason Evans 	/*
133a4bd5210SJason Evans 	 * OOM here is quite inconvenient to propagate, since dealing with it
134a4bd5210SJason Evans 	 * would require a check for failure in the fast path.  Instead, punt
135a4bd5210SJason Evans 	 * by using arenas[0].  In practice, this is an extremely unlikely
136a4bd5210SJason Evans 	 * failure.
137a4bd5210SJason Evans 	 */
138a4bd5210SJason Evans 	malloc_write("<jemalloc>: Error initializing arena\n");
139a4bd5210SJason Evans 	if (opt_abort)
140a4bd5210SJason Evans 		abort();
141a4bd5210SJason Evans 
142a4bd5210SJason Evans 	return (arenas[0]);
143a4bd5210SJason Evans }
144a4bd5210SJason Evans 
145a4bd5210SJason Evans /* Slow path, called only by choose_arena(). */
146a4bd5210SJason Evans arena_t *
147a4bd5210SJason Evans choose_arena_hard(void)
148a4bd5210SJason Evans {
149a4bd5210SJason Evans 	arena_t *ret;
150a4bd5210SJason Evans 
15182872ac0SJason Evans 	if (narenas_auto > 1) {
152a4bd5210SJason Evans 		unsigned i, choose, first_null;
153a4bd5210SJason Evans 
154a4bd5210SJason Evans 		choose = 0;
15582872ac0SJason Evans 		first_null = narenas_auto;
156a4bd5210SJason Evans 		malloc_mutex_lock(&arenas_lock);
157a4bd5210SJason Evans 		assert(arenas[0] != NULL);
15882872ac0SJason Evans 		for (i = 1; i < narenas_auto; i++) {
159a4bd5210SJason Evans 			if (arenas[i] != NULL) {
160a4bd5210SJason Evans 				/*
161a4bd5210SJason Evans 				 * Choose the first arena that has the lowest
162a4bd5210SJason Evans 				 * number of threads assigned to it.
163a4bd5210SJason Evans 				 */
164a4bd5210SJason Evans 				if (arenas[i]->nthreads <
165a4bd5210SJason Evans 				    arenas[choose]->nthreads)
166a4bd5210SJason Evans 					choose = i;
16782872ac0SJason Evans 			} else if (first_null == narenas_auto) {
168a4bd5210SJason Evans 				/*
169a4bd5210SJason Evans 				 * Record the index of the first uninitialized
170a4bd5210SJason Evans 				 * arena, in case all extant arenas are in use.
171a4bd5210SJason Evans 				 *
172a4bd5210SJason Evans 				 * NB: It is possible for there to be
173a4bd5210SJason Evans 				 * discontinuities in terms of initialized
174a4bd5210SJason Evans 				 * versus uninitialized arenas, due to the
175a4bd5210SJason Evans 				 * "thread.arena" mallctl.
176a4bd5210SJason Evans 				 */
177a4bd5210SJason Evans 				first_null = i;
178a4bd5210SJason Evans 			}
179a4bd5210SJason Evans 		}
180a4bd5210SJason Evans 
18182872ac0SJason Evans 		if (arenas[choose]->nthreads == 0
18282872ac0SJason Evans 		    || first_null == narenas_auto) {
183a4bd5210SJason Evans 			/*
184a4bd5210SJason Evans 			 * Use an unloaded arena, or the least loaded arena if
185a4bd5210SJason Evans 			 * all arenas are already initialized.
186a4bd5210SJason Evans 			 */
187a4bd5210SJason Evans 			ret = arenas[choose];
188a4bd5210SJason Evans 		} else {
189a4bd5210SJason Evans 			/* Initialize a new arena. */
190a4bd5210SJason Evans 			ret = arenas_extend(first_null);
191a4bd5210SJason Evans 		}
192a4bd5210SJason Evans 		ret->nthreads++;
193a4bd5210SJason Evans 		malloc_mutex_unlock(&arenas_lock);
194a4bd5210SJason Evans 	} else {
195a4bd5210SJason Evans 		ret = arenas[0];
196a4bd5210SJason Evans 		malloc_mutex_lock(&arenas_lock);
197a4bd5210SJason Evans 		ret->nthreads++;
198a4bd5210SJason Evans 		malloc_mutex_unlock(&arenas_lock);
199a4bd5210SJason Evans 	}
200a4bd5210SJason Evans 
201a4bd5210SJason Evans 	arenas_tsd_set(&ret);
202a4bd5210SJason Evans 
203a4bd5210SJason Evans 	return (ret);
204a4bd5210SJason Evans }
205a4bd5210SJason Evans 
206a4bd5210SJason Evans static void
207a4bd5210SJason Evans stats_print_atexit(void)
208a4bd5210SJason Evans {
209a4bd5210SJason Evans 
210a4bd5210SJason Evans 	if (config_tcache && config_stats) {
21182872ac0SJason Evans 		unsigned narenas, i;
212a4bd5210SJason Evans 
213a4bd5210SJason Evans 		/*
214a4bd5210SJason Evans 		 * Merge stats from extant threads.  This is racy, since
215a4bd5210SJason Evans 		 * individual threads do not lock when recording tcache stats
216a4bd5210SJason Evans 		 * events.  As a consequence, the final stats may be slightly
217a4bd5210SJason Evans 		 * out of date by the time they are reported, if other threads
218a4bd5210SJason Evans 		 * continue to allocate.
219a4bd5210SJason Evans 		 */
22082872ac0SJason Evans 		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
221a4bd5210SJason Evans 			arena_t *arena = arenas[i];
222a4bd5210SJason Evans 			if (arena != NULL) {
223a4bd5210SJason Evans 				tcache_t *tcache;
224a4bd5210SJason Evans 
225a4bd5210SJason Evans 				/*
226a4bd5210SJason Evans 				 * tcache_stats_merge() locks bins, so if any
227a4bd5210SJason Evans 				 * code is introduced that acquires both arena
228a4bd5210SJason Evans 				 * and bin locks in the opposite order,
229a4bd5210SJason Evans 				 * deadlocks may result.
230a4bd5210SJason Evans 				 */
231a4bd5210SJason Evans 				malloc_mutex_lock(&arena->lock);
232a4bd5210SJason Evans 				ql_foreach(tcache, &arena->tcache_ql, link) {
233a4bd5210SJason Evans 					tcache_stats_merge(tcache, arena);
234a4bd5210SJason Evans 				}
235a4bd5210SJason Evans 				malloc_mutex_unlock(&arena->lock);
236a4bd5210SJason Evans 			}
237a4bd5210SJason Evans 		}
238a4bd5210SJason Evans 	}
239a4bd5210SJason Evans 	je_malloc_stats_print(NULL, NULL, NULL);
240a4bd5210SJason Evans }
241a4bd5210SJason Evans 
242a4bd5210SJason Evans /*
243a4bd5210SJason Evans  * End miscellaneous support functions.
244a4bd5210SJason Evans  */
245a4bd5210SJason Evans /******************************************************************************/
246a4bd5210SJason Evans /*
247a4bd5210SJason Evans  * Begin initialization functions.
248a4bd5210SJason Evans  */
249a4bd5210SJason Evans 
/*
 * Return the number of online CPUs, falling back to 1 if the system
 * cannot report a count.
 */
static unsigned
malloc_ncpus(void)
{
	long ncpus;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	ncpus = si.dwNumberOfProcessors;
#else
	ncpus = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	if (ncpus == -1)
		return (1);
	return ((unsigned)ncpus);
}
264a4bd5210SJason Evans 
265a4bd5210SJason Evans void
266a4bd5210SJason Evans arenas_cleanup(void *arg)
267a4bd5210SJason Evans {
268a4bd5210SJason Evans 	arena_t *arena = *(arena_t **)arg;
269a4bd5210SJason Evans 
270a4bd5210SJason Evans 	malloc_mutex_lock(&arenas_lock);
271a4bd5210SJason Evans 	arena->nthreads--;
272a4bd5210SJason Evans 	malloc_mutex_unlock(&arenas_lock);
273a4bd5210SJason Evans }
274a4bd5210SJason Evans 
2752b06b201SJason Evans JEMALLOC_ALWAYS_INLINE_C void
276f8ca2db1SJason Evans malloc_thread_init(void)
277f8ca2db1SJason Evans {
278f8ca2db1SJason Evans 
279f8ca2db1SJason Evans 	/*
280f8ca2db1SJason Evans 	 * TSD initialization can't be safely done as a side effect of
281f8ca2db1SJason Evans 	 * deallocation, because it is possible for a thread to do nothing but
282f8ca2db1SJason Evans 	 * deallocate its TLS data via free(), in which case writing to TLS
283f8ca2db1SJason Evans 	 * would cause write-after-free memory corruption.  The quarantine
284f8ca2db1SJason Evans 	 * facility *only* gets used as a side effect of deallocation, so make
285f8ca2db1SJason Evans 	 * a best effort attempt at initializing its TSD by hooking all
286f8ca2db1SJason Evans 	 * allocation events.
287f8ca2db1SJason Evans 	 */
288f8ca2db1SJason Evans 	if (config_fill && opt_quarantine)
289f8ca2db1SJason Evans 		quarantine_alloc_hook();
290f8ca2db1SJason Evans }
291f8ca2db1SJason Evans 
2922b06b201SJason Evans JEMALLOC_ALWAYS_INLINE_C bool
293a4bd5210SJason Evans malloc_init(void)
294a4bd5210SJason Evans {
295a4bd5210SJason Evans 
296f8ca2db1SJason Evans 	if (malloc_initialized == false && malloc_init_hard())
297f8ca2db1SJason Evans 		return (true);
298f8ca2db1SJason Evans 	malloc_thread_init();
299a4bd5210SJason Evans 
300a4bd5210SJason Evans 	return (false);
301a4bd5210SJason Evans }
302a4bd5210SJason Evans 
303a4bd5210SJason Evans static bool
304a4bd5210SJason Evans malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
305a4bd5210SJason Evans     char const **v_p, size_t *vlen_p)
306a4bd5210SJason Evans {
307a4bd5210SJason Evans 	bool accept;
308a4bd5210SJason Evans 	const char *opts = *opts_p;
309a4bd5210SJason Evans 
310a4bd5210SJason Evans 	*k_p = opts;
311a4bd5210SJason Evans 
312a4bd5210SJason Evans 	for (accept = false; accept == false;) {
313a4bd5210SJason Evans 		switch (*opts) {
314a4bd5210SJason Evans 		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
315a4bd5210SJason Evans 		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
316a4bd5210SJason Evans 		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
317a4bd5210SJason Evans 		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
318a4bd5210SJason Evans 		case 'Y': case 'Z':
319a4bd5210SJason Evans 		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
320a4bd5210SJason Evans 		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
321a4bd5210SJason Evans 		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
322a4bd5210SJason Evans 		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
323a4bd5210SJason Evans 		case 'y': case 'z':
324a4bd5210SJason Evans 		case '0': case '1': case '2': case '3': case '4': case '5':
325a4bd5210SJason Evans 		case '6': case '7': case '8': case '9':
326a4bd5210SJason Evans 		case '_':
327a4bd5210SJason Evans 			opts++;
328a4bd5210SJason Evans 			break;
329a4bd5210SJason Evans 		case ':':
330a4bd5210SJason Evans 			opts++;
331a4bd5210SJason Evans 			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
332a4bd5210SJason Evans 			*v_p = opts;
333a4bd5210SJason Evans 			accept = true;
334a4bd5210SJason Evans 			break;
335a4bd5210SJason Evans 		case '\0':
336a4bd5210SJason Evans 			if (opts != *opts_p) {
337a4bd5210SJason Evans 				malloc_write("<jemalloc>: Conf string ends "
338a4bd5210SJason Evans 				    "with key\n");
339a4bd5210SJason Evans 			}
340a4bd5210SJason Evans 			return (true);
341a4bd5210SJason Evans 		default:
342a4bd5210SJason Evans 			malloc_write("<jemalloc>: Malformed conf string\n");
343a4bd5210SJason Evans 			return (true);
344a4bd5210SJason Evans 		}
345a4bd5210SJason Evans 	}
346a4bd5210SJason Evans 
347a4bd5210SJason Evans 	for (accept = false; accept == false;) {
348a4bd5210SJason Evans 		switch (*opts) {
349a4bd5210SJason Evans 		case ',':
350a4bd5210SJason Evans 			opts++;
351a4bd5210SJason Evans 			/*
352a4bd5210SJason Evans 			 * Look ahead one character here, because the next time
353a4bd5210SJason Evans 			 * this function is called, it will assume that end of
354a4bd5210SJason Evans 			 * input has been cleanly reached if no input remains,
355a4bd5210SJason Evans 			 * but we have optimistically already consumed the
356a4bd5210SJason Evans 			 * comma if one exists.
357a4bd5210SJason Evans 			 */
358a4bd5210SJason Evans 			if (*opts == '\0') {
359a4bd5210SJason Evans 				malloc_write("<jemalloc>: Conf string ends "
360a4bd5210SJason Evans 				    "with comma\n");
361a4bd5210SJason Evans 			}
362a4bd5210SJason Evans 			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
363a4bd5210SJason Evans 			accept = true;
364a4bd5210SJason Evans 			break;
365a4bd5210SJason Evans 		case '\0':
366a4bd5210SJason Evans 			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
367a4bd5210SJason Evans 			accept = true;
368a4bd5210SJason Evans 			break;
369a4bd5210SJason Evans 		default:
370a4bd5210SJason Evans 			opts++;
371a4bd5210SJason Evans 			break;
372a4bd5210SJason Evans 		}
373a4bd5210SJason Evans 	}
374a4bd5210SJason Evans 
375a4bd5210SJason Evans 	*opts_p = opts;
376a4bd5210SJason Evans 	return (false);
377a4bd5210SJason Evans }
378a4bd5210SJason Evans 
/*
 * Print a diagnostic for a bad configuration option.  The key and value
 * are not NUL-terminated, hence the explicit klen/vlen lengths.
 */
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}
387a4bd5210SJason Evans 
/*
 * Parse runtime options from three sources, applied in increasing order
 * of priority: the compiled-in je_malloc_conf string, the name of the
 * /etc/malloc.conf symbolic link, and the MALLOC_CONF environment
 * variable.  Each source is a comma-separated list of key:value pairs.
 */
static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	/*
	 * Automatically configure valgrind before processing options.  The
	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
	 */
	if (config_valgrind) {
		opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
		if (config_fill && opt_valgrind) {
			opt_junk = false;
			assert(opt_zero == false);
			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
			opt_redzone = true;
		}
		if (config_tcache && opt_valgrind)
			opt_tcache = false;
	}

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			/*
			 * NOTE(review): readlink() returns ssize_t; storing
			 * it in an int truncates on (implausibly) huge link
			 * targets -- confirm against upstream, which later
			 * widened this.
			 */
			int linklen = 0;
#ifndef _WIN32
			int saved_errno = errno;
			const char *linkname =
#  ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#  else
			    "/etc/malloc.conf"
#  endif
			    ;

			/*
			 * Try to use the contents of the "/etc/malloc.conf"
			 * symbolic link's name.
			 */
			linklen = readlink(linkname, buf, sizeof(buf) - 1);
			if (linklen == -1) {
				/* No configuration specified. */
				linklen = 0;
				/* restore errno */
				set_errno(saved_errno);
			}
#endif
			buf[linklen] = '\0';
			opts = buf;
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			/* Ignore the environment for set-user-ID programs. */
			if (issetugid() == 0 && (opts = getenv(envname)) !=
			    NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			not_reached();
			buf[0] = '\0';
			opts = buf;
		}

		/*
		 * Each CONF_HANDLE_* macro below matches one option key,
		 * parses and range-checks its value, assigns the target
		 * variable, and `continue`s to the next key:value pair.
		 * Falling through all of them reaches the "Invalid conf
		 * pair" diagnostic at the bottom of the loop.
		 */
		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define	CONF_HANDLE_BOOL(o, n)						\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				uintmax_t um;				\
				char *end;				\
									\
				set_errno(0);				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (clip) {			\
					if (min != 0 && um < min)	\
						o = min;		\
					else if (um > max)		\
						o = max;		\
					else				\
						o = um;			\
				} else {				\
					if ((min != 0 && um < min) ||	\
					    um > max) {			\
						malloc_conf_error(	\
						    "Out-of-range "	\
						    "conf value",	\
						    k, klen, v, vlen);	\
					} else				\
						o = um;			\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				set_errno(0);				\
				l = strtol(v, &end, 0);			\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(opt_abort, "abort")
			/*
			 * Chunks always require at least one header page, plus
			 * one data page in the absence of redzones, or three
			 * pages in the presence of redzones.  In order to
			 * simplify options processing, fix the limit based on
			 * config_fill.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
			    true)
			/* "dss" takes a named precedence, not a number. */
			if (strncmp("dss", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < dss_prec_limit; i++) {
					if (strncmp(dss_prec_names[i], v, vlen)
					    == 0) {
						if (chunk_dss_prec_set(i)) {
							malloc_conf_error(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
							    dss_prec_names[i];
							match = true;
							break;
						}
					}
				}
				if (match == false) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
			    SIZE_T_MAX, false)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, "junk")
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX, false)
				CONF_HANDLE_BOOL(opt_redzone, "redzone")
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_valgrind) {
				CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache")
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
653a4bd5210SJason Evans 
/*
 * Slow path of allocator initialization.  Runs the full bootstrap sequence
 * under init_lock: parse configuration, boot each subsystem in dependency
 * order, create the first arena, then size and allocate the real arenas
 * array.  Returns false on success (including the case where another thread
 * already completed initialization) and true on failure.
 */
static bool
malloc_init_hard(void)
{
	/* Temporary one-slot arenas array; replaced by base_alloc()ed array. */
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	/* This thread owns initialization from here on. */
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	/*
	 * Boot subsystems in dependency order; any failure aborts
	 * initialization with init_lock released and true returned.
	 */
	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock)) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas_total = narenas_auto = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_fill && quarantine_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Drop init_lock: the calls below may allocate recursively. */
	malloc_mutex_unlock(&init_lock);
	/**********************************************************************/
	/* Recursive allocation may follow. */

	ncpus = malloc_ncpus();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32))
	/* LinuxThreads's pthread_atfork() allocates. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	/* Done recursively allocating. */
	/**********************************************************************/
	malloc_mutex_lock(&init_lock);

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas_auto = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas_auto > chunksize / sizeof(arena_t *)) {
		narenas_auto = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas_auto);
	}
	narenas_total = narenas_auto;

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);

	return (false);
}
845a4bd5210SJason Evans 
846a4bd5210SJason Evans /*
847a4bd5210SJason Evans  * End initialization functions.
848a4bd5210SJason Evans  */
849a4bd5210SJason Evans /******************************************************************************/
850a4bd5210SJason Evans /*
851a4bd5210SJason Evans  * Begin malloc(3)-compatible functions.
852a4bd5210SJason Evans  */
853a4bd5210SJason Evans 
854f921d10fSJason Evans static void *
855f921d10fSJason Evans imalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
856f921d10fSJason Evans {
857f921d10fSJason Evans 	void *p;
858f921d10fSJason Evans 
859f921d10fSJason Evans 	if (cnt == NULL)
860f921d10fSJason Evans 		return (NULL);
861f921d10fSJason Evans 	if (prof_promote && usize <= SMALL_MAXCLASS) {
862f921d10fSJason Evans 		p = imalloc(SMALL_MAXCLASS+1);
863f921d10fSJason Evans 		if (p == NULL)
864f921d10fSJason Evans 			return (NULL);
865f921d10fSJason Evans 		arena_prof_promoted(p, usize);
866f921d10fSJason Evans 	} else
867f921d10fSJason Evans 		p = imalloc(usize);
868f921d10fSJason Evans 
869f921d10fSJason Evans 	return (p);
870f921d10fSJason Evans }
871f921d10fSJason Evans 
872f921d10fSJason Evans JEMALLOC_ALWAYS_INLINE_C void *
873f921d10fSJason Evans imalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
874f921d10fSJason Evans {
875f921d10fSJason Evans 	void *p;
876f921d10fSJason Evans 
877f921d10fSJason Evans 	if ((uintptr_t)cnt != (uintptr_t)1U)
878f921d10fSJason Evans 		p = imalloc_prof_sample(usize, cnt);
879f921d10fSJason Evans 	else
880f921d10fSJason Evans 		p = imalloc(usize);
881f921d10fSJason Evans 	if (p == NULL)
882f921d10fSJason Evans 		return (NULL);
883f921d10fSJason Evans 	prof_malloc(p, usize, cnt);
884f921d10fSJason Evans 
885f921d10fSJason Evans 	return (p);
886f921d10fSJason Evans }
887f921d10fSJason Evans 
/*
 * MALLOC_BODY() is a macro rather than a function because its contents are in
 * the fast path, but inlining would cause reliability issues when determining
 * how many frames to discard from heap profiling backtraces.
 *
 * ret:   lvalue receiving the allocation result (NULL on failure).
 * size:  requested size in bytes (callers map 0 to 1 before invoking).
 * usize: lvalue receiving the usable size when profiling, stats, or
 *        valgrind integration needs it; otherwise left untouched.
 */
#define	MALLOC_BODY(ret, size, usize) do {				\
	if (malloc_init())						\
		ret = NULL;						\
	else {								\
		if (config_prof && opt_prof) {				\
			prof_thr_cnt_t *cnt;				\
									\
			usize = s2u(size);				\
			/*						\
			 * Call PROF_ALLOC_PREP() here rather than in	\
			 * imalloc_prof() so that imalloc_prof() can be	\
			 * inlined without introducing uncertainty	\
			 * about the number of backtrace frames to	\
			 * ignore.  imalloc_prof() is in the fast path	\
			 * when heap profiling is enabled, so inlining	\
			 * is critical to performance.  (For		\
			 * consistency all callers of PROF_ALLOC_PREP()	\
			 * are structured similarly, even though e.g.	\
			 * realloc() isn't called enough for inlining	\
			 * to be critical.)				\
			 */						\
			PROF_ALLOC_PREP(1, usize, cnt);			\
			ret = imalloc_prof(usize, cnt);			\
		} else {						\
			if (config_stats || (config_valgrind &&		\
			    opt_valgrind))				\
				usize = s2u(size);			\
			ret = imalloc(size);				\
		}							\
	}								\
} while (0)
924f921d10fSJason Evans 
925a4bd5210SJason Evans void *
926a4bd5210SJason Evans je_malloc(size_t size)
927a4bd5210SJason Evans {
928a4bd5210SJason Evans 	void *ret;
929e722f8f8SJason Evans 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
930a4bd5210SJason Evans 
931a4bd5210SJason Evans 	if (size == 0)
932a4bd5210SJason Evans 		size = 1;
933a4bd5210SJason Evans 
934f921d10fSJason Evans 	MALLOC_BODY(ret, size, usize);
935a4bd5210SJason Evans 
936a4bd5210SJason Evans 	if (ret == NULL) {
937a4bd5210SJason Evans 		if (config_xmalloc && opt_xmalloc) {
938a4bd5210SJason Evans 			malloc_write("<jemalloc>: Error in malloc(): "
939a4bd5210SJason Evans 			    "out of memory\n");
940a4bd5210SJason Evans 			abort();
941a4bd5210SJason Evans 		}
942e722f8f8SJason Evans 		set_errno(ENOMEM);
943a4bd5210SJason Evans 	}
944a4bd5210SJason Evans 	if (config_stats && ret != NULL) {
945a4bd5210SJason Evans 		assert(usize == isalloc(ret, config_prof));
946a4bd5210SJason Evans 		thread_allocated_tsd_get()->allocated += usize;
947a4bd5210SJason Evans 	}
948a4bd5210SJason Evans 	UTRACE(0, size, ret);
949a4bd5210SJason Evans 	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
950a4bd5210SJason Evans 	return (ret);
951a4bd5210SJason Evans }
952a4bd5210SJason Evans 
953f921d10fSJason Evans static void *
954f921d10fSJason Evans imemalign_prof_sample(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
955f921d10fSJason Evans {
956f921d10fSJason Evans 	void *p;
957f921d10fSJason Evans 
958f921d10fSJason Evans 	if (cnt == NULL)
959f921d10fSJason Evans 		return (NULL);
960f921d10fSJason Evans 	if (prof_promote && usize <= SMALL_MAXCLASS) {
961f921d10fSJason Evans 		assert(sa2u(SMALL_MAXCLASS+1, alignment) != 0);
962f921d10fSJason Evans 		p = ipalloc(sa2u(SMALL_MAXCLASS+1, alignment), alignment,
963f921d10fSJason Evans 		    false);
964f921d10fSJason Evans 		if (p == NULL)
965f921d10fSJason Evans 			return (NULL);
966f921d10fSJason Evans 		arena_prof_promoted(p, usize);
967f921d10fSJason Evans 	} else
968f921d10fSJason Evans 		p = ipalloc(usize, alignment, false);
969f921d10fSJason Evans 
970f921d10fSJason Evans 	return (p);
971f921d10fSJason Evans }
972f921d10fSJason Evans 
973f921d10fSJason Evans JEMALLOC_ALWAYS_INLINE_C void *
974f921d10fSJason Evans imemalign_prof(size_t alignment, size_t usize, prof_thr_cnt_t *cnt)
975f921d10fSJason Evans {
976f921d10fSJason Evans 	void *p;
977f921d10fSJason Evans 
978f921d10fSJason Evans 	if ((uintptr_t)cnt != (uintptr_t)1U)
979f921d10fSJason Evans 		p = imemalign_prof_sample(alignment, usize, cnt);
980f921d10fSJason Evans 	else
981f921d10fSJason Evans 		p = ipalloc(usize, alignment, false);
982f921d10fSJason Evans 	if (p == NULL)
983f921d10fSJason Evans 		return (NULL);
984f921d10fSJason Evans 	prof_malloc(p, usize, cnt);
985f921d10fSJason Evans 
986f921d10fSJason Evans 	return (p);
987f921d10fSJason Evans }
988f921d10fSJason Evans 
JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_NOINLINE
#endif
/*
 * Shared implementation for posix_memalign(), aligned_alloc(), memalign(),
 * and valloc().  On success stores the allocation in *memptr and returns 0;
 * otherwise returns EINVAL (alignment not a power of 2 >= min_alignment) or
 * ENOMEM, leaving *memptr unmodified.
 */
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;

	assert(min_alignment != 0);

	if (malloc_init()) {
		result = NULL;
		goto label_oom;
	} else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto label_return;
		}

		/* usize == 0 indicates size_t overflow in sa2u(). */
		usize = sa2u(size, alignment);
		if (usize == 0) {
			result = NULL;
			goto label_oom;
		}

		if (config_prof && opt_prof) {
			prof_thr_cnt_t *cnt;

			/* 2 frames to ignore: this function plus its caller. */
			PROF_ALLOC_PREP(2, usize, cnt);
			result = imemalign_prof(alignment, usize, cnt);
		} else
			result = ipalloc(usize, alignment, false);
		if (result == NULL)
			goto label_oom;
	}

	*memptr = result;
	ret = 0;
label_return:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, result);
	return (ret);
label_oom:
	assert(result == NULL);
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error allocating aligned memory: "
		    "out of memory\n");
		abort();
	}
	ret = ENOMEM;
	goto label_return;
}
1062a4bd5210SJason Evans 
1063a4bd5210SJason Evans int
1064a4bd5210SJason Evans je_posix_memalign(void **memptr, size_t alignment, size_t size)
1065a4bd5210SJason Evans {
1066a4bd5210SJason Evans 	int ret = imemalign(memptr, alignment, size, sizeof(void *));
1067a4bd5210SJason Evans 	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
1068a4bd5210SJason Evans 	    config_prof), false);
1069a4bd5210SJason Evans 	return (ret);
1070a4bd5210SJason Evans }
1071a4bd5210SJason Evans 
1072a4bd5210SJason Evans void *
1073a4bd5210SJason Evans je_aligned_alloc(size_t alignment, size_t size)
1074a4bd5210SJason Evans {
1075a4bd5210SJason Evans 	void *ret;
1076a4bd5210SJason Evans 	int err;
1077a4bd5210SJason Evans 
1078a4bd5210SJason Evans 	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
1079a4bd5210SJason Evans 		ret = NULL;
1080e722f8f8SJason Evans 		set_errno(err);
1081a4bd5210SJason Evans 	}
1082a4bd5210SJason Evans 	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
1083a4bd5210SJason Evans 	    false);
1084a4bd5210SJason Evans 	return (ret);
1085a4bd5210SJason Evans }
1086a4bd5210SJason Evans 
1087f921d10fSJason Evans static void *
1088f921d10fSJason Evans icalloc_prof_sample(size_t usize, prof_thr_cnt_t *cnt)
1089f921d10fSJason Evans {
1090f921d10fSJason Evans 	void *p;
1091f921d10fSJason Evans 
1092f921d10fSJason Evans 	if (cnt == NULL)
1093f921d10fSJason Evans 		return (NULL);
1094f921d10fSJason Evans 	if (prof_promote && usize <= SMALL_MAXCLASS) {
1095f921d10fSJason Evans 		p = icalloc(SMALL_MAXCLASS+1);
1096f921d10fSJason Evans 		if (p == NULL)
1097f921d10fSJason Evans 			return (NULL);
1098f921d10fSJason Evans 		arena_prof_promoted(p, usize);
1099f921d10fSJason Evans 	} else
1100f921d10fSJason Evans 		p = icalloc(usize);
1101f921d10fSJason Evans 
1102f921d10fSJason Evans 	return (p);
1103f921d10fSJason Evans }
1104f921d10fSJason Evans 
1105f921d10fSJason Evans JEMALLOC_ALWAYS_INLINE_C void *
1106f921d10fSJason Evans icalloc_prof(size_t usize, prof_thr_cnt_t *cnt)
1107f921d10fSJason Evans {
1108f921d10fSJason Evans 	void *p;
1109f921d10fSJason Evans 
1110f921d10fSJason Evans 	if ((uintptr_t)cnt != (uintptr_t)1U)
1111f921d10fSJason Evans 		p = icalloc_prof_sample(usize, cnt);
1112f921d10fSJason Evans 	else
1113f921d10fSJason Evans 		p = icalloc(usize);
1114f921d10fSJason Evans 	if (p == NULL)
1115f921d10fSJason Evans 		return (NULL);
1116f921d10fSJason Evans 	prof_malloc(p, usize, cnt);
1117f921d10fSJason Evans 
1118f921d10fSJason Evans 	return (p);
1119f921d10fSJason Evans }
1120f921d10fSJason Evans 
/*
 * calloc(3)-compatible entry point: allocate zeroed storage for num objects
 * of the given size.  Returns NULL and sets errno to ENOMEM on failure
 * (including num * size overflow); aborts instead under the xmalloc option.
 */
void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		/*
		 * A zero product with a zero operand is a legitimate
		 * zero-byte request (allocate 1 byte); a zero product with
		 * both operands non-zero means the multiplication wrapped.
		 */
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		ret = icalloc_prof(usize, cnt);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

label_return:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, num_size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
	return (ret);
}
1183a4bd5210SJason Evans 
1184f921d10fSJason Evans static void *
1185f921d10fSJason Evans irealloc_prof_sample(void *oldptr, size_t usize, prof_thr_cnt_t *cnt)
1186a4bd5210SJason Evans {
1187f921d10fSJason Evans 	void *p;
1188a4bd5210SJason Evans 
1189a4bd5210SJason Evans 	if (cnt == NULL)
1190f921d10fSJason Evans 		return (NULL);
1191f921d10fSJason Evans 	if (prof_promote && usize <= SMALL_MAXCLASS) {
1192f921d10fSJason Evans 		p = iralloc(oldptr, SMALL_MAXCLASS+1, 0, 0, false);
1193f921d10fSJason Evans 		if (p == NULL)
1194f921d10fSJason Evans 			return (NULL);
1195f921d10fSJason Evans 		arena_prof_promoted(p, usize);
1196a4bd5210SJason Evans 	} else
1197f921d10fSJason Evans 		p = iralloc(oldptr, usize, 0, 0, false);
1198f921d10fSJason Evans 
1199f921d10fSJason Evans 	return (p);
1200a4bd5210SJason Evans }
1201a4bd5210SJason Evans 
1202f921d10fSJason Evans JEMALLOC_ALWAYS_INLINE_C void *
1203f921d10fSJason Evans irealloc_prof(void *oldptr, size_t old_usize, size_t usize, prof_thr_cnt_t *cnt)
1204a4bd5210SJason Evans {
1205f921d10fSJason Evans 	void *p;
1206f921d10fSJason Evans 	prof_ctx_t *old_ctx;
1207a4bd5210SJason Evans 
1208f921d10fSJason Evans 	old_ctx = prof_ctx_get(oldptr);
1209f921d10fSJason Evans 	if ((uintptr_t)cnt != (uintptr_t)1U)
1210f921d10fSJason Evans 		p = irealloc_prof_sample(oldptr, usize, cnt);
1211f921d10fSJason Evans 	else
1212f921d10fSJason Evans 		p = iralloc(oldptr, usize, 0, 0, false);
1213f921d10fSJason Evans 	if (p == NULL)
1214f921d10fSJason Evans 		return (NULL);
1215f921d10fSJason Evans 	prof_realloc(p, usize, cnt, old_usize, old_ctx);
1216f921d10fSJason Evans 
1217f921d10fSJason Evans 	return (p);
1218f921d10fSJason Evans }
1219f921d10fSJason Evans 
1220f921d10fSJason Evans JEMALLOC_INLINE_C void
1221f921d10fSJason Evans ifree(void *ptr)
1222f921d10fSJason Evans {
1223a4bd5210SJason Evans 	size_t usize;
1224f921d10fSJason Evans 	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
1225a4bd5210SJason Evans 
1226f921d10fSJason Evans 	assert(ptr != NULL);
1227a4bd5210SJason Evans 	assert(malloc_initialized || IS_INITIALIZER);
1228a4bd5210SJason Evans 
1229a4bd5210SJason Evans 	if (config_prof && opt_prof) {
1230a4bd5210SJason Evans 		usize = isalloc(ptr, config_prof);
1231a4bd5210SJason Evans 		prof_free(ptr, usize);
1232a4bd5210SJason Evans 	} else if (config_stats || config_valgrind)
1233a4bd5210SJason Evans 		usize = isalloc(ptr, config_prof);
1234a4bd5210SJason Evans 	if (config_stats)
1235a4bd5210SJason Evans 		thread_allocated_tsd_get()->deallocated += usize;
1236a4bd5210SJason Evans 	if (config_valgrind && opt_valgrind)
1237a4bd5210SJason Evans 		rzsize = p2rz(ptr);
1238a4bd5210SJason Evans 	iqalloc(ptr);
1239a4bd5210SJason Evans 	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
1240a4bd5210SJason Evans }
1241f921d10fSJason Evans 
/*
 * realloc(3)-compatible entry point.  realloc(NULL, size) behaves like
 * malloc(size); realloc(ptr, 0) behaves like free(ptr) and returns NULL.
 * On failure returns NULL, sets errno to ENOMEM, and leaves *ptr valid.
 */
void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_usize = 0;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(ptr). */
			UTRACE(ptr, 0, 0);
			ifree(ptr);
			return (NULL);
		}
		size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);
		malloc_thread_init();

		/* Capture old sizes before the data potentially moves. */
		if ((config_prof && opt_prof) || config_stats ||
		    (config_valgrind && opt_valgrind))
			old_usize = isalloc(ptr, config_prof);
		if (config_valgrind && opt_valgrind)
			old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);

		if (config_prof && opt_prof) {
			prof_thr_cnt_t *cnt;

			usize = s2u(size);
			PROF_ALLOC_PREP(1, usize, cnt);
			ret = irealloc_prof(ptr, old_usize, usize, cnt);
		} else {
			if (config_stats || (config_valgrind && opt_valgrind))
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false);
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		MALLOC_BODY(ret, size, usize);
	}

	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in realloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret, config_prof));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_usize;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_usize, old_rzsize,
	    false);
	return (ret);
}
1306f921d10fSJason Evans 
1307f921d10fSJason Evans void
1308f921d10fSJason Evans je_free(void *ptr)
1309f921d10fSJason Evans {
1310f921d10fSJason Evans 
1311f921d10fSJason Evans 	UTRACE(ptr, 0, 0);
1312f921d10fSJason Evans 	if (ptr != NULL)
1313f921d10fSJason Evans 		ifree(ptr);
1314a4bd5210SJason Evans }
1315a4bd5210SJason Evans 
1316a4bd5210SJason Evans /*
1317a4bd5210SJason Evans  * End malloc(3)-compatible functions.
1318a4bd5210SJason Evans  */
1319a4bd5210SJason Evans /******************************************************************************/
1320a4bd5210SJason Evans /*
1321a4bd5210SJason Evans  * Begin non-standard override functions.
1322a4bd5210SJason Evans  */
1323a4bd5210SJason Evans 
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
/*
 * Obsolete memalign(3) override.  imemalign()'s return value is ignored;
 * on failure imemalign() leaves *memptr untouched, so ret retains its
 * initial value (NULL where JEMALLOC_CC_SILENCE_INIT expands to an
 * initializer — TODO confirm the failure-path value on compilers where it
 * expands to nothing).
 */
void *
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, alignment, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif
1334a4bd5210SJason Evans 
#ifdef JEMALLOC_OVERRIDE_VALLOC
/*
 * Obsolete valloc(3) override: page-aligned allocation (alignment PAGE).
 * imemalign()'s return value is ignored; on failure ret retains its
 * initial value (see je_memalign()-style CC_SILENCE_INIT caveat).
 */
void *
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	imemalign(&ret, PAGE, size, 1);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
	return (ret);
}
#endif
1345a4bd5210SJason Evans 
/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)

/*
 * The #if below expands to true only when je_malloc is #defined to malloc
 * (i.e. jemalloc is the system allocator), so the glibc hooks are installed
 * only in that configuration.
 */
#if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc;
JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
#endif
1370a4bd5210SJason Evans 
1371a4bd5210SJason Evans /*
1372a4bd5210SJason Evans  * End non-standard override functions.
1373a4bd5210SJason Evans  */
1374a4bd5210SJason Evans /******************************************************************************/
1375a4bd5210SJason Evans /*
1376a4bd5210SJason Evans  * Begin non-standard functions.
1377a4bd5210SJason Evans  */
1378a4bd5210SJason Evans 
1379f921d10fSJason Evans JEMALLOC_ALWAYS_INLINE_C void *
1380f921d10fSJason Evans imallocx(size_t usize, size_t alignment, bool zero, bool try_tcache,
1381f921d10fSJason Evans     arena_t *arena)
1382a4bd5210SJason Evans {
1383f921d10fSJason Evans 
1384f921d10fSJason Evans 	assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize,
1385f921d10fSJason Evans 	    alignment)));
1386f921d10fSJason Evans 
1387f921d10fSJason Evans 	if (alignment != 0)
1388f921d10fSJason Evans 		return (ipalloct(usize, alignment, zero, try_tcache, arena));
1389f921d10fSJason Evans 	else if (zero)
1390f921d10fSJason Evans 		return (icalloct(usize, try_tcache, arena));
1391f921d10fSJason Evans 	else
1392f921d10fSJason Evans 		return (imalloct(usize, try_tcache, arena));
1393f921d10fSJason Evans }
1394f921d10fSJason Evans 
/*
 * Allocation path used when the profiler chose to sample this allocation.
 * If prof_promote is enabled and the request fits a small size class, the
 * allocation is promoted to the smallest size class above SMALL_MAXCLASS
 * and then tagged (via arena_prof_promoted()) with the originally computed
 * usize.  Returns NULL on failure or when cnt is NULL.
 */
static void *
imallocx_prof_sample(size_t usize, size_t alignment, bool zero, bool try_tcache,
    arena_t *arena, prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (prof_promote && usize <= SMALL_MAXCLASS) {
		/* Recompute the promoted size with the same alignment rule. */
		size_t usize_promoted = (alignment == 0) ?
		    s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, alignment);
		assert(usize_promoted != 0);
		p = imallocx(usize_promoted, alignment, zero, try_tcache,
		    arena);
		if (p == NULL)
			return (NULL);
		/* Record the original usize for the promoted allocation. */
		arena_prof_promoted(p, usize);
	} else
		p = imallocx(usize, alignment, zero, try_tcache, arena);

	return (p);
}
1417f921d10fSJason Evans 
1418f921d10fSJason Evans JEMALLOC_ALWAYS_INLINE_C void *
1419f921d10fSJason Evans imallocx_prof(size_t usize, size_t alignment, bool zero, bool try_tcache,
1420f921d10fSJason Evans     arena_t *arena, prof_thr_cnt_t *cnt)
1421f921d10fSJason Evans {
1422f921d10fSJason Evans 	void *p;
1423f921d10fSJason Evans 
1424f921d10fSJason Evans 	if ((uintptr_t)cnt != (uintptr_t)1U) {
1425f921d10fSJason Evans 		p = imallocx_prof_sample(usize, alignment, zero, try_tcache,
1426f921d10fSJason Evans 		    arena, cnt);
1427f921d10fSJason Evans 	} else
1428f921d10fSJason Evans 		p = imallocx(usize, alignment, zero, try_tcache, arena);
1429f921d10fSJason Evans 	if (p == NULL)
1430f921d10fSJason Evans 		return (NULL);
1431f921d10fSJason Evans 	prof_malloc(p, usize, cnt);
1432f921d10fSJason Evans 
1433f921d10fSJason Evans 	return (p);
1434f921d10fSJason Evans }
1435f921d10fSJason Evans 
/*
 * mallocx(3): allocate at least size bytes, honoring the MALLOCX_* flags
 * (lg(alignment) in the low bits, MALLOCX_ZERO, and an optional arena index
 * encoded in bits 8 and up).  Returns NULL on OOM, aborting first if
 * opt_xmalloc is enabled.
 */
void *
je_mallocx(size_t size, int flags)
{
	void *p;
	size_t usize;
	/* Decode lg(alignment) from the flag low bits; 0 means "natural". */
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & MALLOCX_ZERO;
	/* Bits 8.. encode (arena index + 1); 0 decodes to UINT_MAX = none. */
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	arena_t *arena;
	bool try_tcache;

	assert(size != 0);

	if (malloc_init())
		goto label_oom;

	if (arena_ind != UINT_MAX) {
		/*
		 * Caller pinned a specific arena; bypass the thread cache so
		 * the allocation actually comes from that arena.
		 */
		arena = arenas[arena_ind];
		try_tcache = false;
	} else {
		arena = NULL;
		try_tcache = true;
	}

	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
	assert(usize != 0);

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		PROF_ALLOC_PREP(1, usize, cnt);
		p = imallocx_prof(usize, alignment, zero, try_tcache, arena,
		    cnt);
	} else
		p = imallocx(usize, alignment, zero, try_tcache, arena);
	if (p == NULL)
		goto label_oom;

	if (config_stats) {
		assert(usize == isalloc(p, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, p);
	JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero);
	return (p);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
		abort();
	}
	UTRACE(0, size, 0);
	return (NULL);
}
1490f921d10fSJason Evans 
/*
 * Reallocation path used when the profiler chose to sample this event.
 * Small results are promoted past SMALL_MAXCLASS (with any remaining bytes
 * of the original request passed as "extra") and tagged with the true usize
 * via arena_prof_promoted().  Returns NULL on failure or when cnt is NULL.
 */
static void *
irallocx_prof_sample(void *oldptr, size_t size, size_t alignment, size_t usize,
    bool zero, bool try_tcache_alloc, bool try_tcache_dalloc, arena_t *arena,
    prof_thr_cnt_t *cnt)
{
	void *p;

	if (cnt == NULL)
		return (NULL);
	if (prof_promote && usize <= SMALL_MAXCLASS) {
		/*
		 * Request SMALL_MAXCLASS+1 bytes, converting the remainder of
		 * the original size (if any) into the "extra" argument.
		 */
		p = iralloct(oldptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
		    size) ? 0 : size - (SMALL_MAXCLASS+1), alignment, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else {
		p = iralloct(oldptr, size, 0, alignment, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena);
	}

	return (p);
}
1514f921d10fSJason Evans 
/*
 * Profiling wrapper around iralloct().  Captures the old allocation's prof
 * context before it is (possibly) destroyed, performs the reallocation
 * (sampled or not, depending on cnt), and reports the transition to the
 * profiler via prof_realloc().  *usize is updated in place when the actual
 * usable size cannot be known up front (see comment below).
 */
JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(void *oldptr, size_t old_usize, size_t size, size_t alignment,
    size_t *usize, bool zero, bool try_tcache_alloc, bool try_tcache_dalloc,
    arena_t *arena, prof_thr_cnt_t *cnt)
{
	void *p;
	prof_ctx_t *old_ctx;

	/* Must be read before iralloct() can free/reuse oldptr. */
	old_ctx = prof_ctx_get(oldptr);
	if ((uintptr_t)cnt != (uintptr_t)1U)
		p = irallocx_prof_sample(oldptr, size, alignment, *usize, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena, cnt);
	else {
		p = iralloct(oldptr, size, 0, alignment, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena);
	}
	if (p == NULL)
		return (NULL);

	if (p == oldptr && alignment != 0) {
		/*
		 * The allocation did not move, so it is possible that the size
		 * class is smaller than would guarantee the requested
		 * alignment, and that the alignment constraint was
		 * serendipitously satisfied.  Additionally, old_usize may not
		 * be the same as the current usize because of in-place large
		 * reallocation.  Therefore, query the actual value of usize.
		 */
		*usize = isalloc(p, config_prof);
	}
	prof_realloc(p, *usize, cnt, old_usize, old_ctx);

	return (p);
}
1549f921d10fSJason Evans 
/*
 * rallocx(3): resize the allocation at ptr to at least size bytes, moving it
 * if necessary.  Flags are decoded as in mallocx().  Returns NULL on OOM
 * (aborting first if opt_xmalloc is enabled); ptr remains valid on failure.
 */
void *
je_rallocx(void *ptr, size_t size, int flags)
{
	void *p;
	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	/* Decode lg(alignment) from the flag low bits; 0 means "natural". */
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & MALLOCX_ZERO;
	/* Bits 8.. encode (arena index + 1); 0 decodes to UINT_MAX = none. */
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	bool try_tcache_alloc, try_tcache_dalloc;
	arena_t *arena;

	assert(ptr != NULL);
	assert(size != 0);
	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (arena_ind != UINT_MAX) {
		arena_chunk_t *chunk;
		try_tcache_alloc = false;
		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		/*
		 * Allow tcache dalloc only when ptr does not already belong
		 * to the target arena (chunk == ptr presumably indicates a
		 * huge allocation -- TODO confirm).
		 */
		try_tcache_dalloc = (chunk == ptr || chunk->arena !=
		    arenas[arena_ind]);
		arena = arenas[arena_ind];
	} else {
		try_tcache_alloc = true;
		try_tcache_dalloc = true;
		arena = NULL;
	}

	/* old_usize is only needed for stats/prof/valgrind accounting. */
	if ((config_prof && opt_prof) || config_stats ||
	    (config_valgrind && opt_valgrind))
		old_usize = isalloc(ptr, config_prof);
	if (config_valgrind && opt_valgrind)
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;

		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
		assert(usize != 0);
		PROF_ALLOC_PREP(1, usize, cnt);
		p = irallocx_prof(ptr, old_usize, size, alignment, &usize, zero,
		    try_tcache_alloc, try_tcache_dalloc, arena, cnt);
		if (p == NULL)
			goto label_oom;
	} else {
		p = iralloct(ptr, size, 0, alignment, zero, try_tcache_alloc,
		    try_tcache_dalloc, arena);
		if (p == NULL)
			goto label_oom;
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = isalloc(p, config_prof);
	}

	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_usize;
	}
	UTRACE(ptr, size, p);
	JEMALLOC_VALGRIND_REALLOC(p, usize, ptr, old_usize, old_rzsize, zero);
	return (p);
label_oom:
	if (config_xmalloc && opt_xmalloc) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
		abort();
	}
	UTRACE(ptr, size, 0);
	return (NULL);
}
1623f921d10fSJason Evans 
1624f921d10fSJason Evans JEMALLOC_ALWAYS_INLINE_C size_t
1625f921d10fSJason Evans ixallocx_helper(void *ptr, size_t old_usize, size_t size, size_t extra,
1626f921d10fSJason Evans     size_t alignment, bool zero, arena_t *arena)
1627f921d10fSJason Evans {
1628f921d10fSJason Evans 	size_t usize;
1629f921d10fSJason Evans 
1630f921d10fSJason Evans 	if (ixalloc(ptr, size, extra, alignment, zero))
1631f921d10fSJason Evans 		return (old_usize);
1632f921d10fSJason Evans 	usize = isalloc(ptr, config_prof);
1633f921d10fSJason Evans 
1634f921d10fSJason Evans 	return (usize);
1635f921d10fSJason Evans }
1636f921d10fSJason Evans 
/*
 * In-place resize path used when the profiler chose to sample this event.
 * If promotion may apply (minimum possible usize fits a small class), the
 * request is inflated past SMALL_MAXCLASS; the result is tagged with the
 * actual usize only when max_usize < PAGE, i.e. when the promoted object
 * still represents a small request.  Returns the new usable size, or
 * old_usize if nothing changed (including when cnt is NULL).
 */
static size_t
ixallocx_prof_sample(void *ptr, size_t old_usize, size_t size, size_t extra,
    size_t alignment, size_t max_usize, bool zero, arena_t *arena,
    prof_thr_cnt_t *cnt)
{
	size_t usize;

	if (cnt == NULL)
		return (old_usize);
	/* Use minimum usize to determine whether promotion may happen. */
	if (prof_promote && ((alignment == 0) ? s2u(size) : sa2u(size,
	    alignment)) <= SMALL_MAXCLASS) {
		/* Fold the tail of the request into the "extra" argument. */
		if (ixalloc(ptr, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >=
		    size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1),
		    alignment, zero))
			return (old_usize);
		usize = isalloc(ptr, config_prof);
		if (max_usize < PAGE)
			arena_prof_promoted(ptr, usize);
	} else {
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
		    zero, arena);
	}

	return (usize);
}
1663f921d10fSJason Evans 
1664f921d10fSJason Evans JEMALLOC_ALWAYS_INLINE_C size_t
1665f921d10fSJason Evans ixallocx_prof(void *ptr, size_t old_usize, size_t size, size_t extra,
1666f921d10fSJason Evans     size_t alignment, size_t max_usize, bool zero, arena_t *arena,
1667f921d10fSJason Evans     prof_thr_cnt_t *cnt)
1668f921d10fSJason Evans {
1669f921d10fSJason Evans 	size_t usize;
1670f921d10fSJason Evans 	prof_ctx_t *old_ctx;
1671f921d10fSJason Evans 
1672f921d10fSJason Evans 	old_ctx = prof_ctx_get(ptr);
1673f921d10fSJason Evans 	if ((uintptr_t)cnt != (uintptr_t)1U) {
1674f921d10fSJason Evans 		usize = ixallocx_prof_sample(ptr, old_usize, size, extra,
1675f921d10fSJason Evans 		    alignment, zero, max_usize, arena, cnt);
1676f921d10fSJason Evans 	} else {
1677f921d10fSJason Evans 		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
1678f921d10fSJason Evans 		    zero, arena);
1679f921d10fSJason Evans 	}
1680f921d10fSJason Evans 	if (usize == old_usize)
1681f921d10fSJason Evans 		return (usize);
1682f921d10fSJason Evans 	prof_realloc(ptr, usize, cnt, old_usize, old_ctx);
1683f921d10fSJason Evans 
1684f921d10fSJason Evans 	return (usize);
1685f921d10fSJason Evans }
1686f921d10fSJason Evans 
/*
 * xallocx(3): resize the allocation at ptr in place to at least size bytes
 * (up to size+extra if that helps), never moving it.  Returns the resulting
 * usable size, which equals the old usable size when no resize happened.
 */
size_t
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	/* Decode lg(alignment) from the flag low bits; 0 means "natural". */
	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
	    & (SIZE_T_MAX-1));
	bool zero = flags & MALLOCX_ZERO;
	/* Bits 8.. encode (arena index + 1); 0 decodes to UINT_MAX = none. */
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	arena_t *arena;

	assert(ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized || IS_INITIALIZER);
	malloc_thread_init();

	if (arena_ind != UINT_MAX)
		arena = arenas[arena_ind];
	else
		arena = NULL;

	old_usize = isalloc(ptr, config_prof);
	if (config_valgrind && opt_valgrind)
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		prof_thr_cnt_t *cnt;
		/*
		 * usize isn't knowable before ixalloc() returns when extra is
		 * non-zero.  Therefore, compute its maximum possible value and
		 * use that in PROF_ALLOC_PREP() to decide whether to capture a
		 * backtrace.  prof_realloc() will use the actual usize to
		 * decide whether to sample.
		 */
		size_t max_usize = (alignment == 0) ? s2u(size+extra) :
		    sa2u(size+extra, alignment);
		PROF_ALLOC_PREP(1, max_usize, cnt);
		usize = ixallocx_prof(ptr, old_usize, size, extra, alignment,
		    max_usize, zero, arena, cnt);
	} else {
		usize = ixallocx_helper(ptr, old_usize, size, extra, alignment,
		    zero, arena);
	}
	/* No stats/valgrind updates are needed when nothing changed. */
	if (usize == old_usize)
		goto label_not_resized;

	if (config_stats) {
		thread_allocated_t *ta;
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_usize;
	}
	JEMALLOC_VALGRIND_REALLOC(ptr, usize, ptr, old_usize, old_rzsize, zero);
label_not_resized:
	UTRACE(ptr, size, ptr);
	return (usize);
}
1745f921d10fSJason Evans 
1746f921d10fSJason Evans size_t
1747f921d10fSJason Evans je_sallocx(const void *ptr, int flags)
1748f921d10fSJason Evans {
1749f921d10fSJason Evans 	size_t usize;
1750a4bd5210SJason Evans 
1751a4bd5210SJason Evans 	assert(malloc_initialized || IS_INITIALIZER);
1752f8ca2db1SJason Evans 	malloc_thread_init();
1753a4bd5210SJason Evans 
1754a4bd5210SJason Evans 	if (config_ivsalloc)
1755f921d10fSJason Evans 		usize = ivsalloc(ptr, config_prof);
1756f921d10fSJason Evans 	else {
1757f921d10fSJason Evans 		assert(ptr != NULL);
1758f921d10fSJason Evans 		usize = isalloc(ptr, config_prof);
1759f921d10fSJason Evans 	}
1760a4bd5210SJason Evans 
1761f921d10fSJason Evans 	return (usize);
1762a4bd5210SJason Evans }
1763a4bd5210SJason Evans 
/*
 * dallocx(3): deallocate ptr, with an optional caller-specified arena index
 * encoded in the flags (bits 8 and up, as elsewhere).
 */
void
je_dallocx(void *ptr, int flags)
{
	size_t usize;
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);
	/* Bits 8.. encode (arena index + 1); 0 decodes to UINT_MAX = none. */
	unsigned arena_ind = ((unsigned)(flags >> 8)) - 1;
	bool try_tcache;

	assert(ptr != NULL);
	assert(malloc_initialized || IS_INITIALIZER);

	if (arena_ind != UINT_MAX) {
		arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		/*
		 * Allow tcache dalloc only when ptr does not belong to the
		 * specified arena (chunk == ptr presumably indicates a huge
		 * allocation -- TODO confirm).
		 */
		try_tcache = (chunk == ptr || chunk->arena !=
		    arenas[arena_ind]);
	} else
		try_tcache = true;

	UTRACE(ptr, 0, 0);
	/* usize is only needed for stats/prof/valgrind bookkeeping. */
	if (config_stats || config_valgrind)
		usize = isalloc(ptr, config_prof);
	if (config_prof && opt_prof) {
		if (config_stats == false && config_valgrind == false)
			usize = isalloc(ptr, config_prof);
		prof_free(ptr, usize);
	}
	if (config_stats)
		thread_allocated_tsd_get()->deallocated += usize;
	/* rzsize must be read before the memory is released below. */
	if (config_valgrind && opt_valgrind)
		rzsize = p2rz(ptr);
	iqalloct(ptr, try_tcache);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}
1797f921d10fSJason Evans 
1798f921d10fSJason Evans size_t
1799f921d10fSJason Evans je_nallocx(size_t size, int flags)
1800f921d10fSJason Evans {
1801f921d10fSJason Evans 	size_t usize;
1802f921d10fSJason Evans 	size_t alignment = (ZU(1) << (flags & MALLOCX_LG_ALIGN_MASK)
1803f921d10fSJason Evans 	    & (SIZE_T_MAX-1));
1804f921d10fSJason Evans 
1805f921d10fSJason Evans 	assert(size != 0);
1806f921d10fSJason Evans 
1807f921d10fSJason Evans 	if (malloc_init())
1808f921d10fSJason Evans 		return (0);
1809f921d10fSJason Evans 
1810f921d10fSJason Evans 	usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
1811f921d10fSJason Evans 	assert(usize != 0);
1812f921d10fSJason Evans 	return (usize);
1813a4bd5210SJason Evans }
1814a4bd5210SJason Evans 
1815a4bd5210SJason Evans int
1816a4bd5210SJason Evans je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
1817a4bd5210SJason Evans     size_t newlen)
1818a4bd5210SJason Evans {
1819a4bd5210SJason Evans 
1820a4bd5210SJason Evans 	if (malloc_init())
1821a4bd5210SJason Evans 		return (EAGAIN);
1822a4bd5210SJason Evans 
1823a4bd5210SJason Evans 	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
1824a4bd5210SJason Evans }
1825a4bd5210SJason Evans 
1826a4bd5210SJason Evans int
1827a4bd5210SJason Evans je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
1828a4bd5210SJason Evans {
1829a4bd5210SJason Evans 
1830a4bd5210SJason Evans 	if (malloc_init())
1831a4bd5210SJason Evans 		return (EAGAIN);
1832a4bd5210SJason Evans 
1833a4bd5210SJason Evans 	return (ctl_nametomib(name, mibp, miblenp));
1834a4bd5210SJason Evans }
1835a4bd5210SJason Evans 
1836a4bd5210SJason Evans int
1837a4bd5210SJason Evans je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
1838a4bd5210SJason Evans   void *newp, size_t newlen)
1839a4bd5210SJason Evans {
1840a4bd5210SJason Evans 
1841a4bd5210SJason Evans 	if (malloc_init())
1842a4bd5210SJason Evans 		return (EAGAIN);
1843a4bd5210SJason Evans 
1844a4bd5210SJason Evans 	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
1845a4bd5210SJason Evans }
1846a4bd5210SJason Evans 
/*
 * malloc_stats_print(3): emit allocator statistics through write_cb (or the
 * default writer when write_cb is NULL -- behavior of stats_print), filtered
 * by the option string opts.
 */
void
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}
1854f921d10fSJason Evans 
1855f921d10fSJason Evans size_t
1856f921d10fSJason Evans je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
1857f921d10fSJason Evans {
1858f921d10fSJason Evans 	size_t ret;
1859f921d10fSJason Evans 
1860f921d10fSJason Evans 	assert(malloc_initialized || IS_INITIALIZER);
1861f921d10fSJason Evans 	malloc_thread_init();
1862f921d10fSJason Evans 
1863f921d10fSJason Evans 	if (config_ivsalloc)
1864f921d10fSJason Evans 		ret = ivsalloc(ptr, config_prof);
1865f921d10fSJason Evans 	else
1866f921d10fSJason Evans 		ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0;
1867f921d10fSJason Evans 
1868f921d10fSJason Evans 	return (ret);
1869f921d10fSJason Evans }
1870f921d10fSJason Evans 
1871a4bd5210SJason Evans /*
1872a4bd5210SJason Evans  * End non-standard functions.
1873a4bd5210SJason Evans  */
1874a4bd5210SJason Evans /******************************************************************************/
1875a4bd5210SJason Evans /*
1876a4bd5210SJason Evans  * Begin experimental functions.
1877a4bd5210SJason Evans  */
1878a4bd5210SJason Evans #ifdef JEMALLOC_EXPERIMENTAL
1879a4bd5210SJason Evans 
1880a4bd5210SJason Evans int
1881a4bd5210SJason Evans je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
1882a4bd5210SJason Evans {
1883a4bd5210SJason Evans 	void *p;
1884a4bd5210SJason Evans 
1885a4bd5210SJason Evans 	assert(ptr != NULL);
1886a4bd5210SJason Evans 
1887f921d10fSJason Evans 	p = je_mallocx(size, flags);
1888a4bd5210SJason Evans 	if (p == NULL)
1889a4bd5210SJason Evans 		return (ALLOCM_ERR_OOM);
1890f921d10fSJason Evans 	if (rsize != NULL)
1891f921d10fSJason Evans 		*rsize = isalloc(p, config_prof);
1892f921d10fSJason Evans 	*ptr = p;
1893f921d10fSJason Evans 	return (ALLOCM_SUCCESS);
1894a4bd5210SJason Evans }
1895a4bd5210SJason Evans 
/*
 * Deprecated experimental API: resize *ptr via xallocx()/rallocx().  With
 * ALLOCM_NO_MOVE the resize is in-place only (xallocx) and may report
 * ALLOCM_ERR_NOT_MOVED; otherwise rallocx() may move the allocation and
 * *ptr is updated on success.
 */
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	int ret;
	bool no_move = flags & ALLOCM_NO_MOVE;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);

	if (no_move) {
		size_t usize = je_xallocx(*ptr, size, extra, flags);
		ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
		if (rsize != NULL)
			*rsize = usize;
	} else {
		void *p = je_rallocx(*ptr, size+extra, flags);
		if (p != NULL) {
			*ptr = p;
			ret = ALLOCM_SUCCESS;
		} else
			ret = ALLOCM_ERR_OOM;
		/*
		 * On OOM, *ptr is unchanged and still valid, so this reports
		 * the (old) allocation's usable size in that case too.
		 */
		if (rsize != NULL)
			*rsize = isalloc(*ptr, config_prof);
	}
	return (ret);
}
1924a4bd5210SJason Evans 
1925a4bd5210SJason Evans int
1926a4bd5210SJason Evans je_sallocm(const void *ptr, size_t *rsize, int flags)
1927a4bd5210SJason Evans {
1928a4bd5210SJason Evans 
1929a4bd5210SJason Evans 	assert(rsize != NULL);
1930f921d10fSJason Evans 	*rsize = je_sallocx(ptr, flags);
1931a4bd5210SJason Evans 	return (ALLOCM_SUCCESS);
1932a4bd5210SJason Evans }
1933a4bd5210SJason Evans 
1934a4bd5210SJason Evans int
1935a4bd5210SJason Evans je_dallocm(void *ptr, int flags)
1936a4bd5210SJason Evans {
1937a4bd5210SJason Evans 
1938f921d10fSJason Evans 	je_dallocx(ptr, flags);
1939a4bd5210SJason Evans 	return (ALLOCM_SUCCESS);
1940a4bd5210SJason Evans }
1941a4bd5210SJason Evans 
1942a4bd5210SJason Evans int
1943a4bd5210SJason Evans je_nallocm(size_t *rsize, size_t size, int flags)
1944a4bd5210SJason Evans {
1945a4bd5210SJason Evans 	size_t usize;
1946a4bd5210SJason Evans 
1947f921d10fSJason Evans 	usize = je_nallocx(size, flags);
1948a4bd5210SJason Evans 	if (usize == 0)
1949a4bd5210SJason Evans 		return (ALLOCM_ERR_OOM);
1950a4bd5210SJason Evans 	if (rsize != NULL)
1951a4bd5210SJason Evans 		*rsize = usize;
1952a4bd5210SJason Evans 	return (ALLOCM_SUCCESS);
1953a4bd5210SJason Evans }
1954a4bd5210SJason Evans 
1955a4bd5210SJason Evans #endif
1956a4bd5210SJason Evans /*
1957a4bd5210SJason Evans  * End experimental functions.
1958a4bd5210SJason Evans  */
1959a4bd5210SJason Evans /******************************************************************************/
1960a4bd5210SJason Evans /*
1961a4bd5210SJason Evans  * The following functions are used by threading libraries for protection of
1962a4bd5210SJason Evans  * malloc during fork().
1963a4bd5210SJason Evans  */
1964a4bd5210SJason Evans 
/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still possible to
 * trigger the deadlock described above, but doing so would involve forking via
 * a library constructor that runs before jemalloc's runs.
 */
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

	/* Force full allocator initialization at library load time. */
	malloc_init();
}
198582872ac0SJason Evans 
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
/*
 * When mutexes are initialized via a callback (FreeBSD libc integration),
 * export this handler under the name libc's fork(2) wrapper expects.
 */
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	/*
	 * NOTE(review): presumably libc invokes _malloc_prefork() around every
	 * fork(2), even before jemalloc has initialized; bail out rather than
	 * touch unset state -- confirm against the libc fork path.
	 */
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/*
	 * Acquire all mutexes in a safe order.  This order is the exact
	 * reverse of the release order used by jemalloc_postfork_parent()
	 * and jemalloc_postfork_child().
	 */
	ctl_prefork();
	prof_prefork();
	malloc_mutex_prefork(&arenas_lock);
	/* Lock every initialized arena; arenas[] slots may be NULL. */
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_prefork(arenas[i]);
	}
	chunk_prefork();
	base_prefork();
	huge_prefork();
}
2014a4bd5210SJason Evans 
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
/*
 * When mutexes are initialized via a callback (FreeBSD libc integration),
 * export this handler under the name libc's fork(2) wrapper expects.
 */
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	unsigned i;

#ifdef JEMALLOC_MUTEX_INIT_CB
	/*
	 * Mirror the guard in the prefork handler: if the allocator was not
	 * initialized, the prefork handler acquired nothing, so there is
	 * nothing to release here.
	 */
	if (malloc_initialized == false)
		return;
#endif
	assert(malloc_initialized);

	/*
	 * Release all mutexes, now that fork() has completed.  The release
	 * order is the exact reverse of the acquisition order used in the
	 * prefork handler.
	 */
	huge_postfork_parent();
	base_postfork_parent();
	chunk_postfork_parent();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_parent(arenas[i]);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
	prof_postfork_parent();
	ctl_postfork_parent();
}
2043a4bd5210SJason Evans 
/*
 * Child-side fork handler.  Unlike the prefork/postfork-parent handlers,
 * this one has a single name regardless of JEMALLOC_MUTEX_INIT_CB.  The
 * *_postfork_child() calls reinitialize the mutexes held across fork()
 * rather than merely unlocking them.
 */
void
jemalloc_postfork_child(void)
{
	unsigned i;

	assert(malloc_initialized);

	/*
	 * Release all mutexes, now that fork() has completed.  The order is
	 * the exact reverse of the acquisition order in the prefork handler.
	 */
	huge_postfork_child();
	base_postfork_child();
	chunk_postfork_child();
	for (i = 0; i < narenas_total; i++) {
		if (arenas[i] != NULL)
			arena_postfork_child(arenas[i]);
	}
	malloc_mutex_postfork_child(&arenas_lock);
	prof_postfork_child();
	ctl_postfork_child();
}
2063a4bd5210SJason Evans 
/*
 * Hook exported for FreeBSD's threading library; delegates to the mutex
 * subsystem's first-thread setup.  NOTE(review): presumably called by libthr
 * when the process first goes multi-threaded -- confirm against libthr.
 */
void
_malloc_first_thread(void)
{

	/* Return value (if any) is deliberately ignored. */
	malloc_mutex_first_thread();
}
2070*8495e8b1SKonstantin Belousov 
2071a4bd5210SJason Evans /******************************************************************************/
2072a4bd5210SJason Evans /*
2073a4bd5210SJason Evans  * The following functions are used for TLS allocation/deallocation in static
2074a4bd5210SJason Evans  * binaries on FreeBSD.  The primary difference between these and i[mcd]alloc()
2075a4bd5210SJason Evans  * is that these avoid accessing TLS variables.
2076a4bd5210SJason Evans  */
2077a4bd5210SJason Evans 
2078a4bd5210SJason Evans static void *
2079a4bd5210SJason Evans a0alloc(size_t size, bool zero)
2080a4bd5210SJason Evans {
2081a4bd5210SJason Evans 
2082a4bd5210SJason Evans 	if (malloc_init())
2083a4bd5210SJason Evans 		return (NULL);
2084a4bd5210SJason Evans 
2085a4bd5210SJason Evans 	if (size == 0)
2086a4bd5210SJason Evans 		size = 1;
2087a4bd5210SJason Evans 
2088a4bd5210SJason Evans 	if (size <= arena_maxclass)
2089a4bd5210SJason Evans 		return (arena_malloc(arenas[0], size, zero, false));
2090a4bd5210SJason Evans 	else
20912fff27f8SJason Evans 		return (huge_malloc(size, zero, huge_dss_prec_get(arenas[0])));
2092a4bd5210SJason Evans }
2093a4bd5210SJason Evans 
/*
 * malloc() analogue for TLS bootstrap: forwards to a0alloc() without
 * zeroing, avoiding any TLS access.
 */
void *
a0malloc(size_t size)
{

	return (a0alloc(size, false));
}
2100a4bd5210SJason Evans 
2101a4bd5210SJason Evans void *
2102a4bd5210SJason Evans a0calloc(size_t num, size_t size)
2103a4bd5210SJason Evans {
2104a4bd5210SJason Evans 
2105a4bd5210SJason Evans 	return (a0alloc(num * size, true));
2106a4bd5210SJason Evans }
2107a4bd5210SJason Evans 
2108a4bd5210SJason Evans void
2109a4bd5210SJason Evans a0free(void *ptr)
2110a4bd5210SJason Evans {
2111a4bd5210SJason Evans 	arena_chunk_t *chunk;
2112a4bd5210SJason Evans 
2113a4bd5210SJason Evans 	if (ptr == NULL)
2114a4bd5210SJason Evans 		return;
2115a4bd5210SJason Evans 
2116a4bd5210SJason Evans 	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
2117a4bd5210SJason Evans 	if (chunk != ptr)
2118a4bd5210SJason Evans 		arena_dalloc(chunk->arena, chunk, ptr, false);
2119a4bd5210SJason Evans 	else
2120a4bd5210SJason Evans 		huge_dalloc(ptr, true);
2121a4bd5210SJason Evans }
2122a4bd5210SJason Evans 
2123a4bd5210SJason Evans /******************************************************************************/
2124