xref: /freebsd/contrib/jemalloc/src/jemalloc.c (revision 68d75eff68281c1b445e3010bb975eae07aac225)
1 #define JEMALLOC_C_
2 #include "jemalloc/internal/jemalloc_preamble.h"
3 #include "jemalloc/internal/jemalloc_internal_includes.h"
4 
5 #include "jemalloc/internal/assert.h"
6 #include "jemalloc/internal/atomic.h"
7 #include "jemalloc/internal/ctl.h"
8 #include "jemalloc/internal/extent_dss.h"
9 #include "jemalloc/internal/extent_mmap.h"
10 #include "jemalloc/internal/jemalloc_internal_types.h"
11 #include "jemalloc/internal/log.h"
12 #include "jemalloc/internal/malloc_io.h"
13 #include "jemalloc/internal/mutex.h"
14 #include "jemalloc/internal/rtree.h"
15 #include "jemalloc/internal/size_classes.h"
16 #include "jemalloc/internal/spin.h"
17 #include "jemalloc/internal/sz.h"
18 #include "jemalloc/internal/ticker.h"
19 #include "jemalloc/internal/util.h"
20 
21 /******************************************************************************/
22 /* Data. */
23 
24 /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
25 const char	*__malloc_options_1_0 = NULL;
26 __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
27 
28 /* Runtime configuration options. */
29 const char	*je_malloc_conf
30 #ifndef _WIN32
31     JEMALLOC_ATTR(weak)
32 #endif
33     ;
34 bool	opt_abort =
35 #ifdef JEMALLOC_DEBUG
36     true
37 #else
38     false
39 #endif
40     ;
41 bool	opt_abort_conf =
42 #ifdef JEMALLOC_DEBUG
43     true
44 #else
45     false
46 #endif
47     ;
48 const char	*opt_junk =
49 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
50     "true"
51 #else
52     "false"
53 #endif
54     ;
55 bool	opt_junk_alloc =
56 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
57     true
58 #else
59     false
60 #endif
61     ;
62 bool	opt_junk_free =
63 #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
64     true
65 #else
66     false
67 #endif
68     ;
69 
70 bool	opt_utrace = false;
71 bool	opt_xmalloc = false;
72 bool	opt_zero = false;
73 unsigned	opt_narenas = 0;
74 
75 unsigned	ncpus;
76 
77 /* Protects arenas initialization. */
78 malloc_mutex_t arenas_lock;
79 /*
80  * Arenas that are used to service external requests.  Not all elements of the
81  * arenas array are necessarily used; arenas are created lazily as needed.
82  *
83  * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
84  * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
85  * takes some action to create them and allocate from them.
86  *
87  * Points to an arena_t.
88  */
89 JEMALLOC_ALIGNED(CACHELINE)
90 atomic_p_t		arenas[MALLOCX_ARENA_LIMIT];
91 static atomic_u_t	narenas_total; /* Use narenas_total_*(). */
92 static arena_t		*a0; /* arenas[0]; read-only after initialization. */
93 unsigned		narenas_auto; /* Read-only after initialization. */
94 
95 typedef enum {
96 	malloc_init_uninitialized	= 3,
97 	malloc_init_a0_initialized	= 2,
98 	malloc_init_recursible		= 1,
99 	malloc_init_initialized		= 0 /* Common case --> jnz. */
100 } malloc_init_t;
101 static malloc_init_t	malloc_init_state = malloc_init_uninitialized;
102 
103 /* False is the common case; starts true so the first call triggers initialization. */
104 bool			malloc_slow = true;
105 
106 /* When malloc_slow is true, the corresponding option bits are set for sanity checking. */
107 enum {
108 	flag_opt_junk_alloc	= (1U),
109 	flag_opt_junk_free	= (1U << 1),
110 	flag_opt_zero		= (1U << 2),
111 	flag_opt_utrace		= (1U << 3),
112 	flag_opt_xmalloc	= (1U << 4)
113 };
114 static uint8_t	malloc_slow_flags;
115 
116 #ifdef JEMALLOC_THREADED_INIT
117 /* Used to let the initializing thread recursively allocate. */
118 #  define NO_INITIALIZER	((unsigned long)0)
119 #  define INITIALIZER		pthread_self()
120 #  define IS_INITIALIZER	(malloc_initializer == pthread_self())
121 static pthread_t		malloc_initializer = NO_INITIALIZER;
122 #else
123 #  define NO_INITIALIZER	false
124 #  define INITIALIZER		true
125 #  define IS_INITIALIZER	malloc_initializer
126 static bool			malloc_initializer = NO_INITIALIZER;
127 #endif
128 
129 /* Used to avoid initialization races. */
130 #ifdef _WIN32
131 #if _WIN32_WINNT >= 0x0600
132 static malloc_mutex_t	init_lock = SRWLOCK_INIT;
133 #else
134 static malloc_mutex_t	init_lock;
135 static bool init_lock_initialized = false;
136 
137 JEMALLOC_ATTR(constructor)
138 static void WINAPI
139 _init_init_lock(void) {
140 	/*
141 	 * If another constructor in the same binary is using mallctl to e.g.
142 	 * set up extent hooks, it may end up running before this one, and
143 	 * malloc_init_hard will crash trying to lock the uninitialized lock. So
144 	 * we force an initialization of the lock in malloc_init_hard as well.
145 	 * We don't worry about the atomicity of accesses to the
146 	 * init_lock_initialized boolean, since it really only matters early in
147 	 * process creation, before any separate thread normally starts
148 	 * doing anything.
149 	 */
150 	if (!init_lock_initialized) {
151 		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
152 		    malloc_mutex_rank_exclusive);
153 	}
154 	init_lock_initialized = true;
155 }
156 
157 #ifdef _MSC_VER
158 #  pragma section(".CRT$XCU", read)
159 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
160 static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
161 #endif
162 #endif
163 #else
164 static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
165 #endif
166 
167 typedef struct {
168 	void	*p;	/* Input pointer (as in realloc(p, s)). */
169 	size_t	s;	/* Request size. */
170 	void	*r;	/* Result pointer. */
171 } malloc_utrace_t;
172 
173 #ifdef JEMALLOC_UTRACE
174 #  define UTRACE(a, b, c) do {						\
175 	if (unlikely(opt_utrace)) {					\
176 		int utrace_serrno = errno;				\
177 		malloc_utrace_t ut;					\
178 		ut.p = (a);						\
179 		ut.s = (b);						\
180 		ut.r = (c);						\
181 		utrace(&ut, sizeof(ut));				\
182 		errno = utrace_serrno;					\
183 	}								\
184 } while (0)
185 #else
186 #  define UTRACE(a, b, c)
187 #endif
188 
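/*
 * Illustration (assumed usage, not part of the build): on a JEMALLOC_UTRACE
 * build run with "utrace:true" in MALLOC_CONF, each allocator call emits one
 * utrace(2) record carrying a malloc_utrace_t.  For example,
 *
 *	void *q = realloc(p, 4096);
 *
 * is expected to be logged as { .p = p, .s = 4096, .r = q }; malloc(size)
 * appears as { NULL, size, result } (see UTRACE(0, size, allocation) in
 * imalloc_body() below) and free(ptr) as { ptr, 0, NULL }.  On FreeBSD such
 * records can typically be captured with ktrace(1) and decoded with kdump(1).
 */
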
189 /* Whether any invalid config options were encountered. */
190 static bool had_conf_error = false;
191 
192 /******************************************************************************/
193 /*
194  * Function prototypes for static functions that are referenced prior to
195  * definition.
196  */
197 
198 static bool	malloc_init_hard_a0(void);
199 static bool	malloc_init_hard(void);
200 
201 /******************************************************************************/
202 /*
203  * Begin miscellaneous support functions.
204  */
205 
206 bool
207 malloc_initialized(void) {
208 	return (malloc_init_state == malloc_init_initialized);
209 }
210 
211 JEMALLOC_ALWAYS_INLINE bool
212 malloc_init_a0(void) {
213 	if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
214 		return malloc_init_hard_a0();
215 	}
216 	return false;
217 }
218 
219 JEMALLOC_ALWAYS_INLINE bool
220 malloc_init(void) {
221 	if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
222 		return true;
223 	}
224 	return false;
225 }
226 
227 /*
228  * The a0*() functions are used instead of i{d,}alloc() in situations that
229  * cannot tolerate TLS variable access.
230  */
231 
232 static void *
233 a0ialloc(size_t size, bool zero, bool is_internal) {
234 	if (unlikely(malloc_init_a0())) {
235 		return NULL;
236 	}
237 
238 	return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
239 	    is_internal, arena_get(TSDN_NULL, 0, true), true);
240 }
241 
242 static void
243 a0idalloc(void *ptr, bool is_internal) {
244 	idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
245 }
246 
247 void *
248 a0malloc(size_t size) {
249 	return a0ialloc(size, false, true);
250 }
251 
252 void
253 a0dalloc(void *ptr) {
254 	a0idalloc(ptr, true);
255 }
256 
257 /*
258  * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
259  * situations that cannot tolerate TLS variable access (TLS allocation and very
260  * early internal data structure initialization).
261  */
262 
263 void *
264 bootstrap_malloc(size_t size) {
265 	if (unlikely(size == 0)) {
266 		size = 1;
267 	}
268 
269 	return a0ialloc(size, false, false);
270 }
271 
272 void *
273 bootstrap_calloc(size_t num, size_t size) {
274 	size_t num_size;
275 
276 	num_size = num * size;
277 	if (unlikely(num_size == 0)) {
278 		assert(num == 0 || size == 0);
279 		num_size = 1;
280 	}
281 
282 	return a0ialloc(num_size, true, false);
283 }
284 
285 void
286 bootstrap_free(void *ptr) {
287 	if (unlikely(ptr == NULL)) {
288 		return;
289 	}
290 
291 	a0idalloc(ptr, false);
292 }
293 
294 void
295 arena_set(unsigned ind, arena_t *arena) {
296 	atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
297 }
298 
299 static void
300 narenas_total_set(unsigned narenas) {
301 	atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
302 }
303 
304 static void
305 narenas_total_inc(void) {
306 	atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
307 }
308 
309 unsigned
310 narenas_total_get(void) {
311 	return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
312 }
313 
314 /* Create a new arena and insert it into the arenas array at index ind. */
315 static arena_t *
316 arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
317 	arena_t *arena;
318 
319 	assert(ind <= narenas_total_get());
320 	if (ind >= MALLOCX_ARENA_LIMIT) {
321 		return NULL;
322 	}
323 	if (ind == narenas_total_get()) {
324 		narenas_total_inc();
325 	}
326 
327 	/*
328 	 * Another thread may have already initialized arenas[ind] if it's an
329 	 * auto arena.
330 	 */
331 	arena = arena_get(tsdn, ind, false);
332 	if (arena != NULL) {
333 		assert(ind < narenas_auto);
334 		return arena;
335 	}
336 
337 	/* Actually initialize the arena. */
338 	arena = arena_new(tsdn, ind, extent_hooks);
339 
340 	return arena;
341 }
342 
343 static void
344 arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
345 	if (ind == 0) {
346 		return;
347 	}
348 	if (have_background_thread) {
349 		bool err;
350 		malloc_mutex_lock(tsdn, &background_thread_lock);
351 		err = background_thread_create(tsdn_tsd(tsdn), ind);
352 		malloc_mutex_unlock(tsdn, &background_thread_lock);
353 		if (err) {
354 			malloc_printf("<jemalloc>: error in background thread "
355 				      "creation for arena %u. Abort.\n", ind);
356 			abort();
357 		}
358 	}
359 }
360 
361 arena_t *
362 arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
363 	arena_t *arena;
364 
365 	malloc_mutex_lock(tsdn, &arenas_lock);
366 	arena = arena_init_locked(tsdn, ind, extent_hooks);
367 	malloc_mutex_unlock(tsdn, &arenas_lock);
368 
369 	arena_new_create_background_thread(tsdn, ind);
370 
371 	return arena;
372 }
373 
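/*
 * Sketch (illustrative, not part of this file): an application typically
 * reaches arena_init() through the documented "arenas.create" mallctl and
 * then directs allocations at the new arena with MALLOCX_ARENA():
 *
 *	#include <jemalloc/jemalloc.h>
 *
 *	unsigned ind;
 *	size_t sz = sizeof(ind);
 *	if (mallctl("arenas.create", &ind, &sz, NULL, 0) == 0) {
 *		void *p = mallocx(4096, MALLOCX_ARENA(ind));
 *		dallocx(p, 0);
 *	}
 *
 * That path is expected to call arena_init() with the default extent hooks
 * (custom hooks may be supplied through the mallctl's newp argument).
 */
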
374 static void
375 arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
376 	arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
377 	arena_nthreads_inc(arena, internal);
378 
379 	if (internal) {
380 		tsd_iarena_set(tsd, arena);
381 	} else {
382 		tsd_arena_set(tsd, arena);
383 	}
384 }
385 
386 void
387 arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
388 	arena_t *oldarena, *newarena;
389 
390 	oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
391 	newarena = arena_get(tsd_tsdn(tsd), newind, false);
392 	arena_nthreads_dec(oldarena, false);
393 	arena_nthreads_inc(newarena, false);
394 	tsd_arena_set(tsd, newarena);
395 }
396 
397 static void
398 arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
399 	arena_t *arena;
400 
401 	arena = arena_get(tsd_tsdn(tsd), ind, false);
402 	arena_nthreads_dec(arena, internal);
403 
404 	if (internal) {
405 		tsd_iarena_set(tsd, NULL);
406 	} else {
407 		tsd_arena_set(tsd, NULL);
408 	}
409 }
410 
411 arena_tdata_t *
412 arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
413 	arena_tdata_t *tdata, *arenas_tdata_old;
414 	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
415 	unsigned narenas_tdata_old, i;
416 	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
417 	unsigned narenas_actual = narenas_total_get();
418 
419 	/*
420 	 * Dissociate old tdata array (and set up for deallocation upon return)
421 	 * if it's too small.
422 	 */
423 	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
424 		arenas_tdata_old = arenas_tdata;
425 		narenas_tdata_old = narenas_tdata;
426 		arenas_tdata = NULL;
427 		narenas_tdata = 0;
428 		tsd_arenas_tdata_set(tsd, arenas_tdata);
429 		tsd_narenas_tdata_set(tsd, narenas_tdata);
430 	} else {
431 		arenas_tdata_old = NULL;
432 		narenas_tdata_old = 0;
433 	}
434 
435 	/* Allocate tdata array if it's missing. */
436 	if (arenas_tdata == NULL) {
437 		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
438 		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;
439 
440 		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
441 			*arenas_tdata_bypassp = true;
442 			arenas_tdata = (arena_tdata_t *)a0malloc(
443 			    sizeof(arena_tdata_t) * narenas_tdata);
444 			*arenas_tdata_bypassp = false;
445 		}
446 		if (arenas_tdata == NULL) {
447 			tdata = NULL;
448 			goto label_return;
449 		}
450 		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
451 		tsd_arenas_tdata_set(tsd, arenas_tdata);
452 		tsd_narenas_tdata_set(tsd, narenas_tdata);
453 	}
454 
455 	/*
456 	 * Copy to tdata array.  It's possible that the actual number of arenas
457 	 * has increased since narenas_total_get() was called above, but that
458 	 * causes no correctness issues unless two threads concurrently execute
459 	 * the arenas.create mallctl, which we trust mallctl synchronization to
460 	 * prevent.
461 	 */
462 
463 	/* Copy/initialize tickers. */
464 	for (i = 0; i < narenas_actual; i++) {
465 		if (i < narenas_tdata_old) {
466 			ticker_copy(&arenas_tdata[i].decay_ticker,
467 			    &arenas_tdata_old[i].decay_ticker);
468 		} else {
469 			ticker_init(&arenas_tdata[i].decay_ticker,
470 			    DECAY_NTICKS_PER_UPDATE);
471 		}
472 	}
473 	if (narenas_tdata > narenas_actual) {
474 		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
475 		    * (narenas_tdata - narenas_actual));
476 	}
477 
478 	/* Read the refreshed tdata array. */
479 	tdata = &arenas_tdata[ind];
480 label_return:
481 	if (arenas_tdata_old != NULL) {
482 		a0dalloc(arenas_tdata_old);
483 	}
484 	return tdata;
485 }
486 
487 /* Slow path, called only by arena_choose(). */
488 arena_t *
489 arena_choose_hard(tsd_t *tsd, bool internal) {
490 	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);
491 
492 	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
493 		unsigned choose = percpu_arena_choose();
494 		ret = arena_get(tsd_tsdn(tsd), choose, true);
495 		assert(ret != NULL);
496 		arena_bind(tsd, arena_ind_get(ret), false);
497 		arena_bind(tsd, arena_ind_get(ret), true);
498 
499 		return ret;
500 	}
501 
502 	if (narenas_auto > 1) {
503 		unsigned i, j, choose[2], first_null;
504 		bool is_new_arena[2];
505 
506 		/*
507 		 * Determine binding for both non-internal and internal
508 		 * allocation.
509 		 *
510 		 *   choose[0]: For application allocation.
511 		 *   choose[1]: For internal metadata allocation.
512 		 */
513 
514 		for (j = 0; j < 2; j++) {
515 			choose[j] = 0;
516 			is_new_arena[j] = false;
517 		}
518 
519 		first_null = narenas_auto;
520 		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
521 		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
522 		for (i = 1; i < narenas_auto; i++) {
523 			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
524 				/*
525 				 * Choose the first arena that has the lowest
526 				 * number of threads assigned to it.
527 				 */
528 				for (j = 0; j < 2; j++) {
529 					if (arena_nthreads_get(arena_get(
530 					    tsd_tsdn(tsd), i, false), !!j) <
531 					    arena_nthreads_get(arena_get(
532 					    tsd_tsdn(tsd), choose[j], false),
533 					    !!j)) {
534 						choose[j] = i;
535 					}
536 				}
537 			} else if (first_null == narenas_auto) {
538 				/*
539 				 * Record the index of the first uninitialized
540 				 * arena, in case all extant arenas are in use.
541 				 *
542 				 * NB: It is possible for there to be
543 				 * discontinuities in terms of initialized
544 				 * versus uninitialized arenas, due to the
545 				 * "thread.arena" mallctl.
546 				 */
547 				first_null = i;
548 			}
549 		}
550 
551 		for (j = 0; j < 2; j++) {
552 			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
553 			    choose[j], false), !!j) == 0 || first_null ==
554 			    narenas_auto) {
555 				/*
556 				 * Use an unloaded arena, or the least loaded
557 				 * arena if all arenas are already initialized.
558 				 */
559 				if (!!j == internal) {
560 					ret = arena_get(tsd_tsdn(tsd),
561 					    choose[j], false);
562 				}
563 			} else {
564 				arena_t *arena;
565 
566 				/* Initialize a new arena. */
567 				choose[j] = first_null;
568 				arena = arena_init_locked(tsd_tsdn(tsd),
569 				    choose[j],
570 				    (extent_hooks_t *)&extent_hooks_default);
571 				if (arena == NULL) {
572 					malloc_mutex_unlock(tsd_tsdn(tsd),
573 					    &arenas_lock);
574 					return NULL;
575 				}
576 				is_new_arena[j] = true;
577 				if (!!j == internal) {
578 					ret = arena;
579 				}
580 			}
581 			arena_bind(tsd, choose[j], !!j);
582 		}
583 		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
584 
585 		for (j = 0; j < 2; j++) {
586 			if (is_new_arena[j]) {
587 				assert(choose[j] > 0);
588 				arena_new_create_background_thread(
589 				    tsd_tsdn(tsd), choose[j]);
590 			}
591 		}
592 
593 	} else {
594 		ret = arena_get(tsd_tsdn(tsd), 0, false);
595 		arena_bind(tsd, 0, false);
596 		arena_bind(tsd, 0, true);
597 	}
598 
599 	return ret;
600 }
601 
602 void
603 iarena_cleanup(tsd_t *tsd) {
604 	arena_t *iarena;
605 
606 	iarena = tsd_iarena_get(tsd);
607 	if (iarena != NULL) {
608 		arena_unbind(tsd, arena_ind_get(iarena), true);
609 	}
610 }
611 
612 void
613 arena_cleanup(tsd_t *tsd) {
614 	arena_t *arena;
615 
616 	arena = tsd_arena_get(tsd);
617 	if (arena != NULL) {
618 		arena_unbind(tsd, arena_ind_get(arena), false);
619 	}
620 }
621 
622 void
623 arenas_tdata_cleanup(tsd_t *tsd) {
624 	arena_tdata_t *arenas_tdata;
625 
626 	/* Prevent tsd->arenas_tdata from being (re)created. */
627 	*tsd_arenas_tdata_bypassp_get(tsd) = true;
628 
629 	arenas_tdata = tsd_arenas_tdata_get(tsd);
630 	if (arenas_tdata != NULL) {
631 		tsd_arenas_tdata_set(tsd, NULL);
632 		a0dalloc(arenas_tdata);
633 	}
634 }
635 
636 static void
637 stats_print_atexit(void) {
638 	if (config_stats) {
639 		tsdn_t *tsdn;
640 		unsigned narenas, i;
641 
642 		tsdn = tsdn_fetch();
643 
644 		/*
645 		 * Merge stats from extant threads.  This is racy, since
646 		 * individual threads do not lock when recording tcache stats
647 		 * events.  As a consequence, the final stats may be slightly
648 		 * out of date by the time they are reported, if other threads
649 		 * continue to allocate.
650 		 */
651 		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
652 			arena_t *arena = arena_get(tsdn, i, false);
653 			if (arena != NULL) {
654 				tcache_t *tcache;
655 
656 				malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
657 				ql_foreach(tcache, &arena->tcache_ql, link) {
658 					tcache_stats_merge(tsdn, tcache, arena);
659 				}
660 				malloc_mutex_unlock(tsdn,
661 				    &arena->tcache_ql_mtx);
662 			}
663 		}
664 	}
665 	je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
666 }
667 
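/*
 * Usage sketch (assumed, not part of the build): stats_print_atexit() is
 * only registered (from malloc_init_hard_a0_locked()) when opt_stats_print
 * is enabled, e.g. by running the program with MALLOC_CONF="stats_print:true".
 * The same report can also be produced on demand through the public entry
 * point, typically exported as malloc_stats_print() when no name prefix is
 * configured:
 *
 *	malloc_stats_print(NULL, NULL, NULL);
 */
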
668 /*
669  * Ensure that we don't hold any locks upon entry to or exit from allocator
670  * code (in a "broad" sense that doesn't count a reentrant allocation as an
671  * entrance or exit).
672  */
673 JEMALLOC_ALWAYS_INLINE void
674 check_entry_exit_locking(tsdn_t *tsdn) {
675 	if (!config_debug) {
676 		return;
677 	}
678 	if (tsdn_null(tsdn)) {
679 		return;
680 	}
681 	tsd_t *tsd = tsdn_tsd(tsdn);
682 	/*
683 	 * It's possible we hold locks at entry/exit if we're in a nested
684 	 * allocation.
685 	 */
686 	int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
687 	if (reentrancy_level != 0) {
688 		return;
689 	}
690 	witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
691 }
692 
693 /*
694  * End miscellaneous support functions.
695  */
696 /******************************************************************************/
697 /*
698  * Begin initialization functions.
699  */
700 
701 static char *
702 jemalloc_secure_getenv(const char *name) {
703 #ifdef JEMALLOC_HAVE_SECURE_GETENV
704 	return secure_getenv(name);
705 #else
706 #  ifdef JEMALLOC_HAVE_ISSETUGID
707 	if (issetugid() != 0) {
708 		return NULL;
709 	}
710 #  endif
711 	return getenv(name);
712 #endif
713 }
714 
715 static unsigned
716 malloc_ncpus(void) {
717 	long result;
718 
719 #ifdef _WIN32
720 	SYSTEM_INFO si;
721 	GetSystemInfo(&si);
722 	result = si.dwNumberOfProcessors;
723 #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
724 	/*
725 	 * glibc >= 2.6 has the CPU_COUNT macro.
726 	 *
727 	 * glibc's sysconf() uses isspace().  glibc allocates for the first time
728 	 * *before* setting up the isspace tables.  Therefore we need a
729 	 * different method to get the number of CPUs.
730 	 */
731 	{
732 		cpu_set_t set;
733 
734 		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
735 		result = CPU_COUNT(&set);
736 	}
737 #else
738 	result = sysconf(_SC_NPROCESSORS_ONLN);
739 #endif
740 	return ((result == -1) ? 1 : (unsigned)result);
741 }
742 
743 static void
744 init_opt_stats_print_opts(const char *v, size_t vlen) {
745 	size_t opts_len = strlen(opt_stats_print_opts);
746 	assert(opts_len <= stats_print_tot_num_options);
747 
748 	for (size_t i = 0; i < vlen; i++) {
749 		switch (v[i]) {
750 #define OPTION(o, v, d, s) case o: break;
751 			STATS_PRINT_OPTIONS
752 #undef OPTION
753 		default: continue;
754 		}
755 
756 		if (strchr(opt_stats_print_opts, v[i]) != NULL) {
757 			/* Ignore repeated. */
758 			continue;
759 		}
760 
761 		opt_stats_print_opts[opts_len++] = v[i];
762 		opt_stats_print_opts[opts_len] = '\0';
763 		assert(opts_len <= stats_print_tot_num_options);
764 	}
765 	assert(opts_len == strlen(opt_stats_print_opts));
766 }
767 
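/*
 * Example (hedged: the accepted letters come from STATS_PRINT_OPTIONS and
 * are assumed to match the documented ones, e.g. "J" for JSON output):
 *
 *	MALLOC_CONF="stats_print:true,stats_print_opts:J"
 *
 * Unrecognized letters are skipped by the switch above and repeated letters
 * are dropped by the strchr() check, so opt_stats_print_opts ends up holding
 * each valid option at most once.
 */
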
768 static bool
769 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
770     char const **v_p, size_t *vlen_p) {
771 	bool accept;
772 	const char *opts = *opts_p;
773 
774 	*k_p = opts;
775 
776 	for (accept = false; !accept;) {
777 		switch (*opts) {
778 		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
779 		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
780 		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
781 		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
782 		case 'Y': case 'Z':
783 		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
784 		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
785 		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
786 		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
787 		case 'y': case 'z':
788 		case '0': case '1': case '2': case '3': case '4': case '5':
789 		case '6': case '7': case '8': case '9':
790 		case '_':
791 			opts++;
792 			break;
793 		case ':':
794 			opts++;
795 			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
796 			*v_p = opts;
797 			accept = true;
798 			break;
799 		case '\0':
800 			if (opts != *opts_p) {
801 				malloc_write("<jemalloc>: Conf string ends "
802 				    "with key\n");
803 			}
804 			return true;
805 		default:
806 			malloc_write("<jemalloc>: Malformed conf string\n");
807 			return true;
808 		}
809 	}
810 
811 	for (accept = false; !accept;) {
812 		switch (*opts) {
813 		case ',':
814 			opts++;
815 			/*
816 			 * Look ahead one character here, because the next time
817 			 * this function is called, it will assume that end of
818 			 * input has been cleanly reached if no input remains,
819 			 * but we have optimistically already consumed the
820 			 * comma if one exists.
821 			 */
822 			if (*opts == '\0') {
823 				malloc_write("<jemalloc>: Conf string ends "
824 				    "with comma\n");
825 			}
826 			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
827 			accept = true;
828 			break;
829 		case '\0':
830 			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
831 			accept = true;
832 			break;
833 		default:
834 			opts++;
835 			break;
836 		}
837 	}
838 
839 	*opts_p = opts;
840 	return false;
841 }
842 
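/*
 * Illustrative conf string (all options shown are handled in
 * malloc_conf_init() below): malloc_conf_next() accepts a comma-separated
 * list of key:value pairs, where keys are built from [A-Za-z0-9_]:
 *
 *	"abort_conf:true,narenas:4,dirty_decay_ms:10000"
 *
 * A string that ends in the middle of a key, or immediately after a comma,
 * is diagnosed with the warnings above.
 */
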
843 static void
844 malloc_abort_invalid_conf(void) {
845 	assert(opt_abort_conf);
846 	malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
847 	    "value (see above).\n");
848 	abort();
849 }
850 
851 static void
852 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
853     size_t vlen) {
854 	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
855 	    (int)vlen, v);
856 	/* If abort_conf is set, error out after processing all options. */
857 	had_conf_error = true;
858 }
859 
860 static void
861 malloc_slow_flag_init(void) {
862 	/*
863 	 * Combine the runtime options into malloc_slow for fast path.  Called
864 	 * after processing all the options.
865 	 */
866 	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
867 	    | (opt_junk_free ? flag_opt_junk_free : 0)
868 	    | (opt_zero ? flag_opt_zero : 0)
869 	    | (opt_utrace ? flag_opt_utrace : 0)
870 	    | (opt_xmalloc ? flag_opt_xmalloc : 0);
871 
872 	malloc_slow = (malloc_slow_flags != 0);
873 }
874 
875 static void
876 malloc_conf_init(void) {
877 	unsigned i;
878 	char buf[PATH_MAX + 1];
879 	const char *opts, *k, *v;
880 	size_t klen, vlen;
881 
882 	for (i = 0; i < 4; i++) {
883 		/* Get runtime configuration. */
884 		switch (i) {
885 		case 0:
886 			opts = config_malloc_conf;
887 			break;
888 		case 1:
889 			if (je_malloc_conf != NULL) {
890 				/*
891 				 * Use options that were compiled into the
892 				 * program.
893 				 */
894 				opts = je_malloc_conf;
895 			} else {
896 				/* No configuration specified. */
897 				buf[0] = '\0';
898 				opts = buf;
899 			}
900 			break;
901 		case 2: {
902 			ssize_t linklen = 0;
903 #ifndef _WIN32
904 			int saved_errno = errno;
905 			const char *linkname =
906 #  ifdef JEMALLOC_PREFIX
907 			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
908 #  else
909 			    "/etc/malloc.conf"
910 #  endif
911 			    ;
912 
913 			/*
914 			 * Try to use the contents of the "/etc/malloc.conf"
915 			 * symbolic link's name.
916 			 */
917 			linklen = readlink(linkname, buf, sizeof(buf) - 1);
918 			if (linklen == -1) {
919 				/* No configuration specified. */
920 				linklen = 0;
921 				/* Restore errno. */
922 				set_errno(saved_errno);
923 			}
924 #endif
925 			buf[linklen] = '\0';
926 			opts = buf;
927 			break;
928 		} case 3: {
929 			const char *envname =
930 #ifdef JEMALLOC_PREFIX
931 			    JEMALLOC_CPREFIX"MALLOC_CONF"
932 #else
933 			    "MALLOC_CONF"
934 #endif
935 			    ;
936 
937 			if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
938 				/*
939 				 * Do nothing; opts is already initialized to
940 				 * the value of the MALLOC_CONF environment
941 				 * variable.
942 				 */
943 			} else {
944 				/* No configuration specified. */
945 				buf[0] = '\0';
946 				opts = buf;
947 			}
948 			break;
949 		} default:
950 			not_reached();
951 			buf[0] = '\0';
952 			opts = buf;
953 		}
954 
955 		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
956 		    &vlen)) {
957 #define CONF_MATCH(n)							\
958 	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
959 #define CONF_MATCH_VALUE(n)						\
960 	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
961 #define CONF_HANDLE_BOOL(o, n)						\
962 			if (CONF_MATCH(n)) {				\
963 				if (CONF_MATCH_VALUE("true")) {		\
964 					o = true;			\
965 				} else if (CONF_MATCH_VALUE("false")) {	\
966 					o = false;			\
967 				} else {				\
968 					malloc_conf_error(		\
969 					    "Invalid conf value",	\
970 					    k, klen, v, vlen);		\
971 				}					\
972 				continue;				\
973 			}
974 #define CONF_MIN_no(um, min)	false
975 #define CONF_MIN_yes(um, min)	((um) < (min))
976 #define CONF_MAX_no(um, max)	false
977 #define CONF_MAX_yes(um, max)	((um) > (max))
978 #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip)	\
979 			if (CONF_MATCH(n)) {				\
980 				uintmax_t um;				\
981 				char *end;				\
982 									\
983 				set_errno(0);				\
984 				um = malloc_strtoumax(v, &end, 0);	\
985 				if (get_errno() != 0 || (uintptr_t)end -\
986 				    (uintptr_t)v != vlen) {		\
987 					malloc_conf_error(		\
988 					    "Invalid conf value",	\
989 					    k, klen, v, vlen);		\
990 				} else if (clip) {			\
991 					if (CONF_MIN_##check_min(um,	\
992 					    (t)(min))) {		\
993 						o = (t)(min);		\
994 					} else if (			\
995 					    CONF_MAX_##check_max(um,	\
996 					    (t)(max))) {		\
997 						o = (t)(max);		\
998 					} else {			\
999 						o = (t)um;		\
1000 					}				\
1001 				} else {				\
1002 					if (CONF_MIN_##check_min(um,	\
1003 					    (t)(min)) ||		\
1004 					    CONF_MAX_##check_max(um,	\
1005 					    (t)(max))) {		\
1006 						malloc_conf_error(	\
1007 						    "Out-of-range "	\
1008 						    "conf value",	\
1009 						    k, klen, v, vlen);	\
1010 					} else {			\
1011 						o = (t)um;		\
1012 					}				\
1013 				}					\
1014 				continue;				\
1015 			}
1016 #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max,	\
1017     clip)								\
1018 			CONF_HANDLE_T_U(unsigned, o, n, min, max,	\
1019 			    check_min, check_max, clip)
1020 #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip)	\
1021 			CONF_HANDLE_T_U(size_t, o, n, min, max,		\
1022 			    check_min, check_max, clip)
1023 #define CONF_HANDLE_SSIZE_T(o, n, min, max)				\
1024 			if (CONF_MATCH(n)) {				\
1025 				long l;					\
1026 				char *end;				\
1027 									\
1028 				set_errno(0);				\
1029 				l = strtol(v, &end, 0);			\
1030 				if (get_errno() != 0 || (uintptr_t)end -\
1031 				    (uintptr_t)v != vlen) {		\
1032 					malloc_conf_error(		\
1033 					    "Invalid conf value",	\
1034 					    k, klen, v, vlen);		\
1035 				} else if (l < (ssize_t)(min) || l >	\
1036 				    (ssize_t)(max)) {			\
1037 					malloc_conf_error(		\
1038 					    "Out-of-range conf value",	\
1039 					    k, klen, v, vlen);		\
1040 				} else {				\
1041 					o = l;				\
1042 				}					\
1043 				continue;				\
1044 			}
1045 #define CONF_HANDLE_CHAR_P(o, n, d)					\
1046 			if (CONF_MATCH(n)) {				\
1047 				size_t cpylen = (vlen <=		\
1048 				    sizeof(o)-1) ? vlen :		\
1049 				    sizeof(o)-1;			\
1050 				strncpy(o, v, cpylen);			\
1051 				o[cpylen] = '\0';			\
1052 				continue;				\
1053 			}
1054 
1055 			CONF_HANDLE_BOOL(opt_abort, "abort")
1056 			CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
1057 			if (strncmp("metadata_thp", k, klen) == 0) {
1058 				int i;
1059 				bool match = false;
1060 				for (i = 0; i < metadata_thp_mode_limit; i++) {
1061 					if (strncmp(metadata_thp_mode_names[i],
1062 					    v, vlen) == 0) {
1063 						opt_metadata_thp = i;
1064 						match = true;
1065 						break;
1066 					}
1067 				}
1068 				if (!match) {
1069 					malloc_conf_error("Invalid conf value",
1070 					    k, klen, v, vlen);
1071 				}
1072 				continue;
1073 			}
1074 			CONF_HANDLE_BOOL(opt_retain, "retain")
1075 			if (strncmp("dss", k, klen) == 0) {
1076 				int i;
1077 				bool match = false;
1078 				for (i = 0; i < dss_prec_limit; i++) {
1079 					if (strncmp(dss_prec_names[i], v, vlen)
1080 					    == 0) {
1081 						if (extent_dss_prec_set(i)) {
1082 							malloc_conf_error(
1083 							    "Error setting dss",
1084 							    k, klen, v, vlen);
1085 						} else {
1086 							opt_dss =
1087 							    dss_prec_names[i];
1088 							match = true;
1089 							break;
1090 						}
1091 					}
1092 				}
1093 				if (!match) {
1094 					malloc_conf_error("Invalid conf value",
1095 					    k, klen, v, vlen);
1096 				}
1097 				continue;
1098 			}
1099 			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1100 			    UINT_MAX, yes, no, false)
1101 			CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
1102 			    "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1103 			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1104 			    SSIZE_MAX);
1105 			CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
1106 			    "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1107 			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1108 			    SSIZE_MAX);
1109 			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
1110 			if (CONF_MATCH("stats_print_opts")) {
1111 				init_opt_stats_print_opts(v, vlen);
1112 				continue;
1113 			}
1114 			if (config_fill) {
1115 				if (CONF_MATCH("junk")) {
1116 					if (CONF_MATCH_VALUE("true")) {
1117 						opt_junk = "true";
1118 						opt_junk_alloc = opt_junk_free =
1119 						    true;
1120 					} else if (CONF_MATCH_VALUE("false")) {
1121 						opt_junk = "false";
1122 						opt_junk_alloc = opt_junk_free =
1123 						    false;
1124 					} else if (CONF_MATCH_VALUE("alloc")) {
1125 						opt_junk = "alloc";
1126 						opt_junk_alloc = true;
1127 						opt_junk_free = false;
1128 					} else if (CONF_MATCH_VALUE("free")) {
1129 						opt_junk = "free";
1130 						opt_junk_alloc = false;
1131 						opt_junk_free = true;
1132 					} else {
1133 						malloc_conf_error(
1134 						    "Invalid conf value", k,
1135 						    klen, v, vlen);
1136 					}
1137 					continue;
1138 				}
1139 				CONF_HANDLE_BOOL(opt_zero, "zero")
1140 			}
1141 			if (config_utrace) {
1142 				CONF_HANDLE_BOOL(opt_utrace, "utrace")
1143 			}
1144 			if (config_xmalloc) {
1145 				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
1146 			}
1147 			CONF_HANDLE_BOOL(opt_tcache, "tcache")
1148 			CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
1149 			    "lg_extent_max_active_fit", 0,
1150 			    (sizeof(size_t) << 3), yes, yes, false)
1151 			CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
1152 			    -1, (sizeof(size_t) << 3) - 1)
1153 			if (strncmp("percpu_arena", k, klen) == 0) {
1154 				bool match = false;
1155 				for (int i = percpu_arena_mode_names_base; i <
1156 				    percpu_arena_mode_names_limit; i++) {
1157 					if (strncmp(percpu_arena_mode_names[i],
1158 					    v, vlen) == 0) {
1159 						if (!have_percpu_arena) {
1160 							malloc_conf_error(
1161 							    "No getcpu support",
1162 							    k, klen, v, vlen);
1163 						}
1164 						opt_percpu_arena = i;
1165 						match = true;
1166 						break;
1167 					}
1168 				}
1169 				if (!match) {
1170 					malloc_conf_error("Invalid conf value",
1171 					    k, klen, v, vlen);
1172 				}
1173 				continue;
1174 			}
1175 			CONF_HANDLE_BOOL(opt_background_thread,
1176 			    "background_thread");
1177 			CONF_HANDLE_SIZE_T(opt_max_background_threads,
1178 					   "max_background_threads", 1,
1179 					   opt_max_background_threads, yes, yes,
1180 					   true);
1181 			if (config_prof) {
1182 				CONF_HANDLE_BOOL(opt_prof, "prof")
1183 				CONF_HANDLE_CHAR_P(opt_prof_prefix,
1184 				    "prof_prefix", "jeprof")
1185 				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
1186 				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1187 				    "prof_thread_active_init")
1188 				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
1189 				    "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
1190 				    - 1, no, yes, true)
1191 				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
1192 				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
1193 				    "lg_prof_interval", -1,
1194 				    (sizeof(uint64_t) << 3) - 1)
1195 				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
1196 				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
1197 				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
1198 			}
1199 			if (config_log) {
1200 				if (CONF_MATCH("log")) {
1201 					size_t cpylen = (
1202 					    vlen <= sizeof(log_var_names) - 1 ?
1203 					    vlen : sizeof(log_var_names) - 1);
1204 					strncpy(log_var_names, v, cpylen);
1205 					log_var_names[cpylen] = '\0';
1206 					continue;
1207 				}
1208 			}
1209 			if (CONF_MATCH("thp")) {
1210 				bool match = false;
1211 				for (int i = 0; i < thp_mode_names_limit; i++) {
1212 					if (strncmp(thp_mode_names[i], v, vlen)
1213 					    == 0) {
1214 						if (!have_madvise_huge) {
1215 							malloc_conf_error(
1216 							    "No THP support",
1217 							    k, klen, v, vlen);
1218 						}
1219 						opt_thp = i;
1220 						match = true;
1221 						break;
1222 					}
1223 				}
1224 				if (!match) {
1225 					malloc_conf_error("Invalid conf value",
1226 					    k, klen, v, vlen);
1227 				}
1228 				continue;
1229 			}
1230 			malloc_conf_error("Invalid conf pair", k, klen, v,
1231 			    vlen);
1232 #undef CONF_MATCH
1233 #undef CONF_MATCH_VALUE
1234 #undef CONF_HANDLE_BOOL
1235 #undef CONF_MIN_no
1236 #undef CONF_MIN_yes
1237 #undef CONF_MAX_no
1238 #undef CONF_MAX_yes
1239 #undef CONF_HANDLE_T_U
1240 #undef CONF_HANDLE_UNSIGNED
1241 #undef CONF_HANDLE_SIZE_T
1242 #undef CONF_HANDLE_SSIZE_T
1243 #undef CONF_HANDLE_CHAR_P
1244 		}
1245 		if (opt_abort_conf && had_conf_error) {
1246 			malloc_abort_invalid_conf();
1247 		}
1248 	}
1249 	atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
1250 }
1251 
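/*
 * Sketch of how the four passes above combine (typical usage, assumed): each
 * pass re-parses a complete conf string, so later sources override earlier
 * ones for any option they both set -- compile-time config_malloc_conf, then
 * the je_malloc_conf global (normally visible to applications under the
 * documented name malloc_conf), then the /etc/malloc.conf symbolic link
 * target, and finally the MALLOC_CONF environment variable.  For example,
 * an application built with
 *
 *	const char *malloc_conf = "narenas:2,background_thread:true";
 *
 * sets defaults that a user can still override at run time with
 * MALLOC_CONF="narenas:8".
 */
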
1252 static bool
1253 malloc_init_hard_needed(void) {
1254 	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
1255 	    malloc_init_recursible)) {
1256 		/*
1257 		 * Another thread initialized the allocator before this one
1258 		 * acquired init_lock, or this thread is the initializing
1259 		 * thread, and it is recursively allocating.
1260 		 */
1261 		return false;
1262 	}
1263 #ifdef JEMALLOC_THREADED_INIT
1264 	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
1265 		/* Busy-wait until the initializing thread completes. */
1266 		spin_t spinner = SPIN_INITIALIZER;
1267 		do {
1268 			malloc_mutex_unlock(TSDN_NULL, &init_lock);
1269 			spin_adaptive(&spinner);
1270 			malloc_mutex_lock(TSDN_NULL, &init_lock);
1271 		} while (!malloc_initialized());
1272 		return false;
1273 	}
1274 #endif
1275 	return true;
1276 }
1277 
1278 static bool
1279 malloc_init_hard_a0_locked(void) {
1280 	malloc_initializer = INITIALIZER;
1281 
1282 	if (config_prof) {
1283 		prof_boot0();
1284 	}
1285 	malloc_conf_init();
1286 	if (opt_stats_print) {
1287 		/* Print statistics at exit. */
1288 		if (atexit(stats_print_atexit) != 0) {
1289 			malloc_write("<jemalloc>: Error in atexit()\n");
1290 			if (opt_abort) {
1291 				abort();
1292 			}
1293 		}
1294 	}
1295 	if (pages_boot()) {
1296 		return true;
1297 	}
1298 	if (base_boot(TSDN_NULL)) {
1299 		return true;
1300 	}
1301 	if (extent_boot()) {
1302 		return true;
1303 	}
1304 	if (ctl_boot()) {
1305 		return true;
1306 	}
1307 	if (config_prof) {
1308 		prof_boot1();
1309 	}
1310 	arena_boot();
1311 	if (tcache_boot(TSDN_NULL)) {
1312 		return true;
1313 	}
1314 	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
1315 	    malloc_mutex_rank_exclusive)) {
1316 		return true;
1317 	}
1318 	/*
1319 	 * Create enough scaffolding to allow recursive allocation in
1320 	 * malloc_ncpus().
1321 	 */
1322 	narenas_auto = 1;
1323 	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
1324 	/*
1325 	 * Initialize one arena here.  The rest are lazily created in
1326 	 * arena_choose_hard().
1327 	 */
1328 	if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
1329 	    == NULL) {
1330 		return true;
1331 	}
1332 	a0 = arena_get(TSDN_NULL, 0, false);
1333 	malloc_init_state = malloc_init_a0_initialized;
1334 
1335 	return false;
1336 }
1337 
1338 static bool
1339 malloc_init_hard_a0(void) {
1340 	bool ret;
1341 
1342 	malloc_mutex_lock(TSDN_NULL, &init_lock);
1343 	ret = malloc_init_hard_a0_locked();
1344 	malloc_mutex_unlock(TSDN_NULL, &init_lock);
1345 	return ret;
1346 }
1347 
1348 /* Initialize data structures which may trigger recursive allocation. */
1349 static bool
1350 malloc_init_hard_recursible(void) {
1351 	malloc_init_state = malloc_init_recursible;
1352 
1353 	ncpus = malloc_ncpus();
1354 
1355 #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
1356     && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
1357     !defined(__native_client__))
1358 	/* LinuxThreads' pthread_atfork() allocates. */
1359 	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1360 	    jemalloc_postfork_child) != 0) {
1361 		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1362 		if (opt_abort) {
1363 			abort();
1364 		}
1365 		return true;
1366 	}
1367 #endif
1368 
1369 	if (background_thread_boot0()) {
1370 		return true;
1371 	}
1372 
1373 	return false;
1374 }
1375 
1376 static unsigned
1377 malloc_narenas_default(void) {
1378 	assert(ncpus > 0);
1379 	/*
1380 	 * For SMP systems, create more than one arena per CPU by
1381 	 * default.
1382 	 */
1383 	if (ncpus > 1) {
1384 		return ncpus << 2;
1385 	} else {
1386 		return 1;
1387 	}
1388 }
1389 
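/*
 * Worked example: a 16-CPU SMP system defaults to 16 << 2 == 64 arenas,
 * while a single-CPU system gets exactly one.  A nonzero opt_narenas (the
 * "narenas" conf option handled above) takes precedence over this default
 * in malloc_init_narenas() below.
 */
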
1390 static percpu_arena_mode_t
1391 percpu_arena_as_initialized(percpu_arena_mode_t mode) {
1392 	assert(!malloc_initialized());
1393 	assert(mode <= percpu_arena_disabled);
1394 
1395 	if (mode != percpu_arena_disabled) {
1396 		mode += percpu_arena_mode_enabled_base;
1397 	}
1398 
1399 	return mode;
1400 }
1401 
1402 static bool
1403 malloc_init_narenas(void) {
1404 	assert(ncpus > 0);
1405 
1406 	if (opt_percpu_arena != percpu_arena_disabled) {
1407 		if (!have_percpu_arena || malloc_getcpu() < 0) {
1408 			opt_percpu_arena = percpu_arena_disabled;
1409 			malloc_printf("<jemalloc>: perCPU arena getcpu() not "
1410 			    "available. Setting narenas to %u.\n", opt_narenas ?
1411 			    opt_narenas : malloc_narenas_default());
1412 			if (opt_abort) {
1413 				abort();
1414 			}
1415 		} else {
1416 			if (ncpus >= MALLOCX_ARENA_LIMIT) {
1417 				malloc_printf("<jemalloc>: narenas w/ percpu "
1418 				    "arena beyond limit (%u)\n", ncpus);
1419 				if (opt_abort) {
1420 					abort();
1421 				}
1422 				return true;
1423 			}
1424 			/* NB: opt_percpu_arena isn't fully initialized yet. */
1425 			if (percpu_arena_as_initialized(opt_percpu_arena) ==
1426 			    per_phycpu_arena && ncpus % 2 != 0) {
1427 				malloc_printf("<jemalloc>: invalid "
1428 				    "configuration -- per physical CPU arena "
1429 				    "with odd number (%u) of CPUs (no hyper "
1430 				    "threading?).\n", ncpus);
1431 				if (opt_abort)
1432 					abort();
1433 			}
1434 			unsigned n = percpu_arena_ind_limit(
1435 			    percpu_arena_as_initialized(opt_percpu_arena));
1436 			if (opt_narenas < n) {
1437 				/*
1438 				 * If narenas is specified with percpu_arena
1439 				 * enabled, actual narenas is set as the greater
1440 				 * of the two. percpu_arena_choose will be free
1441 				 * to use any of the arenas based on CPU
1442 				 * id. This is conservative (at a small cost)
1443 				 * but ensures correctness.
1444 				 *
1445 				 * If for some reason the ncpus determined at
1446 				 * boot is not the actual number (e.g. because
1447 				 * of affinity setting from numactl), reserving
1448 				 * narenas this way provides a workaround for
1449 				 * percpu_arena.
1450 				 */
1451 				opt_narenas = n;
1452 			}
1453 		}
1454 	}
1455 	if (opt_narenas == 0) {
1456 		opt_narenas = malloc_narenas_default();
1457 	}
1458 	assert(opt_narenas > 0);
1459 
1460 	narenas_auto = opt_narenas;
1461 	/*
1462 	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
1463 	 */
1464 	if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
1465 		narenas_auto = MALLOCX_ARENA_LIMIT - 1;
1466 		malloc_printf("<jemalloc>: Reducing narenas to limit (%u)\n",
1467 		    narenas_auto);
1468 	}
1469 	narenas_total_set(narenas_auto);
1470 
1471 	return false;
1472 }
1473 
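/*
 * Example (mode names assumed to match percpu_arena_mode_names, i.e. the
 * documented "disabled", "percpu" and "phycpu" values): running with
 *
 *	MALLOC_CONF="percpu_arena:percpu"
 *
 * assigns each thread to the arena matching the CPU it runs on; the logic
 * above then raises opt_narenas to at least percpu_arena_ind_limit() so
 * every CPU id maps to a reserved arena slot.  "phycpu" instead shares one
 * arena between the hyperthreads of a physical core, which is why an odd
 * CPU count draws the warning above.
 */
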
1474 static void
1475 malloc_init_percpu(void) {
1476 	opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
1477 }
1478 
1479 static bool
1480 malloc_init_hard_finish(void) {
1481 	if (malloc_mutex_boot()) {
1482 		return true;
1483 	}
1484 
1485 	malloc_init_state = malloc_init_initialized;
1486 	malloc_slow_flag_init();
1487 
1488 	return false;
1489 }
1490 
1491 static void
1492 malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
1493 	malloc_mutex_assert_owner(tsdn, &init_lock);
1494 	malloc_mutex_unlock(tsdn, &init_lock);
1495 	if (reentrancy_set) {
1496 		assert(!tsdn_null(tsdn));
1497 		tsd_t *tsd = tsdn_tsd(tsdn);
1498 		assert(tsd_reentrancy_level_get(tsd) > 0);
1499 		post_reentrancy(tsd);
1500 	}
1501 }
1502 
1503 static bool
1504 malloc_init_hard(void) {
1505 	tsd_t *tsd;
1506 
1507 #if defined(_WIN32) && _WIN32_WINNT < 0x0600
1508 	_init_init_lock();
1509 #endif
1510 	malloc_mutex_lock(TSDN_NULL, &init_lock);
1511 
1512 #define UNLOCK_RETURN(tsdn, ret, reentrancy)		\
1513 	malloc_init_hard_cleanup(tsdn, reentrancy);	\
1514 	return ret;
1515 
1516 	if (!malloc_init_hard_needed()) {
1517 		UNLOCK_RETURN(TSDN_NULL, false, false)
1518 	}
1519 
1520 	if (malloc_init_state != malloc_init_a0_initialized &&
1521 	    malloc_init_hard_a0_locked()) {
1522 		UNLOCK_RETURN(TSDN_NULL, true, false)
1523 	}
1524 
1525 	malloc_mutex_unlock(TSDN_NULL, &init_lock);
1526 	/* Recursive allocation relies on functional tsd. */
1527 	tsd = malloc_tsd_boot0();
1528 	if (tsd == NULL) {
1529 		return true;
1530 	}
1531 	if (malloc_init_hard_recursible()) {
1532 		return true;
1533 	}
1534 
1535 	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
1536 	/* Set reentrancy level to 1 during init. */
1537 	pre_reentrancy(tsd, NULL);
1538 	/* Initialize narenas before prof_boot2 (for allocation). */
1539 	if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
1540 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1541 	}
1542 	if (config_prof && prof_boot2(tsd)) {
1543 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1544 	}
1545 
1546 	malloc_init_percpu();
1547 
1548 	if (malloc_init_hard_finish()) {
1549 		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
1550 	}
1551 	post_reentrancy(tsd);
1552 	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
1553 
1554 	witness_assert_lockless(witness_tsd_tsdn(
1555 	    tsd_witness_tsdp_get_unsafe(tsd)));
1556 	malloc_tsd_boot1();
1557 	/* Update TSD after tsd_boot1. */
1558 	tsd = tsd_fetch();
1559 	if (opt_background_thread) {
1560 		assert(have_background_thread);
1561 		/*
1562 		 * Need to finish init & unlock first before creating background
1563 		 * threads (pthread_create depends on malloc).  ctl_init (which
1564 		 * sets isthreaded) needs to be called without holding any lock.
1565 		 */
1566 		background_thread_ctl_init(tsd_tsdn(tsd));
1567 
1568 		malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock);
1569 		bool err = background_thread_create(tsd, 0);
1570 		malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock);
1571 		if (err) {
1572 			return true;
1573 		}
1574 	}
1575 #undef UNLOCK_RETURN
1576 	return false;
1577 }
1578 
1579 /*
1580  * End initialization functions.
1581  */
1582 /******************************************************************************/
1583 /*
1584  * Begin allocation-path internal functions and data structures.
1585  */
1586 
1587 /*
1588  * Settings determined by the documented behavior of the allocation functions.
1589  */
1590 typedef struct static_opts_s static_opts_t;
1591 struct static_opts_s {
1592 	/* Whether or not allocation size may overflow. */
1593 	bool may_overflow;
1594 	/* Whether or not allocations of size 0 should be treated as size 1. */
1595 	bool bump_empty_alloc;
1596 	/*
1597 	 * Whether to assert that allocations are not of size 0 (after any
1598 	 * bumping).
1599 	 */
1600 	bool assert_nonempty_alloc;
1601 
1602 	/*
1603 	 * Whether or not to modify the 'result' argument to malloc in case of
1604 	 * error.
1605 	 */
1606 	bool null_out_result_on_error;
1607 	/* Whether to set errno when we encounter an error condition. */
1608 	bool set_errno_on_error;
1609 
1610 	/*
1611 	 * The minimum valid alignment for functions requesting aligned storage.
1612 	 */
1613 	size_t min_alignment;
1614 
1615 	/* The error string to use if we oom. */
1616 	const char *oom_string;
1617 	/* The error string to use if the passed-in alignment is invalid. */
1618 	const char *invalid_alignment_string;
1619 
1620 	/*
1621 	 * False if we're configured to skip some time-consuming operations.
1622 	 *
1623 	 * This isn't really a malloc "behavior", but it acts as a useful
1624 	 * summary of several other static (or at least, static after program
1625 	 * initialization) options.
1626 	 */
1627 	bool slow;
1628 };
1629 
1630 JEMALLOC_ALWAYS_INLINE void
1631 static_opts_init(static_opts_t *static_opts) {
1632 	static_opts->may_overflow = false;
1633 	static_opts->bump_empty_alloc = false;
1634 	static_opts->assert_nonempty_alloc = false;
1635 	static_opts->null_out_result_on_error = false;
1636 	static_opts->set_errno_on_error = false;
1637 	static_opts->min_alignment = 0;
1638 	static_opts->oom_string = "";
1639 	static_opts->invalid_alignment_string = "";
1640 	static_opts->slow = false;
1641 }
1642 
1643 /*
1644  * These correspond to the macros in jemalloc/jemalloc_macros.h.  Broadly, we
1645  * should have one constant here per magic value there.  Note however that the
1646  * representations need not be related.
1647  */
1648 #define TCACHE_IND_NONE ((unsigned)-1)
1649 #define TCACHE_IND_AUTOMATIC ((unsigned)-2)
1650 #define ARENA_IND_AUTOMATIC ((unsigned)-1)
1651 
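/*
 * For reference (mapping assumed from the public jemalloc_macros.h): the
 * flag macros passed to mallocx()/rallocx() are decoded by the entry points
 * into these internal indices, roughly:
 *
 *	mallocx(sz, MALLOCX_TCACHE_NONE)  ->  tcache_ind = TCACHE_IND_NONE
 *	mallocx(sz, MALLOCX_TCACHE(tc))   ->  tcache_ind = tc
 *	mallocx(sz, MALLOCX_ARENA(ind))   ->  arena_ind = ind
 *	mallocx(sz, 0)                    ->  automatic tcache and arena
 */
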
1652 typedef struct dynamic_opts_s dynamic_opts_t;
1653 struct dynamic_opts_s {
1654 	void **result;
1655 	size_t num_items;
1656 	size_t item_size;
1657 	size_t alignment;
1658 	bool zero;
1659 	unsigned tcache_ind;
1660 	unsigned arena_ind;
1661 };
1662 
1663 JEMALLOC_ALWAYS_INLINE void
1664 dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
1665 	dynamic_opts->result = NULL;
1666 	dynamic_opts->num_items = 0;
1667 	dynamic_opts->item_size = 0;
1668 	dynamic_opts->alignment = 0;
1669 	dynamic_opts->zero = false;
1670 	dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
1671 	dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
1672 }
1673 
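/*
 * Minimal sketch of how a public entry point is expected to drive these two
 * structs (the field choices are illustrative, not the exact settings used
 * by the real wrappers later in this file):
 *
 *	void *ret;
 *	static_opts_t sopts;
 *	dynamic_opts_t dopts;
 *
 *	static_opts_init(&sopts);
 *	dynamic_opts_init(&dopts);
 *	sopts.bump_empty_alloc = true;
 *	sopts.null_out_result_on_error = true;
 *	sopts.set_errno_on_error = true;
 *	sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
 *	dopts.result = &ret;
 *	dopts.num_items = 1;
 *	dopts.item_size = size;
 *	imalloc(&sopts, &dopts);
 *	return ret;
 */
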
1674 /* ind is ignored if dopts->alignment > 0. */
1675 JEMALLOC_ALWAYS_INLINE void *
1676 imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1677     size_t size, size_t usize, szind_t ind) {
1678 	tcache_t *tcache;
1679 	arena_t *arena;
1680 
1681 	/* Fill in the tcache. */
1682 	if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
1683 		if (likely(!sopts->slow)) {
1684 			/* Getting tcache ptr unconditionally. */
1685 			tcache = tsd_tcachep_get(tsd);
1686 			assert(tcache == tcache_get(tsd));
1687 		} else {
1688 			tcache = tcache_get(tsd);
1689 		}
1690 	} else if (dopts->tcache_ind == TCACHE_IND_NONE) {
1691 		tcache = NULL;
1692 	} else {
1693 		tcache = tcaches_get(tsd, dopts->tcache_ind);
1694 	}
1695 
1696 	/* Fill in the arena. */
1697 	if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
1698 		/*
1699 		 * In case of automatic arena management, we defer arena
1700 		 * computation until as late as we can, hoping to fill the
1701 		 * allocation out of the tcache.
1702 		 */
1703 		arena = NULL;
1704 	} else {
1705 		arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
1706 	}
1707 
1708 	if (unlikely(dopts->alignment != 0)) {
1709 		return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
1710 		    dopts->zero, tcache, arena);
1711 	}
1712 
1713 	return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
1714 	    arena, sopts->slow);
1715 }
1716 
1717 JEMALLOC_ALWAYS_INLINE void *
1718 imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1719     size_t usize, szind_t ind) {
1720 	void *ret;
1721 
1722 	/*
1723 	 * For small allocations, sampling bumps the usize.  If so, we allocate
1724 	 * from the ind_large bucket.
1725 	 */
1726 	szind_t ind_large;
1727 	size_t bumped_usize = usize;
1728 
1729 	if (usize <= SMALL_MAXCLASS) {
1730 		assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) :
1731 		    sz_sa2u(LARGE_MINCLASS, dopts->alignment))
1732 		    == LARGE_MINCLASS);
1733 		ind_large = sz_size2index(LARGE_MINCLASS);
1734 		bumped_usize = sz_s2u(LARGE_MINCLASS);
1735 		ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
1736 		    bumped_usize, ind_large);
1737 		if (unlikely(ret == NULL)) {
1738 			return NULL;
1739 		}
1740 		arena_prof_promote(tsd_tsdn(tsd), ret, usize);
1741 	} else {
1742 		ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
1743 	}
1744 
1745 	return ret;
1746 }
1747 
1748 /*
1749  * Returns true if the allocation will overflow, and false otherwise.  Sets
1750  * *size to the product either way.
1751  */
1752 JEMALLOC_ALWAYS_INLINE bool
1753 compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
1754     size_t *size) {
1755 	/*
1756 	 * This function is just num_items * item_size, except that we may have
1757 	 * to check for overflow.
1758 	 */
1759 
1760 	if (!may_overflow) {
1761 		assert(dopts->num_items == 1);
1762 		*size = dopts->item_size;
1763 		return false;
1764 	}
1765 
1766 	/* A size_t with its high-half bits all set to 1. */
1767 	static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
1768 
1769 	*size = dopts->item_size * dopts->num_items;
1770 
1771 	if (unlikely(*size == 0)) {
1772 		return (dopts->num_items != 0 && dopts->item_size != 0);
1773 	}
1774 
1775 	/*
1776 	 * We got a non-zero size, but we don't know if we overflowed to get
1777 	 * there.  To avoid having to do a divide, we'll be clever and note that
1778 	 * if both A and B can be represented in N/2 bits, then their product
1779 	 * can be represented in N bits (without the possibility of overflow).
1780 	 */
1781 	if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
1782 		return false;
1783 	}
1784 	if (likely(*size / dopts->item_size == dopts->num_items)) {
1785 		return false;
1786 	}
1787 	return true;
1788 }
1789 
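/*
 * Worked example of the check above with a 64-bit size_t (high_bits ==
 * 0xffffffff00000000): for num_items == 0x10000 and item_size == 0x10000
 * both operands fit in 32 bits, so the mask test alone proves the product
 * (0x100000000) cannot have overflowed.  For num_items == 0x100000001 and
 * item_size == 0x10 the mask test is inconclusive, but the division check
 * sees 0x1000000010 / 0x10 == 0x100000001 and still reports no overflow;
 * only genuinely overflowing products reach "return true".
 */
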
1790 JEMALLOC_ALWAYS_INLINE int
1791 imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
1792 	/* Where the actual allocated memory will live. */
1793 	void *allocation = NULL;
1794 	/* Filled in by compute_size_with_overflow below. */
1795 	size_t size = 0;
1796 	/*
1797 	 * For unaligned allocations, we need only ind.  For aligned
1798 	 * allocations, or in case of stats or profiling we need usize.
1799 	 *
1800 	 * These are actually dead stores, in that their values are reset before
1801 	 * any branch on their value is taken.  Sometimes though, it's
1802 	 * convenient to pass them as arguments before this point.  To avoid
1803 	 * undefined behavior then, we initialize them with dummy stores.
1804 	 */
1805 	szind_t ind = 0;
1806 	size_t usize = 0;
1807 
1808 	/* Reentrancy is only checked on slow path. */
1809 	int8_t reentrancy_level;
1810 
1811 	/* Compute the amount of memory the user wants. */
1812 	if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
1813 	    &size))) {
1814 		goto label_oom;
1815 	}
1816 
1817 	/* Validate the user input. */
1818 	if (sopts->bump_empty_alloc) {
1819 		if (unlikely(size == 0)) {
1820 			size = 1;
1821 		}
1822 	}
1823 
1824 	if (sopts->assert_nonempty_alloc) {
1825 		assert(size != 0);
1826 	}
1827 
1828 	if (unlikely(dopts->alignment < sopts->min_alignment
1829 	    || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
1830 		goto label_invalid_alignment;
1831 	}
1832 
1833 	/* This is the beginning of the "core" algorithm. */
1834 
1835 	if (dopts->alignment == 0) {
1836 		ind = sz_size2index(size);
1837 		if (unlikely(ind >= NSIZES)) {
1838 			goto label_oom;
1839 		}
1840 		if (config_stats || (config_prof && opt_prof)) {
1841 			usize = sz_index2size(ind);
1842 			assert(usize > 0 && usize <= LARGE_MAXCLASS);
1843 		}
1844 	} else {
1845 		usize = sz_sa2u(size, dopts->alignment);
1846 		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
1847 			goto label_oom;
1848 		}
1849 	}
1850 
1851 	check_entry_exit_locking(tsd_tsdn(tsd));
1852 
1853 	/*
1854 	 * If we need to handle reentrancy, we can do it out of a
1855 	 * known-initialized arena (i.e. arena 0).
1856 	 */
1857 	reentrancy_level = tsd_reentrancy_level_get(tsd);
1858 	if (sopts->slow && unlikely(reentrancy_level > 0)) {
1859 		/*
1860 		 * We should never specify particular arenas or tcaches from
1861 		 * within our internal allocations.
1862 		 */
1863 		assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
1864 		    dopts->tcache_ind == TCACHE_IND_NONE);
1865 		assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
1866 		dopts->tcache_ind = TCACHE_IND_NONE;
1867 		/* We know that arena 0 has already been initialized. */
1868 		dopts->arena_ind = 0;
1869 	}
1870 
1871 	/* If profiling is on, get our profiling context. */
1872 	if (config_prof && opt_prof) {
1873 		/*
1874 		 * Note that if we're going down this path, usize must have been
1875 		 * initialized in the previous if statement.
1876 		 */
1877 		prof_tctx_t *tctx = prof_alloc_prep(
1878 		    tsd, usize, prof_active_get_unlocked(), true);
1879 
1880 		alloc_ctx_t alloc_ctx;
1881 		if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
1882 			alloc_ctx.slab = (usize <= SMALL_MAXCLASS);
1883 			allocation = imalloc_no_sample(
1884 			    sopts, dopts, tsd, usize, usize, ind);
1885 		} else if ((uintptr_t)tctx > (uintptr_t)1U) {
1886 			/*
1887 			 * Note that ind might still be 0 here.  This is fine;
1888 			 * imalloc_sample ignores ind if dopts->alignment > 0.
1889 			 */
1890 			allocation = imalloc_sample(
1891 			    sopts, dopts, tsd, usize, ind);
1892 			alloc_ctx.slab = false;
1893 		} else {
1894 			allocation = NULL;
1895 		}
1896 
1897 		if (unlikely(allocation == NULL)) {
1898 			prof_alloc_rollback(tsd, tctx, true);
1899 			goto label_oom;
1900 		}
1901 		prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
1902 	} else {
1903 		/*
1904 		 * If dopts->alignment > 0, then ind is still 0, but usize was
1905 		 * computed in the previous if statement.  Down the positive
1906 		 * alignment path, imalloc_no_sample ignores ind and size
1907 		 * (relying only on usize).
1908 		 */
1909 		allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
1910 		    ind);
1911 		if (unlikely(allocation == NULL)) {
1912 			goto label_oom;
1913 		}
1914 	}
1915 
1916 	/*
1917 	 * Allocation has been done at this point.  We still have some
1918 	 * post-allocation work to do though.
1919 	 */
1920 	assert(dopts->alignment == 0
1921 	    || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
1922 
1923 	if (config_stats) {
1924 		assert(usize == isalloc(tsd_tsdn(tsd), allocation));
1925 		*tsd_thread_allocatedp_get(tsd) += usize;
1926 	}
1927 
1928 	if (sopts->slow) {
1929 		UTRACE(0, size, allocation);
1930 	}
1931 
1932 	/* Success! */
1933 	check_entry_exit_locking(tsd_tsdn(tsd));
1934 	*dopts->result = allocation;
1935 	return 0;
1936 
1937 label_oom:
1938 	if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
1939 		malloc_write(sopts->oom_string);
1940 		abort();
1941 	}
1942 
1943 	if (sopts->slow) {
1944 		UTRACE(NULL, size, NULL);
1945 	}
1946 
1947 	check_entry_exit_locking(tsd_tsdn(tsd));
1948 
1949 	if (sopts->set_errno_on_error) {
1950 		set_errno(ENOMEM);
1951 	}
1952 
1953 	if (sopts->null_out_result_on_error) {
1954 		*dopts->result = NULL;
1955 	}
1956 
1957 	return ENOMEM;
1958 
1959 	/*
1960 	 * This label is only jumped to by one goto; we move it out of line
1961 	 * anyway to avoid obscuring the non-error paths, and for symmetry with
1962 	 * the oom case.
1963 	 */
1964 label_invalid_alignment:
1965 	if (config_xmalloc && unlikely(opt_xmalloc)) {
1966 		malloc_write(sopts->invalid_alignment_string);
1967 		abort();
1968 	}
1969 
1970 	if (sopts->set_errno_on_error) {
1971 		set_errno(EINVAL);
1972 	}
1973 
1974 	if (sopts->slow) {
1975 		UTRACE(NULL, size, NULL);
1976 	}
1977 
1978 	check_entry_exit_locking(tsd_tsdn(tsd));
1979 
1980 	if (sopts->null_out_result_on_error) {
1981 		*dopts->result = NULL;
1982 	}
1983 
1984 	return EINVAL;
1985 }
1986 
1987 /* Returns the errno-style error code of the allocation. */
1988 JEMALLOC_ALWAYS_INLINE int
1989 imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
1990 	if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
1991 		if (config_xmalloc && unlikely(opt_xmalloc)) {
1992 			malloc_write(sopts->oom_string);
1993 			abort();
1994 		}
1995 		UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
1996 		set_errno(ENOMEM);
1997 		*dopts->result = NULL;
1998 
1999 		return ENOMEM;
2000 	}
2001 
2002 	/* We always need the tsd.  Let's grab it right away. */
2003 	tsd_t *tsd = tsd_fetch();
2004 	assert(tsd);
2005 	if (likely(tsd_fast(tsd))) {
2006 		/* Fast and common path. */
2007 		tsd_assert_fast(tsd);
2008 		sopts->slow = false;
2009 		return imalloc_body(sopts, dopts, tsd);
2010 	} else {
2011 		sopts->slow = true;
2012 		return imalloc_body(sopts, dopts, tsd);
2013 	}
2014 }
2015 /******************************************************************************/
2016 /*
2017  * Begin malloc(3)-compatible functions.
2018  */
2019 
2020 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2021 void JEMALLOC_NOTHROW *
2022 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2023 je_malloc(size_t size) {
2024 	void *ret;
2025 	static_opts_t sopts;
2026 	dynamic_opts_t dopts;
2027 
2028 	LOG("core.malloc.entry", "size: %zu", size);
2029 
2030 	static_opts_init(&sopts);
2031 	dynamic_opts_init(&dopts);
2032 
2033 	sopts.bump_empty_alloc = true;
2034 	sopts.null_out_result_on_error = true;
2035 	sopts.set_errno_on_error = true;
2036 	sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
2037 
2038 	dopts.result = &ret;
2039 	dopts.num_items = 1;
2040 	dopts.item_size = size;
2041 
2042 	imalloc(&sopts, &dopts);
2043 
2044 	LOG("core.malloc.exit", "result: %p", ret);
2045 
2046 	return ret;
2047 }
2048 
2049 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2050 JEMALLOC_ATTR(nonnull(1))
2051 je_posix_memalign(void **memptr, size_t alignment, size_t size) {
2052 	int ret;
2053 	static_opts_t sopts;
2054 	dynamic_opts_t dopts;
2055 
2056 	LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
2057 	    "size: %zu", memptr, alignment, size);
2058 
2059 	static_opts_init(&sopts);
2060 	dynamic_opts_init(&dopts);
2061 
2062 	sopts.bump_empty_alloc = true;
2063 	sopts.min_alignment = sizeof(void *);
2064 	sopts.oom_string =
2065 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2066 	sopts.invalid_alignment_string =
2067 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2068 
2069 	dopts.result = memptr;
2070 	dopts.num_items = 1;
2071 	dopts.item_size = size;
2072 	dopts.alignment = alignment;
2073 
2074 	ret = imalloc(&sopts, &dopts);
2075 
2076 	LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
2077 	    *memptr);
2078 
2079 	return ret;
2080 }
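
/*
 * Usage sketch (illustrative only, not part of this file): as configured
 * above, posix_memalign() requires the alignment to be a power of two and at
 * least sizeof(void *); otherwise it returns EINVAL and leaves *memptr
 * untouched.  The function name below is hypothetical.
 */
#if 0
#include <errno.h>
#include <stdlib.h>

int
example_posix_memalign(void) {
	void *p;
	/* 64 is a power of two and >= sizeof(void *), so this succeeds. */
	int err = posix_memalign(&p, 64, 1024);
	if (err != 0) {
		return err;
	}
	free(p);
	/* An alignment of 3 is rejected: it is not a power of two. */
	err = posix_memalign(&p, 3, 1024);
	return (err == EINVAL) ? 0 : 1;
}
#endif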
2081 
2082 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2083 void JEMALLOC_NOTHROW *
2084 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
2085 je_aligned_alloc(size_t alignment, size_t size) {
2086 	void *ret;
2087 
2088 	static_opts_t sopts;
2089 	dynamic_opts_t dopts;
2090 
2091 	LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
2092 	    alignment, size);
2093 
2094 	static_opts_init(&sopts);
2095 	dynamic_opts_init(&dopts);
2096 
2097 	sopts.bump_empty_alloc = true;
2098 	sopts.null_out_result_on_error = true;
2099 	sopts.set_errno_on_error = true;
2100 	sopts.min_alignment = 1;
2101 	sopts.oom_string =
2102 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2103 	sopts.invalid_alignment_string =
2104 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2105 
2106 	dopts.result = &ret;
2107 	dopts.num_items = 1;
2108 	dopts.item_size = size;
2109 	dopts.alignment = alignment;
2110 
2111 	imalloc(&sopts, &dopts);
2112 
2113 	LOG("core.aligned_alloc.exit", "result: %p", ret);
2114 
2115 	return ret;
2116 }
2117 
2118 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2119 void JEMALLOC_NOTHROW *
2120 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
2121 je_calloc(size_t num, size_t size) {
2122 	void *ret;
2123 	static_opts_t sopts;
2124 	dynamic_opts_t dopts;
2125 
2126 	LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);
2127 
2128 	static_opts_init(&sopts);
2129 	dynamic_opts_init(&dopts);
2130 
2131 	sopts.may_overflow = true;
2132 	sopts.bump_empty_alloc = true;
2133 	sopts.null_out_result_on_error = true;
2134 	sopts.set_errno_on_error = true;
2135 	sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
2136 
2137 	dopts.result = &ret;
2138 	dopts.num_items = num;
2139 	dopts.item_size = size;
2140 	dopts.zero = true;
2141 
2142 	imalloc(&sopts, &dopts);
2143 
2144 	LOG("core.calloc.exit", "result: %p", ret);
2145 
2146 	return ret;
2147 }
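
/*
 * Usage sketch (illustrative only): with sopts.may_overflow set above, a
 * calloc() request whose item count times item size overflows size_t must
 * fail with ENOMEM rather than silently wrap and under-allocate.
 */
#if 0
#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

int
example_calloc_overflow(void) {
	/* (SIZE_MAX / 2) * 4 does not fit in a size_t. */
	void *p = calloc(SIZE_MAX / 2, 4);
	return (p == NULL && errno == ENOMEM) ? 0 : 1;
}
#endif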
2148 
2149 static void *
2150 irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2151     prof_tctx_t *tctx) {
2152 	void *p;
2153 
2154 	if (tctx == NULL) {
2155 		return NULL;
2156 	}
2157 	if (usize <= SMALL_MAXCLASS) {
2158 		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
2159 		if (p == NULL) {
2160 			return NULL;
2161 		}
2162 		arena_prof_promote(tsd_tsdn(tsd), p, usize);
2163 	} else {
2164 		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2165 	}
2166 
2167 	return p;
2168 }
2169 
2170 JEMALLOC_ALWAYS_INLINE void *
2171 irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2172    alloc_ctx_t *alloc_ctx) {
2173 	void *p;
2174 	bool prof_active;
2175 	prof_tctx_t *old_tctx, *tctx;
2176 
2177 	prof_active = prof_active_get_unlocked();
2178 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2179 	tctx = prof_alloc_prep(tsd, usize, prof_active, true);
2180 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2181 		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
2182 	} else {
2183 		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
2184 	}
2185 	if (unlikely(p == NULL)) {
2186 		prof_alloc_rollback(tsd, tctx, true);
2187 		return NULL;
2188 	}
2189 	prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
2190 	    old_tctx);
2191 
2192 	return p;
2193 }
2194 
2195 JEMALLOC_ALWAYS_INLINE void
2196 ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
2197 	if (!slow_path) {
2198 		tsd_assert_fast(tsd);
2199 	}
2200 	check_entry_exit_locking(tsd_tsdn(tsd));
2201 	if (tsd_reentrancy_level_get(tsd) != 0) {
2202 		assert(slow_path);
2203 	}
2204 
2205 	assert(ptr != NULL);
2206 	assert(malloc_initialized() || IS_INITIALIZER);
2207 
2208 	alloc_ctx_t alloc_ctx;
2209 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2210 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2211 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2212 	assert(alloc_ctx.szind != NSIZES);
2213 
2214 	size_t usize;
2215 	if (config_prof && opt_prof) {
2216 		usize = sz_index2size(alloc_ctx.szind);
2217 		prof_free(tsd, ptr, usize, &alloc_ctx);
2218 	} else if (config_stats) {
2219 		usize = sz_index2size(alloc_ctx.szind);
2220 	}
2221 	if (config_stats) {
2222 		*tsd_thread_deallocatedp_get(tsd) += usize;
2223 	}
2224 
2225 	if (likely(!slow_path)) {
2226 		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2227 		    false);
2228 	} else {
2229 		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
2230 		    true);
2231 	}
2232 }
2233 
2234 JEMALLOC_ALWAYS_INLINE void
2235 isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
2236 	if (!slow_path) {
2237 		tsd_assert_fast(tsd);
2238 	}
2239 	check_entry_exit_locking(tsd_tsdn(tsd));
2240 	if (tsd_reentrancy_level_get(tsd) != 0) {
2241 		assert(slow_path);
2242 	}
2243 
2244 	assert(ptr != NULL);
2245 	assert(malloc_initialized() || IS_INITIALIZER);
2246 
2247 	alloc_ctx_t alloc_ctx, *ctx;
2248 	if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
2249 		/*
2250 		 * When cache_oblivious is disabled and ptr is not page aligned,
2251 		 * the allocation was not sampled -- usize can be used to
2252 		 * determine szind directly.
2253 		 */
2254 		alloc_ctx.szind = sz_size2index(usize);
2255 		alloc_ctx.slab = true;
2256 		ctx = &alloc_ctx;
2257 		if (config_debug) {
2258 			alloc_ctx_t dbg_ctx;
2259 			rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2260 			rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
2261 			    rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
2262 			    &dbg_ctx.slab);
2263 			assert(dbg_ctx.szind == alloc_ctx.szind);
2264 			assert(dbg_ctx.slab == alloc_ctx.slab);
2265 		}
2266 	} else if (config_prof && opt_prof) {
2267 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2268 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2269 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2270 		assert(alloc_ctx.szind == sz_size2index(usize));
2271 		ctx = &alloc_ctx;
2272 	} else {
2273 		ctx = NULL;
2274 	}
2275 
2276 	if (config_prof && opt_prof) {
2277 		prof_free(tsd, ptr, usize, ctx);
2278 	}
2279 	if (config_stats) {
2280 		*tsd_thread_deallocatedp_get(tsd) += usize;
2281 	}
2282 
2283 	if (likely(!slow_path)) {
2284 		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
2285 	} else {
2286 		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
2287 	}
2288 }
2289 
2290 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2291 void JEMALLOC_NOTHROW *
2292 JEMALLOC_ALLOC_SIZE(2)
2293 je_realloc(void *ptr, size_t size) {
2294 	void *ret;
2295 	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
2296 	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
2297 	size_t old_usize = 0;
2298 
2299 	LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);
2300 
2301 	if (unlikely(size == 0)) {
2302 		size = 1;
2303 	}
2304 
2305 	if (likely(ptr != NULL)) {
2306 		assert(malloc_initialized() || IS_INITIALIZER);
2307 		tsd_t *tsd = tsd_fetch();
2308 
2309 		check_entry_exit_locking(tsd_tsdn(tsd));
2310 
2311 		alloc_ctx_t alloc_ctx;
2312 		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2313 		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2314 		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2315 		assert(alloc_ctx.szind != NSIZES);
2316 		old_usize = sz_index2size(alloc_ctx.szind);
2317 		assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2318 		if (config_prof && opt_prof) {
2319 			usize = sz_s2u(size);
2320 			ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ?
2321 			    NULL : irealloc_prof(tsd, ptr, old_usize, usize,
2322 			    &alloc_ctx);
2323 		} else {
2324 			if (config_stats) {
2325 				usize = sz_s2u(size);
2326 			}
2327 			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
2328 		}
2329 		tsdn = tsd_tsdn(tsd);
2330 	} else {
2331 		/* realloc(NULL, size) is equivalent to malloc(size). */
2332 		void *ret = je_malloc(size);
2333 		LOG("core.realloc.exit", "result: %p", ret);
2334 		return ret;
2335 	}
2336 
2337 	if (unlikely(ret == NULL)) {
2338 		if (config_xmalloc && unlikely(opt_xmalloc)) {
2339 			malloc_write("<jemalloc>: Error in realloc(): "
2340 			    "out of memory\n");
2341 			abort();
2342 		}
2343 		set_errno(ENOMEM);
2344 	}
2345 	if (config_stats && likely(ret != NULL)) {
2346 		tsd_t *tsd;
2347 
2348 		assert(usize == isalloc(tsdn, ret));
2349 		tsd = tsdn_tsd(tsdn);
2350 		*tsd_thread_allocatedp_get(tsd) += usize;
2351 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2352 	}
2353 	UTRACE(ptr, size, ret);
2354 	check_entry_exit_locking(tsdn);
2355 
2356 	LOG("core.realloc.exit", "result: %p", ret);
2357 	return ret;
2358 }
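
/*
 * Usage sketch (illustrative only): realloc(NULL, size) behaves like
 * malloc(size), and on failure the original allocation remains valid, so the
 * returned pointer must be checked before the old one is discarded.
 */
#if 0
#include <stdlib.h>
#include <string.h>

int
example_realloc(void) {
	char *p = realloc(NULL, 16);	/* Equivalent to malloc(16). */
	if (p == NULL) {
		return 1;
	}
	memset(p, 0, 16);
	char *q = realloc(p, 4096);	/* Growing may move the block. */
	if (q == NULL) {
		free(p);		/* p is still valid on failure. */
		return 1;
	}
	free(q);
	return 0;
}
#endif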
2359 
2360 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2361 je_free(void *ptr) {
2362 	LOG("core.free.entry", "ptr: %p", ptr);
2363 
2364 	UTRACE(ptr, 0, 0);
2365 	if (likely(ptr != NULL)) {
2366 		/*
2367 		 * We avoid setting up tsd fully (e.g. tcache, arena binding)
2368 		 * based on only free() calls -- other activities trigger the
2369 		 * minimal to full transition.  This is because free() may
2370 	 * happen during thread shutdown after TLS deallocation: if a
2371 		 * thread never had any malloc activities until then, a
2372 		 * fully-setup tsd won't be destructed properly.
2373 		 */
2374 		tsd_t *tsd = tsd_fetch_min();
2375 		check_entry_exit_locking(tsd_tsdn(tsd));
2376 
2377 		tcache_t *tcache;
2378 		if (likely(tsd_fast(tsd))) {
2379 			tsd_assert_fast(tsd);
2380 			/* Unconditionally get tcache ptr on fast path. */
2381 			tcache = tsd_tcachep_get(tsd);
2382 			ifree(tsd, ptr, tcache, false);
2383 		} else {
2384 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2385 				tcache = tcache_get(tsd);
2386 			} else {
2387 				tcache = NULL;
2388 			}
2389 			ifree(tsd, ptr, tcache, true);
2390 		}
2391 		check_entry_exit_locking(tsd_tsdn(tsd));
2392 	}
2393 	LOG("core.free.exit", "");
2394 }
2395 
2396 /*
2397  * End malloc(3)-compatible functions.
2398  */
2399 /******************************************************************************/
2400 /*
2401  * Begin non-standard override functions.
2402  */
2403 
2404 #ifdef JEMALLOC_OVERRIDE_MEMALIGN
2405 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2406 void JEMALLOC_NOTHROW *
2407 JEMALLOC_ATTR(malloc)
2408 je_memalign(size_t alignment, size_t size) {
2409 	void *ret;
2410 	static_opts_t sopts;
2411 	dynamic_opts_t dopts;
2412 
2413 	LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
2414 	    size);
2415 
2416 	static_opts_init(&sopts);
2417 	dynamic_opts_init(&dopts);
2418 
2419 	sopts.bump_empty_alloc = true;
2420 	sopts.min_alignment = 1;
2421 	sopts.oom_string =
2422 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2423 	sopts.invalid_alignment_string =
2424 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2425 	sopts.null_out_result_on_error = true;
2426 
2427 	dopts.result = &ret;
2428 	dopts.num_items = 1;
2429 	dopts.item_size = size;
2430 	dopts.alignment = alignment;
2431 
2432 	imalloc(&sopts, &dopts);
2433 
2434 	LOG("core.memalign.exit", "result: %p", ret);
2435 	return ret;
2436 }
2437 #endif
2438 
2439 #ifdef JEMALLOC_OVERRIDE_VALLOC
2440 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2441 void JEMALLOC_NOTHROW *
2442 JEMALLOC_ATTR(malloc)
2443 je_valloc(size_t size) {
2444 	void *ret;
2445 
2446 	static_opts_t sopts;
2447 	dynamic_opts_t dopts;
2448 
2449 	LOG("core.valloc.entry", "size: %zu\n", size);
2450 
2451 	static_opts_init(&sopts);
2452 	dynamic_opts_init(&dopts);
2453 
2454 	sopts.bump_empty_alloc = true;
2455 	sopts.null_out_result_on_error = true;
2456 	sopts.min_alignment = PAGE;
2457 	sopts.oom_string =
2458 	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
2459 	sopts.invalid_alignment_string =
2460 	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2461 
2462 	dopts.result = &ret;
2463 	dopts.num_items = 1;
2464 	dopts.item_size = size;
2465 	dopts.alignment = PAGE;
2466 
2467 	imalloc(&sopts, &dopts);
2468 
2469 	LOG("core.valloc.exit", "result: %p\n", ret);
2470 	return ret;
2471 }
2472 #endif
2473 
2474 #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
2475 /*
2476  * glibc provides the RTLD_DEEPBIND flag for dlopen(), which can cause libc's
2477  * malloc(3)-compatible functions to be referenced inconsistently
2478  * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
2479  *
2480  * These definitions interpose hooks in glibc.  The functions are actually
2481  * passed an extra argument for the caller return address, which will be
2482  * ignored.
2483  */
2484 JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
2485 JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
2486 JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
2487 #  ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
2488 JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
2489     je_memalign;
2490 #  endif
2491 
2492 #  ifdef CPU_COUNT
2493 /*
2494  * To enable static linking with glibc, the libc-specific malloc interface
2495  * must also be implemented, so that none of glibc's malloc.o functions are
2496  * added to the link.
2497  */
2498 #    define ALIAS(je_fn)	__attribute__((alias (#je_fn), used))
2499 /* To force macro expansion of je_ prefix before stringification. */
2500 #    define PREALIAS(je_fn)	ALIAS(je_fn)
2501 #    ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
2502 void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
2503 #    endif
2504 #    ifdef JEMALLOC_OVERRIDE___LIBC_FREE
2505 void __libc_free(void* ptr) PREALIAS(je_free);
2506 #    endif
2507 #    ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
2508 void *__libc_malloc(size_t size) PREALIAS(je_malloc);
2509 #    endif
2510 #    ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
2511 void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
2512 #    endif
2513 #    ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
2514 void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
2515 #    endif
2516 #    ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
2517 void *__libc_valloc(size_t size) PREALIAS(je_valloc);
2518 #    endif
2519 #    ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
2520 int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
2521 #    endif
2522 #    undef PREALIAS
2523 #    undef ALIAS
2524 #  endif
2525 #endif
2526 
2527 /*
2528  * End non-standard override functions.
2529  */
2530 /******************************************************************************/
2531 /*
2532  * Begin non-standard functions.
2533  */
2534 
2535 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2536 void JEMALLOC_NOTHROW *
2537 JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2538 je_mallocx(size_t size, int flags) {
2539 	void *ret;
2540 	static_opts_t sopts;
2541 	dynamic_opts_t dopts;
2542 
2543 	LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);
2544 
2545 	static_opts_init(&sopts);
2546 	dynamic_opts_init(&dopts);
2547 
2548 	sopts.assert_nonempty_alloc = true;
2549 	sopts.null_out_result_on_error = true;
2550 	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
2551 
2552 	dopts.result = &ret;
2553 	dopts.num_items = 1;
2554 	dopts.item_size = size;
2555 	if (unlikely(flags != 0)) {
2556 		if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
2557 			dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
2558 		}
2559 
2560 		dopts.zero = MALLOCX_ZERO_GET(flags);
2561 
2562 		if ((flags & MALLOCX_TCACHE_MASK) != 0) {
2563 			if ((flags & MALLOCX_TCACHE_MASK)
2564 			    == MALLOCX_TCACHE_NONE) {
2565 				dopts.tcache_ind = TCACHE_IND_NONE;
2566 			} else {
2567 				dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
2568 			}
2569 		} else {
2570 			dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
2571 		}
2572 
2573 		if ((flags & MALLOCX_ARENA_MASK) != 0) {
2574 			dopts.arena_ind = MALLOCX_ARENA_GET(flags);
		}
2575 	}
2576 
2577 	imalloc(&sopts, &dopts);
2578 
2579 	LOG("core.mallocx.exit", "result: %p", ret);
2580 	return ret;
2581 }
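
/*
 * Usage sketch (illustrative only): the MALLOCX_* macros pack alignment,
 * zeroing, tcache, and arena selection into the flags word decoded above.
 * The example assumes FreeBSD's <malloc_np.h> declarations of the
 * un-prefixed public entry points.
 */
#if 0
#include <malloc_np.h>

int
example_mallocx(void) {
	/* 4 KiB, 64-byte aligned, zero-filled, bypassing the thread cache. */
	void *p = mallocx(4096,
	    MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_TCACHE_NONE);
	if (p == NULL) {
		return 1;
	}
	dallocx(p, MALLOCX_TCACHE_NONE);
	return 0;
}
#endif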
2582 
2583 static void *
2584 irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
2585     size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
2586     prof_tctx_t *tctx) {
2587 	void *p;
2588 
2589 	if (tctx == NULL) {
2590 		return NULL;
2591 	}
2592 	if (usize <= SMALL_MAXCLASS) {
2593 		p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS,
2594 		    alignment, zero, tcache, arena);
2595 		if (p == NULL) {
2596 			return NULL;
2597 		}
2598 		arena_prof_promote(tsdn, p, usize);
2599 	} else {
2600 		p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
2601 		    tcache, arena);
2602 	}
2603 
2604 	return p;
2605 }
2606 
2607 JEMALLOC_ALWAYS_INLINE void *
2608 irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
2609     size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
2610     arena_t *arena, alloc_ctx_t *alloc_ctx) {
2611 	void *p;
2612 	bool prof_active;
2613 	prof_tctx_t *old_tctx, *tctx;
2614 
2615 	prof_active = prof_active_get_unlocked();
2616 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2617 	tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
2618 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2619 		p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
2620 		    *usize, alignment, zero, tcache, arena, tctx);
2621 	} else {
2622 		p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
2623 		    zero, tcache, arena);
2624 	}
2625 	if (unlikely(p == NULL)) {
2626 		prof_alloc_rollback(tsd, tctx, false);
2627 		return NULL;
2628 	}
2629 
2630 	if (p == old_ptr && alignment != 0) {
2631 		/*
2632 		 * The allocation did not move, so it is possible that the size
2633 		 * class is smaller than would guarantee the requested
2634 		 * alignment, and that the alignment constraint was
2635 		 * serendipitously satisfied.  Additionally, old_usize may not
2636 		 * be the same as the current usize because of in-place large
2637 		 * reallocation.  Therefore, query the actual value of usize.
2638 		 */
2639 		*usize = isalloc(tsd_tsdn(tsd), p);
2640 	}
2641 	prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
2642 	    old_usize, old_tctx);
2643 
2644 	return p;
2645 }
2646 
2647 JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2648 void JEMALLOC_NOTHROW *
2649 JEMALLOC_ALLOC_SIZE(2)
2650 je_rallocx(void *ptr, size_t size, int flags) {
2651 	void *p;
2652 	tsd_t *tsd;
2653 	size_t usize;
2654 	size_t old_usize;
2655 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2656 	bool zero = flags & MALLOCX_ZERO;
2657 	arena_t *arena;
2658 	tcache_t *tcache;
2659 
2660 	LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
2661 	    size, flags);
2662 
2664 	assert(ptr != NULL);
2665 	assert(size != 0);
2666 	assert(malloc_initialized() || IS_INITIALIZER);
2667 	tsd = tsd_fetch();
2668 	check_entry_exit_locking(tsd_tsdn(tsd));
2669 
2670 	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
2671 		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
2672 		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
2673 		if (unlikely(arena == NULL)) {
2674 			goto label_oom;
2675 		}
2676 	} else {
2677 		arena = NULL;
2678 	}
2679 
2680 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2681 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2682 			tcache = NULL;
2683 		} else {
2684 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2685 		}
2686 	} else {
2687 		tcache = tcache_get(tsd);
2688 	}
2689 
2690 	alloc_ctx_t alloc_ctx;
2691 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2692 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2693 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2694 	assert(alloc_ctx.szind != NSIZES);
2695 	old_usize = sz_index2size(alloc_ctx.szind);
2696 	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2697 	if (config_prof && opt_prof) {
2698 		usize = (alignment == 0) ?
2699 		    sz_s2u(size) : sz_sa2u(size, alignment);
2700 		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
2701 			goto label_oom;
2702 		}
2703 		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
2704 		    zero, tcache, arena, &alloc_ctx);
2705 		if (unlikely(p == NULL)) {
2706 			goto label_oom;
2707 		}
2708 	} else {
2709 		p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
2710 		    zero, tcache, arena);
2711 		if (unlikely(p == NULL)) {
2712 			goto label_oom;
2713 		}
2714 		if (config_stats) {
2715 			usize = isalloc(tsd_tsdn(tsd), p);
2716 		}
2717 	}
2718 	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
2719 
2720 	if (config_stats) {
2721 		*tsd_thread_allocatedp_get(tsd) += usize;
2722 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2723 	}
2724 	UTRACE(ptr, size, p);
2725 	check_entry_exit_locking(tsd_tsdn(tsd));
2726 
2727 	LOG("core.rallocx.exit", "result: %p", p);
2728 	return p;
2729 label_oom:
2730 	if (config_xmalloc && unlikely(opt_xmalloc)) {
2731 		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
2732 		abort();
2733 	}
2734 	UTRACE(ptr, size, 0);
2735 	check_entry_exit_locking(tsd_tsdn(tsd));
2736 
2737 	LOG("core.rallocx.exit", "result: %p", NULL);
2738 	return NULL;
2739 }
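
/*
 * Usage sketch (illustrative only): unlike xallocx(), rallocx() may move the
 * allocation, and on failure the original allocation remains valid.  The
 * example assumes FreeBSD's <malloc_np.h>.
 */
#if 0
#include <malloc_np.h>

int
example_rallocx(void) {
	char *p = mallocx(64, MALLOCX_ZERO);
	if (p == NULL) {
		return 1;
	}
	char *q = rallocx(p, 256, MALLOCX_ALIGN(64));
	if (q == NULL) {
		dallocx(p, 0);	/* p was left untouched by the failure. */
		return 1;
	}
	dallocx(q, 0);
	return 0;
}
#endif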
2740 
2741 JEMALLOC_ALWAYS_INLINE size_t
2742 ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2743     size_t extra, size_t alignment, bool zero) {
2744 	size_t usize;
2745 
2746 	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) {
2747 		return old_usize;
2748 	}
2749 	usize = isalloc(tsdn, ptr);
2750 
2751 	return usize;
2752 }
2753 
2754 static size_t
2755 ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
2756     size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
2757 	size_t usize;
2758 
2759 	if (tctx == NULL) {
2760 		return old_usize;
2761 	}
2762 	usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
2763 	    zero);
2764 
2765 	return usize;
2766 }
2767 
2768 JEMALLOC_ALWAYS_INLINE size_t
2769 ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
2770     size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
2771 	size_t usize_max, usize;
2772 	bool prof_active;
2773 	prof_tctx_t *old_tctx, *tctx;
2774 
2775 	prof_active = prof_active_get_unlocked();
2776 	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
2777 	/*
2778 	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
2779 	 * Therefore, compute its maximum possible value and use that in
2780 	 * prof_alloc_prep() to decide whether to capture a backtrace.
2781 	 * prof_realloc() will use the actual usize to decide whether to sample.
2782 	 */
2783 	if (alignment == 0) {
2784 		usize_max = sz_s2u(size+extra);
2785 		assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
2786 	} else {
2787 		usize_max = sz_sa2u(size+extra, alignment);
2788 		if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
2789 			/*
2790 			 * usize_max is out of range, and chances are that
2791 			 * allocation will fail, but use the maximum possible
2792 			 * value and carry on with prof_alloc_prep(), just in
2793 			 * case allocation succeeds.
2794 			 */
2795 			usize_max = LARGE_MAXCLASS;
2796 		}
2797 	}
2798 	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);
2799 
2800 	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2801 		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
2802 		    size, extra, alignment, zero, tctx);
2803 	} else {
2804 		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2805 		    extra, alignment, zero);
2806 	}
2807 	if (usize == old_usize) {
2808 		prof_alloc_rollback(tsd, tctx, false);
2809 		return usize;
2810 	}
2811 	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
2812 	    old_tctx);
2813 
2814 	return usize;
2815 }
2816 
2817 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2818 je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
2819 	tsd_t *tsd;
2820 	size_t usize, old_usize;
2821 	size_t alignment = MALLOCX_ALIGN_GET(flags);
2822 	bool zero = flags & MALLOCX_ZERO;
2823 
2824 	LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
2825 	    "flags: %d", ptr, size, extra, flags);
2826 
2827 	assert(ptr != NULL);
2828 	assert(size != 0);
2829 	assert(SIZE_T_MAX - size >= extra);
2830 	assert(malloc_initialized() || IS_INITIALIZER);
2831 	tsd = tsd_fetch();
2832 	check_entry_exit_locking(tsd_tsdn(tsd));
2833 
2834 	alloc_ctx_t alloc_ctx;
2835 	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
2836 	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
2837 	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
2838 	assert(alloc_ctx.szind != NSIZES);
2839 	old_usize = sz_index2size(alloc_ctx.szind);
2840 	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
2841 	/*
2842 	 * The API explicitly absolves itself of protecting against (size +
2843 	 * extra) numerical overflow, but we may need to clamp extra to avoid
2844 	 * exceeding LARGE_MAXCLASS.
2845 	 *
2846 	 * Ordinarily, size limit checking is handled deeper down, but here we
2847 	 * have to check as part of (size + extra) clamping, since we need the
2848 	 * clamped value in the above helper functions.
2849 	 */
2850 	if (unlikely(size > LARGE_MAXCLASS)) {
2851 		usize = old_usize;
2852 		goto label_not_resized;
2853 	}
2854 	if (unlikely(LARGE_MAXCLASS - size < extra)) {
2855 		extra = LARGE_MAXCLASS - size;
2856 	}
2857 
2858 	if (config_prof && opt_prof) {
2859 		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
2860 		    alignment, zero, &alloc_ctx);
2861 	} else {
2862 		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
2863 		    extra, alignment, zero);
2864 	}
2865 	if (unlikely(usize == old_usize)) {
2866 		goto label_not_resized;
2867 	}
2868 
2869 	if (config_stats) {
2870 		*tsd_thread_allocatedp_get(tsd) += usize;
2871 		*tsd_thread_deallocatedp_get(tsd) += old_usize;
2872 	}
2873 label_not_resized:
2874 	UTRACE(ptr, size, ptr);
2875 	check_entry_exit_locking(tsd_tsdn(tsd));
2876 
2877 	LOG("core.xallocx.exit", "result: %zu", usize);
2878 	return usize;
2879 }
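
/*
 * Usage sketch (illustrative only): xallocx() resizes in place only and
 * returns the resulting usable size, which equals the old usable size when
 * nothing could be done.  The example assumes FreeBSD's <malloc_np.h>.
 */
#if 0
#include <malloc_np.h>
#include <stddef.h>

int
example_xallocx(void) {
	void *p = mallocx(4096, 0);
	if (p == NULL) {
		return 1;
	}
	/* Grow to at least 8 KiB, accepting up to 4 KiB of extra slack. */
	size_t new_usize = xallocx(p, 8192, 4096, 0);
	int grown = (new_usize >= 8192);
	dallocx(p, 0);
	return grown ? 0 : 1;
}
#endif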
2880 
2881 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
2882 JEMALLOC_ATTR(pure)
2883 je_sallocx(const void *ptr, UNUSED int flags) {
2884 	size_t usize;
2885 	tsdn_t *tsdn;
2886 
2887 	LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2888 
2889 	assert(malloc_initialized() || IS_INITIALIZER);
2890 	assert(ptr != NULL);
2891 
2892 	tsdn = tsdn_fetch();
2893 	check_entry_exit_locking(tsdn);
2894 
2895 	if (config_debug || force_ivsalloc) {
2896 		usize = ivsalloc(tsdn, ptr);
2897 		assert(force_ivsalloc || usize != 0);
2898 	} else {
2899 		usize = isalloc(tsdn, ptr);
2900 	}
2901 
2902 	check_entry_exit_locking(tsdn);
2903 
2904 	LOG("core.sallocx.exit", "result: %zu", usize);
2905 	return usize;
2906 }
2907 
2908 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2909 je_dallocx(void *ptr, int flags) {
2910 	LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);
2911 
2912 	assert(ptr != NULL);
2913 	assert(malloc_initialized() || IS_INITIALIZER);
2914 
2915 	tsd_t *tsd = tsd_fetch();
2916 	bool fast = tsd_fast(tsd);
2917 	check_entry_exit_locking(tsd_tsdn(tsd));
2918 
2919 	tcache_t *tcache;
2920 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2921 		/* Not allowed to be reentrant and specify a custom tcache. */
2922 		assert(tsd_reentrancy_level_get(tsd) == 0);
2923 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2924 			tcache = NULL;
2925 		} else {
2926 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2927 		}
2928 	} else {
2929 		if (likely(fast)) {
2930 			tcache = tsd_tcachep_get(tsd);
2931 			assert(tcache == tcache_get(tsd));
2932 		} else {
2933 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2934 				tcache = tcache_get(tsd);
2935 			} else {
2936 				tcache = NULL;
2937 			}
2938 		}
2939 	}
2940 
2941 	UTRACE(ptr, 0, 0);
2942 	if (likely(fast)) {
2943 		tsd_assert_fast(tsd);
2944 		ifree(tsd, ptr, tcache, false);
2945 	} else {
2946 		ifree(tsd, ptr, tcache, true);
2947 	}
2948 	check_entry_exit_locking(tsd_tsdn(tsd));
2949 
2950 	LOG("core.dallocx.exit", "");
2951 }
2952 
2953 JEMALLOC_ALWAYS_INLINE size_t
2954 inallocx(tsdn_t *tsdn, size_t size, int flags) {
2955 	check_entry_exit_locking(tsdn);
2956 
2957 	size_t usize;
2958 	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
2959 		usize = sz_s2u(size);
2960 	} else {
2961 		usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
2962 	}
2963 	check_entry_exit_locking(tsdn);
2964 	return usize;
2965 }
2966 
2967 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
2968 je_sdallocx(void *ptr, size_t size, int flags) {
2969 	assert(ptr != NULL);
2970 	assert(malloc_initialized() || IS_INITIALIZER);
2971 
2972 	LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
2973 	    size, flags);
2974 
2975 	tsd_t *tsd = tsd_fetch();
2976 	bool fast = tsd_fast(tsd);
2977 	size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
2978 	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
2979 	check_entry_exit_locking(tsd_tsdn(tsd));
2980 
2981 	tcache_t *tcache;
2982 	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
2983 		/* Not allowed to be reentrant and specify a custom tcache. */
2984 		assert(tsd_reentrancy_level_get(tsd) == 0);
2985 		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
2986 			tcache = NULL;
2987 		} else {
2988 			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
2989 		}
2990 	} else {
2991 		if (likely(fast)) {
2992 			tcache = tsd_tcachep_get(tsd);
2993 			assert(tcache == tcache_get(tsd));
2994 		} else {
2995 			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2996 				tcache = tcache_get(tsd);
2997 			} else {
2998 				tcache = NULL;
2999 			}
3000 		}
3001 	}
3002 
3003 	UTRACE(ptr, 0, 0);
3004 	if (likely(fast)) {
3005 		tsd_assert_fast(tsd);
3006 		isfree(tsd, ptr, usize, tcache, false);
3007 	} else {
3008 		isfree(tsd, ptr, usize, tcache, true);
3009 	}
3010 	check_entry_exit_locking(tsd_tsdn(tsd));
3011 
3012 	LOG("core.sdallocx.exit", "");
3013 }
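
/*
 * Usage sketch (illustrative only): sized deallocation lets the free path
 * skip the size lookup.  Any size between the original request and the
 * usable size is valid for the same flags.  Assumes FreeBSD's <malloc_np.h>.
 */
#if 0
#include <malloc_np.h>

int
example_sdallocx(void) {
	void *p = mallocx(100, 0);
	if (p == NULL) {
		return 1;
	}
	sdallocx(p, 100, 0);	/* 100 is the originally requested size. */
	return 0;
}
#endif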
3014 
3015 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3016 JEMALLOC_ATTR(pure)
3017 je_nallocx(size_t size, int flags) {
3018 	size_t usize;
3019 	tsdn_t *tsdn;
3020 
3021 	assert(size != 0);
3022 
3023 	if (unlikely(malloc_init())) {
3024 		LOG("core.nallocx.exit", "result: %zu", ZU(0));
3025 		return 0;
3026 	}
3027 
3028 	tsdn = tsdn_fetch();
3029 	check_entry_exit_locking(tsdn);
3030 
3031 	usize = inallocx(tsdn, size, flags);
3032 	if (unlikely(usize > LARGE_MAXCLASS)) {
3033 		LOG("core.nallocx.exit", "result: %zu", ZU(0));
3034 		return 0;
3035 	}
3036 
3037 	check_entry_exit_locking(tsdn);
3038 	LOG("core.nallocx.exit", "result: %zu", usize);
3039 	return usize;
3040 }
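
/*
 * Usage sketch (illustrative only): nallocx() performs the same size-class
 * computation as mallocx() without allocating, returning 0 for requests that
 * exceed the maximum size class.  Assumes FreeBSD's <malloc_np.h>.
 */
#if 0
#include <malloc_np.h>
#include <stdio.h>

void
example_nallocx(void) {
	size_t usize = nallocx(100, 0);
	printf("a 100-byte request occupies %zu bytes\n", usize);
}
#endif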
3041 
3042 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3043 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
3044     size_t newlen) {
3045 	int ret;
3046 	tsd_t *tsd;
3047 
3048 	LOG("core.mallctl.entry", "name: %s", name);
3049 
3050 	if (unlikely(malloc_init())) {
3051 		LOG("core.mallctl.exit", "result: %d", EAGAIN);
3052 		return EAGAIN;
3053 	}
3054 
3055 	tsd = tsd_fetch();
3056 	check_entry_exit_locking(tsd_tsdn(tsd));
3057 	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
3058 	check_entry_exit_locking(tsd_tsdn(tsd));
3059 
3060 	LOG("core.mallctl.exit", "result: %d", ret);
3061 	return ret;
3062 }
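
/*
 * Usage sketch (illustrative only): mallctl() reads and/or writes named
 * control values; "arenas.narenas" yields the number of arenas as an
 * unsigned.  Assumes FreeBSD's <malloc_np.h>.
 */
#if 0
#include <malloc_np.h>
#include <stdio.h>

void
example_mallctl(void) {
	unsigned narenas;
	size_t len = sizeof(narenas);
	if (mallctl("arenas.narenas", &narenas, &len, NULL, 0) == 0) {
		printf("narenas: %u\n", narenas);
	}
}
#endif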
3063 
3064 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3065 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
3066 	int ret;
3067 
3068 	LOG("core.mallctlnametomib.entry", "name: %s", name);
3069 
3070 	if (unlikely(malloc_init())) {
3071 		LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
3072 		return EAGAIN;
3073 	}
3074 
3075 	tsd_t *tsd = tsd_fetch();
3076 	check_entry_exit_locking(tsd_tsdn(tsd));
3077 	ret = ctl_nametomib(tsd, name, mibp, miblenp);
3078 	check_entry_exit_locking(tsd_tsdn(tsd));
3079 
3080 	LOG("core.mallctlnametomib.exit", "result: %d", ret);
3081 	return ret;
3082 }
3083 
3084 JEMALLOC_EXPORT int JEMALLOC_NOTHROW
3085 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
3086   void *newp, size_t newlen) {
3087 	int ret;
3088 	tsd_t *tsd;
3089 
3090 	LOG("core.mallctlbymib.entry", "");
3091 
3092 	if (unlikely(malloc_init())) {
3093 		LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
3094 		return EAGAIN;
3095 	}
3096 
3097 	tsd = tsd_fetch();
3098 	check_entry_exit_locking(tsd_tsdn(tsd));
3099 	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
3100 	check_entry_exit_locking(tsd_tsdn(tsd));
3101 	LOG("core.mallctlbymib.exit", "result: %d", ret);
3102 	return ret;
3103 }
3104 
3105 JEMALLOC_EXPORT void JEMALLOC_NOTHROW
3106 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
3107     const char *opts) {
3108 	tsdn_t *tsdn;
3109 
3110 	LOG("core.malloc_stats_print.entry", "");
3111 
3112 	tsdn = tsdn_fetch();
3113 	check_entry_exit_locking(tsdn);
3114 	stats_print(write_cb, cbopaque, opts);
3115 	check_entry_exit_locking(tsdn);
3116 	LOG("core.malloc_stats_print.exit", "");
3117 }
3118 
3119 JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
3120 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
3121 	size_t ret;
3122 	tsdn_t *tsdn;
3123 
3124 	LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
3125 
3126 	assert(malloc_initialized() || IS_INITIALIZER);
3127 
3128 	tsdn = tsdn_fetch();
3129 	check_entry_exit_locking(tsdn);
3130 
3131 	if (unlikely(ptr == NULL)) {
3132 		ret = 0;
3133 	} else {
3134 		if (config_debug || force_ivsalloc) {
3135 			ret = ivsalloc(tsdn, ptr);
3136 			assert(force_ivsalloc || ret != 0);
3137 		} else {
3138 			ret = isalloc(tsdn, ptr);
3139 		}
3140 	}
3141 
3142 	check_entry_exit_locking(tsdn);
3143 	LOG("core.malloc_usable_size.exit", "result: %zu", ret);
3144 	return ret;
3145 }
3146 
3147 /*
3148  * End non-standard functions.
3149  */
3150 /******************************************************************************/
3151 /*
3152  * Begin compatibility functions.
3153  */
3154 
3155 #define	ALLOCM_LG_ALIGN(la)	(la)
3156 #define	ALLOCM_ALIGN(a)		(ffsl(a)-1)
3157 #define	ALLOCM_ZERO		((int)0x40)
3158 #define	ALLOCM_NO_MOVE		((int)0x80)
3159 
3160 #define	ALLOCM_SUCCESS		0
3161 #define	ALLOCM_ERR_OOM		1
3162 #define	ALLOCM_ERR_NOT_MOVED	2
3163 
3164 int
3165 je_allocm(void **ptr, size_t *rsize, size_t size, int flags) {
3166 	assert(ptr != NULL);
3167 
3168 	void *p = je_mallocx(size, flags);
3169 	if (p == NULL) {
3170 		return (ALLOCM_ERR_OOM);
3171 	}
3172 	if (rsize != NULL) {
3173 		*rsize = isalloc(tsdn_fetch(), p);
3174 	}
3175 	*ptr = p;
3176 	return ALLOCM_SUCCESS;
3177 }
3178 
3179 int
3180 je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) {
3181 	assert(ptr != NULL);
3182 	assert(*ptr != NULL);
3183 	assert(size != 0);
3184 	assert(SIZE_T_MAX - size >= extra);
3185 
3186 	int ret;
3187 	bool no_move = flags & ALLOCM_NO_MOVE;
3188 
3189 	if (no_move) {
3190 		size_t usize = je_xallocx(*ptr, size, extra, flags);
3191 		ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
3192 		if (rsize != NULL) {
3193 			*rsize = usize;
3194 		}
3195 	} else {
3196 		void *p = je_rallocx(*ptr, size+extra, flags);
3197 		if (p != NULL) {
3198 			*ptr = p;
3199 			ret = ALLOCM_SUCCESS;
3200 		} else {
3201 			ret = ALLOCM_ERR_OOM;
3202 		}
3203 		if (rsize != NULL) {
3204 			*rsize = isalloc(tsdn_fetch(), *ptr);
3205 		}
3206 	}
3207 	return ret;
3208 }
3209 
3210 int
3211 je_sallocm(const void *ptr, size_t *rsize, int flags) {
3212 	assert(rsize != NULL);
3213 	*rsize = je_sallocx(ptr, flags);
3214 	return ALLOCM_SUCCESS;
3215 }
3216 
3217 int
3218 je_dallocm(void *ptr, int flags) {
3219 	je_dallocx(ptr, flags);
3220 	return ALLOCM_SUCCESS;
3221 }
3222 
3223 int
3224 je_nallocm(size_t *rsize, size_t size, int flags) {
3225 	size_t usize = je_nallocx(size, flags);
3226 	if (usize == 0) {
3227 		return ALLOCM_ERR_OOM;
3228 	}
3229 	if (rsize != NULL) {
3230 		*rsize = usize;
3231 	}
3232 	return ALLOCM_SUCCESS;
3233 }
3234 
3235 #undef ALLOCM_LG_ALIGN
3236 #undef ALLOCM_ALIGN
3237 #undef ALLOCM_ZERO
3238 #undef ALLOCM_NO_MOVE
3239 
3240 #undef ALLOCM_SUCCESS
3241 #undef ALLOCM_ERR_OOM
3242 #undef ALLOCM_ERR_NOT_MOVED
3243 
3244 /*
3245  * End compatibility functions.
3246  */
3247 /******************************************************************************/
3248 /*
3249  * The following functions are used by threading libraries for protection of
3250  * malloc during fork().
3251  */
3252 
3253 /*
3254  * If an application creates a thread before doing any allocation in the main
3255  * thread, then calls fork(2) in the main thread followed by memory allocation
3256  * in the child process, a race can occur that results in deadlock within the
3257  * child: the main thread may have forked while the created thread had
3258  * partially initialized the allocator.  Ordinarily jemalloc prevents
3259  * fork/malloc races via the following functions it registers during
3260  * initialization using pthread_atfork(), but of course that does no good if
3261  * the allocator isn't fully initialized at fork time.  The following library
3262  * constructor is a partial solution to this problem.  It may still be possible
3263  * to trigger the deadlock described above, but doing so would involve forking
3264  * via a library constructor that runs before jemalloc's runs.
3265  */
3266 #ifndef JEMALLOC_JET
3267 JEMALLOC_ATTR(constructor)
3268 static void
3269 jemalloc_constructor(void) {
3270 	malloc_init();
3271 }
3272 #endif
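
/*
 * Minimal sketch (illustrative only) of the pthread_atfork() pattern
 * described in the comment above; the handler and function names are
 * hypothetical stand-ins, not the registration performed by this file.
 */
#if 0
#include <pthread.h>

static void example_prefork(void) { /* Acquire all allocator mutexes. */ }
static void example_postfork_parent(void) { /* Release them in the parent. */ }
static void example_postfork_child(void) { /* Reinitialize them in the child. */ }

static void
example_register_atfork(void) {
	/*
	 * With the handlers registered, no allocator mutex is held across
	 * fork(), so the child cannot inherit a lock owned by a thread that
	 * does not exist in its address space.
	 */
	pthread_atfork(example_prefork, example_postfork_parent,
	    example_postfork_child);
}
#endif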
3273 
3274 #ifndef JEMALLOC_MUTEX_INIT_CB
3275 void
3276 jemalloc_prefork(void)
3277 #else
3278 JEMALLOC_EXPORT void
3279 _malloc_prefork(void)
3280 #endif
3281 {
3282 	tsd_t *tsd;
3283 	unsigned i, j, narenas;
3284 	arena_t *arena;
3285 
3286 #ifdef JEMALLOC_MUTEX_INIT_CB
3287 	if (!malloc_initialized()) {
3288 		return;
3289 	}
3290 #endif
3291 	assert(malloc_initialized());
3292 
3293 	tsd = tsd_fetch();
3294 
3295 	narenas = narenas_total_get();
3296 
3297 	witness_prefork(tsd_witness_tsdp_get(tsd));
3298 	/* Acquire all mutexes in a safe order. */
3299 	ctl_prefork(tsd_tsdn(tsd));
3300 	tcache_prefork(tsd_tsdn(tsd));
3301 	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
3302 	if (have_background_thread) {
3303 		background_thread_prefork0(tsd_tsdn(tsd));
3304 	}
3305 	prof_prefork0(tsd_tsdn(tsd));
3306 	if (have_background_thread) {
3307 		background_thread_prefork1(tsd_tsdn(tsd));
3308 	}
3309 	/* Break arena prefork into stages to preserve lock order. */
3310 	for (i = 0; i < 8; i++) {
3311 		for (j = 0; j < narenas; j++) {
3312 			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
3313 			    NULL) {
3314 				switch (i) {
3315 				case 0:
3316 					arena_prefork0(tsd_tsdn(tsd), arena);
3317 					break;
3318 				case 1:
3319 					arena_prefork1(tsd_tsdn(tsd), arena);
3320 					break;
3321 				case 2:
3322 					arena_prefork2(tsd_tsdn(tsd), arena);
3323 					break;
3324 				case 3:
3325 					arena_prefork3(tsd_tsdn(tsd), arena);
3326 					break;
3327 				case 4:
3328 					arena_prefork4(tsd_tsdn(tsd), arena);
3329 					break;
3330 				case 5:
3331 					arena_prefork5(tsd_tsdn(tsd), arena);
3332 					break;
3333 				case 6:
3334 					arena_prefork6(tsd_tsdn(tsd), arena);
3335 					break;
3336 				case 7:
3337 					arena_prefork7(tsd_tsdn(tsd), arena);
3338 					break;
3339 				default: not_reached();
3340 				}
3341 			}
3342 		}
3343 	}
3344 	prof_prefork1(tsd_tsdn(tsd));
3345 }
3346 
3347 #ifndef JEMALLOC_MUTEX_INIT_CB
3348 void
3349 jemalloc_postfork_parent(void)
3350 #else
3351 JEMALLOC_EXPORT void
3352 _malloc_postfork(void)
3353 #endif
3354 {
3355 	tsd_t *tsd;
3356 	unsigned i, narenas;
3357 
3358 #ifdef JEMALLOC_MUTEX_INIT_CB
3359 	if (!malloc_initialized()) {
3360 		return;
3361 	}
3362 #endif
3363 	assert(malloc_initialized());
3364 
3365 	tsd = tsd_fetch();
3366 
3367 	witness_postfork_parent(tsd_witness_tsdp_get(tsd));
3368 	/* Release all mutexes, now that fork() has completed. */
3369 	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3370 		arena_t *arena;
3371 
3372 		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3373 			arena_postfork_parent(tsd_tsdn(tsd), arena);
3374 		}
3375 	}
3376 	prof_postfork_parent(tsd_tsdn(tsd));
3377 	if (have_background_thread) {
3378 		background_thread_postfork_parent(tsd_tsdn(tsd));
3379 	}
3380 	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
3381 	tcache_postfork_parent(tsd_tsdn(tsd));
3382 	ctl_postfork_parent(tsd_tsdn(tsd));
3383 }
3384 
3385 void
3386 jemalloc_postfork_child(void) {
3387 	tsd_t *tsd;
3388 	unsigned i, narenas;
3389 
3390 	assert(malloc_initialized());
3391 
3392 	tsd = tsd_fetch();
3393 
3394 	witness_postfork_child(tsd_witness_tsdp_get(tsd));
3395 	/* Release all mutexes, now that fork() has completed. */
3396 	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
3397 		arena_t *arena;
3398 
3399 		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
3400 			arena_postfork_child(tsd_tsdn(tsd), arena);
3401 		}
3402 	}
3403 	prof_postfork_child(tsd_tsdn(tsd));
3404 	if (have_background_thread) {
3405 		background_thread_postfork_child(tsd_tsdn(tsd));
3406 	}
3407 	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
3408 	tcache_postfork_child(tsd_tsdn(tsd));
3409 	ctl_postfork_child(tsd_tsdn(tsd));
3410 }
3411 
3412 void
3413 _malloc_first_thread(void)
3414 {
3415 
3416 	(void)malloc_mutex_first_thread();
3417 }
3418 
3419 /******************************************************************************/
3420