1a4bd5210SJason Evans #define JEMALLOC_C_
2b7eaed25SJason Evans #include "jemalloc/internal/jemalloc_preamble.h"
3b7eaed25SJason Evans #include "jemalloc/internal/jemalloc_internal_includes.h"
4b7eaed25SJason Evans
5b7eaed25SJason Evans #include "jemalloc/internal/assert.h"
6b7eaed25SJason Evans #include "jemalloc/internal/atomic.h"
7b7eaed25SJason Evans #include "jemalloc/internal/ctl.h"
8b7eaed25SJason Evans #include "jemalloc/internal/extent_dss.h"
9b7eaed25SJason Evans #include "jemalloc/internal/extent_mmap.h"
10*c5ad8142SEric van Gyzen #include "jemalloc/internal/hook.h"
11b7eaed25SJason Evans #include "jemalloc/internal/jemalloc_internal_types.h"
120ef50b4eSJason Evans #include "jemalloc/internal/log.h"
13b7eaed25SJason Evans #include "jemalloc/internal/malloc_io.h"
14b7eaed25SJason Evans #include "jemalloc/internal/mutex.h"
15b7eaed25SJason Evans #include "jemalloc/internal/rtree.h"
16*c5ad8142SEric van Gyzen #include "jemalloc/internal/safety_check.h"
17*c5ad8142SEric van Gyzen #include "jemalloc/internal/sc.h"
18b7eaed25SJason Evans #include "jemalloc/internal/spin.h"
19b7eaed25SJason Evans #include "jemalloc/internal/sz.h"
20b7eaed25SJason Evans #include "jemalloc/internal/ticker.h"
21b7eaed25SJason Evans #include "jemalloc/internal/util.h"
22a4bd5210SJason Evans
23a4bd5210SJason Evans /******************************************************************************/
24a4bd5210SJason Evans /* Data. */
25a4bd5210SJason Evans
264fdb8d2aSDimitry Andric /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
274fdb8d2aSDimitry Andric const char *__malloc_options_1_0 = NULL;
28a4bd5210SJason Evans __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);
29a4bd5210SJason Evans
30a4bd5210SJason Evans /* Runtime configuration options. */
31bde95144SJason Evans const char *je_malloc_conf
32bde95144SJason Evans #ifndef _WIN32
33bde95144SJason Evans JEMALLOC_ATTR(weak)
34bde95144SJason Evans #endif
35bde95144SJason Evans ;
3688ad2f8dSJason Evans bool opt_abort =
37a4bd5210SJason Evans #ifdef JEMALLOC_DEBUG
3888ad2f8dSJason Evans true
39a4bd5210SJason Evans #else
4088ad2f8dSJason Evans false
41a4bd5210SJason Evans #endif
4288ad2f8dSJason Evans ;
43b7eaed25SJason Evans bool opt_abort_conf =
44b7eaed25SJason Evans #ifdef JEMALLOC_DEBUG
45b7eaed25SJason Evans true
46b7eaed25SJason Evans #else
47b7eaed25SJason Evans false
48b7eaed25SJason Evans #endif
49b7eaed25SJason Evans ;
50*c5ad8142SEric van Gyzen /* Intentionally default off, even with debug builds. */
51*c5ad8142SEric van Gyzen bool opt_confirm_conf = false;
52d0e79aa3SJason Evans const char *opt_junk =
53d0e79aa3SJason Evans #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
54d0e79aa3SJason Evans "true"
55d0e79aa3SJason Evans #else
56d0e79aa3SJason Evans "false"
57d0e79aa3SJason Evans #endif
58d0e79aa3SJason Evans ;
59d0e79aa3SJason Evans bool opt_junk_alloc =
6088ad2f8dSJason Evans #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
6188ad2f8dSJason Evans true
62a4bd5210SJason Evans #else
6388ad2f8dSJason Evans false
64a4bd5210SJason Evans #endif
6588ad2f8dSJason Evans ;
66d0e79aa3SJason Evans bool opt_junk_free =
67d0e79aa3SJason Evans #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
68d0e79aa3SJason Evans true
69d0e79aa3SJason Evans #else
70d0e79aa3SJason Evans false
71d0e79aa3SJason Evans #endif
72d0e79aa3SJason Evans ;
73d0e79aa3SJason Evans
74a4bd5210SJason Evans bool opt_utrace = false;
75a4bd5210SJason Evans bool opt_xmalloc = false;
76a4bd5210SJason Evans bool opt_zero = false;
77df0d881dSJason Evans unsigned opt_narenas = 0;
78a4bd5210SJason Evans
79a4bd5210SJason Evans unsigned ncpus;
80a4bd5210SJason Evans
81df0d881dSJason Evans /* Protects arenas initialization. */
82b7eaed25SJason Evans malloc_mutex_t arenas_lock;
83d0e79aa3SJason Evans /*
84d0e79aa3SJason Evans * Arenas that are used to service external requests. Not all elements of the
85d0e79aa3SJason Evans * arenas array are necessarily used; arenas are created lazily as needed.
86d0e79aa3SJason Evans *
87d0e79aa3SJason Evans * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
88d0e79aa3SJason Evans * arenas. arenas[narenas_auto..narenas_total) are only used if the application
89d0e79aa3SJason Evans * takes some action to create them and allocate from them.
90b7eaed25SJason Evans *
91b7eaed25SJason Evans * Points to an arena_t.
92d0e79aa3SJason Evans */
93b7eaed25SJason Evans JEMALLOC_ALIGNED(CACHELINE)
94b7eaed25SJason Evans atomic_p_t arenas[MALLOCX_ARENA_LIMIT];
95b7eaed25SJason Evans static atomic_u_t narenas_total; /* Use narenas_total_*(). */
96*c5ad8142SEric van Gyzen /* Below three are read-only after initialization. */
97*c5ad8142SEric van Gyzen static arena_t *a0; /* arenas[0]. */
98*c5ad8142SEric van Gyzen unsigned narenas_auto;
99*c5ad8142SEric van Gyzen unsigned manual_arena_base;
100a4bd5210SJason Evans
/*
 * Bootstrapping is tracked as a small state machine rather than a boolean;
 * values are chosen so that the common fully-initialized case compares
 * against zero (see the "jnz" note below).
 */
typedef enum {
	malloc_init_uninitialized	= 3,
	malloc_init_a0_initialized	= 2,
	malloc_init_recursible		= 1,
	malloc_init_initialized		= 0 /* Common case --> jnz. */
} malloc_init_t;
static malloc_init_t	malloc_init_state = malloc_init_uninitialized;

/* False should be the common case.  Set to true to trigger initialization. */
bool			malloc_slow = true;

/* When malloc_slow is true, set the corresponding bits for sanity check. */
enum {
	flag_opt_junk_alloc	= (1U),
	flag_opt_junk_free	= (1U << 1),
	flag_opt_zero		= (1U << 2),
	flag_opt_utrace		= (1U << 3),
	flag_opt_xmalloc	= (1U << 4)
};
static uint8_t	malloc_slow_flags;
121df0d881dSJason Evans
122a4bd5210SJason Evans #ifdef JEMALLOC_THREADED_INIT
123a4bd5210SJason Evans /* Used to let the initializing thread recursively allocate. */
124a4bd5210SJason Evans # define NO_INITIALIZER ((unsigned long)0)
125a4bd5210SJason Evans # define INITIALIZER pthread_self()
126a4bd5210SJason Evans # define IS_INITIALIZER (malloc_initializer == pthread_self())
127a4bd5210SJason Evans static pthread_t malloc_initializer = NO_INITIALIZER;
128a4bd5210SJason Evans #else
129a4bd5210SJason Evans # define NO_INITIALIZER false
130a4bd5210SJason Evans # define INITIALIZER true
131a4bd5210SJason Evans # define IS_INITIALIZER malloc_initializer
132a4bd5210SJason Evans static bool malloc_initializer = NO_INITIALIZER;
133a4bd5210SJason Evans #endif
134a4bd5210SJason Evans
135a4bd5210SJason Evans /* Used to avoid initialization races. */
136e722f8f8SJason Evans #ifdef _WIN32
137d0e79aa3SJason Evans #if _WIN32_WINNT >= 0x0600
138d0e79aa3SJason Evans static malloc_mutex_t init_lock = SRWLOCK_INIT;
139d0e79aa3SJason Evans #else
140e722f8f8SJason Evans static malloc_mutex_t init_lock;
141536b3538SJason Evans static bool init_lock_initialized = false;
142e722f8f8SJason Evans
JEMALLOC_ATTR(constructor)143e722f8f8SJason Evans JEMALLOC_ATTR(constructor)
144e722f8f8SJason Evans static void WINAPI
145b7eaed25SJason Evans _init_init_lock(void) {
146b7eaed25SJason Evans /*
147b7eaed25SJason Evans * If another constructor in the same binary is using mallctl to e.g.
148b7eaed25SJason Evans * set up extent hooks, it may end up running before this one, and
149b7eaed25SJason Evans * malloc_init_hard will crash trying to lock the uninitialized lock. So
150b7eaed25SJason Evans * we force an initialization of the lock in malloc_init_hard as well.
151b7eaed25SJason Evans * We don't try to care about atomicity of the accessed to the
152b7eaed25SJason Evans * init_lock_initialized boolean, since it really only matters early in
153b7eaed25SJason Evans * the process creation, before any separate thread normally starts
154b7eaed25SJason Evans * doing anything.
155b7eaed25SJason Evans */
156b7eaed25SJason Evans if (!init_lock_initialized) {
157b7eaed25SJason Evans malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
158b7eaed25SJason Evans malloc_mutex_rank_exclusive);
159b7eaed25SJason Evans }
160536b3538SJason Evans init_lock_initialized = true;
161e722f8f8SJason Evans }
162e722f8f8SJason Evans
163e722f8f8SJason Evans #ifdef _MSC_VER
164e722f8f8SJason Evans # pragma section(".CRT$XCU", read)
165e722f8f8SJason Evans JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
166e722f8f8SJason Evans static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
167e722f8f8SJason Evans #endif
168d0e79aa3SJason Evans #endif
169e722f8f8SJason Evans #else
170a4bd5210SJason Evans static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
171e722f8f8SJason Evans #endif
172a4bd5210SJason Evans
/* Event record passed to utrace(2) for each allocator call when opt_utrace. */
typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
/*
 * Emit a utrace(2) record for the triple (input ptr, size, result ptr).
 * errno is saved and restored around the utrace() syscall so that tracing
 * never perturbs the errno value the caller is about to observe.
 */
# define UTRACE(a, b, c) do {						\
	if (unlikely(opt_utrace)) {					\
		int utrace_serrno = errno;				\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
		errno = utrace_serrno;					\
	}								\
} while (0)
#else
# define UTRACE(a, b, c)
#endif
194a4bd5210SJason Evans
195b7eaed25SJason Evans /* Whether encountered any invalid config options. */
196b7eaed25SJason Evans static bool had_conf_error = false;
197b7eaed25SJason Evans
198a4bd5210SJason Evans /******************************************************************************/
199f921d10fSJason Evans /*
200f921d10fSJason Evans * Function prototypes for static functions that are referenced prior to
201f921d10fSJason Evans * definition.
202f921d10fSJason Evans */
203a4bd5210SJason Evans
204d0e79aa3SJason Evans static bool malloc_init_hard_a0(void);
205a4bd5210SJason Evans static bool malloc_init_hard(void);
206a4bd5210SJason Evans
207a4bd5210SJason Evans /******************************************************************************/
208a4bd5210SJason Evans /*
209a4bd5210SJason Evans * Begin miscellaneous support functions.
210a4bd5210SJason Evans */
211a4bd5210SJason Evans
212b7eaed25SJason Evans bool
malloc_initialized(void)213b7eaed25SJason Evans malloc_initialized(void) {
214d0e79aa3SJason Evans return (malloc_init_state == malloc_init_initialized);
215a4bd5210SJason Evans }
216d0e79aa3SJason Evans
217b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE bool
malloc_init_a0(void)218b7eaed25SJason Evans malloc_init_a0(void) {
219b7eaed25SJason Evans if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
220b7eaed25SJason Evans return malloc_init_hard_a0();
221b7eaed25SJason Evans }
222b7eaed25SJason Evans return false;
223a4bd5210SJason Evans }
224a4bd5210SJason Evans
225b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE bool
malloc_init(void)226b7eaed25SJason Evans malloc_init(void) {
227b7eaed25SJason Evans if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
228b7eaed25SJason Evans return true;
229d0e79aa3SJason Evans }
230b7eaed25SJason Evans return false;
231d0e79aa3SJason Evans }
232d0e79aa3SJason Evans
233d0e79aa3SJason Evans /*
2341f0a49e8SJason Evans * The a0*() functions are used instead of i{d,}alloc() in situations that
235d0e79aa3SJason Evans * cannot tolerate TLS variable access.
236d0e79aa3SJason Evans */
237d0e79aa3SJason Evans
238d0e79aa3SJason Evans static void *
a0ialloc(size_t size,bool zero,bool is_internal)239b7eaed25SJason Evans a0ialloc(size_t size, bool zero, bool is_internal) {
240b7eaed25SJason Evans if (unlikely(malloc_init_a0())) {
241b7eaed25SJason Evans return NULL;
242b7eaed25SJason Evans }
243d0e79aa3SJason Evans
244b7eaed25SJason Evans return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
245b7eaed25SJason Evans is_internal, arena_get(TSDN_NULL, 0, true), true);
246d0e79aa3SJason Evans }
247d0e79aa3SJason Evans
/* TLS-free deallocation counterpart to a0ialloc(). */
static void
a0idalloc(void *ptr, bool is_internal) {
	idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
}
252bde95144SJason Evans
/* Internal malloc that is safe before TLS is usable; allocates from a0. */
void *
a0malloc(size_t size) {
	return a0ialloc(size, false, true);
}
257d0e79aa3SJason Evans
/* Free memory obtained via a0malloc(); safe before TLS is usable. */
void
a0dalloc(void *ptr) {
	a0idalloc(ptr, true);
}
262d0e79aa3SJason Evans
263d0e79aa3SJason Evans /*
264d0e79aa3SJason Evans * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive
265d0e79aa3SJason Evans * situations that cannot tolerate TLS variable access (TLS allocation and very
266d0e79aa3SJason Evans * early internal data structure initialization).
267d0e79aa3SJason Evans */
268d0e79aa3SJason Evans
269d0e79aa3SJason Evans void *
bootstrap_malloc(size_t size)270b7eaed25SJason Evans bootstrap_malloc(size_t size) {
271b7eaed25SJason Evans if (unlikely(size == 0)) {
272d0e79aa3SJason Evans size = 1;
273b7eaed25SJason Evans }
274d0e79aa3SJason Evans
275b7eaed25SJason Evans return a0ialloc(size, false, false);
276d0e79aa3SJason Evans }
277d0e79aa3SJason Evans
278d0e79aa3SJason Evans void *
bootstrap_calloc(size_t num,size_t size)279b7eaed25SJason Evans bootstrap_calloc(size_t num, size_t size) {
280d0e79aa3SJason Evans size_t num_size;
281d0e79aa3SJason Evans
282d0e79aa3SJason Evans num_size = num * size;
283d0e79aa3SJason Evans if (unlikely(num_size == 0)) {
284d0e79aa3SJason Evans assert(num == 0 || size == 0);
285d0e79aa3SJason Evans num_size = 1;
286d0e79aa3SJason Evans }
287d0e79aa3SJason Evans
288b7eaed25SJason Evans return a0ialloc(num_size, true, false);
289d0e79aa3SJason Evans }
290d0e79aa3SJason Evans
291d0e79aa3SJason Evans void
bootstrap_free(void * ptr)292b7eaed25SJason Evans bootstrap_free(void *ptr) {
293b7eaed25SJason Evans if (unlikely(ptr == NULL)) {
294d0e79aa3SJason Evans return;
295b7eaed25SJason Evans }
296d0e79aa3SJason Evans
297d0e79aa3SJason Evans a0idalloc(ptr, false);
298d0e79aa3SJason Evans }
299d0e79aa3SJason Evans
/*
 * Publish arena into the global arenas array.  Release ordering makes the
 * fully constructed arena visible to readers that load the slot with
 * acquire semantics.
 */
void
arena_set(unsigned ind, arena_t *arena) {
	atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
}
304df0d881dSJason Evans
/* Set the arena count; release pairs with the acquire in narenas_total_get(). */
static void
narenas_total_set(unsigned narenas) {
	atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
}
309df0d881dSJason Evans
/* Atomically grow the arena count by one (release ordering on the store). */
static void
narenas_total_inc(void) {
	atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
}
314df0d881dSJason Evans
/* Read the current arena count with acquire ordering. */
unsigned
narenas_total_get(void) {
	return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
}
319df0d881dSJason Evans
/*
 * Create a new arena and insert it into the arenas array at index ind.
 * Caller must hold arenas_lock.  Returns the (possibly pre-existing) arena,
 * or NULL if ind is out of range or arena creation fails.
 */
static arena_t *
arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;

	assert(ind <= narenas_total_get());
	if (ind >= MALLOCX_ARENA_LIMIT) {
		return NULL;
	}
	/* Appending one slot past the current end grows the logical array. */
	if (ind == narenas_total_get()) {
		narenas_total_inc();
	}

	/*
	 * Another thread may have already initialized arenas[ind] if it's an
	 * auto arena.
	 */
	arena = arena_get(tsdn, ind, false);
	if (arena != NULL) {
		assert(arena_is_auto(arena));
		return arena;
	}

	/* Actually initialize the arena. */
	arena = arena_new(tsdn, ind, extent_hooks);

	return arena;
}
348d0e79aa3SJason Evans
349d0e79aa3SJason Evans static void
arena_new_create_background_thread(tsdn_t * tsdn,unsigned ind)350b7eaed25SJason Evans arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
351b7eaed25SJason Evans if (ind == 0) {
352b7eaed25SJason Evans return;
353b7eaed25SJason Evans }
354*c5ad8142SEric van Gyzen /*
355*c5ad8142SEric van Gyzen * Avoid creating a new background thread just for the huge arena, which
356*c5ad8142SEric van Gyzen * purges eagerly by default.
357*c5ad8142SEric van Gyzen */
358*c5ad8142SEric van Gyzen if (have_background_thread && !arena_is_huge(ind)) {
359*c5ad8142SEric van Gyzen if (background_thread_create(tsdn_tsd(tsdn), ind)) {
360b7eaed25SJason Evans malloc_printf("<jemalloc>: error in background thread "
361b7eaed25SJason Evans "creation for arena %u. Abort.\n", ind);
362b7eaed25SJason Evans abort();
363b7eaed25SJason Evans }
364b7eaed25SJason Evans }
365b7eaed25SJason Evans }
366b7eaed25SJason Evans
367b7eaed25SJason Evans arena_t *
arena_init(tsdn_t * tsdn,unsigned ind,extent_hooks_t * extent_hooks)368b7eaed25SJason Evans arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
369df0d881dSJason Evans arena_t *arena;
370d0e79aa3SJason Evans
371b7eaed25SJason Evans malloc_mutex_lock(tsdn, &arenas_lock);
372b7eaed25SJason Evans arena = arena_init_locked(tsdn, ind, extent_hooks);
373b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &arenas_lock);
374bde95144SJason Evans
375b7eaed25SJason Evans arena_new_create_background_thread(tsdn, ind);
376b7eaed25SJason Evans
377b7eaed25SJason Evans return arena;
378b7eaed25SJason Evans }
379b7eaed25SJason Evans
/*
 * Associate the calling thread with arenas[ind], bumping the arena's thread
 * count for either internal (metadata) or application allocation.
 */
static void
arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
	arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_inc(arena, internal);

	if (internal) {
		tsd_iarena_set(tsd, arena);
	} else {
		tsd_arena_set(tsd, arena);
		/*
		 * Round-robin this thread onto a bin shard for each size
		 * class.  Relaxed ordering suffices: the counter only spreads
		 * threads across shards and carries no synchronization.
		 */
		unsigned shard = atomic_fetch_add_u(&arena->binshard_next, 1,
		    ATOMIC_RELAXED);
		tsd_binshards_t *bins = tsd_binshardsp_get(tsd);
		for (unsigned i = 0; i < SC_NBINS; i++) {
			assert(bin_infos[i].n_shards > 0 &&
			    bin_infos[i].n_shards <= BIN_SHARDS_MAX);
			bins->binshard[i] = shard % bin_infos[i].n_shards;
		}
	}
}
399d0e79aa3SJason Evans
400d0e79aa3SJason Evans void
arena_migrate(tsd_t * tsd,unsigned oldind,unsigned newind)401b7eaed25SJason Evans arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
402d0e79aa3SJason Evans arena_t *oldarena, *newarena;
403d0e79aa3SJason Evans
4041f0a49e8SJason Evans oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
4051f0a49e8SJason Evans newarena = arena_get(tsd_tsdn(tsd), newind, false);
4061f0a49e8SJason Evans arena_nthreads_dec(oldarena, false);
4071f0a49e8SJason Evans arena_nthreads_inc(newarena, false);
408d0e79aa3SJason Evans tsd_arena_set(tsd, newarena);
409d0e79aa3SJason Evans }
410d0e79aa3SJason Evans
411d0e79aa3SJason Evans static void
arena_unbind(tsd_t * tsd,unsigned ind,bool internal)412b7eaed25SJason Evans arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
413d0e79aa3SJason Evans arena_t *arena;
414d0e79aa3SJason Evans
4151f0a49e8SJason Evans arena = arena_get(tsd_tsdn(tsd), ind, false);
4161f0a49e8SJason Evans arena_nthreads_dec(arena, internal);
417b7eaed25SJason Evans
418b7eaed25SJason Evans if (internal) {
4191f0a49e8SJason Evans tsd_iarena_set(tsd, NULL);
420b7eaed25SJason Evans } else {
421d0e79aa3SJason Evans tsd_arena_set(tsd, NULL);
422d0e79aa3SJason Evans }
423b7eaed25SJason Evans }
424d0e79aa3SJason Evans
/*
 * Slow path for arena_tdata lookup: (re)allocate and refresh the calling
 * thread's per-arena tdata array so that it covers index ind, then return
 * &arenas_tdata[ind].  Returns NULL if allocation fails or TSD is not
 * nominal (e.g. during thread teardown).
 */
arena_tdata_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
	arena_tdata_t *tdata, *arenas_tdata_old;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
	unsigned narenas_tdata_old, i;
	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
	unsigned narenas_actual = narenas_total_get();

	/*
	 * Dissociate old tdata array (and set up for deallocation upon return)
	 * if it's too small.
	 */
	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
		arenas_tdata_old = arenas_tdata;
		narenas_tdata_old = narenas_tdata;
		arenas_tdata = NULL;
		narenas_tdata = 0;
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	} else {
		arenas_tdata_old = NULL;
		narenas_tdata_old = 0;
	}

	/* Allocate tdata array if it's missing. */
	if (arenas_tdata == NULL) {
		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;

		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
			/*
			 * Set the bypass flag around a0malloc() so the
			 * allocation cannot recurse back into this slow path
			 * via its own TSD accesses.
			 */
			*arenas_tdata_bypassp = true;
			arenas_tdata = (arena_tdata_t *)a0malloc(
			    sizeof(arena_tdata_t) * narenas_tdata);
			*arenas_tdata_bypassp = false;
		}
		if (arenas_tdata == NULL) {
			tdata = NULL;
			goto label_return;
		}
		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	}

	/*
	 * Copy to tdata array.  It's possible that the actual number of arenas
	 * has increased since narenas_total_get() was called above, but that
	 * causes no correctness issues unless two threads concurrently execute
	 * the arenas.create mallctl, which we trust mallctl synchronization to
	 * prevent.
	 */

	/* Copy/initialize tickers. */
	for (i = 0; i < narenas_actual; i++) {
		if (i < narenas_tdata_old) {
			ticker_copy(&arenas_tdata[i].decay_ticker,
			    &arenas_tdata_old[i].decay_ticker);
		} else {
			ticker_init(&arenas_tdata[i].decay_ticker,
			    DECAY_NTICKS_PER_UPDATE);
		}
	}
	/* Zero any trailing slots that have no corresponding arena yet. */
	if (narenas_tdata > narenas_actual) {
		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
		    * (narenas_tdata - narenas_actual));
	}

	/* Read the refreshed tdata array. */
	tdata = &arenas_tdata[ind];
label_return:
	/* Free the dissociated old array, if any, now that copying is done. */
	if (arenas_tdata_old != NULL) {
		a0dalloc(arenas_tdata_old);
	}
	return tdata;
}
500d0e79aa3SJason Evans
/*
 * Slow path, called only by arena_choose().  Selects (and binds the calling
 * thread to) arenas for both application and internal allocation, creating a
 * new arena when all existing ones are busy and an uninitialized slot exists.
 * Returns the arena chosen for the kind requested via `internal`.
 */
arena_t *
arena_choose_hard(tsd_t *tsd, bool internal) {
	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);

	/* Per-CPU arena mode: bind to the arena associated with this CPU. */
	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
		unsigned choose = percpu_arena_choose();
		ret = arena_get(tsd_tsdn(tsd), choose, true);
		assert(ret != NULL);
		arena_bind(tsd, arena_ind_get(ret), false);
		arena_bind(tsd, arena_ind_get(ret), true);

		return ret;
	}

	if (narenas_auto > 1) {
		unsigned i, j, choose[2], first_null;
		bool is_new_arena[2];

		/*
		 * Determine binding for both non-internal and internal
		 * allocation.
		 *
		 * choose[0]: For application allocation.
		 * choose[1]: For internal metadata allocation.
		 */

		for (j = 0; j < 2; j++) {
			choose[j] = 0;
			is_new_arena[j] = false;
		}

		first_null = narenas_auto;
		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				for (j = 0; j < 2; j++) {
					if (arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), i, false), !!j) <
					    arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), choose[j], false),
					    !!j)) {
						choose[j] = i;
					}
				}
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		for (j = 0; j < 2; j++) {
			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
			    choose[j], false), !!j) == 0 || first_null ==
			    narenas_auto) {
				/*
				 * Use an unloaded arena, or the least loaded
				 * arena if all arenas are already initialized.
				 */
				if (!!j == internal) {
					ret = arena_get(tsd_tsdn(tsd),
					    choose[j], false);
				}
			} else {
				arena_t *arena;

				/* Initialize a new arena. */
				choose[j] = first_null;
				arena = arena_init_locked(tsd_tsdn(tsd),
				    choose[j],
				    (extent_hooks_t *)&extent_hooks_default);
				if (arena == NULL) {
					malloc_mutex_unlock(tsd_tsdn(tsd),
					    &arenas_lock);
					return NULL;
				}
				is_new_arena[j] = true;
				if (!!j == internal) {
					ret = arena;
				}
			}
			arena_bind(tsd, choose[j], !!j);
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);

		/* Background threads are spawned after dropping arenas_lock. */
		for (j = 0; j < 2; j++) {
			if (is_new_arena[j]) {
				assert(choose[j] > 0);
				arena_new_create_background_thread(
				    tsd_tsdn(tsd), choose[j]);
			}
		}

	} else {
		/* Only one automatic arena: everything binds to arena 0. */
		ret = arena_get(tsd_tsdn(tsd), 0, false);
		arena_bind(tsd, 0, false);
		arena_bind(tsd, 0, true);
	}

	return ret;
}
615a4bd5210SJason Evans
616d0e79aa3SJason Evans void
iarena_cleanup(tsd_t * tsd)617b7eaed25SJason Evans iarena_cleanup(tsd_t *tsd) {
6181f0a49e8SJason Evans arena_t *iarena;
6191f0a49e8SJason Evans
6201f0a49e8SJason Evans iarena = tsd_iarena_get(tsd);
621b7eaed25SJason Evans if (iarena != NULL) {
622b7eaed25SJason Evans arena_unbind(tsd, arena_ind_get(iarena), true);
623b7eaed25SJason Evans }
6241f0a49e8SJason Evans }
6251f0a49e8SJason Evans
6261f0a49e8SJason Evans void
arena_cleanup(tsd_t * tsd)627b7eaed25SJason Evans arena_cleanup(tsd_t *tsd) {
628d0e79aa3SJason Evans arena_t *arena;
629d0e79aa3SJason Evans
630d0e79aa3SJason Evans arena = tsd_arena_get(tsd);
631b7eaed25SJason Evans if (arena != NULL) {
632b7eaed25SJason Evans arena_unbind(tsd, arena_ind_get(arena), false);
633b7eaed25SJason Evans }
634d0e79aa3SJason Evans }
635d0e79aa3SJason Evans
636d0e79aa3SJason Evans void
arenas_tdata_cleanup(tsd_t * tsd)637b7eaed25SJason Evans arenas_tdata_cleanup(tsd_t *tsd) {
638df0d881dSJason Evans arena_tdata_t *arenas_tdata;
639d0e79aa3SJason Evans
640df0d881dSJason Evans /* Prevent tsd->arenas_tdata from being (re)created. */
641df0d881dSJason Evans *tsd_arenas_tdata_bypassp_get(tsd) = true;
642df0d881dSJason Evans
643df0d881dSJason Evans arenas_tdata = tsd_arenas_tdata_get(tsd);
644df0d881dSJason Evans if (arenas_tdata != NULL) {
645df0d881dSJason Evans tsd_arenas_tdata_set(tsd, NULL);
646df0d881dSJason Evans a0dalloc(arenas_tdata);
647d0e79aa3SJason Evans }
648536b3538SJason Evans }
649d0e79aa3SJason Evans
/*
 * atexit(3) hook: merge per-thread tcache stats into their arenas, then
 * print allocator statistics according to opt_stats_print_opts.
 */
static void
stats_print_atexit(void) {
	if (config_stats) {
		tsdn_t *tsdn;
		unsigned narenas, i;

		tsdn = tsdn_fetch();

		/*
		 * Merge stats from extant threads. This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events. As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arena_get(tsdn, i, false);
			if (arena != NULL) {
				tcache_t *tcache;

				/* tcache_ql is protected by tcache_ql_mtx. */
				malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tsdn, tcache, arena);
				}
				malloc_mutex_unlock(tsdn,
				    &arena->tcache_ql_mtx);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
}
681b7eaed25SJason Evans
682b7eaed25SJason Evans /*
683b7eaed25SJason Evans * Ensure that we don't hold any locks upon entry to or exit from allocator
684b7eaed25SJason Evans * code (in a "broad" sense that doesn't count a reentrant allocation as an
685b7eaed25SJason Evans * entrance or exit).
686b7eaed25SJason Evans */
687b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void
check_entry_exit_locking(tsdn_t * tsdn)688b7eaed25SJason Evans check_entry_exit_locking(tsdn_t *tsdn) {
689b7eaed25SJason Evans if (!config_debug) {
690b7eaed25SJason Evans return;
691b7eaed25SJason Evans }
692b7eaed25SJason Evans if (tsdn_null(tsdn)) {
693b7eaed25SJason Evans return;
694b7eaed25SJason Evans }
695b7eaed25SJason Evans tsd_t *tsd = tsdn_tsd(tsdn);
696b7eaed25SJason Evans /*
697b7eaed25SJason Evans * It's possible we hold locks at entry/exit if we're in a nested
698b7eaed25SJason Evans * allocation.
699b7eaed25SJason Evans */
700b7eaed25SJason Evans int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
701b7eaed25SJason Evans if (reentrancy_level != 0) {
702b7eaed25SJason Evans return;
703b7eaed25SJason Evans }
704b7eaed25SJason Evans witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
705a4bd5210SJason Evans }
706a4bd5210SJason Evans
707a4bd5210SJason Evans /*
708a4bd5210SJason Evans * End miscellaneous support functions.
709a4bd5210SJason Evans */
710a4bd5210SJason Evans /******************************************************************************/
711a4bd5210SJason Evans /*
712a4bd5210SJason Evans * Begin initialization functions.
713a4bd5210SJason Evans */
714a4bd5210SJason Evans
/*
 * getenv(3) wrapper that avoids reading the environment in privileged
 * (setuid/setgid-like) contexts: uses secure_getenv(3) where available,
 * otherwise returns NULL when issetugid(2) reports a tainted process.
 */
static char *
jemalloc_secure_getenv(const char *name) {
#ifdef JEMALLOC_HAVE_SECURE_GETENV
	return secure_getenv(name);
#else
# ifdef JEMALLOC_HAVE_ISSETUGID
	if (issetugid() != 0) {
		return NULL;
	}
# endif
	return getenv(name);
#endif
}
728d0e79aa3SJason Evans
/*
 * Best-effort count of the CPUs available to this process, used to size
 * allocator data structures.  Falls back to 1 if the query fails.  Note
 * the glibc path counts CPUs in the affinity mask rather than all online
 * CPUs (see comment below for why sysconf() cannot be used there).
 */
static unsigned
malloc_ncpus(void) {
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
	/*
	 * glibc >= 2.6 has the CPU_COUNT macro.
	 *
	 * glibc's sysconf() uses isspace(). glibc allocates for the first time
	 * *before* setting up the isspace tables. Therefore we need a
	 * different method to get the number of CPUs.
	 */
	{
		cpu_set_t set;

		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
		result = CPU_COUNT(&set);
	}
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	return ((result == -1) ? 1 : (unsigned)result);
}
756a4bd5210SJason Evans
/*
 * Parse the value of the "stats_print_opts" conf option: append each
 * recognized option character from v (length vlen) to the global
 * opt_stats_print_opts string.  Unrecognized and repeated characters are
 * silently ignored.
 */
static void
init_opt_stats_print_opts(const char *v, size_t vlen) {
	size_t opts_len = strlen(opt_stats_print_opts);
	assert(opts_len <= stats_print_tot_num_options);

	for (size_t i = 0; i < vlen; i++) {
		/* Accept only option characters declared in STATS_PRINT_OPTIONS. */
		switch (v[i]) {
#define OPTION(o, v, d, s) case o: break;
			STATS_PRINT_OPTIONS
#undef OPTION
		default: continue;
		}

		if (strchr(opt_stats_print_opts, v[i]) != NULL) {
			/* Ignore repeated. */
			continue;
		}

		opt_stats_print_opts[opts_len++] = v[i];
		opt_stats_print_opts[opts_len] = '\0';
		assert(opts_len <= stats_print_tot_num_options);
	}
	assert(opts_len == strlen(opt_stats_print_opts));
}
781b7eaed25SJason Evans
782*c5ad8142SEric van Gyzen /* Reads the next size pair in a multi-sized option. */
783*c5ad8142SEric van Gyzen static bool
malloc_conf_multi_sizes_next(const char ** slab_size_segment_cur,size_t * vlen_left,size_t * slab_start,size_t * slab_end,size_t * new_size)784*c5ad8142SEric van Gyzen malloc_conf_multi_sizes_next(const char **slab_size_segment_cur,
785*c5ad8142SEric van Gyzen size_t *vlen_left, size_t *slab_start, size_t *slab_end, size_t *new_size) {
786*c5ad8142SEric van Gyzen const char *cur = *slab_size_segment_cur;
787*c5ad8142SEric van Gyzen char *end;
788*c5ad8142SEric van Gyzen uintmax_t um;
789*c5ad8142SEric van Gyzen
790*c5ad8142SEric van Gyzen set_errno(0);
791*c5ad8142SEric van Gyzen
792*c5ad8142SEric van Gyzen /* First number, then '-' */
793*c5ad8142SEric van Gyzen um = malloc_strtoumax(cur, &end, 0);
794*c5ad8142SEric van Gyzen if (get_errno() != 0 || *end != '-') {
795*c5ad8142SEric van Gyzen return true;
796*c5ad8142SEric van Gyzen }
797*c5ad8142SEric van Gyzen *slab_start = (size_t)um;
798*c5ad8142SEric van Gyzen cur = end + 1;
799*c5ad8142SEric van Gyzen
800*c5ad8142SEric van Gyzen /* Second number, then ':' */
801*c5ad8142SEric van Gyzen um = malloc_strtoumax(cur, &end, 0);
802*c5ad8142SEric van Gyzen if (get_errno() != 0 || *end != ':') {
803*c5ad8142SEric van Gyzen return true;
804*c5ad8142SEric van Gyzen }
805*c5ad8142SEric van Gyzen *slab_end = (size_t)um;
806*c5ad8142SEric van Gyzen cur = end + 1;
807*c5ad8142SEric van Gyzen
808*c5ad8142SEric van Gyzen /* Last number */
809*c5ad8142SEric van Gyzen um = malloc_strtoumax(cur, &end, 0);
810*c5ad8142SEric van Gyzen if (get_errno() != 0) {
811*c5ad8142SEric van Gyzen return true;
812*c5ad8142SEric van Gyzen }
813*c5ad8142SEric van Gyzen *new_size = (size_t)um;
814*c5ad8142SEric van Gyzen
815*c5ad8142SEric van Gyzen /* Consume the separator if there is one. */
816*c5ad8142SEric van Gyzen if (*end == '|') {
817*c5ad8142SEric van Gyzen end++;
818*c5ad8142SEric van Gyzen }
819*c5ad8142SEric van Gyzen
820*c5ad8142SEric van Gyzen *vlen_left -= end - *slab_size_segment_cur;
821*c5ad8142SEric van Gyzen *slab_size_segment_cur = end;
822*c5ad8142SEric van Gyzen
823*c5ad8142SEric van Gyzen return false;
824*c5ad8142SEric van Gyzen }
825*c5ad8142SEric van Gyzen
/*
 * Lex the next "key:value" pair from the conf string at *opts_p.  On
 * success, set *k_p/*klen_p and *v_p/*vlen_p to the key and value (not
 * NUL-terminated; lengths exclude the ':' and any trailing ','), advance
 * *opts_p past the pair, and return false.  Return true at end of input
 * or on a malformed string; diagnostics go through malloc_write().
 */
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p) {
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	/* Scan the key: [A-Za-z0-9_]* terminated by ':'. */
	for (accept = false; !accept;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return true;
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return true;
		}
	}

	/* Scan the value: everything up to the next ',' or end of string. */
	for (accept = false; !accept;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return false;
}
900b7eaed25SJason Evans
/*
 * Abort the process after an invalid conf value has been reported (see
 * malloc_conf_error(), which records the error).  Requires that the
 * abort_conf option is enabled (asserted).
 */
static void
malloc_abort_invalid_conf(void) {
	assert(opt_abort_conf);
	malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
	    "value (see above).\n");
	abort();
}
908a4bd5210SJason Evans
909a4bd5210SJason Evans static void
malloc_conf_error(const char * msg,const char * k,size_t klen,const char * v,size_t vlen)910a4bd5210SJason Evans malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
911b7eaed25SJason Evans size_t vlen) {
912a4bd5210SJason Evans malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
913a4bd5210SJason Evans (int)vlen, v);
9140ef50b4eSJason Evans /* If abort_conf is set, error out after processing all options. */
915*c5ad8142SEric van Gyzen const char *experimental = "experimental_";
916*c5ad8142SEric van Gyzen if (strncmp(k, experimental, strlen(experimental)) == 0) {
917*c5ad8142SEric van Gyzen /* However, tolerate experimental features. */
918*c5ad8142SEric van Gyzen return;
919*c5ad8142SEric van Gyzen }
920b7eaed25SJason Evans had_conf_error = true;
921a4bd5210SJason Evans }
922a4bd5210SJason Evans
923a4bd5210SJason Evans static void
malloc_slow_flag_init(void)924b7eaed25SJason Evans malloc_slow_flag_init(void) {
925df0d881dSJason Evans /*
926df0d881dSJason Evans * Combine the runtime options into malloc_slow for fast path. Called
927df0d881dSJason Evans * after processing all the options.
928df0d881dSJason Evans */
929df0d881dSJason Evans malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
930df0d881dSJason Evans | (opt_junk_free ? flag_opt_junk_free : 0)
931df0d881dSJason Evans | (opt_zero ? flag_opt_zero : 0)
932df0d881dSJason Evans | (opt_utrace ? flag_opt_utrace : 0)
933df0d881dSJason Evans | (opt_xmalloc ? flag_opt_xmalloc : 0);
934df0d881dSJason Evans
935df0d881dSJason Evans malloc_slow = (malloc_slow_flags != 0);
936df0d881dSJason Evans }
937df0d881dSJason Evans
/* Number of sources for initializing malloc_conf */
#define MALLOC_CONF_NSOURCES 4

/*
 * Return the configuration string supplied by the given source, or NULL if
 * that source specifies nothing:
 *   0: string compiled in via --with-malloc-conf (config_malloc_conf)
 *   1: the global variable je_malloc_conf
 *   2: the "name" of the /etc/malloc.conf symbolic link (read into buf)
 *   3: the MALLOC_CONF environment variable
 * In debug builds, asserts that sources are read exactly once, in order.
 */
static const char *
obtain_malloc_conf(unsigned which_source, char buf[PATH_MAX + 1]) {
	if (config_debug) {
		static unsigned read_source = 0;
		/*
		 * Each source should only be read once, to minimize # of
		 * syscalls on init.
		 */
		assert(read_source++ == which_source);
	}
	assert(which_source < MALLOC_CONF_NSOURCES);

	const char *ret;
	switch (which_source) {
	case 0:
		ret = config_malloc_conf;
		break;
	case 1:
		if (je_malloc_conf != NULL) {
			/* Use options set via the global je_malloc_conf. */
			ret = je_malloc_conf;
		} else {
			/* No configuration specified. */
			ret = NULL;
		}
		break;
	case 2: {
		ssize_t linklen = 0;
#ifndef _WIN32
		int saved_errno = errno;
		const char *linkname =
# ifdef JEMALLOC_PREFIX
		    "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
		    "/etc/malloc.conf"
# endif
		    ;

		/*
		 * Try to use the contents of the "/etc/malloc.conf" symbolic
		 * link's name.
		 */
#ifndef JEMALLOC_READLINKAT
		linklen = readlink(linkname, buf, PATH_MAX);
#else
		linklen = readlinkat(AT_FDCWD, linkname, buf, PATH_MAX);
#endif
		if (linklen == -1) {
			/* No configuration specified. */
			linklen = 0;
			/* Restore errno. */
			set_errno(saved_errno);
		}
#endif
		/* On Windows (or on readlink failure) this yields "". */
		buf[linklen] = '\0';
		ret = buf;
		break;
	} case 3: {
		const char *envname =
#ifdef JEMALLOC_PREFIX
		    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
		    "MALLOC_CONF"
#endif
		    ;

		if ((ret = jemalloc_secure_getenv(envname)) != NULL) {
			/*
			 * ret now points at the value of the MALLOC_CONF
			 * environment variable; nothing further to do.
			 */
		} else {
			/* No configuration specified. */
			ret = NULL;
		}
		break;
	} default:
		not_reached();
		ret = NULL;
	}
	return ret;
}
1023*c5ad8142SEric van Gyzen
1024*c5ad8142SEric van Gyzen static void
malloc_conf_init_helper(sc_data_t * sc_data,unsigned bin_shard_sizes[SC_NBINS],bool initial_call,const char * opts_cache[MALLOC_CONF_NSOURCES],char buf[PATH_MAX+1])1025*c5ad8142SEric van Gyzen malloc_conf_init_helper(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS],
1026*c5ad8142SEric van Gyzen bool initial_call, const char *opts_cache[MALLOC_CONF_NSOURCES],
1027*c5ad8142SEric van Gyzen char buf[PATH_MAX + 1]) {
1028*c5ad8142SEric van Gyzen static const char *opts_explain[MALLOC_CONF_NSOURCES] = {
1029*c5ad8142SEric van Gyzen "string specified via --with-malloc-conf",
1030*c5ad8142SEric van Gyzen "string pointed to by the global variable malloc_conf",
1031*c5ad8142SEric van Gyzen "\"name\" of the file referenced by the symbolic link named "
1032*c5ad8142SEric van Gyzen "/etc/malloc.conf",
1033*c5ad8142SEric van Gyzen "value of the environment variable MALLOC_CONF"
1034*c5ad8142SEric van Gyzen };
1035*c5ad8142SEric van Gyzen unsigned i;
1036*c5ad8142SEric van Gyzen const char *opts, *k, *v;
1037*c5ad8142SEric van Gyzen size_t klen, vlen;
1038*c5ad8142SEric van Gyzen
1039*c5ad8142SEric van Gyzen for (i = 0; i < MALLOC_CONF_NSOURCES; i++) {
1040*c5ad8142SEric van Gyzen /* Get runtime configuration. */
1041*c5ad8142SEric van Gyzen if (initial_call) {
1042*c5ad8142SEric van Gyzen opts_cache[i] = obtain_malloc_conf(i, buf);
1043*c5ad8142SEric van Gyzen }
1044*c5ad8142SEric van Gyzen opts = opts_cache[i];
1045*c5ad8142SEric van Gyzen if (!initial_call && opt_confirm_conf) {
1046*c5ad8142SEric van Gyzen malloc_printf(
1047*c5ad8142SEric van Gyzen "<jemalloc>: malloc_conf #%u (%s): \"%s\"\n",
1048*c5ad8142SEric van Gyzen i + 1, opts_explain[i], opts != NULL ? opts : "");
1049*c5ad8142SEric van Gyzen }
1050*c5ad8142SEric van Gyzen if (opts == NULL) {
1051*c5ad8142SEric van Gyzen continue;
1052a4bd5210SJason Evans }
1053a4bd5210SJason Evans
1054d0e79aa3SJason Evans while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
1055d0e79aa3SJason Evans &vlen)) {
1056*c5ad8142SEric van Gyzen
1057*c5ad8142SEric van Gyzen #define CONF_ERROR(msg, k, klen, v, vlen) \
1058*c5ad8142SEric van Gyzen if (!initial_call) { \
1059*c5ad8142SEric van Gyzen malloc_conf_error( \
1060*c5ad8142SEric van Gyzen msg, k, klen, v, vlen); \
1061*c5ad8142SEric van Gyzen cur_opt_valid = false; \
1062*c5ad8142SEric van Gyzen }
1063*c5ad8142SEric van Gyzen #define CONF_CONTINUE { \
1064*c5ad8142SEric van Gyzen if (!initial_call && opt_confirm_conf \
1065*c5ad8142SEric van Gyzen && cur_opt_valid) { \
1066*c5ad8142SEric van Gyzen malloc_printf("<jemalloc>: -- " \
1067*c5ad8142SEric van Gyzen "Set conf value: %.*s:%.*s" \
1068*c5ad8142SEric van Gyzen "\n", (int)klen, k, \
1069*c5ad8142SEric van Gyzen (int)vlen, v); \
1070*c5ad8142SEric van Gyzen } \
1071*c5ad8142SEric van Gyzen continue; \
1072*c5ad8142SEric van Gyzen }
1073d0e79aa3SJason Evans #define CONF_MATCH(n) \
1074d0e79aa3SJason Evans (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
1075d0e79aa3SJason Evans #define CONF_MATCH_VALUE(n) \
1076d0e79aa3SJason Evans (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
1077b7eaed25SJason Evans #define CONF_HANDLE_BOOL(o, n) \
1078d0e79aa3SJason Evans if (CONF_MATCH(n)) { \
1079b7eaed25SJason Evans if (CONF_MATCH_VALUE("true")) { \
1080a4bd5210SJason Evans o = true; \
1081b7eaed25SJason Evans } else if (CONF_MATCH_VALUE("false")) { \
1082a4bd5210SJason Evans o = false; \
1083b7eaed25SJason Evans } else { \
1084*c5ad8142SEric van Gyzen CONF_ERROR("Invalid conf value",\
1085a4bd5210SJason Evans k, klen, v, vlen); \
1086a4bd5210SJason Evans } \
1087*c5ad8142SEric van Gyzen CONF_CONTINUE; \
1088a4bd5210SJason Evans }
1089*c5ad8142SEric van Gyzen /*
1090*c5ad8142SEric van Gyzen * One of the CONF_MIN macros below expands, in one of the use points,
1091*c5ad8142SEric van Gyzen * to "unsigned integer < 0", which is always false, triggering the
1092*c5ad8142SEric van Gyzen * GCC -Wtype-limits warning, which we disable here and re-enable below.
1093*c5ad8142SEric van Gyzen */
1094*c5ad8142SEric van Gyzen JEMALLOC_DIAGNOSTIC_PUSH
1095*c5ad8142SEric van Gyzen JEMALLOC_DIAGNOSTIC_IGNORE_TYPE_LIMITS
1096*c5ad8142SEric van Gyzen
1097*c5ad8142SEric van Gyzen #define CONF_DONT_CHECK_MIN(um, min) false
1098*c5ad8142SEric van Gyzen #define CONF_CHECK_MIN(um, min) ((um) < (min))
1099*c5ad8142SEric van Gyzen #define CONF_DONT_CHECK_MAX(um, max) false
1100*c5ad8142SEric van Gyzen #define CONF_CHECK_MAX(um, max) ((um) > (max))
11017fa7f12fSJason Evans #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
1102d0e79aa3SJason Evans if (CONF_MATCH(n)) { \
1103a4bd5210SJason Evans uintmax_t um; \
1104a4bd5210SJason Evans char *end; \
1105a4bd5210SJason Evans \
1106e722f8f8SJason Evans set_errno(0); \
1107a4bd5210SJason Evans um = malloc_strtoumax(v, &end, 0); \
1108e722f8f8SJason Evans if (get_errno() != 0 || (uintptr_t)end -\
1109a4bd5210SJason Evans (uintptr_t)v != vlen) { \
1110*c5ad8142SEric van Gyzen CONF_ERROR("Invalid conf value",\
1111a4bd5210SJason Evans k, klen, v, vlen); \
111288ad2f8dSJason Evans } else if (clip) { \
1113*c5ad8142SEric van Gyzen if (check_min(um, (t)(min))) { \
1114df0d881dSJason Evans o = (t)(min); \
1115b7eaed25SJason Evans } else if ( \
1116*c5ad8142SEric van Gyzen check_max(um, (t)(max))) { \
1117df0d881dSJason Evans o = (t)(max); \
1118b7eaed25SJason Evans } else { \
1119df0d881dSJason Evans o = (t)um; \
1120b7eaed25SJason Evans } \
112188ad2f8dSJason Evans } else { \
1122*c5ad8142SEric van Gyzen if (check_min(um, (t)(min)) || \
1123*c5ad8142SEric van Gyzen check_max(um, (t)(max))) { \
1124*c5ad8142SEric van Gyzen CONF_ERROR( \
112588ad2f8dSJason Evans "Out-of-range " \
112688ad2f8dSJason Evans "conf value", \
1127a4bd5210SJason Evans k, klen, v, vlen); \
1128b7eaed25SJason Evans } else { \
1129df0d881dSJason Evans o = (t)um; \
113088ad2f8dSJason Evans } \
1131b7eaed25SJason Evans } \
1132*c5ad8142SEric van Gyzen CONF_CONTINUE; \
1133a4bd5210SJason Evans }
11347fa7f12fSJason Evans #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \
11357fa7f12fSJason Evans clip) \
11367fa7f12fSJason Evans CONF_HANDLE_T_U(unsigned, o, n, min, max, \
11377fa7f12fSJason Evans check_min, check_max, clip)
11387fa7f12fSJason Evans #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
11397fa7f12fSJason Evans CONF_HANDLE_T_U(size_t, o, n, min, max, \
11407fa7f12fSJason Evans check_min, check_max, clip)
1141a4bd5210SJason Evans #define CONF_HANDLE_SSIZE_T(o, n, min, max) \
1142d0e79aa3SJason Evans if (CONF_MATCH(n)) { \
1143a4bd5210SJason Evans long l; \
1144a4bd5210SJason Evans char *end; \
1145a4bd5210SJason Evans \
1146e722f8f8SJason Evans set_errno(0); \
1147a4bd5210SJason Evans l = strtol(v, &end, 0); \
1148e722f8f8SJason Evans if (get_errno() != 0 || (uintptr_t)end -\
1149a4bd5210SJason Evans (uintptr_t)v != vlen) { \
1150*c5ad8142SEric van Gyzen CONF_ERROR("Invalid conf value",\
1151a4bd5210SJason Evans k, klen, v, vlen); \
1152d0e79aa3SJason Evans } else if (l < (ssize_t)(min) || l > \
1153d0e79aa3SJason Evans (ssize_t)(max)) { \
1154*c5ad8142SEric van Gyzen CONF_ERROR( \
1155a4bd5210SJason Evans "Out-of-range conf value", \
1156a4bd5210SJason Evans k, klen, v, vlen); \
1157b7eaed25SJason Evans } else { \
1158a4bd5210SJason Evans o = l; \
1159b7eaed25SJason Evans } \
1160*c5ad8142SEric van Gyzen CONF_CONTINUE; \
1161a4bd5210SJason Evans }
1162a4bd5210SJason Evans #define CONF_HANDLE_CHAR_P(o, n, d) \
1163d0e79aa3SJason Evans if (CONF_MATCH(n)) { \
1164a4bd5210SJason Evans size_t cpylen = (vlen <= \
1165a4bd5210SJason Evans sizeof(o)-1) ? vlen : \
1166a4bd5210SJason Evans sizeof(o)-1; \
1167a4bd5210SJason Evans strncpy(o, v, cpylen); \
1168a4bd5210SJason Evans o[cpylen] = '\0'; \
1169*c5ad8142SEric van Gyzen CONF_CONTINUE; \
1170*c5ad8142SEric van Gyzen }
1171*c5ad8142SEric van Gyzen
1172*c5ad8142SEric van Gyzen bool cur_opt_valid = true;
1173*c5ad8142SEric van Gyzen
1174*c5ad8142SEric van Gyzen CONF_HANDLE_BOOL(opt_confirm_conf, "confirm_conf")
1175*c5ad8142SEric van Gyzen if (initial_call) {
1176*c5ad8142SEric van Gyzen continue;
1177a4bd5210SJason Evans }
1178a4bd5210SJason Evans
1179b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_abort, "abort")
1180b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
11810ef50b4eSJason Evans if (strncmp("metadata_thp", k, klen) == 0) {
11820ef50b4eSJason Evans int i;
11830ef50b4eSJason Evans bool match = false;
11840ef50b4eSJason Evans for (i = 0; i < metadata_thp_mode_limit; i++) {
11850ef50b4eSJason Evans if (strncmp(metadata_thp_mode_names[i],
11860ef50b4eSJason Evans v, vlen) == 0) {
11870ef50b4eSJason Evans opt_metadata_thp = i;
11880ef50b4eSJason Evans match = true;
11890ef50b4eSJason Evans break;
11900ef50b4eSJason Evans }
11910ef50b4eSJason Evans }
11920ef50b4eSJason Evans if (!match) {
1193*c5ad8142SEric van Gyzen CONF_ERROR("Invalid conf value",
11940ef50b4eSJason Evans k, klen, v, vlen);
11950ef50b4eSJason Evans }
1196*c5ad8142SEric van Gyzen CONF_CONTINUE;
1197b7eaed25SJason Evans }
1198b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_retain, "retain")
119982872ac0SJason Evans if (strncmp("dss", k, klen) == 0) {
120082872ac0SJason Evans int i;
120182872ac0SJason Evans bool match = false;
120282872ac0SJason Evans for (i = 0; i < dss_prec_limit; i++) {
120382872ac0SJason Evans if (strncmp(dss_prec_names[i], v, vlen)
120482872ac0SJason Evans == 0) {
1205b7eaed25SJason Evans if (extent_dss_prec_set(i)) {
1206*c5ad8142SEric van Gyzen CONF_ERROR(
120782872ac0SJason Evans "Error setting dss",
120882872ac0SJason Evans k, klen, v, vlen);
120982872ac0SJason Evans } else {
121082872ac0SJason Evans opt_dss =
121182872ac0SJason Evans dss_prec_names[i];
121282872ac0SJason Evans match = true;
121382872ac0SJason Evans break;
121482872ac0SJason Evans }
121582872ac0SJason Evans }
121682872ac0SJason Evans }
1217d0e79aa3SJason Evans if (!match) {
1218*c5ad8142SEric van Gyzen CONF_ERROR("Invalid conf value",
121982872ac0SJason Evans k, klen, v, vlen);
122082872ac0SJason Evans }
1221*c5ad8142SEric van Gyzen CONF_CONTINUE;
122282872ac0SJason Evans }
1223df0d881dSJason Evans CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
1224*c5ad8142SEric van Gyzen UINT_MAX, CONF_CHECK_MIN, CONF_DONT_CHECK_MAX,
1225*c5ad8142SEric van Gyzen false)
1226*c5ad8142SEric van Gyzen if (CONF_MATCH("bin_shards")) {
1227*c5ad8142SEric van Gyzen const char *bin_shards_segment_cur = v;
1228*c5ad8142SEric van Gyzen size_t vlen_left = vlen;
1229*c5ad8142SEric van Gyzen do {
1230*c5ad8142SEric van Gyzen size_t size_start;
1231*c5ad8142SEric van Gyzen size_t size_end;
1232*c5ad8142SEric van Gyzen size_t nshards;
1233*c5ad8142SEric van Gyzen bool err = malloc_conf_multi_sizes_next(
1234*c5ad8142SEric van Gyzen &bin_shards_segment_cur, &vlen_left,
1235*c5ad8142SEric van Gyzen &size_start, &size_end, &nshards);
1236*c5ad8142SEric van Gyzen if (err || bin_update_shard_size(
1237*c5ad8142SEric van Gyzen bin_shard_sizes, size_start,
1238*c5ad8142SEric van Gyzen size_end, nshards)) {
1239*c5ad8142SEric van Gyzen CONF_ERROR(
1240*c5ad8142SEric van Gyzen "Invalid settings for "
1241*c5ad8142SEric van Gyzen "bin_shards", k, klen, v,
1242*c5ad8142SEric van Gyzen vlen);
1243*c5ad8142SEric van Gyzen break;
1244*c5ad8142SEric van Gyzen }
1245*c5ad8142SEric van Gyzen } while (vlen_left > 0);
1246*c5ad8142SEric van Gyzen CONF_CONTINUE;
1247*c5ad8142SEric van Gyzen }
1248b7eaed25SJason Evans CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
1249b7eaed25SJason Evans "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1250b7eaed25SJason Evans QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1251b7eaed25SJason Evans SSIZE_MAX);
1252b7eaed25SJason Evans CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
1253b7eaed25SJason Evans "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
1254b7eaed25SJason Evans QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
1255b7eaed25SJason Evans SSIZE_MAX);
1256b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
1257b7eaed25SJason Evans if (CONF_MATCH("stats_print_opts")) {
1258b7eaed25SJason Evans init_opt_stats_print_opts(v, vlen);
1259*c5ad8142SEric van Gyzen CONF_CONTINUE;
1260b7eaed25SJason Evans }
1261b7eaed25SJason Evans if (config_fill) {
1262b7eaed25SJason Evans if (CONF_MATCH("junk")) {
1263b7eaed25SJason Evans if (CONF_MATCH_VALUE("true")) {
1264b7eaed25SJason Evans opt_junk = "true";
1265b7eaed25SJason Evans opt_junk_alloc = opt_junk_free =
1266b7eaed25SJason Evans true;
1267b7eaed25SJason Evans } else if (CONF_MATCH_VALUE("false")) {
1268b7eaed25SJason Evans opt_junk = "false";
1269b7eaed25SJason Evans opt_junk_alloc = opt_junk_free =
1270b7eaed25SJason Evans false;
1271b7eaed25SJason Evans } else if (CONF_MATCH_VALUE("alloc")) {
1272b7eaed25SJason Evans opt_junk = "alloc";
1273b7eaed25SJason Evans opt_junk_alloc = true;
1274b7eaed25SJason Evans opt_junk_free = false;
1275b7eaed25SJason Evans } else if (CONF_MATCH_VALUE("free")) {
1276b7eaed25SJason Evans opt_junk = "free";
1277b7eaed25SJason Evans opt_junk_alloc = false;
1278b7eaed25SJason Evans opt_junk_free = true;
1279b7eaed25SJason Evans } else {
1280*c5ad8142SEric van Gyzen CONF_ERROR(
1281*c5ad8142SEric van Gyzen "Invalid conf value",
1282*c5ad8142SEric van Gyzen k, klen, v, vlen);
1283b7eaed25SJason Evans }
1284*c5ad8142SEric van Gyzen CONF_CONTINUE;
1285b7eaed25SJason Evans }
1286b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_zero, "zero")
1287b7eaed25SJason Evans }
1288b7eaed25SJason Evans if (config_utrace) {
1289b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_utrace, "utrace")
1290b7eaed25SJason Evans }
1291b7eaed25SJason Evans if (config_xmalloc) {
1292b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
1293b7eaed25SJason Evans }
1294b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_tcache, "tcache")
1295f2cb2907SJason Evans CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
1296f2cb2907SJason Evans -1, (sizeof(size_t) << 3) - 1)
1297*c5ad8142SEric van Gyzen
1298*c5ad8142SEric van Gyzen /*
1299*c5ad8142SEric van Gyzen * The runtime option of oversize_threshold remains
1300*c5ad8142SEric van Gyzen * undocumented. It may be tweaked in the next major
1301*c5ad8142SEric van Gyzen * release (6.0). The default value 8M is rather
1302*c5ad8142SEric van Gyzen * conservative / safe. Tuning it further down may
1303*c5ad8142SEric van Gyzen * improve fragmentation a bit more, but may also cause
1304*c5ad8142SEric van Gyzen * contention on the huge arena.
1305*c5ad8142SEric van Gyzen */
1306*c5ad8142SEric van Gyzen CONF_HANDLE_SIZE_T(opt_oversize_threshold,
1307*c5ad8142SEric van Gyzen "oversize_threshold", 0, SC_LARGE_MAXCLASS,
1308*c5ad8142SEric van Gyzen CONF_DONT_CHECK_MIN, CONF_CHECK_MAX, false)
1309*c5ad8142SEric van Gyzen CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit,
1310*c5ad8142SEric van Gyzen "lg_extent_max_active_fit", 0,
1311*c5ad8142SEric van Gyzen (sizeof(size_t) << 3), CONF_DONT_CHECK_MIN,
1312*c5ad8142SEric van Gyzen CONF_CHECK_MAX, false)
1313*c5ad8142SEric van Gyzen
1314b7eaed25SJason Evans if (strncmp("percpu_arena", k, klen) == 0) {
1315df0d881dSJason Evans bool match = false;
13160ef50b4eSJason Evans for (int i = percpu_arena_mode_names_base; i <
1317b7eaed25SJason Evans percpu_arena_mode_names_limit; i++) {
1318b7eaed25SJason Evans if (strncmp(percpu_arena_mode_names[i],
1319b7eaed25SJason Evans v, vlen) == 0) {
1320b7eaed25SJason Evans if (!have_percpu_arena) {
1321*c5ad8142SEric van Gyzen CONF_ERROR(
1322b7eaed25SJason Evans "No getcpu support",
1323b7eaed25SJason Evans k, klen, v, vlen);
1324b7eaed25SJason Evans }
1325b7eaed25SJason Evans opt_percpu_arena = i;
1326df0d881dSJason Evans match = true;
1327df0d881dSJason Evans break;
1328df0d881dSJason Evans }
1329df0d881dSJason Evans }
1330df0d881dSJason Evans if (!match) {
1331*c5ad8142SEric van Gyzen CONF_ERROR("Invalid conf value",
1332df0d881dSJason Evans k, klen, v, vlen);
1333df0d881dSJason Evans }
1334*c5ad8142SEric van Gyzen CONF_CONTINUE;
1335df0d881dSJason Evans }
1336b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_background_thread,
1337b7eaed25SJason Evans "background_thread");
13380ef50b4eSJason Evans CONF_HANDLE_SIZE_T(opt_max_background_threads,
13390ef50b4eSJason Evans "max_background_threads", 1,
1340*c5ad8142SEric van Gyzen opt_max_background_threads,
1341*c5ad8142SEric van Gyzen CONF_CHECK_MIN, CONF_CHECK_MAX,
13420ef50b4eSJason Evans true);
1343*c5ad8142SEric van Gyzen if (CONF_MATCH("slab_sizes")) {
1344*c5ad8142SEric van Gyzen bool err;
1345*c5ad8142SEric van Gyzen const char *slab_size_segment_cur = v;
1346*c5ad8142SEric van Gyzen size_t vlen_left = vlen;
1347*c5ad8142SEric van Gyzen do {
1348*c5ad8142SEric van Gyzen size_t slab_start;
1349*c5ad8142SEric van Gyzen size_t slab_end;
1350*c5ad8142SEric van Gyzen size_t pgs;
1351*c5ad8142SEric van Gyzen err = malloc_conf_multi_sizes_next(
1352*c5ad8142SEric van Gyzen &slab_size_segment_cur,
1353*c5ad8142SEric van Gyzen &vlen_left, &slab_start, &slab_end,
1354*c5ad8142SEric van Gyzen &pgs);
1355*c5ad8142SEric van Gyzen if (!err) {
1356*c5ad8142SEric van Gyzen sc_data_update_slab_size(
1357*c5ad8142SEric van Gyzen sc_data, slab_start,
1358*c5ad8142SEric van Gyzen slab_end, (int)pgs);
1359*c5ad8142SEric van Gyzen } else {
1360*c5ad8142SEric van Gyzen CONF_ERROR("Invalid settings "
1361*c5ad8142SEric van Gyzen "for slab_sizes",
1362*c5ad8142SEric van Gyzen k, klen, v, vlen);
1363*c5ad8142SEric van Gyzen }
1364*c5ad8142SEric van Gyzen } while (!err && vlen_left > 0);
1365*c5ad8142SEric van Gyzen CONF_CONTINUE;
1366*c5ad8142SEric van Gyzen }
1367a4bd5210SJason Evans if (config_prof) {
1368b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_prof, "prof")
13698ed34ab0SJason Evans CONF_HANDLE_CHAR_P(opt_prof_prefix,
13708ed34ab0SJason Evans "prof_prefix", "jeprof")
1371b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
1372d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_thread_active_init,
1373b7eaed25SJason Evans "prof_thread_active_init")
1374d0e79aa3SJason Evans CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
13757fa7f12fSJason Evans "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
1376*c5ad8142SEric van Gyzen - 1, CONF_DONT_CHECK_MIN, CONF_CHECK_MAX,
1377*c5ad8142SEric van Gyzen true)
1378b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
1379a4bd5210SJason Evans CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
13808ed34ab0SJason Evans "lg_prof_interval", -1,
1381a4bd5210SJason Evans (sizeof(uint64_t) << 3) - 1)
1382b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
1383b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
1384b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
1385*c5ad8142SEric van Gyzen CONF_HANDLE_BOOL(opt_prof_log, "prof_log")
1386a4bd5210SJason Evans }
13870ef50b4eSJason Evans if (config_log) {
13880ef50b4eSJason Evans if (CONF_MATCH("log")) {
13890ef50b4eSJason Evans size_t cpylen = (
13900ef50b4eSJason Evans vlen <= sizeof(log_var_names) ?
13910ef50b4eSJason Evans vlen : sizeof(log_var_names) - 1);
13920ef50b4eSJason Evans strncpy(log_var_names, v, cpylen);
13930ef50b4eSJason Evans log_var_names[cpylen] = '\0';
1394*c5ad8142SEric van Gyzen CONF_CONTINUE;
13950ef50b4eSJason Evans }
13960ef50b4eSJason Evans }
13970ef50b4eSJason Evans if (CONF_MATCH("thp")) {
13980ef50b4eSJason Evans bool match = false;
13990ef50b4eSJason Evans for (int i = 0; i < thp_mode_names_limit; i++) {
14000ef50b4eSJason Evans if (strncmp(thp_mode_names[i],v, vlen)
14010ef50b4eSJason Evans == 0) {
14020ef50b4eSJason Evans if (!have_madvise_huge) {
1403*c5ad8142SEric van Gyzen CONF_ERROR(
14040ef50b4eSJason Evans "No THP support",
14050ef50b4eSJason Evans k, klen, v, vlen);
14060ef50b4eSJason Evans }
14070ef50b4eSJason Evans opt_thp = i;
14080ef50b4eSJason Evans match = true;
14090ef50b4eSJason Evans break;
14100ef50b4eSJason Evans }
14110ef50b4eSJason Evans }
14120ef50b4eSJason Evans if (!match) {
1413*c5ad8142SEric van Gyzen CONF_ERROR("Invalid conf value",
14140ef50b4eSJason Evans k, klen, v, vlen);
14150ef50b4eSJason Evans }
1416*c5ad8142SEric van Gyzen CONF_CONTINUE;
14170ef50b4eSJason Evans }
1418*c5ad8142SEric van Gyzen CONF_ERROR("Invalid conf pair", k, klen, v, vlen);
1419*c5ad8142SEric van Gyzen #undef CONF_ERROR
1420*c5ad8142SEric van Gyzen #undef CONF_CONTINUE
1421d0e79aa3SJason Evans #undef CONF_MATCH
14227fa7f12fSJason Evans #undef CONF_MATCH_VALUE
1423a4bd5210SJason Evans #undef CONF_HANDLE_BOOL
1424*c5ad8142SEric van Gyzen #undef CONF_DONT_CHECK_MIN
1425*c5ad8142SEric van Gyzen #undef CONF_CHECK_MIN
1426*c5ad8142SEric van Gyzen #undef CONF_DONT_CHECK_MAX
1427*c5ad8142SEric van Gyzen #undef CONF_CHECK_MAX
14287fa7f12fSJason Evans #undef CONF_HANDLE_T_U
14297fa7f12fSJason Evans #undef CONF_HANDLE_UNSIGNED
1430a4bd5210SJason Evans #undef CONF_HANDLE_SIZE_T
1431a4bd5210SJason Evans #undef CONF_HANDLE_SSIZE_T
1432a4bd5210SJason Evans #undef CONF_HANDLE_CHAR_P
1433*c5ad8142SEric van Gyzen /* Re-enable diagnostic "-Wtype-limits" */
1434*c5ad8142SEric van Gyzen JEMALLOC_DIAGNOSTIC_POP
1435a4bd5210SJason Evans }
14360ef50b4eSJason Evans if (opt_abort_conf && had_conf_error) {
14370ef50b4eSJason Evans malloc_abort_invalid_conf();
1438a4bd5210SJason Evans }
1439a4bd5210SJason Evans }
14400ef50b4eSJason Evans atomic_store_b(&log_init_done, true, ATOMIC_RELEASE);
14410ef50b4eSJason Evans }
1442a4bd5210SJason Evans
/*
 * Parse all malloc conf sources in two passes.  The first pass only
 * establishes opt_confirm_conf and fills opts_cache with pointers to the raw
 * option strings (one slot per source, MALLOC_CONF_NSOURCES total); the
 * second pass does the real option handling, updating sc_data and
 * bin_shard_sizes as directed by "slab_sizes"/"bin_shards" style options.
 */
static void
malloc_conf_init(sc_data_t *sc_data, unsigned bin_shard_sizes[SC_NBINS]) {
	const char *opts_cache[MALLOC_CONF_NSOURCES] = {NULL, NULL, NULL, NULL};
	/* Scratch space for the file-based conf source; PATH_MAX-sized. */
	char buf[PATH_MAX + 1];

	/* The first call only set the confirm_conf option and opts_cache */
	malloc_conf_init_helper(NULL, NULL, true, opts_cache, buf);
	malloc_conf_init_helper(sc_data, bin_shard_sizes, false, opts_cache,
	    NULL);
}
1453*c5ad8142SEric van Gyzen
1454*c5ad8142SEric van Gyzen #undef MALLOC_CONF_NSOURCES
1455*c5ad8142SEric van Gyzen
/*
 * Decide whether the caller must perform (the rest of) hard initialization.
 * Called with init_lock held.  Returns false when initialization is already
 * complete, when this thread is the initializer re-entering via recursive
 * allocation, or (threaded init) after waiting out another initializer.
 */
static bool
malloc_init_hard_needed(void) {
	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
	    malloc_init_recursible)) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		return false;
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
		/* Busy-wait until the initializing thread completes. */
		spin_t spinner = SPIN_INITIALIZER;
		do {
			/*
			 * Drop init_lock around each spin so the initializing
			 * thread can make progress; re-acquire before
			 * re-checking the initialized flag.
			 */
			malloc_mutex_unlock(TSDN_NULL, &init_lock);
			spin_adaptive(&spinner);
			malloc_mutex_lock(TSDN_NULL, &init_lock);
		} while (!malloc_initialized());
		return false;
	}
#endif
	return true;
}
1481d0e79aa3SJason Evans
/*
 * Bootstrap the minimal allocator state ("a0": arena 0 plus the subsystems it
 * depends on) so that recursive allocation during the rest of initialization
 * can be serviced.  Called with init_lock held.  Returns true on failure.
 */
static bool
malloc_init_hard_a0_locked() {
	malloc_initializer = INITIALIZER;

	/* {0} triggers missing-field-initializer warnings; suppress them. */
	JEMALLOC_DIAGNOSTIC_PUSH
	JEMALLOC_DIAGNOSTIC_IGNORE_MISSING_STRUCT_FIELD_INITIALIZERS
	sc_data_t sc_data = {0};
	JEMALLOC_DIAGNOSTIC_POP

	/*
	 * Ordering here is somewhat tricky; we need sc_boot() first, since that
	 * determines what the size classes will be, and then
	 * malloc_conf_init(), since any slab size tweaking will need to be done
	 * before sz_boot and bin_boot, which assume that the values they read
	 * out of sc_data_global are final.
	 */
	sc_boot(&sc_data);
	unsigned bin_shard_sizes[SC_NBINS];
	bin_shard_sizes_boot(bin_shard_sizes);
	/*
	 * prof_boot0 only initializes opt_prof_prefix. We need to do it before
	 * we parse malloc_conf options, in case malloc_conf parsing overwrites
	 * it.
	 */
	if (config_prof) {
		prof_boot0();
	}
	malloc_conf_init(&sc_data, bin_shard_sizes);
	sz_boot(&sc_data);
	bin_boot(&sc_data, bin_shard_sizes);

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort) {
				abort();
			}
		}
	}
	/* Boot each dependency in turn; any failure aborts initialization. */
	if (pages_boot()) {
		return true;
	}
	if (base_boot(TSDN_NULL)) {
		return true;
	}
	if (extent_boot()) {
		return true;
	}
	if (ctl_boot()) {
		return true;
	}
	if (config_prof) {
		prof_boot1();
	}
	arena_boot(&sc_data);
	if (tcache_boot(TSDN_NULL)) {
		return true;
	}
	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	hook_boot();
	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas_auto = 1;
	/* Temporary value; recomputed in malloc_init_narenas(). */
	manual_arena_base = narenas_auto + 1;
	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
	/*
	 * Initialize one arena here. The rest are lazily created in
	 * arena_choose_hard().
	 */
	if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default)
	    == NULL) {
		return true;
	}
	a0 = arena_get(TSDN_NULL, 0, false);
	malloc_init_state = malloc_init_a0_initialized;

	return false;
}
1566a4bd5210SJason Evans
1567d0e79aa3SJason Evans static bool
malloc_init_hard_a0(void)1568b7eaed25SJason Evans malloc_init_hard_a0(void) {
1569d0e79aa3SJason Evans bool ret;
1570d0e79aa3SJason Evans
15711f0a49e8SJason Evans malloc_mutex_lock(TSDN_NULL, &init_lock);
1572d0e79aa3SJason Evans ret = malloc_init_hard_a0_locked();
15731f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock);
1574b7eaed25SJason Evans return ret;
1575a4bd5210SJason Evans }
1576a4bd5210SJason Evans
/* Initialize data structures which may trigger recursive allocation. */
static bool
malloc_init_hard_recursible(void) {
	/*
	 * Flag the recursible phase so that re-entrant allocations made by the
	 * calls below are recognized by malloc_init_hard_needed().
	 */
	malloc_init_state = malloc_init_recursible;

	ncpus = malloc_ncpus();

#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
    && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
    !defined(__native_client__))
	/* LinuxThreads' pthread_atfork() allocates. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort) {
			abort();
		}
		return true;
	}
#endif

	if (background_thread_boot0()) {
		return true;
	}

	return false;
}
1604d0e79aa3SJason Evans
1605b7eaed25SJason Evans static unsigned
malloc_narenas_default(void)1606b7eaed25SJason Evans malloc_narenas_default(void) {
1607b7eaed25SJason Evans assert(ncpus > 0);
1608a4bd5210SJason Evans /*
1609a4bd5210SJason Evans * For SMP systems, create more than one arena per CPU by
1610a4bd5210SJason Evans * default.
1611a4bd5210SJason Evans */
1612b7eaed25SJason Evans if (ncpus > 1) {
1613b7eaed25SJason Evans return ncpus << 2;
1614b7eaed25SJason Evans } else {
1615b7eaed25SJason Evans return 1;
1616a4bd5210SJason Evans }
1617b7eaed25SJason Evans }
1618b7eaed25SJason Evans
1619b7eaed25SJason Evans static percpu_arena_mode_t
percpu_arena_as_initialized(percpu_arena_mode_t mode)1620b7eaed25SJason Evans percpu_arena_as_initialized(percpu_arena_mode_t mode) {
1621b7eaed25SJason Evans assert(!malloc_initialized());
1622b7eaed25SJason Evans assert(mode <= percpu_arena_disabled);
1623b7eaed25SJason Evans
1624b7eaed25SJason Evans if (mode != percpu_arena_disabled) {
1625b7eaed25SJason Evans mode += percpu_arena_mode_enabled_base;
1626b7eaed25SJason Evans }
1627b7eaed25SJason Evans
1628b7eaed25SJason Evans return mode;
1629b7eaed25SJason Evans }
1630b7eaed25SJason Evans
1631b7eaed25SJason Evans static bool
malloc_init_narenas(void)1632b7eaed25SJason Evans malloc_init_narenas(void) {
1633b7eaed25SJason Evans assert(ncpus > 0);
1634b7eaed25SJason Evans
1635b7eaed25SJason Evans if (opt_percpu_arena != percpu_arena_disabled) {
1636b7eaed25SJason Evans if (!have_percpu_arena || malloc_getcpu() < 0) {
1637b7eaed25SJason Evans opt_percpu_arena = percpu_arena_disabled;
1638b7eaed25SJason Evans malloc_printf("<jemalloc>: perCPU arena getcpu() not "
1639b7eaed25SJason Evans "available. Setting narenas to %u.\n", opt_narenas ?
1640b7eaed25SJason Evans opt_narenas : malloc_narenas_default());
1641b7eaed25SJason Evans if (opt_abort) {
1642b7eaed25SJason Evans abort();
1643b7eaed25SJason Evans }
1644b7eaed25SJason Evans } else {
1645b7eaed25SJason Evans if (ncpus >= MALLOCX_ARENA_LIMIT) {
1646b7eaed25SJason Evans malloc_printf("<jemalloc>: narenas w/ percpu"
1647b7eaed25SJason Evans "arena beyond limit (%d)\n", ncpus);
1648b7eaed25SJason Evans if (opt_abort) {
1649b7eaed25SJason Evans abort();
1650b7eaed25SJason Evans }
1651b7eaed25SJason Evans return true;
1652b7eaed25SJason Evans }
1653b7eaed25SJason Evans /* NB: opt_percpu_arena isn't fully initialized yet. */
1654b7eaed25SJason Evans if (percpu_arena_as_initialized(opt_percpu_arena) ==
1655b7eaed25SJason Evans per_phycpu_arena && ncpus % 2 != 0) {
1656b7eaed25SJason Evans malloc_printf("<jemalloc>: invalid "
1657b7eaed25SJason Evans "configuration -- per physical CPU arena "
1658b7eaed25SJason Evans "with odd number (%u) of CPUs (no hyper "
1659b7eaed25SJason Evans "threading?).\n", ncpus);
1660b7eaed25SJason Evans if (opt_abort)
1661b7eaed25SJason Evans abort();
1662b7eaed25SJason Evans }
1663b7eaed25SJason Evans unsigned n = percpu_arena_ind_limit(
1664b7eaed25SJason Evans percpu_arena_as_initialized(opt_percpu_arena));
1665b7eaed25SJason Evans if (opt_narenas < n) {
1666b7eaed25SJason Evans /*
1667b7eaed25SJason Evans * If narenas is specified with percpu_arena
1668b7eaed25SJason Evans * enabled, actual narenas is set as the greater
1669b7eaed25SJason Evans * of the two. percpu_arena_choose will be free
1670b7eaed25SJason Evans * to use any of the arenas based on CPU
1671b7eaed25SJason Evans * id. This is conservative (at a small cost)
1672b7eaed25SJason Evans * but ensures correctness.
1673b7eaed25SJason Evans *
1674b7eaed25SJason Evans * If for some reason the ncpus determined at
1675b7eaed25SJason Evans * boot is not the actual number (e.g. because
1676b7eaed25SJason Evans * of affinity setting from numactl), reserving
1677b7eaed25SJason Evans * narenas this way provides a workaround for
1678b7eaed25SJason Evans * percpu_arena.
1679b7eaed25SJason Evans */
1680b7eaed25SJason Evans opt_narenas = n;
1681b7eaed25SJason Evans }
1682b7eaed25SJason Evans }
1683b7eaed25SJason Evans }
1684b7eaed25SJason Evans if (opt_narenas == 0) {
1685b7eaed25SJason Evans opt_narenas = malloc_narenas_default();
1686b7eaed25SJason Evans }
1687b7eaed25SJason Evans assert(opt_narenas > 0);
1688b7eaed25SJason Evans
168982872ac0SJason Evans narenas_auto = opt_narenas;
1690a4bd5210SJason Evans /*
1691df0d881dSJason Evans * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
1692a4bd5210SJason Evans */
1693b7eaed25SJason Evans if (narenas_auto >= MALLOCX_ARENA_LIMIT) {
1694b7eaed25SJason Evans narenas_auto = MALLOCX_ARENA_LIMIT - 1;
1695a4bd5210SJason Evans malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
169682872ac0SJason Evans narenas_auto);
1697a4bd5210SJason Evans }
1698df0d881dSJason Evans narenas_total_set(narenas_auto);
1699*c5ad8142SEric van Gyzen if (arena_init_huge()) {
1700*c5ad8142SEric van Gyzen narenas_total_inc();
1701*c5ad8142SEric van Gyzen }
1702*c5ad8142SEric van Gyzen manual_arena_base = narenas_total_get();
1703a4bd5210SJason Evans
1704b7eaed25SJason Evans return false;
1705b7eaed25SJason Evans }
1706b7eaed25SJason Evans
/*
 * Commit opt_percpu_arena to its post-initialization representation (see
 * percpu_arena_as_initialized()).
 */
static void
malloc_init_percpu(void) {
	opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena);
}
1711b7eaed25SJason Evans
/*
 * Final initialization step: boot the mutex subsystem, publish the fully
 * initialized state, and compute derived fast-path flags.  Returns true on
 * failure.
 */
static bool
malloc_init_hard_finish(void) {
	if (malloc_mutex_boot()) {
		return true;
	}

	malloc_init_state = malloc_init_initialized;
	/* Recompute slow-path flags now that all options are final. */
	malloc_slow_flag_init();

	return false;
}
1723b7eaed25SJason Evans
1724b7eaed25SJason Evans static void
malloc_init_hard_cleanup(tsdn_t * tsdn,bool reentrancy_set)1725b7eaed25SJason Evans malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) {
1726b7eaed25SJason Evans malloc_mutex_assert_owner(tsdn, &init_lock);
1727b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &init_lock);
1728b7eaed25SJason Evans if (reentrancy_set) {
1729b7eaed25SJason Evans assert(!tsdn_null(tsdn));
1730b7eaed25SJason Evans tsd_t *tsd = tsdn_tsd(tsdn);
1731b7eaed25SJason Evans assert(tsd_reentrancy_level_get(tsd) > 0);
1732b7eaed25SJason Evans post_reentrancy(tsd);
1733b7eaed25SJason Evans }
1734d0e79aa3SJason Evans }
1735d0e79aa3SJason Evans
/*
 * Full (slow-path) initialization.  Orchestrates the phases: a0 bootstrap,
 * tsd boot, recursion-capable subsystems, narenas/prof setup, and finish.
 * init_lock is dropped across the recursible phase and re-acquired after.
 * Returns true on failure.
 */
static bool
malloc_init_hard(void) {
	tsd_t *tsd;

#if defined(_WIN32) && _WIN32_WINNT < 0x0600
	_init_init_lock();
#endif
	malloc_mutex_lock(TSDN_NULL, &init_lock);

/* Unlock (and undo reentrancy, if set) before returning. */
#define UNLOCK_RETURN(tsdn, ret, reentrancy)		\
	malloc_init_hard_cleanup(tsdn, reentrancy);	\
	return ret;

	if (!malloc_init_hard_needed()) {
		UNLOCK_RETURN(TSDN_NULL, false, false)
	}

	if (malloc_init_state != malloc_init_a0_initialized &&
	    malloc_init_hard_a0_locked()) {
		UNLOCK_RETURN(TSDN_NULL, true, false)
	}

	malloc_mutex_unlock(TSDN_NULL, &init_lock);
	/* Recursive allocation relies on functional tsd. */
	tsd = malloc_tsd_boot0();
	if (tsd == NULL) {
		return true;
	}
	if (malloc_init_hard_recursible()) {
		return true;
	}

	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);
	/* Set reentrancy level to 1 during init. */
	pre_reentrancy(tsd, NULL);
	/* Initialize narenas before prof_boot2 (for allocation). */
	if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) {
		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
	}
	if (config_prof && prof_boot2(tsd)) {
		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
	}

	malloc_init_percpu();

	if (malloc_init_hard_finish()) {
		UNLOCK_RETURN(tsd_tsdn(tsd), true, true)
	}
	post_reentrancy(tsd);
	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);

	witness_assert_lockless(witness_tsd_tsdn(
	    tsd_witness_tsdp_get_unsafe(tsd)));
	malloc_tsd_boot1();
	/* Update TSD after tsd_boot1. */
	tsd = tsd_fetch();
	if (opt_background_thread) {
		assert(have_background_thread);
		/*
		 * Need to finish init & unlock first before creating background
		 * threads (pthread_create depends on malloc). ctl_init (which
		 * sets isthreaded) needs to be called without holding any lock.
		 */
		background_thread_ctl_init(tsd_tsdn(tsd));
		if (background_thread_create(tsd, 0)) {
			return true;
		}
	}
#undef UNLOCK_RETURN
	return false;
}
1807a4bd5210SJason Evans
1808a4bd5210SJason Evans /*
1809a4bd5210SJason Evans * End initialization functions.
1810a4bd5210SJason Evans */
1811a4bd5210SJason Evans /******************************************************************************/
1812a4bd5210SJason Evans /*
1813b7eaed25SJason Evans * Begin allocation-path internal functions and data structures.
1814a4bd5210SJason Evans */
1815a4bd5210SJason Evans
/*
 * Settings determined by the documented behavior of the allocation functions.
 * One static_opts_t is filled in per public entry point (malloc, calloc,
 * posix_memalign, ...) and drives the shared implementation.
 */
typedef struct static_opts_s static_opts_t;
struct static_opts_s {
	/* Whether or not allocation size may overflow. */
	bool may_overflow;

	/*
	 * Whether or not allocations (with alignment) of size 0 should be
	 * treated as size 1.
	 */
	bool bump_empty_aligned_alloc;
	/*
	 * Whether to assert that allocations are not of size 0 (after any
	 * bumping).
	 */
	bool assert_nonempty_alloc;

	/*
	 * Whether or not to modify the 'result' argument to malloc in case of
	 * error.
	 */
	bool null_out_result_on_error;
	/* Whether to set errno when we encounter an error condition. */
	bool set_errno_on_error;

	/*
	 * The minimum valid alignment for functions requesting aligned storage.
	 */
	size_t min_alignment;

	/* The error string to use if we oom. */
	const char *oom_string;
	/* The error string to use if the passed-in alignment is invalid. */
	const char *invalid_alignment_string;

	/*
	 * False if we're configured to skip some time-consuming operations.
	 *
	 * This isn't really a malloc "behavior", but it acts as a useful
	 * summary of several other static (or at least, static after program
	 * initialization) options.
	 */
	bool slow;
	/*
	 * Return size.  Whether the entry point reports the usable size back
	 * to the caller (via dynamic_opts_t's usize field).
	 */
	bool usize;
};
1866f921d10fSJason Evans
1867b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void
static_opts_init(static_opts_t * static_opts)1868b7eaed25SJason Evans static_opts_init(static_opts_t *static_opts) {
1869b7eaed25SJason Evans static_opts->may_overflow = false;
1870*c5ad8142SEric van Gyzen static_opts->bump_empty_aligned_alloc = false;
1871b7eaed25SJason Evans static_opts->assert_nonempty_alloc = false;
1872b7eaed25SJason Evans static_opts->null_out_result_on_error = false;
1873b7eaed25SJason Evans static_opts->set_errno_on_error = false;
1874b7eaed25SJason Evans static_opts->min_alignment = 0;
1875b7eaed25SJason Evans static_opts->oom_string = "";
1876b7eaed25SJason Evans static_opts->invalid_alignment_string = "";
1877b7eaed25SJason Evans static_opts->slow = false;
1878*c5ad8142SEric van Gyzen static_opts->usize = false;
1879f921d10fSJason Evans }
1880f921d10fSJason Evans
/*
 * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we
 * should have one constant here per magic value there. Note however that the
 * representations need not be related.
 */
#define TCACHE_IND_NONE ((unsigned)-1)
#define TCACHE_IND_AUTOMATIC ((unsigned)-2)
#define ARENA_IND_AUTOMATIC ((unsigned)-1)

/*
 * Per-call settings: the values that vary between individual allocation
 * requests (as opposed to the per-entry-point static_opts_t).
 */
typedef struct dynamic_opts_s dynamic_opts_t;
struct dynamic_opts_s {
	void **result;		/* Out: receives the allocated pointer. */
	size_t usize;		/* Out: usable size, when sopts->usize is set. */
	size_t num_items;	/* calloc-style element count — TODO confirm. */
	size_t item_size;	/* Size of each item (total = num_items * item_size). */
	size_t alignment;	/* 0 means no alignment requirement. */
	bool zero;		/* Request zero-filled memory. */
	unsigned tcache_ind;	/* Tcache index, or TCACHE_IND_{NONE,AUTOMATIC}. */
	unsigned arena_ind;	/* Arena index, or ARENA_IND_AUTOMATIC. */
};
1901b7eaed25SJason Evans
1902b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void
dynamic_opts_init(dynamic_opts_t * dynamic_opts)1903b7eaed25SJason Evans dynamic_opts_init(dynamic_opts_t *dynamic_opts) {
1904b7eaed25SJason Evans dynamic_opts->result = NULL;
1905*c5ad8142SEric van Gyzen dynamic_opts->usize = 0;
1906b7eaed25SJason Evans dynamic_opts->num_items = 0;
1907b7eaed25SJason Evans dynamic_opts->item_size = 0;
1908b7eaed25SJason Evans dynamic_opts->alignment = 0;
1909b7eaed25SJason Evans dynamic_opts->zero = false;
1910b7eaed25SJason Evans dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC;
1911b7eaed25SJason Evans dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC;
19121f0a49e8SJason Evans }
19131f0a49e8SJason Evans
/*
 * Perform an unsampled allocation: resolve the tcache and arena from dopts,
 * then allocate either aligned (ipalloct) or plain (iallocztm) memory.
 * ind is ignored if dopts->alignment > 0.
 */
JEMALLOC_ALWAYS_INLINE void *
imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
    size_t size, size_t usize, szind_t ind) {
	tcache_t *tcache;
	arena_t *arena;

	/* Fill in the tcache. */
	if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) {
		if (likely(!sopts->slow)) {
			/* Getting tcache ptr unconditionally. */
			tcache = tsd_tcachep_get(tsd);
			assert(tcache == tcache_get(tsd));
		} else {
			/* Slow path re-derives the tcache via tcache_get. */
			tcache = tcache_get(tsd);
		}
	} else if (dopts->tcache_ind == TCACHE_IND_NONE) {
		/* Caller explicitly opted out of tcache use. */
		tcache = NULL;
	} else {
		/* Explicit tcache index supplied by the caller. */
		tcache = tcaches_get(tsd, dopts->tcache_ind);
	}

	/* Fill in the arena. */
	if (dopts->arena_ind == ARENA_IND_AUTOMATIC) {
		/*
		 * In case of automatic arena management, we defer arena
		 * computation until as late as we can, hoping to fill the
		 * allocation out of the tcache.
		 */
		arena = NULL;
	} else {
		arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true);
	}

	if (unlikely(dopts->alignment != 0)) {
		/* Aligned path uses usize (already alignment-adjusted). */
		return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment,
		    dopts->zero, tcache, arena);
	}

	return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false,
	    arena, sopts->slow);
}
19561f0a49e8SJason Evans
1957b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void *
imalloc_sample(static_opts_t * sopts,dynamic_opts_t * dopts,tsd_t * tsd,size_t usize,szind_t ind)1958b7eaed25SJason Evans imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd,
1959b7eaed25SJason Evans size_t usize, szind_t ind) {
1960b7eaed25SJason Evans void *ret;
1961b7eaed25SJason Evans
1962b7eaed25SJason Evans /*
1963b7eaed25SJason Evans * For small allocations, sampling bumps the usize. If so, we allocate
1964b7eaed25SJason Evans * from the ind_large bucket.
1965b7eaed25SJason Evans */
1966b7eaed25SJason Evans szind_t ind_large;
1967b7eaed25SJason Evans size_t bumped_usize = usize;
1968b7eaed25SJason Evans
1969*c5ad8142SEric van Gyzen if (usize <= SC_SMALL_MAXCLASS) {
1970*c5ad8142SEric van Gyzen assert(((dopts->alignment == 0) ?
1971*c5ad8142SEric van Gyzen sz_s2u(SC_LARGE_MINCLASS) :
1972*c5ad8142SEric van Gyzen sz_sa2u(SC_LARGE_MINCLASS, dopts->alignment))
1973*c5ad8142SEric van Gyzen == SC_LARGE_MINCLASS);
1974*c5ad8142SEric van Gyzen ind_large = sz_size2index(SC_LARGE_MINCLASS);
1975*c5ad8142SEric van Gyzen bumped_usize = sz_s2u(SC_LARGE_MINCLASS);
1976b7eaed25SJason Evans ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize,
1977b7eaed25SJason Evans bumped_usize, ind_large);
1978df0d881dSJason Evans if (unlikely(ret == NULL)) {
1979b7eaed25SJason Evans return NULL;
1980b7eaed25SJason Evans }
1981b7eaed25SJason Evans arena_prof_promote(tsd_tsdn(tsd), ret, usize);
1982b7eaed25SJason Evans } else {
1983b7eaed25SJason Evans ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind);
1984b7eaed25SJason Evans }
1985b7eaed25SJason Evans
1986b7eaed25SJason Evans return ret;
1987b7eaed25SJason Evans }
1988b7eaed25SJason Evans
1989b7eaed25SJason Evans /*
1990b7eaed25SJason Evans * Returns true if the allocation will overflow, and false otherwise. Sets
1991b7eaed25SJason Evans * *size to the product either way.
1992b7eaed25SJason Evans */
1993b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE bool
compute_size_with_overflow(bool may_overflow,dynamic_opts_t * dopts,size_t * size)1994b7eaed25SJason Evans compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts,
1995b7eaed25SJason Evans size_t *size) {
1996b7eaed25SJason Evans /*
1997b7eaed25SJason Evans * This function is just num_items * item_size, except that we may have
1998b7eaed25SJason Evans * to check for overflow.
1999b7eaed25SJason Evans */
2000b7eaed25SJason Evans
2001b7eaed25SJason Evans if (!may_overflow) {
2002b7eaed25SJason Evans assert(dopts->num_items == 1);
2003b7eaed25SJason Evans *size = dopts->item_size;
2004b7eaed25SJason Evans return false;
2005b7eaed25SJason Evans }
2006b7eaed25SJason Evans
2007b7eaed25SJason Evans /* A size_t with its high-half bits all set to 1. */
20080ef50b4eSJason Evans static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
2009b7eaed25SJason Evans
2010b7eaed25SJason Evans *size = dopts->item_size * dopts->num_items;
2011b7eaed25SJason Evans
2012b7eaed25SJason Evans if (unlikely(*size == 0)) {
2013b7eaed25SJason Evans return (dopts->num_items != 0 && dopts->item_size != 0);
2014b7eaed25SJason Evans }
2015b7eaed25SJason Evans
2016b7eaed25SJason Evans /*
2017b7eaed25SJason Evans * We got a non-zero size, but we don't know if we overflowed to get
2018b7eaed25SJason Evans * there. To avoid having to do a divide, we'll be clever and note that
2019b7eaed25SJason Evans * if both A and B can be represented in N/2 bits, then their product
2020b7eaed25SJason Evans * can be represented in N bits (without the possibility of overflow).
2021b7eaed25SJason Evans */
2022b7eaed25SJason Evans if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
2023b7eaed25SJason Evans return false;
2024b7eaed25SJason Evans }
2025b7eaed25SJason Evans if (likely(*size / dopts->item_size == dopts->num_items)) {
2026b7eaed25SJason Evans return false;
2027b7eaed25SJason Evans }
2028b7eaed25SJason Evans return true;
2029b7eaed25SJason Evans }
2030b7eaed25SJason Evans
/*
 * imalloc_body(): the common driver behind every allocation entry point
 * (malloc, calloc, posix_memalign, aligned_alloc, ...).  It computes the
 * requested size (with optional overflow checking), validates alignment,
 * consults the profiler when enabled, performs the allocation, and handles
 * the OOM / invalid-alignment error paths as directed by sopts.  Returns 0
 * on success or an errno-style error code.
 */
2031b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE int
imalloc_body(static_opts_t * sopts,dynamic_opts_t * dopts,tsd_t * tsd)2032b7eaed25SJason Evans imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
2033b7eaed25SJason Evans /* Where the actual allocated memory will live. */
2034b7eaed25SJason Evans void *allocation = NULL;
2035b7eaed25SJason Evans /* Filled in by compute_size_with_overflow below. */
2036b7eaed25SJason Evans size_t size = 0;
2037b7eaed25SJason Evans /*
2038b7eaed25SJason Evans * For unaligned allocations, we need only ind. For aligned
2039b7eaed25SJason Evans * allocations, or in case of stats or profiling we need usize.
2040b7eaed25SJason Evans *
2041b7eaed25SJason Evans * These are actually dead stores, in that their values are reset before
2042b7eaed25SJason Evans * any branch on their value is taken. Sometimes though, it's
2043b7eaed25SJason Evans * convenient to pass them as arguments before this point. To avoid
2044b7eaed25SJason Evans * undefined behavior then, we initialize them with dummy stores.
2045b7eaed25SJason Evans */
2046b7eaed25SJason Evans szind_t ind = 0;
2047b7eaed25SJason Evans size_t usize = 0;
2048b7eaed25SJason Evans
2049b7eaed25SJason Evans /* Reentrancy is only checked on slow path. */
2050b7eaed25SJason Evans int8_t reentrancy_level;
2051b7eaed25SJason Evans
2052b7eaed25SJason Evans /* Compute the amount of memory the user wants. */
2053b7eaed25SJason Evans if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
2054b7eaed25SJason Evans &size))) {
2055b7eaed25SJason Evans goto label_oom;
2056b7eaed25SJason Evans }
2057b7eaed25SJason Evans
/* Alignment must meet the entry point's minimum and be a power of two. */
2058b7eaed25SJason Evans if (unlikely(dopts->alignment < sopts->min_alignment
2059b7eaed25SJason Evans || (dopts->alignment & (dopts->alignment - 1)) != 0)) {
2060b7eaed25SJason Evans goto label_invalid_alignment;
2061b7eaed25SJason Evans }
2062b7eaed25SJason Evans
2063b7eaed25SJason Evans /* This is the beginning of the "core" algorithm. */
2064b7eaed25SJason Evans
/*
 * Size-class computation: unaligned requests go through the size index
 * (ind); aligned requests compute usize directly via sz_sa2u().  Either
 * way usize is echoed into dopts->usize for interested callers.
 */
2065b7eaed25SJason Evans if (dopts->alignment == 0) {
2066b7eaed25SJason Evans ind = sz_size2index(size);
2067*c5ad8142SEric van Gyzen if (unlikely(ind >= SC_NSIZES)) {
2068b7eaed25SJason Evans goto label_oom;
2069b7eaed25SJason Evans }
2070*c5ad8142SEric van Gyzen if (config_stats || (config_prof && opt_prof) || sopts->usize) {
2071b7eaed25SJason Evans usize = sz_index2size(ind);
2072*c5ad8142SEric van Gyzen dopts->usize = usize;
2073*c5ad8142SEric van Gyzen assert(usize > 0 && usize
2074*c5ad8142SEric van Gyzen <= SC_LARGE_MAXCLASS);
2075b7eaed25SJason Evans }
2076b7eaed25SJason Evans } else {
2077*c5ad8142SEric van Gyzen if (sopts->bump_empty_aligned_alloc) {
2078*c5ad8142SEric van Gyzen if (unlikely(size == 0)) {
2079*c5ad8142SEric van Gyzen size = 1;
2080*c5ad8142SEric van Gyzen }
2081*c5ad8142SEric van Gyzen }
2082b7eaed25SJason Evans usize = sz_sa2u(size, dopts->alignment);
2083*c5ad8142SEric van Gyzen dopts->usize = usize;
2084*c5ad8142SEric van Gyzen if (unlikely(usize == 0
2085*c5ad8142SEric van Gyzen || usize > SC_LARGE_MAXCLASS)) {
2086b7eaed25SJason Evans goto label_oom;
2087b7eaed25SJason Evans }
2088b7eaed25SJason Evans }
2089*c5ad8142SEric van Gyzen /* Validate the user input. */
2090*c5ad8142SEric van Gyzen if (sopts->assert_nonempty_alloc) {
2091*c5ad8142SEric van Gyzen assert (size != 0);
2092*c5ad8142SEric van Gyzen }
2093b7eaed25SJason Evans
2094b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd));
2095b7eaed25SJason Evans
2096b7eaed25SJason Evans /*
2097b7eaed25SJason Evans * If we need to handle reentrancy, we can do it out of a
2098b7eaed25SJason Evans * known-initialized arena (i.e. arena 0).
2099b7eaed25SJason Evans */
2100b7eaed25SJason Evans reentrancy_level = tsd_reentrancy_level_get(tsd);
2101b7eaed25SJason Evans if (sopts->slow && unlikely(reentrancy_level > 0)) {
2102b7eaed25SJason Evans /*
2103b7eaed25SJason Evans * We should never specify particular arenas or tcaches from
2104b7eaed25SJason Evans * within our internal allocations.
2105b7eaed25SJason Evans */
2106b7eaed25SJason Evans assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC ||
2107b7eaed25SJason Evans dopts->tcache_ind == TCACHE_IND_NONE);
21088b2f5aafSJason Evans assert(dopts->arena_ind == ARENA_IND_AUTOMATIC);
2109b7eaed25SJason Evans dopts->tcache_ind = TCACHE_IND_NONE;
2110b7eaed25SJason Evans /* We know that arena 0 has already been initialized. */
2111b7eaed25SJason Evans dopts->arena_ind = 0;
2112b7eaed25SJason Evans }
2113b7eaed25SJason Evans
2114b7eaed25SJason Evans /* If profiling is on, get our profiling context. */
2115b7eaed25SJason Evans if (config_prof && opt_prof) {
2116b7eaed25SJason Evans /*
2117b7eaed25SJason Evans * Note that if we're going down this path, usize must have been
2118b7eaed25SJason Evans * initialized in the previous if statement.
2119b7eaed25SJason Evans */
2120b7eaed25SJason Evans prof_tctx_t *tctx = prof_alloc_prep(
2121b7eaed25SJason Evans tsd, usize, prof_active_get_unlocked(), true);
2122b7eaed25SJason Evans
2123b7eaed25SJason Evans alloc_ctx_t alloc_ctx;
/* (uintptr_t)1U is the "not sampled" tctx sentinel. */
2124b7eaed25SJason Evans if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
2125*c5ad8142SEric van Gyzen alloc_ctx.slab = (usize
2126*c5ad8142SEric van Gyzen <= SC_SMALL_MAXCLASS);
2127b7eaed25SJason Evans allocation = imalloc_no_sample(
2128b7eaed25SJason Evans sopts, dopts, tsd, usize, usize, ind);
2129b7eaed25SJason Evans } else if ((uintptr_t)tctx > (uintptr_t)1U) {
2130b7eaed25SJason Evans /*
2131b7eaed25SJason Evans * Note that ind might still be 0 here. This is fine;
2132b7eaed25SJason Evans * imalloc_sample ignores ind if dopts->alignment > 0.
2133b7eaed25SJason Evans */
2134b7eaed25SJason Evans allocation = imalloc_sample(
2135b7eaed25SJason Evans sopts, dopts, tsd, usize, ind);
2136b7eaed25SJason Evans alloc_ctx.slab = false;
2137b7eaed25SJason Evans } else {
2138b7eaed25SJason Evans allocation = NULL;
2139b7eaed25SJason Evans }
2140b7eaed25SJason Evans
2141b7eaed25SJason Evans if (unlikely(allocation == NULL)) {
2142b7eaed25SJason Evans prof_alloc_rollback(tsd, tctx, true);
2143b7eaed25SJason Evans goto label_oom;
2144b7eaed25SJason Evans }
2145b7eaed25SJason Evans prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx);
2146b7eaed25SJason Evans } else {
2147b7eaed25SJason Evans /*
2148b7eaed25SJason Evans * If dopts->alignment > 0, then ind is still 0, but usize was
2149b7eaed25SJason Evans * computed in the previous if statement. Down the positive
2150b7eaed25SJason Evans * alignment path, imalloc_no_sample ignores ind and size
2151b7eaed25SJason Evans * (relying only on usize).
2152b7eaed25SJason Evans */
2153b7eaed25SJason Evans allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize,
2154b7eaed25SJason Evans ind);
2155b7eaed25SJason Evans if (unlikely(allocation == NULL)) {
2156b7eaed25SJason Evans goto label_oom;
2157b7eaed25SJason Evans }
2158b7eaed25SJason Evans }
2159b7eaed25SJason Evans
2160b7eaed25SJason Evans /*
2161b7eaed25SJason Evans * Allocation has been done at this point. We still have some
2162b7eaed25SJason Evans * post-allocation work to do though.
2163b7eaed25SJason Evans */
2164b7eaed25SJason Evans assert(dopts->alignment == 0
2165b7eaed25SJason Evans || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0));
2166b7eaed25SJason Evans
2167b7eaed25SJason Evans if (config_stats) {
2168b7eaed25SJason Evans assert(usize == isalloc(tsd_tsdn(tsd), allocation));
2169b7eaed25SJason Evans *tsd_thread_allocatedp_get(tsd) += usize;
2170b7eaed25SJason Evans }
2171b7eaed25SJason Evans
2172b7eaed25SJason Evans if (sopts->slow) {
2173b7eaed25SJason Evans UTRACE(0, size, allocation);
2174b7eaed25SJason Evans }
2175b7eaed25SJason Evans
2176b7eaed25SJason Evans /* Success! */
2177b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd));
2178b7eaed25SJason Evans *dopts->result = allocation;
2179b7eaed25SJason Evans return 0;
2180b7eaed25SJason Evans
/* Error path: out of memory (or size overflow).  Behavior is sopts-driven. */
2181b7eaed25SJason Evans label_oom:
2182b7eaed25SJason Evans if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) {
2183b7eaed25SJason Evans malloc_write(sopts->oom_string);
2184df0d881dSJason Evans abort();
2185df0d881dSJason Evans }
2186b7eaed25SJason Evans
2187b7eaed25SJason Evans if (sopts->slow) {
2188b7eaed25SJason Evans UTRACE(NULL, size, NULL);
2189b7eaed25SJason Evans }
2190b7eaed25SJason Evans
2191b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd));
2192b7eaed25SJason Evans
2193b7eaed25SJason Evans if (sopts->set_errno_on_error) {
2194df0d881dSJason Evans set_errno(ENOMEM);
2195df0d881dSJason Evans }
2196b7eaed25SJason Evans
2197b7eaed25SJason Evans if (sopts->null_out_result_on_error) {
2198b7eaed25SJason Evans *dopts->result = NULL;
2199df0d881dSJason Evans }
2200b7eaed25SJason Evans
2201b7eaed25SJason Evans return ENOMEM;
2202b7eaed25SJason Evans
2203b7eaed25SJason Evans /*
2204b7eaed25SJason Evans * This label is only jumped to by one goto; we move it out of line
2205b7eaed25SJason Evans * anyways to avoid obscuring the non-error paths, and for symmetry with
2206b7eaed25SJason Evans * the oom case.
2207b7eaed25SJason Evans */
2208b7eaed25SJason Evans label_invalid_alignment:
2209b7eaed25SJason Evans if (config_xmalloc && unlikely(opt_xmalloc)) {
2210b7eaed25SJason Evans malloc_write(sopts->invalid_alignment_string);
2211b7eaed25SJason Evans abort();
2212d0e79aa3SJason Evans }
2213d0e79aa3SJason Evans
2214b7eaed25SJason Evans if (sopts->set_errno_on_error) {
2215b7eaed25SJason Evans set_errno(EINVAL);
2216b7eaed25SJason Evans }
2217b7eaed25SJason Evans
2218b7eaed25SJason Evans if (sopts->slow) {
2219b7eaed25SJason Evans UTRACE(NULL, size, NULL);
2220b7eaed25SJason Evans }
2221b7eaed25SJason Evans
2222b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd));
2223b7eaed25SJason Evans
2224b7eaed25SJason Evans if (sopts->null_out_result_on_error) {
2225b7eaed25SJason Evans *dopts->result = NULL;
2226b7eaed25SJason Evans }
2227b7eaed25SJason Evans
2228b7eaed25SJason Evans return EINVAL;
2229b7eaed25SJason Evans }
2230b7eaed25SJason Evans
2231*c5ad8142SEric van Gyzen JEMALLOC_ALWAYS_INLINE bool
imalloc_init_check(static_opts_t * sopts,dynamic_opts_t * dopts)2232*c5ad8142SEric van Gyzen imalloc_init_check(static_opts_t *sopts, dynamic_opts_t *dopts) {
2233b7eaed25SJason Evans if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) {
2234b7eaed25SJason Evans if (config_xmalloc && unlikely(opt_xmalloc)) {
2235b7eaed25SJason Evans malloc_write(sopts->oom_string);
2236b7eaed25SJason Evans abort();
2237b7eaed25SJason Evans }
2238b7eaed25SJason Evans UTRACE(NULL, dopts->num_items * dopts->item_size, NULL);
2239b7eaed25SJason Evans set_errno(ENOMEM);
2240b7eaed25SJason Evans *dopts->result = NULL;
2241b7eaed25SJason Evans
2242*c5ad8142SEric van Gyzen return false;
2243*c5ad8142SEric van Gyzen }
2244*c5ad8142SEric van Gyzen
2245*c5ad8142SEric van Gyzen return true;
2246*c5ad8142SEric van Gyzen }
2247*c5ad8142SEric van Gyzen
2248*c5ad8142SEric van Gyzen /* Returns the errno-style error code of the allocation. */
2249*c5ad8142SEric van Gyzen JEMALLOC_ALWAYS_INLINE int
imalloc(static_opts_t * sopts,dynamic_opts_t * dopts)2250*c5ad8142SEric van Gyzen imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) {
/*
 * NOTE(review): tsd_get_allocates() presumably reports whether fetching
 * tsd can itself allocate; when it can, the bootstrap check must happen
 * before tsd_fetch() below, otherwise it is deferred to the slow path --
 * confirm against the tsd implementation.
 */
2251*c5ad8142SEric van Gyzen if (tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) {
2252b7eaed25SJason Evans return ENOMEM;
2253b7eaed25SJason Evans }
2254b7eaed25SJason Evans
2255b7eaed25SJason Evans /* We always need the tsd. Let's grab it right away. */
2256b7eaed25SJason Evans tsd_t *tsd = tsd_fetch();
2257b7eaed25SJason Evans assert(tsd);
2258b7eaed25SJason Evans if (likely(tsd_fast(tsd))) {
2259b7eaed25SJason Evans /* Fast and common path. */
2260b7eaed25SJason Evans tsd_assert_fast(tsd);
2261b7eaed25SJason Evans sopts->slow = false;
2262b7eaed25SJason Evans return imalloc_body(sopts, dopts, tsd);
2263b7eaed25SJason Evans } else {
/* Slow path: the init check runs here when tsd itself doesn't allocate. */
2264*c5ad8142SEric van Gyzen if (!tsd_get_allocates() && !imalloc_init_check(sopts, dopts)) {
2265*c5ad8142SEric van Gyzen return ENOMEM;
2266*c5ad8142SEric van Gyzen }
2267*c5ad8142SEric van Gyzen
2268b7eaed25SJason Evans sopts->slow = true;
2269b7eaed25SJason Evans return imalloc_body(sopts, dopts, tsd);
2270b7eaed25SJason Evans }
2271b7eaed25SJason Evans }
2272b7eaed25SJason Evans
2273*c5ad8142SEric van Gyzen JEMALLOC_NOINLINE
2274*c5ad8142SEric van Gyzen void *
malloc_default(size_t size)2275*c5ad8142SEric van Gyzen malloc_default(size_t size) {
2276a4bd5210SJason Evans void *ret;
2277b7eaed25SJason Evans static_opts_t sopts;
2278b7eaed25SJason Evans dynamic_opts_t dopts;
2279a4bd5210SJason Evans
22800ef50b4eSJason Evans LOG("core.malloc.entry", "size: %zu", size);
22810ef50b4eSJason Evans
2282b7eaed25SJason Evans static_opts_init(&sopts);
2283b7eaed25SJason Evans dynamic_opts_init(&dopts);
2284a4bd5210SJason Evans
2285b7eaed25SJason Evans sopts.null_out_result_on_error = true;
2286b7eaed25SJason Evans sopts.set_errno_on_error = true;
2287b7eaed25SJason Evans sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n";
2288df0d881dSJason Evans
2289b7eaed25SJason Evans dopts.result = &ret;
2290b7eaed25SJason Evans dopts.num_items = 1;
2291b7eaed25SJason Evans dopts.item_size = size;
2292a4bd5210SJason Evans
2293b7eaed25SJason Evans imalloc(&sopts, &dopts);
2294*c5ad8142SEric van Gyzen /*
2295*c5ad8142SEric van Gyzen * Note that this branch gets optimized away -- it immediately follows
2296*c5ad8142SEric van Gyzen * the check on tsd_fast that sets sopts.slow.
2297*c5ad8142SEric van Gyzen */
2298*c5ad8142SEric van Gyzen if (sopts.slow) {
2299*c5ad8142SEric van Gyzen uintptr_t args[3] = {size};
2300*c5ad8142SEric van Gyzen hook_invoke_alloc(hook_alloc_malloc, ret, (uintptr_t)ret, args);
2301*c5ad8142SEric van Gyzen }
2302f921d10fSJason Evans
23030ef50b4eSJason Evans LOG("core.malloc.exit", "result: %p", ret);
23040ef50b4eSJason Evans
2305b7eaed25SJason Evans return ret;
2306a4bd5210SJason Evans }
2307a4bd5210SJason Evans
2308*c5ad8142SEric van Gyzen /******************************************************************************/
2309*c5ad8142SEric van Gyzen /*
2310*c5ad8142SEric van Gyzen * Begin malloc(3)-compatible functions.
2311*c5ad8142SEric van Gyzen */
2312*c5ad8142SEric van Gyzen
2313*c5ad8142SEric van Gyzen /*
2314*c5ad8142SEric van Gyzen * malloc() fastpath.
2315*c5ad8142SEric van Gyzen *
2316*c5ad8142SEric van Gyzen * Fastpath assumes size <= SC_LOOKUP_MAXCLASS, and that we hit
2317*c5ad8142SEric van Gyzen * tcache. If either of these is false, we tail-call to the slowpath,
2318*c5ad8142SEric van Gyzen * malloc_default(). Tail-calling is used to avoid any caller-saved
2319*c5ad8142SEric van Gyzen * registers.
2320*c5ad8142SEric van Gyzen *
2321*c5ad8142SEric van Gyzen * fastpath supports ticker and profiling, both of which will also
2322*c5ad8142SEric van Gyzen * tail-call to the slowpath if they fire.
2323*c5ad8142SEric van Gyzen */
2324*c5ad8142SEric van Gyzen JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2325*c5ad8142SEric van Gyzen void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)2326*c5ad8142SEric van Gyzen JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
2327*c5ad8142SEric van Gyzen je_malloc(size_t size) {
2328*c5ad8142SEric van Gyzen LOG("core.malloc.entry", "size: %zu", size);
2329*c5ad8142SEric van Gyzen
2330*c5ad8142SEric van Gyzen if (tsd_get_allocates() && unlikely(!malloc_initialized())) {
2331*c5ad8142SEric van Gyzen return malloc_default(size);
2332*c5ad8142SEric van Gyzen }
2333*c5ad8142SEric van Gyzen
/*
 * NOTE(review): tsd_get(false) presumably avoids creating tsd on this
 * path; a missing or non-fast tsd falls back to malloc_default() --
 * confirm against the tsd implementation.
 */
2334*c5ad8142SEric van Gyzen tsd_t *tsd = tsd_get(false);
2335*c5ad8142SEric van Gyzen if (unlikely(!tsd || !tsd_fast(tsd) || (size > SC_LOOKUP_MAXCLASS))) {
2336*c5ad8142SEric van Gyzen return malloc_default(size);
2337*c5ad8142SEric van Gyzen }
2338*c5ad8142SEric van Gyzen
2339*c5ad8142SEric van Gyzen tcache_t *tcache = tsd_tcachep_get(tsd);
2340*c5ad8142SEric van Gyzen
/* If the tcache GC ticker fires, let the slow path handle the event. */
2341*c5ad8142SEric van Gyzen if (unlikely(ticker_trytick(&tcache->gc_ticker))) {
2342*c5ad8142SEric van Gyzen return malloc_default(size);
2343*c5ad8142SEric van Gyzen }
2344*c5ad8142SEric van Gyzen
2345*c5ad8142SEric van Gyzen szind_t ind = sz_size2index_lookup(size);
2346*c5ad8142SEric van Gyzen size_t usize;
2347*c5ad8142SEric van Gyzen if (config_stats || config_prof) {
2348*c5ad8142SEric van Gyzen usize = sz_index2size(ind);
2349*c5ad8142SEric van Gyzen }
2350*c5ad8142SEric van Gyzen /* Fast path relies on size being a bin. I.e. SC_LOOKUP_MAXCLASS < SC_SMALL_MAXCLASS */
2351*c5ad8142SEric van Gyzen assert(ind < SC_NBINS);
2352*c5ad8142SEric van Gyzen assert(size <= SC_SMALL_MAXCLASS);
2353*c5ad8142SEric van Gyzen
/*
 * Profiling bookkeeping: decrement the per-thread sampling budget; when
 * it goes negative, defer to the slow path (which handles sampling).
 */
2354*c5ad8142SEric van Gyzen if (config_prof) {
2355*c5ad8142SEric van Gyzen int64_t bytes_until_sample = tsd_bytes_until_sample_get(tsd);
2356*c5ad8142SEric van Gyzen bytes_until_sample -= usize;
2357*c5ad8142SEric van Gyzen tsd_bytes_until_sample_set(tsd, bytes_until_sample);
2358*c5ad8142SEric van Gyzen
2359*c5ad8142SEric van Gyzen if (unlikely(bytes_until_sample < 0)) {
2360*c5ad8142SEric van Gyzen /*
2361*c5ad8142SEric van Gyzen * Avoid a prof_active check on the fastpath.
2362*c5ad8142SEric van Gyzen * If prof_active is false, set bytes_until_sample to
2363*c5ad8142SEric van Gyzen * a large value. If prof_active is set to true,
2364*c5ad8142SEric van Gyzen * bytes_until_sample will be reset.
2365*c5ad8142SEric van Gyzen */
2366*c5ad8142SEric van Gyzen if (!prof_active) {
2367*c5ad8142SEric van Gyzen tsd_bytes_until_sample_set(tsd, SSIZE_MAX);
2368*c5ad8142SEric van Gyzen }
2369*c5ad8142SEric van Gyzen return malloc_default(size);
2370*c5ad8142SEric van Gyzen }
2371*c5ad8142SEric van Gyzen }
2372*c5ad8142SEric van Gyzen
/* Attempt the allocation straight from the thread-cache bin. */
2373*c5ad8142SEric van Gyzen cache_bin_t *bin = tcache_small_bin_get(tcache, ind);
2374*c5ad8142SEric van Gyzen bool tcache_success;
2375*c5ad8142SEric van Gyzen void* ret = cache_bin_alloc_easy(bin, &tcache_success);
2376*c5ad8142SEric van Gyzen
2377*c5ad8142SEric van Gyzen if (tcache_success) {
2378*c5ad8142SEric van Gyzen if (config_stats) {
2379*c5ad8142SEric van Gyzen *tsd_thread_allocatedp_get(tsd) += usize;
2380*c5ad8142SEric van Gyzen bin->tstats.nrequests++;
2381*c5ad8142SEric van Gyzen }
2382*c5ad8142SEric van Gyzen if (config_prof) {
2383*c5ad8142SEric van Gyzen tcache->prof_accumbytes += usize;
2384*c5ad8142SEric van Gyzen }
2385*c5ad8142SEric van Gyzen
2386*c5ad8142SEric van Gyzen LOG("core.malloc.exit", "result: %p", ret);
2387*c5ad8142SEric van Gyzen
2388*c5ad8142SEric van Gyzen /* Fastpath success */
2389*c5ad8142SEric van Gyzen return ret;
2390*c5ad8142SEric van Gyzen }
2391*c5ad8142SEric van Gyzen
2392*c5ad8142SEric van Gyzen return malloc_default(size);
2393*c5ad8142SEric van Gyzen }
2394*c5ad8142SEric van Gyzen
2395d0e79aa3SJason Evans JEMALLOC_EXPORT int JEMALLOC_NOTHROW
2396d0e79aa3SJason Evans JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void ** memptr,size_t alignment,size_t size)2397b7eaed25SJason Evans je_posix_memalign(void **memptr, size_t alignment, size_t size) {
23981f0a49e8SJason Evans int ret;
2399b7eaed25SJason Evans static_opts_t sopts;
2400b7eaed25SJason Evans dynamic_opts_t dopts;
24011f0a49e8SJason Evans
24020ef50b4eSJason Evans LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, "
24030ef50b4eSJason Evans "size: %zu", memptr, alignment, size);
24040ef50b4eSJason Evans
2405b7eaed25SJason Evans static_opts_init(&sopts);
2406b7eaed25SJason Evans dynamic_opts_init(&dopts);
24071f0a49e8SJason Evans
2408*c5ad8142SEric van Gyzen sopts.bump_empty_aligned_alloc = true;
2409b7eaed25SJason Evans sopts.min_alignment = sizeof(void *);
2410b7eaed25SJason Evans sopts.oom_string =
2411b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: out of memory\n";
2412b7eaed25SJason Evans sopts.invalid_alignment_string =
2413b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2414b7eaed25SJason Evans
2415b7eaed25SJason Evans dopts.result = memptr;
2416b7eaed25SJason Evans dopts.num_items = 1;
2417b7eaed25SJason Evans dopts.item_size = size;
2418b7eaed25SJason Evans dopts.alignment = alignment;
2419b7eaed25SJason Evans
2420b7eaed25SJason Evans ret = imalloc(&sopts, &dopts);
2421*c5ad8142SEric van Gyzen if (sopts.slow) {
2422*c5ad8142SEric van Gyzen uintptr_t args[3] = {(uintptr_t)memptr, (uintptr_t)alignment,
2423*c5ad8142SEric van Gyzen (uintptr_t)size};
2424*c5ad8142SEric van Gyzen hook_invoke_alloc(hook_alloc_posix_memalign, *memptr,
2425*c5ad8142SEric van Gyzen (uintptr_t)ret, args);
2426*c5ad8142SEric van Gyzen }
24270ef50b4eSJason Evans
24280ef50b4eSJason Evans LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret,
24290ef50b4eSJason Evans *memptr);
24300ef50b4eSJason Evans
2431b7eaed25SJason Evans return ret;
2432a4bd5210SJason Evans }
2433a4bd5210SJason Evans
2434d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2435d0e79aa3SJason Evans void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)2436d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
2437b7eaed25SJason Evans je_aligned_alloc(size_t alignment, size_t size) {
2438a4bd5210SJason Evans void *ret;
2439a4bd5210SJason Evans
2440b7eaed25SJason Evans static_opts_t sopts;
2441b7eaed25SJason Evans dynamic_opts_t dopts;
24421f0a49e8SJason Evans
24430ef50b4eSJason Evans LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n",
24440ef50b4eSJason Evans alignment, size);
24450ef50b4eSJason Evans
2446b7eaed25SJason Evans static_opts_init(&sopts);
2447b7eaed25SJason Evans dynamic_opts_init(&dopts);
2448b7eaed25SJason Evans
2449*c5ad8142SEric van Gyzen sopts.bump_empty_aligned_alloc = true;
2450b7eaed25SJason Evans sopts.null_out_result_on_error = true;
2451b7eaed25SJason Evans sopts.set_errno_on_error = true;
2452b7eaed25SJason Evans sopts.min_alignment = 1;
2453b7eaed25SJason Evans sopts.oom_string =
2454b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: out of memory\n";
2455b7eaed25SJason Evans sopts.invalid_alignment_string =
2456b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: invalid alignment\n";
2457b7eaed25SJason Evans
2458b7eaed25SJason Evans dopts.result = &ret;
2459b7eaed25SJason Evans dopts.num_items = 1;
2460b7eaed25SJason Evans dopts.item_size = size;
2461b7eaed25SJason Evans dopts.alignment = alignment;
2462b7eaed25SJason Evans
2463b7eaed25SJason Evans imalloc(&sopts, &dopts);
2464*c5ad8142SEric van Gyzen if (sopts.slow) {
2465*c5ad8142SEric van Gyzen uintptr_t args[3] = {(uintptr_t)alignment, (uintptr_t)size};
2466*c5ad8142SEric van Gyzen hook_invoke_alloc(hook_alloc_aligned_alloc, ret,
2467*c5ad8142SEric van Gyzen (uintptr_t)ret, args);
2468*c5ad8142SEric van Gyzen }
24690ef50b4eSJason Evans
24700ef50b4eSJason Evans LOG("core.aligned_alloc.exit", "result: %p", ret);
24710ef50b4eSJason Evans
2472b7eaed25SJason Evans return ret;
2473a4bd5210SJason Evans }
2474a4bd5210SJason Evans
2475d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
2476d0e79aa3SJason Evans void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)2477d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
2478b7eaed25SJason Evans je_calloc(size_t num, size_t size) {
2479a4bd5210SJason Evans void *ret;
2480b7eaed25SJason Evans static_opts_t sopts;
2481b7eaed25SJason Evans dynamic_opts_t dopts;
2482a4bd5210SJason Evans
24830ef50b4eSJason Evans LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size);
24840ef50b4eSJason Evans
2485b7eaed25SJason Evans static_opts_init(&sopts);
2486b7eaed25SJason Evans dynamic_opts_init(&dopts);
2487a4bd5210SJason Evans
2488b7eaed25SJason Evans sopts.may_overflow = true;
2489b7eaed25SJason Evans sopts.null_out_result_on_error = true;
2490b7eaed25SJason Evans sopts.set_errno_on_error = true;
2491b7eaed25SJason Evans sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n";
2492a4bd5210SJason Evans
2493b7eaed25SJason Evans dopts.result = &ret;
2494b7eaed25SJason Evans dopts.num_items = num;
2495b7eaed25SJason Evans dopts.item_size = size;
2496b7eaed25SJason Evans dopts.zero = true;
2497b7eaed25SJason Evans
2498b7eaed25SJason Evans imalloc(&sopts, &dopts);
2499*c5ad8142SEric van Gyzen if (sopts.slow) {
2500*c5ad8142SEric van Gyzen uintptr_t args[3] = {(uintptr_t)num, (uintptr_t)size};
2501*c5ad8142SEric van Gyzen hook_invoke_alloc(hook_alloc_calloc, ret, (uintptr_t)ret, args);
2502*c5ad8142SEric van Gyzen }
2503b7eaed25SJason Evans
25040ef50b4eSJason Evans LOG("core.calloc.exit", "result: %p", ret);
25050ef50b4eSJason Evans
2506b7eaed25SJason Evans return ret;
2507a4bd5210SJason Evans }
2508a4bd5210SJason Evans
2509f921d10fSJason Evans static void *
irealloc_prof_sample(tsd_t * tsd,void * old_ptr,size_t old_usize,size_t usize,prof_tctx_t * tctx,hook_ralloc_args_t * hook_args)2510536b3538SJason Evans irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2511*c5ad8142SEric van Gyzen prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) {
2512f921d10fSJason Evans void *p;
2513a4bd5210SJason Evans
2514b7eaed25SJason Evans if (tctx == NULL) {
2515b7eaed25SJason Evans return NULL;
2516b7eaed25SJason Evans }
2517*c5ad8142SEric van Gyzen if (usize <= SC_SMALL_MAXCLASS) {
2518*c5ad8142SEric van Gyzen p = iralloc(tsd, old_ptr, old_usize,
2519*c5ad8142SEric van Gyzen SC_LARGE_MINCLASS, 0, false, hook_args);
2520b7eaed25SJason Evans if (p == NULL) {
2521b7eaed25SJason Evans return NULL;
2522b7eaed25SJason Evans }
2523b7eaed25SJason Evans arena_prof_promote(tsd_tsdn(tsd), p, usize);
2524b7eaed25SJason Evans } else {
2525*c5ad8142SEric van Gyzen p = iralloc(tsd, old_ptr, old_usize, usize, 0, false,
2526*c5ad8142SEric van Gyzen hook_args);
2527a4bd5210SJason Evans }
2528a4bd5210SJason Evans
2529b7eaed25SJason Evans return p;
2530b7eaed25SJason Evans }
2531b7eaed25SJason Evans
/*
 * Profiling-aware realloc: prepares a profiling context for the new size,
 * dispatches to the sampled path (irealloc_prof_sample) or the plain
 * iralloc() path, rolls the profiler state back on failure, and records
 * the realloc on success.
 */
2532b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void *
irealloc_prof(tsd_t * tsd,void * old_ptr,size_t old_usize,size_t usize,alloc_ctx_t * alloc_ctx,hook_ralloc_args_t * hook_args)2533b7eaed25SJason Evans irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
2534*c5ad8142SEric van Gyzen alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) {
2535f921d10fSJason Evans void *p;
2536536b3538SJason Evans bool prof_active;
2537d0e79aa3SJason Evans prof_tctx_t *old_tctx, *tctx;
2538a4bd5210SJason Evans
2539536b3538SJason Evans prof_active = prof_active_get_unlocked();
2540b7eaed25SJason Evans old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
2541536b3538SJason Evans tctx = prof_alloc_prep(tsd, usize, prof_active, true);
/* (uintptr_t)1U is the "not sampled" tctx sentinel. */
2542b7eaed25SJason Evans if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
2543*c5ad8142SEric van Gyzen p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx,
2544*c5ad8142SEric van Gyzen hook_args);
2545b7eaed25SJason Evans } else {
2546*c5ad8142SEric van Gyzen p = iralloc(tsd, old_ptr, old_usize, usize, 0, false,
2547*c5ad8142SEric van Gyzen hook_args);
2548b7eaed25SJason Evans }
2549536b3538SJason Evans if (unlikely(p == NULL)) {
2550536b3538SJason Evans prof_alloc_rollback(tsd, tctx, true);
2551b7eaed25SJason Evans return NULL;
2552536b3538SJason Evans }
2553536b3538SJason Evans prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
2554536b3538SJason Evans old_tctx);
2555f921d10fSJason Evans
2556b7eaed25SJason Evans return p;
2557f921d10fSJason Evans }
2558f921d10fSJason Evans
/*
 * Common deallocation core for free(): look up the extent metadata (size
 * class index and slab bit) for ptr in the extents rtree, perform profiling
 * and stats bookkeeping, then hand the pointer to idalloctm().  slow_path
 * distinguishes the fast-tsd call sites from the slow ones; reentrant calls
 * are only legal on the slow path.
 */
JEMALLOC_ALWAYS_INLINE void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) {
	if (!slow_path) {
		tsd_assert_fast(tsd);
	}
	check_entry_exit_locking(tsd_tsdn(tsd));
	if (tsd_reentrancy_level_get(tsd) != 0) {
		assert(slow_path);
	}

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	/* Recover the allocation's size class and slab bit from the rtree. */
	alloc_ctx_t alloc_ctx;
	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
	assert(alloc_ctx.szind != SC_NSIZES);

	/* usize is only needed (and only computed) for prof and/or stats. */
	size_t usize;
	if (config_prof && opt_prof) {
		usize = sz_index2size(alloc_ctx.szind);
		prof_free(tsd, ptr, usize, &alloc_ctx);
	} else if (config_stats) {
		usize = sz_index2size(alloc_ctx.szind);
	}
	if (config_stats) {
		*tsd_thread_deallocatedp_get(tsd) += usize;
	}

	if (likely(!slow_path)) {
		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
		    false);
	} else {
		idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false,
		    true);
	}
}
2597f921d10fSJason Evans
/*
 * Sized deallocation core: like ifree(), but the caller supplies usize,
 * which lets the rtree lookup be skipped when the size class can be derived
 * from usize alone (presumably the sdallocx()/sized-free path -- callers are
 * outside this view).
 */
JEMALLOC_ALWAYS_INLINE void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) {
	if (!slow_path) {
		tsd_assert_fast(tsd);
	}
	check_entry_exit_locking(tsd_tsdn(tsd));
	if (tsd_reentrancy_level_get(tsd) != 0) {
		/* Reentrant calls must come through the slow path. */
		assert(slow_path);
	}

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	alloc_ctx_t alloc_ctx, *ctx;
	if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) {
		/*
		 * When cache_oblivious is disabled and ptr is not page aligned,
		 * the allocation was not sampled -- usize can be used to
		 * determine szind directly.
		 */
		alloc_ctx.szind = sz_size2index(usize);
		alloc_ctx.slab = true;
		ctx = &alloc_ctx;
		if (config_debug) {
			/* Cross-check the derived ctx against the rtree. */
			alloc_ctx_t dbg_ctx;
			rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
			rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree,
			    rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind,
			    &dbg_ctx.slab);
			assert(dbg_ctx.szind == alloc_ctx.szind);
			assert(dbg_ctx.slab == alloc_ctx.slab);
		}
	} else if (config_prof && opt_prof) {
		/* Profiling needs the exact ctx; consult the rtree. */
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
		assert(alloc_ctx.szind == sz_size2index(usize));
		ctx = &alloc_ctx;
	} else {
		/* No consumer of the ctx; let isdalloct() work without it. */
		ctx = NULL;
	}

	if (config_prof && opt_prof) {
		prof_free(tsd, ptr, usize, ctx);
	}
	if (config_stats) {
		*tsd_thread_deallocatedp_get(tsd) += usize;
	}

	if (likely(!slow_path)) {
		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false);
	} else {
		isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true);
	}
}
2653d0e79aa3SJason Evans
/*
 * realloc(3) entry point.  Semantics implemented here:
 *  - size == 0 is treated as size == 1 (so realloc(p, 0) still returns a
 *    distinct allocation rather than NULL);
 *  - ptr == NULL falls through to a malloc-equivalent imalloc() call;
 *  - on failure the original allocation is left untouched, NULL is
 *    returned, and errno is set to ENOMEM.
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t arg_size) {
	void *ret;
	/* JEMALLOC_CC_SILENCE_INIT only silences maybe-uninitialized
	 * warnings; real values are assigned on the paths that use them. */
	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_usize = 0;
	size_t size = arg_size;

	LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size);

	if (unlikely(size == 0)) {
		size = 1;
	}

	if (likely(ptr != NULL)) {
		assert(malloc_initialized() || IS_INITIALIZER);
		tsd_t *tsd = tsd_fetch();

		check_entry_exit_locking(tsd_tsdn(tsd));


		/* Record the original (pre-clamping) arguments for hooks. */
		hook_ralloc_args_t hook_args = {true, {(uintptr_t)ptr,
			(uintptr_t)arg_size, 0, 0}};

		/* Determine the old usable size via the rtree metadata. */
		alloc_ctx_t alloc_ctx;
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
		assert(alloc_ctx.szind != SC_NSIZES);
		old_usize = sz_index2size(alloc_ctx.szind);
		assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
		if (config_prof && opt_prof) {
			usize = sz_s2u(size);
			if (unlikely(usize == 0
			    || usize > SC_LARGE_MAXCLASS)) {
				ret = NULL;
			} else {
				ret = irealloc_prof(tsd, ptr, old_usize, usize,
				    &alloc_ctx, &hook_args);
			}
		} else {
			if (config_stats) {
				usize = sz_s2u(size);
			}
			ret = iralloc(tsd, ptr, old_usize, size, 0, false,
			    &hook_args);
		}
		tsdn = tsd_tsdn(tsd);
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		static_opts_t sopts;
		dynamic_opts_t dopts;

		static_opts_init(&sopts);
		dynamic_opts_init(&dopts);

		sopts.null_out_result_on_error = true;
		sopts.set_errno_on_error = true;
		sopts.oom_string =
		    "<jemalloc>: Error in realloc(): out of memory\n";

		dopts.result = &ret;
		dopts.num_items = 1;
		dopts.item_size = size;

		imalloc(&sopts, &dopts);
		if (sopts.slow) {
			uintptr_t args[3] = {(uintptr_t)ptr, arg_size};
			hook_invoke_alloc(hook_alloc_realloc, ret,
			    (uintptr_t)ret, args);
		}

		/* The malloc-equivalent path handles its own bookkeeping. */
		return ret;
	}

	if (unlikely(ret == NULL)) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write("<jemalloc>: Error in realloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && likely(ret != NULL)) {
		tsd_t *tsd;

		assert(usize == isalloc(tsdn, ret));
		tsd = tsdn_tsd(tsdn);
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, ret);
	check_entry_exit_locking(tsdn);

	LOG("core.realloc.exit", "result: %p", ret);
	return ret;
}
2753f921d10fSJason Evans
2754*c5ad8142SEric van Gyzen JEMALLOC_NOINLINE
2755*c5ad8142SEric van Gyzen void
free_default(void * ptr)2756*c5ad8142SEric van Gyzen free_default(void *ptr) {
2757f921d10fSJason Evans UTRACE(ptr, 0, 0);
2758d0e79aa3SJason Evans if (likely(ptr != NULL)) {
27598b2f5aafSJason Evans /*
27608b2f5aafSJason Evans * We avoid setting up tsd fully (e.g. tcache, arena binding)
27618b2f5aafSJason Evans * based on only free() calls -- other activities trigger the
27628b2f5aafSJason Evans * minimal to full transition. This is because free() may
27638b2f5aafSJason Evans * happen during thread shutdown after tls deallocation: if a
27648b2f5aafSJason Evans * thread never had any malloc activities until then, a
27658b2f5aafSJason Evans * fully-setup tsd won't be destructed properly.
27668b2f5aafSJason Evans */
27678b2f5aafSJason Evans tsd_t *tsd = tsd_fetch_min();
2768b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd));
2769b7eaed25SJason Evans
2770b7eaed25SJason Evans tcache_t *tcache;
2771b7eaed25SJason Evans if (likely(tsd_fast(tsd))) {
2772b7eaed25SJason Evans tsd_assert_fast(tsd);
2773b7eaed25SJason Evans /* Unconditionally get tcache ptr on fast path. */
2774b7eaed25SJason Evans tcache = tsd_tcachep_get(tsd);
2775b7eaed25SJason Evans ifree(tsd, ptr, tcache, false);
2776b7eaed25SJason Evans } else {
2777b7eaed25SJason Evans if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
2778b7eaed25SJason Evans tcache = tcache_get(tsd);
2779b7eaed25SJason Evans } else {
2780b7eaed25SJason Evans tcache = NULL;
2781b7eaed25SJason Evans }
2782*c5ad8142SEric van Gyzen uintptr_t args_raw[3] = {(uintptr_t)ptr};
2783*c5ad8142SEric van Gyzen hook_invoke_dalloc(hook_dalloc_free, ptr, args_raw);
2784b7eaed25SJason Evans ifree(tsd, ptr, tcache, true);
2785b7eaed25SJason Evans }
2786b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd));
2787d0e79aa3SJason Evans }
2788*c5ad8142SEric van Gyzen }
2789*c5ad8142SEric van Gyzen
/*
 * Attempt to free ptr entirely via the thread's tcache, bypassing the full
 * ifree() machinery.  Returns true when the deallocation was completed here;
 * false means the caller must fall back to free_default().  size/size_hint
 * allow a sized-free caller to skip the rtree lookup.
 */
JEMALLOC_ALWAYS_INLINE
bool free_fastpath(void *ptr, size_t size, bool size_hint) {
	/* Bail to the slow path if tsd is absent or not fully fast. */
	tsd_t *tsd = tsd_get(false);
	if (unlikely(!tsd || !tsd_fast(tsd))) {
		return false;
	}

	tcache_t *tcache = tsd_tcachep_get(tsd);

	alloc_ctx_t alloc_ctx;
	/*
	 * If !config_cache_oblivious, we can check PAGE alignment to
	 * detect sampled objects. Otherwise addresses are
	 * randomized, and we have to look it up in the rtree anyway.
	 * See also isfree().
	 */
	if (!size_hint || config_cache_oblivious) {
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
		bool res = rtree_szind_slab_read_fast(tsd_tsdn(tsd), &extents_rtree,
		    rtree_ctx, (uintptr_t)ptr,
		    &alloc_ctx.szind, &alloc_ctx.slab);

		/* Note: profiled objects will have alloc_ctx.slab set */
		if (!res || !alloc_ctx.slab) {
			return false;
		}
		assert(alloc_ctx.szind != SC_NSIZES);
	} else {
		/*
		 * Check for both sizes that are too large, and for sampled objects.
		 * Sampled objects are always page-aligned. The sampled object check
		 * will also check for null ptr.
		 */
		if (size > SC_LOOKUP_MAXCLASS || (((uintptr_t)ptr & PAGE_MASK) == 0)) {
			return false;
		}
		alloc_ctx.szind = sz_size2index_lookup(size);
	}

	/* A pending tcache GC event must run on the slow path. */
	if (unlikely(ticker_trytick(&tcache->gc_ticker))) {
		return false;
	}

	/* Push onto the tcache bin; fails when the bin is full. */
	cache_bin_t *bin = tcache_small_bin_get(tcache, alloc_ctx.szind);
	cache_bin_info_t *bin_info = &tcache_bin_info[alloc_ctx.szind];
	if (!cache_bin_dalloc_easy(bin, bin_info, ptr)) {
		return false;
	}

	if (config_stats) {
		size_t usize = sz_index2size(alloc_ctx.szind);
		*tsd_thread_deallocatedp_get(tsd) += usize;
	}

	return true;
}
2846*c5ad8142SEric van Gyzen
2847*c5ad8142SEric van Gyzen JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void * ptr)2848*c5ad8142SEric van Gyzen je_free(void *ptr) {
2849*c5ad8142SEric van Gyzen LOG("core.free.entry", "ptr: %p", ptr);
2850*c5ad8142SEric van Gyzen
2851*c5ad8142SEric van Gyzen if (!free_fastpath(ptr, 0, false)) {
2852*c5ad8142SEric van Gyzen free_default(ptr);
2853*c5ad8142SEric van Gyzen }
2854*c5ad8142SEric van Gyzen
28550ef50b4eSJason Evans LOG("core.free.exit", "");
2856a4bd5210SJason Evans }
2857a4bd5210SJason Evans
2858a4bd5210SJason Evans /*
2859a4bd5210SJason Evans * End malloc(3)-compatible functions.
2860a4bd5210SJason Evans */
2861a4bd5210SJason Evans /******************************************************************************/
2862a4bd5210SJason Evans /*
2863a4bd5210SJason Evans * Begin non-standard override functions.
2864a4bd5210SJason Evans */
2865a4bd5210SJason Evans
#ifdef JEMALLOC_OVERRIDE_MEMALIGN
/*
 * memalign(3) compatibility shim: delegates to imalloc() with the requested
 * alignment, returning NULL (rather than aborting) on error.
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_memalign(size_t alignment, size_t size) {
	void *result;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment,
	    size);

	/* Static options: error strings and error-handling policy. */
	static_opts_init(&sopts);
	sopts.min_alignment = 1;
	sopts.null_out_result_on_error = true;
	sopts.oom_string =
	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
	sopts.invalid_alignment_string =
	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";

	/* Dynamic options: this particular request. */
	dynamic_opts_init(&dopts);
	dopts.result = &result;
	dopts.num_items = 1;
	dopts.item_size = size;
	dopts.alignment = alignment;

	imalloc(&sopts, &dopts);
	if (sopts.slow) {
		uintptr_t args[3] = {alignment, size};
		hook_invoke_alloc(hook_alloc_memalign, result,
		    (uintptr_t)result, args);
	}

	LOG("core.memalign.exit", "result: %p", result);
	return result;
}
#endif
2904a4bd5210SJason Evans
#ifdef JEMALLOC_OVERRIDE_VALLOC
/*
 * valloc(3) compatibility shim: page-aligned allocation via imalloc(),
 * returning NULL on error.
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_valloc(size_t size) {
	void *result;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.valloc.entry", "size: %zu\n", size);

	/* Static options: error strings and error-handling policy. */
	static_opts_init(&sopts);
	sopts.null_out_result_on_error = true;
	sopts.min_alignment = PAGE;
	sopts.oom_string =
	    "<jemalloc>: Error allocating aligned memory: out of memory\n";
	sopts.invalid_alignment_string =
	    "<jemalloc>: Error allocating aligned memory: invalid alignment\n";

	/* Dynamic options: this particular request, forced to page align. */
	dynamic_opts_init(&dopts);
	dopts.result = &result;
	dopts.num_items = 1;
	dopts.item_size = size;
	dopts.alignment = PAGE;

	imalloc(&sopts, &dopts);
	if (sopts.slow) {
		uintptr_t args[3] = {size};
		hook_invoke_alloc(hook_alloc_valloc, result, (uintptr_t)result,
		    args);
	}

	LOG("core.valloc.exit", "result: %p\n", result);
	return result;
}
#endif
2942a4bd5210SJason Evans
#if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc. The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
/*
 * NOTE(review): glibc removed the __malloc_hook family in glibc 2.34 --
 * confirm this interposition is still exercised on target platforms.
 */
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
# endif

# ifdef CPU_COUNT
/*
 * To enable static linking with glibc, the libc specific malloc interface must
 * be implemented also, so none of glibc's malloc.o functions are added to the
 * link.
 */
# define ALIAS(je_fn)	__attribute__((alias (#je_fn), used))
/* To force macro expansion of je_ prefix before stringification. */
# define PREALIAS(je_fn)	ALIAS(je_fn)
# ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC
void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
# endif
# ifdef JEMALLOC_OVERRIDE___LIBC_FREE
void __libc_free(void* ptr) PREALIAS(je_free);
# endif
# ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC
void *__libc_malloc(size_t size) PREALIAS(je_malloc);
# endif
# ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN
void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
# endif
# ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC
void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
# endif
# ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC
void *__libc_valloc(size_t size) PREALIAS(je_valloc);
# endif
# ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN
int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign);
# endif
# undef PREALIAS
# undef ALIAS
# endif
#endif
2995a4bd5210SJason Evans
2996a4bd5210SJason Evans /*
2997a4bd5210SJason Evans * End non-standard override functions.
2998a4bd5210SJason Evans */
2999a4bd5210SJason Evans /******************************************************************************/
3000a4bd5210SJason Evans /*
3001a4bd5210SJason Evans * Begin non-standard functions.
3002a4bd5210SJason Evans */
3003a4bd5210SJason Evans
#ifdef JEMALLOC_EXPERIMENTAL_SMALLOCX_API

/*
 * Experimental smallocx API: like mallocx(), but returns both the pointer
 * and the usable size in a single struct, so callers may write up to the
 * returned size without a separate malloc_usable_size() call.  The function
 * name embeds the version git id (via the CONCAT helpers below), tying
 * binaries to the exact jemalloc build that defined the experimental ABI.
 */
#define JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y) x ## y
#define JEMALLOC_SMALLOCX_CONCAT_HELPER2(x, y)	\
  JEMALLOC_SMALLOCX_CONCAT_HELPER(x, y)

typedef struct {
	void *ptr;
	size_t size;
} smallocx_return_t;

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
smallocx_return_t JEMALLOC_NOTHROW
/*
 * The attribute JEMALLOC_ATTR(malloc) cannot be used due to:
 *  - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=86488
 */
JEMALLOC_SMALLOCX_CONCAT_HELPER2(je_smallocx_, JEMALLOC_VERSION_GID_IDENT)
  (size_t size, int flags) {
	/*
	 * Note: the attribute JEMALLOC_ALLOC_SIZE(1) cannot be
	 * used here because it makes writing beyond the `size`
	 * of the `ptr` undefined behavior, but the objective
	 * of this function is to allow writing beyond `size`
	 * up to `smallocx_return_t::size`.
	 */
	smallocx_return_t ret;
	static_opts_t sopts;
	dynamic_opts_t dopts;

	LOG("core.smallocx.entry", "size: %zu, flags: %d", size, flags);

	static_opts_init(&sopts);
	dynamic_opts_init(&dopts);

	sopts.assert_nonempty_alloc = true;
	sopts.null_out_result_on_error = true;
	sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
	/* Ask imalloc() to report the usable size back via dopts.usize. */
	sopts.usize = true;

	dopts.result = &ret.ptr;
	dopts.num_items = 1;
	dopts.item_size = size;
	/* Decode MALLOCX_* flag bits; defaults stand when flags == 0. */
	if (unlikely(flags != 0)) {
		if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
			dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
		}

		dopts.zero = MALLOCX_ZERO_GET(flags);

		if ((flags & MALLOCX_TCACHE_MASK) != 0) {
			if ((flags & MALLOCX_TCACHE_MASK)
			    == MALLOCX_TCACHE_NONE) {
				dopts.tcache_ind = TCACHE_IND_NONE;
			} else {
				dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
			}
		} else {
			dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
		}

		if ((flags & MALLOCX_ARENA_MASK) != 0)
			dopts.arena_ind = MALLOCX_ARENA_GET(flags);
	}

	imalloc(&sopts, &dopts);
	assert(dopts.usize == je_nallocx(size, flags));
	ret.size = dopts.usize;

	LOG("core.smallocx.exit", "result: %p, size: %zu", ret.ptr, ret.size);
	return ret;
}
#undef JEMALLOC_SMALLOCX_CONCAT_HELPER
#undef JEMALLOC_SMALLOCX_CONCAT_HELPER2
#endif
3079*c5ad8142SEric van Gyzen
3080d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
3081d0e79aa3SJason Evans void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)3082d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
3083b7eaed25SJason Evans je_mallocx(size_t size, int flags) {
3084b7eaed25SJason Evans void *ret;
3085b7eaed25SJason Evans static_opts_t sopts;
3086b7eaed25SJason Evans dynamic_opts_t dopts;
3087f921d10fSJason Evans
30880ef50b4eSJason Evans LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags);
30890ef50b4eSJason Evans
3090b7eaed25SJason Evans static_opts_init(&sopts);
3091b7eaed25SJason Evans dynamic_opts_init(&dopts);
3092f921d10fSJason Evans
3093b7eaed25SJason Evans sopts.assert_nonempty_alloc = true;
3094b7eaed25SJason Evans sopts.null_out_result_on_error = true;
3095b7eaed25SJason Evans sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n";
3096b7eaed25SJason Evans
3097b7eaed25SJason Evans dopts.result = &ret;
3098b7eaed25SJason Evans dopts.num_items = 1;
3099b7eaed25SJason Evans dopts.item_size = size;
3100b7eaed25SJason Evans if (unlikely(flags != 0)) {
3101b7eaed25SJason Evans if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) {
3102b7eaed25SJason Evans dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
3103f921d10fSJason Evans }
31041f0a49e8SJason Evans
3105b7eaed25SJason Evans dopts.zero = MALLOCX_ZERO_GET(flags);
3106b7eaed25SJason Evans
3107b7eaed25SJason Evans if ((flags & MALLOCX_TCACHE_MASK) != 0) {
3108b7eaed25SJason Evans if ((flags & MALLOCX_TCACHE_MASK)
3109b7eaed25SJason Evans == MALLOCX_TCACHE_NONE) {
3110b7eaed25SJason Evans dopts.tcache_ind = TCACHE_IND_NONE;
3111b7eaed25SJason Evans } else {
3112b7eaed25SJason Evans dopts.tcache_ind = MALLOCX_TCACHE_GET(flags);
3113b7eaed25SJason Evans }
3114b7eaed25SJason Evans } else {
3115b7eaed25SJason Evans dopts.tcache_ind = TCACHE_IND_AUTOMATIC;
3116b7eaed25SJason Evans }
3117b7eaed25SJason Evans
3118b7eaed25SJason Evans if ((flags & MALLOCX_ARENA_MASK) != 0)
3119b7eaed25SJason Evans dopts.arena_ind = MALLOCX_ARENA_GET(flags);
3120b7eaed25SJason Evans }
3121b7eaed25SJason Evans
3122b7eaed25SJason Evans imalloc(&sopts, &dopts);
3123*c5ad8142SEric van Gyzen if (sopts.slow) {
3124*c5ad8142SEric van Gyzen uintptr_t args[3] = {size, flags};
3125*c5ad8142SEric van Gyzen hook_invoke_alloc(hook_alloc_mallocx, ret, (uintptr_t)ret,
3126*c5ad8142SEric van Gyzen args);
3127*c5ad8142SEric van Gyzen }
31280ef50b4eSJason Evans
31290ef50b4eSJason Evans LOG("core.mallocx.exit", "result: %p", ret);
3130b7eaed25SJason Evans return ret;
3131f921d10fSJason Evans }
3132f921d10fSJason Evans
3133f921d10fSJason Evans static void *
irallocx_prof_sample(tsdn_t * tsdn,void * old_ptr,size_t old_usize,size_t usize,size_t alignment,bool zero,tcache_t * tcache,arena_t * arena,prof_tctx_t * tctx,hook_ralloc_args_t * hook_args)3134b7eaed25SJason Evans irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize,
3135536b3538SJason Evans size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
3136*c5ad8142SEric van Gyzen prof_tctx_t *tctx, hook_ralloc_args_t *hook_args) {
3137f921d10fSJason Evans void *p;
3138f921d10fSJason Evans
3139b7eaed25SJason Evans if (tctx == NULL) {
3140b7eaed25SJason Evans return NULL;
3141b7eaed25SJason Evans }
3142*c5ad8142SEric van Gyzen if (usize <= SC_SMALL_MAXCLASS) {
3143*c5ad8142SEric van Gyzen p = iralloct(tsdn, old_ptr, old_usize,
3144*c5ad8142SEric van Gyzen SC_LARGE_MINCLASS, alignment, zero, tcache,
3145*c5ad8142SEric van Gyzen arena, hook_args);
3146b7eaed25SJason Evans if (p == NULL) {
3147b7eaed25SJason Evans return NULL;
3148b7eaed25SJason Evans }
3149b7eaed25SJason Evans arena_prof_promote(tsdn, p, usize);
3150f921d10fSJason Evans } else {
3151b7eaed25SJason Evans p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero,
3152*c5ad8142SEric van Gyzen tcache, arena, hook_args);
3153f921d10fSJason Evans }
3154f921d10fSJason Evans
3155b7eaed25SJason Evans return p;
3156f921d10fSJason Evans }
3157f921d10fSJason Evans
/*
 * Profiling-aware reallocation wrapper.  prof_alloc_prep() decides whether
 * the new object should be sampled; sampled requests go through
 * irallocx_prof_sample(), all others through plain iralloct().  On success
 * the old and new prof contexts are reconciled via prof_realloc(); on
 * failure the prepared tctx is rolled back.  *usize is in/out: it holds the
 * expected usable size on entry and the actual usable size on return.
 */
JEMALLOC_ALWAYS_INLINE void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
    size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
    arena_t *arena, alloc_ctx_t *alloc_ctx, hook_ralloc_args_t *hook_args) {
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx);
	tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
	/* (uintptr_t)1U is the sentinel tctx for the unsampled path. */
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize,
		    *usize, alignment, zero, tcache, arena, tctx, hook_args);
	} else {
		p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment,
		    zero, tcache, arena, hook_args);
	}
	if (unlikely(p == NULL)) {
		/* Reallocation failed; undo the prof bookkeeping. */
		prof_alloc_rollback(tsd, tctx, false);
		return NULL;
	}

	if (p == old_ptr && alignment != 0) {
		/*
		 * The allocation did not move, so it is possible that the size
		 * class is smaller than would guarantee the requested
		 * alignment, and that the alignment constraint was
		 * serendipitously satisfied. Additionally, old_usize may not
		 * be the same as the current usize because of in-place large
		 * reallocation. Therefore, query the actual value of usize.
		 */
		*usize = isalloc(tsd_tsdn(tsd), p);
	}
	prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
	    old_usize, old_tctx);

	return p;
}
3197f921d10fSJason Evans
/*
 * rallocx(ptr, size, flags): reallocate ptr to at least size bytes, honoring
 * the MALLOCX_ALIGN/ZERO/TCACHE/ARENA options encoded in flags.  Returns the
 * (possibly moved) pointer, or NULL on failure, in which case ptr remains
 * valid.
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_rallocx(void *ptr, size_t size, int flags) {
	void *p;
	tsd_t *tsd;
	size_t usize;
	size_t old_usize;
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;
	arena_t *arena;
	tcache_t *tcache;

	LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
	    size, flags);


	assert(ptr != NULL);
	assert(size != 0);
	assert(malloc_initialized() || IS_INITIALIZER);
	tsd = tsd_fetch();
	check_entry_exit_locking(tsd_tsdn(tsd));

	/* Resolve an explicitly requested arena, if any. */
	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(arena == NULL)) {
			goto label_oom;
		}
	} else {
		arena = NULL;
	}

	/* Select tcache: explicit index, explicitly none, or the default. */
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
			tcache = NULL;
		} else {
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
		}
	} else {
		tcache = tcache_get(tsd);
	}

	/* Look up the old allocation's size class in the extents rtree. */
	alloc_ctx_t alloc_ctx;
	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
	assert(alloc_ctx.szind != SC_NSIZES);
	old_usize = sz_index2size(alloc_ctx.szind);
	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));

	hook_ralloc_args_t hook_args = {false, {(uintptr_t)ptr, size, flags,
	    0}};
	if (config_prof && opt_prof) {
		/* Profiling needs the final usize before allocating. */
		usize = (alignment == 0) ?
		    sz_s2u(size) : sz_sa2u(size, alignment);
		if (unlikely(usize == 0
		    || usize > SC_LARGE_MAXCLASS)) {
			goto label_oom;
		}
		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
		    zero, tcache, arena, &alloc_ctx, &hook_args);
		if (unlikely(p == NULL)) {
			goto label_oom;
		}
	} else {
		p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
		    zero, tcache, arena, &hook_args);
		if (unlikely(p == NULL)) {
			goto label_oom;
		}
		if (config_stats) {
			usize = isalloc(tsd_tsdn(tsd), p);
		}
	}
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));

	if (config_stats) {
		/* Maintain per-thread allocation counters. */
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, p);
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.rallocx.exit", "result: %p", p);
	return p;
label_oom:
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
		abort();
	}
	UTRACE(ptr, size, 0);
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.rallocx.exit", "result: %p", NULL);
	return NULL;
}
3295f921d10fSJason Evans
3296b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE size_t
ixallocx_helper(tsdn_t * tsdn,void * ptr,size_t old_usize,size_t size,size_t extra,size_t alignment,bool zero)32971f0a49e8SJason Evans ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
3298b7eaed25SJason Evans size_t extra, size_t alignment, bool zero) {
3299*c5ad8142SEric van Gyzen size_t newsize;
3300f921d10fSJason Evans
3301*c5ad8142SEric van Gyzen if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero,
3302*c5ad8142SEric van Gyzen &newsize)) {
3303b7eaed25SJason Evans return old_usize;
3304b7eaed25SJason Evans }
3305f921d10fSJason Evans
3306*c5ad8142SEric van Gyzen return newsize;
3307f921d10fSJason Evans }
3308f921d10fSJason Evans
3309f921d10fSJason Evans static size_t
ixallocx_prof_sample(tsdn_t * tsdn,void * ptr,size_t old_usize,size_t size,size_t extra,size_t alignment,bool zero,prof_tctx_t * tctx)33101f0a49e8SJason Evans ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
3311b7eaed25SJason Evans size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
3312f921d10fSJason Evans size_t usize;
3313f921d10fSJason Evans
3314b7eaed25SJason Evans if (tctx == NULL) {
3315b7eaed25SJason Evans return old_usize;
3316b7eaed25SJason Evans }
33171f0a49e8SJason Evans usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
3318df0d881dSJason Evans zero);
3319f921d10fSJason Evans
3320b7eaed25SJason Evans return usize;
3321f921d10fSJason Evans }
3322f921d10fSJason Evans
/*
 * Profiling-aware in-place resize.  Computes the maximum usable size the
 * request could yield, uses it to decide whether to sample, performs the
 * resize, then reconciles prof state.  Returns the resulting usable size
 * (equal to old_usize when the resize failed or was a no-op).
 */
JEMALLOC_ALWAYS_INLINE size_t
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
	size_t usize_max, usize;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
	/*
	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
	 * Therefore, compute its maximum possible value and use that in
	 * prof_alloc_prep() to decide whether to capture a backtrace.
	 * prof_realloc() will use the actual usize to decide whether to sample.
	 */
	if (alignment == 0) {
		usize_max = sz_s2u(size+extra);
		/* The caller already clamped size+extra to the max class. */
		assert(usize_max > 0
		    && usize_max <= SC_LARGE_MAXCLASS);
	} else {
		usize_max = sz_sa2u(size+extra, alignment);
		if (unlikely(usize_max == 0
		    || usize_max > SC_LARGE_MAXCLASS)) {
			/*
			 * usize_max is out of range, and chances are that
			 * allocation will fail, but use the maximum possible
			 * value and carry on with prof_alloc_prep(), just in
			 * case allocation succeeds.
			 */
			usize_max = SC_LARGE_MAXCLASS;
		}
	}
	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);

	/* (uintptr_t)1U is the sentinel tctx for the unsampled path. */
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
		    size, extra, alignment, zero, tctx);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (usize == old_usize) {
		/* No effective resize happened; undo the prof prep. */
		prof_alloc_rollback(tsd, tctx, false);
		return usize;
	}
	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
	    old_tctx);

	return usize;
}
3373f921d10fSJason Evans
/*
 * xallocx(ptr, size, extra, flags): attempt to resize ptr in place to at
 * least size bytes (ideally size+extra), never moving the allocation.
 * Returns the resulting usable size; a return equal to the old usable size
 * indicates the resize did not happen.
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
	tsd_t *tsd;
	size_t usize, old_usize;
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;

	LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
	    "flags: %d", ptr, size, extra, flags);

	assert(ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized() || IS_INITIALIZER);
	tsd = tsd_fetch();
	check_entry_exit_locking(tsd_tsdn(tsd));

	/* Determine the current usable size via the extents rtree. */
	alloc_ctx_t alloc_ctx;
	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
	assert(alloc_ctx.szind != SC_NSIZES);
	old_usize = sz_index2size(alloc_ctx.szind);
	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
	/*
	 * The API explicitly absolves itself of protecting against (size +
	 * extra) numerical overflow, but we may need to clamp extra to avoid
	 * exceeding SC_LARGE_MAXCLASS.
	 *
	 * Ordinarily, size limit checking is handled deeper down, but here we
	 * have to check as part of (size + extra) clamping, since we need the
	 * clamped value in the above helper functions.
	 */
	if (unlikely(size > SC_LARGE_MAXCLASS)) {
		usize = old_usize;
		goto label_not_resized;
	}
	if (unlikely(SC_LARGE_MAXCLASS - size < extra)) {
		extra = SC_LARGE_MAXCLASS - size;
	}

	if (config_prof && opt_prof) {
		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
		    alignment, zero, &alloc_ctx);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (unlikely(usize == old_usize)) {
		goto label_not_resized;
	}

	if (config_stats) {
		/* Maintain per-thread allocation counters. */
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
label_not_resized:
	/* On the slow path, invoke expand hooks even when nothing resized. */
	if (unlikely(!tsd_fast(tsd))) {
		uintptr_t args[4] = {(uintptr_t)ptr, size, extra, flags};
		hook_invoke_expand(hook_expand_xallocx, ptr, old_usize,
		    usize, (uintptr_t)usize, args);
	}

	UTRACE(ptr, size, ptr);
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.xallocx.exit", "result: %zu", usize);
	return usize;
}
3443f921d10fSJason Evans
3444d0e79aa3SJason Evans JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)3445d0e79aa3SJason Evans JEMALLOC_ATTR(pure)
3446*c5ad8142SEric van Gyzen je_sallocx(const void *ptr, int flags) {
3447f921d10fSJason Evans size_t usize;
34481f0a49e8SJason Evans tsdn_t *tsdn;
3449a4bd5210SJason Evans
34500ef50b4eSJason Evans LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);
34510ef50b4eSJason Evans
3452d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER);
3453b7eaed25SJason Evans assert(ptr != NULL);
3454a4bd5210SJason Evans
34551f0a49e8SJason Evans tsdn = tsdn_fetch();
3456b7eaed25SJason Evans check_entry_exit_locking(tsdn);
3457a4bd5210SJason Evans
3458b7eaed25SJason Evans if (config_debug || force_ivsalloc) {
3459b7eaed25SJason Evans usize = ivsalloc(tsdn, ptr);
3460b7eaed25SJason Evans assert(force_ivsalloc || usize != 0);
3461b7eaed25SJason Evans } else {
3462b7eaed25SJason Evans usize = isalloc(tsdn, ptr);
3463b7eaed25SJason Evans }
34641f0a49e8SJason Evans
3465b7eaed25SJason Evans check_entry_exit_locking(tsdn);
34660ef50b4eSJason Evans
34670ef50b4eSJason Evans LOG("core.sallocx.exit", "result: %zu", usize);
3468b7eaed25SJason Evans return usize;
3469a4bd5210SJason Evans }
3470a4bd5210SJason Evans
/*
 * dallocx(ptr, flags): deallocate ptr, honoring any MALLOCX_TCACHE option in
 * flags.  Fast-path frees skip the deallocation hooks; slow-path frees
 * invoke them first.
 */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_dallocx(void *ptr, int flags) {
	LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	tsd_t *tsd = tsd_fetch();
	bool fast = tsd_fast(tsd);
	check_entry_exit_locking(tsd_tsdn(tsd));

	/* Select the tcache to return freed memory through. */
	tcache_t *tcache;
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		/* Not allowed to be reentrant and specify a custom tcache. */
		assert(tsd_reentrancy_level_get(tsd) == 0);
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
			tcache = NULL;
		} else {
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
		}
	} else {
		if (likely(fast)) {
			tcache = tsd_tcachep_get(tsd);
			assert(tcache == tcache_get(tsd));
		} else {
			/* Reentrant calls bypass the tcache entirely. */
			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
				tcache = tcache_get(tsd);
			} else {
				tcache = NULL;
			}
		}
	}

	UTRACE(ptr, 0, 0);
	if (likely(fast)) {
		tsd_assert_fast(tsd);
		ifree(tsd, ptr, tcache, false);
	} else {
		/* Slow path: notify registered deallocation hooks first. */
		uintptr_t args_raw[3] = {(uintptr_t)ptr, flags};
		hook_invoke_dalloc(hook_dalloc_dallocx, ptr, args_raw);
		ifree(tsd, ptr, tcache, true);
	}
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.dallocx.exit", "");
}
3517f921d10fSJason Evans
3518b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE size_t
inallocx(tsdn_t * tsdn,size_t size,int flags)3519b7eaed25SJason Evans inallocx(tsdn_t *tsdn, size_t size, int flags) {
3520b7eaed25SJason Evans check_entry_exit_locking(tsdn);
3521b7eaed25SJason Evans
3522f921d10fSJason Evans size_t usize;
3523b7eaed25SJason Evans if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
3524b7eaed25SJason Evans usize = sz_s2u(size);
3525b7eaed25SJason Evans } else {
3526b7eaed25SJason Evans usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
3527b7eaed25SJason Evans }
3528b7eaed25SJason Evans check_entry_exit_locking(tsdn);
3529b7eaed25SJason Evans return usize;
3530a4bd5210SJason Evans }
3531a4bd5210SJason Evans
/*
 * Slow-path implementation of sdallocx(): deallocate ptr, whose usable size
 * must match inallocx(size, flags).  Used by je_sdallocx() and
 * je_sdallocx_noflags() when the fast free path declines the request.
 */
JEMALLOC_NOINLINE void
sdallocx_default(void *ptr, size_t size, int flags) {
	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	tsd_t *tsd = tsd_fetch();
	bool fast = tsd_fast(tsd);
	size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
	/* The caller-supplied size must agree with the allocation's size. */
	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
	check_entry_exit_locking(tsd_tsdn(tsd));

	/* Select the tcache to return freed memory through. */
	tcache_t *tcache;
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		/* Not allowed to be reentrant and specify a custom tcache. */
		assert(tsd_reentrancy_level_get(tsd) == 0);
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
			tcache = NULL;
		} else {
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
		}
	} else {
		if (likely(fast)) {
			tcache = tsd_tcachep_get(tsd);
			assert(tcache == tcache_get(tsd));
		} else {
			/* Reentrant calls bypass the tcache entirely. */
			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
				tcache = tcache_get(tsd);
			} else {
				tcache = NULL;
			}
		}
	}

	UTRACE(ptr, 0, 0);
	if (likely(fast)) {
		tsd_assert_fast(tsd);
		isfree(tsd, ptr, usize, tcache, false);
	} else {
		/* Slow path: notify registered deallocation hooks first. */
		uintptr_t args_raw[3] = {(uintptr_t)ptr, size, flags};
		hook_invoke_dalloc(hook_dalloc_sdallocx, ptr, args_raw);
		isfree(tsd, ptr, usize, tcache, true);
	}
	check_entry_exit_locking(tsd_tsdn(tsd));

}
3577*c5ad8142SEric van Gyzen
3578*c5ad8142SEric van Gyzen JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_sdallocx(void * ptr,size_t size,int flags)3579*c5ad8142SEric van Gyzen je_sdallocx(void *ptr, size_t size, int flags) {
3580*c5ad8142SEric van Gyzen LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
3581*c5ad8142SEric van Gyzen size, flags);
3582*c5ad8142SEric van Gyzen
3583*c5ad8142SEric van Gyzen if (flags !=0 || !free_fastpath(ptr, size, true)) {
3584*c5ad8142SEric van Gyzen sdallocx_default(ptr, size, flags);
3585*c5ad8142SEric van Gyzen }
3586*c5ad8142SEric van Gyzen
3587*c5ad8142SEric van Gyzen LOG("core.sdallocx.exit", "");
3588*c5ad8142SEric van Gyzen }
3589*c5ad8142SEric van Gyzen
3590*c5ad8142SEric van Gyzen void JEMALLOC_NOTHROW
je_sdallocx_noflags(void * ptr,size_t size)3591*c5ad8142SEric van Gyzen je_sdallocx_noflags(void *ptr, size_t size) {
3592*c5ad8142SEric van Gyzen LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: 0", ptr,
3593*c5ad8142SEric van Gyzen size);
3594*c5ad8142SEric van Gyzen
3595*c5ad8142SEric van Gyzen if (!free_fastpath(ptr, size, true)) {
3596*c5ad8142SEric van Gyzen sdallocx_default(ptr, size, 0);
3597*c5ad8142SEric van Gyzen }
3598*c5ad8142SEric van Gyzen
35990ef50b4eSJason Evans LOG("core.sdallocx.exit", "");
3600d0e79aa3SJason Evans }
3601d0e79aa3SJason Evans
3602d0e79aa3SJason Evans JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)3603d0e79aa3SJason Evans JEMALLOC_ATTR(pure)
3604b7eaed25SJason Evans je_nallocx(size_t size, int flags) {
3605df0d881dSJason Evans size_t usize;
36061f0a49e8SJason Evans tsdn_t *tsdn;
3607d0e79aa3SJason Evans
3608d0e79aa3SJason Evans assert(size != 0);
3609d0e79aa3SJason Evans
3610b7eaed25SJason Evans if (unlikely(malloc_init())) {
36110ef50b4eSJason Evans LOG("core.nallocx.exit", "result: %zu", ZU(0));
3612b7eaed25SJason Evans return 0;
3613b7eaed25SJason Evans }
3614d0e79aa3SJason Evans
36151f0a49e8SJason Evans tsdn = tsdn_fetch();
3616b7eaed25SJason Evans check_entry_exit_locking(tsdn);
36171f0a49e8SJason Evans
36181f0a49e8SJason Evans usize = inallocx(tsdn, size, flags);
3619*c5ad8142SEric van Gyzen if (unlikely(usize > SC_LARGE_MAXCLASS)) {
36200ef50b4eSJason Evans LOG("core.nallocx.exit", "result: %zu", ZU(0));
3621b7eaed25SJason Evans return 0;
3622b7eaed25SJason Evans }
3623df0d881dSJason Evans
3624b7eaed25SJason Evans check_entry_exit_locking(tsdn);
36250ef50b4eSJason Evans LOG("core.nallocx.exit", "result: %zu", usize);
3626b7eaed25SJason Evans return usize;
3627d0e79aa3SJason Evans }
3628d0e79aa3SJason Evans
3629d0e79aa3SJason Evans JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char * name,void * oldp,size_t * oldlenp,void * newp,size_t newlen)3630a4bd5210SJason Evans je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
3631b7eaed25SJason Evans size_t newlen) {
36321f0a49e8SJason Evans int ret;
36331f0a49e8SJason Evans tsd_t *tsd;
3634a4bd5210SJason Evans
36350ef50b4eSJason Evans LOG("core.mallctl.entry", "name: %s", name);
36360ef50b4eSJason Evans
3637b7eaed25SJason Evans if (unlikely(malloc_init())) {
36380ef50b4eSJason Evans LOG("core.mallctl.exit", "result: %d", EAGAIN);
3639b7eaed25SJason Evans return EAGAIN;
3640b7eaed25SJason Evans }
3641a4bd5210SJason Evans
36421f0a49e8SJason Evans tsd = tsd_fetch();
3643b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd));
36441f0a49e8SJason Evans ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
3645b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd));
36460ef50b4eSJason Evans
36470ef50b4eSJason Evans LOG("core.mallctl.exit", "result: %d", ret);
3648b7eaed25SJason Evans return ret;
3649a4bd5210SJason Evans }
3650a4bd5210SJason Evans
3651d0e79aa3SJason Evans JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char * name,size_t * mibp,size_t * miblenp)3652b7eaed25SJason Evans je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
36531f0a49e8SJason Evans int ret;
3654a4bd5210SJason Evans
36550ef50b4eSJason Evans LOG("core.mallctlnametomib.entry", "name: %s", name);
36560ef50b4eSJason Evans
3657b7eaed25SJason Evans if (unlikely(malloc_init())) {
36580ef50b4eSJason Evans LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
3659b7eaed25SJason Evans return EAGAIN;
3660b7eaed25SJason Evans }
3661a4bd5210SJason Evans
36628b2f5aafSJason Evans tsd_t *tsd = tsd_fetch();
36638b2f5aafSJason Evans check_entry_exit_locking(tsd_tsdn(tsd));
36648b2f5aafSJason Evans ret = ctl_nametomib(tsd, name, mibp, miblenp);
36658b2f5aafSJason Evans check_entry_exit_locking(tsd_tsdn(tsd));
36660ef50b4eSJason Evans
36670ef50b4eSJason Evans LOG("core.mallctlnametomib.exit", "result: %d", ret);
3668b7eaed25SJason Evans return ret;
3669a4bd5210SJason Evans }
3670a4bd5210SJason Evans
3671d0e79aa3SJason Evans JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t * mib,size_t miblen,void * oldp,size_t * oldlenp,void * newp,size_t newlen)3672a4bd5210SJason Evans je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
3673b7eaed25SJason Evans void *newp, size_t newlen) {
36741f0a49e8SJason Evans int ret;
36751f0a49e8SJason Evans tsd_t *tsd;
3676a4bd5210SJason Evans
36770ef50b4eSJason Evans LOG("core.mallctlbymib.entry", "");
36780ef50b4eSJason Evans
3679b7eaed25SJason Evans if (unlikely(malloc_init())) {
36800ef50b4eSJason Evans LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
3681b7eaed25SJason Evans return EAGAIN;
3682b7eaed25SJason Evans }
3683a4bd5210SJason Evans
36841f0a49e8SJason Evans tsd = tsd_fetch();
3685b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd));
36861f0a49e8SJason Evans ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
3687b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd));
36880ef50b4eSJason Evans LOG("core.mallctlbymib.exit", "result: %d", ret);
3689b7eaed25SJason Evans return ret;
3690a4bd5210SJason Evans }
3691a4bd5210SJason Evans
3692d0e79aa3SJason Evans JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (* write_cb)(void *,const char *),void * cbopaque,const char * opts)3693f921d10fSJason Evans je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
3694b7eaed25SJason Evans const char *opts) {
36951f0a49e8SJason Evans tsdn_t *tsdn;
3696f921d10fSJason Evans
36970ef50b4eSJason Evans LOG("core.malloc_stats_print.entry", "");
36980ef50b4eSJason Evans
36991f0a49e8SJason Evans tsdn = tsdn_fetch();
3700b7eaed25SJason Evans check_entry_exit_locking(tsdn);
3701f921d10fSJason Evans stats_print(write_cb, cbopaque, opts);
3702b7eaed25SJason Evans check_entry_exit_locking(tsdn);
37030ef50b4eSJason Evans LOG("core.malloc_stats_print.exit", "");
3704f921d10fSJason Evans }
3705f921d10fSJason Evans
3706d0e79aa3SJason Evans JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void * ptr)3707b7eaed25SJason Evans je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
3708f921d10fSJason Evans size_t ret;
37091f0a49e8SJason Evans tsdn_t *tsdn;
3710f921d10fSJason Evans
37110ef50b4eSJason Evans LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);
37120ef50b4eSJason Evans
3713d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER);
3714f921d10fSJason Evans
37151f0a49e8SJason Evans tsdn = tsdn_fetch();
3716b7eaed25SJason Evans check_entry_exit_locking(tsdn);
3717f921d10fSJason Evans
3718b7eaed25SJason Evans if (unlikely(ptr == NULL)) {
3719b7eaed25SJason Evans ret = 0;
3720b7eaed25SJason Evans } else {
3721b7eaed25SJason Evans if (config_debug || force_ivsalloc) {
3722b7eaed25SJason Evans ret = ivsalloc(tsdn, ptr);
3723b7eaed25SJason Evans assert(force_ivsalloc || ret != 0);
3724b7eaed25SJason Evans } else {
3725b7eaed25SJason Evans ret = isalloc(tsdn, ptr);
3726b7eaed25SJason Evans }
3727b7eaed25SJason Evans }
37281f0a49e8SJason Evans
3729b7eaed25SJason Evans check_entry_exit_locking(tsdn);
37300ef50b4eSJason Evans LOG("core.malloc_usable_size.exit", "result: %zu", ret);
3731b7eaed25SJason Evans return ret;
3732f921d10fSJason Evans }
3733f921d10fSJason Evans
3734a4bd5210SJason Evans /*
3735a4bd5210SJason Evans * End non-standard functions.
3736a4bd5210SJason Evans */
3737a4bd5210SJason Evans /******************************************************************************/
3738a4bd5210SJason Evans /*
3739d0e79aa3SJason Evans * Begin compatibility functions.
3740a4bd5210SJason Evans */
3741d0e79aa3SJason Evans
3742d0e79aa3SJason Evans #define ALLOCM_LG_ALIGN(la) (la)
3743d0e79aa3SJason Evans #define ALLOCM_ALIGN(a) (ffsl(a)-1)
3744d0e79aa3SJason Evans #define ALLOCM_ZERO ((int)0x40)
3745d0e79aa3SJason Evans #define ALLOCM_NO_MOVE ((int)0x80)
3746d0e79aa3SJason Evans
3747d0e79aa3SJason Evans #define ALLOCM_SUCCESS 0
3748d0e79aa3SJason Evans #define ALLOCM_ERR_OOM 1
3749d0e79aa3SJason Evans #define ALLOCM_ERR_NOT_MOVED 2
3750a4bd5210SJason Evans
3751a4bd5210SJason Evans int
je_allocm(void ** ptr,size_t * rsize,size_t size,int flags)3752b7eaed25SJason Evans je_allocm(void **ptr, size_t *rsize, size_t size, int flags) {
3753a4bd5210SJason Evans assert(ptr != NULL);
3754a4bd5210SJason Evans
3755b7eaed25SJason Evans void *p = je_mallocx(size, flags);
3756b7eaed25SJason Evans if (p == NULL) {
3757a4bd5210SJason Evans return (ALLOCM_ERR_OOM);
3758b7eaed25SJason Evans }
3759b7eaed25SJason Evans if (rsize != NULL) {
3760b7eaed25SJason Evans *rsize = isalloc(tsdn_fetch(), p);
3761b7eaed25SJason Evans }
3762f921d10fSJason Evans *ptr = p;
3763b7eaed25SJason Evans return ALLOCM_SUCCESS;
3764a4bd5210SJason Evans }
3765a4bd5210SJason Evans
3766a4bd5210SJason Evans int
je_rallocm(void ** ptr,size_t * rsize,size_t size,size_t extra,int flags)3767b7eaed25SJason Evans je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) {
3768a4bd5210SJason Evans assert(ptr != NULL);
3769a4bd5210SJason Evans assert(*ptr != NULL);
3770a4bd5210SJason Evans assert(size != 0);
3771a4bd5210SJason Evans assert(SIZE_T_MAX - size >= extra);
3772a4bd5210SJason Evans
3773b7eaed25SJason Evans int ret;
3774b7eaed25SJason Evans bool no_move = flags & ALLOCM_NO_MOVE;
3775b7eaed25SJason Evans
3776f921d10fSJason Evans if (no_move) {
3777f921d10fSJason Evans size_t usize = je_xallocx(*ptr, size, extra, flags);
3778f921d10fSJason Evans ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
3779b7eaed25SJason Evans if (rsize != NULL) {
3780a4bd5210SJason Evans *rsize = usize;
3781b7eaed25SJason Evans }
3782a4bd5210SJason Evans } else {
3783f921d10fSJason Evans void *p = je_rallocx(*ptr, size+extra, flags);
3784f921d10fSJason Evans if (p != NULL) {
3785f921d10fSJason Evans *ptr = p;
3786f921d10fSJason Evans ret = ALLOCM_SUCCESS;
3787b7eaed25SJason Evans } else {
3788f921d10fSJason Evans ret = ALLOCM_ERR_OOM;
3789a4bd5210SJason Evans }
3790b7eaed25SJason Evans if (rsize != NULL) {
3791b7eaed25SJason Evans *rsize = isalloc(tsdn_fetch(), *ptr);
3792b7eaed25SJason Evans }
3793b7eaed25SJason Evans }
3794b7eaed25SJason Evans return ret;
3795a4bd5210SJason Evans }
3796a4bd5210SJason Evans
3797a4bd5210SJason Evans int
je_sallocm(const void * ptr,size_t * rsize,int flags)3798b7eaed25SJason Evans je_sallocm(const void *ptr, size_t *rsize, int flags) {
3799a4bd5210SJason Evans assert(rsize != NULL);
3800f921d10fSJason Evans *rsize = je_sallocx(ptr, flags);
3801b7eaed25SJason Evans return ALLOCM_SUCCESS;
3802a4bd5210SJason Evans }
3803a4bd5210SJason Evans
/*
 * Compatibility shim for the retired experimental *allocm() API: deallocate
 * ptr via dallocx().  Always reports success.
 */
int
je_dallocm(void *ptr, int flags) {
	je_dallocx(ptr, flags);
	return ALLOCM_SUCCESS;
}
3809a4bd5210SJason Evans
3810a4bd5210SJason Evans int
je_nallocm(size_t * rsize,size_t size,int flags)3811b7eaed25SJason Evans je_nallocm(size_t *rsize, size_t size, int flags) {
3812b7eaed25SJason Evans size_t usize = je_nallocx(size, flags);
3813b7eaed25SJason Evans if (usize == 0) {
3814b7eaed25SJason Evans return ALLOCM_ERR_OOM;
3815b7eaed25SJason Evans }
3816b7eaed25SJason Evans if (rsize != NULL) {
3817a4bd5210SJason Evans *rsize = usize;
3818b7eaed25SJason Evans }
3819b7eaed25SJason Evans return ALLOCM_SUCCESS;
3820a4bd5210SJason Evans }
3821a4bd5210SJason Evans
3822d0e79aa3SJason Evans #undef ALLOCM_LG_ALIGN
3823d0e79aa3SJason Evans #undef ALLOCM_ALIGN
3824d0e79aa3SJason Evans #undef ALLOCM_ZERO
3825d0e79aa3SJason Evans #undef ALLOCM_NO_MOVE
3826d0e79aa3SJason Evans
3827d0e79aa3SJason Evans #undef ALLOCM_SUCCESS
3828d0e79aa3SJason Evans #undef ALLOCM_ERR_OOM
3829d0e79aa3SJason Evans #undef ALLOCM_ERR_NOT_MOVED
3830d0e79aa3SJason Evans
3831a4bd5210SJason Evans /*
3832d0e79aa3SJason Evans * End compatibility functions.
3833a4bd5210SJason Evans */
3834a4bd5210SJason Evans /******************************************************************************/
3835a4bd5210SJason Evans /*
3836a4bd5210SJason Evans * The following functions are used by threading libraries for protection of
3837a4bd5210SJason Evans * malloc during fork().
3838a4bd5210SJason Evans */
3839a4bd5210SJason Evans
384082872ac0SJason Evans /*
384182872ac0SJason Evans * If an application creates a thread before doing any allocation in the main
384282872ac0SJason Evans * thread, then calls fork(2) in the main thread followed by memory allocation
384382872ac0SJason Evans * in the child process, a race can occur that results in deadlock within the
384482872ac0SJason Evans * child: the main thread may have forked while the created thread had
384582872ac0SJason Evans * partially initialized the allocator. Ordinarily jemalloc prevents
384682872ac0SJason Evans * fork/malloc races via the following functions it registers during
384782872ac0SJason Evans * initialization using pthread_atfork(), but of course that does no good if
384882872ac0SJason Evans * the allocator isn't fully initialized at fork time. The following library
3849d0e79aa3SJason Evans * constructor is a partial solution to this problem. It may still be possible
3850d0e79aa3SJason Evans * to trigger the deadlock described above, but doing so would involve forking
3851d0e79aa3SJason Evans * via a library constructor that runs before jemalloc's runs.
385282872ac0SJason Evans */
#ifndef JEMALLOC_JET
/*
 * Library constructor (omitted in JEMALLOC_JET test builds): eagerly
 * initialize the allocator at load time, narrowing the fork-before-
 * initialization race described in the comment above.
 */
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void) {
	malloc_init();
}
#endif
386082872ac0SJason Evans
/*
 * pthread_atfork() prepare handler: acquire every allocator mutex so that no
 * lock is held mid-operation by another thread at the instant of fork().
 * The acquisition order here is the canonical lock order; postfork handlers
 * release in the reverse order.  Exported as _malloc_prefork() on platforms
 * that use mutex init callbacks (JEMALLOC_MUTEX_INIT_CB).
 */
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, j, narenas;
	arena_t *arena;

#ifdef JEMALLOC_MUTEX_INIT_CB
	/* Nothing to lock if the allocator was never initialized. */
	if (!malloc_initialized()) {
		return;
	}
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	narenas = narenas_total_get();

	witness_prefork(tsd_witness_tsdp_get(tsd));
	/* Acquire all mutexes in a safe order. */
	ctl_prefork(tsd_tsdn(tsd));
	tcache_prefork(tsd_tsdn(tsd));
	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
	if (have_background_thread) {
		background_thread_prefork0(tsd_tsdn(tsd));
	}
	prof_prefork0(tsd_tsdn(tsd));
	if (have_background_thread) {
		background_thread_prefork1(tsd_tsdn(tsd));
	}
	/* Break arena prefork into stages to preserve lock order. */
	for (i = 0; i < 8; i++) {
		/* Stage i locks the i'th mutex group of every arena. */
		for (j = 0; j < narenas; j++) {
			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
			    NULL) {
				switch (i) {
				case 0:
					arena_prefork0(tsd_tsdn(tsd), arena);
					break;
				case 1:
					arena_prefork1(tsd_tsdn(tsd), arena);
					break;
				case 2:
					arena_prefork2(tsd_tsdn(tsd), arena);
					break;
				case 3:
					arena_prefork3(tsd_tsdn(tsd), arena);
					break;
				case 4:
					arena_prefork4(tsd_tsdn(tsd), arena);
					break;
				case 5:
					arena_prefork5(tsd_tsdn(tsd), arena);
					break;
				case 6:
					arena_prefork6(tsd_tsdn(tsd), arena);
					break;
				case 7:
					arena_prefork7(tsd_tsdn(tsd), arena);
					break;
				default: not_reached();
				}
			}
		}
	}
	prof_prefork1(tsd_tsdn(tsd));
	tsd_prefork(tsd);
}
3934a4bd5210SJason Evans
/*
 * pthread_atfork() parent handler: release, in the parent process, every
 * mutex acquired by the prefork handler, in reverse acquisition order.
 * Exported as _malloc_postfork() on JEMALLOC_MUTEX_INIT_CB platforms.
 */
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, narenas;

#ifdef JEMALLOC_MUTEX_INIT_CB
	/* Prefork locked nothing if the allocator was uninitialized. */
	if (!malloc_initialized()) {
		return;
	}
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	tsd_postfork_parent(tsd);

	witness_postfork_parent(tsd_witness_tsdp_get(tsd));
	/* Release all mutexes, now that fork() has completed. */
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
			arena_postfork_parent(tsd_tsdn(tsd), arena);
		}
	}
	prof_postfork_parent(tsd_tsdn(tsd));
	if (have_background_thread) {
		background_thread_postfork_parent(tsd_tsdn(tsd));
	}
	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
	tcache_postfork_parent(tsd_tsdn(tsd));
	ctl_postfork_parent(tsd_tsdn(tsd));
}
3974a4bd5210SJason Evans
/*
 * pthread_atfork() child handler: reinitialize/release, in the child
 * process, every mutex acquired by the prefork handler, in reverse
 * acquisition order, so the single-threaded child can allocate safely.
 */
void
jemalloc_postfork_child(void) {
	tsd_t *tsd;
	unsigned i, narenas;

	assert(malloc_initialized());

	tsd = tsd_fetch();

	tsd_postfork_child(tsd);

	witness_postfork_child(tsd_witness_tsdp_get(tsd));
	/* Release all mutexes, now that fork() has completed. */
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) {
			arena_postfork_child(tsd_tsdn(tsd), arena);
		}
	}
	prof_postfork_child(tsd_tsdn(tsd));
	if (have_background_thread) {
		background_thread_postfork_child(tsd_tsdn(tsd));
	}
	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
	tcache_postfork_child(tsd_tsdn(tsd));
	ctl_postfork_child(tsd_tsdn(tsd));
}
4003a4bd5210SJason Evans
/*
 * Initialize malloc mutexes for the process's first thread; the helper's
 * return value is intentionally ignored.
 */
void
_malloc_first_thread(void) {
	(void)malloc_mutex_first_thread();
}
40108495e8b1SKonstantin Belousov
4011a4bd5210SJason Evans /******************************************************************************/
4012