#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/log.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/
/* Data. */

/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
const char	*__malloc_options_1_0 = NULL;
__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);

/* Runtime configuration options. */
/*
 * User-supplied configuration string; declared weak (where supported) so an
 * application may provide its own definition at link time.
 */
const char	*je_malloc_conf
#ifndef _WIN32
    JEMALLOC_ATTR(weak)
#endif
    ;
/* Abort on warning/error; defaults to true only in debug builds. */
bool	opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
/* Abort on invalid configuration options; true in debug builds. */
bool	opt_abort_conf =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
/* String form of the junk-fill default; mirrors opt_junk_{alloc,free}. */
const char	*opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    "true"
#else
    "false"
#endif
    ;
bool	opt_junk_alloc =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
bool	opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;

bool	opt_utrace = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
unsigned	opt_narenas = 0;

/* Number of CPUs detected at initialization; see malloc_ncpus(). */
unsigned	ncpus;

/* Protects arenas initialization. */
malloc_mutex_t	arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 *
 * Points to an arena_t.
 */
JEMALLOC_ALIGNED(CACHELINE)
atomic_p_t		arenas[MALLOCX_ARENA_LIMIT];
static atomic_u_t	narenas_total; /* Use narenas_total_*(). */
static arena_t		*a0; /* arenas[0]; read-only after initialization. */
unsigned		narenas_auto; /* Read-only after initialization. */

typedef enum {
	malloc_init_uninitialized	= 3,
	malloc_init_a0_initialized	= 2,
	malloc_init_recursible		= 1,
	malloc_init_initialized		= 0 /* Common case --> jnz. */
} malloc_init_t;
static malloc_init_t	malloc_init_state = malloc_init_uninitialized;

/* False should be the common case.  Set to true to trigger initialization. */
bool			malloc_slow = true;

/* When malloc_slow is true, set the corresponding bits for sanity check. */
enum {
	flag_opt_junk_alloc	= (1U),
	flag_opt_junk_free	= (1U << 1),
	flag_opt_zero		= (1U << 2),
	flag_opt_utrace		= (1U << 3),
	flag_opt_xmalloc	= (1U << 4)
};
/* Bitmask of the flag_opt_* bits that are currently enabled. */
static uint8_t	malloc_slow_flags;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER	((unsigned long)0)
#  define INITIALIZER		pthread_self()
#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t		malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER	false
#  define INITIALIZER		true
#  define IS_INITIALIZER	malloc_initializer
static bool			malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
#if _WIN32_WINNT >= 0x0600
static malloc_mutex_t	init_lock = SRWLOCK_INIT;
#else
static malloc_mutex_t	init_lock;
static bool init_lock_initialized = false;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void) {
	/*
	 * If another constructor in the same binary is using mallctl to e.g.
	 * set up extent hooks, it may end up running before this one, and
	 * malloc_init_hard will crash trying to lock the uninitialized lock. So
	 * we force an initialization of the lock in malloc_init_hard as well.
	 * We don't try to care about atomicity of the accessed to the
	 * init_lock_initialized boolean, since it really only matters early in
	 * the process creation, before any separate thread normally starts
	 * doing anything.
	 */
	if (!init_lock_initialized) {
		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
		    malloc_mutex_rank_exclusive);
	}
	init_lock_initialized = true;
}

#ifdef _MSC_VER
#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
#endif
#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

/* Record emitted via utrace(2) when opt_utrace is enabled. */
typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
/* Emit a utrace record; saves/restores errno around the utrace(2) call. */
#  define UTRACE(a, b, c) do {						\
	if (unlikely(opt_utrace)) {					\
		int utrace_serrno = errno;				\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
		errno = utrace_serrno;					\
	}								\
} while (0)
#else
#  define UTRACE(a, b, c)
#endif

/* Whether encountered any invalid config options. */
static bool had_conf_error = false;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool	malloc_init_hard_a0(void);
static bool	malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
204a4bd5210SJason Evans */ 205a4bd5210SJason Evans 206b7eaed25SJason Evans bool 207b7eaed25SJason Evans malloc_initialized(void) { 208d0e79aa3SJason Evans return (malloc_init_state == malloc_init_initialized); 209a4bd5210SJason Evans } 210d0e79aa3SJason Evans 211b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE bool 212b7eaed25SJason Evans malloc_init_a0(void) { 213b7eaed25SJason Evans if (unlikely(malloc_init_state == malloc_init_uninitialized)) { 214b7eaed25SJason Evans return malloc_init_hard_a0(); 215b7eaed25SJason Evans } 216b7eaed25SJason Evans return false; 217a4bd5210SJason Evans } 218a4bd5210SJason Evans 219b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE bool 220b7eaed25SJason Evans malloc_init(void) { 221b7eaed25SJason Evans if (unlikely(!malloc_initialized()) && malloc_init_hard()) { 222b7eaed25SJason Evans return true; 223d0e79aa3SJason Evans } 224b7eaed25SJason Evans return false; 225d0e79aa3SJason Evans } 226d0e79aa3SJason Evans 227d0e79aa3SJason Evans /* 2281f0a49e8SJason Evans * The a0*() functions are used instead of i{d,}alloc() in situations that 229d0e79aa3SJason Evans * cannot tolerate TLS variable access. 
230d0e79aa3SJason Evans */ 231d0e79aa3SJason Evans 232d0e79aa3SJason Evans static void * 233b7eaed25SJason Evans a0ialloc(size_t size, bool zero, bool is_internal) { 234b7eaed25SJason Evans if (unlikely(malloc_init_a0())) { 235b7eaed25SJason Evans return NULL; 236b7eaed25SJason Evans } 237d0e79aa3SJason Evans 238b7eaed25SJason Evans return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL, 239b7eaed25SJason Evans is_internal, arena_get(TSDN_NULL, 0, true), true); 240d0e79aa3SJason Evans } 241d0e79aa3SJason Evans 242d0e79aa3SJason Evans static void 243b7eaed25SJason Evans a0idalloc(void *ptr, bool is_internal) { 244b7eaed25SJason Evans idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true); 245bde95144SJason Evans } 246bde95144SJason Evans 247d0e79aa3SJason Evans void * 248b7eaed25SJason Evans a0malloc(size_t size) { 249b7eaed25SJason Evans return a0ialloc(size, false, true); 250d0e79aa3SJason Evans } 251d0e79aa3SJason Evans 252d0e79aa3SJason Evans void 253b7eaed25SJason Evans a0dalloc(void *ptr) { 254d0e79aa3SJason Evans a0idalloc(ptr, true); 255d0e79aa3SJason Evans } 256d0e79aa3SJason Evans 257d0e79aa3SJason Evans /* 258d0e79aa3SJason Evans * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive 259d0e79aa3SJason Evans * situations that cannot tolerate TLS variable access (TLS allocation and very 260d0e79aa3SJason Evans * early internal data structure initialization). 
261d0e79aa3SJason Evans */ 262d0e79aa3SJason Evans 263d0e79aa3SJason Evans void * 264b7eaed25SJason Evans bootstrap_malloc(size_t size) { 265b7eaed25SJason Evans if (unlikely(size == 0)) { 266d0e79aa3SJason Evans size = 1; 267b7eaed25SJason Evans } 268d0e79aa3SJason Evans 269b7eaed25SJason Evans return a0ialloc(size, false, false); 270d0e79aa3SJason Evans } 271d0e79aa3SJason Evans 272d0e79aa3SJason Evans void * 273b7eaed25SJason Evans bootstrap_calloc(size_t num, size_t size) { 274d0e79aa3SJason Evans size_t num_size; 275d0e79aa3SJason Evans 276d0e79aa3SJason Evans num_size = num * size; 277d0e79aa3SJason Evans if (unlikely(num_size == 0)) { 278d0e79aa3SJason Evans assert(num == 0 || size == 0); 279d0e79aa3SJason Evans num_size = 1; 280d0e79aa3SJason Evans } 281d0e79aa3SJason Evans 282b7eaed25SJason Evans return a0ialloc(num_size, true, false); 283d0e79aa3SJason Evans } 284d0e79aa3SJason Evans 285d0e79aa3SJason Evans void 286b7eaed25SJason Evans bootstrap_free(void *ptr) { 287b7eaed25SJason Evans if (unlikely(ptr == NULL)) { 288d0e79aa3SJason Evans return; 289b7eaed25SJason Evans } 290d0e79aa3SJason Evans 291d0e79aa3SJason Evans a0idalloc(ptr, false); 292d0e79aa3SJason Evans } 293d0e79aa3SJason Evans 294b7eaed25SJason Evans void 295b7eaed25SJason Evans arena_set(unsigned ind, arena_t *arena) { 296b7eaed25SJason Evans atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE); 297df0d881dSJason Evans } 298df0d881dSJason Evans 299df0d881dSJason Evans static void 300b7eaed25SJason Evans narenas_total_set(unsigned narenas) { 301b7eaed25SJason Evans atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE); 302df0d881dSJason Evans } 303df0d881dSJason Evans 304df0d881dSJason Evans static void 305b7eaed25SJason Evans narenas_total_inc(void) { 306b7eaed25SJason Evans atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE); 307df0d881dSJason Evans } 308df0d881dSJason Evans 309df0d881dSJason Evans unsigned 310b7eaed25SJason Evans narenas_total_get(void) { 311b7eaed25SJason Evans return 
atomic_load_u(&narenas_total, ATOMIC_ACQUIRE); 312df0d881dSJason Evans } 313df0d881dSJason Evans 314d0e79aa3SJason Evans /* Create a new arena and insert it into the arenas array at index ind. */ 315d0e79aa3SJason Evans static arena_t * 316b7eaed25SJason Evans arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { 317d0e79aa3SJason Evans arena_t *arena; 318d0e79aa3SJason Evans 319df0d881dSJason Evans assert(ind <= narenas_total_get()); 320b7eaed25SJason Evans if (ind >= MALLOCX_ARENA_LIMIT) { 321b7eaed25SJason Evans return NULL; 322b7eaed25SJason Evans } 323b7eaed25SJason Evans if (ind == narenas_total_get()) { 324df0d881dSJason Evans narenas_total_inc(); 325b7eaed25SJason Evans } 326d0e79aa3SJason Evans 327d0e79aa3SJason Evans /* 328d0e79aa3SJason Evans * Another thread may have already initialized arenas[ind] if it's an 329d0e79aa3SJason Evans * auto arena. 330d0e79aa3SJason Evans */ 3311f0a49e8SJason Evans arena = arena_get(tsdn, ind, false); 332d0e79aa3SJason Evans if (arena != NULL) { 333d0e79aa3SJason Evans assert(ind < narenas_auto); 334b7eaed25SJason Evans return arena; 335d0e79aa3SJason Evans } 336d0e79aa3SJason Evans 337d0e79aa3SJason Evans /* Actually initialize the arena. 
*/ 338b7eaed25SJason Evans arena = arena_new(tsdn, ind, extent_hooks); 339d0e79aa3SJason Evans 340b7eaed25SJason Evans return arena; 341d0e79aa3SJason Evans } 342d0e79aa3SJason Evans 343d0e79aa3SJason Evans static void 344b7eaed25SJason Evans arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) { 345b7eaed25SJason Evans if (ind == 0) { 346b7eaed25SJason Evans return; 347b7eaed25SJason Evans } 348b7eaed25SJason Evans if (have_background_thread) { 349b7eaed25SJason Evans bool err; 350b7eaed25SJason Evans malloc_mutex_lock(tsdn, &background_thread_lock); 351b7eaed25SJason Evans err = background_thread_create(tsdn_tsd(tsdn), ind); 352b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &background_thread_lock); 353b7eaed25SJason Evans if (err) { 354b7eaed25SJason Evans malloc_printf("<jemalloc>: error in background thread " 355b7eaed25SJason Evans "creation for arena %u. Abort.\n", ind); 356b7eaed25SJason Evans abort(); 357b7eaed25SJason Evans } 358b7eaed25SJason Evans } 359b7eaed25SJason Evans } 360b7eaed25SJason Evans 361b7eaed25SJason Evans arena_t * 362b7eaed25SJason Evans arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { 363df0d881dSJason Evans arena_t *arena; 364d0e79aa3SJason Evans 365b7eaed25SJason Evans malloc_mutex_lock(tsdn, &arenas_lock); 366b7eaed25SJason Evans arena = arena_init_locked(tsdn, ind, extent_hooks); 367b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &arenas_lock); 368bde95144SJason Evans 369b7eaed25SJason Evans arena_new_create_background_thread(tsdn, ind); 370b7eaed25SJason Evans 371b7eaed25SJason Evans return arena; 372b7eaed25SJason Evans } 373b7eaed25SJason Evans 374b7eaed25SJason Evans static void 375b7eaed25SJason Evans arena_bind(tsd_t *tsd, unsigned ind, bool internal) { 376b7eaed25SJason Evans arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false); 3771f0a49e8SJason Evans arena_nthreads_inc(arena, internal); 378df0d881dSJason Evans 379b7eaed25SJason Evans if (internal) { 3801f0a49e8SJason Evans 
		tsd_iarena_set(tsd, arena);
	} else {
		tsd_arena_set(tsd, arena);
	}
}

/* Move the calling thread's application binding from oldind to newind. */
void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
	arena_t *oldarena, *newarena;

	oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
	newarena = arena_get(tsd_tsdn(tsd), newind, false);
	arena_nthreads_dec(oldarena, false);
	arena_nthreads_inc(newarena, false);
	tsd_arena_set(tsd, newarena);
}

/* Undo arena_bind(): drop the thread count and clear the TSD slot. */
static void
arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
	arena_t *arena;

	arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_dec(arena, internal);

	if (internal) {
		tsd_iarena_set(tsd, NULL);
	} else {
		tsd_arena_set(tsd, NULL);
	}
}

/*
 * Slow path for fetching this thread's per-arena tdata for arena ind,
 * (re)allocating and refreshing the thread-local tdata array as needed.
 * Returns NULL on allocation failure.
 */
arena_tdata_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
	arena_tdata_t *tdata, *arenas_tdata_old;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
	unsigned narenas_tdata_old, i;
	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
	unsigned narenas_actual = narenas_total_get();

	/*
	 * Dissociate old tdata array (and set up for deallocation upon return)
	 * if it's too small.
	 */
	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
		arenas_tdata_old = arenas_tdata;
		narenas_tdata_old = narenas_tdata;
		arenas_tdata = NULL;
		narenas_tdata = 0;
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	} else {
		arenas_tdata_old = NULL;
		narenas_tdata_old = 0;
	}

	/* Allocate tdata array if it's missing. */
	if (arenas_tdata == NULL) {
		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
		/* Size for at least ind+1 slots even past narenas_actual. */
		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;

		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
			/* Bypass guards against reentrant (re)creation. */
			*arenas_tdata_bypassp = true;
			arenas_tdata = (arena_tdata_t *)a0malloc(
			    sizeof(arena_tdata_t) * narenas_tdata);
			*arenas_tdata_bypassp = false;
		}
		if (arenas_tdata == NULL) {
			tdata = NULL;
			goto label_return;
		}
		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	}

	/*
	 * Copy to tdata array.  It's possible that the actual number of arenas
	 * has increased since narenas_total_get() was called above, but that
	 * causes no correctness issues unless two threads concurrently execute
	 * the arenas.create mallctl, which we trust mallctl synchronization to
	 * prevent.
	 */

	/* Copy/initialize tickers. */
	for (i = 0; i < narenas_actual; i++) {
		if (i < narenas_tdata_old) {
			ticker_copy(&arenas_tdata[i].decay_ticker,
			    &arenas_tdata_old[i].decay_ticker);
		} else {
			ticker_init(&arenas_tdata[i].decay_ticker,
			    DECAY_NTICKS_PER_UPDATE);
		}
	}
	if (narenas_tdata > narenas_actual) {
		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
		    * (narenas_tdata - narenas_actual));
	}

	/* Read the refreshed tdata array. */
	tdata = &arenas_tdata[ind];
label_return:
	if (arenas_tdata_old != NULL) {
		a0dalloc(arenas_tdata_old);
	}
	return tdata;
}

/* Slow path, called only by arena_choose(). */
/*
 * Pick and bind an arena for the calling thread when no binding exists yet.
 * Prefers the least-loaded initialized arena; lazily creates a new one if an
 * uninitialized slot is available.  Returns NULL only on arena creation
 * failure.
 */
arena_t *
arena_choose_hard(tsd_t *tsd, bool internal) {
	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);

	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
		/* Per-CPU mode: arena choice is dictated by the current CPU. */
		unsigned choose = percpu_arena_choose();
		ret = arena_get(tsd_tsdn(tsd), choose, true);
		assert(ret != NULL);
		arena_bind(tsd, arena_ind_get(ret), false);
		arena_bind(tsd, arena_ind_get(ret), true);

		return ret;
	}

	if (narenas_auto > 1) {
		unsigned i, j, choose[2], first_null;
		bool is_new_arena[2];

		/*
		 * Determine binding for both non-internal and internal
		 * allocation.
		 *
		 *   choose[0]: For application allocation.
		 *   choose[1]: For internal metadata allocation.
		 */

		for (j = 0; j < 2; j++) {
			choose[j] = 0;
			is_new_arena[j] = false;
		}

		first_null = narenas_auto;
		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				for (j = 0; j < 2; j++) {
					if (arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), i, false), !!j) <
					    arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), choose[j], false),
					    !!j)) {
						choose[j] = i;
					}
				}
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		for (j = 0; j < 2; j++) {
			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
			    choose[j], false), !!j) == 0 || first_null ==
			    narenas_auto) {
				/*
				 * Use an unloaded arena, or the least loaded
				 * arena if all arenas are already initialized.
				 */
				if (!!j == internal) {
					ret = arena_get(tsd_tsdn(tsd),
					    choose[j], false);
				}
			} else {
				arena_t *arena;

				/* Initialize a new arena. */
				choose[j] = first_null;
				arena = arena_init_locked(tsd_tsdn(tsd),
				    choose[j],
				    (extent_hooks_t *)&extent_hooks_default);
				if (arena == NULL) {
					malloc_mutex_unlock(tsd_tsdn(tsd),
					    &arenas_lock);
					return NULL;
				}
				is_new_arena[j] = true;
				if (!!j == internal) {
					ret = arena;
				}
			}
			arena_bind(tsd, choose[j], !!j);
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);

		/* Background threads are created outside arenas_lock. */
		for (j = 0; j < 2; j++) {
			if (is_new_arena[j]) {
				assert(choose[j] > 0);
				arena_new_create_background_thread(
				    tsd_tsdn(tsd), choose[j]);
			}
		}

	} else {
		/* Single auto arena: everything binds to arena 0. */
		ret = arena_get(tsd_tsdn(tsd), 0, false);
		arena_bind(tsd, 0, false);
		arena_bind(tsd, 0, true);
	}

	return ret;
}

/* TSD destructor: release this thread's internal-metadata arena binding. */
void
iarena_cleanup(tsd_t *tsd) {
	arena_t *iarena;

	iarena = tsd_iarena_get(tsd);
	if (iarena != NULL) {
		arena_unbind(tsd, arena_ind_get(iarena), true);
	}
}

/* TSD destructor: release this thread's application arena binding. */
void
arena_cleanup(tsd_t *tsd) {
	arena_t *arena;

616d0e79aa3SJason Evans arena = tsd_arena_get(tsd); 617b7eaed25SJason Evans if (arena != NULL) { 618b7eaed25SJason Evans arena_unbind(tsd, arena_ind_get(arena), false); 619b7eaed25SJason Evans } 620d0e79aa3SJason Evans } 621d0e79aa3SJason Evans 622d0e79aa3SJason Evans void 623b7eaed25SJason Evans arenas_tdata_cleanup(tsd_t *tsd) { 624df0d881dSJason Evans arena_tdata_t *arenas_tdata; 625d0e79aa3SJason Evans 626df0d881dSJason Evans /* Prevent tsd->arenas_tdata from being (re)created. */ 627df0d881dSJason Evans *tsd_arenas_tdata_bypassp_get(tsd) = true; 628df0d881dSJason Evans 629df0d881dSJason Evans arenas_tdata = tsd_arenas_tdata_get(tsd); 630df0d881dSJason Evans if (arenas_tdata != NULL) { 631df0d881dSJason Evans tsd_arenas_tdata_set(tsd, NULL); 632df0d881dSJason Evans a0dalloc(arenas_tdata); 633d0e79aa3SJason Evans } 634536b3538SJason Evans } 635d0e79aa3SJason Evans 636a4bd5210SJason Evans static void 637b7eaed25SJason Evans stats_print_atexit(void) { 638b7eaed25SJason Evans if (config_stats) { 6391f0a49e8SJason Evans tsdn_t *tsdn; 64082872ac0SJason Evans unsigned narenas, i; 641a4bd5210SJason Evans 6421f0a49e8SJason Evans tsdn = tsdn_fetch(); 6431f0a49e8SJason Evans 644a4bd5210SJason Evans /* 645a4bd5210SJason Evans * Merge stats from extant threads. This is racy, since 646a4bd5210SJason Evans * individual threads do not lock when recording tcache stats 647a4bd5210SJason Evans * events. As a consequence, the final stats may be slightly 648a4bd5210SJason Evans * out of date by the time they are reported, if other threads 649a4bd5210SJason Evans * continue to allocate. 
650a4bd5210SJason Evans */ 65182872ac0SJason Evans for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { 6521f0a49e8SJason Evans arena_t *arena = arena_get(tsdn, i, false); 653a4bd5210SJason Evans if (arena != NULL) { 654a4bd5210SJason Evans tcache_t *tcache; 655a4bd5210SJason Evans 656b7eaed25SJason Evans malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); 657a4bd5210SJason Evans ql_foreach(tcache, &arena->tcache_ql, link) { 6581f0a49e8SJason Evans tcache_stats_merge(tsdn, tcache, arena); 659a4bd5210SJason Evans } 660b7eaed25SJason Evans malloc_mutex_unlock(tsdn, 661b7eaed25SJason Evans &arena->tcache_ql_mtx); 662a4bd5210SJason Evans } 663a4bd5210SJason Evans } 664a4bd5210SJason Evans } 665b7eaed25SJason Evans je_malloc_stats_print(NULL, NULL, opt_stats_print_opts); 666b7eaed25SJason Evans } 667b7eaed25SJason Evans 668b7eaed25SJason Evans /* 669b7eaed25SJason Evans * Ensure that we don't hold any locks upon entry to or exit from allocator 670b7eaed25SJason Evans * code (in a "broad" sense that doesn't count a reentrant allocation as an 671b7eaed25SJason Evans * entrance or exit). 672b7eaed25SJason Evans */ 673b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void 674b7eaed25SJason Evans check_entry_exit_locking(tsdn_t *tsdn) { 675b7eaed25SJason Evans if (!config_debug) { 676b7eaed25SJason Evans return; 677b7eaed25SJason Evans } 678b7eaed25SJason Evans if (tsdn_null(tsdn)) { 679b7eaed25SJason Evans return; 680b7eaed25SJason Evans } 681b7eaed25SJason Evans tsd_t *tsd = tsdn_tsd(tsdn); 682b7eaed25SJason Evans /* 683b7eaed25SJason Evans * It's possible we hold locks at entry/exit if we're in a nested 684b7eaed25SJason Evans * allocation. 
685b7eaed25SJason Evans */ 686b7eaed25SJason Evans int8_t reentrancy_level = tsd_reentrancy_level_get(tsd); 687b7eaed25SJason Evans if (reentrancy_level != 0) { 688b7eaed25SJason Evans return; 689b7eaed25SJason Evans } 690b7eaed25SJason Evans witness_assert_lockless(tsdn_witness_tsdp_get(tsdn)); 691a4bd5210SJason Evans } 692a4bd5210SJason Evans 693a4bd5210SJason Evans /* 694a4bd5210SJason Evans * End miscellaneous support functions. 695a4bd5210SJason Evans */ 696a4bd5210SJason Evans /******************************************************************************/ 697a4bd5210SJason Evans /* 698a4bd5210SJason Evans * Begin initialization functions. 699a4bd5210SJason Evans */ 700a4bd5210SJason Evans 701d0e79aa3SJason Evans static char * 702b7eaed25SJason Evans jemalloc_secure_getenv(const char *name) { 7038244f2aaSJason Evans #ifdef JEMALLOC_HAVE_SECURE_GETENV 7048244f2aaSJason Evans return secure_getenv(name); 7058244f2aaSJason Evans #else 706d0e79aa3SJason Evans # ifdef JEMALLOC_HAVE_ISSETUGID 707b7eaed25SJason Evans if (issetugid() != 0) { 708b7eaed25SJason Evans return NULL; 709b7eaed25SJason Evans } 710d0e79aa3SJason Evans # endif 711b7eaed25SJason Evans return getenv(name); 712d0e79aa3SJason Evans #endif 7138244f2aaSJason Evans } 714d0e79aa3SJason Evans 715a4bd5210SJason Evans static unsigned 716b7eaed25SJason Evans malloc_ncpus(void) { 717a4bd5210SJason Evans long result; 718a4bd5210SJason Evans 719e722f8f8SJason Evans #ifdef _WIN32 720e722f8f8SJason Evans SYSTEM_INFO si; 721e722f8f8SJason Evans GetSystemInfo(&si); 722e722f8f8SJason Evans result = si.dwNumberOfProcessors; 723bde95144SJason Evans #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) 724bde95144SJason Evans /* 725bde95144SJason Evans * glibc >= 2.6 has the CPU_COUNT macro. 726bde95144SJason Evans * 727bde95144SJason Evans * glibc's sysconf() uses isspace(). glibc allocates for the first time 728bde95144SJason Evans * *before* setting up the isspace tables. 
Therefore we need a 729bde95144SJason Evans * different method to get the number of CPUs. 730bde95144SJason Evans */ 731bde95144SJason Evans { 732bde95144SJason Evans cpu_set_t set; 733bde95144SJason Evans 734bde95144SJason Evans pthread_getaffinity_np(pthread_self(), sizeof(set), &set); 735bde95144SJason Evans result = CPU_COUNT(&set); 736bde95144SJason Evans } 737e722f8f8SJason Evans #else 738a4bd5210SJason Evans result = sysconf(_SC_NPROCESSORS_ONLN); 73982872ac0SJason Evans #endif 740f921d10fSJason Evans return ((result == -1) ? 1 : (unsigned)result); 741a4bd5210SJason Evans } 742a4bd5210SJason Evans 743b7eaed25SJason Evans static void 744b7eaed25SJason Evans init_opt_stats_print_opts(const char *v, size_t vlen) { 745b7eaed25SJason Evans size_t opts_len = strlen(opt_stats_print_opts); 746b7eaed25SJason Evans assert(opts_len <= stats_print_tot_num_options); 747b7eaed25SJason Evans 748b7eaed25SJason Evans for (size_t i = 0; i < vlen; i++) { 749b7eaed25SJason Evans switch (v[i]) { 750b7eaed25SJason Evans #define OPTION(o, v, d, s) case o: break; 751b7eaed25SJason Evans STATS_PRINT_OPTIONS 752b7eaed25SJason Evans #undef OPTION 753b7eaed25SJason Evans default: continue; 754b7eaed25SJason Evans } 755b7eaed25SJason Evans 756b7eaed25SJason Evans if (strchr(opt_stats_print_opts, v[i]) != NULL) { 757b7eaed25SJason Evans /* Ignore repeated. 
*/ 758b7eaed25SJason Evans continue; 759b7eaed25SJason Evans } 760b7eaed25SJason Evans 761b7eaed25SJason Evans opt_stats_print_opts[opts_len++] = v[i]; 762b7eaed25SJason Evans opt_stats_print_opts[opts_len] = '\0'; 763b7eaed25SJason Evans assert(opts_len <= stats_print_tot_num_options); 764b7eaed25SJason Evans } 765b7eaed25SJason Evans assert(opts_len == strlen(opt_stats_print_opts)); 766b7eaed25SJason Evans } 767b7eaed25SJason Evans 768a4bd5210SJason Evans static bool 769a4bd5210SJason Evans malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, 770b7eaed25SJason Evans char const **v_p, size_t *vlen_p) { 771a4bd5210SJason Evans bool accept; 772a4bd5210SJason Evans const char *opts = *opts_p; 773a4bd5210SJason Evans 774a4bd5210SJason Evans *k_p = opts; 775a4bd5210SJason Evans 776d0e79aa3SJason Evans for (accept = false; !accept;) { 777a4bd5210SJason Evans switch (*opts) { 778a4bd5210SJason Evans case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': 779a4bd5210SJason Evans case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': 780a4bd5210SJason Evans case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': 781a4bd5210SJason Evans case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': 782a4bd5210SJason Evans case 'Y': case 'Z': 783a4bd5210SJason Evans case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': 784a4bd5210SJason Evans case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': 785a4bd5210SJason Evans case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': 786a4bd5210SJason Evans case 's': case 't': case 'u': case 'v': case 'w': case 'x': 787a4bd5210SJason Evans case 'y': case 'z': 788a4bd5210SJason Evans case '0': case '1': case '2': case '3': case '4': case '5': 789a4bd5210SJason Evans case '6': case '7': case '8': case '9': 790a4bd5210SJason Evans case '_': 791a4bd5210SJason Evans opts++; 792a4bd5210SJason Evans break; 793a4bd5210SJason Evans case ':': 794a4bd5210SJason Evans opts++; 795a4bd5210SJason Evans 
*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; 796a4bd5210SJason Evans *v_p = opts; 797a4bd5210SJason Evans accept = true; 798a4bd5210SJason Evans break; 799a4bd5210SJason Evans case '\0': 800a4bd5210SJason Evans if (opts != *opts_p) { 801a4bd5210SJason Evans malloc_write("<jemalloc>: Conf string ends " 802a4bd5210SJason Evans "with key\n"); 803a4bd5210SJason Evans } 804b7eaed25SJason Evans return true; 805a4bd5210SJason Evans default: 806a4bd5210SJason Evans malloc_write("<jemalloc>: Malformed conf string\n"); 807b7eaed25SJason Evans return true; 808a4bd5210SJason Evans } 809a4bd5210SJason Evans } 810a4bd5210SJason Evans 811d0e79aa3SJason Evans for (accept = false; !accept;) { 812a4bd5210SJason Evans switch (*opts) { 813a4bd5210SJason Evans case ',': 814a4bd5210SJason Evans opts++; 815a4bd5210SJason Evans /* 816a4bd5210SJason Evans * Look ahead one character here, because the next time 817a4bd5210SJason Evans * this function is called, it will assume that end of 818a4bd5210SJason Evans * input has been cleanly reached if no input remains, 819a4bd5210SJason Evans * but we have optimistically already consumed the 820a4bd5210SJason Evans * comma if one exists. 
821a4bd5210SJason Evans */ 822a4bd5210SJason Evans if (*opts == '\0') { 823a4bd5210SJason Evans malloc_write("<jemalloc>: Conf string ends " 824a4bd5210SJason Evans "with comma\n"); 825a4bd5210SJason Evans } 826a4bd5210SJason Evans *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; 827a4bd5210SJason Evans accept = true; 828a4bd5210SJason Evans break; 829a4bd5210SJason Evans case '\0': 830a4bd5210SJason Evans *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; 831a4bd5210SJason Evans accept = true; 832a4bd5210SJason Evans break; 833a4bd5210SJason Evans default: 834a4bd5210SJason Evans opts++; 835a4bd5210SJason Evans break; 836a4bd5210SJason Evans } 837a4bd5210SJason Evans } 838a4bd5210SJason Evans 839a4bd5210SJason Evans *opts_p = opts; 840b7eaed25SJason Evans return false; 841b7eaed25SJason Evans } 842b7eaed25SJason Evans 843b7eaed25SJason Evans static void 844b7eaed25SJason Evans malloc_abort_invalid_conf(void) { 845b7eaed25SJason Evans assert(opt_abort_conf); 846b7eaed25SJason Evans malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf " 847b7eaed25SJason Evans "value (see above).\n"); 848b7eaed25SJason Evans abort(); 849a4bd5210SJason Evans } 850a4bd5210SJason Evans 851a4bd5210SJason Evans static void 852a4bd5210SJason Evans malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, 853b7eaed25SJason Evans size_t vlen) { 854a4bd5210SJason Evans malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, 855a4bd5210SJason Evans (int)vlen, v); 856*0ef50b4eSJason Evans /* If abort_conf is set, error out after processing all options. */ 857b7eaed25SJason Evans had_conf_error = true; 858a4bd5210SJason Evans } 859a4bd5210SJason Evans 860a4bd5210SJason Evans static void 861b7eaed25SJason Evans malloc_slow_flag_init(void) { 862df0d881dSJason Evans /* 863df0d881dSJason Evans * Combine the runtime options into malloc_slow for fast path. Called 864df0d881dSJason Evans * after processing all the options. 
865df0d881dSJason Evans */ 866df0d881dSJason Evans malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) 867df0d881dSJason Evans | (opt_junk_free ? flag_opt_junk_free : 0) 868df0d881dSJason Evans | (opt_zero ? flag_opt_zero : 0) 869df0d881dSJason Evans | (opt_utrace ? flag_opt_utrace : 0) 870df0d881dSJason Evans | (opt_xmalloc ? flag_opt_xmalloc : 0); 871df0d881dSJason Evans 872df0d881dSJason Evans malloc_slow = (malloc_slow_flags != 0); 873df0d881dSJason Evans } 874df0d881dSJason Evans 875df0d881dSJason Evans static void 876b7eaed25SJason Evans malloc_conf_init(void) { 877a4bd5210SJason Evans unsigned i; 878a4bd5210SJason Evans char buf[PATH_MAX + 1]; 879a4bd5210SJason Evans const char *opts, *k, *v; 880a4bd5210SJason Evans size_t klen, vlen; 881a4bd5210SJason Evans 882df0d881dSJason Evans for (i = 0; i < 4; i++) { 883a4bd5210SJason Evans /* Get runtime configuration. */ 884a4bd5210SJason Evans switch (i) { 885a4bd5210SJason Evans case 0: 886df0d881dSJason Evans opts = config_malloc_conf; 887df0d881dSJason Evans break; 888df0d881dSJason Evans case 1: 889a4bd5210SJason Evans if (je_malloc_conf != NULL) { 890a4bd5210SJason Evans /* 891a4bd5210SJason Evans * Use options that were compiled into the 892a4bd5210SJason Evans * program. 893a4bd5210SJason Evans */ 894a4bd5210SJason Evans opts = je_malloc_conf; 895a4bd5210SJason Evans } else { 896a4bd5210SJason Evans /* No configuration specified. 
*/ 897a4bd5210SJason Evans buf[0] = '\0'; 898a4bd5210SJason Evans opts = buf; 899a4bd5210SJason Evans } 900a4bd5210SJason Evans break; 901df0d881dSJason Evans case 2: { 902df0d881dSJason Evans ssize_t linklen = 0; 903e722f8f8SJason Evans #ifndef _WIN32 9042b06b201SJason Evans int saved_errno = errno; 905a4bd5210SJason Evans const char *linkname = 906a4bd5210SJason Evans # ifdef JEMALLOC_PREFIX 907a4bd5210SJason Evans "/etc/"JEMALLOC_PREFIX"malloc.conf" 908a4bd5210SJason Evans # else 909a4bd5210SJason Evans "/etc/malloc.conf" 910a4bd5210SJason Evans # endif 911a4bd5210SJason Evans ; 912a4bd5210SJason Evans 913a4bd5210SJason Evans /* 9142b06b201SJason Evans * Try to use the contents of the "/etc/malloc.conf" 915a4bd5210SJason Evans * symbolic link's name. 916a4bd5210SJason Evans */ 9172b06b201SJason Evans linklen = readlink(linkname, buf, sizeof(buf) - 1); 9182b06b201SJason Evans if (linklen == -1) { 9192b06b201SJason Evans /* No configuration specified. */ 9202b06b201SJason Evans linklen = 0; 921d0e79aa3SJason Evans /* Restore errno. */ 9222b06b201SJason Evans set_errno(saved_errno); 9232b06b201SJason Evans } 9242b06b201SJason Evans #endif 925a4bd5210SJason Evans buf[linklen] = '\0'; 926a4bd5210SJason Evans opts = buf; 927a4bd5210SJason Evans break; 928df0d881dSJason Evans } case 3: { 929a4bd5210SJason Evans const char *envname = 930a4bd5210SJason Evans #ifdef JEMALLOC_PREFIX 931a4bd5210SJason Evans JEMALLOC_CPREFIX"MALLOC_CONF" 932a4bd5210SJason Evans #else 933a4bd5210SJason Evans "MALLOC_CONF" 934a4bd5210SJason Evans #endif 935a4bd5210SJason Evans ; 936a4bd5210SJason Evans 9378244f2aaSJason Evans if ((opts = jemalloc_secure_getenv(envname)) != NULL) { 938a4bd5210SJason Evans /* 939a4bd5210SJason Evans * Do nothing; opts is already initialized to 940a4bd5210SJason Evans * the value of the MALLOC_CONF environment 941a4bd5210SJason Evans * variable. 942a4bd5210SJason Evans */ 943a4bd5210SJason Evans } else { 944a4bd5210SJason Evans /* No configuration specified. 
*/ 945a4bd5210SJason Evans buf[0] = '\0'; 946a4bd5210SJason Evans opts = buf; 947a4bd5210SJason Evans } 948a4bd5210SJason Evans break; 949a4bd5210SJason Evans } default: 950f921d10fSJason Evans not_reached(); 951a4bd5210SJason Evans buf[0] = '\0'; 952a4bd5210SJason Evans opts = buf; 953a4bd5210SJason Evans } 954a4bd5210SJason Evans 955d0e79aa3SJason Evans while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, 956d0e79aa3SJason Evans &vlen)) { 957d0e79aa3SJason Evans #define CONF_MATCH(n) \ 958d0e79aa3SJason Evans (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) 959d0e79aa3SJason Evans #define CONF_MATCH_VALUE(n) \ 960d0e79aa3SJason Evans (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) 961b7eaed25SJason Evans #define CONF_HANDLE_BOOL(o, n) \ 962d0e79aa3SJason Evans if (CONF_MATCH(n)) { \ 963b7eaed25SJason Evans if (CONF_MATCH_VALUE("true")) { \ 964a4bd5210SJason Evans o = true; \ 965b7eaed25SJason Evans } else if (CONF_MATCH_VALUE("false")) { \ 966a4bd5210SJason Evans o = false; \ 967b7eaed25SJason Evans } else { \ 968a4bd5210SJason Evans malloc_conf_error( \ 969a4bd5210SJason Evans "Invalid conf value", \ 970a4bd5210SJason Evans k, klen, v, vlen); \ 971a4bd5210SJason Evans } \ 972a4bd5210SJason Evans continue; \ 973a4bd5210SJason Evans } 9747fa7f12fSJason Evans #define CONF_MIN_no(um, min) false 9757fa7f12fSJason Evans #define CONF_MIN_yes(um, min) ((um) < (min)) 9767fa7f12fSJason Evans #define CONF_MAX_no(um, max) false 9777fa7f12fSJason Evans #define CONF_MAX_yes(um, max) ((um) > (max)) 9787fa7f12fSJason Evans #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ 979d0e79aa3SJason Evans if (CONF_MATCH(n)) { \ 980a4bd5210SJason Evans uintmax_t um; \ 981a4bd5210SJason Evans char *end; \ 982a4bd5210SJason Evans \ 983e722f8f8SJason Evans set_errno(0); \ 984a4bd5210SJason Evans um = malloc_strtoumax(v, &end, 0); \ 985e722f8f8SJason Evans if (get_errno() != 0 || (uintptr_t)end -\ 986a4bd5210SJason Evans (uintptr_t)v != vlen) { \ 
987a4bd5210SJason Evans malloc_conf_error( \ 988a4bd5210SJason Evans "Invalid conf value", \ 989a4bd5210SJason Evans k, klen, v, vlen); \ 99088ad2f8dSJason Evans } else if (clip) { \ 9917fa7f12fSJason Evans if (CONF_MIN_##check_min(um, \ 992b7eaed25SJason Evans (t)(min))) { \ 993df0d881dSJason Evans o = (t)(min); \ 994b7eaed25SJason Evans } else if ( \ 995b7eaed25SJason Evans CONF_MAX_##check_max(um, \ 996b7eaed25SJason Evans (t)(max))) { \ 997df0d881dSJason Evans o = (t)(max); \ 998b7eaed25SJason Evans } else { \ 999df0d881dSJason Evans o = (t)um; \ 1000b7eaed25SJason Evans } \ 100188ad2f8dSJason Evans } else { \ 10027fa7f12fSJason Evans if (CONF_MIN_##check_min(um, \ 10038244f2aaSJason Evans (t)(min)) || \ 10047fa7f12fSJason Evans CONF_MAX_##check_max(um, \ 10058244f2aaSJason Evans (t)(max))) { \ 1006a4bd5210SJason Evans malloc_conf_error( \ 100788ad2f8dSJason Evans "Out-of-range " \ 100888ad2f8dSJason Evans "conf value", \ 1009a4bd5210SJason Evans k, klen, v, vlen); \ 1010b7eaed25SJason Evans } else { \ 1011df0d881dSJason Evans o = (t)um; \ 101288ad2f8dSJason Evans } \ 1013b7eaed25SJason Evans } \ 1014a4bd5210SJason Evans continue; \ 1015a4bd5210SJason Evans } 10167fa7f12fSJason Evans #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ 10177fa7f12fSJason Evans clip) \ 10187fa7f12fSJason Evans CONF_HANDLE_T_U(unsigned, o, n, min, max, \ 10197fa7f12fSJason Evans check_min, check_max, clip) 10207fa7f12fSJason Evans #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ 10217fa7f12fSJason Evans CONF_HANDLE_T_U(size_t, o, n, min, max, \ 10227fa7f12fSJason Evans check_min, check_max, clip) 1023a4bd5210SJason Evans #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ 1024d0e79aa3SJason Evans if (CONF_MATCH(n)) { \ 1025a4bd5210SJason Evans long l; \ 1026a4bd5210SJason Evans char *end; \ 1027a4bd5210SJason Evans \ 1028e722f8f8SJason Evans set_errno(0); \ 1029a4bd5210SJason Evans l = strtol(v, &end, 0); \ 1030e722f8f8SJason Evans if 
(get_errno() != 0 || (uintptr_t)end -\ 1031a4bd5210SJason Evans (uintptr_t)v != vlen) { \ 1032a4bd5210SJason Evans malloc_conf_error( \ 1033a4bd5210SJason Evans "Invalid conf value", \ 1034a4bd5210SJason Evans k, klen, v, vlen); \ 1035d0e79aa3SJason Evans } else if (l < (ssize_t)(min) || l > \ 1036d0e79aa3SJason Evans (ssize_t)(max)) { \ 1037a4bd5210SJason Evans malloc_conf_error( \ 1038a4bd5210SJason Evans "Out-of-range conf value", \ 1039a4bd5210SJason Evans k, klen, v, vlen); \ 1040b7eaed25SJason Evans } else { \ 1041a4bd5210SJason Evans o = l; \ 1042b7eaed25SJason Evans } \ 1043a4bd5210SJason Evans continue; \ 1044a4bd5210SJason Evans } 1045a4bd5210SJason Evans #define CONF_HANDLE_CHAR_P(o, n, d) \ 1046d0e79aa3SJason Evans if (CONF_MATCH(n)) { \ 1047a4bd5210SJason Evans size_t cpylen = (vlen <= \ 1048a4bd5210SJason Evans sizeof(o)-1) ? vlen : \ 1049a4bd5210SJason Evans sizeof(o)-1; \ 1050a4bd5210SJason Evans strncpy(o, v, cpylen); \ 1051a4bd5210SJason Evans o[cpylen] = '\0'; \ 1052a4bd5210SJason Evans continue; \ 1053a4bd5210SJason Evans } 1054a4bd5210SJason Evans 1055b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_abort, "abort") 1056b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf") 1057*0ef50b4eSJason Evans if (strncmp("metadata_thp", k, klen) == 0) { 1058*0ef50b4eSJason Evans int i; 1059*0ef50b4eSJason Evans bool match = false; 1060*0ef50b4eSJason Evans for (i = 0; i < metadata_thp_mode_limit; i++) { 1061*0ef50b4eSJason Evans if (strncmp(metadata_thp_mode_names[i], 1062*0ef50b4eSJason Evans v, vlen) == 0) { 1063*0ef50b4eSJason Evans opt_metadata_thp = i; 1064*0ef50b4eSJason Evans match = true; 1065*0ef50b4eSJason Evans break; 1066*0ef50b4eSJason Evans } 1067*0ef50b4eSJason Evans } 1068*0ef50b4eSJason Evans if (!match) { 1069*0ef50b4eSJason Evans malloc_conf_error("Invalid conf value", 1070*0ef50b4eSJason Evans k, klen, v, vlen); 1071*0ef50b4eSJason Evans } 1072*0ef50b4eSJason Evans continue; 1073b7eaed25SJason Evans } 1074b7eaed25SJason Evans 
CONF_HANDLE_BOOL(opt_retain, "retain") 107582872ac0SJason Evans if (strncmp("dss", k, klen) == 0) { 107682872ac0SJason Evans int i; 107782872ac0SJason Evans bool match = false; 107882872ac0SJason Evans for (i = 0; i < dss_prec_limit; i++) { 107982872ac0SJason Evans if (strncmp(dss_prec_names[i], v, vlen) 108082872ac0SJason Evans == 0) { 1081b7eaed25SJason Evans if (extent_dss_prec_set(i)) { 108282872ac0SJason Evans malloc_conf_error( 108382872ac0SJason Evans "Error setting dss", 108482872ac0SJason Evans k, klen, v, vlen); 108582872ac0SJason Evans } else { 108682872ac0SJason Evans opt_dss = 108782872ac0SJason Evans dss_prec_names[i]; 108882872ac0SJason Evans match = true; 108982872ac0SJason Evans break; 109082872ac0SJason Evans } 109182872ac0SJason Evans } 109282872ac0SJason Evans } 1093d0e79aa3SJason Evans if (!match) { 109482872ac0SJason Evans malloc_conf_error("Invalid conf value", 109582872ac0SJason Evans k, klen, v, vlen); 109682872ac0SJason Evans } 109782872ac0SJason Evans continue; 109882872ac0SJason Evans } 1099df0d881dSJason Evans CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, 11007fa7f12fSJason Evans UINT_MAX, yes, no, false) 1101b7eaed25SJason Evans CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms, 1102b7eaed25SJason Evans "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < 1103b7eaed25SJason Evans QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) : 1104b7eaed25SJason Evans SSIZE_MAX); 1105b7eaed25SJason Evans CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms, 1106b7eaed25SJason Evans "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) < 1107b7eaed25SJason Evans QU(SSIZE_MAX) ? 
NSTIME_SEC_MAX * KQU(1000) : 1108b7eaed25SJason Evans SSIZE_MAX); 1109b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_stats_print, "stats_print") 1110b7eaed25SJason Evans if (CONF_MATCH("stats_print_opts")) { 1111b7eaed25SJason Evans init_opt_stats_print_opts(v, vlen); 1112b7eaed25SJason Evans continue; 1113b7eaed25SJason Evans } 1114b7eaed25SJason Evans if (config_fill) { 1115b7eaed25SJason Evans if (CONF_MATCH("junk")) { 1116b7eaed25SJason Evans if (CONF_MATCH_VALUE("true")) { 1117b7eaed25SJason Evans opt_junk = "true"; 1118b7eaed25SJason Evans opt_junk_alloc = opt_junk_free = 1119b7eaed25SJason Evans true; 1120b7eaed25SJason Evans } else if (CONF_MATCH_VALUE("false")) { 1121b7eaed25SJason Evans opt_junk = "false"; 1122b7eaed25SJason Evans opt_junk_alloc = opt_junk_free = 1123b7eaed25SJason Evans false; 1124b7eaed25SJason Evans } else if (CONF_MATCH_VALUE("alloc")) { 1125b7eaed25SJason Evans opt_junk = "alloc"; 1126b7eaed25SJason Evans opt_junk_alloc = true; 1127b7eaed25SJason Evans opt_junk_free = false; 1128b7eaed25SJason Evans } else if (CONF_MATCH_VALUE("free")) { 1129b7eaed25SJason Evans opt_junk = "free"; 1130b7eaed25SJason Evans opt_junk_alloc = false; 1131b7eaed25SJason Evans opt_junk_free = true; 1132b7eaed25SJason Evans } else { 1133b7eaed25SJason Evans malloc_conf_error( 1134b7eaed25SJason Evans "Invalid conf value", k, 1135b7eaed25SJason Evans klen, v, vlen); 1136b7eaed25SJason Evans } 1137b7eaed25SJason Evans continue; 1138b7eaed25SJason Evans } 1139b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_zero, "zero") 1140b7eaed25SJason Evans } 1141b7eaed25SJason Evans if (config_utrace) { 1142b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_utrace, "utrace") 1143b7eaed25SJason Evans } 1144b7eaed25SJason Evans if (config_xmalloc) { 1145b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") 1146b7eaed25SJason Evans } 1147b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_tcache, "tcache") 1148*0ef50b4eSJason Evans CONF_HANDLE_SIZE_T(opt_lg_extent_max_active_fit, 
1149*0ef50b4eSJason Evans "lg_extent_max_active_fit", 0, 1150*0ef50b4eSJason Evans (sizeof(size_t) << 3), yes, yes, false) 1151b7eaed25SJason Evans CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max", 1152b7eaed25SJason Evans -1, (sizeof(size_t) << 3) - 1) 1153b7eaed25SJason Evans if (strncmp("percpu_arena", k, klen) == 0) { 1154df0d881dSJason Evans bool match = false; 1155*0ef50b4eSJason Evans for (int i = percpu_arena_mode_names_base; i < 1156b7eaed25SJason Evans percpu_arena_mode_names_limit; i++) { 1157b7eaed25SJason Evans if (strncmp(percpu_arena_mode_names[i], 1158b7eaed25SJason Evans v, vlen) == 0) { 1159b7eaed25SJason Evans if (!have_percpu_arena) { 1160b7eaed25SJason Evans malloc_conf_error( 1161b7eaed25SJason Evans "No getcpu support", 1162b7eaed25SJason Evans k, klen, v, vlen); 1163b7eaed25SJason Evans } 1164b7eaed25SJason Evans opt_percpu_arena = i; 1165df0d881dSJason Evans match = true; 1166df0d881dSJason Evans break; 1167df0d881dSJason Evans } 1168df0d881dSJason Evans } 1169df0d881dSJason Evans if (!match) { 1170df0d881dSJason Evans malloc_conf_error("Invalid conf value", 1171df0d881dSJason Evans k, klen, v, vlen); 1172df0d881dSJason Evans } 1173df0d881dSJason Evans continue; 1174df0d881dSJason Evans } 1175b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_background_thread, 1176b7eaed25SJason Evans "background_thread"); 1177*0ef50b4eSJason Evans CONF_HANDLE_SIZE_T(opt_max_background_threads, 1178*0ef50b4eSJason Evans "max_background_threads", 1, 1179*0ef50b4eSJason Evans opt_max_background_threads, yes, yes, 1180*0ef50b4eSJason Evans true); 1181a4bd5210SJason Evans if (config_prof) { 1182b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_prof, "prof") 11838ed34ab0SJason Evans CONF_HANDLE_CHAR_P(opt_prof_prefix, 11848ed34ab0SJason Evans "prof_prefix", "jeprof") 1185b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_prof_active, "prof_active") 1186d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_thread_active_init, 1187b7eaed25SJason Evans "prof_thread_active_init") 
1188d0e79aa3SJason Evans CONF_HANDLE_SIZE_T(opt_lg_prof_sample, 11897fa7f12fSJason Evans "lg_prof_sample", 0, (sizeof(uint64_t) << 3) 11907fa7f12fSJason Evans - 1, no, yes, true) 1191b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") 1192a4bd5210SJason Evans CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, 11938ed34ab0SJason Evans "lg_prof_interval", -1, 1194a4bd5210SJason Evans (sizeof(uint64_t) << 3) - 1) 1195b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") 1196b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_prof_final, "prof_final") 1197b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") 1198a4bd5210SJason Evans } 1199*0ef50b4eSJason Evans if (config_log) { 1200*0ef50b4eSJason Evans if (CONF_MATCH("log")) { 1201*0ef50b4eSJason Evans size_t cpylen = ( 1202*0ef50b4eSJason Evans vlen <= sizeof(log_var_names) ? 1203*0ef50b4eSJason Evans vlen : sizeof(log_var_names) - 1); 1204*0ef50b4eSJason Evans strncpy(log_var_names, v, cpylen); 1205*0ef50b4eSJason Evans log_var_names[cpylen] = '\0'; 1206*0ef50b4eSJason Evans continue; 1207*0ef50b4eSJason Evans } 1208*0ef50b4eSJason Evans } 1209*0ef50b4eSJason Evans if (CONF_MATCH("thp")) { 1210*0ef50b4eSJason Evans bool match = false; 1211*0ef50b4eSJason Evans for (int i = 0; i < thp_mode_names_limit; i++) { 1212*0ef50b4eSJason Evans if (strncmp(thp_mode_names[i],v, vlen) 1213*0ef50b4eSJason Evans == 0) { 1214*0ef50b4eSJason Evans if (!have_madvise_huge) { 1215*0ef50b4eSJason Evans malloc_conf_error( 1216*0ef50b4eSJason Evans "No THP support", 1217*0ef50b4eSJason Evans k, klen, v, vlen); 1218*0ef50b4eSJason Evans } 1219*0ef50b4eSJason Evans opt_thp = i; 1220*0ef50b4eSJason Evans match = true; 1221*0ef50b4eSJason Evans break; 1222*0ef50b4eSJason Evans } 1223*0ef50b4eSJason Evans } 1224*0ef50b4eSJason Evans if (!match) { 1225*0ef50b4eSJason Evans malloc_conf_error("Invalid conf value", 1226*0ef50b4eSJason Evans k, klen, v, vlen); 1227*0ef50b4eSJason Evans } 1228*0ef50b4eSJason Evans continue; 
1229*0ef50b4eSJason Evans } 1230a4bd5210SJason Evans malloc_conf_error("Invalid conf pair", k, klen, v, 1231a4bd5210SJason Evans vlen); 1232d0e79aa3SJason Evans #undef CONF_MATCH 12337fa7f12fSJason Evans #undef CONF_MATCH_VALUE 1234a4bd5210SJason Evans #undef CONF_HANDLE_BOOL 12357fa7f12fSJason Evans #undef CONF_MIN_no 12367fa7f12fSJason Evans #undef CONF_MIN_yes 12377fa7f12fSJason Evans #undef CONF_MAX_no 12387fa7f12fSJason Evans #undef CONF_MAX_yes 12397fa7f12fSJason Evans #undef CONF_HANDLE_T_U 12407fa7f12fSJason Evans #undef CONF_HANDLE_UNSIGNED 1241a4bd5210SJason Evans #undef CONF_HANDLE_SIZE_T 1242a4bd5210SJason Evans #undef CONF_HANDLE_SSIZE_T 1243a4bd5210SJason Evans #undef CONF_HANDLE_CHAR_P 1244a4bd5210SJason Evans } 1245*0ef50b4eSJason Evans if (opt_abort_conf && had_conf_error) { 1246*0ef50b4eSJason Evans malloc_abort_invalid_conf(); 1247a4bd5210SJason Evans } 1248a4bd5210SJason Evans } 1249*0ef50b4eSJason Evans atomic_store_b(&log_init_done, true, ATOMIC_RELEASE); 1250*0ef50b4eSJason Evans } 1251a4bd5210SJason Evans 1252a4bd5210SJason Evans static bool 1253b7eaed25SJason Evans malloc_init_hard_needed(void) { 1254d0e79aa3SJason Evans if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == 1255d0e79aa3SJason Evans malloc_init_recursible)) { 1256a4bd5210SJason Evans /* 1257a4bd5210SJason Evans * Another thread initialized the allocator before this one 1258a4bd5210SJason Evans * acquired init_lock, or this thread is the initializing 1259a4bd5210SJason Evans * thread, and it is recursively allocating. 1260a4bd5210SJason Evans */ 1261b7eaed25SJason Evans return false; 1262a4bd5210SJason Evans } 1263a4bd5210SJason Evans #ifdef JEMALLOC_THREADED_INIT 1264d0e79aa3SJason Evans if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { 1265a4bd5210SJason Evans /* Busy-wait until the initializing thread completes. 
*/ 1266b7eaed25SJason Evans spin_t spinner = SPIN_INITIALIZER; 1267a4bd5210SJason Evans do { 1268bde95144SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 1269bde95144SJason Evans spin_adaptive(&spinner); 1270bde95144SJason Evans malloc_mutex_lock(TSDN_NULL, &init_lock); 1271d0e79aa3SJason Evans } while (!malloc_initialized()); 1272b7eaed25SJason Evans return false; 1273a4bd5210SJason Evans } 1274a4bd5210SJason Evans #endif 1275b7eaed25SJason Evans return true; 1276d0e79aa3SJason Evans } 1277d0e79aa3SJason Evans 1278d0e79aa3SJason Evans static bool 1279b7eaed25SJason Evans malloc_init_hard_a0_locked() { 1280a4bd5210SJason Evans malloc_initializer = INITIALIZER; 1281a4bd5210SJason Evans 1282b7eaed25SJason Evans if (config_prof) { 1283a4bd5210SJason Evans prof_boot0(); 1284b7eaed25SJason Evans } 1285a4bd5210SJason Evans malloc_conf_init(); 1286a4bd5210SJason Evans if (opt_stats_print) { 1287a4bd5210SJason Evans /* Print statistics at exit. */ 1288a4bd5210SJason Evans if (atexit(stats_print_atexit) != 0) { 1289a4bd5210SJason Evans malloc_write("<jemalloc>: Error in atexit()\n"); 1290b7eaed25SJason Evans if (opt_abort) { 1291a4bd5210SJason Evans abort(); 1292a4bd5210SJason Evans } 1293a4bd5210SJason Evans } 1294b7eaed25SJason Evans } 1295b7eaed25SJason Evans if (pages_boot()) { 1296b7eaed25SJason Evans return true; 1297b7eaed25SJason Evans } 1298b7eaed25SJason Evans if (base_boot(TSDN_NULL)) { 1299b7eaed25SJason Evans return true; 1300b7eaed25SJason Evans } 1301b7eaed25SJason Evans if (extent_boot()) { 1302b7eaed25SJason Evans return true; 1303b7eaed25SJason Evans } 1304b7eaed25SJason Evans if (ctl_boot()) { 1305b7eaed25SJason Evans return true; 1306b7eaed25SJason Evans } 1307b7eaed25SJason Evans if (config_prof) { 1308a4bd5210SJason Evans prof_boot1(); 1309b7eaed25SJason Evans } 1310bde95144SJason Evans arena_boot(); 1311b7eaed25SJason Evans if (tcache_boot(TSDN_NULL)) { 1312b7eaed25SJason Evans return true; 1313b7eaed25SJason Evans } 1314b7eaed25SJason Evans 
if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, 1315b7eaed25SJason Evans malloc_mutex_rank_exclusive)) { 1316b7eaed25SJason Evans return true; 1317b7eaed25SJason Evans } 1318a4bd5210SJason Evans /* 1319a4bd5210SJason Evans * Create enough scaffolding to allow recursive allocation in 1320a4bd5210SJason Evans * malloc_ncpus(). 1321a4bd5210SJason Evans */ 1322df0d881dSJason Evans narenas_auto = 1; 132382872ac0SJason Evans memset(arenas, 0, sizeof(arena_t *) * narenas_auto); 1324a4bd5210SJason Evans /* 1325a4bd5210SJason Evans * Initialize one arena here. The rest are lazily created in 1326d0e79aa3SJason Evans * arena_choose_hard(). 1327a4bd5210SJason Evans */ 1328b7eaed25SJason Evans if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) 1329b7eaed25SJason Evans == NULL) { 1330b7eaed25SJason Evans return true; 1331b7eaed25SJason Evans } 1332b7eaed25SJason Evans a0 = arena_get(TSDN_NULL, 0, false); 1333d0e79aa3SJason Evans malloc_init_state = malloc_init_a0_initialized; 13341f0a49e8SJason Evans 1335b7eaed25SJason Evans return false; 1336a4bd5210SJason Evans } 1337a4bd5210SJason Evans 1338d0e79aa3SJason Evans static bool 1339b7eaed25SJason Evans malloc_init_hard_a0(void) { 1340d0e79aa3SJason Evans bool ret; 1341d0e79aa3SJason Evans 13421f0a49e8SJason Evans malloc_mutex_lock(TSDN_NULL, &init_lock); 1343d0e79aa3SJason Evans ret = malloc_init_hard_a0_locked(); 13441f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 1345b7eaed25SJason Evans return ret; 1346a4bd5210SJason Evans } 1347a4bd5210SJason Evans 13481f0a49e8SJason Evans /* Initialize data structures which may trigger recursive allocation. 
*/ 1349df0d881dSJason Evans static bool 1350b7eaed25SJason Evans malloc_init_hard_recursible(void) { 1351d0e79aa3SJason Evans malloc_init_state = malloc_init_recursible; 1352df0d881dSJason Evans 1353a4bd5210SJason Evans ncpus = malloc_ncpus(); 1354f921d10fSJason Evans 13557fa7f12fSJason Evans #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ 13567fa7f12fSJason Evans && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ 13577fa7f12fSJason Evans !defined(__native_client__)) 1358df0d881dSJason Evans /* LinuxThreads' pthread_atfork() allocates. */ 1359f921d10fSJason Evans if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, 1360f921d10fSJason Evans jemalloc_postfork_child) != 0) { 1361f921d10fSJason Evans malloc_write("<jemalloc>: Error in pthread_atfork()\n"); 1362b7eaed25SJason Evans if (opt_abort) { 1363f921d10fSJason Evans abort(); 1364b7eaed25SJason Evans } 1365b7eaed25SJason Evans return true; 1366f921d10fSJason Evans } 1367f921d10fSJason Evans #endif 1368df0d881dSJason Evans 1369b7eaed25SJason Evans if (background_thread_boot0()) { 1370b7eaed25SJason Evans return true; 1371a4bd5210SJason Evans } 1372a4bd5210SJason Evans 1373b7eaed25SJason Evans return false; 1374b7eaed25SJason Evans } 1375d0e79aa3SJason Evans 1376b7eaed25SJason Evans static unsigned 1377b7eaed25SJason Evans malloc_narenas_default(void) { 1378b7eaed25SJason Evans assert(ncpus > 0); 1379a4bd5210SJason Evans /* 1380a4bd5210SJason Evans * For SMP systems, create more than one arena per CPU by 1381a4bd5210SJason Evans * default. 
1382a4bd5210SJason Evans */ 1383b7eaed25SJason Evans if (ncpus > 1) { 1384b7eaed25SJason Evans return ncpus << 2; 1385b7eaed25SJason Evans } else { 1386b7eaed25SJason Evans return 1; 1387a4bd5210SJason Evans } 1388b7eaed25SJason Evans } 1389b7eaed25SJason Evans 1390b7eaed25SJason Evans static percpu_arena_mode_t 1391b7eaed25SJason Evans percpu_arena_as_initialized(percpu_arena_mode_t mode) { 1392b7eaed25SJason Evans assert(!malloc_initialized()); 1393b7eaed25SJason Evans assert(mode <= percpu_arena_disabled); 1394b7eaed25SJason Evans 1395b7eaed25SJason Evans if (mode != percpu_arena_disabled) { 1396b7eaed25SJason Evans mode += percpu_arena_mode_enabled_base; 1397b7eaed25SJason Evans } 1398b7eaed25SJason Evans 1399b7eaed25SJason Evans return mode; 1400b7eaed25SJason Evans } 1401b7eaed25SJason Evans 1402b7eaed25SJason Evans static bool 1403b7eaed25SJason Evans malloc_init_narenas(void) { 1404b7eaed25SJason Evans assert(ncpus > 0); 1405b7eaed25SJason Evans 1406b7eaed25SJason Evans if (opt_percpu_arena != percpu_arena_disabled) { 1407b7eaed25SJason Evans if (!have_percpu_arena || malloc_getcpu() < 0) { 1408b7eaed25SJason Evans opt_percpu_arena = percpu_arena_disabled; 1409b7eaed25SJason Evans malloc_printf("<jemalloc>: perCPU arena getcpu() not " 1410b7eaed25SJason Evans "available. Setting narenas to %u.\n", opt_narenas ? 1411b7eaed25SJason Evans opt_narenas : malloc_narenas_default()); 1412b7eaed25SJason Evans if (opt_abort) { 1413b7eaed25SJason Evans abort(); 1414b7eaed25SJason Evans } 1415b7eaed25SJason Evans } else { 1416b7eaed25SJason Evans if (ncpus >= MALLOCX_ARENA_LIMIT) { 1417b7eaed25SJason Evans malloc_printf("<jemalloc>: narenas w/ percpu" 1418b7eaed25SJason Evans "arena beyond limit (%d)\n", ncpus); 1419b7eaed25SJason Evans if (opt_abort) { 1420b7eaed25SJason Evans abort(); 1421b7eaed25SJason Evans } 1422b7eaed25SJason Evans return true; 1423b7eaed25SJason Evans } 1424b7eaed25SJason Evans /* NB: opt_percpu_arena isn't fully initialized yet. 
*/ 1425b7eaed25SJason Evans if (percpu_arena_as_initialized(opt_percpu_arena) == 1426b7eaed25SJason Evans per_phycpu_arena && ncpus % 2 != 0) { 1427b7eaed25SJason Evans malloc_printf("<jemalloc>: invalid " 1428b7eaed25SJason Evans "configuration -- per physical CPU arena " 1429b7eaed25SJason Evans "with odd number (%u) of CPUs (no hyper " 1430b7eaed25SJason Evans "threading?).\n", ncpus); 1431b7eaed25SJason Evans if (opt_abort) 1432b7eaed25SJason Evans abort(); 1433b7eaed25SJason Evans } 1434b7eaed25SJason Evans unsigned n = percpu_arena_ind_limit( 1435b7eaed25SJason Evans percpu_arena_as_initialized(opt_percpu_arena)); 1436b7eaed25SJason Evans if (opt_narenas < n) { 1437b7eaed25SJason Evans /* 1438b7eaed25SJason Evans * If narenas is specified with percpu_arena 1439b7eaed25SJason Evans * enabled, actual narenas is set as the greater 1440b7eaed25SJason Evans * of the two. percpu_arena_choose will be free 1441b7eaed25SJason Evans * to use any of the arenas based on CPU 1442b7eaed25SJason Evans * id. This is conservative (at a small cost) 1443b7eaed25SJason Evans * but ensures correctness. 1444b7eaed25SJason Evans * 1445b7eaed25SJason Evans * If for some reason the ncpus determined at 1446b7eaed25SJason Evans * boot is not the actual number (e.g. because 1447b7eaed25SJason Evans * of affinity setting from numactl), reserving 1448b7eaed25SJason Evans * narenas this way provides a workaround for 1449b7eaed25SJason Evans * percpu_arena. 1450b7eaed25SJason Evans */ 1451b7eaed25SJason Evans opt_narenas = n; 1452b7eaed25SJason Evans } 1453b7eaed25SJason Evans } 1454b7eaed25SJason Evans } 1455b7eaed25SJason Evans if (opt_narenas == 0) { 1456b7eaed25SJason Evans opt_narenas = malloc_narenas_default(); 1457b7eaed25SJason Evans } 1458b7eaed25SJason Evans assert(opt_narenas > 0); 1459b7eaed25SJason Evans 146082872ac0SJason Evans narenas_auto = opt_narenas; 1461a4bd5210SJason Evans /* 1462df0d881dSJason Evans * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). 
1463a4bd5210SJason Evans */ 1464b7eaed25SJason Evans if (narenas_auto >= MALLOCX_ARENA_LIMIT) { 1465b7eaed25SJason Evans narenas_auto = MALLOCX_ARENA_LIMIT - 1; 1466a4bd5210SJason Evans malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", 146782872ac0SJason Evans narenas_auto); 1468a4bd5210SJason Evans } 1469df0d881dSJason Evans narenas_total_set(narenas_auto); 1470a4bd5210SJason Evans 1471b7eaed25SJason Evans return false; 1472b7eaed25SJason Evans } 1473b7eaed25SJason Evans 1474b7eaed25SJason Evans static void 1475b7eaed25SJason Evans malloc_init_percpu(void) { 1476b7eaed25SJason Evans opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena); 1477b7eaed25SJason Evans } 1478b7eaed25SJason Evans 1479b7eaed25SJason Evans static bool 1480b7eaed25SJason Evans malloc_init_hard_finish(void) { 1481b7eaed25SJason Evans if (malloc_mutex_boot()) { 1482b7eaed25SJason Evans return true; 1483b7eaed25SJason Evans } 1484a4bd5210SJason Evans 1485d0e79aa3SJason Evans malloc_init_state = malloc_init_initialized; 1486df0d881dSJason Evans malloc_slow_flag_init(); 1487df0d881dSJason Evans 1488b7eaed25SJason Evans return false; 1489b7eaed25SJason Evans } 1490b7eaed25SJason Evans 1491b7eaed25SJason Evans static void 1492b7eaed25SJason Evans malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) { 1493b7eaed25SJason Evans malloc_mutex_assert_owner(tsdn, &init_lock); 1494b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &init_lock); 1495b7eaed25SJason Evans if (reentrancy_set) { 1496b7eaed25SJason Evans assert(!tsdn_null(tsdn)); 1497b7eaed25SJason Evans tsd_t *tsd = tsdn_tsd(tsdn); 1498b7eaed25SJason Evans assert(tsd_reentrancy_level_get(tsd) > 0); 1499b7eaed25SJason Evans post_reentrancy(tsd); 1500b7eaed25SJason Evans } 1501d0e79aa3SJason Evans } 1502d0e79aa3SJason Evans 1503d0e79aa3SJason Evans static bool 1504b7eaed25SJason Evans malloc_init_hard(void) { 15051f0a49e8SJason Evans tsd_t *tsd; 1506d0e79aa3SJason Evans 1507536b3538SJason Evans #if defined(_WIN32) && 
_WIN32_WINNT < 0x0600 1508536b3538SJason Evans _init_init_lock(); 1509536b3538SJason Evans #endif 15101f0a49e8SJason Evans malloc_mutex_lock(TSDN_NULL, &init_lock); 1511b7eaed25SJason Evans 1512b7eaed25SJason Evans #define UNLOCK_RETURN(tsdn, ret, reentrancy) \ 1513b7eaed25SJason Evans malloc_init_hard_cleanup(tsdn, reentrancy); \ 1514b7eaed25SJason Evans return ret; 1515b7eaed25SJason Evans 1516d0e79aa3SJason Evans if (!malloc_init_hard_needed()) { 1517b7eaed25SJason Evans UNLOCK_RETURN(TSDN_NULL, false, false) 1518d0e79aa3SJason Evans } 1519f921d10fSJason Evans 1520d0e79aa3SJason Evans if (malloc_init_state != malloc_init_a0_initialized && 1521d0e79aa3SJason Evans malloc_init_hard_a0_locked()) { 1522b7eaed25SJason Evans UNLOCK_RETURN(TSDN_NULL, true, false) 1523d0e79aa3SJason Evans } 1524df0d881dSJason Evans 15251f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 15261f0a49e8SJason Evans /* Recursive allocation relies on functional tsd. */ 15271f0a49e8SJason Evans tsd = malloc_tsd_boot0(); 1528b7eaed25SJason Evans if (tsd == NULL) { 1529b7eaed25SJason Evans return true; 1530b7eaed25SJason Evans } 1531b7eaed25SJason Evans if (malloc_init_hard_recursible()) { 1532b7eaed25SJason Evans return true; 1533b7eaed25SJason Evans } 1534b7eaed25SJason Evans 15351f0a49e8SJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); 1536b7eaed25SJason Evans /* Set reentrancy level to 1 during init. */ 15378b2f5aafSJason Evans pre_reentrancy(tsd, NULL); 1538b7eaed25SJason Evans /* Initialize narenas before prof_boot2 (for allocation). 
*/ 1539b7eaed25SJason Evans if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) { 1540b7eaed25SJason Evans UNLOCK_RETURN(tsd_tsdn(tsd), true, true) 1541b7eaed25SJason Evans } 1542bde95144SJason Evans if (config_prof && prof_boot2(tsd)) { 1543b7eaed25SJason Evans UNLOCK_RETURN(tsd_tsdn(tsd), true, true) 1544d0e79aa3SJason Evans } 1545d0e79aa3SJason Evans 1546b7eaed25SJason Evans malloc_init_percpu(); 1547d0e79aa3SJason Evans 1548b7eaed25SJason Evans if (malloc_init_hard_finish()) { 1549b7eaed25SJason Evans UNLOCK_RETURN(tsd_tsdn(tsd), true, true) 1550b7eaed25SJason Evans } 1551b7eaed25SJason Evans post_reentrancy(tsd); 15521f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); 1553b7eaed25SJason Evans 1554*0ef50b4eSJason Evans witness_assert_lockless(witness_tsd_tsdn( 1555*0ef50b4eSJason Evans tsd_witness_tsdp_get_unsafe(tsd))); 1556d0e79aa3SJason Evans malloc_tsd_boot1(); 1557b7eaed25SJason Evans /* Update TSD after tsd_boot1. */ 1558b7eaed25SJason Evans tsd = tsd_fetch(); 1559b7eaed25SJason Evans if (opt_background_thread) { 1560b7eaed25SJason Evans assert(have_background_thread); 1561b7eaed25SJason Evans /* 1562b7eaed25SJason Evans * Need to finish init & unlock first before creating background 1563*0ef50b4eSJason Evans * threads (pthread_create depends on malloc). ctl_init (which 1564*0ef50b4eSJason Evans * sets isthreaded) needs to be called without holding any lock. 
1565b7eaed25SJason Evans */ 1566*0ef50b4eSJason Evans background_thread_ctl_init(tsd_tsdn(tsd)); 1567*0ef50b4eSJason Evans 1568b7eaed25SJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); 1569b7eaed25SJason Evans bool err = background_thread_create(tsd, 0); 1570b7eaed25SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); 1571b7eaed25SJason Evans if (err) { 1572b7eaed25SJason Evans return true; 1573b7eaed25SJason Evans } 1574b7eaed25SJason Evans } 1575b7eaed25SJason Evans #undef UNLOCK_RETURN 1576b7eaed25SJason Evans return false; 1577a4bd5210SJason Evans } 1578a4bd5210SJason Evans 1579a4bd5210SJason Evans /* 1580a4bd5210SJason Evans * End initialization functions. 1581a4bd5210SJason Evans */ 1582a4bd5210SJason Evans /******************************************************************************/ 1583a4bd5210SJason Evans /* 1584b7eaed25SJason Evans * Begin allocation-path internal functions and data structures. 1585a4bd5210SJason Evans */ 1586a4bd5210SJason Evans 1587b7eaed25SJason Evans /* 1588b7eaed25SJason Evans * Settings determined by the documented behavior of the allocation functions. 1589b7eaed25SJason Evans */ 1590b7eaed25SJason Evans typedef struct static_opts_s static_opts_t; 1591b7eaed25SJason Evans struct static_opts_s { 1592b7eaed25SJason Evans /* Whether or not allocation size may overflow. */ 1593b7eaed25SJason Evans bool may_overflow; 1594b7eaed25SJason Evans /* Whether or not allocations of size 0 should be treated as size 1. */ 1595b7eaed25SJason Evans bool bump_empty_alloc; 1596b7eaed25SJason Evans /* 1597b7eaed25SJason Evans * Whether to assert that allocations are not of size 0 (after any 1598b7eaed25SJason Evans * bumping). 1599b7eaed25SJason Evans */ 1600b7eaed25SJason Evans bool assert_nonempty_alloc; 1601f921d10fSJason Evans 1602b7eaed25SJason Evans /* 1603b7eaed25SJason Evans * Whether or not to modify the 'result' argument to malloc in case of 1604b7eaed25SJason Evans * error. 
1605b7eaed25SJason Evans */ 1606b7eaed25SJason Evans bool null_out_result_on_error; 1607b7eaed25SJason Evans /* Whether to set errno when we encounter an error condition. */ 1608b7eaed25SJason Evans bool set_errno_on_error; 1609f921d10fSJason Evans 1610b7eaed25SJason Evans /* 1611b7eaed25SJason Evans * The minimum valid alignment for functions requesting aligned storage. 1612b7eaed25SJason Evans */ 1613b7eaed25SJason Evans size_t min_alignment; 1614f921d10fSJason Evans 1615b7eaed25SJason Evans /* The error string to use if we oom. */ 1616b7eaed25SJason Evans const char *oom_string; 1617b7eaed25SJason Evans /* The error string to use if the passed-in alignment is invalid. */ 1618b7eaed25SJason Evans const char *invalid_alignment_string; 1619f921d10fSJason Evans 1620b7eaed25SJason Evans /* 1621b7eaed25SJason Evans * False if we're configured to skip some time-consuming operations. 1622b7eaed25SJason Evans * 1623b7eaed25SJason Evans * This isn't really a malloc "behavior", but it acts as a useful 1624b7eaed25SJason Evans * summary of several other static (or at least, static after program 1625b7eaed25SJason Evans * initialization) options. 
1626b7eaed25SJason Evans */ 1627b7eaed25SJason Evans bool slow; 1628b7eaed25SJason Evans }; 1629f921d10fSJason Evans 1630b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void 1631b7eaed25SJason Evans static_opts_init(static_opts_t *static_opts) { 1632b7eaed25SJason Evans static_opts->may_overflow = false; 1633b7eaed25SJason Evans static_opts->bump_empty_alloc = false; 1634b7eaed25SJason Evans static_opts->assert_nonempty_alloc = false; 1635b7eaed25SJason Evans static_opts->null_out_result_on_error = false; 1636b7eaed25SJason Evans static_opts->set_errno_on_error = false; 1637b7eaed25SJason Evans static_opts->min_alignment = 0; 1638b7eaed25SJason Evans static_opts->oom_string = ""; 1639b7eaed25SJason Evans static_opts->invalid_alignment_string = ""; 1640b7eaed25SJason Evans static_opts->slow = false; 1641f921d10fSJason Evans } 1642f921d10fSJason Evans 16431f0a49e8SJason Evans /* 1644b7eaed25SJason Evans * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we 1645b7eaed25SJason Evans * should have one constant here per magic value there. Note however that the 1646b7eaed25SJason Evans * representations need not be related. 
16471f0a49e8SJason Evans */ 1648b7eaed25SJason Evans #define TCACHE_IND_NONE ((unsigned)-1) 1649b7eaed25SJason Evans #define TCACHE_IND_AUTOMATIC ((unsigned)-2) 1650b7eaed25SJason Evans #define ARENA_IND_AUTOMATIC ((unsigned)-1) 1651f921d10fSJason Evans 1652b7eaed25SJason Evans typedef struct dynamic_opts_s dynamic_opts_t; 1653b7eaed25SJason Evans struct dynamic_opts_s { 1654b7eaed25SJason Evans void **result; 1655b7eaed25SJason Evans size_t num_items; 1656b7eaed25SJason Evans size_t item_size; 1657b7eaed25SJason Evans size_t alignment; 1658b7eaed25SJason Evans bool zero; 1659b7eaed25SJason Evans unsigned tcache_ind; 1660b7eaed25SJason Evans unsigned arena_ind; 1661b7eaed25SJason Evans }; 1662b7eaed25SJason Evans 1663b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void 1664b7eaed25SJason Evans dynamic_opts_init(dynamic_opts_t *dynamic_opts) { 1665b7eaed25SJason Evans dynamic_opts->result = NULL; 1666b7eaed25SJason Evans dynamic_opts->num_items = 0; 1667b7eaed25SJason Evans dynamic_opts->item_size = 0; 1668b7eaed25SJason Evans dynamic_opts->alignment = 0; 1669b7eaed25SJason Evans dynamic_opts->zero = false; 1670b7eaed25SJason Evans dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC; 1671b7eaed25SJason Evans dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC; 16721f0a49e8SJason Evans } 16731f0a49e8SJason Evans 1674b7eaed25SJason Evans /* ind is ignored if dopts->alignment > 0. */ 1675b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void * 1676b7eaed25SJason Evans imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, 1677b7eaed25SJason Evans size_t size, size_t usize, szind_t ind) { 1678b7eaed25SJason Evans tcache_t *tcache; 1679b7eaed25SJason Evans arena_t *arena; 16801f0a49e8SJason Evans 1681b7eaed25SJason Evans /* Fill in the tcache. */ 1682b7eaed25SJason Evans if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) { 1683b7eaed25SJason Evans if (likely(!sopts->slow)) { 1684b7eaed25SJason Evans /* Getting tcache ptr unconditionally. 
*/ 1685b7eaed25SJason Evans tcache = tsd_tcachep_get(tsd); 1686b7eaed25SJason Evans assert(tcache == tcache_get(tsd)); 1687b7eaed25SJason Evans } else { 1688b7eaed25SJason Evans tcache = tcache_get(tsd); 1689b7eaed25SJason Evans } 1690b7eaed25SJason Evans } else if (dopts->tcache_ind == TCACHE_IND_NONE) { 1691b7eaed25SJason Evans tcache = NULL; 1692b7eaed25SJason Evans } else { 1693b7eaed25SJason Evans tcache = tcaches_get(tsd, dopts->tcache_ind); 1694d0e79aa3SJason Evans } 1695d0e79aa3SJason Evans 1696b7eaed25SJason Evans /* Fill in the arena. */ 1697b7eaed25SJason Evans if (dopts->arena_ind == ARENA_IND_AUTOMATIC) { 1698b7eaed25SJason Evans /* 1699b7eaed25SJason Evans * In case of automatic arena management, we defer arena 1700b7eaed25SJason Evans * computation until as late as we can, hoping to fill the 1701b7eaed25SJason Evans * allocation out of the tcache. 1702b7eaed25SJason Evans */ 1703b7eaed25SJason Evans arena = NULL; 1704b7eaed25SJason Evans } else { 1705b7eaed25SJason Evans arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true); 1706df0d881dSJason Evans } 1707df0d881dSJason Evans 1708b7eaed25SJason Evans if (unlikely(dopts->alignment != 0)) { 1709b7eaed25SJason Evans return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment, 1710b7eaed25SJason Evans dopts->zero, tcache, arena); 1711b7eaed25SJason Evans } 17121f0a49e8SJason Evans 1713b7eaed25SJason Evans return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false, 1714b7eaed25SJason Evans arena, sopts->slow); 1715b7eaed25SJason Evans } 17161f0a49e8SJason Evans 1717b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void * 1718b7eaed25SJason Evans imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, 1719b7eaed25SJason Evans size_t usize, szind_t ind) { 1720b7eaed25SJason Evans void *ret; 1721b7eaed25SJason Evans 1722b7eaed25SJason Evans /* 1723b7eaed25SJason Evans * For small allocations, sampling bumps the usize. If so, we allocate 1724b7eaed25SJason Evans * from the ind_large bucket. 
1725b7eaed25SJason Evans */ 1726b7eaed25SJason Evans szind_t ind_large; 1727b7eaed25SJason Evans size_t bumped_usize = usize; 1728b7eaed25SJason Evans 1729b7eaed25SJason Evans if (usize <= SMALL_MAXCLASS) { 1730b7eaed25SJason Evans assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) : 1731b7eaed25SJason Evans sz_sa2u(LARGE_MINCLASS, dopts->alignment)) 1732b7eaed25SJason Evans == LARGE_MINCLASS); 1733b7eaed25SJason Evans ind_large = sz_size2index(LARGE_MINCLASS); 1734b7eaed25SJason Evans bumped_usize = sz_s2u(LARGE_MINCLASS); 1735b7eaed25SJason Evans ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, 1736b7eaed25SJason Evans bumped_usize, ind_large); 1737df0d881dSJason Evans if (unlikely(ret == NULL)) { 1738b7eaed25SJason Evans return NULL; 1739b7eaed25SJason Evans } 1740b7eaed25SJason Evans arena_prof_promote(tsd_tsdn(tsd), ret, usize); 1741b7eaed25SJason Evans } else { 1742b7eaed25SJason Evans ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind); 1743b7eaed25SJason Evans } 1744b7eaed25SJason Evans 1745b7eaed25SJason Evans return ret; 1746b7eaed25SJason Evans } 1747b7eaed25SJason Evans 1748b7eaed25SJason Evans /* 1749b7eaed25SJason Evans * Returns true if the allocation will overflow, and false otherwise. Sets 1750b7eaed25SJason Evans * *size to the product either way. 1751b7eaed25SJason Evans */ 1752b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE bool 1753b7eaed25SJason Evans compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts, 1754b7eaed25SJason Evans size_t *size) { 1755b7eaed25SJason Evans /* 1756b7eaed25SJason Evans * This function is just num_items * item_size, except that we may have 1757b7eaed25SJason Evans * to check for overflow. 
1758b7eaed25SJason Evans */ 1759b7eaed25SJason Evans 1760b7eaed25SJason Evans if (!may_overflow) { 1761b7eaed25SJason Evans assert(dopts->num_items == 1); 1762b7eaed25SJason Evans *size = dopts->item_size; 1763b7eaed25SJason Evans return false; 1764b7eaed25SJason Evans } 1765b7eaed25SJason Evans 1766b7eaed25SJason Evans /* A size_t with its high-half bits all set to 1. */ 1767*0ef50b4eSJason Evans static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2); 1768b7eaed25SJason Evans 1769b7eaed25SJason Evans *size = dopts->item_size * dopts->num_items; 1770b7eaed25SJason Evans 1771b7eaed25SJason Evans if (unlikely(*size == 0)) { 1772b7eaed25SJason Evans return (dopts->num_items != 0 && dopts->item_size != 0); 1773b7eaed25SJason Evans } 1774b7eaed25SJason Evans 1775b7eaed25SJason Evans /* 1776b7eaed25SJason Evans * We got a non-zero size, but we don't know if we overflowed to get 1777b7eaed25SJason Evans * there. To avoid having to do a divide, we'll be clever and note that 1778b7eaed25SJason Evans * if both A and B can be represented in N/2 bits, then their product 1779b7eaed25SJason Evans * can be represented in N bits (without the possibility of overflow). 1780b7eaed25SJason Evans */ 1781b7eaed25SJason Evans if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) { 1782b7eaed25SJason Evans return false; 1783b7eaed25SJason Evans } 1784b7eaed25SJason Evans if (likely(*size / dopts->item_size == dopts->num_items)) { 1785b7eaed25SJason Evans return false; 1786b7eaed25SJason Evans } 1787b7eaed25SJason Evans return true; 1788b7eaed25SJason Evans } 1789b7eaed25SJason Evans 1790b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE int 1791b7eaed25SJason Evans imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) { 1792b7eaed25SJason Evans /* Where the actual allocated memory will live. */ 1793b7eaed25SJason Evans void *allocation = NULL; 1794b7eaed25SJason Evans /* Filled in by compute_size_with_overflow below. 
*/ 1795b7eaed25SJason Evans size_t size = 0; 1796b7eaed25SJason Evans /* 1797b7eaed25SJason Evans * For unaligned allocations, we need only ind. For aligned 1798b7eaed25SJason Evans * allocations, or in case of stats or profiling we need usize. 1799b7eaed25SJason Evans * 1800b7eaed25SJason Evans * These are actually dead stores, in that their values are reset before 1801b7eaed25SJason Evans * any branch on their value is taken. Sometimes though, it's 1802b7eaed25SJason Evans * convenient to pass them as arguments before this point. To avoid 1803b7eaed25SJason Evans * undefined behavior then, we initialize them with dummy stores. 1804b7eaed25SJason Evans */ 1805b7eaed25SJason Evans szind_t ind = 0; 1806b7eaed25SJason Evans size_t usize = 0; 1807b7eaed25SJason Evans 1808b7eaed25SJason Evans /* Reentrancy is only checked on slow path. */ 1809b7eaed25SJason Evans int8_t reentrancy_level; 1810b7eaed25SJason Evans 1811b7eaed25SJason Evans /* Compute the amount of memory the user wants. */ 1812b7eaed25SJason Evans if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts, 1813b7eaed25SJason Evans &size))) { 1814b7eaed25SJason Evans goto label_oom; 1815b7eaed25SJason Evans } 1816b7eaed25SJason Evans 1817b7eaed25SJason Evans /* Validate the user input. */ 1818b7eaed25SJason Evans if (sopts->bump_empty_alloc) { 1819b7eaed25SJason Evans if (unlikely(size == 0)) { 1820b7eaed25SJason Evans size = 1; 1821b7eaed25SJason Evans } 1822b7eaed25SJason Evans } 1823b7eaed25SJason Evans 1824b7eaed25SJason Evans if (sopts->assert_nonempty_alloc) { 1825b7eaed25SJason Evans assert (size != 0); 1826b7eaed25SJason Evans } 1827b7eaed25SJason Evans 1828b7eaed25SJason Evans if (unlikely(dopts->alignment < sopts->min_alignment 1829b7eaed25SJason Evans || (dopts->alignment & (dopts->alignment - 1)) != 0)) { 1830b7eaed25SJason Evans goto label_invalid_alignment; 1831b7eaed25SJason Evans } 1832b7eaed25SJason Evans 1833b7eaed25SJason Evans /* This is the beginning of the "core" algorithm. 
*/ 1834b7eaed25SJason Evans 1835b7eaed25SJason Evans if (dopts->alignment == 0) { 1836b7eaed25SJason Evans ind = sz_size2index(size); 1837b7eaed25SJason Evans if (unlikely(ind >= NSIZES)) { 1838b7eaed25SJason Evans goto label_oom; 1839b7eaed25SJason Evans } 1840b7eaed25SJason Evans if (config_stats || (config_prof && opt_prof)) { 1841b7eaed25SJason Evans usize = sz_index2size(ind); 1842b7eaed25SJason Evans assert(usize > 0 && usize <= LARGE_MAXCLASS); 1843b7eaed25SJason Evans } 1844b7eaed25SJason Evans } else { 1845b7eaed25SJason Evans usize = sz_sa2u(size, dopts->alignment); 1846b7eaed25SJason Evans if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { 1847b7eaed25SJason Evans goto label_oom; 1848b7eaed25SJason Evans } 1849b7eaed25SJason Evans } 1850b7eaed25SJason Evans 1851b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 1852b7eaed25SJason Evans 1853b7eaed25SJason Evans /* 1854b7eaed25SJason Evans * If we need to handle reentrancy, we can do it out of a 1855b7eaed25SJason Evans * known-initialized arena (i.e. arena 0). 1856b7eaed25SJason Evans */ 1857b7eaed25SJason Evans reentrancy_level = tsd_reentrancy_level_get(tsd); 1858b7eaed25SJason Evans if (sopts->slow && unlikely(reentrancy_level > 0)) { 1859b7eaed25SJason Evans /* 1860b7eaed25SJason Evans * We should never specify particular arenas or tcaches from 1861b7eaed25SJason Evans * within our internal allocations. 1862b7eaed25SJason Evans */ 1863b7eaed25SJason Evans assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC || 1864b7eaed25SJason Evans dopts->tcache_ind == TCACHE_IND_NONE); 18658b2f5aafSJason Evans assert(dopts->arena_ind == ARENA_IND_AUTOMATIC); 1866b7eaed25SJason Evans dopts->tcache_ind = TCACHE_IND_NONE; 1867b7eaed25SJason Evans /* We know that arena 0 has already been initialized. */ 1868b7eaed25SJason Evans dopts->arena_ind = 0; 1869b7eaed25SJason Evans } 1870b7eaed25SJason Evans 1871b7eaed25SJason Evans /* If profiling is on, get our profiling context. 
*/ 1872b7eaed25SJason Evans if (config_prof && opt_prof) { 1873b7eaed25SJason Evans /* 1874b7eaed25SJason Evans * Note that if we're going down this path, usize must have been 1875b7eaed25SJason Evans * initialized in the previous if statement. 1876b7eaed25SJason Evans */ 1877b7eaed25SJason Evans prof_tctx_t *tctx = prof_alloc_prep( 1878b7eaed25SJason Evans tsd, usize, prof_active_get_unlocked(), true); 1879b7eaed25SJason Evans 1880b7eaed25SJason Evans alloc_ctx_t alloc_ctx; 1881b7eaed25SJason Evans if (likely((uintptr_t)tctx == (uintptr_t)1U)) { 1882b7eaed25SJason Evans alloc_ctx.slab = (usize <= SMALL_MAXCLASS); 1883b7eaed25SJason Evans allocation = imalloc_no_sample( 1884b7eaed25SJason Evans sopts, dopts, tsd, usize, usize, ind); 1885b7eaed25SJason Evans } else if ((uintptr_t)tctx > (uintptr_t)1U) { 1886b7eaed25SJason Evans /* 1887b7eaed25SJason Evans * Note that ind might still be 0 here. This is fine; 1888b7eaed25SJason Evans * imalloc_sample ignores ind if dopts->alignment > 0. 1889b7eaed25SJason Evans */ 1890b7eaed25SJason Evans allocation = imalloc_sample( 1891b7eaed25SJason Evans sopts, dopts, tsd, usize, ind); 1892b7eaed25SJason Evans alloc_ctx.slab = false; 1893b7eaed25SJason Evans } else { 1894b7eaed25SJason Evans allocation = NULL; 1895b7eaed25SJason Evans } 1896b7eaed25SJason Evans 1897b7eaed25SJason Evans if (unlikely(allocation == NULL)) { 1898b7eaed25SJason Evans prof_alloc_rollback(tsd, tctx, true); 1899b7eaed25SJason Evans goto label_oom; 1900b7eaed25SJason Evans } 1901b7eaed25SJason Evans prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx); 1902b7eaed25SJason Evans } else { 1903b7eaed25SJason Evans /* 1904b7eaed25SJason Evans * If dopts->alignment > 0, then ind is still 0, but usize was 1905b7eaed25SJason Evans * computed in the previous if statement. Down the positive 1906b7eaed25SJason Evans * alignment path, imalloc_no_sample ignores ind and size 1907b7eaed25SJason Evans * (relying only on usize). 
1908b7eaed25SJason Evans */ 1909b7eaed25SJason Evans allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize, 1910b7eaed25SJason Evans ind); 1911b7eaed25SJason Evans if (unlikely(allocation == NULL)) { 1912b7eaed25SJason Evans goto label_oom; 1913b7eaed25SJason Evans } 1914b7eaed25SJason Evans } 1915b7eaed25SJason Evans 1916b7eaed25SJason Evans /* 1917b7eaed25SJason Evans * Allocation has been done at this point. We still have some 1918b7eaed25SJason Evans * post-allocation work to do though. 1919b7eaed25SJason Evans */ 1920b7eaed25SJason Evans assert(dopts->alignment == 0 1921b7eaed25SJason Evans || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0)); 1922b7eaed25SJason Evans 1923b7eaed25SJason Evans if (config_stats) { 1924b7eaed25SJason Evans assert(usize == isalloc(tsd_tsdn(tsd), allocation)); 1925b7eaed25SJason Evans *tsd_thread_allocatedp_get(tsd) += usize; 1926b7eaed25SJason Evans } 1927b7eaed25SJason Evans 1928b7eaed25SJason Evans if (sopts->slow) { 1929b7eaed25SJason Evans UTRACE(0, size, allocation); 1930b7eaed25SJason Evans } 1931b7eaed25SJason Evans 1932b7eaed25SJason Evans /* Success! 
*/ 1933b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 1934b7eaed25SJason Evans *dopts->result = allocation; 1935b7eaed25SJason Evans return 0; 1936b7eaed25SJason Evans 1937b7eaed25SJason Evans label_oom: 1938b7eaed25SJason Evans if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) { 1939b7eaed25SJason Evans malloc_write(sopts->oom_string); 1940df0d881dSJason Evans abort(); 1941df0d881dSJason Evans } 1942b7eaed25SJason Evans 1943b7eaed25SJason Evans if (sopts->slow) { 1944b7eaed25SJason Evans UTRACE(NULL, size, NULL); 1945b7eaed25SJason Evans } 1946b7eaed25SJason Evans 1947b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 1948b7eaed25SJason Evans 1949b7eaed25SJason Evans if (sopts->set_errno_on_error) { 1950df0d881dSJason Evans set_errno(ENOMEM); 1951df0d881dSJason Evans } 1952b7eaed25SJason Evans 1953b7eaed25SJason Evans if (sopts->null_out_result_on_error) { 1954b7eaed25SJason Evans *dopts->result = NULL; 1955df0d881dSJason Evans } 1956b7eaed25SJason Evans 1957b7eaed25SJason Evans return ENOMEM; 1958b7eaed25SJason Evans 1959b7eaed25SJason Evans /* 1960b7eaed25SJason Evans * This label is only jumped to by one goto; we move it out of line 1961b7eaed25SJason Evans * anyways to avoid obscuring the non-error paths, and for symmetry with 1962b7eaed25SJason Evans * the oom case. 
1963b7eaed25SJason Evans */ 1964b7eaed25SJason Evans label_invalid_alignment: 1965b7eaed25SJason Evans if (config_xmalloc && unlikely(opt_xmalloc)) { 1966b7eaed25SJason Evans malloc_write(sopts->invalid_alignment_string); 1967b7eaed25SJason Evans abort(); 1968d0e79aa3SJason Evans } 1969d0e79aa3SJason Evans 1970b7eaed25SJason Evans if (sopts->set_errno_on_error) { 1971b7eaed25SJason Evans set_errno(EINVAL); 1972b7eaed25SJason Evans } 1973b7eaed25SJason Evans 1974b7eaed25SJason Evans if (sopts->slow) { 1975b7eaed25SJason Evans UTRACE(NULL, size, NULL); 1976b7eaed25SJason Evans } 1977b7eaed25SJason Evans 1978b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 1979b7eaed25SJason Evans 1980b7eaed25SJason Evans if (sopts->null_out_result_on_error) { 1981b7eaed25SJason Evans *dopts->result = NULL; 1982b7eaed25SJason Evans } 1983b7eaed25SJason Evans 1984b7eaed25SJason Evans return EINVAL; 1985b7eaed25SJason Evans } 1986b7eaed25SJason Evans 1987b7eaed25SJason Evans /* Returns the errno-style error code of the allocation. */ 1988b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE int 1989b7eaed25SJason Evans imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { 1990b7eaed25SJason Evans if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { 1991b7eaed25SJason Evans if (config_xmalloc && unlikely(opt_xmalloc)) { 1992b7eaed25SJason Evans malloc_write(sopts->oom_string); 1993b7eaed25SJason Evans abort(); 1994b7eaed25SJason Evans } 1995b7eaed25SJason Evans UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); 1996b7eaed25SJason Evans set_errno(ENOMEM); 1997b7eaed25SJason Evans *dopts->result = NULL; 1998b7eaed25SJason Evans 1999b7eaed25SJason Evans return ENOMEM; 2000b7eaed25SJason Evans } 2001b7eaed25SJason Evans 2002b7eaed25SJason Evans /* We always need the tsd. Let's grab it right away. 
*/ 2003b7eaed25SJason Evans tsd_t *tsd = tsd_fetch(); 2004b7eaed25SJason Evans assert(tsd); 2005b7eaed25SJason Evans if (likely(tsd_fast(tsd))) { 2006b7eaed25SJason Evans /* Fast and common path. */ 2007b7eaed25SJason Evans tsd_assert_fast(tsd); 2008b7eaed25SJason Evans sopts->slow = false; 2009b7eaed25SJason Evans return imalloc_body(sopts, dopts, tsd); 2010b7eaed25SJason Evans } else { 2011b7eaed25SJason Evans sopts->slow = true; 2012b7eaed25SJason Evans return imalloc_body(sopts, dopts, tsd); 2013b7eaed25SJason Evans } 2014b7eaed25SJason Evans } 2015b7eaed25SJason Evans /******************************************************************************/ 2016b7eaed25SJason Evans /* 2017b7eaed25SJason Evans * Begin malloc(3)-compatible functions. 2018b7eaed25SJason Evans */ 2019b7eaed25SJason Evans 2020d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2021d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2022d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) 2023b7eaed25SJason Evans je_malloc(size_t size) { 2024a4bd5210SJason Evans void *ret; 2025b7eaed25SJason Evans static_opts_t sopts; 2026b7eaed25SJason Evans dynamic_opts_t dopts; 2027a4bd5210SJason Evans 2028*0ef50b4eSJason Evans LOG("core.malloc.entry", "size: %zu", size); 2029*0ef50b4eSJason Evans 2030b7eaed25SJason Evans static_opts_init(&sopts); 2031b7eaed25SJason Evans dynamic_opts_init(&dopts); 2032a4bd5210SJason Evans 2033b7eaed25SJason Evans sopts.bump_empty_alloc = true; 2034b7eaed25SJason Evans sopts.null_out_result_on_error = true; 2035b7eaed25SJason Evans sopts.set_errno_on_error = true; 2036b7eaed25SJason Evans sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n"; 2037df0d881dSJason Evans 2038b7eaed25SJason Evans dopts.result = &ret; 2039b7eaed25SJason Evans dopts.num_items = 1; 2040b7eaed25SJason Evans dopts.item_size = size; 2041a4bd5210SJason Evans 2042b7eaed25SJason Evans imalloc(&sopts, &dopts); 2043f921d10fSJason Evans 2044*0ef50b4eSJason 
Evans LOG("core.malloc.exit", "result: %p", ret); 2045*0ef50b4eSJason Evans 2046b7eaed25SJason Evans return ret; 2047a4bd5210SJason Evans } 2048a4bd5210SJason Evans 2049d0e79aa3SJason Evans JEMALLOC_EXPORT int JEMALLOC_NOTHROW 2050d0e79aa3SJason Evans JEMALLOC_ATTR(nonnull(1)) 2051b7eaed25SJason Evans je_posix_memalign(void **memptr, size_t alignment, size_t size) { 20521f0a49e8SJason Evans int ret; 2053b7eaed25SJason Evans static_opts_t sopts; 2054b7eaed25SJason Evans dynamic_opts_t dopts; 20551f0a49e8SJason Evans 2056*0ef50b4eSJason Evans LOG("core.posix_memalign.entry", "mem ptr: %p, alignment: %zu, " 2057*0ef50b4eSJason Evans "size: %zu", memptr, alignment, size); 2058*0ef50b4eSJason Evans 2059b7eaed25SJason Evans static_opts_init(&sopts); 2060b7eaed25SJason Evans dynamic_opts_init(&dopts); 20611f0a49e8SJason Evans 2062b7eaed25SJason Evans sopts.bump_empty_alloc = true; 2063b7eaed25SJason Evans sopts.min_alignment = sizeof(void *); 2064b7eaed25SJason Evans sopts.oom_string = 2065b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: out of memory\n"; 2066b7eaed25SJason Evans sopts.invalid_alignment_string = 2067b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; 2068b7eaed25SJason Evans 2069b7eaed25SJason Evans dopts.result = memptr; 2070b7eaed25SJason Evans dopts.num_items = 1; 2071b7eaed25SJason Evans dopts.item_size = size; 2072b7eaed25SJason Evans dopts.alignment = alignment; 2073b7eaed25SJason Evans 2074b7eaed25SJason Evans ret = imalloc(&sopts, &dopts); 2075*0ef50b4eSJason Evans 2076*0ef50b4eSJason Evans LOG("core.posix_memalign.exit", "result: %d, alloc ptr: %p", ret, 2077*0ef50b4eSJason Evans *memptr); 2078*0ef50b4eSJason Evans 2079b7eaed25SJason Evans return ret; 2080a4bd5210SJason Evans } 2081a4bd5210SJason Evans 2082d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2083d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2084d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) 
JEMALLOC_ALLOC_SIZE(2) 2085b7eaed25SJason Evans je_aligned_alloc(size_t alignment, size_t size) { 2086a4bd5210SJason Evans void *ret; 2087a4bd5210SJason Evans 2088b7eaed25SJason Evans static_opts_t sopts; 2089b7eaed25SJason Evans dynamic_opts_t dopts; 20901f0a49e8SJason Evans 2091*0ef50b4eSJason Evans LOG("core.aligned_alloc.entry", "alignment: %zu, size: %zu\n", 2092*0ef50b4eSJason Evans alignment, size); 2093*0ef50b4eSJason Evans 2094b7eaed25SJason Evans static_opts_init(&sopts); 2095b7eaed25SJason Evans dynamic_opts_init(&dopts); 2096b7eaed25SJason Evans 2097b7eaed25SJason Evans sopts.bump_empty_alloc = true; 2098b7eaed25SJason Evans sopts.null_out_result_on_error = true; 2099b7eaed25SJason Evans sopts.set_errno_on_error = true; 2100b7eaed25SJason Evans sopts.min_alignment = 1; 2101b7eaed25SJason Evans sopts.oom_string = 2102b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: out of memory\n"; 2103b7eaed25SJason Evans sopts.invalid_alignment_string = 2104b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; 2105b7eaed25SJason Evans 2106b7eaed25SJason Evans dopts.result = &ret; 2107b7eaed25SJason Evans dopts.num_items = 1; 2108b7eaed25SJason Evans dopts.item_size = size; 2109b7eaed25SJason Evans dopts.alignment = alignment; 2110b7eaed25SJason Evans 2111b7eaed25SJason Evans imalloc(&sopts, &dopts); 2112*0ef50b4eSJason Evans 2113*0ef50b4eSJason Evans LOG("core.aligned_alloc.exit", "result: %p", ret); 2114*0ef50b4eSJason Evans 2115b7eaed25SJason Evans return ret; 2116a4bd5210SJason Evans } 2117a4bd5210SJason Evans 2118d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2119d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2120d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) 2121b7eaed25SJason Evans je_calloc(size_t num, size_t size) { 2122a4bd5210SJason Evans void *ret; 2123b7eaed25SJason Evans static_opts_t sopts; 2124b7eaed25SJason Evans dynamic_opts_t dopts; 
2125a4bd5210SJason Evans 2126*0ef50b4eSJason Evans LOG("core.calloc.entry", "num: %zu, size: %zu\n", num, size); 2127*0ef50b4eSJason Evans 2128b7eaed25SJason Evans static_opts_init(&sopts); 2129b7eaed25SJason Evans dynamic_opts_init(&dopts); 2130a4bd5210SJason Evans 2131b7eaed25SJason Evans sopts.may_overflow = true; 2132b7eaed25SJason Evans sopts.bump_empty_alloc = true; 2133b7eaed25SJason Evans sopts.null_out_result_on_error = true; 2134b7eaed25SJason Evans sopts.set_errno_on_error = true; 2135b7eaed25SJason Evans sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n"; 2136a4bd5210SJason Evans 2137b7eaed25SJason Evans dopts.result = &ret; 2138b7eaed25SJason Evans dopts.num_items = num; 2139b7eaed25SJason Evans dopts.item_size = size; 2140b7eaed25SJason Evans dopts.zero = true; 2141b7eaed25SJason Evans 2142b7eaed25SJason Evans imalloc(&sopts, &dopts); 2143b7eaed25SJason Evans 2144*0ef50b4eSJason Evans LOG("core.calloc.exit", "result: %p", ret); 2145*0ef50b4eSJason Evans 2146b7eaed25SJason Evans return ret; 2147a4bd5210SJason Evans } 2148a4bd5210SJason Evans 2149f921d10fSJason Evans static void * 2150536b3538SJason Evans irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, 2151b7eaed25SJason Evans prof_tctx_t *tctx) { 2152f921d10fSJason Evans void *p; 2153a4bd5210SJason Evans 2154b7eaed25SJason Evans if (tctx == NULL) { 2155b7eaed25SJason Evans return NULL; 2156b7eaed25SJason Evans } 2157d0e79aa3SJason Evans if (usize <= SMALL_MAXCLASS) { 2158536b3538SJason Evans p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); 2159b7eaed25SJason Evans if (p == NULL) { 2160b7eaed25SJason Evans return NULL; 2161b7eaed25SJason Evans } 2162b7eaed25SJason Evans arena_prof_promote(tsd_tsdn(tsd), p, usize); 2163b7eaed25SJason Evans } else { 2164536b3538SJason Evans p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); 2165a4bd5210SJason Evans } 2166a4bd5210SJason Evans 2167b7eaed25SJason Evans return p; 2168b7eaed25SJason Evans } 
2169b7eaed25SJason Evans 2170b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void * 2171b7eaed25SJason Evans irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, 2172b7eaed25SJason Evans alloc_ctx_t *alloc_ctx) { 2173f921d10fSJason Evans void *p; 2174536b3538SJason Evans bool prof_active; 2175d0e79aa3SJason Evans prof_tctx_t *old_tctx, *tctx; 2176a4bd5210SJason Evans 2177536b3538SJason Evans prof_active = prof_active_get_unlocked(); 2178b7eaed25SJason Evans old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); 2179536b3538SJason Evans tctx = prof_alloc_prep(tsd, usize, prof_active, true); 2180b7eaed25SJason Evans if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { 2181536b3538SJason Evans p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); 2182b7eaed25SJason Evans } else { 2183536b3538SJason Evans p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); 2184b7eaed25SJason Evans } 2185536b3538SJason Evans if (unlikely(p == NULL)) { 2186536b3538SJason Evans prof_alloc_rollback(tsd, tctx, true); 2187b7eaed25SJason Evans return NULL; 2188536b3538SJason Evans } 2189536b3538SJason Evans prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, 2190536b3538SJason Evans old_tctx); 2191f921d10fSJason Evans 2192b7eaed25SJason Evans return p; 2193f921d10fSJason Evans } 2194f921d10fSJason Evans 2195b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void 2196b7eaed25SJason Evans ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { 2197b7eaed25SJason Evans if (!slow_path) { 2198b7eaed25SJason Evans tsd_assert_fast(tsd); 2199b7eaed25SJason Evans } 2200b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2201b7eaed25SJason Evans if (tsd_reentrancy_level_get(tsd) != 0) { 2202b7eaed25SJason Evans assert(slow_path); 2203b7eaed25SJason Evans } 2204b7eaed25SJason Evans 2205b7eaed25SJason Evans assert(ptr != NULL); 2206b7eaed25SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 2207b7eaed25SJason Evans 
2208b7eaed25SJason Evans alloc_ctx_t alloc_ctx; 2209b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2210b7eaed25SJason Evans rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 2211b7eaed25SJason Evans (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 2212b7eaed25SJason Evans assert(alloc_ctx.szind != NSIZES); 2213b7eaed25SJason Evans 2214a4bd5210SJason Evans size_t usize; 2215a4bd5210SJason Evans if (config_prof && opt_prof) { 2216b7eaed25SJason Evans usize = sz_index2size(alloc_ctx.szind); 2217b7eaed25SJason Evans prof_free(tsd, ptr, usize, &alloc_ctx); 2218b7eaed25SJason Evans } else if (config_stats) { 2219b7eaed25SJason Evans usize = sz_index2size(alloc_ctx.szind); 2220b7eaed25SJason Evans } 2221b7eaed25SJason Evans if (config_stats) { 2222d0e79aa3SJason Evans *tsd_thread_deallocatedp_get(tsd) += usize; 2223b7eaed25SJason Evans } 2224df0d881dSJason Evans 2225b7eaed25SJason Evans if (likely(!slow_path)) { 2226b7eaed25SJason Evans idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, 2227b7eaed25SJason Evans false); 2228b7eaed25SJason Evans } else { 2229b7eaed25SJason Evans idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, 2230b7eaed25SJason Evans true); 2231a4bd5210SJason Evans } 2232df0d881dSJason Evans } 2233f921d10fSJason Evans 2234b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void 2235b7eaed25SJason Evans isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { 2236b7eaed25SJason Evans if (!slow_path) { 2237b7eaed25SJason Evans tsd_assert_fast(tsd); 2238b7eaed25SJason Evans } 2239b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2240b7eaed25SJason Evans if (tsd_reentrancy_level_get(tsd) != 0) { 2241b7eaed25SJason Evans assert(slow_path); 2242b7eaed25SJason Evans } 22431f0a49e8SJason Evans 2244d0e79aa3SJason Evans assert(ptr != NULL); 2245d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 2246d0e79aa3SJason Evans 2247b7eaed25SJason Evans alloc_ctx_t alloc_ctx, 
*ctx; 2248*0ef50b4eSJason Evans if (!config_cache_oblivious && ((uintptr_t)ptr & PAGE_MASK) != 0) { 2249*0ef50b4eSJason Evans /* 2250*0ef50b4eSJason Evans * When cache_oblivious is disabled and ptr is not page aligned, 2251*0ef50b4eSJason Evans * the allocation was not sampled -- usize can be used to 2252*0ef50b4eSJason Evans * determine szind directly. 2253*0ef50b4eSJason Evans */ 2254*0ef50b4eSJason Evans alloc_ctx.szind = sz_size2index(usize); 2255*0ef50b4eSJason Evans alloc_ctx.slab = true; 2256*0ef50b4eSJason Evans ctx = &alloc_ctx; 2257*0ef50b4eSJason Evans if (config_debug) { 2258*0ef50b4eSJason Evans alloc_ctx_t dbg_ctx; 2259*0ef50b4eSJason Evans rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2260*0ef50b4eSJason Evans rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, 2261*0ef50b4eSJason Evans rtree_ctx, (uintptr_t)ptr, true, &dbg_ctx.szind, 2262*0ef50b4eSJason Evans &dbg_ctx.slab); 2263*0ef50b4eSJason Evans assert(dbg_ctx.szind == alloc_ctx.szind); 2264*0ef50b4eSJason Evans assert(dbg_ctx.slab == alloc_ctx.slab); 2265*0ef50b4eSJason Evans } 2266*0ef50b4eSJason Evans } else if (config_prof && opt_prof) { 2267b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2268b7eaed25SJason Evans rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 2269b7eaed25SJason Evans (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 2270b7eaed25SJason Evans assert(alloc_ctx.szind == sz_size2index(usize)); 2271b7eaed25SJason Evans ctx = &alloc_ctx; 2272b7eaed25SJason Evans } else { 2273b7eaed25SJason Evans ctx = NULL; 2274b7eaed25SJason Evans } 2275b7eaed25SJason Evans 2276*0ef50b4eSJason Evans if (config_prof && opt_prof) { 2277*0ef50b4eSJason Evans prof_free(tsd, ptr, usize, ctx); 2278*0ef50b4eSJason Evans } 2279b7eaed25SJason Evans if (config_stats) { 2280d0e79aa3SJason Evans *tsd_thread_deallocatedp_get(tsd) += usize; 2281b7eaed25SJason Evans } 2282b7eaed25SJason Evans 2283b7eaed25SJason Evans if (likely(!slow_path)) { 2284b7eaed25SJason Evans 
isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false); 2285b7eaed25SJason Evans } else { 2286b7eaed25SJason Evans isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); 2287b7eaed25SJason Evans } 2288d0e79aa3SJason Evans } 2289d0e79aa3SJason Evans 2290d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2291d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2292d0e79aa3SJason Evans JEMALLOC_ALLOC_SIZE(2) 2293b7eaed25SJason Evans je_realloc(void *ptr, size_t size) { 2294f921d10fSJason Evans void *ret; 22951f0a49e8SJason Evans tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); 2296f921d10fSJason Evans size_t usize JEMALLOC_CC_SILENCE_INIT(0); 2297f921d10fSJason Evans size_t old_usize = 0; 2298f921d10fSJason Evans 2299*0ef50b4eSJason Evans LOG("core.realloc.entry", "ptr: %p, size: %zu\n", ptr, size); 2300*0ef50b4eSJason Evans 2301d0e79aa3SJason Evans if (unlikely(size == 0)) { 2302f921d10fSJason Evans if (ptr != NULL) { 2303f921d10fSJason Evans /* realloc(ptr, 0) is equivalent to free(ptr). 
*/ 2304f921d10fSJason Evans UTRACE(ptr, 0, 0); 2305b7eaed25SJason Evans tcache_t *tcache; 2306b7eaed25SJason Evans tsd_t *tsd = tsd_fetch(); 2307b7eaed25SJason Evans if (tsd_reentrancy_level_get(tsd) == 0) { 2308b7eaed25SJason Evans tcache = tcache_get(tsd); 2309b7eaed25SJason Evans } else { 2310b7eaed25SJason Evans tcache = NULL; 2311b7eaed25SJason Evans } 2312b7eaed25SJason Evans ifree(tsd, ptr, tcache, true); 2313*0ef50b4eSJason Evans 2314*0ef50b4eSJason Evans LOG("core.realloc.exit", "result: %p", NULL); 2315b7eaed25SJason Evans return NULL; 2316f921d10fSJason Evans } 2317f921d10fSJason Evans size = 1; 2318f921d10fSJason Evans } 2319f921d10fSJason Evans 2320d0e79aa3SJason Evans if (likely(ptr != NULL)) { 2321d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 2322b7eaed25SJason Evans tsd_t *tsd = tsd_fetch(); 2323f921d10fSJason Evans 2324b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 23251f0a49e8SJason Evans 2326b7eaed25SJason Evans alloc_ctx_t alloc_ctx; 2327b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2328b7eaed25SJason Evans rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 2329b7eaed25SJason Evans (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 2330b7eaed25SJason Evans assert(alloc_ctx.szind != NSIZES); 2331b7eaed25SJason Evans old_usize = sz_index2size(alloc_ctx.szind); 2332b7eaed25SJason Evans assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); 2333f921d10fSJason Evans if (config_prof && opt_prof) { 2334b7eaed25SJason Evans usize = sz_s2u(size); 2335b7eaed25SJason Evans ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ? 
2336b7eaed25SJason Evans NULL : irealloc_prof(tsd, ptr, old_usize, usize, 2337b7eaed25SJason Evans &alloc_ctx); 2338f921d10fSJason Evans } else { 2339b7eaed25SJason Evans if (config_stats) { 2340b7eaed25SJason Evans usize = sz_s2u(size); 2341b7eaed25SJason Evans } 2342d0e79aa3SJason Evans ret = iralloc(tsd, ptr, old_usize, size, 0, false); 2343f921d10fSJason Evans } 23441f0a49e8SJason Evans tsdn = tsd_tsdn(tsd); 2345f921d10fSJason Evans } else { 2346f921d10fSJason Evans /* realloc(NULL, size) is equivalent to malloc(size). */ 2347*0ef50b4eSJason Evans void *ret = je_malloc(size); 2348*0ef50b4eSJason Evans LOG("core.realloc.exit", "result: %p", ret); 2349*0ef50b4eSJason Evans return ret; 2350f921d10fSJason Evans } 2351f921d10fSJason Evans 2352d0e79aa3SJason Evans if (unlikely(ret == NULL)) { 2353d0e79aa3SJason Evans if (config_xmalloc && unlikely(opt_xmalloc)) { 2354f921d10fSJason Evans malloc_write("<jemalloc>: Error in realloc(): " 2355f921d10fSJason Evans "out of memory\n"); 2356f921d10fSJason Evans abort(); 2357f921d10fSJason Evans } 2358f921d10fSJason Evans set_errno(ENOMEM); 2359f921d10fSJason Evans } 2360d0e79aa3SJason Evans if (config_stats && likely(ret != NULL)) { 23611f0a49e8SJason Evans tsd_t *tsd; 23621f0a49e8SJason Evans 2363b7eaed25SJason Evans assert(usize == isalloc(tsdn, ret)); 23641f0a49e8SJason Evans tsd = tsdn_tsd(tsdn); 2365d0e79aa3SJason Evans *tsd_thread_allocatedp_get(tsd) += usize; 2366d0e79aa3SJason Evans *tsd_thread_deallocatedp_get(tsd) += old_usize; 2367f921d10fSJason Evans } 2368f921d10fSJason Evans UTRACE(ptr, size, ret); 2369b7eaed25SJason Evans check_entry_exit_locking(tsdn); 2370*0ef50b4eSJason Evans 2371*0ef50b4eSJason Evans LOG("core.realloc.exit", "result: %p", ret); 2372b7eaed25SJason Evans return ret; 2373f921d10fSJason Evans } 2374f921d10fSJason Evans 2375d0e79aa3SJason Evans JEMALLOC_EXPORT void JEMALLOC_NOTHROW 2376b7eaed25SJason Evans je_free(void *ptr) { 2377*0ef50b4eSJason Evans LOG("core.free.entry", "ptr: %p", ptr); 
2378*0ef50b4eSJason Evans 2379f921d10fSJason Evans UTRACE(ptr, 0, 0); 2380d0e79aa3SJason Evans if (likely(ptr != NULL)) { 23818b2f5aafSJason Evans /* 23828b2f5aafSJason Evans * We avoid setting up tsd fully (e.g. tcache, arena binding) 23838b2f5aafSJason Evans * based on only free() calls -- other activities trigger the 23848b2f5aafSJason Evans * minimal to full transition. This is because free() may 23858b2f5aafSJason Evans * happen during thread shutdown after tls deallocation: if a 23868b2f5aafSJason Evans * thread never had any malloc activities until then, a 23878b2f5aafSJason Evans * fully-setup tsd won't be destructed properly. 23888b2f5aafSJason Evans */ 23898b2f5aafSJason Evans tsd_t *tsd = tsd_fetch_min(); 2390b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2391b7eaed25SJason Evans 2392b7eaed25SJason Evans tcache_t *tcache; 2393b7eaed25SJason Evans if (likely(tsd_fast(tsd))) { 2394b7eaed25SJason Evans tsd_assert_fast(tsd); 2395b7eaed25SJason Evans /* Unconditionally get tcache ptr on fast path. */ 2396b7eaed25SJason Evans tcache = tsd_tcachep_get(tsd); 2397b7eaed25SJason Evans ifree(tsd, ptr, tcache, false); 2398b7eaed25SJason Evans } else { 2399b7eaed25SJason Evans if (likely(tsd_reentrancy_level_get(tsd) == 0)) { 2400b7eaed25SJason Evans tcache = tcache_get(tsd); 2401b7eaed25SJason Evans } else { 2402b7eaed25SJason Evans tcache = NULL; 2403b7eaed25SJason Evans } 2404b7eaed25SJason Evans ifree(tsd, ptr, tcache, true); 2405b7eaed25SJason Evans } 2406b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2407d0e79aa3SJason Evans } 2408*0ef50b4eSJason Evans LOG("core.free.exit", ""); 2409a4bd5210SJason Evans } 2410a4bd5210SJason Evans 2411a4bd5210SJason Evans /* 2412a4bd5210SJason Evans * End malloc(3)-compatible functions. 
2413a4bd5210SJason Evans */ 2414a4bd5210SJason Evans /******************************************************************************/ 2415a4bd5210SJason Evans /* 2416a4bd5210SJason Evans * Begin non-standard override functions. 2417a4bd5210SJason Evans */ 2418a4bd5210SJason Evans 2419a4bd5210SJason Evans #ifdef JEMALLOC_OVERRIDE_MEMALIGN 2420d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2421d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2422d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) 2423b7eaed25SJason Evans je_memalign(size_t alignment, size_t size) { 2424b7eaed25SJason Evans void *ret; 2425b7eaed25SJason Evans static_opts_t sopts; 2426b7eaed25SJason Evans dynamic_opts_t dopts; 2427b7eaed25SJason Evans 2428*0ef50b4eSJason Evans LOG("core.memalign.entry", "alignment: %zu, size: %zu\n", alignment, 2429*0ef50b4eSJason Evans size); 2430*0ef50b4eSJason Evans 2431b7eaed25SJason Evans static_opts_init(&sopts); 2432b7eaed25SJason Evans dynamic_opts_init(&dopts); 2433b7eaed25SJason Evans 2434b7eaed25SJason Evans sopts.bump_empty_alloc = true; 2435b7eaed25SJason Evans sopts.min_alignment = 1; 2436b7eaed25SJason Evans sopts.oom_string = 2437b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: out of memory\n"; 2438b7eaed25SJason Evans sopts.invalid_alignment_string = 2439b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; 2440b7eaed25SJason Evans sopts.null_out_result_on_error = true; 2441b7eaed25SJason Evans 2442b7eaed25SJason Evans dopts.result = &ret; 2443b7eaed25SJason Evans dopts.num_items = 1; 2444b7eaed25SJason Evans dopts.item_size = size; 2445b7eaed25SJason Evans dopts.alignment = alignment; 2446b7eaed25SJason Evans 2447b7eaed25SJason Evans imalloc(&sopts, &dopts); 2448*0ef50b4eSJason Evans 2449*0ef50b4eSJason Evans LOG("core.memalign.exit", "result: %p", ret); 2450b7eaed25SJason Evans return ret; 2451a4bd5210SJason Evans } 2452a4bd5210SJason Evans #endif 2453a4bd5210SJason Evans 
2454a4bd5210SJason Evans #ifdef JEMALLOC_OVERRIDE_VALLOC 2455d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2456d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2457d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) 2458b7eaed25SJason Evans je_valloc(size_t size) { 2459b7eaed25SJason Evans void *ret; 2460b7eaed25SJason Evans 2461b7eaed25SJason Evans static_opts_t sopts; 2462b7eaed25SJason Evans dynamic_opts_t dopts; 2463b7eaed25SJason Evans 2464*0ef50b4eSJason Evans LOG("core.valloc.entry", "size: %zu\n", size); 2465*0ef50b4eSJason Evans 2466b7eaed25SJason Evans static_opts_init(&sopts); 2467b7eaed25SJason Evans dynamic_opts_init(&dopts); 2468b7eaed25SJason Evans 2469b7eaed25SJason Evans sopts.bump_empty_alloc = true; 2470b7eaed25SJason Evans sopts.null_out_result_on_error = true; 2471b7eaed25SJason Evans sopts.min_alignment = PAGE; 2472b7eaed25SJason Evans sopts.oom_string = 2473b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: out of memory\n"; 2474b7eaed25SJason Evans sopts.invalid_alignment_string = 2475b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; 2476b7eaed25SJason Evans 2477b7eaed25SJason Evans dopts.result = &ret; 2478b7eaed25SJason Evans dopts.num_items = 1; 2479b7eaed25SJason Evans dopts.item_size = size; 2480b7eaed25SJason Evans dopts.alignment = PAGE; 2481b7eaed25SJason Evans 2482b7eaed25SJason Evans imalloc(&sopts, &dopts); 2483b7eaed25SJason Evans 2484*0ef50b4eSJason Evans LOG("core.valloc.exit", "result: %p\n", ret); 2485b7eaed25SJason Evans return ret; 2486a4bd5210SJason Evans } 2487a4bd5210SJason Evans #endif 2488a4bd5210SJason Evans 2489b7eaed25SJason Evans #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK) 2490a4bd5210SJason Evans /* 2491a4bd5210SJason Evans * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible 2492a4bd5210SJason Evans * to inconsistently reference libc's malloc(3)-compatible functions 2493a4bd5210SJason 
Evans * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). 2494a4bd5210SJason Evans * 2495a4bd5210SJason Evans * These definitions interpose hooks in glibc. The functions are actually 2496a4bd5210SJason Evans * passed an extra argument for the caller return address, which will be 2497a4bd5210SJason Evans * ignored. 2498a4bd5210SJason Evans */ 249982872ac0SJason Evans JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; 250082872ac0SJason Evans JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; 250182872ac0SJason Evans JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; 2502d0e79aa3SJason Evans # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK 250382872ac0SJason Evans JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = 2504e722f8f8SJason Evans je_memalign; 2505a4bd5210SJason Evans # endif 2506bde95144SJason Evans 2507bde95144SJason Evans # ifdef CPU_COUNT 2508bde95144SJason Evans /* 2509bde95144SJason Evans * To enable static linking with glibc, the libc specific malloc interface must 2510bde95144SJason Evans * be implemented also, so none of glibc's malloc.o functions are added to the 2511bde95144SJason Evans * link. 2512bde95144SJason Evans */ 2513bde95144SJason Evans # define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) 2514bde95144SJason Evans /* To force macro expansion of je_ prefix before stringification. 
*/ 2515bde95144SJason Evans # define PREALIAS(je_fn) ALIAS(je_fn) 2516b7eaed25SJason Evans # ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC 2517bde95144SJason Evans void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); 2518b7eaed25SJason Evans # endif 2519b7eaed25SJason Evans # ifdef JEMALLOC_OVERRIDE___LIBC_FREE 2520b7eaed25SJason Evans void __libc_free(void* ptr) PREALIAS(je_free); 2521b7eaed25SJason Evans # endif 2522b7eaed25SJason Evans # ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC 2523b7eaed25SJason Evans void *__libc_malloc(size_t size) PREALIAS(je_malloc); 2524b7eaed25SJason Evans # endif 2525b7eaed25SJason Evans # ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN 2526bde95144SJason Evans void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); 2527b7eaed25SJason Evans # endif 2528b7eaed25SJason Evans # ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC 2529b7eaed25SJason Evans void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); 2530b7eaed25SJason Evans # endif 2531b7eaed25SJason Evans # ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC 2532bde95144SJason Evans void *__libc_valloc(size_t size) PREALIAS(je_valloc); 2533b7eaed25SJason Evans # endif 2534b7eaed25SJason Evans # ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN 2535b7eaed25SJason Evans int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign); 2536b7eaed25SJason Evans # endif 2537bde95144SJason Evans # undef PREALIAS 2538bde95144SJason Evans # undef ALIAS 2539bde95144SJason Evans # endif 2540d0e79aa3SJason Evans #endif 2541a4bd5210SJason Evans 2542a4bd5210SJason Evans /* 2543a4bd5210SJason Evans * End non-standard override functions. 2544a4bd5210SJason Evans */ 2545a4bd5210SJason Evans /******************************************************************************/ 2546a4bd5210SJason Evans /* 2547a4bd5210SJason Evans * Begin non-standard functions. 
2548a4bd5210SJason Evans */ 2549a4bd5210SJason Evans 2550d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2551d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2552d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) 2553b7eaed25SJason Evans je_mallocx(size_t size, int flags) { 2554b7eaed25SJason Evans void *ret; 2555b7eaed25SJason Evans static_opts_t sopts; 2556b7eaed25SJason Evans dynamic_opts_t dopts; 2557f921d10fSJason Evans 2558*0ef50b4eSJason Evans LOG("core.mallocx.entry", "size: %zu, flags: %d", size, flags); 2559*0ef50b4eSJason Evans 2560b7eaed25SJason Evans static_opts_init(&sopts); 2561b7eaed25SJason Evans dynamic_opts_init(&dopts); 2562f921d10fSJason Evans 2563b7eaed25SJason Evans sopts.assert_nonempty_alloc = true; 2564b7eaed25SJason Evans sopts.null_out_result_on_error = true; 2565b7eaed25SJason Evans sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n"; 2566b7eaed25SJason Evans 2567b7eaed25SJason Evans dopts.result = &ret; 2568b7eaed25SJason Evans dopts.num_items = 1; 2569b7eaed25SJason Evans dopts.item_size = size; 2570b7eaed25SJason Evans if (unlikely(flags != 0)) { 2571b7eaed25SJason Evans if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { 2572b7eaed25SJason Evans dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); 2573f921d10fSJason Evans } 25741f0a49e8SJason Evans 2575b7eaed25SJason Evans dopts.zero = MALLOCX_ZERO_GET(flags); 2576b7eaed25SJason Evans 2577b7eaed25SJason Evans if ((flags & MALLOCX_TCACHE_MASK) != 0) { 2578b7eaed25SJason Evans if ((flags & MALLOCX_TCACHE_MASK) 2579b7eaed25SJason Evans == MALLOCX_TCACHE_NONE) { 2580b7eaed25SJason Evans dopts.tcache_ind = TCACHE_IND_NONE; 2581b7eaed25SJason Evans } else { 2582b7eaed25SJason Evans dopts.tcache_ind = MALLOCX_TCACHE_GET(flags); 2583b7eaed25SJason Evans } 2584b7eaed25SJason Evans } else { 2585b7eaed25SJason Evans dopts.tcache_ind = TCACHE_IND_AUTOMATIC; 2586b7eaed25SJason Evans } 2587b7eaed25SJason Evans 2588b7eaed25SJason Evans if 
((flags & MALLOCX_ARENA_MASK) != 0) 2589b7eaed25SJason Evans dopts.arena_ind = MALLOCX_ARENA_GET(flags); 2590b7eaed25SJason Evans } 2591b7eaed25SJason Evans 2592b7eaed25SJason Evans imalloc(&sopts, &dopts); 2593*0ef50b4eSJason Evans 2594*0ef50b4eSJason Evans LOG("core.mallocx.exit", "result: %p", ret); 2595b7eaed25SJason Evans return ret; 2596f921d10fSJason Evans } 2597f921d10fSJason Evans 2598f921d10fSJason Evans static void * 2599b7eaed25SJason Evans irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, 2600536b3538SJason Evans size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, 2601b7eaed25SJason Evans prof_tctx_t *tctx) { 2602f921d10fSJason Evans void *p; 2603f921d10fSJason Evans 2604b7eaed25SJason Evans if (tctx == NULL) { 2605b7eaed25SJason Evans return NULL; 2606b7eaed25SJason Evans } 2607d0e79aa3SJason Evans if (usize <= SMALL_MAXCLASS) { 2608b7eaed25SJason Evans p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS, 2609b7eaed25SJason Evans alignment, zero, tcache, arena); 2610b7eaed25SJason Evans if (p == NULL) { 2611b7eaed25SJason Evans return NULL; 2612b7eaed25SJason Evans } 2613b7eaed25SJason Evans arena_prof_promote(tsdn, p, usize); 2614f921d10fSJason Evans } else { 2615b7eaed25SJason Evans p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, 2616d0e79aa3SJason Evans tcache, arena); 2617f921d10fSJason Evans } 2618f921d10fSJason Evans 2619b7eaed25SJason Evans return p; 2620f921d10fSJason Evans } 2621f921d10fSJason Evans 2622b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void * 2623536b3538SJason Evans irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, 2624d0e79aa3SJason Evans size_t alignment, size_t *usize, bool zero, tcache_t *tcache, 2625b7eaed25SJason Evans arena_t *arena, alloc_ctx_t *alloc_ctx) { 2626f921d10fSJason Evans void *p; 2627536b3538SJason Evans bool prof_active; 2628d0e79aa3SJason Evans prof_tctx_t *old_tctx, *tctx; 2629f921d10fSJason Evans 2630536b3538SJason Evans 
prof_active = prof_active_get_unlocked(); 2631b7eaed25SJason Evans old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); 263262b2691eSJason Evans tctx = prof_alloc_prep(tsd, *usize, prof_active, false); 2633d0e79aa3SJason Evans if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { 2634b7eaed25SJason Evans p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, 2635b7eaed25SJason Evans *usize, alignment, zero, tcache, arena, tctx); 2636d0e79aa3SJason Evans } else { 2637b7eaed25SJason Evans p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, 2638b7eaed25SJason Evans zero, tcache, arena); 2639f921d10fSJason Evans } 2640d0e79aa3SJason Evans if (unlikely(p == NULL)) { 264162b2691eSJason Evans prof_alloc_rollback(tsd, tctx, false); 2642b7eaed25SJason Evans return NULL; 2643d0e79aa3SJason Evans } 2644f921d10fSJason Evans 2645536b3538SJason Evans if (p == old_ptr && alignment != 0) { 2646f921d10fSJason Evans /* 2647f921d10fSJason Evans * The allocation did not move, so it is possible that the size 2648f921d10fSJason Evans * class is smaller than would guarantee the requested 2649f921d10fSJason Evans * alignment, and that the alignment constraint was 2650f921d10fSJason Evans * serendipitously satisfied. Additionally, old_usize may not 2651f921d10fSJason Evans * be the same as the current usize because of in-place large 2652f921d10fSJason Evans * reallocation. Therefore, query the actual value of usize. 
	 */
	*usize = isalloc(tsd_tsdn(tsd), p);
	}
	prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
	    old_usize, old_tctx);

	return p;
}

/*
 * rallocx(): non-standard reallocation entry point.  Resizes the object at
 * ptr to at least size bytes, honoring the MALLOCX_* encodings in flags
 * (lg-alignment, MALLOCX_ZERO, an explicit tcache, and/or an explicit arena
 * index).  Returns the possibly-moved pointer, or NULL on failure (after
 * abort() when opt_xmalloc is enabled).
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_rallocx(void *ptr, size_t size, int flags) {
	void *p;
	tsd_t *tsd;
	size_t usize;
	size_t old_usize;
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;
	arena_t *arena;
	tcache_t *tcache;

	LOG("core.rallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
	    size, flags);


	assert(ptr != NULL);
	assert(size != 0);
	assert(malloc_initialized() || IS_INITIALIZER);
	tsd = tsd_fetch();
	check_entry_exit_locking(tsd_tsdn(tsd));

	/* An explicit arena index in flags overrides automatic arena choice. */
	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(arena == NULL)) {
			goto label_oom;
		}
	} else {
		arena = NULL;
	}

	/*
	 * flags may select a specific tcache, suppress caching entirely
	 * (MALLOCX_TCACHE_NONE), or default to the thread's own tcache.
	 */
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
			tcache = NULL;
		} else {
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
		}
	} else {
		tcache = tcache_get(tsd);
	}

	/* Recover ptr's current usable size from the extent radix tree. */
	alloc_ctx_t alloc_ctx;
	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
	assert(alloc_ctx.szind != NSIZES);
	old_usize = sz_index2size(alloc_ctx.szind);
	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
	if (config_prof && opt_prof) {
		usize = (alignment == 0) ?
		    sz_s2u(size) : sz_sa2u(size, alignment);
		if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
			goto label_oom;
		}
		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
		    zero, tcache, arena, &alloc_ctx);
		if (unlikely(p == NULL)) {
			goto label_oom;
		}
	} else {
		p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment,
		    zero, tcache, arena);
		if (unlikely(p == NULL)) {
			goto label_oom;
		}
		/* usize is only consumed below when stats are enabled. */
		if (config_stats) {
			usize = isalloc(tsd_tsdn(tsd), p);
		}
	}
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, p);
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.rallocx.exit", "result: %p", p);
	return p;
label_oom:
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
		abort();
	}
	UTRACE(ptr, size, 0);
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.rallocx.exit", "result: %p", NULL);
	return NULL;
}

/*
 * Try to resize ptr in place.  Returns the new usable size on success, or
 * old_usize unchanged when ixalloc() cannot satisfy the request without
 * moving the object.
 */
JEMALLOC_ALWAYS_INLINE size_t
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero) {
	size_t usize;

	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) {
		return old_usize;
	}
	usize = isalloc(tsdn, ptr);

	return usize;
}

/*
 * Profiling variant of the in-place resize: when no prof tctx was captured
 * (tctx == NULL) the resize is skipped entirely and old_usize is returned.
 */
static size_t
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) {
	size_t usize;

	if (tctx == NULL) {
		return old_usize;
	}
	usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
	    zero);

	return usize;
}

/*
 * xallocx() path with profiling enabled: wraps the in-place resize with
 * prof_alloc_prep()/prof_realloc() bookkeeping, rolling back the prepared
 * sample when the size did not change.
 */
JEMALLOC_ALWAYS_INLINE size_t
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) {
	size_t usize_max, usize;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx);
	/*
	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
	 * Therefore, compute its maximum possible value and use that in
	 * prof_alloc_prep() to decide whether to capture a backtrace.
	 * prof_realloc() will use the actual usize to decide whether to sample.
	 */
	if (alignment == 0) {
		usize_max = sz_s2u(size+extra);
		assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS);
	} else {
		usize_max = sz_sa2u(size+extra, alignment);
		if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) {
			/*
			 * usize_max is out of range, and chances are that
			 * allocation will fail, but use the maximum possible
			 * value and carry on with prof_alloc_prep(), just in
			 * case allocation succeeds.
			 */
			usize_max = LARGE_MAXCLASS;
		}
	}
	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);

	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
		    size, extra, alignment, zero, tctx);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (usize == old_usize) {
		/* Nothing changed; undo the prepared sample. */
		prof_alloc_rollback(tsd, tctx, false);
		return usize;
	}
	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
	    old_tctx);

	return usize;
}

/*
 * xallocx(): non-standard in-place resize.  Attempts to grow/shrink the
 * object at ptr to between size and size+extra bytes without moving it, and
 * returns the resulting usable size (old usable size when nothing changed).
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_xallocx(void *ptr, size_t size, size_t extra, int flags) {
	tsd_t *tsd;
	size_t usize, old_usize;
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;

	LOG("core.xallocx.entry", "ptr: %p, size: %zu, extra: %zu, "
	    "flags: %d", ptr, size, extra, flags);

	assert(ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized() || IS_INITIALIZER);
	tsd = tsd_fetch();
	check_entry_exit_locking(tsd_tsdn(tsd));

	/* Recover ptr's current usable size from the extent radix tree. */
	alloc_ctx_t alloc_ctx;
	rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
	rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
	assert(alloc_ctx.szind != NSIZES);
	old_usize = sz_index2size(alloc_ctx.szind);
	assert(old_usize == isalloc(tsd_tsdn(tsd), ptr));
	/*
	 * The API explicitly absolves itself of protecting against (size +
	 * extra) numerical overflow, but we may need to clamp extra to avoid
	 * exceeding LARGE_MAXCLASS.
	 *
	 * Ordinarily, size limit checking is handled deeper down, but here we
	 * have to check as part of (size + extra) clamping, since we need the
	 * clamped value in the above helper functions.
	 */
	if (unlikely(size > LARGE_MAXCLASS)) {
		usize = old_usize;
		goto label_not_resized;
	}
	if (unlikely(LARGE_MAXCLASS - size < extra)) {
		extra = LARGE_MAXCLASS - size;
	}

	if (config_prof && opt_prof) {
		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
		    alignment, zero, &alloc_ctx);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (unlikely(usize == old_usize)) {
		goto label_not_resized;
	}

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
label_not_resized:
	UTRACE(ptr, size, ptr);
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.xallocx.exit", "result: %zu", usize);
	return usize;
}

/*
 * sallocx(): return the usable size of the allocation at ptr.  flags is
 * accepted for API symmetry; here it is only logged.
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, UNUSED int flags) {
	size_t usize;
	tsdn_t *tsdn;

	LOG("core.sallocx.entry", "ptr: %p, flags: %d", ptr, flags);

	assert(malloc_initialized() || IS_INITIALIZER);
	assert(ptr != NULL);

	tsdn = tsdn_fetch();
	check_entry_exit_locking(tsdn);

	/* ivsalloc() validates ptr against the extent map before sizing it. */
	if (config_debug || force_ivsalloc) {
		usize = ivsalloc(tsdn, ptr);
		assert(force_ivsalloc || usize != 0);
	} else {
		usize = isalloc(tsdn, ptr);
	}

	check_entry_exit_locking(tsdn);

	LOG("core.sallocx.exit", "result: %zu", usize);
	return usize;
}

/*
 * dallocx(): deallocate ptr, honoring an explicit tcache selection in flags.
 * The fast path requires a fully initialized, non-reentrant tsd.
 */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_dallocx(void *ptr, int flags) {
	LOG("core.dallocx.entry", "ptr: %p, flags: %d", ptr, flags);

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	tsd_t *tsd = tsd_fetch();
	bool fast = tsd_fast(tsd);
	check_entry_exit_locking(tsd_tsdn(tsd));

	tcache_t *tcache;
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		/* Not allowed to be reentrant and specify a custom tcache. */
		assert(tsd_reentrancy_level_get(tsd) == 0);
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
			tcache = NULL;
		} else {
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
		}
	} else {
		if (likely(fast)) {
			tcache = tsd_tcachep_get(tsd);
			assert(tcache == tcache_get(tsd));
		} else {
			/* Reentrant frees bypass the tcache entirely. */
			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
				tcache = tcache_get(tsd);
			} else {
				tcache = NULL;
			}
		}
	}

	UTRACE(ptr, 0, 0);
	if (likely(fast)) {
		tsd_assert_fast(tsd);
		ifree(tsd, ptr, tcache, false);
	} else {
		ifree(tsd, ptr, tcache, true);
	}
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.dallocx.exit", "");
}

/*
 * Compute the usable size that mallocx(size, flags) would produce, taking an
 * explicit lg-alignment in flags into account.
 */
JEMALLOC_ALWAYS_INLINE size_t
inallocx(tsdn_t *tsdn, size_t size, int flags) {
	check_entry_exit_locking(tsdn);

	size_t usize;
	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) {
		usize = sz_s2u(size);
	} else {
		usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
	}
	check_entry_exit_locking(tsdn);
	return usize;
}

/*
 * sdallocx(): sized deallocation.  The caller-supplied size/flags must map to
 * ptr's actual usable size (asserted below); otherwise identical in shape to
 * dallocx() but calls the size-aware isfree().
 */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_sdallocx(void *ptr, size_t size, int flags) {
	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	LOG("core.sdallocx.entry", "ptr: %p, size: %zu, flags: %d", ptr,
	    size, flags);

	tsd_t *tsd = tsd_fetch();
	bool fast = tsd_fast(tsd);
	size_t usize = inallocx(tsd_tsdn(tsd), size, flags);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr));
	check_entry_exit_locking(tsd_tsdn(tsd));

	tcache_t *tcache;
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		/* Not allowed to be reentrant and specify a custom tcache. */
		assert(tsd_reentrancy_level_get(tsd) == 0);
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) {
			tcache = NULL;
		} else {
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
		}
	} else {
		if (likely(fast)) {
			tcache = tsd_tcachep_get(tsd);
			assert(tcache == tcache_get(tsd));
		} else {
			if (likely(tsd_reentrancy_level_get(tsd) == 0)) {
				tcache = tcache_get(tsd);
			} else {
				tcache = NULL;
			}
		}
	}

	UTRACE(ptr, 0, 0);
	if (likely(fast)) {
		tsd_assert_fast(tsd);
		isfree(tsd, ptr, usize, tcache, false);
	} else {
		isfree(tsd, ptr, usize, tcache, true);
	}
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.sdallocx.exit", "");
}

/*
 * nallocx(): return the usable size a mallocx(size, flags) call would yield,
 * or 0 when initialization fails or the request exceeds LARGE_MAXCLASS.
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_nallocx(size_t size, int flags) {
	size_t usize;
	tsdn_t *tsdn;

	assert(size != 0);

	if (unlikely(malloc_init())) {
		LOG("core.nallocx.exit", "result: %zu", ZU(0));
		return 0;
	}

	tsdn = tsdn_fetch();
	check_entry_exit_locking(tsdn);

	usize = inallocx(tsdn, size, flags);
	if (unlikely(usize > LARGE_MAXCLASS)) {
		LOG("core.nallocx.exit", "result: %zu", ZU(0));
		return 0;
	}

	check_entry_exit_locking(tsdn);
	LOG("core.nallocx.exit", "result: %zu", usize);
	return usize;
}

/*
 * mallctl(): name-based control interface.  Returns EAGAIN when lazy
 * initialization fails; otherwise forwards to ctl_byname().
 */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen) {
	int ret;
	tsd_t *tsd;

	LOG("core.mallctl.entry", "name: %s", name);

	if (unlikely(malloc_init())) {
		LOG("core.mallctl.exit", "result: %d", EAGAIN);
		return EAGAIN;
	}

	tsd = tsd_fetch();
	check_entry_exit_locking(tsd_tsdn(tsd));
	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.mallctl.exit", "result: %d", ret);
	return ret;
}

/*
 * mallctlnametomib(): translate a period-separated ctl name into a MIB for
 * later repeated use with mallctlbymib().
 */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) {
	int ret;

	LOG("core.mallctlnametomib.entry", "name: %s", name);

	if (unlikely(malloc_init())) {
		LOG("core.mallctlnametomib.exit", "result: %d", EAGAIN);
		return EAGAIN;
	}

	tsd_t *tsd = tsd_fetch();
	check_entry_exit_locking(tsd_tsdn(tsd));
	ret = ctl_nametomib(tsd, name, mibp, miblenp);
	check_entry_exit_locking(tsd_tsdn(tsd));

	LOG("core.mallctlnametomib.exit", "result: %d", ret);
	return ret;
}

/*
 * mallctlbymib(): MIB-based control interface; avoids repeated name lookup
 * for hot ctl paths.  Returns EAGAIN when lazy initialization fails.
 */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen) {
	int ret;
	tsd_t *tsd;

	LOG("core.mallctlbymib.entry", "");

	if (unlikely(malloc_init())) {
		LOG("core.mallctlbymib.exit", "result: %d", EAGAIN);
		return EAGAIN;
	}

	tsd = tsd_fetch();
	check_entry_exit_locking(tsd_tsdn(tsd));
	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
	check_entry_exit_locking(tsd_tsdn(tsd));
	LOG("core.mallctlbymib.exit", "result: %d", ret);
	return ret;
}

/*
 * malloc_stats_print(): forward to stats_print(), bracketing the call with
 * the usual entry/exit locking sanity checks.
 */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts) {
	tsdn_t *tsdn;

	LOG("core.malloc_stats_print.entry", "");

	tsdn = tsdn_fetch();
	check_entry_exit_locking(tsdn);
	stats_print(write_cb, cbopaque, opts);
	check_entry_exit_locking(tsdn);
	LOG("core.malloc_stats_print.exit", "");
}

/*
 * malloc_usable_size(): return the usable size of ptr, or 0 for NULL.  Under
 * config_debug/force_ivsalloc the pointer is validated via ivsalloc() first.
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) {
	size_t ret;
	tsdn_t *tsdn;

	LOG("core.malloc_usable_size.entry", "ptr: %p", ptr);

	assert(malloc_initialized() || IS_INITIALIZER);

	tsdn = tsdn_fetch();
	check_entry_exit_locking(tsdn);

	if (unlikely(ptr == NULL)) {
		ret = 0;
	} else {
		if (config_debug || force_ivsalloc) {
			ret = ivsalloc(tsdn, ptr);
			assert(force_ivsalloc || ret != 0);
		} else {
			ret = isalloc(tsdn, ptr);
		}
	}

	check_entry_exit_locking(tsdn);
	LOG("core.malloc_usable_size.exit", "result: %zu", ret);
	return ret;
}

/*
 * End non-standard functions.
3164a4bd5210SJason Evans */ 3165a4bd5210SJason Evans /******************************************************************************/ 3166a4bd5210SJason Evans /* 3167d0e79aa3SJason Evans * Begin compatibility functions. 3168a4bd5210SJason Evans */ 3169d0e79aa3SJason Evans 3170d0e79aa3SJason Evans #define ALLOCM_LG_ALIGN(la) (la) 3171d0e79aa3SJason Evans #define ALLOCM_ALIGN(a) (ffsl(a)-1) 3172d0e79aa3SJason Evans #define ALLOCM_ZERO ((int)0x40) 3173d0e79aa3SJason Evans #define ALLOCM_NO_MOVE ((int)0x80) 3174d0e79aa3SJason Evans 3175d0e79aa3SJason Evans #define ALLOCM_SUCCESS 0 3176d0e79aa3SJason Evans #define ALLOCM_ERR_OOM 1 3177d0e79aa3SJason Evans #define ALLOCM_ERR_NOT_MOVED 2 3178a4bd5210SJason Evans 3179a4bd5210SJason Evans int 3180b7eaed25SJason Evans je_allocm(void **ptr, size_t *rsize, size_t size, int flags) { 3181a4bd5210SJason Evans assert(ptr != NULL); 3182a4bd5210SJason Evans 3183b7eaed25SJason Evans void *p = je_mallocx(size, flags); 3184b7eaed25SJason Evans if (p == NULL) { 3185a4bd5210SJason Evans return (ALLOCM_ERR_OOM); 3186b7eaed25SJason Evans } 3187b7eaed25SJason Evans if (rsize != NULL) { 3188b7eaed25SJason Evans *rsize = isalloc(tsdn_fetch(), p); 3189b7eaed25SJason Evans } 3190f921d10fSJason Evans *ptr = p; 3191b7eaed25SJason Evans return ALLOCM_SUCCESS; 3192a4bd5210SJason Evans } 3193a4bd5210SJason Evans 3194a4bd5210SJason Evans int 3195b7eaed25SJason Evans je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) { 3196a4bd5210SJason Evans assert(ptr != NULL); 3197a4bd5210SJason Evans assert(*ptr != NULL); 3198a4bd5210SJason Evans assert(size != 0); 3199a4bd5210SJason Evans assert(SIZE_T_MAX - size >= extra); 3200a4bd5210SJason Evans 3201b7eaed25SJason Evans int ret; 3202b7eaed25SJason Evans bool no_move = flags & ALLOCM_NO_MOVE; 3203b7eaed25SJason Evans 3204f921d10fSJason Evans if (no_move) { 3205f921d10fSJason Evans size_t usize = je_xallocx(*ptr, size, extra, flags); 3206f921d10fSJason Evans ret = (usize >= size) ? 
ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED; 3207b7eaed25SJason Evans if (rsize != NULL) { 3208a4bd5210SJason Evans *rsize = usize; 3209b7eaed25SJason Evans } 3210a4bd5210SJason Evans } else { 3211f921d10fSJason Evans void *p = je_rallocx(*ptr, size+extra, flags); 3212f921d10fSJason Evans if (p != NULL) { 3213f921d10fSJason Evans *ptr = p; 3214f921d10fSJason Evans ret = ALLOCM_SUCCESS; 3215b7eaed25SJason Evans } else { 3216f921d10fSJason Evans ret = ALLOCM_ERR_OOM; 3217a4bd5210SJason Evans } 3218b7eaed25SJason Evans if (rsize != NULL) { 3219b7eaed25SJason Evans *rsize = isalloc(tsdn_fetch(), *ptr); 3220b7eaed25SJason Evans } 3221b7eaed25SJason Evans } 3222b7eaed25SJason Evans return ret; 3223a4bd5210SJason Evans } 3224a4bd5210SJason Evans 3225a4bd5210SJason Evans int 3226b7eaed25SJason Evans je_sallocm(const void *ptr, size_t *rsize, int flags) { 3227a4bd5210SJason Evans assert(rsize != NULL); 3228f921d10fSJason Evans *rsize = je_sallocx(ptr, flags); 3229b7eaed25SJason Evans return ALLOCM_SUCCESS; 3230a4bd5210SJason Evans } 3231a4bd5210SJason Evans 3232a4bd5210SJason Evans int 3233b7eaed25SJason Evans je_dallocm(void *ptr, int flags) { 3234f921d10fSJason Evans je_dallocx(ptr, flags); 3235b7eaed25SJason Evans return ALLOCM_SUCCESS; 3236a4bd5210SJason Evans } 3237a4bd5210SJason Evans 3238a4bd5210SJason Evans int 3239b7eaed25SJason Evans je_nallocm(size_t *rsize, size_t size, int flags) { 3240b7eaed25SJason Evans size_t usize = je_nallocx(size, flags); 3241b7eaed25SJason Evans if (usize == 0) { 3242b7eaed25SJason Evans return ALLOCM_ERR_OOM; 3243b7eaed25SJason Evans } 3244b7eaed25SJason Evans if (rsize != NULL) { 3245a4bd5210SJason Evans *rsize = usize; 3246b7eaed25SJason Evans } 3247b7eaed25SJason Evans return ALLOCM_SUCCESS; 3248a4bd5210SJason Evans } 3249a4bd5210SJason Evans 3250d0e79aa3SJason Evans #undef ALLOCM_LG_ALIGN 3251d0e79aa3SJason Evans #undef ALLOCM_ALIGN 3252d0e79aa3SJason Evans #undef ALLOCM_ZERO 3253d0e79aa3SJason Evans #undef ALLOCM_NO_MOVE 

#undef ALLOCM_SUCCESS
#undef ALLOCM_ERR_OOM
#undef ALLOCM_ERR_NOT_MOVED

/*
 * End compatibility functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be possible
 * to trigger the deadlock described above, but doing so would involve forking
 * via a library constructor that runs before jemalloc's runs.
 */
#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void) {
	/* Force full allocator initialization at library-load time. */
	malloc_init();
}
#endif

/*
 * Pre-fork hook: acquire every allocator mutex so the child inherits them in
 * a consistent (unlocked-by-no-one-else) state.  Exported as
 * _malloc_prefork() when the platform's threading library drives these hooks
 * directly (JEMALLOC_MUTEX_INIT_CB).
 */
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, j, narenas;
	arena_t *arena;

#ifdef JEMALLOC_MUTEX_INIT_CB
	/* The threading library may call this before jemalloc is ready. */
	if (!malloc_initialized()) {
		return;
	}
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	narenas = narenas_total_get();

	witness_prefork(tsd_witness_tsdp_get(tsd));
	/* Acquire all mutexes in a safe order. */
	ctl_prefork(tsd_tsdn(tsd));
	tcache_prefork(tsd_tsdn(tsd));
	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
	if (have_background_thread) {
		background_thread_prefork0(tsd_tsdn(tsd));
	}
	prof_prefork0(tsd_tsdn(tsd));
	if (have_background_thread) {
		background_thread_prefork1(tsd_tsdn(tsd));
	}
	/* Break arena prefork into stages to preserve lock order. */
	/*
	 * Stages 0..7 map to arena_prefork0()..arena_prefork7(); each stage is
	 * applied to every arena before the next stage begins.
	 */
	for (i = 0; i < 8; i++) {
		for (j = 0; j < narenas; j++) {
			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
			    NULL) {
				switch (i) {
				case 0:
					arena_prefork0(tsd_tsdn(tsd), arena);
					break;
				case 1:
					arena_prefork1(tsd_tsdn(tsd), arena);
					break;
				case 2:
					arena_prefork2(tsd_tsdn(tsd), arena);
					break;
				case 3:
					arena_prefork3(tsd_tsdn(tsd), arena);
					break;
				case 4:
					arena_prefork4(tsd_tsdn(tsd), arena);
					break;
				case 5:
					arena_prefork5(tsd_tsdn(tsd), arena);
					break;
				case 6:
					arena_prefork6(tsd_tsdn(tsd), arena);
					break;
				case 7:
					arena_prefork7(tsd_tsdn(tsd), arena);
					break;
				default: not_reached();
				}
			}
		}
	}
	prof_prefork1(tsd_tsdn(tsd));
}

/*
 * Post-fork hook (parent side): release every mutex acquired by the prefork
 * hook, in the reverse order.
 */
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, narenas;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized()) {
		return;
	}
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	witness_postfork_parent(tsd_witness_tsdp_get(tsd));
	/* Release all mutexes, now that fork() has completed.
*/ 3384df0d881dSJason Evans for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { 3385df0d881dSJason Evans arena_t *arena; 3386df0d881dSJason Evans 3387b7eaed25SJason Evans if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { 33881f0a49e8SJason Evans arena_postfork_parent(tsd_tsdn(tsd), arena); 3389a4bd5210SJason Evans } 3390b7eaed25SJason Evans } 33911f0a49e8SJason Evans prof_postfork_parent(tsd_tsdn(tsd)); 3392b7eaed25SJason Evans if (have_background_thread) { 3393b7eaed25SJason Evans background_thread_postfork_parent(tsd_tsdn(tsd)); 3394b7eaed25SJason Evans } 33951f0a49e8SJason Evans malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); 33968244f2aaSJason Evans tcache_postfork_parent(tsd_tsdn(tsd)); 33971f0a49e8SJason Evans ctl_postfork_parent(tsd_tsdn(tsd)); 3398a4bd5210SJason Evans } 3399a4bd5210SJason Evans 3400a4bd5210SJason Evans void 3401b7eaed25SJason Evans jemalloc_postfork_child(void) { 34021f0a49e8SJason Evans tsd_t *tsd; 3403df0d881dSJason Evans unsigned i, narenas; 3404a4bd5210SJason Evans 3405d0e79aa3SJason Evans assert(malloc_initialized()); 340635dad073SJason Evans 34071f0a49e8SJason Evans tsd = tsd_fetch(); 34081f0a49e8SJason Evans 3409b7eaed25SJason Evans witness_postfork_child(tsd_witness_tsdp_get(tsd)); 3410a4bd5210SJason Evans /* Release all mutexes, now that fork() has completed. 
*/ 3411df0d881dSJason Evans for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { 3412df0d881dSJason Evans arena_t *arena; 3413df0d881dSJason Evans 3414b7eaed25SJason Evans if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { 34151f0a49e8SJason Evans arena_postfork_child(tsd_tsdn(tsd), arena); 3416a4bd5210SJason Evans } 3417b7eaed25SJason Evans } 34181f0a49e8SJason Evans prof_postfork_child(tsd_tsdn(tsd)); 3419b7eaed25SJason Evans if (have_background_thread) { 3420b7eaed25SJason Evans background_thread_postfork_child(tsd_tsdn(tsd)); 3421b7eaed25SJason Evans } 34221f0a49e8SJason Evans malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); 34238244f2aaSJason Evans tcache_postfork_child(tsd_tsdn(tsd)); 34241f0a49e8SJason Evans ctl_postfork_child(tsd_tsdn(tsd)); 3425a4bd5210SJason Evans } 3426a4bd5210SJason Evans 34278495e8b1SKonstantin Belousov void 34288495e8b1SKonstantin Belousov _malloc_first_thread(void) 34298495e8b1SKonstantin Belousov { 34308495e8b1SKonstantin Belousov 34318495e8b1SKonstantin Belousov (void)malloc_mutex_first_thread(); 34328495e8b1SKonstantin Belousov } 34338495e8b1SKonstantin Belousov 3434a4bd5210SJason Evans /******************************************************************************/ 3435