#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/atomic.h"
#include "jemalloc/internal/ctl.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/jemalloc_internal_types.h"
#include "jemalloc/internal/malloc_io.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/spin.h"
#include "jemalloc/internal/sz.h"
#include "jemalloc/internal/ticker.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/
/* Data. */

/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
const char	*__malloc_options_1_0 = NULL;
__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);

/* Runtime configuration options. */
const char	*je_malloc_conf
#ifndef _WIN32
    JEMALLOC_ATTR(weak)
#endif
    ;
bool	opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
bool	opt_abort_conf =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
const char	*opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    "true"
#else
    "false"
#endif
    ;
bool	opt_junk_alloc =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
bool	opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;

bool	opt_utrace = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
unsigned	opt_narenas = 0;

unsigned	ncpus;

/* Protects arenas initialization. */
malloc_mutex_t arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the
 * application takes some action to create them and allocate from them.
 *
 * Points to an arena_t.
 */
JEMALLOC_ALIGNED(CACHELINE)
atomic_p_t		arenas[MALLOCX_ARENA_LIMIT];
static atomic_u_t	narenas_total; /* Use narenas_total_*(). */
static arena_t		*a0; /* arenas[0]; read-only after initialization. */
unsigned		narenas_auto; /* Read-only after initialization. */

typedef enum {
	malloc_init_uninitialized	= 3,
	malloc_init_a0_initialized	= 2,
	malloc_init_recursible		= 1,
	malloc_init_initialized		= 0 /* Common case --> jnz. */
} malloc_init_t;
static malloc_init_t	malloc_init_state = malloc_init_uninitialized;

/* False should be the common case.  Set to true to trigger initialization. */
bool	malloc_slow = true;

/* When malloc_slow is true, set the corresponding bits for sanity check. */
enum {
	flag_opt_junk_alloc	= (1U),
	flag_opt_junk_free	= (1U << 1),
	flag_opt_zero		= (1U << 2),
	flag_opt_utrace		= (1U << 3),
	flag_opt_xmalloc	= (1U << 4)
};
static uint8_t	malloc_slow_flags;
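
/*
 * Example (illustrative only, assuming config_fill):
 * MALLOC_CONF="junk:free,zero:true" sets opt_junk_free and opt_zero, so
 * malloc_slow_flag_init() computes
 * malloc_slow_flags == (flag_opt_junk_free | flag_opt_zero) == 0x06,
 * and malloc_slow becomes true, routing allocations through the slow paths.
 */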

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER	((unsigned long)0)
#  define INITIALIZER		pthread_self()
#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t		malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER	false
#  define INITIALIZER		true
#  define IS_INITIALIZER	malloc_initializer
static bool			malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
#if _WIN32_WINNT >= 0x0600
static malloc_mutex_t	init_lock = SRWLOCK_INIT;
#else
static malloc_mutex_t	init_lock;
static bool init_lock_initialized = false;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void) {
	/*
	 * If another constructor in the same binary is using mallctl to e.g.
	 * set up extent hooks, it may end up running before this one, and
	 * malloc_init_hard will crash trying to lock the uninitialized lock.
	 * So we force an initialization of the lock in malloc_init_hard as
	 * well.  We don't try to care about atomicity of the accesses to the
	 * init_lock_initialized boolean, since it really only matters early
	 * in process creation, before any separate thread normally starts
	 * doing anything.
	 */
	if (!init_lock_initialized) {
		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT,
		    malloc_mutex_rank_exclusive);
	}
	init_lock_initialized = true;
}

#ifdef _MSC_VER
#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
#endif
#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
#  define UTRACE(a, b, c) do {						\
	if (unlikely(opt_utrace)) {					\
		int utrace_serrno = errno;				\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
		errno = utrace_serrno;					\
	}								\
} while (0)
#else
#  define UTRACE(a, b, c)
#endif

/* Whether we encountered any invalid config options. */
static bool had_conf_error = false;

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool	malloc_init_hard_a0(void);
static bool	malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

bool
malloc_initialized(void) {
	return (malloc_init_state == malloc_init_initialized);
}

JEMALLOC_ALWAYS_INLINE bool
malloc_init_a0(void) {
	if (unlikely(malloc_init_state == malloc_init_uninitialized)) {
		return malloc_init_hard_a0();
	}
	return false;
}

JEMALLOC_ALWAYS_INLINE bool
malloc_init(void) {
	if (unlikely(!malloc_initialized()) && malloc_init_hard()) {
		return true;
	}
	return false;
}

/*
 * The a0*() functions are used instead of i{d,}alloc() in situations that
 * cannot tolerate TLS variable access.
 */

static void *
a0ialloc(size_t size, bool zero, bool is_internal) {
	if (unlikely(malloc_init_a0())) {
		return NULL;
	}

	return iallocztm(TSDN_NULL, size, sz_size2index(size), zero, NULL,
	    is_internal, arena_get(TSDN_NULL, 0, true), true);
}

static void
a0idalloc(void *ptr, bool is_internal) {
	idalloctm(TSDN_NULL, ptr, NULL, NULL, is_internal, true);
}

void *
a0malloc(size_t size) {
	return a0ialloc(size, false, true);
}

void
a0dalloc(void *ptr) {
	a0idalloc(ptr, true);
}
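
/*
 * Illustrative example: a0malloc()/a0dalloc() back allocations made before
 * TLS is usable; arena_tdata_get_hard() below uses them for the per-thread
 * arenas_tdata array:
 *
 *	arenas_tdata = (arena_tdata_t *)a0malloc(
 *	    sizeof(arena_tdata_t) * narenas_tdata);
 */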

/*
 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access (TLS allocation and very
 * early internal data structure initialization).
 */

void *
bootstrap_malloc(size_t size) {
	if (unlikely(size == 0)) {
		size = 1;
	}

	return a0ialloc(size, false, false);
}

void *
bootstrap_calloc(size_t num, size_t size) {
	size_t num_size;

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		assert(num == 0 || size == 0);
		num_size = 1;
	}

	return a0ialloc(num_size, true, false);
}

void
bootstrap_free(void *ptr) {
	if (unlikely(ptr == NULL)) {
		return;
	}

	a0idalloc(ptr, false);
}

void
arena_set(unsigned ind, arena_t *arena) {
	atomic_store_p(&arenas[ind], arena, ATOMIC_RELEASE);
}

static void
narenas_total_set(unsigned narenas) {
	atomic_store_u(&narenas_total, narenas, ATOMIC_RELEASE);
}

static void
narenas_total_inc(void) {
	atomic_fetch_add_u(&narenas_total, 1, ATOMIC_RELEASE);
}

unsigned
narenas_total_get(void) {
	return atomic_load_u(&narenas_total, ATOMIC_ACQUIRE);
}
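
/*
 * Note on the atomics above: arena_set() publishes a fully constructed arena
 * with an ATOMIC_RELEASE store, and readers (see arena_get()) load arenas[]
 * with ATOMIC_ACQUIRE, so they never observe a partially initialized arena_t.
 * The count and the array are not updated together atomically, which is why
 * arena_init_locked() below re-checks the individual slot.
 */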

/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;

	assert(ind <= narenas_total_get());
	if (ind >= MALLOCX_ARENA_LIMIT) {
		return NULL;
	}
	if (ind == narenas_total_get()) {
		narenas_total_inc();
	}

	/*
	 * Another thread may have already initialized arenas[ind] if it's an
	 * auto arena.
	 */
	arena = arena_get(tsdn, ind, false);
	if (arena != NULL) {
		assert(ind < narenas_auto);
		return arena;
	}

	/* Actually initialize the arena. */
	arena = arena_new(tsdn, ind, extent_hooks);

	return arena;
}

static void
arena_new_create_background_thread(tsdn_t *tsdn, unsigned ind) {
	if (ind == 0) {
		return;
	}
	if (have_background_thread) {
		bool err;
		malloc_mutex_lock(tsdn, &background_thread_lock);
		err = background_thread_create(tsdn_tsd(tsdn), ind);
		malloc_mutex_unlock(tsdn, &background_thread_lock);
		if (err) {
			malloc_printf("<jemalloc>: error in background thread "
			    "creation for arena %u. Abort.\n", ind);
			abort();
		}
	}
}

arena_t *
arena_init(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;

	malloc_mutex_lock(tsdn, &arenas_lock);
	arena = arena_init_locked(tsdn, ind, extent_hooks);
	malloc_mutex_unlock(tsdn, &arenas_lock);

	arena_new_create_background_thread(tsdn, ind);

	return arena;
}

static void
arena_bind(tsd_t *tsd, unsigned ind, bool internal) {
	arena_t *arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_inc(arena, internal);

	if (internal) {
		tsd_iarena_set(tsd, arena);
	} else {
		tsd_arena_set(tsd, arena);
	}
}

void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) {
	arena_t *oldarena, *newarena;

	oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
	newarena = arena_get(tsd_tsdn(tsd), newind, false);
	arena_nthreads_dec(oldarena, false);
	arena_nthreads_inc(newarena, false);
	tsd_arena_set(tsd, newarena);
}

static void
arena_unbind(tsd_t *tsd, unsigned ind, bool internal) {
	arena_t *arena;

	arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_dec(arena, internal);

	if (internal) {
		tsd_iarena_set(tsd, NULL);
	} else {
		tsd_arena_set(tsd, NULL);
	}
}
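
/*
 * Illustrative sketch: arena_migrate() above is what ultimately runs when a
 * thread rebinds itself via the "thread.arena" mallctl, e.g. (hypothetical
 * caller):
 *
 *	unsigned ind = 1;
 *	mallctl("thread.arena", NULL, NULL, &ind, sizeof(ind));
 */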

arena_tdata_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind) {
	arena_tdata_t *tdata, *arenas_tdata_old;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
	unsigned narenas_tdata_old, i;
	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
	unsigned narenas_actual = narenas_total_get();

	/*
	 * Dissociate old tdata array (and set up for deallocation upon return)
	 * if it's too small.
	 */
	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
		arenas_tdata_old = arenas_tdata;
		narenas_tdata_old = narenas_tdata;
		arenas_tdata = NULL;
		narenas_tdata = 0;
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	} else {
		arenas_tdata_old = NULL;
		narenas_tdata_old = 0;
	}

	/* Allocate tdata array if it's missing. */
	if (arenas_tdata == NULL) {
		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;

		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
			*arenas_tdata_bypassp = true;
			arenas_tdata = (arena_tdata_t *)a0malloc(
			    sizeof(arena_tdata_t) * narenas_tdata);
			*arenas_tdata_bypassp = false;
		}
		if (arenas_tdata == NULL) {
			tdata = NULL;
			goto label_return;
		}
		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	}

	/*
	 * Copy to tdata array.  It's possible that the actual number of arenas
	 * has increased since narenas_total_get() was called above, but that
	 * causes no correctness issues unless two threads concurrently execute
	 * the arenas.create mallctl, which we trust mallctl synchronization to
	 * prevent.
	 */

	/* Copy/initialize tickers. */
	for (i = 0; i < narenas_actual; i++) {
		if (i < narenas_tdata_old) {
			ticker_copy(&arenas_tdata[i].decay_ticker,
			    &arenas_tdata_old[i].decay_ticker);
		} else {
			ticker_init(&arenas_tdata[i].decay_ticker,
			    DECAY_NTICKS_PER_UPDATE);
		}
	}
	if (narenas_tdata > narenas_actual) {
		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
		    * (narenas_tdata - narenas_actual));
	}

	/* Read the refreshed tdata array. */
	tdata = &arenas_tdata[ind];
label_return:
	if (arenas_tdata_old != NULL) {
		a0dalloc(arenas_tdata_old);
	}
	return tdata;
}

/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd, bool internal) {
	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);

	if (have_percpu_arena && PERCPU_ARENA_ENABLED(opt_percpu_arena)) {
		unsigned choose = percpu_arena_choose();
		ret = arena_get(tsd_tsdn(tsd), choose, true);
		assert(ret != NULL);
		arena_bind(tsd, arena_ind_get(ret), false);
		arena_bind(tsd, arena_ind_get(ret), true);

		return ret;
	}

	if (narenas_auto > 1) {
		unsigned i, j, choose[2], first_null;
		bool is_new_arena[2];

		/*
		 * Determine binding for both non-internal and internal
		 * allocation.
		 *
		 *   choose[0]: For application allocation.
		 *   choose[1]: For internal metadata allocation.
		 */

		for (j = 0; j < 2; j++) {
			choose[j] = 0;
			is_new_arena[j] = false;
		}

		first_null = narenas_auto;
		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				for (j = 0; j < 2; j++) {
					if (arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), i, false), !!j) <
					    arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), choose[j], false),
					    !!j)) {
						choose[j] = i;
					}
				}
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		for (j = 0; j < 2; j++) {
			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
			    choose[j], false), !!j) == 0 || first_null ==
			    narenas_auto) {
				/*
				 * Use an unloaded arena, or the least loaded
				 * arena if all arenas are already initialized.
				 */
				if (!!j == internal) {
					ret = arena_get(tsd_tsdn(tsd),
					    choose[j], false);
				}
			} else {
				arena_t *arena;

				/* Initialize a new arena. */
				choose[j] = first_null;
				arena = arena_init_locked(tsd_tsdn(tsd),
				    choose[j],
				    (extent_hooks_t *)&extent_hooks_default);
				if (arena == NULL) {
					malloc_mutex_unlock(tsd_tsdn(tsd),
					    &arenas_lock);
					return NULL;
				}
				is_new_arena[j] = true;
				if (!!j == internal) {
					ret = arena;
				}
			}
			arena_bind(tsd, choose[j], !!j);
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);

		for (j = 0; j < 2; j++) {
			if (is_new_arena[j]) {
				assert(choose[j] > 0);
				arena_new_create_background_thread(
				    tsd_tsdn(tsd), choose[j]);
			}
		}
	} else {
		ret = arena_get(tsd_tsdn(tsd), 0, false);
		arena_bind(tsd, 0, false);
		arena_bind(tsd, 0, true);
	}

	return ret;
}
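
/*
 * Illustrative example of the selection above: with narenas_auto == 4, a new
 * thread scans arenas 1..3 and tracks two candidates independently --
 * choose[0] ends up at the arena with the fewest application threads and
 * choose[1] at the one with the fewest internal bindings; a still
 * uninitialized slot is used instead when the best candidate already has
 * threads bound to it.
 */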

void
iarena_cleanup(tsd_t *tsd) {
	arena_t *iarena;

	iarena = tsd_iarena_get(tsd);
	if (iarena != NULL) {
		arena_unbind(tsd, arena_ind_get(iarena), true);
	}
}

void
arena_cleanup(tsd_t *tsd) {
	arena_t *arena;

	arena = tsd_arena_get(tsd);
	if (arena != NULL) {
		arena_unbind(tsd, arena_ind_get(arena), false);
	}
}

void
arenas_tdata_cleanup(tsd_t *tsd) {
	arena_tdata_t *arenas_tdata;

	/* Prevent tsd->arenas_tdata from being (re)created. */
	*tsd_arenas_tdata_bypassp_get(tsd) = true;

	arenas_tdata = tsd_arenas_tdata_get(tsd);
	if (arenas_tdata != NULL) {
		tsd_arenas_tdata_set(tsd, NULL);
		a0dalloc(arenas_tdata);
	}
}

static void
stats_print_atexit(void) {
	if (config_stats) {
		tsdn_t *tsdn;
		unsigned narenas, i;

		tsdn = tsdn_fetch();

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arena_get(tsdn, i, false);
			if (arena != NULL) {
				tcache_t *tcache;

				malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tsdn, tcache, arena);
				}
				malloc_mutex_unlock(tsdn,
				    &arena->tcache_ql_mtx);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, opt_stats_print_opts);
}

/*
 * Ensure that we don't hold any locks upon entry to or exit from allocator
 * code (in a "broad" sense that doesn't count a reentrant allocation as an
 * entrance or exit).
 */
JEMALLOC_ALWAYS_INLINE void
check_entry_exit_locking(tsdn_t *tsdn) {
	if (!config_debug) {
		return;
	}
	if (tsdn_null(tsdn)) {
		return;
	}
	tsd_t *tsd = tsdn_tsd(tsdn);
	/*
	 * It's possible we hold locks at entry/exit if we're in a nested
	 * allocation.
	 */
	int8_t reentrancy_level = tsd_reentrancy_level_get(tsd);
	if (reentrancy_level != 0) {
		return;
	}
	witness_assert_lockless(tsdn_witness_tsdp_get(tsdn));
}
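
/*
 * Illustrative use: the public entry points (e.g. je_malloc()) are expected
 * to call check_entry_exit_locking() on the way in and on the way out, so
 * with config_debug a mutex leaked by internal code trips the witness
 * assertion right at the API boundary.
 */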

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static char *
jemalloc_secure_getenv(const char *name) {
#ifdef JEMALLOC_HAVE_SECURE_GETENV
	return secure_getenv(name);
#else
#  ifdef JEMALLOC_HAVE_ISSETUGID
	if (issetugid() != 0) {
		return NULL;
	}
#  endif
	return getenv(name);
#endif
}

static unsigned
malloc_ncpus(void) {
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
	/*
	 * glibc >= 2.6 has the CPU_COUNT macro.
	 *
	 * glibc's sysconf() uses isspace().  glibc allocates for the first
	 * time *before* setting up the isspace tables.  Therefore we need a
	 * different method to get the number of CPUs.
	 */
	{
		cpu_set_t set;

		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
		result = CPU_COUNT(&set);
	}
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	return ((result == -1) ? 1 : (unsigned)result);
}

static void
init_opt_stats_print_opts(const char *v, size_t vlen) {
	size_t opts_len = strlen(opt_stats_print_opts);
	assert(opts_len <= stats_print_tot_num_options);

	for (size_t i = 0; i < vlen; i++) {
		switch (v[i]) {
#define OPTION(o, v, d, s) case o: break;
			STATS_PRINT_OPTIONS
#undef OPTION
		default: continue;
		}

		if (strchr(opt_stats_print_opts, v[i]) != NULL) {
			/* Ignore repeated. */
			continue;
		}

		opt_stats_print_opts[opts_len++] = v[i];
		opt_stats_print_opts[opts_len] = '\0';
		assert(opts_len <= stats_print_tot_num_options);
	}
	assert(opts_len == strlen(opt_stats_print_opts));
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p) {
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; !accept;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return true;
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return true;
		}
	}

	for (accept = false; !accept;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return false;
}
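
/*
 * Parsing example (illustrative): given opts = "narenas:4,junk:free", the
 * first call to malloc_conf_next() returns k = "narenas" (klen == 7) and
 * v = "4" (vlen == 1), leaving *opts_p at "junk:free"; the second call
 * returns k = "junk", v = "free" and consumes the rest of the string.
 */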

static void
malloc_abort_invalid_conf(void) {
	assert(opt_abort_conf);
	malloc_printf("<jemalloc>: Abort (abort_conf:true) on invalid conf "
	    "value (see above).\n");
	abort();
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen) {
	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
	had_conf_error = true;
	if (opt_abort_conf) {
		malloc_abort_invalid_conf();
	}
}

static void
malloc_slow_flag_init(void) {
	/*
	 * Combine the runtime options into malloc_slow for fast path.  Called
	 * after processing all the options.
	 */
	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
	    | (opt_junk_free ? flag_opt_junk_free : 0)
	    | (opt_zero ? flag_opt_zero : 0)
	    | (opt_utrace ? flag_opt_utrace : 0)
	    | (opt_xmalloc ? flag_opt_xmalloc : 0);

	malloc_slow = (malloc_slow_flags != 0);
}

static void
malloc_conf_init(void) {
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	for (i = 0; i < 4; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			opts = config_malloc_conf;
			break;
		case 1:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 2: {
			ssize_t linklen = 0;
#ifndef _WIN32
			int saved_errno = errno;
			const char *linkname =
#  ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#  else
			    "/etc/malloc.conf"
#  endif
			    ;

			/*
			 * Try to use the contents of the "/etc/malloc.conf"
			 * symbolic link's name.
			 */
			linklen = readlink(linkname, buf, sizeof(buf) - 1);
			if (linklen == -1) {
				/* No configuration specified. */
				linklen = 0;
				/* Restore errno. */
				set_errno(saved_errno);
			}
#endif
			buf[linklen] = '\0';
			opts = buf;
			break;
		} case 3: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = jemalloc_secure_getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			not_reached();
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen)) {
#define CONF_MATCH(n)							\
	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_MATCH_VALUE(n)						\
	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
#define CONF_HANDLE_BOOL(o, n)						\
			if (CONF_MATCH(n)) {				\
				if (CONF_MATCH_VALUE("true")) {		\
					o = true;			\
				} else if (CONF_MATCH_VALUE("false")) {	\
					o = false;			\
				} else {				\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define CONF_MIN_no(um, min)	false
#define CONF_MIN_yes(um, min)	((um) < (min))
#define CONF_MAX_no(um, max)	false
#define CONF_MAX_yes(um, max)	((um) > (max))
#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip)	\
			if (CONF_MATCH(n)) {				\
				uintmax_t um;				\
				char *end;				\
									\
				set_errno(0);				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (clip) {			\
					if (CONF_MIN_##check_min(um,	\
					    (t)(min))) {		\
						o = (t)(min);		\
					} else if (			\
					    CONF_MAX_##check_max(um,	\
					    (t)(max))) {		\
						o = (t)(max);		\
					} else {			\
						o = (t)um;		\
					}				\
				} else {				\
					if (CONF_MIN_##check_min(um,	\
					    (t)(min)) ||		\
					    CONF_MAX_##check_max(um,	\
					    (t)(max))) {		\
						malloc_conf_error(	\
						    "Out-of-range "	\
						    "conf value",	\
						    k, klen, v, vlen);	\
					} else {			\
						o = (t)um;		\
					}				\
				}					\
				continue;				\
			}
#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max,	\
    clip)								\
	CONF_HANDLE_T_U(unsigned, o, n, min, max,			\
	    check_min, check_max, clip)
#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip)	\
	CONF_HANDLE_T_U(size_t, o, n, min, max,				\
	    check_min, check_max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (CONF_MATCH(n)) {				\
				long l;					\
				char *end;				\
									\
				set_errno(0);				\
				l = strtol(v, &end, 0);			\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)(min) || l >	\
				    (ssize_t)(max)) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else {				\
					o = l;				\
				}					\
				continue;				\
			}
#define CONF_HANDLE_CHAR_P(o, n, d)					\
			if (CONF_MATCH(n)) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}
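
			/*
			 * Example (illustrative): CONF_HANDLE_BOOL(opt_abort,
			 * "abort") expands to a CONF_MATCH("abort") test that
			 * parses "true"/"false" into opt_abort and then
			 * continues to the next key/value pair; every handler
			 * below follows the same match/parse/continue shape.
			 */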

			CONF_HANDLE_BOOL(opt_abort, "abort")
			CONF_HANDLE_BOOL(opt_abort_conf, "abort_conf")
			if (opt_abort_conf && had_conf_error) {
				malloc_abort_invalid_conf();
			}
			CONF_HANDLE_BOOL(opt_retain, "retain")
			if (strncmp("dss", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < dss_prec_limit; i++) {
					if (strncmp(dss_prec_names[i], v, vlen)
					    == 0) {
						if (extent_dss_prec_set(i)) {
							malloc_conf_error(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
							    dss_prec_names[i];
							match = true;
							break;
						}
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
			    UINT_MAX, yes, no, false)
			CONF_HANDLE_SSIZE_T(opt_dirty_decay_ms,
			    "dirty_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
			    SSIZE_MAX);
			CONF_HANDLE_SSIZE_T(opt_muzzy_decay_ms,
			    "muzzy_decay_ms", -1, NSTIME_SEC_MAX * KQU(1000) <
			    QU(SSIZE_MAX) ? NSTIME_SEC_MAX * KQU(1000) :
			    SSIZE_MAX);
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (CONF_MATCH("stats_print_opts")) {
				init_opt_stats_print_opts(v, vlen);
				continue;
			}
			if (config_fill) {
				if (CONF_MATCH("junk")) {
					if (CONF_MATCH_VALUE("true")) {
						opt_junk = "true";
						opt_junk_alloc = opt_junk_free =
						    true;
					} else if (CONF_MATCH_VALUE("false")) {
						opt_junk = "false";
						opt_junk_alloc = opt_junk_free =
						    false;
					} else if (CONF_MATCH_VALUE("alloc")) {
						opt_junk = "alloc";
						opt_junk_alloc = true;
						opt_junk_free = false;
					} else if (CONF_MATCH_VALUE("free")) {
						opt_junk = "free";
						opt_junk_alloc = false;
						opt_junk_free = true;
					} else {
						malloc_conf_error(
						    "Invalid conf value", k,
						    klen, v, vlen);
					}
					continue;
				}
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			CONF_HANDLE_BOOL(opt_tcache, "tcache")
			CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, "lg_tcache_max",
			    -1, (sizeof(size_t) << 3) - 1)
			if (strncmp("percpu_arena", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = percpu_arena_mode_names_base; i <
				    percpu_arena_mode_names_limit; i++) {
					if (strncmp(percpu_arena_mode_names[i],
					    v, vlen) == 0) {
						if (!have_percpu_arena) {
							malloc_conf_error(
							    "No getcpu support",
							    k, klen, v, vlen);
						}
						opt_percpu_arena = i;
						match = true;
						break;
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_BOOL(opt_background_thread,
			    "background_thread");
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
CONF_HANDLE_BOOL(opt_prof_thread_active_init, 1168*b7eaed25SJason Evans "prof_thread_active_init") 1169d0e79aa3SJason Evans CONF_HANDLE_SIZE_T(opt_lg_prof_sample, 11707fa7f12fSJason Evans "lg_prof_sample", 0, (sizeof(uint64_t) << 3) 11717fa7f12fSJason Evans - 1, no, yes, true) 1172*b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") 1173a4bd5210SJason Evans CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, 11748ed34ab0SJason Evans "lg_prof_interval", -1, 1175a4bd5210SJason Evans (sizeof(uint64_t) << 3) - 1) 1176*b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") 1177*b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_prof_final, "prof_final") 1178*b7eaed25SJason Evans CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") 1179a4bd5210SJason Evans } 1180a4bd5210SJason Evans malloc_conf_error("Invalid conf pair", k, klen, v, 1181a4bd5210SJason Evans vlen); 1182d0e79aa3SJason Evans #undef CONF_MATCH 11837fa7f12fSJason Evans #undef CONF_MATCH_VALUE 1184a4bd5210SJason Evans #undef CONF_HANDLE_BOOL 11857fa7f12fSJason Evans #undef CONF_MIN_no 11867fa7f12fSJason Evans #undef CONF_MIN_yes 11877fa7f12fSJason Evans #undef CONF_MAX_no 11887fa7f12fSJason Evans #undef CONF_MAX_yes 11897fa7f12fSJason Evans #undef CONF_HANDLE_T_U 11907fa7f12fSJason Evans #undef CONF_HANDLE_UNSIGNED 1191a4bd5210SJason Evans #undef CONF_HANDLE_SIZE_T 1192a4bd5210SJason Evans #undef CONF_HANDLE_SSIZE_T 1193a4bd5210SJason Evans #undef CONF_HANDLE_CHAR_P 1194a4bd5210SJason Evans } 1195a4bd5210SJason Evans } 1196a4bd5210SJason Evans } 1197a4bd5210SJason Evans 1198a4bd5210SJason Evans static bool 1199*b7eaed25SJason Evans malloc_init_hard_needed(void) { 1200d0e79aa3SJason Evans if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == 1201d0e79aa3SJason Evans malloc_init_recursible)) { 1202a4bd5210SJason Evans /* 1203a4bd5210SJason Evans * Another thread initialized the allocator before this one 1204a4bd5210SJason Evans * acquired init_lock, or this thread is the initializing 1205a4bd5210SJason Evans * thread, and it is recursively allocating. 1206a4bd5210SJason Evans */ 1207*b7eaed25SJason Evans return false; 1208a4bd5210SJason Evans } 1209a4bd5210SJason Evans #ifdef JEMALLOC_THREADED_INIT 1210d0e79aa3SJason Evans if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { 1211a4bd5210SJason Evans /* Busy-wait until the initializing thread completes. */ 1212*b7eaed25SJason Evans spin_t spinner = SPIN_INITIALIZER; 1213a4bd5210SJason Evans do { 1214bde95144SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 1215bde95144SJason Evans spin_adaptive(&spinner); 1216bde95144SJason Evans malloc_mutex_lock(TSDN_NULL, &init_lock); 1217d0e79aa3SJason Evans } while (!malloc_initialized()); 1218*b7eaed25SJason Evans return false; 1219a4bd5210SJason Evans } 1220a4bd5210SJason Evans #endif 1221*b7eaed25SJason Evans return true; 1222d0e79aa3SJason Evans } 1223d0e79aa3SJason Evans 1224d0e79aa3SJason Evans static bool 1225*b7eaed25SJason Evans malloc_init_hard_a0_locked() { 1226a4bd5210SJason Evans malloc_initializer = INITIALIZER; 1227a4bd5210SJason Evans 1228*b7eaed25SJason Evans if (config_prof) { 1229a4bd5210SJason Evans prof_boot0(); 1230*b7eaed25SJason Evans } 1231a4bd5210SJason Evans malloc_conf_init(); 1232a4bd5210SJason Evans if (opt_stats_print) { 1233a4bd5210SJason Evans /* Print statistics at exit. 
*/ 1234a4bd5210SJason Evans if (atexit(stats_print_atexit) != 0) { 1235a4bd5210SJason Evans malloc_write("<jemalloc>: Error in atexit()\n"); 1236*b7eaed25SJason Evans if (opt_abort) { 1237a4bd5210SJason Evans abort(); 1238a4bd5210SJason Evans } 1239a4bd5210SJason Evans } 1240*b7eaed25SJason Evans } 1241*b7eaed25SJason Evans if (pages_boot()) { 1242*b7eaed25SJason Evans return true; 1243*b7eaed25SJason Evans } 1244*b7eaed25SJason Evans if (base_boot(TSDN_NULL)) { 1245*b7eaed25SJason Evans return true; 1246*b7eaed25SJason Evans } 1247*b7eaed25SJason Evans if (extent_boot()) { 1248*b7eaed25SJason Evans return true; 1249*b7eaed25SJason Evans } 1250*b7eaed25SJason Evans if (ctl_boot()) { 1251*b7eaed25SJason Evans return true; 1252*b7eaed25SJason Evans } 1253*b7eaed25SJason Evans if (config_prof) { 1254a4bd5210SJason Evans prof_boot1(); 1255*b7eaed25SJason Evans } 1256bde95144SJason Evans arena_boot(); 1257*b7eaed25SJason Evans if (tcache_boot(TSDN_NULL)) { 1258*b7eaed25SJason Evans return true; 1259*b7eaed25SJason Evans } 1260*b7eaed25SJason Evans if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS, 1261*b7eaed25SJason Evans malloc_mutex_rank_exclusive)) { 1262*b7eaed25SJason Evans return true; 1263*b7eaed25SJason Evans } 1264a4bd5210SJason Evans /* 1265a4bd5210SJason Evans * Create enough scaffolding to allow recursive allocation in 1266a4bd5210SJason Evans * malloc_ncpus(). 1267a4bd5210SJason Evans */ 1268df0d881dSJason Evans narenas_auto = 1; 126982872ac0SJason Evans memset(arenas, 0, sizeof(arena_t *) * narenas_auto); 1270a4bd5210SJason Evans /* 1271a4bd5210SJason Evans * Initialize one arena here. The rest are lazily created in 1272d0e79aa3SJason Evans * arena_choose_hard(). 1273a4bd5210SJason Evans */ 1274*b7eaed25SJason Evans if (arena_init(TSDN_NULL, 0, (extent_hooks_t *)&extent_hooks_default) 1275*b7eaed25SJason Evans == NULL) { 1276*b7eaed25SJason Evans return true; 1277*b7eaed25SJason Evans } 1278*b7eaed25SJason Evans a0 = arena_get(TSDN_NULL, 0, false); 1279d0e79aa3SJason Evans malloc_init_state = malloc_init_a0_initialized; 12801f0a49e8SJason Evans 1281*b7eaed25SJason Evans return false; 1282a4bd5210SJason Evans } 1283a4bd5210SJason Evans 1284d0e79aa3SJason Evans static bool 1285*b7eaed25SJason Evans malloc_init_hard_a0(void) { 1286d0e79aa3SJason Evans bool ret; 1287d0e79aa3SJason Evans 12881f0a49e8SJason Evans malloc_mutex_lock(TSDN_NULL, &init_lock); 1289d0e79aa3SJason Evans ret = malloc_init_hard_a0_locked(); 12901f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 1291*b7eaed25SJason Evans return ret; 1292a4bd5210SJason Evans } 1293a4bd5210SJason Evans 12941f0a49e8SJason Evans /* Initialize data structures which may trigger recursive allocation. */ 1295df0d881dSJason Evans static bool 1296*b7eaed25SJason Evans malloc_init_hard_recursible(void) { 1297d0e79aa3SJason Evans malloc_init_state = malloc_init_recursible; 1298df0d881dSJason Evans 1299a4bd5210SJason Evans ncpus = malloc_ncpus(); 1300f921d10fSJason Evans 13017fa7f12fSJason Evans #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ 13027fa7f12fSJason Evans && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ 13037fa7f12fSJason Evans !defined(__native_client__)) 1304df0d881dSJason Evans /* LinuxThreads' pthread_atfork() allocates. 
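 * That allocation is why this call sits in
 * malloc_init_hard_recursible(): malloc_init_state was set to
 * malloc_init_recursible just above, so a recursive malloc() from
 * inside pthread_atfork() is recognized by malloc_init_hard_needed()
 * and served from the already-bootstrapped a0 rather than re-entering
 * the full initialization sequence.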
*/
1305f921d10fSJason Evans if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
1306f921d10fSJason Evans jemalloc_postfork_child) != 0) {
1307f921d10fSJason Evans malloc_write("<jemalloc>: Error in pthread_atfork()\n");
1308*b7eaed25SJason Evans if (opt_abort) {
1309f921d10fSJason Evans abort();
1310*b7eaed25SJason Evans }
1311*b7eaed25SJason Evans return true;
1312f921d10fSJason Evans }
1313f921d10fSJason Evans #endif
1314df0d881dSJason Evans
1315*b7eaed25SJason Evans if (background_thread_boot0()) {
1316*b7eaed25SJason Evans return true;
1317a4bd5210SJason Evans }
1318a4bd5210SJason Evans
1319*b7eaed25SJason Evans return false;
1320*b7eaed25SJason Evans }
1321d0e79aa3SJason Evans
1322*b7eaed25SJason Evans static unsigned
1323*b7eaed25SJason Evans malloc_narenas_default(void) {
1324*b7eaed25SJason Evans assert(ncpus > 0);
1325a4bd5210SJason Evans /*
1326a4bd5210SJason Evans * For SMP systems, create more than one arena per CPU by
1327a4bd5210SJason Evans * default.
1328a4bd5210SJason Evans */
1329*b7eaed25SJason Evans if (ncpus > 1) {
1330*b7eaed25SJason Evans return ncpus << 2;
1331*b7eaed25SJason Evans } else {
1332*b7eaed25SJason Evans return 1;
1333a4bd5210SJason Evans }
1334*b7eaed25SJason Evans }
1335*b7eaed25SJason Evans
1336*b7eaed25SJason Evans static percpu_arena_mode_t
1337*b7eaed25SJason Evans percpu_arena_as_initialized(percpu_arena_mode_t mode) {
1338*b7eaed25SJason Evans assert(!malloc_initialized());
1339*b7eaed25SJason Evans assert(mode <= percpu_arena_disabled);
1340*b7eaed25SJason Evans
1341*b7eaed25SJason Evans if (mode != percpu_arena_disabled) {
1342*b7eaed25SJason Evans mode += percpu_arena_mode_enabled_base;
1343*b7eaed25SJason Evans }
1344*b7eaed25SJason Evans
1345*b7eaed25SJason Evans return mode;
1346*b7eaed25SJason Evans }
1347*b7eaed25SJason Evans
1348*b7eaed25SJason Evans static bool
1349*b7eaed25SJason Evans malloc_init_narenas(void) {
1350*b7eaed25SJason Evans assert(ncpus > 0);
1351*b7eaed25SJason Evans
1352*b7eaed25SJason Evans if (opt_percpu_arena != percpu_arena_disabled) {
1353*b7eaed25SJason Evans if (!have_percpu_arena || malloc_getcpu() < 0) {
1354*b7eaed25SJason Evans opt_percpu_arena = percpu_arena_disabled;
1355*b7eaed25SJason Evans malloc_printf("<jemalloc>: perCPU arena getcpu() not "
1356*b7eaed25SJason Evans "available. Setting narenas to %u.\n", opt_narenas ?
1357*b7eaed25SJason Evans opt_narenas : malloc_narenas_default());
1358*b7eaed25SJason Evans if (opt_abort) {
1359*b7eaed25SJason Evans abort();
1360*b7eaed25SJason Evans }
1361*b7eaed25SJason Evans } else {
1362*b7eaed25SJason Evans if (ncpus >= MALLOCX_ARENA_LIMIT) {
1363*b7eaed25SJason Evans malloc_printf("<jemalloc>: narenas w/ percpu "
1364*b7eaed25SJason Evans "arena beyond limit (%u)\n", ncpus);
1365*b7eaed25SJason Evans if (opt_abort) {
1366*b7eaed25SJason Evans abort();
1367*b7eaed25SJason Evans }
1368*b7eaed25SJason Evans return true;
1369*b7eaed25SJason Evans }
1370*b7eaed25SJason Evans /* NB: opt_percpu_arena isn't fully initialized yet.
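 * percpu_arena_as_initialized() is therefore applied on the fly in the
 * checks below. Worked example (assuming the usual mapping of one
 * arena per CPU, or per physical-CPU pair): on an 8-CPU machine with
 * hyperthreading, "percpu_arena:percpu" needs an arena index limit of
 * 8 and "percpu_arena:phycpu" a limit of 4; an odd ncpus cannot be
 * split into physical pairs, which is what the error below rejects.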
*/
1371*b7eaed25SJason Evans if (percpu_arena_as_initialized(opt_percpu_arena) ==
1372*b7eaed25SJason Evans per_phycpu_arena && ncpus % 2 != 0) {
1373*b7eaed25SJason Evans malloc_printf("<jemalloc>: invalid "
1374*b7eaed25SJason Evans "configuration -- per physical CPU arena "
1375*b7eaed25SJason Evans "with odd number (%u) of CPUs (no hyper "
1376*b7eaed25SJason Evans "threading?).\n", ncpus);
1377*b7eaed25SJason Evans if (opt_abort) {
1378*b7eaed25SJason Evans abort();
}
1379*b7eaed25SJason Evans }
1380*b7eaed25SJason Evans unsigned n = percpu_arena_ind_limit(
1381*b7eaed25SJason Evans percpu_arena_as_initialized(opt_percpu_arena));
1382*b7eaed25SJason Evans if (opt_narenas < n) {
1383*b7eaed25SJason Evans /*
1384*b7eaed25SJason Evans * If narenas is specified with percpu_arena
1385*b7eaed25SJason Evans * enabled, the actual number of arenas is set
1386*b7eaed25SJason Evans * to the greater of the two. percpu_arena_choose
1387*b7eaed25SJason Evans * is then free to use any of the arenas based
1388*b7eaed25SJason Evans * on the CPU id. This is conservative (at a
1389*b7eaed25SJason Evans * small cost) but ensures correctness.
1390*b7eaed25SJason Evans *
1391*b7eaed25SJason Evans * If for some reason the ncpus determined at
1392*b7eaed25SJason Evans * boot is not the actual number (e.g. because
1393*b7eaed25SJason Evans * of affinity settings from numactl), reserving
1394*b7eaed25SJason Evans * narenas this way provides a workaround for
1395*b7eaed25SJason Evans * percpu_arena.
1396*b7eaed25SJason Evans */
1397*b7eaed25SJason Evans opt_narenas = n;
1398*b7eaed25SJason Evans }
1399*b7eaed25SJason Evans }
1400*b7eaed25SJason Evans }
1401*b7eaed25SJason Evans if (opt_narenas == 0) {
1402*b7eaed25SJason Evans opt_narenas = malloc_narenas_default();
1403*b7eaed25SJason Evans }
1404*b7eaed25SJason Evans assert(opt_narenas > 0);
1405*b7eaed25SJason Evans
140682872ac0SJason Evans narenas_auto = opt_narenas;
1407a4bd5210SJason Evans /*
1408df0d881dSJason Evans * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
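 * For example, following malloc_narenas_default() above, a 16-CPU
 * machine with no explicit "narenas" setting ends up with 16 << 2 ==
 * 64 automatic arenas; the clamp below only triggers if that value
 * ever reached MALLOCX_ARENA_LIMIT.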
1409a4bd5210SJason Evans */ 1410*b7eaed25SJason Evans if (narenas_auto >= MALLOCX_ARENA_LIMIT) { 1411*b7eaed25SJason Evans narenas_auto = MALLOCX_ARENA_LIMIT - 1; 1412a4bd5210SJason Evans malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", 141382872ac0SJason Evans narenas_auto); 1414a4bd5210SJason Evans } 1415df0d881dSJason Evans narenas_total_set(narenas_auto); 1416a4bd5210SJason Evans 1417*b7eaed25SJason Evans return false; 1418*b7eaed25SJason Evans } 1419*b7eaed25SJason Evans 1420*b7eaed25SJason Evans static void 1421*b7eaed25SJason Evans malloc_init_percpu(void) { 1422*b7eaed25SJason Evans opt_percpu_arena = percpu_arena_as_initialized(opt_percpu_arena); 1423*b7eaed25SJason Evans } 1424*b7eaed25SJason Evans 1425*b7eaed25SJason Evans static bool 1426*b7eaed25SJason Evans malloc_init_hard_finish(void) { 1427*b7eaed25SJason Evans if (malloc_mutex_boot()) { 1428*b7eaed25SJason Evans return true; 1429*b7eaed25SJason Evans } 1430a4bd5210SJason Evans 1431d0e79aa3SJason Evans malloc_init_state = malloc_init_initialized; 1432df0d881dSJason Evans malloc_slow_flag_init(); 1433df0d881dSJason Evans 1434*b7eaed25SJason Evans return false; 1435*b7eaed25SJason Evans } 1436*b7eaed25SJason Evans 1437*b7eaed25SJason Evans static void 1438*b7eaed25SJason Evans malloc_init_hard_cleanup(tsdn_t *tsdn, bool reentrancy_set) { 1439*b7eaed25SJason Evans malloc_mutex_assert_owner(tsdn, &init_lock); 1440*b7eaed25SJason Evans malloc_mutex_unlock(tsdn, &init_lock); 1441*b7eaed25SJason Evans if (reentrancy_set) { 1442*b7eaed25SJason Evans assert(!tsdn_null(tsdn)); 1443*b7eaed25SJason Evans tsd_t *tsd = tsdn_tsd(tsdn); 1444*b7eaed25SJason Evans assert(tsd_reentrancy_level_get(tsd) > 0); 1445*b7eaed25SJason Evans post_reentrancy(tsd); 1446*b7eaed25SJason Evans } 1447d0e79aa3SJason Evans } 1448d0e79aa3SJason Evans 1449d0e79aa3SJason Evans static bool 1450*b7eaed25SJason Evans malloc_init_hard(void) { 14511f0a49e8SJason Evans tsd_t *tsd; 1452d0e79aa3SJason Evans 1453536b3538SJason Evans #if defined(_WIN32) && _WIN32_WINNT < 0x0600 1454536b3538SJason Evans _init_init_lock(); 1455536b3538SJason Evans #endif 14561f0a49e8SJason Evans malloc_mutex_lock(TSDN_NULL, &init_lock); 1457*b7eaed25SJason Evans 1458*b7eaed25SJason Evans #define UNLOCK_RETURN(tsdn, ret, reentrancy) \ 1459*b7eaed25SJason Evans malloc_init_hard_cleanup(tsdn, reentrancy); \ 1460*b7eaed25SJason Evans return ret; 1461*b7eaed25SJason Evans 1462d0e79aa3SJason Evans if (!malloc_init_hard_needed()) { 1463*b7eaed25SJason Evans UNLOCK_RETURN(TSDN_NULL, false, false) 1464d0e79aa3SJason Evans } 1465f921d10fSJason Evans 1466d0e79aa3SJason Evans if (malloc_init_state != malloc_init_a0_initialized && 1467d0e79aa3SJason Evans malloc_init_hard_a0_locked()) { 1468*b7eaed25SJason Evans UNLOCK_RETURN(TSDN_NULL, true, false) 1469d0e79aa3SJason Evans } 1470df0d881dSJason Evans 14711f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 14721f0a49e8SJason Evans /* Recursive allocation relies on functional tsd. */ 14731f0a49e8SJason Evans tsd = malloc_tsd_boot0(); 1474*b7eaed25SJason Evans if (tsd == NULL) { 1475*b7eaed25SJason Evans return true; 1476*b7eaed25SJason Evans } 1477*b7eaed25SJason Evans if (malloc_init_hard_recursible()) { 1478*b7eaed25SJason Evans return true; 1479*b7eaed25SJason Evans } 1480*b7eaed25SJason Evans 14811f0a49e8SJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); 1482*b7eaed25SJason Evans /* Set reentrancy level to 1 during init. 
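 * From here until post_reentrancy() below, any allocation performed by
 * the initialization code itself (e.g. from prof_boot2()) takes the
 * reentrant path in imalloc_body(), which pins it to arena 0 with no
 * tcache; see the reentrancy_level handling there.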
*/ 1483*b7eaed25SJason Evans pre_reentrancy(tsd); 1484*b7eaed25SJason Evans /* Initialize narenas before prof_boot2 (for allocation). */ 1485*b7eaed25SJason Evans if (malloc_init_narenas() || background_thread_boot1(tsd_tsdn(tsd))) { 1486*b7eaed25SJason Evans UNLOCK_RETURN(tsd_tsdn(tsd), true, true) 1487*b7eaed25SJason Evans } 1488bde95144SJason Evans if (config_prof && prof_boot2(tsd)) { 1489*b7eaed25SJason Evans UNLOCK_RETURN(tsd_tsdn(tsd), true, true) 1490d0e79aa3SJason Evans } 1491d0e79aa3SJason Evans 1492*b7eaed25SJason Evans malloc_init_percpu(); 1493d0e79aa3SJason Evans 1494*b7eaed25SJason Evans if (malloc_init_hard_finish()) { 1495*b7eaed25SJason Evans UNLOCK_RETURN(tsd_tsdn(tsd), true, true) 1496*b7eaed25SJason Evans } 1497*b7eaed25SJason Evans post_reentrancy(tsd); 14981f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); 1499*b7eaed25SJason Evans 1500d0e79aa3SJason Evans malloc_tsd_boot1(); 1501*b7eaed25SJason Evans /* Update TSD after tsd_boot1. */ 1502*b7eaed25SJason Evans tsd = tsd_fetch(); 1503*b7eaed25SJason Evans if (opt_background_thread) { 1504*b7eaed25SJason Evans assert(have_background_thread); 1505*b7eaed25SJason Evans /* 1506*b7eaed25SJason Evans * Need to finish init & unlock first before creating background 1507*b7eaed25SJason Evans * threads (pthread_create depends on malloc). 1508*b7eaed25SJason Evans */ 1509*b7eaed25SJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &background_thread_lock); 1510*b7eaed25SJason Evans bool err = background_thread_create(tsd, 0); 1511*b7eaed25SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &background_thread_lock); 1512*b7eaed25SJason Evans if (err) { 1513*b7eaed25SJason Evans return true; 1514*b7eaed25SJason Evans } 1515*b7eaed25SJason Evans } 1516*b7eaed25SJason Evans #undef UNLOCK_RETURN 1517*b7eaed25SJason Evans return false; 1518a4bd5210SJason Evans } 1519a4bd5210SJason Evans 1520a4bd5210SJason Evans /* 1521a4bd5210SJason Evans * End initialization functions. 1522a4bd5210SJason Evans */ 1523a4bd5210SJason Evans /******************************************************************************/ 1524a4bd5210SJason Evans /* 1525*b7eaed25SJason Evans * Begin allocation-path internal functions and data structures. 1526a4bd5210SJason Evans */ 1527a4bd5210SJason Evans 1528*b7eaed25SJason Evans /* 1529*b7eaed25SJason Evans * Settings determined by the documented behavior of the allocation functions. 1530*b7eaed25SJason Evans */ 1531*b7eaed25SJason Evans typedef struct static_opts_s static_opts_t; 1532*b7eaed25SJason Evans struct static_opts_s { 1533*b7eaed25SJason Evans /* Whether or not allocation size may overflow. */ 1534*b7eaed25SJason Evans bool may_overflow; 1535*b7eaed25SJason Evans /* Whether or not allocations of size 0 should be treated as size 1. */ 1536*b7eaed25SJason Evans bool bump_empty_alloc; 1537*b7eaed25SJason Evans /* 1538*b7eaed25SJason Evans * Whether to assert that allocations are not of size 0 (after any 1539*b7eaed25SJason Evans * bumping). 1540*b7eaed25SJason Evans */ 1541*b7eaed25SJason Evans bool assert_nonempty_alloc; 1542f921d10fSJason Evans 1543*b7eaed25SJason Evans /* 1544*b7eaed25SJason Evans * Whether or not to modify the 'result' argument to malloc in case of 1545*b7eaed25SJason Evans * error. 1546*b7eaed25SJason Evans */ 1547*b7eaed25SJason Evans bool null_out_result_on_error; 1548*b7eaed25SJason Evans /* Whether to set errno when we encounter an error condition. 
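 * For instance, je_malloc() below sets this so that failure is
 * reported as ENOMEM through errno, whereas je_posix_memalign() leaves
 * it unset and instead surfaces the error code through its int return
 * value, matching what POSIX specifies for that interface.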
*/ 1549*b7eaed25SJason Evans bool set_errno_on_error; 1550f921d10fSJason Evans 1551*b7eaed25SJason Evans /* 1552*b7eaed25SJason Evans * The minimum valid alignment for functions requesting aligned storage. 1553*b7eaed25SJason Evans */ 1554*b7eaed25SJason Evans size_t min_alignment; 1555f921d10fSJason Evans 1556*b7eaed25SJason Evans /* The error string to use if we oom. */ 1557*b7eaed25SJason Evans const char *oom_string; 1558*b7eaed25SJason Evans /* The error string to use if the passed-in alignment is invalid. */ 1559*b7eaed25SJason Evans const char *invalid_alignment_string; 1560f921d10fSJason Evans 1561*b7eaed25SJason Evans /* 1562*b7eaed25SJason Evans * False if we're configured to skip some time-consuming operations. 1563*b7eaed25SJason Evans * 1564*b7eaed25SJason Evans * This isn't really a malloc "behavior", but it acts as a useful 1565*b7eaed25SJason Evans * summary of several other static (or at least, static after program 1566*b7eaed25SJason Evans * initialization) options. 1567*b7eaed25SJason Evans */ 1568*b7eaed25SJason Evans bool slow; 1569*b7eaed25SJason Evans }; 1570f921d10fSJason Evans 1571*b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void 1572*b7eaed25SJason Evans static_opts_init(static_opts_t *static_opts) { 1573*b7eaed25SJason Evans static_opts->may_overflow = false; 1574*b7eaed25SJason Evans static_opts->bump_empty_alloc = false; 1575*b7eaed25SJason Evans static_opts->assert_nonempty_alloc = false; 1576*b7eaed25SJason Evans static_opts->null_out_result_on_error = false; 1577*b7eaed25SJason Evans static_opts->set_errno_on_error = false; 1578*b7eaed25SJason Evans static_opts->min_alignment = 0; 1579*b7eaed25SJason Evans static_opts->oom_string = ""; 1580*b7eaed25SJason Evans static_opts->invalid_alignment_string = ""; 1581*b7eaed25SJason Evans static_opts->slow = false; 1582f921d10fSJason Evans } 1583f921d10fSJason Evans 15841f0a49e8SJason Evans /* 1585*b7eaed25SJason Evans * These correspond to the macros in jemalloc/jemalloc_macros.h. Broadly, we 1586*b7eaed25SJason Evans * should have one constant here per magic value there. Note however that the 1587*b7eaed25SJason Evans * representations need not be related. 
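 * For example, je_mallocx() below translates the public
 * MALLOCX_TCACHE_NONE flag into TCACHE_IND_NONE and
 * MALLOCX_ARENA_GET(flags) into an explicit arena_ind, so the public
 * encodings never have to leak past that translation step.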
15881f0a49e8SJason Evans */ 1589*b7eaed25SJason Evans #define TCACHE_IND_NONE ((unsigned)-1) 1590*b7eaed25SJason Evans #define TCACHE_IND_AUTOMATIC ((unsigned)-2) 1591*b7eaed25SJason Evans #define ARENA_IND_AUTOMATIC ((unsigned)-1) 1592f921d10fSJason Evans 1593*b7eaed25SJason Evans typedef struct dynamic_opts_s dynamic_opts_t; 1594*b7eaed25SJason Evans struct dynamic_opts_s { 1595*b7eaed25SJason Evans void **result; 1596*b7eaed25SJason Evans size_t num_items; 1597*b7eaed25SJason Evans size_t item_size; 1598*b7eaed25SJason Evans size_t alignment; 1599*b7eaed25SJason Evans bool zero; 1600*b7eaed25SJason Evans unsigned tcache_ind; 1601*b7eaed25SJason Evans unsigned arena_ind; 1602*b7eaed25SJason Evans }; 1603*b7eaed25SJason Evans 1604*b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void 1605*b7eaed25SJason Evans dynamic_opts_init(dynamic_opts_t *dynamic_opts) { 1606*b7eaed25SJason Evans dynamic_opts->result = NULL; 1607*b7eaed25SJason Evans dynamic_opts->num_items = 0; 1608*b7eaed25SJason Evans dynamic_opts->item_size = 0; 1609*b7eaed25SJason Evans dynamic_opts->alignment = 0; 1610*b7eaed25SJason Evans dynamic_opts->zero = false; 1611*b7eaed25SJason Evans dynamic_opts->tcache_ind = TCACHE_IND_AUTOMATIC; 1612*b7eaed25SJason Evans dynamic_opts->arena_ind = ARENA_IND_AUTOMATIC; 16131f0a49e8SJason Evans } 16141f0a49e8SJason Evans 1615*b7eaed25SJason Evans /* ind is ignored if dopts->alignment > 0. */ 1616*b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void * 1617*b7eaed25SJason Evans imalloc_no_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, 1618*b7eaed25SJason Evans size_t size, size_t usize, szind_t ind) { 1619*b7eaed25SJason Evans tcache_t *tcache; 1620*b7eaed25SJason Evans arena_t *arena; 16211f0a49e8SJason Evans 1622*b7eaed25SJason Evans /* Fill in the tcache. */ 1623*b7eaed25SJason Evans if (dopts->tcache_ind == TCACHE_IND_AUTOMATIC) { 1624*b7eaed25SJason Evans if (likely(!sopts->slow)) { 1625*b7eaed25SJason Evans /* Getting tcache ptr unconditionally. */ 1626*b7eaed25SJason Evans tcache = tsd_tcachep_get(tsd); 1627*b7eaed25SJason Evans assert(tcache == tcache_get(tsd)); 1628*b7eaed25SJason Evans } else { 1629*b7eaed25SJason Evans tcache = tcache_get(tsd); 1630*b7eaed25SJason Evans } 1631*b7eaed25SJason Evans } else if (dopts->tcache_ind == TCACHE_IND_NONE) { 1632*b7eaed25SJason Evans tcache = NULL; 1633*b7eaed25SJason Evans } else { 1634*b7eaed25SJason Evans tcache = tcaches_get(tsd, dopts->tcache_ind); 1635d0e79aa3SJason Evans } 1636d0e79aa3SJason Evans 1637*b7eaed25SJason Evans /* Fill in the arena. */ 1638*b7eaed25SJason Evans if (dopts->arena_ind == ARENA_IND_AUTOMATIC) { 1639*b7eaed25SJason Evans /* 1640*b7eaed25SJason Evans * In case of automatic arena management, we defer arena 1641*b7eaed25SJason Evans * computation until as late as we can, hoping to fill the 1642*b7eaed25SJason Evans * allocation out of the tcache. 
1643*b7eaed25SJason Evans */ 1644*b7eaed25SJason Evans arena = NULL; 1645*b7eaed25SJason Evans } else { 1646*b7eaed25SJason Evans arena = arena_get(tsd_tsdn(tsd), dopts->arena_ind, true); 1647df0d881dSJason Evans } 1648df0d881dSJason Evans 1649*b7eaed25SJason Evans if (unlikely(dopts->alignment != 0)) { 1650*b7eaed25SJason Evans return ipalloct(tsd_tsdn(tsd), usize, dopts->alignment, 1651*b7eaed25SJason Evans dopts->zero, tcache, arena); 1652*b7eaed25SJason Evans } 16531f0a49e8SJason Evans 1654*b7eaed25SJason Evans return iallocztm(tsd_tsdn(tsd), size, ind, dopts->zero, tcache, false, 1655*b7eaed25SJason Evans arena, sopts->slow); 1656*b7eaed25SJason Evans } 16571f0a49e8SJason Evans 1658*b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void * 1659*b7eaed25SJason Evans imalloc_sample(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd, 1660*b7eaed25SJason Evans size_t usize, szind_t ind) { 1661*b7eaed25SJason Evans void *ret; 1662*b7eaed25SJason Evans 1663*b7eaed25SJason Evans /* 1664*b7eaed25SJason Evans * For small allocations, sampling bumps the usize. If so, we allocate 1665*b7eaed25SJason Evans * from the ind_large bucket. 1666*b7eaed25SJason Evans */ 1667*b7eaed25SJason Evans szind_t ind_large; 1668*b7eaed25SJason Evans size_t bumped_usize = usize; 1669*b7eaed25SJason Evans 1670*b7eaed25SJason Evans if (usize <= SMALL_MAXCLASS) { 1671*b7eaed25SJason Evans assert(((dopts->alignment == 0) ? sz_s2u(LARGE_MINCLASS) : 1672*b7eaed25SJason Evans sz_sa2u(LARGE_MINCLASS, dopts->alignment)) 1673*b7eaed25SJason Evans == LARGE_MINCLASS); 1674*b7eaed25SJason Evans ind_large = sz_size2index(LARGE_MINCLASS); 1675*b7eaed25SJason Evans bumped_usize = sz_s2u(LARGE_MINCLASS); 1676*b7eaed25SJason Evans ret = imalloc_no_sample(sopts, dopts, tsd, bumped_usize, 1677*b7eaed25SJason Evans bumped_usize, ind_large); 1678df0d881dSJason Evans if (unlikely(ret == NULL)) { 1679*b7eaed25SJason Evans return NULL; 1680*b7eaed25SJason Evans } 1681*b7eaed25SJason Evans arena_prof_promote(tsd_tsdn(tsd), ret, usize); 1682*b7eaed25SJason Evans } else { 1683*b7eaed25SJason Evans ret = imalloc_no_sample(sopts, dopts, tsd, usize, usize, ind); 1684*b7eaed25SJason Evans } 1685*b7eaed25SJason Evans 1686*b7eaed25SJason Evans return ret; 1687*b7eaed25SJason Evans } 1688*b7eaed25SJason Evans 1689*b7eaed25SJason Evans /* 1690*b7eaed25SJason Evans * Returns true if the allocation will overflow, and false otherwise. Sets 1691*b7eaed25SJason Evans * *size to the product either way. 1692*b7eaed25SJason Evans */ 1693*b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE bool 1694*b7eaed25SJason Evans compute_size_with_overflow(bool may_overflow, dynamic_opts_t *dopts, 1695*b7eaed25SJason Evans size_t *size) { 1696*b7eaed25SJason Evans /* 1697*b7eaed25SJason Evans * This function is just num_items * item_size, except that we may have 1698*b7eaed25SJason Evans * to check for overflow. 1699*b7eaed25SJason Evans */ 1700*b7eaed25SJason Evans 1701*b7eaed25SJason Evans if (!may_overflow) { 1702*b7eaed25SJason Evans assert(dopts->num_items == 1); 1703*b7eaed25SJason Evans *size = dopts->item_size; 1704*b7eaed25SJason Evans return false; 1705*b7eaed25SJason Evans } 1706*b7eaed25SJason Evans 1707*b7eaed25SJason Evans /* A size_t with its high-half bits all set to 1. 
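 * On a 64-bit system this mask is 0xffffffff00000000. If neither
 * operand intersects it, both are below 2^32 and their product cannot
 * exceed 64 bits. E.g. num_items == 3 with item_size == (1 << 20)
 * passes the mask test below and needs no divide, while item_size ==
 * ((size_t)1 << 40) does not, and falls through to the divide-based
 * verification.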
*/
1708*b7eaed25SJason Evans static const size_t high_bits = SIZE_T_MAX << (sizeof(size_t) * 8 / 2);
1709*b7eaed25SJason Evans
1710*b7eaed25SJason Evans *size = dopts->item_size * dopts->num_items;
1711*b7eaed25SJason Evans
1712*b7eaed25SJason Evans if (unlikely(*size == 0)) {
1713*b7eaed25SJason Evans return (dopts->num_items != 0 && dopts->item_size != 0);
1714*b7eaed25SJason Evans }
1715*b7eaed25SJason Evans
1716*b7eaed25SJason Evans /*
1717*b7eaed25SJason Evans * We got a non-zero size, but we don't know if we overflowed to get
1718*b7eaed25SJason Evans * there. To avoid having to do a divide, we'll be clever and note that
1719*b7eaed25SJason Evans * if both A and B can be represented in N/2 bits, then their product
1720*b7eaed25SJason Evans * can be represented in N bits (without the possibility of overflow).
1721*b7eaed25SJason Evans */
1722*b7eaed25SJason Evans if (likely((high_bits & (dopts->num_items | dopts->item_size)) == 0)) {
1723*b7eaed25SJason Evans return false;
1724*b7eaed25SJason Evans }
1725*b7eaed25SJason Evans if (likely(*size / dopts->item_size == dopts->num_items)) {
1726*b7eaed25SJason Evans return false;
1727*b7eaed25SJason Evans }
1728*b7eaed25SJason Evans return true;
1729*b7eaed25SJason Evans }
1730*b7eaed25SJason Evans
1731*b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE int
1732*b7eaed25SJason Evans imalloc_body(static_opts_t *sopts, dynamic_opts_t *dopts, tsd_t *tsd) {
1733*b7eaed25SJason Evans /* Where the actual allocated memory will live. */
1734*b7eaed25SJason Evans void *allocation = NULL;
1735*b7eaed25SJason Evans /* Filled in by compute_size_with_overflow below. */
1736*b7eaed25SJason Evans size_t size = 0;
1737*b7eaed25SJason Evans /*
1738*b7eaed25SJason Evans * For unaligned allocations, we need only ind. For aligned
1739*b7eaed25SJason Evans * allocations, or in case of stats or profiling, we need usize.
1740*b7eaed25SJason Evans *
1741*b7eaed25SJason Evans * These are actually dead stores, in that their values are reset before
1742*b7eaed25SJason Evans * any branch on their value is taken. Sometimes though, it's
1743*b7eaed25SJason Evans * convenient to pass them as arguments before this point. To avoid
1744*b7eaed25SJason Evans * undefined behavior then, we initialize them with dummy stores.
1745*b7eaed25SJason Evans */
1746*b7eaed25SJason Evans szind_t ind = 0;
1747*b7eaed25SJason Evans size_t usize = 0;
1748*b7eaed25SJason Evans
1749*b7eaed25SJason Evans /* Reentrancy is only checked on the slow path. */
1750*b7eaed25SJason Evans int8_t reentrancy_level;
1751*b7eaed25SJason Evans
1752*b7eaed25SJason Evans /* Compute the amount of memory the user wants. */
1753*b7eaed25SJason Evans if (unlikely(compute_size_with_overflow(sopts->may_overflow, dopts,
1754*b7eaed25SJason Evans &size))) {
1755*b7eaed25SJason Evans goto label_oom;
1756*b7eaed25SJason Evans }
1757*b7eaed25SJason Evans
1758*b7eaed25SJason Evans /* Validate the user input.
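 * The alignment test below rejects values under the entry point's
 * minimum (e.g. sizeof(void *) for posix_memalign()) as well as
 * non-powers-of-two: x & (x - 1) is nonzero exactly when x has more
 * than one bit set, so an alignment of 24 fails while 32 passes.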
*/ 1759*b7eaed25SJason Evans if (sopts->bump_empty_alloc) { 1760*b7eaed25SJason Evans if (unlikely(size == 0)) { 1761*b7eaed25SJason Evans size = 1; 1762*b7eaed25SJason Evans } 1763*b7eaed25SJason Evans } 1764*b7eaed25SJason Evans 1765*b7eaed25SJason Evans if (sopts->assert_nonempty_alloc) { 1766*b7eaed25SJason Evans assert (size != 0); 1767*b7eaed25SJason Evans } 1768*b7eaed25SJason Evans 1769*b7eaed25SJason Evans if (unlikely(dopts->alignment < sopts->min_alignment 1770*b7eaed25SJason Evans || (dopts->alignment & (dopts->alignment - 1)) != 0)) { 1771*b7eaed25SJason Evans goto label_invalid_alignment; 1772*b7eaed25SJason Evans } 1773*b7eaed25SJason Evans 1774*b7eaed25SJason Evans /* This is the beginning of the "core" algorithm. */ 1775*b7eaed25SJason Evans 1776*b7eaed25SJason Evans if (dopts->alignment == 0) { 1777*b7eaed25SJason Evans ind = sz_size2index(size); 1778*b7eaed25SJason Evans if (unlikely(ind >= NSIZES)) { 1779*b7eaed25SJason Evans goto label_oom; 1780*b7eaed25SJason Evans } 1781*b7eaed25SJason Evans if (config_stats || (config_prof && opt_prof)) { 1782*b7eaed25SJason Evans usize = sz_index2size(ind); 1783*b7eaed25SJason Evans assert(usize > 0 && usize <= LARGE_MAXCLASS); 1784*b7eaed25SJason Evans } 1785*b7eaed25SJason Evans } else { 1786*b7eaed25SJason Evans usize = sz_sa2u(size, dopts->alignment); 1787*b7eaed25SJason Evans if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { 1788*b7eaed25SJason Evans goto label_oom; 1789*b7eaed25SJason Evans } 1790*b7eaed25SJason Evans } 1791*b7eaed25SJason Evans 1792*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 1793*b7eaed25SJason Evans 1794*b7eaed25SJason Evans /* 1795*b7eaed25SJason Evans * If we need to handle reentrancy, we can do it out of a 1796*b7eaed25SJason Evans * known-initialized arena (i.e. arena 0). 1797*b7eaed25SJason Evans */ 1798*b7eaed25SJason Evans reentrancy_level = tsd_reentrancy_level_get(tsd); 1799*b7eaed25SJason Evans if (sopts->slow && unlikely(reentrancy_level > 0)) { 1800*b7eaed25SJason Evans /* 1801*b7eaed25SJason Evans * We should never specify particular arenas or tcaches from 1802*b7eaed25SJason Evans * within our internal allocations. 1803*b7eaed25SJason Evans */ 1804*b7eaed25SJason Evans assert(dopts->tcache_ind == TCACHE_IND_AUTOMATIC || 1805*b7eaed25SJason Evans dopts->tcache_ind == TCACHE_IND_NONE); 1806*b7eaed25SJason Evans assert(dopts->arena_ind = ARENA_IND_AUTOMATIC); 1807*b7eaed25SJason Evans dopts->tcache_ind = TCACHE_IND_NONE; 1808*b7eaed25SJason Evans /* We know that arena 0 has already been initialized. */ 1809*b7eaed25SJason Evans dopts->arena_ind = 0; 1810*b7eaed25SJason Evans } 1811*b7eaed25SJason Evans 1812*b7eaed25SJason Evans /* If profiling is on, get our profiling context. */ 1813*b7eaed25SJason Evans if (config_prof && opt_prof) { 1814*b7eaed25SJason Evans /* 1815*b7eaed25SJason Evans * Note that if we're going down this path, usize must have been 1816*b7eaed25SJason Evans * initialized in the previous if statement. 
1817*b7eaed25SJason Evans */ 1818*b7eaed25SJason Evans prof_tctx_t *tctx = prof_alloc_prep( 1819*b7eaed25SJason Evans tsd, usize, prof_active_get_unlocked(), true); 1820*b7eaed25SJason Evans 1821*b7eaed25SJason Evans alloc_ctx_t alloc_ctx; 1822*b7eaed25SJason Evans if (likely((uintptr_t)tctx == (uintptr_t)1U)) { 1823*b7eaed25SJason Evans alloc_ctx.slab = (usize <= SMALL_MAXCLASS); 1824*b7eaed25SJason Evans allocation = imalloc_no_sample( 1825*b7eaed25SJason Evans sopts, dopts, tsd, usize, usize, ind); 1826*b7eaed25SJason Evans } else if ((uintptr_t)tctx > (uintptr_t)1U) { 1827*b7eaed25SJason Evans /* 1828*b7eaed25SJason Evans * Note that ind might still be 0 here. This is fine; 1829*b7eaed25SJason Evans * imalloc_sample ignores ind if dopts->alignment > 0. 1830*b7eaed25SJason Evans */ 1831*b7eaed25SJason Evans allocation = imalloc_sample( 1832*b7eaed25SJason Evans sopts, dopts, tsd, usize, ind); 1833*b7eaed25SJason Evans alloc_ctx.slab = false; 1834*b7eaed25SJason Evans } else { 1835*b7eaed25SJason Evans allocation = NULL; 1836*b7eaed25SJason Evans } 1837*b7eaed25SJason Evans 1838*b7eaed25SJason Evans if (unlikely(allocation == NULL)) { 1839*b7eaed25SJason Evans prof_alloc_rollback(tsd, tctx, true); 1840*b7eaed25SJason Evans goto label_oom; 1841*b7eaed25SJason Evans } 1842*b7eaed25SJason Evans prof_malloc(tsd_tsdn(tsd), allocation, usize, &alloc_ctx, tctx); 1843*b7eaed25SJason Evans } else { 1844*b7eaed25SJason Evans /* 1845*b7eaed25SJason Evans * If dopts->alignment > 0, then ind is still 0, but usize was 1846*b7eaed25SJason Evans * computed in the previous if statement. Down the positive 1847*b7eaed25SJason Evans * alignment path, imalloc_no_sample ignores ind and size 1848*b7eaed25SJason Evans * (relying only on usize). 1849*b7eaed25SJason Evans */ 1850*b7eaed25SJason Evans allocation = imalloc_no_sample(sopts, dopts, tsd, size, usize, 1851*b7eaed25SJason Evans ind); 1852*b7eaed25SJason Evans if (unlikely(allocation == NULL)) { 1853*b7eaed25SJason Evans goto label_oom; 1854*b7eaed25SJason Evans } 1855*b7eaed25SJason Evans } 1856*b7eaed25SJason Evans 1857*b7eaed25SJason Evans /* 1858*b7eaed25SJason Evans * Allocation has been done at this point. We still have some 1859*b7eaed25SJason Evans * post-allocation work to do though. 1860*b7eaed25SJason Evans */ 1861*b7eaed25SJason Evans assert(dopts->alignment == 0 1862*b7eaed25SJason Evans || ((uintptr_t)allocation & (dopts->alignment - 1)) == ZU(0)); 1863*b7eaed25SJason Evans 1864*b7eaed25SJason Evans if (config_stats) { 1865*b7eaed25SJason Evans assert(usize == isalloc(tsd_tsdn(tsd), allocation)); 1866*b7eaed25SJason Evans *tsd_thread_allocatedp_get(tsd) += usize; 1867*b7eaed25SJason Evans } 1868*b7eaed25SJason Evans 1869*b7eaed25SJason Evans if (sopts->slow) { 1870*b7eaed25SJason Evans UTRACE(0, size, allocation); 1871*b7eaed25SJason Evans } 1872*b7eaed25SJason Evans 1873*b7eaed25SJason Evans /* Success! 
*/ 1874*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 1875*b7eaed25SJason Evans *dopts->result = allocation; 1876*b7eaed25SJason Evans return 0; 1877*b7eaed25SJason Evans 1878*b7eaed25SJason Evans label_oom: 1879*b7eaed25SJason Evans if (unlikely(sopts->slow) && config_xmalloc && unlikely(opt_xmalloc)) { 1880*b7eaed25SJason Evans malloc_write(sopts->oom_string); 1881df0d881dSJason Evans abort(); 1882df0d881dSJason Evans } 1883*b7eaed25SJason Evans 1884*b7eaed25SJason Evans if (sopts->slow) { 1885*b7eaed25SJason Evans UTRACE(NULL, size, NULL); 1886*b7eaed25SJason Evans } 1887*b7eaed25SJason Evans 1888*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 1889*b7eaed25SJason Evans 1890*b7eaed25SJason Evans if (sopts->set_errno_on_error) { 1891df0d881dSJason Evans set_errno(ENOMEM); 1892df0d881dSJason Evans } 1893*b7eaed25SJason Evans 1894*b7eaed25SJason Evans if (sopts->null_out_result_on_error) { 1895*b7eaed25SJason Evans *dopts->result = NULL; 1896df0d881dSJason Evans } 1897*b7eaed25SJason Evans 1898*b7eaed25SJason Evans return ENOMEM; 1899*b7eaed25SJason Evans 1900*b7eaed25SJason Evans /* 1901*b7eaed25SJason Evans * This label is only jumped to by one goto; we move it out of line 1902*b7eaed25SJason Evans * anyways to avoid obscuring the non-error paths, and for symmetry with 1903*b7eaed25SJason Evans * the oom case. 1904*b7eaed25SJason Evans */ 1905*b7eaed25SJason Evans label_invalid_alignment: 1906*b7eaed25SJason Evans if (config_xmalloc && unlikely(opt_xmalloc)) { 1907*b7eaed25SJason Evans malloc_write(sopts->invalid_alignment_string); 1908*b7eaed25SJason Evans abort(); 1909d0e79aa3SJason Evans } 1910d0e79aa3SJason Evans 1911*b7eaed25SJason Evans if (sopts->set_errno_on_error) { 1912*b7eaed25SJason Evans set_errno(EINVAL); 1913*b7eaed25SJason Evans } 1914*b7eaed25SJason Evans 1915*b7eaed25SJason Evans if (sopts->slow) { 1916*b7eaed25SJason Evans UTRACE(NULL, size, NULL); 1917*b7eaed25SJason Evans } 1918*b7eaed25SJason Evans 1919*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 1920*b7eaed25SJason Evans 1921*b7eaed25SJason Evans if (sopts->null_out_result_on_error) { 1922*b7eaed25SJason Evans *dopts->result = NULL; 1923*b7eaed25SJason Evans } 1924*b7eaed25SJason Evans 1925*b7eaed25SJason Evans return EINVAL; 1926*b7eaed25SJason Evans } 1927*b7eaed25SJason Evans 1928*b7eaed25SJason Evans /* Returns the errno-style error code of the allocation. */ 1929*b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE int 1930*b7eaed25SJason Evans imalloc(static_opts_t *sopts, dynamic_opts_t *dopts) { 1931*b7eaed25SJason Evans if (unlikely(!malloc_initialized()) && unlikely(malloc_init())) { 1932*b7eaed25SJason Evans if (config_xmalloc && unlikely(opt_xmalloc)) { 1933*b7eaed25SJason Evans malloc_write(sopts->oom_string); 1934*b7eaed25SJason Evans abort(); 1935*b7eaed25SJason Evans } 1936*b7eaed25SJason Evans UTRACE(NULL, dopts->num_items * dopts->item_size, NULL); 1937*b7eaed25SJason Evans set_errno(ENOMEM); 1938*b7eaed25SJason Evans *dopts->result = NULL; 1939*b7eaed25SJason Evans 1940*b7eaed25SJason Evans return ENOMEM; 1941*b7eaed25SJason Evans } 1942*b7eaed25SJason Evans 1943*b7eaed25SJason Evans /* We always need the tsd. Let's grab it right away. */ 1944*b7eaed25SJason Evans tsd_t *tsd = tsd_fetch(); 1945*b7eaed25SJason Evans assert(tsd); 1946*b7eaed25SJason Evans if (likely(tsd_fast(tsd))) { 1947*b7eaed25SJason Evans /* Fast and common path. 
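 * tsd_fast() is false e.g. while thread-specific data is still being
 * constructed or torn down, or when a nonzero reentrancy level forces
 * the checked path; both branches run the same imalloc_body(), with
 * sopts->slow selecting whether the extra bookkeeping (UTRACE,
 * reentrancy handling) happens. */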
*/ 1948*b7eaed25SJason Evans tsd_assert_fast(tsd); 1949*b7eaed25SJason Evans sopts->slow = false; 1950*b7eaed25SJason Evans return imalloc_body(sopts, dopts, tsd); 1951*b7eaed25SJason Evans } else { 1952*b7eaed25SJason Evans sopts->slow = true; 1953*b7eaed25SJason Evans return imalloc_body(sopts, dopts, tsd); 1954*b7eaed25SJason Evans } 1955*b7eaed25SJason Evans } 1956*b7eaed25SJason Evans /******************************************************************************/ 1957*b7eaed25SJason Evans /* 1958*b7eaed25SJason Evans * Begin malloc(3)-compatible functions. 1959*b7eaed25SJason Evans */ 1960*b7eaed25SJason Evans 1961d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 1962d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 1963d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) 1964*b7eaed25SJason Evans je_malloc(size_t size) { 1965a4bd5210SJason Evans void *ret; 1966*b7eaed25SJason Evans static_opts_t sopts; 1967*b7eaed25SJason Evans dynamic_opts_t dopts; 1968a4bd5210SJason Evans 1969*b7eaed25SJason Evans static_opts_init(&sopts); 1970*b7eaed25SJason Evans dynamic_opts_init(&dopts); 1971a4bd5210SJason Evans 1972*b7eaed25SJason Evans sopts.bump_empty_alloc = true; 1973*b7eaed25SJason Evans sopts.null_out_result_on_error = true; 1974*b7eaed25SJason Evans sopts.set_errno_on_error = true; 1975*b7eaed25SJason Evans sopts.oom_string = "<jemalloc>: Error in malloc(): out of memory\n"; 1976df0d881dSJason Evans 1977*b7eaed25SJason Evans dopts.result = &ret; 1978*b7eaed25SJason Evans dopts.num_items = 1; 1979*b7eaed25SJason Evans dopts.item_size = size; 1980a4bd5210SJason Evans 1981*b7eaed25SJason Evans imalloc(&sopts, &dopts); 1982f921d10fSJason Evans 1983*b7eaed25SJason Evans return ret; 1984a4bd5210SJason Evans } 1985a4bd5210SJason Evans 1986d0e79aa3SJason Evans JEMALLOC_EXPORT int JEMALLOC_NOTHROW 1987d0e79aa3SJason Evans JEMALLOC_ATTR(nonnull(1)) 1988*b7eaed25SJason Evans je_posix_memalign(void **memptr, size_t alignment, size_t size) { 19891f0a49e8SJason Evans int ret; 1990*b7eaed25SJason Evans static_opts_t sopts; 1991*b7eaed25SJason Evans dynamic_opts_t dopts; 19921f0a49e8SJason Evans 1993*b7eaed25SJason Evans static_opts_init(&sopts); 1994*b7eaed25SJason Evans dynamic_opts_init(&dopts); 19951f0a49e8SJason Evans 1996*b7eaed25SJason Evans sopts.bump_empty_alloc = true; 1997*b7eaed25SJason Evans sopts.min_alignment = sizeof(void *); 1998*b7eaed25SJason Evans sopts.oom_string = 1999*b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: out of memory\n"; 2000*b7eaed25SJason Evans sopts.invalid_alignment_string = 2001*b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; 2002*b7eaed25SJason Evans 2003*b7eaed25SJason Evans dopts.result = memptr; 2004*b7eaed25SJason Evans dopts.num_items = 1; 2005*b7eaed25SJason Evans dopts.item_size = size; 2006*b7eaed25SJason Evans dopts.alignment = alignment; 2007*b7eaed25SJason Evans 2008*b7eaed25SJason Evans ret = imalloc(&sopts, &dopts); 2009*b7eaed25SJason Evans return ret; 2010a4bd5210SJason Evans } 2011a4bd5210SJason Evans 2012d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2013d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2014d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) 2015*b7eaed25SJason Evans je_aligned_alloc(size_t alignment, size_t size) { 2016a4bd5210SJason Evans void *ret; 2017a4bd5210SJason Evans 2018*b7eaed25SJason Evans static_opts_t sopts; 2019*b7eaed25SJason Evans dynamic_opts_t dopts; 20201f0a49e8SJason 
Evans 2021*b7eaed25SJason Evans static_opts_init(&sopts); 2022*b7eaed25SJason Evans dynamic_opts_init(&dopts); 2023*b7eaed25SJason Evans 2024*b7eaed25SJason Evans sopts.bump_empty_alloc = true; 2025*b7eaed25SJason Evans sopts.null_out_result_on_error = true; 2026*b7eaed25SJason Evans sopts.set_errno_on_error = true; 2027*b7eaed25SJason Evans sopts.min_alignment = 1; 2028*b7eaed25SJason Evans sopts.oom_string = 2029*b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: out of memory\n"; 2030*b7eaed25SJason Evans sopts.invalid_alignment_string = 2031*b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; 2032*b7eaed25SJason Evans 2033*b7eaed25SJason Evans dopts.result = &ret; 2034*b7eaed25SJason Evans dopts.num_items = 1; 2035*b7eaed25SJason Evans dopts.item_size = size; 2036*b7eaed25SJason Evans dopts.alignment = alignment; 2037*b7eaed25SJason Evans 2038*b7eaed25SJason Evans imalloc(&sopts, &dopts); 2039*b7eaed25SJason Evans return ret; 2040a4bd5210SJason Evans } 2041a4bd5210SJason Evans 2042d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2043d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2044d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) 2045*b7eaed25SJason Evans je_calloc(size_t num, size_t size) { 2046a4bd5210SJason Evans void *ret; 2047*b7eaed25SJason Evans static_opts_t sopts; 2048*b7eaed25SJason Evans dynamic_opts_t dopts; 2049a4bd5210SJason Evans 2050*b7eaed25SJason Evans static_opts_init(&sopts); 2051*b7eaed25SJason Evans dynamic_opts_init(&dopts); 2052a4bd5210SJason Evans 2053*b7eaed25SJason Evans sopts.may_overflow = true; 2054*b7eaed25SJason Evans sopts.bump_empty_alloc = true; 2055*b7eaed25SJason Evans sopts.null_out_result_on_error = true; 2056*b7eaed25SJason Evans sopts.set_errno_on_error = true; 2057*b7eaed25SJason Evans sopts.oom_string = "<jemalloc>: Error in calloc(): out of memory\n"; 2058a4bd5210SJason Evans 2059*b7eaed25SJason Evans dopts.result = &ret; 2060*b7eaed25SJason Evans dopts.num_items = num; 2061*b7eaed25SJason Evans dopts.item_size = size; 2062*b7eaed25SJason Evans dopts.zero = true; 2063*b7eaed25SJason Evans 2064*b7eaed25SJason Evans imalloc(&sopts, &dopts); 2065*b7eaed25SJason Evans 2066*b7eaed25SJason Evans return ret; 2067a4bd5210SJason Evans } 2068a4bd5210SJason Evans 2069f921d10fSJason Evans static void * 2070536b3538SJason Evans irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, 2071*b7eaed25SJason Evans prof_tctx_t *tctx) { 2072f921d10fSJason Evans void *p; 2073a4bd5210SJason Evans 2074*b7eaed25SJason Evans if (tctx == NULL) { 2075*b7eaed25SJason Evans return NULL; 2076*b7eaed25SJason Evans } 2077d0e79aa3SJason Evans if (usize <= SMALL_MAXCLASS) { 2078536b3538SJason Evans p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); 2079*b7eaed25SJason Evans if (p == NULL) { 2080*b7eaed25SJason Evans return NULL; 2081*b7eaed25SJason Evans } 2082*b7eaed25SJason Evans arena_prof_promote(tsd_tsdn(tsd), p, usize); 2083*b7eaed25SJason Evans } else { 2084536b3538SJason Evans p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); 2085a4bd5210SJason Evans } 2086a4bd5210SJason Evans 2087*b7eaed25SJason Evans return p; 2088*b7eaed25SJason Evans } 2089*b7eaed25SJason Evans 2090*b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void * 2091*b7eaed25SJason Evans irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, 2092*b7eaed25SJason Evans alloc_ctx_t *alloc_ctx) { 2093f921d10fSJason Evans void *p; 2094536b3538SJason 
Evans bool prof_active; 2095d0e79aa3SJason Evans prof_tctx_t *old_tctx, *tctx; 2096a4bd5210SJason Evans 2097536b3538SJason Evans prof_active = prof_active_get_unlocked(); 2098*b7eaed25SJason Evans old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); 2099536b3538SJason Evans tctx = prof_alloc_prep(tsd, usize, prof_active, true); 2100*b7eaed25SJason Evans if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { 2101536b3538SJason Evans p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); 2102*b7eaed25SJason Evans } else { 2103536b3538SJason Evans p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); 2104*b7eaed25SJason Evans } 2105536b3538SJason Evans if (unlikely(p == NULL)) { 2106536b3538SJason Evans prof_alloc_rollback(tsd, tctx, true); 2107*b7eaed25SJason Evans return NULL; 2108536b3538SJason Evans } 2109536b3538SJason Evans prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, 2110536b3538SJason Evans old_tctx); 2111f921d10fSJason Evans 2112*b7eaed25SJason Evans return p; 2113f921d10fSJason Evans } 2114f921d10fSJason Evans 2115*b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void 2116*b7eaed25SJason Evans ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) { 2117*b7eaed25SJason Evans if (!slow_path) { 2118*b7eaed25SJason Evans tsd_assert_fast(tsd); 2119*b7eaed25SJason Evans } 2120*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2121*b7eaed25SJason Evans if (tsd_reentrancy_level_get(tsd) != 0) { 2122*b7eaed25SJason Evans assert(slow_path); 2123*b7eaed25SJason Evans } 2124*b7eaed25SJason Evans 2125*b7eaed25SJason Evans assert(ptr != NULL); 2126*b7eaed25SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 2127*b7eaed25SJason Evans 2128*b7eaed25SJason Evans alloc_ctx_t alloc_ctx; 2129*b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2130*b7eaed25SJason Evans rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 2131*b7eaed25SJason Evans (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 2132*b7eaed25SJason Evans assert(alloc_ctx.szind != NSIZES); 2133*b7eaed25SJason Evans 2134a4bd5210SJason Evans size_t usize; 2135a4bd5210SJason Evans if (config_prof && opt_prof) { 2136*b7eaed25SJason Evans usize = sz_index2size(alloc_ctx.szind); 2137*b7eaed25SJason Evans prof_free(tsd, ptr, usize, &alloc_ctx); 2138*b7eaed25SJason Evans } else if (config_stats) { 2139*b7eaed25SJason Evans usize = sz_index2size(alloc_ctx.szind); 2140*b7eaed25SJason Evans } 2141*b7eaed25SJason Evans if (config_stats) { 2142d0e79aa3SJason Evans *tsd_thread_deallocatedp_get(tsd) += usize; 2143*b7eaed25SJason Evans } 2144df0d881dSJason Evans 2145*b7eaed25SJason Evans if (likely(!slow_path)) { 2146*b7eaed25SJason Evans idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, 2147*b7eaed25SJason Evans false); 2148*b7eaed25SJason Evans } else { 2149*b7eaed25SJason Evans idalloctm(tsd_tsdn(tsd), ptr, tcache, &alloc_ctx, false, 2150*b7eaed25SJason Evans true); 2151a4bd5210SJason Evans } 2152df0d881dSJason Evans } 2153f921d10fSJason Evans 2154*b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void 2155*b7eaed25SJason Evans isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) { 2156*b7eaed25SJason Evans if (!slow_path) { 2157*b7eaed25SJason Evans tsd_assert_fast(tsd); 2158*b7eaed25SJason Evans } 2159*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2160*b7eaed25SJason Evans if (tsd_reentrancy_level_get(tsd) != 0) { 2161*b7eaed25SJason Evans assert(slow_path); 2162*b7eaed25SJason Evans } 21631f0a49e8SJason Evans 
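/*
 * Unlike ifree() above, the caller already knows usize here (this is
 * the sized-deallocation path, e.g. sdallocx()), so the rtree lookup
 * below is only needed when profiling must record the allocation
 * context; the assert there cross-checks the caller's size against the
 * rtree's size class.
 */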
2164d0e79aa3SJason Evans assert(ptr != NULL); 2165d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 2166d0e79aa3SJason Evans 2167*b7eaed25SJason Evans alloc_ctx_t alloc_ctx, *ctx; 2168*b7eaed25SJason Evans if (config_prof && opt_prof) { 2169*b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2170*b7eaed25SJason Evans rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 2171*b7eaed25SJason Evans (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 2172*b7eaed25SJason Evans assert(alloc_ctx.szind == sz_size2index(usize)); 2173*b7eaed25SJason Evans ctx = &alloc_ctx; 2174*b7eaed25SJason Evans prof_free(tsd, ptr, usize, ctx); 2175*b7eaed25SJason Evans } else { 2176*b7eaed25SJason Evans ctx = NULL; 2177*b7eaed25SJason Evans } 2178*b7eaed25SJason Evans 2179*b7eaed25SJason Evans if (config_stats) { 2180d0e79aa3SJason Evans *tsd_thread_deallocatedp_get(tsd) += usize; 2181*b7eaed25SJason Evans } 2182*b7eaed25SJason Evans 2183*b7eaed25SJason Evans if (likely(!slow_path)) { 2184*b7eaed25SJason Evans isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, false); 2185*b7eaed25SJason Evans } else { 2186*b7eaed25SJason Evans isdalloct(tsd_tsdn(tsd), ptr, usize, tcache, ctx, true); 2187*b7eaed25SJason Evans } 2188d0e79aa3SJason Evans } 2189d0e79aa3SJason Evans 2190d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2191d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2192d0e79aa3SJason Evans JEMALLOC_ALLOC_SIZE(2) 2193*b7eaed25SJason Evans je_realloc(void *ptr, size_t size) { 2194f921d10fSJason Evans void *ret; 21951f0a49e8SJason Evans tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); 2196f921d10fSJason Evans size_t usize JEMALLOC_CC_SILENCE_INIT(0); 2197f921d10fSJason Evans size_t old_usize = 0; 2198f921d10fSJason Evans 2199d0e79aa3SJason Evans if (unlikely(size == 0)) { 2200f921d10fSJason Evans if (ptr != NULL) { 2201f921d10fSJason Evans /* realloc(ptr, 0) is equivalent to free(ptr). */ 2202f921d10fSJason Evans UTRACE(ptr, 0, 0); 2203*b7eaed25SJason Evans tcache_t *tcache; 2204*b7eaed25SJason Evans tsd_t *tsd = tsd_fetch(); 2205*b7eaed25SJason Evans if (tsd_reentrancy_level_get(tsd) == 0) { 2206*b7eaed25SJason Evans tcache = tcache_get(tsd); 2207*b7eaed25SJason Evans } else { 2208*b7eaed25SJason Evans tcache = NULL; 2209*b7eaed25SJason Evans } 2210*b7eaed25SJason Evans ifree(tsd, ptr, tcache, true); 2211*b7eaed25SJason Evans return NULL; 2212f921d10fSJason Evans } 2213f921d10fSJason Evans size = 1; 2214f921d10fSJason Evans } 2215f921d10fSJason Evans 2216d0e79aa3SJason Evans if (likely(ptr != NULL)) { 2217d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 2218*b7eaed25SJason Evans tsd_t *tsd = tsd_fetch(); 2219f921d10fSJason Evans 2220*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 22211f0a49e8SJason Evans 2222*b7eaed25SJason Evans alloc_ctx_t alloc_ctx; 2223*b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2224*b7eaed25SJason Evans rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 2225*b7eaed25SJason Evans (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 2226*b7eaed25SJason Evans assert(alloc_ctx.szind != NSIZES); 2227*b7eaed25SJason Evans old_usize = sz_index2size(alloc_ctx.szind); 2228*b7eaed25SJason Evans assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); 2229f921d10fSJason Evans if (config_prof && opt_prof) { 2230*b7eaed25SJason Evans usize = sz_s2u(size); 2231*b7eaed25SJason Evans ret = unlikely(usize == 0 || usize > LARGE_MAXCLASS) ? 
2232*b7eaed25SJason Evans NULL : irealloc_prof(tsd, ptr, old_usize, usize, 2233*b7eaed25SJason Evans &alloc_ctx); 2234f921d10fSJason Evans } else { 2235*b7eaed25SJason Evans if (config_stats) { 2236*b7eaed25SJason Evans usize = sz_s2u(size); 2237*b7eaed25SJason Evans } 2238d0e79aa3SJason Evans ret = iralloc(tsd, ptr, old_usize, size, 0, false); 2239f921d10fSJason Evans } 22401f0a49e8SJason Evans tsdn = tsd_tsdn(tsd); 2241f921d10fSJason Evans } else { 2242f921d10fSJason Evans /* realloc(NULL, size) is equivalent to malloc(size). */ 2243*b7eaed25SJason Evans return je_malloc(size); 2244f921d10fSJason Evans } 2245f921d10fSJason Evans 2246d0e79aa3SJason Evans if (unlikely(ret == NULL)) { 2247d0e79aa3SJason Evans if (config_xmalloc && unlikely(opt_xmalloc)) { 2248f921d10fSJason Evans malloc_write("<jemalloc>: Error in realloc(): " 2249f921d10fSJason Evans "out of memory\n"); 2250f921d10fSJason Evans abort(); 2251f921d10fSJason Evans } 2252f921d10fSJason Evans set_errno(ENOMEM); 2253f921d10fSJason Evans } 2254d0e79aa3SJason Evans if (config_stats && likely(ret != NULL)) { 22551f0a49e8SJason Evans tsd_t *tsd; 22561f0a49e8SJason Evans 2257*b7eaed25SJason Evans assert(usize == isalloc(tsdn, ret)); 22581f0a49e8SJason Evans tsd = tsdn_tsd(tsdn); 2259d0e79aa3SJason Evans *tsd_thread_allocatedp_get(tsd) += usize; 2260d0e79aa3SJason Evans *tsd_thread_deallocatedp_get(tsd) += old_usize; 2261f921d10fSJason Evans } 2262f921d10fSJason Evans UTRACE(ptr, size, ret); 2263*b7eaed25SJason Evans check_entry_exit_locking(tsdn); 2264*b7eaed25SJason Evans return ret; 2265f921d10fSJason Evans } 2266f921d10fSJason Evans 2267d0e79aa3SJason Evans JEMALLOC_EXPORT void JEMALLOC_NOTHROW 2268*b7eaed25SJason Evans je_free(void *ptr) { 2269f921d10fSJason Evans UTRACE(ptr, 0, 0); 2270d0e79aa3SJason Evans if (likely(ptr != NULL)) { 2271d0e79aa3SJason Evans tsd_t *tsd = tsd_fetch(); 2272*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2273*b7eaed25SJason Evans 2274*b7eaed25SJason Evans tcache_t *tcache; 2275*b7eaed25SJason Evans if (likely(tsd_fast(tsd))) { 2276*b7eaed25SJason Evans tsd_assert_fast(tsd); 2277*b7eaed25SJason Evans /* Unconditionally get tcache ptr on fast path. */ 2278*b7eaed25SJason Evans tcache = tsd_tcachep_get(tsd); 2279*b7eaed25SJason Evans ifree(tsd, ptr, tcache, false); 2280*b7eaed25SJason Evans } else { 2281*b7eaed25SJason Evans if (likely(tsd_reentrancy_level_get(tsd) == 0)) { 2282*b7eaed25SJason Evans tcache = tcache_get(tsd); 2283*b7eaed25SJason Evans } else { 2284*b7eaed25SJason Evans tcache = NULL; 2285*b7eaed25SJason Evans } 2286*b7eaed25SJason Evans ifree(tsd, ptr, tcache, true); 2287*b7eaed25SJason Evans } 2288*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2289d0e79aa3SJason Evans } 2290a4bd5210SJason Evans } 2291a4bd5210SJason Evans 2292a4bd5210SJason Evans /* 2293a4bd5210SJason Evans * End malloc(3)-compatible functions. 2294a4bd5210SJason Evans */ 2295a4bd5210SJason Evans /******************************************************************************/ 2296a4bd5210SJason Evans /* 2297a4bd5210SJason Evans * Begin non-standard override functions. 
2298a4bd5210SJason Evans */ 2299a4bd5210SJason Evans 2300a4bd5210SJason Evans #ifdef JEMALLOC_OVERRIDE_MEMALIGN 2301d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2302d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2303d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) 2304*b7eaed25SJason Evans je_memalign(size_t alignment, size_t size) { 2305*b7eaed25SJason Evans void *ret; 2306*b7eaed25SJason Evans static_opts_t sopts; 2307*b7eaed25SJason Evans dynamic_opts_t dopts; 2308*b7eaed25SJason Evans 2309*b7eaed25SJason Evans static_opts_init(&sopts); 2310*b7eaed25SJason Evans dynamic_opts_init(&dopts); 2311*b7eaed25SJason Evans 2312*b7eaed25SJason Evans sopts.bump_empty_alloc = true; 2313*b7eaed25SJason Evans sopts.min_alignment = 1; 2314*b7eaed25SJason Evans sopts.oom_string = 2315*b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: out of memory\n"; 2316*b7eaed25SJason Evans sopts.invalid_alignment_string = 2317*b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; 2318*b7eaed25SJason Evans sopts.null_out_result_on_error = true; 2319*b7eaed25SJason Evans 2320*b7eaed25SJason Evans dopts.result = &ret; 2321*b7eaed25SJason Evans dopts.num_items = 1; 2322*b7eaed25SJason Evans dopts.item_size = size; 2323*b7eaed25SJason Evans dopts.alignment = alignment; 2324*b7eaed25SJason Evans 2325*b7eaed25SJason Evans imalloc(&sopts, &dopts); 2326*b7eaed25SJason Evans return ret; 2327a4bd5210SJason Evans } 2328a4bd5210SJason Evans #endif 2329a4bd5210SJason Evans 2330a4bd5210SJason Evans #ifdef JEMALLOC_OVERRIDE_VALLOC 2331d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2332d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2333d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) 2334*b7eaed25SJason Evans je_valloc(size_t size) { 2335*b7eaed25SJason Evans void *ret; 2336*b7eaed25SJason Evans 2337*b7eaed25SJason Evans static_opts_t sopts; 2338*b7eaed25SJason Evans dynamic_opts_t dopts; 2339*b7eaed25SJason Evans 2340*b7eaed25SJason Evans static_opts_init(&sopts); 2341*b7eaed25SJason Evans dynamic_opts_init(&dopts); 2342*b7eaed25SJason Evans 2343*b7eaed25SJason Evans sopts.bump_empty_alloc = true; 2344*b7eaed25SJason Evans sopts.null_out_result_on_error = true; 2345*b7eaed25SJason Evans sopts.min_alignment = PAGE; 2346*b7eaed25SJason Evans sopts.oom_string = 2347*b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: out of memory\n"; 2348*b7eaed25SJason Evans sopts.invalid_alignment_string = 2349*b7eaed25SJason Evans "<jemalloc>: Error allocating aligned memory: invalid alignment\n"; 2350*b7eaed25SJason Evans 2351*b7eaed25SJason Evans dopts.result = &ret; 2352*b7eaed25SJason Evans dopts.num_items = 1; 2353*b7eaed25SJason Evans dopts.item_size = size; 2354*b7eaed25SJason Evans dopts.alignment = PAGE; 2355*b7eaed25SJason Evans 2356*b7eaed25SJason Evans imalloc(&sopts, &dopts); 2357*b7eaed25SJason Evans 2358*b7eaed25SJason Evans return ret; 2359a4bd5210SJason Evans } 2360a4bd5210SJason Evans #endif 2361a4bd5210SJason Evans 2362*b7eaed25SJason Evans #if defined(JEMALLOC_IS_MALLOC) && defined(JEMALLOC_GLIBC_MALLOC_HOOK) 2363a4bd5210SJason Evans /* 2364a4bd5210SJason Evans * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible 2365a4bd5210SJason Evans * to inconsistently reference libc's malloc(3)-compatible functions 2366a4bd5210SJason Evans * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). 2367a4bd5210SJason Evans * 2368a4bd5210SJason Evans * These definitions interpose hooks in glibc. 
The functions are actually 2369a4bd5210SJason Evans * passed an extra argument for the caller return address, which will be 2370a4bd5210SJason Evans * ignored. 2371a4bd5210SJason Evans */ 237282872ac0SJason Evans JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; 237382872ac0SJason Evans JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; 237482872ac0SJason Evans JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; 2375d0e79aa3SJason Evans # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK 237682872ac0SJason Evans JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = 2377e722f8f8SJason Evans je_memalign; 2378a4bd5210SJason Evans # endif 2379bde95144SJason Evans 2380bde95144SJason Evans # ifdef CPU_COUNT 2381bde95144SJason Evans /* 2382bde95144SJason Evans * To enable static linking with glibc, the libc specific malloc interface must 2383bde95144SJason Evans * be implemented also, so none of glibc's malloc.o functions are added to the 2384bde95144SJason Evans * link. 2385bde95144SJason Evans */ 2386bde95144SJason Evans # define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) 2387bde95144SJason Evans /* To force macro expansion of je_ prefix before stringification. */ 2388bde95144SJason Evans # define PREALIAS(je_fn) ALIAS(je_fn) 2389*b7eaed25SJason Evans # ifdef JEMALLOC_OVERRIDE___LIBC_CALLOC 2390bde95144SJason Evans void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); 2391*b7eaed25SJason Evans # endif 2392*b7eaed25SJason Evans # ifdef JEMALLOC_OVERRIDE___LIBC_FREE 2393*b7eaed25SJason Evans void __libc_free(void* ptr) PREALIAS(je_free); 2394*b7eaed25SJason Evans # endif 2395*b7eaed25SJason Evans # ifdef JEMALLOC_OVERRIDE___LIBC_MALLOC 2396*b7eaed25SJason Evans void *__libc_malloc(size_t size) PREALIAS(je_malloc); 2397*b7eaed25SJason Evans # endif 2398*b7eaed25SJason Evans # ifdef JEMALLOC_OVERRIDE___LIBC_MEMALIGN 2399bde95144SJason Evans void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); 2400*b7eaed25SJason Evans # endif 2401*b7eaed25SJason Evans # ifdef JEMALLOC_OVERRIDE___LIBC_REALLOC 2402*b7eaed25SJason Evans void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); 2403*b7eaed25SJason Evans # endif 2404*b7eaed25SJason Evans # ifdef JEMALLOC_OVERRIDE___LIBC_VALLOC 2405bde95144SJason Evans void *__libc_valloc(size_t size) PREALIAS(je_valloc); 2406*b7eaed25SJason Evans # endif 2407*b7eaed25SJason Evans # ifdef JEMALLOC_OVERRIDE___POSIX_MEMALIGN 2408*b7eaed25SJason Evans int __posix_memalign(void** r, size_t a, size_t s) PREALIAS(je_posix_memalign); 2409*b7eaed25SJason Evans # endif 2410bde95144SJason Evans # undef PREALIAS 2411bde95144SJason Evans # undef ALIAS 2412bde95144SJason Evans # endif 2413d0e79aa3SJason Evans #endif 2414a4bd5210SJason Evans 2415a4bd5210SJason Evans /* 2416a4bd5210SJason Evans * End non-standard override functions. 2417a4bd5210SJason Evans */ 2418a4bd5210SJason Evans /******************************************************************************/ 2419a4bd5210SJason Evans /* 2420a4bd5210SJason Evans * Begin non-standard functions. 
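 *
 * For illustration, a typical mallocx() call (the flag macros are from
 * the public jemalloc/jemalloc.h API):
 *
 *   void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *
 * je_mallocx() below decodes this into dopts.alignment == 64 and
 * dopts.zero == true before handing off to imalloc().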
2421a4bd5210SJason Evans */ 2422a4bd5210SJason Evans 2423d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2424d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2425d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) 2426*b7eaed25SJason Evans je_mallocx(size_t size, int flags) { 2427*b7eaed25SJason Evans void *ret; 2428*b7eaed25SJason Evans static_opts_t sopts; 2429*b7eaed25SJason Evans dynamic_opts_t dopts; 2430f921d10fSJason Evans 2431*b7eaed25SJason Evans static_opts_init(&sopts); 2432*b7eaed25SJason Evans dynamic_opts_init(&dopts); 2433f921d10fSJason Evans 2434*b7eaed25SJason Evans sopts.assert_nonempty_alloc = true; 2435*b7eaed25SJason Evans sopts.null_out_result_on_error = true; 2436*b7eaed25SJason Evans sopts.oom_string = "<jemalloc>: Error in mallocx(): out of memory\n"; 2437*b7eaed25SJason Evans 2438*b7eaed25SJason Evans dopts.result = &ret; 2439*b7eaed25SJason Evans dopts.num_items = 1; 2440*b7eaed25SJason Evans dopts.item_size = size; 2441*b7eaed25SJason Evans if (unlikely(flags != 0)) { 2442*b7eaed25SJason Evans if ((flags & MALLOCX_LG_ALIGN_MASK) != 0) { 2443*b7eaed25SJason Evans dopts.alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags); 2444f921d10fSJason Evans } 24451f0a49e8SJason Evans 2446*b7eaed25SJason Evans dopts.zero = MALLOCX_ZERO_GET(flags); 2447*b7eaed25SJason Evans 2448*b7eaed25SJason Evans if ((flags & MALLOCX_TCACHE_MASK) != 0) { 2449*b7eaed25SJason Evans if ((flags & MALLOCX_TCACHE_MASK) 2450*b7eaed25SJason Evans == MALLOCX_TCACHE_NONE) { 2451*b7eaed25SJason Evans dopts.tcache_ind = TCACHE_IND_NONE; 2452*b7eaed25SJason Evans } else { 2453*b7eaed25SJason Evans dopts.tcache_ind = MALLOCX_TCACHE_GET(flags); 2454*b7eaed25SJason Evans } 2455*b7eaed25SJason Evans } else { 2456*b7eaed25SJason Evans dopts.tcache_ind = TCACHE_IND_AUTOMATIC; 2457*b7eaed25SJason Evans } 2458*b7eaed25SJason Evans 2459*b7eaed25SJason Evans if ((flags & MALLOCX_ARENA_MASK) != 0) 2460*b7eaed25SJason Evans dopts.arena_ind = MALLOCX_ARENA_GET(flags); 2461*b7eaed25SJason Evans } 2462*b7eaed25SJason Evans 2463*b7eaed25SJason Evans imalloc(&sopts, &dopts); 2464*b7eaed25SJason Evans return ret; 2465f921d10fSJason Evans } 2466f921d10fSJason Evans 2467f921d10fSJason Evans static void * 2468*b7eaed25SJason Evans irallocx_prof_sample(tsdn_t *tsdn, void *old_ptr, size_t old_usize, 2469536b3538SJason Evans size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, 2470*b7eaed25SJason Evans prof_tctx_t *tctx) { 2471f921d10fSJason Evans void *p; 2472f921d10fSJason Evans 2473*b7eaed25SJason Evans if (tctx == NULL) { 2474*b7eaed25SJason Evans return NULL; 2475*b7eaed25SJason Evans } 2476d0e79aa3SJason Evans if (usize <= SMALL_MAXCLASS) { 2477*b7eaed25SJason Evans p = iralloct(tsdn, old_ptr, old_usize, LARGE_MINCLASS, 2478*b7eaed25SJason Evans alignment, zero, tcache, arena); 2479*b7eaed25SJason Evans if (p == NULL) { 2480*b7eaed25SJason Evans return NULL; 2481*b7eaed25SJason Evans } 2482*b7eaed25SJason Evans arena_prof_promote(tsdn, p, usize); 2483f921d10fSJason Evans } else { 2484*b7eaed25SJason Evans p = iralloct(tsdn, old_ptr, old_usize, usize, alignment, zero, 2485d0e79aa3SJason Evans tcache, arena); 2486f921d10fSJason Evans } 2487f921d10fSJason Evans 2488*b7eaed25SJason Evans return p; 2489f921d10fSJason Evans } 2490f921d10fSJason Evans 2491*b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE void * 2492536b3538SJason Evans irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size, 2493d0e79aa3SJason Evans size_t alignment, size_t *usize, bool 
zero, tcache_t *tcache, 2494*b7eaed25SJason Evans arena_t *arena, alloc_ctx_t *alloc_ctx) { 2495f921d10fSJason Evans void *p; 2496536b3538SJason Evans bool prof_active; 2497d0e79aa3SJason Evans prof_tctx_t *old_tctx, *tctx; 2498f921d10fSJason Evans 2499536b3538SJason Evans prof_active = prof_active_get_unlocked(); 2500*b7eaed25SJason Evans old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr, alloc_ctx); 250162b2691eSJason Evans tctx = prof_alloc_prep(tsd, *usize, prof_active, false); 2502d0e79aa3SJason Evans if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { 2503*b7eaed25SJason Evans p = irallocx_prof_sample(tsd_tsdn(tsd), old_ptr, old_usize, 2504*b7eaed25SJason Evans *usize, alignment, zero, tcache, arena, tctx); 2505d0e79aa3SJason Evans } else { 2506*b7eaed25SJason Evans p = iralloct(tsd_tsdn(tsd), old_ptr, old_usize, size, alignment, 2507*b7eaed25SJason Evans zero, tcache, arena); 2508f921d10fSJason Evans } 2509d0e79aa3SJason Evans if (unlikely(p == NULL)) { 251062b2691eSJason Evans prof_alloc_rollback(tsd, tctx, false); 2511*b7eaed25SJason Evans return NULL; 2512d0e79aa3SJason Evans } 2513f921d10fSJason Evans 2514536b3538SJason Evans if (p == old_ptr && alignment != 0) { 2515f921d10fSJason Evans /* 2516f921d10fSJason Evans * The allocation did not move, so it is possible that the size 2517f921d10fSJason Evans * class is smaller than would guarantee the requested 2518f921d10fSJason Evans * alignment, and that the alignment constraint was 2519f921d10fSJason Evans * serendipitously satisfied. Additionally, old_usize may not 2520f921d10fSJason Evans * be the same as the current usize because of in-place large 2521f921d10fSJason Evans * reallocation. Therefore, query the actual value of usize. 2522f921d10fSJason Evans */ 2523*b7eaed25SJason Evans *usize = isalloc(tsd_tsdn(tsd), p); 2524f921d10fSJason Evans } 252562b2691eSJason Evans prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr, 2526536b3538SJason Evans old_usize, old_tctx); 2527f921d10fSJason Evans 2528*b7eaed25SJason Evans return p; 2529f921d10fSJason Evans } 2530f921d10fSJason Evans 2531d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2532d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2533d0e79aa3SJason Evans JEMALLOC_ALLOC_SIZE(2) 2534*b7eaed25SJason Evans je_rallocx(void *ptr, size_t size, int flags) { 2535f921d10fSJason Evans void *p; 2536d0e79aa3SJason Evans tsd_t *tsd; 2537d0e79aa3SJason Evans size_t usize; 2538d0e79aa3SJason Evans size_t old_usize; 2539d0e79aa3SJason Evans size_t alignment = MALLOCX_ALIGN_GET(flags); 2540f921d10fSJason Evans bool zero = flags & MALLOCX_ZERO; 2541f921d10fSJason Evans arena_t *arena; 2542d0e79aa3SJason Evans tcache_t *tcache; 2543f921d10fSJason Evans 2544f921d10fSJason Evans assert(ptr != NULL); 2545f921d10fSJason Evans assert(size != 0); 2546d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 2547d0e79aa3SJason Evans tsd = tsd_fetch(); 2548*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2549f921d10fSJason Evans 2550d0e79aa3SJason Evans if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) { 2551d0e79aa3SJason Evans unsigned arena_ind = MALLOCX_ARENA_GET(flags); 25521f0a49e8SJason Evans arena = arena_get(tsd_tsdn(tsd), arena_ind, true); 2553*b7eaed25SJason Evans if (unlikely(arena == NULL)) { 2554d0e79aa3SJason Evans goto label_oom; 2555*b7eaed25SJason Evans } 2556*b7eaed25SJason Evans } else { 2557f921d10fSJason Evans arena = NULL; 2558*b7eaed25SJason Evans } 2559f921d10fSJason Evans 2560d0e79aa3SJason Evans if (unlikely((flags & 
MALLOCX_TCACHE_MASK) != 0)) { 2561*b7eaed25SJason Evans if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { 2562d0e79aa3SJason Evans tcache = NULL; 2563f921d10fSJason Evans } else { 2564*b7eaed25SJason Evans tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); 2565*b7eaed25SJason Evans } 2566*b7eaed25SJason Evans } else { 2567*b7eaed25SJason Evans tcache = tcache_get(tsd); 2568*b7eaed25SJason Evans } 2569*b7eaed25SJason Evans 2570*b7eaed25SJason Evans alloc_ctx_t alloc_ctx; 2571*b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2572*b7eaed25SJason Evans rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 2573*b7eaed25SJason Evans (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 2574*b7eaed25SJason Evans assert(alloc_ctx.szind != NSIZES); 2575*b7eaed25SJason Evans old_usize = sz_index2size(alloc_ctx.szind); 2576*b7eaed25SJason Evans assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); 2577*b7eaed25SJason Evans if (config_prof && opt_prof) { 2578*b7eaed25SJason Evans usize = (alignment == 0) ? 2579*b7eaed25SJason Evans sz_s2u(size) : sz_sa2u(size, alignment); 2580*b7eaed25SJason Evans if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { 2581f921d10fSJason Evans goto label_oom; 2582*b7eaed25SJason Evans } 2583*b7eaed25SJason Evans p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize, 2584*b7eaed25SJason Evans zero, tcache, arena, &alloc_ctx); 2585*b7eaed25SJason Evans if (unlikely(p == NULL)) { 2586*b7eaed25SJason Evans goto label_oom; 2587*b7eaed25SJason Evans } 2588*b7eaed25SJason Evans } else { 2589*b7eaed25SJason Evans p = iralloct(tsd_tsdn(tsd), ptr, old_usize, size, alignment, 2590*b7eaed25SJason Evans zero, tcache, arena); 2591*b7eaed25SJason Evans if (unlikely(p == NULL)) { 2592*b7eaed25SJason Evans goto label_oom; 2593*b7eaed25SJason Evans } 2594*b7eaed25SJason Evans if (config_stats) { 2595*b7eaed25SJason Evans usize = isalloc(tsd_tsdn(tsd), p); 2596*b7eaed25SJason Evans } 2597f921d10fSJason Evans } 2598d0e79aa3SJason Evans assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0)); 2599f921d10fSJason Evans 2600f921d10fSJason Evans if (config_stats) { 2601d0e79aa3SJason Evans *tsd_thread_allocatedp_get(tsd) += usize; 2602d0e79aa3SJason Evans *tsd_thread_deallocatedp_get(tsd) += old_usize; 2603f921d10fSJason Evans } 2604f921d10fSJason Evans UTRACE(ptr, size, p); 2605*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2606*b7eaed25SJason Evans return p; 2607f921d10fSJason Evans label_oom: 2608d0e79aa3SJason Evans if (config_xmalloc && unlikely(opt_xmalloc)) { 2609f921d10fSJason Evans malloc_write("<jemalloc>: Error in rallocx(): out of memory\n"); 2610f921d10fSJason Evans abort(); 2611f921d10fSJason Evans } 2612f921d10fSJason Evans UTRACE(ptr, size, 0); 2613*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2614*b7eaed25SJason Evans return NULL; 2615f921d10fSJason Evans } 2616f921d10fSJason Evans 2617*b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE size_t 26181f0a49e8SJason Evans ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, 2619*b7eaed25SJason Evans size_t extra, size_t alignment, bool zero) { 2620f921d10fSJason Evans size_t usize; 2621f921d10fSJason Evans 2622*b7eaed25SJason Evans if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero)) { 2623*b7eaed25SJason Evans return old_usize; 2624*b7eaed25SJason Evans } 2625*b7eaed25SJason Evans usize = isalloc(tsdn, ptr); 2626f921d10fSJason Evans 2627*b7eaed25SJason Evans return usize; 2628f921d10fSJason Evans } 
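/*
 * Illustrative (non-build) sketch of the caller-side contract that the
 * ixallocx_*() helpers in this section serve: je_xallocx(), defined below,
 * resizes only in place and returns the resulting usable size, so a caller
 * that must have the larger size falls back to a moving je_rallocx().  The
 * buf and want names are hypothetical:
 *
 *	if (je_xallocx(buf, want, 0, 0) < want) {
 *		void *tmp = je_rallocx(buf, want, 0);
 *		if (tmp != NULL) {
 *			buf = tmp;
 *		}
 *	}
 */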
2629f921d10fSJason Evans 2630f921d10fSJason Evans static size_t 26311f0a49e8SJason Evans ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size, 2632*b7eaed25SJason Evans size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx) { 2633f921d10fSJason Evans size_t usize; 2634f921d10fSJason Evans 2635*b7eaed25SJason Evans if (tctx == NULL) { 2636*b7eaed25SJason Evans return old_usize; 2637*b7eaed25SJason Evans } 26381f0a49e8SJason Evans usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment, 2639df0d881dSJason Evans zero); 2640f921d10fSJason Evans 2641*b7eaed25SJason Evans return usize; 2642f921d10fSJason Evans } 2643f921d10fSJason Evans 2644*b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE size_t 2645d0e79aa3SJason Evans ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size, 2646*b7eaed25SJason Evans size_t extra, size_t alignment, bool zero, alloc_ctx_t *alloc_ctx) { 2647536b3538SJason Evans size_t usize_max, usize; 2648536b3538SJason Evans bool prof_active; 2649d0e79aa3SJason Evans prof_tctx_t *old_tctx, *tctx; 2650f921d10fSJason Evans 2651536b3538SJason Evans prof_active = prof_active_get_unlocked(); 2652*b7eaed25SJason Evans old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr, alloc_ctx); 2653d0e79aa3SJason Evans /* 2654d0e79aa3SJason Evans * usize isn't knowable before ixalloc() returns when extra is non-zero. 2655d0e79aa3SJason Evans * Therefore, compute its maximum possible value and use that in 2656d0e79aa3SJason Evans * prof_alloc_prep() to decide whether to capture a backtrace. 2657d0e79aa3SJason Evans * prof_realloc() will use the actual usize to decide whether to sample. 2658d0e79aa3SJason Evans */ 2659df0d881dSJason Evans if (alignment == 0) { 2660*b7eaed25SJason Evans usize_max = sz_s2u(size+extra); 2661*b7eaed25SJason Evans assert(usize_max > 0 && usize_max <= LARGE_MAXCLASS); 2662df0d881dSJason Evans } else { 2663*b7eaed25SJason Evans usize_max = sz_sa2u(size+extra, alignment); 2664*b7eaed25SJason Evans if (unlikely(usize_max == 0 || usize_max > LARGE_MAXCLASS)) { 2665df0d881dSJason Evans /* 2666df0d881dSJason Evans * usize_max is out of range, and chances are that 2667df0d881dSJason Evans * allocation will fail, but use the maximum possible 2668df0d881dSJason Evans * value and carry on with prof_alloc_prep(), just in 2669df0d881dSJason Evans * case allocation succeeds. 
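			 * (With a sufficiently large alignment, sz_sa2u() can
			 * return 0 or a value above LARGE_MAXCLASS even when
			 * size + extra itself is modest.)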
2670df0d881dSJason Evans */ 2671*b7eaed25SJason Evans usize_max = LARGE_MAXCLASS; 2672df0d881dSJason Evans } 2673df0d881dSJason Evans } 2674536b3538SJason Evans tctx = prof_alloc_prep(tsd, usize_max, prof_active, false); 2675df0d881dSJason Evans 2676d0e79aa3SJason Evans if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) { 26771f0a49e8SJason Evans usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize, 26781f0a49e8SJason Evans size, extra, alignment, zero, tctx); 2679f921d10fSJason Evans } else { 26801f0a49e8SJason Evans usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, 26811f0a49e8SJason Evans extra, alignment, zero); 2682f921d10fSJason Evans } 2683536b3538SJason Evans if (usize == old_usize) { 2684d0e79aa3SJason Evans prof_alloc_rollback(tsd, tctx, false); 2685*b7eaed25SJason Evans return usize; 2686d0e79aa3SJason Evans } 2687536b3538SJason Evans prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize, 2688536b3538SJason Evans old_tctx); 2689f921d10fSJason Evans 2690*b7eaed25SJason Evans return usize; 2691f921d10fSJason Evans } 2692f921d10fSJason Evans 2693d0e79aa3SJason Evans JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW 2694*b7eaed25SJason Evans je_xallocx(void *ptr, size_t size, size_t extra, int flags) { 2695d0e79aa3SJason Evans tsd_t *tsd; 2696f921d10fSJason Evans size_t usize, old_usize; 2697d0e79aa3SJason Evans size_t alignment = MALLOCX_ALIGN_GET(flags); 2698f921d10fSJason Evans bool zero = flags & MALLOCX_ZERO; 2699f921d10fSJason Evans 2700f921d10fSJason Evans assert(ptr != NULL); 2701f921d10fSJason Evans assert(size != 0); 2702f921d10fSJason Evans assert(SIZE_T_MAX - size >= extra); 2703d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 2704d0e79aa3SJason Evans tsd = tsd_fetch(); 2705*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2706f921d10fSJason Evans 2707*b7eaed25SJason Evans alloc_ctx_t alloc_ctx; 2708*b7eaed25SJason Evans rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 2709*b7eaed25SJason Evans rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 2710*b7eaed25SJason Evans (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 2711*b7eaed25SJason Evans assert(alloc_ctx.szind != NSIZES); 2712*b7eaed25SJason Evans old_usize = sz_index2size(alloc_ctx.szind); 2713*b7eaed25SJason Evans assert(old_usize == isalloc(tsd_tsdn(tsd), ptr)); 2714df0d881dSJason Evans /* 2715df0d881dSJason Evans * The API explicitly absolves itself of protecting against (size + 2716df0d881dSJason Evans * extra) numerical overflow, but we may need to clamp extra to avoid 2717*b7eaed25SJason Evans * exceeding LARGE_MAXCLASS. 2718df0d881dSJason Evans * 2719df0d881dSJason Evans * Ordinarily, size limit checking is handled deeper down, but here we 2720df0d881dSJason Evans * have to check as part of (size + extra) clamping, since we need the 2721df0d881dSJason Evans * clamped value in the above helper functions. 
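	 * For example, if size == LARGE_MAXCLASS - 1, any extra greater than
	 * 1 is clamped to 1 so that size + extra stays at LARGE_MAXCLASS.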
2722df0d881dSJason Evans */ 2723*b7eaed25SJason Evans if (unlikely(size > LARGE_MAXCLASS)) { 2724536b3538SJason Evans usize = old_usize; 2725536b3538SJason Evans goto label_not_resized; 2726536b3538SJason Evans } 2727*b7eaed25SJason Evans if (unlikely(LARGE_MAXCLASS - size < extra)) { 2728*b7eaed25SJason Evans extra = LARGE_MAXCLASS - size; 2729*b7eaed25SJason Evans } 2730f921d10fSJason Evans 2731f921d10fSJason Evans if (config_prof && opt_prof) { 2732d0e79aa3SJason Evans usize = ixallocx_prof(tsd, ptr, old_usize, size, extra, 2733*b7eaed25SJason Evans alignment, zero, &alloc_ctx); 2734f921d10fSJason Evans } else { 27351f0a49e8SJason Evans usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size, 27361f0a49e8SJason Evans extra, alignment, zero); 2737f921d10fSJason Evans } 2738*b7eaed25SJason Evans if (unlikely(usize == old_usize)) { 2739f921d10fSJason Evans goto label_not_resized; 2740*b7eaed25SJason Evans } 2741f921d10fSJason Evans 2742f921d10fSJason Evans if (config_stats) { 2743d0e79aa3SJason Evans *tsd_thread_allocatedp_get(tsd) += usize; 2744d0e79aa3SJason Evans *tsd_thread_deallocatedp_get(tsd) += old_usize; 2745f921d10fSJason Evans } 2746f921d10fSJason Evans label_not_resized: 2747f921d10fSJason Evans UTRACE(ptr, size, ptr); 2748*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2749*b7eaed25SJason Evans return usize; 2750f921d10fSJason Evans } 2751f921d10fSJason Evans 2752d0e79aa3SJason Evans JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW 2753d0e79aa3SJason Evans JEMALLOC_ATTR(pure) 2754*b7eaed25SJason Evans je_sallocx(const void *ptr, int flags) { 2755f921d10fSJason Evans size_t usize; 27561f0a49e8SJason Evans tsdn_t *tsdn; 2757a4bd5210SJason Evans 2758d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 2759*b7eaed25SJason Evans assert(ptr != NULL); 2760a4bd5210SJason Evans 27611f0a49e8SJason Evans tsdn = tsdn_fetch(); 2762*b7eaed25SJason Evans check_entry_exit_locking(tsdn); 2763a4bd5210SJason Evans 2764*b7eaed25SJason Evans if (config_debug || force_ivsalloc) { 2765*b7eaed25SJason Evans usize = ivsalloc(tsdn, ptr); 2766*b7eaed25SJason Evans assert(force_ivsalloc || usize != 0); 2767*b7eaed25SJason Evans } else { 2768*b7eaed25SJason Evans usize = isalloc(tsdn, ptr); 2769*b7eaed25SJason Evans } 27701f0a49e8SJason Evans 2771*b7eaed25SJason Evans check_entry_exit_locking(tsdn); 2772*b7eaed25SJason Evans return usize; 2773a4bd5210SJason Evans } 2774a4bd5210SJason Evans 2775d0e79aa3SJason Evans JEMALLOC_EXPORT void JEMALLOC_NOTHROW 2776*b7eaed25SJason Evans je_dallocx(void *ptr, int flags) { 2777f921d10fSJason Evans assert(ptr != NULL); 2778d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 2779f921d10fSJason Evans 2780*b7eaed25SJason Evans tsd_t *tsd = tsd_fetch(); 2781*b7eaed25SJason Evans bool fast = tsd_fast(tsd); 2782*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2783*b7eaed25SJason Evans 2784*b7eaed25SJason Evans tcache_t *tcache; 2785d0e79aa3SJason Evans if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { 2786*b7eaed25SJason Evans /* Not allowed to be reentrant and specify a custom tcache. 
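		 * Reentrant calls never pass MALLOCX_TCACHE_* flags; they are
		 * routed through the automatic-tcache fallback below instead.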
*/ 2787*b7eaed25SJason Evans assert(tsd_reentrancy_level_get(tsd) == 0); 2788*b7eaed25SJason Evans if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { 2789d0e79aa3SJason Evans tcache = NULL; 2790*b7eaed25SJason Evans } else { 2791d0e79aa3SJason Evans tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); 2792*b7eaed25SJason Evans } 2793*b7eaed25SJason Evans } else { 2794*b7eaed25SJason Evans if (likely(fast)) { 2795*b7eaed25SJason Evans tcache = tsd_tcachep_get(tsd); 2796*b7eaed25SJason Evans assert(tcache == tcache_get(tsd)); 2797*b7eaed25SJason Evans } else { 2798*b7eaed25SJason Evans if (likely(tsd_reentrancy_level_get(tsd) == 0)) { 2799*b7eaed25SJason Evans tcache = tcache_get(tsd); 2800*b7eaed25SJason Evans } else { 2801*b7eaed25SJason Evans tcache = NULL; 2802*b7eaed25SJason Evans } 2803*b7eaed25SJason Evans } 2804*b7eaed25SJason Evans } 2805f921d10fSJason Evans 2806f921d10fSJason Evans UTRACE(ptr, 0, 0); 2807*b7eaed25SJason Evans if (likely(fast)) { 2808*b7eaed25SJason Evans tsd_assert_fast(tsd); 28091f0a49e8SJason Evans ifree(tsd, ptr, tcache, false); 2810*b7eaed25SJason Evans } else { 28111f0a49e8SJason Evans ifree(tsd, ptr, tcache, true); 2812*b7eaed25SJason Evans } 2813*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2814f921d10fSJason Evans } 2815f921d10fSJason Evans 2816*b7eaed25SJason Evans JEMALLOC_ALWAYS_INLINE size_t 2817*b7eaed25SJason Evans inallocx(tsdn_t *tsdn, size_t size, int flags) { 2818*b7eaed25SJason Evans check_entry_exit_locking(tsdn); 2819*b7eaed25SJason Evans 2820f921d10fSJason Evans size_t usize; 2821*b7eaed25SJason Evans if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0)) { 2822*b7eaed25SJason Evans usize = sz_s2u(size); 2823*b7eaed25SJason Evans } else { 2824*b7eaed25SJason Evans usize = sz_sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags)); 2825*b7eaed25SJason Evans } 2826*b7eaed25SJason Evans check_entry_exit_locking(tsdn); 2827*b7eaed25SJason Evans return usize; 2828a4bd5210SJason Evans } 2829a4bd5210SJason Evans 2830d0e79aa3SJason Evans JEMALLOC_EXPORT void JEMALLOC_NOTHROW 2831*b7eaed25SJason Evans je_sdallocx(void *ptr, size_t size, int flags) { 2832d0e79aa3SJason Evans assert(ptr != NULL); 2833d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 28341f0a49e8SJason Evans 2835*b7eaed25SJason Evans tsd_t *tsd = tsd_fetch(); 2836*b7eaed25SJason Evans bool fast = tsd_fast(tsd); 2837*b7eaed25SJason Evans size_t usize = inallocx(tsd_tsdn(tsd), size, flags); 2838*b7eaed25SJason Evans assert(usize == isalloc(tsd_tsdn(tsd), ptr)); 2839*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2840*b7eaed25SJason Evans 2841*b7eaed25SJason Evans tcache_t *tcache; 2842d0e79aa3SJason Evans if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) { 2843*b7eaed25SJason Evans /* Not allowed to be reentrant and specify a custom tcache. 
*/ 2844*b7eaed25SJason Evans assert(tsd_reentrancy_level_get(tsd) == 0); 2845*b7eaed25SJason Evans if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE) { 2846d0e79aa3SJason Evans tcache = NULL; 2847*b7eaed25SJason Evans } else { 2848d0e79aa3SJason Evans tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags)); 2849*b7eaed25SJason Evans } 2850*b7eaed25SJason Evans } else { 2851*b7eaed25SJason Evans if (likely(fast)) { 2852*b7eaed25SJason Evans tcache = tsd_tcachep_get(tsd); 2853*b7eaed25SJason Evans assert(tcache == tcache_get(tsd)); 2854*b7eaed25SJason Evans } else { 2855*b7eaed25SJason Evans if (likely(tsd_reentrancy_level_get(tsd) == 0)) { 2856*b7eaed25SJason Evans tcache = tcache_get(tsd); 2857*b7eaed25SJason Evans } else { 2858*b7eaed25SJason Evans tcache = NULL; 2859*b7eaed25SJason Evans } 2860*b7eaed25SJason Evans } 2861*b7eaed25SJason Evans } 2862d0e79aa3SJason Evans 2863d0e79aa3SJason Evans UTRACE(ptr, 0, 0); 2864*b7eaed25SJason Evans if (likely(fast)) { 2865*b7eaed25SJason Evans tsd_assert_fast(tsd); 28661f0a49e8SJason Evans isfree(tsd, ptr, usize, tcache, false); 2867*b7eaed25SJason Evans } else { 28681f0a49e8SJason Evans isfree(tsd, ptr, usize, tcache, true); 2869*b7eaed25SJason Evans } 2870*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2871d0e79aa3SJason Evans } 2872d0e79aa3SJason Evans 2873d0e79aa3SJason Evans JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW 2874d0e79aa3SJason Evans JEMALLOC_ATTR(pure) 2875*b7eaed25SJason Evans je_nallocx(size_t size, int flags) { 2876df0d881dSJason Evans size_t usize; 28771f0a49e8SJason Evans tsdn_t *tsdn; 2878d0e79aa3SJason Evans 2879d0e79aa3SJason Evans assert(size != 0); 2880d0e79aa3SJason Evans 2881*b7eaed25SJason Evans if (unlikely(malloc_init())) { 2882*b7eaed25SJason Evans return 0; 2883*b7eaed25SJason Evans } 2884d0e79aa3SJason Evans 28851f0a49e8SJason Evans tsdn = tsdn_fetch(); 2886*b7eaed25SJason Evans check_entry_exit_locking(tsdn); 28871f0a49e8SJason Evans 28881f0a49e8SJason Evans usize = inallocx(tsdn, size, flags); 2889*b7eaed25SJason Evans if (unlikely(usize > LARGE_MAXCLASS)) { 2890*b7eaed25SJason Evans return 0; 2891*b7eaed25SJason Evans } 2892df0d881dSJason Evans 2893*b7eaed25SJason Evans check_entry_exit_locking(tsdn); 2894*b7eaed25SJason Evans return usize; 2895d0e79aa3SJason Evans } 2896d0e79aa3SJason Evans 2897d0e79aa3SJason Evans JEMALLOC_EXPORT int JEMALLOC_NOTHROW 2898a4bd5210SJason Evans je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, 2899*b7eaed25SJason Evans size_t newlen) { 29001f0a49e8SJason Evans int ret; 29011f0a49e8SJason Evans tsd_t *tsd; 2902a4bd5210SJason Evans 2903*b7eaed25SJason Evans if (unlikely(malloc_init())) { 2904*b7eaed25SJason Evans return EAGAIN; 2905*b7eaed25SJason Evans } 2906a4bd5210SJason Evans 29071f0a49e8SJason Evans tsd = tsd_fetch(); 2908*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 29091f0a49e8SJason Evans ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen); 2910*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2911*b7eaed25SJason Evans return ret; 2912a4bd5210SJason Evans } 2913a4bd5210SJason Evans 2914d0e79aa3SJason Evans JEMALLOC_EXPORT int JEMALLOC_NOTHROW 2915*b7eaed25SJason Evans je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) { 29161f0a49e8SJason Evans int ret; 29171f0a49e8SJason Evans tsdn_t *tsdn; 2918a4bd5210SJason Evans 2919*b7eaed25SJason Evans if (unlikely(malloc_init())) { 2920*b7eaed25SJason Evans return EAGAIN; 2921*b7eaed25SJason Evans } 2922a4bd5210SJason Evans 29231f0a49e8SJason Evans 
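	/*
	 * The name is parsed once into a MIB ("Management Information Base")
	 * so that repeated lookups can go through je_mallctlbymib() and skip
	 * the string processing.  Illustrative (non-build) sketch; the locals
	 * are hypothetical and "arenas.narenas" is a standard mallctl name:
	 *
	 *	size_t mib[4];
	 *	size_t miblen = sizeof(mib) / sizeof(mib[0]);
	 *	unsigned narenas;
	 *	size_t len = sizeof(narenas);
	 *	if (je_mallctlnametomib("arenas.narenas", mib, &miblen) == 0) {
	 *		je_mallctlbymib(mib, miblen, &narenas, &len, NULL, 0);
	 *	}
	 */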
tsdn = tsdn_fetch(); 2924*b7eaed25SJason Evans check_entry_exit_locking(tsdn); 29251f0a49e8SJason Evans ret = ctl_nametomib(tsdn, name, mibp, miblenp); 2926*b7eaed25SJason Evans check_entry_exit_locking(tsdn); 2927*b7eaed25SJason Evans return ret; 2928a4bd5210SJason Evans } 2929a4bd5210SJason Evans 2930d0e79aa3SJason Evans JEMALLOC_EXPORT int JEMALLOC_NOTHROW 2931a4bd5210SJason Evans je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, 2932*b7eaed25SJason Evans void *newp, size_t newlen) { 29331f0a49e8SJason Evans int ret; 29341f0a49e8SJason Evans tsd_t *tsd; 2935a4bd5210SJason Evans 2936*b7eaed25SJason Evans if (unlikely(malloc_init())) { 2937*b7eaed25SJason Evans return EAGAIN; 2938*b7eaed25SJason Evans } 2939a4bd5210SJason Evans 29401f0a49e8SJason Evans tsd = tsd_fetch(); 2941*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 29421f0a49e8SJason Evans ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); 2943*b7eaed25SJason Evans check_entry_exit_locking(tsd_tsdn(tsd)); 2944*b7eaed25SJason Evans return ret; 2945a4bd5210SJason Evans } 2946a4bd5210SJason Evans 2947d0e79aa3SJason Evans JEMALLOC_EXPORT void JEMALLOC_NOTHROW 2948f921d10fSJason Evans je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, 2949*b7eaed25SJason Evans const char *opts) { 29501f0a49e8SJason Evans tsdn_t *tsdn; 2951f921d10fSJason Evans 29521f0a49e8SJason Evans tsdn = tsdn_fetch(); 2953*b7eaed25SJason Evans check_entry_exit_locking(tsdn); 2954f921d10fSJason Evans stats_print(write_cb, cbopaque, opts); 2955*b7eaed25SJason Evans check_entry_exit_locking(tsdn); 2956f921d10fSJason Evans } 2957f921d10fSJason Evans 2958d0e79aa3SJason Evans JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW 2959*b7eaed25SJason Evans je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) { 2960f921d10fSJason Evans size_t ret; 29611f0a49e8SJason Evans tsdn_t *tsdn; 2962f921d10fSJason Evans 2963d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 2964f921d10fSJason Evans 29651f0a49e8SJason Evans tsdn = tsdn_fetch(); 2966*b7eaed25SJason Evans check_entry_exit_locking(tsdn); 2967f921d10fSJason Evans 2968*b7eaed25SJason Evans if (unlikely(ptr == NULL)) { 2969*b7eaed25SJason Evans ret = 0; 2970*b7eaed25SJason Evans } else { 2971*b7eaed25SJason Evans if (config_debug || force_ivsalloc) { 2972*b7eaed25SJason Evans ret = ivsalloc(tsdn, ptr); 2973*b7eaed25SJason Evans assert(force_ivsalloc || ret != 0); 2974*b7eaed25SJason Evans } else { 2975*b7eaed25SJason Evans ret = isalloc(tsdn, ptr); 2976*b7eaed25SJason Evans } 2977*b7eaed25SJason Evans } 29781f0a49e8SJason Evans 2979*b7eaed25SJason Evans check_entry_exit_locking(tsdn); 2980*b7eaed25SJason Evans return ret; 2981f921d10fSJason Evans } 2982f921d10fSJason Evans 2983a4bd5210SJason Evans /* 2984a4bd5210SJason Evans * End non-standard functions. 2985a4bd5210SJason Evans */ 2986a4bd5210SJason Evans /******************************************************************************/ 2987a4bd5210SJason Evans /* 2988d0e79aa3SJason Evans * Begin compatibility functions. 
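 * (These shims map the historical experimental allocm() API onto the
 * *allocx() functions above and report ALLOCM_* status codes.)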
2989a4bd5210SJason Evans  */
2990d0e79aa3SJason Evans 
2991d0e79aa3SJason Evans #define	ALLOCM_LG_ALIGN(la)	(la)
2992d0e79aa3SJason Evans #define	ALLOCM_ALIGN(a)	(ffsl(a)-1)
2993d0e79aa3SJason Evans #define	ALLOCM_ZERO	((int)0x40)
2994d0e79aa3SJason Evans #define	ALLOCM_NO_MOVE	((int)0x80)
2995d0e79aa3SJason Evans 
2996d0e79aa3SJason Evans #define	ALLOCM_SUCCESS		0
2997d0e79aa3SJason Evans #define	ALLOCM_ERR_OOM		1
2998d0e79aa3SJason Evans #define	ALLOCM_ERR_NOT_MOVED	2
2999a4bd5210SJason Evans 
3000a4bd5210SJason Evans int
3001*b7eaed25SJason Evans je_allocm(void **ptr, size_t *rsize, size_t size, int flags) {
3002a4bd5210SJason Evans 	assert(ptr != NULL);
3003a4bd5210SJason Evans 
3004*b7eaed25SJason Evans 	void *p = je_mallocx(size, flags);
3005*b7eaed25SJason Evans 	if (p == NULL) {
3006a4bd5210SJason Evans 		return ALLOCM_ERR_OOM;
3007*b7eaed25SJason Evans 	}
3008*b7eaed25SJason Evans 	if (rsize != NULL) {
3009*b7eaed25SJason Evans 		*rsize = isalloc(tsdn_fetch(), p);
3010*b7eaed25SJason Evans 	}
3011f921d10fSJason Evans 	*ptr = p;
3012*b7eaed25SJason Evans 	return ALLOCM_SUCCESS;
3013a4bd5210SJason Evans }
3014a4bd5210SJason Evans 
3015a4bd5210SJason Evans int
3016*b7eaed25SJason Evans je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) {
3017a4bd5210SJason Evans 	assert(ptr != NULL);
3018a4bd5210SJason Evans 	assert(*ptr != NULL);
3019a4bd5210SJason Evans 	assert(size != 0);
3020a4bd5210SJason Evans 	assert(SIZE_T_MAX - size >= extra);
3021a4bd5210SJason Evans 
3022*b7eaed25SJason Evans 	int ret;
3023*b7eaed25SJason Evans 	bool no_move = flags & ALLOCM_NO_MOVE;
3024*b7eaed25SJason Evans 
3025f921d10fSJason Evans 	if (no_move) {
3026f921d10fSJason Evans 		size_t usize = je_xallocx(*ptr, size, extra, flags);
3027f921d10fSJason Evans 		ret = (usize >= size) ?
ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED; 3028*b7eaed25SJason Evans if (rsize != NULL) { 3029a4bd5210SJason Evans *rsize = usize; 3030*b7eaed25SJason Evans } 3031a4bd5210SJason Evans } else { 3032f921d10fSJason Evans void *p = je_rallocx(*ptr, size+extra, flags); 3033f921d10fSJason Evans if (p != NULL) { 3034f921d10fSJason Evans *ptr = p; 3035f921d10fSJason Evans ret = ALLOCM_SUCCESS; 3036*b7eaed25SJason Evans } else { 3037f921d10fSJason Evans ret = ALLOCM_ERR_OOM; 3038a4bd5210SJason Evans } 3039*b7eaed25SJason Evans if (rsize != NULL) { 3040*b7eaed25SJason Evans *rsize = isalloc(tsdn_fetch(), *ptr); 3041*b7eaed25SJason Evans } 3042*b7eaed25SJason Evans } 3043*b7eaed25SJason Evans return ret; 3044a4bd5210SJason Evans } 3045a4bd5210SJason Evans 3046a4bd5210SJason Evans int 3047*b7eaed25SJason Evans je_sallocm(const void *ptr, size_t *rsize, int flags) { 3048a4bd5210SJason Evans assert(rsize != NULL); 3049f921d10fSJason Evans *rsize = je_sallocx(ptr, flags); 3050*b7eaed25SJason Evans return ALLOCM_SUCCESS; 3051a4bd5210SJason Evans } 3052a4bd5210SJason Evans 3053a4bd5210SJason Evans int 3054*b7eaed25SJason Evans je_dallocm(void *ptr, int flags) { 3055f921d10fSJason Evans je_dallocx(ptr, flags); 3056*b7eaed25SJason Evans return ALLOCM_SUCCESS; 3057a4bd5210SJason Evans } 3058a4bd5210SJason Evans 3059a4bd5210SJason Evans int 3060*b7eaed25SJason Evans je_nallocm(size_t *rsize, size_t size, int flags) { 3061*b7eaed25SJason Evans size_t usize = je_nallocx(size, flags); 3062*b7eaed25SJason Evans if (usize == 0) { 3063*b7eaed25SJason Evans return ALLOCM_ERR_OOM; 3064*b7eaed25SJason Evans } 3065*b7eaed25SJason Evans if (rsize != NULL) { 3066a4bd5210SJason Evans *rsize = usize; 3067*b7eaed25SJason Evans } 3068*b7eaed25SJason Evans return ALLOCM_SUCCESS; 3069a4bd5210SJason Evans } 3070a4bd5210SJason Evans 3071d0e79aa3SJason Evans #undef ALLOCM_LG_ALIGN 3072d0e79aa3SJason Evans #undef ALLOCM_ALIGN 3073d0e79aa3SJason Evans #undef ALLOCM_ZERO 3074d0e79aa3SJason Evans #undef ALLOCM_NO_MOVE 3075d0e79aa3SJason Evans 3076d0e79aa3SJason Evans #undef ALLOCM_SUCCESS 3077d0e79aa3SJason Evans #undef ALLOCM_ERR_OOM 3078d0e79aa3SJason Evans #undef ALLOCM_ERR_NOT_MOVED 3079d0e79aa3SJason Evans 3080a4bd5210SJason Evans /* 3081d0e79aa3SJason Evans * End compatibility functions. 3082a4bd5210SJason Evans */ 3083a4bd5210SJason Evans /******************************************************************************/ 3084a4bd5210SJason Evans /* 3085a4bd5210SJason Evans * The following functions are used by threading libraries for protection of 3086a4bd5210SJason Evans * malloc during fork(). 3087a4bd5210SJason Evans */ 3088a4bd5210SJason Evans 308982872ac0SJason Evans /* 309082872ac0SJason Evans * If an application creates a thread before doing any allocation in the main 309182872ac0SJason Evans * thread, then calls fork(2) in the main thread followed by memory allocation 309282872ac0SJason Evans * in the child process, a race can occur that results in deadlock within the 309382872ac0SJason Evans * child: the main thread may have forked while the created thread had 309482872ac0SJason Evans * partially initialized the allocator. Ordinarily jemalloc prevents 309582872ac0SJason Evans * fork/malloc races via the following functions it registers during 309682872ac0SJason Evans * initialization using pthread_atfork(), but of course that does no good if 309782872ac0SJason Evans * the allocator isn't fully initialized at fork time. The following library 3098d0e79aa3SJason Evans * constructor is a partial solution to this problem. 
It may still be possible 3099d0e79aa3SJason Evans * to trigger the deadlock described above, but doing so would involve forking 3100d0e79aa3SJason Evans * via a library constructor that runs before jemalloc's runs. 310182872ac0SJason Evans */ 31021f0a49e8SJason Evans #ifndef JEMALLOC_JET 310382872ac0SJason Evans JEMALLOC_ATTR(constructor) 310482872ac0SJason Evans static void 3105*b7eaed25SJason Evans jemalloc_constructor(void) { 310682872ac0SJason Evans malloc_init(); 310782872ac0SJason Evans } 31081f0a49e8SJason Evans #endif 310982872ac0SJason Evans 3110a4bd5210SJason Evans #ifndef JEMALLOC_MUTEX_INIT_CB 3111a4bd5210SJason Evans void 3112a4bd5210SJason Evans jemalloc_prefork(void) 3113a4bd5210SJason Evans #else 3114e722f8f8SJason Evans JEMALLOC_EXPORT void 3115a4bd5210SJason Evans _malloc_prefork(void) 3116a4bd5210SJason Evans #endif 3117a4bd5210SJason Evans { 31181f0a49e8SJason Evans tsd_t *tsd; 31191f0a49e8SJason Evans unsigned i, j, narenas; 31201f0a49e8SJason Evans arena_t *arena; 3121a4bd5210SJason Evans 312235dad073SJason Evans #ifdef JEMALLOC_MUTEX_INIT_CB 3123*b7eaed25SJason Evans if (!malloc_initialized()) { 312435dad073SJason Evans return; 3125*b7eaed25SJason Evans } 312635dad073SJason Evans #endif 3127d0e79aa3SJason Evans assert(malloc_initialized()); 312835dad073SJason Evans 31291f0a49e8SJason Evans tsd = tsd_fetch(); 3130df0d881dSJason Evans 31311f0a49e8SJason Evans narenas = narenas_total_get(); 31321f0a49e8SJason Evans 3133*b7eaed25SJason Evans witness_prefork(tsd_witness_tsdp_get(tsd)); 31341f0a49e8SJason Evans /* Acquire all mutexes in a safe order. */ 31351f0a49e8SJason Evans ctl_prefork(tsd_tsdn(tsd)); 31368244f2aaSJason Evans tcache_prefork(tsd_tsdn(tsd)); 31371f0a49e8SJason Evans malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); 3138*b7eaed25SJason Evans if (have_background_thread) { 3139*b7eaed25SJason Evans background_thread_prefork0(tsd_tsdn(tsd)); 3140*b7eaed25SJason Evans } 31411f0a49e8SJason Evans prof_prefork0(tsd_tsdn(tsd)); 3142*b7eaed25SJason Evans if (have_background_thread) { 3143*b7eaed25SJason Evans background_thread_prefork1(tsd_tsdn(tsd)); 3144*b7eaed25SJason Evans } 3145*b7eaed25SJason Evans /* Break arena prefork into stages to preserve lock order. 
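	 * Each pass of the loop below applies one arena_prefork<i>() stage to
	 * every arena before moving to the next stage, so mutexes of equal
	 * rank are acquired together across arenas.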
*/ 3146*b7eaed25SJason Evans for (i = 0; i < 7; i++) { 31471f0a49e8SJason Evans for (j = 0; j < narenas; j++) { 31481f0a49e8SJason Evans if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != 31491f0a49e8SJason Evans NULL) { 31501f0a49e8SJason Evans switch (i) { 31511f0a49e8SJason Evans case 0: 31521f0a49e8SJason Evans arena_prefork0(tsd_tsdn(tsd), arena); 31531f0a49e8SJason Evans break; 31541f0a49e8SJason Evans case 1: 31551f0a49e8SJason Evans arena_prefork1(tsd_tsdn(tsd), arena); 31561f0a49e8SJason Evans break; 31571f0a49e8SJason Evans case 2: 31581f0a49e8SJason Evans arena_prefork2(tsd_tsdn(tsd), arena); 31591f0a49e8SJason Evans break; 3160*b7eaed25SJason Evans case 3: 3161*b7eaed25SJason Evans arena_prefork3(tsd_tsdn(tsd), arena); 3162*b7eaed25SJason Evans break; 3163*b7eaed25SJason Evans case 4: 3164*b7eaed25SJason Evans arena_prefork4(tsd_tsdn(tsd), arena); 3165*b7eaed25SJason Evans break; 3166*b7eaed25SJason Evans case 5: 3167*b7eaed25SJason Evans arena_prefork5(tsd_tsdn(tsd), arena); 3168*b7eaed25SJason Evans break; 3169*b7eaed25SJason Evans case 6: 3170*b7eaed25SJason Evans arena_prefork6(tsd_tsdn(tsd), arena); 3171*b7eaed25SJason Evans break; 31721f0a49e8SJason Evans default: not_reached(); 3173a4bd5210SJason Evans } 31741f0a49e8SJason Evans } 31751f0a49e8SJason Evans } 31761f0a49e8SJason Evans } 31771f0a49e8SJason Evans prof_prefork1(tsd_tsdn(tsd)); 3178a4bd5210SJason Evans } 3179a4bd5210SJason Evans 3180a4bd5210SJason Evans #ifndef JEMALLOC_MUTEX_INIT_CB 3181a4bd5210SJason Evans void 3182a4bd5210SJason Evans jemalloc_postfork_parent(void) 3183a4bd5210SJason Evans #else 3184e722f8f8SJason Evans JEMALLOC_EXPORT void 3185a4bd5210SJason Evans _malloc_postfork(void) 3186a4bd5210SJason Evans #endif 3187a4bd5210SJason Evans { 31881f0a49e8SJason Evans tsd_t *tsd; 3189df0d881dSJason Evans unsigned i, narenas; 3190a4bd5210SJason Evans 319135dad073SJason Evans #ifdef JEMALLOC_MUTEX_INIT_CB 3192*b7eaed25SJason Evans if (!malloc_initialized()) { 319335dad073SJason Evans return; 3194*b7eaed25SJason Evans } 319535dad073SJason Evans #endif 3196d0e79aa3SJason Evans assert(malloc_initialized()); 319735dad073SJason Evans 31981f0a49e8SJason Evans tsd = tsd_fetch(); 31991f0a49e8SJason Evans 3200*b7eaed25SJason Evans witness_postfork_parent(tsd_witness_tsdp_get(tsd)); 3201a4bd5210SJason Evans /* Release all mutexes, now that fork() has completed. 
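	 * The release order below is roughly the reverse of the prefork
	 * acquisition order.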
*/ 3202df0d881dSJason Evans for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { 3203df0d881dSJason Evans arena_t *arena; 3204df0d881dSJason Evans 3205*b7eaed25SJason Evans if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { 32061f0a49e8SJason Evans arena_postfork_parent(tsd_tsdn(tsd), arena); 3207a4bd5210SJason Evans } 3208*b7eaed25SJason Evans } 32091f0a49e8SJason Evans prof_postfork_parent(tsd_tsdn(tsd)); 3210*b7eaed25SJason Evans if (have_background_thread) { 3211*b7eaed25SJason Evans background_thread_postfork_parent(tsd_tsdn(tsd)); 3212*b7eaed25SJason Evans } 32131f0a49e8SJason Evans malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); 32148244f2aaSJason Evans tcache_postfork_parent(tsd_tsdn(tsd)); 32151f0a49e8SJason Evans ctl_postfork_parent(tsd_tsdn(tsd)); 3216a4bd5210SJason Evans } 3217a4bd5210SJason Evans 3218a4bd5210SJason Evans void 3219*b7eaed25SJason Evans jemalloc_postfork_child(void) { 32201f0a49e8SJason Evans tsd_t *tsd; 3221df0d881dSJason Evans unsigned i, narenas; 3222a4bd5210SJason Evans 3223d0e79aa3SJason Evans assert(malloc_initialized()); 322435dad073SJason Evans 32251f0a49e8SJason Evans tsd = tsd_fetch(); 32261f0a49e8SJason Evans 3227*b7eaed25SJason Evans witness_postfork_child(tsd_witness_tsdp_get(tsd)); 3228a4bd5210SJason Evans /* Release all mutexes, now that fork() has completed. */ 3229df0d881dSJason Evans for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { 3230df0d881dSJason Evans arena_t *arena; 3231df0d881dSJason Evans 3232*b7eaed25SJason Evans if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) { 32331f0a49e8SJason Evans arena_postfork_child(tsd_tsdn(tsd), arena); 3234a4bd5210SJason Evans } 3235*b7eaed25SJason Evans } 32361f0a49e8SJason Evans prof_postfork_child(tsd_tsdn(tsd)); 3237*b7eaed25SJason Evans if (have_background_thread) { 3238*b7eaed25SJason Evans background_thread_postfork_child(tsd_tsdn(tsd)); 3239*b7eaed25SJason Evans } 32401f0a49e8SJason Evans malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); 32418244f2aaSJason Evans tcache_postfork_child(tsd_tsdn(tsd)); 32421f0a49e8SJason Evans ctl_postfork_child(tsd_tsdn(tsd)); 3243a4bd5210SJason Evans } 3244a4bd5210SJason Evans 32458495e8b1SKonstantin Belousov void 32468495e8b1SKonstantin Belousov _malloc_first_thread(void) 32478495e8b1SKonstantin Belousov { 32488495e8b1SKonstantin Belousov 32498495e8b1SKonstantin Belousov (void)malloc_mutex_first_thread(); 32508495e8b1SKonstantin Belousov } 32518495e8b1SKonstantin Belousov 3252a4bd5210SJason Evans /******************************************************************************/ 3253
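/*
 * Illustrative (non-build) sketch of the atfork registration described
 * above: during initialization jemalloc installs handlers equivalent to
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * so that no allocator mutex is held across fork(2) in either process.
 */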