1a4bd5210SJason Evans #define JEMALLOC_C_ 2a4bd5210SJason Evans #include "jemalloc/internal/jemalloc_internal.h" 3a4bd5210SJason Evans 4a4bd5210SJason Evans /******************************************************************************/ 5a4bd5210SJason Evans /* Data. */ 6a4bd5210SJason Evans 74fdb8d2aSDimitry Andric /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */ 84fdb8d2aSDimitry Andric const char *__malloc_options_1_0 = NULL; 9a4bd5210SJason Evans __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0); 10a4bd5210SJason Evans 11a4bd5210SJason Evans /* Runtime configuration options. */ 12*bde95144SJason Evans const char *je_malloc_conf 13*bde95144SJason Evans #ifndef _WIN32 14*bde95144SJason Evans JEMALLOC_ATTR(weak) 15*bde95144SJason Evans #endif 16*bde95144SJason Evans ; 1788ad2f8dSJason Evans bool opt_abort = 18a4bd5210SJason Evans #ifdef JEMALLOC_DEBUG 1988ad2f8dSJason Evans true 20a4bd5210SJason Evans #else 2188ad2f8dSJason Evans false 22a4bd5210SJason Evans #endif 2388ad2f8dSJason Evans ; 24d0e79aa3SJason Evans const char *opt_junk = 25d0e79aa3SJason Evans #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) 26d0e79aa3SJason Evans "true" 27d0e79aa3SJason Evans #else 28d0e79aa3SJason Evans "false" 29d0e79aa3SJason Evans #endif 30d0e79aa3SJason Evans ; 31d0e79aa3SJason Evans bool opt_junk_alloc = 3288ad2f8dSJason Evans #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) 3388ad2f8dSJason Evans true 34a4bd5210SJason Evans #else 3588ad2f8dSJason Evans false 36a4bd5210SJason Evans #endif 3788ad2f8dSJason Evans ; 38d0e79aa3SJason Evans bool opt_junk_free = 39d0e79aa3SJason Evans #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) 40d0e79aa3SJason Evans true 41d0e79aa3SJason Evans #else 42d0e79aa3SJason Evans false 43d0e79aa3SJason Evans #endif 44d0e79aa3SJason Evans ; 45d0e79aa3SJason Evans 46a4bd5210SJason Evans size_t opt_quarantine = ZU(0); 47a4bd5210SJason Evans bool opt_redzone = false; 48a4bd5210SJason Evans bool opt_utrace = 
false; 49a4bd5210SJason Evans bool opt_xmalloc = false; 50a4bd5210SJason Evans bool opt_zero = false; 51df0d881dSJason Evans unsigned opt_narenas = 0; 52a4bd5210SJason Evans 53d0e79aa3SJason Evans /* Initialized to true if the process is running inside Valgrind. */ 54d0e79aa3SJason Evans bool in_valgrind; 55d0e79aa3SJason Evans 56a4bd5210SJason Evans unsigned ncpus; 57a4bd5210SJason Evans 58df0d881dSJason Evans /* Protects arenas initialization. */ 59d0e79aa3SJason Evans static malloc_mutex_t arenas_lock; 60d0e79aa3SJason Evans /* 61d0e79aa3SJason Evans * Arenas that are used to service external requests. Not all elements of the 62d0e79aa3SJason Evans * arenas array are necessarily used; arenas are created lazily as needed. 63d0e79aa3SJason Evans * 64d0e79aa3SJason Evans * arenas[0..narenas_auto) are used for automatic multiplexing of threads and 65d0e79aa3SJason Evans * arenas. arenas[narenas_auto..narenas_total) are only used if the application 66d0e79aa3SJason Evans * takes some action to create them and allocate from them. 67d0e79aa3SJason Evans */ 68df0d881dSJason Evans arena_t **arenas; 69df0d881dSJason Evans static unsigned narenas_total; /* Use narenas_total_*(). */ 70d0e79aa3SJason Evans static arena_t *a0; /* arenas[0]; read-only after initialization. */ 711f0a49e8SJason Evans unsigned narenas_auto; /* Read-only after initialization. */ 72a4bd5210SJason Evans 73d0e79aa3SJason Evans typedef enum { 74d0e79aa3SJason Evans malloc_init_uninitialized = 3, 75d0e79aa3SJason Evans malloc_init_a0_initialized = 2, 76d0e79aa3SJason Evans malloc_init_recursible = 1, 77d0e79aa3SJason Evans malloc_init_initialized = 0 /* Common case --> jnz. */ 78d0e79aa3SJason Evans } malloc_init_t; 79d0e79aa3SJason Evans static malloc_init_t malloc_init_state = malloc_init_uninitialized; 80d0e79aa3SJason Evans 811f0a49e8SJason Evans /* False should be the common case. Set to true to trigger initialization. 
*/ 82df0d881dSJason Evans static bool malloc_slow = true; 83df0d881dSJason Evans 841f0a49e8SJason Evans /* When malloc_slow is true, set the corresponding bits for sanity check. */ 85df0d881dSJason Evans enum { 86df0d881dSJason Evans flag_opt_junk_alloc = (1U), 87df0d881dSJason Evans flag_opt_junk_free = (1U << 1), 88df0d881dSJason Evans flag_opt_quarantine = (1U << 2), 89df0d881dSJason Evans flag_opt_zero = (1U << 3), 90df0d881dSJason Evans flag_opt_utrace = (1U << 4), 91df0d881dSJason Evans flag_in_valgrind = (1U << 5), 92df0d881dSJason Evans flag_opt_xmalloc = (1U << 6) 93df0d881dSJason Evans }; 94df0d881dSJason Evans static uint8_t malloc_slow_flags; 95df0d881dSJason Evans 96d0e79aa3SJason Evans JEMALLOC_ALIGNED(CACHELINE) 97*bde95144SJason Evans const size_t pind2sz_tab[NPSIZES] = { 98*bde95144SJason Evans #define PSZ_yes(lg_grp, ndelta, lg_delta) \ 99*bde95144SJason Evans (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))), 100*bde95144SJason Evans #define PSZ_no(lg_grp, ndelta, lg_delta) 101*bde95144SJason Evans #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ 102*bde95144SJason Evans PSZ_##psz(lg_grp, ndelta, lg_delta) 103*bde95144SJason Evans SIZE_CLASSES 104*bde95144SJason Evans #undef PSZ_yes 105*bde95144SJason Evans #undef PSZ_no 106*bde95144SJason Evans #undef SC 107*bde95144SJason Evans }; 108*bde95144SJason Evans 109*bde95144SJason Evans JEMALLOC_ALIGNED(CACHELINE) 110*bde95144SJason Evans const size_t index2size_tab[NSIZES] = { 111*bde95144SJason Evans #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ 112d0e79aa3SJason Evans ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)), 113d0e79aa3SJason Evans SIZE_CLASSES 114d0e79aa3SJason Evans #undef SC 115d0e79aa3SJason Evans }; 116d0e79aa3SJason Evans 117d0e79aa3SJason Evans JEMALLOC_ALIGNED(CACHELINE) 118d0e79aa3SJason Evans const uint8_t size2index_tab[] = { 119d0e79aa3SJason Evans #if LG_TINY_MIN == 0 120d0e79aa3SJason Evans #warning "Dangerous LG_TINY_MIN" 
121d0e79aa3SJason Evans #define S2B_0(i) i, 122d0e79aa3SJason Evans #elif LG_TINY_MIN == 1 123d0e79aa3SJason Evans #warning "Dangerous LG_TINY_MIN" 124d0e79aa3SJason Evans #define S2B_1(i) i, 125d0e79aa3SJason Evans #elif LG_TINY_MIN == 2 126d0e79aa3SJason Evans #warning "Dangerous LG_TINY_MIN" 127d0e79aa3SJason Evans #define S2B_2(i) i, 128d0e79aa3SJason Evans #elif LG_TINY_MIN == 3 129d0e79aa3SJason Evans #define S2B_3(i) i, 130d0e79aa3SJason Evans #elif LG_TINY_MIN == 4 131d0e79aa3SJason Evans #define S2B_4(i) i, 132d0e79aa3SJason Evans #elif LG_TINY_MIN == 5 133d0e79aa3SJason Evans #define S2B_5(i) i, 134d0e79aa3SJason Evans #elif LG_TINY_MIN == 6 135d0e79aa3SJason Evans #define S2B_6(i) i, 136d0e79aa3SJason Evans #elif LG_TINY_MIN == 7 137d0e79aa3SJason Evans #define S2B_7(i) i, 138d0e79aa3SJason Evans #elif LG_TINY_MIN == 8 139d0e79aa3SJason Evans #define S2B_8(i) i, 140d0e79aa3SJason Evans #elif LG_TINY_MIN == 9 141d0e79aa3SJason Evans #define S2B_9(i) i, 142d0e79aa3SJason Evans #elif LG_TINY_MIN == 10 143d0e79aa3SJason Evans #define S2B_10(i) i, 144d0e79aa3SJason Evans #elif LG_TINY_MIN == 11 145d0e79aa3SJason Evans #define S2B_11(i) i, 146d0e79aa3SJason Evans #else 147d0e79aa3SJason Evans #error "Unsupported LG_TINY_MIN" 148d0e79aa3SJason Evans #endif 149d0e79aa3SJason Evans #if LG_TINY_MIN < 1 150d0e79aa3SJason Evans #define S2B_1(i) S2B_0(i) S2B_0(i) 151d0e79aa3SJason Evans #endif 152d0e79aa3SJason Evans #if LG_TINY_MIN < 2 153d0e79aa3SJason Evans #define S2B_2(i) S2B_1(i) S2B_1(i) 154d0e79aa3SJason Evans #endif 155d0e79aa3SJason Evans #if LG_TINY_MIN < 3 156d0e79aa3SJason Evans #define S2B_3(i) S2B_2(i) S2B_2(i) 157d0e79aa3SJason Evans #endif 158d0e79aa3SJason Evans #if LG_TINY_MIN < 4 159d0e79aa3SJason Evans #define S2B_4(i) S2B_3(i) S2B_3(i) 160d0e79aa3SJason Evans #endif 161d0e79aa3SJason Evans #if LG_TINY_MIN < 5 162d0e79aa3SJason Evans #define S2B_5(i) S2B_4(i) S2B_4(i) 163d0e79aa3SJason Evans #endif 164d0e79aa3SJason Evans #if LG_TINY_MIN < 6 
165d0e79aa3SJason Evans #define S2B_6(i) S2B_5(i) S2B_5(i) 166d0e79aa3SJason Evans #endif 167d0e79aa3SJason Evans #if LG_TINY_MIN < 7 168d0e79aa3SJason Evans #define S2B_7(i) S2B_6(i) S2B_6(i) 169d0e79aa3SJason Evans #endif 170d0e79aa3SJason Evans #if LG_TINY_MIN < 8 171d0e79aa3SJason Evans #define S2B_8(i) S2B_7(i) S2B_7(i) 172d0e79aa3SJason Evans #endif 173d0e79aa3SJason Evans #if LG_TINY_MIN < 9 174d0e79aa3SJason Evans #define S2B_9(i) S2B_8(i) S2B_8(i) 175d0e79aa3SJason Evans #endif 176d0e79aa3SJason Evans #if LG_TINY_MIN < 10 177d0e79aa3SJason Evans #define S2B_10(i) S2B_9(i) S2B_9(i) 178d0e79aa3SJason Evans #endif 179d0e79aa3SJason Evans #if LG_TINY_MIN < 11 180d0e79aa3SJason Evans #define S2B_11(i) S2B_10(i) S2B_10(i) 181d0e79aa3SJason Evans #endif 182d0e79aa3SJason Evans #define S2B_no(i) 183*bde95144SJason Evans #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ 184d0e79aa3SJason Evans S2B_##lg_delta_lookup(index) 185d0e79aa3SJason Evans SIZE_CLASSES 186d0e79aa3SJason Evans #undef S2B_3 187d0e79aa3SJason Evans #undef S2B_4 188d0e79aa3SJason Evans #undef S2B_5 189d0e79aa3SJason Evans #undef S2B_6 190d0e79aa3SJason Evans #undef S2B_7 191d0e79aa3SJason Evans #undef S2B_8 192d0e79aa3SJason Evans #undef S2B_9 193d0e79aa3SJason Evans #undef S2B_10 194d0e79aa3SJason Evans #undef S2B_11 195d0e79aa3SJason Evans #undef S2B_no 196d0e79aa3SJason Evans #undef SC 197d0e79aa3SJason Evans }; 198a4bd5210SJason Evans 199a4bd5210SJason Evans #ifdef JEMALLOC_THREADED_INIT 200a4bd5210SJason Evans /* Used to let the initializing thread recursively allocate. 
*/ 201a4bd5210SJason Evans # define NO_INITIALIZER ((unsigned long)0) 202a4bd5210SJason Evans # define INITIALIZER pthread_self() 203a4bd5210SJason Evans # define IS_INITIALIZER (malloc_initializer == pthread_self()) 204a4bd5210SJason Evans static pthread_t malloc_initializer = NO_INITIALIZER; 205a4bd5210SJason Evans #else 206a4bd5210SJason Evans # define NO_INITIALIZER false 207a4bd5210SJason Evans # define INITIALIZER true 208a4bd5210SJason Evans # define IS_INITIALIZER malloc_initializer 209a4bd5210SJason Evans static bool malloc_initializer = NO_INITIALIZER; 210a4bd5210SJason Evans #endif 211a4bd5210SJason Evans 212a4bd5210SJason Evans /* Used to avoid initialization races. */ 213e722f8f8SJason Evans #ifdef _WIN32 214d0e79aa3SJason Evans #if _WIN32_WINNT >= 0x0600 215d0e79aa3SJason Evans static malloc_mutex_t init_lock = SRWLOCK_INIT; 216d0e79aa3SJason Evans #else 217e722f8f8SJason Evans static malloc_mutex_t init_lock; 218536b3538SJason Evans static bool init_lock_initialized = false; 219e722f8f8SJason Evans 220e722f8f8SJason Evans JEMALLOC_ATTR(constructor) 221e722f8f8SJason Evans static void WINAPI 222e722f8f8SJason Evans _init_init_lock(void) 223e722f8f8SJason Evans { 224e722f8f8SJason Evans 225536b3538SJason Evans /* If another constructor in the same binary is using mallctl to 226536b3538SJason Evans * e.g. setup chunk hooks, it may end up running before this one, 227536b3538SJason Evans * and malloc_init_hard will crash trying to lock the uninitialized 228536b3538SJason Evans * lock. So we force an initialization of the lock in 229536b3538SJason Evans * malloc_init_hard as well. We don't try to care about atomicity 230536b3538SJason Evans * of the accessed to the init_lock_initialized boolean, since it 231536b3538SJason Evans * really only matters early in the process creation, before any 232536b3538SJason Evans * separate thread normally starts doing anything. 
*/ 233536b3538SJason Evans if (!init_lock_initialized) 2341f0a49e8SJason Evans malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT); 235536b3538SJason Evans init_lock_initialized = true; 236e722f8f8SJason Evans } 237e722f8f8SJason Evans 238e722f8f8SJason Evans #ifdef _MSC_VER 239e722f8f8SJason Evans # pragma section(".CRT$XCU", read) 240e722f8f8SJason Evans JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) 241e722f8f8SJason Evans static const void (WINAPI *init_init_lock)(void) = _init_init_lock; 242e722f8f8SJason Evans #endif 243d0e79aa3SJason Evans #endif 244e722f8f8SJason Evans #else 245a4bd5210SJason Evans static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; 246e722f8f8SJason Evans #endif 247a4bd5210SJason Evans 248a4bd5210SJason Evans typedef struct { 249a4bd5210SJason Evans void *p; /* Input pointer (as in realloc(p, s)). */ 250a4bd5210SJason Evans size_t s; /* Request size. */ 251a4bd5210SJason Evans void *r; /* Result pointer. */ 252a4bd5210SJason Evans } malloc_utrace_t; 253a4bd5210SJason Evans 254a4bd5210SJason Evans #ifdef JEMALLOC_UTRACE 255a4bd5210SJason Evans # define UTRACE(a, b, c) do { \ 256d0e79aa3SJason Evans if (unlikely(opt_utrace)) { \ 25788ad2f8dSJason Evans int utrace_serrno = errno; \ 258a4bd5210SJason Evans malloc_utrace_t ut; \ 259a4bd5210SJason Evans ut.p = (a); \ 260a4bd5210SJason Evans ut.s = (b); \ 261a4bd5210SJason Evans ut.r = (c); \ 262a4bd5210SJason Evans utrace(&ut, sizeof(ut)); \ 26388ad2f8dSJason Evans errno = utrace_serrno; \ 264a4bd5210SJason Evans } \ 265a4bd5210SJason Evans } while (0) 266a4bd5210SJason Evans #else 267a4bd5210SJason Evans # define UTRACE(a, b, c) 268a4bd5210SJason Evans #endif 269a4bd5210SJason Evans 270a4bd5210SJason Evans /******************************************************************************/ 271f921d10fSJason Evans /* 272f921d10fSJason Evans * Function prototypes for static functions that are referenced prior to 273f921d10fSJason Evans * definition. 
274f921d10fSJason Evans */ 275a4bd5210SJason Evans 276d0e79aa3SJason Evans static bool malloc_init_hard_a0(void); 277a4bd5210SJason Evans static bool malloc_init_hard(void); 278a4bd5210SJason Evans 279a4bd5210SJason Evans /******************************************************************************/ 280a4bd5210SJason Evans /* 281a4bd5210SJason Evans * Begin miscellaneous support functions. 282a4bd5210SJason Evans */ 283a4bd5210SJason Evans 284d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C bool 285d0e79aa3SJason Evans malloc_initialized(void) 286a4bd5210SJason Evans { 287a4bd5210SJason Evans 288d0e79aa3SJason Evans return (malloc_init_state == malloc_init_initialized); 289a4bd5210SJason Evans } 290d0e79aa3SJason Evans 291d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C void 292d0e79aa3SJason Evans malloc_thread_init(void) 293d0e79aa3SJason Evans { 294a4bd5210SJason Evans 295a4bd5210SJason Evans /* 296d0e79aa3SJason Evans * TSD initialization can't be safely done as a side effect of 297d0e79aa3SJason Evans * deallocation, because it is possible for a thread to do nothing but 298d0e79aa3SJason Evans * deallocate its TLS data via free(), in which case writing to TLS 299d0e79aa3SJason Evans * would cause write-after-free memory corruption. The quarantine 300d0e79aa3SJason Evans * facility *only* gets used as a side effect of deallocation, so make 301d0e79aa3SJason Evans * a best effort attempt at initializing its TSD by hooking all 302d0e79aa3SJason Evans * allocation events. 
303a4bd5210SJason Evans */ 304d0e79aa3SJason Evans if (config_fill && unlikely(opt_quarantine)) 305d0e79aa3SJason Evans quarantine_alloc_hook(); 306a4bd5210SJason Evans } 307a4bd5210SJason Evans 308d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C bool 309d0e79aa3SJason Evans malloc_init_a0(void) 310d0e79aa3SJason Evans { 311d0e79aa3SJason Evans 312d0e79aa3SJason Evans if (unlikely(malloc_init_state == malloc_init_uninitialized)) 313d0e79aa3SJason Evans return (malloc_init_hard_a0()); 314d0e79aa3SJason Evans return (false); 315d0e79aa3SJason Evans } 316d0e79aa3SJason Evans 317d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C bool 318d0e79aa3SJason Evans malloc_init(void) 319d0e79aa3SJason Evans { 320d0e79aa3SJason Evans 321d0e79aa3SJason Evans if (unlikely(!malloc_initialized()) && malloc_init_hard()) 322d0e79aa3SJason Evans return (true); 323d0e79aa3SJason Evans malloc_thread_init(); 324d0e79aa3SJason Evans 325d0e79aa3SJason Evans return (false); 326d0e79aa3SJason Evans } 327d0e79aa3SJason Evans 328d0e79aa3SJason Evans /* 3291f0a49e8SJason Evans * The a0*() functions are used instead of i{d,}alloc() in situations that 330d0e79aa3SJason Evans * cannot tolerate TLS variable access. 
331d0e79aa3SJason Evans */ 332d0e79aa3SJason Evans 333d0e79aa3SJason Evans static void * 334d0e79aa3SJason Evans a0ialloc(size_t size, bool zero, bool is_metadata) 335d0e79aa3SJason Evans { 336d0e79aa3SJason Evans 337d0e79aa3SJason Evans if (unlikely(malloc_init_a0())) 338d0e79aa3SJason Evans return (NULL); 339d0e79aa3SJason Evans 3401f0a49e8SJason Evans return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL, 3411f0a49e8SJason Evans is_metadata, arena_get(TSDN_NULL, 0, true), true)); 342d0e79aa3SJason Evans } 343d0e79aa3SJason Evans 344d0e79aa3SJason Evans static void 345d0e79aa3SJason Evans a0idalloc(void *ptr, bool is_metadata) 346d0e79aa3SJason Evans { 347d0e79aa3SJason Evans 3481f0a49e8SJason Evans idalloctm(TSDN_NULL, ptr, false, is_metadata, true); 349d0e79aa3SJason Evans } 350d0e79aa3SJason Evans 351*bde95144SJason Evans arena_t * 352*bde95144SJason Evans a0get(void) 353*bde95144SJason Evans { 354*bde95144SJason Evans 355*bde95144SJason Evans return (a0); 356*bde95144SJason Evans } 357*bde95144SJason Evans 358d0e79aa3SJason Evans void * 359d0e79aa3SJason Evans a0malloc(size_t size) 360d0e79aa3SJason Evans { 361d0e79aa3SJason Evans 362d0e79aa3SJason Evans return (a0ialloc(size, false, true)); 363d0e79aa3SJason Evans } 364d0e79aa3SJason Evans 365d0e79aa3SJason Evans void 366d0e79aa3SJason Evans a0dalloc(void *ptr) 367d0e79aa3SJason Evans { 368d0e79aa3SJason Evans 369d0e79aa3SJason Evans a0idalloc(ptr, true); 370d0e79aa3SJason Evans } 371d0e79aa3SJason Evans 372d0e79aa3SJason Evans /* 373d0e79aa3SJason Evans * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive 374d0e79aa3SJason Evans * situations that cannot tolerate TLS variable access (TLS allocation and very 375d0e79aa3SJason Evans * early internal data structure initialization). 
376d0e79aa3SJason Evans */ 377d0e79aa3SJason Evans 378d0e79aa3SJason Evans void * 379d0e79aa3SJason Evans bootstrap_malloc(size_t size) 380d0e79aa3SJason Evans { 381d0e79aa3SJason Evans 382d0e79aa3SJason Evans if (unlikely(size == 0)) 383d0e79aa3SJason Evans size = 1; 384d0e79aa3SJason Evans 385d0e79aa3SJason Evans return (a0ialloc(size, false, false)); 386d0e79aa3SJason Evans } 387d0e79aa3SJason Evans 388d0e79aa3SJason Evans void * 389d0e79aa3SJason Evans bootstrap_calloc(size_t num, size_t size) 390d0e79aa3SJason Evans { 391d0e79aa3SJason Evans size_t num_size; 392d0e79aa3SJason Evans 393d0e79aa3SJason Evans num_size = num * size; 394d0e79aa3SJason Evans if (unlikely(num_size == 0)) { 395d0e79aa3SJason Evans assert(num == 0 || size == 0); 396d0e79aa3SJason Evans num_size = 1; 397d0e79aa3SJason Evans } 398d0e79aa3SJason Evans 399d0e79aa3SJason Evans return (a0ialloc(num_size, true, false)); 400d0e79aa3SJason Evans } 401d0e79aa3SJason Evans 402d0e79aa3SJason Evans void 403d0e79aa3SJason Evans bootstrap_free(void *ptr) 404d0e79aa3SJason Evans { 405d0e79aa3SJason Evans 406d0e79aa3SJason Evans if (unlikely(ptr == NULL)) 407d0e79aa3SJason Evans return; 408d0e79aa3SJason Evans 409d0e79aa3SJason Evans a0idalloc(ptr, false); 410d0e79aa3SJason Evans } 411d0e79aa3SJason Evans 412df0d881dSJason Evans static void 413df0d881dSJason Evans arena_set(unsigned ind, arena_t *arena) 414df0d881dSJason Evans { 415df0d881dSJason Evans 416df0d881dSJason Evans atomic_write_p((void **)&arenas[ind], arena); 417df0d881dSJason Evans } 418df0d881dSJason Evans 419df0d881dSJason Evans static void 420df0d881dSJason Evans narenas_total_set(unsigned narenas) 421df0d881dSJason Evans { 422df0d881dSJason Evans 423df0d881dSJason Evans atomic_write_u(&narenas_total, narenas); 424df0d881dSJason Evans } 425df0d881dSJason Evans 426df0d881dSJason Evans static void 427df0d881dSJason Evans narenas_total_inc(void) 428df0d881dSJason Evans { 429df0d881dSJason Evans 430df0d881dSJason Evans 
atomic_add_u(&narenas_total, 1); 431df0d881dSJason Evans } 432df0d881dSJason Evans 433df0d881dSJason Evans unsigned 434df0d881dSJason Evans narenas_total_get(void) 435df0d881dSJason Evans { 436df0d881dSJason Evans 437df0d881dSJason Evans return (atomic_read_u(&narenas_total)); 438df0d881dSJason Evans } 439df0d881dSJason Evans 440d0e79aa3SJason Evans /* Create a new arena and insert it into the arenas array at index ind. */ 441d0e79aa3SJason Evans static arena_t * 4421f0a49e8SJason Evans arena_init_locked(tsdn_t *tsdn, unsigned ind) 443d0e79aa3SJason Evans { 444d0e79aa3SJason Evans arena_t *arena; 445d0e79aa3SJason Evans 446df0d881dSJason Evans assert(ind <= narenas_total_get()); 447d0e79aa3SJason Evans if (ind > MALLOCX_ARENA_MAX) 448d0e79aa3SJason Evans return (NULL); 449df0d881dSJason Evans if (ind == narenas_total_get()) 450df0d881dSJason Evans narenas_total_inc(); 451d0e79aa3SJason Evans 452d0e79aa3SJason Evans /* 453d0e79aa3SJason Evans * Another thread may have already initialized arenas[ind] if it's an 454d0e79aa3SJason Evans * auto arena. 455d0e79aa3SJason Evans */ 4561f0a49e8SJason Evans arena = arena_get(tsdn, ind, false); 457d0e79aa3SJason Evans if (arena != NULL) { 458d0e79aa3SJason Evans assert(ind < narenas_auto); 459d0e79aa3SJason Evans return (arena); 460d0e79aa3SJason Evans } 461d0e79aa3SJason Evans 462d0e79aa3SJason Evans /* Actually initialize the arena. 
*/ 4631f0a49e8SJason Evans arena = arena_new(tsdn, ind); 464df0d881dSJason Evans arena_set(ind, arena); 465d0e79aa3SJason Evans return (arena); 466d0e79aa3SJason Evans } 467d0e79aa3SJason Evans 468d0e79aa3SJason Evans arena_t * 4691f0a49e8SJason Evans arena_init(tsdn_t *tsdn, unsigned ind) 470d0e79aa3SJason Evans { 471d0e79aa3SJason Evans arena_t *arena; 472d0e79aa3SJason Evans 4731f0a49e8SJason Evans malloc_mutex_lock(tsdn, &arenas_lock); 4741f0a49e8SJason Evans arena = arena_init_locked(tsdn, ind); 4751f0a49e8SJason Evans malloc_mutex_unlock(tsdn, &arenas_lock); 476d0e79aa3SJason Evans return (arena); 477d0e79aa3SJason Evans } 478d0e79aa3SJason Evans 479d0e79aa3SJason Evans static void 4801f0a49e8SJason Evans arena_bind(tsd_t *tsd, unsigned ind, bool internal) 481d0e79aa3SJason Evans { 482df0d881dSJason Evans arena_t *arena; 483d0e79aa3SJason Evans 484*bde95144SJason Evans if (!tsd_nominal(tsd)) 485*bde95144SJason Evans return; 486*bde95144SJason Evans 4871f0a49e8SJason Evans arena = arena_get(tsd_tsdn(tsd), ind, false); 4881f0a49e8SJason Evans arena_nthreads_inc(arena, internal); 489df0d881dSJason Evans 4901f0a49e8SJason Evans if (internal) 4911f0a49e8SJason Evans tsd_iarena_set(tsd, arena); 4921f0a49e8SJason Evans else 493df0d881dSJason Evans tsd_arena_set(tsd, arena); 494d0e79aa3SJason Evans } 495d0e79aa3SJason Evans 496d0e79aa3SJason Evans void 497d0e79aa3SJason Evans arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) 498d0e79aa3SJason Evans { 499d0e79aa3SJason Evans arena_t *oldarena, *newarena; 500d0e79aa3SJason Evans 5011f0a49e8SJason Evans oldarena = arena_get(tsd_tsdn(tsd), oldind, false); 5021f0a49e8SJason Evans newarena = arena_get(tsd_tsdn(tsd), newind, false); 5031f0a49e8SJason Evans arena_nthreads_dec(oldarena, false); 5041f0a49e8SJason Evans arena_nthreads_inc(newarena, false); 505d0e79aa3SJason Evans tsd_arena_set(tsd, newarena); 506d0e79aa3SJason Evans } 507d0e79aa3SJason Evans 508d0e79aa3SJason Evans static void 5091f0a49e8SJason Evans 
arena_unbind(tsd_t *tsd, unsigned ind, bool internal) 510d0e79aa3SJason Evans { 511d0e79aa3SJason Evans arena_t *arena; 512d0e79aa3SJason Evans 5131f0a49e8SJason Evans arena = arena_get(tsd_tsdn(tsd), ind, false); 5141f0a49e8SJason Evans arena_nthreads_dec(arena, internal); 5151f0a49e8SJason Evans if (internal) 5161f0a49e8SJason Evans tsd_iarena_set(tsd, NULL); 5171f0a49e8SJason Evans else 518d0e79aa3SJason Evans tsd_arena_set(tsd, NULL); 519d0e79aa3SJason Evans } 520d0e79aa3SJason Evans 521df0d881dSJason Evans arena_tdata_t * 522df0d881dSJason Evans arena_tdata_get_hard(tsd_t *tsd, unsigned ind) 523d0e79aa3SJason Evans { 524df0d881dSJason Evans arena_tdata_t *tdata, *arenas_tdata_old; 525df0d881dSJason Evans arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); 526df0d881dSJason Evans unsigned narenas_tdata_old, i; 527df0d881dSJason Evans unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); 528d0e79aa3SJason Evans unsigned narenas_actual = narenas_total_get(); 529d0e79aa3SJason Evans 530d0e79aa3SJason Evans /* 531df0d881dSJason Evans * Dissociate old tdata array (and set up for deallocation upon return) 532df0d881dSJason Evans * if it's too small. 533d0e79aa3SJason Evans */ 534df0d881dSJason Evans if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { 535df0d881dSJason Evans arenas_tdata_old = arenas_tdata; 536df0d881dSJason Evans narenas_tdata_old = narenas_tdata; 537df0d881dSJason Evans arenas_tdata = NULL; 538df0d881dSJason Evans narenas_tdata = 0; 539df0d881dSJason Evans tsd_arenas_tdata_set(tsd, arenas_tdata); 540df0d881dSJason Evans tsd_narenas_tdata_set(tsd, narenas_tdata); 541df0d881dSJason Evans } else { 542df0d881dSJason Evans arenas_tdata_old = NULL; 543df0d881dSJason Evans narenas_tdata_old = 0; 544d0e79aa3SJason Evans } 545df0d881dSJason Evans 546df0d881dSJason Evans /* Allocate tdata array if it's missing. 
*/ 547df0d881dSJason Evans if (arenas_tdata == NULL) { 548df0d881dSJason Evans bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); 549df0d881dSJason Evans narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; 550df0d881dSJason Evans 551df0d881dSJason Evans if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { 552df0d881dSJason Evans *arenas_tdata_bypassp = true; 553df0d881dSJason Evans arenas_tdata = (arena_tdata_t *)a0malloc( 554df0d881dSJason Evans sizeof(arena_tdata_t) * narenas_tdata); 555df0d881dSJason Evans *arenas_tdata_bypassp = false; 556df0d881dSJason Evans } 557df0d881dSJason Evans if (arenas_tdata == NULL) { 558df0d881dSJason Evans tdata = NULL; 559df0d881dSJason Evans goto label_return; 560df0d881dSJason Evans } 561df0d881dSJason Evans assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); 562df0d881dSJason Evans tsd_arenas_tdata_set(tsd, arenas_tdata); 563df0d881dSJason Evans tsd_narenas_tdata_set(tsd, narenas_tdata); 564d0e79aa3SJason Evans } 565d0e79aa3SJason Evans 566d0e79aa3SJason Evans /* 567df0d881dSJason Evans * Copy to tdata array. It's possible that the actual number of arenas 568df0d881dSJason Evans * has increased since narenas_total_get() was called above, but that 569df0d881dSJason Evans * causes no correctness issues unless two threads concurrently execute 570df0d881dSJason Evans * the arenas.extend mallctl, which we trust mallctl synchronization to 571d0e79aa3SJason Evans * prevent. 572d0e79aa3SJason Evans */ 573df0d881dSJason Evans 574df0d881dSJason Evans /* Copy/initialize tickers. 
*/ 575df0d881dSJason Evans for (i = 0; i < narenas_actual; i++) { 576df0d881dSJason Evans if (i < narenas_tdata_old) { 577df0d881dSJason Evans ticker_copy(&arenas_tdata[i].decay_ticker, 578df0d881dSJason Evans &arenas_tdata_old[i].decay_ticker); 579df0d881dSJason Evans } else { 580df0d881dSJason Evans ticker_init(&arenas_tdata[i].decay_ticker, 581df0d881dSJason Evans DECAY_NTICKS_PER_UPDATE); 582df0d881dSJason Evans } 583df0d881dSJason Evans } 584df0d881dSJason Evans if (narenas_tdata > narenas_actual) { 585df0d881dSJason Evans memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) 586df0d881dSJason Evans * (narenas_tdata - narenas_actual)); 587d0e79aa3SJason Evans } 588d0e79aa3SJason Evans 589df0d881dSJason Evans /* Read the refreshed tdata array. */ 590df0d881dSJason Evans tdata = &arenas_tdata[ind]; 591df0d881dSJason Evans label_return: 592df0d881dSJason Evans if (arenas_tdata_old != NULL) 593df0d881dSJason Evans a0dalloc(arenas_tdata_old); 594df0d881dSJason Evans return (tdata); 595d0e79aa3SJason Evans } 596d0e79aa3SJason Evans 597d0e79aa3SJason Evans /* Slow path, called only by arena_choose(). */ 598d0e79aa3SJason Evans arena_t * 5991f0a49e8SJason Evans arena_choose_hard(tsd_t *tsd, bool internal) 600a4bd5210SJason Evans { 6011f0a49e8SJason Evans arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); 602a4bd5210SJason Evans 60382872ac0SJason Evans if (narenas_auto > 1) { 6041f0a49e8SJason Evans unsigned i, j, choose[2], first_null; 605a4bd5210SJason Evans 6061f0a49e8SJason Evans /* 6071f0a49e8SJason Evans * Determine binding for both non-internal and internal 6081f0a49e8SJason Evans * allocation. 6091f0a49e8SJason Evans * 6101f0a49e8SJason Evans * choose[0]: For application allocation. 6111f0a49e8SJason Evans * choose[1]: For internal metadata allocation. 
6121f0a49e8SJason Evans */ 6131f0a49e8SJason Evans 6141f0a49e8SJason Evans for (j = 0; j < 2; j++) 6151f0a49e8SJason Evans choose[j] = 0; 6161f0a49e8SJason Evans 61782872ac0SJason Evans first_null = narenas_auto; 6181f0a49e8SJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); 6191f0a49e8SJason Evans assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); 62082872ac0SJason Evans for (i = 1; i < narenas_auto; i++) { 6211f0a49e8SJason Evans if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { 622a4bd5210SJason Evans /* 623a4bd5210SJason Evans * Choose the first arena that has the lowest 624a4bd5210SJason Evans * number of threads assigned to it. 625a4bd5210SJason Evans */ 6261f0a49e8SJason Evans for (j = 0; j < 2; j++) { 6271f0a49e8SJason Evans if (arena_nthreads_get(arena_get( 6281f0a49e8SJason Evans tsd_tsdn(tsd), i, false), !!j) < 6291f0a49e8SJason Evans arena_nthreads_get(arena_get( 6301f0a49e8SJason Evans tsd_tsdn(tsd), choose[j], false), 6311f0a49e8SJason Evans !!j)) 6321f0a49e8SJason Evans choose[j] = i; 6331f0a49e8SJason Evans } 63482872ac0SJason Evans } else if (first_null == narenas_auto) { 635a4bd5210SJason Evans /* 636a4bd5210SJason Evans * Record the index of the first uninitialized 637a4bd5210SJason Evans * arena, in case all extant arenas are in use. 638a4bd5210SJason Evans * 639a4bd5210SJason Evans * NB: It is possible for there to be 640a4bd5210SJason Evans * discontinuities in terms of initialized 641a4bd5210SJason Evans * versus uninitialized arenas, due to the 642a4bd5210SJason Evans * "thread.arena" mallctl. 
643a4bd5210SJason Evans */ 644a4bd5210SJason Evans first_null = i; 645a4bd5210SJason Evans } 646a4bd5210SJason Evans } 647a4bd5210SJason Evans 6481f0a49e8SJason Evans for (j = 0; j < 2; j++) { 6491f0a49e8SJason Evans if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), 6501f0a49e8SJason Evans choose[j], false), !!j) == 0 || first_null == 6511f0a49e8SJason Evans narenas_auto) { 652a4bd5210SJason Evans /* 6531f0a49e8SJason Evans * Use an unloaded arena, or the least loaded 6541f0a49e8SJason Evans * arena if all arenas are already initialized. 655a4bd5210SJason Evans */ 6561f0a49e8SJason Evans if (!!j == internal) { 6571f0a49e8SJason Evans ret = arena_get(tsd_tsdn(tsd), 6581f0a49e8SJason Evans choose[j], false); 6591f0a49e8SJason Evans } 660a4bd5210SJason Evans } else { 6611f0a49e8SJason Evans arena_t *arena; 6621f0a49e8SJason Evans 663a4bd5210SJason Evans /* Initialize a new arena. */ 6641f0a49e8SJason Evans choose[j] = first_null; 6651f0a49e8SJason Evans arena = arena_init_locked(tsd_tsdn(tsd), 6661f0a49e8SJason Evans choose[j]); 6671f0a49e8SJason Evans if (arena == NULL) { 6681f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), 6691f0a49e8SJason Evans &arenas_lock); 670d0e79aa3SJason Evans return (NULL); 671a4bd5210SJason Evans } 6721f0a49e8SJason Evans if (!!j == internal) 6731f0a49e8SJason Evans ret = arena; 674d0e79aa3SJason Evans } 6751f0a49e8SJason Evans arena_bind(tsd, choose[j], !!j); 6761f0a49e8SJason Evans } 6771f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); 678a4bd5210SJason Evans } else { 6791f0a49e8SJason Evans ret = arena_get(tsd_tsdn(tsd), 0, false); 6801f0a49e8SJason Evans arena_bind(tsd, 0, false); 6811f0a49e8SJason Evans arena_bind(tsd, 0, true); 682a4bd5210SJason Evans } 683a4bd5210SJason Evans 684a4bd5210SJason Evans return (ret); 685a4bd5210SJason Evans } 686a4bd5210SJason Evans 687d0e79aa3SJason Evans void 688d0e79aa3SJason Evans thread_allocated_cleanup(tsd_t *tsd) 689d0e79aa3SJason Evans { 690d0e79aa3SJason Evans 
691d0e79aa3SJason Evans /* Do nothing. */ 692d0e79aa3SJason Evans } 693d0e79aa3SJason Evans 694d0e79aa3SJason Evans void 695d0e79aa3SJason Evans thread_deallocated_cleanup(tsd_t *tsd) 696d0e79aa3SJason Evans { 697d0e79aa3SJason Evans 698d0e79aa3SJason Evans /* Do nothing. */ 699d0e79aa3SJason Evans } 700d0e79aa3SJason Evans 701d0e79aa3SJason Evans void 7021f0a49e8SJason Evans iarena_cleanup(tsd_t *tsd) 7031f0a49e8SJason Evans { 7041f0a49e8SJason Evans arena_t *iarena; 7051f0a49e8SJason Evans 7061f0a49e8SJason Evans iarena = tsd_iarena_get(tsd); 7071f0a49e8SJason Evans if (iarena != NULL) 7081f0a49e8SJason Evans arena_unbind(tsd, iarena->ind, true); 7091f0a49e8SJason Evans } 7101f0a49e8SJason Evans 7111f0a49e8SJason Evans void 712d0e79aa3SJason Evans arena_cleanup(tsd_t *tsd) 713d0e79aa3SJason Evans { 714d0e79aa3SJason Evans arena_t *arena; 715d0e79aa3SJason Evans 716d0e79aa3SJason Evans arena = tsd_arena_get(tsd); 717d0e79aa3SJason Evans if (arena != NULL) 7181f0a49e8SJason Evans arena_unbind(tsd, arena->ind, false); 719d0e79aa3SJason Evans } 720d0e79aa3SJason Evans 721d0e79aa3SJason Evans void 722df0d881dSJason Evans arenas_tdata_cleanup(tsd_t *tsd) 723d0e79aa3SJason Evans { 724df0d881dSJason Evans arena_tdata_t *arenas_tdata; 725d0e79aa3SJason Evans 726df0d881dSJason Evans /* Prevent tsd->arenas_tdata from being (re)created. */ 727df0d881dSJason Evans *tsd_arenas_tdata_bypassp_get(tsd) = true; 728df0d881dSJason Evans 729df0d881dSJason Evans arenas_tdata = tsd_arenas_tdata_get(tsd); 730df0d881dSJason Evans if (arenas_tdata != NULL) { 731df0d881dSJason Evans tsd_arenas_tdata_set(tsd, NULL); 732df0d881dSJason Evans a0dalloc(arenas_tdata); 733d0e79aa3SJason Evans } 734536b3538SJason Evans } 735d0e79aa3SJason Evans 736d0e79aa3SJason Evans void 737df0d881dSJason Evans narenas_tdata_cleanup(tsd_t *tsd) 738d0e79aa3SJason Evans { 739d0e79aa3SJason Evans 740d0e79aa3SJason Evans /* Do nothing. 
 */
}

/* tsd destructor for the arenas_tdata bypass flag; value lives in tsd. */
void
arenas_tdata_bypass_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

/*
 * atexit(3) hook registered when opt_stats_print is enabled: merge tcache
 * stats into their arenas, then print allocator statistics.
 */
static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		tsdn_t *tsdn;
		unsigned narenas, i;

		tsdn = tsdn_fetch();

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arena_get(tsdn, i, false);
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(tsdn, &arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tsdn, tcache, arena);
				}
				malloc_mutex_unlock(tsdn, &arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

#ifndef JEMALLOC_HAVE_SECURE_GETENV
/*
 * Fallback for platforms lacking secure_getenv(3): refuse to read the
 * environment when the process is (or has been) set-user-ID/set-group-ID,
 * where issetugid(2) is available to detect that.
 */
static char *
secure_getenv(const char *name)
{

#  ifdef JEMALLOC_HAVE_ISSETUGID
	if (issetugid() != 0)
		return (NULL);
#  endif
	return (getenv(name));
}
#endif

/* Return the number of logical CPUs, or 1 if detection fails. */
static unsigned
malloc_ncpus(void)
{
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
	/*
	 * glibc >= 2.6 has the CPU_COUNT macro.
	 *
	 * glibc's sysconf() uses isspace().
 glibc allocates for the first time
	 * *before* setting up the isspace tables.  Therefore we need a
	 * different method to get the number of CPUs.
	 */
	{
		cpu_set_t set;

		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
		result = CPU_COUNT(&set);
	}
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	return ((result == -1) ? 1 : (unsigned)result);
}

/*
 * Parse the next key:value pair from the conf string at *opts_p.  On success,
 * store the key/value bounds via {k,klen,v,vlen}_p, advance *opts_p past any
 * trailing comma, and return false.  Return true when parsing must stop
 * (end of input, or a malformed string, which is also reported via
 * malloc_write()).
 */
static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	/* Scan the key: [A-Za-z0-9_]+ terminated by ':'. */
	for (accept = false; !accept;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case
		    '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	/* Scan the value: everything up to ',' or end of string. */
	for (accept = false; !accept;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}

/* Report a malformed or out-of-range conf option ("msg: key:value"). */
static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_slow_flag_init(void)
{
	/*
	 * Combine the runtime options into malloc_slow for fast path.  Called
	 * after processing all the options.
	 */
	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
	    | (opt_junk_free ? flag_opt_junk_free : 0)
	    | (opt_quarantine ? flag_opt_quarantine : 0)
	    | (opt_zero ? flag_opt_zero : 0)
	    | (opt_utrace ? flag_opt_utrace : 0)
	    | (opt_xmalloc ?
	    flag_opt_xmalloc : 0);

	if (config_valgrind)
		malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);

	malloc_slow = (malloc_slow_flags != 0);
}

/*
 * Parse runtime configuration from (in increasing priority order):
 *   0) the compiled-in config_malloc_conf string,
 *   1) the je_malloc_conf global (set by the embedding program),
 *   2) the name of the /etc/malloc.conf symbolic link,
 *   3) the MALLOC_CONF environment variable (via secure_getenv()).
 * Later sources override earlier ones because they are processed last.
 */
static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	/*
	 * Automatically configure valgrind before processing options.  The
	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
	 */
	if (config_valgrind) {
		in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
		if (config_fill && unlikely(in_valgrind)) {
			opt_junk = "false";
			opt_junk_alloc = false;
			opt_junk_free = false;
			assert(!opt_zero);
			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
			opt_redzone = true;
		}
		if (config_tcache && unlikely(in_valgrind))
			opt_tcache = false;
	}

	for (i = 0; i < 4; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			opts = config_malloc_conf;
			break;
		case 1:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 2: {
			ssize_t linklen = 0;
#ifndef _WIN32
			int saved_errno = errno;
			const char *linkname =
#  ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#  else
			    "/etc/malloc.conf"
#  endif
			    ;

			/*
			 * Try to use the contents of the "/etc/malloc.conf"
			 * symbolic link's name.
			 */
			linklen = readlink(linkname, buf, sizeof(buf) - 1);
			if (linklen == -1) {
				/* No configuration specified. */
				linklen = 0;
				/* Restore errno. */
				set_errno(saved_errno);
			}
#endif
			buf[linklen] = '\0';
			opts = buf;
			break;
		} case 3: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = secure_getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			not_reached();
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen)) {
/* Helper macros; each successful CONF_HANDLE_* ends with `continue'. */
#define	CONF_MATCH(n)							\
	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define	CONF_MATCH_VALUE(n)						\
	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
#define	CONF_HANDLE_BOOL(o, n, cont)					\
			if (CONF_MATCH(n)) {				\
				if (CONF_MATCH_VALUE("true"))		\
					o = true;			\
				else if (CONF_MATCH_VALUE("false"))	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				if (cont)				\
					continue;			\
			}
#define	CONF_HANDLE_T_U(t, o, n, min, max, clip)			\
			if (CONF_MATCH(n)) {				\
				uintmax_t um;				\
				char *end;				\
				\
				set_errno(0);				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (clip) {			\
					if ((min) != 0 && um < (min))	\
						o = (t)(min);		\
					else if (um > (max))		\
						o = (t)(max);		\
					else				\
						o = (t)um;		\
				} else {				\
					if (((min) != 0 && um < (min))	\
					    || um > (max)) {		\
						malloc_conf_error(	\
						    "Out-of-range "	\
						    "conf value",	\
						    k, klen, v, vlen);	\
					} else				\
						o = (t)um;		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_UNSIGNED(o, n, min, max, clip)			\
			CONF_HANDLE_T_U(unsigned, o, n, min, max, clip)
#define	CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
			CONF_HANDLE_T_U(size_t, o, n, min, max, clip)
#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (CONF_MATCH(n)) {				\
				long l;					\
				char *end;				\
				\
				set_errno(0);				\
				l = strtol(v, &end, 0);			\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)(min) || l >	\
				    (ssize_t)(max)) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(o, n, d)					\
			if (CONF_MATCH(n)) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(opt_abort, "abort", true)
			/*
			 * Chunks always require at least one header page,
			 * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
			 * possibly an additional page in the presence of
			 * redzones.  In order to simplify options processing,
			 * use a conservative bound that accommodates all these
			 * constraints.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
			    (sizeof(size_t) << 3) - 1, true)
			if (strncmp("dss", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < dss_prec_limit; i++) {
					if (strncmp(dss_prec_names[i], v, vlen)
					    == 0) {
						if (chunk_dss_prec_set(i)) {
							malloc_conf_error(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
							    dss_prec_names[i];
							match = true;
							break;
						}
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
			    UINT_MAX, false)
			if (strncmp("purge", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < purge_mode_limit; i++) {
					if (strncmp(purge_mode_names[i], v,
					    vlen) == 0) {
						opt_purge = (purge_mode_t)i;
						match = true;
						break;
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			/* NOTE(review): trailing ';' below is a harmless empty statement. */
			CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
			    NSTIME_SEC_MAX);
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
			if (config_fill) {
				if (CONF_MATCH("junk")) {
					if (CONF_MATCH_VALUE("true")) {
						if (config_valgrind &&
						    unlikely(in_valgrind)) {
							malloc_conf_error(
							    "Deallocation-time "
							    "junk filling cannot "
							    "be enabled while "
							    "running inside "
							    "Valgrind", k, klen, v,
							    vlen);
						} else {
							opt_junk = "true";
							opt_junk_alloc = true;
							opt_junk_free = true;
						}
					} else if (CONF_MATCH_VALUE("false")) {
						opt_junk = "false";
						opt_junk_alloc = opt_junk_free =
						    false;
					} else if (CONF_MATCH_VALUE("alloc")) {
						opt_junk = "alloc";
						opt_junk_alloc = true;
						opt_junk_free = false;
					} else if (CONF_MATCH_VALUE("free")) {
						if (config_valgrind &&
						    unlikely(in_valgrind)) {
							malloc_conf_error(
							    "Deallocation-time "
							    "junk filling cannot "
							    "be enabled while "
							    "running inside "
							    "Valgrind", k, klen, v,
							    vlen);
						} else {
							opt_junk = "free";
							opt_junk_alloc = false;
							opt_junk_free = true;
						}
					} else {
						malloc_conf_error(
						    "Invalid conf value", k,
						    klen, v, vlen);
					}
					continue;
				}
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX, false)
				CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
				CONF_HANDLE_BOOL(opt_zero, "zero", true)
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache",
				    !config_valgrind || !in_valgrind)
				/* Reached only when running inside Valgrind. */
				if (CONF_MATCH("tcache")) {
					assert(config_valgrind && in_valgrind);
					if (opt_tcache) {
						opt_tcache = false;
						malloc_conf_error(
						    "tcache cannot be enabled "
						    "while running inside Valgrind",
						    k, klen, v, vlen);
					}
					continue;
				}
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof", true)
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
				    true)
				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
				    "prof_thread_active_init", true)
				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1, true)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
				    true)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
				    true)
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
				    true)
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
				    true)
			}
			/* No handler consumed the pair. */
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_MATCH
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}

/*
 * Return true iff the calling thread must perform initialization; callers
 * hold init_lock.  Returns false once another thread has finished
 * initializing, or when this thread is recursing during its own init.
 */
static bool
malloc_init_hard_needed(void)
{

	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
	    malloc_init_recursible)) {
		/*
		 * Another
thread initialized the allocator before this one 1307a4bd5210SJason Evans * acquired init_lock, or this thread is the initializing 1308a4bd5210SJason Evans * thread, and it is recursively allocating. 1309a4bd5210SJason Evans */ 1310a4bd5210SJason Evans return (false); 1311a4bd5210SJason Evans } 1312a4bd5210SJason Evans #ifdef JEMALLOC_THREADED_INIT 1313d0e79aa3SJason Evans if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { 1314*bde95144SJason Evans spin_t spinner; 1315*bde95144SJason Evans 1316a4bd5210SJason Evans /* Busy-wait until the initializing thread completes. */ 1317*bde95144SJason Evans spin_init(&spinner); 1318a4bd5210SJason Evans do { 1319*bde95144SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 1320*bde95144SJason Evans spin_adaptive(&spinner); 1321*bde95144SJason Evans malloc_mutex_lock(TSDN_NULL, &init_lock); 1322d0e79aa3SJason Evans } while (!malloc_initialized()); 1323a4bd5210SJason Evans return (false); 1324a4bd5210SJason Evans } 1325a4bd5210SJason Evans #endif 1326d0e79aa3SJason Evans return (true); 1327d0e79aa3SJason Evans } 1328d0e79aa3SJason Evans 1329d0e79aa3SJason Evans static bool 13301f0a49e8SJason Evans malloc_init_hard_a0_locked() 1331d0e79aa3SJason Evans { 1332d0e79aa3SJason Evans 1333a4bd5210SJason Evans malloc_initializer = INITIALIZER; 1334a4bd5210SJason Evans 1335a4bd5210SJason Evans if (config_prof) 1336a4bd5210SJason Evans prof_boot0(); 1337a4bd5210SJason Evans malloc_conf_init(); 1338a4bd5210SJason Evans if (opt_stats_print) { 1339a4bd5210SJason Evans /* Print statistics at exit. 
*/ 1340a4bd5210SJason Evans if (atexit(stats_print_atexit) != 0) { 1341a4bd5210SJason Evans malloc_write("<jemalloc>: Error in atexit()\n"); 1342a4bd5210SJason Evans if (opt_abort) 1343a4bd5210SJason Evans abort(); 1344a4bd5210SJason Evans } 1345a4bd5210SJason Evans } 13461f0a49e8SJason Evans pages_boot(); 1347d0e79aa3SJason Evans if (base_boot()) 1348a4bd5210SJason Evans return (true); 1349d0e79aa3SJason Evans if (chunk_boot()) 1350a4bd5210SJason Evans return (true); 1351d0e79aa3SJason Evans if (ctl_boot()) 1352a4bd5210SJason Evans return (true); 1353a4bd5210SJason Evans if (config_prof) 1354a4bd5210SJason Evans prof_boot1(); 1355*bde95144SJason Evans arena_boot(); 13561f0a49e8SJason Evans if (config_tcache && tcache_boot(TSDN_NULL)) 1357a4bd5210SJason Evans return (true); 13581f0a49e8SJason Evans if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) 1359a4bd5210SJason Evans return (true); 1360a4bd5210SJason Evans /* 1361a4bd5210SJason Evans * Create enough scaffolding to allow recursive allocation in 1362a4bd5210SJason Evans * malloc_ncpus(). 1363a4bd5210SJason Evans */ 1364df0d881dSJason Evans narenas_auto = 1; 1365df0d881dSJason Evans narenas_total_set(narenas_auto); 1366d0e79aa3SJason Evans arenas = &a0; 136782872ac0SJason Evans memset(arenas, 0, sizeof(arena_t *) * narenas_auto); 1368a4bd5210SJason Evans /* 1369a4bd5210SJason Evans * Initialize one arena here. The rest are lazily created in 1370d0e79aa3SJason Evans * arena_choose_hard(). 
	 */
	if (arena_init(TSDN_NULL, 0) == NULL)
		return (true);

	malloc_init_state = malloc_init_a0_initialized;

	return (false);
}

/* Acquire init_lock and run the a0 bootstrap; returns true on error. */
static bool
malloc_init_hard_a0(void)
{
	bool ret;

	malloc_mutex_lock(TSDN_NULL, &init_lock);
	ret = malloc_init_hard_a0_locked();
	malloc_mutex_unlock(TSDN_NULL, &init_lock);
	return (ret);
}

/* Initialize data structures which may trigger recursive allocation. */
static bool
malloc_init_hard_recursible(void)
{

	malloc_init_state = malloc_init_recursible;

	ncpus = malloc_ncpus();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32) && !defined(__native_client__))
	/* LinuxThreads' pthread_atfork() allocates. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
		return (true);
	}
#endif

	return (false);
}

/*
 * Final initialization phase: size the arena set from opt_narenas/ncpus and
 * allocate the full arenas array.  Caller holds init_lock.  Returns true on
 * error.
 */
static bool
malloc_init_hard_finish(tsdn_t *tsdn)
{

	if (malloc_mutex_boot())
		return (true);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas_auto = opt_narenas;
	/*
	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
	 */
	if (narenas_auto > MALLOCX_ARENA_MAX) {
		narenas_auto = MALLOCX_ARENA_MAX;
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas_auto);
	}
	narenas_total_set(narenas_auto);

	/* Allocate and initialize arenas.
*/ 14441f0a49e8SJason Evans arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) * 1445df0d881dSJason Evans (MALLOCX_ARENA_MAX+1)); 1446d0e79aa3SJason Evans if (arenas == NULL) 1447a4bd5210SJason Evans return (true); 1448a4bd5210SJason Evans /* Copy the pointer to the one arena that was already initialized. */ 1449df0d881dSJason Evans arena_set(0, a0); 1450a4bd5210SJason Evans 1451d0e79aa3SJason Evans malloc_init_state = malloc_init_initialized; 1452df0d881dSJason Evans malloc_slow_flag_init(); 1453df0d881dSJason Evans 1454d0e79aa3SJason Evans return (false); 1455d0e79aa3SJason Evans } 1456d0e79aa3SJason Evans 1457d0e79aa3SJason Evans static bool 1458d0e79aa3SJason Evans malloc_init_hard(void) 1459d0e79aa3SJason Evans { 14601f0a49e8SJason Evans tsd_t *tsd; 1461d0e79aa3SJason Evans 1462536b3538SJason Evans #if defined(_WIN32) && _WIN32_WINNT < 0x0600 1463536b3538SJason Evans _init_init_lock(); 1464536b3538SJason Evans #endif 14651f0a49e8SJason Evans malloc_mutex_lock(TSDN_NULL, &init_lock); 1466d0e79aa3SJason Evans if (!malloc_init_hard_needed()) { 14671f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 1468d0e79aa3SJason Evans return (false); 1469d0e79aa3SJason Evans } 1470f921d10fSJason Evans 1471d0e79aa3SJason Evans if (malloc_init_state != malloc_init_a0_initialized && 1472d0e79aa3SJason Evans malloc_init_hard_a0_locked()) { 14731f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 1474d0e79aa3SJason Evans return (true); 1475d0e79aa3SJason Evans } 1476df0d881dSJason Evans 14771f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 14781f0a49e8SJason Evans /* Recursive allocation relies on functional tsd. 
*/ 14791f0a49e8SJason Evans tsd = malloc_tsd_boot0(); 14801f0a49e8SJason Evans if (tsd == NULL) 14811f0a49e8SJason Evans return (true); 14821f0a49e8SJason Evans if (malloc_init_hard_recursible()) 14831f0a49e8SJason Evans return (true); 14841f0a49e8SJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); 14851f0a49e8SJason Evans 1486*bde95144SJason Evans if (config_prof && prof_boot2(tsd)) { 14871f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); 1488d0e79aa3SJason Evans return (true); 1489d0e79aa3SJason Evans } 1490d0e79aa3SJason Evans 14911f0a49e8SJason Evans if (malloc_init_hard_finish(tsd_tsdn(tsd))) { 14921f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); 1493df0d881dSJason Evans return (true); 1494df0d881dSJason Evans } 1495d0e79aa3SJason Evans 14961f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); 1497d0e79aa3SJason Evans malloc_tsd_boot1(); 1498a4bd5210SJason Evans return (false); 1499a4bd5210SJason Evans } 1500a4bd5210SJason Evans 1501a4bd5210SJason Evans /* 1502a4bd5210SJason Evans * End initialization functions. 1503a4bd5210SJason Evans */ 1504a4bd5210SJason Evans /******************************************************************************/ 1505a4bd5210SJason Evans /* 1506a4bd5210SJason Evans * Begin malloc(3)-compatible functions. 
1507a4bd5210SJason Evans */ 1508a4bd5210SJason Evans 1509f921d10fSJason Evans static void * 15101f0a49e8SJason Evans ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero, 1511df0d881dSJason Evans prof_tctx_t *tctx, bool slow_path) 1512f921d10fSJason Evans { 1513f921d10fSJason Evans void *p; 1514f921d10fSJason Evans 1515d0e79aa3SJason Evans if (tctx == NULL) 1516f921d10fSJason Evans return (NULL); 1517d0e79aa3SJason Evans if (usize <= SMALL_MAXCLASS) { 1518df0d881dSJason Evans szind_t ind_large = size2index(LARGE_MINCLASS); 15191f0a49e8SJason Evans p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path); 1520f921d10fSJason Evans if (p == NULL) 1521f921d10fSJason Evans return (NULL); 15221f0a49e8SJason Evans arena_prof_promoted(tsd_tsdn(tsd), p, usize); 1523f921d10fSJason Evans } else 15241f0a49e8SJason Evans p = ialloc(tsd, usize, ind, zero, slow_path); 1525f921d10fSJason Evans 1526f921d10fSJason Evans return (p); 1527f921d10fSJason Evans } 1528f921d10fSJason Evans 1529f921d10fSJason Evans JEMALLOC_ALWAYS_INLINE_C void * 15301f0a49e8SJason Evans ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path) 1531f921d10fSJason Evans { 1532f921d10fSJason Evans void *p; 1533d0e79aa3SJason Evans prof_tctx_t *tctx; 1534f921d10fSJason Evans 1535536b3538SJason Evans tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); 1536d0e79aa3SJason Evans if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) 15371f0a49e8SJason Evans p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path); 1538f921d10fSJason Evans else 15391f0a49e8SJason Evans p = ialloc(tsd, usize, ind, zero, slow_path); 1540d0e79aa3SJason Evans if (unlikely(p == NULL)) { 1541d0e79aa3SJason Evans prof_alloc_rollback(tsd, tctx, true); 1542f921d10fSJason Evans return (NULL); 1543d0e79aa3SJason Evans } 15441f0a49e8SJason Evans prof_malloc(tsd_tsdn(tsd), p, usize, tctx); 1545f921d10fSJason Evans 1546f921d10fSJason Evans return (p); 1547f921d10fSJason Evans } 
1548f921d10fSJason Evans 15491f0a49e8SJason Evans /* 15501f0a49e8SJason Evans * ialloc_body() is inlined so that fast and slow paths are generated separately 15511f0a49e8SJason Evans * with statically known slow_path. 15521f0a49e8SJason Evans * 15531f0a49e8SJason Evans * This function guarantees that *tsdn is non-NULL on success. 15541f0a49e8SJason Evans */ 1555d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C void * 15561f0a49e8SJason Evans ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize, 15571f0a49e8SJason Evans bool slow_path) 1558d0e79aa3SJason Evans { 15591f0a49e8SJason Evans tsd_t *tsd; 1560df0d881dSJason Evans szind_t ind; 1561f921d10fSJason Evans 15621f0a49e8SJason Evans if (slow_path && unlikely(malloc_init())) { 15631f0a49e8SJason Evans *tsdn = NULL; 1564d0e79aa3SJason Evans return (NULL); 15651f0a49e8SJason Evans } 15661f0a49e8SJason Evans 15671f0a49e8SJason Evans tsd = tsd_fetch(); 15681f0a49e8SJason Evans *tsdn = tsd_tsdn(tsd); 15691f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 15701f0a49e8SJason Evans 1571df0d881dSJason Evans ind = size2index(size); 1572df0d881dSJason Evans if (unlikely(ind >= NSIZES)) 1573d0e79aa3SJason Evans return (NULL); 1574df0d881dSJason Evans 1575df0d881dSJason Evans if (config_stats || (config_prof && opt_prof) || (slow_path && 1576df0d881dSJason Evans config_valgrind && unlikely(in_valgrind))) { 1577df0d881dSJason Evans *usize = index2size(ind); 1578df0d881dSJason Evans assert(*usize > 0 && *usize <= HUGE_MAXCLASS); 1579d0e79aa3SJason Evans } 1580d0e79aa3SJason Evans 1581df0d881dSJason Evans if (config_prof && opt_prof) 15821f0a49e8SJason Evans return (ialloc_prof(tsd, *usize, ind, zero, slow_path)); 1583df0d881dSJason Evans 15841f0a49e8SJason Evans return (ialloc(tsd, size, ind, zero, slow_path)); 1585df0d881dSJason Evans } 1586df0d881dSJason Evans 1587df0d881dSJason Evans JEMALLOC_ALWAYS_INLINE_C void 15881f0a49e8SJason Evans ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char 
*func, 15891f0a49e8SJason Evans bool update_errno, bool slow_path) 1590df0d881dSJason Evans { 15911f0a49e8SJason Evans 15921f0a49e8SJason Evans assert(!tsdn_null(tsdn) || ret == NULL); 15931f0a49e8SJason Evans 1594df0d881dSJason Evans if (unlikely(ret == NULL)) { 1595df0d881dSJason Evans if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) { 15961f0a49e8SJason Evans malloc_printf("<jemalloc>: Error in %s(): out of " 15971f0a49e8SJason Evans "memory\n", func); 1598df0d881dSJason Evans abort(); 1599df0d881dSJason Evans } 16001f0a49e8SJason Evans if (update_errno) 1601df0d881dSJason Evans set_errno(ENOMEM); 1602df0d881dSJason Evans } 1603df0d881dSJason Evans if (config_stats && likely(ret != NULL)) { 16041f0a49e8SJason Evans assert(usize == isalloc(tsdn, ret, config_prof)); 16051f0a49e8SJason Evans *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize; 1606df0d881dSJason Evans } 16071f0a49e8SJason Evans witness_assert_lockless(tsdn); 1608d0e79aa3SJason Evans } 1609d0e79aa3SJason Evans 1610d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 1611d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 1612d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) 1613a4bd5210SJason Evans je_malloc(size_t size) 1614a4bd5210SJason Evans { 1615a4bd5210SJason Evans void *ret; 16161f0a49e8SJason Evans tsdn_t *tsdn; 1617e722f8f8SJason Evans size_t usize JEMALLOC_CC_SILENCE_INIT(0); 1618a4bd5210SJason Evans 1619a4bd5210SJason Evans if (size == 0) 1620a4bd5210SJason Evans size = 1; 1621a4bd5210SJason Evans 1622df0d881dSJason Evans if (likely(!malloc_slow)) { 16231f0a49e8SJason Evans ret = ialloc_body(size, false, &tsdn, &usize, false); 16241f0a49e8SJason Evans ialloc_post_check(ret, tsdn, usize, "malloc", true, false); 1625df0d881dSJason Evans } else { 16261f0a49e8SJason Evans ret = ialloc_body(size, false, &tsdn, &usize, true); 16271f0a49e8SJason Evans ialloc_post_check(ret, tsdn, usize, "malloc", true, true); 1628a4bd5210SJason Evans UTRACE(0, size, 
ret); 16291f0a49e8SJason Evans JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false); 1630df0d881dSJason Evans } 1631df0d881dSJason Evans 1632a4bd5210SJason Evans return (ret); 1633a4bd5210SJason Evans } 1634a4bd5210SJason Evans 1635f921d10fSJason Evans static void * 1636d0e79aa3SJason Evans imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize, 1637d0e79aa3SJason Evans prof_tctx_t *tctx) 1638f921d10fSJason Evans { 1639f921d10fSJason Evans void *p; 1640f921d10fSJason Evans 1641d0e79aa3SJason Evans if (tctx == NULL) 1642f921d10fSJason Evans return (NULL); 1643d0e79aa3SJason Evans if (usize <= SMALL_MAXCLASS) { 1644d0e79aa3SJason Evans assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS); 1645d0e79aa3SJason Evans p = ipalloc(tsd, LARGE_MINCLASS, alignment, false); 1646f921d10fSJason Evans if (p == NULL) 1647f921d10fSJason Evans return (NULL); 16481f0a49e8SJason Evans arena_prof_promoted(tsd_tsdn(tsd), p, usize); 1649f921d10fSJason Evans } else 1650d0e79aa3SJason Evans p = ipalloc(tsd, usize, alignment, false); 1651f921d10fSJason Evans 1652f921d10fSJason Evans return (p); 1653f921d10fSJason Evans } 1654f921d10fSJason Evans 1655f921d10fSJason Evans JEMALLOC_ALWAYS_INLINE_C void * 1656d0e79aa3SJason Evans imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize) 1657f921d10fSJason Evans { 1658f921d10fSJason Evans void *p; 1659d0e79aa3SJason Evans prof_tctx_t *tctx; 1660f921d10fSJason Evans 1661536b3538SJason Evans tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); 1662d0e79aa3SJason Evans if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) 1663d0e79aa3SJason Evans p = imemalign_prof_sample(tsd, alignment, usize, tctx); 1664f921d10fSJason Evans else 1665d0e79aa3SJason Evans p = ipalloc(tsd, usize, alignment, false); 1666d0e79aa3SJason Evans if (unlikely(p == NULL)) { 1667d0e79aa3SJason Evans prof_alloc_rollback(tsd, tctx, true); 1668f921d10fSJason Evans return (NULL); 1669d0e79aa3SJason Evans } 16701f0a49e8SJason Evans 
prof_malloc(tsd_tsdn(tsd), p, usize, tctx); 1671f921d10fSJason Evans 1672f921d10fSJason Evans return (p); 1673f921d10fSJason Evans } 1674f921d10fSJason Evans 1675a4bd5210SJason Evans JEMALLOC_ATTR(nonnull(1)) 1676a4bd5210SJason Evans static int 1677f921d10fSJason Evans imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) 1678a4bd5210SJason Evans { 1679a4bd5210SJason Evans int ret; 1680d0e79aa3SJason Evans tsd_t *tsd; 1681a4bd5210SJason Evans size_t usize; 1682a4bd5210SJason Evans void *result; 1683a4bd5210SJason Evans 1684a4bd5210SJason Evans assert(min_alignment != 0); 1685a4bd5210SJason Evans 1686d0e79aa3SJason Evans if (unlikely(malloc_init())) { 16871f0a49e8SJason Evans tsd = NULL; 1688a4bd5210SJason Evans result = NULL; 1689f921d10fSJason Evans goto label_oom; 1690d0e79aa3SJason Evans } 1691d0e79aa3SJason Evans tsd = tsd_fetch(); 16921f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 1693a4bd5210SJason Evans if (size == 0) 1694a4bd5210SJason Evans size = 1; 1695a4bd5210SJason Evans 1696a4bd5210SJason Evans /* Make sure that alignment is a large enough power of 2. 
*/ 1697d0e79aa3SJason Evans if (unlikely(((alignment - 1) & alignment) != 0 1698d0e79aa3SJason Evans || (alignment < min_alignment))) { 1699d0e79aa3SJason Evans if (config_xmalloc && unlikely(opt_xmalloc)) { 1700a4bd5210SJason Evans malloc_write("<jemalloc>: Error allocating " 1701a4bd5210SJason Evans "aligned memory: invalid alignment\n"); 1702a4bd5210SJason Evans abort(); 1703a4bd5210SJason Evans } 1704a4bd5210SJason Evans result = NULL; 1705a4bd5210SJason Evans ret = EINVAL; 1706a4bd5210SJason Evans goto label_return; 1707a4bd5210SJason Evans } 1708a4bd5210SJason Evans 1709a4bd5210SJason Evans usize = sa2u(size, alignment); 1710df0d881dSJason Evans if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { 1711a4bd5210SJason Evans result = NULL; 1712f921d10fSJason Evans goto label_oom; 1713a4bd5210SJason Evans } 1714a4bd5210SJason Evans 1715d0e79aa3SJason Evans if (config_prof && opt_prof) 1716d0e79aa3SJason Evans result = imemalign_prof(tsd, alignment, usize); 1717d0e79aa3SJason Evans else 1718d0e79aa3SJason Evans result = ipalloc(tsd, usize, alignment, false); 1719d0e79aa3SJason Evans if (unlikely(result == NULL)) 1720f921d10fSJason Evans goto label_oom; 1721d0e79aa3SJason Evans assert(((uintptr_t)result & (alignment - 1)) == ZU(0)); 1722a4bd5210SJason Evans 1723a4bd5210SJason Evans *memptr = result; 1724a4bd5210SJason Evans ret = 0; 1725a4bd5210SJason Evans label_return: 1726d0e79aa3SJason Evans if (config_stats && likely(result != NULL)) { 17271f0a49e8SJason Evans assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof)); 1728d0e79aa3SJason Evans *tsd_thread_allocatedp_get(tsd) += usize; 1729a4bd5210SJason Evans } 1730a4bd5210SJason Evans UTRACE(0, size, result); 17311f0a49e8SJason Evans JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize, 17321f0a49e8SJason Evans false); 17331f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 1734a4bd5210SJason Evans return (ret); 1735f921d10fSJason Evans label_oom: 1736f921d10fSJason Evans 
assert(result == NULL); 1737d0e79aa3SJason Evans if (config_xmalloc && unlikely(opt_xmalloc)) { 1738f921d10fSJason Evans malloc_write("<jemalloc>: Error allocating aligned memory: " 1739f921d10fSJason Evans "out of memory\n"); 1740f921d10fSJason Evans abort(); 1741f921d10fSJason Evans } 1742f921d10fSJason Evans ret = ENOMEM; 17431f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 1744f921d10fSJason Evans goto label_return; 1745a4bd5210SJason Evans } 1746a4bd5210SJason Evans 1747d0e79aa3SJason Evans JEMALLOC_EXPORT int JEMALLOC_NOTHROW 1748d0e79aa3SJason Evans JEMALLOC_ATTR(nonnull(1)) 1749a4bd5210SJason Evans je_posix_memalign(void **memptr, size_t alignment, size_t size) 1750a4bd5210SJason Evans { 17511f0a49e8SJason Evans int ret; 17521f0a49e8SJason Evans 17531f0a49e8SJason Evans ret = imemalign(memptr, alignment, size, sizeof(void *)); 17541f0a49e8SJason Evans 1755a4bd5210SJason Evans return (ret); 1756a4bd5210SJason Evans } 1757a4bd5210SJason Evans 1758d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 1759d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 1760d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) 1761a4bd5210SJason Evans je_aligned_alloc(size_t alignment, size_t size) 1762a4bd5210SJason Evans { 1763a4bd5210SJason Evans void *ret; 1764a4bd5210SJason Evans int err; 1765a4bd5210SJason Evans 1766d0e79aa3SJason Evans if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) { 1767a4bd5210SJason Evans ret = NULL; 1768e722f8f8SJason Evans set_errno(err); 1769a4bd5210SJason Evans } 17701f0a49e8SJason Evans 1771a4bd5210SJason Evans return (ret); 1772a4bd5210SJason Evans } 1773a4bd5210SJason Evans 1774d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 1775d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 1776d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) 1777a4bd5210SJason Evans je_calloc(size_t num, size_t size) 1778a4bd5210SJason Evans { 1779a4bd5210SJason 
Evans void *ret; 17801f0a49e8SJason Evans tsdn_t *tsdn; 1781a4bd5210SJason Evans size_t num_size; 1782e722f8f8SJason Evans size_t usize JEMALLOC_CC_SILENCE_INIT(0); 1783a4bd5210SJason Evans 1784a4bd5210SJason Evans num_size = num * size; 1785d0e79aa3SJason Evans if (unlikely(num_size == 0)) { 1786a4bd5210SJason Evans if (num == 0 || size == 0) 1787a4bd5210SJason Evans num_size = 1; 17881f0a49e8SJason Evans else 17891f0a49e8SJason Evans num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */ 1790a4bd5210SJason Evans /* 1791a4bd5210SJason Evans * Try to avoid division here. We know that it isn't possible to 1792a4bd5210SJason Evans * overflow during multiplication if neither operand uses any of the 1793a4bd5210SJason Evans * most significant half of the bits in a size_t. 1794a4bd5210SJason Evans */ 1795d0e79aa3SJason Evans } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 17961f0a49e8SJason Evans 2))) && (num_size / size != num))) 17971f0a49e8SJason Evans num_size = HUGE_MAXCLASS + 1; /* size_t overflow. 
*/ 1798a4bd5210SJason Evans 17991f0a49e8SJason Evans if (likely(!malloc_slow)) { 18001f0a49e8SJason Evans ret = ialloc_body(num_size, true, &tsdn, &usize, false); 18011f0a49e8SJason Evans ialloc_post_check(ret, tsdn, usize, "calloc", true, false); 1802a4bd5210SJason Evans } else { 18031f0a49e8SJason Evans ret = ialloc_body(num_size, true, &tsdn, &usize, true); 18041f0a49e8SJason Evans ialloc_post_check(ret, tsdn, usize, "calloc", true, true); 18051f0a49e8SJason Evans UTRACE(0, num_size, ret); 180662b2691eSJason Evans JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true); 1807a4bd5210SJason Evans } 1808a4bd5210SJason Evans 1809a4bd5210SJason Evans return (ret); 1810a4bd5210SJason Evans } 1811a4bd5210SJason Evans 1812f921d10fSJason Evans static void * 1813536b3538SJason Evans irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, 1814d0e79aa3SJason Evans prof_tctx_t *tctx) 1815a4bd5210SJason Evans { 1816f921d10fSJason Evans void *p; 1817a4bd5210SJason Evans 1818d0e79aa3SJason Evans if (tctx == NULL) 1819f921d10fSJason Evans return (NULL); 1820d0e79aa3SJason Evans if (usize <= SMALL_MAXCLASS) { 1821536b3538SJason Evans p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); 1822f921d10fSJason Evans if (p == NULL) 1823f921d10fSJason Evans return (NULL); 18241f0a49e8SJason Evans arena_prof_promoted(tsd_tsdn(tsd), p, usize); 1825a4bd5210SJason Evans } else 1826536b3538SJason Evans p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); 1827f921d10fSJason Evans 1828f921d10fSJason Evans return (p); 1829a4bd5210SJason Evans } 1830a4bd5210SJason Evans 1831f921d10fSJason Evans JEMALLOC_ALWAYS_INLINE_C void * 1832536b3538SJason Evans irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) 1833a4bd5210SJason Evans { 1834f921d10fSJason Evans void *p; 1835536b3538SJason Evans bool prof_active; 1836d0e79aa3SJason Evans prof_tctx_t *old_tctx, *tctx; 1837a4bd5210SJason Evans 1838536b3538SJason Evans prof_active = 
prof_active_get_unlocked(); 18391f0a49e8SJason Evans old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr); 1840536b3538SJason Evans tctx = prof_alloc_prep(tsd, usize, prof_active, true); 1841d0e79aa3SJason Evans if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) 1842536b3538SJason Evans p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); 1843f921d10fSJason Evans else 1844536b3538SJason Evans p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); 1845536b3538SJason Evans if (unlikely(p == NULL)) { 1846536b3538SJason Evans prof_alloc_rollback(tsd, tctx, true); 1847f921d10fSJason Evans return (NULL); 1848536b3538SJason Evans } 1849536b3538SJason Evans prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, 1850536b3538SJason Evans old_tctx); 1851f921d10fSJason Evans 1852f921d10fSJason Evans return (p); 1853f921d10fSJason Evans } 1854f921d10fSJason Evans 1855f921d10fSJason Evans JEMALLOC_INLINE_C void 1856df0d881dSJason Evans ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) 1857f921d10fSJason Evans { 1858a4bd5210SJason Evans size_t usize; 1859f921d10fSJason Evans UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); 1860a4bd5210SJason Evans 18611f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 18621f0a49e8SJason Evans 1863f921d10fSJason Evans assert(ptr != NULL); 1864d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 1865a4bd5210SJason Evans 1866a4bd5210SJason Evans if (config_prof && opt_prof) { 18671f0a49e8SJason Evans usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); 1868d0e79aa3SJason Evans prof_free(tsd, ptr, usize); 1869a4bd5210SJason Evans } else if (config_stats || config_valgrind) 18701f0a49e8SJason Evans usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); 1871a4bd5210SJason Evans if (config_stats) 1872d0e79aa3SJason Evans *tsd_thread_deallocatedp_get(tsd) += usize; 1873df0d881dSJason Evans 1874df0d881dSJason Evans if (likely(!slow_path)) 1875df0d881dSJason Evans iqalloc(tsd, ptr, tcache, false); 
1876df0d881dSJason Evans else { 1877d0e79aa3SJason Evans if (config_valgrind && unlikely(in_valgrind)) 18781f0a49e8SJason Evans rzsize = p2rz(tsd_tsdn(tsd), ptr); 1879df0d881dSJason Evans iqalloc(tsd, ptr, tcache, true); 1880a4bd5210SJason Evans JEMALLOC_VALGRIND_FREE(ptr, rzsize); 1881a4bd5210SJason Evans } 1882df0d881dSJason Evans } 1883f921d10fSJason Evans 1884d0e79aa3SJason Evans JEMALLOC_INLINE_C void 18851f0a49e8SJason Evans isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) 1886d0e79aa3SJason Evans { 1887d0e79aa3SJason Evans UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); 1888d0e79aa3SJason Evans 18891f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 18901f0a49e8SJason Evans 1891d0e79aa3SJason Evans assert(ptr != NULL); 1892d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 1893d0e79aa3SJason Evans 1894d0e79aa3SJason Evans if (config_prof && opt_prof) 1895d0e79aa3SJason Evans prof_free(tsd, ptr, usize); 1896d0e79aa3SJason Evans if (config_stats) 1897d0e79aa3SJason Evans *tsd_thread_deallocatedp_get(tsd) += usize; 1898d0e79aa3SJason Evans if (config_valgrind && unlikely(in_valgrind)) 18991f0a49e8SJason Evans rzsize = p2rz(tsd_tsdn(tsd), ptr); 19001f0a49e8SJason Evans isqalloc(tsd, ptr, usize, tcache, slow_path); 1901d0e79aa3SJason Evans JEMALLOC_VALGRIND_FREE(ptr, rzsize); 1902d0e79aa3SJason Evans } 1903d0e79aa3SJason Evans 1904d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 1905d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 1906d0e79aa3SJason Evans JEMALLOC_ALLOC_SIZE(2) 1907f921d10fSJason Evans je_realloc(void *ptr, size_t size) 1908f921d10fSJason Evans { 1909f921d10fSJason Evans void *ret; 19101f0a49e8SJason Evans tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); 1911f921d10fSJason Evans size_t usize JEMALLOC_CC_SILENCE_INIT(0); 1912f921d10fSJason Evans size_t old_usize = 0; 1913f921d10fSJason Evans UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); 1914f921d10fSJason 
Evans 1915d0e79aa3SJason Evans if (unlikely(size == 0)) { 1916f921d10fSJason Evans if (ptr != NULL) { 19171f0a49e8SJason Evans tsd_t *tsd; 19181f0a49e8SJason Evans 1919f921d10fSJason Evans /* realloc(ptr, 0) is equivalent to free(ptr). */ 1920f921d10fSJason Evans UTRACE(ptr, 0, 0); 1921d0e79aa3SJason Evans tsd = tsd_fetch(); 1922df0d881dSJason Evans ifree(tsd, ptr, tcache_get(tsd, false), true); 1923f921d10fSJason Evans return (NULL); 1924f921d10fSJason Evans } 1925f921d10fSJason Evans size = 1; 1926f921d10fSJason Evans } 1927f921d10fSJason Evans 1928d0e79aa3SJason Evans if (likely(ptr != NULL)) { 19291f0a49e8SJason Evans tsd_t *tsd; 19301f0a49e8SJason Evans 1931d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 1932f921d10fSJason Evans malloc_thread_init(); 1933d0e79aa3SJason Evans tsd = tsd_fetch(); 1934f921d10fSJason Evans 19351f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 19361f0a49e8SJason Evans 19371f0a49e8SJason Evans old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); 19381f0a49e8SJason Evans if (config_valgrind && unlikely(in_valgrind)) { 19391f0a49e8SJason Evans old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) : 19401f0a49e8SJason Evans u2rz(old_usize); 19411f0a49e8SJason Evans } 1942f921d10fSJason Evans 1943f921d10fSJason Evans if (config_prof && opt_prof) { 1944f921d10fSJason Evans usize = s2u(size); 1945df0d881dSJason Evans ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ? 1946df0d881dSJason Evans NULL : irealloc_prof(tsd, ptr, old_usize, usize); 1947f921d10fSJason Evans } else { 1948d0e79aa3SJason Evans if (config_stats || (config_valgrind && 1949d0e79aa3SJason Evans unlikely(in_valgrind))) 1950f921d10fSJason Evans usize = s2u(size); 1951d0e79aa3SJason Evans ret = iralloc(tsd, ptr, old_usize, size, 0, false); 1952f921d10fSJason Evans } 19531f0a49e8SJason Evans tsdn = tsd_tsdn(tsd); 1954f921d10fSJason Evans } else { 1955f921d10fSJason Evans /* realloc(NULL, size) is equivalent to malloc(size). 
*/ 1956df0d881dSJason Evans if (likely(!malloc_slow)) 19571f0a49e8SJason Evans ret = ialloc_body(size, false, &tsdn, &usize, false); 1958df0d881dSJason Evans else 19591f0a49e8SJason Evans ret = ialloc_body(size, false, &tsdn, &usize, true); 19601f0a49e8SJason Evans assert(!tsdn_null(tsdn) || ret == NULL); 1961f921d10fSJason Evans } 1962f921d10fSJason Evans 1963d0e79aa3SJason Evans if (unlikely(ret == NULL)) { 1964d0e79aa3SJason Evans if (config_xmalloc && unlikely(opt_xmalloc)) { 1965f921d10fSJason Evans malloc_write("<jemalloc>: Error in realloc(): " 1966f921d10fSJason Evans "out of memory\n"); 1967f921d10fSJason Evans abort(); 1968f921d10fSJason Evans } 1969f921d10fSJason Evans set_errno(ENOMEM); 1970f921d10fSJason Evans } 1971d0e79aa3SJason Evans if (config_stats && likely(ret != NULL)) { 19721f0a49e8SJason Evans tsd_t *tsd; 19731f0a49e8SJason Evans 19741f0a49e8SJason Evans assert(usize == isalloc(tsdn, ret, config_prof)); 19751f0a49e8SJason Evans tsd = tsdn_tsd(tsdn); 1976d0e79aa3SJason Evans *tsd_thread_allocatedp_get(tsd) += usize; 1977d0e79aa3SJason Evans *tsd_thread_deallocatedp_get(tsd) += old_usize; 1978f921d10fSJason Evans } 1979f921d10fSJason Evans UTRACE(ptr, size, ret); 19801f0a49e8SJason Evans JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize, 1981d0e79aa3SJason Evans old_rzsize, true, false); 19821f0a49e8SJason Evans witness_assert_lockless(tsdn); 1983f921d10fSJason Evans return (ret); 1984f921d10fSJason Evans } 1985f921d10fSJason Evans 1986d0e79aa3SJason Evans JEMALLOC_EXPORT void JEMALLOC_NOTHROW 1987f921d10fSJason Evans je_free(void *ptr) 1988f921d10fSJason Evans { 1989f921d10fSJason Evans 1990f921d10fSJason Evans UTRACE(ptr, 0, 0); 1991d0e79aa3SJason Evans if (likely(ptr != NULL)) { 1992d0e79aa3SJason Evans tsd_t *tsd = tsd_fetch(); 19931f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 1994df0d881dSJason Evans if (likely(!malloc_slow)) 1995df0d881dSJason Evans ifree(tsd, ptr, tcache_get(tsd, false), false); 
1996df0d881dSJason Evans else 1997df0d881dSJason Evans ifree(tsd, ptr, tcache_get(tsd, false), true); 19981f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 1999d0e79aa3SJason Evans } 2000a4bd5210SJason Evans } 2001a4bd5210SJason Evans 2002a4bd5210SJason Evans /* 2003a4bd5210SJason Evans * End malloc(3)-compatible functions. 2004a4bd5210SJason Evans */ 2005a4bd5210SJason Evans /******************************************************************************/ 2006a4bd5210SJason Evans /* 2007a4bd5210SJason Evans * Begin non-standard override functions. 2008a4bd5210SJason Evans */ 2009a4bd5210SJason Evans 2010a4bd5210SJason Evans #ifdef JEMALLOC_OVERRIDE_MEMALIGN 2011d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2012d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2013d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) 2014a4bd5210SJason Evans je_memalign(size_t alignment, size_t size) 2015a4bd5210SJason Evans { 2016a4bd5210SJason Evans void *ret JEMALLOC_CC_SILENCE_INIT(NULL); 2017d0e79aa3SJason Evans if (unlikely(imemalign(&ret, alignment, size, 1) != 0)) 2018d0e79aa3SJason Evans ret = NULL; 2019a4bd5210SJason Evans return (ret); 2020a4bd5210SJason Evans } 2021a4bd5210SJason Evans #endif 2022a4bd5210SJason Evans 2023a4bd5210SJason Evans #ifdef JEMALLOC_OVERRIDE_VALLOC 2024d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2025d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2026d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) 2027a4bd5210SJason Evans je_valloc(size_t size) 2028a4bd5210SJason Evans { 2029a4bd5210SJason Evans void *ret JEMALLOC_CC_SILENCE_INIT(NULL); 2030d0e79aa3SJason Evans if (unlikely(imemalign(&ret, PAGE, size, 1) != 0)) 2031d0e79aa3SJason Evans ret = NULL; 2032a4bd5210SJason Evans return (ret); 2033a4bd5210SJason Evans } 2034a4bd5210SJason Evans #endif 2035a4bd5210SJason Evans 2036a4bd5210SJason Evans /* 2037a4bd5210SJason Evans * is_malloc(je_malloc) is some macro magic to detect if 
jemalloc_defs.h has 2038a4bd5210SJason Evans * #define je_malloc malloc 2039a4bd5210SJason Evans */ 2040a4bd5210SJason Evans #define malloc_is_malloc 1 2041a4bd5210SJason Evans #define is_malloc_(a) malloc_is_ ## a 2042a4bd5210SJason Evans #define is_malloc(a) is_malloc_(a) 2043a4bd5210SJason Evans 2044d0e79aa3SJason Evans #if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)) 2045a4bd5210SJason Evans /* 2046a4bd5210SJason Evans * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible 2047a4bd5210SJason Evans * to inconsistently reference libc's malloc(3)-compatible functions 2048a4bd5210SJason Evans * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). 2049a4bd5210SJason Evans * 2050a4bd5210SJason Evans * These definitions interpose hooks in glibc. The functions are actually 2051a4bd5210SJason Evans * passed an extra argument for the caller return address, which will be 2052a4bd5210SJason Evans * ignored. 2053a4bd5210SJason Evans */ 205482872ac0SJason Evans JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; 205582872ac0SJason Evans JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; 205682872ac0SJason Evans JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; 2057d0e79aa3SJason Evans # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK 205882872ac0SJason Evans JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = 2059e722f8f8SJason Evans je_memalign; 2060a4bd5210SJason Evans # endif 2061*bde95144SJason Evans 2062*bde95144SJason Evans #ifdef CPU_COUNT 2063*bde95144SJason Evans /* 2064*bde95144SJason Evans * To enable static linking with glibc, the libc specific malloc interface must 2065*bde95144SJason Evans * be implemented also, so none of glibc's malloc.o functions are added to the 2066*bde95144SJason Evans * link. 
2067*bde95144SJason Evans */ 2068*bde95144SJason Evans #define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) 2069*bde95144SJason Evans /* To force macro expansion of je_ prefix before stringification. */ 2070*bde95144SJason Evans #define PREALIAS(je_fn) ALIAS(je_fn) 2071*bde95144SJason Evans void *__libc_malloc(size_t size) PREALIAS(je_malloc); 2072*bde95144SJason Evans void __libc_free(void* ptr) PREALIAS(je_free); 2073*bde95144SJason Evans void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); 2074*bde95144SJason Evans void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); 2075*bde95144SJason Evans void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); 2076*bde95144SJason Evans void *__libc_valloc(size_t size) PREALIAS(je_valloc); 2077*bde95144SJason Evans int __posix_memalign(void** r, size_t a, size_t s) 2078*bde95144SJason Evans PREALIAS(je_posix_memalign); 2079*bde95144SJason Evans #undef PREALIAS 2080*bde95144SJason Evans #undef ALIAS 2081*bde95144SJason Evans 2082*bde95144SJason Evans #endif 2083*bde95144SJason Evans 2084d0e79aa3SJason Evans #endif 2085a4bd5210SJason Evans 2086a4bd5210SJason Evans /* 2087a4bd5210SJason Evans * End non-standard override functions. 2088a4bd5210SJason Evans */ 2089a4bd5210SJason Evans /******************************************************************************/ 2090a4bd5210SJason Evans /* 2091a4bd5210SJason Evans * Begin non-standard functions. 
 */

/*
 * Decode the MALLOCX_* flag bits into their constituent allocation
 * parameters: *usize (adjusted for alignment), *alignment (0 if none
 * requested), *zero, *tcache, and *arena.  Returns true on failure, i.e.
 * when the computed usize is 0 or exceeds HUGE_MAXCLASS, or when an
 * explicitly requested arena index cannot be resolved.
 */
JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{

	if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
		*alignment = 0;
		*usize = s2u(size);
	} else {
		*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
		*usize = sa2u(size, *alignment);
	}
	if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
		return (true);
	*zero = MALLOCX_ZERO_GET(flags);
	if ((flags & MALLOCX_TCACHE_MASK) != 0) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			*tcache = NULL;
		else
			*tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		*tcache = tcache_get(tsd, true);
	if ((flags & MALLOCX_ARENA_MASK) != 0) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		*arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(*arena == NULL))
			return (true);
	} else
		*arena = NULL;
	return (false);
}

/*
 * Allocate usize bytes honoring already-decoded flag parameters: aligned
 * allocation goes through ipalloct(); the common unaligned case uses
 * iallocztm() with the size class index for usize.
 */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena, bool slow_path)
{
	szind_t ind;

	if (unlikely(alignment != 0))
		return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
	ind = size2index(usize);
	assert(ind < NSIZES);
	return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena,
	    slow_path));
}

/*
 * Allocation path for a profiling-sampled request: small requests are
 * allocated as LARGE_MINCLASS and marked via arena_prof_promoted() so the
 * sample can be tracked; larger requests allocate normally.
 */
static void *
imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena, bool slow_path)
{
	void *p;

	if (usize <= SMALL_MAXCLASS) {
		assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
		    sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
		p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero,
		    tcache, arena, slow_path);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsdn, p, usize);
	} else {
		p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
		    slow_path);
	}

	return (p);
}

/*
 * mallocx() back end when profiling is enabled: decode flags, obtain a prof
 * context, allocate via the sampled or unsampled path accordingly, and
 * record the allocation (or roll the prof context back on OOM).  tctx == 1U
 * encodes "not sampled".
 */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
{
	void *p;
	size_t alignment;
	bool zero;
	tcache_t *tcache;
	arena_t *arena;
	prof_tctx_t *tctx;

	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
	    &zero, &tcache, &arena)))
		return (NULL);
	tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
	if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
		p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero,
		    tcache, arena, slow_path);
	} else if ((uintptr_t)tctx > (uintptr_t)1U) {
		p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero,
		    tcache, arena, slow_path);
	} else
		p = NULL;
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, *usize, tctx);

	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
	return (p);
}

/* mallocx() back end when profiling is disabled: decode flags and allocate. */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
    bool slow_path)
{
	void *p;
	size_t alignment;
	bool zero;
	tcache_t *tcache;
	arena_t *arena;

	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
	    &zero, &tcache, &arena)))
		return (NULL);
	p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
	    arena, slow_path);
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
	return (p);
}

/* This function guarantees that *tsdn is non-NULL on success. */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
    bool slow_path)
{
	tsd_t *tsd;

	if (slow_path && unlikely(malloc_init())) {
		*tsdn = NULL;
		return (NULL);
	}

	tsd = tsd_fetch();
	*tsdn = tsd_tsdn(tsd);
	witness_assert_lockless(tsd_tsdn(tsd));

	/*
	 * Fast path: no flags means no alignment/zero/tcache/arena handling,
	 * so this degenerates to the plain ialloc() path.  *usize is only
	 * computed when some consumer (stats/prof/valgrind) needs it.
	 */
	if (likely(flags == 0)) {
		szind_t ind = size2index(size);
		if (unlikely(ind >= NSIZES))
			return (NULL);
		if (config_stats || (config_prof && opt_prof) || (slow_path &&
		    config_valgrind && unlikely(in_valgrind))) {
			*usize = index2size(ind);
			assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
		}

		if (config_prof && opt_prof) {
			return (ialloc_prof(tsd, *usize, ind, false,
			    slow_path));
		}

		return (ialloc(tsd, size, ind, false, slow_path));
	}

	if (config_prof && opt_prof)
		return (imallocx_prof(tsd, size, flags, usize, slow_path));

	return (imallocx_no_prof(tsd, size, flags, usize, slow_path));
}

/*
 * mallocx(size, flags): allocate with extended MALLOCX_* flag semantics.
 * The slow path additionally performs utrace and valgrind bookkeeping.
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_mallocx(size_t size, int flags)
{
	tsdn_t *tsdn;
	void *p;
	size_t usize;

	assert(size != 0);

	if (likely(!malloc_slow)) {
		p = imallocx_body(size, flags, &tsdn, &usize, false);
		ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
	} else {
		p = imallocx_body(size, flags, &tsdn, &usize, true);
		ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
		UTRACE(0, size, p);
		JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
		    MALLOCX_ZERO_GET(flags));
	}

	return (p);
}

/*
 * Reallocation path for a profiling-sampled result; like
 * imallocx_prof_sample(), small targets are promoted to LARGE_MINCLASS so
 * the sample can be tracked.  tctx == NULL means allocation must fail.
 */
static void *
irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
		    zero, tcache, arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else {
		p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
		    tcache, arena);
	}

	return (p);
}

/*
 * rallocx() back end with profiling: reallocate (sampled or not), fix up
 * *usize if an in-place aligned reallocation changed the actual size, and
 * hand the old/new prof contexts to prof_realloc().
 */
JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
    size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
    arena_t *arena)
{
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
	tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
		    alignment, zero, tcache, arena, tctx);
	} else {
		p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
		    tcache, arena);
	}
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, false);
		return (NULL);
	}

	if (p == old_ptr && alignment != 0) {
		/*
		 * The allocation did not move, so it is possible that the size
		 * class is smaller than would guarantee the requested
		 * alignment, and that the alignment constraint was
		 * serendipitously satisfied.  Additionally, old_usize may not
		 * be the same as the current usize because of in-place large
		 * reallocation.  Therefore, query the actual value of usize.
		 */
		*usize = isalloc(tsd_tsdn(tsd), p, config_prof);
	}
	prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
	    old_usize, old_tctx);

	return (p);
}

/*
 * rallocx(ptr, size, flags): reallocate with extended flag semantics.
 * Resolves arena/tcache from flags, dispatches to the prof or plain
 * reallocation path, and maintains per-thread stats, utrace, and valgrind
 * state.  On failure, optionally aborts under opt_xmalloc.
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_rallocx(void *ptr, size_t size, int flags)
{
	void *p;
	tsd_t *tsd;
	size_t usize;
	size_t old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;
	arena_t *arena;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(size != 0);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));

	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(arena == NULL))
			goto label_oom;
	} else
		arena = NULL;

	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, true);

	old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
			goto label_oom;
		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
		    zero, tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
	} else {
		p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
		    tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
			usize = isalloc(tsd_tsdn(tsd), p, config_prof);
	}
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, p);
	JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr,
	    old_usize, old_rzsize, false, zero);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (p);
label_oom:
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
		abort();
	}
	UTRACE(ptr, size, 0);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (NULL);
}

/*
 * Attempt an in-place resize via ixalloc().  Returns the resulting usable
 * size, which equals old_usize when the resize could not be performed.
 */
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
	size_t usize;

	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
		return (old_usize);
	usize = isalloc(tsdn, ptr, config_prof);

	return (usize);
}

/*
 * Sampled variant of ixallocx_helper(); a NULL tctx means the resize must
 * be reported as not having happened.
 */
static size_t
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
{
	size_t usize;

	if (tctx == NULL)
		return (old_usize);
	usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
	    zero);

	return (usize);
}

/*
 * xallocx() back end with profiling: conservatively size the prof context
 * from the maximum possible resulting usize, attempt the resize, then
 * commit or roll back the prof state depending on whether usize changed.
 */
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
	size_t usize_max, usize;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
	/*
	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
	 * Therefore, compute its maximum possible value and use that in
	 * prof_alloc_prep() to decide whether to capture a backtrace.
	 * prof_realloc() will use the actual usize to decide whether to sample.
	 */
	if (alignment == 0) {
		usize_max = s2u(size+extra);
		assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
	} else {
		usize_max = sa2u(size+extra, alignment);
		if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
			/*
			 * usize_max is out of range, and chances are that
			 * allocation will fail, but use the maximum possible
			 * value and carry on with prof_alloc_prep(), just in
			 * case allocation succeeds.
			 */
			usize_max = HUGE_MAXCLASS;
		}
	}
	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);

	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
		    size, extra, alignment, zero, tctx);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (usize == old_usize) {
		prof_alloc_rollback(tsd, tctx, false);
		return (usize);
	}
	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
	    old_tctx);

	return (usize);
}

/*
 * xallocx(ptr, size, extra, flags): resize in place (never moves), returning
 * the resulting usable size.  extra is clamped so size + extra does not
 * exceed HUGE_MAXCLASS; see the inline comment below.
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
	tsd_t *tsd;
	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;

	assert(ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));

	old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);

	/*
	 * The API explicitly absolves itself of protecting against (size +
	 * extra) numerical overflow, but we may need to clamp extra to avoid
	 * exceeding HUGE_MAXCLASS.
	 *
	 * Ordinarily, size limit checking is handled deeper down, but here we
	 * have to check as part of (size + extra) clamping, since we need the
	 * clamped value in the above helper functions.
	 */
	if (unlikely(size > HUGE_MAXCLASS)) {
		usize = old_usize;
		goto label_not_resized;
	}
	if (unlikely(HUGE_MAXCLASS - size < extra))
		extra = HUGE_MAXCLASS - size;

	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
		    alignment, zero);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (unlikely(usize == old_usize))
		goto label_not_resized;

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr,
	    old_usize, old_rzsize, false, zero);
label_not_resized:
	UTRACE(ptr, size, ptr);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (usize);
}

/*
 * sallocx(ptr, flags): return the usable size of an existing allocation.
 * Uses the validating ivsalloc() when config_ivsalloc is enabled.
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags)
{
	size_t usize;
	tsdn_t *tsdn;

	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	if (config_ivsalloc)
		usize = ivsalloc(tsdn, ptr, config_prof);
	else
		usize = isalloc(tsdn, ptr, config_prof);

	witness_assert_lockless(tsdn);
	return (usize);
}

/*
 * dallocx(ptr, flags): deallocate, honoring the MALLOCX_TCACHE flag bits
 * (explicit tcache, no tcache, or the thread's default).
 */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_dallocx(void *ptr, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	if (likely(!malloc_slow))
		ifree(tsd, ptr, tcache, false);
	else
		ifree(tsd, ptr, tcache, true);
	witness_assert_lockless(tsd_tsdn(tsd));
}

/*
 * Compute the usable size that an allocation of the given size/flags would
 * have, accounting only for the alignment bits of flags.
 */
JEMALLOC_ALWAYS_INLINE_C size_t
inallocx(tsdn_t *tsdn, size_t size, int flags)
{
	size_t usize;

	witness_assert_lockless(tsdn);

	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
		usize = s2u(size);
	else
		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
	witness_assert_lockless(tsdn);
	return (usize);
}

/*
 * sdallocx(ptr, size, flags): sized deallocation.  The usable size derived
 * from (size, flags) must match the allocation's actual usable size
 * (asserted), which lets isfree() skip the size lookup.
 */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_sdallocx(void *ptr, size_t size, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);
	tsd = tsd_fetch();
	usize = inallocx(tsd_tsdn(tsd), size, flags);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));

	witness_assert_lockless(tsd_tsdn(tsd));
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	if (likely(!malloc_slow))
		isfree(tsd, ptr, usize, tcache, false);
	else
		isfree(tsd, ptr, usize, tcache, true);
	witness_assert_lockless(tsd_tsdn(tsd));
}

/*
 * nallocx(size, flags): return the usable size an allocation would have,
 * or 0 if (size, flags) is unsatisfiable or initialization fails.
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_nallocx(size_t size, int flags)
{
	size_t usize;
	tsdn_t *tsdn;

	assert(size != 0);

	if (unlikely(malloc_init()))
		return (0);

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	usize = inallocx(tsdn, size, flags);
	if (unlikely(usize > HUGE_MAXCLASS))
		return (0);

	witness_assert_lockless(tsdn);
	return (usize);
}

/* mallctl(): name-based control interface; forwards to ctl_byname(). */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	int ret;
	tsd_t *tsd;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
}

/* mallctlnametomib(): translate a ctl name to a MIB via ctl_nametomib(). */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
	int ret;
	tsdn_t *tsdn;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);
	ret = ctl_nametomib(tsdn, name, mibp, miblenp);
	witness_assert_lockless(tsdn);
	return (ret);
}

/* mallctlbymib(): MIB-based control interface; forwards to ctl_bymib(). */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	tsd_t *tsd;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
}

/*
 * malloc_stats_print(): emit allocator statistics through write_cb (or the
 * default writer when write_cb is NULL, per stats_print()).
 */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{
	tsdn_t *tsdn;

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);
	stats_print(write_cb, cbopaque, opts);
	witness_assert_lockless(tsdn);
}

/*
 * malloc_usable_size(): like sallocx() but tolerates NULL (returns 0) and
 * takes no flags.
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
	size_t ret;
	tsdn_t *tsdn;

	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	if (config_ivsalloc)
		ret = ivsalloc(tsdn, ptr, config_prof);
	else
		ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);

	witness_assert_lockless(tsdn);
	return (ret);
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin compatibility functions.
2764a4bd5210SJason Evans */ 2765d0e79aa3SJason Evans 2766d0e79aa3SJason Evans #define ALLOCM_LG_ALIGN(la) (la) 2767d0e79aa3SJason Evans #define ALLOCM_ALIGN(a) (ffsl(a)-1) 2768d0e79aa3SJason Evans #define ALLOCM_ZERO ((int)0x40) 2769d0e79aa3SJason Evans #define ALLOCM_NO_MOVE ((int)0x80) 2770d0e79aa3SJason Evans 2771d0e79aa3SJason Evans #define ALLOCM_SUCCESS 0 2772d0e79aa3SJason Evans #define ALLOCM_ERR_OOM 1 2773d0e79aa3SJason Evans #define ALLOCM_ERR_NOT_MOVED 2 2774a4bd5210SJason Evans 2775a4bd5210SJason Evans int 2776a4bd5210SJason Evans je_allocm(void **ptr, size_t *rsize, size_t size, int flags) 2777a4bd5210SJason Evans { 2778a4bd5210SJason Evans void *p; 2779a4bd5210SJason Evans 2780a4bd5210SJason Evans assert(ptr != NULL); 2781a4bd5210SJason Evans 2782f921d10fSJason Evans p = je_mallocx(size, flags); 2783a4bd5210SJason Evans if (p == NULL) 2784a4bd5210SJason Evans return (ALLOCM_ERR_OOM); 2785f921d10fSJason Evans if (rsize != NULL) 27861f0a49e8SJason Evans *rsize = isalloc(tsdn_fetch(), p, config_prof); 2787f921d10fSJason Evans *ptr = p; 2788f921d10fSJason Evans return (ALLOCM_SUCCESS); 2789a4bd5210SJason Evans } 2790a4bd5210SJason Evans 2791a4bd5210SJason Evans int 2792a4bd5210SJason Evans je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) 2793a4bd5210SJason Evans { 2794f921d10fSJason Evans int ret; 2795a4bd5210SJason Evans bool no_move = flags & ALLOCM_NO_MOVE; 2796a4bd5210SJason Evans 2797a4bd5210SJason Evans assert(ptr != NULL); 2798a4bd5210SJason Evans assert(*ptr != NULL); 2799a4bd5210SJason Evans assert(size != 0); 2800a4bd5210SJason Evans assert(SIZE_T_MAX - size >= extra); 2801a4bd5210SJason Evans 2802f921d10fSJason Evans if (no_move) { 2803f921d10fSJason Evans size_t usize = je_xallocx(*ptr, size, extra, flags); 2804f921d10fSJason Evans ret = (usize >= size) ? 
ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED; 2805a4bd5210SJason Evans if (rsize != NULL) 2806a4bd5210SJason Evans *rsize = usize; 2807a4bd5210SJason Evans } else { 2808f921d10fSJason Evans void *p = je_rallocx(*ptr, size+extra, flags); 2809f921d10fSJason Evans if (p != NULL) { 2810f921d10fSJason Evans *ptr = p; 2811f921d10fSJason Evans ret = ALLOCM_SUCCESS; 2812f921d10fSJason Evans } else 2813f921d10fSJason Evans ret = ALLOCM_ERR_OOM; 2814f921d10fSJason Evans if (rsize != NULL) 28151f0a49e8SJason Evans *rsize = isalloc(tsdn_fetch(), *ptr, config_prof); 2816a4bd5210SJason Evans } 2817f921d10fSJason Evans return (ret); 2818a4bd5210SJason Evans } 2819a4bd5210SJason Evans 2820a4bd5210SJason Evans int 2821a4bd5210SJason Evans je_sallocm(const void *ptr, size_t *rsize, int flags) 2822a4bd5210SJason Evans { 2823a4bd5210SJason Evans 2824a4bd5210SJason Evans assert(rsize != NULL); 2825f921d10fSJason Evans *rsize = je_sallocx(ptr, flags); 2826a4bd5210SJason Evans return (ALLOCM_SUCCESS); 2827a4bd5210SJason Evans } 2828a4bd5210SJason Evans 2829a4bd5210SJason Evans int 2830a4bd5210SJason Evans je_dallocm(void *ptr, int flags) 2831a4bd5210SJason Evans { 2832a4bd5210SJason Evans 2833f921d10fSJason Evans je_dallocx(ptr, flags); 2834a4bd5210SJason Evans return (ALLOCM_SUCCESS); 2835a4bd5210SJason Evans } 2836a4bd5210SJason Evans 2837a4bd5210SJason Evans int 2838a4bd5210SJason Evans je_nallocm(size_t *rsize, size_t size, int flags) 2839a4bd5210SJason Evans { 2840a4bd5210SJason Evans size_t usize; 2841a4bd5210SJason Evans 2842f921d10fSJason Evans usize = je_nallocx(size, flags); 2843a4bd5210SJason Evans if (usize == 0) 2844a4bd5210SJason Evans return (ALLOCM_ERR_OOM); 2845a4bd5210SJason Evans if (rsize != NULL) 2846a4bd5210SJason Evans *rsize = usize; 2847a4bd5210SJason Evans return (ALLOCM_SUCCESS); 2848a4bd5210SJason Evans } 2849a4bd5210SJason Evans 2850d0e79aa3SJason Evans #undef ALLOCM_LG_ALIGN 2851d0e79aa3SJason Evans #undef ALLOCM_ALIGN 2852d0e79aa3SJason Evans #undef ALLOCM_ZERO 
2853d0e79aa3SJason Evans #undef ALLOCM_NO_MOVE 2854d0e79aa3SJason Evans 2855d0e79aa3SJason Evans #undef ALLOCM_SUCCESS 2856d0e79aa3SJason Evans #undef ALLOCM_ERR_OOM 2857d0e79aa3SJason Evans #undef ALLOCM_ERR_NOT_MOVED 2858d0e79aa3SJason Evans 2859a4bd5210SJason Evans /* 2860d0e79aa3SJason Evans * End compatibility functions. 2861a4bd5210SJason Evans */ 2862a4bd5210SJason Evans /******************************************************************************/ 2863a4bd5210SJason Evans /* 2864a4bd5210SJason Evans * The following functions are used by threading libraries for protection of 2865a4bd5210SJason Evans * malloc during fork(). 2866a4bd5210SJason Evans */ 2867a4bd5210SJason Evans 286882872ac0SJason Evans /* 286982872ac0SJason Evans * If an application creates a thread before doing any allocation in the main 287082872ac0SJason Evans * thread, then calls fork(2) in the main thread followed by memory allocation 287182872ac0SJason Evans * in the child process, a race can occur that results in deadlock within the 287282872ac0SJason Evans * child: the main thread may have forked while the created thread had 287382872ac0SJason Evans * partially initialized the allocator. Ordinarily jemalloc prevents 287482872ac0SJason Evans * fork/malloc races via the following functions it registers during 287582872ac0SJason Evans * initialization using pthread_atfork(), but of course that does no good if 287682872ac0SJason Evans * the allocator isn't fully initialized at fork time. The following library 2877d0e79aa3SJason Evans * constructor is a partial solution to this problem. It may still be possible 2878d0e79aa3SJason Evans * to trigger the deadlock described above, but doing so would involve forking 2879d0e79aa3SJason Evans * via a library constructor that runs before jemalloc's runs. 
 */
#ifndef JEMALLOC_JET
/*
 * Library constructor: eagerly initialize the allocator at load time, as a
 * partial mitigation for the fork/init race described above.
 */
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

	malloc_init();
}
#endif

/*
 * pre-fork(2) handler: acquire all allocator mutexes so no lock is mid-held
 * across the fork.  Exported as _malloc_prefork() when the threading library
 * drives atfork handling (JEMALLOC_MUTEX_INIT_CB), jemalloc_prefork()
 * otherwise.
 */
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, j, narenas;
	arena_t *arena;

#ifdef JEMALLOC_MUTEX_INIT_CB
	/* The threading library may call this before jemalloc is bootstrapped. */
	if (!malloc_initialized())
		return;
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	narenas = narenas_total_get();

	witness_prefork(tsd);
	/* Acquire all mutexes in a safe order. */
	ctl_prefork(tsd_tsdn(tsd));
	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
	prof_prefork0(tsd_tsdn(tsd));
	/*
	 * Arena prefork runs in three phases: phase i is completed for every
	 * arena before any arena enters phase i+1 (NOTE(review): presumably
	 * required by the global lock-order -- do not reorder).
	 */
	for (i = 0; i < 3; i++) {
		for (j = 0; j < narenas; j++) {
			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
			    NULL) {
				switch (i) {
				case 0:
					arena_prefork0(tsd_tsdn(tsd), arena);
					break;
				case 1:
					arena_prefork1(tsd_tsdn(tsd), arena);
					break;
				case 2:
					arena_prefork2(tsd_tsdn(tsd), arena);
					break;
				default: not_reached();
				}
			}
		}
	}
	base_prefork(tsd_tsdn(tsd));
	/* Final arena phase runs after base_prefork(). */
	for (i = 0; i < narenas; i++) {
		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_prefork3(tsd_tsdn(tsd), arena);
	}
	prof_prefork1(tsd_tsdn(tsd));
}

/*
 * Parent-side post-fork(2) handler: release everything the prefork handler
 * acquired (base, arenas, prof, arenas_lock, ctl).
 */
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, narenas;

#ifdef JEMALLOC_MUTEX_INIT_CB
	/* Tolerate being called before the allocator is initialized. */
	if (!malloc_initialized())
		return;
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	witness_postfork_parent(tsd);
	/* Release all mutexes, now that fork() has completed. */
	base_postfork_parent(tsd_tsdn(tsd))
	;
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_postfork_parent(tsd_tsdn(tsd), arena);
	}
	prof_postfork_parent(tsd_tsdn(tsd));
	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
	ctl_postfork_parent(tsd_tsdn(tsd));
}

/*
 * Child-side post-fork(2) handler: release every mutex in the new child
 * process, mirroring the parent-side handler above.
 */
void
jemalloc_postfork_child(void)
{
	tsd_t *tsd;
	unsigned i, narenas;

	assert(malloc_initialized());

	tsd = tsd_fetch();

	witness_postfork_child(tsd);
	/* Release all mutexes, now that fork() has completed. */
	base_postfork_child(tsd_tsdn(tsd));
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_postfork_child(tsd_tsdn(tsd), arena);
	}
	prof_postfork_child(tsd_tsdn(tsd));
	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
	ctl_postfork_child(tsd_tsdn(tsd));
}

/*
 * NOTE(review): appears to be a hook the threading library calls from the
 * process's first thread -- confirm caller.  The result of
 * malloc_mutex_first_thread() is deliberately discarded.
 */
void
_malloc_first_thread(void)
{

	(void)malloc_mutex_first_thread();
}

/******************************************************************************/