1a4bd5210SJason Evans #define JEMALLOC_C_ 2a4bd5210SJason Evans #include "jemalloc/internal/jemalloc_internal.h" 3a4bd5210SJason Evans 4a4bd5210SJason Evans /******************************************************************************/ 5a4bd5210SJason Evans /* Data. */ 6a4bd5210SJason Evans 74fdb8d2aSDimitry Andric /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */ 84fdb8d2aSDimitry Andric const char *__malloc_options_1_0 = NULL; 9a4bd5210SJason Evans __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0); 10a4bd5210SJason Evans 11a4bd5210SJason Evans /* Runtime configuration options. */ 12bde95144SJason Evans const char *je_malloc_conf 13bde95144SJason Evans #ifndef _WIN32 14bde95144SJason Evans JEMALLOC_ATTR(weak) 15bde95144SJason Evans #endif 16bde95144SJason Evans ; 1788ad2f8dSJason Evans bool opt_abort = 18a4bd5210SJason Evans #ifdef JEMALLOC_DEBUG 1988ad2f8dSJason Evans true 20a4bd5210SJason Evans #else 2188ad2f8dSJason Evans false 22a4bd5210SJason Evans #endif 2388ad2f8dSJason Evans ; 24d0e79aa3SJason Evans const char *opt_junk = 25d0e79aa3SJason Evans #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) 26d0e79aa3SJason Evans "true" 27d0e79aa3SJason Evans #else 28d0e79aa3SJason Evans "false" 29d0e79aa3SJason Evans #endif 30d0e79aa3SJason Evans ; 31d0e79aa3SJason Evans bool opt_junk_alloc = 3288ad2f8dSJason Evans #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) 3388ad2f8dSJason Evans true 34a4bd5210SJason Evans #else 3588ad2f8dSJason Evans false 36a4bd5210SJason Evans #endif 3788ad2f8dSJason Evans ; 38d0e79aa3SJason Evans bool opt_junk_free = 39d0e79aa3SJason Evans #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) 40d0e79aa3SJason Evans true 41d0e79aa3SJason Evans #else 42d0e79aa3SJason Evans false 43d0e79aa3SJason Evans #endif 44d0e79aa3SJason Evans ; 45d0e79aa3SJason Evans 46a4bd5210SJason Evans size_t opt_quarantine = ZU(0); 47a4bd5210SJason Evans bool opt_redzone = false; 48a4bd5210SJason Evans bool opt_utrace = 
false; 49a4bd5210SJason Evans bool opt_xmalloc = false; 50a4bd5210SJason Evans bool opt_zero = false; 51df0d881dSJason Evans unsigned opt_narenas = 0; 52a4bd5210SJason Evans 53d0e79aa3SJason Evans /* Initialized to true if the process is running inside Valgrind. */ 54d0e79aa3SJason Evans bool in_valgrind; 55d0e79aa3SJason Evans 56a4bd5210SJason Evans unsigned ncpus; 57a4bd5210SJason Evans 58df0d881dSJason Evans /* Protects arenas initialization. */ 59d0e79aa3SJason Evans static malloc_mutex_t arenas_lock; 60d0e79aa3SJason Evans /* 61d0e79aa3SJason Evans * Arenas that are used to service external requests. Not all elements of the 62d0e79aa3SJason Evans * arenas array are necessarily used; arenas are created lazily as needed. 63d0e79aa3SJason Evans * 64d0e79aa3SJason Evans * arenas[0..narenas_auto) are used for automatic multiplexing of threads and 65d0e79aa3SJason Evans * arenas. arenas[narenas_auto..narenas_total) are only used if the application 66d0e79aa3SJason Evans * takes some action to create them and allocate from them. 67d0e79aa3SJason Evans */ 68df0d881dSJason Evans arena_t **arenas; 69df0d881dSJason Evans static unsigned narenas_total; /* Use narenas_total_*(). */ 70d0e79aa3SJason Evans static arena_t *a0; /* arenas[0]; read-only after initialization. */ 711f0a49e8SJason Evans unsigned narenas_auto; /* Read-only after initialization. */ 72a4bd5210SJason Evans 73d0e79aa3SJason Evans typedef enum { 74d0e79aa3SJason Evans malloc_init_uninitialized = 3, 75d0e79aa3SJason Evans malloc_init_a0_initialized = 2, 76d0e79aa3SJason Evans malloc_init_recursible = 1, 77d0e79aa3SJason Evans malloc_init_initialized = 0 /* Common case --> jnz. */ 78d0e79aa3SJason Evans } malloc_init_t; 79d0e79aa3SJason Evans static malloc_init_t malloc_init_state = malloc_init_uninitialized; 80d0e79aa3SJason Evans 811f0a49e8SJason Evans /* False should be the common case. Set to true to trigger initialization. 
*/ 82df0d881dSJason Evans static bool malloc_slow = true; 83df0d881dSJason Evans 841f0a49e8SJason Evans /* When malloc_slow is true, set the corresponding bits for sanity check. */ 85df0d881dSJason Evans enum { 86df0d881dSJason Evans flag_opt_junk_alloc = (1U), 87df0d881dSJason Evans flag_opt_junk_free = (1U << 1), 88df0d881dSJason Evans flag_opt_quarantine = (1U << 2), 89df0d881dSJason Evans flag_opt_zero = (1U << 3), 90df0d881dSJason Evans flag_opt_utrace = (1U << 4), 91df0d881dSJason Evans flag_in_valgrind = (1U << 5), 92df0d881dSJason Evans flag_opt_xmalloc = (1U << 6) 93df0d881dSJason Evans }; 94df0d881dSJason Evans static uint8_t malloc_slow_flags; 95df0d881dSJason Evans 96d0e79aa3SJason Evans JEMALLOC_ALIGNED(CACHELINE) 97bde95144SJason Evans const size_t pind2sz_tab[NPSIZES] = { 98bde95144SJason Evans #define PSZ_yes(lg_grp, ndelta, lg_delta) \ 99bde95144SJason Evans (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))), 100bde95144SJason Evans #define PSZ_no(lg_grp, ndelta, lg_delta) 101bde95144SJason Evans #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ 102bde95144SJason Evans PSZ_##psz(lg_grp, ndelta, lg_delta) 103bde95144SJason Evans SIZE_CLASSES 104bde95144SJason Evans #undef PSZ_yes 105bde95144SJason Evans #undef PSZ_no 106bde95144SJason Evans #undef SC 107bde95144SJason Evans }; 108bde95144SJason Evans 109bde95144SJason Evans JEMALLOC_ALIGNED(CACHELINE) 110bde95144SJason Evans const size_t index2size_tab[NSIZES] = { 111bde95144SJason Evans #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ 112d0e79aa3SJason Evans ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)), 113d0e79aa3SJason Evans SIZE_CLASSES 114d0e79aa3SJason Evans #undef SC 115d0e79aa3SJason Evans }; 116d0e79aa3SJason Evans 117d0e79aa3SJason Evans JEMALLOC_ALIGNED(CACHELINE) 118d0e79aa3SJason Evans const uint8_t size2index_tab[] = { 119d0e79aa3SJason Evans #if LG_TINY_MIN == 0 120d0e79aa3SJason Evans #warning "Dangerous LG_TINY_MIN" 121d0e79aa3SJason Evans 
#define S2B_0(i) i, 122d0e79aa3SJason Evans #elif LG_TINY_MIN == 1 123d0e79aa3SJason Evans #warning "Dangerous LG_TINY_MIN" 124d0e79aa3SJason Evans #define S2B_1(i) i, 125d0e79aa3SJason Evans #elif LG_TINY_MIN == 2 126d0e79aa3SJason Evans #warning "Dangerous LG_TINY_MIN" 127d0e79aa3SJason Evans #define S2B_2(i) i, 128d0e79aa3SJason Evans #elif LG_TINY_MIN == 3 129d0e79aa3SJason Evans #define S2B_3(i) i, 130d0e79aa3SJason Evans #elif LG_TINY_MIN == 4 131d0e79aa3SJason Evans #define S2B_4(i) i, 132d0e79aa3SJason Evans #elif LG_TINY_MIN == 5 133d0e79aa3SJason Evans #define S2B_5(i) i, 134d0e79aa3SJason Evans #elif LG_TINY_MIN == 6 135d0e79aa3SJason Evans #define S2B_6(i) i, 136d0e79aa3SJason Evans #elif LG_TINY_MIN == 7 137d0e79aa3SJason Evans #define S2B_7(i) i, 138d0e79aa3SJason Evans #elif LG_TINY_MIN == 8 139d0e79aa3SJason Evans #define S2B_8(i) i, 140d0e79aa3SJason Evans #elif LG_TINY_MIN == 9 141d0e79aa3SJason Evans #define S2B_9(i) i, 142d0e79aa3SJason Evans #elif LG_TINY_MIN == 10 143d0e79aa3SJason Evans #define S2B_10(i) i, 144d0e79aa3SJason Evans #elif LG_TINY_MIN == 11 145d0e79aa3SJason Evans #define S2B_11(i) i, 146d0e79aa3SJason Evans #else 147d0e79aa3SJason Evans #error "Unsupported LG_TINY_MIN" 148d0e79aa3SJason Evans #endif 149d0e79aa3SJason Evans #if LG_TINY_MIN < 1 150d0e79aa3SJason Evans #define S2B_1(i) S2B_0(i) S2B_0(i) 151d0e79aa3SJason Evans #endif 152d0e79aa3SJason Evans #if LG_TINY_MIN < 2 153d0e79aa3SJason Evans #define S2B_2(i) S2B_1(i) S2B_1(i) 154d0e79aa3SJason Evans #endif 155d0e79aa3SJason Evans #if LG_TINY_MIN < 3 156d0e79aa3SJason Evans #define S2B_3(i) S2B_2(i) S2B_2(i) 157d0e79aa3SJason Evans #endif 158d0e79aa3SJason Evans #if LG_TINY_MIN < 4 159d0e79aa3SJason Evans #define S2B_4(i) S2B_3(i) S2B_3(i) 160d0e79aa3SJason Evans #endif 161d0e79aa3SJason Evans #if LG_TINY_MIN < 5 162d0e79aa3SJason Evans #define S2B_5(i) S2B_4(i) S2B_4(i) 163d0e79aa3SJason Evans #endif 164d0e79aa3SJason Evans #if LG_TINY_MIN < 6 165d0e79aa3SJason Evans 
#define S2B_6(i) S2B_5(i) S2B_5(i) 166d0e79aa3SJason Evans #endif 167d0e79aa3SJason Evans #if LG_TINY_MIN < 7 168d0e79aa3SJason Evans #define S2B_7(i) S2B_6(i) S2B_6(i) 169d0e79aa3SJason Evans #endif 170d0e79aa3SJason Evans #if LG_TINY_MIN < 8 171d0e79aa3SJason Evans #define S2B_8(i) S2B_7(i) S2B_7(i) 172d0e79aa3SJason Evans #endif 173d0e79aa3SJason Evans #if LG_TINY_MIN < 9 174d0e79aa3SJason Evans #define S2B_9(i) S2B_8(i) S2B_8(i) 175d0e79aa3SJason Evans #endif 176d0e79aa3SJason Evans #if LG_TINY_MIN < 10 177d0e79aa3SJason Evans #define S2B_10(i) S2B_9(i) S2B_9(i) 178d0e79aa3SJason Evans #endif 179d0e79aa3SJason Evans #if LG_TINY_MIN < 11 180d0e79aa3SJason Evans #define S2B_11(i) S2B_10(i) S2B_10(i) 181d0e79aa3SJason Evans #endif 182d0e79aa3SJason Evans #define S2B_no(i) 183bde95144SJason Evans #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ 184d0e79aa3SJason Evans S2B_##lg_delta_lookup(index) 185d0e79aa3SJason Evans SIZE_CLASSES 186d0e79aa3SJason Evans #undef S2B_3 187d0e79aa3SJason Evans #undef S2B_4 188d0e79aa3SJason Evans #undef S2B_5 189d0e79aa3SJason Evans #undef S2B_6 190d0e79aa3SJason Evans #undef S2B_7 191d0e79aa3SJason Evans #undef S2B_8 192d0e79aa3SJason Evans #undef S2B_9 193d0e79aa3SJason Evans #undef S2B_10 194d0e79aa3SJason Evans #undef S2B_11 195d0e79aa3SJason Evans #undef S2B_no 196d0e79aa3SJason Evans #undef SC 197d0e79aa3SJason Evans }; 198a4bd5210SJason Evans 199a4bd5210SJason Evans #ifdef JEMALLOC_THREADED_INIT 200a4bd5210SJason Evans /* Used to let the initializing thread recursively allocate. 
*/ 201a4bd5210SJason Evans # define NO_INITIALIZER ((unsigned long)0) 202a4bd5210SJason Evans # define INITIALIZER pthread_self() 203a4bd5210SJason Evans # define IS_INITIALIZER (malloc_initializer == pthread_self()) 204a4bd5210SJason Evans static pthread_t malloc_initializer = NO_INITIALIZER; 205a4bd5210SJason Evans #else 206a4bd5210SJason Evans # define NO_INITIALIZER false 207a4bd5210SJason Evans # define INITIALIZER true 208a4bd5210SJason Evans # define IS_INITIALIZER malloc_initializer 209a4bd5210SJason Evans static bool malloc_initializer = NO_INITIALIZER; 210a4bd5210SJason Evans #endif 211a4bd5210SJason Evans 212a4bd5210SJason Evans /* Used to avoid initialization races. */ 213e722f8f8SJason Evans #ifdef _WIN32 214d0e79aa3SJason Evans #if _WIN32_WINNT >= 0x0600 215d0e79aa3SJason Evans static malloc_mutex_t init_lock = SRWLOCK_INIT; 216d0e79aa3SJason Evans #else 217e722f8f8SJason Evans static malloc_mutex_t init_lock; 218536b3538SJason Evans static bool init_lock_initialized = false; 219e722f8f8SJason Evans 220e722f8f8SJason Evans JEMALLOC_ATTR(constructor) 221e722f8f8SJason Evans static void WINAPI 222e722f8f8SJason Evans _init_init_lock(void) 223e722f8f8SJason Evans { 224e722f8f8SJason Evans 225536b3538SJason Evans /* If another constructor in the same binary is using mallctl to 226536b3538SJason Evans * e.g. setup chunk hooks, it may end up running before this one, 227536b3538SJason Evans * and malloc_init_hard will crash trying to lock the uninitialized 228536b3538SJason Evans * lock. So we force an initialization of the lock in 229536b3538SJason Evans * malloc_init_hard as well. We don't try to care about atomicity 230536b3538SJason Evans * of the accessed to the init_lock_initialized boolean, since it 231536b3538SJason Evans * really only matters early in the process creation, before any 232536b3538SJason Evans * separate thread normally starts doing anything. 
*/ 233536b3538SJason Evans if (!init_lock_initialized) 2341f0a49e8SJason Evans malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT); 235536b3538SJason Evans init_lock_initialized = true; 236e722f8f8SJason Evans } 237e722f8f8SJason Evans 238e722f8f8SJason Evans #ifdef _MSC_VER 239e722f8f8SJason Evans # pragma section(".CRT$XCU", read) 240e722f8f8SJason Evans JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) 241e722f8f8SJason Evans static const void (WINAPI *init_init_lock)(void) = _init_init_lock; 242e722f8f8SJason Evans #endif 243d0e79aa3SJason Evans #endif 244e722f8f8SJason Evans #else 245a4bd5210SJason Evans static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; 246e722f8f8SJason Evans #endif 247a4bd5210SJason Evans 248a4bd5210SJason Evans typedef struct { 249a4bd5210SJason Evans void *p; /* Input pointer (as in realloc(p, s)). */ 250a4bd5210SJason Evans size_t s; /* Request size. */ 251a4bd5210SJason Evans void *r; /* Result pointer. */ 252a4bd5210SJason Evans } malloc_utrace_t; 253a4bd5210SJason Evans 254a4bd5210SJason Evans #ifdef JEMALLOC_UTRACE 255a4bd5210SJason Evans # define UTRACE(a, b, c) do { \ 256d0e79aa3SJason Evans if (unlikely(opt_utrace)) { \ 25788ad2f8dSJason Evans int utrace_serrno = errno; \ 258a4bd5210SJason Evans malloc_utrace_t ut; \ 259a4bd5210SJason Evans ut.p = (a); \ 260a4bd5210SJason Evans ut.s = (b); \ 261a4bd5210SJason Evans ut.r = (c); \ 262a4bd5210SJason Evans utrace(&ut, sizeof(ut)); \ 26388ad2f8dSJason Evans errno = utrace_serrno; \ 264a4bd5210SJason Evans } \ 265a4bd5210SJason Evans } while (0) 266a4bd5210SJason Evans #else 267a4bd5210SJason Evans # define UTRACE(a, b, c) 268a4bd5210SJason Evans #endif 269a4bd5210SJason Evans 270a4bd5210SJason Evans /******************************************************************************/ 271f921d10fSJason Evans /* 272f921d10fSJason Evans * Function prototypes for static functions that are referenced prior to 273f921d10fSJason Evans * definition. 
274f921d10fSJason Evans */ 275a4bd5210SJason Evans 276d0e79aa3SJason Evans static bool malloc_init_hard_a0(void); 277a4bd5210SJason Evans static bool malloc_init_hard(void); 278a4bd5210SJason Evans 279a4bd5210SJason Evans /******************************************************************************/ 280a4bd5210SJason Evans /* 281a4bd5210SJason Evans * Begin miscellaneous support functions. 282a4bd5210SJason Evans */ 283a4bd5210SJason Evans 284d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C bool 285d0e79aa3SJason Evans malloc_initialized(void) 286a4bd5210SJason Evans { 287a4bd5210SJason Evans 288d0e79aa3SJason Evans return (malloc_init_state == malloc_init_initialized); 289a4bd5210SJason Evans } 290d0e79aa3SJason Evans 291d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C void 292d0e79aa3SJason Evans malloc_thread_init(void) 293d0e79aa3SJason Evans { 294a4bd5210SJason Evans 295a4bd5210SJason Evans /* 296d0e79aa3SJason Evans * TSD initialization can't be safely done as a side effect of 297d0e79aa3SJason Evans * deallocation, because it is possible for a thread to do nothing but 298d0e79aa3SJason Evans * deallocate its TLS data via free(), in which case writing to TLS 299d0e79aa3SJason Evans * would cause write-after-free memory corruption. The quarantine 300d0e79aa3SJason Evans * facility *only* gets used as a side effect of deallocation, so make 301d0e79aa3SJason Evans * a best effort attempt at initializing its TSD by hooking all 302d0e79aa3SJason Evans * allocation events. 
303a4bd5210SJason Evans */ 304d0e79aa3SJason Evans if (config_fill && unlikely(opt_quarantine)) 305d0e79aa3SJason Evans quarantine_alloc_hook(); 306a4bd5210SJason Evans } 307a4bd5210SJason Evans 308d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C bool 309d0e79aa3SJason Evans malloc_init_a0(void) 310d0e79aa3SJason Evans { 311d0e79aa3SJason Evans 312d0e79aa3SJason Evans if (unlikely(malloc_init_state == malloc_init_uninitialized)) 313d0e79aa3SJason Evans return (malloc_init_hard_a0()); 314d0e79aa3SJason Evans return (false); 315d0e79aa3SJason Evans } 316d0e79aa3SJason Evans 317d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C bool 318d0e79aa3SJason Evans malloc_init(void) 319d0e79aa3SJason Evans { 320d0e79aa3SJason Evans 321d0e79aa3SJason Evans if (unlikely(!malloc_initialized()) && malloc_init_hard()) 322d0e79aa3SJason Evans return (true); 323d0e79aa3SJason Evans malloc_thread_init(); 324d0e79aa3SJason Evans 325d0e79aa3SJason Evans return (false); 326d0e79aa3SJason Evans } 327d0e79aa3SJason Evans 328d0e79aa3SJason Evans /* 3291f0a49e8SJason Evans * The a0*() functions are used instead of i{d,}alloc() in situations that 330d0e79aa3SJason Evans * cannot tolerate TLS variable access. 
331d0e79aa3SJason Evans */ 332d0e79aa3SJason Evans 333d0e79aa3SJason Evans static void * 334d0e79aa3SJason Evans a0ialloc(size_t size, bool zero, bool is_metadata) 335d0e79aa3SJason Evans { 336d0e79aa3SJason Evans 337d0e79aa3SJason Evans if (unlikely(malloc_init_a0())) 338d0e79aa3SJason Evans return (NULL); 339d0e79aa3SJason Evans 3401f0a49e8SJason Evans return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL, 3411f0a49e8SJason Evans is_metadata, arena_get(TSDN_NULL, 0, true), true)); 342d0e79aa3SJason Evans } 343d0e79aa3SJason Evans 344d0e79aa3SJason Evans static void 345d0e79aa3SJason Evans a0idalloc(void *ptr, bool is_metadata) 346d0e79aa3SJason Evans { 347d0e79aa3SJason Evans 3481f0a49e8SJason Evans idalloctm(TSDN_NULL, ptr, false, is_metadata, true); 349d0e79aa3SJason Evans } 350d0e79aa3SJason Evans 351bde95144SJason Evans arena_t * 352bde95144SJason Evans a0get(void) 353bde95144SJason Evans { 354bde95144SJason Evans 355bde95144SJason Evans return (a0); 356bde95144SJason Evans } 357bde95144SJason Evans 358d0e79aa3SJason Evans void * 359d0e79aa3SJason Evans a0malloc(size_t size) 360d0e79aa3SJason Evans { 361d0e79aa3SJason Evans 362d0e79aa3SJason Evans return (a0ialloc(size, false, true)); 363d0e79aa3SJason Evans } 364d0e79aa3SJason Evans 365d0e79aa3SJason Evans void 366d0e79aa3SJason Evans a0dalloc(void *ptr) 367d0e79aa3SJason Evans { 368d0e79aa3SJason Evans 369d0e79aa3SJason Evans a0idalloc(ptr, true); 370d0e79aa3SJason Evans } 371d0e79aa3SJason Evans 372d0e79aa3SJason Evans /* 373d0e79aa3SJason Evans * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive 374d0e79aa3SJason Evans * situations that cannot tolerate TLS variable access (TLS allocation and very 375d0e79aa3SJason Evans * early internal data structure initialization). 
376d0e79aa3SJason Evans */ 377d0e79aa3SJason Evans 378d0e79aa3SJason Evans void * 379d0e79aa3SJason Evans bootstrap_malloc(size_t size) 380d0e79aa3SJason Evans { 381d0e79aa3SJason Evans 382d0e79aa3SJason Evans if (unlikely(size == 0)) 383d0e79aa3SJason Evans size = 1; 384d0e79aa3SJason Evans 385d0e79aa3SJason Evans return (a0ialloc(size, false, false)); 386d0e79aa3SJason Evans } 387d0e79aa3SJason Evans 388d0e79aa3SJason Evans void * 389d0e79aa3SJason Evans bootstrap_calloc(size_t num, size_t size) 390d0e79aa3SJason Evans { 391d0e79aa3SJason Evans size_t num_size; 392d0e79aa3SJason Evans 393d0e79aa3SJason Evans num_size = num * size; 394d0e79aa3SJason Evans if (unlikely(num_size == 0)) { 395d0e79aa3SJason Evans assert(num == 0 || size == 0); 396d0e79aa3SJason Evans num_size = 1; 397d0e79aa3SJason Evans } 398d0e79aa3SJason Evans 399d0e79aa3SJason Evans return (a0ialloc(num_size, true, false)); 400d0e79aa3SJason Evans } 401d0e79aa3SJason Evans 402d0e79aa3SJason Evans void 403d0e79aa3SJason Evans bootstrap_free(void *ptr) 404d0e79aa3SJason Evans { 405d0e79aa3SJason Evans 406d0e79aa3SJason Evans if (unlikely(ptr == NULL)) 407d0e79aa3SJason Evans return; 408d0e79aa3SJason Evans 409d0e79aa3SJason Evans a0idalloc(ptr, false); 410d0e79aa3SJason Evans } 411d0e79aa3SJason Evans 412df0d881dSJason Evans static void 413df0d881dSJason Evans arena_set(unsigned ind, arena_t *arena) 414df0d881dSJason Evans { 415df0d881dSJason Evans 416df0d881dSJason Evans atomic_write_p((void **)&arenas[ind], arena); 417df0d881dSJason Evans } 418df0d881dSJason Evans 419df0d881dSJason Evans static void 420df0d881dSJason Evans narenas_total_set(unsigned narenas) 421df0d881dSJason Evans { 422df0d881dSJason Evans 423df0d881dSJason Evans atomic_write_u(&narenas_total, narenas); 424df0d881dSJason Evans } 425df0d881dSJason Evans 426df0d881dSJason Evans static void 427df0d881dSJason Evans narenas_total_inc(void) 428df0d881dSJason Evans { 429df0d881dSJason Evans 430df0d881dSJason Evans 
atomic_add_u(&narenas_total, 1); 431df0d881dSJason Evans } 432df0d881dSJason Evans 433df0d881dSJason Evans unsigned 434df0d881dSJason Evans narenas_total_get(void) 435df0d881dSJason Evans { 436df0d881dSJason Evans 437df0d881dSJason Evans return (atomic_read_u(&narenas_total)); 438df0d881dSJason Evans } 439df0d881dSJason Evans 440d0e79aa3SJason Evans /* Create a new arena and insert it into the arenas array at index ind. */ 441d0e79aa3SJason Evans static arena_t * 4421f0a49e8SJason Evans arena_init_locked(tsdn_t *tsdn, unsigned ind) 443d0e79aa3SJason Evans { 444d0e79aa3SJason Evans arena_t *arena; 445d0e79aa3SJason Evans 446df0d881dSJason Evans assert(ind <= narenas_total_get()); 447d0e79aa3SJason Evans if (ind > MALLOCX_ARENA_MAX) 448d0e79aa3SJason Evans return (NULL); 449df0d881dSJason Evans if (ind == narenas_total_get()) 450df0d881dSJason Evans narenas_total_inc(); 451d0e79aa3SJason Evans 452d0e79aa3SJason Evans /* 453d0e79aa3SJason Evans * Another thread may have already initialized arenas[ind] if it's an 454d0e79aa3SJason Evans * auto arena. 455d0e79aa3SJason Evans */ 4561f0a49e8SJason Evans arena = arena_get(tsdn, ind, false); 457d0e79aa3SJason Evans if (arena != NULL) { 458d0e79aa3SJason Evans assert(ind < narenas_auto); 459d0e79aa3SJason Evans return (arena); 460d0e79aa3SJason Evans } 461d0e79aa3SJason Evans 462d0e79aa3SJason Evans /* Actually initialize the arena. 
*/ 4631f0a49e8SJason Evans arena = arena_new(tsdn, ind); 464df0d881dSJason Evans arena_set(ind, arena); 465d0e79aa3SJason Evans return (arena); 466d0e79aa3SJason Evans } 467d0e79aa3SJason Evans 468d0e79aa3SJason Evans arena_t * 4691f0a49e8SJason Evans arena_init(tsdn_t *tsdn, unsigned ind) 470d0e79aa3SJason Evans { 471d0e79aa3SJason Evans arena_t *arena; 472d0e79aa3SJason Evans 4731f0a49e8SJason Evans malloc_mutex_lock(tsdn, &arenas_lock); 4741f0a49e8SJason Evans arena = arena_init_locked(tsdn, ind); 4751f0a49e8SJason Evans malloc_mutex_unlock(tsdn, &arenas_lock); 476d0e79aa3SJason Evans return (arena); 477d0e79aa3SJason Evans } 478d0e79aa3SJason Evans 479d0e79aa3SJason Evans static void 4801f0a49e8SJason Evans arena_bind(tsd_t *tsd, unsigned ind, bool internal) 481d0e79aa3SJason Evans { 482df0d881dSJason Evans arena_t *arena; 483d0e79aa3SJason Evans 484bde95144SJason Evans if (!tsd_nominal(tsd)) 485bde95144SJason Evans return; 486bde95144SJason Evans 4871f0a49e8SJason Evans arena = arena_get(tsd_tsdn(tsd), ind, false); 4881f0a49e8SJason Evans arena_nthreads_inc(arena, internal); 489df0d881dSJason Evans 4901f0a49e8SJason Evans if (internal) 4911f0a49e8SJason Evans tsd_iarena_set(tsd, arena); 4921f0a49e8SJason Evans else 493df0d881dSJason Evans tsd_arena_set(tsd, arena); 494d0e79aa3SJason Evans } 495d0e79aa3SJason Evans 496d0e79aa3SJason Evans void 497d0e79aa3SJason Evans arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) 498d0e79aa3SJason Evans { 499d0e79aa3SJason Evans arena_t *oldarena, *newarena; 500d0e79aa3SJason Evans 5011f0a49e8SJason Evans oldarena = arena_get(tsd_tsdn(tsd), oldind, false); 5021f0a49e8SJason Evans newarena = arena_get(tsd_tsdn(tsd), newind, false); 5031f0a49e8SJason Evans arena_nthreads_dec(oldarena, false); 5041f0a49e8SJason Evans arena_nthreads_inc(newarena, false); 505d0e79aa3SJason Evans tsd_arena_set(tsd, newarena); 506d0e79aa3SJason Evans } 507d0e79aa3SJason Evans 508d0e79aa3SJason Evans static void 5091f0a49e8SJason Evans 
arena_unbind(tsd_t *tsd, unsigned ind, bool internal) 510d0e79aa3SJason Evans { 511d0e79aa3SJason Evans arena_t *arena; 512d0e79aa3SJason Evans 5131f0a49e8SJason Evans arena = arena_get(tsd_tsdn(tsd), ind, false); 5141f0a49e8SJason Evans arena_nthreads_dec(arena, internal); 5151f0a49e8SJason Evans if (internal) 5161f0a49e8SJason Evans tsd_iarena_set(tsd, NULL); 5171f0a49e8SJason Evans else 518d0e79aa3SJason Evans tsd_arena_set(tsd, NULL); 519d0e79aa3SJason Evans } 520d0e79aa3SJason Evans 521df0d881dSJason Evans arena_tdata_t * 522df0d881dSJason Evans arena_tdata_get_hard(tsd_t *tsd, unsigned ind) 523d0e79aa3SJason Evans { 524df0d881dSJason Evans arena_tdata_t *tdata, *arenas_tdata_old; 525df0d881dSJason Evans arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); 526df0d881dSJason Evans unsigned narenas_tdata_old, i; 527df0d881dSJason Evans unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); 528d0e79aa3SJason Evans unsigned narenas_actual = narenas_total_get(); 529d0e79aa3SJason Evans 530d0e79aa3SJason Evans /* 531df0d881dSJason Evans * Dissociate old tdata array (and set up for deallocation upon return) 532df0d881dSJason Evans * if it's too small. 533d0e79aa3SJason Evans */ 534df0d881dSJason Evans if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { 535df0d881dSJason Evans arenas_tdata_old = arenas_tdata; 536df0d881dSJason Evans narenas_tdata_old = narenas_tdata; 537df0d881dSJason Evans arenas_tdata = NULL; 538df0d881dSJason Evans narenas_tdata = 0; 539df0d881dSJason Evans tsd_arenas_tdata_set(tsd, arenas_tdata); 540df0d881dSJason Evans tsd_narenas_tdata_set(tsd, narenas_tdata); 541df0d881dSJason Evans } else { 542df0d881dSJason Evans arenas_tdata_old = NULL; 543df0d881dSJason Evans narenas_tdata_old = 0; 544d0e79aa3SJason Evans } 545df0d881dSJason Evans 546df0d881dSJason Evans /* Allocate tdata array if it's missing. 
*/ 547df0d881dSJason Evans if (arenas_tdata == NULL) { 548df0d881dSJason Evans bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); 549df0d881dSJason Evans narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; 550df0d881dSJason Evans 551df0d881dSJason Evans if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { 552df0d881dSJason Evans *arenas_tdata_bypassp = true; 553df0d881dSJason Evans arenas_tdata = (arena_tdata_t *)a0malloc( 554df0d881dSJason Evans sizeof(arena_tdata_t) * narenas_tdata); 555df0d881dSJason Evans *arenas_tdata_bypassp = false; 556df0d881dSJason Evans } 557df0d881dSJason Evans if (arenas_tdata == NULL) { 558df0d881dSJason Evans tdata = NULL; 559df0d881dSJason Evans goto label_return; 560df0d881dSJason Evans } 561df0d881dSJason Evans assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); 562df0d881dSJason Evans tsd_arenas_tdata_set(tsd, arenas_tdata); 563df0d881dSJason Evans tsd_narenas_tdata_set(tsd, narenas_tdata); 564d0e79aa3SJason Evans } 565d0e79aa3SJason Evans 566d0e79aa3SJason Evans /* 567df0d881dSJason Evans * Copy to tdata array. It's possible that the actual number of arenas 568df0d881dSJason Evans * has increased since narenas_total_get() was called above, but that 569df0d881dSJason Evans * causes no correctness issues unless two threads concurrently execute 570df0d881dSJason Evans * the arenas.extend mallctl, which we trust mallctl synchronization to 571d0e79aa3SJason Evans * prevent. 572d0e79aa3SJason Evans */ 573df0d881dSJason Evans 574df0d881dSJason Evans /* Copy/initialize tickers. 
*/ 575df0d881dSJason Evans for (i = 0; i < narenas_actual; i++) { 576df0d881dSJason Evans if (i < narenas_tdata_old) { 577df0d881dSJason Evans ticker_copy(&arenas_tdata[i].decay_ticker, 578df0d881dSJason Evans &arenas_tdata_old[i].decay_ticker); 579df0d881dSJason Evans } else { 580df0d881dSJason Evans ticker_init(&arenas_tdata[i].decay_ticker, 581df0d881dSJason Evans DECAY_NTICKS_PER_UPDATE); 582df0d881dSJason Evans } 583df0d881dSJason Evans } 584df0d881dSJason Evans if (narenas_tdata > narenas_actual) { 585df0d881dSJason Evans memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) 586df0d881dSJason Evans * (narenas_tdata - narenas_actual)); 587d0e79aa3SJason Evans } 588d0e79aa3SJason Evans 589df0d881dSJason Evans /* Read the refreshed tdata array. */ 590df0d881dSJason Evans tdata = &arenas_tdata[ind]; 591df0d881dSJason Evans label_return: 592df0d881dSJason Evans if (arenas_tdata_old != NULL) 593df0d881dSJason Evans a0dalloc(arenas_tdata_old); 594df0d881dSJason Evans return (tdata); 595d0e79aa3SJason Evans } 596d0e79aa3SJason Evans 597d0e79aa3SJason Evans /* Slow path, called only by arena_choose(). */ 598d0e79aa3SJason Evans arena_t * 5991f0a49e8SJason Evans arena_choose_hard(tsd_t *tsd, bool internal) 600a4bd5210SJason Evans { 6011f0a49e8SJason Evans arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); 602a4bd5210SJason Evans 60382872ac0SJason Evans if (narenas_auto > 1) { 6041f0a49e8SJason Evans unsigned i, j, choose[2], first_null; 605a4bd5210SJason Evans 6061f0a49e8SJason Evans /* 6071f0a49e8SJason Evans * Determine binding for both non-internal and internal 6081f0a49e8SJason Evans * allocation. 6091f0a49e8SJason Evans * 6101f0a49e8SJason Evans * choose[0]: For application allocation. 6111f0a49e8SJason Evans * choose[1]: For internal metadata allocation. 
6121f0a49e8SJason Evans */ 6131f0a49e8SJason Evans 6141f0a49e8SJason Evans for (j = 0; j < 2; j++) 6151f0a49e8SJason Evans choose[j] = 0; 6161f0a49e8SJason Evans 61782872ac0SJason Evans first_null = narenas_auto; 6181f0a49e8SJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); 6191f0a49e8SJason Evans assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); 62082872ac0SJason Evans for (i = 1; i < narenas_auto; i++) { 6211f0a49e8SJason Evans if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { 622a4bd5210SJason Evans /* 623a4bd5210SJason Evans * Choose the first arena that has the lowest 624a4bd5210SJason Evans * number of threads assigned to it. 625a4bd5210SJason Evans */ 6261f0a49e8SJason Evans for (j = 0; j < 2; j++) { 6271f0a49e8SJason Evans if (arena_nthreads_get(arena_get( 6281f0a49e8SJason Evans tsd_tsdn(tsd), i, false), !!j) < 6291f0a49e8SJason Evans arena_nthreads_get(arena_get( 6301f0a49e8SJason Evans tsd_tsdn(tsd), choose[j], false), 6311f0a49e8SJason Evans !!j)) 6321f0a49e8SJason Evans choose[j] = i; 6331f0a49e8SJason Evans } 63482872ac0SJason Evans } else if (first_null == narenas_auto) { 635a4bd5210SJason Evans /* 636a4bd5210SJason Evans * Record the index of the first uninitialized 637a4bd5210SJason Evans * arena, in case all extant arenas are in use. 638a4bd5210SJason Evans * 639a4bd5210SJason Evans * NB: It is possible for there to be 640a4bd5210SJason Evans * discontinuities in terms of initialized 641a4bd5210SJason Evans * versus uninitialized arenas, due to the 642a4bd5210SJason Evans * "thread.arena" mallctl. 
643a4bd5210SJason Evans */ 644a4bd5210SJason Evans first_null = i; 645a4bd5210SJason Evans } 646a4bd5210SJason Evans } 647a4bd5210SJason Evans 6481f0a49e8SJason Evans for (j = 0; j < 2; j++) { 6491f0a49e8SJason Evans if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), 6501f0a49e8SJason Evans choose[j], false), !!j) == 0 || first_null == 6511f0a49e8SJason Evans narenas_auto) { 652a4bd5210SJason Evans /* 6531f0a49e8SJason Evans * Use an unloaded arena, or the least loaded 6541f0a49e8SJason Evans * arena if all arenas are already initialized. 655a4bd5210SJason Evans */ 6561f0a49e8SJason Evans if (!!j == internal) { 6571f0a49e8SJason Evans ret = arena_get(tsd_tsdn(tsd), 6581f0a49e8SJason Evans choose[j], false); 6591f0a49e8SJason Evans } 660a4bd5210SJason Evans } else { 6611f0a49e8SJason Evans arena_t *arena; 6621f0a49e8SJason Evans 663a4bd5210SJason Evans /* Initialize a new arena. */ 6641f0a49e8SJason Evans choose[j] = first_null; 6651f0a49e8SJason Evans arena = arena_init_locked(tsd_tsdn(tsd), 6661f0a49e8SJason Evans choose[j]); 6671f0a49e8SJason Evans if (arena == NULL) { 6681f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), 6691f0a49e8SJason Evans &arenas_lock); 670d0e79aa3SJason Evans return (NULL); 671a4bd5210SJason Evans } 6721f0a49e8SJason Evans if (!!j == internal) 6731f0a49e8SJason Evans ret = arena; 674d0e79aa3SJason Evans } 6751f0a49e8SJason Evans arena_bind(tsd, choose[j], !!j); 6761f0a49e8SJason Evans } 6771f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); 678a4bd5210SJason Evans } else { 6791f0a49e8SJason Evans ret = arena_get(tsd_tsdn(tsd), 0, false); 6801f0a49e8SJason Evans arena_bind(tsd, 0, false); 6811f0a49e8SJason Evans arena_bind(tsd, 0, true); 682a4bd5210SJason Evans } 683a4bd5210SJason Evans 684a4bd5210SJason Evans return (ret); 685a4bd5210SJason Evans } 686a4bd5210SJason Evans 687d0e79aa3SJason Evans void 688d0e79aa3SJason Evans thread_allocated_cleanup(tsd_t *tsd) 689d0e79aa3SJason Evans { 690d0e79aa3SJason Evans 
691d0e79aa3SJason Evans /* Do nothing. */ 692d0e79aa3SJason Evans } 693d0e79aa3SJason Evans 694d0e79aa3SJason Evans void 695d0e79aa3SJason Evans thread_deallocated_cleanup(tsd_t *tsd) 696d0e79aa3SJason Evans { 697d0e79aa3SJason Evans 698d0e79aa3SJason Evans /* Do nothing. */ 699d0e79aa3SJason Evans } 700d0e79aa3SJason Evans 701d0e79aa3SJason Evans void 7021f0a49e8SJason Evans iarena_cleanup(tsd_t *tsd) 7031f0a49e8SJason Evans { 7041f0a49e8SJason Evans arena_t *iarena; 7051f0a49e8SJason Evans 7061f0a49e8SJason Evans iarena = tsd_iarena_get(tsd); 7071f0a49e8SJason Evans if (iarena != NULL) 7081f0a49e8SJason Evans arena_unbind(tsd, iarena->ind, true); 7091f0a49e8SJason Evans } 7101f0a49e8SJason Evans 7111f0a49e8SJason Evans void 712d0e79aa3SJason Evans arena_cleanup(tsd_t *tsd) 713d0e79aa3SJason Evans { 714d0e79aa3SJason Evans arena_t *arena; 715d0e79aa3SJason Evans 716d0e79aa3SJason Evans arena = tsd_arena_get(tsd); 717d0e79aa3SJason Evans if (arena != NULL) 7181f0a49e8SJason Evans arena_unbind(tsd, arena->ind, false); 719d0e79aa3SJason Evans } 720d0e79aa3SJason Evans 721d0e79aa3SJason Evans void 722df0d881dSJason Evans arenas_tdata_cleanup(tsd_t *tsd) 723d0e79aa3SJason Evans { 724df0d881dSJason Evans arena_tdata_t *arenas_tdata; 725d0e79aa3SJason Evans 726df0d881dSJason Evans /* Prevent tsd->arenas_tdata from being (re)created. */ 727df0d881dSJason Evans *tsd_arenas_tdata_bypassp_get(tsd) = true; 728df0d881dSJason Evans 729df0d881dSJason Evans arenas_tdata = tsd_arenas_tdata_get(tsd); 730df0d881dSJason Evans if (arenas_tdata != NULL) { 731df0d881dSJason Evans tsd_arenas_tdata_set(tsd, NULL); 732df0d881dSJason Evans a0dalloc(arenas_tdata); 733d0e79aa3SJason Evans } 734536b3538SJason Evans } 735d0e79aa3SJason Evans 736d0e79aa3SJason Evans void 737df0d881dSJason Evans narenas_tdata_cleanup(tsd_t *tsd) 738d0e79aa3SJason Evans { 739d0e79aa3SJason Evans 740d0e79aa3SJason Evans /* Do nothing. 
*/ 741d0e79aa3SJason Evans } 742d0e79aa3SJason Evans 743d0e79aa3SJason Evans void 744df0d881dSJason Evans arenas_tdata_bypass_cleanup(tsd_t *tsd) 745d0e79aa3SJason Evans { 746d0e79aa3SJason Evans 747d0e79aa3SJason Evans /* Do nothing. */ 748d0e79aa3SJason Evans } 749d0e79aa3SJason Evans 750a4bd5210SJason Evans static void 751a4bd5210SJason Evans stats_print_atexit(void) 752a4bd5210SJason Evans { 753a4bd5210SJason Evans 754a4bd5210SJason Evans if (config_tcache && config_stats) { 7551f0a49e8SJason Evans tsdn_t *tsdn; 75682872ac0SJason Evans unsigned narenas, i; 757a4bd5210SJason Evans 7581f0a49e8SJason Evans tsdn = tsdn_fetch(); 7591f0a49e8SJason Evans 760a4bd5210SJason Evans /* 761a4bd5210SJason Evans * Merge stats from extant threads. This is racy, since 762a4bd5210SJason Evans * individual threads do not lock when recording tcache stats 763a4bd5210SJason Evans * events. As a consequence, the final stats may be slightly 764a4bd5210SJason Evans * out of date by the time they are reported, if other threads 765a4bd5210SJason Evans * continue to allocate. 766a4bd5210SJason Evans */ 76782872ac0SJason Evans for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { 7681f0a49e8SJason Evans arena_t *arena = arena_get(tsdn, i, false); 769a4bd5210SJason Evans if (arena != NULL) { 770a4bd5210SJason Evans tcache_t *tcache; 771a4bd5210SJason Evans 772a4bd5210SJason Evans /* 773a4bd5210SJason Evans * tcache_stats_merge() locks bins, so if any 774a4bd5210SJason Evans * code is introduced that acquires both arena 775a4bd5210SJason Evans * and bin locks in the opposite order, 776a4bd5210SJason Evans * deadlocks may result. 
777a4bd5210SJason Evans */ 7781f0a49e8SJason Evans malloc_mutex_lock(tsdn, &arena->lock); 779a4bd5210SJason Evans ql_foreach(tcache, &arena->tcache_ql, link) { 7801f0a49e8SJason Evans tcache_stats_merge(tsdn, tcache, arena); 781a4bd5210SJason Evans } 7821f0a49e8SJason Evans malloc_mutex_unlock(tsdn, &arena->lock); 783a4bd5210SJason Evans } 784a4bd5210SJason Evans } 785a4bd5210SJason Evans } 786a4bd5210SJason Evans je_malloc_stats_print(NULL, NULL, NULL); 787a4bd5210SJason Evans } 788a4bd5210SJason Evans 789a4bd5210SJason Evans /* 790a4bd5210SJason Evans * End miscellaneous support functions. 791a4bd5210SJason Evans */ 792a4bd5210SJason Evans /******************************************************************************/ 793a4bd5210SJason Evans /* 794a4bd5210SJason Evans * Begin initialization functions. 795a4bd5210SJason Evans */ 796a4bd5210SJason Evans 797d0e79aa3SJason Evans #ifndef JEMALLOC_HAVE_SECURE_GETENV 798d0e79aa3SJason Evans static char * 799d0e79aa3SJason Evans secure_getenv(const char *name) 800d0e79aa3SJason Evans { 801d0e79aa3SJason Evans 802d0e79aa3SJason Evans # ifdef JEMALLOC_HAVE_ISSETUGID 803d0e79aa3SJason Evans if (issetugid() != 0) 804d0e79aa3SJason Evans return (NULL); 805d0e79aa3SJason Evans # endif 806d0e79aa3SJason Evans return (getenv(name)); 807d0e79aa3SJason Evans } 808d0e79aa3SJason Evans #endif 809d0e79aa3SJason Evans 810a4bd5210SJason Evans static unsigned 811a4bd5210SJason Evans malloc_ncpus(void) 812a4bd5210SJason Evans { 813a4bd5210SJason Evans long result; 814a4bd5210SJason Evans 815e722f8f8SJason Evans #ifdef _WIN32 816e722f8f8SJason Evans SYSTEM_INFO si; 817e722f8f8SJason Evans GetSystemInfo(&si); 818e722f8f8SJason Evans result = si.dwNumberOfProcessors; 819bde95144SJason Evans #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) 820bde95144SJason Evans /* 821bde95144SJason Evans * glibc >= 2.6 has the CPU_COUNT macro. 822bde95144SJason Evans * 823bde95144SJason Evans * glibc's sysconf() uses isspace(). 
glibc allocates for the first time 824bde95144SJason Evans * *before* setting up the isspace tables. Therefore we need a 825bde95144SJason Evans * different method to get the number of CPUs. 826bde95144SJason Evans */ 827bde95144SJason Evans { 828bde95144SJason Evans cpu_set_t set; 829bde95144SJason Evans 830bde95144SJason Evans pthread_getaffinity_np(pthread_self(), sizeof(set), &set); 831bde95144SJason Evans result = CPU_COUNT(&set); 832bde95144SJason Evans } 833e722f8f8SJason Evans #else 834a4bd5210SJason Evans result = sysconf(_SC_NPROCESSORS_ONLN); 83582872ac0SJason Evans #endif 836f921d10fSJason Evans return ((result == -1) ? 1 : (unsigned)result); 837a4bd5210SJason Evans } 838a4bd5210SJason Evans 839a4bd5210SJason Evans static bool 840a4bd5210SJason Evans malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, 841a4bd5210SJason Evans char const **v_p, size_t *vlen_p) 842a4bd5210SJason Evans { 843a4bd5210SJason Evans bool accept; 844a4bd5210SJason Evans const char *opts = *opts_p; 845a4bd5210SJason Evans 846a4bd5210SJason Evans *k_p = opts; 847a4bd5210SJason Evans 848d0e79aa3SJason Evans for (accept = false; !accept;) { 849a4bd5210SJason Evans switch (*opts) { 850a4bd5210SJason Evans case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': 851a4bd5210SJason Evans case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': 852a4bd5210SJason Evans case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': 853a4bd5210SJason Evans case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': 854a4bd5210SJason Evans case 'Y': case 'Z': 855a4bd5210SJason Evans case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': 856a4bd5210SJason Evans case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': 857a4bd5210SJason Evans case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': 858a4bd5210SJason Evans case 's': case 't': case 'u': case 'v': case 'w': case 'x': 859a4bd5210SJason Evans case 'y': case 'z': 860a4bd5210SJason Evans case '0': case 
'1': case '2': case '3': case '4': case '5': 861a4bd5210SJason Evans case '6': case '7': case '8': case '9': 862a4bd5210SJason Evans case '_': 863a4bd5210SJason Evans opts++; 864a4bd5210SJason Evans break; 865a4bd5210SJason Evans case ':': 866a4bd5210SJason Evans opts++; 867a4bd5210SJason Evans *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; 868a4bd5210SJason Evans *v_p = opts; 869a4bd5210SJason Evans accept = true; 870a4bd5210SJason Evans break; 871a4bd5210SJason Evans case '\0': 872a4bd5210SJason Evans if (opts != *opts_p) { 873a4bd5210SJason Evans malloc_write("<jemalloc>: Conf string ends " 874a4bd5210SJason Evans "with key\n"); 875a4bd5210SJason Evans } 876a4bd5210SJason Evans return (true); 877a4bd5210SJason Evans default: 878a4bd5210SJason Evans malloc_write("<jemalloc>: Malformed conf string\n"); 879a4bd5210SJason Evans return (true); 880a4bd5210SJason Evans } 881a4bd5210SJason Evans } 882a4bd5210SJason Evans 883d0e79aa3SJason Evans for (accept = false; !accept;) { 884a4bd5210SJason Evans switch (*opts) { 885a4bd5210SJason Evans case ',': 886a4bd5210SJason Evans opts++; 887a4bd5210SJason Evans /* 888a4bd5210SJason Evans * Look ahead one character here, because the next time 889a4bd5210SJason Evans * this function is called, it will assume that end of 890a4bd5210SJason Evans * input has been cleanly reached if no input remains, 891a4bd5210SJason Evans * but we have optimistically already consumed the 892a4bd5210SJason Evans * comma if one exists. 
893a4bd5210SJason Evans */ 894a4bd5210SJason Evans if (*opts == '\0') { 895a4bd5210SJason Evans malloc_write("<jemalloc>: Conf string ends " 896a4bd5210SJason Evans "with comma\n"); 897a4bd5210SJason Evans } 898a4bd5210SJason Evans *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; 899a4bd5210SJason Evans accept = true; 900a4bd5210SJason Evans break; 901a4bd5210SJason Evans case '\0': 902a4bd5210SJason Evans *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; 903a4bd5210SJason Evans accept = true; 904a4bd5210SJason Evans break; 905a4bd5210SJason Evans default: 906a4bd5210SJason Evans opts++; 907a4bd5210SJason Evans break; 908a4bd5210SJason Evans } 909a4bd5210SJason Evans } 910a4bd5210SJason Evans 911a4bd5210SJason Evans *opts_p = opts; 912a4bd5210SJason Evans return (false); 913a4bd5210SJason Evans } 914a4bd5210SJason Evans 915a4bd5210SJason Evans static void 916a4bd5210SJason Evans malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, 917a4bd5210SJason Evans size_t vlen) 918a4bd5210SJason Evans { 919a4bd5210SJason Evans 920a4bd5210SJason Evans malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, 921a4bd5210SJason Evans (int)vlen, v); 922a4bd5210SJason Evans } 923a4bd5210SJason Evans 924a4bd5210SJason Evans static void 925df0d881dSJason Evans malloc_slow_flag_init(void) 926df0d881dSJason Evans { 927df0d881dSJason Evans /* 928df0d881dSJason Evans * Combine the runtime options into malloc_slow for fast path. Called 929df0d881dSJason Evans * after processing all the options. 930df0d881dSJason Evans */ 931df0d881dSJason Evans malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) 932df0d881dSJason Evans | (opt_junk_free ? flag_opt_junk_free : 0) 933df0d881dSJason Evans | (opt_quarantine ? flag_opt_quarantine : 0) 934df0d881dSJason Evans | (opt_zero ? flag_opt_zero : 0) 935df0d881dSJason Evans | (opt_utrace ? flag_opt_utrace : 0) 936df0d881dSJason Evans | (opt_xmalloc ? 
flag_opt_xmalloc : 0); 937df0d881dSJason Evans 938df0d881dSJason Evans if (config_valgrind) 939df0d881dSJason Evans malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0); 940df0d881dSJason Evans 941df0d881dSJason Evans malloc_slow = (malloc_slow_flags != 0); 942df0d881dSJason Evans } 943df0d881dSJason Evans 944df0d881dSJason Evans static void 945a4bd5210SJason Evans malloc_conf_init(void) 946a4bd5210SJason Evans { 947a4bd5210SJason Evans unsigned i; 948a4bd5210SJason Evans char buf[PATH_MAX + 1]; 949a4bd5210SJason Evans const char *opts, *k, *v; 950a4bd5210SJason Evans size_t klen, vlen; 951a4bd5210SJason Evans 95282872ac0SJason Evans /* 95382872ac0SJason Evans * Automatically configure valgrind before processing options. The 95482872ac0SJason Evans * valgrind option remains in jemalloc 3.x for compatibility reasons. 95582872ac0SJason Evans */ 95682872ac0SJason Evans if (config_valgrind) { 957d0e79aa3SJason Evans in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false; 958d0e79aa3SJason Evans if (config_fill && unlikely(in_valgrind)) { 959d0e79aa3SJason Evans opt_junk = "false"; 960d0e79aa3SJason Evans opt_junk_alloc = false; 961d0e79aa3SJason Evans opt_junk_free = false; 962d0e79aa3SJason Evans assert(!opt_zero); 96382872ac0SJason Evans opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; 96482872ac0SJason Evans opt_redzone = true; 96582872ac0SJason Evans } 966d0e79aa3SJason Evans if (config_tcache && unlikely(in_valgrind)) 96782872ac0SJason Evans opt_tcache = false; 96882872ac0SJason Evans } 96982872ac0SJason Evans 970df0d881dSJason Evans for (i = 0; i < 4; i++) { 971a4bd5210SJason Evans /* Get runtime configuration. 
*/ 972a4bd5210SJason Evans switch (i) { 973a4bd5210SJason Evans case 0: 974df0d881dSJason Evans opts = config_malloc_conf; 975df0d881dSJason Evans break; 976df0d881dSJason Evans case 1: 977a4bd5210SJason Evans if (je_malloc_conf != NULL) { 978a4bd5210SJason Evans /* 979a4bd5210SJason Evans * Use options that were compiled into the 980a4bd5210SJason Evans * program. 981a4bd5210SJason Evans */ 982a4bd5210SJason Evans opts = je_malloc_conf; 983a4bd5210SJason Evans } else { 984a4bd5210SJason Evans /* No configuration specified. */ 985a4bd5210SJason Evans buf[0] = '\0'; 986a4bd5210SJason Evans opts = buf; 987a4bd5210SJason Evans } 988a4bd5210SJason Evans break; 989df0d881dSJason Evans case 2: { 990df0d881dSJason Evans ssize_t linklen = 0; 991e722f8f8SJason Evans #ifndef _WIN32 9922b06b201SJason Evans int saved_errno = errno; 993a4bd5210SJason Evans const char *linkname = 994a4bd5210SJason Evans # ifdef JEMALLOC_PREFIX 995a4bd5210SJason Evans "/etc/"JEMALLOC_PREFIX"malloc.conf" 996a4bd5210SJason Evans # else 997a4bd5210SJason Evans "/etc/malloc.conf" 998a4bd5210SJason Evans # endif 999a4bd5210SJason Evans ; 1000a4bd5210SJason Evans 1001a4bd5210SJason Evans /* 10022b06b201SJason Evans * Try to use the contents of the "/etc/malloc.conf" 1003a4bd5210SJason Evans * symbolic link's name. 1004a4bd5210SJason Evans */ 10052b06b201SJason Evans linklen = readlink(linkname, buf, sizeof(buf) - 1); 10062b06b201SJason Evans if (linklen == -1) { 10072b06b201SJason Evans /* No configuration specified. */ 10082b06b201SJason Evans linklen = 0; 1009d0e79aa3SJason Evans /* Restore errno. 
*/ 10102b06b201SJason Evans set_errno(saved_errno); 10112b06b201SJason Evans } 10122b06b201SJason Evans #endif 1013a4bd5210SJason Evans buf[linklen] = '\0'; 1014a4bd5210SJason Evans opts = buf; 1015a4bd5210SJason Evans break; 1016df0d881dSJason Evans } case 3: { 1017a4bd5210SJason Evans const char *envname = 1018a4bd5210SJason Evans #ifdef JEMALLOC_PREFIX 1019a4bd5210SJason Evans JEMALLOC_CPREFIX"MALLOC_CONF" 1020a4bd5210SJason Evans #else 1021a4bd5210SJason Evans "MALLOC_CONF" 1022a4bd5210SJason Evans #endif 1023a4bd5210SJason Evans ; 1024a4bd5210SJason Evans 1025d0e79aa3SJason Evans if ((opts = secure_getenv(envname)) != NULL) { 1026a4bd5210SJason Evans /* 1027a4bd5210SJason Evans * Do nothing; opts is already initialized to 1028a4bd5210SJason Evans * the value of the MALLOC_CONF environment 1029a4bd5210SJason Evans * variable. 1030a4bd5210SJason Evans */ 1031a4bd5210SJason Evans } else { 1032a4bd5210SJason Evans /* No configuration specified. */ 1033a4bd5210SJason Evans buf[0] = '\0'; 1034a4bd5210SJason Evans opts = buf; 1035a4bd5210SJason Evans } 1036a4bd5210SJason Evans break; 1037a4bd5210SJason Evans } default: 1038f921d10fSJason Evans not_reached(); 1039a4bd5210SJason Evans buf[0] = '\0'; 1040a4bd5210SJason Evans opts = buf; 1041a4bd5210SJason Evans } 1042a4bd5210SJason Evans 1043d0e79aa3SJason Evans while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, 1044d0e79aa3SJason Evans &vlen)) { 1045d0e79aa3SJason Evans #define CONF_MATCH(n) \ 1046d0e79aa3SJason Evans (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) 1047d0e79aa3SJason Evans #define CONF_MATCH_VALUE(n) \ 1048d0e79aa3SJason Evans (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) 1049d0e79aa3SJason Evans #define CONF_HANDLE_BOOL(o, n, cont) \ 1050d0e79aa3SJason Evans if (CONF_MATCH(n)) { \ 1051d0e79aa3SJason Evans if (CONF_MATCH_VALUE("true")) \ 1052a4bd5210SJason Evans o = true; \ 1053d0e79aa3SJason Evans else if (CONF_MATCH_VALUE("false")) \ 1054a4bd5210SJason Evans o = false; \ 
1055a4bd5210SJason Evans else { \ 1056a4bd5210SJason Evans malloc_conf_error( \ 1057a4bd5210SJason Evans "Invalid conf value", \ 1058a4bd5210SJason Evans k, klen, v, vlen); \ 1059a4bd5210SJason Evans } \ 1060d0e79aa3SJason Evans if (cont) \ 1061a4bd5210SJason Evans continue; \ 1062a4bd5210SJason Evans } 1063*7fa7f12fSJason Evans #define CONF_MIN_no(um, min) false 1064*7fa7f12fSJason Evans #define CONF_MIN_yes(um, min) ((um) < (min)) 1065*7fa7f12fSJason Evans #define CONF_MAX_no(um, max) false 1066*7fa7f12fSJason Evans #define CONF_MAX_yes(um, max) ((um) > (max)) 1067*7fa7f12fSJason Evans #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ 1068d0e79aa3SJason Evans if (CONF_MATCH(n)) { \ 1069a4bd5210SJason Evans uintmax_t um; \ 1070a4bd5210SJason Evans char *end; \ 1071a4bd5210SJason Evans \ 1072e722f8f8SJason Evans set_errno(0); \ 1073a4bd5210SJason Evans um = malloc_strtoumax(v, &end, 0); \ 1074e722f8f8SJason Evans if (get_errno() != 0 || (uintptr_t)end -\ 1075a4bd5210SJason Evans (uintptr_t)v != vlen) { \ 1076a4bd5210SJason Evans malloc_conf_error( \ 1077a4bd5210SJason Evans "Invalid conf value", \ 1078a4bd5210SJason Evans k, klen, v, vlen); \ 107988ad2f8dSJason Evans } else if (clip) { \ 1080*7fa7f12fSJason Evans if (CONF_MIN_##check_min(um, \ 1081*7fa7f12fSJason Evans (min))) \ 1082df0d881dSJason Evans o = (t)(min); \ 1083*7fa7f12fSJason Evans else if (CONF_MAX_##check_max( \ 1084*7fa7f12fSJason Evans um, (max))) \ 1085df0d881dSJason Evans o = (t)(max); \ 108688ad2f8dSJason Evans else \ 1087df0d881dSJason Evans o = (t)um; \ 108888ad2f8dSJason Evans } else { \ 1089*7fa7f12fSJason Evans if (CONF_MIN_##check_min(um, \ 1090*7fa7f12fSJason Evans (min)) || \ 1091*7fa7f12fSJason Evans CONF_MAX_##check_max(um, \ 1092*7fa7f12fSJason Evans (max))) { \ 1093a4bd5210SJason Evans malloc_conf_error( \ 109488ad2f8dSJason Evans "Out-of-range " \ 109588ad2f8dSJason Evans "conf value", \ 1096a4bd5210SJason Evans k, klen, v, vlen); \ 1097a4bd5210SJason Evans } 
else \ 1098df0d881dSJason Evans o = (t)um; \ 109988ad2f8dSJason Evans } \ 1100a4bd5210SJason Evans continue; \ 1101a4bd5210SJason Evans } 1102*7fa7f12fSJason Evans #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ 1103*7fa7f12fSJason Evans clip) \ 1104*7fa7f12fSJason Evans CONF_HANDLE_T_U(unsigned, o, n, min, max, \ 1105*7fa7f12fSJason Evans check_min, check_max, clip) 1106*7fa7f12fSJason Evans #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ 1107*7fa7f12fSJason Evans CONF_HANDLE_T_U(size_t, o, n, min, max, \ 1108*7fa7f12fSJason Evans check_min, check_max, clip) 1109a4bd5210SJason Evans #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ 1110d0e79aa3SJason Evans if (CONF_MATCH(n)) { \ 1111a4bd5210SJason Evans long l; \ 1112a4bd5210SJason Evans char *end; \ 1113a4bd5210SJason Evans \ 1114e722f8f8SJason Evans set_errno(0); \ 1115a4bd5210SJason Evans l = strtol(v, &end, 0); \ 1116e722f8f8SJason Evans if (get_errno() != 0 || (uintptr_t)end -\ 1117a4bd5210SJason Evans (uintptr_t)v != vlen) { \ 1118a4bd5210SJason Evans malloc_conf_error( \ 1119a4bd5210SJason Evans "Invalid conf value", \ 1120a4bd5210SJason Evans k, klen, v, vlen); \ 1121d0e79aa3SJason Evans } else if (l < (ssize_t)(min) || l > \ 1122d0e79aa3SJason Evans (ssize_t)(max)) { \ 1123a4bd5210SJason Evans malloc_conf_error( \ 1124a4bd5210SJason Evans "Out-of-range conf value", \ 1125a4bd5210SJason Evans k, klen, v, vlen); \ 1126a4bd5210SJason Evans } else \ 1127a4bd5210SJason Evans o = l; \ 1128a4bd5210SJason Evans continue; \ 1129a4bd5210SJason Evans } 1130a4bd5210SJason Evans #define CONF_HANDLE_CHAR_P(o, n, d) \ 1131d0e79aa3SJason Evans if (CONF_MATCH(n)) { \ 1132a4bd5210SJason Evans size_t cpylen = (vlen <= \ 1133a4bd5210SJason Evans sizeof(o)-1) ? 
vlen : \ 1134a4bd5210SJason Evans sizeof(o)-1; \ 1135a4bd5210SJason Evans strncpy(o, v, cpylen); \ 1136a4bd5210SJason Evans o[cpylen] = '\0'; \ 1137a4bd5210SJason Evans continue; \ 1138a4bd5210SJason Evans } 1139a4bd5210SJason Evans 1140d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_abort, "abort", true) 1141a4bd5210SJason Evans /* 1142d0e79aa3SJason Evans * Chunks always require at least one header page, 1143d0e79aa3SJason Evans * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and 1144d0e79aa3SJason Evans * possibly an additional page in the presence of 1145d0e79aa3SJason Evans * redzones. In order to simplify options processing, 1146d0e79aa3SJason Evans * use a conservative bound that accommodates all these 1147d0e79aa3SJason Evans * constraints. 1148a4bd5210SJason Evans */ 11498ed34ab0SJason Evans CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + 1150d0e79aa3SJason Evans LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1), 1151*7fa7f12fSJason Evans (sizeof(size_t) << 3) - 1, yes, yes, true) 115282872ac0SJason Evans if (strncmp("dss", k, klen) == 0) { 115382872ac0SJason Evans int i; 115482872ac0SJason Evans bool match = false; 115582872ac0SJason Evans for (i = 0; i < dss_prec_limit; i++) { 115682872ac0SJason Evans if (strncmp(dss_prec_names[i], v, vlen) 115782872ac0SJason Evans == 0) { 1158bde95144SJason Evans if (chunk_dss_prec_set(i)) { 115982872ac0SJason Evans malloc_conf_error( 116082872ac0SJason Evans "Error setting dss", 116182872ac0SJason Evans k, klen, v, vlen); 116282872ac0SJason Evans } else { 116382872ac0SJason Evans opt_dss = 116482872ac0SJason Evans dss_prec_names[i]; 116582872ac0SJason Evans match = true; 116682872ac0SJason Evans break; 116782872ac0SJason Evans } 116882872ac0SJason Evans } 116982872ac0SJason Evans } 1170d0e79aa3SJason Evans if (!match) { 117182872ac0SJason Evans malloc_conf_error("Invalid conf value", 117282872ac0SJason Evans k, klen, v, vlen); 117382872ac0SJason Evans } 117482872ac0SJason Evans continue; 117582872ac0SJason Evans } 
1176df0d881dSJason Evans CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, 1177*7fa7f12fSJason Evans UINT_MAX, yes, no, false) 1178df0d881dSJason Evans if (strncmp("purge", k, klen) == 0) { 1179df0d881dSJason Evans int i; 1180df0d881dSJason Evans bool match = false; 1181df0d881dSJason Evans for (i = 0; i < purge_mode_limit; i++) { 1182df0d881dSJason Evans if (strncmp(purge_mode_names[i], v, 1183df0d881dSJason Evans vlen) == 0) { 1184df0d881dSJason Evans opt_purge = (purge_mode_t)i; 1185df0d881dSJason Evans match = true; 1186df0d881dSJason Evans break; 1187df0d881dSJason Evans } 1188df0d881dSJason Evans } 1189df0d881dSJason Evans if (!match) { 1190df0d881dSJason Evans malloc_conf_error("Invalid conf value", 1191df0d881dSJason Evans k, klen, v, vlen); 1192df0d881dSJason Evans } 1193df0d881dSJason Evans continue; 1194df0d881dSJason Evans } 11958ed34ab0SJason Evans CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", 1196a4bd5210SJason Evans -1, (sizeof(size_t) << 3) - 1) 1197df0d881dSJason Evans CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1, 1198df0d881dSJason Evans NSTIME_SEC_MAX); 1199d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true) 1200a4bd5210SJason Evans if (config_fill) { 1201d0e79aa3SJason Evans if (CONF_MATCH("junk")) { 1202d0e79aa3SJason Evans if (CONF_MATCH_VALUE("true")) { 1203bde95144SJason Evans if (config_valgrind && 1204bde95144SJason Evans unlikely(in_valgrind)) { 1205bde95144SJason Evans malloc_conf_error( 1206bde95144SJason Evans "Deallocation-time " 1207bde95144SJason Evans "junk filling cannot " 1208bde95144SJason Evans "be enabled while " 1209bde95144SJason Evans "running inside " 1210bde95144SJason Evans "Valgrind", k, klen, v, 1211bde95144SJason Evans vlen); 1212bde95144SJason Evans } else { 1213d0e79aa3SJason Evans opt_junk = "true"; 1214bde95144SJason Evans opt_junk_alloc = true; 1215bde95144SJason Evans opt_junk_free = true; 1216bde95144SJason Evans } 1217d0e79aa3SJason Evans } else if 
(CONF_MATCH_VALUE("false")) { 1218d0e79aa3SJason Evans opt_junk = "false"; 1219d0e79aa3SJason Evans opt_junk_alloc = opt_junk_free = 1220d0e79aa3SJason Evans false; 1221d0e79aa3SJason Evans } else if (CONF_MATCH_VALUE("alloc")) { 1222d0e79aa3SJason Evans opt_junk = "alloc"; 1223d0e79aa3SJason Evans opt_junk_alloc = true; 1224d0e79aa3SJason Evans opt_junk_free = false; 1225d0e79aa3SJason Evans } else if (CONF_MATCH_VALUE("free")) { 1226bde95144SJason Evans if (config_valgrind && 1227bde95144SJason Evans unlikely(in_valgrind)) { 1228bde95144SJason Evans malloc_conf_error( 1229bde95144SJason Evans "Deallocation-time " 1230bde95144SJason Evans "junk filling cannot " 1231bde95144SJason Evans "be enabled while " 1232bde95144SJason Evans "running inside " 1233bde95144SJason Evans "Valgrind", k, klen, v, 1234bde95144SJason Evans vlen); 1235bde95144SJason Evans } else { 1236d0e79aa3SJason Evans opt_junk = "free"; 1237d0e79aa3SJason Evans opt_junk_alloc = false; 1238d0e79aa3SJason Evans opt_junk_free = true; 1239bde95144SJason Evans } 1240d0e79aa3SJason Evans } else { 1241d0e79aa3SJason Evans malloc_conf_error( 1242d0e79aa3SJason Evans "Invalid conf value", k, 1243d0e79aa3SJason Evans klen, v, vlen); 1244d0e79aa3SJason Evans } 1245d0e79aa3SJason Evans continue; 1246d0e79aa3SJason Evans } 12478ed34ab0SJason Evans CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", 1248*7fa7f12fSJason Evans 0, SIZE_T_MAX, no, no, false) 1249d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_redzone, "redzone", true) 1250d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_zero, "zero", true) 1251a4bd5210SJason Evans } 1252a4bd5210SJason Evans if (config_utrace) { 1253d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_utrace, "utrace", true) 1254a4bd5210SJason Evans } 1255a4bd5210SJason Evans if (config_xmalloc) { 1256d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true) 1257a4bd5210SJason Evans } 1258a4bd5210SJason Evans if (config_tcache) { 1259d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_tcache, "tcache", 
1260d0e79aa3SJason Evans !config_valgrind || !in_valgrind) 1261d0e79aa3SJason Evans if (CONF_MATCH("tcache")) { 1262d0e79aa3SJason Evans assert(config_valgrind && in_valgrind); 1263d0e79aa3SJason Evans if (opt_tcache) { 1264d0e79aa3SJason Evans opt_tcache = false; 1265d0e79aa3SJason Evans malloc_conf_error( 1266d0e79aa3SJason Evans "tcache cannot be enabled " 1267d0e79aa3SJason Evans "while running inside Valgrind", 1268d0e79aa3SJason Evans k, klen, v, vlen); 1269d0e79aa3SJason Evans } 1270d0e79aa3SJason Evans continue; 1271d0e79aa3SJason Evans } 1272a4bd5210SJason Evans CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, 12738ed34ab0SJason Evans "lg_tcache_max", -1, 1274a4bd5210SJason Evans (sizeof(size_t) << 3) - 1) 1275a4bd5210SJason Evans } 1276a4bd5210SJason Evans if (config_prof) { 1277d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof, "prof", true) 12788ed34ab0SJason Evans CONF_HANDLE_CHAR_P(opt_prof_prefix, 12798ed34ab0SJason Evans "prof_prefix", "jeprof") 1280d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_active, "prof_active", 1281d0e79aa3SJason Evans true) 1282d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_thread_active_init, 1283d0e79aa3SJason Evans "prof_thread_active_init", true) 1284d0e79aa3SJason Evans CONF_HANDLE_SIZE_T(opt_lg_prof_sample, 1285*7fa7f12fSJason Evans "lg_prof_sample", 0, (sizeof(uint64_t) << 3) 1286*7fa7f12fSJason Evans - 1, no, yes, true) 1287d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum", 1288d0e79aa3SJason Evans true) 1289a4bd5210SJason Evans CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, 12908ed34ab0SJason Evans "lg_prof_interval", -1, 1291a4bd5210SJason Evans (sizeof(uint64_t) << 3) - 1) 1292d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump", 1293d0e79aa3SJason Evans true) 1294d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_final, "prof_final", 1295d0e79aa3SJason Evans true) 1296d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak", 1297d0e79aa3SJason Evans true) 1298a4bd5210SJason Evans } 
1299a4bd5210SJason Evans malloc_conf_error("Invalid conf pair", k, klen, v, 1300a4bd5210SJason Evans vlen); 1301d0e79aa3SJason Evans #undef CONF_MATCH 1302*7fa7f12fSJason Evans #undef CONF_MATCH_VALUE 1303a4bd5210SJason Evans #undef CONF_HANDLE_BOOL 1304*7fa7f12fSJason Evans #undef CONF_MIN_no 1305*7fa7f12fSJason Evans #undef CONF_MIN_yes 1306*7fa7f12fSJason Evans #undef CONF_MAX_no 1307*7fa7f12fSJason Evans #undef CONF_MAX_yes 1308*7fa7f12fSJason Evans #undef CONF_HANDLE_T_U 1309*7fa7f12fSJason Evans #undef CONF_HANDLE_UNSIGNED 1310a4bd5210SJason Evans #undef CONF_HANDLE_SIZE_T 1311a4bd5210SJason Evans #undef CONF_HANDLE_SSIZE_T 1312a4bd5210SJason Evans #undef CONF_HANDLE_CHAR_P 1313a4bd5210SJason Evans } 1314a4bd5210SJason Evans } 1315a4bd5210SJason Evans } 1316a4bd5210SJason Evans 1317a4bd5210SJason Evans static bool 1318d0e79aa3SJason Evans malloc_init_hard_needed(void) 1319a4bd5210SJason Evans { 1320a4bd5210SJason Evans 1321d0e79aa3SJason Evans if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == 1322d0e79aa3SJason Evans malloc_init_recursible)) { 1323a4bd5210SJason Evans /* 1324a4bd5210SJason Evans * Another thread initialized the allocator before this one 1325a4bd5210SJason Evans * acquired init_lock, or this thread is the initializing 1326a4bd5210SJason Evans * thread, and it is recursively allocating. 1327a4bd5210SJason Evans */ 1328a4bd5210SJason Evans return (false); 1329a4bd5210SJason Evans } 1330a4bd5210SJason Evans #ifdef JEMALLOC_THREADED_INIT 1331d0e79aa3SJason Evans if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { 1332bde95144SJason Evans spin_t spinner; 1333bde95144SJason Evans 1334a4bd5210SJason Evans /* Busy-wait until the initializing thread completes. 
*/ 1335bde95144SJason Evans spin_init(&spinner); 1336a4bd5210SJason Evans do { 1337bde95144SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 1338bde95144SJason Evans spin_adaptive(&spinner); 1339bde95144SJason Evans malloc_mutex_lock(TSDN_NULL, &init_lock); 1340d0e79aa3SJason Evans } while (!malloc_initialized()); 1341a4bd5210SJason Evans return (false); 1342a4bd5210SJason Evans } 1343a4bd5210SJason Evans #endif 1344d0e79aa3SJason Evans return (true); 1345d0e79aa3SJason Evans } 1346d0e79aa3SJason Evans 1347d0e79aa3SJason Evans static bool 13481f0a49e8SJason Evans malloc_init_hard_a0_locked() 1349d0e79aa3SJason Evans { 1350d0e79aa3SJason Evans 1351a4bd5210SJason Evans malloc_initializer = INITIALIZER; 1352a4bd5210SJason Evans 1353a4bd5210SJason Evans if (config_prof) 1354a4bd5210SJason Evans prof_boot0(); 1355a4bd5210SJason Evans malloc_conf_init(); 1356a4bd5210SJason Evans if (opt_stats_print) { 1357a4bd5210SJason Evans /* Print statistics at exit. */ 1358a4bd5210SJason Evans if (atexit(stats_print_atexit) != 0) { 1359a4bd5210SJason Evans malloc_write("<jemalloc>: Error in atexit()\n"); 1360a4bd5210SJason Evans if (opt_abort) 1361a4bd5210SJason Evans abort(); 1362a4bd5210SJason Evans } 1363a4bd5210SJason Evans } 13641f0a49e8SJason Evans pages_boot(); 1365d0e79aa3SJason Evans if (base_boot()) 1366a4bd5210SJason Evans return (true); 1367d0e79aa3SJason Evans if (chunk_boot()) 1368a4bd5210SJason Evans return (true); 1369d0e79aa3SJason Evans if (ctl_boot()) 1370a4bd5210SJason Evans return (true); 1371a4bd5210SJason Evans if (config_prof) 1372a4bd5210SJason Evans prof_boot1(); 1373bde95144SJason Evans arena_boot(); 13741f0a49e8SJason Evans if (config_tcache && tcache_boot(TSDN_NULL)) 1375a4bd5210SJason Evans return (true); 13761f0a49e8SJason Evans if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) 1377a4bd5210SJason Evans return (true); 1378a4bd5210SJason Evans /* 1379a4bd5210SJason Evans * Create enough scaffolding to allow recursive allocation 
in 1380a4bd5210SJason Evans * malloc_ncpus(). 1381a4bd5210SJason Evans */ 1382df0d881dSJason Evans narenas_auto = 1; 1383df0d881dSJason Evans narenas_total_set(narenas_auto); 1384d0e79aa3SJason Evans arenas = &a0; 138582872ac0SJason Evans memset(arenas, 0, sizeof(arena_t *) * narenas_auto); 1386a4bd5210SJason Evans /* 1387a4bd5210SJason Evans * Initialize one arena here. The rest are lazily created in 1388d0e79aa3SJason Evans * arena_choose_hard(). 1389a4bd5210SJason Evans */ 13901f0a49e8SJason Evans if (arena_init(TSDN_NULL, 0) == NULL) 1391a4bd5210SJason Evans return (true); 13921f0a49e8SJason Evans 1393d0e79aa3SJason Evans malloc_init_state = malloc_init_a0_initialized; 13941f0a49e8SJason Evans 1395d0e79aa3SJason Evans return (false); 1396a4bd5210SJason Evans } 1397a4bd5210SJason Evans 1398d0e79aa3SJason Evans static bool 1399d0e79aa3SJason Evans malloc_init_hard_a0(void) 1400d0e79aa3SJason Evans { 1401d0e79aa3SJason Evans bool ret; 1402d0e79aa3SJason Evans 14031f0a49e8SJason Evans malloc_mutex_lock(TSDN_NULL, &init_lock); 1404d0e79aa3SJason Evans ret = malloc_init_hard_a0_locked(); 14051f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 1406d0e79aa3SJason Evans return (ret); 1407a4bd5210SJason Evans } 1408a4bd5210SJason Evans 14091f0a49e8SJason Evans /* Initialize data structures which may trigger recursive allocation. */ 1410df0d881dSJason Evans static bool 1411d0e79aa3SJason Evans malloc_init_hard_recursible(void) 1412d0e79aa3SJason Evans { 1413a4bd5210SJason Evans 1414d0e79aa3SJason Evans malloc_init_state = malloc_init_recursible; 1415df0d881dSJason Evans 1416a4bd5210SJason Evans ncpus = malloc_ncpus(); 1417f921d10fSJason Evans 1418*7fa7f12fSJason Evans #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ 1419*7fa7f12fSJason Evans && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ 1420*7fa7f12fSJason Evans !defined(__native_client__)) 1421df0d881dSJason Evans /* LinuxThreads' pthread_atfork() allocates. 
*/ 1422f921d10fSJason Evans if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, 1423f921d10fSJason Evans jemalloc_postfork_child) != 0) { 1424f921d10fSJason Evans malloc_write("<jemalloc>: Error in pthread_atfork()\n"); 1425f921d10fSJason Evans if (opt_abort) 1426f921d10fSJason Evans abort(); 14271f0a49e8SJason Evans return (true); 1428f921d10fSJason Evans } 1429f921d10fSJason Evans #endif 1430df0d881dSJason Evans 14311f0a49e8SJason Evans return (false); 1432a4bd5210SJason Evans } 1433a4bd5210SJason Evans 1434d0e79aa3SJason Evans static bool 14351f0a49e8SJason Evans malloc_init_hard_finish(tsdn_t *tsdn) 1436d0e79aa3SJason Evans { 1437d0e79aa3SJason Evans 14381f0a49e8SJason Evans if (malloc_mutex_boot()) 1439d0e79aa3SJason Evans return (true); 1440d0e79aa3SJason Evans 1441a4bd5210SJason Evans if (opt_narenas == 0) { 1442a4bd5210SJason Evans /* 1443a4bd5210SJason Evans * For SMP systems, create more than one arena per CPU by 1444a4bd5210SJason Evans * default. 1445a4bd5210SJason Evans */ 1446a4bd5210SJason Evans if (ncpus > 1) 1447a4bd5210SJason Evans opt_narenas = ncpus << 2; 1448a4bd5210SJason Evans else 1449a4bd5210SJason Evans opt_narenas = 1; 1450a4bd5210SJason Evans } 145182872ac0SJason Evans narenas_auto = opt_narenas; 1452a4bd5210SJason Evans /* 1453df0d881dSJason Evans * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). 1454a4bd5210SJason Evans */ 1455df0d881dSJason Evans if (narenas_auto > MALLOCX_ARENA_MAX) { 1456df0d881dSJason Evans narenas_auto = MALLOCX_ARENA_MAX; 1457a4bd5210SJason Evans malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", 145882872ac0SJason Evans narenas_auto); 1459a4bd5210SJason Evans } 1460df0d881dSJason Evans narenas_total_set(narenas_auto); 1461a4bd5210SJason Evans 1462a4bd5210SJason Evans /* Allocate and initialize arenas. 
*/ 14631f0a49e8SJason Evans arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) * 1464df0d881dSJason Evans (MALLOCX_ARENA_MAX+1)); 1465d0e79aa3SJason Evans if (arenas == NULL) 1466a4bd5210SJason Evans return (true); 1467a4bd5210SJason Evans /* Copy the pointer to the one arena that was already initialized. */ 1468df0d881dSJason Evans arena_set(0, a0); 1469a4bd5210SJason Evans 1470d0e79aa3SJason Evans malloc_init_state = malloc_init_initialized; 1471df0d881dSJason Evans malloc_slow_flag_init(); 1472df0d881dSJason Evans 1473d0e79aa3SJason Evans return (false); 1474d0e79aa3SJason Evans } 1475d0e79aa3SJason Evans 1476d0e79aa3SJason Evans static bool 1477d0e79aa3SJason Evans malloc_init_hard(void) 1478d0e79aa3SJason Evans { 14791f0a49e8SJason Evans tsd_t *tsd; 1480d0e79aa3SJason Evans 1481536b3538SJason Evans #if defined(_WIN32) && _WIN32_WINNT < 0x0600 1482536b3538SJason Evans _init_init_lock(); 1483536b3538SJason Evans #endif 14841f0a49e8SJason Evans malloc_mutex_lock(TSDN_NULL, &init_lock); 1485d0e79aa3SJason Evans if (!malloc_init_hard_needed()) { 14861f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 1487d0e79aa3SJason Evans return (false); 1488d0e79aa3SJason Evans } 1489f921d10fSJason Evans 1490d0e79aa3SJason Evans if (malloc_init_state != malloc_init_a0_initialized && 1491d0e79aa3SJason Evans malloc_init_hard_a0_locked()) { 14921f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 1493d0e79aa3SJason Evans return (true); 1494d0e79aa3SJason Evans } 1495df0d881dSJason Evans 14961f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 14971f0a49e8SJason Evans /* Recursive allocation relies on functional tsd. 
*/ 14981f0a49e8SJason Evans tsd = malloc_tsd_boot0(); 14991f0a49e8SJason Evans if (tsd == NULL) 15001f0a49e8SJason Evans return (true); 15011f0a49e8SJason Evans if (malloc_init_hard_recursible()) 15021f0a49e8SJason Evans return (true); 15031f0a49e8SJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); 15041f0a49e8SJason Evans 1505bde95144SJason Evans if (config_prof && prof_boot2(tsd)) { 15061f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); 1507d0e79aa3SJason Evans return (true); 1508d0e79aa3SJason Evans } 1509d0e79aa3SJason Evans 15101f0a49e8SJason Evans if (malloc_init_hard_finish(tsd_tsdn(tsd))) { 15111f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); 1512df0d881dSJason Evans return (true); 1513df0d881dSJason Evans } 1514d0e79aa3SJason Evans 15151f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); 1516d0e79aa3SJason Evans malloc_tsd_boot1(); 1517a4bd5210SJason Evans return (false); 1518a4bd5210SJason Evans } 1519a4bd5210SJason Evans 1520a4bd5210SJason Evans /* 1521a4bd5210SJason Evans * End initialization functions. 1522a4bd5210SJason Evans */ 1523a4bd5210SJason Evans /******************************************************************************/ 1524a4bd5210SJason Evans /* 1525a4bd5210SJason Evans * Begin malloc(3)-compatible functions. 
1526a4bd5210SJason Evans */ 1527a4bd5210SJason Evans 1528f921d10fSJason Evans static void * 15291f0a49e8SJason Evans ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero, 1530df0d881dSJason Evans prof_tctx_t *tctx, bool slow_path) 1531f921d10fSJason Evans { 1532f921d10fSJason Evans void *p; 1533f921d10fSJason Evans 1534d0e79aa3SJason Evans if (tctx == NULL) 1535f921d10fSJason Evans return (NULL); 1536d0e79aa3SJason Evans if (usize <= SMALL_MAXCLASS) { 1537df0d881dSJason Evans szind_t ind_large = size2index(LARGE_MINCLASS); 15381f0a49e8SJason Evans p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path); 1539f921d10fSJason Evans if (p == NULL) 1540f921d10fSJason Evans return (NULL); 15411f0a49e8SJason Evans arena_prof_promoted(tsd_tsdn(tsd), p, usize); 1542f921d10fSJason Evans } else 15431f0a49e8SJason Evans p = ialloc(tsd, usize, ind, zero, slow_path); 1544f921d10fSJason Evans 1545f921d10fSJason Evans return (p); 1546f921d10fSJason Evans } 1547f921d10fSJason Evans 1548f921d10fSJason Evans JEMALLOC_ALWAYS_INLINE_C void * 15491f0a49e8SJason Evans ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path) 1550f921d10fSJason Evans { 1551f921d10fSJason Evans void *p; 1552d0e79aa3SJason Evans prof_tctx_t *tctx; 1553f921d10fSJason Evans 1554536b3538SJason Evans tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); 1555d0e79aa3SJason Evans if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) 15561f0a49e8SJason Evans p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path); 1557f921d10fSJason Evans else 15581f0a49e8SJason Evans p = ialloc(tsd, usize, ind, zero, slow_path); 1559d0e79aa3SJason Evans if (unlikely(p == NULL)) { 1560d0e79aa3SJason Evans prof_alloc_rollback(tsd, tctx, true); 1561f921d10fSJason Evans return (NULL); 1562d0e79aa3SJason Evans } 15631f0a49e8SJason Evans prof_malloc(tsd_tsdn(tsd), p, usize, tctx); 1564f921d10fSJason Evans 1565f921d10fSJason Evans return (p); 1566f921d10fSJason Evans } 
1567f921d10fSJason Evans 15681f0a49e8SJason Evans /* 15691f0a49e8SJason Evans * ialloc_body() is inlined so that fast and slow paths are generated separately 15701f0a49e8SJason Evans * with statically known slow_path. 15711f0a49e8SJason Evans * 15721f0a49e8SJason Evans * This function guarantees that *tsdn is non-NULL on success. 15731f0a49e8SJason Evans */ 1574d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C void * 15751f0a49e8SJason Evans ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize, 15761f0a49e8SJason Evans bool slow_path) 1577d0e79aa3SJason Evans { 15781f0a49e8SJason Evans tsd_t *tsd; 1579df0d881dSJason Evans szind_t ind; 1580f921d10fSJason Evans 15811f0a49e8SJason Evans if (slow_path && unlikely(malloc_init())) { 15821f0a49e8SJason Evans *tsdn = NULL; 1583d0e79aa3SJason Evans return (NULL); 15841f0a49e8SJason Evans } 15851f0a49e8SJason Evans 15861f0a49e8SJason Evans tsd = tsd_fetch(); 15871f0a49e8SJason Evans *tsdn = tsd_tsdn(tsd); 15881f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 15891f0a49e8SJason Evans 1590df0d881dSJason Evans ind = size2index(size); 1591df0d881dSJason Evans if (unlikely(ind >= NSIZES)) 1592d0e79aa3SJason Evans return (NULL); 1593df0d881dSJason Evans 1594df0d881dSJason Evans if (config_stats || (config_prof && opt_prof) || (slow_path && 1595df0d881dSJason Evans config_valgrind && unlikely(in_valgrind))) { 1596df0d881dSJason Evans *usize = index2size(ind); 1597df0d881dSJason Evans assert(*usize > 0 && *usize <= HUGE_MAXCLASS); 1598d0e79aa3SJason Evans } 1599d0e79aa3SJason Evans 1600df0d881dSJason Evans if (config_prof && opt_prof) 16011f0a49e8SJason Evans return (ialloc_prof(tsd, *usize, ind, zero, slow_path)); 1602df0d881dSJason Evans 16031f0a49e8SJason Evans return (ialloc(tsd, size, ind, zero, slow_path)); 1604df0d881dSJason Evans } 1605df0d881dSJason Evans 1606df0d881dSJason Evans JEMALLOC_ALWAYS_INLINE_C void 16071f0a49e8SJason Evans ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char 
*func, 16081f0a49e8SJason Evans bool update_errno, bool slow_path) 1609df0d881dSJason Evans { 16101f0a49e8SJason Evans 16111f0a49e8SJason Evans assert(!tsdn_null(tsdn) || ret == NULL); 16121f0a49e8SJason Evans 1613df0d881dSJason Evans if (unlikely(ret == NULL)) { 1614df0d881dSJason Evans if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) { 16151f0a49e8SJason Evans malloc_printf("<jemalloc>: Error in %s(): out of " 16161f0a49e8SJason Evans "memory\n", func); 1617df0d881dSJason Evans abort(); 1618df0d881dSJason Evans } 16191f0a49e8SJason Evans if (update_errno) 1620df0d881dSJason Evans set_errno(ENOMEM); 1621df0d881dSJason Evans } 1622df0d881dSJason Evans if (config_stats && likely(ret != NULL)) { 16231f0a49e8SJason Evans assert(usize == isalloc(tsdn, ret, config_prof)); 16241f0a49e8SJason Evans *tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize; 1625df0d881dSJason Evans } 16261f0a49e8SJason Evans witness_assert_lockless(tsdn); 1627d0e79aa3SJason Evans } 1628d0e79aa3SJason Evans 1629d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 1630d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 1631d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) 1632a4bd5210SJason Evans je_malloc(size_t size) 1633a4bd5210SJason Evans { 1634a4bd5210SJason Evans void *ret; 16351f0a49e8SJason Evans tsdn_t *tsdn; 1636e722f8f8SJason Evans size_t usize JEMALLOC_CC_SILENCE_INIT(0); 1637a4bd5210SJason Evans 1638a4bd5210SJason Evans if (size == 0) 1639a4bd5210SJason Evans size = 1; 1640a4bd5210SJason Evans 1641df0d881dSJason Evans if (likely(!malloc_slow)) { 16421f0a49e8SJason Evans ret = ialloc_body(size, false, &tsdn, &usize, false); 16431f0a49e8SJason Evans ialloc_post_check(ret, tsdn, usize, "malloc", true, false); 1644df0d881dSJason Evans } else { 16451f0a49e8SJason Evans ret = ialloc_body(size, false, &tsdn, &usize, true); 16461f0a49e8SJason Evans ialloc_post_check(ret, tsdn, usize, "malloc", true, true); 1647a4bd5210SJason Evans UTRACE(0, size, 
ret); 16481f0a49e8SJason Evans JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false); 1649df0d881dSJason Evans } 1650df0d881dSJason Evans 1651a4bd5210SJason Evans return (ret); 1652a4bd5210SJason Evans } 1653a4bd5210SJason Evans 1654f921d10fSJason Evans static void * 1655d0e79aa3SJason Evans imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize, 1656d0e79aa3SJason Evans prof_tctx_t *tctx) 1657f921d10fSJason Evans { 1658f921d10fSJason Evans void *p; 1659f921d10fSJason Evans 1660d0e79aa3SJason Evans if (tctx == NULL) 1661f921d10fSJason Evans return (NULL); 1662d0e79aa3SJason Evans if (usize <= SMALL_MAXCLASS) { 1663d0e79aa3SJason Evans assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS); 1664d0e79aa3SJason Evans p = ipalloc(tsd, LARGE_MINCLASS, alignment, false); 1665f921d10fSJason Evans if (p == NULL) 1666f921d10fSJason Evans return (NULL); 16671f0a49e8SJason Evans arena_prof_promoted(tsd_tsdn(tsd), p, usize); 1668f921d10fSJason Evans } else 1669d0e79aa3SJason Evans p = ipalloc(tsd, usize, alignment, false); 1670f921d10fSJason Evans 1671f921d10fSJason Evans return (p); 1672f921d10fSJason Evans } 1673f921d10fSJason Evans 1674f921d10fSJason Evans JEMALLOC_ALWAYS_INLINE_C void * 1675d0e79aa3SJason Evans imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize) 1676f921d10fSJason Evans { 1677f921d10fSJason Evans void *p; 1678d0e79aa3SJason Evans prof_tctx_t *tctx; 1679f921d10fSJason Evans 1680536b3538SJason Evans tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); 1681d0e79aa3SJason Evans if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) 1682d0e79aa3SJason Evans p = imemalign_prof_sample(tsd, alignment, usize, tctx); 1683f921d10fSJason Evans else 1684d0e79aa3SJason Evans p = ipalloc(tsd, usize, alignment, false); 1685d0e79aa3SJason Evans if (unlikely(p == NULL)) { 1686d0e79aa3SJason Evans prof_alloc_rollback(tsd, tctx, true); 1687f921d10fSJason Evans return (NULL); 1688d0e79aa3SJason Evans } 16891f0a49e8SJason Evans 
prof_malloc(tsd_tsdn(tsd), p, usize, tctx); 1690f921d10fSJason Evans 1691f921d10fSJason Evans return (p); 1692f921d10fSJason Evans } 1693f921d10fSJason Evans 1694a4bd5210SJason Evans JEMALLOC_ATTR(nonnull(1)) 1695a4bd5210SJason Evans static int 1696f921d10fSJason Evans imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment) 1697a4bd5210SJason Evans { 1698a4bd5210SJason Evans int ret; 1699d0e79aa3SJason Evans tsd_t *tsd; 1700a4bd5210SJason Evans size_t usize; 1701a4bd5210SJason Evans void *result; 1702a4bd5210SJason Evans 1703a4bd5210SJason Evans assert(min_alignment != 0); 1704a4bd5210SJason Evans 1705d0e79aa3SJason Evans if (unlikely(malloc_init())) { 17061f0a49e8SJason Evans tsd = NULL; 1707a4bd5210SJason Evans result = NULL; 1708f921d10fSJason Evans goto label_oom; 1709d0e79aa3SJason Evans } 1710d0e79aa3SJason Evans tsd = tsd_fetch(); 17111f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 1712a4bd5210SJason Evans if (size == 0) 1713a4bd5210SJason Evans size = 1; 1714a4bd5210SJason Evans 1715a4bd5210SJason Evans /* Make sure that alignment is a large enough power of 2. 
*/ 1716d0e79aa3SJason Evans if (unlikely(((alignment - 1) & alignment) != 0 1717d0e79aa3SJason Evans || (alignment < min_alignment))) { 1718d0e79aa3SJason Evans if (config_xmalloc && unlikely(opt_xmalloc)) { 1719a4bd5210SJason Evans malloc_write("<jemalloc>: Error allocating " 1720a4bd5210SJason Evans "aligned memory: invalid alignment\n"); 1721a4bd5210SJason Evans abort(); 1722a4bd5210SJason Evans } 1723a4bd5210SJason Evans result = NULL; 1724a4bd5210SJason Evans ret = EINVAL; 1725a4bd5210SJason Evans goto label_return; 1726a4bd5210SJason Evans } 1727a4bd5210SJason Evans 1728a4bd5210SJason Evans usize = sa2u(size, alignment); 1729df0d881dSJason Evans if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) { 1730a4bd5210SJason Evans result = NULL; 1731f921d10fSJason Evans goto label_oom; 1732a4bd5210SJason Evans } 1733a4bd5210SJason Evans 1734d0e79aa3SJason Evans if (config_prof && opt_prof) 1735d0e79aa3SJason Evans result = imemalign_prof(tsd, alignment, usize); 1736d0e79aa3SJason Evans else 1737d0e79aa3SJason Evans result = ipalloc(tsd, usize, alignment, false); 1738d0e79aa3SJason Evans if (unlikely(result == NULL)) 1739f921d10fSJason Evans goto label_oom; 1740d0e79aa3SJason Evans assert(((uintptr_t)result & (alignment - 1)) == ZU(0)); 1741a4bd5210SJason Evans 1742a4bd5210SJason Evans *memptr = result; 1743a4bd5210SJason Evans ret = 0; 1744a4bd5210SJason Evans label_return: 1745d0e79aa3SJason Evans if (config_stats && likely(result != NULL)) { 17461f0a49e8SJason Evans assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof)); 1747d0e79aa3SJason Evans *tsd_thread_allocatedp_get(tsd) += usize; 1748a4bd5210SJason Evans } 1749a4bd5210SJason Evans UTRACE(0, size, result); 17501f0a49e8SJason Evans JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize, 17511f0a49e8SJason Evans false); 17521f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 1753a4bd5210SJason Evans return (ret); 1754f921d10fSJason Evans label_oom: 1755f921d10fSJason Evans 
assert(result == NULL); 1756d0e79aa3SJason Evans if (config_xmalloc && unlikely(opt_xmalloc)) { 1757f921d10fSJason Evans malloc_write("<jemalloc>: Error allocating aligned memory: " 1758f921d10fSJason Evans "out of memory\n"); 1759f921d10fSJason Evans abort(); 1760f921d10fSJason Evans } 1761f921d10fSJason Evans ret = ENOMEM; 17621f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 1763f921d10fSJason Evans goto label_return; 1764a4bd5210SJason Evans } 1765a4bd5210SJason Evans 1766d0e79aa3SJason Evans JEMALLOC_EXPORT int JEMALLOC_NOTHROW 1767d0e79aa3SJason Evans JEMALLOC_ATTR(nonnull(1)) 1768a4bd5210SJason Evans je_posix_memalign(void **memptr, size_t alignment, size_t size) 1769a4bd5210SJason Evans { 17701f0a49e8SJason Evans int ret; 17711f0a49e8SJason Evans 17721f0a49e8SJason Evans ret = imemalign(memptr, alignment, size, sizeof(void *)); 17731f0a49e8SJason Evans 1774a4bd5210SJason Evans return (ret); 1775a4bd5210SJason Evans } 1776a4bd5210SJason Evans 1777d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 1778d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 1779d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2) 1780a4bd5210SJason Evans je_aligned_alloc(size_t alignment, size_t size) 1781a4bd5210SJason Evans { 1782a4bd5210SJason Evans void *ret; 1783a4bd5210SJason Evans int err; 1784a4bd5210SJason Evans 1785d0e79aa3SJason Evans if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) { 1786a4bd5210SJason Evans ret = NULL; 1787e722f8f8SJason Evans set_errno(err); 1788a4bd5210SJason Evans } 17891f0a49e8SJason Evans 1790a4bd5210SJason Evans return (ret); 1791a4bd5210SJason Evans } 1792a4bd5210SJason Evans 1793d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 1794d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 1795d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2) 1796a4bd5210SJason Evans je_calloc(size_t num, size_t size) 1797a4bd5210SJason Evans { 1798a4bd5210SJason 
Evans void *ret; 17991f0a49e8SJason Evans tsdn_t *tsdn; 1800a4bd5210SJason Evans size_t num_size; 1801e722f8f8SJason Evans size_t usize JEMALLOC_CC_SILENCE_INIT(0); 1802a4bd5210SJason Evans 1803a4bd5210SJason Evans num_size = num * size; 1804d0e79aa3SJason Evans if (unlikely(num_size == 0)) { 1805a4bd5210SJason Evans if (num == 0 || size == 0) 1806a4bd5210SJason Evans num_size = 1; 18071f0a49e8SJason Evans else 18081f0a49e8SJason Evans num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */ 1809a4bd5210SJason Evans /* 1810a4bd5210SJason Evans * Try to avoid division here. We know that it isn't possible to 1811a4bd5210SJason Evans * overflow during multiplication if neither operand uses any of the 1812a4bd5210SJason Evans * most significant half of the bits in a size_t. 1813a4bd5210SJason Evans */ 1814d0e79aa3SJason Evans } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 18151f0a49e8SJason Evans 2))) && (num_size / size != num))) 18161f0a49e8SJason Evans num_size = HUGE_MAXCLASS + 1; /* size_t overflow. 
*/ 1817a4bd5210SJason Evans 18181f0a49e8SJason Evans if (likely(!malloc_slow)) { 18191f0a49e8SJason Evans ret = ialloc_body(num_size, true, &tsdn, &usize, false); 18201f0a49e8SJason Evans ialloc_post_check(ret, tsdn, usize, "calloc", true, false); 1821a4bd5210SJason Evans } else { 18221f0a49e8SJason Evans ret = ialloc_body(num_size, true, &tsdn, &usize, true); 18231f0a49e8SJason Evans ialloc_post_check(ret, tsdn, usize, "calloc", true, true); 18241f0a49e8SJason Evans UTRACE(0, num_size, ret); 182562b2691eSJason Evans JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true); 1826a4bd5210SJason Evans } 1827a4bd5210SJason Evans 1828a4bd5210SJason Evans return (ret); 1829a4bd5210SJason Evans } 1830a4bd5210SJason Evans 1831f921d10fSJason Evans static void * 1832536b3538SJason Evans irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize, 1833d0e79aa3SJason Evans prof_tctx_t *tctx) 1834a4bd5210SJason Evans { 1835f921d10fSJason Evans void *p; 1836a4bd5210SJason Evans 1837d0e79aa3SJason Evans if (tctx == NULL) 1838f921d10fSJason Evans return (NULL); 1839d0e79aa3SJason Evans if (usize <= SMALL_MAXCLASS) { 1840536b3538SJason Evans p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false); 1841f921d10fSJason Evans if (p == NULL) 1842f921d10fSJason Evans return (NULL); 18431f0a49e8SJason Evans arena_prof_promoted(tsd_tsdn(tsd), p, usize); 1844a4bd5210SJason Evans } else 1845536b3538SJason Evans p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); 1846f921d10fSJason Evans 1847f921d10fSJason Evans return (p); 1848a4bd5210SJason Evans } 1849a4bd5210SJason Evans 1850f921d10fSJason Evans JEMALLOC_ALWAYS_INLINE_C void * 1851536b3538SJason Evans irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize) 1852a4bd5210SJason Evans { 1853f921d10fSJason Evans void *p; 1854536b3538SJason Evans bool prof_active; 1855d0e79aa3SJason Evans prof_tctx_t *old_tctx, *tctx; 1856a4bd5210SJason Evans 1857536b3538SJason Evans prof_active = 
prof_active_get_unlocked(); 18581f0a49e8SJason Evans old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr); 1859536b3538SJason Evans tctx = prof_alloc_prep(tsd, usize, prof_active, true); 1860d0e79aa3SJason Evans if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) 1861536b3538SJason Evans p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx); 1862f921d10fSJason Evans else 1863536b3538SJason Evans p = iralloc(tsd, old_ptr, old_usize, usize, 0, false); 1864536b3538SJason Evans if (unlikely(p == NULL)) { 1865536b3538SJason Evans prof_alloc_rollback(tsd, tctx, true); 1866f921d10fSJason Evans return (NULL); 1867536b3538SJason Evans } 1868536b3538SJason Evans prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize, 1869536b3538SJason Evans old_tctx); 1870f921d10fSJason Evans 1871f921d10fSJason Evans return (p); 1872f921d10fSJason Evans } 1873f921d10fSJason Evans 1874f921d10fSJason Evans JEMALLOC_INLINE_C void 1875df0d881dSJason Evans ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path) 1876f921d10fSJason Evans { 1877a4bd5210SJason Evans size_t usize; 1878f921d10fSJason Evans UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); 1879a4bd5210SJason Evans 18801f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 18811f0a49e8SJason Evans 1882f921d10fSJason Evans assert(ptr != NULL); 1883d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 1884a4bd5210SJason Evans 1885a4bd5210SJason Evans if (config_prof && opt_prof) { 18861f0a49e8SJason Evans usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); 1887d0e79aa3SJason Evans prof_free(tsd, ptr, usize); 1888a4bd5210SJason Evans } else if (config_stats || config_valgrind) 18891f0a49e8SJason Evans usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); 1890a4bd5210SJason Evans if (config_stats) 1891d0e79aa3SJason Evans *tsd_thread_deallocatedp_get(tsd) += usize; 1892df0d881dSJason Evans 1893df0d881dSJason Evans if (likely(!slow_path)) 1894df0d881dSJason Evans iqalloc(tsd, ptr, tcache, false); 
1895df0d881dSJason Evans else { 1896d0e79aa3SJason Evans if (config_valgrind && unlikely(in_valgrind)) 18971f0a49e8SJason Evans rzsize = p2rz(tsd_tsdn(tsd), ptr); 1898df0d881dSJason Evans iqalloc(tsd, ptr, tcache, true); 1899a4bd5210SJason Evans JEMALLOC_VALGRIND_FREE(ptr, rzsize); 1900a4bd5210SJason Evans } 1901df0d881dSJason Evans } 1902f921d10fSJason Evans 1903d0e79aa3SJason Evans JEMALLOC_INLINE_C void 19041f0a49e8SJason Evans isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path) 1905d0e79aa3SJason Evans { 1906d0e79aa3SJason Evans UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); 1907d0e79aa3SJason Evans 19081f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 19091f0a49e8SJason Evans 1910d0e79aa3SJason Evans assert(ptr != NULL); 1911d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 1912d0e79aa3SJason Evans 1913d0e79aa3SJason Evans if (config_prof && opt_prof) 1914d0e79aa3SJason Evans prof_free(tsd, ptr, usize); 1915d0e79aa3SJason Evans if (config_stats) 1916d0e79aa3SJason Evans *tsd_thread_deallocatedp_get(tsd) += usize; 1917d0e79aa3SJason Evans if (config_valgrind && unlikely(in_valgrind)) 19181f0a49e8SJason Evans rzsize = p2rz(tsd_tsdn(tsd), ptr); 19191f0a49e8SJason Evans isqalloc(tsd, ptr, usize, tcache, slow_path); 1920d0e79aa3SJason Evans JEMALLOC_VALGRIND_FREE(ptr, rzsize); 1921d0e79aa3SJason Evans } 1922d0e79aa3SJason Evans 1923d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 1924d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 1925d0e79aa3SJason Evans JEMALLOC_ALLOC_SIZE(2) 1926f921d10fSJason Evans je_realloc(void *ptr, size_t size) 1927f921d10fSJason Evans { 1928f921d10fSJason Evans void *ret; 19291f0a49e8SJason Evans tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL); 1930f921d10fSJason Evans size_t usize JEMALLOC_CC_SILENCE_INIT(0); 1931f921d10fSJason Evans size_t old_usize = 0; 1932f921d10fSJason Evans UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); 1933f921d10fSJason 
Evans 1934d0e79aa3SJason Evans if (unlikely(size == 0)) { 1935f921d10fSJason Evans if (ptr != NULL) { 19361f0a49e8SJason Evans tsd_t *tsd; 19371f0a49e8SJason Evans 1938f921d10fSJason Evans /* realloc(ptr, 0) is equivalent to free(ptr). */ 1939f921d10fSJason Evans UTRACE(ptr, 0, 0); 1940d0e79aa3SJason Evans tsd = tsd_fetch(); 1941df0d881dSJason Evans ifree(tsd, ptr, tcache_get(tsd, false), true); 1942f921d10fSJason Evans return (NULL); 1943f921d10fSJason Evans } 1944f921d10fSJason Evans size = 1; 1945f921d10fSJason Evans } 1946f921d10fSJason Evans 1947d0e79aa3SJason Evans if (likely(ptr != NULL)) { 19481f0a49e8SJason Evans tsd_t *tsd; 19491f0a49e8SJason Evans 1950d0e79aa3SJason Evans assert(malloc_initialized() || IS_INITIALIZER); 1951f921d10fSJason Evans malloc_thread_init(); 1952d0e79aa3SJason Evans tsd = tsd_fetch(); 1953f921d10fSJason Evans 19541f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 19551f0a49e8SJason Evans 19561f0a49e8SJason Evans old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); 19571f0a49e8SJason Evans if (config_valgrind && unlikely(in_valgrind)) { 19581f0a49e8SJason Evans old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) : 19591f0a49e8SJason Evans u2rz(old_usize); 19601f0a49e8SJason Evans } 1961f921d10fSJason Evans 1962f921d10fSJason Evans if (config_prof && opt_prof) { 1963f921d10fSJason Evans usize = s2u(size); 1964df0d881dSJason Evans ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ? 1965df0d881dSJason Evans NULL : irealloc_prof(tsd, ptr, old_usize, usize); 1966f921d10fSJason Evans } else { 1967d0e79aa3SJason Evans if (config_stats || (config_valgrind && 1968d0e79aa3SJason Evans unlikely(in_valgrind))) 1969f921d10fSJason Evans usize = s2u(size); 1970d0e79aa3SJason Evans ret = iralloc(tsd, ptr, old_usize, size, 0, false); 1971f921d10fSJason Evans } 19721f0a49e8SJason Evans tsdn = tsd_tsdn(tsd); 1973f921d10fSJason Evans } else { 1974f921d10fSJason Evans /* realloc(NULL, size) is equivalent to malloc(size). 
*/ 1975df0d881dSJason Evans if (likely(!malloc_slow)) 19761f0a49e8SJason Evans ret = ialloc_body(size, false, &tsdn, &usize, false); 1977df0d881dSJason Evans else 19781f0a49e8SJason Evans ret = ialloc_body(size, false, &tsdn, &usize, true); 19791f0a49e8SJason Evans assert(!tsdn_null(tsdn) || ret == NULL); 1980f921d10fSJason Evans } 1981f921d10fSJason Evans 1982d0e79aa3SJason Evans if (unlikely(ret == NULL)) { 1983d0e79aa3SJason Evans if (config_xmalloc && unlikely(opt_xmalloc)) { 1984f921d10fSJason Evans malloc_write("<jemalloc>: Error in realloc(): " 1985f921d10fSJason Evans "out of memory\n"); 1986f921d10fSJason Evans abort(); 1987f921d10fSJason Evans } 1988f921d10fSJason Evans set_errno(ENOMEM); 1989f921d10fSJason Evans } 1990d0e79aa3SJason Evans if (config_stats && likely(ret != NULL)) { 19911f0a49e8SJason Evans tsd_t *tsd; 19921f0a49e8SJason Evans 19931f0a49e8SJason Evans assert(usize == isalloc(tsdn, ret, config_prof)); 19941f0a49e8SJason Evans tsd = tsdn_tsd(tsdn); 1995d0e79aa3SJason Evans *tsd_thread_allocatedp_get(tsd) += usize; 1996d0e79aa3SJason Evans *tsd_thread_deallocatedp_get(tsd) += old_usize; 1997f921d10fSJason Evans } 1998f921d10fSJason Evans UTRACE(ptr, size, ret); 1999*7fa7f12fSJason Evans JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr, 2000*7fa7f12fSJason Evans old_usize, old_rzsize, maybe, false); 20011f0a49e8SJason Evans witness_assert_lockless(tsdn); 2002f921d10fSJason Evans return (ret); 2003f921d10fSJason Evans } 2004f921d10fSJason Evans 2005d0e79aa3SJason Evans JEMALLOC_EXPORT void JEMALLOC_NOTHROW 2006f921d10fSJason Evans je_free(void *ptr) 2007f921d10fSJason Evans { 2008f921d10fSJason Evans 2009f921d10fSJason Evans UTRACE(ptr, 0, 0); 2010d0e79aa3SJason Evans if (likely(ptr != NULL)) { 2011d0e79aa3SJason Evans tsd_t *tsd = tsd_fetch(); 20121f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 2013df0d881dSJason Evans if (likely(!malloc_slow)) 2014df0d881dSJason Evans ifree(tsd, ptr, tcache_get(tsd, false), false); 
2015df0d881dSJason Evans else 2016df0d881dSJason Evans ifree(tsd, ptr, tcache_get(tsd, false), true); 20171f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 2018d0e79aa3SJason Evans } 2019a4bd5210SJason Evans } 2020a4bd5210SJason Evans 2021a4bd5210SJason Evans /* 2022a4bd5210SJason Evans * End malloc(3)-compatible functions. 2023a4bd5210SJason Evans */ 2024a4bd5210SJason Evans /******************************************************************************/ 2025a4bd5210SJason Evans /* 2026a4bd5210SJason Evans * Begin non-standard override functions. 2027a4bd5210SJason Evans */ 2028a4bd5210SJason Evans 2029a4bd5210SJason Evans #ifdef JEMALLOC_OVERRIDE_MEMALIGN 2030d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2031d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2032d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) 2033a4bd5210SJason Evans je_memalign(size_t alignment, size_t size) 2034a4bd5210SJason Evans { 2035a4bd5210SJason Evans void *ret JEMALLOC_CC_SILENCE_INIT(NULL); 2036d0e79aa3SJason Evans if (unlikely(imemalign(&ret, alignment, size, 1) != 0)) 2037d0e79aa3SJason Evans ret = NULL; 2038a4bd5210SJason Evans return (ret); 2039a4bd5210SJason Evans } 2040a4bd5210SJason Evans #endif 2041a4bd5210SJason Evans 2042a4bd5210SJason Evans #ifdef JEMALLOC_OVERRIDE_VALLOC 2043d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2044d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2045d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) 2046a4bd5210SJason Evans je_valloc(size_t size) 2047a4bd5210SJason Evans { 2048a4bd5210SJason Evans void *ret JEMALLOC_CC_SILENCE_INIT(NULL); 2049d0e79aa3SJason Evans if (unlikely(imemalign(&ret, PAGE, size, 1) != 0)) 2050d0e79aa3SJason Evans ret = NULL; 2051a4bd5210SJason Evans return (ret); 2052a4bd5210SJason Evans } 2053a4bd5210SJason Evans #endif 2054a4bd5210SJason Evans 2055a4bd5210SJason Evans /* 2056a4bd5210SJason Evans * is_malloc(je_malloc) is some macro magic to detect if 
jemalloc_defs.h has 2057a4bd5210SJason Evans * #define je_malloc malloc 2058a4bd5210SJason Evans */ 2059a4bd5210SJason Evans #define malloc_is_malloc 1 2060a4bd5210SJason Evans #define is_malloc_(a) malloc_is_ ## a 2061a4bd5210SJason Evans #define is_malloc(a) is_malloc_(a) 2062a4bd5210SJason Evans 2063d0e79aa3SJason Evans #if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK)) 2064a4bd5210SJason Evans /* 2065a4bd5210SJason Evans * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible 2066a4bd5210SJason Evans * to inconsistently reference libc's malloc(3)-compatible functions 2067a4bd5210SJason Evans * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). 2068a4bd5210SJason Evans * 2069a4bd5210SJason Evans * These definitions interpose hooks in glibc. The functions are actually 2070a4bd5210SJason Evans * passed an extra argument for the caller return address, which will be 2071a4bd5210SJason Evans * ignored. 2072a4bd5210SJason Evans */ 207382872ac0SJason Evans JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free; 207482872ac0SJason Evans JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc; 207582872ac0SJason Evans JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc; 2076d0e79aa3SJason Evans # ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK 207782872ac0SJason Evans JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) = 2078e722f8f8SJason Evans je_memalign; 2079a4bd5210SJason Evans # endif 2080bde95144SJason Evans 2081bde95144SJason Evans #ifdef CPU_COUNT 2082bde95144SJason Evans /* 2083bde95144SJason Evans * To enable static linking with glibc, the libc specific malloc interface must 2084bde95144SJason Evans * be implemented also, so none of glibc's malloc.o functions are added to the 2085bde95144SJason Evans * link. 
2086bde95144SJason Evans */ 2087bde95144SJason Evans #define ALIAS(je_fn) __attribute__((alias (#je_fn), used)) 2088bde95144SJason Evans /* To force macro expansion of je_ prefix before stringification. */ 2089bde95144SJason Evans #define PREALIAS(je_fn) ALIAS(je_fn) 2090bde95144SJason Evans void *__libc_malloc(size_t size) PREALIAS(je_malloc); 2091bde95144SJason Evans void __libc_free(void* ptr) PREALIAS(je_free); 2092bde95144SJason Evans void *__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc); 2093bde95144SJason Evans void *__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc); 2094bde95144SJason Evans void *__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign); 2095bde95144SJason Evans void *__libc_valloc(size_t size) PREALIAS(je_valloc); 2096bde95144SJason Evans int __posix_memalign(void** r, size_t a, size_t s) 2097bde95144SJason Evans PREALIAS(je_posix_memalign); 2098bde95144SJason Evans #undef PREALIAS 2099bde95144SJason Evans #undef ALIAS 2100bde95144SJason Evans 2101bde95144SJason Evans #endif 2102bde95144SJason Evans 2103d0e79aa3SJason Evans #endif 2104a4bd5210SJason Evans 2105a4bd5210SJason Evans /* 2106a4bd5210SJason Evans * End non-standard override functions. 2107a4bd5210SJason Evans */ 2108a4bd5210SJason Evans /******************************************************************************/ 2109a4bd5210SJason Evans /* 2110a4bd5210SJason Evans * Begin non-standard functions. 
 */

/*
 * Decode the MALLOCX_* bits in flags into the individual allocation
 * parameters (*usize, *alignment, *zero, *tcache, *arena).  Returns true on
 * error: the computed usize is 0 or exceeds HUGE_MAXCLASS, or an explicitly
 * requested arena does not exist.
 */
JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{

	if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
		*alignment = 0;
		*usize = s2u(size);
	} else {
		*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
		*usize = sa2u(size, *alignment);
	}
	if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
		return (true);
	*zero = MALLOCX_ZERO_GET(flags);
	if ((flags & MALLOCX_TCACHE_MASK) != 0) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			*tcache = NULL;
		else
			*tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		*tcache = tcache_get(tsd, true);
	if ((flags & MALLOCX_ARENA_MASK) != 0) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		*arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(*arena == NULL))
			return (true);
	} else
		*arena = NULL;
	return (false);
}

/*
 * Allocate usize bytes honoring the already-decoded parameters: aligned path
 * via ipalloct() when alignment != 0, otherwise size-class path via
 * iallocztm().
 */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena, bool slow_path)
{
	szind_t ind;

	if (unlikely(alignment != 0))
		return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
	ind = size2index(usize);
	assert(ind < NSIZES);
	return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena,
	    slow_path));
}

/*
 * Allocation path for profiling-sampled requests.  Small requests are
 * promoted to LARGE_MINCLASS so the sample can be tracked (see
 * arena_prof_promoted()); larger requests allocate normally.
 */
static void *
imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena, bool slow_path)
{
	void *p;

	if (usize <= SMALL_MAXCLASS) {
		assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
		    sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
		p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero,
		    tcache, arena, slow_path);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsdn, p, usize);
	} else {
		p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
		    slow_path);
	}

	return (p);
}

/*
 * mallocx() back end when profiling is enabled.  tctx == (uintptr_t)1U means
 * "not sampled"; larger tctx values select the sampled path.  On failure the
 * prof state recorded by prof_alloc_prep() is rolled back.
 */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
{
	void *p;
	size_t alignment;
	bool zero;
	tcache_t *tcache;
	arena_t *arena;
	prof_tctx_t *tctx;

	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
	    &zero, &tcache, &arena)))
		return (NULL);
	tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
	if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
		p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero,
		    tcache, arena, slow_path);
	} else if ((uintptr_t)tctx > (uintptr_t)1U) {
		p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero,
		    tcache, arena, slow_path);
	} else
		p = NULL;
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, *usize, tctx);

	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
	return (p);
}

/* mallocx() back end when profiling is disabled. */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
    bool slow_path)
{
	void *p;
	size_t alignment;
	bool zero;
	tcache_t *tcache;
	arena_t *arena;

	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
	    &zero, &tcache, &arena)))
		return (NULL);
	p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
	    arena, slow_path);
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
	return (p);
}

/* This function guarantees that *tsdn is non-NULL on success. */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
    bool slow_path)
{
	tsd_t *tsd;

	if (slow_path && unlikely(malloc_init())) {
		*tsdn = NULL;
		return (NULL);
	}

	tsd = tsd_fetch();
	*tsdn = tsd_tsdn(tsd);
	witness_assert_lockless(tsd_tsdn(tsd));

	/* flags == 0 is the common fast path: no alignment/zero/tcache/arena
	 * overrides, so the request reduces to a plain size-class alloc. */
	if (likely(flags == 0)) {
		szind_t ind = size2index(size);
		if (unlikely(ind >= NSIZES))
			return (NULL);
		if (config_stats || (config_prof && opt_prof) || (slow_path &&
		    config_valgrind && unlikely(in_valgrind))) {
			*usize = index2size(ind);
			assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
		}

		if (config_prof && opt_prof) {
			return (ialloc_prof(tsd, *usize, ind, false,
			    slow_path));
		}

		return (ialloc(tsd, size, ind, false, slow_path));
	}

	if (config_prof && opt_prof)
		return (imallocx_prof(tsd, size, flags, usize, slow_path));

	return (imallocx_no_prof(tsd, size, flags, usize, slow_path));
}

/*
 * Public mallocx(3) entry point.  The slow path additionally performs utrace
 * and Valgrind bookkeeping; both paths share imallocx_body().
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_mallocx(size_t size, int flags)
{
	tsdn_t *tsdn;
	void *p;
	size_t usize;

	assert(size != 0);

	if (likely(!malloc_slow)) {
		p = imallocx_body(size, flags, &tsdn, &usize, false);
		ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
	} else {
		p = imallocx_body(size, flags, &tsdn, &usize, true);
		ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
		UTRACE(0, size, p);
		JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
		    MALLOCX_ZERO_GET(flags));
	}

	return (p);
}

/*
 * Reallocation path for profiling-sampled requests; small targets are
 * promoted to LARGE_MINCLASS (mirrors imallocx_prof_sample()).
 */
static void *
irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
		    zero, tcache, arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else {
		p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
		    tcache, arena);
	}

	return (p);
}

/*
 * rallocx() back end when profiling is enabled.  Chooses the sampled or
 * normal reallocation path based on the tctx returned by prof_alloc_prep(),
 * rolls back prof state on failure, and reports the realloc via
 * prof_realloc().
 */
JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
    size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
    arena_t *arena)
{
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
	tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
		    alignment, zero, tcache, arena, tctx);
	} else {
		p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
		    tcache, arena);
	}
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, false);
		return (NULL);
	}

	if (p == old_ptr && alignment != 0) {
		/*
		 * The allocation did not move, so it is possible that the size
		 * class is smaller than would guarantee the requested
		 * alignment, and that the alignment constraint was
		 * serendipitously satisfied.  Additionally, old_usize may not
		 * be the same as the current usize because of in-place large
		 * reallocation.  Therefore, query the actual value of usize.
		 */
		*usize = isalloc(tsd_tsdn(tsd), p, config_prof);
	}
	prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
	    old_usize, old_tctx);

	return (p);
}

/*
 * Public rallocx(3) entry point: reallocate ptr to size bytes, honoring the
 * MALLOCX_* flags for alignment, zeroing, tcache, and arena selection.
 * Returns NULL on OOM (aborting first if opt_xmalloc is set).
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_rallocx(void *ptr, size_t size, int flags)
{
	void *p;
	tsd_t *tsd;
	size_t usize;
	size_t old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;
	arena_t *arena;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(size != 0);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));

	/* Resolve an explicitly requested arena, if any. */
	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(arena == NULL))
			goto label_oom;
	} else
		arena = NULL;

	/* Resolve the tcache override (NONE, explicit index, or default). */
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, true);

	old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
			goto label_oom;
		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
		    zero, tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
	} else {
		p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
		    tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
			usize = isalloc(tsd_tsdn(tsd), p, config_prof);
	}
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, p);
	JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr,
	    old_usize, old_rzsize, no, zero);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (p);
label_oom:
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
		abort();
	}
	UTRACE(ptr, size, 0);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (NULL);
}

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
	size_t usize;

	/* ixalloc() returning true means the in-place resize failed; report
	 * the unchanged old_usize per xallocx() semantics. */
	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
		return (old_usize);
	usize = isalloc(tsdn, ptr, config_prof);

	return (usize);
}

/* In-place resize path for profiling-sampled allocations. */
static size_t
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
{
	size_t usize;

	if (tctx == NULL)
		return (old_usize);
	usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
	    zero);

	return (usize);
}

/*
 * xallocx() back end when profiling is enabled.  Uses the maximum possible
 * resulting usize when deciding whether to sample, then reconciles via
 * prof_realloc() (or rolls back if the size did not change).
 */
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
	size_t usize_max, usize;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
	/*
	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
	 * Therefore, compute its maximum possible value and use that in
	 * prof_alloc_prep() to decide whether to capture a backtrace.
	 * prof_realloc() will use the actual usize to decide whether to sample.
	 */
	if (alignment == 0) {
		usize_max = s2u(size+extra);
		assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
	} else {
		usize_max = sa2u(size+extra, alignment);
		if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
			/*
			 * usize_max is out of range, and chances are that
			 * allocation will fail, but use the maximum possible
			 * value and carry on with prof_alloc_prep(), just in
			 * case allocation succeeds.
			 */
			usize_max = HUGE_MAXCLASS;
		}
	}
	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);

	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
		    size, extra, alignment, zero, tctx);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (usize == old_usize) {
		prof_alloc_rollback(tsd, tctx, false);
		return (usize);
	}
	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
	    old_tctx);

	return (usize);
}

/*
 * Public xallocx(3) entry point: attempt to resize ptr in place to at least
 * size bytes (up to size+extra).  Always returns the resulting usable size,
 * which equals the old size when no resize occurred.
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
	tsd_t *tsd;
	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;

	assert(ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));

	old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);

	/*
	 * The API explicitly absolves itself of protecting against (size +
	 * extra) numerical overflow, but we may need to clamp extra to avoid
	 * exceeding HUGE_MAXCLASS.
	 *
	 * Ordinarily, size limit checking is handled deeper down, but here we
	 * have to check as part of (size + extra) clamping, since we need the
	 * clamped value in the above helper functions.
	 */
	if (unlikely(size > HUGE_MAXCLASS)) {
		usize = old_usize;
		goto label_not_resized;
	}
	if (unlikely(HUGE_MAXCLASS - size < extra))
		extra = HUGE_MAXCLASS - size;

	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
		    alignment, zero);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (unlikely(usize == old_usize))
		goto label_not_resized;

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr,
	    old_usize, old_rzsize, no, zero);
label_not_resized:
	UTRACE(ptr, size, ptr);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (usize);
}

/* Public sallocx(3) entry point: report the usable size of ptr. */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags)
{
	size_t usize;
	tsdn_t *tsdn;

	assert(malloc_initialized() ||
	    IS_INITIALIZER);
	malloc_thread_init();

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	/* ivsalloc() additionally validates that ptr came from jemalloc when
	 * config_ivsalloc is enabled. */
	if (config_ivsalloc)
		usize = ivsalloc(tsdn, ptr, config_prof);
	else
		usize = isalloc(tsdn, ptr, config_prof);

	witness_assert_lockless(tsdn);
	return (usize);
}

/* Public dallocx(3) entry point: free ptr, honoring MALLOCX_TCACHE flags. */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_dallocx(void *ptr, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	if (likely(!malloc_slow))
		ifree(tsd, ptr, tcache, false);
	else
		ifree(tsd, ptr, tcache, true);
	witness_assert_lockless(tsd_tsdn(tsd));
}

/*
 * Compute the usable size that an allocation of the given size/flags would
 * have, without allocating.
 */
JEMALLOC_ALWAYS_INLINE_C size_t
inallocx(tsdn_t *tsdn, size_t size, int flags)
{
	size_t usize;

	witness_assert_lockless(tsdn);

	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
		usize = s2u(size);
	else
		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
	witness_assert_lockless(tsdn);
	return (usize);
}

/*
 * Public sdallocx(3) entry point: sized deallocation.  The asserted usize
 * must match the allocation's actual usable size.
 */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_sdallocx(void *ptr, size_t size, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);
	tsd = tsd_fetch();
	usize = inallocx(tsd_tsdn(tsd), size, flags);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));

	witness_assert_lockless(tsd_tsdn(tsd));
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	if (likely(!malloc_slow))
		isfree(tsd, ptr, usize, tcache, false);
	else
		isfree(tsd, ptr, usize, tcache, true);
	witness_assert_lockless(tsd_tsdn(tsd));
}

/*
 * Public nallocx(3) entry point: return the usable size an allocation with
 * these arguments would have, or 0 on error.
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_nallocx(size_t size, int flags)
{
	size_t usize;
	tsdn_t *tsdn;

	assert(size != 0);

	if (unlikely(malloc_init()))
		return (0);

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	usize = inallocx(tsdn, size, flags);
	if (unlikely(usize > HUGE_MAXCLASS))
		return (0);

	witness_assert_lockless(tsdn);
	return (usize);
}

/* Public mallctl(3) entry point: name-based control interface. */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	int ret;
	tsd_t *tsd;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
}

/* Public mallctlnametomib(3) entry point: translate a name to a MIB. */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
2713a4bd5210SJason Evans { 27141f0a49e8SJason Evans int ret; 27151f0a49e8SJason Evans tsdn_t *tsdn; 2716a4bd5210SJason Evans 2717d0e79aa3SJason Evans if (unlikely(malloc_init())) 2718a4bd5210SJason Evans return (EAGAIN); 2719a4bd5210SJason Evans 27201f0a49e8SJason Evans tsdn = tsdn_fetch(); 27211f0a49e8SJason Evans witness_assert_lockless(tsdn); 27221f0a49e8SJason Evans ret = ctl_nametomib(tsdn, name, mibp, miblenp); 27231f0a49e8SJason Evans witness_assert_lockless(tsdn); 27241f0a49e8SJason Evans return (ret); 2725a4bd5210SJason Evans } 2726a4bd5210SJason Evans 2727d0e79aa3SJason Evans JEMALLOC_EXPORT int JEMALLOC_NOTHROW 2728a4bd5210SJason Evans je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, 2729a4bd5210SJason Evans void *newp, size_t newlen) 2730a4bd5210SJason Evans { 27311f0a49e8SJason Evans int ret; 27321f0a49e8SJason Evans tsd_t *tsd; 2733a4bd5210SJason Evans 2734d0e79aa3SJason Evans if (unlikely(malloc_init())) 2735a4bd5210SJason Evans return (EAGAIN); 2736a4bd5210SJason Evans 27371f0a49e8SJason Evans tsd = tsd_fetch(); 27381f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 27391f0a49e8SJason Evans ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen); 27401f0a49e8SJason Evans witness_assert_lockless(tsd_tsdn(tsd)); 27411f0a49e8SJason Evans return (ret); 2742a4bd5210SJason Evans } 2743a4bd5210SJason Evans 2744d0e79aa3SJason Evans JEMALLOC_EXPORT void JEMALLOC_NOTHROW 2745f921d10fSJason Evans je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, 2746f921d10fSJason Evans const char *opts) 2747f921d10fSJason Evans { 27481f0a49e8SJason Evans tsdn_t *tsdn; 2749f921d10fSJason Evans 27501f0a49e8SJason Evans tsdn = tsdn_fetch(); 27511f0a49e8SJason Evans witness_assert_lockless(tsdn); 2752f921d10fSJason Evans stats_print(write_cb, cbopaque, opts); 27531f0a49e8SJason Evans witness_assert_lockless(tsdn); 2754f921d10fSJason Evans } 2755f921d10fSJason Evans 2756d0e79aa3SJason Evans JEMALLOC_EXPORT 
size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
	size_t ret;
	tsdn_t *tsdn;

	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	/*
	 * Without ivsalloc support, NULL maps to 0 and any other pointer is
	 * sized directly via isalloc().  With config_ivsalloc, ivsalloc()
	 * presumably also validates that ptr is an allocation -- TODO confirm
	 * against its definition elsewhere in the tree.
	 */
	if (config_ivsalloc)
		ret = ivsalloc(tsdn, ptr, config_prof);
	else
		ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);

	witness_assert_lockless(tsdn);
	return (ret);
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin compatibility functions.
 */

/*
 * Constants for the removed experimental *allocm() API, reproduced locally so
 * the compatibility shims below can keep accepting the historical flag values
 * and return codes.  They are #undef'ed again at the end of this section.
 */
#define	ALLOCM_LG_ALIGN(la)	(la)
#define	ALLOCM_ALIGN(a)	(ffsl(a)-1)
#define	ALLOCM_ZERO	((int)0x40)
#define	ALLOCM_NO_MOVE	((int)0x80)

#define	ALLOCM_SUCCESS		0
#define	ALLOCM_ERR_OOM		1
#define	ALLOCM_ERR_NOT_MOVED	2

/*
 * Legacy allocm() shim: allocate via je_mallocx(), store the result in *ptr,
 * and optionally report the usable size through *rsize.  Returns
 * ALLOCM_ERR_OOM on allocation failure.
 */
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;

	assert(ptr != NULL);

	p = je_mallocx(size, flags);
	if (p == NULL)
		return (ALLOCM_ERR_OOM);
	if (rsize != NULL)
		*rsize = isalloc(tsdn_fetch(), p, config_prof);
	*ptr = p;
	return (ALLOCM_SUCCESS);
}

/*
 * Legacy rallocm() shim: resize *ptr in place via je_xallocx() when
 * ALLOCM_NO_MOVE is set, otherwise reallocate (possibly moving) via
 * je_rallocx().  *rsize, if non-NULL, receives the resulting usable size.
 */
int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	int ret;
	bool no_move = flags & ALLOCM_NO_MOVE;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);	/* size + extra must not overflow. */

	if (no_move) {
		/* In-place only: xallocx() never moves the allocation. */
		size_t usize = je_xallocx(*ptr, size, extra, flags);
		ret = (usize >= size) ?
		    ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
		if (rsize != NULL)
			*rsize = usize;
	} else {
		void *p = je_rallocx(*ptr, size+extra, flags);
		if (p != NULL) {
			*ptr = p;
			ret = ALLOCM_SUCCESS;
		} else
			ret = ALLOCM_ERR_OOM;
		/*
		 * On failure *ptr still points at the original (valid)
		 * allocation, so sizing it here is correct in both branches.
		 */
		if (rsize != NULL)
			*rsize = isalloc(tsdn_fetch(), *ptr, config_prof);
	}
	return (ret);
}

/* Legacy sallocm() shim: report the usable size of ptr via je_sallocx(). */
int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{

	assert(rsize != NULL);
	*rsize = je_sallocx(ptr, flags);
	return (ALLOCM_SUCCESS);
}

/* Legacy dallocm() shim: deallocate via je_dallocx(); always succeeds. */
int
je_dallocm(void *ptr, int flags)
{

	je_dallocx(ptr, flags);
	return (ALLOCM_SUCCESS);
}

/*
 * Legacy nallocm() shim: compute the usable size a request would get, without
 * allocating.  je_nallocx() returning 0 signals an unsatisfiable request.
 */
int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;

	usize = je_nallocx(size, flags);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);
	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}

#undef ALLOCM_LG_ALIGN
#undef ALLOCM_ALIGN
#undef ALLOCM_ZERO
#undef ALLOCM_NO_MOVE

#undef ALLOCM_SUCCESS
#undef ALLOCM_ERR_OOM
#undef ALLOCM_ERR_NOT_MOVED

/*
 * End compatibility functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be possible
 * to trigger the deadlock described above, but doing so would involve forking
 * via a library constructor that runs before jemalloc's runs.
 */
#ifndef JEMALLOC_JET
/* Force allocator initialization at library-load time (see comment above). */
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

	malloc_init();
}
#endif

/*
 * pthread_atfork() prepare handler (exported as _malloc_prefork() when the
 * platform provides mutex-init callbacks): acquire every allocator mutex so
 * the child inherits them in a consistent, unlocked-after-postfork state.
 */
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, j, narenas;
	arena_t *arena;

#ifdef JEMALLOC_MUTEX_INIT_CB
	/* Threading libs may call this before jemalloc is initialized. */
	if (!malloc_initialized())
		return;
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	narenas = narenas_total_get();

	witness_prefork(tsd);
	/* Acquire all mutexes in a safe order.
 */
	ctl_prefork(tsd_tsdn(tsd));
	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
	prof_prefork0(tsd_tsdn(tsd));
	/*
	 * Three passes over all arenas: prefork phase 0, then 1, then 2 --
	 * each phase completes for every arena before the next phase starts,
	 * preserving the global lock-acquisition order.
	 */
	for (i = 0; i < 3; i++) {
		for (j = 0; j < narenas; j++) {
			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
			    NULL) {
				switch (i) {
				case 0:
					arena_prefork0(tsd_tsdn(tsd), arena);
					break;
				case 1:
					arena_prefork1(tsd_tsdn(tsd), arena);
					break;
				case 2:
					arena_prefork2(tsd_tsdn(tsd), arena);
					break;
				default: not_reached();
				}
			}
		}
	}
	base_prefork(tsd_tsdn(tsd));
	/* Final arena phase runs after base_prefork(). */
	for (i = 0; i < narenas; i++) {
		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_prefork3(tsd_tsdn(tsd), arena);
	}
	prof_prefork1(tsd_tsdn(tsd));
}

/*
 * pthread_atfork() parent handler (exported as _malloc_postfork() when the
 * platform provides mutex-init callbacks): release, in reverse order, the
 * mutexes acquired by the prefork handler above.
 */
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, narenas;

#ifdef JEMALLOC_MUTEX_INIT_CB
	/* Mirror the early return in the prefork handler. */
	if (!malloc_initialized())
		return;
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	witness_postfork_parent(tsd);
	/* Release all mutexes, now that fork() has completed. */
	base_postfork_parent(tsd_tsdn(tsd));
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_postfork_parent(tsd_tsdn(tsd), arena);
	}
	prof_postfork_parent(tsd_tsdn(tsd));
	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
	ctl_postfork_parent(tsd_tsdn(tsd));
}

/*
 * pthread_atfork() child handler: release the allocator mutexes in the child
 * process after fork().  Mirrors jemalloc_postfork_parent(), but uses the
 * *_postfork_child() variants throughout.
 */
void
jemalloc_postfork_child(void)
{
	tsd_t *tsd;
	unsigned i, narenas;

	assert(malloc_initialized());

	tsd = tsd_fetch();

	witness_postfork_child(tsd);
	/* Release all mutexes, now that fork() has completed.
 */
	base_postfork_child(tsd_tsdn(tsd));
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_postfork_child(tsd_tsdn(tsd), arena);
	}
	prof_postfork_child(tsd_tsdn(tsd));
	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
	ctl_postfork_child(tsd_tsdn(tsd));
}

/*
 * FreeBSD libc/libthr hook, called for the first thread.  Forwards to
 * malloc_mutex_first_thread(); the return value is deliberately ignored.
 */
void
_malloc_first_thread(void)
{

	(void)malloc_mutex_first_thread();
}

/******************************************************************************/