1a4bd5210SJason Evans #define JEMALLOC_C_ 2a4bd5210SJason Evans #include "jemalloc/internal/jemalloc_internal.h" 3a4bd5210SJason Evans 4a4bd5210SJason Evans /******************************************************************************/ 5a4bd5210SJason Evans /* Data. */ 6a4bd5210SJason Evans 74fdb8d2aSDimitry Andric /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */ 84fdb8d2aSDimitry Andric const char *__malloc_options_1_0 = NULL; 9a4bd5210SJason Evans __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0); 10a4bd5210SJason Evans 11a4bd5210SJason Evans /* Runtime configuration options. */ 12bde95144SJason Evans const char *je_malloc_conf 13bde95144SJason Evans #ifndef _WIN32 14bde95144SJason Evans JEMALLOC_ATTR(weak) 15bde95144SJason Evans #endif 16bde95144SJason Evans ; 1788ad2f8dSJason Evans bool opt_abort = 18a4bd5210SJason Evans #ifdef JEMALLOC_DEBUG 1988ad2f8dSJason Evans true 20a4bd5210SJason Evans #else 2188ad2f8dSJason Evans false 22a4bd5210SJason Evans #endif 2388ad2f8dSJason Evans ; 24d0e79aa3SJason Evans const char *opt_junk = 25d0e79aa3SJason Evans #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) 26d0e79aa3SJason Evans "true" 27d0e79aa3SJason Evans #else 28d0e79aa3SJason Evans "false" 29d0e79aa3SJason Evans #endif 30d0e79aa3SJason Evans ; 31d0e79aa3SJason Evans bool opt_junk_alloc = 3288ad2f8dSJason Evans #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) 3388ad2f8dSJason Evans true 34a4bd5210SJason Evans #else 3588ad2f8dSJason Evans false 36a4bd5210SJason Evans #endif 3788ad2f8dSJason Evans ; 38d0e79aa3SJason Evans bool opt_junk_free = 39d0e79aa3SJason Evans #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) 40d0e79aa3SJason Evans true 41d0e79aa3SJason Evans #else 42d0e79aa3SJason Evans false 43d0e79aa3SJason Evans #endif 44d0e79aa3SJason Evans ; 45d0e79aa3SJason Evans 46a4bd5210SJason Evans size_t opt_quarantine = ZU(0); 47a4bd5210SJason Evans bool opt_redzone = false; 48a4bd5210SJason Evans bool opt_utrace = 
false; 49a4bd5210SJason Evans bool opt_xmalloc = false; 50a4bd5210SJason Evans bool opt_zero = false; 51df0d881dSJason Evans unsigned opt_narenas = 0; 52a4bd5210SJason Evans 53d0e79aa3SJason Evans /* Initialized to true if the process is running inside Valgrind. */ 54d0e79aa3SJason Evans bool in_valgrind; 55d0e79aa3SJason Evans 56a4bd5210SJason Evans unsigned ncpus; 57a4bd5210SJason Evans 58df0d881dSJason Evans /* Protects arenas initialization. */ 59d0e79aa3SJason Evans static malloc_mutex_t arenas_lock; 60d0e79aa3SJason Evans /* 61d0e79aa3SJason Evans * Arenas that are used to service external requests. Not all elements of the 62d0e79aa3SJason Evans * arenas array are necessarily used; arenas are created lazily as needed. 63d0e79aa3SJason Evans * 64d0e79aa3SJason Evans * arenas[0..narenas_auto) are used for automatic multiplexing of threads and 65d0e79aa3SJason Evans * arenas. arenas[narenas_auto..narenas_total) are only used if the application 66d0e79aa3SJason Evans * takes some action to create them and allocate from them. 67d0e79aa3SJason Evans */ 68df0d881dSJason Evans arena_t **arenas; 69df0d881dSJason Evans static unsigned narenas_total; /* Use narenas_total_*(). */ 70d0e79aa3SJason Evans static arena_t *a0; /* arenas[0]; read-only after initialization. */ 711f0a49e8SJason Evans unsigned narenas_auto; /* Read-only after initialization. */ 72a4bd5210SJason Evans 73d0e79aa3SJason Evans typedef enum { 74d0e79aa3SJason Evans malloc_init_uninitialized = 3, 75d0e79aa3SJason Evans malloc_init_a0_initialized = 2, 76d0e79aa3SJason Evans malloc_init_recursible = 1, 77d0e79aa3SJason Evans malloc_init_initialized = 0 /* Common case --> jnz. */ 78d0e79aa3SJason Evans } malloc_init_t; 79d0e79aa3SJason Evans static malloc_init_t malloc_init_state = malloc_init_uninitialized; 80d0e79aa3SJason Evans 811f0a49e8SJason Evans /* False should be the common case. Set to true to trigger initialization. 
*/ 82df0d881dSJason Evans static bool malloc_slow = true; 83df0d881dSJason Evans 841f0a49e8SJason Evans /* When malloc_slow is true, set the corresponding bits for sanity check. */ 85df0d881dSJason Evans enum { 86df0d881dSJason Evans flag_opt_junk_alloc = (1U), 87df0d881dSJason Evans flag_opt_junk_free = (1U << 1), 88df0d881dSJason Evans flag_opt_quarantine = (1U << 2), 89df0d881dSJason Evans flag_opt_zero = (1U << 3), 90df0d881dSJason Evans flag_opt_utrace = (1U << 4), 91df0d881dSJason Evans flag_in_valgrind = (1U << 5), 92df0d881dSJason Evans flag_opt_xmalloc = (1U << 6) 93df0d881dSJason Evans }; 94df0d881dSJason Evans static uint8_t malloc_slow_flags; 95df0d881dSJason Evans 96d0e79aa3SJason Evans JEMALLOC_ALIGNED(CACHELINE) 97bde95144SJason Evans const size_t pind2sz_tab[NPSIZES] = { 98bde95144SJason Evans #define PSZ_yes(lg_grp, ndelta, lg_delta) \ 99bde95144SJason Evans (((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))), 100bde95144SJason Evans #define PSZ_no(lg_grp, ndelta, lg_delta) 101bde95144SJason Evans #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ 102bde95144SJason Evans PSZ_##psz(lg_grp, ndelta, lg_delta) 103bde95144SJason Evans SIZE_CLASSES 104bde95144SJason Evans #undef PSZ_yes 105bde95144SJason Evans #undef PSZ_no 106bde95144SJason Evans #undef SC 107bde95144SJason Evans }; 108bde95144SJason Evans 109bde95144SJason Evans JEMALLOC_ALIGNED(CACHELINE) 110bde95144SJason Evans const size_t index2size_tab[NSIZES] = { 111bde95144SJason Evans #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ 112d0e79aa3SJason Evans ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)), 113d0e79aa3SJason Evans SIZE_CLASSES 114d0e79aa3SJason Evans #undef SC 115d0e79aa3SJason Evans }; 116d0e79aa3SJason Evans 117d0e79aa3SJason Evans JEMALLOC_ALIGNED(CACHELINE) 118d0e79aa3SJason Evans const uint8_t size2index_tab[] = { 119d0e79aa3SJason Evans #if LG_TINY_MIN == 0 120d0e79aa3SJason Evans #warning "Dangerous LG_TINY_MIN" 121d0e79aa3SJason Evans 
#define S2B_0(i) i, 122d0e79aa3SJason Evans #elif LG_TINY_MIN == 1 123d0e79aa3SJason Evans #warning "Dangerous LG_TINY_MIN" 124d0e79aa3SJason Evans #define S2B_1(i) i, 125d0e79aa3SJason Evans #elif LG_TINY_MIN == 2 126d0e79aa3SJason Evans #warning "Dangerous LG_TINY_MIN" 127d0e79aa3SJason Evans #define S2B_2(i) i, 128d0e79aa3SJason Evans #elif LG_TINY_MIN == 3 129d0e79aa3SJason Evans #define S2B_3(i) i, 130d0e79aa3SJason Evans #elif LG_TINY_MIN == 4 131d0e79aa3SJason Evans #define S2B_4(i) i, 132d0e79aa3SJason Evans #elif LG_TINY_MIN == 5 133d0e79aa3SJason Evans #define S2B_5(i) i, 134d0e79aa3SJason Evans #elif LG_TINY_MIN == 6 135d0e79aa3SJason Evans #define S2B_6(i) i, 136d0e79aa3SJason Evans #elif LG_TINY_MIN == 7 137d0e79aa3SJason Evans #define S2B_7(i) i, 138d0e79aa3SJason Evans #elif LG_TINY_MIN == 8 139d0e79aa3SJason Evans #define S2B_8(i) i, 140d0e79aa3SJason Evans #elif LG_TINY_MIN == 9 141d0e79aa3SJason Evans #define S2B_9(i) i, 142d0e79aa3SJason Evans #elif LG_TINY_MIN == 10 143d0e79aa3SJason Evans #define S2B_10(i) i, 144d0e79aa3SJason Evans #elif LG_TINY_MIN == 11 145d0e79aa3SJason Evans #define S2B_11(i) i, 146d0e79aa3SJason Evans #else 147d0e79aa3SJason Evans #error "Unsupported LG_TINY_MIN" 148d0e79aa3SJason Evans #endif 149d0e79aa3SJason Evans #if LG_TINY_MIN < 1 150d0e79aa3SJason Evans #define S2B_1(i) S2B_0(i) S2B_0(i) 151d0e79aa3SJason Evans #endif 152d0e79aa3SJason Evans #if LG_TINY_MIN < 2 153d0e79aa3SJason Evans #define S2B_2(i) S2B_1(i) S2B_1(i) 154d0e79aa3SJason Evans #endif 155d0e79aa3SJason Evans #if LG_TINY_MIN < 3 156d0e79aa3SJason Evans #define S2B_3(i) S2B_2(i) S2B_2(i) 157d0e79aa3SJason Evans #endif 158d0e79aa3SJason Evans #if LG_TINY_MIN < 4 159d0e79aa3SJason Evans #define S2B_4(i) S2B_3(i) S2B_3(i) 160d0e79aa3SJason Evans #endif 161d0e79aa3SJason Evans #if LG_TINY_MIN < 5 162d0e79aa3SJason Evans #define S2B_5(i) S2B_4(i) S2B_4(i) 163d0e79aa3SJason Evans #endif 164d0e79aa3SJason Evans #if LG_TINY_MIN < 6 165d0e79aa3SJason Evans 
#define S2B_6(i) S2B_5(i) S2B_5(i) 166d0e79aa3SJason Evans #endif 167d0e79aa3SJason Evans #if LG_TINY_MIN < 7 168d0e79aa3SJason Evans #define S2B_7(i) S2B_6(i) S2B_6(i) 169d0e79aa3SJason Evans #endif 170d0e79aa3SJason Evans #if LG_TINY_MIN < 8 171d0e79aa3SJason Evans #define S2B_8(i) S2B_7(i) S2B_7(i) 172d0e79aa3SJason Evans #endif 173d0e79aa3SJason Evans #if LG_TINY_MIN < 9 174d0e79aa3SJason Evans #define S2B_9(i) S2B_8(i) S2B_8(i) 175d0e79aa3SJason Evans #endif 176d0e79aa3SJason Evans #if LG_TINY_MIN < 10 177d0e79aa3SJason Evans #define S2B_10(i) S2B_9(i) S2B_9(i) 178d0e79aa3SJason Evans #endif 179d0e79aa3SJason Evans #if LG_TINY_MIN < 11 180d0e79aa3SJason Evans #define S2B_11(i) S2B_10(i) S2B_10(i) 181d0e79aa3SJason Evans #endif 182d0e79aa3SJason Evans #define S2B_no(i) 183bde95144SJason Evans #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \ 184d0e79aa3SJason Evans S2B_##lg_delta_lookup(index) 185d0e79aa3SJason Evans SIZE_CLASSES 186d0e79aa3SJason Evans #undef S2B_3 187d0e79aa3SJason Evans #undef S2B_4 188d0e79aa3SJason Evans #undef S2B_5 189d0e79aa3SJason Evans #undef S2B_6 190d0e79aa3SJason Evans #undef S2B_7 191d0e79aa3SJason Evans #undef S2B_8 192d0e79aa3SJason Evans #undef S2B_9 193d0e79aa3SJason Evans #undef S2B_10 194d0e79aa3SJason Evans #undef S2B_11 195d0e79aa3SJason Evans #undef S2B_no 196d0e79aa3SJason Evans #undef SC 197d0e79aa3SJason Evans }; 198a4bd5210SJason Evans 199a4bd5210SJason Evans #ifdef JEMALLOC_THREADED_INIT 200a4bd5210SJason Evans /* Used to let the initializing thread recursively allocate. 
*/
201a4bd5210SJason Evans # define NO_INITIALIZER ((unsigned long)0)
202a4bd5210SJason Evans # define INITIALIZER pthread_self()
203a4bd5210SJason Evans # define IS_INITIALIZER (malloc_initializer == pthread_self())
204a4bd5210SJason Evans static pthread_t malloc_initializer = NO_INITIALIZER;
205a4bd5210SJason Evans #else
206a4bd5210SJason Evans # define NO_INITIALIZER false
207a4bd5210SJason Evans # define INITIALIZER true
208a4bd5210SJason Evans # define IS_INITIALIZER malloc_initializer
209a4bd5210SJason Evans static bool malloc_initializer = NO_INITIALIZER;
210a4bd5210SJason Evans #endif
211a4bd5210SJason Evans 
212a4bd5210SJason Evans /* Used to avoid initialization races. */
213e722f8f8SJason Evans #ifdef _WIN32
214d0e79aa3SJason Evans #if _WIN32_WINNT >= 0x0600
215d0e79aa3SJason Evans static malloc_mutex_t init_lock = SRWLOCK_INIT;
216d0e79aa3SJason Evans #else
217e722f8f8SJason Evans static malloc_mutex_t init_lock;
218536b3538SJason Evans static bool init_lock_initialized = false;
219e722f8f8SJason Evans 
220e722f8f8SJason Evans JEMALLOC_ATTR(constructor)
221e722f8f8SJason Evans static void WINAPI
222e722f8f8SJason Evans _init_init_lock(void)
223e722f8f8SJason Evans {
224e722f8f8SJason Evans 
225536b3538SJason Evans 	/* If another constructor in the same binary is using mallctl to
226536b3538SJason Evans 	 * e.g. setup chunk hooks, it may end up running before this one,
227536b3538SJason Evans 	 * and malloc_init_hard will crash trying to lock the uninitialized
228536b3538SJason Evans 	 * lock. So we force an initialization of the lock in
229536b3538SJason Evans 	 * malloc_init_hard as well. We don't try to care about atomicity
230536b3538SJason Evans 	 * of the accesses to the init_lock_initialized boolean, since it
231536b3538SJason Evans 	 * really only matters early in the process creation, before any
232536b3538SJason Evans 	 * separate thread normally starts doing anything.
*/ 233536b3538SJason Evans if (!init_lock_initialized) 2341f0a49e8SJason Evans malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT); 235536b3538SJason Evans init_lock_initialized = true; 236e722f8f8SJason Evans } 237e722f8f8SJason Evans 238e722f8f8SJason Evans #ifdef _MSC_VER 239e722f8f8SJason Evans # pragma section(".CRT$XCU", read) 240e722f8f8SJason Evans JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) 241e722f8f8SJason Evans static const void (WINAPI *init_init_lock)(void) = _init_init_lock; 242e722f8f8SJason Evans #endif 243d0e79aa3SJason Evans #endif 244e722f8f8SJason Evans #else 245a4bd5210SJason Evans static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; 246e722f8f8SJason Evans #endif 247a4bd5210SJason Evans 248a4bd5210SJason Evans typedef struct { 249a4bd5210SJason Evans void *p; /* Input pointer (as in realloc(p, s)). */ 250a4bd5210SJason Evans size_t s; /* Request size. */ 251a4bd5210SJason Evans void *r; /* Result pointer. */ 252a4bd5210SJason Evans } malloc_utrace_t; 253a4bd5210SJason Evans 254a4bd5210SJason Evans #ifdef JEMALLOC_UTRACE 255a4bd5210SJason Evans # define UTRACE(a, b, c) do { \ 256d0e79aa3SJason Evans if (unlikely(opt_utrace)) { \ 25788ad2f8dSJason Evans int utrace_serrno = errno; \ 258a4bd5210SJason Evans malloc_utrace_t ut; \ 259a4bd5210SJason Evans ut.p = (a); \ 260a4bd5210SJason Evans ut.s = (b); \ 261a4bd5210SJason Evans ut.r = (c); \ 262a4bd5210SJason Evans utrace(&ut, sizeof(ut)); \ 26388ad2f8dSJason Evans errno = utrace_serrno; \ 264a4bd5210SJason Evans } \ 265a4bd5210SJason Evans } while (0) 266a4bd5210SJason Evans #else 267a4bd5210SJason Evans # define UTRACE(a, b, c) 268a4bd5210SJason Evans #endif 269a4bd5210SJason Evans 270a4bd5210SJason Evans /******************************************************************************/ 271f921d10fSJason Evans /* 272f921d10fSJason Evans * Function prototypes for static functions that are referenced prior to 273f921d10fSJason Evans * definition. 
274f921d10fSJason Evans */ 275a4bd5210SJason Evans 276d0e79aa3SJason Evans static bool malloc_init_hard_a0(void); 277a4bd5210SJason Evans static bool malloc_init_hard(void); 278a4bd5210SJason Evans 279a4bd5210SJason Evans /******************************************************************************/ 280a4bd5210SJason Evans /* 281a4bd5210SJason Evans * Begin miscellaneous support functions. 282a4bd5210SJason Evans */ 283a4bd5210SJason Evans 284d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C bool 285d0e79aa3SJason Evans malloc_initialized(void) 286a4bd5210SJason Evans { 287a4bd5210SJason Evans 288d0e79aa3SJason Evans return (malloc_init_state == malloc_init_initialized); 289a4bd5210SJason Evans } 290d0e79aa3SJason Evans 291d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C void 292d0e79aa3SJason Evans malloc_thread_init(void) 293d0e79aa3SJason Evans { 294a4bd5210SJason Evans 295a4bd5210SJason Evans /* 296d0e79aa3SJason Evans * TSD initialization can't be safely done as a side effect of 297d0e79aa3SJason Evans * deallocation, because it is possible for a thread to do nothing but 298d0e79aa3SJason Evans * deallocate its TLS data via free(), in which case writing to TLS 299d0e79aa3SJason Evans * would cause write-after-free memory corruption. The quarantine 300d0e79aa3SJason Evans * facility *only* gets used as a side effect of deallocation, so make 301d0e79aa3SJason Evans * a best effort attempt at initializing its TSD by hooking all 302d0e79aa3SJason Evans * allocation events. 
303a4bd5210SJason Evans */ 304d0e79aa3SJason Evans if (config_fill && unlikely(opt_quarantine)) 305d0e79aa3SJason Evans quarantine_alloc_hook(); 306a4bd5210SJason Evans } 307a4bd5210SJason Evans 308d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C bool 309d0e79aa3SJason Evans malloc_init_a0(void) 310d0e79aa3SJason Evans { 311d0e79aa3SJason Evans 312d0e79aa3SJason Evans if (unlikely(malloc_init_state == malloc_init_uninitialized)) 313d0e79aa3SJason Evans return (malloc_init_hard_a0()); 314d0e79aa3SJason Evans return (false); 315d0e79aa3SJason Evans } 316d0e79aa3SJason Evans 317d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C bool 318d0e79aa3SJason Evans malloc_init(void) 319d0e79aa3SJason Evans { 320d0e79aa3SJason Evans 321d0e79aa3SJason Evans if (unlikely(!malloc_initialized()) && malloc_init_hard()) 322d0e79aa3SJason Evans return (true); 323d0e79aa3SJason Evans malloc_thread_init(); 324d0e79aa3SJason Evans 325d0e79aa3SJason Evans return (false); 326d0e79aa3SJason Evans } 327d0e79aa3SJason Evans 328d0e79aa3SJason Evans /* 3291f0a49e8SJason Evans * The a0*() functions are used instead of i{d,}alloc() in situations that 330d0e79aa3SJason Evans * cannot tolerate TLS variable access. 
331d0e79aa3SJason Evans  */
332d0e79aa3SJason Evans 
333d0e79aa3SJason Evans static void *
334d0e79aa3SJason Evans a0ialloc(size_t size, bool zero, bool is_metadata)
335d0e79aa3SJason Evans {
336d0e79aa3SJason Evans 
337d0e79aa3SJason Evans 	if (unlikely(malloc_init_a0()))
338d0e79aa3SJason Evans 		return (NULL);
339d0e79aa3SJason Evans 
3401f0a49e8SJason Evans 	return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
3411f0a49e8SJason Evans 	    is_metadata, arena_get(TSDN_NULL, 0, true), true));
342d0e79aa3SJason Evans }
343d0e79aa3SJason Evans 
344d0e79aa3SJason Evans static void
345d0e79aa3SJason Evans a0idalloc(void *ptr, bool is_metadata)
346d0e79aa3SJason Evans {
347d0e79aa3SJason Evans 
3481f0a49e8SJason Evans 	idalloctm(TSDN_NULL, ptr, false, is_metadata, true);
349d0e79aa3SJason Evans }
350d0e79aa3SJason Evans 
351bde95144SJason Evans arena_t *
352bde95144SJason Evans a0get(void)
353bde95144SJason Evans {
354bde95144SJason Evans 
355bde95144SJason Evans 	return (a0);
356bde95144SJason Evans }
357bde95144SJason Evans 
358d0e79aa3SJason Evans void *
359d0e79aa3SJason Evans a0malloc(size_t size)
360d0e79aa3SJason Evans {
361d0e79aa3SJason Evans 
362d0e79aa3SJason Evans 	return (a0ialloc(size, false, true));
363d0e79aa3SJason Evans }
364d0e79aa3SJason Evans 
365d0e79aa3SJason Evans void
366d0e79aa3SJason Evans a0dalloc(void *ptr)
367d0e79aa3SJason Evans {
368d0e79aa3SJason Evans 
369d0e79aa3SJason Evans 	a0idalloc(ptr, true);
370d0e79aa3SJason Evans }
371d0e79aa3SJason Evans 
372d0e79aa3SJason Evans /*
373d0e79aa3SJason Evans  * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
374d0e79aa3SJason Evans  * situations that cannot tolerate TLS variable access (TLS allocation and very
375d0e79aa3SJason Evans  * early internal data structure initialization).
376d0e79aa3SJason Evans */ 377d0e79aa3SJason Evans 378d0e79aa3SJason Evans void * 379d0e79aa3SJason Evans bootstrap_malloc(size_t size) 380d0e79aa3SJason Evans { 381d0e79aa3SJason Evans 382d0e79aa3SJason Evans if (unlikely(size == 0)) 383d0e79aa3SJason Evans size = 1; 384d0e79aa3SJason Evans 385d0e79aa3SJason Evans return (a0ialloc(size, false, false)); 386d0e79aa3SJason Evans } 387d0e79aa3SJason Evans 388d0e79aa3SJason Evans void * 389d0e79aa3SJason Evans bootstrap_calloc(size_t num, size_t size) 390d0e79aa3SJason Evans { 391d0e79aa3SJason Evans size_t num_size; 392d0e79aa3SJason Evans 393d0e79aa3SJason Evans num_size = num * size; 394d0e79aa3SJason Evans if (unlikely(num_size == 0)) { 395d0e79aa3SJason Evans assert(num == 0 || size == 0); 396d0e79aa3SJason Evans num_size = 1; 397d0e79aa3SJason Evans } 398d0e79aa3SJason Evans 399d0e79aa3SJason Evans return (a0ialloc(num_size, true, false)); 400d0e79aa3SJason Evans } 401d0e79aa3SJason Evans 402d0e79aa3SJason Evans void 403d0e79aa3SJason Evans bootstrap_free(void *ptr) 404d0e79aa3SJason Evans { 405d0e79aa3SJason Evans 406d0e79aa3SJason Evans if (unlikely(ptr == NULL)) 407d0e79aa3SJason Evans return; 408d0e79aa3SJason Evans 409d0e79aa3SJason Evans a0idalloc(ptr, false); 410d0e79aa3SJason Evans } 411d0e79aa3SJason Evans 412df0d881dSJason Evans static void 413df0d881dSJason Evans arena_set(unsigned ind, arena_t *arena) 414df0d881dSJason Evans { 415df0d881dSJason Evans 416df0d881dSJason Evans atomic_write_p((void **)&arenas[ind], arena); 417df0d881dSJason Evans } 418df0d881dSJason Evans 419df0d881dSJason Evans static void 420df0d881dSJason Evans narenas_total_set(unsigned narenas) 421df0d881dSJason Evans { 422df0d881dSJason Evans 423df0d881dSJason Evans atomic_write_u(&narenas_total, narenas); 424df0d881dSJason Evans } 425df0d881dSJason Evans 426df0d881dSJason Evans static void 427df0d881dSJason Evans narenas_total_inc(void) 428df0d881dSJason Evans { 429df0d881dSJason Evans 430df0d881dSJason Evans 
atomic_add_u(&narenas_total, 1); 431df0d881dSJason Evans } 432df0d881dSJason Evans 433df0d881dSJason Evans unsigned 434df0d881dSJason Evans narenas_total_get(void) 435df0d881dSJason Evans { 436df0d881dSJason Evans 437df0d881dSJason Evans return (atomic_read_u(&narenas_total)); 438df0d881dSJason Evans } 439df0d881dSJason Evans 440d0e79aa3SJason Evans /* Create a new arena and insert it into the arenas array at index ind. */ 441d0e79aa3SJason Evans static arena_t * 4421f0a49e8SJason Evans arena_init_locked(tsdn_t *tsdn, unsigned ind) 443d0e79aa3SJason Evans { 444d0e79aa3SJason Evans arena_t *arena; 445d0e79aa3SJason Evans 446df0d881dSJason Evans assert(ind <= narenas_total_get()); 447d0e79aa3SJason Evans if (ind > MALLOCX_ARENA_MAX) 448d0e79aa3SJason Evans return (NULL); 449df0d881dSJason Evans if (ind == narenas_total_get()) 450df0d881dSJason Evans narenas_total_inc(); 451d0e79aa3SJason Evans 452d0e79aa3SJason Evans /* 453d0e79aa3SJason Evans * Another thread may have already initialized arenas[ind] if it's an 454d0e79aa3SJason Evans * auto arena. 455d0e79aa3SJason Evans */ 4561f0a49e8SJason Evans arena = arena_get(tsdn, ind, false); 457d0e79aa3SJason Evans if (arena != NULL) { 458d0e79aa3SJason Evans assert(ind < narenas_auto); 459d0e79aa3SJason Evans return (arena); 460d0e79aa3SJason Evans } 461d0e79aa3SJason Evans 462d0e79aa3SJason Evans /* Actually initialize the arena. 
*/ 4631f0a49e8SJason Evans arena = arena_new(tsdn, ind); 464df0d881dSJason Evans arena_set(ind, arena); 465d0e79aa3SJason Evans return (arena); 466d0e79aa3SJason Evans } 467d0e79aa3SJason Evans 468d0e79aa3SJason Evans arena_t * 4691f0a49e8SJason Evans arena_init(tsdn_t *tsdn, unsigned ind) 470d0e79aa3SJason Evans { 471d0e79aa3SJason Evans arena_t *arena; 472d0e79aa3SJason Evans 4731f0a49e8SJason Evans malloc_mutex_lock(tsdn, &arenas_lock); 4741f0a49e8SJason Evans arena = arena_init_locked(tsdn, ind); 4751f0a49e8SJason Evans malloc_mutex_unlock(tsdn, &arenas_lock); 476d0e79aa3SJason Evans return (arena); 477d0e79aa3SJason Evans } 478d0e79aa3SJason Evans 479d0e79aa3SJason Evans static void 4801f0a49e8SJason Evans arena_bind(tsd_t *tsd, unsigned ind, bool internal) 481d0e79aa3SJason Evans { 482df0d881dSJason Evans arena_t *arena; 483d0e79aa3SJason Evans 484bde95144SJason Evans if (!tsd_nominal(tsd)) 485bde95144SJason Evans return; 486bde95144SJason Evans 4871f0a49e8SJason Evans arena = arena_get(tsd_tsdn(tsd), ind, false); 4881f0a49e8SJason Evans arena_nthreads_inc(arena, internal); 489df0d881dSJason Evans 4901f0a49e8SJason Evans if (internal) 4911f0a49e8SJason Evans tsd_iarena_set(tsd, arena); 4921f0a49e8SJason Evans else 493df0d881dSJason Evans tsd_arena_set(tsd, arena); 494d0e79aa3SJason Evans } 495d0e79aa3SJason Evans 496d0e79aa3SJason Evans void 497d0e79aa3SJason Evans arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) 498d0e79aa3SJason Evans { 499d0e79aa3SJason Evans arena_t *oldarena, *newarena; 500d0e79aa3SJason Evans 5011f0a49e8SJason Evans oldarena = arena_get(tsd_tsdn(tsd), oldind, false); 5021f0a49e8SJason Evans newarena = arena_get(tsd_tsdn(tsd), newind, false); 5031f0a49e8SJason Evans arena_nthreads_dec(oldarena, false); 5041f0a49e8SJason Evans arena_nthreads_inc(newarena, false); 505d0e79aa3SJason Evans tsd_arena_set(tsd, newarena); 506d0e79aa3SJason Evans } 507d0e79aa3SJason Evans 508d0e79aa3SJason Evans static void 5091f0a49e8SJason Evans 
arena_unbind(tsd_t *tsd, unsigned ind, bool internal) 510d0e79aa3SJason Evans { 511d0e79aa3SJason Evans arena_t *arena; 512d0e79aa3SJason Evans 5131f0a49e8SJason Evans arena = arena_get(tsd_tsdn(tsd), ind, false); 5141f0a49e8SJason Evans arena_nthreads_dec(arena, internal); 5151f0a49e8SJason Evans if (internal) 5161f0a49e8SJason Evans tsd_iarena_set(tsd, NULL); 5171f0a49e8SJason Evans else 518d0e79aa3SJason Evans tsd_arena_set(tsd, NULL); 519d0e79aa3SJason Evans } 520d0e79aa3SJason Evans 521df0d881dSJason Evans arena_tdata_t * 522df0d881dSJason Evans arena_tdata_get_hard(tsd_t *tsd, unsigned ind) 523d0e79aa3SJason Evans { 524df0d881dSJason Evans arena_tdata_t *tdata, *arenas_tdata_old; 525df0d881dSJason Evans arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); 526df0d881dSJason Evans unsigned narenas_tdata_old, i; 527df0d881dSJason Evans unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); 528d0e79aa3SJason Evans unsigned narenas_actual = narenas_total_get(); 529d0e79aa3SJason Evans 530d0e79aa3SJason Evans /* 531df0d881dSJason Evans * Dissociate old tdata array (and set up for deallocation upon return) 532df0d881dSJason Evans * if it's too small. 533d0e79aa3SJason Evans */ 534df0d881dSJason Evans if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { 535df0d881dSJason Evans arenas_tdata_old = arenas_tdata; 536df0d881dSJason Evans narenas_tdata_old = narenas_tdata; 537df0d881dSJason Evans arenas_tdata = NULL; 538df0d881dSJason Evans narenas_tdata = 0; 539df0d881dSJason Evans tsd_arenas_tdata_set(tsd, arenas_tdata); 540df0d881dSJason Evans tsd_narenas_tdata_set(tsd, narenas_tdata); 541df0d881dSJason Evans } else { 542df0d881dSJason Evans arenas_tdata_old = NULL; 543df0d881dSJason Evans narenas_tdata_old = 0; 544d0e79aa3SJason Evans } 545df0d881dSJason Evans 546df0d881dSJason Evans /* Allocate tdata array if it's missing. 
*/ 547df0d881dSJason Evans if (arenas_tdata == NULL) { 548df0d881dSJason Evans bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); 549df0d881dSJason Evans narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; 550df0d881dSJason Evans 551df0d881dSJason Evans if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { 552df0d881dSJason Evans *arenas_tdata_bypassp = true; 553df0d881dSJason Evans arenas_tdata = (arena_tdata_t *)a0malloc( 554df0d881dSJason Evans sizeof(arena_tdata_t) * narenas_tdata); 555df0d881dSJason Evans *arenas_tdata_bypassp = false; 556df0d881dSJason Evans } 557df0d881dSJason Evans if (arenas_tdata == NULL) { 558df0d881dSJason Evans tdata = NULL; 559df0d881dSJason Evans goto label_return; 560df0d881dSJason Evans } 561df0d881dSJason Evans assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); 562df0d881dSJason Evans tsd_arenas_tdata_set(tsd, arenas_tdata); 563df0d881dSJason Evans tsd_narenas_tdata_set(tsd, narenas_tdata); 564d0e79aa3SJason Evans } 565d0e79aa3SJason Evans 566d0e79aa3SJason Evans /* 567df0d881dSJason Evans * Copy to tdata array. It's possible that the actual number of arenas 568df0d881dSJason Evans * has increased since narenas_total_get() was called above, but that 569df0d881dSJason Evans * causes no correctness issues unless two threads concurrently execute 570df0d881dSJason Evans * the arenas.extend mallctl, which we trust mallctl synchronization to 571d0e79aa3SJason Evans * prevent. 572d0e79aa3SJason Evans */ 573df0d881dSJason Evans 574df0d881dSJason Evans /* Copy/initialize tickers. 
*/ 575df0d881dSJason Evans for (i = 0; i < narenas_actual; i++) { 576df0d881dSJason Evans if (i < narenas_tdata_old) { 577df0d881dSJason Evans ticker_copy(&arenas_tdata[i].decay_ticker, 578df0d881dSJason Evans &arenas_tdata_old[i].decay_ticker); 579df0d881dSJason Evans } else { 580df0d881dSJason Evans ticker_init(&arenas_tdata[i].decay_ticker, 581df0d881dSJason Evans DECAY_NTICKS_PER_UPDATE); 582df0d881dSJason Evans } 583df0d881dSJason Evans } 584df0d881dSJason Evans if (narenas_tdata > narenas_actual) { 585df0d881dSJason Evans memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) 586df0d881dSJason Evans * (narenas_tdata - narenas_actual)); 587d0e79aa3SJason Evans } 588d0e79aa3SJason Evans 589df0d881dSJason Evans /* Read the refreshed tdata array. */ 590df0d881dSJason Evans tdata = &arenas_tdata[ind]; 591df0d881dSJason Evans label_return: 592df0d881dSJason Evans if (arenas_tdata_old != NULL) 593df0d881dSJason Evans a0dalloc(arenas_tdata_old); 594df0d881dSJason Evans return (tdata); 595d0e79aa3SJason Evans } 596d0e79aa3SJason Evans 597d0e79aa3SJason Evans /* Slow path, called only by arena_choose(). */ 598d0e79aa3SJason Evans arena_t * 5991f0a49e8SJason Evans arena_choose_hard(tsd_t *tsd, bool internal) 600a4bd5210SJason Evans { 6011f0a49e8SJason Evans arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); 602a4bd5210SJason Evans 60382872ac0SJason Evans if (narenas_auto > 1) { 6041f0a49e8SJason Evans unsigned i, j, choose[2], first_null; 605a4bd5210SJason Evans 6061f0a49e8SJason Evans /* 6071f0a49e8SJason Evans * Determine binding for both non-internal and internal 6081f0a49e8SJason Evans * allocation. 6091f0a49e8SJason Evans * 6101f0a49e8SJason Evans * choose[0]: For application allocation. 6111f0a49e8SJason Evans * choose[1]: For internal metadata allocation. 
6121f0a49e8SJason Evans */ 6131f0a49e8SJason Evans 6141f0a49e8SJason Evans for (j = 0; j < 2; j++) 6151f0a49e8SJason Evans choose[j] = 0; 6161f0a49e8SJason Evans 61782872ac0SJason Evans first_null = narenas_auto; 6181f0a49e8SJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); 6191f0a49e8SJason Evans assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); 62082872ac0SJason Evans for (i = 1; i < narenas_auto; i++) { 6211f0a49e8SJason Evans if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { 622a4bd5210SJason Evans /* 623a4bd5210SJason Evans * Choose the first arena that has the lowest 624a4bd5210SJason Evans * number of threads assigned to it. 625a4bd5210SJason Evans */ 6261f0a49e8SJason Evans for (j = 0; j < 2; j++) { 6271f0a49e8SJason Evans if (arena_nthreads_get(arena_get( 6281f0a49e8SJason Evans tsd_tsdn(tsd), i, false), !!j) < 6291f0a49e8SJason Evans arena_nthreads_get(arena_get( 6301f0a49e8SJason Evans tsd_tsdn(tsd), choose[j], false), 6311f0a49e8SJason Evans !!j)) 6321f0a49e8SJason Evans choose[j] = i; 6331f0a49e8SJason Evans } 63482872ac0SJason Evans } else if (first_null == narenas_auto) { 635a4bd5210SJason Evans /* 636a4bd5210SJason Evans * Record the index of the first uninitialized 637a4bd5210SJason Evans * arena, in case all extant arenas are in use. 638a4bd5210SJason Evans * 639a4bd5210SJason Evans * NB: It is possible for there to be 640a4bd5210SJason Evans * discontinuities in terms of initialized 641a4bd5210SJason Evans * versus uninitialized arenas, due to the 642a4bd5210SJason Evans * "thread.arena" mallctl. 
643a4bd5210SJason Evans */ 644a4bd5210SJason Evans first_null = i; 645a4bd5210SJason Evans } 646a4bd5210SJason Evans } 647a4bd5210SJason Evans 6481f0a49e8SJason Evans for (j = 0; j < 2; j++) { 6491f0a49e8SJason Evans if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), 6501f0a49e8SJason Evans choose[j], false), !!j) == 0 || first_null == 6511f0a49e8SJason Evans narenas_auto) { 652a4bd5210SJason Evans /* 6531f0a49e8SJason Evans * Use an unloaded arena, or the least loaded 6541f0a49e8SJason Evans * arena if all arenas are already initialized. 655a4bd5210SJason Evans */ 6561f0a49e8SJason Evans if (!!j == internal) { 6571f0a49e8SJason Evans ret = arena_get(tsd_tsdn(tsd), 6581f0a49e8SJason Evans choose[j], false); 6591f0a49e8SJason Evans } 660a4bd5210SJason Evans } else { 6611f0a49e8SJason Evans arena_t *arena; 6621f0a49e8SJason Evans 663a4bd5210SJason Evans /* Initialize a new arena. */ 6641f0a49e8SJason Evans choose[j] = first_null; 6651f0a49e8SJason Evans arena = arena_init_locked(tsd_tsdn(tsd), 6661f0a49e8SJason Evans choose[j]); 6671f0a49e8SJason Evans if (arena == NULL) { 6681f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), 6691f0a49e8SJason Evans &arenas_lock); 670d0e79aa3SJason Evans return (NULL); 671a4bd5210SJason Evans } 6721f0a49e8SJason Evans if (!!j == internal) 6731f0a49e8SJason Evans ret = arena; 674d0e79aa3SJason Evans } 6751f0a49e8SJason Evans arena_bind(tsd, choose[j], !!j); 6761f0a49e8SJason Evans } 6771f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); 678a4bd5210SJason Evans } else { 6791f0a49e8SJason Evans ret = arena_get(tsd_tsdn(tsd), 0, false); 6801f0a49e8SJason Evans arena_bind(tsd, 0, false); 6811f0a49e8SJason Evans arena_bind(tsd, 0, true); 682a4bd5210SJason Evans } 683a4bd5210SJason Evans 684a4bd5210SJason Evans return (ret); 685a4bd5210SJason Evans } 686a4bd5210SJason Evans 687d0e79aa3SJason Evans void 688d0e79aa3SJason Evans thread_allocated_cleanup(tsd_t *tsd) 689d0e79aa3SJason Evans { 690d0e79aa3SJason Evans 
691d0e79aa3SJason Evans /* Do nothing. */ 692d0e79aa3SJason Evans } 693d0e79aa3SJason Evans 694d0e79aa3SJason Evans void 695d0e79aa3SJason Evans thread_deallocated_cleanup(tsd_t *tsd) 696d0e79aa3SJason Evans { 697d0e79aa3SJason Evans 698d0e79aa3SJason Evans /* Do nothing. */ 699d0e79aa3SJason Evans } 700d0e79aa3SJason Evans 701d0e79aa3SJason Evans void 7021f0a49e8SJason Evans iarena_cleanup(tsd_t *tsd) 7031f0a49e8SJason Evans { 7041f0a49e8SJason Evans arena_t *iarena; 7051f0a49e8SJason Evans 7061f0a49e8SJason Evans iarena = tsd_iarena_get(tsd); 7071f0a49e8SJason Evans if (iarena != NULL) 7081f0a49e8SJason Evans arena_unbind(tsd, iarena->ind, true); 7091f0a49e8SJason Evans } 7101f0a49e8SJason Evans 7111f0a49e8SJason Evans void 712d0e79aa3SJason Evans arena_cleanup(tsd_t *tsd) 713d0e79aa3SJason Evans { 714d0e79aa3SJason Evans arena_t *arena; 715d0e79aa3SJason Evans 716d0e79aa3SJason Evans arena = tsd_arena_get(tsd); 717d0e79aa3SJason Evans if (arena != NULL) 7181f0a49e8SJason Evans arena_unbind(tsd, arena->ind, false); 719d0e79aa3SJason Evans } 720d0e79aa3SJason Evans 721d0e79aa3SJason Evans void 722df0d881dSJason Evans arenas_tdata_cleanup(tsd_t *tsd) 723d0e79aa3SJason Evans { 724df0d881dSJason Evans arena_tdata_t *arenas_tdata; 725d0e79aa3SJason Evans 726df0d881dSJason Evans /* Prevent tsd->arenas_tdata from being (re)created. */ 727df0d881dSJason Evans *tsd_arenas_tdata_bypassp_get(tsd) = true; 728df0d881dSJason Evans 729df0d881dSJason Evans arenas_tdata = tsd_arenas_tdata_get(tsd); 730df0d881dSJason Evans if (arenas_tdata != NULL) { 731df0d881dSJason Evans tsd_arenas_tdata_set(tsd, NULL); 732df0d881dSJason Evans a0dalloc(arenas_tdata); 733d0e79aa3SJason Evans } 734536b3538SJason Evans } 735d0e79aa3SJason Evans 736d0e79aa3SJason Evans void 737df0d881dSJason Evans narenas_tdata_cleanup(tsd_t *tsd) 738d0e79aa3SJason Evans { 739d0e79aa3SJason Evans 740d0e79aa3SJason Evans /* Do nothing. 
*/ 741d0e79aa3SJason Evans } 742d0e79aa3SJason Evans 743d0e79aa3SJason Evans void 744df0d881dSJason Evans arenas_tdata_bypass_cleanup(tsd_t *tsd) 745d0e79aa3SJason Evans { 746d0e79aa3SJason Evans 747d0e79aa3SJason Evans /* Do nothing. */ 748d0e79aa3SJason Evans } 749d0e79aa3SJason Evans 750a4bd5210SJason Evans static void 751a4bd5210SJason Evans stats_print_atexit(void) 752a4bd5210SJason Evans { 753a4bd5210SJason Evans 754a4bd5210SJason Evans if (config_tcache && config_stats) { 7551f0a49e8SJason Evans tsdn_t *tsdn; 75682872ac0SJason Evans unsigned narenas, i; 757a4bd5210SJason Evans 7581f0a49e8SJason Evans tsdn = tsdn_fetch(); 7591f0a49e8SJason Evans 760a4bd5210SJason Evans /* 761a4bd5210SJason Evans * Merge stats from extant threads. This is racy, since 762a4bd5210SJason Evans * individual threads do not lock when recording tcache stats 763a4bd5210SJason Evans * events. As a consequence, the final stats may be slightly 764a4bd5210SJason Evans * out of date by the time they are reported, if other threads 765a4bd5210SJason Evans * continue to allocate. 766a4bd5210SJason Evans */ 76782872ac0SJason Evans for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { 7681f0a49e8SJason Evans arena_t *arena = arena_get(tsdn, i, false); 769a4bd5210SJason Evans if (arena != NULL) { 770a4bd5210SJason Evans tcache_t *tcache; 771a4bd5210SJason Evans 772a4bd5210SJason Evans /* 773a4bd5210SJason Evans * tcache_stats_merge() locks bins, so if any 774a4bd5210SJason Evans * code is introduced that acquires both arena 775a4bd5210SJason Evans * and bin locks in the opposite order, 776a4bd5210SJason Evans * deadlocks may result. 
777a4bd5210SJason Evans */ 7781f0a49e8SJason Evans malloc_mutex_lock(tsdn, &arena->lock); 779a4bd5210SJason Evans ql_foreach(tcache, &arena->tcache_ql, link) { 7801f0a49e8SJason Evans tcache_stats_merge(tsdn, tcache, arena); 781a4bd5210SJason Evans } 7821f0a49e8SJason Evans malloc_mutex_unlock(tsdn, &arena->lock); 783a4bd5210SJason Evans } 784a4bd5210SJason Evans } 785a4bd5210SJason Evans } 786a4bd5210SJason Evans je_malloc_stats_print(NULL, NULL, NULL); 787a4bd5210SJason Evans } 788a4bd5210SJason Evans 789a4bd5210SJason Evans /* 790a4bd5210SJason Evans * End miscellaneous support functions. 791a4bd5210SJason Evans */ 792a4bd5210SJason Evans /******************************************************************************/ 793a4bd5210SJason Evans /* 794a4bd5210SJason Evans * Begin initialization functions. 795a4bd5210SJason Evans */ 796a4bd5210SJason Evans 797d0e79aa3SJason Evans static char * 798*8244f2aaSJason Evans jemalloc_secure_getenv(const char *name) 799d0e79aa3SJason Evans { 800*8244f2aaSJason Evans #ifdef JEMALLOC_HAVE_SECURE_GETENV 801*8244f2aaSJason Evans return secure_getenv(name); 802*8244f2aaSJason Evans #else 803d0e79aa3SJason Evans # ifdef JEMALLOC_HAVE_ISSETUGID 804d0e79aa3SJason Evans if (issetugid() != 0) 805d0e79aa3SJason Evans return (NULL); 806d0e79aa3SJason Evans # endif 807d0e79aa3SJason Evans return (getenv(name)); 808d0e79aa3SJason Evans #endif 809*8244f2aaSJason Evans } 810d0e79aa3SJason Evans 811a4bd5210SJason Evans static unsigned 812a4bd5210SJason Evans malloc_ncpus(void) 813a4bd5210SJason Evans { 814a4bd5210SJason Evans long result; 815a4bd5210SJason Evans 816e722f8f8SJason Evans #ifdef _WIN32 817e722f8f8SJason Evans SYSTEM_INFO si; 818e722f8f8SJason Evans GetSystemInfo(&si); 819e722f8f8SJason Evans result = si.dwNumberOfProcessors; 820bde95144SJason Evans #elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT) 821bde95144SJason Evans /* 822bde95144SJason Evans * glibc >= 2.6 has the CPU_COUNT macro. 
823bde95144SJason Evans * 824bde95144SJason Evans * glibc's sysconf() uses isspace(). glibc allocates for the first time 825bde95144SJason Evans * *before* setting up the isspace tables. Therefore we need a 826bde95144SJason Evans * different method to get the number of CPUs. 827bde95144SJason Evans */ 828bde95144SJason Evans { 829bde95144SJason Evans cpu_set_t set; 830bde95144SJason Evans 831bde95144SJason Evans pthread_getaffinity_np(pthread_self(), sizeof(set), &set); 832bde95144SJason Evans result = CPU_COUNT(&set); 833bde95144SJason Evans } 834e722f8f8SJason Evans #else 835a4bd5210SJason Evans result = sysconf(_SC_NPROCESSORS_ONLN); 83682872ac0SJason Evans #endif 837f921d10fSJason Evans return ((result == -1) ? 1 : (unsigned)result); 838a4bd5210SJason Evans } 839a4bd5210SJason Evans 840a4bd5210SJason Evans static bool 841a4bd5210SJason Evans malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, 842a4bd5210SJason Evans char const **v_p, size_t *vlen_p) 843a4bd5210SJason Evans { 844a4bd5210SJason Evans bool accept; 845a4bd5210SJason Evans const char *opts = *opts_p; 846a4bd5210SJason Evans 847a4bd5210SJason Evans *k_p = opts; 848a4bd5210SJason Evans 849d0e79aa3SJason Evans for (accept = false; !accept;) { 850a4bd5210SJason Evans switch (*opts) { 851a4bd5210SJason Evans case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': 852a4bd5210SJason Evans case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': 853a4bd5210SJason Evans case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': 854a4bd5210SJason Evans case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': 855a4bd5210SJason Evans case 'Y': case 'Z': 856a4bd5210SJason Evans case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': 857a4bd5210SJason Evans case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': 858a4bd5210SJason Evans case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': 859a4bd5210SJason Evans case 's': case 't': case 'u': case 'v': case 'w': case 
'x': 860a4bd5210SJason Evans case 'y': case 'z': 861a4bd5210SJason Evans case '0': case '1': case '2': case '3': case '4': case '5': 862a4bd5210SJason Evans case '6': case '7': case '8': case '9': 863a4bd5210SJason Evans case '_': 864a4bd5210SJason Evans opts++; 865a4bd5210SJason Evans break; 866a4bd5210SJason Evans case ':': 867a4bd5210SJason Evans opts++; 868a4bd5210SJason Evans *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; 869a4bd5210SJason Evans *v_p = opts; 870a4bd5210SJason Evans accept = true; 871a4bd5210SJason Evans break; 872a4bd5210SJason Evans case '\0': 873a4bd5210SJason Evans if (opts != *opts_p) { 874a4bd5210SJason Evans malloc_write("<jemalloc>: Conf string ends " 875a4bd5210SJason Evans "with key\n"); 876a4bd5210SJason Evans } 877a4bd5210SJason Evans return (true); 878a4bd5210SJason Evans default: 879a4bd5210SJason Evans malloc_write("<jemalloc>: Malformed conf string\n"); 880a4bd5210SJason Evans return (true); 881a4bd5210SJason Evans } 882a4bd5210SJason Evans } 883a4bd5210SJason Evans 884d0e79aa3SJason Evans for (accept = false; !accept;) { 885a4bd5210SJason Evans switch (*opts) { 886a4bd5210SJason Evans case ',': 887a4bd5210SJason Evans opts++; 888a4bd5210SJason Evans /* 889a4bd5210SJason Evans * Look ahead one character here, because the next time 890a4bd5210SJason Evans * this function is called, it will assume that end of 891a4bd5210SJason Evans * input has been cleanly reached if no input remains, 892a4bd5210SJason Evans * but we have optimistically already consumed the 893a4bd5210SJason Evans * comma if one exists. 
894a4bd5210SJason Evans */ 895a4bd5210SJason Evans if (*opts == '\0') { 896a4bd5210SJason Evans malloc_write("<jemalloc>: Conf string ends " 897a4bd5210SJason Evans "with comma\n"); 898a4bd5210SJason Evans } 899a4bd5210SJason Evans *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; 900a4bd5210SJason Evans accept = true; 901a4bd5210SJason Evans break; 902a4bd5210SJason Evans case '\0': 903a4bd5210SJason Evans *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; 904a4bd5210SJason Evans accept = true; 905a4bd5210SJason Evans break; 906a4bd5210SJason Evans default: 907a4bd5210SJason Evans opts++; 908a4bd5210SJason Evans break; 909a4bd5210SJason Evans } 910a4bd5210SJason Evans } 911a4bd5210SJason Evans 912a4bd5210SJason Evans *opts_p = opts; 913a4bd5210SJason Evans return (false); 914a4bd5210SJason Evans } 915a4bd5210SJason Evans 916a4bd5210SJason Evans static void 917a4bd5210SJason Evans malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, 918a4bd5210SJason Evans size_t vlen) 919a4bd5210SJason Evans { 920a4bd5210SJason Evans 921a4bd5210SJason Evans malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, 922a4bd5210SJason Evans (int)vlen, v); 923a4bd5210SJason Evans } 924a4bd5210SJason Evans 925a4bd5210SJason Evans static void 926df0d881dSJason Evans malloc_slow_flag_init(void) 927df0d881dSJason Evans { 928df0d881dSJason Evans /* 929df0d881dSJason Evans * Combine the runtime options into malloc_slow for fast path. Called 930df0d881dSJason Evans * after processing all the options. 931df0d881dSJason Evans */ 932df0d881dSJason Evans malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) 933df0d881dSJason Evans | (opt_junk_free ? flag_opt_junk_free : 0) 934df0d881dSJason Evans | (opt_quarantine ? flag_opt_quarantine : 0) 935df0d881dSJason Evans | (opt_zero ? flag_opt_zero : 0) 936df0d881dSJason Evans | (opt_utrace ? flag_opt_utrace : 0) 937df0d881dSJason Evans | (opt_xmalloc ? 
flag_opt_xmalloc : 0); 938df0d881dSJason Evans 939df0d881dSJason Evans if (config_valgrind) 940df0d881dSJason Evans malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0); 941df0d881dSJason Evans 942df0d881dSJason Evans malloc_slow = (malloc_slow_flags != 0); 943df0d881dSJason Evans } 944df0d881dSJason Evans 945df0d881dSJason Evans static void 946a4bd5210SJason Evans malloc_conf_init(void) 947a4bd5210SJason Evans { 948a4bd5210SJason Evans unsigned i; 949a4bd5210SJason Evans char buf[PATH_MAX + 1]; 950a4bd5210SJason Evans const char *opts, *k, *v; 951a4bd5210SJason Evans size_t klen, vlen; 952a4bd5210SJason Evans 95382872ac0SJason Evans /* 95482872ac0SJason Evans * Automatically configure valgrind before processing options. The 95582872ac0SJason Evans * valgrind option remains in jemalloc 3.x for compatibility reasons. 95682872ac0SJason Evans */ 95782872ac0SJason Evans if (config_valgrind) { 958d0e79aa3SJason Evans in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false; 959d0e79aa3SJason Evans if (config_fill && unlikely(in_valgrind)) { 960d0e79aa3SJason Evans opt_junk = "false"; 961d0e79aa3SJason Evans opt_junk_alloc = false; 962d0e79aa3SJason Evans opt_junk_free = false; 963d0e79aa3SJason Evans assert(!opt_zero); 96482872ac0SJason Evans opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; 96582872ac0SJason Evans opt_redzone = true; 96682872ac0SJason Evans } 967d0e79aa3SJason Evans if (config_tcache && unlikely(in_valgrind)) 96882872ac0SJason Evans opt_tcache = false; 96982872ac0SJason Evans } 97082872ac0SJason Evans 971df0d881dSJason Evans for (i = 0; i < 4; i++) { 972a4bd5210SJason Evans /* Get runtime configuration. 
*/ 973a4bd5210SJason Evans switch (i) { 974a4bd5210SJason Evans case 0: 975df0d881dSJason Evans opts = config_malloc_conf; 976df0d881dSJason Evans break; 977df0d881dSJason Evans case 1: 978a4bd5210SJason Evans if (je_malloc_conf != NULL) { 979a4bd5210SJason Evans /* 980a4bd5210SJason Evans * Use options that were compiled into the 981a4bd5210SJason Evans * program. 982a4bd5210SJason Evans */ 983a4bd5210SJason Evans opts = je_malloc_conf; 984a4bd5210SJason Evans } else { 985a4bd5210SJason Evans /* No configuration specified. */ 986a4bd5210SJason Evans buf[0] = '\0'; 987a4bd5210SJason Evans opts = buf; 988a4bd5210SJason Evans } 989a4bd5210SJason Evans break; 990df0d881dSJason Evans case 2: { 991df0d881dSJason Evans ssize_t linklen = 0; 992e722f8f8SJason Evans #ifndef _WIN32 9932b06b201SJason Evans int saved_errno = errno; 994a4bd5210SJason Evans const char *linkname = 995a4bd5210SJason Evans # ifdef JEMALLOC_PREFIX 996a4bd5210SJason Evans "/etc/"JEMALLOC_PREFIX"malloc.conf" 997a4bd5210SJason Evans # else 998a4bd5210SJason Evans "/etc/malloc.conf" 999a4bd5210SJason Evans # endif 1000a4bd5210SJason Evans ; 1001a4bd5210SJason Evans 1002a4bd5210SJason Evans /* 10032b06b201SJason Evans * Try to use the contents of the "/etc/malloc.conf" 1004a4bd5210SJason Evans * symbolic link's name. 1005a4bd5210SJason Evans */ 10062b06b201SJason Evans linklen = readlink(linkname, buf, sizeof(buf) - 1); 10072b06b201SJason Evans if (linklen == -1) { 10082b06b201SJason Evans /* No configuration specified. */ 10092b06b201SJason Evans linklen = 0; 1010d0e79aa3SJason Evans /* Restore errno. 
*/ 10112b06b201SJason Evans set_errno(saved_errno); 10122b06b201SJason Evans } 10132b06b201SJason Evans #endif 1014a4bd5210SJason Evans buf[linklen] = '\0'; 1015a4bd5210SJason Evans opts = buf; 1016a4bd5210SJason Evans break; 1017df0d881dSJason Evans } case 3: { 1018a4bd5210SJason Evans const char *envname = 1019a4bd5210SJason Evans #ifdef JEMALLOC_PREFIX 1020a4bd5210SJason Evans JEMALLOC_CPREFIX"MALLOC_CONF" 1021a4bd5210SJason Evans #else 1022a4bd5210SJason Evans "MALLOC_CONF" 1023a4bd5210SJason Evans #endif 1024a4bd5210SJason Evans ; 1025a4bd5210SJason Evans 1026*8244f2aaSJason Evans if ((opts = jemalloc_secure_getenv(envname)) != NULL) { 1027a4bd5210SJason Evans /* 1028a4bd5210SJason Evans * Do nothing; opts is already initialized to 1029a4bd5210SJason Evans * the value of the MALLOC_CONF environment 1030a4bd5210SJason Evans * variable. 1031a4bd5210SJason Evans */ 1032a4bd5210SJason Evans } else { 1033a4bd5210SJason Evans /* No configuration specified. */ 1034a4bd5210SJason Evans buf[0] = '\0'; 1035a4bd5210SJason Evans opts = buf; 1036a4bd5210SJason Evans } 1037a4bd5210SJason Evans break; 1038a4bd5210SJason Evans } default: 1039f921d10fSJason Evans not_reached(); 1040a4bd5210SJason Evans buf[0] = '\0'; 1041a4bd5210SJason Evans opts = buf; 1042a4bd5210SJason Evans } 1043a4bd5210SJason Evans 1044d0e79aa3SJason Evans while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, 1045d0e79aa3SJason Evans &vlen)) { 1046d0e79aa3SJason Evans #define CONF_MATCH(n) \ 1047d0e79aa3SJason Evans (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) 1048d0e79aa3SJason Evans #define CONF_MATCH_VALUE(n) \ 1049d0e79aa3SJason Evans (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) 1050d0e79aa3SJason Evans #define CONF_HANDLE_BOOL(o, n, cont) \ 1051d0e79aa3SJason Evans if (CONF_MATCH(n)) { \ 1052d0e79aa3SJason Evans if (CONF_MATCH_VALUE("true")) \ 1053a4bd5210SJason Evans o = true; \ 1054d0e79aa3SJason Evans else if (CONF_MATCH_VALUE("false")) \ 1055a4bd5210SJason Evans o = false; \ 
1056a4bd5210SJason Evans else { \ 1057a4bd5210SJason Evans malloc_conf_error( \ 1058a4bd5210SJason Evans "Invalid conf value", \ 1059a4bd5210SJason Evans k, klen, v, vlen); \ 1060a4bd5210SJason Evans } \ 1061d0e79aa3SJason Evans if (cont) \ 1062a4bd5210SJason Evans continue; \ 1063a4bd5210SJason Evans } 10647fa7f12fSJason Evans #define CONF_MIN_no(um, min) false 10657fa7f12fSJason Evans #define CONF_MIN_yes(um, min) ((um) < (min)) 10667fa7f12fSJason Evans #define CONF_MAX_no(um, max) false 10677fa7f12fSJason Evans #define CONF_MAX_yes(um, max) ((um) > (max)) 10687fa7f12fSJason Evans #define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \ 1069d0e79aa3SJason Evans if (CONF_MATCH(n)) { \ 1070a4bd5210SJason Evans uintmax_t um; \ 1071a4bd5210SJason Evans char *end; \ 1072a4bd5210SJason Evans \ 1073e722f8f8SJason Evans set_errno(0); \ 1074a4bd5210SJason Evans um = malloc_strtoumax(v, &end, 0); \ 1075e722f8f8SJason Evans if (get_errno() != 0 || (uintptr_t)end -\ 1076a4bd5210SJason Evans (uintptr_t)v != vlen) { \ 1077a4bd5210SJason Evans malloc_conf_error( \ 1078a4bd5210SJason Evans "Invalid conf value", \ 1079a4bd5210SJason Evans k, klen, v, vlen); \ 108088ad2f8dSJason Evans } else if (clip) { \ 10817fa7f12fSJason Evans if (CONF_MIN_##check_min(um, \ 1082*8244f2aaSJason Evans (t)(min))) \ 1083df0d881dSJason Evans o = (t)(min); \ 10847fa7f12fSJason Evans else if (CONF_MAX_##check_max( \ 1085*8244f2aaSJason Evans um, (t)(max))) \ 1086df0d881dSJason Evans o = (t)(max); \ 108788ad2f8dSJason Evans else \ 1088df0d881dSJason Evans o = (t)um; \ 108988ad2f8dSJason Evans } else { \ 10907fa7f12fSJason Evans if (CONF_MIN_##check_min(um, \ 1091*8244f2aaSJason Evans (t)(min)) || \ 10927fa7f12fSJason Evans CONF_MAX_##check_max(um, \ 1093*8244f2aaSJason Evans (t)(max))) { \ 1094a4bd5210SJason Evans malloc_conf_error( \ 109588ad2f8dSJason Evans "Out-of-range " \ 109688ad2f8dSJason Evans "conf value", \ 1097a4bd5210SJason Evans k, klen, v, vlen); \ 1098a4bd5210SJason Evans 
} else \ 1099df0d881dSJason Evans o = (t)um; \ 110088ad2f8dSJason Evans } \ 1101a4bd5210SJason Evans continue; \ 1102a4bd5210SJason Evans } 11037fa7f12fSJason Evans #define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, \ 11047fa7f12fSJason Evans clip) \ 11057fa7f12fSJason Evans CONF_HANDLE_T_U(unsigned, o, n, min, max, \ 11067fa7f12fSJason Evans check_min, check_max, clip) 11077fa7f12fSJason Evans #define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \ 11087fa7f12fSJason Evans CONF_HANDLE_T_U(size_t, o, n, min, max, \ 11097fa7f12fSJason Evans check_min, check_max, clip) 1110a4bd5210SJason Evans #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ 1111d0e79aa3SJason Evans if (CONF_MATCH(n)) { \ 1112a4bd5210SJason Evans long l; \ 1113a4bd5210SJason Evans char *end; \ 1114a4bd5210SJason Evans \ 1115e722f8f8SJason Evans set_errno(0); \ 1116a4bd5210SJason Evans l = strtol(v, &end, 0); \ 1117e722f8f8SJason Evans if (get_errno() != 0 || (uintptr_t)end -\ 1118a4bd5210SJason Evans (uintptr_t)v != vlen) { \ 1119a4bd5210SJason Evans malloc_conf_error( \ 1120a4bd5210SJason Evans "Invalid conf value", \ 1121a4bd5210SJason Evans k, klen, v, vlen); \ 1122d0e79aa3SJason Evans } else if (l < (ssize_t)(min) || l > \ 1123d0e79aa3SJason Evans (ssize_t)(max)) { \ 1124a4bd5210SJason Evans malloc_conf_error( \ 1125a4bd5210SJason Evans "Out-of-range conf value", \ 1126a4bd5210SJason Evans k, klen, v, vlen); \ 1127a4bd5210SJason Evans } else \ 1128a4bd5210SJason Evans o = l; \ 1129a4bd5210SJason Evans continue; \ 1130a4bd5210SJason Evans } 1131a4bd5210SJason Evans #define CONF_HANDLE_CHAR_P(o, n, d) \ 1132d0e79aa3SJason Evans if (CONF_MATCH(n)) { \ 1133a4bd5210SJason Evans size_t cpylen = (vlen <= \ 1134a4bd5210SJason Evans sizeof(o)-1) ? 
vlen : \ 1135a4bd5210SJason Evans sizeof(o)-1; \ 1136a4bd5210SJason Evans strncpy(o, v, cpylen); \ 1137a4bd5210SJason Evans o[cpylen] = '\0'; \ 1138a4bd5210SJason Evans continue; \ 1139a4bd5210SJason Evans } 1140a4bd5210SJason Evans 1141d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_abort, "abort", true) 1142a4bd5210SJason Evans /* 1143*8244f2aaSJason Evans * Chunks always require at least one header page, as 1144*8244f2aaSJason Evans * many as 2^(LG_SIZE_CLASS_GROUP+1) data pages (plus an 1145*8244f2aaSJason Evans * additional page in the presence of cache-oblivious 1146*8244f2aaSJason Evans * large), and possibly an additional page in the 1147*8244f2aaSJason Evans * presence of redzones. In order to simplify options 1148*8244f2aaSJason Evans * processing, use a conservative bound that 1149*8244f2aaSJason Evans * accommodates all these constraints. 1150a4bd5210SJason Evans */ 11518ed34ab0SJason Evans CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + 1152*8244f2aaSJason Evans LG_SIZE_CLASS_GROUP + 1 + ((config_cache_oblivious 1153*8244f2aaSJason Evans || config_fill) ? 
1 : 0), (sizeof(size_t) << 3) - 1, 1154*8244f2aaSJason Evans yes, yes, true) 115582872ac0SJason Evans if (strncmp("dss", k, klen) == 0) { 115682872ac0SJason Evans int i; 115782872ac0SJason Evans bool match = false; 115882872ac0SJason Evans for (i = 0; i < dss_prec_limit; i++) { 115982872ac0SJason Evans if (strncmp(dss_prec_names[i], v, vlen) 116082872ac0SJason Evans == 0) { 1161bde95144SJason Evans if (chunk_dss_prec_set(i)) { 116282872ac0SJason Evans malloc_conf_error( 116382872ac0SJason Evans "Error setting dss", 116482872ac0SJason Evans k, klen, v, vlen); 116582872ac0SJason Evans } else { 116682872ac0SJason Evans opt_dss = 116782872ac0SJason Evans dss_prec_names[i]; 116882872ac0SJason Evans match = true; 116982872ac0SJason Evans break; 117082872ac0SJason Evans } 117182872ac0SJason Evans } 117282872ac0SJason Evans } 1173d0e79aa3SJason Evans if (!match) { 117482872ac0SJason Evans malloc_conf_error("Invalid conf value", 117582872ac0SJason Evans k, klen, v, vlen); 117682872ac0SJason Evans } 117782872ac0SJason Evans continue; 117882872ac0SJason Evans } 1179df0d881dSJason Evans CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, 11807fa7f12fSJason Evans UINT_MAX, yes, no, false) 1181df0d881dSJason Evans if (strncmp("purge", k, klen) == 0) { 1182df0d881dSJason Evans int i; 1183df0d881dSJason Evans bool match = false; 1184df0d881dSJason Evans for (i = 0; i < purge_mode_limit; i++) { 1185df0d881dSJason Evans if (strncmp(purge_mode_names[i], v, 1186df0d881dSJason Evans vlen) == 0) { 1187df0d881dSJason Evans opt_purge = (purge_mode_t)i; 1188df0d881dSJason Evans match = true; 1189df0d881dSJason Evans break; 1190df0d881dSJason Evans } 1191df0d881dSJason Evans } 1192df0d881dSJason Evans if (!match) { 1193df0d881dSJason Evans malloc_conf_error("Invalid conf value", 1194df0d881dSJason Evans k, klen, v, vlen); 1195df0d881dSJason Evans } 1196df0d881dSJason Evans continue; 1197df0d881dSJason Evans } 11988ed34ab0SJason Evans CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", 
1199a4bd5210SJason Evans -1, (sizeof(size_t) << 3) - 1) 1200df0d881dSJason Evans CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1, 1201df0d881dSJason Evans NSTIME_SEC_MAX); 1202d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true) 1203a4bd5210SJason Evans if (config_fill) { 1204d0e79aa3SJason Evans if (CONF_MATCH("junk")) { 1205d0e79aa3SJason Evans if (CONF_MATCH_VALUE("true")) { 1206bde95144SJason Evans if (config_valgrind && 1207bde95144SJason Evans unlikely(in_valgrind)) { 1208bde95144SJason Evans malloc_conf_error( 1209bde95144SJason Evans "Deallocation-time " 1210bde95144SJason Evans "junk filling cannot " 1211bde95144SJason Evans "be enabled while " 1212bde95144SJason Evans "running inside " 1213bde95144SJason Evans "Valgrind", k, klen, v, 1214bde95144SJason Evans vlen); 1215bde95144SJason Evans } else { 1216d0e79aa3SJason Evans opt_junk = "true"; 1217bde95144SJason Evans opt_junk_alloc = true; 1218bde95144SJason Evans opt_junk_free = true; 1219bde95144SJason Evans } 1220d0e79aa3SJason Evans } else if (CONF_MATCH_VALUE("false")) { 1221d0e79aa3SJason Evans opt_junk = "false"; 1222d0e79aa3SJason Evans opt_junk_alloc = opt_junk_free = 1223d0e79aa3SJason Evans false; 1224d0e79aa3SJason Evans } else if (CONF_MATCH_VALUE("alloc")) { 1225d0e79aa3SJason Evans opt_junk = "alloc"; 1226d0e79aa3SJason Evans opt_junk_alloc = true; 1227d0e79aa3SJason Evans opt_junk_free = false; 1228d0e79aa3SJason Evans } else if (CONF_MATCH_VALUE("free")) { 1229bde95144SJason Evans if (config_valgrind && 1230bde95144SJason Evans unlikely(in_valgrind)) { 1231bde95144SJason Evans malloc_conf_error( 1232bde95144SJason Evans "Deallocation-time " 1233bde95144SJason Evans "junk filling cannot " 1234bde95144SJason Evans "be enabled while " 1235bde95144SJason Evans "running inside " 1236bde95144SJason Evans "Valgrind", k, klen, v, 1237bde95144SJason Evans vlen); 1238bde95144SJason Evans } else { 1239d0e79aa3SJason Evans opt_junk = "free"; 1240d0e79aa3SJason Evans 
opt_junk_alloc = false; 1241d0e79aa3SJason Evans opt_junk_free = true; 1242bde95144SJason Evans } 1243d0e79aa3SJason Evans } else { 1244d0e79aa3SJason Evans malloc_conf_error( 1245d0e79aa3SJason Evans "Invalid conf value", k, 1246d0e79aa3SJason Evans klen, v, vlen); 1247d0e79aa3SJason Evans } 1248d0e79aa3SJason Evans continue; 1249d0e79aa3SJason Evans } 12508ed34ab0SJason Evans CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", 12517fa7f12fSJason Evans 0, SIZE_T_MAX, no, no, false) 1252d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_redzone, "redzone", true) 1253d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_zero, "zero", true) 1254a4bd5210SJason Evans } 1255a4bd5210SJason Evans if (config_utrace) { 1256d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_utrace, "utrace", true) 1257a4bd5210SJason Evans } 1258a4bd5210SJason Evans if (config_xmalloc) { 1259d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true) 1260a4bd5210SJason Evans } 1261a4bd5210SJason Evans if (config_tcache) { 1262d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_tcache, "tcache", 1263d0e79aa3SJason Evans !config_valgrind || !in_valgrind) 1264d0e79aa3SJason Evans if (CONF_MATCH("tcache")) { 1265d0e79aa3SJason Evans assert(config_valgrind && in_valgrind); 1266d0e79aa3SJason Evans if (opt_tcache) { 1267d0e79aa3SJason Evans opt_tcache = false; 1268d0e79aa3SJason Evans malloc_conf_error( 1269d0e79aa3SJason Evans "tcache cannot be enabled " 1270d0e79aa3SJason Evans "while running inside Valgrind", 1271d0e79aa3SJason Evans k, klen, v, vlen); 1272d0e79aa3SJason Evans } 1273d0e79aa3SJason Evans continue; 1274d0e79aa3SJason Evans } 1275a4bd5210SJason Evans CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, 12768ed34ab0SJason Evans "lg_tcache_max", -1, 1277a4bd5210SJason Evans (sizeof(size_t) << 3) - 1) 1278a4bd5210SJason Evans } 1279*8244f2aaSJason Evans if (config_thp) { 1280*8244f2aaSJason Evans CONF_HANDLE_BOOL(opt_thp, "thp", true) 1281*8244f2aaSJason Evans } 1282a4bd5210SJason Evans if (config_prof) { 1283d0e79aa3SJason Evans 
CONF_HANDLE_BOOL(opt_prof, "prof", true) 12848ed34ab0SJason Evans CONF_HANDLE_CHAR_P(opt_prof_prefix, 12858ed34ab0SJason Evans "prof_prefix", "jeprof") 1286d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_active, "prof_active", 1287d0e79aa3SJason Evans true) 1288d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_thread_active_init, 1289d0e79aa3SJason Evans "prof_thread_active_init", true) 1290d0e79aa3SJason Evans CONF_HANDLE_SIZE_T(opt_lg_prof_sample, 12917fa7f12fSJason Evans "lg_prof_sample", 0, (sizeof(uint64_t) << 3) 12927fa7f12fSJason Evans - 1, no, yes, true) 1293d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum", 1294d0e79aa3SJason Evans true) 1295a4bd5210SJason Evans CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, 12968ed34ab0SJason Evans "lg_prof_interval", -1, 1297a4bd5210SJason Evans (sizeof(uint64_t) << 3) - 1) 1298d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump", 1299d0e79aa3SJason Evans true) 1300d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_final, "prof_final", 1301d0e79aa3SJason Evans true) 1302d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak", 1303d0e79aa3SJason Evans true) 1304a4bd5210SJason Evans } 1305a4bd5210SJason Evans malloc_conf_error("Invalid conf pair", k, klen, v, 1306a4bd5210SJason Evans vlen); 1307d0e79aa3SJason Evans #undef CONF_MATCH 13087fa7f12fSJason Evans #undef CONF_MATCH_VALUE 1309a4bd5210SJason Evans #undef CONF_HANDLE_BOOL 13107fa7f12fSJason Evans #undef CONF_MIN_no 13117fa7f12fSJason Evans #undef CONF_MIN_yes 13127fa7f12fSJason Evans #undef CONF_MAX_no 13137fa7f12fSJason Evans #undef CONF_MAX_yes 13147fa7f12fSJason Evans #undef CONF_HANDLE_T_U 13157fa7f12fSJason Evans #undef CONF_HANDLE_UNSIGNED 1316a4bd5210SJason Evans #undef CONF_HANDLE_SIZE_T 1317a4bd5210SJason Evans #undef CONF_HANDLE_SSIZE_T 1318a4bd5210SJason Evans #undef CONF_HANDLE_CHAR_P 1319a4bd5210SJason Evans } 1320a4bd5210SJason Evans } 1321a4bd5210SJason Evans } 1322a4bd5210SJason Evans 1323a4bd5210SJason Evans static bool 
1324d0e79aa3SJason Evans malloc_init_hard_needed(void) 1325a4bd5210SJason Evans { 1326a4bd5210SJason Evans 1327d0e79aa3SJason Evans if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == 1328d0e79aa3SJason Evans malloc_init_recursible)) { 1329a4bd5210SJason Evans /* 1330a4bd5210SJason Evans * Another thread initialized the allocator before this one 1331a4bd5210SJason Evans * acquired init_lock, or this thread is the initializing 1332a4bd5210SJason Evans * thread, and it is recursively allocating. 1333a4bd5210SJason Evans */ 1334a4bd5210SJason Evans return (false); 1335a4bd5210SJason Evans } 1336a4bd5210SJason Evans #ifdef JEMALLOC_THREADED_INIT 1337d0e79aa3SJason Evans if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { 1338bde95144SJason Evans spin_t spinner; 1339bde95144SJason Evans 1340a4bd5210SJason Evans /* Busy-wait until the initializing thread completes. */ 1341bde95144SJason Evans spin_init(&spinner); 1342a4bd5210SJason Evans do { 1343bde95144SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 1344bde95144SJason Evans spin_adaptive(&spinner); 1345bde95144SJason Evans malloc_mutex_lock(TSDN_NULL, &init_lock); 1346d0e79aa3SJason Evans } while (!malloc_initialized()); 1347a4bd5210SJason Evans return (false); 1348a4bd5210SJason Evans } 1349a4bd5210SJason Evans #endif 1350d0e79aa3SJason Evans return (true); 1351d0e79aa3SJason Evans } 1352d0e79aa3SJason Evans 1353d0e79aa3SJason Evans static bool 13541f0a49e8SJason Evans malloc_init_hard_a0_locked() 1355d0e79aa3SJason Evans { 1356d0e79aa3SJason Evans 1357a4bd5210SJason Evans malloc_initializer = INITIALIZER; 1358a4bd5210SJason Evans 1359a4bd5210SJason Evans if (config_prof) 1360a4bd5210SJason Evans prof_boot0(); 1361a4bd5210SJason Evans malloc_conf_init(); 1362a4bd5210SJason Evans if (opt_stats_print) { 1363a4bd5210SJason Evans /* Print statistics at exit. 
*/ 1364a4bd5210SJason Evans if (atexit(stats_print_atexit) != 0) { 1365a4bd5210SJason Evans malloc_write("<jemalloc>: Error in atexit()\n"); 1366a4bd5210SJason Evans if (opt_abort) 1367a4bd5210SJason Evans abort(); 1368a4bd5210SJason Evans } 1369a4bd5210SJason Evans } 13701f0a49e8SJason Evans pages_boot(); 1371d0e79aa3SJason Evans if (base_boot()) 1372a4bd5210SJason Evans return (true); 1373d0e79aa3SJason Evans if (chunk_boot()) 1374a4bd5210SJason Evans return (true); 1375d0e79aa3SJason Evans if (ctl_boot()) 1376a4bd5210SJason Evans return (true); 1377a4bd5210SJason Evans if (config_prof) 1378a4bd5210SJason Evans prof_boot1(); 1379bde95144SJason Evans arena_boot(); 13801f0a49e8SJason Evans if (config_tcache && tcache_boot(TSDN_NULL)) 1381a4bd5210SJason Evans return (true); 13821f0a49e8SJason Evans if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) 1383a4bd5210SJason Evans return (true); 1384a4bd5210SJason Evans /* 1385a4bd5210SJason Evans * Create enough scaffolding to allow recursive allocation in 1386a4bd5210SJason Evans * malloc_ncpus(). 1387a4bd5210SJason Evans */ 1388df0d881dSJason Evans narenas_auto = 1; 1389df0d881dSJason Evans narenas_total_set(narenas_auto); 1390d0e79aa3SJason Evans arenas = &a0; 139182872ac0SJason Evans memset(arenas, 0, sizeof(arena_t *) * narenas_auto); 1392a4bd5210SJason Evans /* 1393a4bd5210SJason Evans * Initialize one arena here. The rest are lazily created in 1394d0e79aa3SJason Evans * arena_choose_hard(). 
1395a4bd5210SJason Evans */ 13961f0a49e8SJason Evans if (arena_init(TSDN_NULL, 0) == NULL) 1397a4bd5210SJason Evans return (true); 13981f0a49e8SJason Evans 1399d0e79aa3SJason Evans malloc_init_state = malloc_init_a0_initialized; 14001f0a49e8SJason Evans 1401d0e79aa3SJason Evans return (false); 1402a4bd5210SJason Evans } 1403a4bd5210SJason Evans 1404d0e79aa3SJason Evans static bool 1405d0e79aa3SJason Evans malloc_init_hard_a0(void) 1406d0e79aa3SJason Evans { 1407d0e79aa3SJason Evans bool ret; 1408d0e79aa3SJason Evans 14091f0a49e8SJason Evans malloc_mutex_lock(TSDN_NULL, &init_lock); 1410d0e79aa3SJason Evans ret = malloc_init_hard_a0_locked(); 14111f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 1412d0e79aa3SJason Evans return (ret); 1413a4bd5210SJason Evans } 1414a4bd5210SJason Evans 14151f0a49e8SJason Evans /* Initialize data structures which may trigger recursive allocation. */ 1416df0d881dSJason Evans static bool 1417d0e79aa3SJason Evans malloc_init_hard_recursible(void) 1418d0e79aa3SJason Evans { 1419a4bd5210SJason Evans 1420d0e79aa3SJason Evans malloc_init_state = malloc_init_recursible; 1421df0d881dSJason Evans 1422a4bd5210SJason Evans ncpus = malloc_ncpus(); 1423f921d10fSJason Evans 14247fa7f12fSJason Evans #if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \ 14257fa7f12fSJason Evans && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \ 14267fa7f12fSJason Evans !defined(__native_client__)) 1427df0d881dSJason Evans /* LinuxThreads' pthread_atfork() allocates. 
*/ 1428f921d10fSJason Evans if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, 1429f921d10fSJason Evans jemalloc_postfork_child) != 0) { 1430f921d10fSJason Evans malloc_write("<jemalloc>: Error in pthread_atfork()\n"); 1431f921d10fSJason Evans if (opt_abort) 1432f921d10fSJason Evans abort(); 14331f0a49e8SJason Evans return (true); 1434f921d10fSJason Evans } 1435f921d10fSJason Evans #endif 1436df0d881dSJason Evans 14371f0a49e8SJason Evans return (false); 1438a4bd5210SJason Evans } 1439a4bd5210SJason Evans 1440d0e79aa3SJason Evans static bool 14411f0a49e8SJason Evans malloc_init_hard_finish(tsdn_t *tsdn) 1442d0e79aa3SJason Evans { 1443d0e79aa3SJason Evans 14441f0a49e8SJason Evans if (malloc_mutex_boot()) 1445d0e79aa3SJason Evans return (true); 1446d0e79aa3SJason Evans 1447a4bd5210SJason Evans if (opt_narenas == 0) { 1448a4bd5210SJason Evans /* 1449a4bd5210SJason Evans * For SMP systems, create more than one arena per CPU by 1450a4bd5210SJason Evans * default. 1451a4bd5210SJason Evans */ 1452a4bd5210SJason Evans if (ncpus > 1) 1453a4bd5210SJason Evans opt_narenas = ncpus << 2; 1454a4bd5210SJason Evans else 1455a4bd5210SJason Evans opt_narenas = 1; 1456a4bd5210SJason Evans } 145782872ac0SJason Evans narenas_auto = opt_narenas; 1458a4bd5210SJason Evans /* 1459df0d881dSJason Evans * Limit the number of arenas to the indexing range of MALLOCX_ARENA(). 1460a4bd5210SJason Evans */ 1461df0d881dSJason Evans if (narenas_auto > MALLOCX_ARENA_MAX) { 1462df0d881dSJason Evans narenas_auto = MALLOCX_ARENA_MAX; 1463a4bd5210SJason Evans malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", 146482872ac0SJason Evans narenas_auto); 1465a4bd5210SJason Evans } 1466df0d881dSJason Evans narenas_total_set(narenas_auto); 1467a4bd5210SJason Evans 1468a4bd5210SJason Evans /* Allocate and initialize arenas. 
*/ 14691f0a49e8SJason Evans arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) * 1470df0d881dSJason Evans (MALLOCX_ARENA_MAX+1)); 1471d0e79aa3SJason Evans if (arenas == NULL) 1472a4bd5210SJason Evans return (true); 1473a4bd5210SJason Evans /* Copy the pointer to the one arena that was already initialized. */ 1474df0d881dSJason Evans arena_set(0, a0); 1475a4bd5210SJason Evans 1476d0e79aa3SJason Evans malloc_init_state = malloc_init_initialized; 1477df0d881dSJason Evans malloc_slow_flag_init(); 1478df0d881dSJason Evans 1479d0e79aa3SJason Evans return (false); 1480d0e79aa3SJason Evans } 1481d0e79aa3SJason Evans 1482d0e79aa3SJason Evans static bool 1483d0e79aa3SJason Evans malloc_init_hard(void) 1484d0e79aa3SJason Evans { 14851f0a49e8SJason Evans tsd_t *tsd; 1486d0e79aa3SJason Evans 1487536b3538SJason Evans #if defined(_WIN32) && _WIN32_WINNT < 0x0600 1488536b3538SJason Evans _init_init_lock(); 1489536b3538SJason Evans #endif 14901f0a49e8SJason Evans malloc_mutex_lock(TSDN_NULL, &init_lock); 1491d0e79aa3SJason Evans if (!malloc_init_hard_needed()) { 14921f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 1493d0e79aa3SJason Evans return (false); 1494d0e79aa3SJason Evans } 1495f921d10fSJason Evans 1496d0e79aa3SJason Evans if (malloc_init_state != malloc_init_a0_initialized && 1497d0e79aa3SJason Evans malloc_init_hard_a0_locked()) { 14981f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 1499d0e79aa3SJason Evans return (true); 1500d0e79aa3SJason Evans } 1501df0d881dSJason Evans 15021f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 15031f0a49e8SJason Evans /* Recursive allocation relies on functional tsd. 
*/ 15041f0a49e8SJason Evans tsd = malloc_tsd_boot0(); 15051f0a49e8SJason Evans if (tsd == NULL) 15061f0a49e8SJason Evans return (true); 15071f0a49e8SJason Evans if (malloc_init_hard_recursible()) 15081f0a49e8SJason Evans return (true); 15091f0a49e8SJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &init_lock); 15101f0a49e8SJason Evans 1511bde95144SJason Evans if (config_prof && prof_boot2(tsd)) { 15121f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); 1513d0e79aa3SJason Evans return (true); 1514d0e79aa3SJason Evans } 1515d0e79aa3SJason Evans 15161f0a49e8SJason Evans if (malloc_init_hard_finish(tsd_tsdn(tsd))) { 15171f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); 1518df0d881dSJason Evans return (true); 1519df0d881dSJason Evans } 1520d0e79aa3SJason Evans 15211f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock); 1522d0e79aa3SJason Evans malloc_tsd_boot1(); 1523a4bd5210SJason Evans return (false); 1524a4bd5210SJason Evans } 1525a4bd5210SJason Evans 1526a4bd5210SJason Evans /* 1527a4bd5210SJason Evans * End initialization functions. 1528a4bd5210SJason Evans */ 1529a4bd5210SJason Evans /******************************************************************************/ 1530a4bd5210SJason Evans /* 1531a4bd5210SJason Evans * Begin malloc(3)-compatible functions. 
1532a4bd5210SJason Evans */ 1533a4bd5210SJason Evans 1534f921d10fSJason Evans static void * 15351f0a49e8SJason Evans ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero, 1536df0d881dSJason Evans prof_tctx_t *tctx, bool slow_path) 1537f921d10fSJason Evans { 1538f921d10fSJason Evans void *p; 1539f921d10fSJason Evans 1540d0e79aa3SJason Evans if (tctx == NULL) 1541f921d10fSJason Evans return (NULL); 1542d0e79aa3SJason Evans if (usize <= SMALL_MAXCLASS) { 1543df0d881dSJason Evans szind_t ind_large = size2index(LARGE_MINCLASS); 15441f0a49e8SJason Evans p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path); 1545f921d10fSJason Evans if (p == NULL) 1546f921d10fSJason Evans return (NULL); 15471f0a49e8SJason Evans arena_prof_promoted(tsd_tsdn(tsd), p, usize); 1548f921d10fSJason Evans } else 15491f0a49e8SJason Evans p = ialloc(tsd, usize, ind, zero, slow_path); 1550f921d10fSJason Evans 1551f921d10fSJason Evans return (p); 1552f921d10fSJason Evans } 1553f921d10fSJason Evans 1554f921d10fSJason Evans JEMALLOC_ALWAYS_INLINE_C void * 15551f0a49e8SJason Evans ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path) 1556f921d10fSJason Evans { 1557f921d10fSJason Evans void *p; 1558d0e79aa3SJason Evans prof_tctx_t *tctx; 1559f921d10fSJason Evans 1560536b3538SJason Evans tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true); 1561d0e79aa3SJason Evans if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) 15621f0a49e8SJason Evans p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path); 1563f921d10fSJason Evans else 15641f0a49e8SJason Evans p = ialloc(tsd, usize, ind, zero, slow_path); 1565d0e79aa3SJason Evans if (unlikely(p == NULL)) { 1566d0e79aa3SJason Evans prof_alloc_rollback(tsd, tctx, true); 1567f921d10fSJason Evans return (NULL); 1568d0e79aa3SJason Evans } 15691f0a49e8SJason Evans prof_malloc(tsd_tsdn(tsd), p, usize, tctx); 1570f921d10fSJason Evans 1571f921d10fSJason Evans return (p); 1572f921d10fSJason Evans } 

/*
 * ialloc_body() is inlined so that fast and slow paths are generated separately
 * with statically known slow_path.
 *
 * This function guarantees that *tsdn is non-NULL on success.
 */
JEMALLOC_ALWAYS_INLINE_C void *
ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
    bool slow_path)
{
	tsd_t *tsd;
	szind_t ind;

	if (slow_path && unlikely(malloc_init())) {
		*tsdn = NULL;
		return (NULL);
	}

	tsd = tsd_fetch();
	*tsdn = tsd_tsdn(tsd);
	witness_assert_lockless(tsd_tsdn(tsd));

	ind = size2index(size);
	if (unlikely(ind >= NSIZES))
		return (NULL);

	/* *usize is only computed when some consumer (stats/prof/valgrind)
	 * will read it. */
	if (config_stats || (config_prof && opt_prof) || (slow_path &&
	    config_valgrind && unlikely(in_valgrind))) {
		*usize = index2size(ind);
		assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
	}

	if (config_prof && opt_prof)
		return (ialloc_prof(tsd, *usize, ind, zero, slow_path));

	return (ialloc(tsd, size, ind, zero, slow_path));
}

/*
 * Post-allocation bookkeeping shared by je_malloc()/je_calloc()/je_realloc():
 * report OOM (and optionally abort/set errno) on failure, and update
 * per-thread allocation stats on success.
 */
JEMALLOC_ALWAYS_INLINE_C void
ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
    bool update_errno, bool slow_path)
{

	assert(!tsdn_null(tsdn) || ret == NULL);

	if (unlikely(ret == NULL)) {
		if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_printf("<jemalloc>: Error in %s(): out of "
			    "memory\n", func);
			abort();
		}
		if (update_errno)
			set_errno(ENOMEM);
	}
	if (config_stats && likely(ret != NULL)) {
		assert(usize == isalloc(tsdn, ret, config_prof));
		*tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
	}
	witness_assert_lockless(tsdn);
}

/* malloc(3) entry point.  Zero-size requests are treated as size 1. */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size)
{
	void *ret;
	tsdn_t *tsdn;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	if (size == 0)
		size = 1;

	if (likely(!malloc_slow)) {
		ret = ialloc_body(size, false, &tsdn, &usize, false);
		ialloc_post_check(ret, tsdn, usize, "malloc", true, false);
	} else {
		/* Slow path additionally handles utrace/valgrind hooks. */
		ret = ialloc_body(size, false, &tsdn, &usize, true);
		ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
		UTRACE(0, size, ret);
		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
	}

	return (ret);
}

/*
 * Aligned analogue of ialloc_prof_sample(): allocate a sampled object,
 * promoting small requests to LARGE_MINCLASS.
 */
static void *
imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
		p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = ipalloc(tsd, usize, alignment, false);

	return (p);
}

/* Profiling wrapper around ipalloc(); mirrors ialloc_prof(). */
JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
{
	void *p;
	prof_tctx_t *tctx;

	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = imemalign_prof_sample(tsd, alignment, usize, tctx);
	else
		p = ipalloc(tsd, usize, alignment, false);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, usize, tctx);

	return (p);
}

/*
 * Common implementation of posix_memalign()/aligned_alloc()/memalign()/
 * valloc().  Returns 0 on success, or EINVAL/ENOMEM; the result is stored
 * through memptr.  min_alignment distinguishes the callers' contracts.
 *
 * NOTE(review): on the malloc_init() failure path tsd is NULL when label_oom
 * evaluates witness_assert_lockless(tsd_tsdn(tsd)) — presumably the witness
 * machinery tolerates a NULL tsdn; confirm.
 */
JEMALLOC_ATTR(nonnull(1))
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
	int ret;
	tsd_t *tsd;
	size_t usize;
	void *result;

	assert(min_alignment != 0);

	if (unlikely(malloc_init())) {
		tsd = NULL;
		result = NULL;
		goto label_oom;
	}
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	if (size == 0)
		size = 1;

	/* Make sure that alignment is a large enough power of 2. */
	if (unlikely(((alignment - 1) & alignment) != 0
	    || (alignment < min_alignment))) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write("<jemalloc>: Error allocating "
			    "aligned memory: invalid alignment\n");
			abort();
		}
		result = NULL;
		ret = EINVAL;
		goto label_return;
	}

	usize = sa2u(size, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
		result = NULL;
		goto label_oom;
	}

	if (config_prof && opt_prof)
		result = imemalign_prof(tsd, alignment, usize);
	else
		result = ipalloc(tsd, usize, alignment, false);
	if (unlikely(result == NULL))
		goto label_oom;
	assert(((uintptr_t)result & (alignment - 1)) == ZU(0));

	*memptr = result;
	ret = 0;
label_return:
	if (config_stats && likely(result != NULL)) {
		assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof));
		*tsd_thread_allocatedp_get(tsd) += usize;
	}
	UTRACE(0, size, result);
	JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
	    false);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
label_oom:
	assert(result == NULL);
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error allocating aligned memory: "
		    "out of memory\n");
		abort();
	}
	ret = ENOMEM;
	witness_assert_lockless(tsd_tsdn(tsd));
	goto label_return;
}

/* posix_memalign(3) entry point; alignment must be >= sizeof(void *). */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret;

	ret = imemalign(memptr, alignment, size, sizeof(void *));

	return (ret);
}

/* C11 aligned_alloc() entry point; failure reported via errno. */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
		ret = NULL;
		set_errno(err);
	}

	return (ret);
}

/* calloc(3) entry point: overflow-checked num*size, zeroed allocation. */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size)
{
	void *ret;
	tsdn_t *tsdn;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		if (num == 0 || size == 0)
			num_size = 1;
		else
			num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
	    2))) && (num_size / size != num)))
		num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */

	if (likely(!malloc_slow)) {
		ret = ialloc_body(num_size, true, &tsdn, &usize, false);
		ialloc_post_check(ret, tsdn, usize, "calloc", true, false);
	} else {
		ret = ialloc_body(num_size, true, &tsdn, &usize, true);
		ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
		UTRACE(0, num_size, ret);
		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
	}

	return (ret);
}

/*
 * Reallocate a sampled (profiled) object; small targets are promoted to
 * LARGE_MINCLASS and recorded at their true usize.
 */
static void *
irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);

	return (p);
}

/*
 * Profiling wrapper around iralloc(): records the realloc with the profiler
 * (freeing old_ptr's tctx, registering the new one) or rolls back on failure.
 */
JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
{
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
	tctx = prof_alloc_prep(tsd, usize, prof_active, true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
	else
		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
	    old_tctx);

	return (p);
}

/*
 * Deallocation slow/fast-path helper used by je_free()/je_realloc():
 * updates profiling and stats, then hands the pointer to iqalloc().
 */
JEMALLOC_INLINE_C void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{
	size_t usize;
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	witness_assert_lockless(tsd_tsdn(tsd));

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	if (config_prof && opt_prof) {
		usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
		prof_free(tsd, ptr, usize);
	} else if (config_stats || config_valgrind)
		usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
	if (config_stats)
		*tsd_thread_deallocatedp_get(tsd) += usize;

	if (likely(!slow_path))
		iqalloc(tsd, ptr, tcache, false);
	else {
		/* Slow path additionally handles valgrind redzone teardown. */
		if (config_valgrind && unlikely(in_valgrind))
			rzsize = p2rz(tsd_tsdn(tsd), ptr);
		iqalloc(tsd, ptr, tcache, true);
		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
	}
}

/*
 * Sized deallocation helper (caller already knows usize); used by the
 * sdallocx() path.  Mirrors ifree() but calls isqalloc().
 */
JEMALLOC_INLINE_C void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
{
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	witness_assert_lockless(tsd_tsdn(tsd));

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	if (config_prof && opt_prof)
		prof_free(tsd, ptr, usize);
	if (config_stats)
		*tsd_thread_deallocatedp_get(tsd) += usize;
	if (config_valgrind && unlikely(in_valgrind))
		rzsize = p2rz(tsd_tsdn(tsd), ptr);
	isqalloc(tsd, ptr, usize, tcache, slow_path);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}

/*
 * realloc(3) entry point.  realloc(ptr, 0) frees ptr; realloc(NULL, size)
 * behaves like malloc(size).
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size)
{
	void *ret;
	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_usize = 0;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);

	if (unlikely(size == 0)) {
		if (ptr != NULL) {
			tsd_t *tsd;

			/* realloc(ptr, 0) is equivalent to free(ptr). */
			UTRACE(ptr, 0, 0);
			tsd = tsd_fetch();
			ifree(tsd, ptr, tcache_get(tsd, false), true);
			return (NULL);
		}
		size = 1;
	}

	if (likely(ptr != NULL)) {
		tsd_t *tsd;

		assert(malloc_initialized() || IS_INITIALIZER);
		malloc_thread_init();
		tsd = tsd_fetch();

		witness_assert_lockless(tsd_tsdn(tsd));

		old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
		if (config_valgrind && unlikely(in_valgrind)) {
			old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
			    u2rz(old_usize);
		}

		if (config_prof && opt_prof) {
			usize = s2u(size);
			ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
			    NULL : irealloc_prof(tsd, ptr, old_usize, usize);
		} else {
			if (config_stats || (config_valgrind &&
			    unlikely(in_valgrind)))
				usize = s2u(size);
			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
		}
		tsdn = tsd_tsdn(tsd);
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (likely(!malloc_slow))
			ret = ialloc_body(size, false, &tsdn, &usize, false);
		else
			ret = ialloc_body(size, false, &tsdn, &usize, true);
		assert(!tsdn_null(tsdn) || ret == NULL);
	}

	if (unlikely(ret == NULL)) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write("<jemalloc>: Error in realloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && likely(ret != NULL)) {
		tsd_t *tsd;

		assert(usize == isalloc(tsdn, ret, config_prof));
		tsd = tsdn_tsd(tsdn);
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr,
	    old_usize, old_rzsize, maybe, false);
	witness_assert_lockless(tsdn);
	return (ret);
}

/* free(3) entry point; free(NULL) is a no-op. */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (likely(ptr != NULL)) {
		tsd_t *tsd = tsd_fetch();
		witness_assert_lockless(tsd_tsdn(tsd));
		if (likely(!malloc_slow))
			ifree(tsd, ptr, tcache_get(tsd, false), false);
		else
			ifree(tsd, ptr, tcache_get(tsd, false), true);
		witness_assert_lockless(tsd_tsdn(tsd));
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_memalign(size_t alignment, size_t size)
{
	/*
	 * Legacy memalign(3) override: delegate to imemalign() (minimum
	 * alignment 1) and collapse any failure to a NULL return instead of
	 * an errno-style code.
	 */
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
		ret = NULL;
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_valloc(size_t size)
{
	/* Legacy valloc(3) override: PAGE-aligned variant of je_memalign(). */
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
		ret = NULL;
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
# endif

#ifdef CPU_COUNT
/*
 * To enable static linking with glibc, the libc specific malloc interface must
 * be implemented also, so none of glibc's malloc.o functions are added to the
 * link.
 */
#define	ALIAS(je_fn)	__attribute__((alias (#je_fn), used))
/* To force macro expansion of je_ prefix before stringification. */
#define	PREALIAS(je_fn)	ALIAS(je_fn)
void	*__libc_malloc(size_t size) PREALIAS(je_malloc);
void	__libc_free(void* ptr) PREALIAS(je_free);
void	*__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
void	*__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
void	*__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
void	*__libc_valloc(size_t size) PREALIAS(je_valloc);
int	__posix_memalign(void** r, size_t a, size_t s)
    PREALIAS(je_posix_memalign);
#undef PREALIAS
#undef ALIAS

#endif

#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

/*
 * Decode the MALLOCX_* flags of a mallocx()-family request.  On success,
 * fill in *usize (usable size), *alignment (0 when unspecified), *zero,
 * *tcache (NULL for MALLOCX_TCACHE_NONE, the thread's default cache when no
 * tcache flag is given), and *arena (NULL when unspecified), and return
 * false.  Return true when the usable size is 0/overflows HUGE_MAXCLASS or
 * the explicitly requested arena cannot be obtained.
 */
JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{

	if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
		*alignment = 0;
		*usize = s2u(size);
	} else {
		*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
		*usize = sa2u(size, *alignment);
	}
	if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
		return (true);
	*zero = MALLOCX_ZERO_GET(flags);
	if ((flags & MALLOCX_TCACHE_MASK) != 0) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			*tcache = NULL;
		else
			*tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		*tcache = tcache_get(tsd, true);
	if ((flags & MALLOCX_ARENA_MASK) != 0) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		*arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(*arena == NULL))
			return (true);
	} else
		*arena = NULL;
	return (false);
}

/*
 * Allocate usize bytes honoring already-decoded flags: aligned requests go
 * through ipalloct(), all others through iallocztm().
 */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena, bool slow_path)
{
	szind_t ind;

	if (unlikely(alignment != 0))
		return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
	ind = size2index(usize);
	assert(ind < NSIZES);
	return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena,
	    slow_path));
}

/*
 * Allocation path for a profiling-sampled request.  Small requests are
 * promoted to LARGE_MINCLASS so the sample can be tracked, then recorded at
 * their true usize via arena_prof_promoted().
 */
static void *
imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena, bool slow_path)
{
	void *p;

	if (usize <= SMALL_MAXCLASS) {
		assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
		    sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
		p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero,
		    tcache, arena, slow_path);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsdn, p, usize);
	} else {
		p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
		    slow_path);
	}

	return (p);
}

/*
 * mallocx() body when heap profiling is enabled.  A tctx of (uintptr_t)1U
 * means "not sampled"; larger values carry a real profiling context and
 * route through imallocx_prof_sample().  On failure the prepared profiling
 * state is rolled back.
 */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
{
	void *p;
	size_t alignment;
	bool zero;
	tcache_t *tcache;
	arena_t *arena;
	prof_tctx_t *tctx;

	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
	    &zero, &tcache, &arena)))
		return (NULL);
	tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
	if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
		p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero,
		    tcache, arena, slow_path);
	} else if ((uintptr_t)tctx > (uintptr_t)1U) {
		p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero,
		    tcache, arena, slow_path);
	} else
		p = NULL;
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, *usize, tctx);

	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
	return (p);
}

/* mallocx() body when heap profiling is disabled. */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
    bool slow_path)
{
	void *p;
	size_t alignment;
	bool zero;
	tcache_t *tcache;
	arena_t *arena;

	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
	    &zero, &tcache, &arena)))
		return (NULL);
	p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
	    arena, slow_path);
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
	return (p);
}

/*
 * Common body for je_mallocx().  flags == 0 takes a streamlined path.
 * This function guarantees that *tsdn is non-NULL on success.
 */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
    bool slow_path)
{
	tsd_t *tsd;

	if (slow_path && unlikely(malloc_init())) {
		*tsdn = NULL;
		return (NULL);
	}

	tsd = tsd_fetch();
	*tsdn = tsd_tsdn(tsd);
	witness_assert_lockless(tsd_tsdn(tsd));

	if (likely(flags == 0)) {
		szind_t ind = size2index(size);
		if (unlikely(ind >= NSIZES))
			return (NULL);
		/* *usize is only consumed by stats/prof/valgrind paths. */
		if (config_stats || (config_prof && opt_prof) || (slow_path &&
		    config_valgrind && unlikely(in_valgrind))) {
			*usize = index2size(ind);
			assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
		}

		if (config_prof && opt_prof) {
			return (ialloc_prof(tsd, *usize, ind, false,
			    slow_path));
		}

		return (ialloc(tsd, size, ind, false, slow_path));
	}

	if (config_prof && opt_prof)
		return (imallocx_prof(tsd, size, flags, usize, slow_path));

	return (imallocx_no_prof(tsd, size, flags, usize, slow_path));
}

/*
 * mallocx(3): allocate size bytes, honoring MALLOCX_* flags (alignment,
 * zeroing, tcache, arena selection).
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_mallocx(size_t size, int flags)
{
	tsdn_t *tsdn;
	void *p;
	size_t usize;

	assert(size != 0);

	if (likely(!malloc_slow)) {
		p = imallocx_body(size, flags, &tsdn, &usize, false);
		ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
	} else {
		p = imallocx_body(size, flags, &tsdn, &usize, true);
		ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
		UTRACE(0, size, p);
		JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
		    MALLOCX_ZERO_GET(flags));
	}

	return (p);
}

/*
 * Reallocation path for a profiling-sampled request; analogous to
 * imallocx_prof_sample().  tctx == NULL means the sample was vetoed.
 */
static void *
irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
		    zero, tcache, arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else {
		p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
		    tcache, arena);
	}

	return (p);
}

/*
 * rallocx() body when heap profiling is enabled: reallocate and migrate the
 * profiling context from old_ptr to the result via prof_realloc().
 */
JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
    size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
    arena_t *arena)
{
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
	tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
		    alignment, zero, tcache, arena, tctx);
	} else {
		p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
		    tcache, arena);
	}
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, false);
		return (NULL);
	}

	if (p == old_ptr && alignment != 0) {
		/*
		 * The allocation did not move, so it is possible that the size
		 * class is smaller than would guarantee the requested
		 * alignment, and that the alignment constraint was
		 * serendipitously satisfied.  Additionally, old_usize may not
		 * be the same as the current usize because of in-place large
		 * reallocation.  Therefore, query the actual value of usize.
		 */
		*usize = isalloc(tsd_tsdn(tsd), p, config_prof);
	}
	prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
	    old_usize, old_tctx);

	return (p);
}

/*
 * rallocx(3): reallocate ptr to size bytes, honoring MALLOCX_* flags.
 * Returns NULL (or aborts under opt_xmalloc) on failure; the original
 * allocation remains valid in that case.
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_rallocx(void *ptr, size_t size, int flags)
{
	void *p;
	tsd_t *tsd;
	size_t usize;
	size_t old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;
	arena_t *arena;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(size != 0);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));

	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(arena == NULL))
			goto label_oom;
	} else
		arena = NULL;

	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, true);

	old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
			goto label_oom;
		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
		    zero, tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
	} else {
		p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
		    tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
			usize = isalloc(tsd_tsdn(tsd), p, config_prof);
	}
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, p);
	JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr,
	    old_usize, old_rzsize, no, zero);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (p);
label_oom:
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
		abort();
	}
	UTRACE(ptr, size, 0);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (NULL);
}

JEMALLOC_ALWAYS_INLINE_C size_t
/*
 * Attempt to resize ptr in place toward a size derived from size+extra;
 * return the resulting usable size, or old_usize when ixalloc() could not
 * resize.
 */
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
	size_t usize;

	/* ixalloc() returns true when the in-place resize is impossible. */
	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
		return (old_usize);
	usize = isalloc(tsdn, ptr, config_prof);

	return (usize);
}

/* Sampled variant of ixallocx_helper(); tctx == NULL means sample vetoed. */
static size_t
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
{
	size_t usize;

	if (tctx == NULL)
		return (old_usize);
	usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
	    zero);

	return (usize);
}

/* xallocx() body when heap profiling is enabled. */
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
	size_t usize_max, usize;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
	/*
	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
	 * Therefore, compute its maximum possible value and use that in
	 * prof_alloc_prep() to decide whether to capture a backtrace.
	 * prof_realloc() will use the actual usize to decide whether to sample.
	 */
	if (alignment == 0) {
		usize_max = s2u(size+extra);
		assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
	} else {
		usize_max = sa2u(size+extra, alignment);
		if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
			/*
			 * usize_max is out of range, and chances are that
			 * allocation will fail, but use the maximum possible
			 * value and carry on with prof_alloc_prep(), just in
			 * case allocation succeeds.
			 */
			usize_max = HUGE_MAXCLASS;
		}
	}
	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);

	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
		    size, extra, alignment, zero, tctx);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (usize == old_usize) {
		/* Nothing changed; discard the prepared profiling state. */
		prof_alloc_rollback(tsd, tctx, false);
		return (usize);
	}
	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
	    old_tctx);

	return (usize);
}

/*
 * xallocx(3): resize ptr in place to at least size and at most size+extra
 * bytes; return the resulting usable size (old_usize when nothing changed).
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
	tsd_t *tsd;
	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;

	assert(ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));

	old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);

	/*
	 * The API explicitly absolves itself of protecting against (size +
	 * extra) numerical overflow, but we may need to clamp extra to avoid
	 * exceeding HUGE_MAXCLASS.
	 *
	 * Ordinarily, size limit checking is handled deeper down, but here we
	 * have to check as part of (size + extra) clamping, since we need the
	 * clamped value in the above helper functions.
	 */
	if (unlikely(size > HUGE_MAXCLASS)) {
		usize = old_usize;
		goto label_not_resized;
	}
	if (unlikely(HUGE_MAXCLASS - size < extra))
		extra = HUGE_MAXCLASS - size;

	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
		    alignment, zero);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (unlikely(usize == old_usize))
		goto label_not_resized;

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr,
	    old_usize, old_rzsize, no, zero);
label_not_resized:
	UTRACE(ptr, size, ptr);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (usize);
}

/* sallocx(3): return the usable size of the allocation at ptr. */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags)
{
	size_t usize;
	tsdn_t *tsdn;

	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	/* ivsalloc() additionally validates ptr when config_ivsalloc. */
	if (config_ivsalloc)
		usize = ivsalloc(tsdn, ptr, config_prof);
	else
		usize = isalloc(tsdn, ptr, config_prof);

	witness_assert_lockless(tsdn);
	return (usize);
}

/* dallocx(3): deallocate ptr, honoring MALLOCX_TCACHE flags. */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_dallocx(void *ptr, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	if (likely(!malloc_slow))
		ifree(tsd, ptr, tcache, false);
	else
		ifree(tsd, ptr, tcache, true);
	witness_assert_lockless(tsd_tsdn(tsd));
}

/*
 * Compute the usable size that the *allocx() functions would produce for
 * (size, flags), without allocating.
 */
JEMALLOC_ALWAYS_INLINE_C size_t
inallocx(tsdn_t *tsdn, size_t size, int flags)
{
	size_t usize;

	witness_assert_lockless(tsdn);

	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
		usize = s2u(size);
	else
		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
	witness_assert_lockless(tsdn);
	return (usize);
}

/*
 * sdallocx(3): sized deallocation; (size, flags) must resolve to the
 * allocation's actual usable size (asserted in debug builds).
 */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_sdallocx(void *ptr, size_t size, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);
	tsd = tsd_fetch();
	usize = inallocx(tsd_tsdn(tsd), size, flags);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));

	witness_assert_lockless(tsd_tsdn(tsd));
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	if (likely(!malloc_slow))
		isfree(tsd, ptr, usize, tcache, false);
	else
		isfree(tsd, ptr, usize, tcache, true);
	witness_assert_lockless(tsd_tsdn(tsd));
}

/*
 * nallocx(3): return the usable size a mallocx(size, flags) call would
 * yield, or 0 when the request is unsatisfiable.
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_nallocx(size_t size, int flags)
{
	size_t usize;
	tsdn_t *tsdn;

	assert(size != 0);

	if (unlikely(malloc_init()))
		return (0);

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	usize = inallocx(tsdn, size, flags);
	if (unlikely(usize > HUGE_MAXCLASS))
		return (0);

	witness_assert_lockless(tsdn);
	return (usize);
}

/* mallctl(3): introspect/control the allocator by dotted name. */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	int ret;
	tsd_t *tsd;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
}

/* mallctlnametomib(3): translate a dotted name into a MIB for later use. */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
	int ret;
	tsdn_t *tsdn;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);
	ret = ctl_nametomib(tsdn, name, mibp, miblenp);
	witness_assert_lockless(tsdn);
	return (ret);
}

/* mallctlbymib(3): mallctl() by precomputed MIB. */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	tsd_t *tsd;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
}

/*
 * malloc_stats_print(3): emit allocator statistics through write_cb (or the
 * default writer when write_cb is NULL, per stats_print()).
 */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{
	tsdn_t *tsdn;

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);
	stats_print(write_cb, cbopaque, opts);
	witness_assert_lockless(tsdn);
}

/*
 * malloc_usable_size(3): usable size of ptr, 0 for NULL.  With
 * config_ivsalloc, ptr is additionally validated via ivsalloc().
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
	size_t ret;
	tsdn_t *tsdn;

	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	if (config_ivsalloc)
		ret = ivsalloc(tsdn, ptr, config_prof);
	else
		ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);

	witness_assert_lockless(tsdn);
	return (ret);
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin compatibility functions.
2789a4bd5210SJason Evans */ 2790d0e79aa3SJason Evans 2791d0e79aa3SJason Evans #define ALLOCM_LG_ALIGN(la) (la) 2792d0e79aa3SJason Evans #define ALLOCM_ALIGN(a) (ffsl(a)-1) 2793d0e79aa3SJason Evans #define ALLOCM_ZERO ((int)0x40) 2794d0e79aa3SJason Evans #define ALLOCM_NO_MOVE ((int)0x80) 2795d0e79aa3SJason Evans 2796d0e79aa3SJason Evans #define ALLOCM_SUCCESS 0 2797d0e79aa3SJason Evans #define ALLOCM_ERR_OOM 1 2798d0e79aa3SJason Evans #define ALLOCM_ERR_NOT_MOVED 2 2799a4bd5210SJason Evans 2800a4bd5210SJason Evans int 2801a4bd5210SJason Evans je_allocm(void **ptr, size_t *rsize, size_t size, int flags) 2802a4bd5210SJason Evans { 2803a4bd5210SJason Evans void *p; 2804a4bd5210SJason Evans 2805a4bd5210SJason Evans assert(ptr != NULL); 2806a4bd5210SJason Evans 2807f921d10fSJason Evans p = je_mallocx(size, flags); 2808a4bd5210SJason Evans if (p == NULL) 2809a4bd5210SJason Evans return (ALLOCM_ERR_OOM); 2810f921d10fSJason Evans if (rsize != NULL) 28111f0a49e8SJason Evans *rsize = isalloc(tsdn_fetch(), p, config_prof); 2812f921d10fSJason Evans *ptr = p; 2813f921d10fSJason Evans return (ALLOCM_SUCCESS); 2814a4bd5210SJason Evans } 2815a4bd5210SJason Evans 2816a4bd5210SJason Evans int 2817a4bd5210SJason Evans je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) 2818a4bd5210SJason Evans { 2819f921d10fSJason Evans int ret; 2820a4bd5210SJason Evans bool no_move = flags & ALLOCM_NO_MOVE; 2821a4bd5210SJason Evans 2822a4bd5210SJason Evans assert(ptr != NULL); 2823a4bd5210SJason Evans assert(*ptr != NULL); 2824a4bd5210SJason Evans assert(size != 0); 2825a4bd5210SJason Evans assert(SIZE_T_MAX - size >= extra); 2826a4bd5210SJason Evans 2827f921d10fSJason Evans if (no_move) { 2828f921d10fSJason Evans size_t usize = je_xallocx(*ptr, size, extra, flags); 2829f921d10fSJason Evans ret = (usize >= size) ? 
ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED; 2830a4bd5210SJason Evans if (rsize != NULL) 2831a4bd5210SJason Evans *rsize = usize; 2832a4bd5210SJason Evans } else { 2833f921d10fSJason Evans void *p = je_rallocx(*ptr, size+extra, flags); 2834f921d10fSJason Evans if (p != NULL) { 2835f921d10fSJason Evans *ptr = p; 2836f921d10fSJason Evans ret = ALLOCM_SUCCESS; 2837f921d10fSJason Evans } else 2838f921d10fSJason Evans ret = ALLOCM_ERR_OOM; 2839f921d10fSJason Evans if (rsize != NULL) 28401f0a49e8SJason Evans *rsize = isalloc(tsdn_fetch(), *ptr, config_prof); 2841a4bd5210SJason Evans } 2842f921d10fSJason Evans return (ret); 2843a4bd5210SJason Evans } 2844a4bd5210SJason Evans 2845a4bd5210SJason Evans int 2846a4bd5210SJason Evans je_sallocm(const void *ptr, size_t *rsize, int flags) 2847a4bd5210SJason Evans { 2848a4bd5210SJason Evans 2849a4bd5210SJason Evans assert(rsize != NULL); 2850f921d10fSJason Evans *rsize = je_sallocx(ptr, flags); 2851a4bd5210SJason Evans return (ALLOCM_SUCCESS); 2852a4bd5210SJason Evans } 2853a4bd5210SJason Evans 2854a4bd5210SJason Evans int 2855a4bd5210SJason Evans je_dallocm(void *ptr, int flags) 2856a4bd5210SJason Evans { 2857a4bd5210SJason Evans 2858f921d10fSJason Evans je_dallocx(ptr, flags); 2859a4bd5210SJason Evans return (ALLOCM_SUCCESS); 2860a4bd5210SJason Evans } 2861a4bd5210SJason Evans 2862a4bd5210SJason Evans int 2863a4bd5210SJason Evans je_nallocm(size_t *rsize, size_t size, int flags) 2864a4bd5210SJason Evans { 2865a4bd5210SJason Evans size_t usize; 2866a4bd5210SJason Evans 2867f921d10fSJason Evans usize = je_nallocx(size, flags); 2868a4bd5210SJason Evans if (usize == 0) 2869a4bd5210SJason Evans return (ALLOCM_ERR_OOM); 2870a4bd5210SJason Evans if (rsize != NULL) 2871a4bd5210SJason Evans *rsize = usize; 2872a4bd5210SJason Evans return (ALLOCM_SUCCESS); 2873a4bd5210SJason Evans } 2874a4bd5210SJason Evans 2875d0e79aa3SJason Evans #undef ALLOCM_LG_ALIGN 2876d0e79aa3SJason Evans #undef ALLOCM_ALIGN 2877d0e79aa3SJason Evans #undef ALLOCM_ZERO 
2878d0e79aa3SJason Evans #undef ALLOCM_NO_MOVE 2879d0e79aa3SJason Evans 2880d0e79aa3SJason Evans #undef ALLOCM_SUCCESS 2881d0e79aa3SJason Evans #undef ALLOCM_ERR_OOM 2882d0e79aa3SJason Evans #undef ALLOCM_ERR_NOT_MOVED 2883d0e79aa3SJason Evans 2884a4bd5210SJason Evans /* 2885d0e79aa3SJason Evans * End compatibility functions. 2886a4bd5210SJason Evans */ 2887a4bd5210SJason Evans /******************************************************************************/ 2888a4bd5210SJason Evans /* 2889a4bd5210SJason Evans * The following functions are used by threading libraries for protection of 2890a4bd5210SJason Evans * malloc during fork(). 2891a4bd5210SJason Evans */ 2892a4bd5210SJason Evans 289382872ac0SJason Evans /* 289482872ac0SJason Evans * If an application creates a thread before doing any allocation in the main 289582872ac0SJason Evans * thread, then calls fork(2) in the main thread followed by memory allocation 289682872ac0SJason Evans * in the child process, a race can occur that results in deadlock within the 289782872ac0SJason Evans * child: the main thread may have forked while the created thread had 289882872ac0SJason Evans * partially initialized the allocator. Ordinarily jemalloc prevents 289982872ac0SJason Evans * fork/malloc races via the following functions it registers during 290082872ac0SJason Evans * initialization using pthread_atfork(), but of course that does no good if 290182872ac0SJason Evans * the allocator isn't fully initialized at fork time. The following library 2902d0e79aa3SJason Evans * constructor is a partial solution to this problem. It may still be possible 2903d0e79aa3SJason Evans * to trigger the deadlock described above, but doing so would involve forking 2904d0e79aa3SJason Evans * via a library constructor that runs before jemalloc's runs. 
290582872ac0SJason Evans */ 29061f0a49e8SJason Evans #ifndef JEMALLOC_JET 290782872ac0SJason Evans JEMALLOC_ATTR(constructor) 290882872ac0SJason Evans static void 290982872ac0SJason Evans jemalloc_constructor(void) 291082872ac0SJason Evans { 291182872ac0SJason Evans 291282872ac0SJason Evans malloc_init(); 291382872ac0SJason Evans } 29141f0a49e8SJason Evans #endif 291582872ac0SJason Evans 2916a4bd5210SJason Evans #ifndef JEMALLOC_MUTEX_INIT_CB 2917a4bd5210SJason Evans void 2918a4bd5210SJason Evans jemalloc_prefork(void) 2919a4bd5210SJason Evans #else 2920e722f8f8SJason Evans JEMALLOC_EXPORT void 2921a4bd5210SJason Evans _malloc_prefork(void) 2922a4bd5210SJason Evans #endif 2923a4bd5210SJason Evans { 29241f0a49e8SJason Evans tsd_t *tsd; 29251f0a49e8SJason Evans unsigned i, j, narenas; 29261f0a49e8SJason Evans arena_t *arena; 2927a4bd5210SJason Evans 292835dad073SJason Evans #ifdef JEMALLOC_MUTEX_INIT_CB 2929d0e79aa3SJason Evans if (!malloc_initialized()) 293035dad073SJason Evans return; 293135dad073SJason Evans #endif 2932d0e79aa3SJason Evans assert(malloc_initialized()); 293335dad073SJason Evans 29341f0a49e8SJason Evans tsd = tsd_fetch(); 2935df0d881dSJason Evans 29361f0a49e8SJason Evans narenas = narenas_total_get(); 29371f0a49e8SJason Evans 29381f0a49e8SJason Evans witness_prefork(tsd); 29391f0a49e8SJason Evans /* Acquire all mutexes in a safe order. 
*/ 29401f0a49e8SJason Evans ctl_prefork(tsd_tsdn(tsd)); 2941*8244f2aaSJason Evans tcache_prefork(tsd_tsdn(tsd)); 29421f0a49e8SJason Evans malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock); 29431f0a49e8SJason Evans prof_prefork0(tsd_tsdn(tsd)); 29441f0a49e8SJason Evans for (i = 0; i < 3; i++) { 29451f0a49e8SJason Evans for (j = 0; j < narenas; j++) { 29461f0a49e8SJason Evans if ((arena = arena_get(tsd_tsdn(tsd), j, false)) != 29471f0a49e8SJason Evans NULL) { 29481f0a49e8SJason Evans switch (i) { 29491f0a49e8SJason Evans case 0: 29501f0a49e8SJason Evans arena_prefork0(tsd_tsdn(tsd), arena); 29511f0a49e8SJason Evans break; 29521f0a49e8SJason Evans case 1: 29531f0a49e8SJason Evans arena_prefork1(tsd_tsdn(tsd), arena); 29541f0a49e8SJason Evans break; 29551f0a49e8SJason Evans case 2: 29561f0a49e8SJason Evans arena_prefork2(tsd_tsdn(tsd), arena); 29571f0a49e8SJason Evans break; 29581f0a49e8SJason Evans default: not_reached(); 2959a4bd5210SJason Evans } 29601f0a49e8SJason Evans } 29611f0a49e8SJason Evans } 29621f0a49e8SJason Evans } 29631f0a49e8SJason Evans base_prefork(tsd_tsdn(tsd)); 29641f0a49e8SJason Evans for (i = 0; i < narenas; i++) { 29651f0a49e8SJason Evans if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) 29661f0a49e8SJason Evans arena_prefork3(tsd_tsdn(tsd), arena); 29671f0a49e8SJason Evans } 29681f0a49e8SJason Evans prof_prefork1(tsd_tsdn(tsd)); 2969a4bd5210SJason Evans } 2970a4bd5210SJason Evans 2971a4bd5210SJason Evans #ifndef JEMALLOC_MUTEX_INIT_CB 2972a4bd5210SJason Evans void 2973a4bd5210SJason Evans jemalloc_postfork_parent(void) 2974a4bd5210SJason Evans #else 2975e722f8f8SJason Evans JEMALLOC_EXPORT void 2976a4bd5210SJason Evans _malloc_postfork(void) 2977a4bd5210SJason Evans #endif 2978a4bd5210SJason Evans { 29791f0a49e8SJason Evans tsd_t *tsd; 2980df0d881dSJason Evans unsigned i, narenas; 2981a4bd5210SJason Evans 298235dad073SJason Evans #ifdef JEMALLOC_MUTEX_INIT_CB 2983d0e79aa3SJason Evans if (!malloc_initialized()) 298435dad073SJason Evans 
return; 298535dad073SJason Evans #endif 2986d0e79aa3SJason Evans assert(malloc_initialized()); 298735dad073SJason Evans 29881f0a49e8SJason Evans tsd = tsd_fetch(); 29891f0a49e8SJason Evans 29901f0a49e8SJason Evans witness_postfork_parent(tsd); 2991a4bd5210SJason Evans /* Release all mutexes, now that fork() has completed. */ 29921f0a49e8SJason Evans base_postfork_parent(tsd_tsdn(tsd)); 2993df0d881dSJason Evans for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { 2994df0d881dSJason Evans arena_t *arena; 2995df0d881dSJason Evans 29961f0a49e8SJason Evans if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) 29971f0a49e8SJason Evans arena_postfork_parent(tsd_tsdn(tsd), arena); 2998a4bd5210SJason Evans } 29991f0a49e8SJason Evans prof_postfork_parent(tsd_tsdn(tsd)); 30001f0a49e8SJason Evans malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock); 3001*8244f2aaSJason Evans tcache_postfork_parent(tsd_tsdn(tsd)); 30021f0a49e8SJason Evans ctl_postfork_parent(tsd_tsdn(tsd)); 3003a4bd5210SJason Evans } 3004a4bd5210SJason Evans 3005a4bd5210SJason Evans void 3006a4bd5210SJason Evans jemalloc_postfork_child(void) 3007a4bd5210SJason Evans { 30081f0a49e8SJason Evans tsd_t *tsd; 3009df0d881dSJason Evans unsigned i, narenas; 3010a4bd5210SJason Evans 3011d0e79aa3SJason Evans assert(malloc_initialized()); 301235dad073SJason Evans 30131f0a49e8SJason Evans tsd = tsd_fetch(); 30141f0a49e8SJason Evans 30151f0a49e8SJason Evans witness_postfork_child(tsd); 3016a4bd5210SJason Evans /* Release all mutexes, now that fork() has completed. 
*/ 30171f0a49e8SJason Evans base_postfork_child(tsd_tsdn(tsd)); 3018df0d881dSJason Evans for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { 3019df0d881dSJason Evans arena_t *arena; 3020df0d881dSJason Evans 30211f0a49e8SJason Evans if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL) 30221f0a49e8SJason Evans arena_postfork_child(tsd_tsdn(tsd), arena); 3023a4bd5210SJason Evans } 30241f0a49e8SJason Evans prof_postfork_child(tsd_tsdn(tsd)); 30251f0a49e8SJason Evans malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock); 3026*8244f2aaSJason Evans tcache_postfork_child(tsd_tsdn(tsd)); 30271f0a49e8SJason Evans ctl_postfork_child(tsd_tsdn(tsd)); 3028a4bd5210SJason Evans } 3029a4bd5210SJason Evans 30308495e8b1SKonstantin Belousov void 30318495e8b1SKonstantin Belousov _malloc_first_thread(void) 30328495e8b1SKonstantin Belousov { 30338495e8b1SKonstantin Belousov 30348495e8b1SKonstantin Belousov (void)malloc_mutex_first_thread(); 30358495e8b1SKonstantin Belousov } 30368495e8b1SKonstantin Belousov 3037a4bd5210SJason Evans /******************************************************************************/ 3038