1a4bd5210SJason Evans #define JEMALLOC_C_ 2a4bd5210SJason Evans #include "jemalloc/internal/jemalloc_internal.h" 3a4bd5210SJason Evans 4a4bd5210SJason Evans /******************************************************************************/ 5a4bd5210SJason Evans /* Data. */ 6a4bd5210SJason Evans 74fdb8d2aSDimitry Andric /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */ 84fdb8d2aSDimitry Andric const char *__malloc_options_1_0 = NULL; 9a4bd5210SJason Evans __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0); 10a4bd5210SJason Evans 11a4bd5210SJason Evans /* Runtime configuration options. */ 12d0e79aa3SJason Evans const char *je_malloc_conf JEMALLOC_ATTR(weak); 1388ad2f8dSJason Evans bool opt_abort = 14a4bd5210SJason Evans #ifdef JEMALLOC_DEBUG 1588ad2f8dSJason Evans true 16a4bd5210SJason Evans #else 1788ad2f8dSJason Evans false 18a4bd5210SJason Evans #endif 1988ad2f8dSJason Evans ; 20d0e79aa3SJason Evans const char *opt_junk = 21d0e79aa3SJason Evans #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) 22d0e79aa3SJason Evans "true" 23d0e79aa3SJason Evans #else 24d0e79aa3SJason Evans "false" 25d0e79aa3SJason Evans #endif 26d0e79aa3SJason Evans ; 27d0e79aa3SJason Evans bool opt_junk_alloc = 2888ad2f8dSJason Evans #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) 2988ad2f8dSJason Evans true 30a4bd5210SJason Evans #else 3188ad2f8dSJason Evans false 32a4bd5210SJason Evans #endif 3388ad2f8dSJason Evans ; 34d0e79aa3SJason Evans bool opt_junk_free = 35d0e79aa3SJason Evans #if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL)) 36d0e79aa3SJason Evans true 37d0e79aa3SJason Evans #else 38d0e79aa3SJason Evans false 39d0e79aa3SJason Evans #endif 40d0e79aa3SJason Evans ; 41d0e79aa3SJason Evans 42a4bd5210SJason Evans size_t opt_quarantine = ZU(0); 43a4bd5210SJason Evans bool opt_redzone = false; 44a4bd5210SJason Evans bool opt_utrace = false; 45a4bd5210SJason Evans bool opt_xmalloc = false; 46a4bd5210SJason Evans bool opt_zero = false; 47df0d881dSJason 
Evans unsigned opt_narenas = 0; 48a4bd5210SJason Evans 49d0e79aa3SJason Evans /* Initialized to true if the process is running inside Valgrind. */ 50d0e79aa3SJason Evans bool in_valgrind; 51d0e79aa3SJason Evans 52a4bd5210SJason Evans unsigned ncpus; 53a4bd5210SJason Evans 54df0d881dSJason Evans /* Protects arenas initialization. */ 55d0e79aa3SJason Evans static malloc_mutex_t arenas_lock; 56d0e79aa3SJason Evans /* 57d0e79aa3SJason Evans * Arenas that are used to service external requests. Not all elements of the 58d0e79aa3SJason Evans * arenas array are necessarily used; arenas are created lazily as needed. 59d0e79aa3SJason Evans * 60d0e79aa3SJason Evans * arenas[0..narenas_auto) are used for automatic multiplexing of threads and 61d0e79aa3SJason Evans * arenas. arenas[narenas_auto..narenas_total) are only used if the application 62d0e79aa3SJason Evans * takes some action to create them and allocate from them. 63d0e79aa3SJason Evans */ 64df0d881dSJason Evans arena_t **arenas; 65df0d881dSJason Evans static unsigned narenas_total; /* Use narenas_total_*(). */ 66d0e79aa3SJason Evans static arena_t *a0; /* arenas[0]; read-only after initialization. */ 671f0a49e8SJason Evans unsigned narenas_auto; /* Read-only after initialization. */ 68a4bd5210SJason Evans 69d0e79aa3SJason Evans typedef enum { 70d0e79aa3SJason Evans malloc_init_uninitialized = 3, 71d0e79aa3SJason Evans malloc_init_a0_initialized = 2, 72d0e79aa3SJason Evans malloc_init_recursible = 1, 73d0e79aa3SJason Evans malloc_init_initialized = 0 /* Common case --> jnz. */ 74d0e79aa3SJason Evans } malloc_init_t; 75d0e79aa3SJason Evans static malloc_init_t malloc_init_state = malloc_init_uninitialized; 76d0e79aa3SJason Evans 771f0a49e8SJason Evans /* False should be the common case. Set to true to trigger initialization. */ 78df0d881dSJason Evans static bool malloc_slow = true; 79df0d881dSJason Evans 801f0a49e8SJason Evans /* When malloc_slow is true, set the corresponding bits for sanity check. 
*/ 81df0d881dSJason Evans enum { 82df0d881dSJason Evans flag_opt_junk_alloc = (1U), 83df0d881dSJason Evans flag_opt_junk_free = (1U << 1), 84df0d881dSJason Evans flag_opt_quarantine = (1U << 2), 85df0d881dSJason Evans flag_opt_zero = (1U << 3), 86df0d881dSJason Evans flag_opt_utrace = (1U << 4), 87df0d881dSJason Evans flag_in_valgrind = (1U << 5), 88df0d881dSJason Evans flag_opt_xmalloc = (1U << 6) 89df0d881dSJason Evans }; 90df0d881dSJason Evans static uint8_t malloc_slow_flags; 91df0d881dSJason Evans 92df0d881dSJason Evans /* Last entry for overflow detection only. */ 93d0e79aa3SJason Evans JEMALLOC_ALIGNED(CACHELINE) 94df0d881dSJason Evans const size_t index2size_tab[NSIZES+1] = { 95d0e79aa3SJason Evans #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ 96d0e79aa3SJason Evans ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)), 97d0e79aa3SJason Evans SIZE_CLASSES 98d0e79aa3SJason Evans #undef SC 99df0d881dSJason Evans ZU(0) 100d0e79aa3SJason Evans }; 101d0e79aa3SJason Evans 102d0e79aa3SJason Evans JEMALLOC_ALIGNED(CACHELINE) 103d0e79aa3SJason Evans const uint8_t size2index_tab[] = { 104d0e79aa3SJason Evans #if LG_TINY_MIN == 0 105d0e79aa3SJason Evans #warning "Dangerous LG_TINY_MIN" 106d0e79aa3SJason Evans #define S2B_0(i) i, 107d0e79aa3SJason Evans #elif LG_TINY_MIN == 1 108d0e79aa3SJason Evans #warning "Dangerous LG_TINY_MIN" 109d0e79aa3SJason Evans #define S2B_1(i) i, 110d0e79aa3SJason Evans #elif LG_TINY_MIN == 2 111d0e79aa3SJason Evans #warning "Dangerous LG_TINY_MIN" 112d0e79aa3SJason Evans #define S2B_2(i) i, 113d0e79aa3SJason Evans #elif LG_TINY_MIN == 3 114d0e79aa3SJason Evans #define S2B_3(i) i, 115d0e79aa3SJason Evans #elif LG_TINY_MIN == 4 116d0e79aa3SJason Evans #define S2B_4(i) i, 117d0e79aa3SJason Evans #elif LG_TINY_MIN == 5 118d0e79aa3SJason Evans #define S2B_5(i) i, 119d0e79aa3SJason Evans #elif LG_TINY_MIN == 6 120d0e79aa3SJason Evans #define S2B_6(i) i, 121d0e79aa3SJason Evans #elif LG_TINY_MIN == 7 122d0e79aa3SJason Evans #define 
S2B_7(i) i, 123d0e79aa3SJason Evans #elif LG_TINY_MIN == 8 124d0e79aa3SJason Evans #define S2B_8(i) i, 125d0e79aa3SJason Evans #elif LG_TINY_MIN == 9 126d0e79aa3SJason Evans #define S2B_9(i) i, 127d0e79aa3SJason Evans #elif LG_TINY_MIN == 10 128d0e79aa3SJason Evans #define S2B_10(i) i, 129d0e79aa3SJason Evans #elif LG_TINY_MIN == 11 130d0e79aa3SJason Evans #define S2B_11(i) i, 131d0e79aa3SJason Evans #else 132d0e79aa3SJason Evans #error "Unsupported LG_TINY_MIN" 133d0e79aa3SJason Evans #endif 134d0e79aa3SJason Evans #if LG_TINY_MIN < 1 135d0e79aa3SJason Evans #define S2B_1(i) S2B_0(i) S2B_0(i) 136d0e79aa3SJason Evans #endif 137d0e79aa3SJason Evans #if LG_TINY_MIN < 2 138d0e79aa3SJason Evans #define S2B_2(i) S2B_1(i) S2B_1(i) 139d0e79aa3SJason Evans #endif 140d0e79aa3SJason Evans #if LG_TINY_MIN < 3 141d0e79aa3SJason Evans #define S2B_3(i) S2B_2(i) S2B_2(i) 142d0e79aa3SJason Evans #endif 143d0e79aa3SJason Evans #if LG_TINY_MIN < 4 144d0e79aa3SJason Evans #define S2B_4(i) S2B_3(i) S2B_3(i) 145d0e79aa3SJason Evans #endif 146d0e79aa3SJason Evans #if LG_TINY_MIN < 5 147d0e79aa3SJason Evans #define S2B_5(i) S2B_4(i) S2B_4(i) 148d0e79aa3SJason Evans #endif 149d0e79aa3SJason Evans #if LG_TINY_MIN < 6 150d0e79aa3SJason Evans #define S2B_6(i) S2B_5(i) S2B_5(i) 151d0e79aa3SJason Evans #endif 152d0e79aa3SJason Evans #if LG_TINY_MIN < 7 153d0e79aa3SJason Evans #define S2B_7(i) S2B_6(i) S2B_6(i) 154d0e79aa3SJason Evans #endif 155d0e79aa3SJason Evans #if LG_TINY_MIN < 8 156d0e79aa3SJason Evans #define S2B_8(i) S2B_7(i) S2B_7(i) 157d0e79aa3SJason Evans #endif 158d0e79aa3SJason Evans #if LG_TINY_MIN < 9 159d0e79aa3SJason Evans #define S2B_9(i) S2B_8(i) S2B_8(i) 160d0e79aa3SJason Evans #endif 161d0e79aa3SJason Evans #if LG_TINY_MIN < 10 162d0e79aa3SJason Evans #define S2B_10(i) S2B_9(i) S2B_9(i) 163d0e79aa3SJason Evans #endif 164d0e79aa3SJason Evans #if LG_TINY_MIN < 11 165d0e79aa3SJason Evans #define S2B_11(i) S2B_10(i) S2B_10(i) 166d0e79aa3SJason Evans #endif 167d0e79aa3SJason 
Evans #define S2B_no(i) 168d0e79aa3SJason Evans #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ 169d0e79aa3SJason Evans S2B_##lg_delta_lookup(index) 170d0e79aa3SJason Evans SIZE_CLASSES 171d0e79aa3SJason Evans #undef S2B_3 172d0e79aa3SJason Evans #undef S2B_4 173d0e79aa3SJason Evans #undef S2B_5 174d0e79aa3SJason Evans #undef S2B_6 175d0e79aa3SJason Evans #undef S2B_7 176d0e79aa3SJason Evans #undef S2B_8 177d0e79aa3SJason Evans #undef S2B_9 178d0e79aa3SJason Evans #undef S2B_10 179d0e79aa3SJason Evans #undef S2B_11 180d0e79aa3SJason Evans #undef S2B_no 181d0e79aa3SJason Evans #undef SC 182d0e79aa3SJason Evans }; 183a4bd5210SJason Evans 184a4bd5210SJason Evans #ifdef JEMALLOC_THREADED_INIT 185a4bd5210SJason Evans /* Used to let the initializing thread recursively allocate. */ 186a4bd5210SJason Evans # define NO_INITIALIZER ((unsigned long)0) 187a4bd5210SJason Evans # define INITIALIZER pthread_self() 188a4bd5210SJason Evans # define IS_INITIALIZER (malloc_initializer == pthread_self()) 189a4bd5210SJason Evans static pthread_t malloc_initializer = NO_INITIALIZER; 190a4bd5210SJason Evans #else 191a4bd5210SJason Evans # define NO_INITIALIZER false 192a4bd5210SJason Evans # define INITIALIZER true 193a4bd5210SJason Evans # define IS_INITIALIZER malloc_initializer 194a4bd5210SJason Evans static bool malloc_initializer = NO_INITIALIZER; 195a4bd5210SJason Evans #endif 196a4bd5210SJason Evans 197a4bd5210SJason Evans /* Used to avoid initialization races. 
*/ 198e722f8f8SJason Evans #ifdef _WIN32 199d0e79aa3SJason Evans #if _WIN32_WINNT >= 0x0600 200d0e79aa3SJason Evans static malloc_mutex_t init_lock = SRWLOCK_INIT; 201d0e79aa3SJason Evans #else 202e722f8f8SJason Evans static malloc_mutex_t init_lock; 203536b3538SJason Evans static bool init_lock_initialized = false; 204e722f8f8SJason Evans 205e722f8f8SJason Evans JEMALLOC_ATTR(constructor) 206e722f8f8SJason Evans static void WINAPI 207e722f8f8SJason Evans _init_init_lock(void) 208e722f8f8SJason Evans { 209e722f8f8SJason Evans 210536b3538SJason Evans /* If another constructor in the same binary is using mallctl to 211536b3538SJason Evans * e.g. setup chunk hooks, it may end up running before this one, 212536b3538SJason Evans * and malloc_init_hard will crash trying to lock the uninitialized 213536b3538SJason Evans * lock. So we force an initialization of the lock in 214536b3538SJason Evans * malloc_init_hard as well. We don't try to care about atomicity 215536b3538SJason Evans * of the accessed to the init_lock_initialized boolean, since it 216536b3538SJason Evans * really only matters early in the process creation, before any 217536b3538SJason Evans * separate thread normally starts doing anything. 
*/ 218536b3538SJason Evans if (!init_lock_initialized) 2191f0a49e8SJason Evans malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT); 220536b3538SJason Evans init_lock_initialized = true; 221e722f8f8SJason Evans } 222e722f8f8SJason Evans 223e722f8f8SJason Evans #ifdef _MSC_VER 224e722f8f8SJason Evans # pragma section(".CRT$XCU", read) 225e722f8f8SJason Evans JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) 226e722f8f8SJason Evans static const void (WINAPI *init_init_lock)(void) = _init_init_lock; 227e722f8f8SJason Evans #endif 228d0e79aa3SJason Evans #endif 229e722f8f8SJason Evans #else 230a4bd5210SJason Evans static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; 231e722f8f8SJason Evans #endif 232a4bd5210SJason Evans 233a4bd5210SJason Evans typedef struct { 234a4bd5210SJason Evans void *p; /* Input pointer (as in realloc(p, s)). */ 235a4bd5210SJason Evans size_t s; /* Request size. */ 236a4bd5210SJason Evans void *r; /* Result pointer. */ 237a4bd5210SJason Evans } malloc_utrace_t; 238a4bd5210SJason Evans 239a4bd5210SJason Evans #ifdef JEMALLOC_UTRACE 240a4bd5210SJason Evans # define UTRACE(a, b, c) do { \ 241d0e79aa3SJason Evans if (unlikely(opt_utrace)) { \ 24288ad2f8dSJason Evans int utrace_serrno = errno; \ 243a4bd5210SJason Evans malloc_utrace_t ut; \ 244a4bd5210SJason Evans ut.p = (a); \ 245a4bd5210SJason Evans ut.s = (b); \ 246a4bd5210SJason Evans ut.r = (c); \ 247a4bd5210SJason Evans utrace(&ut, sizeof(ut)); \ 24888ad2f8dSJason Evans errno = utrace_serrno; \ 249a4bd5210SJason Evans } \ 250a4bd5210SJason Evans } while (0) 251a4bd5210SJason Evans #else 252a4bd5210SJason Evans # define UTRACE(a, b, c) 253a4bd5210SJason Evans #endif 254a4bd5210SJason Evans 255a4bd5210SJason Evans /******************************************************************************/ 256f921d10fSJason Evans /* 257f921d10fSJason Evans * Function prototypes for static functions that are referenced prior to 258f921d10fSJason Evans * definition. 
259f921d10fSJason Evans */ 260a4bd5210SJason Evans 261d0e79aa3SJason Evans static bool malloc_init_hard_a0(void); 262a4bd5210SJason Evans static bool malloc_init_hard(void); 263a4bd5210SJason Evans 264a4bd5210SJason Evans /******************************************************************************/ 265a4bd5210SJason Evans /* 266a4bd5210SJason Evans * Begin miscellaneous support functions. 267a4bd5210SJason Evans */ 268a4bd5210SJason Evans 269d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C bool 270d0e79aa3SJason Evans malloc_initialized(void) 271a4bd5210SJason Evans { 272a4bd5210SJason Evans 273d0e79aa3SJason Evans return (malloc_init_state == malloc_init_initialized); 274a4bd5210SJason Evans } 275d0e79aa3SJason Evans 276d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C void 277d0e79aa3SJason Evans malloc_thread_init(void) 278d0e79aa3SJason Evans { 279a4bd5210SJason Evans 280a4bd5210SJason Evans /* 281d0e79aa3SJason Evans * TSD initialization can't be safely done as a side effect of 282d0e79aa3SJason Evans * deallocation, because it is possible for a thread to do nothing but 283d0e79aa3SJason Evans * deallocate its TLS data via free(), in which case writing to TLS 284d0e79aa3SJason Evans * would cause write-after-free memory corruption. The quarantine 285d0e79aa3SJason Evans * facility *only* gets used as a side effect of deallocation, so make 286d0e79aa3SJason Evans * a best effort attempt at initializing its TSD by hooking all 287d0e79aa3SJason Evans * allocation events. 
288a4bd5210SJason Evans */ 289d0e79aa3SJason Evans if (config_fill && unlikely(opt_quarantine)) 290d0e79aa3SJason Evans quarantine_alloc_hook(); 291a4bd5210SJason Evans } 292a4bd5210SJason Evans 293d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C bool 294d0e79aa3SJason Evans malloc_init_a0(void) 295d0e79aa3SJason Evans { 296d0e79aa3SJason Evans 297d0e79aa3SJason Evans if (unlikely(malloc_init_state == malloc_init_uninitialized)) 298d0e79aa3SJason Evans return (malloc_init_hard_a0()); 299d0e79aa3SJason Evans return (false); 300d0e79aa3SJason Evans } 301d0e79aa3SJason Evans 302d0e79aa3SJason Evans JEMALLOC_ALWAYS_INLINE_C bool 303d0e79aa3SJason Evans malloc_init(void) 304d0e79aa3SJason Evans { 305d0e79aa3SJason Evans 306d0e79aa3SJason Evans if (unlikely(!malloc_initialized()) && malloc_init_hard()) 307d0e79aa3SJason Evans return (true); 308d0e79aa3SJason Evans malloc_thread_init(); 309d0e79aa3SJason Evans 310d0e79aa3SJason Evans return (false); 311d0e79aa3SJason Evans } 312d0e79aa3SJason Evans 313d0e79aa3SJason Evans /* 3141f0a49e8SJason Evans * The a0*() functions are used instead of i{d,}alloc() in situations that 315d0e79aa3SJason Evans * cannot tolerate TLS variable access. 
316d0e79aa3SJason Evans */ 317d0e79aa3SJason Evans 318d0e79aa3SJason Evans static void * 319d0e79aa3SJason Evans a0ialloc(size_t size, bool zero, bool is_metadata) 320d0e79aa3SJason Evans { 321d0e79aa3SJason Evans 322d0e79aa3SJason Evans if (unlikely(malloc_init_a0())) 323d0e79aa3SJason Evans return (NULL); 324d0e79aa3SJason Evans 3251f0a49e8SJason Evans return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL, 3261f0a49e8SJason Evans is_metadata, arena_get(TSDN_NULL, 0, true), true)); 327d0e79aa3SJason Evans } 328d0e79aa3SJason Evans 329d0e79aa3SJason Evans static void 330d0e79aa3SJason Evans a0idalloc(void *ptr, bool is_metadata) 331d0e79aa3SJason Evans { 332d0e79aa3SJason Evans 3331f0a49e8SJason Evans idalloctm(TSDN_NULL, ptr, false, is_metadata, true); 334d0e79aa3SJason Evans } 335d0e79aa3SJason Evans 336d0e79aa3SJason Evans void * 337d0e79aa3SJason Evans a0malloc(size_t size) 338d0e79aa3SJason Evans { 339d0e79aa3SJason Evans 340d0e79aa3SJason Evans return (a0ialloc(size, false, true)); 341d0e79aa3SJason Evans } 342d0e79aa3SJason Evans 343d0e79aa3SJason Evans void 344d0e79aa3SJason Evans a0dalloc(void *ptr) 345d0e79aa3SJason Evans { 346d0e79aa3SJason Evans 347d0e79aa3SJason Evans a0idalloc(ptr, true); 348d0e79aa3SJason Evans } 349d0e79aa3SJason Evans 350d0e79aa3SJason Evans /* 351d0e79aa3SJason Evans * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-senstive 352d0e79aa3SJason Evans * situations that cannot tolerate TLS variable access (TLS allocation and very 353d0e79aa3SJason Evans * early internal data structure initialization). 
354d0e79aa3SJason Evans */ 355d0e79aa3SJason Evans 356d0e79aa3SJason Evans void * 357d0e79aa3SJason Evans bootstrap_malloc(size_t size) 358d0e79aa3SJason Evans { 359d0e79aa3SJason Evans 360d0e79aa3SJason Evans if (unlikely(size == 0)) 361d0e79aa3SJason Evans size = 1; 362d0e79aa3SJason Evans 363d0e79aa3SJason Evans return (a0ialloc(size, false, false)); 364d0e79aa3SJason Evans } 365d0e79aa3SJason Evans 366d0e79aa3SJason Evans void * 367d0e79aa3SJason Evans bootstrap_calloc(size_t num, size_t size) 368d0e79aa3SJason Evans { 369d0e79aa3SJason Evans size_t num_size; 370d0e79aa3SJason Evans 371d0e79aa3SJason Evans num_size = num * size; 372d0e79aa3SJason Evans if (unlikely(num_size == 0)) { 373d0e79aa3SJason Evans assert(num == 0 || size == 0); 374d0e79aa3SJason Evans num_size = 1; 375d0e79aa3SJason Evans } 376d0e79aa3SJason Evans 377d0e79aa3SJason Evans return (a0ialloc(num_size, true, false)); 378d0e79aa3SJason Evans } 379d0e79aa3SJason Evans 380d0e79aa3SJason Evans void 381d0e79aa3SJason Evans bootstrap_free(void *ptr) 382d0e79aa3SJason Evans { 383d0e79aa3SJason Evans 384d0e79aa3SJason Evans if (unlikely(ptr == NULL)) 385d0e79aa3SJason Evans return; 386d0e79aa3SJason Evans 387d0e79aa3SJason Evans a0idalloc(ptr, false); 388d0e79aa3SJason Evans } 389d0e79aa3SJason Evans 390df0d881dSJason Evans static void 391df0d881dSJason Evans arena_set(unsigned ind, arena_t *arena) 392df0d881dSJason Evans { 393df0d881dSJason Evans 394df0d881dSJason Evans atomic_write_p((void **)&arenas[ind], arena); 395df0d881dSJason Evans } 396df0d881dSJason Evans 397df0d881dSJason Evans static void 398df0d881dSJason Evans narenas_total_set(unsigned narenas) 399df0d881dSJason Evans { 400df0d881dSJason Evans 401df0d881dSJason Evans atomic_write_u(&narenas_total, narenas); 402df0d881dSJason Evans } 403df0d881dSJason Evans 404df0d881dSJason Evans static void 405df0d881dSJason Evans narenas_total_inc(void) 406df0d881dSJason Evans { 407df0d881dSJason Evans 408df0d881dSJason Evans 
atomic_add_u(&narenas_total, 1); 409df0d881dSJason Evans } 410df0d881dSJason Evans 411df0d881dSJason Evans unsigned 412df0d881dSJason Evans narenas_total_get(void) 413df0d881dSJason Evans { 414df0d881dSJason Evans 415df0d881dSJason Evans return (atomic_read_u(&narenas_total)); 416df0d881dSJason Evans } 417df0d881dSJason Evans 418d0e79aa3SJason Evans /* Create a new arena and insert it into the arenas array at index ind. */ 419d0e79aa3SJason Evans static arena_t * 4201f0a49e8SJason Evans arena_init_locked(tsdn_t *tsdn, unsigned ind) 421d0e79aa3SJason Evans { 422d0e79aa3SJason Evans arena_t *arena; 423d0e79aa3SJason Evans 424df0d881dSJason Evans assert(ind <= narenas_total_get()); 425d0e79aa3SJason Evans if (ind > MALLOCX_ARENA_MAX) 426d0e79aa3SJason Evans return (NULL); 427df0d881dSJason Evans if (ind == narenas_total_get()) 428df0d881dSJason Evans narenas_total_inc(); 429d0e79aa3SJason Evans 430d0e79aa3SJason Evans /* 431d0e79aa3SJason Evans * Another thread may have already initialized arenas[ind] if it's an 432d0e79aa3SJason Evans * auto arena. 433d0e79aa3SJason Evans */ 4341f0a49e8SJason Evans arena = arena_get(tsdn, ind, false); 435d0e79aa3SJason Evans if (arena != NULL) { 436d0e79aa3SJason Evans assert(ind < narenas_auto); 437d0e79aa3SJason Evans return (arena); 438d0e79aa3SJason Evans } 439d0e79aa3SJason Evans 440d0e79aa3SJason Evans /* Actually initialize the arena. 
*/ 4411f0a49e8SJason Evans arena = arena_new(tsdn, ind); 442df0d881dSJason Evans arena_set(ind, arena); 443d0e79aa3SJason Evans return (arena); 444d0e79aa3SJason Evans } 445d0e79aa3SJason Evans 446d0e79aa3SJason Evans arena_t * 4471f0a49e8SJason Evans arena_init(tsdn_t *tsdn, unsigned ind) 448d0e79aa3SJason Evans { 449d0e79aa3SJason Evans arena_t *arena; 450d0e79aa3SJason Evans 4511f0a49e8SJason Evans malloc_mutex_lock(tsdn, &arenas_lock); 4521f0a49e8SJason Evans arena = arena_init_locked(tsdn, ind); 4531f0a49e8SJason Evans malloc_mutex_unlock(tsdn, &arenas_lock); 454d0e79aa3SJason Evans return (arena); 455d0e79aa3SJason Evans } 456d0e79aa3SJason Evans 457d0e79aa3SJason Evans static void 4581f0a49e8SJason Evans arena_bind(tsd_t *tsd, unsigned ind, bool internal) 459d0e79aa3SJason Evans { 460df0d881dSJason Evans arena_t *arena; 461d0e79aa3SJason Evans 4621f0a49e8SJason Evans arena = arena_get(tsd_tsdn(tsd), ind, false); 4631f0a49e8SJason Evans arena_nthreads_inc(arena, internal); 464df0d881dSJason Evans 4651f0a49e8SJason Evans if (tsd_nominal(tsd)) { 4661f0a49e8SJason Evans if (internal) 4671f0a49e8SJason Evans tsd_iarena_set(tsd, arena); 4681f0a49e8SJason Evans else 469df0d881dSJason Evans tsd_arena_set(tsd, arena); 470d0e79aa3SJason Evans } 4711f0a49e8SJason Evans } 472d0e79aa3SJason Evans 473d0e79aa3SJason Evans void 474d0e79aa3SJason Evans arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind) 475d0e79aa3SJason Evans { 476d0e79aa3SJason Evans arena_t *oldarena, *newarena; 477d0e79aa3SJason Evans 4781f0a49e8SJason Evans oldarena = arena_get(tsd_tsdn(tsd), oldind, false); 4791f0a49e8SJason Evans newarena = arena_get(tsd_tsdn(tsd), newind, false); 4801f0a49e8SJason Evans arena_nthreads_dec(oldarena, false); 4811f0a49e8SJason Evans arena_nthreads_inc(newarena, false); 482d0e79aa3SJason Evans tsd_arena_set(tsd, newarena); 483d0e79aa3SJason Evans } 484d0e79aa3SJason Evans 485d0e79aa3SJason Evans static void 4861f0a49e8SJason Evans arena_unbind(tsd_t *tsd, 
unsigned ind, bool internal) 487d0e79aa3SJason Evans { 488d0e79aa3SJason Evans arena_t *arena; 489d0e79aa3SJason Evans 4901f0a49e8SJason Evans arena = arena_get(tsd_tsdn(tsd), ind, false); 4911f0a49e8SJason Evans arena_nthreads_dec(arena, internal); 4921f0a49e8SJason Evans if (internal) 4931f0a49e8SJason Evans tsd_iarena_set(tsd, NULL); 4941f0a49e8SJason Evans else 495d0e79aa3SJason Evans tsd_arena_set(tsd, NULL); 496d0e79aa3SJason Evans } 497d0e79aa3SJason Evans 498df0d881dSJason Evans arena_tdata_t * 499df0d881dSJason Evans arena_tdata_get_hard(tsd_t *tsd, unsigned ind) 500d0e79aa3SJason Evans { 501df0d881dSJason Evans arena_tdata_t *tdata, *arenas_tdata_old; 502df0d881dSJason Evans arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd); 503df0d881dSJason Evans unsigned narenas_tdata_old, i; 504df0d881dSJason Evans unsigned narenas_tdata = tsd_narenas_tdata_get(tsd); 505d0e79aa3SJason Evans unsigned narenas_actual = narenas_total_get(); 506d0e79aa3SJason Evans 507d0e79aa3SJason Evans /* 508df0d881dSJason Evans * Dissociate old tdata array (and set up for deallocation upon return) 509df0d881dSJason Evans * if it's too small. 510d0e79aa3SJason Evans */ 511df0d881dSJason Evans if (arenas_tdata != NULL && narenas_tdata < narenas_actual) { 512df0d881dSJason Evans arenas_tdata_old = arenas_tdata; 513df0d881dSJason Evans narenas_tdata_old = narenas_tdata; 514df0d881dSJason Evans arenas_tdata = NULL; 515df0d881dSJason Evans narenas_tdata = 0; 516df0d881dSJason Evans tsd_arenas_tdata_set(tsd, arenas_tdata); 517df0d881dSJason Evans tsd_narenas_tdata_set(tsd, narenas_tdata); 518df0d881dSJason Evans } else { 519df0d881dSJason Evans arenas_tdata_old = NULL; 520df0d881dSJason Evans narenas_tdata_old = 0; 521d0e79aa3SJason Evans } 522df0d881dSJason Evans 523df0d881dSJason Evans /* Allocate tdata array if it's missing. 
*/ 524df0d881dSJason Evans if (arenas_tdata == NULL) { 525df0d881dSJason Evans bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd); 526df0d881dSJason Evans narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1; 527df0d881dSJason Evans 528df0d881dSJason Evans if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) { 529df0d881dSJason Evans *arenas_tdata_bypassp = true; 530df0d881dSJason Evans arenas_tdata = (arena_tdata_t *)a0malloc( 531df0d881dSJason Evans sizeof(arena_tdata_t) * narenas_tdata); 532df0d881dSJason Evans *arenas_tdata_bypassp = false; 533df0d881dSJason Evans } 534df0d881dSJason Evans if (arenas_tdata == NULL) { 535df0d881dSJason Evans tdata = NULL; 536df0d881dSJason Evans goto label_return; 537df0d881dSJason Evans } 538df0d881dSJason Evans assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp); 539df0d881dSJason Evans tsd_arenas_tdata_set(tsd, arenas_tdata); 540df0d881dSJason Evans tsd_narenas_tdata_set(tsd, narenas_tdata); 541d0e79aa3SJason Evans } 542d0e79aa3SJason Evans 543d0e79aa3SJason Evans /* 544df0d881dSJason Evans * Copy to tdata array. It's possible that the actual number of arenas 545df0d881dSJason Evans * has increased since narenas_total_get() was called above, but that 546df0d881dSJason Evans * causes no correctness issues unless two threads concurrently execute 547df0d881dSJason Evans * the arenas.extend mallctl, which we trust mallctl synchronization to 548d0e79aa3SJason Evans * prevent. 549d0e79aa3SJason Evans */ 550df0d881dSJason Evans 551df0d881dSJason Evans /* Copy/initialize tickers. 
*/ 552df0d881dSJason Evans for (i = 0; i < narenas_actual; i++) { 553df0d881dSJason Evans if (i < narenas_tdata_old) { 554df0d881dSJason Evans ticker_copy(&arenas_tdata[i].decay_ticker, 555df0d881dSJason Evans &arenas_tdata_old[i].decay_ticker); 556df0d881dSJason Evans } else { 557df0d881dSJason Evans ticker_init(&arenas_tdata[i].decay_ticker, 558df0d881dSJason Evans DECAY_NTICKS_PER_UPDATE); 559df0d881dSJason Evans } 560df0d881dSJason Evans } 561df0d881dSJason Evans if (narenas_tdata > narenas_actual) { 562df0d881dSJason Evans memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t) 563df0d881dSJason Evans * (narenas_tdata - narenas_actual)); 564d0e79aa3SJason Evans } 565d0e79aa3SJason Evans 566df0d881dSJason Evans /* Read the refreshed tdata array. */ 567df0d881dSJason Evans tdata = &arenas_tdata[ind]; 568df0d881dSJason Evans label_return: 569df0d881dSJason Evans if (arenas_tdata_old != NULL) 570df0d881dSJason Evans a0dalloc(arenas_tdata_old); 571df0d881dSJason Evans return (tdata); 572d0e79aa3SJason Evans } 573d0e79aa3SJason Evans 574d0e79aa3SJason Evans /* Slow path, called only by arena_choose(). */ 575d0e79aa3SJason Evans arena_t * 5761f0a49e8SJason Evans arena_choose_hard(tsd_t *tsd, bool internal) 577a4bd5210SJason Evans { 5781f0a49e8SJason Evans arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL); 579a4bd5210SJason Evans 58082872ac0SJason Evans if (narenas_auto > 1) { 5811f0a49e8SJason Evans unsigned i, j, choose[2], first_null; 582a4bd5210SJason Evans 5831f0a49e8SJason Evans /* 5841f0a49e8SJason Evans * Determine binding for both non-internal and internal 5851f0a49e8SJason Evans * allocation. 5861f0a49e8SJason Evans * 5871f0a49e8SJason Evans * choose[0]: For application allocation. 5881f0a49e8SJason Evans * choose[1]: For internal metadata allocation. 
5891f0a49e8SJason Evans */ 5901f0a49e8SJason Evans 5911f0a49e8SJason Evans for (j = 0; j < 2; j++) 5921f0a49e8SJason Evans choose[j] = 0; 5931f0a49e8SJason Evans 59482872ac0SJason Evans first_null = narenas_auto; 5951f0a49e8SJason Evans malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock); 5961f0a49e8SJason Evans assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL); 59782872ac0SJason Evans for (i = 1; i < narenas_auto; i++) { 5981f0a49e8SJason Evans if (arena_get(tsd_tsdn(tsd), i, false) != NULL) { 599a4bd5210SJason Evans /* 600a4bd5210SJason Evans * Choose the first arena that has the lowest 601a4bd5210SJason Evans * number of threads assigned to it. 602a4bd5210SJason Evans */ 6031f0a49e8SJason Evans for (j = 0; j < 2; j++) { 6041f0a49e8SJason Evans if (arena_nthreads_get(arena_get( 6051f0a49e8SJason Evans tsd_tsdn(tsd), i, false), !!j) < 6061f0a49e8SJason Evans arena_nthreads_get(arena_get( 6071f0a49e8SJason Evans tsd_tsdn(tsd), choose[j], false), 6081f0a49e8SJason Evans !!j)) 6091f0a49e8SJason Evans choose[j] = i; 6101f0a49e8SJason Evans } 61182872ac0SJason Evans } else if (first_null == narenas_auto) { 612a4bd5210SJason Evans /* 613a4bd5210SJason Evans * Record the index of the first uninitialized 614a4bd5210SJason Evans * arena, in case all extant arenas are in use. 615a4bd5210SJason Evans * 616a4bd5210SJason Evans * NB: It is possible for there to be 617a4bd5210SJason Evans * discontinuities in terms of initialized 618a4bd5210SJason Evans * versus uninitialized arenas, due to the 619a4bd5210SJason Evans * "thread.arena" mallctl. 
620a4bd5210SJason Evans */ 621a4bd5210SJason Evans first_null = i; 622a4bd5210SJason Evans } 623a4bd5210SJason Evans } 624a4bd5210SJason Evans 6251f0a49e8SJason Evans for (j = 0; j < 2; j++) { 6261f0a49e8SJason Evans if (arena_nthreads_get(arena_get(tsd_tsdn(tsd), 6271f0a49e8SJason Evans choose[j], false), !!j) == 0 || first_null == 6281f0a49e8SJason Evans narenas_auto) { 629a4bd5210SJason Evans /* 6301f0a49e8SJason Evans * Use an unloaded arena, or the least loaded 6311f0a49e8SJason Evans * arena if all arenas are already initialized. 632a4bd5210SJason Evans */ 6331f0a49e8SJason Evans if (!!j == internal) { 6341f0a49e8SJason Evans ret = arena_get(tsd_tsdn(tsd), 6351f0a49e8SJason Evans choose[j], false); 6361f0a49e8SJason Evans } 637a4bd5210SJason Evans } else { 6381f0a49e8SJason Evans arena_t *arena; 6391f0a49e8SJason Evans 640a4bd5210SJason Evans /* Initialize a new arena. */ 6411f0a49e8SJason Evans choose[j] = first_null; 6421f0a49e8SJason Evans arena = arena_init_locked(tsd_tsdn(tsd), 6431f0a49e8SJason Evans choose[j]); 6441f0a49e8SJason Evans if (arena == NULL) { 6451f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), 6461f0a49e8SJason Evans &arenas_lock); 647d0e79aa3SJason Evans return (NULL); 648a4bd5210SJason Evans } 6491f0a49e8SJason Evans if (!!j == internal) 6501f0a49e8SJason Evans ret = arena; 651d0e79aa3SJason Evans } 6521f0a49e8SJason Evans arena_bind(tsd, choose[j], !!j); 6531f0a49e8SJason Evans } 6541f0a49e8SJason Evans malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock); 655a4bd5210SJason Evans } else { 6561f0a49e8SJason Evans ret = arena_get(tsd_tsdn(tsd), 0, false); 6571f0a49e8SJason Evans arena_bind(tsd, 0, false); 6581f0a49e8SJason Evans arena_bind(tsd, 0, true); 659a4bd5210SJason Evans } 660a4bd5210SJason Evans 661a4bd5210SJason Evans return (ret); 662a4bd5210SJason Evans } 663a4bd5210SJason Evans 664d0e79aa3SJason Evans void 665d0e79aa3SJason Evans thread_allocated_cleanup(tsd_t *tsd) 666d0e79aa3SJason Evans { 667d0e79aa3SJason Evans 
668d0e79aa3SJason Evans /* Do nothing. */ 669d0e79aa3SJason Evans } 670d0e79aa3SJason Evans 671d0e79aa3SJason Evans void 672d0e79aa3SJason Evans thread_deallocated_cleanup(tsd_t *tsd) 673d0e79aa3SJason Evans { 674d0e79aa3SJason Evans 675d0e79aa3SJason Evans /* Do nothing. */ 676d0e79aa3SJason Evans } 677d0e79aa3SJason Evans 678d0e79aa3SJason Evans void 6791f0a49e8SJason Evans iarena_cleanup(tsd_t *tsd) 6801f0a49e8SJason Evans { 6811f0a49e8SJason Evans arena_t *iarena; 6821f0a49e8SJason Evans 6831f0a49e8SJason Evans iarena = tsd_iarena_get(tsd); 6841f0a49e8SJason Evans if (iarena != NULL) 6851f0a49e8SJason Evans arena_unbind(tsd, iarena->ind, true); 6861f0a49e8SJason Evans } 6871f0a49e8SJason Evans 6881f0a49e8SJason Evans void 689d0e79aa3SJason Evans arena_cleanup(tsd_t *tsd) 690d0e79aa3SJason Evans { 691d0e79aa3SJason Evans arena_t *arena; 692d0e79aa3SJason Evans 693d0e79aa3SJason Evans arena = tsd_arena_get(tsd); 694d0e79aa3SJason Evans if (arena != NULL) 6951f0a49e8SJason Evans arena_unbind(tsd, arena->ind, false); 696d0e79aa3SJason Evans } 697d0e79aa3SJason Evans 698d0e79aa3SJason Evans void 699df0d881dSJason Evans arenas_tdata_cleanup(tsd_t *tsd) 700d0e79aa3SJason Evans { 701df0d881dSJason Evans arena_tdata_t *arenas_tdata; 702d0e79aa3SJason Evans 703df0d881dSJason Evans /* Prevent tsd->arenas_tdata from being (re)created. */ 704df0d881dSJason Evans *tsd_arenas_tdata_bypassp_get(tsd) = true; 705df0d881dSJason Evans 706df0d881dSJason Evans arenas_tdata = tsd_arenas_tdata_get(tsd); 707df0d881dSJason Evans if (arenas_tdata != NULL) { 708df0d881dSJason Evans tsd_arenas_tdata_set(tsd, NULL); 709df0d881dSJason Evans a0dalloc(arenas_tdata); 710d0e79aa3SJason Evans } 711536b3538SJason Evans } 712d0e79aa3SJason Evans 713d0e79aa3SJason Evans void 714df0d881dSJason Evans narenas_tdata_cleanup(tsd_t *tsd) 715d0e79aa3SJason Evans { 716d0e79aa3SJason Evans 717d0e79aa3SJason Evans /* Do nothing. 
*/ 718d0e79aa3SJason Evans } 719d0e79aa3SJason Evans 720d0e79aa3SJason Evans void 721df0d881dSJason Evans arenas_tdata_bypass_cleanup(tsd_t *tsd) 722d0e79aa3SJason Evans { 723d0e79aa3SJason Evans 724d0e79aa3SJason Evans /* Do nothing. */ 725d0e79aa3SJason Evans } 726d0e79aa3SJason Evans 727a4bd5210SJason Evans static void 728a4bd5210SJason Evans stats_print_atexit(void) 729a4bd5210SJason Evans { 730a4bd5210SJason Evans 731a4bd5210SJason Evans if (config_tcache && config_stats) { 7321f0a49e8SJason Evans tsdn_t *tsdn; 73382872ac0SJason Evans unsigned narenas, i; 734a4bd5210SJason Evans 7351f0a49e8SJason Evans tsdn = tsdn_fetch(); 7361f0a49e8SJason Evans 737a4bd5210SJason Evans /* 738a4bd5210SJason Evans * Merge stats from extant threads. This is racy, since 739a4bd5210SJason Evans * individual threads do not lock when recording tcache stats 740a4bd5210SJason Evans * events. As a consequence, the final stats may be slightly 741a4bd5210SJason Evans * out of date by the time they are reported, if other threads 742a4bd5210SJason Evans * continue to allocate. 743a4bd5210SJason Evans */ 74482872ac0SJason Evans for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { 7451f0a49e8SJason Evans arena_t *arena = arena_get(tsdn, i, false); 746a4bd5210SJason Evans if (arena != NULL) { 747a4bd5210SJason Evans tcache_t *tcache; 748a4bd5210SJason Evans 749a4bd5210SJason Evans /* 750a4bd5210SJason Evans * tcache_stats_merge() locks bins, so if any 751a4bd5210SJason Evans * code is introduced that acquires both arena 752a4bd5210SJason Evans * and bin locks in the opposite order, 753a4bd5210SJason Evans * deadlocks may result. 
754a4bd5210SJason Evans */ 7551f0a49e8SJason Evans malloc_mutex_lock(tsdn, &arena->lock); 756a4bd5210SJason Evans ql_foreach(tcache, &arena->tcache_ql, link) { 7571f0a49e8SJason Evans tcache_stats_merge(tsdn, tcache, arena); 758a4bd5210SJason Evans } 7591f0a49e8SJason Evans malloc_mutex_unlock(tsdn, &arena->lock); 760a4bd5210SJason Evans } 761a4bd5210SJason Evans } 762a4bd5210SJason Evans } 763a4bd5210SJason Evans je_malloc_stats_print(NULL, NULL, NULL); 764a4bd5210SJason Evans } 765a4bd5210SJason Evans 766a4bd5210SJason Evans /* 767a4bd5210SJason Evans * End miscellaneous support functions. 768a4bd5210SJason Evans */ 769a4bd5210SJason Evans /******************************************************************************/ 770a4bd5210SJason Evans /* 771a4bd5210SJason Evans * Begin initialization functions. 772a4bd5210SJason Evans */ 773a4bd5210SJason Evans 774d0e79aa3SJason Evans #ifndef JEMALLOC_HAVE_SECURE_GETENV 775d0e79aa3SJason Evans static char * 776d0e79aa3SJason Evans secure_getenv(const char *name) 777d0e79aa3SJason Evans { 778d0e79aa3SJason Evans 779d0e79aa3SJason Evans # ifdef JEMALLOC_HAVE_ISSETUGID 780d0e79aa3SJason Evans if (issetugid() != 0) 781d0e79aa3SJason Evans return (NULL); 782d0e79aa3SJason Evans # endif 783d0e79aa3SJason Evans return (getenv(name)); 784d0e79aa3SJason Evans } 785d0e79aa3SJason Evans #endif 786d0e79aa3SJason Evans 787a4bd5210SJason Evans static unsigned 788a4bd5210SJason Evans malloc_ncpus(void) 789a4bd5210SJason Evans { 790a4bd5210SJason Evans long result; 791a4bd5210SJason Evans 792e722f8f8SJason Evans #ifdef _WIN32 793e722f8f8SJason Evans SYSTEM_INFO si; 794e722f8f8SJason Evans GetSystemInfo(&si); 795e722f8f8SJason Evans result = si.dwNumberOfProcessors; 796e722f8f8SJason Evans #else 797a4bd5210SJason Evans result = sysconf(_SC_NPROCESSORS_ONLN); 79882872ac0SJason Evans #endif 799f921d10fSJason Evans return ((result == -1) ? 
1 : (unsigned)result); 800a4bd5210SJason Evans } 801a4bd5210SJason Evans 802a4bd5210SJason Evans static bool 803a4bd5210SJason Evans malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, 804a4bd5210SJason Evans char const **v_p, size_t *vlen_p) 805a4bd5210SJason Evans { 806a4bd5210SJason Evans bool accept; 807a4bd5210SJason Evans const char *opts = *opts_p; 808a4bd5210SJason Evans 809a4bd5210SJason Evans *k_p = opts; 810a4bd5210SJason Evans 811d0e79aa3SJason Evans for (accept = false; !accept;) { 812a4bd5210SJason Evans switch (*opts) { 813a4bd5210SJason Evans case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': 814a4bd5210SJason Evans case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': 815a4bd5210SJason Evans case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': 816a4bd5210SJason Evans case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': 817a4bd5210SJason Evans case 'Y': case 'Z': 818a4bd5210SJason Evans case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': 819a4bd5210SJason Evans case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': 820a4bd5210SJason Evans case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': 821a4bd5210SJason Evans case 's': case 't': case 'u': case 'v': case 'w': case 'x': 822a4bd5210SJason Evans case 'y': case 'z': 823a4bd5210SJason Evans case '0': case '1': case '2': case '3': case '4': case '5': 824a4bd5210SJason Evans case '6': case '7': case '8': case '9': 825a4bd5210SJason Evans case '_': 826a4bd5210SJason Evans opts++; 827a4bd5210SJason Evans break; 828a4bd5210SJason Evans case ':': 829a4bd5210SJason Evans opts++; 830a4bd5210SJason Evans *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; 831a4bd5210SJason Evans *v_p = opts; 832a4bd5210SJason Evans accept = true; 833a4bd5210SJason Evans break; 834a4bd5210SJason Evans case '\0': 835a4bd5210SJason Evans if (opts != *opts_p) { 836a4bd5210SJason Evans malloc_write("<jemalloc>: Conf string ends " 837a4bd5210SJason Evans "with 
key\n"); 838a4bd5210SJason Evans } 839a4bd5210SJason Evans return (true); 840a4bd5210SJason Evans default: 841a4bd5210SJason Evans malloc_write("<jemalloc>: Malformed conf string\n"); 842a4bd5210SJason Evans return (true); 843a4bd5210SJason Evans } 844a4bd5210SJason Evans } 845a4bd5210SJason Evans 846d0e79aa3SJason Evans for (accept = false; !accept;) { 847a4bd5210SJason Evans switch (*opts) { 848a4bd5210SJason Evans case ',': 849a4bd5210SJason Evans opts++; 850a4bd5210SJason Evans /* 851a4bd5210SJason Evans * Look ahead one character here, because the next time 852a4bd5210SJason Evans * this function is called, it will assume that end of 853a4bd5210SJason Evans * input has been cleanly reached if no input remains, 854a4bd5210SJason Evans * but we have optimistically already consumed the 855a4bd5210SJason Evans * comma if one exists. 856a4bd5210SJason Evans */ 857a4bd5210SJason Evans if (*opts == '\0') { 858a4bd5210SJason Evans malloc_write("<jemalloc>: Conf string ends " 859a4bd5210SJason Evans "with comma\n"); 860a4bd5210SJason Evans } 861a4bd5210SJason Evans *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; 862a4bd5210SJason Evans accept = true; 863a4bd5210SJason Evans break; 864a4bd5210SJason Evans case '\0': 865a4bd5210SJason Evans *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; 866a4bd5210SJason Evans accept = true; 867a4bd5210SJason Evans break; 868a4bd5210SJason Evans default: 869a4bd5210SJason Evans opts++; 870a4bd5210SJason Evans break; 871a4bd5210SJason Evans } 872a4bd5210SJason Evans } 873a4bd5210SJason Evans 874a4bd5210SJason Evans *opts_p = opts; 875a4bd5210SJason Evans return (false); 876a4bd5210SJason Evans } 877a4bd5210SJason Evans 878a4bd5210SJason Evans static void 879a4bd5210SJason Evans malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, 880a4bd5210SJason Evans size_t vlen) 881a4bd5210SJason Evans { 882a4bd5210SJason Evans 883a4bd5210SJason Evans malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, 
884a4bd5210SJason Evans (int)vlen, v); 885a4bd5210SJason Evans } 886a4bd5210SJason Evans 887a4bd5210SJason Evans static void 888df0d881dSJason Evans malloc_slow_flag_init(void) 889df0d881dSJason Evans { 890df0d881dSJason Evans /* 891df0d881dSJason Evans * Combine the runtime options into malloc_slow for fast path. Called 892df0d881dSJason Evans * after processing all the options. 893df0d881dSJason Evans */ 894df0d881dSJason Evans malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0) 895df0d881dSJason Evans | (opt_junk_free ? flag_opt_junk_free : 0) 896df0d881dSJason Evans | (opt_quarantine ? flag_opt_quarantine : 0) 897df0d881dSJason Evans | (opt_zero ? flag_opt_zero : 0) 898df0d881dSJason Evans | (opt_utrace ? flag_opt_utrace : 0) 899df0d881dSJason Evans | (opt_xmalloc ? flag_opt_xmalloc : 0); 900df0d881dSJason Evans 901df0d881dSJason Evans if (config_valgrind) 902df0d881dSJason Evans malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0); 903df0d881dSJason Evans 904df0d881dSJason Evans malloc_slow = (malloc_slow_flags != 0); 905df0d881dSJason Evans } 906df0d881dSJason Evans 907df0d881dSJason Evans static void 908a4bd5210SJason Evans malloc_conf_init(void) 909a4bd5210SJason Evans { 910a4bd5210SJason Evans unsigned i; 911a4bd5210SJason Evans char buf[PATH_MAX + 1]; 912a4bd5210SJason Evans const char *opts, *k, *v; 913a4bd5210SJason Evans size_t klen, vlen; 914a4bd5210SJason Evans 91582872ac0SJason Evans /* 91682872ac0SJason Evans * Automatically configure valgrind before processing options. The 91782872ac0SJason Evans * valgrind option remains in jemalloc 3.x for compatibility reasons. 91882872ac0SJason Evans */ 91982872ac0SJason Evans if (config_valgrind) { 920d0e79aa3SJason Evans in_valgrind = (RUNNING_ON_VALGRIND != 0) ? 
true : false; 921d0e79aa3SJason Evans if (config_fill && unlikely(in_valgrind)) { 922d0e79aa3SJason Evans opt_junk = "false"; 923d0e79aa3SJason Evans opt_junk_alloc = false; 924d0e79aa3SJason Evans opt_junk_free = false; 925d0e79aa3SJason Evans assert(!opt_zero); 92682872ac0SJason Evans opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; 92782872ac0SJason Evans opt_redzone = true; 92882872ac0SJason Evans } 929d0e79aa3SJason Evans if (config_tcache && unlikely(in_valgrind)) 93082872ac0SJason Evans opt_tcache = false; 93182872ac0SJason Evans } 93282872ac0SJason Evans 933df0d881dSJason Evans for (i = 0; i < 4; i++) { 934a4bd5210SJason Evans /* Get runtime configuration. */ 935a4bd5210SJason Evans switch (i) { 936a4bd5210SJason Evans case 0: 937df0d881dSJason Evans opts = config_malloc_conf; 938df0d881dSJason Evans break; 939df0d881dSJason Evans case 1: 940a4bd5210SJason Evans if (je_malloc_conf != NULL) { 941a4bd5210SJason Evans /* 942a4bd5210SJason Evans * Use options that were compiled into the 943a4bd5210SJason Evans * program. 944a4bd5210SJason Evans */ 945a4bd5210SJason Evans opts = je_malloc_conf; 946a4bd5210SJason Evans } else { 947a4bd5210SJason Evans /* No configuration specified. */ 948a4bd5210SJason Evans buf[0] = '\0'; 949a4bd5210SJason Evans opts = buf; 950a4bd5210SJason Evans } 951a4bd5210SJason Evans break; 952df0d881dSJason Evans case 2: { 953df0d881dSJason Evans ssize_t linklen = 0; 954e722f8f8SJason Evans #ifndef _WIN32 9552b06b201SJason Evans int saved_errno = errno; 956a4bd5210SJason Evans const char *linkname = 957a4bd5210SJason Evans # ifdef JEMALLOC_PREFIX 958a4bd5210SJason Evans "/etc/"JEMALLOC_PREFIX"malloc.conf" 959a4bd5210SJason Evans # else 960a4bd5210SJason Evans "/etc/malloc.conf" 961a4bd5210SJason Evans # endif 962a4bd5210SJason Evans ; 963a4bd5210SJason Evans 964a4bd5210SJason Evans /* 9652b06b201SJason Evans * Try to use the contents of the "/etc/malloc.conf" 966a4bd5210SJason Evans * symbolic link's name. 
967a4bd5210SJason Evans */ 9682b06b201SJason Evans linklen = readlink(linkname, buf, sizeof(buf) - 1); 9692b06b201SJason Evans if (linklen == -1) { 9702b06b201SJason Evans /* No configuration specified. */ 9712b06b201SJason Evans linklen = 0; 972d0e79aa3SJason Evans /* Restore errno. */ 9732b06b201SJason Evans set_errno(saved_errno); 9742b06b201SJason Evans } 9752b06b201SJason Evans #endif 976a4bd5210SJason Evans buf[linklen] = '\0'; 977a4bd5210SJason Evans opts = buf; 978a4bd5210SJason Evans break; 979df0d881dSJason Evans } case 3: { 980a4bd5210SJason Evans const char *envname = 981a4bd5210SJason Evans #ifdef JEMALLOC_PREFIX 982a4bd5210SJason Evans JEMALLOC_CPREFIX"MALLOC_CONF" 983a4bd5210SJason Evans #else 984a4bd5210SJason Evans "MALLOC_CONF" 985a4bd5210SJason Evans #endif 986a4bd5210SJason Evans ; 987a4bd5210SJason Evans 988d0e79aa3SJason Evans if ((opts = secure_getenv(envname)) != NULL) { 989a4bd5210SJason Evans /* 990a4bd5210SJason Evans * Do nothing; opts is already initialized to 991a4bd5210SJason Evans * the value of the MALLOC_CONF environment 992a4bd5210SJason Evans * variable. 993a4bd5210SJason Evans */ 994a4bd5210SJason Evans } else { 995a4bd5210SJason Evans /* No configuration specified. 
*/ 996a4bd5210SJason Evans buf[0] = '\0'; 997a4bd5210SJason Evans opts = buf; 998a4bd5210SJason Evans } 999a4bd5210SJason Evans break; 1000a4bd5210SJason Evans } default: 1001f921d10fSJason Evans not_reached(); 1002a4bd5210SJason Evans buf[0] = '\0'; 1003a4bd5210SJason Evans opts = buf; 1004a4bd5210SJason Evans } 1005a4bd5210SJason Evans 1006d0e79aa3SJason Evans while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v, 1007d0e79aa3SJason Evans &vlen)) { 1008d0e79aa3SJason Evans #define CONF_MATCH(n) \ 1009d0e79aa3SJason Evans (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0) 1010d0e79aa3SJason Evans #define CONF_MATCH_VALUE(n) \ 1011d0e79aa3SJason Evans (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0) 1012d0e79aa3SJason Evans #define CONF_HANDLE_BOOL(o, n, cont) \ 1013d0e79aa3SJason Evans if (CONF_MATCH(n)) { \ 1014d0e79aa3SJason Evans if (CONF_MATCH_VALUE("true")) \ 1015a4bd5210SJason Evans o = true; \ 1016d0e79aa3SJason Evans else if (CONF_MATCH_VALUE("false")) \ 1017a4bd5210SJason Evans o = false; \ 1018a4bd5210SJason Evans else { \ 1019a4bd5210SJason Evans malloc_conf_error( \ 1020a4bd5210SJason Evans "Invalid conf value", \ 1021a4bd5210SJason Evans k, klen, v, vlen); \ 1022a4bd5210SJason Evans } \ 1023d0e79aa3SJason Evans if (cont) \ 1024a4bd5210SJason Evans continue; \ 1025a4bd5210SJason Evans } 1026df0d881dSJason Evans #define CONF_HANDLE_T_U(t, o, n, min, max, clip) \ 1027d0e79aa3SJason Evans if (CONF_MATCH(n)) { \ 1028a4bd5210SJason Evans uintmax_t um; \ 1029a4bd5210SJason Evans char *end; \ 1030a4bd5210SJason Evans \ 1031e722f8f8SJason Evans set_errno(0); \ 1032a4bd5210SJason Evans um = malloc_strtoumax(v, &end, 0); \ 1033e722f8f8SJason Evans if (get_errno() != 0 || (uintptr_t)end -\ 1034a4bd5210SJason Evans (uintptr_t)v != vlen) { \ 1035a4bd5210SJason Evans malloc_conf_error( \ 1036a4bd5210SJason Evans "Invalid conf value", \ 1037a4bd5210SJason Evans k, klen, v, vlen); \ 103888ad2f8dSJason Evans } else if (clip) { \ 1039d0e79aa3SJason Evans if 
((min) != 0 && um < (min)) \ 1040df0d881dSJason Evans o = (t)(min); \ 1041d0e79aa3SJason Evans else if (um > (max)) \ 1042df0d881dSJason Evans o = (t)(max); \ 104388ad2f8dSJason Evans else \ 1044df0d881dSJason Evans o = (t)um; \ 104588ad2f8dSJason Evans } else { \ 1046d0e79aa3SJason Evans if (((min) != 0 && um < (min)) \ 1047d0e79aa3SJason Evans || um > (max)) { \ 1048a4bd5210SJason Evans malloc_conf_error( \ 104988ad2f8dSJason Evans "Out-of-range " \ 105088ad2f8dSJason Evans "conf value", \ 1051a4bd5210SJason Evans k, klen, v, vlen); \ 1052a4bd5210SJason Evans } else \ 1053df0d881dSJason Evans o = (t)um; \ 105488ad2f8dSJason Evans } \ 1055a4bd5210SJason Evans continue; \ 1056a4bd5210SJason Evans } 1057df0d881dSJason Evans #define CONF_HANDLE_UNSIGNED(o, n, min, max, clip) \ 1058df0d881dSJason Evans CONF_HANDLE_T_U(unsigned, o, n, min, max, clip) 1059df0d881dSJason Evans #define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \ 1060df0d881dSJason Evans CONF_HANDLE_T_U(size_t, o, n, min, max, clip) 1061a4bd5210SJason Evans #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ 1062d0e79aa3SJason Evans if (CONF_MATCH(n)) { \ 1063a4bd5210SJason Evans long l; \ 1064a4bd5210SJason Evans char *end; \ 1065a4bd5210SJason Evans \ 1066e722f8f8SJason Evans set_errno(0); \ 1067a4bd5210SJason Evans l = strtol(v, &end, 0); \ 1068e722f8f8SJason Evans if (get_errno() != 0 || (uintptr_t)end -\ 1069a4bd5210SJason Evans (uintptr_t)v != vlen) { \ 1070a4bd5210SJason Evans malloc_conf_error( \ 1071a4bd5210SJason Evans "Invalid conf value", \ 1072a4bd5210SJason Evans k, klen, v, vlen); \ 1073d0e79aa3SJason Evans } else if (l < (ssize_t)(min) || l > \ 1074d0e79aa3SJason Evans (ssize_t)(max)) { \ 1075a4bd5210SJason Evans malloc_conf_error( \ 1076a4bd5210SJason Evans "Out-of-range conf value", \ 1077a4bd5210SJason Evans k, klen, v, vlen); \ 1078a4bd5210SJason Evans } else \ 1079a4bd5210SJason Evans o = l; \ 1080a4bd5210SJason Evans continue; \ 1081a4bd5210SJason Evans } 1082a4bd5210SJason Evans #define 
CONF_HANDLE_CHAR_P(o, n, d) \ 1083d0e79aa3SJason Evans if (CONF_MATCH(n)) { \ 1084a4bd5210SJason Evans size_t cpylen = (vlen <= \ 1085a4bd5210SJason Evans sizeof(o)-1) ? vlen : \ 1086a4bd5210SJason Evans sizeof(o)-1; \ 1087a4bd5210SJason Evans strncpy(o, v, cpylen); \ 1088a4bd5210SJason Evans o[cpylen] = '\0'; \ 1089a4bd5210SJason Evans continue; \ 1090a4bd5210SJason Evans } 1091a4bd5210SJason Evans 1092d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_abort, "abort", true) 1093a4bd5210SJason Evans /* 1094d0e79aa3SJason Evans * Chunks always require at least one header page, 1095d0e79aa3SJason Evans * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and 1096d0e79aa3SJason Evans * possibly an additional page in the presence of 1097d0e79aa3SJason Evans * redzones. In order to simplify options processing, 1098d0e79aa3SJason Evans * use a conservative bound that accommodates all these 1099d0e79aa3SJason Evans * constraints. 1100a4bd5210SJason Evans */ 11018ed34ab0SJason Evans CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + 1102d0e79aa3SJason Evans LG_SIZE_CLASS_GROUP + (config_fill ? 
2 : 1), 1103d0e79aa3SJason Evans (sizeof(size_t) << 3) - 1, true) 110482872ac0SJason Evans if (strncmp("dss", k, klen) == 0) { 110582872ac0SJason Evans int i; 110682872ac0SJason Evans bool match = false; 110782872ac0SJason Evans for (i = 0; i < dss_prec_limit; i++) { 110882872ac0SJason Evans if (strncmp(dss_prec_names[i], v, vlen) 110982872ac0SJason Evans == 0) { 11101f0a49e8SJason Evans if (chunk_dss_prec_set(NULL, 11111f0a49e8SJason Evans i)) { 111282872ac0SJason Evans malloc_conf_error( 111382872ac0SJason Evans "Error setting dss", 111482872ac0SJason Evans k, klen, v, vlen); 111582872ac0SJason Evans } else { 111682872ac0SJason Evans opt_dss = 111782872ac0SJason Evans dss_prec_names[i]; 111882872ac0SJason Evans match = true; 111982872ac0SJason Evans break; 112082872ac0SJason Evans } 112182872ac0SJason Evans } 112282872ac0SJason Evans } 1123d0e79aa3SJason Evans if (!match) { 112482872ac0SJason Evans malloc_conf_error("Invalid conf value", 112582872ac0SJason Evans k, klen, v, vlen); 112682872ac0SJason Evans } 112782872ac0SJason Evans continue; 112882872ac0SJason Evans } 1129df0d881dSJason Evans CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1, 1130df0d881dSJason Evans UINT_MAX, false) 1131df0d881dSJason Evans if (strncmp("purge", k, klen) == 0) { 1132df0d881dSJason Evans int i; 1133df0d881dSJason Evans bool match = false; 1134df0d881dSJason Evans for (i = 0; i < purge_mode_limit; i++) { 1135df0d881dSJason Evans if (strncmp(purge_mode_names[i], v, 1136df0d881dSJason Evans vlen) == 0) { 1137df0d881dSJason Evans opt_purge = (purge_mode_t)i; 1138df0d881dSJason Evans match = true; 1139df0d881dSJason Evans break; 1140df0d881dSJason Evans } 1141df0d881dSJason Evans } 1142df0d881dSJason Evans if (!match) { 1143df0d881dSJason Evans malloc_conf_error("Invalid conf value", 1144df0d881dSJason Evans k, klen, v, vlen); 1145df0d881dSJason Evans } 1146df0d881dSJason Evans continue; 1147df0d881dSJason Evans } 11488ed34ab0SJason Evans CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, 
"lg_dirty_mult", 1149a4bd5210SJason Evans -1, (sizeof(size_t) << 3) - 1) 1150df0d881dSJason Evans CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1, 1151df0d881dSJason Evans NSTIME_SEC_MAX); 1152d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true) 1153a4bd5210SJason Evans if (config_fill) { 1154d0e79aa3SJason Evans if (CONF_MATCH("junk")) { 1155d0e79aa3SJason Evans if (CONF_MATCH_VALUE("true")) { 1156d0e79aa3SJason Evans opt_junk = "true"; 1157d0e79aa3SJason Evans opt_junk_alloc = opt_junk_free = 1158d0e79aa3SJason Evans true; 1159d0e79aa3SJason Evans } else if (CONF_MATCH_VALUE("false")) { 1160d0e79aa3SJason Evans opt_junk = "false"; 1161d0e79aa3SJason Evans opt_junk_alloc = opt_junk_free = 1162d0e79aa3SJason Evans false; 1163d0e79aa3SJason Evans } else if (CONF_MATCH_VALUE("alloc")) { 1164d0e79aa3SJason Evans opt_junk = "alloc"; 1165d0e79aa3SJason Evans opt_junk_alloc = true; 1166d0e79aa3SJason Evans opt_junk_free = false; 1167d0e79aa3SJason Evans } else if (CONF_MATCH_VALUE("free")) { 1168d0e79aa3SJason Evans opt_junk = "free"; 1169d0e79aa3SJason Evans opt_junk_alloc = false; 1170d0e79aa3SJason Evans opt_junk_free = true; 1171d0e79aa3SJason Evans } else { 1172d0e79aa3SJason Evans malloc_conf_error( 1173d0e79aa3SJason Evans "Invalid conf value", k, 1174d0e79aa3SJason Evans klen, v, vlen); 1175d0e79aa3SJason Evans } 1176d0e79aa3SJason Evans continue; 1177d0e79aa3SJason Evans } 11788ed34ab0SJason Evans CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", 117988ad2f8dSJason Evans 0, SIZE_T_MAX, false) 1180d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_redzone, "redzone", true) 1181d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_zero, "zero", true) 1182a4bd5210SJason Evans } 1183a4bd5210SJason Evans if (config_utrace) { 1184d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_utrace, "utrace", true) 1185a4bd5210SJason Evans } 1186a4bd5210SJason Evans if (config_xmalloc) { 1187d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true) 1188a4bd5210SJason Evans 
} 1189a4bd5210SJason Evans if (config_tcache) { 1190d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_tcache, "tcache", 1191d0e79aa3SJason Evans !config_valgrind || !in_valgrind) 1192d0e79aa3SJason Evans if (CONF_MATCH("tcache")) { 1193d0e79aa3SJason Evans assert(config_valgrind && in_valgrind); 1194d0e79aa3SJason Evans if (opt_tcache) { 1195d0e79aa3SJason Evans opt_tcache = false; 1196d0e79aa3SJason Evans malloc_conf_error( 1197d0e79aa3SJason Evans "tcache cannot be enabled " 1198d0e79aa3SJason Evans "while running inside Valgrind", 1199d0e79aa3SJason Evans k, klen, v, vlen); 1200d0e79aa3SJason Evans } 1201d0e79aa3SJason Evans continue; 1202d0e79aa3SJason Evans } 1203a4bd5210SJason Evans CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, 12048ed34ab0SJason Evans "lg_tcache_max", -1, 1205a4bd5210SJason Evans (sizeof(size_t) << 3) - 1) 1206a4bd5210SJason Evans } 1207a4bd5210SJason Evans if (config_prof) { 1208d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof, "prof", true) 12098ed34ab0SJason Evans CONF_HANDLE_CHAR_P(opt_prof_prefix, 12108ed34ab0SJason Evans "prof_prefix", "jeprof") 1211d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_active, "prof_active", 1212d0e79aa3SJason Evans true) 1213d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_thread_active_init, 1214d0e79aa3SJason Evans "prof_thread_active_init", true) 1215d0e79aa3SJason Evans CONF_HANDLE_SIZE_T(opt_lg_prof_sample, 12168ed34ab0SJason Evans "lg_prof_sample", 0, 1217d0e79aa3SJason Evans (sizeof(uint64_t) << 3) - 1, true) 1218d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum", 1219d0e79aa3SJason Evans true) 1220a4bd5210SJason Evans CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, 12218ed34ab0SJason Evans "lg_prof_interval", -1, 1222a4bd5210SJason Evans (sizeof(uint64_t) << 3) - 1) 1223d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump", 1224d0e79aa3SJason Evans true) 1225d0e79aa3SJason Evans CONF_HANDLE_BOOL(opt_prof_final, "prof_final", 1226d0e79aa3SJason Evans true) 1227d0e79aa3SJason Evans 
CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak", 1228d0e79aa3SJason Evans true) 1229a4bd5210SJason Evans } 1230a4bd5210SJason Evans malloc_conf_error("Invalid conf pair", k, klen, v, 1231a4bd5210SJason Evans vlen); 1232d0e79aa3SJason Evans #undef CONF_MATCH 1233a4bd5210SJason Evans #undef CONF_HANDLE_BOOL 1234a4bd5210SJason Evans #undef CONF_HANDLE_SIZE_T 1235a4bd5210SJason Evans #undef CONF_HANDLE_SSIZE_T 1236a4bd5210SJason Evans #undef CONF_HANDLE_CHAR_P 1237a4bd5210SJason Evans } 1238a4bd5210SJason Evans } 1239a4bd5210SJason Evans } 1240a4bd5210SJason Evans 1241a4bd5210SJason Evans static bool 1242d0e79aa3SJason Evans malloc_init_hard_needed(void) 1243a4bd5210SJason Evans { 1244a4bd5210SJason Evans 1245d0e79aa3SJason Evans if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state == 1246d0e79aa3SJason Evans malloc_init_recursible)) { 1247a4bd5210SJason Evans /* 1248a4bd5210SJason Evans * Another thread initialized the allocator before this one 1249a4bd5210SJason Evans * acquired init_lock, or this thread is the initializing 1250a4bd5210SJason Evans * thread, and it is recursively allocating. 1251a4bd5210SJason Evans */ 1252a4bd5210SJason Evans return (false); 1253a4bd5210SJason Evans } 1254a4bd5210SJason Evans #ifdef JEMALLOC_THREADED_INIT 1255d0e79aa3SJason Evans if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) { 1256a4bd5210SJason Evans /* Busy-wait until the initializing thread completes. 
*/ 1257a4bd5210SJason Evans do { 12581f0a49e8SJason Evans malloc_mutex_unlock(NULL, &init_lock); 1259a4bd5210SJason Evans CPU_SPINWAIT; 12601f0a49e8SJason Evans malloc_mutex_lock(NULL, &init_lock); 1261d0e79aa3SJason Evans } while (!malloc_initialized()); 1262a4bd5210SJason Evans return (false); 1263a4bd5210SJason Evans } 1264a4bd5210SJason Evans #endif 1265d0e79aa3SJason Evans return (true); 1266d0e79aa3SJason Evans } 1267d0e79aa3SJason Evans 1268d0e79aa3SJason Evans static bool 12691f0a49e8SJason Evans malloc_init_hard_a0_locked() 1270d0e79aa3SJason Evans { 1271d0e79aa3SJason Evans 1272a4bd5210SJason Evans malloc_initializer = INITIALIZER; 1273a4bd5210SJason Evans 1274a4bd5210SJason Evans if (config_prof) 1275a4bd5210SJason Evans prof_boot0(); 1276a4bd5210SJason Evans malloc_conf_init(); 1277a4bd5210SJason Evans if (opt_stats_print) { 1278a4bd5210SJason Evans /* Print statistics at exit. */ 1279a4bd5210SJason Evans if (atexit(stats_print_atexit) != 0) { 1280a4bd5210SJason Evans malloc_write("<jemalloc>: Error in atexit()\n"); 1281a4bd5210SJason Evans if (opt_abort) 1282a4bd5210SJason Evans abort(); 1283a4bd5210SJason Evans } 1284a4bd5210SJason Evans } 12851f0a49e8SJason Evans pages_boot(); 1286d0e79aa3SJason Evans if (base_boot()) 1287a4bd5210SJason Evans return (true); 1288d0e79aa3SJason Evans if (chunk_boot()) 1289a4bd5210SJason Evans return (true); 1290d0e79aa3SJason Evans if (ctl_boot()) 1291a4bd5210SJason Evans return (true); 1292a4bd5210SJason Evans if (config_prof) 1293a4bd5210SJason Evans prof_boot1(); 1294d0e79aa3SJason Evans if (arena_boot()) 1295a4bd5210SJason Evans return (true); 12961f0a49e8SJason Evans if (config_tcache && tcache_boot(TSDN_NULL)) 1297a4bd5210SJason Evans return (true); 12981f0a49e8SJason Evans if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS)) 1299a4bd5210SJason Evans return (true); 1300a4bd5210SJason Evans /* 1301a4bd5210SJason Evans * Create enough scaffolding to allow recursive allocation in 1302a4bd5210SJason 
Evans * malloc_ncpus(). 1303a4bd5210SJason Evans */ 1304df0d881dSJason Evans narenas_auto = 1; 1305df0d881dSJason Evans narenas_total_set(narenas_auto); 1306d0e79aa3SJason Evans arenas = &a0; 130782872ac0SJason Evans memset(arenas, 0, sizeof(arena_t *) * narenas_auto); 1308a4bd5210SJason Evans /* 1309a4bd5210SJason Evans * Initialize one arena here. The rest are lazily created in 1310d0e79aa3SJason Evans * arena_choose_hard(). 1311a4bd5210SJason Evans */ 13121f0a49e8SJason Evans if (arena_init(TSDN_NULL, 0) == NULL) 1313a4bd5210SJason Evans return (true); 13141f0a49e8SJason Evans 1315d0e79aa3SJason Evans malloc_init_state = malloc_init_a0_initialized; 13161f0a49e8SJason Evans 1317d0e79aa3SJason Evans return (false); 1318a4bd5210SJason Evans } 1319a4bd5210SJason Evans 1320d0e79aa3SJason Evans static bool 1321d0e79aa3SJason Evans malloc_init_hard_a0(void) 1322d0e79aa3SJason Evans { 1323d0e79aa3SJason Evans bool ret; 1324d0e79aa3SJason Evans 13251f0a49e8SJason Evans malloc_mutex_lock(TSDN_NULL, &init_lock); 1326d0e79aa3SJason Evans ret = malloc_init_hard_a0_locked(); 13271f0a49e8SJason Evans malloc_mutex_unlock(TSDN_NULL, &init_lock); 1328d0e79aa3SJason Evans return (ret); 1329a4bd5210SJason Evans } 1330a4bd5210SJason Evans 13311f0a49e8SJason Evans /* Initialize data structures which may trigger recursive allocation. */ 1332df0d881dSJason Evans static bool 1333d0e79aa3SJason Evans malloc_init_hard_recursible(void) 1334d0e79aa3SJason Evans { 1335a4bd5210SJason Evans 1336d0e79aa3SJason Evans malloc_init_state = malloc_init_recursible; 1337df0d881dSJason Evans 1338a4bd5210SJason Evans ncpus = malloc_ncpus(); 1339f921d10fSJason Evans 1340f921d10fSJason Evans #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ 1341d0e79aa3SJason Evans && !defined(_WIN32) && !defined(__native_client__)) 1342df0d881dSJason Evans /* LinuxThreads' pthread_atfork() allocates. 
*/
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
		return (true);
	}
#endif

	return (false);
}

/*
 * Final initialization stage: boot the mutexes, pick the number of arenas,
 * and allocate/seed the global arena table.  Returns true on failure.
 * Caller (malloc_init_hard()) holds init_lock.
 */
static bool
malloc_init_hard_finish(tsdn_t *tsdn)
{

	if (malloc_mutex_boot())
		return (true);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;	/* 4 arenas per CPU. */
		else
			opt_narenas = 1;
	}
	narenas_auto = opt_narenas;
	/*
	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
	 */
	if (narenas_auto > MALLOCX_ARENA_MAX) {
		narenas_auto = MALLOCX_ARENA_MAX;
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas_auto);
	}
	narenas_total_set(narenas_auto);

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) *
	    (MALLOCX_ARENA_MAX+1));
	if (arenas == NULL)
		return (true);
	/* Copy the pointer to the one arena that was already initialized. */
	arena_set(0, a0);

	malloc_init_state = malloc_init_initialized;
	malloc_slow_flag_init();

	return (false);
}

/*
 * Slow-path initializer.  Drives the staged bootstrap under init_lock,
 * deliberately dropping the lock around tsd boot and the "recursible" stage
 * (both of which may themselves allocate).  Returns true on failure.
 */
static bool
malloc_init_hard(void)
{
	tsd_t *tsd;

#if defined(_WIN32) && _WIN32_WINNT < 0x0600
	_init_init_lock();
#endif
	/* No tsd yet, hence TSDN_NULL for the lock calls below. */
	malloc_mutex_lock(TSDN_NULL, &init_lock);
	if (!malloc_init_hard_needed()) {
		/* Another thread completed (or is completing) init. */
		malloc_mutex_unlock(TSDN_NULL, &init_lock);
		return (false);
	}

	if (malloc_init_state != malloc_init_a0_initialized &&
	    malloc_init_hard_a0_locked()) {
		malloc_mutex_unlock(TSDN_NULL, &init_lock);
		return (true);
	}

	malloc_mutex_unlock(TSDN_NULL, &init_lock);
	/* Recursive allocation relies on functional tsd. */
	tsd = malloc_tsd_boot0();
	if (tsd == NULL)
		return (true);
	if (malloc_init_hard_recursible())
		return (true);
	/* Re-acquire with a real tsdn now that tsd is booted. */
	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);

	if (config_prof && prof_boot2(tsd_tsdn(tsd))) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
		return (true);
	}

	if (malloc_init_hard_finish(tsd_tsdn(tsd))) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
		return (true);
	}

	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
	malloc_tsd_boot1();
	return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

/*
 * Allocate a profiling-sampled object.  Small requests are promoted to
 * LARGE_MINCLASS so the sample carries large-object metadata, then the
 * reported size is demoted back to usize via arena_prof_promoted().
 * Returns NULL if tctx is NULL or the underlying allocation fails.
 */
static void *
ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
    prof_tctx_t *tctx, bool slow_path)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		szind_t ind_large = size2index(LARGE_MINCLASS);
		p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = ialloc(tsd, usize, ind, zero, slow_path);

	return (p);
}

/*
 * Profiling wrapper around ialloc(): consult prof_alloc_prep() and either
 * take the sampled path or allocate normally, then record the allocation.
 * The (uintptr_t)1U comparison appears to be prof_alloc_prep()'s
 * "not sampled" sentinel — NOTE(review): confirm against prof_alloc_prep.
 */
JEMALLOC_ALWAYS_INLINE_C void *
ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path)
{
	void *p;
	prof_tctx_t *tctx;

	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path);
	else
		p = ialloc(tsd, usize, ind, zero, slow_path);
	if (unlikely(p == NULL)) {
		/* Undo the prep accounting before reporting failure. */
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, usize, tctx);

	return (p);
}

/*
 * ialloc_body() is inlined so that fast and slow paths are generated separately
 * with statically known slow_path.
 *
 * This function guarantees that *tsdn is non-NULL on success.
 */
JEMALLOC_ALWAYS_INLINE_C void *
ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
    bool slow_path)
{
	tsd_t *tsd;
	szind_t ind;

	/* Only the slow path pays for the initialization check. */
	if (slow_path && unlikely(malloc_init())) {
		*tsdn = NULL;
		return (NULL);
	}

	tsd = tsd_fetch();
	*tsdn = tsd_tsdn(tsd);
	witness_assert_lockless(tsd_tsdn(tsd));

	ind = size2index(size);
	/* NOTE(review): *usize is left unset on this failure return. */
	if (unlikely(ind >= NSIZES))
		return (NULL);

	/* *usize is only needed by stats, profiling, or valgrind. */
	if (config_stats || (config_prof && opt_prof) || (slow_path &&
	    config_valgrind && unlikely(in_valgrind))) {
		*usize = index2size(ind);
		assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
	}

	if (config_prof && opt_prof)
		return (ialloc_prof(tsd, *usize, ind, zero, slow_path));

	return (ialloc(tsd, size, ind, zero, slow_path));
}

/*
 * Shared epilogue for malloc/calloc: on failure optionally abort
 * (opt_xmalloc) and set ENOMEM; on success update per-thread allocation
 * stats.  func names the public entry point for the error message.
 */
JEMALLOC_ALWAYS_INLINE_C void
ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
    bool update_errno, bool slow_path)
{

	assert(!tsdn_null(tsdn) || ret == NULL);

	if (unlikely(ret == NULL)) {
		if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_printf("<jemalloc>: Error in %s(): out of "
			    "memory\n", func);
			abort();
		}
		if (update_errno)
			set_errno(ENOMEM);
	}
	if (config_stats && likely(ret != NULL)) {
		assert(usize == isalloc(tsdn, ret, config_prof));
		*tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
	}
	witness_assert_lockless(tsdn);
}

/*
 * malloc(3).  Zero-size requests are bumped to 1 byte.  UTRACE/valgrind
 * hooks only run on the slow path (malloc_slow covers those options).
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size)
{
	void *ret;
	tsdn_t *tsdn;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	if (size == 0)
		size = 1;

	if (likely(!malloc_slow)) {
		ret = ialloc_body(size, false, &tsdn, &usize, false);
		ialloc_post_check(ret, tsdn, usize, "malloc", true, false);
	} else {
		ret = ialloc_body(size, false, &tsdn, &usize, true);
		ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
		UTRACE(0, size,
ret);
		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
	}

	return (ret);
}

/*
 * Aligned analogue of ialloc_prof_sample(): promote small sampled requests
 * to LARGE_MINCLASS (alignment already satisfied per the assert), then
 * demote the reported size via arena_prof_promoted().
 */
static void *
imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
		p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = ipalloc(tsd, usize, alignment, false);

	return (p);
}

/*
 * Profiling wrapper around ipalloc(); mirrors ialloc_prof() including the
 * (uintptr_t)1U "not sampled" sentinel and rollback on failure.
 */
JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
{
	void *p;
	prof_tctx_t *tctx;

	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = imemalign_prof_sample(tsd, alignment, usize, tctx);
	else
		p = ipalloc(tsd, usize, alignment, false);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, usize, tctx);

	return (p);
}

/*
 * Common implementation for posix_memalign/aligned_alloc/memalign/valloc.
 * Stores the result through *memptr and returns 0, or returns EINVAL/ENOMEM
 * (posix_memalign(3) error protocol) without touching *memptr.
 * min_alignment distinguishes the callers' minimum-alignment rules.
 */
JEMALLOC_ATTR(nonnull(1))
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
	int ret;
	tsd_t *tsd;
	size_t usize;
	void *result;

	assert(min_alignment != 0);

	if (unlikely(malloc_init())) {
		/*
		 * NOTE(review): tsd stays NULL here, yet label_oom reaches
		 * witness_assert_lockless(tsd_tsdn(tsd)) — presumably
		 * tsd_tsdn() tolerates NULL; confirm.
		 */
		tsd = NULL;
		result = NULL;
		goto label_oom;
	}
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	if (size == 0)
		size = 1;

	/* Make sure that alignment is a large enough power of 2. */
	if (unlikely(((alignment - 1) & alignment) != 0
	    || (alignment < min_alignment))) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write("<jemalloc>: Error allocating "
			    "aligned memory: invalid alignment\n");
			abort();
		}
		result = NULL;
		ret = EINVAL;
		goto label_return;
	}

	usize = sa2u(size, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
		result = NULL;
		goto label_oom;
	}

	if (config_prof && opt_prof)
		result = imemalign_prof(tsd, alignment, usize);
	else
		result = ipalloc(tsd, usize, alignment, false);
	if (unlikely(result == NULL))
		goto label_oom;
	assert(((uintptr_t)result & (alignment - 1)) == ZU(0));

	*memptr = result;
	ret = 0;
label_return:
	/* Stats/trace epilogue shared by success and EINVAL paths. */
	if (config_stats && likely(result != NULL)) {
		assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof));
		*tsd_thread_allocatedp_get(tsd) += usize;
	}
	UTRACE(0, size, result);
	JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
	    false);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
label_oom:
	assert(result == NULL);
	/* Out-of-memory tail of imemalign(): abort if opt_xmalloc, else ENOMEM. */
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error allocating aligned memory: "
		    "out of memory\n");
		abort();
	}
	ret = ENOMEM;
	witness_assert_lockless(tsd_tsdn(tsd));
	goto label_return;
}

/* posix_memalign(3): minimum alignment is sizeof(void *). */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret;

	ret = imemalign(memptr, alignment, size, sizeof(void *));

	return (ret);
}

/*
 * aligned_alloc (C11): returns NULL and sets errno on failure, unlike
 * posix_memalign which returns the error code.
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
		ret = NULL;
		set_errno(err);
	}

	return (ret);
}

/*
 * calloc(3).  num*size overflow is detected cheaply: only if either operand
 * uses the upper half of its bits is the division check performed.  Overflow
 * (or a zero product from nonzero operands — impossible, but the 0 case
 * covers num==0||size==0) is turned into a request ialloc_body() must fail.
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size)
{
	void *ret;
	tsdn_t *tsdn;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		if (num == 0 || size == 0)
			num_size = 1;
		else
			num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */
	/*
	 * Try to avoid division here. We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
	    2))) && (num_size / size != num)))
		num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */

	if (likely(!malloc_slow)) {
		ret = ialloc_body(num_size, true, &tsdn, &usize, false);
		ialloc_post_check(ret, tsdn, usize, "calloc", true, false);
	} else {
		ret = ialloc_body(num_size, true, &tsdn, &usize, true);
		ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
		UTRACE(0, num_size, ret);
		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
	}

	return (ret);
}

/*
 * Sampled reallocation: promote small sampled results to LARGE_MINCLASS
 * then demote the reported size, as in ialloc_prof_sample().
 */
static void *
irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);

	return (p);
}

/*
 * Profiling wrapper around iralloc(); records both the freed old object
 * and the new allocation via prof_realloc() on success.
 */
JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
{
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active =
prof_active_get_unlocked();
	/* Capture the old object's tctx before it can be freed/replaced. */
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
	tctx = prof_alloc_prep(tsd, usize, prof_active, true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
	else
		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
	    old_tctx);

	return (p);
}

/*
 * Deallocation guts shared by free()/realloc(ptr, 0): update profiling and
 * per-thread stats, then hand the object to iqalloc().  The valgrind
 * redzone lookup is confined to the slow path.
 */
JEMALLOC_INLINE_C void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{
	size_t usize;
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	witness_assert_lockless(tsd_tsdn(tsd));

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	if (config_prof && opt_prof) {
		usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
		prof_free(tsd, ptr, usize);
	} else if (config_stats || config_valgrind)
		usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
	if (config_stats)
		*tsd_thread_deallocatedp_get(tsd) += usize;

	if (likely(!slow_path))
		iqalloc(tsd, ptr, tcache, false);
	else {
		if (config_valgrind && unlikely(in_valgrind))
			rzsize = p2rz(tsd_tsdn(tsd), ptr);
		iqalloc(tsd, ptr, tcache, true);
		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
	}
}

/*
 * Sized deallocation (sdallocx path): like ifree() but the caller supplies
 * usize, so no isalloc() lookup is needed.
 */
JEMALLOC_INLINE_C void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
{
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	witness_assert_lockless(tsd_tsdn(tsd));

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	if (config_prof && opt_prof)
		prof_free(tsd, ptr, usize);
	if (config_stats)
		*tsd_thread_deallocatedp_get(tsd) += usize;
	if (config_valgrind && unlikely(in_valgrind))
		rzsize = p2rz(tsd_tsdn(tsd), ptr);
	isqalloc(tsd, ptr, usize, tcache, slow_path);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}

/*
 * realloc(3).  realloc(ptr, 0) frees; realloc(NULL, size) mallocs;
 * otherwise resize in place or move via iralloc()/irealloc_prof().
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size)
{
	void *ret;
	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_usize = 0;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);

	if (unlikely(size == 0)) {
		if (ptr != NULL) {
			tsd_t *tsd;

			/* realloc(ptr, 0) is equivalent to free(ptr). */
			UTRACE(ptr, 0, 0);
			tsd = tsd_fetch();
			ifree(tsd, ptr, tcache_get(tsd, false), true);
			return (NULL);
		}
		size = 1;
	}

	if (likely(ptr != NULL)) {
		tsd_t *tsd;

		assert(malloc_initialized() || IS_INITIALIZER);
		malloc_thread_init();
		tsd = tsd_fetch();

		witness_assert_lockless(tsd_tsdn(tsd));

		/* Record old size (and redzone size, for valgrind) first. */
		old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
		if (config_valgrind && unlikely(in_valgrind)) {
			old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
			    u2rz(old_usize);
		}

		if (config_prof && opt_prof) {
			usize = s2u(size);
			ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
			    NULL : irealloc_prof(tsd, ptr, old_usize, usize);
		} else {
			if (config_stats || (config_valgrind &&
			    unlikely(in_valgrind)))
				usize = s2u(size);
			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
		}
		tsdn = tsd_tsdn(tsd);
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size).
*/
		if (likely(!malloc_slow))
			ret = ialloc_body(size, false, &tsdn, &usize, false);
		else
			ret = ialloc_body(size, false, &tsdn, &usize, true);
		assert(!tsdn_null(tsdn) || ret == NULL);
	}

	if (unlikely(ret == NULL)) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write("<jemalloc>: Error in realloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && likely(ret != NULL)) {
		tsd_t *tsd;

		/* Account both the new allocation and the freed old one. */
		assert(usize == isalloc(tsdn, ret, config_prof));
		tsd = tsdn_tsd(tsdn);
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize,
	    old_rzsize, true, false);
	witness_assert_lockless(tsdn);
	return (ret);
}

/* free(3).  free(NULL) is a no-op (only traced). */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (likely(ptr != NULL)) {
		tsd_t *tsd = tsd_fetch();
		witness_assert_lockless(tsd_tsdn(tsd));
		if (likely(!malloc_slow))
			ifree(tsd, ptr, tcache_get(tsd, false), false);
		else
			ifree(tsd, ptr, tcache_get(tsd, false), true);
		witness_assert_lockless(tsd_tsdn(tsd));
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
/* Legacy memalign(3): any power-of-2 alignment; NULL on failure. */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
		ret = NULL;
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
/* Legacy valloc(3): page-aligned allocation; NULL on failure. */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
		ret = NULL;
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc. The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
# endif
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

/*
 * Decode mallocx() flags into alignment/zero/tcache/arena and compute
 * *usize.  Returns true on error (usize overflow, or an explicitly
 * requested arena that does not exist).
 */
JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{

	if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
		*alignment = 0;
		*usize = s2u(size);
	} else {
		*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
		*usize = sa2u(size, *alignment);
	}
	if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
		return (true);
	*zero = MALLOCX_ZERO_GET(flags);
	if ((flags & MALLOCX_TCACHE_MASK) != 0) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			*tcache = NULL;
		else
			*tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		*tcache = tcache_get(tsd, true);
	if ((flags & MALLOCX_ARENA_MASK) != 0) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		*arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(*arena == NULL))
			return (true);
	} else
		*arena = NULL;	/* Use the thread's default arena. */
	return (false);
}

/*
 * Dispatch on decoded flags: aligned requests go through ipalloct(),
 * the rest through iallocztm() with a precomputed size index.
 */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena, bool slow_path)
{
	szind_t ind;

	if (unlikely(alignment != 0))
		return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
	ind = size2index(usize);
	assert(ind < NSIZES);
	return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena,
	    slow_path));
}

/*
 * Sampled mallocx() allocation: promote small requests to LARGE_MINCLASS
 * (which per the assert also satisfies any requested alignment), then
 * demote the reported size via arena_prof_promoted().
 */
static void *
imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena, bool slow_path)
{
	void *p;

	if (usize <= SMALL_MAXCLASS) {
		assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
		    sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
		p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero,
		    tcache, arena, slow_path);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsdn, p, usize);
	} else {
		p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
		    slow_path);
	}

	return (p);
}

/*
 * mallocx() with profiling enabled.  prof_alloc_prep() result: the value
 * (uintptr_t)1U takes the unsampled path, values > 1 are a real tctx for
 * the sampled path, and (per the remaining else) anything else means fail —
 * NOTE(review): presumably NULL; confirm against prof_alloc_prep.
 */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
{
	void *p;
	size_t alignment;
	bool zero;
	tcache_t *tcache;
	arena_t *arena;
	prof_tctx_t *tctx;

	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
	    &zero, &tcache, &arena)))
		return (NULL);
	tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
	if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
		p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero,
		    tcache, arena, slow_path);
	} else if ((uintptr_t)tctx > (uintptr_t)1U) {
		p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero,
		    tcache, arena, slow_path);
	} else
		p = NULL;
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, *usize, tctx);

	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
	return (p);
}

/* mallocx() without profiling: decode flags, then allocate directly. */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
    bool slow_path)
{
	void *p;
	size_t alignment;
	bool zero;
	tcache_t *tcache;
	arena_t *arena;

	if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
	    &zero, &tcache, &arena)))
		return (NULL);
	p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
	    arena, slow_path);
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
	return (p);
}

/* This function guarantees that *tsdn is non-NULL on success. */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
    bool slow_path)
{
	tsd_t *tsd;

	if (slow_path && unlikely(malloc_init())) {
		*tsdn = NULL;
		return (NULL);
	}

	tsd = tsd_fetch();
	*tsdn = tsd_tsdn(tsd);
	witness_assert_lockless(tsd_tsdn(tsd));

	/* flags == 0 reduces to the plain malloc fast path. */
	if (likely(flags == 0)) {
		szind_t ind = size2index(size);
		if (unlikely(ind >= NSIZES))
			return (NULL);
		if (config_stats || (config_prof && opt_prof) || (slow_path &&
		    config_valgrind && unlikely(in_valgrind))) {
			*usize = index2size(ind);
			assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
		}

		if (config_prof && opt_prof) {
			return (ialloc_prof(tsd, *usize, ind, false,
			    slow_path));
		}

		return (ialloc(tsd, size, ind, false, slow_path));
	}

	if (config_prof && opt_prof)
		return (imallocx_prof(tsd, size, flags, usize, slow_path));
21661f0a49e8SJason Evans 21671f0a49e8SJason Evans return (imallocx_no_prof(tsd, size, flags, usize, slow_path)); 2168d0e79aa3SJason Evans } 2169d0e79aa3SJason Evans 2170d0e79aa3SJason Evans JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN 2171d0e79aa3SJason Evans void JEMALLOC_NOTHROW * 2172d0e79aa3SJason Evans JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1) 2173d0e79aa3SJason Evans je_mallocx(size_t size, int flags) 2174d0e79aa3SJason Evans { 21751f0a49e8SJason Evans tsdn_t *tsdn; 2176d0e79aa3SJason Evans void *p; 2177d0e79aa3SJason Evans size_t usize; 2178f921d10fSJason Evans 2179f921d10fSJason Evans assert(size != 0); 2180f921d10fSJason Evans 21811f0a49e8SJason Evans if (likely(!malloc_slow)) { 21821f0a49e8SJason Evans p = imallocx_body(size, flags, &tsdn, &usize, false); 21831f0a49e8SJason Evans ialloc_post_check(p, tsdn, usize, "mallocx", false, false); 21841f0a49e8SJason Evans } else { 21851f0a49e8SJason Evans p = imallocx_body(size, flags, &tsdn, &usize, true); 21861f0a49e8SJason Evans ialloc_post_check(p, tsdn, usize, "mallocx", false, true); 2187f921d10fSJason Evans UTRACE(0, size, p); 21881f0a49e8SJason Evans JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize, 21891f0a49e8SJason Evans MALLOCX_ZERO_GET(flags)); 2190f921d10fSJason Evans } 21911f0a49e8SJason Evans 21921f0a49e8SJason Evans return (p); 2193f921d10fSJason Evans } 2194f921d10fSJason Evans 2195f921d10fSJason Evans static void * 2196536b3538SJason Evans irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, 2197536b3538SJason Evans size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena, 2198d0e79aa3SJason Evans prof_tctx_t *tctx) 2199f921d10fSJason Evans { 2200f921d10fSJason Evans void *p; 2201f921d10fSJason Evans 2202d0e79aa3SJason Evans if (tctx == NULL) 2203f921d10fSJason Evans return (NULL); 2204d0e79aa3SJason Evans if (usize <= SMALL_MAXCLASS) { 2205536b3538SJason Evans p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment, 2206d0e79aa3SJason 
		    zero, tcache, arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else {
		p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
		    tcache, arena);
	}

	return (p);
}

/*
 * rallocx() body used when heap profiling is enabled.  Mirrors
 * imallocx_prof(): tctx == (uintptr_t)1U means "not sampled"; failures roll
 * back the prepared profiling state, successes are recorded via
 * prof_realloc().
 */
JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
    size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
    arena_t *arena)
{
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
	tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
		    alignment, zero, tcache, arena, tctx);
	} else {
		p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
		    tcache, arena);
	}
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, false);
		return (NULL);
	}

	if (p == old_ptr && alignment != 0) {
		/*
		 * The allocation did not move, so it is possible that the size
		 * class is smaller than would guarantee the requested
		 * alignment, and that the alignment constraint was
		 * serendipitously satisfied. Additionally, old_usize may not
		 * be the same as the current usize because of in-place large
		 * reallocation. Therefore, query the actual value of usize.
		 */
		*usize = isalloc(tsd_tsdn(tsd), p, config_prof);
	}
	prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
	    old_usize, old_tctx);

	return (p);
}

/*
 * Public rallocx() entry point: reallocate with explicit alignment, zero,
 * tcache, and arena control from flags.  On allocation failure, honors
 * opt_xmalloc (abort) and returns NULL.
 */
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_rallocx(void *ptr, size_t size, int flags)
{
	void *p;
	tsd_t *tsd;
	size_t usize;
	size_t old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;
	arena_t *arena;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(size != 0);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));

	/* Resolve the explicitly requested arena, if any. */
	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(arena == NULL))
			goto label_oom;
	} else
		arena = NULL;

	/* Resolve the tcache selection encoded in flags. */
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, true);

	old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
			goto label_oom;
		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
		    zero, tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
	} else {
		p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
		    tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
			usize = isalloc(tsd_tsdn(tsd), p, config_prof);
	}
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, p);
	JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr,
	    old_usize, old_rzsize, false, zero);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (p);
label_oom:
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
		abort();
	}
	UTRACE(ptr, size, 0);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (NULL);
}

/*
 * Attempt an in-place resize; returns the resulting usable size, which is
 * old_usize when the resize could not be performed.
 */
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
	size_t usize;

	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
		return (old_usize);
	usize = isalloc(tsdn, ptr, config_prof);

	return (usize);
}

/*
 * xallocx() path for a sampled in-place resize; with a NULL tctx the resize
 * is refused (old_usize is returned unchanged).
 */
static size_t
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
{
	size_t usize;

	if (tctx == NULL)
		return (old_usize);
	usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
	    zero);

	return (usize);
}

/*
 * xallocx() body used when heap profiling is enabled.  Prepares profiling
 * with the maximum possible resulting usize (the actual value is unknowable
 * until ixalloc() runs), then records or rolls back based on the outcome.
 */
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
	size_t usize_max, usize;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
	/*
	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
	 * Therefore, compute its maximum possible value and use that in
	 * prof_alloc_prep() to decide whether to capture a backtrace.
	 * prof_realloc() will use the actual usize to decide whether to sample.
	 */
	if (alignment == 0) {
		usize_max = s2u(size+extra);
		assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
	} else {
		usize_max = sa2u(size+extra, alignment);
		if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
			/*
			 * usize_max is out of range, and chances are that
			 * allocation will fail, but use the maximum possible
			 * value and carry on with prof_alloc_prep(), just in
			 * case allocation succeeds.
			 */
			usize_max = HUGE_MAXCLASS;
		}
	}
	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);

	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
		    size, extra, alignment, zero, tctx);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (usize == old_usize) {
		/* No resize happened; undo the profiling preparation. */
		prof_alloc_rollback(tsd, tctx, false);
		return (usize);
	}
	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
	    old_tctx);

	return (usize);
}

/*
 * Public xallocx() entry point: resize in place only, returning the
 * resulting usable size (old_usize when nothing changed).
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
	tsd_t *tsd;
	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;

	assert(ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));

	old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);

	/*
	 * The API explicitly absolves itself of protecting against (size +
	 * extra) numerical overflow, but we may need to clamp extra to avoid
	 * exceeding HUGE_MAXCLASS.
	 *
	 * Ordinarily, size limit checking is handled deeper down, but here we
	 * have to check as part of (size + extra) clamping, since we need the
	 * clamped value in the above helper functions.
	 */
	if (unlikely(size > HUGE_MAXCLASS)) {
		usize = old_usize;
		goto label_not_resized;
	}
	if (unlikely(HUGE_MAXCLASS - size < extra))
		extra = HUGE_MAXCLASS - size;

	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
		    alignment, zero);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (unlikely(usize == old_usize))
		goto label_not_resized;

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr,
	    old_usize, old_rzsize, false, zero);
label_not_resized:
	UTRACE(ptr, size, ptr);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (usize);
}

/*
 * Public sallocx() entry point: report the usable size of an allocation.
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags)
{
	size_t usize;
	tsdn_t *tsdn;

	assert(malloc_initialized()
	    || IS_INITIALIZER);
	malloc_thread_init();

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	/* ivsalloc additionally validates that ptr is a live allocation. */
	if (config_ivsalloc)
		usize = ivsalloc(tsdn, ptr, config_prof);
	else
		usize = isalloc(tsdn, ptr, config_prof);

	witness_assert_lockless(tsdn);
	return (usize);
}

/*
 * Public dallocx() entry point: deallocate, honoring the tcache selection
 * encoded in flags.
 */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_dallocx(void *ptr, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	if (likely(!malloc_slow))
		ifree(tsd, ptr, tcache, false);
	else
		ifree(tsd, ptr, tcache, true);
	witness_assert_lockless(tsd_tsdn(tsd));
}

/*
 * Map a (size, flags) pair to the usable size an allocation with those
 * parameters would have.
 */
JEMALLOC_ALWAYS_INLINE_C size_t
inallocx(tsdn_t *tsdn, size_t size, int flags)
{
	size_t usize;

	witness_assert_lockless(tsdn);

	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
		usize = s2u(size);
	else
		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
	witness_assert_lockless(tsdn);
	return (usize);
}

/*
 * Public sdallocx() entry point: sized deallocation.  The caller-supplied
 * size must map to the allocation's actual usable size (asserted below).
 */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_sdallocx(void *ptr, size_t size, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);
	tsd = tsd_fetch();
	usize = inallocx(tsd_tsdn(tsd), size, flags);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));

	witness_assert_lockless(tsd_tsdn(tsd));
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	if (likely(!malloc_slow))
		isfree(tsd, ptr, usize, tcache, false);
	else
		isfree(tsd, ptr, usize, tcache, true);
	witness_assert_lockless(tsd_tsdn(tsd));
}

/*
 * Public nallocx() entry point: report the usable size an allocation with
 * the given parameters would have, or 0 on error.
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_nallocx(size_t size, int flags)
{
	size_t usize;
	tsdn_t *tsdn;

	assert(size != 0);

	if (unlikely(malloc_init()))
		return (0);

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	usize = inallocx(tsdn, size, flags);
	if (unlikely(usize > HUGE_MAXCLASS))
		return (0);

	witness_assert_lockless(tsdn);
	return (usize);
}

/*
 * Public mallctl() entry point: name-based control interface.
 */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	int ret;
	tsd_t *tsd;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
}

/*
 * Public mallctlnametomib() entry point: translate a control name into a
 * Management Information Base (MIB) for repeated fast lookups.
 */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
	int ret;
	tsdn_t *tsdn;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);
	ret = ctl_nametomib(tsdn, name, mibp, miblenp);
	witness_assert_lockless(tsdn);
	return (ret);
}

/*
 * Public mallctlbymib() entry point: MIB-based control interface.
 */
JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	tsd_t *tsd;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
}

/*
 * Public malloc_stats_print() entry point: emit statistics via write_cb.
 */
JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{
	tsdn_t *tsdn;

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);
	stats_print(write_cb, cbopaque, opts);
	witness_assert_lockless(tsdn);
}

/*
 * Public malloc_usable_size() entry point; unlike sallocx(), tolerates a
 * NULL ptr (returns 0).
 */
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
	size_t ret;
	tsdn_t *tsdn;

	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	if (config_ivsalloc)
		ret = ivsalloc(tsdn, ptr, config_prof);
	else
		ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);

	witness_assert_lockless(tsdn);
	return (ret);
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin compatibility functions.
2681a4bd5210SJason Evans */ 2682d0e79aa3SJason Evans 2683d0e79aa3SJason Evans #define ALLOCM_LG_ALIGN(la) (la) 2684d0e79aa3SJason Evans #define ALLOCM_ALIGN(a) (ffsl(a)-1) 2685d0e79aa3SJason Evans #define ALLOCM_ZERO ((int)0x40) 2686d0e79aa3SJason Evans #define ALLOCM_NO_MOVE ((int)0x80) 2687d0e79aa3SJason Evans 2688d0e79aa3SJason Evans #define ALLOCM_SUCCESS 0 2689d0e79aa3SJason Evans #define ALLOCM_ERR_OOM 1 2690d0e79aa3SJason Evans #define ALLOCM_ERR_NOT_MOVED 2 2691a4bd5210SJason Evans 2692a4bd5210SJason Evans int 2693a4bd5210SJason Evans je_allocm(void **ptr, size_t *rsize, size_t size, int flags) 2694a4bd5210SJason Evans { 2695a4bd5210SJason Evans void *p; 2696a4bd5210SJason Evans 2697a4bd5210SJason Evans assert(ptr != NULL); 2698a4bd5210SJason Evans 2699f921d10fSJason Evans p = je_mallocx(size, flags); 2700a4bd5210SJason Evans if (p == NULL) 2701a4bd5210SJason Evans return (ALLOCM_ERR_OOM); 2702f921d10fSJason Evans if (rsize != NULL) 27031f0a49e8SJason Evans *rsize = isalloc(tsdn_fetch(), p, config_prof); 2704f921d10fSJason Evans *ptr = p; 2705f921d10fSJason Evans return (ALLOCM_SUCCESS); 2706a4bd5210SJason Evans } 2707a4bd5210SJason Evans 2708a4bd5210SJason Evans int 2709a4bd5210SJason Evans je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) 2710a4bd5210SJason Evans { 2711f921d10fSJason Evans int ret; 2712a4bd5210SJason Evans bool no_move = flags & ALLOCM_NO_MOVE; 2713a4bd5210SJason Evans 2714a4bd5210SJason Evans assert(ptr != NULL); 2715a4bd5210SJason Evans assert(*ptr != NULL); 2716a4bd5210SJason Evans assert(size != 0); 2717a4bd5210SJason Evans assert(SIZE_T_MAX - size >= extra); 2718a4bd5210SJason Evans 2719f921d10fSJason Evans if (no_move) { 2720f921d10fSJason Evans size_t usize = je_xallocx(*ptr, size, extra, flags); 2721f921d10fSJason Evans ret = (usize >= size) ? 
ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED; 2722a4bd5210SJason Evans if (rsize != NULL) 2723a4bd5210SJason Evans *rsize = usize; 2724a4bd5210SJason Evans } else { 2725f921d10fSJason Evans void *p = je_rallocx(*ptr, size+extra, flags); 2726f921d10fSJason Evans if (p != NULL) { 2727f921d10fSJason Evans *ptr = p; 2728f921d10fSJason Evans ret = ALLOCM_SUCCESS; 2729f921d10fSJason Evans } else 2730f921d10fSJason Evans ret = ALLOCM_ERR_OOM; 2731f921d10fSJason Evans if (rsize != NULL) 27321f0a49e8SJason Evans *rsize = isalloc(tsdn_fetch(), *ptr, config_prof); 2733a4bd5210SJason Evans } 2734f921d10fSJason Evans return (ret); 2735a4bd5210SJason Evans } 2736a4bd5210SJason Evans 2737a4bd5210SJason Evans int 2738a4bd5210SJason Evans je_sallocm(const void *ptr, size_t *rsize, int flags) 2739a4bd5210SJason Evans { 2740a4bd5210SJason Evans 2741a4bd5210SJason Evans assert(rsize != NULL); 2742f921d10fSJason Evans *rsize = je_sallocx(ptr, flags); 2743a4bd5210SJason Evans return (ALLOCM_SUCCESS); 2744a4bd5210SJason Evans } 2745a4bd5210SJason Evans 2746a4bd5210SJason Evans int 2747a4bd5210SJason Evans je_dallocm(void *ptr, int flags) 2748a4bd5210SJason Evans { 2749a4bd5210SJason Evans 2750f921d10fSJason Evans je_dallocx(ptr, flags); 2751a4bd5210SJason Evans return (ALLOCM_SUCCESS); 2752a4bd5210SJason Evans } 2753a4bd5210SJason Evans 2754a4bd5210SJason Evans int 2755a4bd5210SJason Evans je_nallocm(size_t *rsize, size_t size, int flags) 2756a4bd5210SJason Evans { 2757a4bd5210SJason Evans size_t usize; 2758a4bd5210SJason Evans 2759f921d10fSJason Evans usize = je_nallocx(size, flags); 2760a4bd5210SJason Evans if (usize == 0) 2761a4bd5210SJason Evans return (ALLOCM_ERR_OOM); 2762a4bd5210SJason Evans if (rsize != NULL) 2763a4bd5210SJason Evans *rsize = usize; 2764a4bd5210SJason Evans return (ALLOCM_SUCCESS); 2765a4bd5210SJason Evans } 2766a4bd5210SJason Evans 2767d0e79aa3SJason Evans #undef ALLOCM_LG_ALIGN 2768d0e79aa3SJason Evans #undef ALLOCM_ALIGN 2769d0e79aa3SJason Evans #undef ALLOCM_ZERO 
#undef ALLOCM_NO_MOVE

#undef ALLOCM_SUCCESS
#undef ALLOCM_ERR_OOM
#undef ALLOCM_ERR_NOT_MOVED

/*
 * End compatibility functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be possible
 * to trigger the deadlock described above, but doing so would involve forking
 * via a library constructor that runs before jemalloc's runs.
 */
#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

	/* Force full allocator initialization at library load time. */
	malloc_init();
}
#endif

/*
 * pthread_atfork() prepare handler: acquire every allocator mutex so that no
 * lock is held by a thread that will not exist in the child.  The acquisition
 * order here is a fixed total order; the postfork handlers below release in
 * the reverse order.
 */
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, j, narenas;
	arena_t *arena;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized())
		return;
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	narenas = narenas_total_get();

	witness_prefork(tsd);
	/* Acquire all mutexes in a safe order. */
	ctl_prefork(tsd_tsdn(tsd));
	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
	prof_prefork0(tsd_tsdn(tsd));
	/*
	 * Per-arena locks are taken in three phases (prefork0..prefork2),
	 * completing each phase across all arenas before starting the next.
	 */
	for (i = 0; i < 3; i++) {
		for (j = 0; j < narenas; j++) {
			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
			    NULL) {
				switch (i) {
				case 0:
					arena_prefork0(tsd_tsdn(tsd), arena);
					break;
				case 1:
					arena_prefork1(tsd_tsdn(tsd), arena);
					break;
				case 2:
					arena_prefork2(tsd_tsdn(tsd), arena);
					break;
				default: not_reached();
				}
			}
		}
	}
	base_prefork(tsd_tsdn(tsd));
	chunk_prefork(tsd_tsdn(tsd));
	/* Final per-arena phase, after base/chunk locks are held. */
	for (i = 0; i < narenas; i++) {
		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_prefork3(tsd_tsdn(tsd), arena);
	}
	prof_prefork1(tsd_tsdn(tsd));
}

/*
 * pthread_atfork() parent handler: release all mutexes taken by the prefork
 * handler, in reverse acquisition order.
 */
#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, narenas;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized())
		return;
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	witness_postfork_parent(tsd);
	/* Release all mutexes, now that fork() has completed. */
	chunk_postfork_parent(tsd_tsdn(tsd));
	base_postfork_parent(tsd_tsdn(tsd));
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_postfork_parent(tsd_tsdn(tsd), arena);
	}
	prof_postfork_parent(tsd_tsdn(tsd));
	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
	ctl_postfork_parent(tsd_tsdn(tsd));
}

/*
 * pthread_atfork() child handler: reinitialize/release all mutexes in the
 * child, mirroring the parent handler's order.
 */
void
jemalloc_postfork_child(void)
{
	tsd_t *tsd;
	unsigned i, narenas;

	assert(malloc_initialized());

	tsd = tsd_fetch();

	witness_postfork_child(tsd);
	/* Release all mutexes, now that fork() has completed. */
	chunk_postfork_child(tsd_tsdn(tsd));
	base_postfork_child(tsd_tsdn(tsd));
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_postfork_child(tsd_tsdn(tsd), arena);
	}
	prof_postfork_child(tsd_tsdn(tsd));
	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
	ctl_postfork_child(tsd_tsdn(tsd));
}

/*
 * NOTE(review): appears to be the hook FreeBSD's libc invokes when the
 * process first becomes threaded; it only forwards to
 * malloc_mutex_first_thread() and discards the result — confirm against the
 * libc malloc/threading interface.
 */
void
_malloc_first_thread(void)
{

	(void)malloc_mutex_first_thread();
}

/******************************************************************************/