#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

malloc_tsd_data(, arenas, arena_t *, NULL)
malloc_tsd_data(, thread_allocated, thread_allocated_t,
    THREAD_ALLOCATED_INITIALIZER)

/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
const char	*__malloc_options_1_0 = NULL;
__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);

/* Runtime configuration options. */
const char	*je_malloc_conf;
bool	opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
bool	opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
size_t	opt_quarantine = ZU(0);
bool	opt_redzone = false;
bool	opt_utrace = false;
bool	opt_valgrind = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
size_t	opt_narenas = 0;

unsigned	ncpus;

malloc_mutex_t	arenas_lock;
arena_t		**arenas;
unsigned	narenas_total;
unsigned	narenas_auto;

/* Set to true once the allocator has been initialized. */
static bool	malloc_initialized = false;

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER	((unsigned long)0)
# define INITIALIZER	pthread_self()
# define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t	malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER	false
# define INITIALIZER	true
# define IS_INITIALIZER	malloc_initializer
static bool	malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
static malloc_mutex_t	init_lock;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	malloc_mutex_init(&init_lock);
}

#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif

#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do {						\
	if (opt_utrace) {						\
		int utrace_serrno = errno;				\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
		errno = utrace_serrno;					\
	}								\
} while (0)
#else
# define UTRACE(a, b, c)
#endif

/******************************************************************************/
/* Function prototypes for non-inline static functions. */

static void	stats_print_atexit(void);
static unsigned	malloc_ncpus(void);
static bool	malloc_conf_next(char const **opts_p, char const **k_p,
    size_t *klen_p, char const **v_p, size_t *vlen_p);
static void	malloc_conf_error(const char *msg, const char *k, size_t klen,
    const char *v, size_t vlen);
static void	malloc_conf_init(void);
static bool	malloc_init_hard(void);
static int	imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

/* Create a new arena and insert it into the arenas array at index ind. */
arena_t *
arenas_extend(unsigned ind)
{
	arena_t *ret;

	ret = (arena_t *)base_alloc(sizeof(arena_t));
	if (ret != NULL && arena_new(ret, ind) == false) {
		arenas[ind] = ret;
		return (ret);
	}
	/* Only reached if there is an OOM error. */

	/*
	 * OOM here is quite inconvenient to propagate, since dealing with it
	 * would require a check for failure in the fast path.  Instead, punt
	 * by using arenas[0].  In practice, this is an extremely unlikely
	 * failure.
	 */
	malloc_write("<jemalloc>: Error initializing arena\n");
	if (opt_abort)
		abort();

	return (arenas[0]);
}

/* Slow path, called only by choose_arena(). */
arena_t *
choose_arena_hard(void)
{
	arena_t *ret;

	if (narenas_auto > 1) {
		unsigned i, choose, first_null;

		choose = 0;
		first_null = narenas_auto;
		malloc_mutex_lock(&arenas_lock);
		assert(arenas[0] != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arenas[i] != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				if (arenas[i]->nthreads <
				    arenas[choose]->nthreads)
					choose = i;
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		if (arenas[choose]->nthreads == 0
		    || first_null == narenas_auto) {
			/*
			 * Use an unloaded arena, or the least loaded arena if
			 * all arenas are already initialized.
			 */
			ret = arenas[choose];
		} else {
			/* Initialize a new arena. */
			ret = arenas_extend(first_null);
		}
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	} else {
		ret = arenas[0];
		malloc_mutex_lock(&arenas_lock);
		ret->nthreads++;
		malloc_mutex_unlock(&arenas_lock);
	}

	arenas_tsd_set(&ret);

	return (ret);
}

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		unsigned narenas, i;

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arenas[i];
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(&arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tcache, arena);
				}
				malloc_mutex_unlock(&arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

static unsigned
malloc_ncpus(void)
{
	unsigned ret;
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	if (result == -1) {
		/* Error. */
		ret = 1;
	} else {
		ret = (unsigned)result;
	}

	return (ret);
}

void
arenas_cleanup(void *arg)
{
	arena_t *arena = *(arena_t **)arg;

	malloc_mutex_lock(&arenas_lock);
	arena->nthreads--;
	malloc_mutex_unlock(&arenas_lock);
}

JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{

	/*
	 * TSD initialization can't be safely done as a side effect of
	 * deallocation, because it is possible for a thread to do nothing but
	 * deallocate its TLS data via free(), in which case writing to TLS
	 * would cause write-after-free memory corruption.  The quarantine
	 * facility *only* gets used as a side effect of deallocation, so make
	 * a best effort attempt at initializing its TSD by hooking all
	 * allocation events.
	 */
	if (config_fill && opt_quarantine)
		quarantine_alloc_hook();
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

	if (malloc_initialized == false && malloc_init_hard())
		return (true);
	malloc_thread_init();

	return (false);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; accept == false;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; accept == false;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	/*
	 * Automatically configure valgrind before processing options.  The
	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
	 */
	if (config_valgrind) {
		opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
		if (config_fill && opt_valgrind) {
			opt_junk = false;
			assert(opt_zero == false);
			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
			opt_redzone = true;
		}
		if (config_tcache && opt_valgrind)
			opt_tcache = false;
	}

	for (i = 0; i < 3; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 1: {
			int linklen = 0;
#ifndef _WIN32
			int saved_errno = errno;
			const char *linkname =
# ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
			    "/etc/malloc.conf"
# endif
			    ;

			/*
			 * Try to use the contents of the "/etc/malloc.conf"
			 * symbolic link's name.
			 */
			linklen = readlink(linkname, buf, sizeof(buf) - 1);
			if (linklen == -1) {
				/* No configuration specified. */
				linklen = 0;
				/* restore errno */
				set_errno(saved_errno);
			}
#endif
			buf[linklen] = '\0';
			opts = buf;
			break;
		} case 2: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if (issetugid() == 0 && (opts = getenv(envname)) !=
			    NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			/* NOTREACHED */
			assert(false);
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen) == false) {
#define CONF_HANDLE_BOOL(o, n)						\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				if (strncmp("true", v, vlen) == 0 &&	\
				    vlen == sizeof("true")-1)		\
					o = true;			\
				else if (strncmp("false", v, vlen) ==	\
				    0 && vlen == sizeof("false")-1)	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				continue;				\
			}
#define CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				uintmax_t um;				\
				char *end;				\
									\
				set_errno(0);				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (clip) {			\
					if (um < min)			\
						o = min;		\
					else if (um > max)		\
						o = max;		\
					else				\
						o = um;			\
				} else {				\
					if (um < min || um > max) {	\
						malloc_conf_error(	\
						    "Out-of-range "	\
						    "conf value",	\
						    k, klen, v, vlen);	\
					} else				\
						o = um;			\
				}					\
				continue;				\
			}
#define CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				long l;					\
				char *end;				\
									\
				set_errno(0);				\
				l = strtol(v, &end, 0);			\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)min || l >	\
				    (ssize_t)max) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define CONF_HANDLE_CHAR_P(o, n, d)					\
			if (sizeof(n)-1 == klen && strncmp(n, k,	\
			    klen) == 0) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}

			CONF_HANDLE_BOOL(opt_abort, "abort")
			/*
			 * Chunks always require at least one header page, plus
			 * one data page in the absence of redzones, or three
			 * pages in the presence of redzones.  In order to
			 * simplify options processing, fix the limit based on
			 * config_fill.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1,
			    true)
			if (strncmp("dss", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < dss_prec_limit; i++) {
					if (strncmp(dss_prec_names[i], v, vlen)
					    == 0) {
						if (chunk_dss_prec_set(i)) {
							malloc_conf_error(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
							    dss_prec_names[i];
							match = true;
							break;
						}
					}
				}
				if (match == false) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1,
			    SIZE_T_MAX, false)
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print")
			if (config_fill) {
				CONF_HANDLE_BOOL(opt_junk, "junk")
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX, false)
				CONF_HANDLE_BOOL(opt_redzone, "redzone")
				CONF_HANDLE_BOOL(opt_zero, "zero")
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace")
			}
			if (config_valgrind) {
				CONF_HANDLE_BOOL(opt_valgrind, "valgrind")
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc")
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache")
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof")
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum")
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump")
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final")
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak")
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}

static bool
malloc_init_hard(void)
{
	arena_t *init_arenas[1];

	malloc_mutex_lock(&init_lock);
	if (malloc_initialized || IS_INITIALIZER) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) {
		/* Busy-wait until the initializing thread completes. */
		do {
			malloc_mutex_unlock(&init_lock);
			CPU_SPINWAIT;
			malloc_mutex_lock(&init_lock);
		} while (malloc_initialized == false);
		malloc_mutex_unlock(&init_lock);
		return (false);
	}
#endif
	malloc_initializer = INITIALIZER;

	malloc_tsd_boot();
	if (config_prof)
		prof_boot0();

	malloc_conf_init();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32))
	/* Register fork handlers. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
	}
#endif

	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}

	if (base_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (chunk_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (ctl_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof)
		prof_boot1();

	arena_boot();

	if (config_tcache && tcache_boot0()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (huge_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (malloc_mutex_init(&arenas_lock))
		return (true);

	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas_total = narenas_auto = 1;
	arenas = init_arenas;
	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);

	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * choose_arena_hard().
	 */
	arenas_extend(0);
	if (arenas[0] == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Initialize allocation counters before any allocations can occur. */
	if (config_stats && thread_allocated_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (arenas_tsd_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_tcache && tcache_boot1()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_fill && quarantine_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (config_prof && prof_boot2()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	/* Get number of CPUs. */
	malloc_mutex_unlock(&init_lock);
	ncpus = malloc_ncpus();
	malloc_mutex_lock(&init_lock);

	if (mutex_boot()) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas_auto = opt_narenas;
	/*
	 * Make sure that the arenas array can be allocated.  In practice, this
	 * limit is enough to allow the allocator to function, but the ctl
	 * machinery will fail to allocate memory at far lower limits.
	 */
	if (narenas_auto > chunksize / sizeof(arena_t *)) {
		narenas_auto = chunksize / sizeof(arena_t *);
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas_auto);
	}
	narenas_total = narenas_auto;

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total);
	if (arenas == NULL) {
		malloc_mutex_unlock(&init_lock);
		return (true);
	}
	/*
	 * Zero the array.  In practice, this should always be pre-zeroed,
	 * since it was just mmap()ed, but let's be sure.
	 */
	memset(arenas, 0, sizeof(arena_t *) * narenas_total);
	/* Copy the pointer to the one arena that was already initialized. */
	arenas[0] = init_arenas[0];

	malloc_initialized = true;
	malloc_mutex_unlock(&init_lock);
	return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

void *
je_malloc(size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		ret = NULL;
		goto label_oom;
	}

	if (size == 0)
		size = 1;

	if (config_prof && opt_prof) {
		usize = s2u(size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_oom;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <=
		    SMALL_MAXCLASS) {
			ret = imalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = imalloc(size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(size);
		ret = imalloc(size);
	}

label_oom:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in malloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
	return (ret);
}

JEMALLOC_ATTR(nonnull(1))
#ifdef JEMALLOC_PROF
/*
 * Avoid any uncertainty as to how many backtrace frames to ignore in
 * PROF_ALLOC_PREP().
 */
JEMALLOC_NOINLINE
#endif
static int
imemalign(void **memptr, size_t alignment, size_t size,
    size_t min_alignment)
{
	int ret;
	size_t usize;
	void *result;
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	assert(min_alignment != 0);

	if (malloc_init())
		result = NULL;
	else {
		if (size == 0)
			size = 1;

		/* Make sure that alignment is a large enough power of 2. */
		if (((alignment - 1) & alignment) != 0
		    || (alignment < min_alignment)) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error allocating "
				    "aligned memory: invalid alignment\n");
				abort();
			}
			result = NULL;
			ret = EINVAL;
			goto label_return;
		}

		usize = sa2u(size, alignment);
		if (usize == 0) {
			result = NULL;
			ret = ENOMEM;
			goto label_return;
		}

		if (config_prof && opt_prof) {
			PROF_ALLOC_PREP(2, usize, cnt);
			if (cnt == NULL) {
				result = NULL;
				ret = EINVAL;
			} else {
				if (prof_promote && (uintptr_t)cnt !=
				    (uintptr_t)1U && usize <= SMALL_MAXCLASS) {
					assert(sa2u(SMALL_MAXCLASS+1,
					    alignment) != 0);
					result = ipalloc(sa2u(SMALL_MAXCLASS+1,
					    alignment), alignment, false);
					if (result != NULL) {
						arena_prof_promoted(result,
						    usize);
					}
				} else {
					result = ipalloc(usize, alignment,
					    false);
				}
			}
		} else
			result = ipalloc(usize, alignment, false);
	}

	if (result == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error allocating aligned "
			    "memory: out of memory\n");
			abort();
		}
		ret = ENOMEM;
		goto label_return;
	}

	*memptr = result;
	ret = 0;

label_return:
	if (config_stats && result != NULL) {
		assert(usize == isalloc(result, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	if (config_prof && opt_prof && result != NULL)
		prof_malloc(result, usize, cnt);
	UTRACE(0, size, result);
	return (ret);
}

int
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret = imemalign(memptr, alignment, size, sizeof(void *));
	JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
	    config_prof), false);
	return (ret);
}

void *
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if ((err = imemalign(&ret, alignment, size, 1)) != 0) {
		ret = NULL;
		set_errno(err);
	}
	JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
	    false);
	return (ret);
}

void *
je_calloc(size_t num, size_t size)
{
	void *ret;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);

	if (malloc_init()) {
		num_size = 0;
		ret = NULL;
		goto label_return;
	}

	num_size = num * size;
	if (num_size == 0) {
		if (num == 0 || size == 0)
			num_size = 1;
		else {
			ret = NULL;
			goto label_return;
		}
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2)))
	    && (num_size / size != num)) {
		/* size_t overflow. */
		ret = NULL;
		goto label_return;
	}

	if (config_prof && opt_prof) {
		usize = s2u(num_size);
		PROF_ALLOC_PREP(1, usize, cnt);
		if (cnt == NULL) {
			ret = NULL;
			goto label_return;
		}
		if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize
		    <= SMALL_MAXCLASS) {
			ret = icalloc(SMALL_MAXCLASS+1);
			if (ret != NULL)
				arena_prof_promoted(ret, usize);
		} else
			ret = icalloc(num_size);
	} else {
		if (config_stats || (config_valgrind && opt_valgrind))
			usize = s2u(num_size);
		ret = icalloc(num_size);
	}

label_return:
	if (ret == NULL) {
		if (config_xmalloc && opt_xmalloc) {
			malloc_write("<jemalloc>: Error in calloc(): out of "
			    "memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}

	if (config_prof && opt_prof && ret != NULL)
		prof_malloc(ret, usize, cnt);
	if (config_stats && ret != NULL) {
		assert(usize == isalloc(ret, config_prof));
		thread_allocated_tsd_get()->allocated += usize;
	}
	UTRACE(0, num_size, ret);
	JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
	return (ret);
}

void *
je_realloc(void *ptr, size_t size)
{
	void *ret;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_size = 0;
	size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL);
	prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL);

	if (size == 0) {
		if (ptr != NULL) {
			/* realloc(ptr, 0) is equivalent to free(p). */
			assert(malloc_initialized || IS_INITIALIZER);
			if (config_prof) {
				old_size = isalloc(ptr, true);
				if (config_valgrind && opt_valgrind)
					old_rzsize = p2rz(ptr);
			} else if (config_stats) {
				old_size = isalloc(ptr, false);
				if (config_valgrind && opt_valgrind)
					old_rzsize = u2rz(old_size);
			} else if (config_valgrind && opt_valgrind) {
				old_size = isalloc(ptr, false);
				old_rzsize = u2rz(old_size);
			}
			if (config_prof && opt_prof) {
				old_ctx = prof_ctx_get(ptr);
				cnt = NULL;
			}
			iqalloc(ptr);
			ret = NULL;
			goto label_return;
		} else
			size = 1;
	}

	if (ptr != NULL) {
		assert(malloc_initialized || IS_INITIALIZER);
		malloc_thread_init();

		if (config_prof) {
			old_size = isalloc(ptr, true);
			if (config_valgrind && opt_valgrind)
				old_rzsize = p2rz(ptr);
		} else if (config_stats) {
			old_size = isalloc(ptr, false);
			if (config_valgrind && opt_valgrind)
				old_rzsize = u2rz(old_size);
		} else if (config_valgrind && opt_valgrind) {
			old_size = isalloc(ptr, false);
			old_rzsize = u2rz(old_size);
		}
		if (config_prof && opt_prof) {
			usize = s2u(size);
			old_ctx = prof_ctx_get(ptr);
			PROF_ALLOC_PREP(1, usize, cnt);
			if (cnt == NULL) {
				old_ctx = NULL;
				ret = NULL;
				goto label_oom;
			}
			if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U &&
			    usize <= SMALL_MAXCLASS) {
				ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0,
				    false, false);
				if (ret != NULL)
					arena_prof_promoted(ret, usize);
				else
					old_ctx = NULL;
			} else {
				ret = iralloc(ptr, size, 0, 0, false, false);
				if (ret == NULL)
					old_ctx = NULL;
			}
		} else {
			if (config_stats || (config_valgrind && opt_valgrind))
				usize = s2u(size);
			ret = iralloc(ptr, size, 0, 0, false, false);
		}

label_oom:
		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			set_errno(ENOMEM);
		}
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (config_prof && opt_prof)
			old_ctx = NULL;
		if (malloc_init()) {
			if (config_prof && opt_prof)
				cnt = NULL;
			ret = NULL;
		} else {
			if (config_prof && opt_prof) {
				usize = s2u(size);
				PROF_ALLOC_PREP(1, usize, cnt);
				if (cnt == NULL)
					ret = NULL;
				else {
					if (prof_promote && (uintptr_t)cnt !=
					    (uintptr_t)1U && usize <=
					    SMALL_MAXCLASS) {
						ret = imalloc(SMALL_MAXCLASS+1);
						if (ret != NULL) {
							arena_prof_promoted(ret,
							    usize);
						}
					} else
						ret = imalloc(size);
				}
			} else {
				if (config_stats || (config_valgrind &&
				    opt_valgrind))
					usize = s2u(size);
				ret = imalloc(size);
			}
		}

		if (ret == NULL) {
			if (config_xmalloc && opt_xmalloc) {
				malloc_write("<jemalloc>: Error in realloc(): "
				    "out of memory\n");
				abort();
			}
			set_errno(ENOMEM);
		}
	}

label_return:
	if (config_prof && opt_prof)
		prof_realloc(ret, usize, cnt, old_size, old_ctx);
	if (config_stats && ret != NULL) {
		thread_allocated_t *ta;
		assert(usize == isalloc(ret, config_prof));
		ta = thread_allocated_tsd_get();
		ta->allocated += usize;
		ta->deallocated += old_size;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false);
	return (ret);
}

void
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (ptr != NULL) {
		size_t usize;
		size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

		assert(malloc_initialized || IS_INITIALIZER);

		if (config_prof && opt_prof) {
			usize = isalloc(ptr, config_prof);
			prof_free(ptr, usize);
		} else if (config_stats || config_valgrind)
			usize = isalloc(ptr, config_prof);
		if (config_stats)
1274a4bd5210SJason Evans thread_allocated_tsd_get()->deallocated += usize; 1275a4bd5210SJason Evans if (config_valgrind && opt_valgrind) 1276a4bd5210SJason Evans rzsize = p2rz(ptr); 1277a4bd5210SJason Evans iqalloc(ptr); 1278a4bd5210SJason Evans JEMALLOC_VALGRIND_FREE(ptr, rzsize); 1279a4bd5210SJason Evans } 1280a4bd5210SJason Evans } 1281a4bd5210SJason Evans 1282a4bd5210SJason Evans /* 1283a4bd5210SJason Evans * End malloc(3)-compatible functions. 1284a4bd5210SJason Evans */ 1285a4bd5210SJason Evans /******************************************************************************/ 1286a4bd5210SJason Evans /* 1287a4bd5210SJason Evans * Begin non-standard override functions. 1288a4bd5210SJason Evans */ 1289a4bd5210SJason Evans 1290a4bd5210SJason Evans #ifdef JEMALLOC_OVERRIDE_MEMALIGN 1291a4bd5210SJason Evans void * 1292a4bd5210SJason Evans je_memalign(size_t alignment, size_t size) 1293a4bd5210SJason Evans { 1294a4bd5210SJason Evans void *ret JEMALLOC_CC_SILENCE_INIT(NULL); 1295a4bd5210SJason Evans imemalign(&ret, alignment, size, 1); 1296a4bd5210SJason Evans JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); 1297a4bd5210SJason Evans return (ret); 1298a4bd5210SJason Evans } 1299a4bd5210SJason Evans #endif 1300a4bd5210SJason Evans 1301a4bd5210SJason Evans #ifdef JEMALLOC_OVERRIDE_VALLOC 1302a4bd5210SJason Evans void * 1303a4bd5210SJason Evans je_valloc(size_t size) 1304a4bd5210SJason Evans { 1305a4bd5210SJason Evans void *ret JEMALLOC_CC_SILENCE_INIT(NULL); 1306a4bd5210SJason Evans imemalign(&ret, PAGE, size, 1); 1307a4bd5210SJason Evans JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); 1308a4bd5210SJason Evans return (ret); 1309a4bd5210SJason Evans } 1310a4bd5210SJason Evans #endif 1311a4bd5210SJason Evans 1312a4bd5210SJason Evans /* 1313a4bd5210SJason Evans * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has 1314a4bd5210SJason Evans * #define je_malloc malloc 1315a4bd5210SJason Evans */ 1316a4bd5210SJason Evans #define malloc_is_malloc 1 1317a4bd5210SJason Evans #define is_malloc_(a) malloc_is_ ## a 1318a4bd5210SJason Evans #define is_malloc(a) is_malloc_(a) 1319a4bd5210SJason Evans 1320a4bd5210SJason Evans #if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__)) 1321a4bd5210SJason Evans /* 1322a4bd5210SJason Evans * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible 1323a4bd5210SJason Evans * to inconsistently reference libc's malloc(3)-compatible functions 1324a4bd5210SJason Evans * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). 1325a4bd5210SJason Evans * 1326a4bd5210SJason Evans * These definitions interpose hooks in glibc. The functions are actually 1327a4bd5210SJason Evans * passed an extra argument for the caller return address, which will be 1328a4bd5210SJason Evans * ignored. 1329a4bd5210SJason Evans */ 133082872ac0SJason Evans JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free; 133182872ac0SJason Evans JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc; 133282872ac0SJason Evans JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc; 133382872ac0SJason Evans JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) = 1334e722f8f8SJason Evans je_memalign; 1335a4bd5210SJason Evans #endif 1336a4bd5210SJason Evans 1337a4bd5210SJason Evans /* 1338a4bd5210SJason Evans * End non-standard override functions. 
1339a4bd5210SJason Evans */ 1340a4bd5210SJason Evans /******************************************************************************/ 1341a4bd5210SJason Evans /* 1342a4bd5210SJason Evans * Begin non-standard functions. 1343a4bd5210SJason Evans */ 1344a4bd5210SJason Evans 1345a4bd5210SJason Evans size_t 134682872ac0SJason Evans je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) 1347a4bd5210SJason Evans { 1348a4bd5210SJason Evans size_t ret; 1349a4bd5210SJason Evans 1350a4bd5210SJason Evans assert(malloc_initialized || IS_INITIALIZER); 1351f8ca2db1SJason Evans malloc_thread_init(); 1352a4bd5210SJason Evans 1353a4bd5210SJason Evans if (config_ivsalloc) 1354a4bd5210SJason Evans ret = ivsalloc(ptr, config_prof); 1355a4bd5210SJason Evans else 1356a4bd5210SJason Evans ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0; 1357a4bd5210SJason Evans 1358a4bd5210SJason Evans return (ret); 1359a4bd5210SJason Evans } 1360a4bd5210SJason Evans 1361a4bd5210SJason Evans void 1362a4bd5210SJason Evans je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, 1363a4bd5210SJason Evans const char *opts) 1364a4bd5210SJason Evans { 1365a4bd5210SJason Evans 1366a4bd5210SJason Evans stats_print(write_cb, cbopaque, opts); 1367a4bd5210SJason Evans } 1368a4bd5210SJason Evans 1369a4bd5210SJason Evans int 1370a4bd5210SJason Evans je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, 1371a4bd5210SJason Evans size_t newlen) 1372a4bd5210SJason Evans { 1373a4bd5210SJason Evans 1374a4bd5210SJason Evans if (malloc_init()) 1375a4bd5210SJason Evans return (EAGAIN); 1376a4bd5210SJason Evans 1377a4bd5210SJason Evans return (ctl_byname(name, oldp, oldlenp, newp, newlen)); 1378a4bd5210SJason Evans } 1379a4bd5210SJason Evans 1380a4bd5210SJason Evans int 1381a4bd5210SJason Evans je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) 1382a4bd5210SJason Evans { 1383a4bd5210SJason Evans 1384a4bd5210SJason Evans if (malloc_init()) 1385a4bd5210SJason Evans return (EAGAIN); 1386a4bd5210SJason Evans 1387a4bd5210SJason Evans return (ctl_nametomib(name, mibp, miblenp)); 1388a4bd5210SJason Evans } 1389a4bd5210SJason Evans 1390a4bd5210SJason Evans int 1391a4bd5210SJason Evans je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, 1392a4bd5210SJason Evans void *newp, size_t newlen) 1393a4bd5210SJason Evans { 1394a4bd5210SJason Evans 1395a4bd5210SJason Evans if (malloc_init()) 1396a4bd5210SJason Evans return (EAGAIN); 1397a4bd5210SJason Evans 1398a4bd5210SJason Evans return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); 1399a4bd5210SJason Evans } 1400a4bd5210SJason Evans 1401a4bd5210SJason Evans /* 1402a4bd5210SJason Evans * End non-standard functions. 1403a4bd5210SJason Evans */ 1404a4bd5210SJason Evans /******************************************************************************/ 1405a4bd5210SJason Evans /* 1406a4bd5210SJason Evans * Begin experimental functions. 1407a4bd5210SJason Evans */ 1408a4bd5210SJason Evans #ifdef JEMALLOC_EXPERIMENTAL 1409a4bd5210SJason Evans 1410*2b06b201SJason Evans JEMALLOC_ALWAYS_INLINE_C void * 141182872ac0SJason Evans iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache, 141282872ac0SJason Evans arena_t *arena) 1413a4bd5210SJason Evans { 1414a4bd5210SJason Evans 1415a4bd5210SJason Evans assert(usize == ((alignment == 0) ? 
s2u(usize) : sa2u(usize, 1416a4bd5210SJason Evans alignment))); 1417a4bd5210SJason Evans 1418a4bd5210SJason Evans if (alignment != 0) 141982872ac0SJason Evans return (ipallocx(usize, alignment, zero, try_tcache, arena)); 1420a4bd5210SJason Evans else if (zero) 142182872ac0SJason Evans return (icallocx(usize, try_tcache, arena)); 1422a4bd5210SJason Evans else 142382872ac0SJason Evans return (imallocx(usize, try_tcache, arena)); 1424a4bd5210SJason Evans } 1425a4bd5210SJason Evans 1426a4bd5210SJason Evans int 1427a4bd5210SJason Evans je_allocm(void **ptr, size_t *rsize, size_t size, int flags) 1428a4bd5210SJason Evans { 1429a4bd5210SJason Evans void *p; 1430a4bd5210SJason Evans size_t usize; 1431a4bd5210SJason Evans size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) 1432a4bd5210SJason Evans & (SIZE_T_MAX-1)); 1433a4bd5210SJason Evans bool zero = flags & ALLOCM_ZERO; 143482872ac0SJason Evans unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; 143582872ac0SJason Evans arena_t *arena; 143682872ac0SJason Evans bool try_tcache; 1437a4bd5210SJason Evans 1438a4bd5210SJason Evans assert(ptr != NULL); 1439a4bd5210SJason Evans assert(size != 0); 1440a4bd5210SJason Evans 1441a4bd5210SJason Evans if (malloc_init()) 1442a4bd5210SJason Evans goto label_oom; 1443a4bd5210SJason Evans 144482872ac0SJason Evans if (arena_ind != UINT_MAX) { 144582872ac0SJason Evans arena = arenas[arena_ind]; 144682872ac0SJason Evans try_tcache = false; 144782872ac0SJason Evans } else { 144882872ac0SJason Evans arena = NULL; 144982872ac0SJason Evans try_tcache = true; 145082872ac0SJason Evans } 145182872ac0SJason Evans 1452a4bd5210SJason Evans usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); 1453a4bd5210SJason Evans if (usize == 0) 1454a4bd5210SJason Evans goto label_oom; 1455a4bd5210SJason Evans 1456a4bd5210SJason Evans if (config_prof && opt_prof) { 1457e722f8f8SJason Evans prof_thr_cnt_t *cnt; 1458e722f8f8SJason Evans 1459a4bd5210SJason Evans PROF_ALLOC_PREP(1, usize, cnt); 1460a4bd5210SJason Evans if (cnt == NULL) 1461a4bd5210SJason Evans goto label_oom; 1462a4bd5210SJason Evans if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= 1463a4bd5210SJason Evans SMALL_MAXCLASS) { 1464a4bd5210SJason Evans size_t usize_promoted = (alignment == 0) ? 
1465a4bd5210SJason Evans s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, 1466a4bd5210SJason Evans alignment); 1467a4bd5210SJason Evans assert(usize_promoted != 0); 146882872ac0SJason Evans p = iallocm(usize_promoted, alignment, zero, 146982872ac0SJason Evans try_tcache, arena); 1470a4bd5210SJason Evans if (p == NULL) 1471a4bd5210SJason Evans goto label_oom; 1472a4bd5210SJason Evans arena_prof_promoted(p, usize); 1473a4bd5210SJason Evans } else { 147482872ac0SJason Evans p = iallocm(usize, alignment, zero, try_tcache, arena); 1475a4bd5210SJason Evans if (p == NULL) 1476a4bd5210SJason Evans goto label_oom; 1477a4bd5210SJason Evans } 1478a4bd5210SJason Evans prof_malloc(p, usize, cnt); 1479a4bd5210SJason Evans } else { 148082872ac0SJason Evans p = iallocm(usize, alignment, zero, try_tcache, arena); 1481a4bd5210SJason Evans if (p == NULL) 1482a4bd5210SJason Evans goto label_oom; 1483a4bd5210SJason Evans } 1484a4bd5210SJason Evans if (rsize != NULL) 1485a4bd5210SJason Evans *rsize = usize; 1486a4bd5210SJason Evans 1487a4bd5210SJason Evans *ptr = p; 1488a4bd5210SJason Evans if (config_stats) { 1489a4bd5210SJason Evans assert(usize == isalloc(p, config_prof)); 1490a4bd5210SJason Evans thread_allocated_tsd_get()->allocated += usize; 1491a4bd5210SJason Evans } 1492a4bd5210SJason Evans UTRACE(0, size, p); 1493a4bd5210SJason Evans JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero); 1494a4bd5210SJason Evans return (ALLOCM_SUCCESS); 1495a4bd5210SJason Evans label_oom: 1496a4bd5210SJason Evans if (config_xmalloc && opt_xmalloc) { 1497a4bd5210SJason Evans malloc_write("<jemalloc>: Error in allocm(): " 1498a4bd5210SJason Evans "out of memory\n"); 1499a4bd5210SJason Evans abort(); 1500a4bd5210SJason Evans } 1501a4bd5210SJason Evans *ptr = NULL; 1502a4bd5210SJason Evans UTRACE(0, size, 0); 1503a4bd5210SJason Evans return (ALLOCM_ERR_OOM); 1504a4bd5210SJason Evans } 1505a4bd5210SJason Evans 1506a4bd5210SJason Evans int 1507a4bd5210SJason Evans je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) 1508a4bd5210SJason Evans { 1509a4bd5210SJason Evans void *p, *q; 1510a4bd5210SJason Evans size_t usize; 1511a4bd5210SJason Evans size_t old_size; 1512a4bd5210SJason Evans size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); 1513a4bd5210SJason Evans size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) 1514a4bd5210SJason Evans & (SIZE_T_MAX-1)); 1515a4bd5210SJason Evans bool zero = flags & ALLOCM_ZERO; 1516a4bd5210SJason Evans bool no_move = flags & ALLOCM_NO_MOVE; 151782872ac0SJason Evans unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; 151882872ac0SJason Evans bool try_tcache_alloc, try_tcache_dalloc; 151982872ac0SJason Evans arena_t *arena; 1520a4bd5210SJason Evans 1521a4bd5210SJason Evans assert(ptr != NULL); 1522a4bd5210SJason Evans assert(*ptr != NULL); 1523a4bd5210SJason Evans assert(size != 0); 1524a4bd5210SJason Evans assert(SIZE_T_MAX - size >= extra); 1525a4bd5210SJason Evans assert(malloc_initialized || IS_INITIALIZER); 1526f8ca2db1SJason Evans malloc_thread_init(); 1527a4bd5210SJason Evans 152882872ac0SJason Evans if (arena_ind != UINT_MAX) { 152982872ac0SJason Evans arena_chunk_t *chunk; 153082872ac0SJason Evans try_tcache_alloc = true; 153182872ac0SJason Evans chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr); 153282872ac0SJason Evans try_tcache_dalloc = (chunk == *ptr || chunk->arena != 153382872ac0SJason Evans arenas[arena_ind]); 153482872ac0SJason Evans arena = arenas[arena_ind]; 153582872ac0SJason Evans } else { 153682872ac0SJason Evans try_tcache_alloc = true; 153782872ac0SJason 
Evans try_tcache_dalloc = true; 153882872ac0SJason Evans arena = NULL; 153982872ac0SJason Evans } 154082872ac0SJason Evans 1541a4bd5210SJason Evans p = *ptr; 1542a4bd5210SJason Evans if (config_prof && opt_prof) { 1543e722f8f8SJason Evans prof_thr_cnt_t *cnt; 1544e722f8f8SJason Evans 1545a4bd5210SJason Evans /* 1546a4bd5210SJason Evans * usize isn't knowable before iralloc() returns when extra is 1547a4bd5210SJason Evans * non-zero. Therefore, compute its maximum possible value and 1548a4bd5210SJason Evans * use that in PROF_ALLOC_PREP() to decide whether to capture a 1549a4bd5210SJason Evans * backtrace. prof_realloc() will use the actual usize to 1550a4bd5210SJason Evans * decide whether to sample. 1551a4bd5210SJason Evans */ 1552a4bd5210SJason Evans size_t max_usize = (alignment == 0) ? s2u(size+extra) : 1553a4bd5210SJason Evans sa2u(size+extra, alignment); 1554a4bd5210SJason Evans prof_ctx_t *old_ctx = prof_ctx_get(p); 1555a4bd5210SJason Evans old_size = isalloc(p, true); 1556a4bd5210SJason Evans if (config_valgrind && opt_valgrind) 1557a4bd5210SJason Evans old_rzsize = p2rz(p); 1558a4bd5210SJason Evans PROF_ALLOC_PREP(1, max_usize, cnt); 1559a4bd5210SJason Evans if (cnt == NULL) 1560a4bd5210SJason Evans goto label_oom; 1561a4bd5210SJason Evans /* 1562a4bd5210SJason Evans * Use minimum usize to determine whether promotion may happen. 1563a4bd5210SJason Evans */ 1564a4bd5210SJason Evans if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U 1565a4bd5210SJason Evans && ((alignment == 0) ? s2u(size) : sa2u(size, alignment)) 1566a4bd5210SJason Evans <= SMALL_MAXCLASS) { 156782872ac0SJason Evans q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= 1568a4bd5210SJason Evans size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1), 156982872ac0SJason Evans alignment, zero, no_move, try_tcache_alloc, 157082872ac0SJason Evans try_tcache_dalloc, arena); 1571a4bd5210SJason Evans if (q == NULL) 1572a4bd5210SJason Evans goto label_err; 1573a4bd5210SJason Evans if (max_usize < PAGE) { 1574a4bd5210SJason Evans usize = max_usize; 1575a4bd5210SJason Evans arena_prof_promoted(q, usize); 1576a4bd5210SJason Evans } else 1577a4bd5210SJason Evans usize = isalloc(q, config_prof); 1578a4bd5210SJason Evans } else { 157982872ac0SJason Evans q = irallocx(p, size, extra, alignment, zero, no_move, 158082872ac0SJason Evans try_tcache_alloc, try_tcache_dalloc, arena); 1581a4bd5210SJason Evans if (q == NULL) 1582a4bd5210SJason Evans goto label_err; 1583a4bd5210SJason Evans usize = isalloc(q, config_prof); 1584a4bd5210SJason Evans } 1585a4bd5210SJason Evans prof_realloc(q, usize, cnt, old_size, old_ctx); 1586a4bd5210SJason Evans if (rsize != NULL) 1587a4bd5210SJason Evans *rsize = usize; 1588a4bd5210SJason Evans } else { 1589a4bd5210SJason Evans if (config_stats) { 1590a4bd5210SJason Evans old_size = isalloc(p, false); 1591a4bd5210SJason Evans if (config_valgrind && opt_valgrind) 1592a4bd5210SJason Evans old_rzsize = u2rz(old_size); 1593a4bd5210SJason Evans } else if (config_valgrind && opt_valgrind) { 1594a4bd5210SJason Evans old_size = isalloc(p, false); 1595a4bd5210SJason Evans old_rzsize = u2rz(old_size); 1596a4bd5210SJason Evans } 159782872ac0SJason Evans q = irallocx(p, size, extra, alignment, zero, no_move, 159882872ac0SJason Evans try_tcache_alloc, try_tcache_dalloc, arena); 1599a4bd5210SJason Evans if (q == NULL) 1600a4bd5210SJason Evans goto label_err; 1601a4bd5210SJason Evans if (config_stats) 1602a4bd5210SJason Evans usize = isalloc(q, config_prof); 1603a4bd5210SJason Evans if (rsize != NULL) { 1604a4bd5210SJason 
Evans if (config_stats == false) 1605a4bd5210SJason Evans usize = isalloc(q, config_prof); 1606a4bd5210SJason Evans *rsize = usize; 1607a4bd5210SJason Evans } 1608a4bd5210SJason Evans } 1609a4bd5210SJason Evans 1610a4bd5210SJason Evans *ptr = q; 1611a4bd5210SJason Evans if (config_stats) { 1612a4bd5210SJason Evans thread_allocated_t *ta; 1613a4bd5210SJason Evans ta = thread_allocated_tsd_get(); 1614a4bd5210SJason Evans ta->allocated += usize; 1615a4bd5210SJason Evans ta->deallocated += old_size; 1616a4bd5210SJason Evans } 1617a4bd5210SJason Evans UTRACE(p, size, q); 1618a4bd5210SJason Evans JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero); 1619a4bd5210SJason Evans return (ALLOCM_SUCCESS); 1620a4bd5210SJason Evans label_err: 1621a4bd5210SJason Evans if (no_move) { 1622a4bd5210SJason Evans UTRACE(p, size, q); 1623a4bd5210SJason Evans return (ALLOCM_ERR_NOT_MOVED); 1624a4bd5210SJason Evans } 1625a4bd5210SJason Evans label_oom: 1626a4bd5210SJason Evans if (config_xmalloc && opt_xmalloc) { 1627a4bd5210SJason Evans malloc_write("<jemalloc>: Error in rallocm(): " 1628a4bd5210SJason Evans "out of memory\n"); 1629a4bd5210SJason Evans abort(); 1630a4bd5210SJason Evans } 1631a4bd5210SJason Evans UTRACE(p, size, 0); 1632a4bd5210SJason Evans return (ALLOCM_ERR_OOM); 1633a4bd5210SJason Evans } 1634a4bd5210SJason Evans 1635a4bd5210SJason Evans int 1636a4bd5210SJason Evans je_sallocm(const void *ptr, size_t *rsize, int flags) 1637a4bd5210SJason Evans { 1638a4bd5210SJason Evans size_t sz; 1639a4bd5210SJason Evans 1640a4bd5210SJason Evans assert(malloc_initialized || IS_INITIALIZER); 1641f8ca2db1SJason Evans malloc_thread_init(); 1642a4bd5210SJason Evans 1643a4bd5210SJason Evans if (config_ivsalloc) 1644a4bd5210SJason Evans sz = ivsalloc(ptr, config_prof); 1645a4bd5210SJason Evans else { 1646a4bd5210SJason Evans assert(ptr != NULL); 1647a4bd5210SJason Evans sz = isalloc(ptr, config_prof); 1648a4bd5210SJason Evans } 1649a4bd5210SJason Evans assert(rsize != NULL); 1650a4bd5210SJason Evans *rsize = sz; 1651a4bd5210SJason Evans 1652a4bd5210SJason Evans return (ALLOCM_SUCCESS); 1653a4bd5210SJason Evans } 1654a4bd5210SJason Evans 1655a4bd5210SJason Evans int 1656a4bd5210SJason Evans je_dallocm(void *ptr, int flags) 1657a4bd5210SJason Evans { 1658a4bd5210SJason Evans size_t usize; 1659a4bd5210SJason Evans size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); 166082872ac0SJason Evans unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; 166182872ac0SJason Evans bool try_tcache; 1662a4bd5210SJason Evans 1663a4bd5210SJason Evans assert(ptr != NULL); 1664a4bd5210SJason Evans assert(malloc_initialized || IS_INITIALIZER); 1665a4bd5210SJason Evans 166682872ac0SJason Evans if (arena_ind != UINT_MAX) { 166782872ac0SJason Evans arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 166882872ac0SJason Evans try_tcache = (chunk == ptr || chunk->arena != 166982872ac0SJason Evans arenas[arena_ind]); 167082872ac0SJason Evans } else 167182872ac0SJason Evans try_tcache = true; 167282872ac0SJason Evans 1673a4bd5210SJason Evans UTRACE(ptr, 0, 0); 1674a4bd5210SJason Evans if (config_stats || config_valgrind) 1675a4bd5210SJason Evans usize = isalloc(ptr, config_prof); 1676a4bd5210SJason Evans if (config_prof && opt_prof) { 1677a4bd5210SJason Evans if (config_stats == false && config_valgrind == false) 1678a4bd5210SJason Evans usize = isalloc(ptr, config_prof); 1679a4bd5210SJason Evans prof_free(ptr, usize); 1680a4bd5210SJason Evans } 1681a4bd5210SJason Evans if (config_stats) 1682a4bd5210SJason Evans 
thread_allocated_tsd_get()->deallocated += usize; 1683a4bd5210SJason Evans if (config_valgrind && opt_valgrind) 1684a4bd5210SJason Evans rzsize = p2rz(ptr); 168582872ac0SJason Evans iqallocx(ptr, try_tcache); 1686a4bd5210SJason Evans JEMALLOC_VALGRIND_FREE(ptr, rzsize); 1687a4bd5210SJason Evans 1688a4bd5210SJason Evans return (ALLOCM_SUCCESS); 1689a4bd5210SJason Evans } 1690a4bd5210SJason Evans 1691a4bd5210SJason Evans int 1692a4bd5210SJason Evans je_nallocm(size_t *rsize, size_t size, int flags) 1693a4bd5210SJason Evans { 1694a4bd5210SJason Evans size_t usize; 1695a4bd5210SJason Evans size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) 1696a4bd5210SJason Evans & (SIZE_T_MAX-1)); 1697a4bd5210SJason Evans 1698a4bd5210SJason Evans assert(size != 0); 1699a4bd5210SJason Evans 1700a4bd5210SJason Evans if (malloc_init()) 1701a4bd5210SJason Evans return (ALLOCM_ERR_OOM); 1702a4bd5210SJason Evans 1703a4bd5210SJason Evans usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); 1704a4bd5210SJason Evans if (usize == 0) 1705a4bd5210SJason Evans return (ALLOCM_ERR_OOM); 1706a4bd5210SJason Evans 1707a4bd5210SJason Evans if (rsize != NULL) 1708a4bd5210SJason Evans *rsize = usize; 1709a4bd5210SJason Evans return (ALLOCM_SUCCESS); 1710a4bd5210SJason Evans } 1711a4bd5210SJason Evans 1712a4bd5210SJason Evans #endif 1713a4bd5210SJason Evans /* 1714a4bd5210SJason Evans * End experimental functions. 1715a4bd5210SJason Evans */ 1716a4bd5210SJason Evans /******************************************************************************/ 1717a4bd5210SJason Evans /* 1718a4bd5210SJason Evans * The following functions are used by threading libraries for protection of 1719a4bd5210SJason Evans * malloc during fork(). 1720a4bd5210SJason Evans */ 1721a4bd5210SJason Evans 172282872ac0SJason Evans /* 172382872ac0SJason Evans * If an application creates a thread before doing any allocation in the main 172482872ac0SJason Evans * thread, then calls fork(2) in the main thread followed by memory allocation 172582872ac0SJason Evans * in the child process, a race can occur that results in deadlock within the 172682872ac0SJason Evans * child: the main thread may have forked while the created thread had 172782872ac0SJason Evans * partially initialized the allocator. Ordinarily jemalloc prevents 172882872ac0SJason Evans * fork/malloc races via the following functions it registers during 172982872ac0SJason Evans * initialization using pthread_atfork(), but of course that does no good if 173082872ac0SJason Evans * the allocator isn't fully initialized at fork time. The following library 173182872ac0SJason Evans * constructor is a partial solution to this problem. It may still be possible to 173282872ac0SJason Evans * trigger the deadlock described above, but doing so would involve forking via 173382872ac0SJason Evans * a library constructor that runs before jemalloc's constructor runs.
173482872ac0SJason Evans */ 173582872ac0SJason Evans JEMALLOC_ATTR(constructor) 173682872ac0SJason Evans static void 173782872ac0SJason Evans jemalloc_constructor(void) 173882872ac0SJason Evans { 173982872ac0SJason Evans 174082872ac0SJason Evans malloc_init(); 174182872ac0SJason Evans } 174282872ac0SJason Evans 1743a4bd5210SJason Evans #ifndef JEMALLOC_MUTEX_INIT_CB 1744a4bd5210SJason Evans void 1745a4bd5210SJason Evans jemalloc_prefork(void) 1746a4bd5210SJason Evans #else 1747e722f8f8SJason Evans JEMALLOC_EXPORT void 1748a4bd5210SJason Evans _malloc_prefork(void) 1749a4bd5210SJason Evans #endif 1750a4bd5210SJason Evans { 1751a4bd5210SJason Evans unsigned i; 1752a4bd5210SJason Evans 175335dad073SJason Evans #ifdef JEMALLOC_MUTEX_INIT_CB 175435dad073SJason Evans if (malloc_initialized == false) 175535dad073SJason Evans return; 175635dad073SJason Evans #endif 175735dad073SJason Evans assert(malloc_initialized); 175835dad073SJason Evans 1759a4bd5210SJason Evans /* Acquire all mutexes in a safe order. */ 176082872ac0SJason Evans ctl_prefork(); 1761f8ca2db1SJason Evans prof_prefork(); 1762a4bd5210SJason Evans malloc_mutex_prefork(&arenas_lock); 176382872ac0SJason Evans for (i = 0; i < narenas_total; i++) { 1764a4bd5210SJason Evans if (arenas[i] != NULL) 1765a4bd5210SJason Evans arena_prefork(arenas[i]); 1766a4bd5210SJason Evans } 176782872ac0SJason Evans chunk_prefork(); 1768a4bd5210SJason Evans base_prefork(); 1769a4bd5210SJason Evans huge_prefork(); 1770a4bd5210SJason Evans } 1771a4bd5210SJason Evans 1772a4bd5210SJason Evans #ifndef JEMALLOC_MUTEX_INIT_CB 1773a4bd5210SJason Evans void 1774a4bd5210SJason Evans jemalloc_postfork_parent(void) 1775a4bd5210SJason Evans #else 1776e722f8f8SJason Evans JEMALLOC_EXPORT void 1777a4bd5210SJason Evans _malloc_postfork(void) 1778a4bd5210SJason Evans #endif 1779a4bd5210SJason Evans { 1780a4bd5210SJason Evans unsigned i; 1781a4bd5210SJason Evans 178235dad073SJason Evans #ifdef JEMALLOC_MUTEX_INIT_CB 178335dad073SJason Evans if (malloc_initialized == false) 178435dad073SJason Evans return; 178535dad073SJason Evans #endif 178635dad073SJason Evans assert(malloc_initialized); 178735dad073SJason Evans 1788a4bd5210SJason Evans /* Release all mutexes, now that fork() has completed. */ 1789a4bd5210SJason Evans huge_postfork_parent(); 1790a4bd5210SJason Evans base_postfork_parent(); 179182872ac0SJason Evans chunk_postfork_parent(); 179282872ac0SJason Evans for (i = 0; i < narenas_total; i++) { 1793a4bd5210SJason Evans if (arenas[i] != NULL) 1794a4bd5210SJason Evans arena_postfork_parent(arenas[i]); 1795a4bd5210SJason Evans } 1796a4bd5210SJason Evans malloc_mutex_postfork_parent(&arenas_lock); 1797f8ca2db1SJason Evans prof_postfork_parent(); 179882872ac0SJason Evans ctl_postfork_parent(); 1799a4bd5210SJason Evans } 1800a4bd5210SJason Evans 1801a4bd5210SJason Evans void 1802a4bd5210SJason Evans jemalloc_postfork_child(void) 1803a4bd5210SJason Evans { 1804a4bd5210SJason Evans unsigned i; 1805a4bd5210SJason Evans 180635dad073SJason Evans assert(malloc_initialized); 180735dad073SJason Evans 1808a4bd5210SJason Evans /* Release all mutexes, now that fork() has completed. 
*/ 1809a4bd5210SJason Evans huge_postfork_child(); 1810a4bd5210SJason Evans base_postfork_child(); 181182872ac0SJason Evans chunk_postfork_child(); 181282872ac0SJason Evans for (i = 0; i < narenas_total; i++) { 1813a4bd5210SJason Evans if (arenas[i] != NULL) 1814a4bd5210SJason Evans arena_postfork_child(arenas[i]); 1815a4bd5210SJason Evans } 1816a4bd5210SJason Evans malloc_mutex_postfork_child(&arenas_lock); 1817f8ca2db1SJason Evans prof_postfork_child(); 181882872ac0SJason Evans ctl_postfork_child(); 1819a4bd5210SJason Evans } 1820a4bd5210SJason Evans 1821a4bd5210SJason Evans /******************************************************************************/ 1822a4bd5210SJason Evans /* 1823a4bd5210SJason Evans * The following functions are used for TLS allocation/deallocation in static 1824a4bd5210SJason Evans * binaries on FreeBSD. The primary difference between these and i[mcd]alloc() 1825a4bd5210SJason Evans * is that these avoid accessing TLS variables. 1826a4bd5210SJason Evans */ 1827a4bd5210SJason Evans 1828a4bd5210SJason Evans static void * 1829a4bd5210SJason Evans a0alloc(size_t size, bool zero) 1830a4bd5210SJason Evans { 1831a4bd5210SJason Evans 1832a4bd5210SJason Evans if (malloc_init()) 1833a4bd5210SJason Evans return (NULL); 1834a4bd5210SJason Evans 1835a4bd5210SJason Evans if (size == 0) 1836a4bd5210SJason Evans size = 1; 1837a4bd5210SJason Evans 1838a4bd5210SJason Evans if (size <= arena_maxclass) 1839a4bd5210SJason Evans return (arena_malloc(arenas[0], size, zero, false)); 1840a4bd5210SJason Evans else 1841a4bd5210SJason Evans return (huge_malloc(size, zero)); 1842a4bd5210SJason Evans } 1843a4bd5210SJason Evans 1844a4bd5210SJason Evans void * 1845a4bd5210SJason Evans a0malloc(size_t size) 1846a4bd5210SJason Evans { 1847a4bd5210SJason Evans 1848a4bd5210SJason Evans return (a0alloc(size, false)); 1849a4bd5210SJason Evans } 1850a4bd5210SJason Evans 1851a4bd5210SJason Evans void * 1852a4bd5210SJason Evans a0calloc(size_t num, size_t size) 1853a4bd5210SJason Evans { 1854a4bd5210SJason Evans 1855a4bd5210SJason Evans return (a0alloc(num * size, true)); 1856a4bd5210SJason Evans } 1857a4bd5210SJason Evans 1858a4bd5210SJason Evans void 1859a4bd5210SJason Evans a0free(void *ptr) 1860a4bd5210SJason Evans { 1861a4bd5210SJason Evans arena_chunk_t *chunk; 1862a4bd5210SJason Evans 1863a4bd5210SJason Evans if (ptr == NULL) 1864a4bd5210SJason Evans return; 1865a4bd5210SJason Evans 1866a4bd5210SJason Evans chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 1867a4bd5210SJason Evans if (chunk != ptr) 1868a4bd5210SJason Evans arena_dalloc(chunk->arena, chunk, ptr, false); 1869a4bd5210SJason Evans else 1870a4bd5210SJason Evans huge_dalloc(ptr, true); 1871a4bd5210SJason Evans } 1872a4bd5210SJason Evans 1873a4bd5210SJason Evans /******************************************************************************/ 1874
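/*
 * A minimal usage sketch for the mallctl*() entry points defined above.  It
 * assumes the unprefixed public names (mallctl, mallctlnametomib,
 * mallctlbymib), a stats-enabled build, and the conventional "epoch",
 * "stats.allocated", and "stats.arenas.<i>.pactive" controls; none of these
 * names are defined in this file itself.
 */
#include <stdint.h>
#include <stdio.h>
#include <jemalloc/jemalloc.h>

static void
mallctl_example(void)
{
	uint64_t epoch = 1;
	size_t sz, allocated, mib[4], miblen = 4, pactive;

	/* Refresh cached statistics before reading them. */
	sz = sizeof(epoch);
	if (mallctl("epoch", &epoch, &sz, &epoch, sz) != 0)
		return;

	/* Read a statistic by name; this goes through ctl_byname(). */
	sz = sizeof(allocated);
	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
		printf("allocated: %zu\n", allocated);

	/* Translate a name to a MIB once, then reuse it via ctl_bymib(). */
	if (mallctlnametomib("stats.arenas.0.pactive", mib, &miblen) == 0) {
		mib[2] = 0;	/* Arena index. */
		sz = sizeof(pactive);
		if (mallctlbymib(mib, miblen, &pactive, &sz, NULL, 0) == 0)
			printf("arena 0 pactive: %zu\n", pactive);
	}
}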
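/*
 * A hedged sketch of how the experimental allocm()/rallocm()/dallocm()
 * entry points above are meant to be called.  It assumes a build with
 * JEMALLOC_EXPERIMENTAL, the unprefixed public names, and the
 * ALLOCM_ALIGN()/ALLOCM_ZERO/ALLOCM_NO_MOVE flag macros from the public
 * header; error handling is abbreviated.
 */
#include <stddef.h>
#include <jemalloc/jemalloc.h>

static void
allocm_example(void)
{
	void *p;
	size_t usize;

	/* 64-byte-aligned, zero-filled allocation; usize gets the usable size. */
	if (allocm(&p, &usize, 4096, ALLOCM_ALIGN(64) | ALLOCM_ZERO) !=
	    ALLOCM_SUCCESS)
		return;

	/* Try to grow in place; ALLOCM_NO_MOVE maps to the no_move path above. */
	if (rallocm(&p, &usize, 8192, 0, ALLOCM_NO_MOVE) != ALLOCM_SUCCESS) {
		/* Could not grow in place; fall back to a moving reallocation. */
		if (rallocm(&p, &usize, 8192, 0, 0) != ALLOCM_SUCCESS) {
			dallocm(p, 0);
			return;
		}
	}

	dallocm(p, 0);
}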
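/*
 * A sketch of the fork protection described above, assuming the
 * non-JEMALLOC_MUTEX_INIT_CB entry-point names: initialization registers
 * handlers equivalent to the following with pthread_atfork(), so that every
 * allocator mutex is acquired before fork(2) and released in both parent and
 * child.
 */
#include <pthread.h>

/* Declared earlier in this file. */
void	jemalloc_prefork(void);
void	jemalloc_postfork_parent(void);
void	jemalloc_postfork_child(void);

static void
register_fork_protection(void)
{
	/* Mirrors the prefork/postfork functions above. */
	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child);
}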