#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
const char *__malloc_options_1_0 = NULL;
__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);

/* Runtime configuration options. */
const char *je_malloc_conf JEMALLOC_ATTR(weak);
bool opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
const char *opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    "true"
#else
    "false"
#endif
    ;
bool opt_junk_alloc =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
bool opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;

size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
unsigned opt_narenas = 0;

/* Initialized to true if the process is running inside Valgrind. */
bool in_valgrind;

unsigned ncpus;

/* Protects arenas initialization. */
static malloc_mutex_t arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the
 * application takes some action to create them and allocate from them.
 */
arena_t **arenas;
static unsigned narenas_total; /* Use narenas_total_*(). */
static arena_t *a0; /* arenas[0]; read-only after initialization. */
static unsigned narenas_auto; /* Read-only after initialization. */

typedef enum {
    malloc_init_uninitialized = 3,
    malloc_init_a0_initialized = 2,
    malloc_init_recursible = 1,
    malloc_init_initialized = 0 /* Common case --> jnz. */
} malloc_init_t;
static malloc_init_t malloc_init_state = malloc_init_uninitialized;

/* false should be the common case.  Set to true to trigger initialization. */
static bool malloc_slow = true;

/* When malloc_slow != 0, set the corresponding bits for sanity check. */
enum {
    flag_opt_junk_alloc = (1U),
    flag_opt_junk_free = (1U << 1),
    flag_opt_quarantine = (1U << 2),
    flag_opt_zero = (1U << 3),
    flag_opt_utrace = (1U << 4),
    flag_in_valgrind = (1U << 5),
    flag_opt_xmalloc = (1U << 6)
};
static uint8_t malloc_slow_flags;

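/*
 * Each SIZE_CLASSES entry expands via SC() to the class's size in bytes:
 * (1 << lg_grp) + (ndelta << lg_delta).  For example, lg_grp = 5,
 * lg_delta = 3, ndelta = 2 yields 32 + 16 = 48.
 */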
/* Last entry for overflow detection only. */
JEMALLOC_ALIGNED(CACHELINE)
const size_t index2size_tab[NSIZES+1] = {
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
    ((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
    SIZE_CLASSES
#undef SC
    ZU(0)
};

JEMALLOC_ALIGNED(CACHELINE)
const uint8_t size2index_tab[] = {
#if LG_TINY_MIN == 0
#warning "Dangerous LG_TINY_MIN"
#define S2B_0(i) i,
#elif LG_TINY_MIN == 1
#warning "Dangerous LG_TINY_MIN"
#define S2B_1(i) i,
#elif LG_TINY_MIN == 2
#warning "Dangerous LG_TINY_MIN"
#define S2B_2(i) i,
#elif LG_TINY_MIN == 3
#define S2B_3(i) i,
#elif LG_TINY_MIN == 4
#define S2B_4(i) i,
#elif LG_TINY_MIN == 5
#define S2B_5(i) i,
#elif LG_TINY_MIN == 6
#define S2B_6(i) i,
#elif LG_TINY_MIN == 7
#define S2B_7(i) i,
#elif LG_TINY_MIN == 8
#define S2B_8(i) i,
#elif LG_TINY_MIN == 9
#define S2B_9(i) i,
#elif LG_TINY_MIN == 10
#define S2B_10(i) i,
#elif LG_TINY_MIN == 11
#define S2B_11(i) i,
#else
#error "Unsupported LG_TINY_MIN"
#endif
#if LG_TINY_MIN < 1
#define S2B_1(i) S2B_0(i) S2B_0(i)
#endif
#if LG_TINY_MIN < 2
#define S2B_2(i) S2B_1(i) S2B_1(i)
#endif
#if LG_TINY_MIN < 3
#define S2B_3(i) S2B_2(i) S2B_2(i)
#endif
#if LG_TINY_MIN < 4
#define S2B_4(i) S2B_3(i) S2B_3(i)
#endif
#if LG_TINY_MIN < 5
#define S2B_5(i) S2B_4(i) S2B_4(i)
#endif
#if LG_TINY_MIN < 6
#define S2B_6(i) S2B_5(i) S2B_5(i)
#endif
#if LG_TINY_MIN < 7
#define S2B_7(i) S2B_6(i) S2B_6(i)
#endif
#if LG_TINY_MIN < 8
#define S2B_8(i) S2B_7(i) S2B_7(i)
#endif
#if LG_TINY_MIN < 9
#define S2B_9(i) S2B_8(i) S2B_8(i)
#endif
#if LG_TINY_MIN < 10
#define S2B_10(i) S2B_9(i) S2B_9(i)
#endif
#if LG_TINY_MIN < 11
#define S2B_11(i) S2B_10(i) S2B_10(i)
#endif
#define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \
    S2B_##lg_delta_lookup(index)
    SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_10
#undef S2B_11
#undef S2B_no
#undef SC
};

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER ((unsigned long)0)
# define INITIALIZER pthread_self()
# define IS_INITIALIZER (malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER false
# define INITIALIZER true
# define IS_INITIALIZER malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
#if _WIN32_WINNT >= 0x0600
static malloc_mutex_t init_lock = SRWLOCK_INIT;
#else
static malloc_mutex_t init_lock;
static bool init_lock_initialized = false;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

    /* If another constructor in the same binary is using mallctl to
     * e.g. setup chunk hooks, it may end up running before this one,
     * and malloc_init_hard will crash trying to lock the uninitialized
     * lock.  So we force an initialization of the lock in
     * malloc_init_hard as well.
     * We don't try to care about atomicity of the accesses to the
     * init_lock_initialized boolean, since it really only matters early
     * in the process creation, before any separate thread normally
     * starts doing anything. */
    if (!init_lock_initialized)
        malloc_mutex_init(&init_lock);
    init_lock_initialized = true;
}

#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
#endif
#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
    void *p; /* Input pointer (as in realloc(p, s)). */
    size_t s; /* Request size. */
    void *r; /* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
    if (unlikely(opt_utrace)) { \
        int utrace_serrno = errno; \
        malloc_utrace_t ut; \
        ut.p = (a); \
        ut.s = (b); \
        ut.r = (c); \
        utrace(&ut, sizeof(ut)); \
        errno = utrace_serrno; \
    } \
} while (0)
#else
# define UTRACE(a, b, c)
#endif

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool malloc_init_hard_a0(void);
static bool malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

JEMALLOC_ALWAYS_INLINE_C bool
malloc_initialized(void)
{

    return (malloc_init_state == malloc_init_initialized);
}

JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{

    /*
     * TSD initialization can't be safely done as a side effect of
     * deallocation, because it is possible for a thread to do nothing but
     * deallocate its TLS data via free(), in which case writing to TLS
     * would cause write-after-free memory corruption.  The quarantine
     * facility *only* gets used as a side effect of deallocation, so make
     * a best effort attempt at initializing its TSD by hooking all
     * allocation events.
     */
    if (config_fill && unlikely(opt_quarantine))
        quarantine_alloc_hook();
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init_a0(void)
{

    if (unlikely(malloc_init_state == malloc_init_uninitialized))
        return (malloc_init_hard_a0());
    return (false);
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

    if (unlikely(!malloc_initialized()) && malloc_init_hard())
        return (true);
    malloc_thread_init();

    return (false);
}

/*
 * The a0*() functions are used instead of i[mcd]alloc() in situations that
 * cannot tolerate TLS variable access.
 */

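/*
 * For example, arena_tdata_get_hard() below uses a0malloc()/a0dalloc() to
 * manage each thread's arenas_tdata array.
 */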
static void *
a0ialloc(size_t size, bool zero, bool is_metadata)
{

    if (unlikely(malloc_init_a0()))
        return (NULL);

    return (iallocztm(NULL, size, size2index(size), zero, false,
        is_metadata, arena_get(0, false), true));
}

static void
a0idalloc(void *ptr, bool is_metadata)
{

    idalloctm(NULL, ptr, false, is_metadata, true);
}

void *
a0malloc(size_t size)
{

    return (a0ialloc(size, false, true));
}

void
a0dalloc(void *ptr)
{

    a0idalloc(ptr, true);
}

/*
 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access (TLS allocation and very
 * early internal data structure initialization).
 */

void *
bootstrap_malloc(size_t size)
{

    if (unlikely(size == 0))
        size = 1;

    return (a0ialloc(size, false, false));
}

void *
bootstrap_calloc(size_t num, size_t size)
{
    size_t num_size;

    num_size = num * size;
    if (unlikely(num_size == 0)) {
        assert(num == 0 || size == 0);
        num_size = 1;
    }

    return (a0ialloc(num_size, true, false));
}

void
bootstrap_free(void *ptr)
{

    if (unlikely(ptr == NULL))
        return;

    a0idalloc(ptr, false);
}

static void
arena_set(unsigned ind, arena_t *arena)
{

    atomic_write_p((void **)&arenas[ind], arena);
}

static void
narenas_total_set(unsigned narenas)
{

    atomic_write_u(&narenas_total, narenas);
}

static void
narenas_total_inc(void)
{

    atomic_add_u(&narenas_total, 1);
}

unsigned
narenas_total_get(void)
{

    return (atomic_read_u(&narenas_total));
}

/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(unsigned ind)
{
    arena_t *arena;

    assert(ind <= narenas_total_get());
    if (ind > MALLOCX_ARENA_MAX)
        return (NULL);
    if (ind == narenas_total_get())
        narenas_total_inc();

    /*
     * Another thread may have already initialized arenas[ind] if it's an
     * auto arena.
     */
    arena = arena_get(ind, false);
    if (arena != NULL) {
        assert(ind < narenas_auto);
        return (arena);
    }

    /* Actually initialize the arena. */
    arena = arena_new(ind);
    arena_set(ind, arena);
    return (arena);
}

arena_t *
arena_init(unsigned ind)
{
    arena_t *arena;

    malloc_mutex_lock(&arenas_lock);
    arena = arena_init_locked(ind);
    malloc_mutex_unlock(&arenas_lock);
    return (arena);
}

static void
arena_bind(tsd_t *tsd, unsigned ind)
{
    arena_t *arena;

    arena = arena_get(ind, false);
    arena_nthreads_inc(arena);

    if (tsd_nominal(tsd))
        tsd_arena_set(tsd, arena);
}

void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{
    arena_t *oldarena, *newarena;

    oldarena = arena_get(oldind, false);
    newarena = arena_get(newind, false);
    arena_nthreads_dec(oldarena);
    arena_nthreads_inc(newarena);
    tsd_arena_set(tsd, newarena);
}

static void
arena_unbind(tsd_t *tsd, unsigned ind)
{
    arena_t *arena;

    arena = arena_get(ind, false);
    arena_nthreads_dec(arena);
    tsd_arena_set(tsd, NULL);
}

arena_tdata_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
{
    arena_tdata_t *tdata, *arenas_tdata_old;
    arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
    unsigned narenas_tdata_old, i;
    unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
    unsigned narenas_actual = narenas_total_get();

    /*
     * Dissociate old tdata array (and set up for deallocation upon return)
     * if it's too small.
     */
    if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
        arenas_tdata_old = arenas_tdata;
        narenas_tdata_old = narenas_tdata;
        arenas_tdata = NULL;
        narenas_tdata = 0;
        tsd_arenas_tdata_set(tsd, arenas_tdata);
        tsd_narenas_tdata_set(tsd, narenas_tdata);
    } else {
        arenas_tdata_old = NULL;
        narenas_tdata_old = 0;
    }

    /* Allocate tdata array if it's missing. */
    if (arenas_tdata == NULL) {
        bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
        narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;

        if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
            *arenas_tdata_bypassp = true;
            arenas_tdata = (arena_tdata_t *)a0malloc(
                sizeof(arena_tdata_t) * narenas_tdata);
            *arenas_tdata_bypassp = false;
        }
        if (arenas_tdata == NULL) {
            tdata = NULL;
            goto label_return;
        }
        assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
        tsd_arenas_tdata_set(tsd, arenas_tdata);
        tsd_narenas_tdata_set(tsd, narenas_tdata);
    }

    /*
     * Copy to tdata array.  It's possible that the actual number of arenas
     * has increased since narenas_total_get() was called above, but that
     * causes no correctness issues unless two threads concurrently execute
     * the arenas.extend mallctl, which we trust mallctl synchronization to
     * prevent.
     */

    /* Copy/initialize tickers. */
    for (i = 0; i < narenas_actual; i++) {
        if (i < narenas_tdata_old) {
            ticker_copy(&arenas_tdata[i].decay_ticker,
                &arenas_tdata_old[i].decay_ticker);
        } else {
            ticker_init(&arenas_tdata[i].decay_ticker,
                DECAY_NTICKS_PER_UPDATE);
        }
    }
    if (narenas_tdata > narenas_actual) {
        memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
            * (narenas_tdata - narenas_actual));
    }

    /* Read the refreshed tdata array. */
    tdata = &arenas_tdata[ind];
label_return:
    if (arenas_tdata_old != NULL)
        a0dalloc(arenas_tdata_old);
    return (tdata);
}

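/*
 * Pick the initialized automatic arena with the fewest bound threads; if the
 * least loaded arena already has threads bound and an uninitialized slot
 * remains, initialize and use that slot instead.
 */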
/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd)
{
    arena_t *ret;

    if (narenas_auto > 1) {
        unsigned i, choose, first_null;

        choose = 0;
        first_null = narenas_auto;
        malloc_mutex_lock(&arenas_lock);
        assert(arena_get(0, false) != NULL);
        for (i = 1; i < narenas_auto; i++) {
            if (arena_get(i, false) != NULL) {
                /*
                 * Choose the first arena that has the lowest
                 * number of threads assigned to it.
                 */
                if (arena_nthreads_get(arena_get(i, false)) <
                    arena_nthreads_get(arena_get(choose, false)))
                    choose = i;
            } else if (first_null == narenas_auto) {
                /*
                 * Record the index of the first uninitialized
                 * arena, in case all extant arenas are in use.
                 *
                 * NB: It is possible for there to be
                 * discontinuities in terms of initialized
                 * versus uninitialized arenas, due to the
                 * "thread.arena" mallctl.
                 */
                first_null = i;
            }
        }

        if (arena_nthreads_get(arena_get(choose, false)) == 0
            || first_null == narenas_auto) {
            /*
             * Use an unloaded arena, or the least loaded arena if
             * all arenas are already initialized.
             */
            ret = arena_get(choose, false);
        } else {
            /* Initialize a new arena. */
            choose = first_null;
            ret = arena_init_locked(choose);
            if (ret == NULL) {
                malloc_mutex_unlock(&arenas_lock);
                return (NULL);
            }
        }
        arena_bind(tsd, choose);
        malloc_mutex_unlock(&arenas_lock);
    } else {
        ret = arena_get(0, false);
        arena_bind(tsd, 0);
    }

    return (ret);
}

void
thread_allocated_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
thread_deallocated_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
arena_cleanup(tsd_t *tsd)
{
    arena_t *arena;

    arena = tsd_arena_get(tsd);
    if (arena != NULL)
        arena_unbind(tsd, arena->ind);
}

void
arenas_tdata_cleanup(tsd_t *tsd)
{
    arena_tdata_t *arenas_tdata;

    /* Prevent tsd->arenas_tdata from being (re)created. */
    *tsd_arenas_tdata_bypassp_get(tsd) = true;

    arenas_tdata = tsd_arenas_tdata_get(tsd);
    if (arenas_tdata != NULL) {
        tsd_arenas_tdata_set(tsd, NULL);
        a0dalloc(arenas_tdata);
    }
}

void
narenas_tdata_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

void
arenas_tdata_bypass_cleanup(tsd_t *tsd)
{

    /* Do nothing. */
}

static void
stats_print_atexit(void)
{

    if (config_tcache && config_stats) {
        unsigned narenas, i;

        /*
         * Merge stats from extant threads.  This is racy, since
         * individual threads do not lock when recording tcache stats
         * events.  As a consequence, the final stats may be slightly
         * out of date by the time they are reported, if other threads
         * continue to allocate.
         */
        for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
            arena_t *arena = arena_get(i, false);
            if (arena != NULL) {
                tcache_t *tcache;

                /*
                 * tcache_stats_merge() locks bins, so if any
                 * code is introduced that acquires both arena
                 * and bin locks in the opposite order,
                 * deadlocks may result.
                 */
                malloc_mutex_lock(&arena->lock);
                ql_foreach(tcache, &arena->tcache_ql, link) {
                    tcache_stats_merge(tcache, arena);
                }
                malloc_mutex_unlock(&arena->lock);
            }
        }
    }
    je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

#ifndef JEMALLOC_HAVE_SECURE_GETENV
static char *
secure_getenv(const char *name)
{

# ifdef JEMALLOC_HAVE_ISSETUGID
    if (issetugid() != 0)
        return (NULL);
# endif
    return (getenv(name));
}
#endif

static unsigned
malloc_ncpus(void)
{
    long result;

#ifdef _WIN32
    SYSTEM_INFO si;
    GetSystemInfo(&si);
    result = si.dwNumberOfProcessors;
#else
    result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
    return ((result == -1) ? 1 : (unsigned)result);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
    bool accept;
    const char *opts = *opts_p;

    *k_p = opts;

    for (accept = false; !accept;) {
        switch (*opts) {
        case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
        case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
        case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
        case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
        case 'Y': case 'Z':
        case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
        case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
        case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
        case 's': case 't': case 'u': case 'v': case 'w': case 'x':
        case 'y': case 'z':
        case '0': case '1': case '2': case '3': case '4': case '5':
        case '6': case '7': case '8': case '9':
        case '_':
            opts++;
            break;
        case ':':
            opts++;
            *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
            *v_p = opts;
            accept = true;
            break;
        case '\0':
            if (opts != *opts_p) {
                malloc_write("<jemalloc>: Conf string ends "
                    "with key\n");
            }
            return (true);
        default:
            malloc_write("<jemalloc>: Malformed conf string\n");
            return (true);
        }
    }

    for (accept = false; !accept;) {
        switch (*opts) {
        case ',':
            opts++;
            /*
             * Look ahead one character here, because the next time
             * this function is called, it will assume that end of
             * input has been cleanly reached if no input remains,
             * but we have optimistically already consumed the
             * comma if one exists.
             */
            if (*opts == '\0') {
                malloc_write("<jemalloc>: Conf string ends "
                    "with comma\n");
            }
            *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
            accept = true;
            break;
        case '\0':
            *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
            accept = true;
            break;
        default:
            opts++;
            break;
        }
    }

    *opts_p = opts;
    return (false);
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

    malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
        (int)vlen, v);
}

static void
malloc_slow_flag_init(void)
{
    /*
     * Combine the runtime options into malloc_slow for fast path.  Called
     * after processing all the options.
     */
    malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
        | (opt_junk_free ? flag_opt_junk_free : 0)
        | (opt_quarantine ? flag_opt_quarantine : 0)
        | (opt_zero ? flag_opt_zero : 0)
        | (opt_utrace ? flag_opt_utrace : 0)
        | (opt_xmalloc ? flag_opt_xmalloc : 0);

    if (config_valgrind)
        malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);

    malloc_slow = (malloc_slow_flags != 0);
}

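/*
 * malloc_conf_init() gathers option strings from, in order: options compiled
 * in via config_malloc_conf, the je_malloc_conf symbol, the name of the
 * /etc/malloc.conf symbolic link, and the MALLOC_CONF environment variable.
 * Each string is a comma-separated list of key:value pairs; for example
 * (hypothetical settings), MALLOC_CONF="abort:true,narenas:4" would enable
 * opt_abort and set opt_narenas to 4.
 */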
static void
malloc_conf_init(void)
{
    unsigned i;
    char buf[PATH_MAX + 1];
    const char *opts, *k, *v;
    size_t klen, vlen;

    /*
     * Automatically configure valgrind before processing options.  The
     * valgrind option remains in jemalloc 3.x for compatibility reasons.
     */
    if (config_valgrind) {
        in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
        if (config_fill && unlikely(in_valgrind)) {
            opt_junk = "false";
            opt_junk_alloc = false;
            opt_junk_free = false;
            assert(!opt_zero);
            opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
            opt_redzone = true;
        }
        if (config_tcache && unlikely(in_valgrind))
            opt_tcache = false;
    }

    for (i = 0; i < 4; i++) {
        /* Get runtime configuration. */
        switch (i) {
        case 0:
            opts = config_malloc_conf;
            break;
        case 1:
            if (je_malloc_conf != NULL) {
                /*
                 * Use options that were compiled into the
                 * program.
                 */
                opts = je_malloc_conf;
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        case 2: {
            ssize_t linklen = 0;
#ifndef _WIN32
            int saved_errno = errno;
            const char *linkname =
# ifdef JEMALLOC_PREFIX
                "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
                "/etc/malloc.conf"
# endif
                ;

            /*
             * Try to use the contents of the "/etc/malloc.conf"
             * symbolic link's name.
             */
            linklen = readlink(linkname, buf, sizeof(buf) - 1);
            if (linklen == -1) {
                /* No configuration specified. */
                linklen = 0;
                /* Restore errno. */
                set_errno(saved_errno);
            }
#endif
            buf[linklen] = '\0';
            opts = buf;
            break;
        } case 3: {
            const char *envname =
#ifdef JEMALLOC_PREFIX
                JEMALLOC_CPREFIX"MALLOC_CONF"
#else
                "MALLOC_CONF"
#endif
                ;

            if ((opts = secure_getenv(envname)) != NULL) {
                /*
                 * Do nothing; opts is already initialized to
                 * the value of the MALLOC_CONF environment
                 * variable.
                 */
            } else {
                /* No configuration specified. */
                buf[0] = '\0';
                opts = buf;
            }
            break;
        } default:
            not_reached();
            buf[0] = '\0';
            opts = buf;
        }

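        /*
         * Parse the selected conf string into key:value pairs and
         * dispatch each pair to the matching CONF_HANDLE_*() clause
         * below; pairs that match no clause fall through to the
         * "Invalid conf pair" error at the end of the loop body.
         */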
        while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
            &vlen)) {
#define CONF_MATCH(n) \
    (sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_MATCH_VALUE(n) \
    (sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
#define CONF_HANDLE_BOOL(o, n, cont) \
    if (CONF_MATCH(n)) { \
        if (CONF_MATCH_VALUE("true")) \
            o = true; \
        else if (CONF_MATCH_VALUE("false")) \
            o = false; \
        else { \
            malloc_conf_error( \
                "Invalid conf value", \
                k, klen, v, vlen); \
        } \
        if (cont) \
            continue; \
    }
#define CONF_HANDLE_T_U(t, o, n, min, max, clip) \
    if (CONF_MATCH(n)) { \
        uintmax_t um; \
        char *end; \
 \
        set_errno(0); \
        um = malloc_strtoumax(v, &end, 0); \
        if (get_errno() != 0 || (uintptr_t)end - \
            (uintptr_t)v != vlen) { \
            malloc_conf_error( \
                "Invalid conf value", \
                k, klen, v, vlen); \
        } else if (clip) { \
            if ((min) != 0 && um < (min)) \
                o = (t)(min); \
            else if (um > (max)) \
                o = (t)(max); \
            else \
                o = (t)um; \
        } else { \
            if (((min) != 0 && um < (min)) \
                || um > (max)) { \
                malloc_conf_error( \
                    "Out-of-range " \
                    "conf value", \
                    k, klen, v, vlen); \
            } else \
                o = (t)um; \
        } \
        continue; \
    }
#define CONF_HANDLE_UNSIGNED(o, n, min, max, clip) \
    CONF_HANDLE_T_U(unsigned, o, n, min, max, clip)
#define CONF_HANDLE_SIZE_T(o, n, min, max, clip) \
    CONF_HANDLE_T_U(size_t, o, n, min, max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
    if (CONF_MATCH(n)) { \
        long l; \
        char *end; \
 \
        set_errno(0); \
        l = strtol(v, &end, 0); \
        if (get_errno() != 0 || (uintptr_t)end - \
            (uintptr_t)v != vlen) { \
            malloc_conf_error( \
                "Invalid conf value", \
                k, klen, v, vlen); \
        } else if (l < (ssize_t)(min) || l > \
            (ssize_t)(max)) { \
            malloc_conf_error( \
                "Out-of-range conf value", \
                k, klen, v, vlen); \
        } else \
            o = l; \
        continue; \
    }
#define CONF_HANDLE_CHAR_P(o, n, d) \
    if (CONF_MATCH(n)) { \
        size_t cpylen = (vlen <= \
            sizeof(o)-1) ? vlen : \
            sizeof(o)-1; \
        strncpy(o, v, cpylen); \
        o[cpylen] = '\0'; \
        continue; \
    }

            CONF_HANDLE_BOOL(opt_abort, "abort", true)
            /*
             * Chunks always require at least one header page,
             * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
             * possibly an additional page in the presence of
             * redzones.  In order to simplify options processing,
             * use a conservative bound that accommodates all these
             * constraints.
             */
            CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
                LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
                (sizeof(size_t) << 3) - 1, true)
            if (strncmp("dss", k, klen) == 0) {
                int i;
                bool match = false;
                for (i = 0; i < dss_prec_limit; i++) {
                    if (strncmp(dss_prec_names[i], v, vlen)
                        == 0) {
                        if (chunk_dss_prec_set(i)) {
                            malloc_conf_error(
                                "Error setting dss",
                                k, klen, v, vlen);
                        } else {
                            opt_dss =
                                dss_prec_names[i];
                            match = true;
                            break;
                        }
                    }
                }
                if (!match) {
                    malloc_conf_error("Invalid conf value",
                        k, klen, v, vlen);
                }
                continue;
            }
            CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
                UINT_MAX, false)
            if (strncmp("purge", k, klen) == 0) {
                int i;
                bool match = false;
                for (i = 0; i < purge_mode_limit; i++) {
                    if (strncmp(purge_mode_names[i], v,
                        vlen) == 0) {
                        opt_purge = (purge_mode_t)i;
                        match = true;
                        break;
                    }
                }
                if (!match) {
                    malloc_conf_error("Invalid conf value",
                        k, klen, v, vlen);
                }
                continue;
            }
            CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
                -1, (sizeof(size_t) << 3) - 1)
            CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
                NSTIME_SEC_MAX);
            CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
            if (config_fill) {
                if (CONF_MATCH("junk")) {
                    if (CONF_MATCH_VALUE("true")) {
                        opt_junk = "true";
                        opt_junk_alloc = opt_junk_free =
                            true;
                    } else if (CONF_MATCH_VALUE("false")) {
                        opt_junk = "false";
                        opt_junk_alloc = opt_junk_free =
                            false;
                    } else if (CONF_MATCH_VALUE("alloc")) {
                        opt_junk = "alloc";
                        opt_junk_alloc = true;
                        opt_junk_free = false;
                    } else if (CONF_MATCH_VALUE("free")) {
                        opt_junk = "free";
                        opt_junk_alloc = false;
                        opt_junk_free = true;
                    } else {
                        malloc_conf_error(
                            "Invalid conf value", k,
                            klen, v, vlen);
                    }
                    continue;
                }
                CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
                    0, SIZE_T_MAX, false)
                CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
                CONF_HANDLE_BOOL(opt_zero, "zero", true)
            }
            if (config_utrace) {
                CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
            }
            if (config_xmalloc) {
                CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
            }
            if (config_tcache) {
                CONF_HANDLE_BOOL(opt_tcache, "tcache",
                    !config_valgrind || !in_valgrind)
                if (CONF_MATCH("tcache")) {
                    assert(config_valgrind && in_valgrind);
                    if (opt_tcache) {
                        opt_tcache = false;
                        malloc_conf_error(
                            "tcache cannot be enabled "
                            "while running inside Valgrind",
                            k, klen, v, vlen);
                    }
                    continue;
                }
                CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
                    "lg_tcache_max", -1,
                    (sizeof(size_t) << 3) - 1)
            }
            if (config_prof) {
                CONF_HANDLE_BOOL(opt_prof, "prof", true)
                CONF_HANDLE_CHAR_P(opt_prof_prefix,
                    "prof_prefix", "jeprof")
                CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
                    true)
                CONF_HANDLE_BOOL(opt_prof_thread_active_init,
                    "prof_thread_active_init", true)
                CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
                    "lg_prof_sample", 0,
                    (sizeof(uint64_t) << 3) - 1, true)
                CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
                    true)
                CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
                    "lg_prof_interval", -1,
                    (sizeof(uint64_t) << 3) - 1)
                CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
                    true)
                CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
                    true)
                CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
                    true)
            }
            malloc_conf_error("Invalid conf pair", k, klen, v,
                vlen);
#undef CONF_MATCH
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
        }
    }
}

/* init_lock must be held. */
static bool
malloc_init_hard_needed(void)
{

    if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
        malloc_init_recursible)) {
        /*
         * Another thread initialized the allocator before this one
         * acquired init_lock, or this thread is the initializing
         * thread, and it is recursively allocating.
         */
        return (false);
    }
#ifdef JEMALLOC_THREADED_INIT
    if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
        /* Busy-wait until the initializing thread completes. */
        do {
            malloc_mutex_unlock(&init_lock);
            CPU_SPINWAIT;
            malloc_mutex_lock(&init_lock);
        } while (!malloc_initialized());
        return (false);
    }
#endif
    return (true);
}

/* init_lock must be held. */
static bool
malloc_init_hard_a0_locked(void)
{

    malloc_initializer = INITIALIZER;

    if (config_prof)
        prof_boot0();
    malloc_conf_init();
    if (opt_stats_print) {
        /* Print statistics at exit. */
        if (atexit(stats_print_atexit) != 0) {
            malloc_write("<jemalloc>: Error in atexit()\n");
            if (opt_abort)
                abort();
        }
    }
    if (base_boot())
        return (true);
    if (chunk_boot())
        return (true);
    if (ctl_boot())
        return (true);
    if (config_prof)
        prof_boot1();
    if (arena_boot())
        return (true);
    if (config_tcache && tcache_boot())
        return (true);
    if (malloc_mutex_init(&arenas_lock))
        return (true);
    /*
     * Create enough scaffolding to allow recursive allocation in
     * malloc_ncpus().
     */
    narenas_auto = 1;
    narenas_total_set(narenas_auto);
    arenas = &a0;
    memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
    /*
     * Initialize one arena here.  The rest are lazily created in
     * arena_choose_hard().
     */
    if (arena_init(0) == NULL)
        return (true);
    malloc_init_state = malloc_init_a0_initialized;
    return (false);
}

static bool
malloc_init_hard_a0(void)
{
    bool ret;

    malloc_mutex_lock(&init_lock);
    ret = malloc_init_hard_a0_locked();
    malloc_mutex_unlock(&init_lock);
    return (ret);
}

/*
 * Initialize data structures which may trigger recursive allocation.
 *
 * init_lock must be held.
 */
static bool
malloc_init_hard_recursible(void)
{
    bool ret = false;

    malloc_init_state = malloc_init_recursible;
    malloc_mutex_unlock(&init_lock);

    /* LinuxThreads' pthread_setspecific() allocates. */
    if (malloc_tsd_boot0()) {
        ret = true;
        goto label_return;
    }

    ncpus = malloc_ncpus();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32) && !defined(__native_client__))
    /* LinuxThreads' pthread_atfork() allocates. */
    if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
        jemalloc_postfork_child) != 0) {
        ret = true;
        malloc_write("<jemalloc>: Error in pthread_atfork()\n");
        if (opt_abort)
            abort();
    }
#endif

label_return:
    malloc_mutex_lock(&init_lock);
    return (ret);
}

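/*
 * When the conf string did not set narenas, malloc_init_hard_finish() defaults
 * opt_narenas to four arenas per CPU on SMP systems (e.g. ncpus == 8 gives
 * narenas_auto == 32), capped at MALLOCX_ARENA_MAX.
 */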
/* init_lock must be held. */
static bool
malloc_init_hard_finish(void)
{

    if (mutex_boot())
        return (true);

    if (opt_narenas == 0) {
        /*
         * For SMP systems, create more than one arena per CPU by
         * default.
         */
        if (ncpus > 1)
            opt_narenas = ncpus << 2;
        else
            opt_narenas = 1;
    }
    narenas_auto = opt_narenas;
    /*
     * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
     */
    if (narenas_auto > MALLOCX_ARENA_MAX) {
        narenas_auto = MALLOCX_ARENA_MAX;
        malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
            narenas_auto);
    }
    narenas_total_set(narenas_auto);

    /* Allocate and initialize arenas. */
    arenas = (arena_t **)base_alloc(sizeof(arena_t *) *
        (MALLOCX_ARENA_MAX+1));
    if (arenas == NULL)
        return (true);
    /* Copy the pointer to the one arena that was already initialized. */
    arena_set(0, a0);

    malloc_init_state = malloc_init_initialized;
    malloc_slow_flag_init();

    return (false);
}

static bool
malloc_init_hard(void)
{

#if defined(_WIN32) && _WIN32_WINNT < 0x0600
    _init_init_lock();
#endif
    malloc_mutex_lock(&init_lock);
    if (!malloc_init_hard_needed()) {
        malloc_mutex_unlock(&init_lock);
        return (false);
    }

    if (malloc_init_state != malloc_init_a0_initialized &&
        malloc_init_hard_a0_locked()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (malloc_init_hard_recursible()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (config_prof && prof_boot2()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    if (malloc_init_hard_finish()) {
        malloc_mutex_unlock(&init_lock);
        return (true);
    }

    malloc_mutex_unlock(&init_lock);
    malloc_tsd_boot1();
    return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */
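
/*
 * Note on the *_prof_sample() helpers below: sampled requests that fall in a
 * small size class are backed by a LARGE_MINCLASS allocation, and
 * arena_prof_promoted() then records the original usize for them.
 */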
static void *
imalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind,
    prof_tctx_t *tctx, bool slow_path)
{
    void *p;

    if (tctx == NULL)
        return (NULL);
    if (usize <= SMALL_MAXCLASS) {
        szind_t ind_large = size2index(LARGE_MINCLASS);
        p = imalloc(tsd, LARGE_MINCLASS, ind_large, slow_path);
        if (p == NULL)
            return (NULL);
        arena_prof_promoted(p, usize);
    } else
        p = imalloc(tsd, usize, ind, slow_path);

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imalloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool slow_path)
{
    void *p;
    prof_tctx_t *tctx;

    tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
    if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
        p = imalloc_prof_sample(tsd, usize, ind, tctx, slow_path);
    else
        p = imalloc(tsd, usize, ind, slow_path);
    if (unlikely(p == NULL)) {
        prof_alloc_rollback(tsd, tctx, true);
        return (NULL);
    }
    prof_malloc(p, usize, tctx);

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imalloc_body(size_t size, tsd_t **tsd, size_t *usize, bool slow_path)
{
    szind_t ind;

    if (slow_path && unlikely(malloc_init()))
        return (NULL);
    *tsd = tsd_fetch();
    ind = size2index(size);
    if (unlikely(ind >= NSIZES))
        return (NULL);

    if (config_stats || (config_prof && opt_prof) || (slow_path &&
        config_valgrind && unlikely(in_valgrind))) {
        *usize = index2size(ind);
        assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
    }

    if (config_prof && opt_prof)
        return (imalloc_prof(*tsd, *usize, ind, slow_path));

    return (imalloc(*tsd, size, ind, slow_path));
}

JEMALLOC_ALWAYS_INLINE_C void
imalloc_post_check(void *ret, tsd_t *tsd, size_t usize, bool slow_path)
{
    if (unlikely(ret == NULL)) {
        if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
            malloc_write("<jemalloc>: Error in malloc(): "
                "out of memory\n");
            abort();
        }
        set_errno(ENOMEM);
    }
    if (config_stats && likely(ret != NULL)) {
        assert(usize == isalloc(ret, config_prof));
        *tsd_thread_allocatedp_get(tsd) += usize;
    }
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size)
{
    void *ret;
    tsd_t *tsd;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);

    if (size == 0)
        size = 1;

    if (likely(!malloc_slow)) {
        /*
         * imalloc_body() is inlined so that fast and slow paths are
         * generated separately with statically known slow_path.
         */
        ret = imalloc_body(size, &tsd, &usize, false);
        imalloc_post_check(ret, tsd, usize, false);
    } else {
        ret = imalloc_body(size, &tsd, &usize, true);
        imalloc_post_check(ret, tsd, usize, true);
        UTRACE(0, size, ret);
        JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false);
    }

    return (ret);
}

static void *
imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
    prof_tctx_t *tctx)
{
    void *p;

    if (tctx == NULL)
        return (NULL);
    if (usize <= SMALL_MAXCLASS) {
        assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
        p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
        if (p == NULL)
            return (NULL);
        arena_prof_promoted(p, usize);
    } else
        p = ipalloc(tsd, usize, alignment, false);

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
{
    void *p;
    prof_tctx_t *tctx;

    tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
    if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
        p = imemalign_prof_sample(tsd, alignment, usize, tctx);
    else
        p = ipalloc(tsd, usize, alignment, false);
    if (unlikely(p == NULL)) {
        prof_alloc_rollback(tsd, tctx, true);
        return (NULL);
    }
    prof_malloc(p, usize, tctx);

    return (p);
}

JEMALLOC_ATTR(nonnull(1))
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
    int ret;
    tsd_t *tsd;
    size_t usize;
    void *result;

    assert(min_alignment != 0);

    if (unlikely(malloc_init())) {
        result = NULL;
        goto label_oom;
    }
    tsd = tsd_fetch();
    if (size == 0)
        size = 1;

    /* Make sure that alignment is a large enough power of 2. */
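    /*
     * For instance, alignment == 24 fails the test below (23 & 24 == 16),
     * while alignment == 16 passes (15 & 16 == 0).
     */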
    if (unlikely(((alignment - 1) & alignment) != 0
        || (alignment < min_alignment))) {
        if (config_xmalloc && unlikely(opt_xmalloc)) {
            malloc_write("<jemalloc>: Error allocating "
                "aligned memory: invalid alignment\n");
            abort();
        }
        result = NULL;
        ret = EINVAL;
        goto label_return;
    }

    usize = sa2u(size, alignment);
    if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
        result = NULL;
        goto label_oom;
    }

    if (config_prof && opt_prof)
        result = imemalign_prof(tsd, alignment, usize);
    else
        result = ipalloc(tsd, usize, alignment, false);
    if (unlikely(result == NULL))
        goto label_oom;
    assert(((uintptr_t)result & (alignment - 1)) == ZU(0));

    *memptr = result;
    ret = 0;
label_return:
    if (config_stats && likely(result != NULL)) {
        assert(usize == isalloc(result, config_prof));
        *tsd_thread_allocatedp_get(tsd) += usize;
    }
    UTRACE(0, size, result);
    return (ret);
label_oom:
    assert(result == NULL);
    if (config_xmalloc && unlikely(opt_xmalloc)) {
        malloc_write("<jemalloc>: Error allocating aligned memory: "
            "out of memory\n");
        abort();
    }
    ret = ENOMEM;
    goto label_return;
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
    int ret = imemalign(memptr, alignment, size, sizeof(void *));
    JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr,
        config_prof), false);
    return (ret);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
je_aligned_alloc(size_t alignment, size_t size)
{
    void *ret;
    int err;

    if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
        ret = NULL;
        set_errno(err);
    }
    JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof),
        false);
    return (ret);
}

static void *
icalloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, prof_tctx_t *tctx)
{
    void *p;

    if (tctx == NULL)
        return (NULL);
    if (usize <= SMALL_MAXCLASS) {
        szind_t ind_large = size2index(LARGE_MINCLASS);
        p = icalloc(tsd, LARGE_MINCLASS, ind_large);
        if (p == NULL)
            return (NULL);
        arena_prof_promoted(p, usize);
    } else
        p = icalloc(tsd, usize, ind);

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
icalloc_prof(tsd_t *tsd, size_t usize, szind_t ind)
{
    void *p;
    prof_tctx_t *tctx;

    tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
    if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
        p = icalloc_prof_sample(tsd, usize, ind, tctx);
    else
        p = icalloc(tsd, usize, ind);
    if (unlikely(p == NULL)) {
        prof_alloc_rollback(tsd, tctx, true);
        return (NULL);
    }
    prof_malloc(p, usize, tctx);

    return (p);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size)
{
    void *ret;
    tsd_t *tsd;
    size_t num_size;
    szind_t ind;
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);

    if (unlikely(malloc_init())) {
        num_size = 0;
        ret = NULL;
        goto label_return;
    }
    tsd = tsd_fetch();

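    /*
     * The overflow check below avoids a division in the common case: on a
     * 64-bit system the mask is SIZE_T_MAX << 32, so num_size / size is only
     * recomputed when num or size uses the upper half of size_t.
     */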
    num_size = num * size;
    if (unlikely(num_size == 0)) {
        if (num == 0 || size == 0)
            num_size = 1;
        else {
            ret = NULL;
            goto label_return;
        }
    /*
     * Try to avoid division here.  We know that it isn't possible to
     * overflow during multiplication if neither operand uses any of the
     * most significant half of the bits in a size_t.
     */
    } else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
        2))) && (num_size / size != num))) {
        /* size_t overflow. */
        ret = NULL;
        goto label_return;
    }

    ind = size2index(num_size);
    if (unlikely(ind >= NSIZES)) {
        ret = NULL;
        goto label_return;
    }
    if (config_prof && opt_prof) {
        usize = index2size(ind);
        ret = icalloc_prof(tsd, usize, ind);
    } else {
        if (config_stats || (config_valgrind && unlikely(in_valgrind)))
            usize = index2size(ind);
        ret = icalloc(tsd, num_size, ind);
    }

label_return:
    if (unlikely(ret == NULL)) {
        if (config_xmalloc && unlikely(opt_xmalloc)) {
            malloc_write("<jemalloc>: Error in calloc(): out of "
                "memory\n");
            abort();
        }
        set_errno(ENOMEM);
    }
    if (config_stats && likely(ret != NULL)) {
        assert(usize == isalloc(ret, config_prof));
        *tsd_thread_allocatedp_get(tsd) += usize;
    }
    UTRACE(0, num_size, ret);
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true);
    return (ret);
}

static void *
irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
    prof_tctx_t *tctx)
{
    void *p;

    if (tctx == NULL)
        return (NULL);
    if (usize <= SMALL_MAXCLASS) {
        p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
        if (p == NULL)
            return (NULL);
        arena_prof_promoted(p, usize);
    } else
        p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
{
    void *p;
    bool prof_active;
    prof_tctx_t *old_tctx, *tctx;

    prof_active = prof_active_get_unlocked();
    old_tctx = prof_tctx_get(old_ptr);
    tctx = prof_alloc_prep(tsd, usize, prof_active, true);
    if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
        p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
    else
        p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
    if (unlikely(p == NULL)) {
        prof_alloc_rollback(tsd, tctx, true);
        return (NULL);
    }
    prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
        old_tctx);

    return (p);
}

JEMALLOC_INLINE_C void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{
    size_t usize;
    UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

    assert(ptr != NULL);
    assert(malloc_initialized() || IS_INITIALIZER);

    if (config_prof && opt_prof) {
        usize = isalloc(ptr, config_prof);
        prof_free(tsd, ptr, usize);
    } else if (config_stats || config_valgrind)
        usize = isalloc(ptr, config_prof);
    if (config_stats)
        *tsd_thread_deallocatedp_get(tsd) += usize;

    if (likely(!slow_path))
        iqalloc(tsd, ptr, tcache, false);
    else {
        if (config_valgrind && unlikely(in_valgrind))
            rzsize = p2rz(ptr);
        iqalloc(tsd, ptr, tcache, true);
        JEMALLOC_VALGRIND_FREE(ptr, rzsize);
    }
}

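/*
 * Variant of ifree() for callers that already know the usable size, which
 * avoids the isalloc() lookup on the deallocation path.
 */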
JEMALLOC_INLINE_C void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache)
{
    UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

    assert(ptr != NULL);
    assert(malloc_initialized() || IS_INITIALIZER);

    if (config_prof && opt_prof)
        prof_free(tsd, ptr, usize);
    if (config_stats)
        *tsd_thread_deallocatedp_get(tsd) += usize;
    if (config_valgrind && unlikely(in_valgrind))
        rzsize = p2rz(ptr);
    isqalloc(tsd, ptr, usize, tcache);
    JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size)
{
    void *ret;
    tsd_t *tsd JEMALLOC_CC_SILENCE_INIT(NULL);
    size_t usize JEMALLOC_CC_SILENCE_INIT(0);
    size_t old_usize = 0;
    UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);

    if (unlikely(size == 0)) {
        if (ptr != NULL) {
            /* realloc(ptr, 0) is equivalent to free(ptr). */
            UTRACE(ptr, 0, 0);
            tsd = tsd_fetch();
            ifree(tsd, ptr, tcache_get(tsd, false), true);
            return (NULL);
        }
        size = 1;
    }

    if (likely(ptr != NULL)) {
        assert(malloc_initialized() || IS_INITIALIZER);
        malloc_thread_init();
        tsd = tsd_fetch();

        old_usize = isalloc(ptr, config_prof);
        if (config_valgrind && unlikely(in_valgrind))
            old_rzsize = config_prof ? p2rz(ptr) : u2rz(old_usize);

        if (config_prof && opt_prof) {
            usize = s2u(size);
            ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
                NULL : irealloc_prof(tsd, ptr, old_usize, usize);
        } else {
            if (config_stats || (config_valgrind &&
                unlikely(in_valgrind)))
                usize = s2u(size);
            ret = iralloc(tsd, ptr, old_usize, size, 0, false);
        }
    } else {
        /* realloc(NULL, size) is equivalent to malloc(size). */
        if (likely(!malloc_slow))
            ret = imalloc_body(size, &tsd, &usize, false);
        else
            ret = imalloc_body(size, &tsd, &usize, true);
    }

    if (unlikely(ret == NULL)) {
        if (config_xmalloc && unlikely(opt_xmalloc)) {
            malloc_write("<jemalloc>: Error in realloc(): "
                "out of memory\n");
            abort();
        }
        set_errno(ENOMEM);
    }
    if (config_stats && likely(ret != NULL)) {
        assert(usize == isalloc(ret, config_prof));
        *tsd_thread_allocatedp_get(tsd) += usize;
        *tsd_thread_deallocatedp_get(tsd) += old_usize;
    }
    UTRACE(ptr, size, ret);
    JEMALLOC_VALGRIND_REALLOC(true, ret, usize, true, ptr, old_usize,
        old_rzsize, true, false);
    return (ret);
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr)
{

    UTRACE(ptr, 0, 0);
    if (likely(ptr != NULL)) {
        tsd_t *tsd = tsd_fetch();
        if (likely(!malloc_slow))
            ifree(tsd, ptr, tcache_get(tsd, false), false);
        else
            ifree(tsd, ptr, tcache_get(tsd, false), true);
    }
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_memalign(size_t alignment, size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
        ret = NULL;
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
    return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_valloc(size_t size)
{
    void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
    if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
        ret = NULL;
    JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false);
    return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define malloc_is_malloc 1
#define is_malloc_(a) malloc_is_ ## a
#define is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
# endif
#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */
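
/*
 * As a hypothetical example, mallocx(42, MALLOCX_ALIGN(64) | MALLOCX_ZERO)
 * decodes below to usize = sa2u(42, 64), alignment = 64, zero = true, with
 * the default tcache and automatic arena choice.
 */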
JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode_hard(tsd_t *tsd, size_t size, int flags, size_t *usize,
    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{

    if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
        *alignment = 0;
        *usize = s2u(size);
    } else {
        *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
        *usize = sa2u(size, *alignment);
    }
    if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
        return (true);
    *zero = MALLOCX_ZERO_GET(flags);
    if ((flags & MALLOCX_TCACHE_MASK) != 0) {
        if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
            *tcache = NULL;
        else
            *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
    } else
        *tcache = tcache_get(tsd, true);
    if ((flags & MALLOCX_ARENA_MASK) != 0) {
        unsigned arena_ind = MALLOCX_ARENA_GET(flags);
        *arena = arena_get(arena_ind, true);
        if (unlikely(*arena == NULL))
            return (true);
    } else
        *arena = NULL;
    return (false);
}

JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{

    if (likely(flags == 0)) {
        *usize = s2u(size);
        if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
            return (true);
        *alignment = 0;
        *zero = false;
        *tcache = tcache_get(tsd, true);
        *arena = NULL;
        return (false);
    } else {
        return (imallocx_flags_decode_hard(tsd, size, flags, usize,
            alignment, zero, tcache, arena));
    }
}

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_flags(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena)
{
    szind_t ind;

    if (unlikely(alignment != 0))
        return (ipalloct(tsd, usize, alignment, zero, tcache, arena));
    ind = size2index(usize);
    assert(ind < NSIZES);
    if (unlikely(zero))
        return (icalloct(tsd, usize, ind, tcache, arena));
    return (imalloct(tsd, usize, ind, tcache, arena));
}

static void *
imallocx_prof_sample(tsd_t *tsd, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena)
{
    void *p;

    if (usize <= SMALL_MAXCLASS) {
        assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
            sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
        p = imallocx_flags(tsd, LARGE_MINCLASS, alignment, zero, tcache,
            arena);
        if (p == NULL)
            return (NULL);
        arena_prof_promoted(p, usize);
    } else
        p = imallocx_flags(tsd, usize, alignment, zero, tcache, arena);

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
{
    void *p;
    size_t alignment;
    bool zero;
    tcache_t *tcache;
    arena_t *arena;
    prof_tctx_t *tctx;

    if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
        &zero, &tcache, &arena)))
        return (NULL);
    tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
    if (likely((uintptr_t)tctx == (uintptr_t)1U))
        p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
    else if ((uintptr_t)tctx > (uintptr_t)1U) {
        p = imallocx_prof_sample(tsd, *usize, alignment, zero, tcache,
            arena);
    } else
        p = NULL;
    if (unlikely(p == NULL)) {
        prof_alloc_rollback(tsd, tctx, true);
        return (NULL);
    }
    prof_malloc(p, *usize, tctx);

    assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize)
{
    void *p;
    size_t alignment;
    bool zero;
    tcache_t *tcache;
    arena_t *arena;

    if (likely(flags == 0)) {
        szind_t ind = size2index(size);
        if (unlikely(ind >= NSIZES))
            return (NULL);
        if (config_stats || (config_valgrind &&
            unlikely(in_valgrind))) {
            *usize = index2size(ind);
            assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
        }
        return (imalloc(tsd, size, ind, true));
    }

    if (unlikely(imallocx_flags_decode_hard(tsd, size, flags, usize,
        &alignment, &zero, &tcache, &arena)))
        return (NULL);
    p = imallocx_flags(tsd, *usize, alignment, zero, tcache, arena);
    assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
    return (p);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_mallocx(size_t size, int flags)
{
    tsd_t *tsd;
    void *p;
    size_t usize;

    assert(size != 0);

    if (unlikely(malloc_init()))
        goto label_oom;
    tsd = tsd_fetch();

    if (config_prof && opt_prof)
        p = imallocx_prof(tsd, size, flags, &usize);
    else
        p = imallocx_no_prof(tsd, size, flags, &usize);
    if (unlikely(p == NULL))
        goto label_oom;

    if (config_stats) {
        assert(usize == isalloc(p, config_prof));
        *tsd_thread_allocatedp_get(tsd) += usize;
    }
    UTRACE(0, size, p);
    JEMALLOC_VALGRIND_MALLOC(true, p, usize, MALLOCX_ZERO_GET(flags));
    return (p);
label_oom:
    if (config_xmalloc && unlikely(opt_xmalloc)) {
        malloc_write("<jemalloc>: Error in mallocx(): out of memory\n");
        abort();
    }
    UTRACE(0, size, 0);
    return (NULL);
}

static void *
irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
    prof_tctx_t *tctx)
{
    void *p;

    if (tctx == NULL)
        return (NULL);
    if (usize <= SMALL_MAXCLASS) {
        p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
            zero, tcache, arena);
static void *
irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
		    zero, tcache, arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(p, usize);
	} else {
		p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
		    tcache, arena);
	}

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
    size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
    arena_t *arena)
{
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(old_ptr);
	tctx = prof_alloc_prep(tsd, *usize, prof_active, true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
		    alignment, zero, tcache, arena, tctx);
	} else {
		p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
		    tcache, arena);
	}
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}

	if (p == old_ptr && alignment != 0) {
		/*
		 * The allocation did not move, so it is possible that the size
		 * class is smaller than would guarantee the requested
		 * alignment, and that the alignment constraint was
		 * serendipitously satisfied.  Additionally, old_usize may not
		 * be the same as the current usize because of in-place large
		 * reallocation.  Therefore, query the actual value of usize.
		 */
		*usize = isalloc(p, config_prof);
	}
	prof_realloc(tsd, p, *usize, tctx, prof_active, true, old_ptr,
	    old_usize, old_tctx);

	return (p);
}

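/*
 * Note (added commentary, not part of the original source): both
 * imallocx_prof_sample() and irallocx_prof_sample() deliberately back sampled
 * small requests with a LARGE_MINCLASS allocation so that the region has room
 * for per-allocation profiling metadata; arena_prof_promoted() then records
 * the originally requested usize so that sallocx()/isalloc() keep reporting
 * the small size class to the application.
 */
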
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_rallocx(void *ptr, size_t size, int flags)
{
	void *p;
	tsd_t *tsd;
	size_t usize;
	size_t old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;
	arena_t *arena;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(size != 0);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();

	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		arena = arena_get(arena_ind, true);
		if (unlikely(arena == NULL))
			goto label_oom;
	} else
		arena = NULL;

	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, true);

	old_usize = isalloc(ptr, config_prof);
	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
			goto label_oom;
		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
		    zero, tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
	} else {
		p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
		    tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
			usize = isalloc(p, config_prof);
	}
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, p);
	JEMALLOC_VALGRIND_REALLOC(true, p, usize, false, ptr, old_usize,
	    old_rzsize, false, zero);
	return (p);
label_oom:
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
		abort();
	}
	UTRACE(ptr, size, 0);
	return (NULL);
}

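/*
 * Minimal usage sketch for rallocx() (illustrative only): on failure the
 * original allocation is left intact, so the result must not be assigned over
 * the old pointer unconditionally.
 *
 *	void *q = rallocx(p, new_size, MALLOCX_ALIGN(64));
 *	if (q == NULL)
 *		... p is still valid and unchanged ...
 *	else
 *		p = q;
 */
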
JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
	size_t usize;

	if (ixalloc(tsd, ptr, old_usize, size, extra, alignment, zero))
		return (old_usize);
	usize = isalloc(ptr, config_prof);

	return (usize);
}

static size_t
ixallocx_prof_sample(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
{
	size_t usize;

	if (tctx == NULL)
		return (old_usize);
	usize = ixallocx_helper(tsd, ptr, old_usize, size, extra, alignment,
	    zero);

	return (usize);
}

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
	size_t usize_max, usize;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(ptr);
	/*
	 * usize isn't knowable before ixalloc() returns when extra is non-zero.
	 * Therefore, compute its maximum possible value and use that in
	 * prof_alloc_prep() to decide whether to capture a backtrace.
	 * prof_realloc() will use the actual usize to decide whether to sample.
	 */
	if (alignment == 0) {
		usize_max = s2u(size+extra);
		assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
	} else {
		usize_max = sa2u(size+extra, alignment);
		if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
			/*
			 * usize_max is out of range, and chances are that
			 * allocation will fail, but use the maximum possible
			 * value and carry on with prof_alloc_prep(), just in
			 * case allocation succeeds.
			 */
			usize_max = HUGE_MAXCLASS;
		}
	}
	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);

	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		usize = ixallocx_prof_sample(tsd, ptr, old_usize, size, extra,
		    alignment, zero, tctx);
	} else {
		usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
		    alignment, zero);
	}
	if (usize == old_usize) {
		prof_alloc_rollback(tsd, tctx, false);
		return (usize);
	}
	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
	    old_tctx);

	return (usize);
}

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
	tsd_t *tsd;
	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;

	assert(ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();

	old_usize = isalloc(ptr, config_prof);

	/*
	 * The API explicitly absolves itself of protecting against (size +
	 * extra) numerical overflow, but we may need to clamp extra to avoid
	 * exceeding HUGE_MAXCLASS.
	 *
	 * Ordinarily, size limit checking is handled deeper down, but here we
	 * have to check as part of (size + extra) clamping, since we need the
	 * clamped value in the above helper functions.
	 */
	if (unlikely(size > HUGE_MAXCLASS)) {
		usize = old_usize;
		goto label_not_resized;
	}
	if (unlikely(HUGE_MAXCLASS - size < extra))
		extra = HUGE_MAXCLASS - size;

	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
		    alignment, zero);
	} else {
		usize = ixallocx_helper(tsd, ptr, old_usize, size, extra,
		    alignment, zero);
	}
	if (unlikely(usize == old_usize))
		goto label_not_resized;

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	JEMALLOC_VALGRIND_REALLOC(false, ptr, usize, false, ptr, old_usize,
	    old_rzsize, false, zero);
label_not_resized:
	UTRACE(ptr, size, ptr);
	return (usize);
}

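/*
 * Minimal usage sketch for xallocx() (illustrative only): xallocx() never
 * moves the allocation; it returns the resulting usable size, which is only
 * >= size when the in-place resize succeeded.  A common pattern is to fall
 * back to rallocx() when it does not:
 *
 *	if (xallocx(p, new_size, 0, 0) < new_size) {
 *		void *q = rallocx(p, new_size, 0);
 *		if (q != NULL)
 *			p = q;
 *	}
 */
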
JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags)
{
	size_t usize;

	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		usize = ivsalloc(ptr, config_prof);
	else
		usize = isalloc(ptr, config_prof);

	return (usize);
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_dallocx(void *ptr, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	tsd = tsd_fetch();
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	ifree(tsd_fetch(), ptr, tcache, true);
}

JEMALLOC_ALWAYS_INLINE_C size_t
inallocx(size_t size, int flags)
{
	size_t usize;

	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
		usize = s2u(size);
	else
		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
	return (usize);
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_sdallocx(void *ptr, size_t size, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);
	usize = inallocx(size, flags);
	assert(usize == isalloc(ptr, config_prof));

	tsd = tsd_fetch();
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	isfree(tsd, ptr, usize, tcache);
}

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_nallocx(size_t size, int flags)
{
	size_t usize;

	assert(size != 0);

	if (unlikely(malloc_init()))
		return (0);

	usize = inallocx(size, flags);
	if (unlikely(usize > HUGE_MAXCLASS))
		return (0);

	return (usize);
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{

	if (unlikely(malloc_init()))
		return (EAGAIN);

	return (ctl_byname(name, oldp, oldlenp, newp, newlen));
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{

	if (unlikely(malloc_init()))
		return (EAGAIN);

	return (ctl_nametomib(name, mibp, miblenp));
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{

	if (unlikely(malloc_init()))
		return (EAGAIN);

	return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen));
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{

	stats_print(write_cb, cbopaque, opts);
}

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
	size_t ret;

	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();

	if (config_ivsalloc)
		ret = ivsalloc(ptr, config_prof);
	else
		ret = (ptr == NULL) ? 0 : isalloc(ptr, config_prof);

	return (ret);
}

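/*
 * Minimal usage sketch for the mallctl*() interface (illustrative only):
 * statistics are refreshed by writing "epoch" before reading the stats.*
 * namespace.
 *
 *	uint64_t epoch = 1;
 *	size_t sz = sizeof(epoch);
 *	mallctl("epoch", &epoch, &sz, &epoch, sz);
 *
 *	size_t allocated;
 *	sz = sizeof(allocated);
 *	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
 *		... allocated now holds the total active byte count ...
 *
 * mallctlnametomib()/mallctlbymib() provide a cheaper path when the same
 * control is queried repeatedly.
 */
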
/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin compatibility functions.
 */

#define	ALLOCM_LG_ALIGN(la)	(la)
#define	ALLOCM_ALIGN(a)		(ffsl(a)-1)
#define	ALLOCM_ZERO		((int)0x40)
#define	ALLOCM_NO_MOVE		((int)0x80)

#define	ALLOCM_SUCCESS		0
#define	ALLOCM_ERR_OOM		1
#define	ALLOCM_ERR_NOT_MOVED	2

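/*
 * Note (added commentary, not part of the original source): the shims below
 * map the deprecated experimental *allocm() API onto the *allocx() functions
 * defined above.  The flag encodings are compatible, so for example
 *
 *	allocm(&p, &rsize, 4096, ALLOCM_ALIGN(64) | ALLOCM_ZERO);
 *
 * behaves like
 *
 *	p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *	rsize = sallocx(p, 0);
 *
 * with ALLOCM_* error codes substituted for NULL/zero returns.
 */
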
int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;

	assert(ptr != NULL);

	p = je_mallocx(size, flags);
	if (p == NULL)
		return (ALLOCM_ERR_OOM);
	if (rsize != NULL)
		*rsize = isalloc(p, config_prof);
	*ptr = p;
	return (ALLOCM_SUCCESS);
}

int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	int ret;
	bool no_move = flags & ALLOCM_NO_MOVE;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);

	if (no_move) {
		size_t usize = je_xallocx(*ptr, size, extra, flags);
		ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
		if (rsize != NULL)
			*rsize = usize;
	} else {
		void *p = je_rallocx(*ptr, size+extra, flags);
		if (p != NULL) {
			*ptr = p;
			ret = ALLOCM_SUCCESS;
		} else
			ret = ALLOCM_ERR_OOM;
		if (rsize != NULL)
			*rsize = isalloc(*ptr, config_prof);
	}
	return (ret);
}

int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{

	assert(rsize != NULL);
	*rsize = je_sallocx(ptr, flags);
	return (ALLOCM_SUCCESS);
}

int
je_dallocm(void *ptr, int flags)
{

	je_dallocx(ptr, flags);
	return (ALLOCM_SUCCESS);
}

int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;

	usize = je_nallocx(size, flags);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);
	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}

#undef ALLOCM_LG_ALIGN
#undef ALLOCM_ALIGN
#undef ALLOCM_ZERO
#undef ALLOCM_NO_MOVE

#undef ALLOCM_SUCCESS
#undef ALLOCM_ERR_OOM
#undef ALLOCM_ERR_NOT_MOVED

/*
 * End compatibility functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be possible
 * to trigger the deadlock described above, but doing so would involve forking
 * via a library constructor that runs before jemalloc's runs.
 */
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

	malloc_init();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	unsigned i, narenas;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized())
		return;
#endif
	assert(malloc_initialized());

	/* Acquire all mutexes in a safe order. */
	ctl_prefork();
	prof_prefork();
	malloc_mutex_prefork(&arenas_lock);
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(i, false)) != NULL)
			arena_prefork(arena);
	}
	chunk_prefork();
	base_prefork();
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	unsigned i, narenas;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized())
		return;
#endif
	assert(malloc_initialized());

	/* Release all mutexes, now that fork() has completed. */
	base_postfork_parent();
	chunk_postfork_parent();
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(i, false)) != NULL)
			arena_postfork_parent(arena);
	}
	malloc_mutex_postfork_parent(&arenas_lock);
	prof_postfork_parent();
	ctl_postfork_parent();
}

void
jemalloc_postfork_child(void)
{
	unsigned i, narenas;

	assert(malloc_initialized());

	/* Release all mutexes, now that fork() has completed. */
	base_postfork_child();
	chunk_postfork_child();
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(i, false)) != NULL)
			arena_postfork_child(arena);
	}
	malloc_mutex_postfork_child(&arenas_lock);
	prof_postfork_child();
	ctl_postfork_child();
}

void
_malloc_first_thread(void)
{

	(void)malloc_mutex_first_thread();
}

/******************************************************************************/
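
/*
 * Note (added commentary, not part of the original source): on most platforms
 * the prefork/postfork handlers above are registered during initialization,
 * conceptually as
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * whereas platforms that define JEMALLOC_MUTEX_INIT_CB instead export
 * _malloc_prefork()/_malloc_postfork() for the C library to call directly
 * around fork(2).
 */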