#define JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
const char *__malloc_options_1_0 = NULL;
__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);

/* Runtime configuration options. */
const char *je_malloc_conf
#ifndef _WIN32
    JEMALLOC_ATTR(weak)
#endif
    ;
bool opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
const char *opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    "true"
#else
    "false"
#endif
    ;
bool opt_junk_alloc =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
bool opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;

size_t opt_quarantine = ZU(0);
bool opt_redzone = false;
bool opt_utrace = false;
bool opt_xmalloc = false;
bool opt_zero = false;
unsigned opt_narenas = 0;

/* Initialized to true if the process is running inside Valgrind. */
bool in_valgrind;

unsigned ncpus;

/* Protects arenas initialization. */
static malloc_mutex_t arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the application
 * takes some action to create them and allocate from them.
 */
arena_t **arenas;
static unsigned narenas_total; /* Use narenas_total_*(). */
static arena_t *a0; /* arenas[0]; read-only after initialization. */
unsigned narenas_auto; /* Read-only after initialization. */

typedef enum {
	malloc_init_uninitialized = 3,
	malloc_init_a0_initialized = 2,
	malloc_init_recursible = 1,
	malloc_init_initialized = 0 /* Common case --> jnz. */
} malloc_init_t;
static malloc_init_t malloc_init_state = malloc_init_uninitialized;

/* False should be the common case.  Set to true to trigger initialization. */
static bool malloc_slow = true;

/* When malloc_slow is true, set the corresponding bits for sanity check. */
enum {
	flag_opt_junk_alloc = (1U),
	flag_opt_junk_free = (1U << 1),
	flag_opt_quarantine = (1U << 2),
	flag_opt_zero = (1U << 3),
	flag_opt_utrace = (1U << 4),
	flag_in_valgrind = (1U << 5),
	flag_opt_xmalloc = (1U << 6)
};
static uint8_t malloc_slow_flags;

JEMALLOC_ALIGNED(CACHELINE)
const size_t pind2sz_tab[NPSIZES] = {
#define PSZ_yes(lg_grp, ndelta, lg_delta) \
	(((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
#define PSZ_no(lg_grp, ndelta, lg_delta)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
	PSZ_##psz(lg_grp, ndelta, lg_delta)
	SIZE_CLASSES
#undef PSZ_yes
#undef PSZ_no
#undef SC
};

JEMALLOC_ALIGNED(CACHELINE)
const size_t index2size_tab[NSIZES] = {
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
	((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
	SIZE_CLASSES
#undef SC
};

JEMALLOC_ALIGNED(CACHELINE)
const uint8_t size2index_tab[] = {
#if LG_TINY_MIN == 0
#warning "Dangerous LG_TINY_MIN"
#define S2B_0(i)	i,
#elif LG_TINY_MIN == 1
#warning "Dangerous LG_TINY_MIN"
#define S2B_1(i)	i,
#elif LG_TINY_MIN == 2
#warning "Dangerous LG_TINY_MIN"
#define S2B_2(i)	i,
#elif LG_TINY_MIN == 3
#define S2B_3(i)	i,
#elif LG_TINY_MIN == 4
#define S2B_4(i)	i,
#elif LG_TINY_MIN == 5
#define S2B_5(i)	i,
#elif LG_TINY_MIN == 6
#define S2B_6(i)	i,
#elif LG_TINY_MIN == 7
#define S2B_7(i)	i,
#elif LG_TINY_MIN == 8
#define S2B_8(i)	i,
#elif LG_TINY_MIN == 9
#define S2B_9(i)	i,
#elif LG_TINY_MIN == 10
#define S2B_10(i)	i,
#elif LG_TINY_MIN == 11
#define S2B_11(i)	i,
#else
#error "Unsupported LG_TINY_MIN"
#endif
#if LG_TINY_MIN < 1
#define S2B_1(i)	S2B_0(i) S2B_0(i)
#endif
#if LG_TINY_MIN < 2
#define S2B_2(i)	S2B_1(i) S2B_1(i)
#endif
#if LG_TINY_MIN < 3
#define S2B_3(i)	S2B_2(i) S2B_2(i)
#endif
#if LG_TINY_MIN < 4
#define S2B_4(i)	S2B_3(i) S2B_3(i)
#endif
#if LG_TINY_MIN < 5
#define S2B_5(i)	S2B_4(i) S2B_4(i)
#endif
#if LG_TINY_MIN < 6
#define S2B_6(i)	S2B_5(i) S2B_5(i)
#endif
#if LG_TINY_MIN < 7
#define S2B_7(i)	S2B_6(i) S2B_6(i)
#endif
#if LG_TINY_MIN < 8
#define S2B_8(i)	S2B_7(i) S2B_7(i)
#endif
#if LG_TINY_MIN < 9
#define S2B_9(i)	S2B_8(i) S2B_8(i)
#endif
#if LG_TINY_MIN < 10
#define S2B_10(i)	S2B_9(i) S2B_9(i)
#endif
#if LG_TINY_MIN < 11
#define S2B_11(i)	S2B_10(i) S2B_10(i)
#endif
#define S2B_no(i)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup) \
	S2B_##lg_delta_lookup(index)
	SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_10
#undef S2B_11
#undef S2B_no
#undef SC
};
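
/*
 * Note (illustrative, derived from the macros above): size2index_tab is
 * indexed at LG_TINY_MIN granularity, so each size class whose
 * lg_delta_lookup is d contributes 2^(d - LG_TINY_MIN) copies of its index.
 * For example, assuming LG_TINY_MIN == 3, S2B_4(i) expands to "i, i," and
 * S2B_5(i) to four copies, via the doubling definitions above.
 */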

#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
# define NO_INITIALIZER	((unsigned long)0)
# define INITIALIZER	pthread_self()
# define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t malloc_initializer = NO_INITIALIZER;
#else
# define NO_INITIALIZER	false
# define INITIALIZER	true
# define IS_INITIALIZER	malloc_initializer
static bool malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
#if _WIN32_WINNT >= 0x0600
static malloc_mutex_t init_lock = SRWLOCK_INIT;
#else
static malloc_mutex_t init_lock;
static bool init_lock_initialized = false;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	/* If another constructor in the same binary is using mallctl to
	 * e.g. setup chunk hooks, it may end up running before this one,
	 * and malloc_init_hard will crash trying to lock the uninitialized
	 * lock.  So we force an initialization of the lock in
	 * malloc_init_hard as well.  We don't try to care about atomicity
	 * of the accesses to the init_lock_initialized boolean, since it
	 * really only matters early in the process creation, before any
	 * separate thread normally starts doing anything. */
	if (!init_lock_initialized)
		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
	init_lock_initialized = true;
}

#ifdef _MSC_VER
# pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
#endif
#else
static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void *p;	/* Input pointer (as in realloc(p, s)). */
	size_t s;	/* Request size. */
	void *r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
# define UTRACE(a, b, c) do { \
	if (unlikely(opt_utrace)) { \
		int utrace_serrno = errno; \
		malloc_utrace_t ut; \
		ut.p = (a); \
		ut.s = (b); \
		ut.r = (c); \
		utrace(&ut, sizeof(ut)); \
		errno = utrace_serrno; \
	} \
} while (0)
#else
# define UTRACE(a, b, c)
#endif

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool	malloc_init_hard_a0(void);
static bool	malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

JEMALLOC_ALWAYS_INLINE_C bool
malloc_initialized(void)
{

	return (malloc_init_state == malloc_init_initialized);
}

JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{

	/*
	 * TSD initialization can't be safely done as a side effect of
	 * deallocation, because it is possible for a thread to do nothing but
	 * deallocate its TLS data via free(), in which case writing to TLS
	 * would cause write-after-free memory corruption.  The quarantine
	 * facility *only* gets used as a side effect of deallocation, so make
	 * a best effort attempt at initializing its TSD by hooking all
	 * allocation events.
	 */
	if (config_fill && unlikely(opt_quarantine))
		quarantine_alloc_hook();
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init_a0(void)
{

	if (unlikely(malloc_init_state == malloc_init_uninitialized))
		return (malloc_init_hard_a0());
	return (false);
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

	if (unlikely(!malloc_initialized()) && malloc_init_hard())
		return (true);
	malloc_thread_init();

	return (false);
}

/*
 * The a0*() functions are used instead of i{d,}alloc() in situations that
 * cannot tolerate TLS variable access.
 */

static void *
a0ialloc(size_t size, bool zero, bool is_metadata)
{

	if (unlikely(malloc_init_a0()))
		return (NULL);

	return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
	    is_metadata, arena_get(TSDN_NULL, 0, true), true));
}

static void
a0idalloc(void *ptr, bool is_metadata)
{

	idalloctm(TSDN_NULL, ptr, false, is_metadata, true);
}

arena_t *
a0get(void)
{

	return (a0);
}

void *
a0malloc(size_t size)
{

	return (a0ialloc(size, false, true));
}

void
a0dalloc(void *ptr)
{

	a0idalloc(ptr, true);
}

/*
 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access (TLS allocation and very
 * early internal data structure initialization).
 */

void *
bootstrap_malloc(size_t size)
{

	if (unlikely(size == 0))
		size = 1;

	return (a0ialloc(size, false, false));
}

void *
bootstrap_calloc(size_t num, size_t size)
{
	size_t num_size;

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		assert(num == 0 || size == 0);
		num_size = 1;
	}

	return (a0ialloc(num_size, true, false));
}

void
bootstrap_free(void *ptr)
{

	if (unlikely(ptr == NULL))
		return;

	a0idalloc(ptr, false);
}

static void
arena_set(unsigned ind, arena_t *arena)
{

	atomic_write_p((void **)&arenas[ind], arena);
}

static void
narenas_total_set(unsigned narenas)
{

	atomic_write_u(&narenas_total, narenas);
}

static void
narenas_total_inc(void)
{

	atomic_add_u(&narenas_total, 1);
}

unsigned
narenas_total_get(void)
{

	return (atomic_read_u(&narenas_total));
}

/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(tsdn_t *tsdn, unsigned ind)
{
	arena_t *arena;

	assert(ind <= narenas_total_get());
	if (ind > MALLOCX_ARENA_MAX)
		return (NULL);
	if (ind == narenas_total_get())
		narenas_total_inc();

	/*
	 * Another thread may have already initialized arenas[ind] if it's an
	 * auto arena.
	 */
	arena = arena_get(tsdn, ind, false);
	if (arena != NULL) {
		assert(ind < narenas_auto);
		return (arena);
	}

	/* Actually initialize the arena. */
	arena = arena_new(tsdn, ind);
	arena_set(ind, arena);
	return (arena);
}

arena_t *
arena_init(tsdn_t *tsdn, unsigned ind)
{
	arena_t *arena;

	malloc_mutex_lock(tsdn, &arenas_lock);
	arena = arena_init_locked(tsdn, ind);
	malloc_mutex_unlock(tsdn, &arenas_lock);
	return (arena);
}

static void
arena_bind(tsd_t *tsd, unsigned ind, bool internal)
{
	arena_t *arena;

	if (!tsd_nominal(tsd))
		return;

	arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_inc(arena, internal);

	if (internal)
		tsd_iarena_set(tsd, arena);
	else
		tsd_arena_set(tsd, arena);
}

void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{
	arena_t *oldarena, *newarena;

	oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
	newarena = arena_get(tsd_tsdn(tsd), newind, false);
	arena_nthreads_dec(oldarena, false);
	arena_nthreads_inc(newarena, false);
	tsd_arena_set(tsd, newarena);
}

static void
arena_unbind(tsd_t *tsd, unsigned ind, bool internal)
{
	arena_t *arena;

	arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_dec(arena, internal);
	if (internal)
		tsd_iarena_set(tsd, NULL);
	else
		tsd_arena_set(tsd, NULL);
}

arena_tdata_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
{
	arena_tdata_t *tdata, *arenas_tdata_old;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
	unsigned narenas_tdata_old, i;
	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
	unsigned narenas_actual = narenas_total_get();

	/*
	 * Dissociate old tdata array (and set up for deallocation upon return)
	 * if it's too small.
	 */
	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
		arenas_tdata_old = arenas_tdata;
		narenas_tdata_old = narenas_tdata;
		arenas_tdata = NULL;
		narenas_tdata = 0;
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	} else {
		arenas_tdata_old = NULL;
		narenas_tdata_old = 0;
	}

	/* Allocate tdata array if it's missing. */
	if (arenas_tdata == NULL) {
		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;

		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
			*arenas_tdata_bypassp = true;
			arenas_tdata = (arena_tdata_t *)a0malloc(
			    sizeof(arena_tdata_t) * narenas_tdata);
			*arenas_tdata_bypassp = false;
		}
		if (arenas_tdata == NULL) {
			tdata = NULL;
			goto label_return;
		}
		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	}

	/*
	 * Copy to tdata array.  It's possible that the actual number of arenas
	 * has increased since narenas_total_get() was called above, but that
	 * causes no correctness issues unless two threads concurrently execute
	 * the arenas.extend mallctl, which we trust mallctl synchronization to
	 * prevent.
	 */

	/* Copy/initialize tickers. */
	for (i = 0; i < narenas_actual; i++) {
		if (i < narenas_tdata_old) {
			ticker_copy(&arenas_tdata[i].decay_ticker,
			    &arenas_tdata_old[i].decay_ticker);
		} else {
			ticker_init(&arenas_tdata[i].decay_ticker,
			    DECAY_NTICKS_PER_UPDATE);
		}
	}
	if (narenas_tdata > narenas_actual) {
		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
		    * (narenas_tdata - narenas_actual));
	}

	/* Read the refreshed tdata array. */
	tdata = &arenas_tdata[ind];
label_return:
	if (arenas_tdata_old != NULL)
		a0dalloc(arenas_tdata_old);
	return (tdata);
}

/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd, bool internal)
{
	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);

	if (narenas_auto > 1) {
		unsigned i, j, choose[2], first_null;

		/*
		 * Determine binding for both non-internal and internal
		 * allocation.
		 *
		 * choose[0]: For application allocation.
		 * choose[1]: For internal metadata allocation.
		 */

		for (j = 0; j < 2; j++)
			choose[j] = 0;

		first_null = narenas_auto;
		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				for (j = 0; j < 2; j++) {
					if (arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), i, false), !!j) <
					    arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), choose[j], false),
					    !!j))
						choose[j] = i;
				}
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		for (j = 0; j < 2; j++) {
			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
			    choose[j], false), !!j) == 0 || first_null ==
			    narenas_auto) {
				/*
				 * Use an unloaded arena, or the least loaded
				 * arena if all arenas are already initialized.
				 */
				if (!!j == internal) {
					ret = arena_get(tsd_tsdn(tsd),
					    choose[j], false);
				}
			} else {
				arena_t *arena;

				/* Initialize a new arena. */
				choose[j] = first_null;
				arena = arena_init_locked(tsd_tsdn(tsd),
				    choose[j]);
				if (arena == NULL) {
					malloc_mutex_unlock(tsd_tsdn(tsd),
					    &arenas_lock);
					return (NULL);
				}
				if (!!j == internal)
					ret = arena;
			}
			arena_bind(tsd, choose[j], !!j);
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
	} else {
		ret = arena_get(tsd_tsdn(tsd), 0, false);
		arena_bind(tsd, 0, false);
		arena_bind(tsd, 0, true);
	}

	return (ret);
}

void
thread_allocated_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

void
thread_deallocated_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

void
iarena_cleanup(tsd_t *tsd)
{
	arena_t *iarena;

	iarena = tsd_iarena_get(tsd);
	if (iarena != NULL)
		arena_unbind(tsd, iarena->ind, true);
}

void
arena_cleanup(tsd_t *tsd)
{
	arena_t *arena;

	arena = tsd_arena_get(tsd);
	if (arena != NULL)
		arena_unbind(tsd, arena->ind, false);
}

void
arenas_tdata_cleanup(tsd_t *tsd)
{
	arena_tdata_t *arenas_tdata;

	/* Prevent tsd->arenas_tdata from being (re)created. */
	*tsd_arenas_tdata_bypassp_get(tsd) = true;

	arenas_tdata = tsd_arenas_tdata_get(tsd);
	if (arenas_tdata != NULL) {
		tsd_arenas_tdata_set(tsd, NULL);
		a0dalloc(arenas_tdata);
	}
}

void
narenas_tdata_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

void
arenas_tdata_bypass_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		tsdn_t *tsdn;
		unsigned narenas, i;

		tsdn = tsdn_fetch();

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arena_get(tsdn, i, false);
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(tsdn, &arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tsdn, tcache, arena);
				}
				malloc_mutex_unlock(tsdn, &arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

#ifndef JEMALLOC_HAVE_SECURE_GETENV
static char *
secure_getenv(const char *name)
{

# ifdef JEMALLOC_HAVE_ISSETUGID
	if (issetugid() != 0)
		return (NULL);
# endif
	return (getenv(name));
}
#endif

static unsigned
malloc_ncpus(void)
{
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
	/*
	 * glibc >= 2.6 has the CPU_COUNT macro.
	 *
	 * glibc's sysconf() uses isspace().  glibc allocates for the first time
	 * *before* setting up the isspace tables.  Therefore we need a
	 * different method to get the number of CPUs.
	 */
	{
		cpu_set_t set;

		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
		result = CPU_COUNT(&set);
	}
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	return ((result == -1) ? 1 : (unsigned)result);
}
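
/*
 * Illustrative note: a configuration string is parsed below as a
 * comma-separated list of <key>:<value> pairs, e.g. "abort:true,narenas:4".
 * Keys are restricted to [A-Za-z0-9_]; values run to the next comma or the end
 * of the string.  Malformed input is reported via malloc_write() and the
 * remainder of the string is ignored.
 */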

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; !accept;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; !accept;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_slow_flag_init(void)
{
	/*
	 * Combine the runtime options into malloc_slow for fast path.  Called
	 * after processing all the options.
	 */
	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
	    | (opt_junk_free ? flag_opt_junk_free : 0)
	    | (opt_quarantine ? flag_opt_quarantine : 0)
	    | (opt_zero ? flag_opt_zero : 0)
	    | (opt_utrace ? flag_opt_utrace : 0)
	    | (opt_xmalloc ? flag_opt_xmalloc : 0);

	if (config_valgrind)
		malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);

	malloc_slow = (malloc_slow_flags != 0);
}
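
/*
 * Note (summary of the loop below): configuration is gathered from up to four
 * sources, processed in this order so that later sources override earlier
 * ones for conflicting options: the compile-time config_malloc_conf string,
 * the je_malloc_conf symbol (if the application defines it), the name of the
 * /etc/malloc.conf symbolic link, and finally the MALLOC_CONF environment
 * variable (prefixed with JEMALLOC_CPREFIX when JEMALLOC_PREFIX is defined).
 */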

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	/*
	 * Automatically configure valgrind before processing options.  The
	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
	 */
	if (config_valgrind) {
		in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
		if (config_fill && unlikely(in_valgrind)) {
			opt_junk = "false";
			opt_junk_alloc = false;
			opt_junk_free = false;
			assert(!opt_zero);
			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
			opt_redzone = true;
		}
		if (config_tcache && unlikely(in_valgrind))
			opt_tcache = false;
	}

	for (i = 0; i < 4; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			opts = config_malloc_conf;
			break;
		case 1:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 2: {
			ssize_t linklen = 0;
#ifndef _WIN32
			int saved_errno = errno;
			const char *linkname =
# ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
# else
			    "/etc/malloc.conf"
# endif
			    ;

			/*
			 * Try to use the contents of the "/etc/malloc.conf"
			 * symbolic link's name.
			 */
			linklen = readlink(linkname, buf, sizeof(buf) - 1);
			if (linklen == -1) {
				/* No configuration specified. */
				linklen = 0;
				/* Restore errno. */
				set_errno(saved_errno);
			}
#endif
			buf[linklen] = '\0';
			opts = buf;
			break;
		} case 3: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = secure_getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			not_reached();
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen)) {
#define CONF_MATCH(n) \
	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define CONF_MATCH_VALUE(n) \
	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
#define CONF_HANDLE_BOOL(o, n, cont) \
			if (CONF_MATCH(n)) { \
				if (CONF_MATCH_VALUE("true")) \
					o = true; \
				else if (CONF_MATCH_VALUE("false")) \
					o = false; \
				else { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} \
				if (cont) \
					continue; \
			}
#define CONF_MIN_no(um, min)	false
#define CONF_MIN_yes(um, min)	((um) < (min))
#define CONF_MAX_no(um, max)	false
#define CONF_MAX_yes(um, max)	((um) > (max))
#define CONF_HANDLE_T_U(t, o, n, min, max, check_min, check_max, clip) \
			if (CONF_MATCH(n)) { \
				uintmax_t um; \
				char *end; \
 \
				set_errno(0); \
				um = malloc_strtoumax(v, &end, 0); \
				if (get_errno() != 0 || (uintptr_t)end - \
				    (uintptr_t)v != vlen) { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} else if (clip) { \
					if (CONF_MIN_##check_min(um, \
					    (min))) \
						o = (t)(min); \
					else if (CONF_MAX_##check_max( \
					    um, (max))) \
						o = (t)(max); \
					else \
						o = (t)um; \
				} else { \
					if (CONF_MIN_##check_min(um, \
					    (min)) || \
					    CONF_MAX_##check_max(um, \
					    (max))) { \
						malloc_conf_error( \
						    "Out-of-range " \
						    "conf value", \
						    k, klen, v, vlen); \
					} else \
						o = (t)um; \
				} \
				continue; \
			}
#define CONF_HANDLE_UNSIGNED(o, n, min, max, check_min, check_max, clip) \
			CONF_HANDLE_T_U(unsigned, o, n, min, max, \
			    check_min, check_max, clip)
#define CONF_HANDLE_SIZE_T(o, n, min, max, check_min, check_max, clip) \
			CONF_HANDLE_T_U(size_t, o, n, min, max, \
			    check_min, check_max, clip)
#define CONF_HANDLE_SSIZE_T(o, n, min, max) \
			if (CONF_MATCH(n)) { \
				long l; \
				char *end; \
 \
				set_errno(0); \
				l = strtol(v, &end, 0); \
				if (get_errno() != 0 || (uintptr_t)end - \
				    (uintptr_t)v != vlen) { \
					malloc_conf_error( \
					    "Invalid conf value", \
					    k, klen, v, vlen); \
				} else if (l < (ssize_t)(min) || l > \
				    (ssize_t)(max)) { \
					malloc_conf_error( \
					    "Out-of-range conf value", \
					    k, klen, v, vlen); \
				} else \
					o = l; \
				continue; \
			}
#define CONF_HANDLE_CHAR_P(o, n, d) \
			if (CONF_MATCH(n)) { \
				size_t cpylen = (vlen <= \
				    sizeof(o)-1) ? vlen : \
				    sizeof(o)-1; \
				strncpy(o, v, cpylen); \
				o[cpylen] = '\0'; \
				continue; \
			}

			CONF_HANDLE_BOOL(opt_abort, "abort", true)
			/*
			 * Chunks always require at least one header page,
			 * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
			 * possibly an additional page in the presence of
			 * redzones.  In order to simplify options processing,
			 * use a conservative bound that accommodates all these
			 * constraints.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
			    (sizeof(size_t) << 3) - 1, yes, yes, true)
			if (strncmp("dss", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < dss_prec_limit; i++) {
					if (strncmp(dss_prec_names[i], v, vlen)
					    == 0) {
						if (chunk_dss_prec_set(i)) {
							malloc_conf_error(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
							    dss_prec_names[i];
							match = true;
							break;
						}
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
			    UINT_MAX, yes, no, false)
			if (strncmp("purge", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < purge_mode_limit; i++) {
					if (strncmp(purge_mode_names[i], v,
					    vlen) == 0) {
						opt_purge = (purge_mode_t)i;
						match = true;
						break;
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
			    NSTIME_SEC_MAX);
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
			if (config_fill) {
				if (CONF_MATCH("junk")) {
					if (CONF_MATCH_VALUE("true")) {
						if (config_valgrind &&
						    unlikely(in_valgrind)) {
							malloc_conf_error(
							    "Deallocation-time "
							    "junk filling cannot "
							    "be enabled while "
							    "running inside "
							    "Valgrind", k, klen, v,
							    vlen);
						} else {
							opt_junk = "true";
							opt_junk_alloc = true;
							opt_junk_free = true;
						}
					} else if (CONF_MATCH_VALUE("false")) {
						opt_junk = "false";
						opt_junk_alloc = opt_junk_free =
						    false;
					} else if (CONF_MATCH_VALUE("alloc")) {
						opt_junk = "alloc";
						opt_junk_alloc = true;
						opt_junk_free = false;
					} else if (CONF_MATCH_VALUE("free")) {
						if (config_valgrind &&
						    unlikely(in_valgrind)) {
							malloc_conf_error(
							    "Deallocation-time "
							    "junk filling cannot "
							    "be enabled while "
							    "running inside "
							    "Valgrind", k, klen, v,
							    vlen);
						} else {
							opt_junk = "free";
							opt_junk_alloc = false;
							opt_junk_free = true;
						}
					} else {
						malloc_conf_error(
						    "Invalid conf value", k,
						    klen, v, vlen);
					}
					continue;
				}
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX, no, no, false)
				CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
				CONF_HANDLE_BOOL(opt_zero, "zero", true)
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache",
				    !config_valgrind || !in_valgrind)
				if (CONF_MATCH("tcache")) {
					assert(config_valgrind && in_valgrind);
					if (opt_tcache) {
						opt_tcache = false;
						malloc_conf_error(
						    "tcache cannot be enabled "
						    "while running inside Valgrind",
						    k, klen, v, vlen);
					}
					continue;
				}
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof", true)
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
				    true)
				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
				    "prof_thread_active_init", true)
				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0, (sizeof(uint64_t) << 3)
				    - 1, no, yes, true)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
				    true)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
				    true)
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
				    true)
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
				    true)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_MATCH
#undef CONF_MATCH_VALUE
#undef CONF_HANDLE_BOOL
#undef CONF_MIN_no
#undef CONF_MIN_yes
#undef CONF_MAX_no
#undef CONF_MAX_yes
#undef CONF_HANDLE_T_U
#undef CONF_HANDLE_UNSIGNED
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}
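
/*
 * Note (roadmap, based on the functions below): initialization advances
 * malloc_init_state from malloc_init_uninitialized to
 * malloc_init_a0_initialized (malloc_init_hard_a0_locked()), then to
 * malloc_init_recursible (malloc_init_hard_recursible()), and finally to
 * malloc_init_initialized (malloc_init_hard_finish()), all under init_lock
 * except for the recursible step.
 */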

static bool
malloc_init_hard_needed(void)
{

	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
	    malloc_init_recursible)) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
		spin_t spinner;

		/* Busy-wait until the initializing thread completes. */
		spin_init(&spinner);
		do {
			malloc_mutex_unlock(TSDN_NULL, &init_lock);
			spin_adaptive(&spinner);
			malloc_mutex_lock(TSDN_NULL, &init_lock);
		} while (!malloc_initialized());
		return (false);
	}
#endif
	return (true);
}

static bool
malloc_init_hard_a0_locked()
{

	malloc_initializer = INITIALIZER;

	if (config_prof)
		prof_boot0();
	malloc_conf_init();
	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}
	pages_boot();
	if (base_boot())
		return (true);
	if (chunk_boot())
		return (true);
	if (ctl_boot())
		return (true);
	if (config_prof)
		prof_boot1();
	arena_boot();
	if (config_tcache && tcache_boot(TSDN_NULL))
		return (true);
	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
		return (true);
	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas_auto = 1;
	narenas_total_set(narenas_auto);
	arenas = &a0;
	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * arena_choose_hard().
	 */
	if (arena_init(TSDN_NULL, 0) == NULL)
		return (true);

	malloc_init_state = malloc_init_a0_initialized;

	return (false);
}

static bool
malloc_init_hard_a0(void)
{
	bool ret;

	malloc_mutex_lock(TSDN_NULL, &init_lock);
	ret = malloc_init_hard_a0_locked();
	malloc_mutex_unlock(TSDN_NULL, &init_lock);
	return (ret);
}

/* Initialize data structures which may trigger recursive allocation. */
static bool
malloc_init_hard_recursible(void)
{

	malloc_init_state = malloc_init_recursible;

	ncpus = malloc_ncpus();

#if (defined(JEMALLOC_HAVE_PTHREAD_ATFORK) && !defined(JEMALLOC_MUTEX_INIT_CB) \
    && !defined(JEMALLOC_ZONE) && !defined(_WIN32) && \
    !defined(__native_client__))
	/* LinuxThreads' pthread_atfork() allocates. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
		return (true);
	}
#endif

	return (false);
}

static bool
malloc_init_hard_finish(tsdn_t *tsdn)
{

	if (malloc_mutex_boot())
		return (true);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas_auto = opt_narenas;
	/*
	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
	 */
	if (narenas_auto > MALLOCX_ARENA_MAX) {
		narenas_auto = MALLOCX_ARENA_MAX;
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas_auto);
	}
	narenas_total_set(narenas_auto);

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) *
	    (MALLOCX_ARENA_MAX+1));
	if (arenas == NULL)
		return (true);
	/* Copy the pointer to the one arena that was already initialized. */
	arena_set(0, a0);

	malloc_init_state = malloc_init_initialized;
	malloc_slow_flag_init();

	return (false);
}

static bool
malloc_init_hard(void)
{
	tsd_t *tsd;

#if defined(_WIN32) && _WIN32_WINNT < 0x0600
	_init_init_lock();
#endif
	malloc_mutex_lock(TSDN_NULL, &init_lock);
	if (!malloc_init_hard_needed()) {
		malloc_mutex_unlock(TSDN_NULL, &init_lock);
		return (false);
	}

	if (malloc_init_state != malloc_init_a0_initialized &&
	    malloc_init_hard_a0_locked()) {
		malloc_mutex_unlock(TSDN_NULL, &init_lock);
		return (true);
	}

	malloc_mutex_unlock(TSDN_NULL, &init_lock);
	/* Recursive allocation relies on functional tsd. */
	tsd = malloc_tsd_boot0();
	if (tsd == NULL)
		return (true);
	if (malloc_init_hard_recursible())
		return (true);
	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);

	if (config_prof && prof_boot2(tsd)) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
		return (true);
	}

	if (malloc_init_hard_finish(tsd_tsdn(tsd))) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
		return (true);
	}

	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
	malloc_tsd_boot1();
	return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

static void *
ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
    prof_tctx_t *tctx, bool slow_path)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		szind_t ind_large = size2index(LARGE_MINCLASS);
		p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = ialloc(tsd, usize, ind, zero, slow_path);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path)
{
	void *p;
	prof_tctx_t *tctx;

	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path);
	else
		p = ialloc(tsd, usize, ind, zero, slow_path);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, usize, tctx);

	return (p);
}

/*
 * ialloc_body() is inlined so that fast and slow paths are generated separately
 * with statically known slow_path.
 *
 * This function guarantees that *tsdn is non-NULL on success.
 */
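/*
 * Illustrative note: callers such as je_malloc() below test malloc_slow once
 * and then invoke ialloc_body() with slow_path passed as a compile-time
 * constant, so the compiler emits a specialized fast path and a specialized
 * slow path from this single definition.
 */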
JEMALLOC_ALWAYS_INLINE_C void *
ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
    bool slow_path)
{
	tsd_t *tsd;
	szind_t ind;

	if (slow_path && unlikely(malloc_init())) {
		*tsdn = NULL;
		return (NULL);
	}

	tsd = tsd_fetch();
	*tsdn = tsd_tsdn(tsd);
	witness_assert_lockless(tsd_tsdn(tsd));

	ind = size2index(size);
	if (unlikely(ind >= NSIZES))
		return (NULL);

	if (config_stats || (config_prof && opt_prof) || (slow_path &&
	    config_valgrind && unlikely(in_valgrind))) {
		*usize = index2size(ind);
		assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
	}

	if (config_prof && opt_prof)
		return (ialloc_prof(tsd, *usize, ind, zero, slow_path));

	return (ialloc(tsd, size, ind, zero, slow_path));
}

JEMALLOC_ALWAYS_INLINE_C void
ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
    bool update_errno, bool slow_path)
{

	assert(!tsdn_null(tsdn) || ret == NULL);

	if (unlikely(ret == NULL)) {
		if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_printf("<jemalloc>: Error in %s(): out of "
			    "memory\n", func);
			abort();
		}
		if (update_errno)
			set_errno(ENOMEM);
	}
	if (config_stats && likely(ret != NULL)) {
		assert(usize == isalloc(tsdn, ret, config_prof));
		*tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
	}
	witness_assert_lockless(tsdn);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size)
{
	void *ret;
	tsdn_t *tsdn;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	if (size == 0)
		size = 1;

	if (likely(!malloc_slow)) {
		ret = ialloc_body(size, false, &tsdn, &usize, false);
		ialloc_post_check(ret, tsdn, usize, "malloc", true, false);
	} else {
		ret = ialloc_body(size, false, &tsdn, &usize, true);
		ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
		UTRACE(0, size, ret);
		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
	}

	return (ret);
}

static void *
imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
		p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = ipalloc(tsd, usize, alignment, false);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
{
	void *p;
	prof_tctx_t *tctx;

	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = imemalign_prof_sample(tsd, alignment, usize, tctx);
	else
		p = ipalloc(tsd, usize, alignment, false);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, usize, tctx);

	return (p);
}

JEMALLOC_ATTR(nonnull(1))
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
	int ret;
	tsd_t *tsd;
	size_t usize;
	void *result;

	assert(min_alignment != 0);

	if (unlikely(malloc_init())) {
		tsd = NULL;
		result = NULL;
		goto label_oom;
	}
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	if (size == 0)
		size = 1;

	/* Make sure that alignment is a large enough power of 2. */
	if (unlikely(((alignment - 1) & alignment) != 0
	    || (alignment < min_alignment))) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write("<jemalloc>: Error allocating "
			    "aligned memory: invalid alignment\n");
			abort();
		}
		result = NULL;
		ret = EINVAL;
		goto label_return;
	}

	usize = sa2u(size, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
		result = NULL;
		goto label_oom;
	}

	if (config_prof && opt_prof)
		result = imemalign_prof(tsd, alignment, usize);
	else
		result = ipalloc(tsd, usize, alignment, false);
	if (unlikely(result == NULL))
		goto label_oom;
	assert(((uintptr_t)result & (alignment - 1)) == ZU(0));

	*memptr = result;
	ret = 0;
label_return:
	if (config_stats && likely(result != NULL)) {
		assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof));
		*tsd_thread_allocatedp_get(tsd) += usize;
	}
	UTRACE(0, size, result);
	JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
	    false);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
label_oom:
	assert(result == NULL);
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error allocating aligned memory: "
		    "out of memory\n");
		abort();
	}
	ret = ENOMEM;
	witness_assert_lockless(tsd_tsdn(tsd));
	goto label_return;
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret;

	ret = imemalign(memptr, alignment, size, sizeof(void *));

	return (ret);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
		ret = NULL;
		set_errno(err);
	}

	return (ret);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size)
{
	void *ret;
	tsdn_t *tsdn;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		if (num == 0 || size == 0)
			num_size = 1;
		else
			num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
	    2))) && (num_size / size != num)))
		num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */
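	/*
	 * Worked example (assuming a 64-bit size_t): sizeof(size_t) << 2 is
	 * 32, so the mask in the check above covers the upper 32 bits.  If
	 * both num and size fit in 32 bits their product cannot overflow 64
	 * bits, and the division test is skipped on that common path.
	 */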

	if (likely(!malloc_slow)) {
		ret = ialloc_body(num_size, true, &tsdn, &usize, false);
		ialloc_post_check(ret, tsdn, usize, "calloc", true, false);
	} else {
		ret = ialloc_body(num_size, true, &tsdn, &usize, true);
		ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
		UTRACE(0, num_size, ret);
		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
	}

	return (ret);
}

static void *
irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
{
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
	tctx = prof_alloc_prep(tsd, usize, prof_active, true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
	else
		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
	    old_tctx);

	return (p);
}

JEMALLOC_INLINE_C void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{
	size_t usize;
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	witness_assert_lockless(tsd_tsdn(tsd));

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	if (config_prof && opt_prof) {
		usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
		prof_free(tsd, ptr, usize);
	} else if (config_stats || config_valgrind)
		usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
	if (config_stats)
		*tsd_thread_deallocatedp_get(tsd) += usize;

	if (likely(!slow_path))
		iqalloc(tsd, ptr, tcache, false);
	else {
		if (config_valgrind && unlikely(in_valgrind))
			rzsize = p2rz(tsd_tsdn(tsd), ptr);
		iqalloc(tsd, ptr, tcache, true);
		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
	}
}

JEMALLOC_INLINE_C void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
{
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	witness_assert_lockless(tsd_tsdn(tsd));

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	if (config_prof && opt_prof)
		prof_free(tsd, ptr, usize);
	if (config_stats)
		*tsd_thread_deallocatedp_get(tsd) += usize;
	if (config_valgrind && unlikely(in_valgrind))
		rzsize = p2rz(tsd_tsdn(tsd), ptr);
	isqalloc(tsd, ptr, usize, tcache, slow_path);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size)
{
	void *ret;
	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_usize = 0;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);

	if (unlikely(size == 0)) {
		if (ptr != NULL) {
			tsd_t *tsd;

			/* realloc(ptr, 0) is equivalent to free(ptr). */
			UTRACE(ptr, 0, 0);
			tsd = tsd_fetch();
			ifree(tsd, ptr, tcache_get(tsd, false), true);
			return (NULL);
		}
		size = 1;
	}

	if (likely(ptr != NULL)) {
		tsd_t *tsd;

		assert(malloc_initialized() || IS_INITIALIZER);
		malloc_thread_init();
		tsd = tsd_fetch();

		witness_assert_lockless(tsd_tsdn(tsd));

		old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
		if (config_valgrind && unlikely(in_valgrind)) {
			old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
			    u2rz(old_usize);
		}

		if (config_prof && opt_prof) {
			usize = s2u(size);
			ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
			    NULL : irealloc_prof(tsd, ptr, old_usize, usize);
		} else {
			if (config_stats || (config_valgrind &&
			    unlikely(in_valgrind)))
				usize = s2u(size);
			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
		}
		tsdn = tsd_tsdn(tsd);
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (likely(!malloc_slow))
			ret = ialloc_body(size, false, &tsdn, &usize, false);
		else
			ret = ialloc_body(size, false, &tsdn, &usize, true);
		assert(!tsdn_null(tsdn) || ret == NULL);
	}

	if (unlikely(ret == NULL)) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write("<jemalloc>: Error in realloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && likely(ret != NULL)) {
		tsd_t *tsd;

		assert(usize == isalloc(tsdn, ret, config_prof));
		tsd = tsdn_tsd(tsdn);
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(maybe, tsdn, ret, usize, maybe, ptr,
	    old_usize, old_rzsize, maybe, false);
	witness_assert_lockless(tsdn);
	return (ret);
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (likely(ptr != NULL)) {
		tsd_t *tsd = tsd_fetch();
		witness_assert_lockless(tsd_tsdn(tsd));
		if (likely(!malloc_slow))
			ifree(tsd, ptr, tcache_get(tsd, false), false);
		else
			ifree(tsd, ptr, tcache_get(tsd, false), true);
		witness_assert_lockless(tsd_tsdn(tsd));
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
		ret = NULL;
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
		ret = NULL;
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
# ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
# endif

#ifdef CPU_COUNT
/*
 * To enable static linking with glibc, the libc specific malloc interface must
 * be implemented also, so none of glibc's malloc.o functions are added to the
 * link.
 */
#define	ALIAS(je_fn)	__attribute__((alias (#je_fn), used))
/* To force macro expansion of je_ prefix before stringification. */
#define	PREALIAS(je_fn)	ALIAS(je_fn)
void	*__libc_malloc(size_t size) PREALIAS(je_malloc);
void	__libc_free(void* ptr) PREALIAS(je_free);
void	*__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
void	*__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
void	*__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
void	*__libc_valloc(size_t size) PREALIAS(je_valloc);
int	__posix_memalign(void** r, size_t a, size_t s)
    PREALIAS(je_posix_memalign);
#undef PREALIAS
#undef ALIAS

#endif

#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */
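/*
 * Illustrative note: imallocx_flags_decode() below unpacks the mallocx()
 * flags argument.  For example, a call such as
 * mallocx(n, MALLOCX_ALIGN(64) | MALLOCX_ZERO | MALLOCX_ARENA(ind)) decodes to
 * alignment == 64, zero == true, and an explicitly selected arena, while a
 * zero flags argument selects the defaults (natural alignment, unzeroed
 * memory, the thread's tcache, and automatic arena choice).
 */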

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{

    if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
        *alignment = 0;
        *usize = s2u(size);
    } else {
        *alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
        *usize = sa2u(size, *alignment);
    }
    if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
        return (true);
    *zero = MALLOCX_ZERO_GET(flags);
    if ((flags & MALLOCX_TCACHE_MASK) != 0) {
        if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
            *tcache = NULL;
        else
            *tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
    } else
        *tcache = tcache_get(tsd, true);
    if ((flags & MALLOCX_ARENA_MASK) != 0) {
        unsigned arena_ind = MALLOCX_ARENA_GET(flags);
        *arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
        if (unlikely(*arena == NULL))
            return (true);
    } else
        *arena = NULL;
    return (false);
}

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_flags(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena, bool slow_path)
{
    szind_t ind;

    if (unlikely(alignment != 0))
        return (ipalloct(tsdn, usize, alignment, zero, tcache, arena));
    ind = size2index(usize);
    assert(ind < NSIZES);
    return (iallocztm(tsdn, usize, ind, zero, tcache, false, arena,
        slow_path));
}

static void *
imallocx_prof_sample(tsdn_t *tsdn, size_t usize, size_t alignment, bool zero,
    tcache_t *tcache, arena_t *arena, bool slow_path)
{
    void *p;

    if (usize <= SMALL_MAXCLASS) {
        assert(((alignment == 0) ? s2u(LARGE_MINCLASS) :
            sa2u(LARGE_MINCLASS, alignment)) == LARGE_MINCLASS);
        p = imallocx_flags(tsdn, LARGE_MINCLASS, alignment, zero,
            tcache, arena, slow_path);
        if (p == NULL)
            return (NULL);
        arena_prof_promoted(tsdn, p, usize);
    } else {
        p = imallocx_flags(tsdn, usize, alignment, zero, tcache, arena,
            slow_path);
    }

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_prof(tsd_t *tsd, size_t size, int flags, size_t *usize, bool slow_path)
{
    void *p;
    size_t alignment;
    bool zero;
    tcache_t *tcache;
    arena_t *arena;
    prof_tctx_t *tctx;

    if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
        &zero, &tcache, &arena)))
        return (NULL);
    tctx = prof_alloc_prep(tsd, *usize, prof_active_get_unlocked(), true);
    if (likely((uintptr_t)tctx == (uintptr_t)1U)) {
        p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero,
            tcache, arena, slow_path);
    } else if ((uintptr_t)tctx > (uintptr_t)1U) {
        p = imallocx_prof_sample(tsd_tsdn(tsd), *usize, alignment, zero,
            tcache, arena, slow_path);
    } else
        p = NULL;
    if (unlikely(p == NULL)) {
        prof_alloc_rollback(tsd, tctx, true);
        return (NULL);
    }
    prof_malloc(tsd_tsdn(tsd), p, *usize, tctx);

    assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imallocx_no_prof(tsd_t *tsd, size_t size, int flags, size_t *usize,
    bool slow_path)
{
    void *p;
    size_t alignment;
    bool zero;
    tcache_t *tcache;
    arena_t *arena;

    if (unlikely(imallocx_flags_decode(tsd, size, flags, usize, &alignment,
        &zero, &tcache, &arena)))
        return (NULL);
    p = imallocx_flags(tsd_tsdn(tsd), *usize, alignment, zero, tcache,
        arena, slow_path);
    assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));
    return (p);
}

/* This function guarantees that *tsdn is non-NULL on success. */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
    bool slow_path)
{
    tsd_t *tsd;

    if (slow_path && unlikely(malloc_init())) {
        *tsdn = NULL;
        return (NULL);
    }

    tsd = tsd_fetch();
    *tsdn = tsd_tsdn(tsd);
    witness_assert_lockless(tsd_tsdn(tsd));

    if (likely(flags == 0)) {
        szind_t ind = size2index(size);
        if (unlikely(ind >= NSIZES))
            return (NULL);
        if (config_stats || (config_prof && opt_prof) || (slow_path &&
            config_valgrind && unlikely(in_valgrind))) {
            *usize = index2size(ind);
            assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
        }

        if (config_prof && opt_prof) {
            return (ialloc_prof(tsd, *usize, ind, false,
                slow_path));
        }

        return (ialloc(tsd, size, ind, false, slow_path));
    }

    if (config_prof && opt_prof)
        return (imallocx_prof(tsd, size, flags, usize, slow_path));

    return (imallocx_no_prof(tsd, size, flags, usize, slow_path));
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_mallocx(size_t size, int flags)
{
    tsdn_t *tsdn;
    void *p;
    size_t usize;

    assert(size != 0);

    if (likely(!malloc_slow)) {
        p = imallocx_body(size, flags, &tsdn, &usize, false);
        ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
    } else {
        p = imallocx_body(size, flags, &tsdn, &usize, true);
        ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
        UTRACE(0, size, p);
        JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
            MALLOCX_ZERO_GET(flags));
    }

    return (p);
}
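
/*
 * Illustrative usage sketch (not part of the library): je_mallocx() is the
 * public mallocx(3) entry point once the je_ prefix is mapped to the installed
 * names.  The flag bits decoded above combine as in, e.g.:
 *
 *	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *	void *q = mallocx(32, MALLOCX_TCACHE_NONE);
 *
 * size must be non-zero, and a NULL return indicates failure.
 */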

static void *
irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
    prof_tctx_t *tctx)
{
    void *p;

    if (tctx == NULL)
        return (NULL);
    if (usize <= SMALL_MAXCLASS) {
        p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
            zero, tcache, arena);
        if (p == NULL)
            return (NULL);
        arena_prof_promoted(tsd_tsdn(tsd), p, usize);
    } else {
        p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
            tcache, arena);
    }

    return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
    size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
    arena_t *arena)
{
    void *p;
    bool prof_active;
    prof_tctx_t *old_tctx, *tctx;

    prof_active = prof_active_get_unlocked();
    old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
    tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
    if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
        p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
            alignment, zero, tcache, arena, tctx);
    } else {
        p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
            tcache, arena);
    }
    if (unlikely(p == NULL)) {
        prof_alloc_rollback(tsd, tctx, false);
        return (NULL);
    }

    if (p == old_ptr && alignment != 0) {
        /*
         * The allocation did not move, so it is possible that the size
         * class is smaller than would guarantee the requested
         * alignment, and that the alignment constraint was
         * serendipitously satisfied.  Additionally, old_usize may not
         * be the same as the current usize because of in-place large
         * reallocation.  Therefore, query the actual value of usize.
         */
        *usize = isalloc(tsd_tsdn(tsd), p, config_prof);
    }
    prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
        old_usize, old_tctx);

    return (p);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_rallocx(void *ptr, size_t size, int flags)
{
    void *p;
    tsd_t *tsd;
    size_t usize;
    size_t old_usize;
    UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
    size_t alignment = MALLOCX_ALIGN_GET(flags);
    bool zero = flags & MALLOCX_ZERO;
    arena_t *arena;
    tcache_t *tcache;

    assert(ptr != NULL);
    assert(size != 0);
    assert(malloc_initialized() || IS_INITIALIZER);
    malloc_thread_init();
    tsd = tsd_fetch();
    witness_assert_lockless(tsd_tsdn(tsd));

    if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
        unsigned arena_ind = MALLOCX_ARENA_GET(flags);
        arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
        if (unlikely(arena == NULL))
            goto label_oom;
    } else
        arena = NULL;

    if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
        if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
            tcache = NULL;
        else
            tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
    } else
        tcache = tcache_get(tsd, true);

    old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
    if (config_valgrind && unlikely(in_valgrind))
        old_rzsize = u2rz(old_usize);

    if (config_prof && opt_prof) {
        usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
        if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
            goto label_oom;
        p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
            zero, tcache, arena);
        if (unlikely(p == NULL))
            goto label_oom;
    } else {
        p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
            tcache, arena);
        if (unlikely(p == NULL))
            goto label_oom;
        if (config_stats || (config_valgrind && unlikely(in_valgrind)))
            usize = isalloc(tsd_tsdn(tsd), p, config_prof);
    }
    assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));

    if (config_stats) {
        *tsd_thread_allocatedp_get(tsd) += usize;
        *tsd_thread_deallocatedp_get(tsd) += old_usize;
    }
    UTRACE(ptr, size, p);
    JEMALLOC_VALGRIND_REALLOC(maybe, tsd_tsdn(tsd), p, usize, no, ptr,
        old_usize, old_rzsize, no, zero);
    witness_assert_lockless(tsd_tsdn(tsd));
    return (p);
label_oom:
    if (config_xmalloc && unlikely(opt_xmalloc)) {
        malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
        abort();
    }
    UTRACE(ptr, size, 0);
    witness_assert_lockless(tsd_tsdn(tsd));
    return (NULL);
}
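
/*
 * Illustrative usage sketch (not part of the library): je_rallocx() is the
 * public rallocx(3) entry point; it takes the same flags as mallocx() and may
 * move the allocation, e.g.
 *
 *	void *q = rallocx(p, 2 * sz, MALLOCX_ALIGN(64));
 *
 * A NULL return means the resize failed and, as the label_oom path above
 * shows, the original allocation at p is left intact.
 */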

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
    size_t usize;

    if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
        return (old_usize);
    usize = isalloc(tsdn, ptr, config_prof);

    return (usize);
}

static size_t
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
{
    size_t usize;

    if (tctx == NULL)
        return (old_usize);
    usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
        zero);

    return (usize);
}

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
    size_t usize_max, usize;
    bool prof_active;
    prof_tctx_t *old_tctx, *tctx;

    prof_active = prof_active_get_unlocked();
    old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
    /*
     * usize isn't knowable before ixalloc() returns when extra is non-zero.
     * Therefore, compute its maximum possible value and use that in
     * prof_alloc_prep() to decide whether to capture a backtrace.
     * prof_realloc() will use the actual usize to decide whether to sample.
     */
    if (alignment == 0) {
        usize_max = s2u(size+extra);
        assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
    } else {
        usize_max = sa2u(size+extra, alignment);
        if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
            /*
             * usize_max is out of range, and chances are that
             * allocation will fail, but use the maximum possible
             * value and carry on with prof_alloc_prep(), just in
             * case allocation succeeds.
             */
            usize_max = HUGE_MAXCLASS;
        }
    }
    tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);

    if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
        usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
            size, extra, alignment, zero, tctx);
    } else {
        usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
            extra, alignment, zero);
    }
    if (usize == old_usize) {
        prof_alloc_rollback(tsd, tctx, false);
        return (usize);
    }
    prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
        old_tctx);

    return (usize);
}

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
    tsd_t *tsd;
    size_t usize, old_usize;
    UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
    size_t alignment = MALLOCX_ALIGN_GET(flags);
    bool zero = flags & MALLOCX_ZERO;

    assert(ptr != NULL);
    assert(size != 0);
    assert(SIZE_T_MAX - size >= extra);
    assert(malloc_initialized() || IS_INITIALIZER);
    malloc_thread_init();
    tsd = tsd_fetch();
    witness_assert_lockless(tsd_tsdn(tsd));

    old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);

    /*
     * The API explicitly absolves itself of protecting against (size +
     * extra) numerical overflow, but we may need to clamp extra to avoid
     * exceeding HUGE_MAXCLASS.
     *
     * Ordinarily, size limit checking is handled deeper down, but here we
     * have to check as part of (size + extra) clamping, since we need the
     * clamped value in the above helper functions.
     */
    if (unlikely(size > HUGE_MAXCLASS)) {
        usize = old_usize;
        goto label_not_resized;
    }
    if (unlikely(HUGE_MAXCLASS - size < extra))
        extra = HUGE_MAXCLASS - size;

    if (config_valgrind && unlikely(in_valgrind))
        old_rzsize = u2rz(old_usize);

    if (config_prof && opt_prof) {
        usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
            alignment, zero);
    } else {
        usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
            extra, alignment, zero);
    }
    if (unlikely(usize == old_usize))
        goto label_not_resized;

    if (config_stats) {
        *tsd_thread_allocatedp_get(tsd) += usize;
        *tsd_thread_deallocatedp_get(tsd) += old_usize;
    }
    JEMALLOC_VALGRIND_REALLOC(no, tsd_tsdn(tsd), ptr, usize, no, ptr,
        old_usize, old_rzsize, no, zero);
label_not_resized:
    UTRACE(ptr, size, ptr);
    witness_assert_lockless(tsd_tsdn(tsd));
    return (usize);
}
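
/*
 * Illustrative usage sketch (not part of the library): je_xallocx() is the
 * public xallocx(3) entry point.  It resizes strictly in place and returns
 * the resulting usable size, so callers typically fall back to rallocx()
 * when the in-place attempt comes up short, e.g.
 *
 *	if (xallocx(p, new_sz, 0, 0) < new_sz)
 *		p = rallocx(p, new_sz, 0);
 */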

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags)
{
    size_t usize;
    tsdn_t *tsdn;

    assert(malloc_initialized() || IS_INITIALIZER);
    malloc_thread_init();

    tsdn = tsdn_fetch();
    witness_assert_lockless(tsdn);

    if (config_ivsalloc)
        usize = ivsalloc(tsdn, ptr, config_prof);
    else
        usize = isalloc(tsdn, ptr, config_prof);

    witness_assert_lockless(tsdn);
    return (usize);
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_dallocx(void *ptr, int flags)
{
    tsd_t *tsd;
    tcache_t *tcache;

    assert(ptr != NULL);
    assert(malloc_initialized() || IS_INITIALIZER);

    tsd = tsd_fetch();
    witness_assert_lockless(tsd_tsdn(tsd));
    if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
        if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
            tcache = NULL;
        else
            tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
    } else
        tcache = tcache_get(tsd, false);

    UTRACE(ptr, 0, 0);
    if (likely(!malloc_slow))
        ifree(tsd, ptr, tcache, false);
    else
        ifree(tsd, ptr, tcache, true);
    witness_assert_lockless(tsd_tsdn(tsd));
}

JEMALLOC_ALWAYS_INLINE_C size_t
inallocx(tsdn_t *tsdn, size_t size, int flags)
{
    size_t usize;

    witness_assert_lockless(tsdn);

    if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
        usize = s2u(size);
    else
        usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
    witness_assert_lockless(tsdn);
    return (usize);
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_sdallocx(void *ptr, size_t size, int flags)
{
    tsd_t *tsd;
    tcache_t *tcache;
    size_t usize;

    assert(ptr != NULL);
    assert(malloc_initialized() || IS_INITIALIZER);
    tsd = tsd_fetch();
    usize = inallocx(tsd_tsdn(tsd), size, flags);
    assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));

    witness_assert_lockless(tsd_tsdn(tsd));
    if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
        if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
            tcache = NULL;
        else
            tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
    } else
        tcache = tcache_get(tsd, false);

    UTRACE(ptr, 0, 0);
    if (likely(!malloc_slow))
        isfree(tsd, ptr, usize, tcache, false);
    else
        isfree(tsd, ptr, usize, tcache, true);
    witness_assert_lockless(tsd_tsdn(tsd));
}

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_nallocx(size_t size, int flags)
{
    size_t usize;
    tsdn_t *tsdn;

    assert(size != 0);

    if (unlikely(malloc_init()))
        return (0);

    tsdn = tsdn_fetch();
    witness_assert_lockless(tsdn);

    usize = inallocx(tsdn, size, flags);
    if (unlikely(usize > HUGE_MAXCLASS))
        return (0);

    witness_assert_lockless(tsdn);
    return (usize);
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
    int ret;
    tsd_t *tsd;

    if (unlikely(malloc_init()))
        return (EAGAIN);

    tsd = tsd_fetch();
    witness_assert_lockless(tsd_tsdn(tsd));
    ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
    witness_assert_lockless(tsd_tsdn(tsd));
    return (ret);
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
    int ret;
    tsdn_t *tsdn;

    if (unlikely(malloc_init()))
        return (EAGAIN);

    tsdn = tsdn_fetch();
    witness_assert_lockless(tsdn);
    ret = ctl_nametomib(tsdn, name, mibp, miblenp);
    witness_assert_lockless(tsdn);
    return (ret);
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
    int ret;
    tsd_t *tsd;

    if (unlikely(malloc_init()))
        return (EAGAIN);

    tsd = tsd_fetch();
    witness_assert_lockless(tsd_tsdn(tsd));
    ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
    witness_assert_lockless(tsd_tsdn(tsd));
    return (ret);
}
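
/*
 * Illustrative usage sketch (not part of the library): je_mallctl() and the
 * mib variants above back the public mallctl(3) interface.  A read-only query
 * looks like:
 *
 *	unsigned narenas;
 *	size_t sz = sizeof(narenas);
 *	mallctl("arenas.narenas", &narenas, &sz, NULL, 0);
 *
 * mallctlnametomib()/mallctlbymib() cache the name lookup for repeated
 * queries of the same node.
 */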

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{
    tsdn_t *tsdn;

    tsdn = tsdn_fetch();
    witness_assert_lockless(tsdn);
    stats_print(write_cb, cbopaque, opts);
    witness_assert_lockless(tsdn);
}

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
    size_t ret;
    tsdn_t *tsdn;

    assert(malloc_initialized() || IS_INITIALIZER);
    malloc_thread_init();

    tsdn = tsdn_fetch();
    witness_assert_lockless(tsdn);

    if (config_ivsalloc)
        ret = ivsalloc(tsdn, ptr, config_prof);
    else
        ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);

    witness_assert_lockless(tsdn);
    return (ret);
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin compatibility functions.
 */

#define ALLOCM_LG_ALIGN(la) (la)
#define ALLOCM_ALIGN(a) (ffsl(a)-1)
#define ALLOCM_ZERO ((int)0x40)
#define ALLOCM_NO_MOVE ((int)0x80)

#define ALLOCM_SUCCESS 0
#define ALLOCM_ERR_OOM 1
#define ALLOCM_ERR_NOT_MOVED 2

int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
    void *p;

    assert(ptr != NULL);

    p = je_mallocx(size, flags);
    if (p == NULL)
        return (ALLOCM_ERR_OOM);
    if (rsize != NULL)
        *rsize = isalloc(tsdn_fetch(), p, config_prof);
    *ptr = p;
    return (ALLOCM_SUCCESS);
}

int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
    int ret;
    bool no_move = flags & ALLOCM_NO_MOVE;

    assert(ptr != NULL);
    assert(*ptr != NULL);
    assert(size != 0);
    assert(SIZE_T_MAX - size >= extra);

    if (no_move) {
        size_t usize = je_xallocx(*ptr, size, extra, flags);
        ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
        if (rsize != NULL)
            *rsize = usize;
    } else {
        void *p = je_rallocx(*ptr, size+extra, flags);
        if (p != NULL) {
            *ptr = p;
            ret = ALLOCM_SUCCESS;
        } else
            ret = ALLOCM_ERR_OOM;
        if (rsize != NULL)
            *rsize = isalloc(tsdn_fetch(), *ptr, config_prof);
    }
    return (ret);
}

int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{

    assert(rsize != NULL);
    *rsize = je_sallocx(ptr, flags);
    return (ALLOCM_SUCCESS);
}

int
je_dallocm(void *ptr, int flags)
{

    je_dallocx(ptr, flags);
    return (ALLOCM_SUCCESS);
}

int
je_nallocm(size_t *rsize, size_t size, int flags)
{
    size_t usize;

    usize = je_nallocx(size, flags);
    if (usize == 0)
        return (ALLOCM_ERR_OOM);
    if (rsize != NULL)
        *rsize = usize;
    return (ALLOCM_SUCCESS);
}

#undef ALLOCM_LG_ALIGN
#undef ALLOCM_ALIGN
#undef ALLOCM_ZERO
#undef ALLOCM_NO_MOVE

#undef ALLOCM_SUCCESS
#undef ALLOCM_ERR_OOM
#undef ALLOCM_ERR_NOT_MOVED
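
/*
 * Illustrative note (not part of the library): the *allocm() shims above keep
 * the retired experimental API working by delegating to the *allocx()
 * functions; the flag bits are passed through unchanged, so e.g.
 *
 *	je_allocm(&p, &rsize, 4096, ALLOCM_ZERO);
 *
 * behaves like p = je_mallocx(4096, MALLOCX_ZERO), with rsize set to the
 * usable size.
 */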

/*
 * End compatibility functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be possible
 * to trigger the deadlock described above, but doing so would involve forking
 * via a library constructor that runs before jemalloc's runs.
 */
#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

    malloc_init();
}
#endif

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
    tsd_t *tsd;
    unsigned i, j, narenas;
    arena_t *arena;

#ifdef JEMALLOC_MUTEX_INIT_CB
    if (!malloc_initialized())
        return;
#endif
    assert(malloc_initialized());

    tsd = tsd_fetch();

    narenas = narenas_total_get();

    witness_prefork(tsd);
    /* Acquire all mutexes in a safe order. */
    ctl_prefork(tsd_tsdn(tsd));
    malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
    prof_prefork0(tsd_tsdn(tsd));
    for (i = 0; i < 3; i++) {
        for (j = 0; j < narenas; j++) {
            if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
                NULL) {
                switch (i) {
                case 0:
                    arena_prefork0(tsd_tsdn(tsd), arena);
                    break;
                case 1:
                    arena_prefork1(tsd_tsdn(tsd), arena);
                    break;
                case 2:
                    arena_prefork2(tsd_tsdn(tsd), arena);
                    break;
                default: not_reached();
                }
            }
        }
    }
    base_prefork(tsd_tsdn(tsd));
    for (i = 0; i < narenas; i++) {
        if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
            arena_prefork3(tsd_tsdn(tsd), arena);
    }
    prof_prefork1(tsd_tsdn(tsd));
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
    tsd_t *tsd;
    unsigned i, narenas;

#ifdef JEMALLOC_MUTEX_INIT_CB
    if (!malloc_initialized())
        return;
#endif
    assert(malloc_initialized());

    tsd = tsd_fetch();

    witness_postfork_parent(tsd);
    /* Release all mutexes, now that fork() has completed. */
    base_postfork_parent(tsd_tsdn(tsd));
    for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
        arena_t *arena;

        if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
            arena_postfork_parent(tsd_tsdn(tsd), arena);
    }
    prof_postfork_parent(tsd_tsdn(tsd));
    malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
    ctl_postfork_parent(tsd_tsdn(tsd));
}

void
jemalloc_postfork_child(void)
{
    tsd_t *tsd;
    unsigned i, narenas;

    assert(malloc_initialized());

    tsd = tsd_fetch();

    witness_postfork_child(tsd);
    /* Release all mutexes, now that fork() has completed. */
    base_postfork_child(tsd_tsdn(tsd));
    for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
        arena_t *arena;

        if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
            arena_postfork_child(tsd_tsdn(tsd), arena);
    }
    prof_postfork_child(tsd_tsdn(tsd));
    malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
    ctl_postfork_child(tsd_tsdn(tsd));
}

void
_malloc_first_thread(void)
{

    (void)malloc_mutex_first_thread();
}

/******************************************************************************/
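
/*
 * Illustrative note (not part of the library): as the comment above the fork
 * handlers says, they are registered during initialization using
 * pthread_atfork().  Conceptually the registration looks like:
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * On platforms that define JEMALLOC_MUTEX_INIT_CB, the threading library is
 * instead expected to call the exported _malloc_prefork()/_malloc_postfork()
 * entry points around fork().
 */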