#define	JEMALLOC_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

/* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */
const char	*__malloc_options_1_0 = NULL;
__sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0);

/* Runtime configuration options. */
const char	*je_malloc_conf
#ifndef _WIN32
    JEMALLOC_ATTR(weak)
#endif
    ;
bool	opt_abort =
#ifdef JEMALLOC_DEBUG
    true
#else
    false
#endif
    ;
const char	*opt_junk =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    "true"
#else
    "false"
#endif
    ;
bool	opt_junk_alloc =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;
bool	opt_junk_free =
#if (defined(JEMALLOC_DEBUG) && defined(JEMALLOC_FILL))
    true
#else
    false
#endif
    ;

size_t	opt_quarantine = ZU(0);
bool	opt_redzone = false;
bool	opt_utrace = false;
bool	opt_xmalloc = false;
bool	opt_zero = false;
unsigned	opt_narenas = 0;

/* Initialized to true if the process is running inside Valgrind. */
bool	in_valgrind;

unsigned	ncpus;

/* Protects arenas initialization. */
static malloc_mutex_t	arenas_lock;
/*
 * Arenas that are used to service external requests.  Not all elements of the
 * arenas array are necessarily used; arenas are created lazily as needed.
 *
 * arenas[0..narenas_auto) are used for automatic multiplexing of threads and
 * arenas.  arenas[narenas_auto..narenas_total) are only used if the
 * application takes some action to create them and allocate from them.
 */
arena_t		**arenas;
static unsigned	narenas_total; /* Use narenas_total_*(). */
static arena_t	*a0; /* arenas[0]; read-only after initialization. */
unsigned	narenas_auto; /* Read-only after initialization. */

typedef enum {
	malloc_init_uninitialized	= 3,
	malloc_init_a0_initialized	= 2,
	malloc_init_recursible		= 1,
	malloc_init_initialized		= 0 /* Common case --> jnz. */
} malloc_init_t;
static malloc_init_t	malloc_init_state = malloc_init_uninitialized;

/* False should be the common case.  Set to true to trigger initialization. */
static bool	malloc_slow = true;
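/*
 * Note (illustrative): malloc_slow gates the fast paths of the public entry
 * points.  Each entry point tests it once and dispatches to an inlined body
 * specialized on a statically known slow_path argument, e.g. the pattern used
 * by je_malloc() later in this file:
 *
 *	if (likely(!malloc_slow))
 *		ret = ialloc_body(size, false, &tsdn, &usize, false);
 *	else
 *		ret = ialloc_body(size, false, &tsdn, &usize, true);
 */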
/* When malloc_slow is true, set the corresponding bits for sanity check. */
enum {
	flag_opt_junk_alloc	= (1U),
	flag_opt_junk_free	= (1U << 1),
	flag_opt_quarantine	= (1U << 2),
	flag_opt_zero		= (1U << 3),
	flag_opt_utrace		= (1U << 4),
	flag_in_valgrind	= (1U << 5),
	flag_opt_xmalloc	= (1U << 6)
};
static uint8_t	malloc_slow_flags;

JEMALLOC_ALIGNED(CACHELINE)
const size_t	pind2sz_tab[NPSIZES] = {
#define	PSZ_yes(lg_grp, ndelta, lg_delta)				\
	(((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))),
#define	PSZ_no(lg_grp, ndelta, lg_delta)
#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup)	\
	PSZ_##psz(lg_grp, ndelta, lg_delta)
	SIZE_CLASSES
#undef PSZ_yes
#undef PSZ_no
#undef SC
};

JEMALLOC_ALIGNED(CACHELINE)
const size_t	index2size_tab[NSIZES] = {
#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup)	\
	((ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)),
	SIZE_CLASSES
#undef SC
};

JEMALLOC_ALIGNED(CACHELINE)
const uint8_t	size2index_tab[] = {
#if LG_TINY_MIN == 0
#warning "Dangerous LG_TINY_MIN"
#define	S2B_0(i)	i,
#elif LG_TINY_MIN == 1
#warning "Dangerous LG_TINY_MIN"
#define	S2B_1(i)	i,
#elif LG_TINY_MIN == 2
#warning "Dangerous LG_TINY_MIN"
#define	S2B_2(i)	i,
#elif LG_TINY_MIN == 3
#define	S2B_3(i)	i,
#elif LG_TINY_MIN == 4
#define	S2B_4(i)	i,
#elif LG_TINY_MIN == 5
#define	S2B_5(i)	i,
#elif LG_TINY_MIN == 6
#define	S2B_6(i)	i,
#elif LG_TINY_MIN == 7
#define	S2B_7(i)	i,
#elif LG_TINY_MIN == 8
#define	S2B_8(i)	i,
#elif LG_TINY_MIN == 9
#define	S2B_9(i)	i,
#elif LG_TINY_MIN == 10
#define	S2B_10(i)	i,
#elif LG_TINY_MIN == 11
#define	S2B_11(i)	i,
#else
#error "Unsupported LG_TINY_MIN"
#endif
#if LG_TINY_MIN < 1
#define	S2B_1(i)	S2B_0(i) S2B_0(i)
#endif
#if LG_TINY_MIN < 2
#define	S2B_2(i)	S2B_1(i) S2B_1(i)
#endif
#if LG_TINY_MIN < 3
#define	S2B_3(i)	S2B_2(i) S2B_2(i)
#endif
#if LG_TINY_MIN < 4
#define	S2B_4(i)	S2B_3(i) S2B_3(i)
#endif
#if LG_TINY_MIN < 5
#define	S2B_5(i)	S2B_4(i) S2B_4(i)
#endif
#if LG_TINY_MIN < 6
#define	S2B_6(i)	S2B_5(i) S2B_5(i)
#endif
#if LG_TINY_MIN < 7
#define	S2B_7(i)	S2B_6(i) S2B_6(i)
#endif
#if LG_TINY_MIN < 8
#define	S2B_8(i)	S2B_7(i) S2B_7(i)
#endif
#if LG_TINY_MIN < 9
#define	S2B_9(i)	S2B_8(i) S2B_8(i)
#endif
#if LG_TINY_MIN < 10
#define	S2B_10(i)	S2B_9(i) S2B_9(i)
#endif
#if LG_TINY_MIN < 11
#define	S2B_11(i)	S2B_10(i) S2B_10(i)
#endif
#define	S2B_no(i)
#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup)	\
	S2B_##lg_delta_lookup(index)
	SIZE_CLASSES
#undef S2B_3
#undef S2B_4
#undef S2B_5
#undef S2B_6
#undef S2B_7
#undef S2B_8
#undef S2B_9
#undef S2B_10
#undef S2B_11
#undef S2B_no
#undef SC
};
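/*
 * Illustrative note: size2index_tab supports small-size lookups of the form
 * size2index_tab[(size - 1) >> LG_TINY_MIN] (see the size2index fast path in
 * the internal headers).  The S2B_* macros above simply repeat each size
 * class's index once per LG_TINY_MIN-sized quantum it spans; e.g. with
 * LG_TINY_MIN == 3, a 16-byte class contributes two consecutive table
 * entries, so requests of 9..16 bytes map to its index.
 */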
#ifdef JEMALLOC_THREADED_INIT
/* Used to let the initializing thread recursively allocate. */
#  define NO_INITIALIZER	((unsigned long)0)
#  define INITIALIZER		pthread_self()
#  define IS_INITIALIZER	(malloc_initializer == pthread_self())
static pthread_t		malloc_initializer = NO_INITIALIZER;
#else
#  define NO_INITIALIZER	false
#  define INITIALIZER		true
#  define IS_INITIALIZER	malloc_initializer
static bool			malloc_initializer = NO_INITIALIZER;
#endif

/* Used to avoid initialization races. */
#ifdef _WIN32
#if _WIN32_WINNT >= 0x0600
static malloc_mutex_t	init_lock = SRWLOCK_INIT;
#else
static malloc_mutex_t	init_lock;
static bool init_lock_initialized = false;

JEMALLOC_ATTR(constructor)
static void WINAPI
_init_init_lock(void)
{

	/* If another constructor in the same binary is using mallctl to
	 * e.g. set up chunk hooks, it may end up running before this one,
	 * and malloc_init_hard will crash trying to lock the uninitialized
	 * lock.  So we force an initialization of the lock in
	 * malloc_init_hard as well.  We don't try to care about atomicity
	 * of the accesses to the init_lock_initialized boolean, since it
	 * really only matters early in the process creation, before any
	 * separate thread normally starts doing anything. */
	if (!init_lock_initialized)
		malloc_mutex_init(&init_lock, "init", WITNESS_RANK_INIT);
	init_lock_initialized = true;
}

#ifdef _MSC_VER
#  pragma section(".CRT$XCU", read)
JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used)
static const void (WINAPI *init_init_lock)(void) = _init_init_lock;
#endif
#endif
#else
static malloc_mutex_t	init_lock = MALLOC_MUTEX_INITIALIZER;
#endif

typedef struct {
	void	*p;	/* Input pointer (as in realloc(p, s)). */
	size_t	s;	/* Request size. */
	void	*r;	/* Result pointer. */
} malloc_utrace_t;

#ifdef JEMALLOC_UTRACE
#  define UTRACE(a, b, c) do {						\
	if (unlikely(opt_utrace)) {					\
		int utrace_serrno = errno;				\
		malloc_utrace_t ut;					\
		ut.p = (a);						\
		ut.s = (b);						\
		ut.r = (c);						\
		utrace(&ut, sizeof(ut));				\
		errno = utrace_serrno;					\
	}								\
} while (0)
#else
#  define UTRACE(a, b, c)
#endif
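/*
 * Illustrative example (assumes a build configured with --enable-utrace):
 * with MALLOC_CONF="utrace:true", a realloc(p, 123) returning q emits a
 * malloc_utrace_t record {p, 123, q} via utrace(2); malloc(16) returning r
 * emits {NULL, 16, r}, and free(r) emits {r, 0, NULL}.  The records can be
 * examined with the platform's ktrace/kdump tooling.
 */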
/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static bool	malloc_init_hard_a0(void);
static bool	malloc_init_hard(void);

/******************************************************************************/
/*
 * Begin miscellaneous support functions.
 */

JEMALLOC_ALWAYS_INLINE_C bool
malloc_initialized(void)
{

	return (malloc_init_state == malloc_init_initialized);
}

JEMALLOC_ALWAYS_INLINE_C void
malloc_thread_init(void)
{

	/*
	 * TSD initialization can't be safely done as a side effect of
	 * deallocation, because it is possible for a thread to do nothing but
	 * deallocate its TLS data via free(), in which case writing to TLS
	 * would cause write-after-free memory corruption.  The quarantine
	 * facility *only* gets used as a side effect of deallocation, so make
	 * a best effort attempt at initializing its TSD by hooking all
	 * allocation events.
	 */
	if (config_fill && unlikely(opt_quarantine))
		quarantine_alloc_hook();
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init_a0(void)
{

	if (unlikely(malloc_init_state == malloc_init_uninitialized))
		return (malloc_init_hard_a0());
	return (false);
}

JEMALLOC_ALWAYS_INLINE_C bool
malloc_init(void)
{

	if (unlikely(!malloc_initialized()) && malloc_init_hard())
		return (true);
	malloc_thread_init();

	return (false);
}

/*
 * The a0*() functions are used instead of i{d,}alloc() in situations that
 * cannot tolerate TLS variable access.
 */

static void *
a0ialloc(size_t size, bool zero, bool is_metadata)
{

	if (unlikely(malloc_init_a0()))
		return (NULL);

	return (iallocztm(TSDN_NULL, size, size2index(size), zero, NULL,
	    is_metadata, arena_get(TSDN_NULL, 0, true), true));
}

static void
a0idalloc(void *ptr, bool is_metadata)
{

	idalloctm(TSDN_NULL, ptr, false, is_metadata, true);
}

arena_t *
a0get(void)
{

	return (a0);
}

void *
a0malloc(size_t size)
{

	return (a0ialloc(size, false, true));
}

void
a0dalloc(void *ptr)
{

	a0idalloc(ptr, true);
}

/*
 * FreeBSD's libc uses the bootstrap_*() functions in bootstrap-sensitive
 * situations that cannot tolerate TLS variable access (TLS allocation and very
 * early internal data structure initialization).
 */

void *
bootstrap_malloc(size_t size)
{

	if (unlikely(size == 0))
		size = 1;

	return (a0ialloc(size, false, false));
}

void *
bootstrap_calloc(size_t num, size_t size)
{
	size_t num_size;

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		assert(num == 0 || size == 0);
		num_size = 1;
	}

	return (a0ialloc(num_size, true, false));
}

void
bootstrap_free(void *ptr)
{

	if (unlikely(ptr == NULL))
		return;

	a0idalloc(ptr, false);
}

static void
arena_set(unsigned ind, arena_t *arena)
{

	atomic_write_p((void **)&arenas[ind], arena);
}

static void
narenas_total_set(unsigned narenas)
{

	atomic_write_u(&narenas_total, narenas);
}

static void
narenas_total_inc(void)
{

	atomic_add_u(&narenas_total, 1);
}

unsigned
narenas_total_get(void)
{

	return (atomic_read_u(&narenas_total));
}

/* Create a new arena and insert it into the arenas array at index ind. */
static arena_t *
arena_init_locked(tsdn_t *tsdn, unsigned ind)
{
	arena_t *arena;

	assert(ind <= narenas_total_get());
	if (ind > MALLOCX_ARENA_MAX)
		return (NULL);
	if (ind == narenas_total_get())
		narenas_total_inc();

	/*
	 * Another thread may have already initialized arenas[ind] if it's an
	 * auto arena.
	 */
	arena = arena_get(tsdn, ind, false);
	if (arena != NULL) {
		assert(ind < narenas_auto);
		return (arena);
	}

	/* Actually initialize the arena. */
	arena = arena_new(tsdn, ind);
	arena_set(ind, arena);
	return (arena);
}

arena_t *
arena_init(tsdn_t *tsdn, unsigned ind)
{
	arena_t *arena;

	malloc_mutex_lock(tsdn, &arenas_lock);
	arena = arena_init_locked(tsdn, ind);
	malloc_mutex_unlock(tsdn, &arenas_lock);
	return (arena);
}

static void
arena_bind(tsd_t *tsd, unsigned ind, bool internal)
{
	arena_t *arena;

	if (!tsd_nominal(tsd))
		return;

	arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_inc(arena, internal);

	if (internal)
		tsd_iarena_set(tsd, arena);
	else
		tsd_arena_set(tsd, arena);
}

void
arena_migrate(tsd_t *tsd, unsigned oldind, unsigned newind)
{
	arena_t *oldarena, *newarena;

	oldarena = arena_get(tsd_tsdn(tsd), oldind, false);
	newarena = arena_get(tsd_tsdn(tsd), newind, false);
	arena_nthreads_dec(oldarena, false);
	arena_nthreads_inc(newarena, false);
	tsd_arena_set(tsd, newarena);
}

static void
arena_unbind(tsd_t *tsd, unsigned ind, bool internal)
{
	arena_t *arena;

	arena = arena_get(tsd_tsdn(tsd), ind, false);
	arena_nthreads_dec(arena, internal);
	if (internal)
		tsd_iarena_set(tsd, NULL);
	else
		tsd_arena_set(tsd, NULL);
}

arena_tdata_t *
arena_tdata_get_hard(tsd_t *tsd, unsigned ind)
{
	arena_tdata_t *tdata, *arenas_tdata_old;
	arena_tdata_t *arenas_tdata = tsd_arenas_tdata_get(tsd);
	unsigned narenas_tdata_old, i;
	unsigned narenas_tdata = tsd_narenas_tdata_get(tsd);
	unsigned narenas_actual = narenas_total_get();

	/*
	 * Dissociate old tdata array (and set up for deallocation upon return)
	 * if it's too small.
	 */
	if (arenas_tdata != NULL && narenas_tdata < narenas_actual) {
		arenas_tdata_old = arenas_tdata;
		narenas_tdata_old = narenas_tdata;
		arenas_tdata = NULL;
		narenas_tdata = 0;
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	} else {
		arenas_tdata_old = NULL;
		narenas_tdata_old = 0;
	}

	/* Allocate tdata array if it's missing. */
	if (arenas_tdata == NULL) {
		bool *arenas_tdata_bypassp = tsd_arenas_tdata_bypassp_get(tsd);
		narenas_tdata = (ind < narenas_actual) ? narenas_actual : ind+1;

		if (tsd_nominal(tsd) && !*arenas_tdata_bypassp) {
			*arenas_tdata_bypassp = true;
			arenas_tdata = (arena_tdata_t *)a0malloc(
			    sizeof(arena_tdata_t) * narenas_tdata);
			*arenas_tdata_bypassp = false;
		}
		if (arenas_tdata == NULL) {
			tdata = NULL;
			goto label_return;
		}
		assert(tsd_nominal(tsd) && !*arenas_tdata_bypassp);
		tsd_arenas_tdata_set(tsd, arenas_tdata);
		tsd_narenas_tdata_set(tsd, narenas_tdata);
	}

	/*
	 * Copy to tdata array.  It's possible that the actual number of arenas
	 * has increased since narenas_total_get() was called above, but that
	 * causes no correctness issues unless two threads concurrently execute
	 * the arenas.extend mallctl, which we trust mallctl synchronization to
	 * prevent.
	 */

	/* Copy/initialize tickers. */
	for (i = 0; i < narenas_actual; i++) {
		if (i < narenas_tdata_old) {
			ticker_copy(&arenas_tdata[i].decay_ticker,
			    &arenas_tdata_old[i].decay_ticker);
		} else {
			ticker_init(&arenas_tdata[i].decay_ticker,
			    DECAY_NTICKS_PER_UPDATE);
		}
	}
	if (narenas_tdata > narenas_actual) {
		memset(&arenas_tdata[narenas_actual], 0, sizeof(arena_tdata_t)
		    * (narenas_tdata - narenas_actual));
	}

	/* Read the refreshed tdata array. */
	tdata = &arenas_tdata[ind];
label_return:
	if (arenas_tdata_old != NULL)
		a0dalloc(arenas_tdata_old);
	return (tdata);
}

/* Slow path, called only by arena_choose(). */
arena_t *
arena_choose_hard(tsd_t *tsd, bool internal)
{
	arena_t *ret JEMALLOC_CC_SILENCE_INIT(NULL);

	if (narenas_auto > 1) {
		unsigned i, j, choose[2], first_null;

		/*
		 * Determine binding for both non-internal and internal
		 * allocation.
		 *
		 *   choose[0]: For application allocation.
		 *   choose[1]: For internal metadata allocation.
		 */

		for (j = 0; j < 2; j++)
			choose[j] = 0;

		first_null = narenas_auto;
		malloc_mutex_lock(tsd_tsdn(tsd), &arenas_lock);
		assert(arena_get(tsd_tsdn(tsd), 0, false) != NULL);
		for (i = 1; i < narenas_auto; i++) {
			if (arena_get(tsd_tsdn(tsd), i, false) != NULL) {
				/*
				 * Choose the first arena that has the lowest
				 * number of threads assigned to it.
				 */
				for (j = 0; j < 2; j++) {
					if (arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), i, false), !!j) <
					    arena_nthreads_get(arena_get(
					    tsd_tsdn(tsd), choose[j], false),
					    !!j))
						choose[j] = i;
				}
			} else if (first_null == narenas_auto) {
				/*
				 * Record the index of the first uninitialized
				 * arena, in case all extant arenas are in use.
				 *
				 * NB: It is possible for there to be
				 * discontinuities in terms of initialized
				 * versus uninitialized arenas, due to the
				 * "thread.arena" mallctl.
				 */
				first_null = i;
			}
		}

		for (j = 0; j < 2; j++) {
			if (arena_nthreads_get(arena_get(tsd_tsdn(tsd),
			    choose[j], false), !!j) == 0 || first_null ==
			    narenas_auto) {
				/*
				 * Use an unloaded arena, or the least loaded
				 * arena if all arenas are already initialized.
				 */
				if (!!j == internal) {
					ret = arena_get(tsd_tsdn(tsd),
					    choose[j], false);
				}
			} else {
				arena_t *arena;

				/* Initialize a new arena. */
				choose[j] = first_null;
				arena = arena_init_locked(tsd_tsdn(tsd),
				    choose[j]);
				if (arena == NULL) {
					malloc_mutex_unlock(tsd_tsdn(tsd),
					    &arenas_lock);
					return (NULL);
				}
				if (!!j == internal)
					ret = arena;
			}
			arena_bind(tsd, choose[j], !!j);
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &arenas_lock);
	} else {
		ret = arena_get(tsd_tsdn(tsd), 0, false);
		arena_bind(tsd, 0, false);
		arena_bind(tsd, 0, true);
	}

	return (ret);
}

void
thread_allocated_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

void
thread_deallocated_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

void
iarena_cleanup(tsd_t *tsd)
{
	arena_t *iarena;

	iarena = tsd_iarena_get(tsd);
	if (iarena != NULL)
		arena_unbind(tsd, iarena->ind, true);
}

void
arena_cleanup(tsd_t *tsd)
{
	arena_t *arena;

	arena = tsd_arena_get(tsd);
	if (arena != NULL)
		arena_unbind(tsd, arena->ind, false);
}

void
arenas_tdata_cleanup(tsd_t *tsd)
{
	arena_tdata_t *arenas_tdata;

	/* Prevent tsd->arenas_tdata from being (re)created. */
	*tsd_arenas_tdata_bypassp_get(tsd) = true;

	arenas_tdata = tsd_arenas_tdata_get(tsd);
	if (arenas_tdata != NULL) {
		tsd_arenas_tdata_set(tsd, NULL);
		a0dalloc(arenas_tdata);
	}
}

void
narenas_tdata_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

void
arenas_tdata_bypass_cleanup(tsd_t *tsd)
{

	/* Do nothing. */
}

static void
stats_print_atexit(void)
{

	if (config_tcache && config_stats) {
		tsdn_t *tsdn;
		unsigned narenas, i;

		tsdn = tsdn_fetch();

		/*
		 * Merge stats from extant threads.  This is racy, since
		 * individual threads do not lock when recording tcache stats
		 * events.  As a consequence, the final stats may be slightly
		 * out of date by the time they are reported, if other threads
		 * continue to allocate.
		 */
		for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
			arena_t *arena = arena_get(tsdn, i, false);
			if (arena != NULL) {
				tcache_t *tcache;

				/*
				 * tcache_stats_merge() locks bins, so if any
				 * code is introduced that acquires both arena
				 * and bin locks in the opposite order,
				 * deadlocks may result.
				 */
				malloc_mutex_lock(tsdn, &arena->lock);
				ql_foreach(tcache, &arena->tcache_ql, link) {
					tcache_stats_merge(tsdn, tcache, arena);
				}
				malloc_mutex_unlock(tsdn, &arena->lock);
			}
		}
	}
	je_malloc_stats_print(NULL, NULL, NULL);
}
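/*
 * Usage note (illustrative): stats_print_atexit() is only registered when the
 * stats_print option is enabled, e.g. by running the program with
 * MALLOC_CONF="stats_print:true"; the merged statistics are then written via
 * the default write callback (typically to stderr) when the process exits.
 */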

/*
 * End miscellaneous support functions.
 */
/******************************************************************************/
/*
 * Begin initialization functions.
 */

#ifndef JEMALLOC_HAVE_SECURE_GETENV
static char *
secure_getenv(const char *name)
{

#  ifdef JEMALLOC_HAVE_ISSETUGID
	if (issetugid() != 0)
		return (NULL);
#  endif
	return (getenv(name));
}
#endif

static unsigned
malloc_ncpus(void)
{
	long result;

#ifdef _WIN32
	SYSTEM_INFO si;
	GetSystemInfo(&si);
	result = si.dwNumberOfProcessors;
#elif defined(JEMALLOC_GLIBC_MALLOC_HOOK) && defined(CPU_COUNT)
	/*
	 * glibc >= 2.6 has the CPU_COUNT macro.
	 *
	 * glibc's sysconf() uses isspace().  glibc allocates for the first time
	 * *before* setting up the isspace tables.  Therefore we need a
	 * different method to get the number of CPUs.
	 */
	{
		cpu_set_t set;

		pthread_getaffinity_np(pthread_self(), sizeof(set), &set);
		result = CPU_COUNT(&set);
	}
#else
	result = sysconf(_SC_NPROCESSORS_ONLN);
#endif
	return ((result == -1) ? 1 : (unsigned)result);
}

static bool
malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p,
    char const **v_p, size_t *vlen_p)
{
	bool accept;
	const char *opts = *opts_p;

	*k_p = opts;

	for (accept = false; !accept;) {
		switch (*opts) {
		case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
		case 'G': case 'H': case 'I': case 'J': case 'K': case 'L':
		case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R':
		case 'S': case 'T': case 'U': case 'V': case 'W': case 'X':
		case 'Y': case 'Z':
		case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
		case 'g': case 'h': case 'i': case 'j': case 'k': case 'l':
		case 'm': case 'n': case 'o': case 'p': case 'q': case 'r':
		case 's': case 't': case 'u': case 'v': case 'w': case 'x':
		case 'y': case 'z':
		case '0': case '1': case '2': case '3': case '4': case '5':
		case '6': case '7': case '8': case '9':
		case '_':
			opts++;
			break;
		case ':':
			opts++;
			*klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p;
			*v_p = opts;
			accept = true;
			break;
		case '\0':
			if (opts != *opts_p) {
				malloc_write("<jemalloc>: Conf string ends "
				    "with key\n");
			}
			return (true);
		default:
			malloc_write("<jemalloc>: Malformed conf string\n");
			return (true);
		}
	}

	for (accept = false; !accept;) {
		switch (*opts) {
		case ',':
			opts++;
			/*
			 * Look ahead one character here, because the next time
			 * this function is called, it will assume that end of
			 * input has been cleanly reached if no input remains,
			 * but we have optimistically already consumed the
			 * comma if one exists.
			 */
			if (*opts == '\0') {
				malloc_write("<jemalloc>: Conf string ends "
				    "with comma\n");
			}
			*vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p;
			accept = true;
			break;
		case '\0':
			*vlen_p = (uintptr_t)opts - (uintptr_t)*v_p;
			accept = true;
			break;
		default:
			opts++;
			break;
		}
	}

	*opts_p = opts;
	return (false);
}
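/*
 * Illustrative example: given the conf string "narenas:4,junk:true",
 * successive calls to malloc_conf_next() yield (k="narenas", klen=7, v="4",
 * vlen=1) and then (k="junk", klen=4, v="true", vlen=4), returning false each
 * time; a further call with no remaining input returns true.
 */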

static void
malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v,
    size_t vlen)
{

	malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k,
	    (int)vlen, v);
}

static void
malloc_slow_flag_init(void)
{
	/*
	 * Combine the runtime options into malloc_slow for fast path.  Called
	 * after processing all the options.
	 */
	malloc_slow_flags |= (opt_junk_alloc ? flag_opt_junk_alloc : 0)
	    | (opt_junk_free ? flag_opt_junk_free : 0)
	    | (opt_quarantine ? flag_opt_quarantine : 0)
	    | (opt_zero ? flag_opt_zero : 0)
	    | (opt_utrace ? flag_opt_utrace : 0)
	    | (opt_xmalloc ? flag_opt_xmalloc : 0);

	if (config_valgrind)
		malloc_slow_flags |= (in_valgrind ? flag_in_valgrind : 0);

	malloc_slow = (malloc_slow_flags != 0);
}

static void
malloc_conf_init(void)
{
	unsigned i;
	char buf[PATH_MAX + 1];
	const char *opts, *k, *v;
	size_t klen, vlen;

	/*
	 * Automatically configure valgrind before processing options.  The
	 * valgrind option remains in jemalloc 3.x for compatibility reasons.
	 */
	if (config_valgrind) {
		in_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false;
		if (config_fill && unlikely(in_valgrind)) {
			opt_junk = "false";
			opt_junk_alloc = false;
			opt_junk_free = false;
			assert(!opt_zero);
			opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT;
			opt_redzone = true;
		}
		if (config_tcache && unlikely(in_valgrind))
			opt_tcache = false;
	}

	for (i = 0; i < 4; i++) {
		/* Get runtime configuration. */
		switch (i) {
		case 0:
			opts = config_malloc_conf;
			break;
		case 1:
			if (je_malloc_conf != NULL) {
				/*
				 * Use options that were compiled into the
				 * program.
				 */
				opts = je_malloc_conf;
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		case 2: {
			ssize_t linklen = 0;
#ifndef _WIN32
			int saved_errno = errno;
			const char *linkname =
#  ifdef JEMALLOC_PREFIX
			    "/etc/"JEMALLOC_PREFIX"malloc.conf"
#  else
			    "/etc/malloc.conf"
#  endif
			    ;

			/*
			 * Try to use the contents of the "/etc/malloc.conf"
			 * symbolic link's name.
			 */
			linklen = readlink(linkname, buf, sizeof(buf) - 1);
			if (linklen == -1) {
				/* No configuration specified. */
				linklen = 0;
				/* Restore errno. */
				set_errno(saved_errno);
			}
#endif
			buf[linklen] = '\0';
			opts = buf;
			break;
		} case 3: {
			const char *envname =
#ifdef JEMALLOC_PREFIX
			    JEMALLOC_CPREFIX"MALLOC_CONF"
#else
			    "MALLOC_CONF"
#endif
			    ;

			if ((opts = secure_getenv(envname)) != NULL) {
				/*
				 * Do nothing; opts is already initialized to
				 * the value of the MALLOC_CONF environment
				 * variable.
				 */
			} else {
				/* No configuration specified. */
				buf[0] = '\0';
				opts = buf;
			}
			break;
		} default:
			not_reached();
			buf[0] = '\0';
			opts = buf;
		}

		while (*opts != '\0' && !malloc_conf_next(&opts, &k, &klen, &v,
		    &vlen)) {
#define	CONF_MATCH(n)							\
	(sizeof(n)-1 == klen && strncmp(n, k, klen) == 0)
#define	CONF_MATCH_VALUE(n)						\
	(sizeof(n)-1 == vlen && strncmp(n, v, vlen) == 0)
#define	CONF_HANDLE_BOOL(o, n, cont)					\
			if (CONF_MATCH(n)) {				\
				if (CONF_MATCH_VALUE("true"))		\
					o = true;			\
				else if (CONF_MATCH_VALUE("false"))	\
					o = false;			\
				else {					\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				}					\
				if (cont)				\
					continue;			\
			}
#define	CONF_HANDLE_T_U(t, o, n, min, max, clip)			\
			if (CONF_MATCH(n)) {				\
				uintmax_t um;				\
				char *end;				\
									\
				set_errno(0);				\
				um = malloc_strtoumax(v, &end, 0);	\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (clip) {			\
					if ((min) != 0 && um < (min))	\
						o = (t)(min);		\
					else if (um > (max))		\
						o = (t)(max);		\
					else				\
						o = (t)um;		\
				} else {				\
					if (((min) != 0 && um < (min))	\
					    || um > (max)) {		\
						malloc_conf_error(	\
						    "Out-of-range "	\
						    "conf value",	\
						    k, klen, v, vlen);	\
					} else				\
						o = (t)um;		\
				}					\
				continue;				\
			}
#define	CONF_HANDLE_UNSIGNED(o, n, min, max, clip)			\
			CONF_HANDLE_T_U(unsigned, o, n, min, max, clip)
#define	CONF_HANDLE_SIZE_T(o, n, min, max, clip)			\
			CONF_HANDLE_T_U(size_t, o, n, min, max, clip)
#define	CONF_HANDLE_SSIZE_T(o, n, min, max)				\
			if (CONF_MATCH(n)) {				\
				long l;					\
				char *end;				\
									\
				set_errno(0);				\
				l = strtol(v, &end, 0);			\
				if (get_errno() != 0 || (uintptr_t)end -\
				    (uintptr_t)v != vlen) {		\
					malloc_conf_error(		\
					    "Invalid conf value",	\
					    k, klen, v, vlen);		\
				} else if (l < (ssize_t)(min) || l >	\
				    (ssize_t)(max)) {			\
					malloc_conf_error(		\
					    "Out-of-range conf value",	\
					    k, klen, v, vlen);		\
				} else					\
					o = l;				\
				continue;				\
			}
#define	CONF_HANDLE_CHAR_P(o, n, d)					\
			if (CONF_MATCH(n)) {				\
				size_t cpylen = (vlen <=		\
				    sizeof(o)-1) ? vlen :		\
				    sizeof(o)-1;			\
				strncpy(o, v, cpylen);			\
				o[cpylen] = '\0';			\
				continue;				\
			}
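			/*
			 * Illustrative expansion (assuming the macros above):
			 * for the "abort" option handled below,
			 * CONF_HANDLE_BOOL(opt_abort, "abort", true) behaves
			 * like
			 *
			 *	if (klen matches "abort" && key equals "abort") {
			 *		if (value is "true") opt_abort = true;
			 *		else if (value is "false") opt_abort = false;
			 *		else malloc_conf_error(...);
			 *		continue;
			 *	}
			 */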

			CONF_HANDLE_BOOL(opt_abort, "abort", true)
			/*
			 * Chunks always require at least one header page,
			 * as many as 2^(LG_SIZE_CLASS_GROUP+1) data pages, and
			 * possibly an additional page in the presence of
			 * redzones.  In order to simplify options processing,
			 * use a conservative bound that accommodates all these
			 * constraints.
			 */
			CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE +
			    LG_SIZE_CLASS_GROUP + (config_fill ? 2 : 1),
			    (sizeof(size_t) << 3) - 1, true)
			if (strncmp("dss", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < dss_prec_limit; i++) {
					if (strncmp(dss_prec_names[i], v, vlen)
					    == 0) {
						if (chunk_dss_prec_set(i)) {
							malloc_conf_error(
							    "Error setting dss",
							    k, klen, v, vlen);
						} else {
							opt_dss =
							    dss_prec_names[i];
							match = true;
							break;
						}
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_UNSIGNED(opt_narenas, "narenas", 1,
			    UINT_MAX, false)
			if (strncmp("purge", k, klen) == 0) {
				int i;
				bool match = false;
				for (i = 0; i < purge_mode_limit; i++) {
					if (strncmp(purge_mode_names[i], v,
					    vlen) == 0) {
						opt_purge = (purge_mode_t)i;
						match = true;
						break;
					}
				}
				if (!match) {
					malloc_conf_error("Invalid conf value",
					    k, klen, v, vlen);
				}
				continue;
			}
			CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult",
			    -1, (sizeof(size_t) << 3) - 1)
			CONF_HANDLE_SSIZE_T(opt_decay_time, "decay_time", -1,
			    NSTIME_SEC_MAX);
			CONF_HANDLE_BOOL(opt_stats_print, "stats_print", true)
			if (config_fill) {
				if (CONF_MATCH("junk")) {
					if (CONF_MATCH_VALUE("true")) {
						if (config_valgrind &&
						    unlikely(in_valgrind)) {
							malloc_conf_error(
							    "Deallocation-time "
							    "junk filling cannot "
							    "be enabled while "
							    "running inside "
							    "Valgrind", k, klen, v,
							    vlen);
						} else {
							opt_junk = "true";
							opt_junk_alloc = true;
							opt_junk_free = true;
						}
					} else if (CONF_MATCH_VALUE("false")) {
						opt_junk = "false";
						opt_junk_alloc = opt_junk_free =
						    false;
					} else if (CONF_MATCH_VALUE("alloc")) {
						opt_junk = "alloc";
						opt_junk_alloc = true;
						opt_junk_free = false;
					} else if (CONF_MATCH_VALUE("free")) {
						if (config_valgrind &&
						    unlikely(in_valgrind)) {
							malloc_conf_error(
							    "Deallocation-time "
							    "junk filling cannot "
							    "be enabled while "
							    "running inside "
							    "Valgrind", k, klen, v,
							    vlen);
						} else {
							opt_junk = "free";
							opt_junk_alloc = false;
							opt_junk_free = true;
						}
					} else {
						malloc_conf_error(
						    "Invalid conf value", k,
						    klen, v, vlen);
					}
					continue;
				}
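				/*
				 * Usage note (illustrative): e.g.
				 * MALLOC_CONF="junk:alloc" enables junk
				 * filling only at allocation time
				 * (opt_junk_alloc), "junk:free" fills only on
				 * deallocation (opt_junk_free), and
				 * "junk:true" enables both.
				 */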
				CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine",
				    0, SIZE_T_MAX, false)
				CONF_HANDLE_BOOL(opt_redzone, "redzone", true)
				CONF_HANDLE_BOOL(opt_zero, "zero", true)
			}
			if (config_utrace) {
				CONF_HANDLE_BOOL(opt_utrace, "utrace", true)
			}
			if (config_xmalloc) {
				CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc", true)
			}
			if (config_tcache) {
				CONF_HANDLE_BOOL(opt_tcache, "tcache",
				    !config_valgrind || !in_valgrind)
				if (CONF_MATCH("tcache")) {
					assert(config_valgrind && in_valgrind);
					if (opt_tcache) {
						opt_tcache = false;
						malloc_conf_error(
						    "tcache cannot be enabled "
						    "while running inside Valgrind",
						    k, klen, v, vlen);
					}
					continue;
				}
				CONF_HANDLE_SSIZE_T(opt_lg_tcache_max,
				    "lg_tcache_max", -1,
				    (sizeof(size_t) << 3) - 1)
			}
			if (config_prof) {
				CONF_HANDLE_BOOL(opt_prof, "prof", true)
				CONF_HANDLE_CHAR_P(opt_prof_prefix,
				    "prof_prefix", "jeprof")
				CONF_HANDLE_BOOL(opt_prof_active, "prof_active",
				    true)
				CONF_HANDLE_BOOL(opt_prof_thread_active_init,
				    "prof_thread_active_init", true)
				CONF_HANDLE_SIZE_T(opt_lg_prof_sample,
				    "lg_prof_sample", 0,
				    (sizeof(uint64_t) << 3) - 1, true)
				CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum",
				    true)
				CONF_HANDLE_SSIZE_T(opt_lg_prof_interval,
				    "lg_prof_interval", -1,
				    (sizeof(uint64_t) << 3) - 1)
				CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump",
				    true)
				CONF_HANDLE_BOOL(opt_prof_final, "prof_final",
				    true)
				CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak",
				    true)
			}
			malloc_conf_error("Invalid conf pair", k, klen, v,
			    vlen);
#undef CONF_MATCH
#undef CONF_HANDLE_BOOL
#undef CONF_HANDLE_SIZE_T
#undef CONF_HANDLE_SSIZE_T
#undef CONF_HANDLE_CHAR_P
		}
	}
}

static bool
malloc_init_hard_needed(void)
{

	if (malloc_initialized() || (IS_INITIALIZER && malloc_init_state ==
	    malloc_init_recursible)) {
		/*
		 * Another thread initialized the allocator before this one
		 * acquired init_lock, or this thread is the initializing
		 * thread, and it is recursively allocating.
		 */
		return (false);
	}
#ifdef JEMALLOC_THREADED_INIT
	if (malloc_initializer != NO_INITIALIZER && !IS_INITIALIZER) {
		spin_t spinner;

		/* Busy-wait until the initializing thread completes. */
		spin_init(&spinner);
		do {
			malloc_mutex_unlock(TSDN_NULL, &init_lock);
			spin_adaptive(&spinner);
			malloc_mutex_lock(TSDN_NULL, &init_lock);
		} while (!malloc_initialized());
		return (false);
	}
#endif
	return (true);
}

static bool
malloc_init_hard_a0_locked()
{

	malloc_initializer = INITIALIZER;

	if (config_prof)
		prof_boot0();
	malloc_conf_init();
	if (opt_stats_print) {
		/* Print statistics at exit. */
		if (atexit(stats_print_atexit) != 0) {
			malloc_write("<jemalloc>: Error in atexit()\n");
			if (opt_abort)
				abort();
		}
	}
	pages_boot();
	if (base_boot())
		return (true);
	if (chunk_boot())
		return (true);
	if (ctl_boot())
		return (true);
	if (config_prof)
		prof_boot1();
	arena_boot();
	if (config_tcache && tcache_boot(TSDN_NULL))
		return (true);
	if (malloc_mutex_init(&arenas_lock, "arenas", WITNESS_RANK_ARENAS))
		return (true);
	/*
	 * Create enough scaffolding to allow recursive allocation in
	 * malloc_ncpus().
	 */
	narenas_auto = 1;
	narenas_total_set(narenas_auto);
	arenas = &a0;
	memset(arenas, 0, sizeof(arena_t *) * narenas_auto);
	/*
	 * Initialize one arena here.  The rest are lazily created in
	 * arena_choose_hard().
	 */
	if (arena_init(TSDN_NULL, 0) == NULL)
		return (true);

	malloc_init_state = malloc_init_a0_initialized;

	return (false);
}

static bool
malloc_init_hard_a0(void)
{
	bool ret;

	malloc_mutex_lock(TSDN_NULL, &init_lock);
	ret = malloc_init_hard_a0_locked();
	malloc_mutex_unlock(TSDN_NULL, &init_lock);
	return (ret);
}

/* Initialize data structures which may trigger recursive allocation. */
static bool
malloc_init_hard_recursible(void)
{

	malloc_init_state = malloc_init_recursible;

	ncpus = malloc_ncpus();

#if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \
    && !defined(_WIN32) && !defined(__native_client__))
	/* LinuxThreads' pthread_atfork() allocates. */
	if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
	    jemalloc_postfork_child) != 0) {
		malloc_write("<jemalloc>: Error in pthread_atfork()\n");
		if (opt_abort)
			abort();
		return (true);
	}
#endif

	return (false);
}

static bool
malloc_init_hard_finish(tsdn_t *tsdn)
{

	if (malloc_mutex_boot())
		return (true);

	if (opt_narenas == 0) {
		/*
		 * For SMP systems, create more than one arena per CPU by
		 * default.
		 */
		if (ncpus > 1)
			opt_narenas = ncpus << 2;
		else
			opt_narenas = 1;
	}
	narenas_auto = opt_narenas;
	/*
	 * Limit the number of arenas to the indexing range of MALLOCX_ARENA().
	 */
	if (narenas_auto > MALLOCX_ARENA_MAX) {
		narenas_auto = MALLOCX_ARENA_MAX;
		malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n",
		    narenas_auto);
	}
	narenas_total_set(narenas_auto);

	/* Allocate and initialize arenas. */
	arenas = (arena_t **)base_alloc(tsdn, sizeof(arena_t *) *
	    (MALLOCX_ARENA_MAX+1));
	if (arenas == NULL)
		return (true);
	/* Copy the pointer to the one arena that was already initialized. */
	arena_set(0, a0);

	malloc_init_state = malloc_init_initialized;
	malloc_slow_flag_init();

	return (false);
}
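/*
 * Illustrative example of the default sizing above: on a 16-CPU machine with
 * no explicit "narenas" setting, opt_narenas becomes 16 << 2 == 64 automatic
 * arenas (subject to the MALLOCX_ARENA_MAX cap), which reduces contention by
 * spreading threads across arenas.
 */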

static bool
malloc_init_hard(void)
{
	tsd_t *tsd;

#if defined(_WIN32) && _WIN32_WINNT < 0x0600
	_init_init_lock();
#endif
	malloc_mutex_lock(TSDN_NULL, &init_lock);
	if (!malloc_init_hard_needed()) {
		malloc_mutex_unlock(TSDN_NULL, &init_lock);
		return (false);
	}

	if (malloc_init_state != malloc_init_a0_initialized &&
	    malloc_init_hard_a0_locked()) {
		malloc_mutex_unlock(TSDN_NULL, &init_lock);
		return (true);
	}

	malloc_mutex_unlock(TSDN_NULL, &init_lock);
	/* Recursive allocation relies on functional tsd. */
	tsd = malloc_tsd_boot0();
	if (tsd == NULL)
		return (true);
	if (malloc_init_hard_recursible())
		return (true);
	malloc_mutex_lock(tsd_tsdn(tsd), &init_lock);

	if (config_prof && prof_boot2(tsd)) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
		return (true);
	}

	if (malloc_init_hard_finish(tsd_tsdn(tsd))) {
		malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
		return (true);
	}

	malloc_mutex_unlock(tsd_tsdn(tsd), &init_lock);
	malloc_tsd_boot1();
	return (false);
}

/*
 * End initialization functions.
 */
/******************************************************************************/
/*
 * Begin malloc(3)-compatible functions.
 */

static void *
ialloc_prof_sample(tsd_t *tsd, size_t usize, szind_t ind, bool zero,
    prof_tctx_t *tctx, bool slow_path)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		szind_t ind_large = size2index(LARGE_MINCLASS);
		p = ialloc(tsd, LARGE_MINCLASS, ind_large, zero, slow_path);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = ialloc(tsd, usize, ind, zero, slow_path);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
ialloc_prof(tsd_t *tsd, size_t usize, szind_t ind, bool zero, bool slow_path)
{
	void *p;
	prof_tctx_t *tctx;

	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = ialloc_prof_sample(tsd, usize, ind, zero, tctx, slow_path);
	else
		p = ialloc(tsd, usize, ind, zero, slow_path);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, usize, tctx);

	return (p);
}
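/*
 * Note (illustrative): when heap profiling samples a small request,
 * ialloc_prof_sample() above serves it from LARGE_MINCLASS instead of its
 * natural size class, and arena_prof_promoted() records the originally
 * requested usize.  The assumption here is that large allocations carry
 * per-allocation metadata suitable for storing the sample's prof_tctx,
 * whereas small regions share run metadata.
 */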

/*
 * ialloc_body() is inlined so that fast and slow paths are generated separately
 * with statically known slow_path.
 *
 * This function guarantees that *tsdn is non-NULL on success.
 */
JEMALLOC_ALWAYS_INLINE_C void *
ialloc_body(size_t size, bool zero, tsdn_t **tsdn, size_t *usize,
    bool slow_path)
{
	tsd_t *tsd;
	szind_t ind;

	if (slow_path && unlikely(malloc_init())) {
		*tsdn = NULL;
		return (NULL);
	}

	tsd = tsd_fetch();
	*tsdn = tsd_tsdn(tsd);
	witness_assert_lockless(tsd_tsdn(tsd));

	ind = size2index(size);
	if (unlikely(ind >= NSIZES))
		return (NULL);

	if (config_stats || (config_prof && opt_prof) || (slow_path &&
	    config_valgrind && unlikely(in_valgrind))) {
		*usize = index2size(ind);
		assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
	}

	if (config_prof && opt_prof)
		return (ialloc_prof(tsd, *usize, ind, zero, slow_path));

	return (ialloc(tsd, size, ind, zero, slow_path));
}

JEMALLOC_ALWAYS_INLINE_C void
ialloc_post_check(void *ret, tsdn_t *tsdn, size_t usize, const char *func,
    bool update_errno, bool slow_path)
{

	assert(!tsdn_null(tsdn) || ret == NULL);

	if (unlikely(ret == NULL)) {
		if (slow_path && config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_printf("<jemalloc>: Error in %s(): out of "
			    "memory\n", func);
			abort();
		}
		if (update_errno)
			set_errno(ENOMEM);
	}
	if (config_stats && likely(ret != NULL)) {
		assert(usize == isalloc(tsdn, ret, config_prof));
		*tsd_thread_allocatedp_get(tsdn_tsd(tsdn)) += usize;
	}
	witness_assert_lockless(tsdn);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_malloc(size_t size)
{
	void *ret;
	tsdn_t *tsdn;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	if (size == 0)
		size = 1;

	if (likely(!malloc_slow)) {
		ret = ialloc_body(size, false, &tsdn, &usize, false);
		ialloc_post_check(ret, tsdn, usize, "malloc", true, false);
	} else {
		ret = ialloc_body(size, false, &tsdn, &usize, true);
		ialloc_post_check(ret, tsdn, usize, "malloc", true, true);
		UTRACE(0, size, ret);
		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, false);
	}

	return (ret);
}

static void *
imemalign_prof_sample(tsd_t *tsd, size_t alignment, size_t usize,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		assert(sa2u(LARGE_MINCLASS, alignment) == LARGE_MINCLASS);
		p = ipalloc(tsd, LARGE_MINCLASS, alignment, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = ipalloc(tsd, usize, alignment, false);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
imemalign_prof(tsd_t *tsd, size_t alignment, size_t usize)
{
	void *p;
	prof_tctx_t *tctx;

	tctx = prof_alloc_prep(tsd, usize, prof_active_get_unlocked(), true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = imemalign_prof_sample(tsd, alignment, usize, tctx);
	else
		p = ipalloc(tsd, usize, alignment, false);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_malloc(tsd_tsdn(tsd), p, usize, tctx);

	return (p);
}

JEMALLOC_ATTR(nonnull(1))
static int
imemalign(void **memptr, size_t alignment, size_t size, size_t min_alignment)
{
	int ret;
	tsd_t *tsd;
	size_t usize;
	void *result;

	assert(min_alignment != 0);

	if (unlikely(malloc_init())) {
		tsd = NULL;
		result = NULL;
		goto label_oom;
	}
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	if (size == 0)
		size = 1;

	/* Make sure that alignment is a large enough power of 2. */
	if (unlikely(((alignment - 1) & alignment) != 0
	    || (alignment < min_alignment))) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write("<jemalloc>: Error allocating "
			    "aligned memory: invalid alignment\n");
			abort();
		}
		result = NULL;
		ret = EINVAL;
		goto label_return;
	}

	usize = sa2u(size, alignment);
	if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) {
		result = NULL;
		goto label_oom;
	}

	if (config_prof && opt_prof)
		result = imemalign_prof(tsd, alignment, usize);
	else
		result = ipalloc(tsd, usize, alignment, false);
	if (unlikely(result == NULL))
		goto label_oom;
	assert(((uintptr_t)result & (alignment - 1)) == ZU(0));

	*memptr = result;
	ret = 0;
label_return:
	if (config_stats && likely(result != NULL)) {
		assert(usize == isalloc(tsd_tsdn(tsd), result, config_prof));
		*tsd_thread_allocatedp_get(tsd) += usize;
	}
	UTRACE(0, size, result);
	JEMALLOC_VALGRIND_MALLOC(result != NULL, tsd_tsdn(tsd), result, usize,
	    false);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
label_oom:
	assert(result == NULL);
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error allocating aligned memory: "
		    "out of memory\n");
		abort();
	}
	ret = ENOMEM;
	witness_assert_lockless(tsd_tsdn(tsd));
	goto label_return;
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
JEMALLOC_ATTR(nonnull(1))
je_posix_memalign(void **memptr, size_t alignment, size_t size)
{
	int ret;

	ret = imemalign(memptr, alignment, size, sizeof(void *));

	return (ret);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(2)
je_aligned_alloc(size_t alignment, size_t size)
{
	void *ret;
	int err;

	if (unlikely((err = imemalign(&ret, alignment, size, 1)) != 0)) {
		ret = NULL;
		set_errno(err);
	}

	return (ret);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE2(1, 2)
je_calloc(size_t num, size_t size)
{
	void *ret;
	tsdn_t *tsdn;
	size_t num_size;
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);

	num_size = num * size;
	if (unlikely(num_size == 0)) {
		if (num == 0 || size == 0)
			num_size = 1;
		else
			num_size = HUGE_MAXCLASS + 1; /* Trigger OOM. */
	/*
	 * Try to avoid division here.  We know that it isn't possible to
	 * overflow during multiplication if neither operand uses any of the
	 * most significant half of the bits in a size_t.
	 */
	} else if (unlikely(((num | size) & (SIZE_T_MAX << (sizeof(size_t) <<
	    2))) && (num_size / size != num)))
		num_size = HUGE_MAXCLASS + 1; /* size_t overflow. */
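	/*
	 * Illustrative example of the check above: on a 64-bit system,
	 * SIZE_T_MAX << 32 masks the upper half of a size_t, so the division
	 * (num_size / size != num) is only performed when num or size has a
	 * bit set in its upper 32 bits, i.e. only when overflow is actually
	 * possible.
	 */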

	if (likely(!malloc_slow)) {
		ret = ialloc_body(num_size, true, &tsdn, &usize, false);
		ialloc_post_check(ret, tsdn, usize, "calloc", true, false);
	} else {
		ret = ialloc_body(num_size, true, &tsdn, &usize, true);
		ialloc_post_check(ret, tsdn, usize, "calloc", true, true);
		UTRACE(0, num_size, ret);
		JEMALLOC_VALGRIND_MALLOC(ret != NULL, tsdn, ret, usize, true);
	}

	return (ret);
}

static void *
irealloc_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloc(tsd, old_ptr, old_usize, LARGE_MINCLASS, 0, false);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else
		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irealloc_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t usize)
{
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
	tctx = prof_alloc_prep(tsd, usize, prof_active, true);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U))
		p = irealloc_prof_sample(tsd, old_ptr, old_usize, usize, tctx);
	else
		p = iralloc(tsd, old_ptr, old_usize, usize, 0, false);
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, true);
		return (NULL);
	}
	prof_realloc(tsd, p, usize, tctx, prof_active, true, old_ptr, old_usize,
	    old_tctx);

	return (p);
}

JEMALLOC_INLINE_C void
ifree(tsd_t *tsd, void *ptr, tcache_t *tcache, bool slow_path)
{
	size_t usize;
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	witness_assert_lockless(tsd_tsdn(tsd));

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	if (config_prof && opt_prof) {
		usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
		prof_free(tsd, ptr, usize);
	} else if (config_stats || config_valgrind)
		usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
	if (config_stats)
		*tsd_thread_deallocatedp_get(tsd) += usize;

	if (likely(!slow_path))
		iqalloc(tsd, ptr, tcache, false);
	else {
		if (config_valgrind && unlikely(in_valgrind))
			rzsize = p2rz(tsd_tsdn(tsd), ptr);
		iqalloc(tsd, ptr, tcache, true);
		JEMALLOC_VALGRIND_FREE(ptr, rzsize);
	}
}

JEMALLOC_INLINE_C void
isfree(tsd_t *tsd, void *ptr, size_t usize, tcache_t *tcache, bool slow_path)
{
	UNUSED size_t rzsize JEMALLOC_CC_SILENCE_INIT(0);

	witness_assert_lockless(tsd_tsdn(tsd));

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	if (config_prof && opt_prof)
		prof_free(tsd, ptr, usize);
	if (config_stats)
		*tsd_thread_deallocatedp_get(tsd) += usize;
	if (config_valgrind && unlikely(in_valgrind))
		rzsize = p2rz(tsd_tsdn(tsd), ptr);
	isqalloc(tsd, ptr, usize, tcache, slow_path);
	JEMALLOC_VALGRIND_FREE(ptr, rzsize);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_realloc(void *ptr, size_t size)
{
	void *ret;
	tsdn_t *tsdn JEMALLOC_CC_SILENCE_INIT(NULL);
	size_t usize JEMALLOC_CC_SILENCE_INIT(0);
	size_t old_usize = 0;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);

	if (unlikely(size == 0)) {
		if (ptr != NULL) {
			tsd_t *tsd;

			/* realloc(ptr, 0) is equivalent to free(ptr). */
			UTRACE(ptr, 0, 0);
			tsd = tsd_fetch();
			ifree(tsd, ptr, tcache_get(tsd, false), true);
			return (NULL);
		}
		size = 1;
	}

	if (likely(ptr != NULL)) {
		tsd_t *tsd;

		assert(malloc_initialized() || IS_INITIALIZER);
		malloc_thread_init();
		tsd = tsd_fetch();

		witness_assert_lockless(tsd_tsdn(tsd));

		old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
		if (config_valgrind && unlikely(in_valgrind)) {
			old_rzsize = config_prof ? p2rz(tsd_tsdn(tsd), ptr) :
			    u2rz(old_usize);
		}

		if (config_prof && opt_prof) {
			usize = s2u(size);
			ret = unlikely(usize == 0 || usize > HUGE_MAXCLASS) ?
			    NULL : irealloc_prof(tsd, ptr, old_usize, usize);
		} else {
			if (config_stats || (config_valgrind &&
			    unlikely(in_valgrind)))
				usize = s2u(size);
			ret = iralloc(tsd, ptr, old_usize, size, 0, false);
		}
		tsdn = tsd_tsdn(tsd);
	} else {
		/* realloc(NULL, size) is equivalent to malloc(size). */
		if (likely(!malloc_slow))
			ret = ialloc_body(size, false, &tsdn, &usize, false);
		else
			ret = ialloc_body(size, false, &tsdn, &usize, true);
		assert(!tsdn_null(tsdn) || ret == NULL);
	}

	if (unlikely(ret == NULL)) {
		if (config_xmalloc && unlikely(opt_xmalloc)) {
			malloc_write("<jemalloc>: Error in realloc(): "
			    "out of memory\n");
			abort();
		}
		set_errno(ENOMEM);
	}
	if (config_stats && likely(ret != NULL)) {
		tsd_t *tsd;

		assert(usize == isalloc(tsdn, ret, config_prof));
		tsd = tsdn_tsd(tsdn);
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, ret);
	JEMALLOC_VALGRIND_REALLOC(true, tsdn, ret, usize, true, ptr, old_usize,
	    old_rzsize, true, false);
	witness_assert_lockless(tsdn);
	return (ret);
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_free(void *ptr)
{

	UTRACE(ptr, 0, 0);
	if (likely(ptr != NULL)) {
		tsd_t *tsd = tsd_fetch();
		witness_assert_lockless(tsd_tsdn(tsd));
		if (likely(!malloc_slow))
			ifree(tsd, ptr, tcache_get(tsd, false), false);
		else
			ifree(tsd, ptr, tcache_get(tsd, false), true);
		witness_assert_lockless(tsd_tsdn(tsd));
	}
}

/*
 * End malloc(3)-compatible functions.
 */
/******************************************************************************/
/*
 * Begin non-standard override functions.
 */

#ifdef JEMALLOC_OVERRIDE_MEMALIGN
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_memalign(size_t alignment, size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	if (unlikely(imemalign(&ret, alignment, size, 1) != 0))
		ret = NULL;
	return (ret);
}
#endif

#ifdef JEMALLOC_OVERRIDE_VALLOC
JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc)
je_valloc(size_t size)
{
	void *ret JEMALLOC_CC_SILENCE_INIT(NULL);
	if (unlikely(imemalign(&ret, PAGE, size, 1) != 0))
		ret = NULL;
	return (ret);
}
#endif

/*
 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has
 * #define je_malloc malloc
 */
#define	malloc_is_malloc 1
#define	is_malloc_(a) malloc_is_ ## a
#define	is_malloc(a) is_malloc_(a)

#if ((is_malloc(je_malloc) == 1) && defined(JEMALLOC_GLIBC_MALLOC_HOOK))
/*
 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible
 * to inconsistently reference libc's malloc(3)-compatible functions
 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541).
 *
 * These definitions interpose hooks in glibc.  The functions are actually
 * passed an extra argument for the caller return address, which will be
 * ignored.
 */
JEMALLOC_EXPORT void (*__free_hook)(void *ptr) = je_free;
JEMALLOC_EXPORT void *(*__malloc_hook)(size_t size) = je_malloc;
JEMALLOC_EXPORT void *(*__realloc_hook)(void *ptr, size_t size) = je_realloc;
#  ifdef JEMALLOC_GLIBC_MEMALIGN_HOOK
JEMALLOC_EXPORT void *(*__memalign_hook)(size_t alignment, size_t size) =
    je_memalign;
#  endif

#ifdef CPU_COUNT
/*
 * To enable static linking with glibc, the libc specific malloc interface must
 * be implemented also, so none of glibc's malloc.o functions are added to the
 * link.
 */
#define	ALIAS(je_fn)	__attribute__((alias (#je_fn), used))
/* To force macro expansion of je_ prefix before stringification. */
#define	PREALIAS(je_fn)	ALIAS(je_fn)
void	*__libc_malloc(size_t size) PREALIAS(je_malloc);
void	__libc_free(void* ptr) PREALIAS(je_free);
void	*__libc_realloc(void* ptr, size_t size) PREALIAS(je_realloc);
void	*__libc_calloc(size_t n, size_t size) PREALIAS(je_calloc);
void	*__libc_memalign(size_t align, size_t s) PREALIAS(je_memalign);
void	*__libc_valloc(size_t size) PREALIAS(je_valloc);
int	__posix_memalign(void** r, size_t a, size_t s)
    PREALIAS(je_posix_memalign);
#undef PREALIAS
#undef ALIAS

#endif

#endif

/*
 * End non-standard override functions.
 */
/******************************************************************************/
/*
 * Begin non-standard functions.
 */

JEMALLOC_ALWAYS_INLINE_C bool
imallocx_flags_decode(tsd_t *tsd, size_t size, int flags, size_t *usize,
    size_t *alignment, bool *zero, tcache_t **tcache, arena_t **arena)
{

	if ((flags & MALLOCX_LG_ALIGN_MASK) == 0) {
		*alignment = 0;
		*usize = s2u(size);
	} else {
		*alignment = MALLOCX_ALIGN_GET_SPECIFIED(flags);
		*usize = sa2u(size, *alignment);
	}
	if (unlikely(*usize == 0 || *usize > HUGE_MAXCLASS))
		return (true);
	*zero = MALLOCX_ZERO_GET(flags);
	if ((flags & MALLOCX_TCACHE_MASK) != 0) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			*tcache = NULL;
		else
			*tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		*tcache = tcache_get(tsd, true);
	if ((flags & MALLOCX_ARENA_MASK) != 0) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		*arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(*arena == NULL))
			return (true);
	} else
		*arena = NULL;
	return (false);
}
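/*
 * Illustrative caller-side example of the flags decoded above (using the
 * public MALLOCX_* macros from jemalloc.h):
 *
 *	void *p = je_mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *
 * requests 4096 zeroed bytes aligned to 64 bytes; adding MALLOCX_TCACHE_NONE
 * would bypass the thread cache, and MALLOCX_ARENA(ind) would allocate from a
 * specific arena.
 */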

/* This function guarantees that *tsdn is non-NULL on success. */
JEMALLOC_ALWAYS_INLINE_C void *
imallocx_body(size_t size, int flags, tsdn_t **tsdn, size_t *usize,
    bool slow_path)
{
	tsd_t *tsd;

	if (slow_path && unlikely(malloc_init())) {
		*tsdn = NULL;
		return (NULL);
	}

	tsd = tsd_fetch();
	*tsdn = tsd_tsdn(tsd);
	witness_assert_lockless(tsd_tsdn(tsd));

	if (likely(flags == 0)) {
		szind_t ind = size2index(size);
		if (unlikely(ind >= NSIZES))
			return (NULL);
		if (config_stats || (config_prof && opt_prof) || (slow_path &&
		    config_valgrind && unlikely(in_valgrind))) {
			*usize = index2size(ind);
			assert(*usize > 0 && *usize <= HUGE_MAXCLASS);
		}

		if (config_prof && opt_prof) {
			return (ialloc_prof(tsd, *usize, ind, false,
			    slow_path));
		}

		return (ialloc(tsd, size, ind, false, slow_path));
	}

	if (config_prof && opt_prof)
		return (imallocx_prof(tsd, size, flags, usize, slow_path));

	return (imallocx_no_prof(tsd, size, flags, usize, slow_path));
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ATTR(malloc) JEMALLOC_ALLOC_SIZE(1)
je_mallocx(size_t size, int flags)
{
	tsdn_t *tsdn;
	void *p;
	size_t usize;

	assert(size != 0);

	if (likely(!malloc_slow)) {
		p = imallocx_body(size, flags, &tsdn, &usize, false);
		ialloc_post_check(p, tsdn, usize, "mallocx", false, false);
	} else {
		p = imallocx_body(size, flags, &tsdn, &usize, true);
		ialloc_post_check(p, tsdn, usize, "mallocx", false, true);
		UTRACE(0, size, p);
		JEMALLOC_VALGRIND_MALLOC(p != NULL, tsdn, p, usize,
		    MALLOCX_ZERO_GET(flags));
	}

	return (p);
}
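
/*
 * Usage sketch (illustrative only; applications reach this entry point under
 * its public mallocx(3) name):
 *
 *	void *p = mallocx(4096, MALLOCX_ALIGN(64) | MALLOCX_ZERO);
 *	if (p == NULL)
 *		...handle OOM...
 *	dallocx(p, 0);
 */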

static void *
irallocx_prof_sample(tsd_t *tsd, void *old_ptr, size_t old_usize,
    size_t usize, size_t alignment, bool zero, tcache_t *tcache, arena_t *arena,
    prof_tctx_t *tctx)
{
	void *p;

	if (tctx == NULL)
		return (NULL);
	if (usize <= SMALL_MAXCLASS) {
		p = iralloct(tsd, old_ptr, old_usize, LARGE_MINCLASS, alignment,
		    zero, tcache, arena);
		if (p == NULL)
			return (NULL);
		arena_prof_promoted(tsd_tsdn(tsd), p, usize);
	} else {
		p = iralloct(tsd, old_ptr, old_usize, usize, alignment, zero,
		    tcache, arena);
	}

	return (p);
}

JEMALLOC_ALWAYS_INLINE_C void *
irallocx_prof(tsd_t *tsd, void *old_ptr, size_t old_usize, size_t size,
    size_t alignment, size_t *usize, bool zero, tcache_t *tcache,
    arena_t *arena)
{
	void *p;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), old_ptr);
	tctx = prof_alloc_prep(tsd, *usize, prof_active, false);
	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		p = irallocx_prof_sample(tsd, old_ptr, old_usize, *usize,
		    alignment, zero, tcache, arena, tctx);
	} else {
		p = iralloct(tsd, old_ptr, old_usize, size, alignment, zero,
		    tcache, arena);
	}
	if (unlikely(p == NULL)) {
		prof_alloc_rollback(tsd, tctx, false);
		return (NULL);
	}

	if (p == old_ptr && alignment != 0) {
		/*
		 * The allocation did not move, so it is possible that the size
		 * class is smaller than would guarantee the requested
		 * alignment, and that the alignment constraint was
		 * serendipitously satisfied.  Additionally, old_usize may not
		 * be the same as the current usize because of in-place large
		 * reallocation.  Therefore, query the actual value of usize.
		 */
		*usize = isalloc(tsd_tsdn(tsd), p, config_prof);
	}
	prof_realloc(tsd, p, *usize, tctx, prof_active, false, old_ptr,
	    old_usize, old_tctx);

	return (p);
}

JEMALLOC_EXPORT JEMALLOC_ALLOCATOR JEMALLOC_RESTRICT_RETURN
void JEMALLOC_NOTHROW *
JEMALLOC_ALLOC_SIZE(2)
je_rallocx(void *ptr, size_t size, int flags)
{
	void *p;
	tsd_t *tsd;
	size_t usize;
	size_t old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;
	arena_t *arena;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(size != 0);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));

	if (unlikely((flags & MALLOCX_ARENA_MASK) != 0)) {
		unsigned arena_ind = MALLOCX_ARENA_GET(flags);
		arena = arena_get(tsd_tsdn(tsd), arena_ind, true);
		if (unlikely(arena == NULL))
			goto label_oom;
	} else
		arena = NULL;

	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, true);

	old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);
	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment);
		if (unlikely(usize == 0 || usize > HUGE_MAXCLASS))
			goto label_oom;
		p = irallocx_prof(tsd, ptr, old_usize, size, alignment, &usize,
		    zero, tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
	} else {
		p = iralloct(tsd, ptr, old_usize, size, alignment, zero,
		    tcache, arena);
		if (unlikely(p == NULL))
			goto label_oom;
		if (config_stats || (config_valgrind && unlikely(in_valgrind)))
			usize = isalloc(tsd_tsdn(tsd), p, config_prof);
	}
	assert(alignment == 0 || ((uintptr_t)p & (alignment - 1)) == ZU(0));

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	UTRACE(ptr, size, p);
	JEMALLOC_VALGRIND_REALLOC(true, tsd_tsdn(tsd), p, usize, false, ptr,
	    old_usize, old_rzsize, false, zero);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (p);
label_oom:
	if (config_xmalloc && unlikely(opt_xmalloc)) {
		malloc_write("<jemalloc>: Error in rallocx(): out of memory\n");
		abort();
	}
	UTRACE(ptr, size, 0);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (NULL);
}
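
/*
 * Usage sketch (illustrative only): on failure rallocx() leaves the original
 * allocation untouched, so callers typically keep the old pointer until the
 * call succeeds:
 *
 *	void *q = rallocx(p, new_size, MALLOCX_ALIGN(64));
 *	if (q != NULL)
 *		p = q;
 *	else
 *		...p is still valid; handle OOM...
 */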

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_helper(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
	size_t usize;

	if (ixalloc(tsdn, ptr, old_usize, size, extra, alignment, zero))
		return (old_usize);
	usize = isalloc(tsdn, ptr, config_prof);

	return (usize);
}

static size_t
ixallocx_prof_sample(tsdn_t *tsdn, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero, prof_tctx_t *tctx)
{
	size_t usize;

	if (tctx == NULL)
		return (old_usize);
	usize = ixallocx_helper(tsdn, ptr, old_usize, size, extra, alignment,
	    zero);

	return (usize);
}

JEMALLOC_ALWAYS_INLINE_C size_t
ixallocx_prof(tsd_t *tsd, void *ptr, size_t old_usize, size_t size,
    size_t extra, size_t alignment, bool zero)
{
	size_t usize_max, usize;
	bool prof_active;
	prof_tctx_t *old_tctx, *tctx;

	prof_active = prof_active_get_unlocked();
	old_tctx = prof_tctx_get(tsd_tsdn(tsd), ptr);
	/*
	 * usize isn't knowable before ixalloc() returns when extra is
	 * non-zero.  Therefore, compute its maximum possible value and use
	 * that in prof_alloc_prep() to decide whether to capture a backtrace.
	 * prof_realloc() will use the actual usize to decide whether to
	 * sample.
	 */
	if (alignment == 0) {
		usize_max = s2u(size+extra);
		assert(usize_max > 0 && usize_max <= HUGE_MAXCLASS);
	} else {
		usize_max = sa2u(size+extra, alignment);
		if (unlikely(usize_max == 0 || usize_max > HUGE_MAXCLASS)) {
			/*
			 * usize_max is out of range, and chances are that
			 * allocation will fail, but use the maximum possible
			 * value and carry on with prof_alloc_prep(), just in
			 * case allocation succeeds.
			 */
			usize_max = HUGE_MAXCLASS;
		}
	}
	tctx = prof_alloc_prep(tsd, usize_max, prof_active, false);

	if (unlikely((uintptr_t)tctx != (uintptr_t)1U)) {
		usize = ixallocx_prof_sample(tsd_tsdn(tsd), ptr, old_usize,
		    size, extra, alignment, zero, tctx);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (usize == old_usize) {
		prof_alloc_rollback(tsd, tctx, false);
		return (usize);
	}
	prof_realloc(tsd, ptr, usize, tctx, prof_active, false, ptr, old_usize,
	    old_tctx);

	return (usize);
}

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_xallocx(void *ptr, size_t size, size_t extra, int flags)
{
	tsd_t *tsd;
	size_t usize, old_usize;
	UNUSED size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0);
	size_t alignment = MALLOCX_ALIGN_GET(flags);
	bool zero = flags & MALLOCX_ZERO;

	assert(ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);
	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();
	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));

	old_usize = isalloc(tsd_tsdn(tsd), ptr, config_prof);

	/*
	 * The API explicitly absolves itself of protecting against (size +
	 * extra) numerical overflow, but we may need to clamp extra to avoid
	 * exceeding HUGE_MAXCLASS.
	 *
	 * Ordinarily, size limit checking is handled deeper down, but here we
	 * have to check as part of (size + extra) clamping, since we need the
	 * clamped value in the above helper functions.
	 */
	if (unlikely(size > HUGE_MAXCLASS)) {
		usize = old_usize;
		goto label_not_resized;
	}
	if (unlikely(HUGE_MAXCLASS - size < extra))
		extra = HUGE_MAXCLASS - size;

	if (config_valgrind && unlikely(in_valgrind))
		old_rzsize = u2rz(old_usize);

	if (config_prof && opt_prof) {
		usize = ixallocx_prof(tsd, ptr, old_usize, size, extra,
		    alignment, zero);
	} else {
		usize = ixallocx_helper(tsd_tsdn(tsd), ptr, old_usize, size,
		    extra, alignment, zero);
	}
	if (unlikely(usize == old_usize))
		goto label_not_resized;

	if (config_stats) {
		*tsd_thread_allocatedp_get(tsd) += usize;
		*tsd_thread_deallocatedp_get(tsd) += old_usize;
	}
	JEMALLOC_VALGRIND_REALLOC(false, tsd_tsdn(tsd), ptr, usize, false, ptr,
	    old_usize, old_rzsize, false, zero);
label_not_resized:
	UTRACE(ptr, size, ptr);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (usize);
}
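
/*
 * Usage sketch (illustrative only): xallocx() never moves the allocation; it
 * returns the resulting usable size, which the caller compares against the
 * requested size to detect a failed in-place resize:
 *
 *	if (xallocx(p, new_size, 0, 0) < new_size)
 *		...in-place growth failed; fall back to rallocx()...
 */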

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_sallocx(const void *ptr, int flags)
{
	size_t usize;
	tsdn_t *tsdn;

	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	if (config_ivsalloc)
		usize = ivsalloc(tsdn, ptr, config_prof);
	else
		usize = isalloc(tsdn, ptr, config_prof);

	witness_assert_lockless(tsdn);
	return (usize);
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_dallocx(void *ptr, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	if (likely(!malloc_slow))
		ifree(tsd, ptr, tcache, false);
	else
		ifree(tsd, ptr, tcache, true);
	witness_assert_lockless(tsd_tsdn(tsd));
}

JEMALLOC_ALWAYS_INLINE_C size_t
inallocx(tsdn_t *tsdn, size_t size, int flags)
{
	size_t usize;

	witness_assert_lockless(tsdn);

	if (likely((flags & MALLOCX_LG_ALIGN_MASK) == 0))
		usize = s2u(size);
	else
		usize = sa2u(size, MALLOCX_ALIGN_GET_SPECIFIED(flags));
	witness_assert_lockless(tsdn);
	return (usize);
}

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_sdallocx(void *ptr, size_t size, int flags)
{
	tsd_t *tsd;
	tcache_t *tcache;
	size_t usize;

	assert(ptr != NULL);
	assert(malloc_initialized() || IS_INITIALIZER);
	tsd = tsd_fetch();
	usize = inallocx(tsd_tsdn(tsd), size, flags);
	assert(usize == isalloc(tsd_tsdn(tsd), ptr, config_prof));

	witness_assert_lockless(tsd_tsdn(tsd));
	if (unlikely((flags & MALLOCX_TCACHE_MASK) != 0)) {
		if ((flags & MALLOCX_TCACHE_MASK) == MALLOCX_TCACHE_NONE)
			tcache = NULL;
		else
			tcache = tcaches_get(tsd, MALLOCX_TCACHE_GET(flags));
	} else
		tcache = tcache_get(tsd, false);

	UTRACE(ptr, 0, 0);
	if (likely(!malloc_slow))
		isfree(tsd, ptr, usize, tcache, false);
	else
		isfree(tsd, ptr, usize, tcache, true);
	witness_assert_lockless(tsd_tsdn(tsd));
}
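
/*
 * Usage sketch (illustrative only): sdallocx() is the sized counterpart of
 * dallocx(); the size passed in must be the originally requested size, or the
 * value nallocx()/sallocx() reports for it, which lets the deallocation path
 * avoid a size lookup:
 *
 *	void *p = mallocx(n, 0);
 *	...
 *	sdallocx(p, n, 0);
 */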

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
JEMALLOC_ATTR(pure)
je_nallocx(size_t size, int flags)
{
	size_t usize;
	tsdn_t *tsdn;

	assert(size != 0);

	if (unlikely(malloc_init()))
		return (0);

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	usize = inallocx(tsdn, size, flags);
	if (unlikely(usize > HUGE_MAXCLASS))
		return (0);

	witness_assert_lockless(tsdn);
	return (usize);
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp,
    size_t newlen)
{
	int ret;
	tsd_t *tsd;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	ret = ctl_byname(tsd, name, oldp, oldlenp, newp, newlen);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp)
{
	int ret;
	tsdn_t *tsdn;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);
	ret = ctl_nametomib(tsdn, name, mibp, miblenp);
	witness_assert_lockless(tsdn);
	return (ret);
}

JEMALLOC_EXPORT int JEMALLOC_NOTHROW
je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp,
    void *newp, size_t newlen)
{
	int ret;
	tsd_t *tsd;

	if (unlikely(malloc_init()))
		return (EAGAIN);

	tsd = tsd_fetch();
	witness_assert_lockless(tsd_tsdn(tsd));
	ret = ctl_bymib(tsd, mib, miblen, oldp, oldlenp, newp, newlen);
	witness_assert_lockless(tsd_tsdn(tsd));
	return (ret);
}
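
/*
 * Usage sketch (illustrative only): mallctl() reads and/or writes a control
 * value named by a period-separated path, while mallctlnametomib() and
 * mallctlbymib() split the name lookup from repeated accesses:
 *
 *	size_t allocated, sz = sizeof(allocated);
 *	if (mallctl("stats.allocated", &allocated, &sz, NULL, 0) == 0)
 *		...use allocated...
 */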

JEMALLOC_EXPORT void JEMALLOC_NOTHROW
je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque,
    const char *opts)
{
	tsdn_t *tsdn;

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);
	stats_print(write_cb, cbopaque, opts);
	witness_assert_lockless(tsdn);
}

JEMALLOC_EXPORT size_t JEMALLOC_NOTHROW
je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr)
{
	size_t ret;
	tsdn_t *tsdn;

	assert(malloc_initialized() || IS_INITIALIZER);
	malloc_thread_init();

	tsdn = tsdn_fetch();
	witness_assert_lockless(tsdn);

	if (config_ivsalloc)
		ret = ivsalloc(tsdn, ptr, config_prof);
	else
		ret = (ptr == NULL) ? 0 : isalloc(tsdn, ptr, config_prof);

	witness_assert_lockless(tsdn);
	return (ret);
}

/*
 * End non-standard functions.
 */
/******************************************************************************/
/*
 * Begin compatibility functions.
 */

#define	ALLOCM_LG_ALIGN(la)	(la)
#define	ALLOCM_ALIGN(a)		(ffsl(a)-1)
#define	ALLOCM_ZERO		((int)0x40)
#define	ALLOCM_NO_MOVE		((int)0x80)

#define	ALLOCM_SUCCESS		0
#define	ALLOCM_ERR_OOM		1
#define	ALLOCM_ERR_NOT_MOVED	2

int
je_allocm(void **ptr, size_t *rsize, size_t size, int flags)
{
	void *p;

	assert(ptr != NULL);

	p = je_mallocx(size, flags);
	if (p == NULL)
		return (ALLOCM_ERR_OOM);
	if (rsize != NULL)
		*rsize = isalloc(tsdn_fetch(), p, config_prof);
	*ptr = p;
	return (ALLOCM_SUCCESS);
}

int
je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags)
{
	int ret;
	bool no_move = flags & ALLOCM_NO_MOVE;

	assert(ptr != NULL);
	assert(*ptr != NULL);
	assert(size != 0);
	assert(SIZE_T_MAX - size >= extra);

	if (no_move) {
		size_t usize = je_xallocx(*ptr, size, extra, flags);
		ret = (usize >= size) ? ALLOCM_SUCCESS : ALLOCM_ERR_NOT_MOVED;
		if (rsize != NULL)
			*rsize = usize;
	} else {
		void *p = je_rallocx(*ptr, size+extra, flags);
		if (p != NULL) {
			*ptr = p;
			ret = ALLOCM_SUCCESS;
		} else
			ret = ALLOCM_ERR_OOM;
		if (rsize != NULL)
			*rsize = isalloc(tsdn_fetch(), *ptr, config_prof);
	}
	return (ret);
}

int
je_sallocm(const void *ptr, size_t *rsize, int flags)
{

	assert(rsize != NULL);
	*rsize = je_sallocx(ptr, flags);
	return (ALLOCM_SUCCESS);
}

int
je_dallocm(void *ptr, int flags)
{

	je_dallocx(ptr, flags);
	return (ALLOCM_SUCCESS);
}

int
je_nallocm(size_t *rsize, size_t size, int flags)
{
	size_t usize;

	usize = je_nallocx(size, flags);
	if (usize == 0)
		return (ALLOCM_ERR_OOM);
	if (rsize != NULL)
		*rsize = usize;
	return (ALLOCM_SUCCESS);
}

#undef ALLOCM_LG_ALIGN
#undef ALLOCM_ALIGN
#undef ALLOCM_ZERO
#undef ALLOCM_NO_MOVE

#undef ALLOCM_SUCCESS
#undef ALLOCM_ERR_OOM
#undef ALLOCM_ERR_NOT_MOVED
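
/*
 * Note (illustrative only): these legacy *allocm() entry points forward their
 * flags unchanged, so e.g. allocm(&p, &rsize, size, ALLOCM_ALIGN(64) |
 * ALLOCM_ZERO) behaves like mallocx(size, MALLOCX_ALIGN(64) | MALLOCX_ZERO)
 * followed by an isalloc() query to fill in rsize.
 */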

/*
 * End compatibility functions.
 */
/******************************************************************************/
/*
 * The following functions are used by threading libraries for protection of
 * malloc during fork().
 */

/*
 * If an application creates a thread before doing any allocation in the main
 * thread, then calls fork(2) in the main thread followed by memory allocation
 * in the child process, a race can occur that results in deadlock within the
 * child: the main thread may have forked while the created thread had
 * partially initialized the allocator.  Ordinarily jemalloc prevents
 * fork/malloc races via the following functions it registers during
 * initialization using pthread_atfork(), but of course that does no good if
 * the allocator isn't fully initialized at fork time.  The following library
 * constructor is a partial solution to this problem.  It may still be possible
 * to trigger the deadlock described above, but doing so would involve forking
 * via a library constructor that runs before jemalloc's runs.
 */
#ifndef JEMALLOC_JET
JEMALLOC_ATTR(constructor)
static void
jemalloc_constructor(void)
{

	malloc_init();
}
#endif

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_prefork(void)
#else
JEMALLOC_EXPORT void
_malloc_prefork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, j, narenas;
	arena_t *arena;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized())
		return;
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	narenas = narenas_total_get();

	witness_prefork(tsd);
	/* Acquire all mutexes in a safe order. */
	ctl_prefork(tsd_tsdn(tsd));
	malloc_mutex_prefork(tsd_tsdn(tsd), &arenas_lock);
	prof_prefork0(tsd_tsdn(tsd));
	for (i = 0; i < 3; i++) {
		for (j = 0; j < narenas; j++) {
			if ((arena = arena_get(tsd_tsdn(tsd), j, false)) !=
			    NULL) {
				switch (i) {
				case 0:
					arena_prefork0(tsd_tsdn(tsd), arena);
					break;
				case 1:
					arena_prefork1(tsd_tsdn(tsd), arena);
					break;
				case 2:
					arena_prefork2(tsd_tsdn(tsd), arena);
					break;
				default: not_reached();
				}
			}
		}
	}
	base_prefork(tsd_tsdn(tsd));
	for (i = 0; i < narenas; i++) {
		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_prefork3(tsd_tsdn(tsd), arena);
	}
	prof_prefork1(tsd_tsdn(tsd));
}

#ifndef JEMALLOC_MUTEX_INIT_CB
void
jemalloc_postfork_parent(void)
#else
JEMALLOC_EXPORT void
_malloc_postfork(void)
#endif
{
	tsd_t *tsd;
	unsigned i, narenas;

#ifdef JEMALLOC_MUTEX_INIT_CB
	if (!malloc_initialized())
		return;
#endif
	assert(malloc_initialized());

	tsd = tsd_fetch();

	witness_postfork_parent(tsd);
	/* Release all mutexes, now that fork() has completed. */
	base_postfork_parent(tsd_tsdn(tsd));
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_postfork_parent(tsd_tsdn(tsd), arena);
	}
	prof_postfork_parent(tsd_tsdn(tsd));
	malloc_mutex_postfork_parent(tsd_tsdn(tsd), &arenas_lock);
	ctl_postfork_parent(tsd_tsdn(tsd));
}

void
jemalloc_postfork_child(void)
{
	tsd_t *tsd;
	unsigned i, narenas;

	assert(malloc_initialized());

	tsd = tsd_fetch();

	witness_postfork_child(tsd);
	/* Release all mutexes, now that fork() has completed. */
	base_postfork_child(tsd_tsdn(tsd));
	for (i = 0, narenas = narenas_total_get(); i < narenas; i++) {
		arena_t *arena;

		if ((arena = arena_get(tsd_tsdn(tsd), i, false)) != NULL)
			arena_postfork_child(tsd_tsdn(tsd), arena);
	}
	prof_postfork_child(tsd_tsdn(tsd));
	malloc_mutex_postfork_child(tsd_tsdn(tsd), &arenas_lock);
	ctl_postfork_child(tsd_tsdn(tsd));
}
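
/*
 * Illustrative sketch (the actual registration happens during allocator
 * initialization elsewhere in this file, on platforms that provide
 * pthread_atfork()): the three handlers above are intended to be wired up
 * roughly as
 *
 *	pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent,
 *	    jemalloc_postfork_child);
 *
 * so that every allocator mutex is held across fork() and then released in
 * the parent and made usable again in the child.
 */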

void
_malloc_first_thread(void)
{

	(void)malloc_mutex_first_thread();
}

/******************************************************************************/