1 #define JEMALLOC_C_ 2 #include "jemalloc/internal/jemalloc_internal.h" 3 4 /******************************************************************************/ 5 /* Data. */ 6 7 malloc_tsd_data(, arenas, arena_t *, NULL) 8 malloc_tsd_data(, thread_allocated, thread_allocated_t, 9 THREAD_ALLOCATED_INITIALIZER) 10 11 /* Work around <http://llvm.org/bugs/show_bug.cgi?id=12623>: */ 12 const char *__malloc_options_1_0 = NULL; 13 __sym_compat(_malloc_options, __malloc_options_1_0, FBSD_1.0); 14 15 /* Runtime configuration options. */ 16 const char *je_malloc_conf; 17 #ifdef JEMALLOC_DEBUG 18 bool opt_abort = true; 19 # ifdef JEMALLOC_FILL 20 bool opt_junk = true; 21 # else 22 bool opt_junk = false; 23 # endif 24 #else 25 bool opt_abort = false; 26 bool opt_junk = false; 27 #endif 28 size_t opt_quarantine = ZU(0); 29 bool opt_redzone = false; 30 bool opt_utrace = false; 31 bool opt_valgrind = false; 32 bool opt_xmalloc = false; 33 bool opt_zero = false; 34 size_t opt_narenas = 0; 35 36 unsigned ncpus; 37 38 malloc_mutex_t arenas_lock; 39 arena_t **arenas; 40 unsigned narenas_total; 41 unsigned narenas_auto; 42 43 /* Set to true once the allocator has been initialized. */ 44 static bool malloc_initialized = false; 45 46 #ifdef JEMALLOC_THREADED_INIT 47 /* Used to let the initializing thread recursively allocate. */ 48 # define NO_INITIALIZER ((unsigned long)0) 49 # define INITIALIZER pthread_self() 50 # define IS_INITIALIZER (malloc_initializer == pthread_self()) 51 static pthread_t malloc_initializer = NO_INITIALIZER; 52 #else 53 # define NO_INITIALIZER false 54 # define INITIALIZER true 55 # define IS_INITIALIZER malloc_initializer 56 static bool malloc_initializer = NO_INITIALIZER; 57 #endif 58 59 /* Used to avoid initialization races. */ 60 #ifdef _WIN32 61 static malloc_mutex_t init_lock; 62 63 JEMALLOC_ATTR(constructor) 64 static void WINAPI 65 _init_init_lock(void) 66 { 67 68 malloc_mutex_init(&init_lock); 69 } 70 71 #ifdef _MSC_VER 72 # pragma section(".CRT$XCU", read) 73 JEMALLOC_SECTION(".CRT$XCU") JEMALLOC_ATTR(used) 74 static const void (WINAPI *init_init_lock)(void) = _init_init_lock; 75 #endif 76 77 #else 78 static malloc_mutex_t init_lock = MALLOC_MUTEX_INITIALIZER; 79 #endif 80 81 typedef struct { 82 void *p; /* Input pointer (as in realloc(p, s)). */ 83 size_t s; /* Request size. */ 84 void *r; /* Result pointer. */ 85 } malloc_utrace_t; 86 87 #ifdef JEMALLOC_UTRACE 88 # define UTRACE(a, b, c) do { \ 89 if (opt_utrace) { \ 90 malloc_utrace_t ut; \ 91 ut.p = (a); \ 92 ut.s = (b); \ 93 ut.r = (c); \ 94 utrace(&ut, sizeof(ut)); \ 95 } \ 96 } while (0) 97 #else 98 # define UTRACE(a, b, c) 99 #endif 100 101 /******************************************************************************/ 102 /* Function prototypes for non-inline static functions. */ 103 104 static void stats_print_atexit(void); 105 static unsigned malloc_ncpus(void); 106 static bool malloc_conf_next(char const **opts_p, char const **k_p, 107 size_t *klen_p, char const **v_p, size_t *vlen_p); 108 static void malloc_conf_error(const char *msg, const char *k, size_t klen, 109 const char *v, size_t vlen); 110 static void malloc_conf_init(void); 111 static bool malloc_init_hard(void); 112 static int imemalign(void **memptr, size_t alignment, size_t size, 113 size_t min_alignment); 114 115 /******************************************************************************/ 116 /* 117 * Begin miscellaneous support functions. 118 */ 119 120 /* Create a new arena and insert it into the arenas array at index ind. */ 121 arena_t * 122 arenas_extend(unsigned ind) 123 { 124 arena_t *ret; 125 126 ret = (arena_t *)base_alloc(sizeof(arena_t)); 127 if (ret != NULL && arena_new(ret, ind) == false) { 128 arenas[ind] = ret; 129 return (ret); 130 } 131 /* Only reached if there is an OOM error. */ 132 133 /* 134 * OOM here is quite inconvenient to propagate, since dealing with it 135 * would require a check for failure in the fast path. Instead, punt 136 * by using arenas[0]. In practice, this is an extremely unlikely 137 * failure. 138 */ 139 malloc_write("<jemalloc>: Error initializing arena\n"); 140 if (opt_abort) 141 abort(); 142 143 return (arenas[0]); 144 } 145 146 /* Slow path, called only by choose_arena(). */ 147 arena_t * 148 choose_arena_hard(void) 149 { 150 arena_t *ret; 151 152 if (narenas_auto > 1) { 153 unsigned i, choose, first_null; 154 155 choose = 0; 156 first_null = narenas_auto; 157 malloc_mutex_lock(&arenas_lock); 158 assert(arenas[0] != NULL); 159 for (i = 1; i < narenas_auto; i++) { 160 if (arenas[i] != NULL) { 161 /* 162 * Choose the first arena that has the lowest 163 * number of threads assigned to it. 164 */ 165 if (arenas[i]->nthreads < 166 arenas[choose]->nthreads) 167 choose = i; 168 } else if (first_null == narenas_auto) { 169 /* 170 * Record the index of the first uninitialized 171 * arena, in case all extant arenas are in use. 172 * 173 * NB: It is possible for there to be 174 * discontinuities in terms of initialized 175 * versus uninitialized arenas, due to the 176 * "thread.arena" mallctl. 177 */ 178 first_null = i; 179 } 180 } 181 182 if (arenas[choose]->nthreads == 0 183 || first_null == narenas_auto) { 184 /* 185 * Use an unloaded arena, or the least loaded arena if 186 * all arenas are already initialized. 187 */ 188 ret = arenas[choose]; 189 } else { 190 /* Initialize a new arena. */ 191 ret = arenas_extend(first_null); 192 } 193 ret->nthreads++; 194 malloc_mutex_unlock(&arenas_lock); 195 } else { 196 ret = arenas[0]; 197 malloc_mutex_lock(&arenas_lock); 198 ret->nthreads++; 199 malloc_mutex_unlock(&arenas_lock); 200 } 201 202 arenas_tsd_set(&ret); 203 204 return (ret); 205 } 206 207 static void 208 stats_print_atexit(void) 209 { 210 211 if (config_tcache && config_stats) { 212 unsigned narenas, i; 213 214 /* 215 * Merge stats from extant threads. This is racy, since 216 * individual threads do not lock when recording tcache stats 217 * events. As a consequence, the final stats may be slightly 218 * out of date by the time they are reported, if other threads 219 * continue to allocate. 220 */ 221 for (i = 0, narenas = narenas_total_get(); i < narenas; i++) { 222 arena_t *arena = arenas[i]; 223 if (arena != NULL) { 224 tcache_t *tcache; 225 226 /* 227 * tcache_stats_merge() locks bins, so if any 228 * code is introduced that acquires both arena 229 * and bin locks in the opposite order, 230 * deadlocks may result. 231 */ 232 malloc_mutex_lock(&arena->lock); 233 ql_foreach(tcache, &arena->tcache_ql, link) { 234 tcache_stats_merge(tcache, arena); 235 } 236 malloc_mutex_unlock(&arena->lock); 237 } 238 } 239 } 240 je_malloc_stats_print(NULL, NULL, NULL); 241 } 242 243 /* 244 * End miscellaneous support functions. 245 */ 246 /******************************************************************************/ 247 /* 248 * Begin initialization functions. 249 */ 250 251 static unsigned 252 malloc_ncpus(void) 253 { 254 unsigned ret; 255 long result; 256 257 #ifdef _WIN32 258 SYSTEM_INFO si; 259 GetSystemInfo(&si); 260 result = si.dwNumberOfProcessors; 261 #else 262 result = sysconf(_SC_NPROCESSORS_ONLN); 263 #endif 264 if (result == -1) { 265 /* Error. */ 266 ret = 1; 267 } else { 268 ret = (unsigned)result; 269 } 270 271 return (ret); 272 } 273 274 void 275 arenas_cleanup(void *arg) 276 { 277 arena_t *arena = *(arena_t **)arg; 278 279 malloc_mutex_lock(&arenas_lock); 280 arena->nthreads--; 281 malloc_mutex_unlock(&arenas_lock); 282 } 283 284 static inline bool 285 malloc_init(void) 286 { 287 288 if (malloc_initialized == false) 289 return (malloc_init_hard()); 290 291 return (false); 292 } 293 294 static bool 295 malloc_conf_next(char const **opts_p, char const **k_p, size_t *klen_p, 296 char const **v_p, size_t *vlen_p) 297 { 298 bool accept; 299 const char *opts = *opts_p; 300 301 *k_p = opts; 302 303 for (accept = false; accept == false;) { 304 switch (*opts) { 305 case 'A': case 'B': case 'C': case 'D': case 'E': case 'F': 306 case 'G': case 'H': case 'I': case 'J': case 'K': case 'L': 307 case 'M': case 'N': case 'O': case 'P': case 'Q': case 'R': 308 case 'S': case 'T': case 'U': case 'V': case 'W': case 'X': 309 case 'Y': case 'Z': 310 case 'a': case 'b': case 'c': case 'd': case 'e': case 'f': 311 case 'g': case 'h': case 'i': case 'j': case 'k': case 'l': 312 case 'm': case 'n': case 'o': case 'p': case 'q': case 'r': 313 case 's': case 't': case 'u': case 'v': case 'w': case 'x': 314 case 'y': case 'z': 315 case '0': case '1': case '2': case '3': case '4': case '5': 316 case '6': case '7': case '8': case '9': 317 case '_': 318 opts++; 319 break; 320 case ':': 321 opts++; 322 *klen_p = (uintptr_t)opts - 1 - (uintptr_t)*k_p; 323 *v_p = opts; 324 accept = true; 325 break; 326 case '\0': 327 if (opts != *opts_p) { 328 malloc_write("<jemalloc>: Conf string ends " 329 "with key\n"); 330 } 331 return (true); 332 default: 333 malloc_write("<jemalloc>: Malformed conf string\n"); 334 return (true); 335 } 336 } 337 338 for (accept = false; accept == false;) { 339 switch (*opts) { 340 case ',': 341 opts++; 342 /* 343 * Look ahead one character here, because the next time 344 * this function is called, it will assume that end of 345 * input has been cleanly reached if no input remains, 346 * but we have optimistically already consumed the 347 * comma if one exists. 348 */ 349 if (*opts == '\0') { 350 malloc_write("<jemalloc>: Conf string ends " 351 "with comma\n"); 352 } 353 *vlen_p = (uintptr_t)opts - 1 - (uintptr_t)*v_p; 354 accept = true; 355 break; 356 case '\0': 357 *vlen_p = (uintptr_t)opts - (uintptr_t)*v_p; 358 accept = true; 359 break; 360 default: 361 opts++; 362 break; 363 } 364 } 365 366 *opts_p = opts; 367 return (false); 368 } 369 370 static void 371 malloc_conf_error(const char *msg, const char *k, size_t klen, const char *v, 372 size_t vlen) 373 { 374 375 malloc_printf("<jemalloc>: %s: %.*s:%.*s\n", msg, (int)klen, k, 376 (int)vlen, v); 377 } 378 379 static void 380 malloc_conf_init(void) 381 { 382 unsigned i; 383 char buf[PATH_MAX + 1]; 384 const char *opts, *k, *v; 385 size_t klen, vlen; 386 387 /* 388 * Automatically configure valgrind before processing options. The 389 * valgrind option remains in jemalloc 3.x for compatibility reasons. 390 */ 391 if (config_valgrind) { 392 opt_valgrind = (RUNNING_ON_VALGRIND != 0) ? true : false; 393 if (config_fill && opt_valgrind) { 394 opt_junk = false; 395 assert(opt_zero == false); 396 opt_quarantine = JEMALLOC_VALGRIND_QUARANTINE_DEFAULT; 397 opt_redzone = true; 398 } 399 if (config_tcache && opt_valgrind) 400 opt_tcache = false; 401 } 402 403 for (i = 0; i < 3; i++) { 404 /* Get runtime configuration. */ 405 switch (i) { 406 case 0: 407 if (je_malloc_conf != NULL) { 408 /* 409 * Use options that were compiled into the 410 * program. 411 */ 412 opts = je_malloc_conf; 413 } else { 414 /* No configuration specified. */ 415 buf[0] = '\0'; 416 opts = buf; 417 } 418 break; 419 case 1: { 420 #ifndef _WIN32 421 int linklen; 422 const char *linkname = 423 # ifdef JEMALLOC_PREFIX 424 "/etc/"JEMALLOC_PREFIX"malloc.conf" 425 # else 426 "/etc/malloc.conf" 427 # endif 428 ; 429 430 if ((linklen = readlink(linkname, buf, 431 sizeof(buf) - 1)) != -1) { 432 /* 433 * Use the contents of the "/etc/malloc.conf" 434 * symbolic link's name. 435 */ 436 buf[linklen] = '\0'; 437 opts = buf; 438 } else 439 #endif 440 { 441 /* No configuration specified. */ 442 buf[0] = '\0'; 443 opts = buf; 444 } 445 break; 446 } case 2: { 447 const char *envname = 448 #ifdef JEMALLOC_PREFIX 449 JEMALLOC_CPREFIX"MALLOC_CONF" 450 #else 451 "MALLOC_CONF" 452 #endif 453 ; 454 455 if (issetugid() == 0 && (opts = getenv(envname)) != 456 NULL) { 457 /* 458 * Do nothing; opts is already initialized to 459 * the value of the MALLOC_CONF environment 460 * variable. 461 */ 462 } else { 463 /* No configuration specified. */ 464 buf[0] = '\0'; 465 opts = buf; 466 } 467 break; 468 } default: 469 /* NOTREACHED */ 470 assert(false); 471 buf[0] = '\0'; 472 opts = buf; 473 } 474 475 while (*opts != '\0' && malloc_conf_next(&opts, &k, &klen, &v, 476 &vlen) == false) { 477 #define CONF_HANDLE_BOOL_HIT(o, n, hit) \ 478 if (sizeof(n)-1 == klen && strncmp(n, k, \ 479 klen) == 0) { \ 480 if (strncmp("true", v, vlen) == 0 && \ 481 vlen == sizeof("true")-1) \ 482 o = true; \ 483 else if (strncmp("false", v, vlen) == \ 484 0 && vlen == sizeof("false")-1) \ 485 o = false; \ 486 else { \ 487 malloc_conf_error( \ 488 "Invalid conf value", \ 489 k, klen, v, vlen); \ 490 } \ 491 hit = true; \ 492 } else \ 493 hit = false; 494 #define CONF_HANDLE_BOOL(o, n) { \ 495 bool hit; \ 496 CONF_HANDLE_BOOL_HIT(o, n, hit); \ 497 if (hit) \ 498 continue; \ 499 } 500 #define CONF_HANDLE_SIZE_T(o, n, min, max) \ 501 if (sizeof(n)-1 == klen && strncmp(n, k, \ 502 klen) == 0) { \ 503 uintmax_t um; \ 504 char *end; \ 505 \ 506 set_errno(0); \ 507 um = malloc_strtoumax(v, &end, 0); \ 508 if (get_errno() != 0 || (uintptr_t)end -\ 509 (uintptr_t)v != vlen) { \ 510 malloc_conf_error( \ 511 "Invalid conf value", \ 512 k, klen, v, vlen); \ 513 } else if (um < min || um > max) { \ 514 malloc_conf_error( \ 515 "Out-of-range conf value", \ 516 k, klen, v, vlen); \ 517 } else \ 518 o = um; \ 519 continue; \ 520 } 521 #define CONF_HANDLE_SSIZE_T(o, n, min, max) \ 522 if (sizeof(n)-1 == klen && strncmp(n, k, \ 523 klen) == 0) { \ 524 long l; \ 525 char *end; \ 526 \ 527 set_errno(0); \ 528 l = strtol(v, &end, 0); \ 529 if (get_errno() != 0 || (uintptr_t)end -\ 530 (uintptr_t)v != vlen) { \ 531 malloc_conf_error( \ 532 "Invalid conf value", \ 533 k, klen, v, vlen); \ 534 } else if (l < (ssize_t)min || l > \ 535 (ssize_t)max) { \ 536 malloc_conf_error( \ 537 "Out-of-range conf value", \ 538 k, klen, v, vlen); \ 539 } else \ 540 o = l; \ 541 continue; \ 542 } 543 #define CONF_HANDLE_CHAR_P(o, n, d) \ 544 if (sizeof(n)-1 == klen && strncmp(n, k, \ 545 klen) == 0) { \ 546 size_t cpylen = (vlen <= \ 547 sizeof(o)-1) ? vlen : \ 548 sizeof(o)-1; \ 549 strncpy(o, v, cpylen); \ 550 o[cpylen] = '\0'; \ 551 continue; \ 552 } 553 554 CONF_HANDLE_BOOL(opt_abort, "abort") 555 /* 556 * Chunks always require at least one header page, plus 557 * one data page in the absence of redzones, or three 558 * pages in the presence of redzones. In order to 559 * simplify options processing, fix the limit based on 560 * config_fill. 561 */ 562 CONF_HANDLE_SIZE_T(opt_lg_chunk, "lg_chunk", LG_PAGE + 563 (config_fill ? 2 : 1), (sizeof(size_t) << 3) - 1) 564 if (strncmp("dss", k, klen) == 0) { 565 int i; 566 bool match = false; 567 for (i = 0; i < dss_prec_limit; i++) { 568 if (strncmp(dss_prec_names[i], v, vlen) 569 == 0) { 570 if (chunk_dss_prec_set(i)) { 571 malloc_conf_error( 572 "Error setting dss", 573 k, klen, v, vlen); 574 } else { 575 opt_dss = 576 dss_prec_names[i]; 577 match = true; 578 break; 579 } 580 } 581 } 582 if (match == false) { 583 malloc_conf_error("Invalid conf value", 584 k, klen, v, vlen); 585 } 586 continue; 587 } 588 CONF_HANDLE_SIZE_T(opt_narenas, "narenas", 1, 589 SIZE_T_MAX) 590 CONF_HANDLE_SSIZE_T(opt_lg_dirty_mult, "lg_dirty_mult", 591 -1, (sizeof(size_t) << 3) - 1) 592 CONF_HANDLE_BOOL(opt_stats_print, "stats_print") 593 if (config_fill) { 594 CONF_HANDLE_BOOL(opt_junk, "junk") 595 CONF_HANDLE_SIZE_T(opt_quarantine, "quarantine", 596 0, SIZE_T_MAX) 597 CONF_HANDLE_BOOL(opt_redzone, "redzone") 598 CONF_HANDLE_BOOL(opt_zero, "zero") 599 } 600 if (config_utrace) { 601 CONF_HANDLE_BOOL(opt_utrace, "utrace") 602 } 603 if (config_valgrind) { 604 CONF_HANDLE_BOOL(opt_valgrind, "valgrind") 605 } 606 if (config_xmalloc) { 607 CONF_HANDLE_BOOL(opt_xmalloc, "xmalloc") 608 } 609 if (config_tcache) { 610 CONF_HANDLE_BOOL(opt_tcache, "tcache") 611 CONF_HANDLE_SSIZE_T(opt_lg_tcache_max, 612 "lg_tcache_max", -1, 613 (sizeof(size_t) << 3) - 1) 614 } 615 if (config_prof) { 616 CONF_HANDLE_BOOL(opt_prof, "prof") 617 CONF_HANDLE_CHAR_P(opt_prof_prefix, 618 "prof_prefix", "jeprof") 619 CONF_HANDLE_BOOL(opt_prof_active, "prof_active") 620 CONF_HANDLE_SSIZE_T(opt_lg_prof_sample, 621 "lg_prof_sample", 0, 622 (sizeof(uint64_t) << 3) - 1) 623 CONF_HANDLE_BOOL(opt_prof_accum, "prof_accum") 624 CONF_HANDLE_SSIZE_T(opt_lg_prof_interval, 625 "lg_prof_interval", -1, 626 (sizeof(uint64_t) << 3) - 1) 627 CONF_HANDLE_BOOL(opt_prof_gdump, "prof_gdump") 628 CONF_HANDLE_BOOL(opt_prof_final, "prof_final") 629 CONF_HANDLE_BOOL(opt_prof_leak, "prof_leak") 630 } 631 malloc_conf_error("Invalid conf pair", k, klen, v, 632 vlen); 633 #undef CONF_HANDLE_BOOL 634 #undef CONF_HANDLE_SIZE_T 635 #undef CONF_HANDLE_SSIZE_T 636 #undef CONF_HANDLE_CHAR_P 637 } 638 } 639 } 640 641 static bool 642 malloc_init_hard(void) 643 { 644 arena_t *init_arenas[1]; 645 646 malloc_mutex_lock(&init_lock); 647 if (malloc_initialized || IS_INITIALIZER) { 648 /* 649 * Another thread initialized the allocator before this one 650 * acquired init_lock, or this thread is the initializing 651 * thread, and it is recursively allocating. 652 */ 653 malloc_mutex_unlock(&init_lock); 654 return (false); 655 } 656 #ifdef JEMALLOC_THREADED_INIT 657 if (malloc_initializer != NO_INITIALIZER && IS_INITIALIZER == false) { 658 /* Busy-wait until the initializing thread completes. */ 659 do { 660 malloc_mutex_unlock(&init_lock); 661 CPU_SPINWAIT; 662 malloc_mutex_lock(&init_lock); 663 } while (malloc_initialized == false); 664 malloc_mutex_unlock(&init_lock); 665 return (false); 666 } 667 #endif 668 malloc_initializer = INITIALIZER; 669 670 malloc_tsd_boot(); 671 if (config_prof) 672 prof_boot0(); 673 674 malloc_conf_init(); 675 676 #if (!defined(JEMALLOC_MUTEX_INIT_CB) && !defined(JEMALLOC_ZONE) \ 677 && !defined(_WIN32)) 678 /* Register fork handlers. */ 679 if (pthread_atfork(jemalloc_prefork, jemalloc_postfork_parent, 680 jemalloc_postfork_child) != 0) { 681 malloc_write("<jemalloc>: Error in pthread_atfork()\n"); 682 if (opt_abort) 683 abort(); 684 } 685 #endif 686 687 if (opt_stats_print) { 688 /* Print statistics at exit. */ 689 if (atexit(stats_print_atexit) != 0) { 690 malloc_write("<jemalloc>: Error in atexit()\n"); 691 if (opt_abort) 692 abort(); 693 } 694 } 695 696 if (base_boot()) { 697 malloc_mutex_unlock(&init_lock); 698 return (true); 699 } 700 701 if (chunk_boot()) { 702 malloc_mutex_unlock(&init_lock); 703 return (true); 704 } 705 706 if (ctl_boot()) { 707 malloc_mutex_unlock(&init_lock); 708 return (true); 709 } 710 711 if (config_prof) 712 prof_boot1(); 713 714 arena_boot(); 715 716 if (config_tcache && tcache_boot0()) { 717 malloc_mutex_unlock(&init_lock); 718 return (true); 719 } 720 721 if (huge_boot()) { 722 malloc_mutex_unlock(&init_lock); 723 return (true); 724 } 725 726 if (malloc_mutex_init(&arenas_lock)) 727 return (true); 728 729 /* 730 * Create enough scaffolding to allow recursive allocation in 731 * malloc_ncpus(). 732 */ 733 narenas_total = narenas_auto = 1; 734 arenas = init_arenas; 735 memset(arenas, 0, sizeof(arena_t *) * narenas_auto); 736 737 /* 738 * Initialize one arena here. The rest are lazily created in 739 * choose_arena_hard(). 740 */ 741 arenas_extend(0); 742 if (arenas[0] == NULL) { 743 malloc_mutex_unlock(&init_lock); 744 return (true); 745 } 746 747 /* Initialize allocation counters before any allocations can occur. */ 748 if (config_stats && thread_allocated_tsd_boot()) { 749 malloc_mutex_unlock(&init_lock); 750 return (true); 751 } 752 753 if (arenas_tsd_boot()) { 754 malloc_mutex_unlock(&init_lock); 755 return (true); 756 } 757 758 if (config_tcache && tcache_boot1()) { 759 malloc_mutex_unlock(&init_lock); 760 return (true); 761 } 762 763 if (config_fill && quarantine_boot()) { 764 malloc_mutex_unlock(&init_lock); 765 return (true); 766 } 767 768 if (config_prof && prof_boot2()) { 769 malloc_mutex_unlock(&init_lock); 770 return (true); 771 } 772 773 /* Get number of CPUs. */ 774 malloc_mutex_unlock(&init_lock); 775 ncpus = malloc_ncpus(); 776 malloc_mutex_lock(&init_lock); 777 778 if (mutex_boot()) { 779 malloc_mutex_unlock(&init_lock); 780 return (true); 781 } 782 783 if (opt_narenas == 0) { 784 /* 785 * For SMP systems, create more than one arena per CPU by 786 * default. 787 */ 788 if (ncpus > 1) 789 opt_narenas = ncpus << 2; 790 else 791 opt_narenas = 1; 792 } 793 narenas_auto = opt_narenas; 794 /* 795 * Make sure that the arenas array can be allocated. In practice, this 796 * limit is enough to allow the allocator to function, but the ctl 797 * machinery will fail to allocate memory at far lower limits. 798 */ 799 if (narenas_auto > chunksize / sizeof(arena_t *)) { 800 narenas_auto = chunksize / sizeof(arena_t *); 801 malloc_printf("<jemalloc>: Reducing narenas to limit (%d)\n", 802 narenas_auto); 803 } 804 narenas_total = narenas_auto; 805 806 /* Allocate and initialize arenas. */ 807 arenas = (arena_t **)base_alloc(sizeof(arena_t *) * narenas_total); 808 if (arenas == NULL) { 809 malloc_mutex_unlock(&init_lock); 810 return (true); 811 } 812 /* 813 * Zero the array. In practice, this should always be pre-zeroed, 814 * since it was just mmap()ed, but let's be sure. 815 */ 816 memset(arenas, 0, sizeof(arena_t *) * narenas_total); 817 /* Copy the pointer to the one arena that was already initialized. */ 818 arenas[0] = init_arenas[0]; 819 820 malloc_initialized = true; 821 malloc_mutex_unlock(&init_lock); 822 return (false); 823 } 824 825 /* 826 * End initialization functions. 827 */ 828 /******************************************************************************/ 829 /* 830 * Begin malloc(3)-compatible functions. 831 */ 832 833 void * 834 je_malloc(size_t size) 835 { 836 void *ret; 837 size_t usize JEMALLOC_CC_SILENCE_INIT(0); 838 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); 839 840 if (malloc_init()) { 841 ret = NULL; 842 goto label_oom; 843 } 844 845 if (size == 0) 846 size = 1; 847 848 if (config_prof && opt_prof) { 849 usize = s2u(size); 850 PROF_ALLOC_PREP(1, usize, cnt); 851 if (cnt == NULL) { 852 ret = NULL; 853 goto label_oom; 854 } 855 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= 856 SMALL_MAXCLASS) { 857 ret = imalloc(SMALL_MAXCLASS+1); 858 if (ret != NULL) 859 arena_prof_promoted(ret, usize); 860 } else 861 ret = imalloc(size); 862 } else { 863 if (config_stats || (config_valgrind && opt_valgrind)) 864 usize = s2u(size); 865 ret = imalloc(size); 866 } 867 868 label_oom: 869 if (ret == NULL) { 870 if (config_xmalloc && opt_xmalloc) { 871 malloc_write("<jemalloc>: Error in malloc(): " 872 "out of memory\n"); 873 abort(); 874 } 875 set_errno(ENOMEM); 876 } 877 if (config_prof && opt_prof && ret != NULL) 878 prof_malloc(ret, usize, cnt); 879 if (config_stats && ret != NULL) { 880 assert(usize == isalloc(ret, config_prof)); 881 thread_allocated_tsd_get()->allocated += usize; 882 } 883 UTRACE(0, size, ret); 884 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, false); 885 return (ret); 886 } 887 888 JEMALLOC_ATTR(nonnull(1)) 889 #ifdef JEMALLOC_PROF 890 /* 891 * Avoid any uncertainty as to how many backtrace frames to ignore in 892 * PROF_ALLOC_PREP(). 893 */ 894 JEMALLOC_ATTR(noinline) 895 #endif 896 static int 897 imemalign(void **memptr, size_t alignment, size_t size, 898 size_t min_alignment) 899 { 900 int ret; 901 size_t usize; 902 void *result; 903 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); 904 905 assert(min_alignment != 0); 906 907 if (malloc_init()) 908 result = NULL; 909 else { 910 if (size == 0) 911 size = 1; 912 913 /* Make sure that alignment is a large enough power of 2. */ 914 if (((alignment - 1) & alignment) != 0 915 || (alignment < min_alignment)) { 916 if (config_xmalloc && opt_xmalloc) { 917 malloc_write("<jemalloc>: Error allocating " 918 "aligned memory: invalid alignment\n"); 919 abort(); 920 } 921 result = NULL; 922 ret = EINVAL; 923 goto label_return; 924 } 925 926 usize = sa2u(size, alignment); 927 if (usize == 0) { 928 result = NULL; 929 ret = ENOMEM; 930 goto label_return; 931 } 932 933 if (config_prof && opt_prof) { 934 PROF_ALLOC_PREP(2, usize, cnt); 935 if (cnt == NULL) { 936 result = NULL; 937 ret = EINVAL; 938 } else { 939 if (prof_promote && (uintptr_t)cnt != 940 (uintptr_t)1U && usize <= SMALL_MAXCLASS) { 941 assert(sa2u(SMALL_MAXCLASS+1, 942 alignment) != 0); 943 result = ipalloc(sa2u(SMALL_MAXCLASS+1, 944 alignment), alignment, false); 945 if (result != NULL) { 946 arena_prof_promoted(result, 947 usize); 948 } 949 } else { 950 result = ipalloc(usize, alignment, 951 false); 952 } 953 } 954 } else 955 result = ipalloc(usize, alignment, false); 956 } 957 958 if (result == NULL) { 959 if (config_xmalloc && opt_xmalloc) { 960 malloc_write("<jemalloc>: Error allocating aligned " 961 "memory: out of memory\n"); 962 abort(); 963 } 964 ret = ENOMEM; 965 goto label_return; 966 } 967 968 *memptr = result; 969 ret = 0; 970 971 label_return: 972 if (config_stats && result != NULL) { 973 assert(usize == isalloc(result, config_prof)); 974 thread_allocated_tsd_get()->allocated += usize; 975 } 976 if (config_prof && opt_prof && result != NULL) 977 prof_malloc(result, usize, cnt); 978 UTRACE(0, size, result); 979 return (ret); 980 } 981 982 int 983 je_posix_memalign(void **memptr, size_t alignment, size_t size) 984 { 985 int ret = imemalign(memptr, alignment, size, sizeof(void *)); 986 JEMALLOC_VALGRIND_MALLOC(ret == 0, *memptr, isalloc(*memptr, 987 config_prof), false); 988 return (ret); 989 } 990 991 void * 992 je_aligned_alloc(size_t alignment, size_t size) 993 { 994 void *ret; 995 int err; 996 997 if ((err = imemalign(&ret, alignment, size, 1)) != 0) { 998 ret = NULL; 999 set_errno(err); 1000 } 1001 JEMALLOC_VALGRIND_MALLOC(err == 0, ret, isalloc(ret, config_prof), 1002 false); 1003 return (ret); 1004 } 1005 1006 void * 1007 je_calloc(size_t num, size_t size) 1008 { 1009 void *ret; 1010 size_t num_size; 1011 size_t usize JEMALLOC_CC_SILENCE_INIT(0); 1012 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); 1013 1014 if (malloc_init()) { 1015 num_size = 0; 1016 ret = NULL; 1017 goto label_return; 1018 } 1019 1020 num_size = num * size; 1021 if (num_size == 0) { 1022 if (num == 0 || size == 0) 1023 num_size = 1; 1024 else { 1025 ret = NULL; 1026 goto label_return; 1027 } 1028 /* 1029 * Try to avoid division here. We know that it isn't possible to 1030 * overflow during multiplication if neither operand uses any of the 1031 * most significant half of the bits in a size_t. 1032 */ 1033 } else if (((num | size) & (SIZE_T_MAX << (sizeof(size_t) << 2))) 1034 && (num_size / size != num)) { 1035 /* size_t overflow. */ 1036 ret = NULL; 1037 goto label_return; 1038 } 1039 1040 if (config_prof && opt_prof) { 1041 usize = s2u(num_size); 1042 PROF_ALLOC_PREP(1, usize, cnt); 1043 if (cnt == NULL) { 1044 ret = NULL; 1045 goto label_return; 1046 } 1047 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize 1048 <= SMALL_MAXCLASS) { 1049 ret = icalloc(SMALL_MAXCLASS+1); 1050 if (ret != NULL) 1051 arena_prof_promoted(ret, usize); 1052 } else 1053 ret = icalloc(num_size); 1054 } else { 1055 if (config_stats || (config_valgrind && opt_valgrind)) 1056 usize = s2u(num_size); 1057 ret = icalloc(num_size); 1058 } 1059 1060 label_return: 1061 if (ret == NULL) { 1062 if (config_xmalloc && opt_xmalloc) { 1063 malloc_write("<jemalloc>: Error in calloc(): out of " 1064 "memory\n"); 1065 abort(); 1066 } 1067 set_errno(ENOMEM); 1068 } 1069 1070 if (config_prof && opt_prof && ret != NULL) 1071 prof_malloc(ret, usize, cnt); 1072 if (config_stats && ret != NULL) { 1073 assert(usize == isalloc(ret, config_prof)); 1074 thread_allocated_tsd_get()->allocated += usize; 1075 } 1076 UTRACE(0, num_size, ret); 1077 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, usize, true); 1078 return (ret); 1079 } 1080 1081 void * 1082 je_realloc(void *ptr, size_t size) 1083 { 1084 void *ret; 1085 size_t usize JEMALLOC_CC_SILENCE_INIT(0); 1086 size_t old_size = 0; 1087 size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); 1088 prof_thr_cnt_t *cnt JEMALLOC_CC_SILENCE_INIT(NULL); 1089 prof_ctx_t *old_ctx JEMALLOC_CC_SILENCE_INIT(NULL); 1090 1091 if (size == 0) { 1092 if (ptr != NULL) { 1093 /* realloc(ptr, 0) is equivalent to free(p). */ 1094 if (config_prof) { 1095 old_size = isalloc(ptr, true); 1096 if (config_valgrind && opt_valgrind) 1097 old_rzsize = p2rz(ptr); 1098 } else if (config_stats) { 1099 old_size = isalloc(ptr, false); 1100 if (config_valgrind && opt_valgrind) 1101 old_rzsize = u2rz(old_size); 1102 } else if (config_valgrind && opt_valgrind) { 1103 old_size = isalloc(ptr, false); 1104 old_rzsize = u2rz(old_size); 1105 } 1106 if (config_prof && opt_prof) { 1107 old_ctx = prof_ctx_get(ptr); 1108 cnt = NULL; 1109 } 1110 iqalloc(ptr); 1111 ret = NULL; 1112 goto label_return; 1113 } else 1114 size = 1; 1115 } 1116 1117 if (ptr != NULL) { 1118 assert(malloc_initialized || IS_INITIALIZER); 1119 1120 if (config_prof) { 1121 old_size = isalloc(ptr, true); 1122 if (config_valgrind && opt_valgrind) 1123 old_rzsize = p2rz(ptr); 1124 } else if (config_stats) { 1125 old_size = isalloc(ptr, false); 1126 if (config_valgrind && opt_valgrind) 1127 old_rzsize = u2rz(old_size); 1128 } else if (config_valgrind && opt_valgrind) { 1129 old_size = isalloc(ptr, false); 1130 old_rzsize = u2rz(old_size); 1131 } 1132 if (config_prof && opt_prof) { 1133 usize = s2u(size); 1134 old_ctx = prof_ctx_get(ptr); 1135 PROF_ALLOC_PREP(1, usize, cnt); 1136 if (cnt == NULL) { 1137 old_ctx = NULL; 1138 ret = NULL; 1139 goto label_oom; 1140 } 1141 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && 1142 usize <= SMALL_MAXCLASS) { 1143 ret = iralloc(ptr, SMALL_MAXCLASS+1, 0, 0, 1144 false, false); 1145 if (ret != NULL) 1146 arena_prof_promoted(ret, usize); 1147 else 1148 old_ctx = NULL; 1149 } else { 1150 ret = iralloc(ptr, size, 0, 0, false, false); 1151 if (ret == NULL) 1152 old_ctx = NULL; 1153 } 1154 } else { 1155 if (config_stats || (config_valgrind && opt_valgrind)) 1156 usize = s2u(size); 1157 ret = iralloc(ptr, size, 0, 0, false, false); 1158 } 1159 1160 label_oom: 1161 if (ret == NULL) { 1162 if (config_xmalloc && opt_xmalloc) { 1163 malloc_write("<jemalloc>: Error in realloc(): " 1164 "out of memory\n"); 1165 abort(); 1166 } 1167 set_errno(ENOMEM); 1168 } 1169 } else { 1170 /* realloc(NULL, size) is equivalent to malloc(size). */ 1171 if (config_prof && opt_prof) 1172 old_ctx = NULL; 1173 if (malloc_init()) { 1174 if (config_prof && opt_prof) 1175 cnt = NULL; 1176 ret = NULL; 1177 } else { 1178 if (config_prof && opt_prof) { 1179 usize = s2u(size); 1180 PROF_ALLOC_PREP(1, usize, cnt); 1181 if (cnt == NULL) 1182 ret = NULL; 1183 else { 1184 if (prof_promote && (uintptr_t)cnt != 1185 (uintptr_t)1U && usize <= 1186 SMALL_MAXCLASS) { 1187 ret = imalloc(SMALL_MAXCLASS+1); 1188 if (ret != NULL) { 1189 arena_prof_promoted(ret, 1190 usize); 1191 } 1192 } else 1193 ret = imalloc(size); 1194 } 1195 } else { 1196 if (config_stats || (config_valgrind && 1197 opt_valgrind)) 1198 usize = s2u(size); 1199 ret = imalloc(size); 1200 } 1201 } 1202 1203 if (ret == NULL) { 1204 if (config_xmalloc && opt_xmalloc) { 1205 malloc_write("<jemalloc>: Error in realloc(): " 1206 "out of memory\n"); 1207 abort(); 1208 } 1209 set_errno(ENOMEM); 1210 } 1211 } 1212 1213 label_return: 1214 if (config_prof && opt_prof) 1215 prof_realloc(ret, usize, cnt, old_size, old_ctx); 1216 if (config_stats && ret != NULL) { 1217 thread_allocated_t *ta; 1218 assert(usize == isalloc(ret, config_prof)); 1219 ta = thread_allocated_tsd_get(); 1220 ta->allocated += usize; 1221 ta->deallocated += old_size; 1222 } 1223 UTRACE(ptr, size, ret); 1224 JEMALLOC_VALGRIND_REALLOC(ret, usize, ptr, old_size, old_rzsize, false); 1225 return (ret); 1226 } 1227 1228 void 1229 je_free(void *ptr) 1230 { 1231 1232 UTRACE(ptr, 0, 0); 1233 if (ptr != NULL) { 1234 size_t usize; 1235 size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); 1236 1237 assert(malloc_initialized || IS_INITIALIZER); 1238 1239 if (config_prof && opt_prof) { 1240 usize = isalloc(ptr, config_prof); 1241 prof_free(ptr, usize); 1242 } else if (config_stats || config_valgrind) 1243 usize = isalloc(ptr, config_prof); 1244 if (config_stats) 1245 thread_allocated_tsd_get()->deallocated += usize; 1246 if (config_valgrind && opt_valgrind) 1247 rzsize = p2rz(ptr); 1248 iqalloc(ptr); 1249 JEMALLOC_VALGRIND_FREE(ptr, rzsize); 1250 } 1251 } 1252 1253 /* 1254 * End malloc(3)-compatible functions. 1255 */ 1256 /******************************************************************************/ 1257 /* 1258 * Begin non-standard override functions. 1259 */ 1260 1261 #ifdef JEMALLOC_OVERRIDE_MEMALIGN 1262 void * 1263 je_memalign(size_t alignment, size_t size) 1264 { 1265 void *ret JEMALLOC_CC_SILENCE_INIT(NULL); 1266 imemalign(&ret, alignment, size, 1); 1267 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); 1268 return (ret); 1269 } 1270 #endif 1271 1272 #ifdef JEMALLOC_OVERRIDE_VALLOC 1273 void * 1274 je_valloc(size_t size) 1275 { 1276 void *ret JEMALLOC_CC_SILENCE_INIT(NULL); 1277 imemalign(&ret, PAGE, size, 1); 1278 JEMALLOC_VALGRIND_MALLOC(ret != NULL, ret, size, false); 1279 return (ret); 1280 } 1281 #endif 1282 1283 /* 1284 * is_malloc(je_malloc) is some macro magic to detect if jemalloc_defs.h has 1285 * #define je_malloc malloc 1286 */ 1287 #define malloc_is_malloc 1 1288 #define is_malloc_(a) malloc_is_ ## a 1289 #define is_malloc(a) is_malloc_(a) 1290 1291 #if ((is_malloc(je_malloc) == 1) && defined(__GLIBC__) && !defined(__UCLIBC__)) 1292 /* 1293 * glibc provides the RTLD_DEEPBIND flag for dlopen which can make it possible 1294 * to inconsistently reference libc's malloc(3)-compatible functions 1295 * (https://bugzilla.mozilla.org/show_bug.cgi?id=493541). 1296 * 1297 * These definitions interpose hooks in glibc. The functions are actually 1298 * passed an extra argument for the caller return address, which will be 1299 * ignored. 1300 */ 1301 JEMALLOC_EXPORT void (* __free_hook)(void *ptr) = je_free; 1302 JEMALLOC_EXPORT void *(* __malloc_hook)(size_t size) = je_malloc; 1303 JEMALLOC_EXPORT void *(* __realloc_hook)(void *ptr, size_t size) = je_realloc; 1304 JEMALLOC_EXPORT void *(* __memalign_hook)(size_t alignment, size_t size) = 1305 je_memalign; 1306 #endif 1307 1308 /* 1309 * End non-standard override functions. 1310 */ 1311 /******************************************************************************/ 1312 /* 1313 * Begin non-standard functions. 1314 */ 1315 1316 size_t 1317 je_malloc_usable_size(JEMALLOC_USABLE_SIZE_CONST void *ptr) 1318 { 1319 size_t ret; 1320 1321 assert(malloc_initialized || IS_INITIALIZER); 1322 1323 if (config_ivsalloc) 1324 ret = ivsalloc(ptr, config_prof); 1325 else 1326 ret = (ptr != NULL) ? isalloc(ptr, config_prof) : 0; 1327 1328 return (ret); 1329 } 1330 1331 void 1332 je_malloc_stats_print(void (*write_cb)(void *, const char *), void *cbopaque, 1333 const char *opts) 1334 { 1335 1336 stats_print(write_cb, cbopaque, opts); 1337 } 1338 1339 int 1340 je_mallctl(const char *name, void *oldp, size_t *oldlenp, void *newp, 1341 size_t newlen) 1342 { 1343 1344 if (malloc_init()) 1345 return (EAGAIN); 1346 1347 return (ctl_byname(name, oldp, oldlenp, newp, newlen)); 1348 } 1349 1350 int 1351 je_mallctlnametomib(const char *name, size_t *mibp, size_t *miblenp) 1352 { 1353 1354 if (malloc_init()) 1355 return (EAGAIN); 1356 1357 return (ctl_nametomib(name, mibp, miblenp)); 1358 } 1359 1360 int 1361 je_mallctlbymib(const size_t *mib, size_t miblen, void *oldp, size_t *oldlenp, 1362 void *newp, size_t newlen) 1363 { 1364 1365 if (malloc_init()) 1366 return (EAGAIN); 1367 1368 return (ctl_bymib(mib, miblen, oldp, oldlenp, newp, newlen)); 1369 } 1370 1371 /* 1372 * End non-standard functions. 1373 */ 1374 /******************************************************************************/ 1375 /* 1376 * Begin experimental functions. 1377 */ 1378 #ifdef JEMALLOC_EXPERIMENTAL 1379 1380 JEMALLOC_INLINE void * 1381 iallocm(size_t usize, size_t alignment, bool zero, bool try_tcache, 1382 arena_t *arena) 1383 { 1384 1385 assert(usize == ((alignment == 0) ? s2u(usize) : sa2u(usize, 1386 alignment))); 1387 1388 if (alignment != 0) 1389 return (ipallocx(usize, alignment, zero, try_tcache, arena)); 1390 else if (zero) 1391 return (icallocx(usize, try_tcache, arena)); 1392 else 1393 return (imallocx(usize, try_tcache, arena)); 1394 } 1395 1396 int 1397 je_allocm(void **ptr, size_t *rsize, size_t size, int flags) 1398 { 1399 void *p; 1400 size_t usize; 1401 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) 1402 & (SIZE_T_MAX-1)); 1403 bool zero = flags & ALLOCM_ZERO; 1404 unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; 1405 arena_t *arena; 1406 bool try_tcache; 1407 1408 assert(ptr != NULL); 1409 assert(size != 0); 1410 1411 if (malloc_init()) 1412 goto label_oom; 1413 1414 if (arena_ind != UINT_MAX) { 1415 arena = arenas[arena_ind]; 1416 try_tcache = false; 1417 } else { 1418 arena = NULL; 1419 try_tcache = true; 1420 } 1421 1422 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); 1423 if (usize == 0) 1424 goto label_oom; 1425 1426 if (config_prof && opt_prof) { 1427 prof_thr_cnt_t *cnt; 1428 1429 PROF_ALLOC_PREP(1, usize, cnt); 1430 if (cnt == NULL) 1431 goto label_oom; 1432 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U && usize <= 1433 SMALL_MAXCLASS) { 1434 size_t usize_promoted = (alignment == 0) ? 1435 s2u(SMALL_MAXCLASS+1) : sa2u(SMALL_MAXCLASS+1, 1436 alignment); 1437 assert(usize_promoted != 0); 1438 p = iallocm(usize_promoted, alignment, zero, 1439 try_tcache, arena); 1440 if (p == NULL) 1441 goto label_oom; 1442 arena_prof_promoted(p, usize); 1443 } else { 1444 p = iallocm(usize, alignment, zero, try_tcache, arena); 1445 if (p == NULL) 1446 goto label_oom; 1447 } 1448 prof_malloc(p, usize, cnt); 1449 } else { 1450 p = iallocm(usize, alignment, zero, try_tcache, arena); 1451 if (p == NULL) 1452 goto label_oom; 1453 } 1454 if (rsize != NULL) 1455 *rsize = usize; 1456 1457 *ptr = p; 1458 if (config_stats) { 1459 assert(usize == isalloc(p, config_prof)); 1460 thread_allocated_tsd_get()->allocated += usize; 1461 } 1462 UTRACE(0, size, p); 1463 JEMALLOC_VALGRIND_MALLOC(true, p, usize, zero); 1464 return (ALLOCM_SUCCESS); 1465 label_oom: 1466 if (config_xmalloc && opt_xmalloc) { 1467 malloc_write("<jemalloc>: Error in allocm(): " 1468 "out of memory\n"); 1469 abort(); 1470 } 1471 *ptr = NULL; 1472 UTRACE(0, size, 0); 1473 return (ALLOCM_ERR_OOM); 1474 } 1475 1476 int 1477 je_rallocm(void **ptr, size_t *rsize, size_t size, size_t extra, int flags) 1478 { 1479 void *p, *q; 1480 size_t usize; 1481 size_t old_size; 1482 size_t old_rzsize JEMALLOC_CC_SILENCE_INIT(0); 1483 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) 1484 & (SIZE_T_MAX-1)); 1485 bool zero = flags & ALLOCM_ZERO; 1486 bool no_move = flags & ALLOCM_NO_MOVE; 1487 unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; 1488 bool try_tcache_alloc, try_tcache_dalloc; 1489 arena_t *arena; 1490 1491 assert(ptr != NULL); 1492 assert(*ptr != NULL); 1493 assert(size != 0); 1494 assert(SIZE_T_MAX - size >= extra); 1495 assert(malloc_initialized || IS_INITIALIZER); 1496 1497 if (arena_ind != UINT_MAX) { 1498 arena_chunk_t *chunk; 1499 try_tcache_alloc = true; 1500 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(*ptr); 1501 try_tcache_dalloc = (chunk == *ptr || chunk->arena != 1502 arenas[arena_ind]); 1503 arena = arenas[arena_ind]; 1504 } else { 1505 try_tcache_alloc = true; 1506 try_tcache_dalloc = true; 1507 arena = NULL; 1508 } 1509 1510 p = *ptr; 1511 if (config_prof && opt_prof) { 1512 prof_thr_cnt_t *cnt; 1513 1514 /* 1515 * usize isn't knowable before iralloc() returns when extra is 1516 * non-zero. Therefore, compute its maximum possible value and 1517 * use that in PROF_ALLOC_PREP() to decide whether to capture a 1518 * backtrace. prof_realloc() will use the actual usize to 1519 * decide whether to sample. 1520 */ 1521 size_t max_usize = (alignment == 0) ? s2u(size+extra) : 1522 sa2u(size+extra, alignment); 1523 prof_ctx_t *old_ctx = prof_ctx_get(p); 1524 old_size = isalloc(p, true); 1525 if (config_valgrind && opt_valgrind) 1526 old_rzsize = p2rz(p); 1527 PROF_ALLOC_PREP(1, max_usize, cnt); 1528 if (cnt == NULL) 1529 goto label_oom; 1530 /* 1531 * Use minimum usize to determine whether promotion may happen. 1532 */ 1533 if (prof_promote && (uintptr_t)cnt != (uintptr_t)1U 1534 && ((alignment == 0) ? s2u(size) : sa2u(size, alignment)) 1535 <= SMALL_MAXCLASS) { 1536 q = irallocx(p, SMALL_MAXCLASS+1, (SMALL_MAXCLASS+1 >= 1537 size+extra) ? 0 : size+extra - (SMALL_MAXCLASS+1), 1538 alignment, zero, no_move, try_tcache_alloc, 1539 try_tcache_dalloc, arena); 1540 if (q == NULL) 1541 goto label_err; 1542 if (max_usize < PAGE) { 1543 usize = max_usize; 1544 arena_prof_promoted(q, usize); 1545 } else 1546 usize = isalloc(q, config_prof); 1547 } else { 1548 q = irallocx(p, size, extra, alignment, zero, no_move, 1549 try_tcache_alloc, try_tcache_dalloc, arena); 1550 if (q == NULL) 1551 goto label_err; 1552 usize = isalloc(q, config_prof); 1553 } 1554 prof_realloc(q, usize, cnt, old_size, old_ctx); 1555 if (rsize != NULL) 1556 *rsize = usize; 1557 } else { 1558 if (config_stats) { 1559 old_size = isalloc(p, false); 1560 if (config_valgrind && opt_valgrind) 1561 old_rzsize = u2rz(old_size); 1562 } else if (config_valgrind && opt_valgrind) { 1563 old_size = isalloc(p, false); 1564 old_rzsize = u2rz(old_size); 1565 } 1566 q = irallocx(p, size, extra, alignment, zero, no_move, 1567 try_tcache_alloc, try_tcache_dalloc, arena); 1568 if (q == NULL) 1569 goto label_err; 1570 if (config_stats) 1571 usize = isalloc(q, config_prof); 1572 if (rsize != NULL) { 1573 if (config_stats == false) 1574 usize = isalloc(q, config_prof); 1575 *rsize = usize; 1576 } 1577 } 1578 1579 *ptr = q; 1580 if (config_stats) { 1581 thread_allocated_t *ta; 1582 ta = thread_allocated_tsd_get(); 1583 ta->allocated += usize; 1584 ta->deallocated += old_size; 1585 } 1586 UTRACE(p, size, q); 1587 JEMALLOC_VALGRIND_REALLOC(q, usize, p, old_size, old_rzsize, zero); 1588 return (ALLOCM_SUCCESS); 1589 label_err: 1590 if (no_move) { 1591 UTRACE(p, size, q); 1592 return (ALLOCM_ERR_NOT_MOVED); 1593 } 1594 label_oom: 1595 if (config_xmalloc && opt_xmalloc) { 1596 malloc_write("<jemalloc>: Error in rallocm(): " 1597 "out of memory\n"); 1598 abort(); 1599 } 1600 UTRACE(p, size, 0); 1601 return (ALLOCM_ERR_OOM); 1602 } 1603 1604 int 1605 je_sallocm(const void *ptr, size_t *rsize, int flags) 1606 { 1607 size_t sz; 1608 1609 assert(malloc_initialized || IS_INITIALIZER); 1610 1611 if (config_ivsalloc) 1612 sz = ivsalloc(ptr, config_prof); 1613 else { 1614 assert(ptr != NULL); 1615 sz = isalloc(ptr, config_prof); 1616 } 1617 assert(rsize != NULL); 1618 *rsize = sz; 1619 1620 return (ALLOCM_SUCCESS); 1621 } 1622 1623 int 1624 je_dallocm(void *ptr, int flags) 1625 { 1626 size_t usize; 1627 size_t rzsize JEMALLOC_CC_SILENCE_INIT(0); 1628 unsigned arena_ind = ((unsigned)(flags >> 8)) - 1; 1629 bool try_tcache; 1630 1631 assert(ptr != NULL); 1632 assert(malloc_initialized || IS_INITIALIZER); 1633 1634 if (arena_ind != UINT_MAX) { 1635 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 1636 try_tcache = (chunk == ptr || chunk->arena != 1637 arenas[arena_ind]); 1638 } else 1639 try_tcache = true; 1640 1641 UTRACE(ptr, 0, 0); 1642 if (config_stats || config_valgrind) 1643 usize = isalloc(ptr, config_prof); 1644 if (config_prof && opt_prof) { 1645 if (config_stats == false && config_valgrind == false) 1646 usize = isalloc(ptr, config_prof); 1647 prof_free(ptr, usize); 1648 } 1649 if (config_stats) 1650 thread_allocated_tsd_get()->deallocated += usize; 1651 if (config_valgrind && opt_valgrind) 1652 rzsize = p2rz(ptr); 1653 iqallocx(ptr, try_tcache); 1654 JEMALLOC_VALGRIND_FREE(ptr, rzsize); 1655 1656 return (ALLOCM_SUCCESS); 1657 } 1658 1659 int 1660 je_nallocm(size_t *rsize, size_t size, int flags) 1661 { 1662 size_t usize; 1663 size_t alignment = (ZU(1) << (flags & ALLOCM_LG_ALIGN_MASK) 1664 & (SIZE_T_MAX-1)); 1665 1666 assert(size != 0); 1667 1668 if (malloc_init()) 1669 return (ALLOCM_ERR_OOM); 1670 1671 usize = (alignment == 0) ? s2u(size) : sa2u(size, alignment); 1672 if (usize == 0) 1673 return (ALLOCM_ERR_OOM); 1674 1675 if (rsize != NULL) 1676 *rsize = usize; 1677 return (ALLOCM_SUCCESS); 1678 } 1679 1680 #endif 1681 /* 1682 * End experimental functions. 1683 */ 1684 /******************************************************************************/ 1685 /* 1686 * The following functions are used by threading libraries for protection of 1687 * malloc during fork(). 1688 */ 1689 1690 /* 1691 * If an application creates a thread before doing any allocation in the main 1692 * thread, then calls fork(2) in the main thread followed by memory allocation 1693 * in the child process, a race can occur that results in deadlock within the 1694 * child: the main thread may have forked while the created thread had 1695 * partially initialized the allocator. Ordinarily jemalloc prevents 1696 * fork/malloc races via the following functions it registers during 1697 * initialization using pthread_atfork(), but of course that does no good if 1698 * the allocator isn't fully initialized at fork time. The following library 1699 * constructor is a partial solution to this problem. It may still possible to 1700 * trigger the deadlock described above, but doing so would involve forking via 1701 * a library constructor that runs before jemalloc's runs. 1702 */ 1703 JEMALLOC_ATTR(constructor) 1704 static void 1705 jemalloc_constructor(void) 1706 { 1707 1708 malloc_init(); 1709 } 1710 1711 #ifndef JEMALLOC_MUTEX_INIT_CB 1712 void 1713 jemalloc_prefork(void) 1714 #else 1715 JEMALLOC_EXPORT void 1716 _malloc_prefork(void) 1717 #endif 1718 { 1719 unsigned i; 1720 1721 #ifdef JEMALLOC_MUTEX_INIT_CB 1722 if (malloc_initialized == false) 1723 return; 1724 #endif 1725 assert(malloc_initialized); 1726 1727 /* Acquire all mutexes in a safe order. */ 1728 ctl_prefork(); 1729 malloc_mutex_prefork(&arenas_lock); 1730 for (i = 0; i < narenas_total; i++) { 1731 if (arenas[i] != NULL) 1732 arena_prefork(arenas[i]); 1733 } 1734 prof_prefork(); 1735 chunk_prefork(); 1736 base_prefork(); 1737 huge_prefork(); 1738 } 1739 1740 #ifndef JEMALLOC_MUTEX_INIT_CB 1741 void 1742 jemalloc_postfork_parent(void) 1743 #else 1744 JEMALLOC_EXPORT void 1745 _malloc_postfork(void) 1746 #endif 1747 { 1748 unsigned i; 1749 1750 #ifdef JEMALLOC_MUTEX_INIT_CB 1751 if (malloc_initialized == false) 1752 return; 1753 #endif 1754 assert(malloc_initialized); 1755 1756 /* Release all mutexes, now that fork() has completed. */ 1757 huge_postfork_parent(); 1758 base_postfork_parent(); 1759 chunk_postfork_parent(); 1760 prof_postfork_parent(); 1761 for (i = 0; i < narenas_total; i++) { 1762 if (arenas[i] != NULL) 1763 arena_postfork_parent(arenas[i]); 1764 } 1765 malloc_mutex_postfork_parent(&arenas_lock); 1766 ctl_postfork_parent(); 1767 } 1768 1769 void 1770 jemalloc_postfork_child(void) 1771 { 1772 unsigned i; 1773 1774 assert(malloc_initialized); 1775 1776 /* Release all mutexes, now that fork() has completed. */ 1777 huge_postfork_child(); 1778 base_postfork_child(); 1779 chunk_postfork_child(); 1780 prof_postfork_child(); 1781 for (i = 0; i < narenas_total; i++) { 1782 if (arenas[i] != NULL) 1783 arena_postfork_child(arenas[i]); 1784 } 1785 malloc_mutex_postfork_child(&arenas_lock); 1786 ctl_postfork_child(); 1787 } 1788 1789 /******************************************************************************/ 1790 /* 1791 * The following functions are used for TLS allocation/deallocation in static 1792 * binaries on FreeBSD. The primary difference between these and i[mcd]alloc() 1793 * is that these avoid accessing TLS variables. 1794 */ 1795 1796 static void * 1797 a0alloc(size_t size, bool zero) 1798 { 1799 1800 if (malloc_init()) 1801 return (NULL); 1802 1803 if (size == 0) 1804 size = 1; 1805 1806 if (size <= arena_maxclass) 1807 return (arena_malloc(arenas[0], size, zero, false)); 1808 else 1809 return (huge_malloc(size, zero)); 1810 } 1811 1812 void * 1813 a0malloc(size_t size) 1814 { 1815 1816 return (a0alloc(size, false)); 1817 } 1818 1819 void * 1820 a0calloc(size_t num, size_t size) 1821 { 1822 1823 return (a0alloc(num * size, true)); 1824 } 1825 1826 void 1827 a0free(void *ptr) 1828 { 1829 arena_chunk_t *chunk; 1830 1831 if (ptr == NULL) 1832 return; 1833 1834 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 1835 if (chunk != ptr) 1836 arena_dalloc(chunk->arena, chunk, ptr, false); 1837 else 1838 huge_dalloc(ptr, true); 1839 } 1840 1841 /******************************************************************************/ 1842