#define JEMALLOC_TCACHE_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/size_classes.h"

/******************************************************************************/
/* Data. */

bool	opt_tcache = true;
ssize_t	opt_lg_tcache_max = LG_TCACHE_MAXCLASS_DEFAULT;

tcache_bin_info_t	*tcache_bin_info;
static unsigned		stack_nelms; /* Total stack elms per tcache. */

unsigned		nhbins;
size_t			tcache_maxclass;

tcaches_t		*tcaches;

/* Index of first element within tcaches that has never been used. */
static unsigned		tcaches_past;

/* Head of singly linked list tracking available tcaches elements. */
static tcaches_t	*tcaches_avail;

/* Protects tcaches{,_past,_avail}. */
static malloc_mutex_t	tcaches_mtx;

/******************************************************************************/

size_t
tcache_salloc(tsdn_t *tsdn, const void *ptr) {
	return arena_salloc(tsdn, ptr);
}

void
tcache_event_hard(tsd_t *tsd, tcache_t *tcache) {
	szind_t binind = tcache->next_gc_bin;

	tcache_bin_t *tbin;
	if (binind < NBINS) {
		tbin = tcache_small_bin_get(tcache, binind);
	} else {
		tbin = tcache_large_bin_get(tcache, binind);
	}
	if (tbin->low_water > 0) {
		/*
		 * Flush (ceiling) 3/4 of the objects below the low water mark.
		 */
		if (binind < NBINS) {
			tcache_bin_flush_small(tsd, tcache, tbin, binind,
			    tbin->ncached - tbin->low_water + (tbin->low_water
			    >> 2));
			/*
			 * Reduce fill count by 2X.  Limit lg_fill_div such that
			 * the fill count is always at least 1.
			 */
			tcache_bin_info_t *tbin_info = &tcache_bin_info[binind];
			if ((tbin_info->ncached_max >>
			    (tcache->lg_fill_div[binind] + 1)) >= 1) {
				tcache->lg_fill_div[binind]++;
			}
		} else {
			tcache_bin_flush_large(tsd, tbin, binind, tbin->ncached
			    - tbin->low_water + (tbin->low_water >> 2), tcache);
		}
	} else if (tbin->low_water < 0) {
		/*
		 * Increase fill count by 2X for small bins.  Make sure
		 * lg_fill_div stays greater than 0.
		 */
		if (binind < NBINS && tcache->lg_fill_div[binind] > 1) {
			tcache->lg_fill_div[binind]--;
		}
	}
	tbin->low_water = tbin->ncached;

	tcache->next_gc_bin++;
	if (tcache->next_gc_bin == nhbins) {
		tcache->next_gc_bin = 0;
	}
}

void *
tcache_alloc_small_hard(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, szind_t binind, bool *tcache_success) {
	void *ret;

	assert(tcache->arena != NULL);
	arena_tcache_fill_small(tsdn, arena, tcache, tbin, binind,
	    config_prof ? tcache->prof_accumbytes : 0);
	if (config_prof) {
		tcache->prof_accumbytes = 0;
	}
	ret = tcache_alloc_easy(tbin, tcache_success);

	return ret;
}

void
tcache_bin_flush_small(tsd_t *tsd, tcache_t *tcache, tcache_bin_t *tbin,
    szind_t binind, unsigned rem) {
	bool merged_stats = false;

	assert(binind < NBINS);
	assert(rem <= tbin->ncached);

	arena_t *arena = tcache->arena;
	assert(arena != NULL);
	unsigned nflush = tbin->ncached - rem;
	VARIABLE_ARRAY(extent_t *, item_extent, nflush);
	/* Look up extent once per item. */
	for (unsigned i = 0; i < nflush; i++) {
		item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
	}

	while (nflush > 0) {
		/* Lock the arena bin associated with the first object. */
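		/*
		 * Each pass flushes every queued object that belongs to the
		 * locked arena's bin; objects owned by other arenas are
		 * stashed back into the avail array and handled in a later
		 * pass.
		 */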
		extent_t *extent = item_extent[0];
		arena_t *bin_arena = extent_arena_get(extent);
		arena_bin_t *bin = &bin_arena->bins[binind];

		if (config_prof && bin_arena == arena) {
			if (arena_prof_accum(tsd_tsdn(tsd), arena,
			    tcache->prof_accumbytes)) {
				prof_idump(tsd_tsdn(tsd));
			}
			tcache->prof_accumbytes = 0;
		}

		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		if (config_stats && bin_arena == arena) {
			assert(!merged_stats);
			merged_stats = true;
			bin->stats.nflushes++;
			bin->stats.nrequests += tbin->tstats.nrequests;
			tbin->tstats.nrequests = 0;
		}
		unsigned ndeferred = 0;
		for (unsigned i = 0; i < nflush; i++) {
			void *ptr = *(tbin->avail - 1 - i);
			extent = item_extent[i];
			assert(ptr != NULL && extent != NULL);

			if (extent_arena_get(extent) == bin_arena) {
				arena_dalloc_bin_junked_locked(tsd_tsdn(tsd),
				    bin_arena, extent, ptr);
			} else {
				/*
				 * This object was allocated via a different
				 * arena bin than the one that is currently
				 * locked.  Stash the object, so that it can be
				 * handled in a future pass.
				 */
				*(tbin->avail - 1 - ndeferred) = ptr;
				item_extent[ndeferred] = extent;
				ndeferred++;
			}
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
		arena_decay_ticks(tsd_tsdn(tsd), bin_arena, nflush - ndeferred);
		nflush = ndeferred;
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_bin_t *bin = &arena->bins[binind];
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		bin->stats.nflushes++;
		bin->stats.nrequests += tbin->tstats.nrequests;
		tbin->tstats.nrequests = 0;
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
	}

	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
	    sizeof(void *));
	tbin->ncached = rem;
	if ((low_water_t)tbin->ncached < tbin->low_water) {
		tbin->low_water = tbin->ncached;
	}
}

void
tcache_bin_flush_large(tsd_t *tsd, tcache_bin_t *tbin, szind_t binind,
    unsigned rem, tcache_t *tcache) {
	bool merged_stats = false;

	assert(binind < nhbins);
	assert(rem <= tbin->ncached);

	arena_t *arena = tcache->arena;
	assert(arena != NULL);
	unsigned nflush = tbin->ncached - rem;
	VARIABLE_ARRAY(extent_t *, item_extent, nflush);
	/* Look up extent once per item. */
	for (unsigned i = 0; i < nflush; i++) {
		item_extent[i] = iealloc(tsd_tsdn(tsd), *(tbin->avail - 1 - i));
	}

	while (nflush > 0) {
		/* Lock the arena associated with the first object. */
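		/*
		 * Large objects are flushed in two steps: deallocation is
		 * prepped while the owning arena's large_mtx is held, then
		 * completed after the mutex is dropped.  Objects owned by
		 * other arenas are deferred to a later pass, as above.
		 */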
		extent_t *extent = item_extent[0];
		arena_t *locked_arena = extent_arena_get(extent);
		UNUSED bool idump;

		if (config_prof) {
			idump = false;
		}

		malloc_mutex_lock(tsd_tsdn(tsd), &locked_arena->large_mtx);
		for (unsigned i = 0; i < nflush; i++) {
			void *ptr = *(tbin->avail - 1 - i);
			assert(ptr != NULL);
			extent = item_extent[i];
			if (extent_arena_get(extent) == locked_arena) {
				large_dalloc_prep_junked_locked(tsd_tsdn(tsd),
				    extent);
			}
		}
		if ((config_prof || config_stats) && locked_arena == arena) {
			if (config_prof) {
				idump = arena_prof_accum(tsd_tsdn(tsd), arena,
				    tcache->prof_accumbytes);
				tcache->prof_accumbytes = 0;
			}
			if (config_stats) {
				merged_stats = true;
				arena_stats_large_nrequests_add(tsd_tsdn(tsd),
				    &arena->stats, binind,
				    tbin->tstats.nrequests);
				tbin->tstats.nrequests = 0;
			}
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &locked_arena->large_mtx);

		unsigned ndeferred = 0;
		for (unsigned i = 0; i < nflush; i++) {
			void *ptr = *(tbin->avail - 1 - i);
			extent = item_extent[i];
			assert(ptr != NULL && extent != NULL);

			if (extent_arena_get(extent) == locked_arena) {
				large_dalloc_finish(tsd_tsdn(tsd), extent);
			} else {
				/*
				 * This object was allocated via a different
				 * arena than the one that is currently locked.
				 * Stash the object, so that it can be handled
				 * in a future pass.
				 */
				*(tbin->avail - 1 - ndeferred) = ptr;
				item_extent[ndeferred] = extent;
				ndeferred++;
			}
		}
		if (config_prof && idump) {
			prof_idump(tsd_tsdn(tsd));
		}
		arena_decay_ticks(tsd_tsdn(tsd), locked_arena, nflush -
		    ndeferred);
		nflush = ndeferred;
	}
	if (config_stats && !merged_stats) {
		/*
		 * The flush loop didn't happen to flush to this thread's
		 * arena, so the stats didn't get merged.  Manually do so now.
		 */
		arena_stats_large_nrequests_add(tsd_tsdn(tsd), &arena->stats,
		    binind, tbin->tstats.nrequests);
		tbin->tstats.nrequests = 0;
	}

	memmove(tbin->avail - rem, tbin->avail - tbin->ncached, rem *
	    sizeof(void *));
	tbin->ncached = rem;
	if ((low_water_t)tbin->ncached < tbin->low_water) {
		tbin->low_water = tbin->ncached;
	}
}

void
tcache_arena_associate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
	assert(tcache->arena == NULL);
	tcache->arena = arena;

	if (config_stats) {
		/* Link into list of extant tcaches. */
		malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
		ql_elm_new(tcache, link);
		ql_tail_insert(&arena->tcache_ql, tcache, link);
		malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
	}
}

static void
tcache_arena_dissociate(tsdn_t *tsdn, tcache_t *tcache) {
	arena_t *arena = tcache->arena;
	assert(arena != NULL);
	if (config_stats) {
		/* Unlink from list of extant tcaches. */
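		/*
		 * Merge the remaining stats into the arena before the tcache
		 * goes away; in debug builds, first verify that the tcache is
		 * actually on the arena's list.
		 */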
		malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
		if (config_debug) {
			bool in_ql = false;
			tcache_t *iter;
			ql_foreach(iter, &arena->tcache_ql, link) {
				if (iter == tcache) {
					in_ql = true;
					break;
				}
			}
			assert(in_ql);
		}
		ql_remove(&arena->tcache_ql, tcache, link);
		tcache_stats_merge(tsdn, tcache, arena);
		malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
	}
	tcache->arena = NULL;
}

void
tcache_arena_reassociate(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
	tcache_arena_dissociate(tsdn, tcache);
	tcache_arena_associate(tsdn, tcache, arena);
}

bool
tsd_tcache_enabled_data_init(tsd_t *tsd) {
	/* Called upon tsd initialization. */
	tsd_tcache_enabled_set(tsd, opt_tcache);
	tsd_slow_update(tsd);

	if (opt_tcache) {
		/* Trigger tcache init. */
		tsd_tcache_data_init(tsd);
	}

	return false;
}

/* Initialize auto tcache (embedded in TSD). */
static void
tcache_init(tsd_t *tsd, tcache_t *tcache, void *avail_stack) {
	memset(&tcache->link, 0, sizeof(ql_elm(tcache_t)));
	tcache->prof_accumbytes = 0;
	tcache->next_gc_bin = 0;
	tcache->arena = NULL;

	ticker_init(&tcache->gc_ticker, TCACHE_GC_INCR);

	size_t stack_offset = 0;
	assert((TCACHE_NSLOTS_SMALL_MAX & 1U) == 0);
	memset(tcache->tbins_small, 0, sizeof(tcache_bin_t) * NBINS);
	memset(tcache->tbins_large, 0, sizeof(tcache_bin_t) * (nhbins - NBINS));
	unsigned i = 0;
	for (; i < NBINS; i++) {
		tcache->lg_fill_div[i] = 1;
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
		/*
		 * avail points past the available space.  Allocations will
		 * access the slots toward higher addresses (for the benefit of
		 * prefetch).
		 */
		tcache_small_bin_get(tcache, i)->avail =
		    (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
	}
	for (; i < nhbins; i++) {
		stack_offset += tcache_bin_info[i].ncached_max * sizeof(void *);
		tcache_large_bin_get(tcache, i)->avail =
		    (void **)((uintptr_t)avail_stack + (uintptr_t)stack_offset);
	}
	assert(stack_offset == stack_nelms * sizeof(void *));
}

/* Initialize auto tcache (embedded in TSD). */
bool
tsd_tcache_data_init(tsd_t *tsd) {
	tcache_t *tcache = tsd_tcachep_get_unsafe(tsd);
	assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
	size_t size = stack_nelms * sizeof(void *);
	/* Avoid false cacheline sharing. */
	size = sz_sa2u(size, CACHELINE);

	void *avail_array = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true,
	    NULL, true, arena_get(TSDN_NULL, 0, true));
	if (avail_array == NULL) {
		return true;
	}

	tcache_init(tsd, tcache, avail_array);
	/*
	 * Initialization is a bit tricky here.  After malloc init is done, all
	 * threads can rely on arena_choose and associate tcache accordingly.
	 * However, the thread that does actual malloc bootstrapping relies on
	 * functional tsd, and it can only rely on a0.  In that case, we
	 * associate its tcache to a0 temporarily, and later on
	 * arena_choose_hard() will re-associate properly.
	 */
	tcache->arena = NULL;
	arena_t *arena;
	if (!malloc_initialized()) {
		/* If in initialization, assign to a0. */
		arena = arena_get(tsd_tsdn(tsd), 0, false);
		tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
	} else {
		arena = arena_choose(tsd, NULL);
		/* This may happen if thread.tcache.enabled is used. */
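		/*
		 * In that case the arena was chosen before this tcache
		 * existed, so arena_choose() does not associate it; do so
		 * here.
		 */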
		if (tcache->arena == NULL) {
			tcache_arena_associate(tsd_tsdn(tsd), tcache, arena);
		}
	}
	assert(arena == tcache->arena);

	return false;
}

/* Create a manual tcache for the tcache.create mallctl. */
tcache_t *
tcache_create_explicit(tsd_t *tsd) {
	tcache_t *tcache;
	size_t size, stack_offset;

	size = sizeof(tcache_t);
	/* Naturally align the pointer stacks. */
	size = PTR_CEILING(size);
	stack_offset = size;
	size += stack_nelms * sizeof(void *);
	/* Avoid false cacheline sharing. */
	size = sz_sa2u(size, CACHELINE);

	tcache = ipallocztm(tsd_tsdn(tsd), size, CACHELINE, true, NULL, true,
	    arena_get(TSDN_NULL, 0, true));
	if (tcache == NULL) {
		return NULL;
	}

	tcache_init(tsd, tcache,
	    (void *)((uintptr_t)tcache + (uintptr_t)stack_offset));
	tcache_arena_associate(tsd_tsdn(tsd), tcache, arena_ichoose(tsd, NULL));

	return tcache;
}

static void
tcache_flush_cache(tsd_t *tsd, tcache_t *tcache) {
	assert(tcache->arena != NULL);

	for (unsigned i = 0; i < NBINS; i++) {
		tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
		tcache_bin_flush_small(tsd, tcache, tbin, i, 0);

		if (config_stats) {
			assert(tbin->tstats.nrequests == 0);
		}
	}
	for (unsigned i = NBINS; i < nhbins; i++) {
		tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
		tcache_bin_flush_large(tsd, tbin, i, 0, tcache);

		if (config_stats) {
			assert(tbin->tstats.nrequests == 0);
		}
	}

	if (config_prof && tcache->prof_accumbytes > 0 &&
	    arena_prof_accum(tsd_tsdn(tsd), tcache->arena,
	    tcache->prof_accumbytes)) {
		prof_idump(tsd_tsdn(tsd));
	}
}

void
tcache_flush(tsd_t *tsd) {
	assert(tcache_available(tsd));
	tcache_flush_cache(tsd, tsd_tcachep_get(tsd));
}

static void
tcache_destroy(tsd_t *tsd, tcache_t *tcache, bool tsd_tcache) {
	tcache_flush_cache(tsd, tcache);
	tcache_arena_dissociate(tsd_tsdn(tsd), tcache);

	if (tsd_tcache) {
		/* Release the avail array for the TSD embedded auto tcache. */
		void *avail_array =
		    (void *)((uintptr_t)tcache_small_bin_get(tcache, 0)->avail -
		    (uintptr_t)tcache_bin_info[0].ncached_max * sizeof(void *));
		idalloctm(tsd_tsdn(tsd), avail_array, NULL, NULL, true, true);
	} else {
		/* Release both the tcache struct and avail array. */
		idalloctm(tsd_tsdn(tsd), tcache, NULL, NULL, true, true);
	}
}

/* For auto tcache (embedded in TSD) only. */
void
tcache_cleanup(tsd_t *tsd) {
	tcache_t *tcache = tsd_tcachep_get(tsd);
	if (!tcache_available(tsd)) {
		assert(tsd_tcache_enabled_get(tsd) == false);
		if (config_debug) {
			assert(tcache_small_bin_get(tcache, 0)->avail == NULL);
		}
		return;
	}
	assert(tsd_tcache_enabled_get(tsd));
	assert(tcache_small_bin_get(tcache, 0)->avail != NULL);

	tcache_destroy(tsd, tcache, true);
	if (config_debug) {
		tcache_small_bin_get(tcache, 0)->avail = NULL;
	}
}

void
tcache_stats_merge(tsdn_t *tsdn, tcache_t *tcache, arena_t *arena) {
	unsigned i;

	cassert(config_stats);

	/* Merge and reset tcache stats. */
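	/*
	 * Small-bin request counts are folded into the per-bin stats under
	 * each bin's lock; large-bin counts go through the arena-level large
	 * stats.  Tcache counters are zeroed once merged.
	 */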
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
		malloc_mutex_lock(tsdn, &bin->lock);
		bin->stats.nrequests += tbin->tstats.nrequests;
		malloc_mutex_unlock(tsdn, &bin->lock);
		tbin->tstats.nrequests = 0;
	}

	for (; i < nhbins; i++) {
		tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
		arena_stats_large_nrequests_add(tsdn, &arena->stats, i,
		    tbin->tstats.nrequests);
		tbin->tstats.nrequests = 0;
	}
}

static bool
tcaches_create_prep(tsd_t *tsd) {
	bool err;

	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);

	if (tcaches == NULL) {
		tcaches = base_alloc(tsd_tsdn(tsd), b0get(), sizeof(tcache_t *)
		    * (MALLOCX_TCACHE_MAX+1), CACHELINE);
		if (tcaches == NULL) {
			err = true;
			goto label_return;
		}
	}

	if (tcaches_avail == NULL && tcaches_past > MALLOCX_TCACHE_MAX) {
		err = true;
		goto label_return;
	}

	err = false;
label_return:
	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
	return err;
}

bool
tcaches_create(tsd_t *tsd, unsigned *r_ind) {
	witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);

	bool err;

	if (tcaches_create_prep(tsd)) {
		err = true;
		goto label_return;
	}

	tcache_t *tcache = tcache_create_explicit(tsd);
	if (tcache == NULL) {
		err = true;
		goto label_return;
	}

	tcaches_t *elm;
	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
	if (tcaches_avail != NULL) {
		elm = tcaches_avail;
		tcaches_avail = tcaches_avail->next;
		elm->tcache = tcache;
		*r_ind = (unsigned)(elm - tcaches);
	} else {
		elm = &tcaches[tcaches_past];
		elm->tcache = tcache;
		*r_ind = tcaches_past;
		tcaches_past++;
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);

	err = false;
label_return:
	witness_assert_depth(tsdn_witness_tsdp_get(tsd_tsdn(tsd)), 0);
	return err;
}

static tcache_t *
tcaches_elm_remove(tsd_t *tsd, tcaches_t *elm) {
	malloc_mutex_assert_owner(tsd_tsdn(tsd), &tcaches_mtx);

	if (elm->tcache == NULL) {
		return NULL;
	}
	tcache_t *tcache = elm->tcache;
	elm->tcache = NULL;
	return tcache;
}

void
tcaches_flush(tsd_t *tsd, unsigned ind) {
	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
	tcache_t *tcache = tcaches_elm_remove(tsd, &tcaches[ind]);
	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
	if (tcache != NULL) {
		tcache_destroy(tsd, tcache, false);
	}
}

void
tcaches_destroy(tsd_t *tsd, unsigned ind) {
	malloc_mutex_lock(tsd_tsdn(tsd), &tcaches_mtx);
	tcaches_t *elm = &tcaches[ind];
	tcache_t *tcache = tcaches_elm_remove(tsd, elm);
	elm->next = tcaches_avail;
	tcaches_avail = elm;
	malloc_mutex_unlock(tsd_tsdn(tsd), &tcaches_mtx);
	if (tcache != NULL) {
		tcache_destroy(tsd, tcache, false);
	}
}

bool
tcache_boot(tsdn_t *tsdn) {
	/* If necessary, clamp opt_lg_tcache_max. */
	if (opt_lg_tcache_max < 0 || (ZU(1) << opt_lg_tcache_max) <
	    SMALL_MAXCLASS) {
		tcache_maxclass = SMALL_MAXCLASS;
	} else {
		tcache_maxclass = (ZU(1) << opt_lg_tcache_max);
	}

	if (malloc_mutex_init(&tcaches_mtx, "tcaches", WITNESS_RANK_TCACHES,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}

	nhbins = sz_size2index(tcache_maxclass) + 1;

	/* Initialize tcache_bin_info. */
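	/*
	 * Each small bin caches up to twice the number of regions in one slab,
	 * clamped to [TCACHE_NSLOTS_SMALL_MIN, TCACHE_NSLOTS_SMALL_MAX]; large
	 * bins get TCACHE_NSLOTS_LARGE slots.  stack_nelms accumulates the
	 * total so that per-tcache avail stacks can be sized.
	 */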
	tcache_bin_info = (tcache_bin_info_t *)base_alloc(tsdn, b0get(), nhbins
	    * sizeof(tcache_bin_info_t), CACHELINE);
	if (tcache_bin_info == NULL) {
		return true;
	}
	stack_nelms = 0;
	unsigned i;
	for (i = 0; i < NBINS; i++) {
		if ((arena_bin_info[i].nregs << 1) <= TCACHE_NSLOTS_SMALL_MIN) {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MIN;
		} else if ((arena_bin_info[i].nregs << 1) <=
		    TCACHE_NSLOTS_SMALL_MAX) {
			tcache_bin_info[i].ncached_max =
			    (arena_bin_info[i].nregs << 1);
		} else {
			tcache_bin_info[i].ncached_max =
			    TCACHE_NSLOTS_SMALL_MAX;
		}
		stack_nelms += tcache_bin_info[i].ncached_max;
	}
	for (; i < nhbins; i++) {
		tcache_bin_info[i].ncached_max = TCACHE_NSLOTS_LARGE;
		stack_nelms += tcache_bin_info[i].ncached_max;
	}

	return false;
}

void
tcache_prefork(tsdn_t *tsdn) {
	if (!config_prof && opt_tcache) {
		malloc_mutex_prefork(tsdn, &tcaches_mtx);
	}
}

void
tcache_postfork_parent(tsdn_t *tsdn) {
	if (!config_prof && opt_tcache) {
		malloc_mutex_postfork_parent(tsdn, &tcaches_mtx);
	}
}

void
tcache_postfork_child(tsdn_t *tsdn) {
	if (!config_prof && opt_tcache) {
		malloc_mutex_postfork_child(tsdn, &tcaches_mtx);
	}
}
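
/*
 * Usage sketch (illustrative only; not compiled as part of this file): a
 * rough example of how the explicit tcaches managed by tcaches_create(),
 * tcaches_flush(), and tcaches_destroy() above are typically driven from
 * application code through the mallctl interface.
 *
 *	unsigned tci;
 *	size_t sz = sizeof(tci);
 *	if (mallctl("tcache.create", &tci, &sz, NULL, 0) == 0) {
 *		void *p = mallocx(42, MALLOCX_TCACHE(tci));
 *		dallocx(p, MALLOCX_TCACHE(tci));
 *		mallctl("tcache.flush", NULL, NULL, &tci, sizeof(tci));
 *		mallctl("tcache.destroy", NULL, NULL, &tci, sizeof(tci));
 *	}
 */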