#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_preamble.h"
#include "jemalloc/internal/jemalloc_internal_includes.h"

#include "jemalloc/internal/assert.h"
#include "jemalloc/internal/extent_dss.h"
#include "jemalloc/internal/extent_mmap.h"
#include "jemalloc/internal/mutex.h"
#include "jemalloc/internal/rtree.h"
#include "jemalloc/internal/size_classes.h"
#include "jemalloc/internal/util.h"

/******************************************************************************/
/* Data. */

/*
 * Define names for both uninitialized and initialized phases, so that
 * options and mallctl processing are straightforward.
 */
const char *percpu_arena_mode_names[] = {
	"percpu",
	"phycpu",
	"disabled",
	"percpu",
	"phycpu"
};
percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT;

ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT;
ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT;

static atomic_zd_t dirty_decay_ms_default;
static atomic_zd_t muzzy_decay_ms_default;

const arena_bin_info_t arena_bin_info[NBINS] = {
#define BIN_INFO_bin_yes(reg_size, slab_size, nregs)			\
	{reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)},
#define BIN_INFO_bin_no(reg_size, slab_size, nregs)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs,		\
    lg_delta_lookup)							\
	BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta),		\
	    (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) +	\
	    (ndelta<<lg_delta)))
	SIZE_CLASSES
#undef BIN_INFO_bin_yes
#undef BIN_INFO_bin_no
#undef SC
};

const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = {
#define STEP(step, h, x, y)						\
	h,
	SMOOTHSTEP
#undef STEP
};

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena,
    arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit,
    bool is_background_thread);
static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena,
    bool is_background_thread, bool all);
static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    arena_bin_t *bin);
static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    arena_bin_t *bin);

/******************************************************************************/

static bool
arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) {
	if (config_debug) {
		for (size_t i = 0; i < sizeof(arena_stats_t); i++) {
			assert(((char *)arena_stats)[i] == 0);
		}
	}
#ifndef JEMALLOC_ATOMIC_U64
	if (malloc_mutex_init(&arena_stats->mtx, "arena_stats",
	    WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) {
		return true;
	}
#endif
	/* Memory is zeroed, so there is no need to clear stats. */
	return false;
}

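/*
 * On platforms with native 64-bit atomics (JEMALLOC_ATOMIC_U64), the counter
 * helpers below use relaxed atomics and arena_stats_lock()/unlock() are
 * no-ops; otherwise arena_stats->mtx (initialized above) serializes all
 * read-modify-write accesses to the stats.
 */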
static void
arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_lock(tsdn, &arena_stats->mtx);
#endif
}

static void
arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) {
#ifndef JEMALLOC_ATOMIC_U64
	malloc_mutex_unlock(tsdn, &arena_stats->mtx);
#endif
}

static uint64_t
arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_u64(p, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	return *p;
#endif
}

static void
arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_u64(p, x, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	*p += x;
#endif
}

UNUSED static void
arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats,
    arena_stats_u64_t *p, uint64_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED);
	assert(r - x <= r);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	*p -= x;
	assert(*p + x >= *p);
#endif
}

/*
 * Non-atomically sets *dst += src. *dst needs external synchronization.
 * This lets us avoid the cost of a fetch_add when it's unnecessary (note that
 * the types here are atomic).
 */
static void
arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) {
#ifdef JEMALLOC_ATOMIC_U64
	uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED);
	atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED);
#else
	*dst += src;
#endif
}

static size_t
arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) {
#ifdef JEMALLOC_ATOMIC_U64
	return atomic_load_zu(p, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	return atomic_load_zu(p, ATOMIC_RELAXED);
#endif
}

static void
arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
    size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	atomic_fetch_add_zu(p, x, ATOMIC_RELAXED);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
	atomic_store_zu(p, cur + x, ATOMIC_RELAXED);
#endif
}

static void
arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p,
    size_t x) {
#ifdef JEMALLOC_ATOMIC_U64
	UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED);
	assert(r - x <= r);
#else
	malloc_mutex_assert_owner(tsdn, &arena_stats->mtx);
	size_t cur = atomic_load_zu(p, ATOMIC_RELAXED);
	atomic_store_zu(p, cur - x, ATOMIC_RELAXED);
#endif
}

/* Like the _u64 variant, needs an externally synchronized *dst. */
static void
arena_stats_accum_zu(atomic_zu_t *dst, size_t src) {
	size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED);
	atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED);
}

void
arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats,
    szind_t szind, uint64_t nrequests) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind -
	    NBINS].nrequests, nrequests);
	arena_stats_unlock(tsdn, arena_stats);
}

void
arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) {
	arena_stats_lock(tsdn, arena_stats);
	arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size);
	arena_stats_unlock(tsdn, arena_stats);
}

void
arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy) {
	*nthreads += arena_nthreads_get(arena, false);
	*dss = dss_prec_names[arena_dss_prec_get(arena)];
	*dirty_decay_ms = arena_dirty_decay_ms_get(arena);
	*muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	*nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED);
	*ndirty += extents_npages_get(&arena->extents_dirty);
	*nmuzzy += extents_npages_get(&arena->extents_muzzy);
}

void
arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads,
    const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms,
    size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats,
    malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats) {
	cassert(config_stats);

	arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms,
	    muzzy_decay_ms, nactive, ndirty, nmuzzy);

	size_t base_allocated, base_resident, base_mapped;
	base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
	    &base_mapped);

	arena_stats_lock(tsdn, &arena->stats);

	arena_stats_accum_zu(&astats->mapped, base_mapped
	    + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped));
	arena_stats_accum_zu(&astats->retained,
	    extents_npages_get(&arena->extents_retained) << LG_PAGE);

	arena_stats_accum_u64(&astats->decay_dirty.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.npurge));
	arena_stats_accum_u64(&astats->decay_dirty.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.nmadvise));
	arena_stats_accum_u64(&astats->decay_dirty.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_dirty.purged));

	arena_stats_accum_u64(&astats->decay_muzzy.npurge,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.npurge));
	arena_stats_accum_u64(&astats->decay_muzzy.nmadvise,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.nmadvise));
	arena_stats_accum_u64(&astats->decay_muzzy.purged,
	    arena_stats_read_u64(tsdn, &arena->stats,
	    &arena->stats.decay_muzzy.purged));

	arena_stats_accum_zu(&astats->base, base_allocated);
	arena_stats_accum_zu(&astats->internal, arena_internal_get(arena));
	arena_stats_accum_zu(&astats->resident, base_resident +
	    (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) +
	    extents_npages_get(&arena->extents_dirty) +
	    extents_npages_get(&arena->extents_muzzy)) << LG_PAGE)));

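	/*
	 * Merge the per-size-class large allocation counters.  For each large
	 * class, nmalloc - ndalloc is the number of extents currently
	 * allocated (curlextents), and the per-class values also feed the
	 * arena-wide *_large totals.
	 */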
	for (szind_t i = 0; i < NSIZES - NBINS; i++) {
		uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nmalloc);
		arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc);
		arena_stats_accum_u64(&astats->nmalloc_large, nmalloc);

		uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].ndalloc);
		arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc);
		arena_stats_accum_u64(&astats->ndalloc_large, ndalloc);

		uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats,
		    &arena->stats.lstats[i].nrequests);
		arena_stats_accum_u64(&lstats[i].nrequests,
		    nmalloc + nrequests);
		arena_stats_accum_u64(&astats->nrequests_large,
		    nmalloc + nrequests);

		assert(nmalloc >= ndalloc);
		assert(nmalloc - ndalloc <= SIZE_T_MAX);
		size_t curlextents = (size_t)(nmalloc - ndalloc);
		lstats[i].curlextents += curlextents;
		arena_stats_accum_zu(&astats->allocated_large,
		    curlextents * sz_index2size(NBINS + i));
	}

	arena_stats_unlock(tsdn, &arena->stats);

	/* tcache_bytes counts currently cached bytes. */
	atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED);
	malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
	tcache_t *tcache;
	ql_foreach(tcache, &arena->tcache_ql, link) {
		szind_t i = 0;
		for (; i < NBINS; i++) {
			tcache_bin_t *tbin = tcache_small_bin_get(tcache, i);
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		}
		for (; i < nhbins; i++) {
			tcache_bin_t *tbin = tcache_large_bin_get(tcache, i);
			arena_stats_accum_zu(&astats->tcache_bytes,
			    tbin->ncached * sz_index2size(i));
		}
	}
	malloc_mutex_prof_read(tsdn,
	    &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
	    &arena->tcache_ql_mtx);
	malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);

#define READ_ARENA_MUTEX_PROF_DATA(mtx, ind)				\
    malloc_mutex_lock(tsdn, &arena->mtx);				\
    malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind],		\
        &arena->mtx);							\
    malloc_mutex_unlock(tsdn, &arena->mtx);

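	/*
	 * Each invocation below briefly acquires the named arena mutex so
	 * that its contention data can be snapshotted into
	 * astats->mutex_prof_data.
	 */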
	/* Gather per arena mutex profiling data. */
	READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large);
	READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx,
	    arena_prof_mutex_extent_avail)
	READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx,
	    arena_prof_mutex_extents_dirty)
	READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx,
	    arena_prof_mutex_extents_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx,
	    arena_prof_mutex_extents_retained)
	READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx,
	    arena_prof_mutex_decay_dirty)
	READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx,
	    arena_prof_mutex_decay_muzzy)
	READ_ARENA_MUTEX_PROF_DATA(base->mtx,
	    arena_prof_mutex_base)
#undef READ_ARENA_MUTEX_PROF_DATA

	nstime_copy(&astats->uptime, &arena->create_time);
	nstime_update(&astats->uptime);
	nstime_subtract(&astats->uptime, &arena->create_time);

	for (szind_t i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		malloc_mutex_lock(tsdn, &bin->lock);
		malloc_mutex_prof_read(tsdn, &bstats[i].mutex_data, &bin->lock);
		bstats[i].nmalloc += bin->stats.nmalloc;
		bstats[i].ndalloc += bin->stats.ndalloc;
		bstats[i].nrequests += bin->stats.nrequests;
		bstats[i].curregs += bin->stats.curregs;
		bstats[i].nfills += bin->stats.nfills;
		bstats[i].nflushes += bin->stats.nflushes;
		bstats[i].nslabs += bin->stats.nslabs;
		bstats[i].reslabs += bin->stats.reslabs;
		bstats[i].curslabs += bin->stats.curslabs;
		malloc_mutex_unlock(tsdn, &bin->lock);
	}
}

void
arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extent_t *extent) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty,
	    extent);
	if (arena_dirty_decay_ms_get(arena) == 0) {
		arena_decay_dirty(tsdn, arena, false, true);
	} else {
		arena_background_thread_inactivity_check(tsdn, arena, false);
	}
}

static void *
arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab,
    const arena_bin_info_t *bin_info) {
	void *ret;
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	size_t regind;

	assert(extent_nfree_get(slab) > 0);
	assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));

	regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
	ret = (void *)((uintptr_t)extent_addr_get(slab) +
	    (uintptr_t)(bin_info->reg_size * regind));
	extent_nfree_dec(slab);
	return ret;
}

#ifndef JEMALLOC_JET
static
#endif
size_t
arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) {
	size_t diff, regind;

	/* Freeing a pointer outside the slab can cause assertion failure. */
	assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab));
	assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab));
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) %
	    (uintptr_t)arena_bin_info[binind].reg_size == 0);

	/* Avoid doing division with a variable divisor. */
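	/*
	 * The switch below dispatches on binind so that each case divides by
	 * a compile-time-constant reg_size, which the compiler can
	 * strength-reduce to a multiply/shift instead of a hardware divide.
	 */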
	diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab));
	switch (binind) {
#define REGIND_bin_yes(index, reg_size)					\
	case index:							\
		regind = diff / (reg_size);				\
		assert(diff == regind * (reg_size));			\
		break;
#define REGIND_bin_no(index, reg_size)
#define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs,		\
    lg_delta_lookup)							\
	REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta<<lg_delta))
	SIZE_CLASSES
#undef REGIND_bin_yes
#undef REGIND_bin_no
#undef SC
	default: not_reached();
	}

	assert(regind < arena_bin_info[binind].nregs);

	return regind;
}

static void
arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab,
    arena_slab_data_t *slab_data, void *ptr) {
	szind_t binind = extent_szind_get(slab);
	const arena_bin_info_t *bin_info = &arena_bin_info[binind];
	size_t regind = arena_slab_regind(slab, binind, ptr);

	assert(extent_nfree_get(slab) < bin_info->nregs);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind);
	extent_nfree_inc(slab);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages) {
	atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED);
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages) {
	assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages);
	atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED);
}

static void
arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < LARGE_MINCLASS) {
		usize = LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
	hindex = (index >= NBINS) ? index - NBINS : 0;

	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].nmalloc, 1);
}

static void
arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) {
	szind_t index, hindex;

	cassert(config_stats);

	if (usize < LARGE_MINCLASS) {
		usize = LARGE_MINCLASS;
	}
	index = sz_size2index(usize);
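	/*
	 * lstats is indexed by large size class offset; usize was clamped to
	 * LARGE_MINCLASS above, so anything smaller is attributed to the
	 * first large class.
	 */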
	hindex = (index >= NBINS) ? index - NBINS : 0;

	arena_stats_add_u64(tsdn, &arena->stats,
	    &arena->stats.lstats[hindex].ndalloc, 1);
}

static void
arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize,
    size_t usize) {
	arena_large_dalloc_stats_update(tsdn, arena, oldusize);
	arena_large_malloc_stats_update(tsdn, arena, usize);
}

extent_t *
arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero) {
	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	szind_t szind = sz_size2index(usize);
	size_t mapped_add;
	bool commit = true;
	extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false,
	    szind, zero, &commit);
	if (extent == NULL) {
		extent = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment,
		    false, szind, zero, &commit);
	}
	size_t size = usize + sz_large_pad;
	if (extent == NULL) {
		extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL,
		    usize, sz_large_pad, alignment, false, szind, zero,
		    &commit);
		if (config_stats) {
			/*
			 * extent may be NULL on OOM, but in that case
			 * mapped_add isn't used below, so there's no need to
			 * conditionally set it to 0 here.
			 */
			mapped_add = size;
		}
	} else if (config_stats) {
		mapped_add = 0;
	}

	if (extent != NULL) {
		if (config_stats) {
			arena_stats_lock(tsdn, &arena->stats);
			arena_large_malloc_stats_update(tsdn, arena, usize);
			if (mapped_add != 0) {
				arena_stats_add_zu(tsdn, &arena->stats,
				    &arena->stats.mapped, mapped_add);
			}
			arena_stats_unlock(tsdn, &arena->stats);
		}
		arena_nactive_add(arena, size >> LG_PAGE);
	}

	return extent;
}

void
arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) {
	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_dalloc_stats_update(tsdn, arena,
		    extent_usize_get(extent));
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE);
}

void
arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
	size_t usize = extent_usize_get(extent);
	size_t udiff = oldusize - usize;

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);
}

void
arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    size_t oldusize) {
	size_t usize = extent_usize_get(extent);
	size_t udiff = usize - oldusize;

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize);
		arena_stats_unlock(tsdn, &arena->stats);
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);
}

static ssize_t
arena_decay_ms_read(arena_decay_t *decay) {
	return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED);
}

static void
arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) {
	atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED);
}

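/*
 * Decay bookkeeping: the configured decay time is split into
 * SMOOTHSTEP_NSTEPS epochs of equal length (decay->interval).  At each epoch
 * advance, the number of pages that became unused since the previous advance
 * is appended to a backlog, and the allowed number of unpurged pages is the
 * smoothstep-weighted sum of that backlog
 * (arena_decay_backlog_npages_limit()).  The helpers below maintain the epoch
 * deadline and backlog, and purge down to the computed limit.
 */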
static void
arena_decay_deadline_init(arena_decay_t *decay) {
	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&decay->deadline, &decay->epoch);
	nstime_add(&decay->deadline, &decay->interval);
	if (arena_decay_ms_read(decay) > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range_u64(&decay->jitter_state,
		    nstime_ns(&decay->interval)));
		nstime_add(&decay->deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) {
	return (nstime_compare(&decay->deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_decay_t *decay) {
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) {
		sum += decay->backlog[i] * h_steps[i];
	}
	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);

	return npages_limit_backlog;
}

static void
arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) {
	size_t npages_delta = (current_npages > decay->nunpurged) ?
	    current_npages - decay->nunpurged : 0;
	decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta;

	if (config_debug) {
		if (current_npages > decay->ceil_npages) {
			decay->ceil_npages = current_npages;
		}
		size_t npages_limit = arena_decay_backlog_npages_limit(decay);
		assert(decay->ceil_npages >= npages_limit);
		if (decay->ceil_npages > npages_limit) {
			decay->ceil_npages = npages_limit;
		}
	}
}

static void
arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64,
    size_t current_npages) {
	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
		memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		size_t nadvance_z = (size_t)nadvance_u64;

		assert((uint64_t)nadvance_z == nadvance_u64);

		memmove(decay->backlog, &decay->backlog[nadvance_z],
		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
		if (nadvance_z > 1) {
			memset(&decay->backlog[SMOOTHSTEP_NSTEPS -
			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
		}
	}

	arena_decay_backlog_update_last(decay, current_npages);
}

static void
arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, size_t current_npages, size_t npages_limit,
    bool is_background_thread) {
	if (current_npages > npages_limit) {
		arena_decay_to_limit(tsdn, arena, decay, extents, false,
		    npages_limit, is_background_thread);
	}
}

static void
arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time,
    size_t current_npages) {
	assert(arena_decay_deadline_reached(decay, time));

	nstime_t delta;
	nstime_copy(&delta, time);
	nstime_subtract(&delta, &decay->epoch);

	uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval);
	assert(nadvance_u64 > 0);

	/* Add nadvance_u64 decay intervals to epoch. */
	nstime_copy(&delta, &decay->interval);
	nstime_imultiply(&delta, nadvance_u64);
	nstime_add(&decay->epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(decay);

	/* Update the backlog. */
	arena_decay_backlog_update(decay, nadvance_u64, current_npages);
}

static void
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, const nstime_t *time, bool is_background_thread) {
	size_t current_npages = extents_npages_get(extents);
	arena_decay_epoch_advance_helper(decay, time, current_npages);

	size_t npages_limit = arena_decay_backlog_npages_limit(decay);
	/* We may unlock decay->mtx when try_purge(). Finish logging first. */
	decay->nunpurged = (npages_limit > current_npages) ? npages_limit :
	    current_npages;

	if (!background_thread_enabled() || is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    current_npages, npages_limit, is_background_thread);
	}
}

static void
arena_decay_reinit(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms) {
	arena_decay_ms_write(decay, decay_ms);
	if (decay_ms > 0) {
		nstime_init(&decay->interval, (uint64_t)decay_ms *
		    KQU(1000000));
		nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&decay->epoch, 0);
	nstime_update(&decay->epoch);
	decay->jitter_state = (uint64_t)(uintptr_t)decay;
	arena_decay_deadline_init(decay);
	decay->nunpurged = 0;
	memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_init(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms,
    decay_stats_t *stats) {
	if (config_debug) {
		for (size_t i = 0; i < sizeof(arena_decay_t); i++) {
			assert(((char *)decay)[i] == 0);
		}
		decay->ceil_npages = 0;
	}
	if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY,
	    malloc_mutex_rank_exclusive)) {
		return true;
	}
	decay->purging = false;
	arena_decay_reinit(decay, extents, decay_ms);
	/* Memory is zeroed, so there is no need to clear stats. */
	if (config_stats) {
		decay->stats = stats;
	}
	return false;
}

static bool
arena_decay_ms_valid(ssize_t decay_ms) {
	if (decay_ms < -1) {
		return false;
	}
	if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX *
	    KQU(1000)) {
		return true;
	}
	return false;
}

static bool
arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread) {
	malloc_mutex_assert_owner(tsdn, &decay->mtx);

	/* Purge all or nothing if the option is disabled. */
	ssize_t decay_ms = arena_decay_ms_read(decay);
	if (decay_ms <= 0) {
		if (decay_ms == 0) {
			arena_decay_to_limit(tsdn, arena, decay, extents, false,
			    0, is_background_thread);
		}
		return false;
	}

	nstime_t time;
	nstime_init(&time, 0);
	nstime_update(&time);
	if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time)
	    > 0)) {
		/*
		 * Time went backwards.  Move the epoch back in time and
		 * generate a new deadline, with the expectation that time
		 * typically flows forward for long enough periods of time that
		 * epochs complete.  Unfortunately, this strategy is susceptible
		 * to clock jitter triggering premature epoch advances, but
		 * clock jitter estimation and compensation isn't feasible here
		 * because calls into this code are event-driven.
		 */
		nstime_copy(&decay->epoch, &time);
		arena_decay_deadline_init(decay);
	} else {
		/* Verify that time does not go backwards. */
		assert(nstime_compare(&decay->epoch, &time) <= 0);
	}

	/*
	 * If the deadline has been reached, advance to the current epoch and
	 * purge to the new limit if necessary.  Note that dirty pages created
	 * during the current epoch are not subject to purge until a future
	 * epoch, so as a result purging only happens during epoch advances,
	 * or when triggered by background threads (as a scheduled event).
	 */
	bool advance_epoch = arena_decay_deadline_reached(decay, &time);
	if (advance_epoch) {
		arena_decay_epoch_advance(tsdn, arena, decay, extents, &time,
		    is_background_thread);
	} else if (is_background_thread) {
		arena_decay_try_purge(tsdn, arena, decay, extents,
		    extents_npages_get(extents),
		    arena_decay_backlog_npages_limit(decay),
		    is_background_thread);
	}

	return advance_epoch;
}

static ssize_t
arena_decay_ms_get(arena_decay_t *decay) {
	return arena_decay_ms_read(decay);
}

ssize_t
arena_dirty_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_dirty);
}

ssize_t
arena_muzzy_decay_ms_get(arena_t *arena) {
	return arena_decay_ms_get(&arena->decay_muzzy);
}

static bool
arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}

	malloc_mutex_lock(tsdn, &decay->mtx);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_ms changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_reinit(decay, extents, decay_ms);
	arena_maybe_decay(tsdn, arena, decay, extents, false);
	malloc_mutex_unlock(tsdn, &decay->mtx);

	return false;
}

bool
arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
	return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, decay_ms);
}

bool
arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena,
    ssize_t decay_ms) {
	return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, decay_ms);
}

static size_t
arena_stash_decayed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit,
    extent_list_t *decay_extents) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	/* Stash extents according to npages_limit. */
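	/*
	 * extents_evict() keeps returning extents while the extents set holds
	 * more than npages_limit pages; each evicted extent is appended to
	 * decay_extents and its pages counted toward the total to purge.
	 */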
	size_t nstashed = 0;
	extent_t *extent;
	while ((extent = extents_evict(tsdn, arena, r_extent_hooks, extents,
	    npages_limit)) != NULL) {
		extent_list_append(decay_extents, extent);
		nstashed += extent_size_get(extent) >> LG_PAGE;
	}
	return nstashed;
}

static size_t
arena_decay_stashed(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents,
    bool all, extent_list_t *decay_extents, bool is_background_thread) {
	UNUSED size_t nmadvise, nunmapped;
	size_t npurged;

	if (config_stats) {
		nmadvise = 0;
		nunmapped = 0;
	}
	npurged = 0;

	ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena);
	for (extent_t *extent = extent_list_first(decay_extents); extent !=
	    NULL; extent = extent_list_first(decay_extents)) {
		if (config_stats) {
			nmadvise++;
		}
		size_t npages = extent_size_get(extent) >> LG_PAGE;
		npurged += npages;
		extent_list_remove(decay_extents, extent);
		switch (extents_state_get(extents)) {
		case extent_state_active:
			not_reached();
		case extent_state_dirty:
			if (!all && muzzy_decay_ms != 0 &&
			    !extent_purge_lazy_wrapper(tsdn, arena,
			    r_extent_hooks, extent, 0,
			    extent_size_get(extent))) {
				extents_dalloc(tsdn, arena, r_extent_hooks,
				    &arena->extents_muzzy, extent);
				arena_background_thread_inactivity_check(tsdn,
				    arena, is_background_thread);
				break;
			}
			/* Fall through. */
		case extent_state_muzzy:
			extent_dalloc_wrapper(tsdn, arena, r_extent_hooks,
			    extent);
			if (config_stats) {
				nunmapped += npages;
			}
			break;
		case extent_state_retained:
		default:
			not_reached();
		}
	}

	if (config_stats) {
		arena_stats_lock(tsdn, &arena->stats);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge,
		    1);
		arena_stats_add_u64(tsdn, &arena->stats,
		    &decay->stats->nmadvise, nmadvise);
		arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged,
		    npurged);
		arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped,
		    nunmapped << LG_PAGE);
		arena_stats_unlock(tsdn, &arena->stats);
	}

	return npurged;
}

/*
 * npages_limit: Decay as many dirty extents as possible without violating the
 * invariant: (extents_npages_get(extents) >= npages_limit)
 */
static void
arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool all, size_t npages_limit,
    bool is_background_thread) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 1);
	malloc_mutex_assert_owner(tsdn, &decay->mtx);

	if (decay->purging) {
		return;
	}
	decay->purging = true;
	malloc_mutex_unlock(tsdn, &decay->mtx);

	extent_hooks_t *extent_hooks = extent_hooks_get(arena);

	extent_list_t decay_extents;
	extent_list_init(&decay_extents);

	size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents,
	    npages_limit, &decay_extents);
	if (npurge != 0) {
		UNUSED size_t npurged = arena_decay_stashed(tsdn, arena,
		    &extent_hooks, decay, extents, all, &decay_extents,
		    is_background_thread);
		assert(npurged == npurge);
	}

	malloc_mutex_lock(tsdn, &decay->mtx);
	decay->purging = false;
}

static bool
arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay,
    extents_t *extents, bool is_background_thread, bool all) {
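	/*
	 * all=true forces an immediate full purge (npages_limit of zero)
	 * under decay->mtx.  Otherwise the mutex is only trylock'd, so
	 * callers never block behind a decay pass that is already in
	 * progress.
	 */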
	if (all) {
		malloc_mutex_lock(tsdn, &decay->mtx);
		arena_decay_to_limit(tsdn, arena, decay, extents, all, 0,
		    is_background_thread);
		malloc_mutex_unlock(tsdn, &decay->mtx);

		return false;
	}

	if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
		/* No need to wait if another thread is in progress. */
		return true;
	}

	bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents,
	    is_background_thread);
	size_t npages_new;
	if (epoch_advanced) {
		/* Backlog is updated on epoch advance. */
		npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1];
	}
	malloc_mutex_unlock(tsdn, &decay->mtx);

	if (have_background_thread && background_thread_enabled() &&
	    epoch_advanced && !is_background_thread) {
		background_thread_interval_check(tsdn, arena, decay,
		    npages_new);
	}

	return false;
}

static bool
arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
	return arena_decay_impl(tsdn, arena, &arena->decay_dirty,
	    &arena->extents_dirty, is_background_thread, all);
}

static bool
arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread,
    bool all) {
	return arena_decay_impl(tsdn, arena, &arena->decay_muzzy,
	    &arena->extents_muzzy, is_background_thread, all);
}

void
arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) {
	if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) {
		return;
	}
	arena_decay_muzzy(tsdn, arena, is_background_thread, all);
}

static void
arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) {
	arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE);

	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab);
}

static void
arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) > 0);
	extent_heap_insert(&bin->slabs_nonfull, slab);
}

static void
arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab) {
	extent_heap_remove(&bin->slabs_nonfull, slab);
}

static extent_t *
arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) {
	extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull);
	if (slab == NULL) {
		return NULL;
	}
	if (config_stats) {
		bin->stats.reslabs++;
	}
	return slab;
}

static void
arena_bin_slabs_full_insert(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
	assert(extent_nfree_get(slab) == 0);
	/*
	 * Tracking extents is required by arena_reset, which is not allowed
	 * for auto arenas.  Bypass this step to avoid touching the extent
	 * linkage (often results in cache misses) for auto arenas.
	 */
	if (arena_is_auto(arena)) {
		return;
	}
	extent_list_append(&bin->slabs_full, slab);
}

static void
arena_bin_slabs_full_remove(arena_t *arena, arena_bin_t *bin, extent_t *slab) {
	if (arena_is_auto(arena)) {
		return;
	}
	extent_list_remove(&bin->slabs_full, slab);
}

void
arena_reset(tsd_t *tsd, arena_t *arena) {
	/*
	 * Locking in this function is unintuitive.  The caller guarantees that
	 * no concurrent operations are happening in this arena, but there are
	 * still reasons that some locking is necessary:
	 *
	 * - Some of the functions in the transitive closure of calls assume
	 *   appropriate locks are held, and in some cases these locks are
	 *   temporarily dropped to avoid lock order reversal or deadlock due
	 *   to reentry.
	 * - mallctl("epoch", ...) may concurrently refresh stats.  While
	 *   strictly speaking this is a "concurrent operation", disallowing
	 *   stats refreshes would impose an inconvenient burden.
	 */

	/* Large allocations. */
	malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);

	for (extent_t *extent = extent_list_first(&arena->large); extent !=
	    NULL; extent = extent_list_first(&arena->large)) {
		void *ptr = extent_base_get(extent);
		size_t usize;

		malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
		alloc_ctx_t alloc_ctx;
		rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd);
		rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx,
		    (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab);
		assert(alloc_ctx.szind != NSIZES);

		if (config_stats || (config_prof && opt_prof)) {
			usize = sz_index2size(alloc_ctx.szind);
			assert(usize == isalloc(tsd_tsdn(tsd), ptr));
		}
		/* Remove large allocation from prof sample set. */
		if (config_prof && opt_prof) {
			prof_free(tsd, ptr, usize, &alloc_ctx);
		}
		large_dalloc(tsd_tsdn(tsd), extent);
		malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
	}
	malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);

	/* Bins. */
	for (unsigned i = 0; i < NBINS; i++) {
		extent_t *slab;
		arena_bin_t *bin = &arena->bins[i];
		malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		if (bin->slabcur != NULL) {
			slab = bin->slabcur;
			bin->slabcur = NULL;
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) !=
		    NULL) {
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		for (slab = extent_list_first(&bin->slabs_full); slab != NULL;
		    slab = extent_list_first(&bin->slabs_full)) {
			arena_bin_slabs_full_remove(arena, bin, slab);
			malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
			arena_slab_dalloc(tsd_tsdn(tsd), arena, slab);
			malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
		}
		if (config_stats) {
			bin->stats.curregs = 0;
			bin->stats.curslabs = 0;
		}
		malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
	}

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);
}

static void
arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) {
	/*
	 * Iterate over the retained extents and destroy them.  This gives the
	 * extent allocator underlying the extent hooks an opportunity to unmap
	 * all retained memory without having to keep its own metadata
	 * structures.  In practice, virtual memory for dss-allocated extents
	 * is leaked here, so best practice is to avoid dss for arenas to be
	 * destroyed, or provide custom extent hooks that track retained
	 * dss-based extents for later reuse.
	 */
	extent_hooks_t *extent_hooks = extent_hooks_get(arena);
	extent_t *extent;
	while ((extent = extents_evict(tsdn, arena, &extent_hooks,
	    &arena->extents_retained, 0)) != NULL) {
		extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent);
	}
}

void
arena_destroy(tsd_t *tsd, arena_t *arena) {
	assert(base_ind_get(arena->base) >= narenas_auto);
	assert(arena_nthreads_get(arena, false) == 0);
	assert(arena_nthreads_get(arena, true) == 0);

	/*
	 * No allocations have occurred since arena_reset() was called.
	 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached
	 * extents, so only retained extents may remain.
	 */
	assert(extents_npages_get(&arena->extents_dirty) == 0);
	assert(extents_npages_get(&arena->extents_muzzy) == 0);

	/* Deallocate retained memory. */
	arena_destroy_retained(tsd_tsdn(tsd), arena);

	/*
	 * Remove the arena pointer from the arenas array.  We rely on the fact
	 * that there is no way for the application to get a dirty read from
	 * the arenas array unless there is an inherent race in the application
	 * involving access of an arena being concurrently destroyed.  The
	 * application must synchronize knowledge of the arena's validity, so
	 * as long as we use an atomic write to update the arenas array, the
	 * application will get a clean read any time after it synchronizes
	 * knowledge that the arena is no longer valid.
	 */
	arena_set(base_ind_get(arena->base), NULL);

	/*
	 * Destroy the base allocator, which manages all metadata ever mapped
	 * by this arena.
	 */
	base_delete(tsd_tsdn(tsd), arena->base);
}

static extent_t *
arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena,
    extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info,
    szind_t szind) {
	extent_t *slab;
	bool zero, commit;

	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	zero = false;
	commit = true;
	slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL,
	    bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit);

	if (config_stats && slab != NULL) {
		arena_stats_mapped_add(tsdn, &arena->stats,
		    bin_info->slab_size);
	}

	return slab;
}

static extent_t *
arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind,
    const arena_bin_info_t *bin_info) {
	witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn),
	    WITNESS_RANK_CORE, 0);

	extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER;
	szind_t szind = sz_size2index(bin_info->reg_size);
	bool zero = false;
	bool commit = true;
	extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks,
	    &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true,
	    binind, &zero, &commit);
	if (slab == NULL) {
		slab = extents_alloc(tsdn, arena, &extent_hooks,
		    &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE,
		    true, binind, &zero, &commit);
	}
	if (slab == NULL) {
		slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks,
		    bin_info, szind);
		if (slab == NULL) {
			return NULL;
		}
	}
	assert(extent_slab_get(slab));

	/* Initialize slab internals. */
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	extent_nfree_set(slab, bin_info->nregs);
	bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);

	arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE);

	return slab;
}

static extent_t *
arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
    szind_t binind) {
	extent_t *slab;
	const arena_bin_info_t *bin_info;

	/* Look for a usable slab. */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	if (slab != NULL) {
		return slab;
	}
	/* No existing slabs have any space available. */

	bin_info = &arena_bin_info[binind];

	/* Allocate a new slab. */
	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	slab = arena_slab_alloc(tsdn, arena, binind, bin_info);
	/********************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (slab != NULL) {
		if (config_stats) {
			bin->stats.nslabs++;
			bin->stats.curslabs++;
		}
		return slab;
	}

	/*
	 * arena_slab_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped bin->lock above,
	 * so search one more time.
	 */
	slab = arena_bin_slabs_nonfull_tryget(bin);
	if (slab != NULL) {
		return slab;
	}

	return NULL;
}

/* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */
static void *
arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin,
    szind_t binind) {
	const arena_bin_info_t *bin_info;
	extent_t *slab;

	bin_info = &arena_bin_info[binind];
	if (!arena_is_auto(arena) && bin->slabcur != NULL) {
		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
	}
	slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind);
	if (bin->slabcur != NULL) {
		/*
		 * Another thread updated slabcur while this one ran without
		 * the bin lock in arena_bin_nonfull_slab_get().
		 */
		if (extent_nfree_get(bin->slabcur) > 0) {
			void *ret = arena_slab_reg_alloc(tsdn, bin->slabcur,
			    bin_info);
			if (slab != NULL) {
				/*
				 * arena_slab_alloc() may have allocated slab,
				 * or it may have been pulled from
				 * slabs_nonfull.  Therefore it is unsafe to
				 * make any assumptions about how slab has
				 * previously been used, and
				 * arena_bin_lower_slab() must be called, as if
				 * a region were just deallocated from the
				 * slab.
				 */
				if (extent_nfree_get(slab) == bin_info->nregs) {
					arena_dalloc_bin_slab(tsdn, arena, slab,
					    bin);
				} else {
					arena_bin_lower_slab(tsdn, arena, slab,
					    bin);
				}
			}
			return ret;
		}

		arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		bin->slabcur = NULL;
	}

	if (slab == NULL) {
		return NULL;
	}
	bin->slabcur = slab;

	assert(extent_nfree_get(bin->slabcur) > 0);

	return arena_slab_reg_alloc(tsdn, slab, bin_info);
}

void
arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache,
    tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) {
	unsigned i, nfill;
	arena_bin_t *bin;

	assert(tbin->ncached == 0);

	if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) {
		prof_idump(tsdn);
	}
	bin = &arena->bins[binind];
	malloc_mutex_lock(tsdn, &bin->lock);
	for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >>
	    tcache->lg_fill_div[binind]); i < nfill; i++) {
		extent_t *slab;
		void *ptr;
		if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) >
		    0) {
			ptr = arena_slab_reg_alloc(tsdn, slab,
			    &arena_bin_info[binind]);
		} else {
			ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind);
		}
		if (ptr == NULL) {
			/*
			 * OOM.  tbin->avail isn't yet filled down to its first
			 * element, so the successful allocations (if any) must
			 * be moved just before tbin->avail before bailing out.
			 */
			if (i > 0) {
				memmove(tbin->avail - i, tbin->avail - nfill,
				    i * sizeof(void *));
			}
			break;
		}
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ptr, &arena_bin_info[binind],
			    true);
		}
		/* Insert such that low regions get used first. */
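		/*
		 * New pointers are written at increasing offsets ending just
		 * below tbin->avail; on a partial fill, the memmove above
		 * shifts them up so they remain adjacent to tbin->avail.
		 */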
		*(tbin->avail - nfill + i) = ptr;
	}
	if (config_stats) {
		bin->stats.nmalloc += i;
		bin->stats.nrequests += tbin->tstats.nrequests;
		bin->stats.curregs += i;
		bin->stats.nfills++;
		tbin->tstats.nrequests = 0;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	tbin->ncached = i;
	arena_decay_tick(tsdn, arena);
}

void
arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero) {
	if (!zero) {
		memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size);
	}
}

static void
arena_dalloc_junk_small_impl(void *ptr, const arena_bin_info_t *bin_info) {
	memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size);
}
arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small =
    arena_dalloc_junk_small_impl;

static void *
arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) {
	void *ret;
	arena_bin_t *bin;
	size_t usize;
	extent_t *slab;

	assert(binind < NBINS);
	bin = &arena->bins[binind];
	usize = sz_index2size(binind);

	malloc_mutex_lock(tsdn, &bin->lock);
	if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) {
		ret = arena_slab_reg_alloc(tsdn, slab, &arena_bin_info[binind]);
	} else {
		ret = arena_bin_malloc_hard(tsdn, arena, bin, binind);
	}

	if (ret == NULL) {
		malloc_mutex_unlock(tsdn, &bin->lock);
		return NULL;
	}

	if (config_stats) {
		bin->stats.nmalloc++;
		bin->stats.nrequests++;
		bin->stats.curregs++;
	}
	malloc_mutex_unlock(tsdn, &bin->lock);
	if (config_prof && arena_prof_accum(tsdn, arena, usize)) {
		prof_idump(tsdn);
	}

	if (!zero) {
		if (config_fill) {
			if (unlikely(opt_junk_alloc)) {
				arena_alloc_junk_small(ret,
				    &arena_bin_info[binind], false);
			} else if (unlikely(opt_zero)) {
				memset(ret, 0, usize);
			}
		}
	} else {
		if (config_fill && unlikely(opt_junk_alloc)) {
			arena_alloc_junk_small(ret, &arena_bin_info[binind],
			    true);
		}
		memset(ret, 0, usize);
	}

	arena_decay_tick(tsdn, arena);
	return ret;
}

void *
arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind,
    bool zero) {
	assert(!tsdn_null(tsdn) || arena != NULL);

	if (likely(!tsdn_null(tsdn))) {
		arena = arena_choose(tsdn_tsd(tsdn), arena);
	}
	if (unlikely(arena == NULL)) {
		return NULL;
	}

	if (likely(size <= SMALL_MAXCLASS)) {
		return arena_malloc_small(tsdn, arena, ind, zero);
	}
	return large_malloc(tsdn, arena, sz_index2size(ind), zero);
}

void *
arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment,
    bool zero, tcache_t *tcache) {
	void *ret;

	if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE
	    && (usize & PAGE_MASK) == 0))) {
		/* Small; alignment doesn't require special slab placement. */
		ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	} else {
		if (likely(alignment <= CACHELINE)) {
			ret = large_malloc(tsdn, arena, usize, zero);
		} else {
			ret = large_palloc(tsdn, arena, usize, alignment, zero);
		}
	}
	return ret;
}

void
arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) {
	cassert(config_prof);
	assert(ptr != NULL);
	assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);
	assert(usize <= SMALL_MAXCLASS);

	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);

	extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx,
	    (uintptr_t)ptr, true);
	arena_t *arena = extent_arena_get(extent);

	szind_t szind = sz_size2index(usize);
	extent_szind_set(extent, szind);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    szind, false);

	prof_accum_cancel(tsdn, &arena->prof_accum, usize);

	assert(isalloc(tsdn, ptr) == usize);
}

static size_t
arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) {
	cassert(config_prof);
	assert(ptr != NULL);

	extent_szind_set(extent, NBINS);
	rtree_ctx_t rtree_ctx_fallback;
	rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback);
	rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr,
	    NBINS, false);

	assert(isalloc(tsdn, ptr) == LARGE_MINCLASS);

	return LARGE_MINCLASS;
}

void
arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache,
    bool slow_path) {
	cassert(config_prof);
	assert(opt_prof);

	extent_t *extent = iealloc(tsdn, ptr);
	size_t usize = arena_prof_demote(tsdn, extent, ptr);
	if (usize <= tcache_maxclass) {
		tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr,
		    sz_size2index(usize), slow_path);
	} else {
		large_dalloc(tsdn, extent);
	}
}

static void
arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, arena_bin_t *bin) {
	/* Dissociate slab from bin. */
	if (slab == bin->slabcur) {
		bin->slabcur = NULL;
	} else {
		szind_t binind = extent_szind_get(slab);
		const arena_bin_info_t *bin_info = &arena_bin_info[binind];

		/*
		 * The following block's conditional is necessary because if the
		 * slab only contains one region, then it never gets inserted
		 * into the non-full slabs heap.
		 */
		if (bin_info->nregs == 1) {
			arena_bin_slabs_full_remove(arena, bin, slab);
		} else {
			arena_bin_slabs_nonfull_remove(bin, slab);
		}
	}
}

static void
arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    arena_bin_t *bin) {
	assert(slab != bin->slabcur);

	malloc_mutex_unlock(tsdn, &bin->lock);
	/******************************/
	arena_slab_dalloc(tsdn, arena, slab);
	/****************************/
	malloc_mutex_lock(tsdn, &bin->lock);
	if (config_stats) {
		bin->stats.curslabs--;
	}
}

static void
arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    arena_bin_t *bin) {
	assert(extent_nfree_get(slab) > 0);

	/*
	 * Make sure that if bin->slabcur is non-NULL, it refers to the
	 * oldest/lowest non-full slab.  It is okay to NULL slabcur out rather
	 * than proactively keeping it pointing at the oldest/lowest non-full
	 * slab.
	 */
	if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) {
		/* Switch slabcur. */
		if (extent_nfree_get(bin->slabcur) > 0) {
			arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
		} else {
			arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
		}
		bin->slabcur = slab;
		if (config_stats) {
			bin->stats.reslabs++;
		}
	} else {
		arena_bin_slabs_nonfull_insert(bin, slab);
	}
}

static void
arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab,
    void *ptr, bool junked) {
	arena_slab_data_t *slab_data = extent_slab_data_get(slab);
	szind_t binind = extent_szind_get(slab);
	arena_bin_t *bin = &arena->bins[binind];
	const arena_bin_info_t *bin_info = &arena_bin_info[binind];

	if (!junked && config_fill && unlikely(opt_junk_free)) {
		arena_dalloc_junk_small(ptr, bin_info);
	}

	arena_slab_reg_dalloc(tsdn, slab, slab_data, ptr);
	unsigned nfree = extent_nfree_get(slab);
	if (nfree == bin_info->nregs) {
		arena_dissociate_bin_slab(arena, slab, bin);
		arena_dalloc_bin_slab(tsdn, arena, slab, bin);
	} else if (nfree == 1 && slab != bin->slabcur) {
		arena_bin_slabs_full_remove(arena, bin, slab);
		arena_bin_lower_slab(tsdn, arena, slab, bin);
	}

	if (config_stats) {
		bin->stats.ndalloc++;
		bin->stats.curregs--;
	}
}

void
arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent,
    void *ptr) {
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true);
}

static void
arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) {
	szind_t binind = extent_szind_get(extent);
	arena_bin_t *bin = &arena->bins[binind];

	malloc_mutex_lock(tsdn, &bin->lock);
	arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false);
	malloc_mutex_unlock(tsdn, &bin->lock);
}

void
arena_dalloc_small(tsdn_t *tsdn, void *ptr) {
	extent_t *extent = iealloc(tsdn, ptr);
	arena_t *arena = extent_arena_get(extent);

	arena_dalloc_bin(tsdn, arena, extent, ptr);
	arena_decay_tick(tsdn, arena);
}

bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero) {
	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= LARGE_MAXCLASS);

	if (unlikely(size > LARGE_MAXCLASS)) {
		return true;
	}

	extent_t *extent = iealloc(tsdn, ptr);
	size_t usize_min = sz_s2u(size);
	size_t usize_max = sz_s2u(size + extra);
	if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
		/*
		 * Avoid moving the allocation if the size class can be left
		 * the same.
		 */
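		/*
		 * Staying in place is possible when usize_max (still a small
		 * class) maps to oldsize's size class, or when oldsize already
		 * lies between the requested size and usize_max.
		 */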
bool
arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size,
    size_t extra, bool zero) {
	/* Calls with non-zero extra had to clamp extra. */
	assert(extra == 0 || size + extra <= LARGE_MAXCLASS);

	if (unlikely(size > LARGE_MAXCLASS)) {
		return true;
	}

	extent_t *extent = iealloc(tsdn, ptr);
	size_t usize_min = sz_s2u(size);
	size_t usize_max = sz_s2u(size + extra);
	if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) {
		/*
		 * Avoid moving the allocation if the size class can be left the
		 * same.
		 */
		assert(arena_bin_info[sz_size2index(oldsize)].reg_size ==
		    oldsize);
		if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) !=
		    sz_size2index(oldsize)) && (size > oldsize || usize_max <
		    oldsize)) {
			return true;
		}

		arena_decay_tick(tsdn, extent_arena_get(extent));
		return false;
	} else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) {
		return large_ralloc_no_move(tsdn, extent, usize_min, usize_max,
		    zero);
	}

	return true;
}

static void *
arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool zero, tcache_t *tcache) {
	if (alignment == 0) {
		return arena_malloc(tsdn, arena, usize, sz_size2index(usize),
		    zero, tcache, true);
	}
	usize = sz_sa2u(usize, alignment);
	if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) {
		return NULL;
	}
	return ipalloct(tsdn, usize, alignment, zero, tcache, arena);
}
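/*
 * arena_ralloc() tries the cheapest strategies first: resizing in place via
 * arena_ralloc_no_move() for small requests, then large_ralloc() when both
 * the old and new sizes are large, and only then falling back to allocating
 * a new object and copying min(usize, oldsize) bytes.
 */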
void *
arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize,
    size_t size, size_t alignment, bool zero, tcache_t *tcache) {
	size_t usize = sz_s2u(size);
	if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) {
		return NULL;
	}

	if (likely(usize <= SMALL_MAXCLASS)) {
		/* Try to avoid moving the allocation. */
		if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) {
			return ptr;
		}
	}

	if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) {
		return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize,
		    alignment, zero, tcache);
	}

	/*
	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and
	 * copying.
	 */
	void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment,
	    zero, tcache);
	if (ret == NULL) {
		return NULL;
	}

	/*
	 * Junk/zero-filling were already done by
	 * ipalloc()/arena_malloc().
	 */

	size_t copysize = (usize < oldsize) ? usize : oldsize;
	memcpy(ret, ptr, copysize);
	isdalloct(tsdn, ptr, oldsize, tcache, NULL, true);
	return ret;
}

dss_prec_t
arena_dss_prec_get(arena_t *arena) {
	return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
}

bool
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) {
	if (!have_dss) {
		return (dss_prec != dss_prec_disabled);
	}
	atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
	return false;
}

ssize_t
arena_dirty_decay_ms_default_get(void) {
	return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_dirty_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

ssize_t
arena_muzzy_decay_ms_default_get(void) {
	return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED);
}

bool
arena_muzzy_decay_ms_default_set(ssize_t decay_ms) {
	if (!arena_decay_ms_valid(decay_ms)) {
		return true;
	}
	atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED);
	return false;
}

unsigned
arena_nthreads_get(arena_t *arena, bool internal) {
	return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
}

void
arena_nthreads_inc(arena_t *arena, bool internal) {
	atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

void
arena_nthreads_dec(arena_t *arena, bool internal) {
	atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
}

size_t
arena_extent_sn_next(arena_t *arena) {
	return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED);
}
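/*
 * Create and initialize an arena.  Arena 0 shares the bootstrap base
 * allocator (b0get()); every other arena gets its own base, from which the
 * arena structure itself is allocated.  On any initialization failure, a
 * non-zero arena's base is destroyed and NULL is returned.
 */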
arena_t *
arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) {
	arena_t *arena;
	base_t *base;
	unsigned i;

	if (ind == 0) {
		base = b0get();
	} else {
		base = base_new(tsdn, ind, extent_hooks);
		if (base == NULL) {
			return NULL;
		}
	}

	arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE);
	if (arena == NULL) {
		goto label_error;
	}

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	arena->last_thd = NULL;

	if (config_stats) {
		if (arena_stats_init(tsdn, &arena->stats)) {
			goto label_error;
		}

		ql_new(&arena->tcache_ql);
		if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
		    WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) {
			goto label_error;
		}
	}

	if (config_prof) {
		if (prof_accum_init(tsdn, &arena->prof_accum)) {
			goto label_error;
		}
	}

	if (config_cache_oblivious) {
		/*
		 * A nondeterministic seed based on the address of arena reduces
		 * the likelihood of lockstep non-uniform cache index
		 * utilization among identical concurrent processes, but at the
		 * cost of test repeatability.  For debug builds, instead use a
		 * deterministic seed.
		 */
		atomic_store_zu(&arena->offset_state, config_debug ? ind :
		    (size_t)(uintptr_t)arena, ATOMIC_RELAXED);
	}

	atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED);

	atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
	    ATOMIC_RELAXED);

	atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED);

	extent_list_init(&arena->large);
	if (malloc_mutex_init(&arena->large_mtx, "arena_large",
	    WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/*
	 * Delay coalescing for dirty extents despite the disruptive effect on
	 * memory layout for best-fit extent allocation, since cached extents
	 * are likely to be reused soon after deallocation, and the cost of
	 * merging/splitting extents is non-trivial.
	 */
	if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty,
	    true)) {
		goto label_error;
	}
	/*
	 * Coalesce muzzy extents immediately, because operations on them are
	 * in the critical path much less often than for dirty extents.
	 */
	if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy,
	    false)) {
		goto label_error;
	}
	/*
	 * Coalesce retained extents immediately, in part because they will
	 * never be evicted (and therefore there's no opportunity for delayed
	 * coalescing), but also because operations on retained extents are not
	 * in the critical path.
	 */
	if (extents_init(tsdn, &arena->extents_retained, extent_state_retained,
	    false)) {
		goto label_error;
	}
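	/*
	 * The decay states below start from the process-wide defaults, which
	 * arena_boot() seeds from opt_dirty_decay_ms and opt_muzzy_decay_ms.
	 */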
	if (arena_decay_init(&arena->decay_dirty, &arena->extents_dirty,
	    arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) {
		goto label_error;
	}
	if (arena_decay_init(&arena->decay_muzzy, &arena->extents_muzzy,
	    arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) {
		goto label_error;
	}

	arena->extent_grow_next = sz_psz2ind(HUGEPAGE);
	if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow",
	    WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	extent_avail_new(&arena->extent_avail);
	if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail",
	    WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) {
		goto label_error;
	}

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock, "arena_bin",
		    WITNESS_RANK_ARENA_BIN, malloc_mutex_rank_exclusive)) {
			goto label_error;
		}
		bin->slabcur = NULL;
		extent_heap_new(&bin->slabs_nonfull);
		extent_list_init(&bin->slabs_full);
		if (config_stats) {
			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
		}
	}

	arena->base = base;
	/* Set arena before creating background threads. */
	arena_set(ind, arena);

	nstime_init(&arena->create_time, 0);
	nstime_update(&arena->create_time);

	/* We don't support reentrancy for arena 0 bootstrapping. */
	if (ind != 0) {
		/*
		 * If we're here, then arena 0 already exists, so bootstrapping
		 * is done enough that we should have tsd.
		 */
		assert(!tsdn_null(tsdn));
		pre_reentrancy(tsdn_tsd(tsdn), arena);
		if (hooks_arena_new_hook) {
			hooks_arena_new_hook();
		}
		post_reentrancy(tsdn_tsd(tsdn));
	}

	return arena;
label_error:
	if (ind != 0) {
		base_delete(tsdn, base);
	}
	return NULL;
}

void
arena_boot(void) {
	arena_dirty_decay_ms_default_set(opt_dirty_decay_ms);
	arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms);
}
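/*
 * Fork handling.  The arena_prefork*() functions are numbered in the order
 * they are intended to run before fork(), so that every arena mutex is held
 * across the fork; the postfork_parent functions then unlock them, and the
 * postfork_child functions restore them to a usable state in the new process.
 * arena_postfork_child() additionally resets the thread counts and
 * re-registers the calling thread's tcache.
 */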
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena) {
	if (config_stats) {
		malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_grow_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena) {
	extents_prefork(tsdn, &arena->extents_dirty);
	extents_prefork(tsdn, &arena->extents_muzzy);
	extents_prefork(tsdn, &arena->extents_retained);
}

void
arena_prefork4(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx);
}

void
arena_prefork5(tsdn_t *tsdn, arena_t *arena) {
	base_prefork(tsdn, arena->base);
}

void
arena_prefork6(tsdn_t *tsdn, arena_t *arena) {
	malloc_mutex_prefork(tsdn, &arena->large_mtx);
}

void
arena_prefork7(tsdn_t *tsdn, arena_t *arena) {
	for (unsigned i = 0; i < NBINS; i++) {
		malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
	}
}

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	for (i = 0; i < NBINS; i++) {
		malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
	}
	malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
	base_postfork_parent(tsdn, arena->base);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx);
	extents_postfork_parent(tsdn, &arena->extents_dirty);
	extents_postfork_parent(tsdn, &arena->extents_muzzy);
	extents_postfork_parent(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_parent(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
	}
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena) {
	unsigned i;

	atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
	atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
	if (tsd_arena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, false);
	}
	if (tsd_iarena_get(tsdn_tsd(tsdn)) == arena) {
		arena_nthreads_inc(arena, true);
	}
	if (config_stats) {
		ql_new(&arena->tcache_ql);
		tcache_t *tcache = tcache_get(tsdn_tsd(tsdn));
		if (tcache != NULL && tcache->arena == arena) {
			ql_elm_new(tcache, link);
			ql_tail_insert(&arena->tcache_ql, tcache, link);
		}
	}

	for (i = 0; i < NBINS; i++) {
		malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
	}
	malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
	base_postfork_child(tsdn, arena->base);
	malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx);
	extents_postfork_child(tsdn, &arena->extents_dirty);
	extents_postfork_child(tsdn, &arena->extents_muzzy);
	extents_postfork_child(tsdn, &arena->extents_retained);
	malloc_mutex_postfork_child(tsdn, &arena->extent_grow_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx);
	malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx);
	if (config_stats) {
		malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
	}
}