1 #define JEMALLOC_ARENA_C_ 2 #include "jemalloc/internal/jemalloc_preamble.h" 3 #include "jemalloc/internal/jemalloc_internal_includes.h" 4 5 #include "jemalloc/internal/assert.h" 6 #include "jemalloc/internal/extent_dss.h" 7 #include "jemalloc/internal/extent_mmap.h" 8 #include "jemalloc/internal/mutex.h" 9 #include "jemalloc/internal/rtree.h" 10 #include "jemalloc/internal/size_classes.h" 11 #include "jemalloc/internal/util.h" 12 13 /******************************************************************************/ 14 /* Data. */ 15 16 /* 17 * Define names for both unininitialized and initialized phases, so that 18 * options and mallctl processing are straightforward. 19 */ 20 const char *percpu_arena_mode_names[] = { 21 "percpu", 22 "phycpu", 23 "disabled", 24 "percpu", 25 "phycpu" 26 }; 27 percpu_arena_mode_t opt_percpu_arena = PERCPU_ARENA_DEFAULT; 28 29 ssize_t opt_dirty_decay_ms = DIRTY_DECAY_MS_DEFAULT; 30 ssize_t opt_muzzy_decay_ms = MUZZY_DECAY_MS_DEFAULT; 31 32 static atomic_zd_t dirty_decay_ms_default; 33 static atomic_zd_t muzzy_decay_ms_default; 34 35 const arena_bin_info_t arena_bin_info[NBINS] = { 36 #define BIN_INFO_bin_yes(reg_size, slab_size, nregs) \ 37 {reg_size, slab_size, nregs, BITMAP_INFO_INITIALIZER(nregs)}, 38 #define BIN_INFO_bin_no(reg_size, slab_size, nregs) 39 #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \ 40 lg_delta_lookup) \ 41 BIN_INFO_bin_##bin((1U<<lg_grp) + (ndelta<<lg_delta), \ 42 (pgs << LG_PAGE), (pgs << LG_PAGE) / ((1U<<lg_grp) + \ 43 (ndelta<<lg_delta))) 44 SIZE_CLASSES 45 #undef BIN_INFO_bin_yes 46 #undef BIN_INFO_bin_no 47 #undef SC 48 }; 49 50 const uint64_t h_steps[SMOOTHSTEP_NSTEPS] = { 51 #define STEP(step, h, x, y) \ 52 h, 53 SMOOTHSTEP 54 #undef STEP 55 }; 56 57 /******************************************************************************/ 58 /* 59 * Function prototypes for static functions that are referenced prior to 60 * definition. 61 */ 62 63 static void arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, 64 arena_decay_t *decay, extents_t *extents, bool all, size_t npages_limit); 65 static bool arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, 66 bool is_background_thread, bool all); 67 static void arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, 68 arena_bin_t *bin); 69 static void arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, 70 arena_bin_t *bin); 71 72 /******************************************************************************/ 73 74 static bool 75 arena_stats_init(tsdn_t *tsdn, arena_stats_t *arena_stats) { 76 if (config_debug) { 77 for (size_t i = 0; i < sizeof(arena_stats_t); i++) { 78 assert(((char *)arena_stats)[i] == 0); 79 } 80 } 81 #ifndef JEMALLOC_ATOMIC_U64 82 if (malloc_mutex_init(&arena_stats->mtx, "arena_stats", 83 WITNESS_RANK_ARENA_STATS, malloc_mutex_rank_exclusive)) { 84 return true; 85 } 86 #endif 87 /* Memory is zeroed, so there is no need to clear stats. */ 88 return false; 89 } 90 91 static void 92 arena_stats_lock(tsdn_t *tsdn, arena_stats_t *arena_stats) { 93 #ifndef JEMALLOC_ATOMIC_U64 94 malloc_mutex_lock(tsdn, &arena_stats->mtx); 95 #endif 96 } 97 98 static void 99 arena_stats_unlock(tsdn_t *tsdn, arena_stats_t *arena_stats) { 100 #ifndef JEMALLOC_ATOMIC_U64 101 malloc_mutex_unlock(tsdn, &arena_stats->mtx); 102 #endif 103 } 104 105 static uint64_t 106 arena_stats_read_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, 107 arena_stats_u64_t *p) { 108 #ifdef JEMALLOC_ATOMIC_U64 109 return atomic_load_u64(p, ATOMIC_RELAXED); 110 #else 111 malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); 112 return *p; 113 #endif 114 } 115 116 static void 117 arena_stats_add_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, 118 arena_stats_u64_t *p, uint64_t x) { 119 #ifdef JEMALLOC_ATOMIC_U64 120 atomic_fetch_add_u64(p, x, ATOMIC_RELAXED); 121 #else 122 malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); 123 *p += x; 124 #endif 125 } 126 127 UNUSED static void 128 arena_stats_sub_u64(tsdn_t *tsdn, arena_stats_t *arena_stats, 129 arena_stats_u64_t *p, uint64_t x) { 130 #ifdef JEMALLOC_ATOMIC_U64 131 UNUSED uint64_t r = atomic_fetch_sub_u64(p, x, ATOMIC_RELAXED); 132 assert(r - x <= r); 133 #else 134 malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); 135 *p -= x; 136 assert(*p + x >= *p); 137 #endif 138 } 139 140 /* 141 * Non-atomically sets *dst += src. *dst needs external synchronization. 142 * This lets us avoid the cost of a fetch_add when its unnecessary (note that 143 * the types here are atomic). 144 */ 145 static void 146 arena_stats_accum_u64(arena_stats_u64_t *dst, uint64_t src) { 147 #ifdef JEMALLOC_ATOMIC_U64 148 uint64_t cur_dst = atomic_load_u64(dst, ATOMIC_RELAXED); 149 atomic_store_u64(dst, src + cur_dst, ATOMIC_RELAXED); 150 #else 151 *dst += src; 152 #endif 153 } 154 155 static size_t 156 arena_stats_read_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p) { 157 #ifdef JEMALLOC_ATOMIC_U64 158 return atomic_load_zu(p, ATOMIC_RELAXED); 159 #else 160 malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); 161 return atomic_load_zu(p, ATOMIC_RELAXED); 162 #endif 163 } 164 165 static void 166 arena_stats_add_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p, 167 size_t x) { 168 #ifdef JEMALLOC_ATOMIC_U64 169 atomic_fetch_add_zu(p, x, ATOMIC_RELAXED); 170 #else 171 malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); 172 size_t cur = atomic_load_zu(p, ATOMIC_RELAXED); 173 atomic_store_zu(p, cur + x, ATOMIC_RELAXED); 174 #endif 175 } 176 177 static void 178 arena_stats_sub_zu(tsdn_t *tsdn, arena_stats_t *arena_stats, atomic_zu_t *p, 179 size_t x) { 180 #ifdef JEMALLOC_ATOMIC_U64 181 UNUSED size_t r = atomic_fetch_sub_zu(p, x, ATOMIC_RELAXED); 182 assert(r - x <= r); 183 #else 184 malloc_mutex_assert_owner(tsdn, &arena_stats->mtx); 185 size_t cur = atomic_load_zu(p, ATOMIC_RELAXED); 186 atomic_store_zu(p, cur - x, ATOMIC_RELAXED); 187 #endif 188 } 189 190 /* Like the _u64 variant, needs an externally synchronized *dst. */ 191 static void 192 arena_stats_accum_zu(atomic_zu_t *dst, size_t src) { 193 size_t cur_dst = atomic_load_zu(dst, ATOMIC_RELAXED); 194 atomic_store_zu(dst, src + cur_dst, ATOMIC_RELAXED); 195 } 196 197 void 198 arena_stats_large_nrequests_add(tsdn_t *tsdn, arena_stats_t *arena_stats, 199 szind_t szind, uint64_t nrequests) { 200 arena_stats_lock(tsdn, arena_stats); 201 arena_stats_add_u64(tsdn, arena_stats, &arena_stats->lstats[szind - 202 NBINS].nrequests, nrequests); 203 arena_stats_unlock(tsdn, arena_stats); 204 } 205 206 void 207 arena_stats_mapped_add(tsdn_t *tsdn, arena_stats_t *arena_stats, size_t size) { 208 arena_stats_lock(tsdn, arena_stats); 209 arena_stats_add_zu(tsdn, arena_stats, &arena_stats->mapped, size); 210 arena_stats_unlock(tsdn, arena_stats); 211 } 212 213 void 214 arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, 215 const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, 216 size_t *nactive, size_t *ndirty, size_t *nmuzzy) { 217 *nthreads += arena_nthreads_get(arena, false); 218 *dss = dss_prec_names[arena_dss_prec_get(arena)]; 219 *dirty_decay_ms = arena_dirty_decay_ms_get(arena); 220 *muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); 221 *nactive += atomic_load_zu(&arena->nactive, ATOMIC_RELAXED); 222 *ndirty += extents_npages_get(&arena->extents_dirty); 223 *nmuzzy += extents_npages_get(&arena->extents_muzzy); 224 } 225 226 void 227 arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, 228 const char **dss, ssize_t *dirty_decay_ms, ssize_t *muzzy_decay_ms, 229 size_t *nactive, size_t *ndirty, size_t *nmuzzy, arena_stats_t *astats, 230 malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats) { 231 cassert(config_stats); 232 233 arena_basic_stats_merge(tsdn, arena, nthreads, dss, dirty_decay_ms, 234 muzzy_decay_ms, nactive, ndirty, nmuzzy); 235 236 size_t base_allocated, base_resident, base_mapped; 237 base_stats_get(tsdn, arena->base, &base_allocated, &base_resident, 238 &base_mapped); 239 240 arena_stats_lock(tsdn, &arena->stats); 241 242 arena_stats_accum_zu(&astats->mapped, base_mapped 243 + arena_stats_read_zu(tsdn, &arena->stats, &arena->stats.mapped)); 244 arena_stats_accum_zu(&astats->retained, 245 extents_npages_get(&arena->extents_retained) << LG_PAGE); 246 247 arena_stats_accum_u64(&astats->decay_dirty.npurge, 248 arena_stats_read_u64(tsdn, &arena->stats, 249 &arena->stats.decay_dirty.npurge)); 250 arena_stats_accum_u64(&astats->decay_dirty.nmadvise, 251 arena_stats_read_u64(tsdn, &arena->stats, 252 &arena->stats.decay_dirty.nmadvise)); 253 arena_stats_accum_u64(&astats->decay_dirty.purged, 254 arena_stats_read_u64(tsdn, &arena->stats, 255 &arena->stats.decay_dirty.purged)); 256 257 arena_stats_accum_u64(&astats->decay_muzzy.npurge, 258 arena_stats_read_u64(tsdn, &arena->stats, 259 &arena->stats.decay_muzzy.npurge)); 260 arena_stats_accum_u64(&astats->decay_muzzy.nmadvise, 261 arena_stats_read_u64(tsdn, &arena->stats, 262 &arena->stats.decay_muzzy.nmadvise)); 263 arena_stats_accum_u64(&astats->decay_muzzy.purged, 264 arena_stats_read_u64(tsdn, &arena->stats, 265 &arena->stats.decay_muzzy.purged)); 266 267 arena_stats_accum_zu(&astats->base, base_allocated); 268 arena_stats_accum_zu(&astats->internal, arena_internal_get(arena)); 269 arena_stats_accum_zu(&astats->resident, base_resident + 270 (((atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) + 271 extents_npages_get(&arena->extents_dirty) + 272 extents_npages_get(&arena->extents_muzzy)) << LG_PAGE))); 273 274 for (szind_t i = 0; i < NSIZES - NBINS; i++) { 275 uint64_t nmalloc = arena_stats_read_u64(tsdn, &arena->stats, 276 &arena->stats.lstats[i].nmalloc); 277 arena_stats_accum_u64(&lstats[i].nmalloc, nmalloc); 278 arena_stats_accum_u64(&astats->nmalloc_large, nmalloc); 279 280 uint64_t ndalloc = arena_stats_read_u64(tsdn, &arena->stats, 281 &arena->stats.lstats[i].ndalloc); 282 arena_stats_accum_u64(&lstats[i].ndalloc, ndalloc); 283 arena_stats_accum_u64(&astats->ndalloc_large, ndalloc); 284 285 uint64_t nrequests = arena_stats_read_u64(tsdn, &arena->stats, 286 &arena->stats.lstats[i].nrequests); 287 arena_stats_accum_u64(&lstats[i].nrequests, 288 nmalloc + nrequests); 289 arena_stats_accum_u64(&astats->nrequests_large, 290 nmalloc + nrequests); 291 292 assert(nmalloc >= ndalloc); 293 assert(nmalloc - ndalloc <= SIZE_T_MAX); 294 size_t curlextents = (size_t)(nmalloc - ndalloc); 295 lstats[i].curlextents += curlextents; 296 arena_stats_accum_zu(&astats->allocated_large, 297 curlextents * sz_index2size(NBINS + i)); 298 } 299 300 arena_stats_unlock(tsdn, &arena->stats); 301 302 /* tcache_bytes counts currently cached bytes. */ 303 atomic_store_zu(&astats->tcache_bytes, 0, ATOMIC_RELAXED); 304 malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx); 305 tcache_t *tcache; 306 ql_foreach(tcache, &arena->tcache_ql, link) { 307 szind_t i = 0; 308 for (; i < NBINS; i++) { 309 tcache_bin_t *tbin = tcache_small_bin_get(tcache, i); 310 arena_stats_accum_zu(&astats->tcache_bytes, 311 tbin->ncached * sz_index2size(i)); 312 } 313 for (; i < nhbins; i++) { 314 tcache_bin_t *tbin = tcache_large_bin_get(tcache, i); 315 arena_stats_accum_zu(&astats->tcache_bytes, 316 tbin->ncached * sz_index2size(i)); 317 } 318 } 319 malloc_mutex_prof_read(tsdn, 320 &astats->mutex_prof_data[arena_prof_mutex_tcache_list], 321 &arena->tcache_ql_mtx); 322 malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx); 323 324 #define READ_ARENA_MUTEX_PROF_DATA(mtx, ind) \ 325 malloc_mutex_lock(tsdn, &arena->mtx); \ 326 malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \ 327 &arena->mtx); \ 328 malloc_mutex_unlock(tsdn, &arena->mtx); 329 330 /* Gather per arena mutex profiling data. */ 331 READ_ARENA_MUTEX_PROF_DATA(large_mtx, arena_prof_mutex_large); 332 READ_ARENA_MUTEX_PROF_DATA(extent_avail_mtx, 333 arena_prof_mutex_extent_avail) 334 READ_ARENA_MUTEX_PROF_DATA(extents_dirty.mtx, 335 arena_prof_mutex_extents_dirty) 336 READ_ARENA_MUTEX_PROF_DATA(extents_muzzy.mtx, 337 arena_prof_mutex_extents_muzzy) 338 READ_ARENA_MUTEX_PROF_DATA(extents_retained.mtx, 339 arena_prof_mutex_extents_retained) 340 READ_ARENA_MUTEX_PROF_DATA(decay_dirty.mtx, 341 arena_prof_mutex_decay_dirty) 342 READ_ARENA_MUTEX_PROF_DATA(decay_muzzy.mtx, 343 arena_prof_mutex_decay_muzzy) 344 READ_ARENA_MUTEX_PROF_DATA(base->mtx, 345 arena_prof_mutex_base) 346 #undef READ_ARENA_MUTEX_PROF_DATA 347 348 nstime_copy(&astats->uptime, &arena->create_time); 349 nstime_update(&astats->uptime); 350 nstime_subtract(&astats->uptime, &arena->create_time); 351 352 for (szind_t i = 0; i < NBINS; i++) { 353 arena_bin_t *bin = &arena->bins[i]; 354 355 malloc_mutex_lock(tsdn, &bin->lock); 356 malloc_mutex_prof_read(tsdn, &bstats[i].mutex_data, &bin->lock); 357 bstats[i].nmalloc += bin->stats.nmalloc; 358 bstats[i].ndalloc += bin->stats.ndalloc; 359 bstats[i].nrequests += bin->stats.nrequests; 360 bstats[i].curregs += bin->stats.curregs; 361 bstats[i].nfills += bin->stats.nfills; 362 bstats[i].nflushes += bin->stats.nflushes; 363 bstats[i].nslabs += bin->stats.nslabs; 364 bstats[i].reslabs += bin->stats.reslabs; 365 bstats[i].curslabs += bin->stats.curslabs; 366 malloc_mutex_unlock(tsdn, &bin->lock); 367 } 368 } 369 370 void 371 arena_extents_dirty_dalloc(tsdn_t *tsdn, arena_t *arena, 372 extent_hooks_t **r_extent_hooks, extent_t *extent) { 373 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 374 WITNESS_RANK_CORE, 0); 375 376 extents_dalloc(tsdn, arena, r_extent_hooks, &arena->extents_dirty, 377 extent); 378 if (arena_dirty_decay_ms_get(arena) == 0) { 379 arena_decay_dirty(tsdn, arena, false, true); 380 } else { 381 arena_background_thread_inactivity_check(tsdn, arena); 382 } 383 } 384 385 static void * 386 arena_slab_reg_alloc(tsdn_t *tsdn, extent_t *slab, 387 const arena_bin_info_t *bin_info) { 388 void *ret; 389 arena_slab_data_t *slab_data = extent_slab_data_get(slab); 390 size_t regind; 391 392 assert(extent_nfree_get(slab) > 0); 393 assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info)); 394 395 regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info); 396 ret = (void *)((uintptr_t)extent_addr_get(slab) + 397 (uintptr_t)(bin_info->reg_size * regind)); 398 extent_nfree_dec(slab); 399 return ret; 400 } 401 402 #ifndef JEMALLOC_JET 403 static 404 #endif 405 size_t 406 arena_slab_regind(extent_t *slab, szind_t binind, const void *ptr) { 407 size_t diff, regind; 408 409 /* Freeing a pointer outside the slab can cause assertion failure. */ 410 assert((uintptr_t)ptr >= (uintptr_t)extent_addr_get(slab)); 411 assert((uintptr_t)ptr < (uintptr_t)extent_past_get(slab)); 412 /* Freeing an interior pointer can cause assertion failure. */ 413 assert(((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)) % 414 (uintptr_t)arena_bin_info[binind].reg_size == 0); 415 416 /* Avoid doing division with a variable divisor. */ 417 diff = (size_t)((uintptr_t)ptr - (uintptr_t)extent_addr_get(slab)); 418 switch (binind) { 419 #define REGIND_bin_yes(index, reg_size) \ 420 case index: \ 421 regind = diff / (reg_size); \ 422 assert(diff == regind * (reg_size)); \ 423 break; 424 #define REGIND_bin_no(index, reg_size) 425 #define SC(index, lg_grp, lg_delta, ndelta, psz, bin, pgs, \ 426 lg_delta_lookup) \ 427 REGIND_bin_##bin(index, (1U<<lg_grp) + (ndelta<<lg_delta)) 428 SIZE_CLASSES 429 #undef REGIND_bin_yes 430 #undef REGIND_bin_no 431 #undef SC 432 default: not_reached(); 433 } 434 435 assert(regind < arena_bin_info[binind].nregs); 436 437 return regind; 438 } 439 440 static void 441 arena_slab_reg_dalloc(tsdn_t *tsdn, extent_t *slab, 442 arena_slab_data_t *slab_data, void *ptr) { 443 szind_t binind = extent_szind_get(slab); 444 const arena_bin_info_t *bin_info = &arena_bin_info[binind]; 445 size_t regind = arena_slab_regind(slab, binind, ptr); 446 447 assert(extent_nfree_get(slab) < bin_info->nregs); 448 /* Freeing an unallocated pointer can cause assertion failure. */ 449 assert(bitmap_get(slab_data->bitmap, &bin_info->bitmap_info, regind)); 450 451 bitmap_unset(slab_data->bitmap, &bin_info->bitmap_info, regind); 452 extent_nfree_inc(slab); 453 } 454 455 static void 456 arena_nactive_add(arena_t *arena, size_t add_pages) { 457 atomic_fetch_add_zu(&arena->nactive, add_pages, ATOMIC_RELAXED); 458 } 459 460 static void 461 arena_nactive_sub(arena_t *arena, size_t sub_pages) { 462 assert(atomic_load_zu(&arena->nactive, ATOMIC_RELAXED) >= sub_pages); 463 atomic_fetch_sub_zu(&arena->nactive, sub_pages, ATOMIC_RELAXED); 464 } 465 466 static void 467 arena_large_malloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { 468 szind_t index, hindex; 469 470 cassert(config_stats); 471 472 if (usize < LARGE_MINCLASS) { 473 usize = LARGE_MINCLASS; 474 } 475 index = sz_size2index(usize); 476 hindex = (index >= NBINS) ? index - NBINS : 0; 477 478 arena_stats_add_u64(tsdn, &arena->stats, 479 &arena->stats.lstats[hindex].nmalloc, 1); 480 } 481 482 static void 483 arena_large_dalloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t usize) { 484 szind_t index, hindex; 485 486 cassert(config_stats); 487 488 if (usize < LARGE_MINCLASS) { 489 usize = LARGE_MINCLASS; 490 } 491 index = sz_size2index(usize); 492 hindex = (index >= NBINS) ? index - NBINS : 0; 493 494 arena_stats_add_u64(tsdn, &arena->stats, 495 &arena->stats.lstats[hindex].ndalloc, 1); 496 } 497 498 static void 499 arena_large_ralloc_stats_update(tsdn_t *tsdn, arena_t *arena, size_t oldusize, 500 size_t usize) { 501 arena_large_dalloc_stats_update(tsdn, arena, oldusize); 502 arena_large_malloc_stats_update(tsdn, arena, usize); 503 } 504 505 extent_t * 506 arena_extent_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, 507 size_t alignment, bool *zero) { 508 extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; 509 510 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 511 WITNESS_RANK_CORE, 0); 512 513 szind_t szind = sz_size2index(usize); 514 size_t mapped_add; 515 bool commit = true; 516 extent_t *extent = extents_alloc(tsdn, arena, &extent_hooks, 517 &arena->extents_dirty, NULL, usize, sz_large_pad, alignment, false, 518 szind, zero, &commit); 519 if (extent == NULL) { 520 extent = extents_alloc(tsdn, arena, &extent_hooks, 521 &arena->extents_muzzy, NULL, usize, sz_large_pad, alignment, 522 false, szind, zero, &commit); 523 } 524 size_t size = usize + sz_large_pad; 525 if (extent == NULL) { 526 extent = extent_alloc_wrapper(tsdn, arena, &extent_hooks, NULL, 527 usize, sz_large_pad, alignment, false, szind, zero, 528 &commit); 529 if (config_stats) { 530 /* 531 * extent may be NULL on OOM, but in that case 532 * mapped_add isn't used below, so there's no need to 533 * conditionlly set it to 0 here. 534 */ 535 mapped_add = size; 536 } 537 } else if (config_stats) { 538 mapped_add = 0; 539 } 540 541 if (extent != NULL) { 542 if (config_stats) { 543 arena_stats_lock(tsdn, &arena->stats); 544 arena_large_malloc_stats_update(tsdn, arena, usize); 545 if (mapped_add != 0) { 546 arena_stats_add_zu(tsdn, &arena->stats, 547 &arena->stats.mapped, mapped_add); 548 } 549 arena_stats_unlock(tsdn, &arena->stats); 550 } 551 arena_nactive_add(arena, size >> LG_PAGE); 552 } 553 554 return extent; 555 } 556 557 void 558 arena_extent_dalloc_large_prep(tsdn_t *tsdn, arena_t *arena, extent_t *extent) { 559 if (config_stats) { 560 arena_stats_lock(tsdn, &arena->stats); 561 arena_large_dalloc_stats_update(tsdn, arena, 562 extent_usize_get(extent)); 563 arena_stats_unlock(tsdn, &arena->stats); 564 } 565 arena_nactive_sub(arena, extent_size_get(extent) >> LG_PAGE); 566 } 567 568 void 569 arena_extent_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, extent_t *extent, 570 size_t oldusize) { 571 size_t usize = extent_usize_get(extent); 572 size_t udiff = oldusize - usize; 573 574 if (config_stats) { 575 arena_stats_lock(tsdn, &arena->stats); 576 arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); 577 arena_stats_unlock(tsdn, &arena->stats); 578 } 579 arena_nactive_sub(arena, udiff >> LG_PAGE); 580 } 581 582 void 583 arena_extent_ralloc_large_expand(tsdn_t *tsdn, arena_t *arena, extent_t *extent, 584 size_t oldusize) { 585 size_t usize = extent_usize_get(extent); 586 size_t udiff = usize - oldusize; 587 588 if (config_stats) { 589 arena_stats_lock(tsdn, &arena->stats); 590 arena_large_ralloc_stats_update(tsdn, arena, oldusize, usize); 591 arena_stats_unlock(tsdn, &arena->stats); 592 } 593 arena_nactive_add(arena, udiff >> LG_PAGE); 594 } 595 596 static ssize_t 597 arena_decay_ms_read(arena_decay_t *decay) { 598 return atomic_load_zd(&decay->time_ms, ATOMIC_RELAXED); 599 } 600 601 static void 602 arena_decay_ms_write(arena_decay_t *decay, ssize_t decay_ms) { 603 atomic_store_zd(&decay->time_ms, decay_ms, ATOMIC_RELAXED); 604 } 605 606 static void 607 arena_decay_deadline_init(arena_decay_t *decay) { 608 /* 609 * Generate a new deadline that is uniformly random within the next 610 * epoch after the current one. 611 */ 612 nstime_copy(&decay->deadline, &decay->epoch); 613 nstime_add(&decay->deadline, &decay->interval); 614 if (arena_decay_ms_read(decay) > 0) { 615 nstime_t jitter; 616 617 nstime_init(&jitter, prng_range_u64(&decay->jitter_state, 618 nstime_ns(&decay->interval))); 619 nstime_add(&decay->deadline, &jitter); 620 } 621 } 622 623 static bool 624 arena_decay_deadline_reached(const arena_decay_t *decay, const nstime_t *time) { 625 return (nstime_compare(&decay->deadline, time) <= 0); 626 } 627 628 static size_t 629 arena_decay_backlog_npages_limit(const arena_decay_t *decay) { 630 uint64_t sum; 631 size_t npages_limit_backlog; 632 unsigned i; 633 634 /* 635 * For each element of decay_backlog, multiply by the corresponding 636 * fixed-point smoothstep decay factor. Sum the products, then divide 637 * to round down to the nearest whole number of pages. 638 */ 639 sum = 0; 640 for (i = 0; i < SMOOTHSTEP_NSTEPS; i++) { 641 sum += decay->backlog[i] * h_steps[i]; 642 } 643 npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP); 644 645 return npages_limit_backlog; 646 } 647 648 static void 649 arena_decay_backlog_update_last(arena_decay_t *decay, size_t current_npages) { 650 size_t npages_delta = (current_npages > decay->nunpurged) ? 651 current_npages - decay->nunpurged : 0; 652 decay->backlog[SMOOTHSTEP_NSTEPS-1] = npages_delta; 653 654 if (config_debug) { 655 if (current_npages > decay->ceil_npages) { 656 decay->ceil_npages = current_npages; 657 } 658 size_t npages_limit = arena_decay_backlog_npages_limit(decay); 659 assert(decay->ceil_npages >= npages_limit); 660 if (decay->ceil_npages > npages_limit) { 661 decay->ceil_npages = npages_limit; 662 } 663 } 664 } 665 666 static void 667 arena_decay_backlog_update(arena_decay_t *decay, uint64_t nadvance_u64, 668 size_t current_npages) { 669 if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) { 670 memset(decay->backlog, 0, (SMOOTHSTEP_NSTEPS-1) * 671 sizeof(size_t)); 672 } else { 673 size_t nadvance_z = (size_t)nadvance_u64; 674 675 assert((uint64_t)nadvance_z == nadvance_u64); 676 677 memmove(decay->backlog, &decay->backlog[nadvance_z], 678 (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t)); 679 if (nadvance_z > 1) { 680 memset(&decay->backlog[SMOOTHSTEP_NSTEPS - 681 nadvance_z], 0, (nadvance_z-1) * sizeof(size_t)); 682 } 683 } 684 685 arena_decay_backlog_update_last(decay, current_npages); 686 } 687 688 static void 689 arena_decay_try_purge(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, 690 extents_t *extents, size_t current_npages, size_t npages_limit) { 691 if (current_npages > npages_limit) { 692 arena_decay_to_limit(tsdn, arena, decay, extents, false, 693 npages_limit); 694 } 695 } 696 697 static void 698 arena_decay_epoch_advance_helper(arena_decay_t *decay, const nstime_t *time, 699 size_t current_npages) { 700 assert(arena_decay_deadline_reached(decay, time)); 701 702 nstime_t delta; 703 nstime_copy(&delta, time); 704 nstime_subtract(&delta, &decay->epoch); 705 706 uint64_t nadvance_u64 = nstime_divide(&delta, &decay->interval); 707 assert(nadvance_u64 > 0); 708 709 /* Add nadvance_u64 decay intervals to epoch. */ 710 nstime_copy(&delta, &decay->interval); 711 nstime_imultiply(&delta, nadvance_u64); 712 nstime_add(&decay->epoch, &delta); 713 714 /* Set a new deadline. */ 715 arena_decay_deadline_init(decay); 716 717 /* Update the backlog. */ 718 arena_decay_backlog_update(decay, nadvance_u64, current_npages); 719 } 720 721 static void 722 arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, 723 extents_t *extents, const nstime_t *time, bool purge) { 724 size_t current_npages = extents_npages_get(extents); 725 arena_decay_epoch_advance_helper(decay, time, current_npages); 726 727 size_t npages_limit = arena_decay_backlog_npages_limit(decay); 728 /* We may unlock decay->mtx when try_purge(). Finish logging first. */ 729 decay->nunpurged = (npages_limit > current_npages) ? npages_limit : 730 current_npages; 731 if (purge) { 732 arena_decay_try_purge(tsdn, arena, decay, extents, 733 current_npages, npages_limit); 734 } 735 } 736 737 static void 738 arena_decay_reinit(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms) { 739 arena_decay_ms_write(decay, decay_ms); 740 if (decay_ms > 0) { 741 nstime_init(&decay->interval, (uint64_t)decay_ms * 742 KQU(1000000)); 743 nstime_idivide(&decay->interval, SMOOTHSTEP_NSTEPS); 744 } 745 746 nstime_init(&decay->epoch, 0); 747 nstime_update(&decay->epoch); 748 decay->jitter_state = (uint64_t)(uintptr_t)decay; 749 arena_decay_deadline_init(decay); 750 decay->nunpurged = 0; 751 memset(decay->backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t)); 752 } 753 754 static bool 755 arena_decay_init(arena_decay_t *decay, extents_t *extents, ssize_t decay_ms, 756 decay_stats_t *stats) { 757 if (config_debug) { 758 for (size_t i = 0; i < sizeof(arena_decay_t); i++) { 759 assert(((char *)decay)[i] == 0); 760 } 761 decay->ceil_npages = 0; 762 } 763 if (malloc_mutex_init(&decay->mtx, "decay", WITNESS_RANK_DECAY, 764 malloc_mutex_rank_exclusive)) { 765 return true; 766 } 767 decay->purging = false; 768 arena_decay_reinit(decay, extents, decay_ms); 769 /* Memory is zeroed, so there is no need to clear stats. */ 770 if (config_stats) { 771 decay->stats = stats; 772 } 773 return false; 774 } 775 776 static bool 777 arena_decay_ms_valid(ssize_t decay_ms) { 778 if (decay_ms < -1) { 779 return false; 780 } 781 if (decay_ms == -1 || (uint64_t)decay_ms <= NSTIME_SEC_MAX * 782 KQU(1000)) { 783 return true; 784 } 785 return false; 786 } 787 788 static bool 789 arena_maybe_decay(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, 790 extents_t *extents, bool is_background_thread) { 791 malloc_mutex_assert_owner(tsdn, &decay->mtx); 792 793 /* Purge all or nothing if the option is disabled. */ 794 ssize_t decay_ms = arena_decay_ms_read(decay); 795 if (decay_ms <= 0) { 796 if (decay_ms == 0) { 797 arena_decay_to_limit(tsdn, arena, decay, extents, false, 798 0); 799 } 800 return false; 801 } 802 803 nstime_t time; 804 nstime_init(&time, 0); 805 nstime_update(&time); 806 if (unlikely(!nstime_monotonic() && nstime_compare(&decay->epoch, &time) 807 > 0)) { 808 /* 809 * Time went backwards. Move the epoch back in time and 810 * generate a new deadline, with the expectation that time 811 * typically flows forward for long enough periods of time that 812 * epochs complete. Unfortunately, this strategy is susceptible 813 * to clock jitter triggering premature epoch advances, but 814 * clock jitter estimation and compensation isn't feasible here 815 * because calls into this code are event-driven. 816 */ 817 nstime_copy(&decay->epoch, &time); 818 arena_decay_deadline_init(decay); 819 } else { 820 /* Verify that time does not go backwards. */ 821 assert(nstime_compare(&decay->epoch, &time) <= 0); 822 } 823 824 /* 825 * If the deadline has been reached, advance to the current epoch and 826 * purge to the new limit if necessary. Note that dirty pages created 827 * during the current epoch are not subject to purge until a future 828 * epoch, so as a result purging only happens during epoch advances, or 829 * being triggered by background threads (scheduled event). 830 */ 831 bool advance_epoch = arena_decay_deadline_reached(decay, &time); 832 if (advance_epoch) { 833 bool should_purge = is_background_thread || 834 !background_thread_enabled(); 835 arena_decay_epoch_advance(tsdn, arena, decay, extents, &time, 836 should_purge); 837 } else if (is_background_thread) { 838 arena_decay_try_purge(tsdn, arena, decay, extents, 839 extents_npages_get(extents), 840 arena_decay_backlog_npages_limit(decay)); 841 } 842 843 return advance_epoch; 844 } 845 846 static ssize_t 847 arena_decay_ms_get(arena_decay_t *decay) { 848 return arena_decay_ms_read(decay); 849 } 850 851 ssize_t 852 arena_dirty_decay_ms_get(arena_t *arena) { 853 return arena_decay_ms_get(&arena->decay_dirty); 854 } 855 856 ssize_t 857 arena_muzzy_decay_ms_get(arena_t *arena) { 858 return arena_decay_ms_get(&arena->decay_muzzy); 859 } 860 861 static bool 862 arena_decay_ms_set(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, 863 extents_t *extents, ssize_t decay_ms) { 864 if (!arena_decay_ms_valid(decay_ms)) { 865 return true; 866 } 867 868 malloc_mutex_lock(tsdn, &decay->mtx); 869 /* 870 * Restart decay backlog from scratch, which may cause many dirty pages 871 * to be immediately purged. It would conceptually be possible to map 872 * the old backlog onto the new backlog, but there is no justification 873 * for such complexity since decay_ms changes are intended to be 874 * infrequent, either between the {-1, 0, >0} states, or a one-time 875 * arbitrary change during initial arena configuration. 876 */ 877 arena_decay_reinit(decay, extents, decay_ms); 878 arena_maybe_decay(tsdn, arena, decay, extents, false); 879 malloc_mutex_unlock(tsdn, &decay->mtx); 880 881 return false; 882 } 883 884 bool 885 arena_dirty_decay_ms_set(tsdn_t *tsdn, arena_t *arena, 886 ssize_t decay_ms) { 887 return arena_decay_ms_set(tsdn, arena, &arena->decay_dirty, 888 &arena->extents_dirty, decay_ms); 889 } 890 891 bool 892 arena_muzzy_decay_ms_set(tsdn_t *tsdn, arena_t *arena, 893 ssize_t decay_ms) { 894 return arena_decay_ms_set(tsdn, arena, &arena->decay_muzzy, 895 &arena->extents_muzzy, decay_ms); 896 } 897 898 static size_t 899 arena_stash_decayed(tsdn_t *tsdn, arena_t *arena, 900 extent_hooks_t **r_extent_hooks, extents_t *extents, size_t npages_limit, 901 extent_list_t *decay_extents) { 902 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 903 WITNESS_RANK_CORE, 0); 904 905 /* Stash extents according to npages_limit. */ 906 size_t nstashed = 0; 907 extent_t *extent; 908 while ((extent = extents_evict(tsdn, arena, r_extent_hooks, extents, 909 npages_limit)) != NULL) { 910 extent_list_append(decay_extents, extent); 911 nstashed += extent_size_get(extent) >> LG_PAGE; 912 } 913 return nstashed; 914 } 915 916 static size_t 917 arena_decay_stashed(tsdn_t *tsdn, arena_t *arena, 918 extent_hooks_t **r_extent_hooks, arena_decay_t *decay, extents_t *extents, 919 bool all, extent_list_t *decay_extents) { 920 UNUSED size_t nmadvise, nunmapped; 921 size_t npurged; 922 923 if (config_stats) { 924 nmadvise = 0; 925 nunmapped = 0; 926 } 927 npurged = 0; 928 929 ssize_t muzzy_decay_ms = arena_muzzy_decay_ms_get(arena); 930 for (extent_t *extent = extent_list_first(decay_extents); extent != 931 NULL; extent = extent_list_first(decay_extents)) { 932 if (config_stats) { 933 nmadvise++; 934 } 935 size_t npages = extent_size_get(extent) >> LG_PAGE; 936 npurged += npages; 937 extent_list_remove(decay_extents, extent); 938 switch (extents_state_get(extents)) { 939 case extent_state_active: 940 not_reached(); 941 case extent_state_dirty: 942 if (!all && muzzy_decay_ms != 0 && 943 !extent_purge_lazy_wrapper(tsdn, arena, 944 r_extent_hooks, extent, 0, 945 extent_size_get(extent))) { 946 extents_dalloc(tsdn, arena, r_extent_hooks, 947 &arena->extents_muzzy, extent); 948 arena_background_thread_inactivity_check(tsdn, 949 arena); 950 break; 951 } 952 /* Fall through. */ 953 case extent_state_muzzy: 954 extent_dalloc_wrapper(tsdn, arena, r_extent_hooks, 955 extent); 956 if (config_stats) { 957 nunmapped += npages; 958 } 959 break; 960 case extent_state_retained: 961 default: 962 not_reached(); 963 } 964 } 965 966 if (config_stats) { 967 arena_stats_lock(tsdn, &arena->stats); 968 arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->npurge, 969 1); 970 arena_stats_add_u64(tsdn, &arena->stats, 971 &decay->stats->nmadvise, nmadvise); 972 arena_stats_add_u64(tsdn, &arena->stats, &decay->stats->purged, 973 npurged); 974 arena_stats_sub_zu(tsdn, &arena->stats, &arena->stats.mapped, 975 nunmapped << LG_PAGE); 976 arena_stats_unlock(tsdn, &arena->stats); 977 } 978 979 return npurged; 980 } 981 982 /* 983 * npages_limit: Decay as many dirty extents as possible without violating the 984 * invariant: (extents_npages_get(extents) >= npages_limit) 985 */ 986 static void 987 arena_decay_to_limit(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, 988 extents_t *extents, bool all, size_t npages_limit) { 989 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 990 WITNESS_RANK_CORE, 1); 991 malloc_mutex_assert_owner(tsdn, &decay->mtx); 992 993 if (decay->purging) { 994 return; 995 } 996 decay->purging = true; 997 malloc_mutex_unlock(tsdn, &decay->mtx); 998 999 extent_hooks_t *extent_hooks = extent_hooks_get(arena); 1000 1001 extent_list_t decay_extents; 1002 extent_list_init(&decay_extents); 1003 1004 size_t npurge = arena_stash_decayed(tsdn, arena, &extent_hooks, extents, 1005 npages_limit, &decay_extents); 1006 if (npurge != 0) { 1007 UNUSED size_t npurged = arena_decay_stashed(tsdn, arena, 1008 &extent_hooks, decay, extents, all, &decay_extents); 1009 assert(npurged == npurge); 1010 } 1011 1012 malloc_mutex_lock(tsdn, &decay->mtx); 1013 decay->purging = false; 1014 } 1015 1016 static bool 1017 arena_decay_impl(tsdn_t *tsdn, arena_t *arena, arena_decay_t *decay, 1018 extents_t *extents, bool is_background_thread, bool all) { 1019 if (all) { 1020 malloc_mutex_lock(tsdn, &decay->mtx); 1021 arena_decay_to_limit(tsdn, arena, decay, extents, all, 0); 1022 malloc_mutex_unlock(tsdn, &decay->mtx); 1023 1024 return false; 1025 } 1026 1027 if (malloc_mutex_trylock(tsdn, &decay->mtx)) { 1028 /* No need to wait if another thread is in progress. */ 1029 return true; 1030 } 1031 1032 bool epoch_advanced = arena_maybe_decay(tsdn, arena, decay, extents, 1033 is_background_thread); 1034 size_t npages_new; 1035 if (epoch_advanced) { 1036 /* Backlog is updated on epoch advance. */ 1037 npages_new = decay->backlog[SMOOTHSTEP_NSTEPS-1]; 1038 } 1039 malloc_mutex_unlock(tsdn, &decay->mtx); 1040 1041 if (have_background_thread && background_thread_enabled() && 1042 epoch_advanced && !is_background_thread) { 1043 background_thread_interval_check(tsdn, arena, decay, npages_new); 1044 } 1045 1046 return false; 1047 } 1048 1049 static bool 1050 arena_decay_dirty(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, 1051 bool all) { 1052 return arena_decay_impl(tsdn, arena, &arena->decay_dirty, 1053 &arena->extents_dirty, is_background_thread, all); 1054 } 1055 1056 static bool 1057 arena_decay_muzzy(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, 1058 bool all) { 1059 return arena_decay_impl(tsdn, arena, &arena->decay_muzzy, 1060 &arena->extents_muzzy, is_background_thread, all); 1061 } 1062 1063 void 1064 arena_decay(tsdn_t *tsdn, arena_t *arena, bool is_background_thread, bool all) { 1065 if (arena_decay_dirty(tsdn, arena, is_background_thread, all)) { 1066 return; 1067 } 1068 arena_decay_muzzy(tsdn, arena, is_background_thread, all); 1069 } 1070 1071 static void 1072 arena_slab_dalloc(tsdn_t *tsdn, arena_t *arena, extent_t *slab) { 1073 arena_nactive_sub(arena, extent_size_get(slab) >> LG_PAGE); 1074 1075 extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; 1076 arena_extents_dirty_dalloc(tsdn, arena, &extent_hooks, slab); 1077 } 1078 1079 static void 1080 arena_bin_slabs_nonfull_insert(arena_bin_t *bin, extent_t *slab) { 1081 assert(extent_nfree_get(slab) > 0); 1082 extent_heap_insert(&bin->slabs_nonfull, slab); 1083 } 1084 1085 static void 1086 arena_bin_slabs_nonfull_remove(arena_bin_t *bin, extent_t *slab) { 1087 extent_heap_remove(&bin->slabs_nonfull, slab); 1088 } 1089 1090 static extent_t * 1091 arena_bin_slabs_nonfull_tryget(arena_bin_t *bin) { 1092 extent_t *slab = extent_heap_remove_first(&bin->slabs_nonfull); 1093 if (slab == NULL) { 1094 return NULL; 1095 } 1096 if (config_stats) { 1097 bin->stats.reslabs++; 1098 } 1099 return slab; 1100 } 1101 1102 static void 1103 arena_bin_slabs_full_insert(arena_t *arena, arena_bin_t *bin, extent_t *slab) { 1104 assert(extent_nfree_get(slab) == 0); 1105 /* 1106 * Tracking extents is required by arena_reset, which is not allowed 1107 * for auto arenas. Bypass this step to avoid touching the extent 1108 * linkage (often results in cache misses) for auto arenas. 1109 */ 1110 if (arena_is_auto(arena)) { 1111 return; 1112 } 1113 extent_list_append(&bin->slabs_full, slab); 1114 } 1115 1116 static void 1117 arena_bin_slabs_full_remove(arena_t *arena, arena_bin_t *bin, extent_t *slab) { 1118 if (arena_is_auto(arena)) { 1119 return; 1120 } 1121 extent_list_remove(&bin->slabs_full, slab); 1122 } 1123 1124 void 1125 arena_reset(tsd_t *tsd, arena_t *arena) { 1126 /* 1127 * Locking in this function is unintuitive. The caller guarantees that 1128 * no concurrent operations are happening in this arena, but there are 1129 * still reasons that some locking is necessary: 1130 * 1131 * - Some of the functions in the transitive closure of calls assume 1132 * appropriate locks are held, and in some cases these locks are 1133 * temporarily dropped to avoid lock order reversal or deadlock due to 1134 * reentry. 1135 * - mallctl("epoch", ...) may concurrently refresh stats. While 1136 * strictly speaking this is a "concurrent operation", disallowing 1137 * stats refreshes would impose an inconvenient burden. 1138 */ 1139 1140 /* Large allocations. */ 1141 malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); 1142 1143 for (extent_t *extent = extent_list_first(&arena->large); extent != 1144 NULL; extent = extent_list_first(&arena->large)) { 1145 void *ptr = extent_base_get(extent); 1146 size_t usize; 1147 1148 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); 1149 alloc_ctx_t alloc_ctx; 1150 rtree_ctx_t *rtree_ctx = tsd_rtree_ctx(tsd); 1151 rtree_szind_slab_read(tsd_tsdn(tsd), &extents_rtree, rtree_ctx, 1152 (uintptr_t)ptr, true, &alloc_ctx.szind, &alloc_ctx.slab); 1153 assert(alloc_ctx.szind != NSIZES); 1154 1155 if (config_stats || (config_prof && opt_prof)) { 1156 usize = sz_index2size(alloc_ctx.szind); 1157 assert(usize == isalloc(tsd_tsdn(tsd), ptr)); 1158 } 1159 /* Remove large allocation from prof sample set. */ 1160 if (config_prof && opt_prof) { 1161 prof_free(tsd, ptr, usize, &alloc_ctx); 1162 } 1163 large_dalloc(tsd_tsdn(tsd), extent); 1164 malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx); 1165 } 1166 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx); 1167 1168 /* Bins. */ 1169 for (unsigned i = 0; i < NBINS; i++) { 1170 extent_t *slab; 1171 arena_bin_t *bin = &arena->bins[i]; 1172 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); 1173 if (bin->slabcur != NULL) { 1174 slab = bin->slabcur; 1175 bin->slabcur = NULL; 1176 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); 1177 arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); 1178 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); 1179 } 1180 while ((slab = extent_heap_remove_first(&bin->slabs_nonfull)) != 1181 NULL) { 1182 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); 1183 arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); 1184 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); 1185 } 1186 for (slab = extent_list_first(&bin->slabs_full); slab != NULL; 1187 slab = extent_list_first(&bin->slabs_full)) { 1188 arena_bin_slabs_full_remove(arena, bin, slab); 1189 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); 1190 arena_slab_dalloc(tsd_tsdn(tsd), arena, slab); 1191 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); 1192 } 1193 if (config_stats) { 1194 bin->stats.curregs = 0; 1195 bin->stats.curslabs = 0; 1196 } 1197 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); 1198 } 1199 1200 atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); 1201 } 1202 1203 static void 1204 arena_destroy_retained(tsdn_t *tsdn, arena_t *arena) { 1205 /* 1206 * Iterate over the retained extents and destroy them. This gives the 1207 * extent allocator underlying the extent hooks an opportunity to unmap 1208 * all retained memory without having to keep its own metadata 1209 * structures. In practice, virtual memory for dss-allocated extents is 1210 * leaked here, so best practice is to avoid dss for arenas to be 1211 * destroyed, or provide custom extent hooks that track retained 1212 * dss-based extents for later reuse. 1213 */ 1214 extent_hooks_t *extent_hooks = extent_hooks_get(arena); 1215 extent_t *extent; 1216 while ((extent = extents_evict(tsdn, arena, &extent_hooks, 1217 &arena->extents_retained, 0)) != NULL) { 1218 extent_destroy_wrapper(tsdn, arena, &extent_hooks, extent); 1219 } 1220 } 1221 1222 void 1223 arena_destroy(tsd_t *tsd, arena_t *arena) { 1224 assert(base_ind_get(arena->base) >= narenas_auto); 1225 assert(arena_nthreads_get(arena, false) == 0); 1226 assert(arena_nthreads_get(arena, true) == 0); 1227 1228 /* 1229 * No allocations have occurred since arena_reset() was called. 1230 * Furthermore, the caller (arena_i_destroy_ctl()) purged all cached 1231 * extents, so only retained extents may remain. 1232 */ 1233 assert(extents_npages_get(&arena->extents_dirty) == 0); 1234 assert(extents_npages_get(&arena->extents_muzzy) == 0); 1235 1236 /* Deallocate retained memory. */ 1237 arena_destroy_retained(tsd_tsdn(tsd), arena); 1238 1239 /* 1240 * Remove the arena pointer from the arenas array. We rely on the fact 1241 * that there is no way for the application to get a dirty read from the 1242 * arenas array unless there is an inherent race in the application 1243 * involving access of an arena being concurrently destroyed. The 1244 * application must synchronize knowledge of the arena's validity, so as 1245 * long as we use an atomic write to update the arenas array, the 1246 * application will get a clean read any time after it synchronizes 1247 * knowledge that the arena is no longer valid. 1248 */ 1249 arena_set(base_ind_get(arena->base), NULL); 1250 1251 /* 1252 * Destroy the base allocator, which manages all metadata ever mapped by 1253 * this arena. 1254 */ 1255 base_delete(arena->base); 1256 } 1257 1258 static extent_t * 1259 arena_slab_alloc_hard(tsdn_t *tsdn, arena_t *arena, 1260 extent_hooks_t **r_extent_hooks, const arena_bin_info_t *bin_info, 1261 szind_t szind) { 1262 extent_t *slab; 1263 bool zero, commit; 1264 1265 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1266 WITNESS_RANK_CORE, 0); 1267 1268 zero = false; 1269 commit = true; 1270 slab = extent_alloc_wrapper(tsdn, arena, r_extent_hooks, NULL, 1271 bin_info->slab_size, 0, PAGE, true, szind, &zero, &commit); 1272 1273 if (config_stats && slab != NULL) { 1274 arena_stats_mapped_add(tsdn, &arena->stats, 1275 bin_info->slab_size); 1276 } 1277 1278 return slab; 1279 } 1280 1281 static extent_t * 1282 arena_slab_alloc(tsdn_t *tsdn, arena_t *arena, szind_t binind, 1283 const arena_bin_info_t *bin_info) { 1284 witness_assert_depth_to_rank(tsdn_witness_tsdp_get(tsdn), 1285 WITNESS_RANK_CORE, 0); 1286 1287 extent_hooks_t *extent_hooks = EXTENT_HOOKS_INITIALIZER; 1288 szind_t szind = sz_size2index(bin_info->reg_size); 1289 bool zero = false; 1290 bool commit = true; 1291 extent_t *slab = extents_alloc(tsdn, arena, &extent_hooks, 1292 &arena->extents_dirty, NULL, bin_info->slab_size, 0, PAGE, true, 1293 binind, &zero, &commit); 1294 if (slab == NULL) { 1295 slab = extents_alloc(tsdn, arena, &extent_hooks, 1296 &arena->extents_muzzy, NULL, bin_info->slab_size, 0, PAGE, 1297 true, binind, &zero, &commit); 1298 } 1299 if (slab == NULL) { 1300 slab = arena_slab_alloc_hard(tsdn, arena, &extent_hooks, 1301 bin_info, szind); 1302 if (slab == NULL) { 1303 return NULL; 1304 } 1305 } 1306 assert(extent_slab_get(slab)); 1307 1308 /* Initialize slab internals. */ 1309 arena_slab_data_t *slab_data = extent_slab_data_get(slab); 1310 extent_nfree_set(slab, bin_info->nregs); 1311 bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false); 1312 1313 arena_nactive_add(arena, extent_size_get(slab) >> LG_PAGE); 1314 1315 return slab; 1316 } 1317 1318 static extent_t * 1319 arena_bin_nonfull_slab_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, 1320 szind_t binind) { 1321 extent_t *slab; 1322 const arena_bin_info_t *bin_info; 1323 1324 /* Look for a usable slab. */ 1325 slab = arena_bin_slabs_nonfull_tryget(bin); 1326 if (slab != NULL) { 1327 return slab; 1328 } 1329 /* No existing slabs have any space available. */ 1330 1331 bin_info = &arena_bin_info[binind]; 1332 1333 /* Allocate a new slab. */ 1334 malloc_mutex_unlock(tsdn, &bin->lock); 1335 /******************************/ 1336 slab = arena_slab_alloc(tsdn, arena, binind, bin_info); 1337 /********************************/ 1338 malloc_mutex_lock(tsdn, &bin->lock); 1339 if (slab != NULL) { 1340 if (config_stats) { 1341 bin->stats.nslabs++; 1342 bin->stats.curslabs++; 1343 } 1344 return slab; 1345 } 1346 1347 /* 1348 * arena_slab_alloc() failed, but another thread may have made 1349 * sufficient memory available while this one dropped bin->lock above, 1350 * so search one more time. 1351 */ 1352 slab = arena_bin_slabs_nonfull_tryget(bin); 1353 if (slab != NULL) { 1354 return slab; 1355 } 1356 1357 return NULL; 1358 } 1359 1360 /* Re-fill bin->slabcur, then call arena_slab_reg_alloc(). */ 1361 static void * 1362 arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin, 1363 szind_t binind) { 1364 const arena_bin_info_t *bin_info; 1365 extent_t *slab; 1366 1367 bin_info = &arena_bin_info[binind]; 1368 if (!arena_is_auto(arena) && bin->slabcur != NULL) { 1369 arena_bin_slabs_full_insert(arena, bin, bin->slabcur); 1370 bin->slabcur = NULL; 1371 } 1372 slab = arena_bin_nonfull_slab_get(tsdn, arena, bin, binind); 1373 if (bin->slabcur != NULL) { 1374 /* 1375 * Another thread updated slabcur while this one ran without the 1376 * bin lock in arena_bin_nonfull_slab_get(). 1377 */ 1378 if (extent_nfree_get(bin->slabcur) > 0) { 1379 void *ret = arena_slab_reg_alloc(tsdn, bin->slabcur, 1380 bin_info); 1381 if (slab != NULL) { 1382 /* 1383 * arena_slab_alloc() may have allocated slab, 1384 * or it may have been pulled from 1385 * slabs_nonfull. Therefore it is unsafe to 1386 * make any assumptions about how slab has 1387 * previously been used, and 1388 * arena_bin_lower_slab() must be called, as if 1389 * a region were just deallocated from the slab. 1390 */ 1391 if (extent_nfree_get(slab) == bin_info->nregs) { 1392 arena_dalloc_bin_slab(tsdn, arena, slab, 1393 bin); 1394 } else { 1395 arena_bin_lower_slab(tsdn, arena, slab, 1396 bin); 1397 } 1398 } 1399 return ret; 1400 } 1401 1402 arena_bin_slabs_full_insert(arena, bin, bin->slabcur); 1403 bin->slabcur = NULL; 1404 } 1405 1406 if (slab == NULL) { 1407 return NULL; 1408 } 1409 bin->slabcur = slab; 1410 1411 assert(extent_nfree_get(bin->slabcur) > 0); 1412 1413 return arena_slab_reg_alloc(tsdn, slab, bin_info); 1414 } 1415 1416 void 1417 arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_t *tcache, 1418 tcache_bin_t *tbin, szind_t binind, uint64_t prof_accumbytes) { 1419 unsigned i, nfill; 1420 arena_bin_t *bin; 1421 1422 assert(tbin->ncached == 0); 1423 1424 if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) { 1425 prof_idump(tsdn); 1426 } 1427 bin = &arena->bins[binind]; 1428 malloc_mutex_lock(tsdn, &bin->lock); 1429 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> 1430 tcache->lg_fill_div[binind]); i < nfill; i++) { 1431 extent_t *slab; 1432 void *ptr; 1433 if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 1434 0) { 1435 ptr = arena_slab_reg_alloc(tsdn, slab, 1436 &arena_bin_info[binind]); 1437 } else { 1438 ptr = arena_bin_malloc_hard(tsdn, arena, bin, binind); 1439 } 1440 if (ptr == NULL) { 1441 /* 1442 * OOM. tbin->avail isn't yet filled down to its first 1443 * element, so the successful allocations (if any) must 1444 * be moved just before tbin->avail before bailing out. 1445 */ 1446 if (i > 0) { 1447 memmove(tbin->avail - i, tbin->avail - nfill, 1448 i * sizeof(void *)); 1449 } 1450 break; 1451 } 1452 if (config_fill && unlikely(opt_junk_alloc)) { 1453 arena_alloc_junk_small(ptr, &arena_bin_info[binind], 1454 true); 1455 } 1456 /* Insert such that low regions get used first. */ 1457 *(tbin->avail - nfill + i) = ptr; 1458 } 1459 if (config_stats) { 1460 bin->stats.nmalloc += i; 1461 bin->stats.nrequests += tbin->tstats.nrequests; 1462 bin->stats.curregs += i; 1463 bin->stats.nfills++; 1464 tbin->tstats.nrequests = 0; 1465 } 1466 malloc_mutex_unlock(tsdn, &bin->lock); 1467 tbin->ncached = i; 1468 arena_decay_tick(tsdn, arena); 1469 } 1470 1471 void 1472 arena_alloc_junk_small(void *ptr, const arena_bin_info_t *bin_info, bool zero) { 1473 if (!zero) { 1474 memset(ptr, JEMALLOC_ALLOC_JUNK, bin_info->reg_size); 1475 } 1476 } 1477 1478 static void 1479 arena_dalloc_junk_small_impl(void *ptr, const arena_bin_info_t *bin_info) { 1480 memset(ptr, JEMALLOC_FREE_JUNK, bin_info->reg_size); 1481 } 1482 arena_dalloc_junk_small_t *JET_MUTABLE arena_dalloc_junk_small = 1483 arena_dalloc_junk_small_impl; 1484 1485 static void * 1486 arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) { 1487 void *ret; 1488 arena_bin_t *bin; 1489 size_t usize; 1490 extent_t *slab; 1491 1492 assert(binind < NBINS); 1493 bin = &arena->bins[binind]; 1494 usize = sz_index2size(binind); 1495 1496 malloc_mutex_lock(tsdn, &bin->lock); 1497 if ((slab = bin->slabcur) != NULL && extent_nfree_get(slab) > 0) { 1498 ret = arena_slab_reg_alloc(tsdn, slab, &arena_bin_info[binind]); 1499 } else { 1500 ret = arena_bin_malloc_hard(tsdn, arena, bin, binind); 1501 } 1502 1503 if (ret == NULL) { 1504 malloc_mutex_unlock(tsdn, &bin->lock); 1505 return NULL; 1506 } 1507 1508 if (config_stats) { 1509 bin->stats.nmalloc++; 1510 bin->stats.nrequests++; 1511 bin->stats.curregs++; 1512 } 1513 malloc_mutex_unlock(tsdn, &bin->lock); 1514 if (config_prof && arena_prof_accum(tsdn, arena, usize)) { 1515 prof_idump(tsdn); 1516 } 1517 1518 if (!zero) { 1519 if (config_fill) { 1520 if (unlikely(opt_junk_alloc)) { 1521 arena_alloc_junk_small(ret, 1522 &arena_bin_info[binind], false); 1523 } else if (unlikely(opt_zero)) { 1524 memset(ret, 0, usize); 1525 } 1526 } 1527 } else { 1528 if (config_fill && unlikely(opt_junk_alloc)) { 1529 arena_alloc_junk_small(ret, &arena_bin_info[binind], 1530 true); 1531 } 1532 memset(ret, 0, usize); 1533 } 1534 1535 arena_decay_tick(tsdn, arena); 1536 return ret; 1537 } 1538 1539 void * 1540 arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, 1541 bool zero) { 1542 assert(!tsdn_null(tsdn) || arena != NULL); 1543 1544 if (likely(!tsdn_null(tsdn))) { 1545 arena = arena_choose(tsdn_tsd(tsdn), arena); 1546 } 1547 if (unlikely(arena == NULL)) { 1548 return NULL; 1549 } 1550 1551 if (likely(size <= SMALL_MAXCLASS)) { 1552 return arena_malloc_small(tsdn, arena, ind, zero); 1553 } 1554 return large_malloc(tsdn, arena, sz_index2size(ind), zero); 1555 } 1556 1557 void * 1558 arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, 1559 bool zero, tcache_t *tcache) { 1560 void *ret; 1561 1562 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE 1563 && (usize & PAGE_MASK) == 0))) { 1564 /* Small; alignment doesn't require special slab placement. */ 1565 ret = arena_malloc(tsdn, arena, usize, sz_size2index(usize), 1566 zero, tcache, true); 1567 } else { 1568 if (likely(alignment <= CACHELINE)) { 1569 ret = large_malloc(tsdn, arena, usize, zero); 1570 } else { 1571 ret = large_palloc(tsdn, arena, usize, alignment, zero); 1572 } 1573 } 1574 return ret; 1575 } 1576 1577 void 1578 arena_prof_promote(tsdn_t *tsdn, const void *ptr, size_t usize) { 1579 cassert(config_prof); 1580 assert(ptr != NULL); 1581 assert(isalloc(tsdn, ptr) == LARGE_MINCLASS); 1582 assert(usize <= SMALL_MAXCLASS); 1583 1584 rtree_ctx_t rtree_ctx_fallback; 1585 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 1586 1587 extent_t *extent = rtree_extent_read(tsdn, &extents_rtree, rtree_ctx, 1588 (uintptr_t)ptr, true); 1589 arena_t *arena = extent_arena_get(extent); 1590 1591 szind_t szind = sz_size2index(usize); 1592 extent_szind_set(extent, szind); 1593 rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, 1594 szind, false); 1595 1596 prof_accum_cancel(tsdn, &arena->prof_accum, usize); 1597 1598 assert(isalloc(tsdn, ptr) == usize); 1599 } 1600 1601 static size_t 1602 arena_prof_demote(tsdn_t *tsdn, extent_t *extent, const void *ptr) { 1603 cassert(config_prof); 1604 assert(ptr != NULL); 1605 1606 extent_szind_set(extent, NBINS); 1607 rtree_ctx_t rtree_ctx_fallback; 1608 rtree_ctx_t *rtree_ctx = tsdn_rtree_ctx(tsdn, &rtree_ctx_fallback); 1609 rtree_szind_slab_update(tsdn, &extents_rtree, rtree_ctx, (uintptr_t)ptr, 1610 NBINS, false); 1611 1612 assert(isalloc(tsdn, ptr) == LARGE_MINCLASS); 1613 1614 return LARGE_MINCLASS; 1615 } 1616 1617 void 1618 arena_dalloc_promoted(tsdn_t *tsdn, void *ptr, tcache_t *tcache, 1619 bool slow_path) { 1620 cassert(config_prof); 1621 assert(opt_prof); 1622 1623 extent_t *extent = iealloc(tsdn, ptr); 1624 size_t usize = arena_prof_demote(tsdn, extent, ptr); 1625 if (usize <= tcache_maxclass) { 1626 tcache_dalloc_large(tsdn_tsd(tsdn), tcache, ptr, 1627 sz_size2index(usize), slow_path); 1628 } else { 1629 large_dalloc(tsdn, extent); 1630 } 1631 } 1632 1633 static void 1634 arena_dissociate_bin_slab(arena_t *arena, extent_t *slab, arena_bin_t *bin) { 1635 /* Dissociate slab from bin. */ 1636 if (slab == bin->slabcur) { 1637 bin->slabcur = NULL; 1638 } else { 1639 szind_t binind = extent_szind_get(slab); 1640 const arena_bin_info_t *bin_info = &arena_bin_info[binind]; 1641 1642 /* 1643 * The following block's conditional is necessary because if the 1644 * slab only contains one region, then it never gets inserted 1645 * into the non-full slabs heap. 1646 */ 1647 if (bin_info->nregs == 1) { 1648 arena_bin_slabs_full_remove(arena, bin, slab); 1649 } else { 1650 arena_bin_slabs_nonfull_remove(bin, slab); 1651 } 1652 } 1653 } 1654 1655 static void 1656 arena_dalloc_bin_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, 1657 arena_bin_t *bin) { 1658 assert(slab != bin->slabcur); 1659 1660 malloc_mutex_unlock(tsdn, &bin->lock); 1661 /******************************/ 1662 arena_slab_dalloc(tsdn, arena, slab); 1663 /****************************/ 1664 malloc_mutex_lock(tsdn, &bin->lock); 1665 if (config_stats) { 1666 bin->stats.curslabs--; 1667 } 1668 } 1669 1670 static void 1671 arena_bin_lower_slab(tsdn_t *tsdn, arena_t *arena, extent_t *slab, 1672 arena_bin_t *bin) { 1673 assert(extent_nfree_get(slab) > 0); 1674 1675 /* 1676 * Make sure that if bin->slabcur is non-NULL, it refers to the 1677 * oldest/lowest non-full slab. It is okay to NULL slabcur out rather 1678 * than proactively keeping it pointing at the oldest/lowest non-full 1679 * slab. 1680 */ 1681 if (bin->slabcur != NULL && extent_snad_comp(bin->slabcur, slab) > 0) { 1682 /* Switch slabcur. */ 1683 if (extent_nfree_get(bin->slabcur) > 0) { 1684 arena_bin_slabs_nonfull_insert(bin, bin->slabcur); 1685 } else { 1686 arena_bin_slabs_full_insert(arena, bin, bin->slabcur); 1687 } 1688 bin->slabcur = slab; 1689 if (config_stats) { 1690 bin->stats.reslabs++; 1691 } 1692 } else { 1693 arena_bin_slabs_nonfull_insert(bin, slab); 1694 } 1695 } 1696 1697 static void 1698 arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, extent_t *slab, 1699 void *ptr, bool junked) { 1700 arena_slab_data_t *slab_data = extent_slab_data_get(slab); 1701 szind_t binind = extent_szind_get(slab); 1702 arena_bin_t *bin = &arena->bins[binind]; 1703 const arena_bin_info_t *bin_info = &arena_bin_info[binind]; 1704 1705 if (!junked && config_fill && unlikely(opt_junk_free)) { 1706 arena_dalloc_junk_small(ptr, bin_info); 1707 } 1708 1709 arena_slab_reg_dalloc(tsdn, slab, slab_data, ptr); 1710 unsigned nfree = extent_nfree_get(slab); 1711 if (nfree == bin_info->nregs) { 1712 arena_dissociate_bin_slab(arena, slab, bin); 1713 arena_dalloc_bin_slab(tsdn, arena, slab, bin); 1714 } else if (nfree == 1 && slab != bin->slabcur) { 1715 arena_bin_slabs_full_remove(arena, bin, slab); 1716 arena_bin_lower_slab(tsdn, arena, slab, bin); 1717 } 1718 1719 if (config_stats) { 1720 bin->stats.ndalloc++; 1721 bin->stats.curregs--; 1722 } 1723 } 1724 1725 void 1726 arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, extent_t *extent, 1727 void *ptr) { 1728 arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, true); 1729 } 1730 1731 static void 1732 arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, extent_t *extent, void *ptr) { 1733 szind_t binind = extent_szind_get(extent); 1734 arena_bin_t *bin = &arena->bins[binind]; 1735 1736 malloc_mutex_lock(tsdn, &bin->lock); 1737 arena_dalloc_bin_locked_impl(tsdn, arena, extent, ptr, false); 1738 malloc_mutex_unlock(tsdn, &bin->lock); 1739 } 1740 1741 void 1742 arena_dalloc_small(tsdn_t *tsdn, void *ptr) { 1743 extent_t *extent = iealloc(tsdn, ptr); 1744 arena_t *arena = extent_arena_get(extent); 1745 1746 arena_dalloc_bin(tsdn, arena, extent, ptr); 1747 arena_decay_tick(tsdn, arena); 1748 } 1749 1750 bool 1751 arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, 1752 size_t extra, bool zero) { 1753 /* Calls with non-zero extra had to clamp extra. */ 1754 assert(extra == 0 || size + extra <= LARGE_MAXCLASS); 1755 1756 if (unlikely(size > LARGE_MAXCLASS)) { 1757 return true; 1758 } 1759 1760 extent_t *extent = iealloc(tsdn, ptr); 1761 size_t usize_min = sz_s2u(size); 1762 size_t usize_max = sz_s2u(size + extra); 1763 if (likely(oldsize <= SMALL_MAXCLASS && usize_min <= SMALL_MAXCLASS)) { 1764 /* 1765 * Avoid moving the allocation if the size class can be left the 1766 * same. 1767 */ 1768 assert(arena_bin_info[sz_size2index(oldsize)].reg_size == 1769 oldsize); 1770 if ((usize_max > SMALL_MAXCLASS || sz_size2index(usize_max) != 1771 sz_size2index(oldsize)) && (size > oldsize || usize_max < 1772 oldsize)) { 1773 return true; 1774 } 1775 1776 arena_decay_tick(tsdn, extent_arena_get(extent)); 1777 return false; 1778 } else if (oldsize >= LARGE_MINCLASS && usize_max >= LARGE_MINCLASS) { 1779 return large_ralloc_no_move(tsdn, extent, usize_min, usize_max, 1780 zero); 1781 } 1782 1783 return true; 1784 } 1785 1786 static void * 1787 arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, 1788 size_t alignment, bool zero, tcache_t *tcache) { 1789 if (alignment == 0) { 1790 return arena_malloc(tsdn, arena, usize, sz_size2index(usize), 1791 zero, tcache, true); 1792 } 1793 usize = sz_sa2u(usize, alignment); 1794 if (unlikely(usize == 0 || usize > LARGE_MAXCLASS)) { 1795 return NULL; 1796 } 1797 return ipalloct(tsdn, usize, alignment, zero, tcache, arena); 1798 } 1799 1800 void * 1801 arena_ralloc(tsdn_t *tsdn, arena_t *arena, void *ptr, size_t oldsize, 1802 size_t size, size_t alignment, bool zero, tcache_t *tcache) { 1803 size_t usize = sz_s2u(size); 1804 if (unlikely(usize == 0 || size > LARGE_MAXCLASS)) { 1805 return NULL; 1806 } 1807 1808 if (likely(usize <= SMALL_MAXCLASS)) { 1809 /* Try to avoid moving the allocation. */ 1810 if (!arena_ralloc_no_move(tsdn, ptr, oldsize, usize, 0, zero)) { 1811 return ptr; 1812 } 1813 } 1814 1815 if (oldsize >= LARGE_MINCLASS && usize >= LARGE_MINCLASS) { 1816 return large_ralloc(tsdn, arena, iealloc(tsdn, ptr), usize, 1817 alignment, zero, tcache); 1818 } 1819 1820 /* 1821 * size and oldsize are different enough that we need to move the 1822 * object. In that case, fall back to allocating new space and copying. 1823 */ 1824 void *ret = arena_ralloc_move_helper(tsdn, arena, usize, alignment, 1825 zero, tcache); 1826 if (ret == NULL) { 1827 return NULL; 1828 } 1829 1830 /* 1831 * Junk/zero-filling were already done by 1832 * ipalloc()/arena_malloc(). 1833 */ 1834 1835 size_t copysize = (usize < oldsize) ? usize : oldsize; 1836 memcpy(ret, ptr, copysize); 1837 isdalloct(tsdn, ptr, oldsize, tcache, NULL, true); 1838 return ret; 1839 } 1840 1841 dss_prec_t 1842 arena_dss_prec_get(arena_t *arena) { 1843 return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE); 1844 } 1845 1846 bool 1847 arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) { 1848 if (!have_dss) { 1849 return (dss_prec != dss_prec_disabled); 1850 } 1851 atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE); 1852 return false; 1853 } 1854 1855 ssize_t 1856 arena_dirty_decay_ms_default_get(void) { 1857 return atomic_load_zd(&dirty_decay_ms_default, ATOMIC_RELAXED); 1858 } 1859 1860 bool 1861 arena_dirty_decay_ms_default_set(ssize_t decay_ms) { 1862 if (!arena_decay_ms_valid(decay_ms)) { 1863 return true; 1864 } 1865 atomic_store_zd(&dirty_decay_ms_default, decay_ms, ATOMIC_RELAXED); 1866 return false; 1867 } 1868 1869 ssize_t 1870 arena_muzzy_decay_ms_default_get(void) { 1871 return atomic_load_zd(&muzzy_decay_ms_default, ATOMIC_RELAXED); 1872 } 1873 1874 bool 1875 arena_muzzy_decay_ms_default_set(ssize_t decay_ms) { 1876 if (!arena_decay_ms_valid(decay_ms)) { 1877 return true; 1878 } 1879 atomic_store_zd(&muzzy_decay_ms_default, decay_ms, ATOMIC_RELAXED); 1880 return false; 1881 } 1882 1883 unsigned 1884 arena_nthreads_get(arena_t *arena, bool internal) { 1885 return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED); 1886 } 1887 1888 void 1889 arena_nthreads_inc(arena_t *arena, bool internal) { 1890 atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); 1891 } 1892 1893 void 1894 arena_nthreads_dec(arena_t *arena, bool internal) { 1895 atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED); 1896 } 1897 1898 size_t 1899 arena_extent_sn_next(arena_t *arena) { 1900 return atomic_fetch_add_zu(&arena->extent_sn_next, 1, ATOMIC_RELAXED); 1901 } 1902 1903 arena_t * 1904 arena_new(tsdn_t *tsdn, unsigned ind, extent_hooks_t *extent_hooks) { 1905 arena_t *arena; 1906 base_t *base; 1907 unsigned i; 1908 1909 if (ind == 0) { 1910 base = b0get(); 1911 } else { 1912 base = base_new(tsdn, ind, extent_hooks); 1913 if (base == NULL) { 1914 return NULL; 1915 } 1916 } 1917 1918 arena = (arena_t *)base_alloc(tsdn, base, sizeof(arena_t), CACHELINE); 1919 if (arena == NULL) { 1920 goto label_error; 1921 } 1922 1923 atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED); 1924 atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED); 1925 arena->last_thd = NULL; 1926 1927 if (config_stats) { 1928 if (arena_stats_init(tsdn, &arena->stats)) { 1929 goto label_error; 1930 } 1931 1932 ql_new(&arena->tcache_ql); 1933 if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql", 1934 WITNESS_RANK_TCACHE_QL, malloc_mutex_rank_exclusive)) { 1935 goto label_error; 1936 } 1937 } 1938 1939 if (config_prof) { 1940 if (prof_accum_init(tsdn, &arena->prof_accum)) { 1941 goto label_error; 1942 } 1943 } 1944 1945 if (config_cache_oblivious) { 1946 /* 1947 * A nondeterministic seed based on the address of arena reduces 1948 * the likelihood of lockstep non-uniform cache index 1949 * utilization among identical concurrent processes, but at the 1950 * cost of test repeatability. For debug builds, instead use a 1951 * deterministic seed. 1952 */ 1953 atomic_store_zu(&arena->offset_state, config_debug ? ind : 1954 (size_t)(uintptr_t)arena, ATOMIC_RELAXED); 1955 } 1956 1957 atomic_store_zu(&arena->extent_sn_next, 0, ATOMIC_RELAXED); 1958 1959 atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(), 1960 ATOMIC_RELAXED); 1961 1962 atomic_store_zu(&arena->nactive, 0, ATOMIC_RELAXED); 1963 1964 extent_list_init(&arena->large); 1965 if (malloc_mutex_init(&arena->large_mtx, "arena_large", 1966 WITNESS_RANK_ARENA_LARGE, malloc_mutex_rank_exclusive)) { 1967 goto label_error; 1968 } 1969 1970 /* 1971 * Delay coalescing for dirty extents despite the disruptive effect on 1972 * memory layout for best-fit extent allocation, since cached extents 1973 * are likely to be reused soon after deallocation, and the cost of 1974 * merging/splitting extents is non-trivial. 1975 */ 1976 if (extents_init(tsdn, &arena->extents_dirty, extent_state_dirty, 1977 true)) { 1978 goto label_error; 1979 } 1980 /* 1981 * Coalesce muzzy extents immediately, because operations on them are in 1982 * the critical path much less often than for dirty extents. 1983 */ 1984 if (extents_init(tsdn, &arena->extents_muzzy, extent_state_muzzy, 1985 false)) { 1986 goto label_error; 1987 } 1988 /* 1989 * Coalesce retained extents immediately, in part because they will 1990 * never be evicted (and therefore there's no opportunity for delayed 1991 * coalescing), but also because operations on retained extents are not 1992 * in the critical path. 1993 */ 1994 if (extents_init(tsdn, &arena->extents_retained, extent_state_retained, 1995 false)) { 1996 goto label_error; 1997 } 1998 1999 if (arena_decay_init(&arena->decay_dirty, &arena->extents_dirty, 2000 arena_dirty_decay_ms_default_get(), &arena->stats.decay_dirty)) { 2001 goto label_error; 2002 } 2003 if (arena_decay_init(&arena->decay_muzzy, &arena->extents_muzzy, 2004 arena_muzzy_decay_ms_default_get(), &arena->stats.decay_muzzy)) { 2005 goto label_error; 2006 } 2007 2008 arena->extent_grow_next = sz_psz2ind(HUGEPAGE); 2009 if (malloc_mutex_init(&arena->extent_grow_mtx, "extent_grow", 2010 WITNESS_RANK_EXTENT_GROW, malloc_mutex_rank_exclusive)) { 2011 goto label_error; 2012 } 2013 2014 extent_avail_new(&arena->extent_avail); 2015 if (malloc_mutex_init(&arena->extent_avail_mtx, "extent_avail", 2016 WITNESS_RANK_EXTENT_AVAIL, malloc_mutex_rank_exclusive)) { 2017 goto label_error; 2018 } 2019 2020 /* Initialize bins. */ 2021 for (i = 0; i < NBINS; i++) { 2022 arena_bin_t *bin = &arena->bins[i]; 2023 if (malloc_mutex_init(&bin->lock, "arena_bin", 2024 WITNESS_RANK_ARENA_BIN, malloc_mutex_rank_exclusive)) { 2025 goto label_error; 2026 } 2027 bin->slabcur = NULL; 2028 extent_heap_new(&bin->slabs_nonfull); 2029 extent_list_init(&bin->slabs_full); 2030 if (config_stats) { 2031 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); 2032 } 2033 } 2034 2035 arena->base = base; 2036 /* Set arena before creating background threads. */ 2037 arena_set(ind, arena); 2038 2039 nstime_init(&arena->create_time, 0); 2040 nstime_update(&arena->create_time); 2041 2042 /* We don't support reentrancy for arena 0 bootstrapping. */ 2043 if (ind != 0) { 2044 /* 2045 * If we're here, then arena 0 already exists, so bootstrapping 2046 * is done enough that we should have tsd. 2047 */ 2048 assert(!tsdn_null(tsdn)); 2049 pre_reentrancy(tsdn_tsd(tsdn)); 2050 if (hooks_arena_new_hook) { 2051 hooks_arena_new_hook(); 2052 } 2053 post_reentrancy(tsdn_tsd(tsdn)); 2054 } 2055 2056 return arena; 2057 label_error: 2058 if (ind != 0) { 2059 base_delete(base); 2060 } 2061 return NULL; 2062 } 2063 2064 void 2065 arena_boot(void) { 2066 arena_dirty_decay_ms_default_set(opt_dirty_decay_ms); 2067 arena_muzzy_decay_ms_default_set(opt_muzzy_decay_ms); 2068 } 2069 2070 void 2071 arena_prefork0(tsdn_t *tsdn, arena_t *arena) { 2072 malloc_mutex_prefork(tsdn, &arena->decay_dirty.mtx); 2073 malloc_mutex_prefork(tsdn, &arena->decay_muzzy.mtx); 2074 } 2075 2076 void 2077 arena_prefork1(tsdn_t *tsdn, arena_t *arena) { 2078 if (config_stats) { 2079 malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx); 2080 } 2081 } 2082 2083 void 2084 arena_prefork2(tsdn_t *tsdn, arena_t *arena) { 2085 extents_prefork(tsdn, &arena->extents_dirty); 2086 extents_prefork(tsdn, &arena->extents_muzzy); 2087 extents_prefork(tsdn, &arena->extents_retained); 2088 } 2089 2090 void 2091 arena_prefork3(tsdn_t *tsdn, arena_t *arena) { 2092 malloc_mutex_prefork(tsdn, &arena->extent_avail_mtx); 2093 } 2094 2095 void 2096 arena_prefork4(tsdn_t *tsdn, arena_t *arena) { 2097 base_prefork(tsdn, arena->base); 2098 } 2099 2100 void 2101 arena_prefork5(tsdn_t *tsdn, arena_t *arena) { 2102 malloc_mutex_prefork(tsdn, &arena->large_mtx); 2103 } 2104 2105 void 2106 arena_prefork6(tsdn_t *tsdn, arena_t *arena) { 2107 for (unsigned i = 0; i < NBINS; i++) { 2108 malloc_mutex_prefork(tsdn, &arena->bins[i].lock); 2109 } 2110 } 2111 2112 void 2113 arena_postfork_parent(tsdn_t *tsdn, arena_t *arena) { 2114 unsigned i; 2115 2116 for (i = 0; i < NBINS; i++) { 2117 malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock); 2118 } 2119 malloc_mutex_postfork_parent(tsdn, &arena->large_mtx); 2120 base_postfork_parent(tsdn, arena->base); 2121 malloc_mutex_postfork_parent(tsdn, &arena->extent_avail_mtx); 2122 extents_postfork_parent(tsdn, &arena->extents_dirty); 2123 extents_postfork_parent(tsdn, &arena->extents_muzzy); 2124 extents_postfork_parent(tsdn, &arena->extents_retained); 2125 malloc_mutex_postfork_parent(tsdn, &arena->decay_dirty.mtx); 2126 malloc_mutex_postfork_parent(tsdn, &arena->decay_muzzy.mtx); 2127 if (config_stats) { 2128 malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx); 2129 } 2130 } 2131 2132 void 2133 arena_postfork_child(tsdn_t *tsdn, arena_t *arena) { 2134 unsigned i; 2135 2136 for (i = 0; i < NBINS; i++) { 2137 malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock); 2138 } 2139 malloc_mutex_postfork_child(tsdn, &arena->large_mtx); 2140 base_postfork_child(tsdn, arena->base); 2141 malloc_mutex_postfork_child(tsdn, &arena->extent_avail_mtx); 2142 extents_postfork_child(tsdn, &arena->extents_dirty); 2143 extents_postfork_child(tsdn, &arena->extents_muzzy); 2144 extents_postfork_child(tsdn, &arena->extents_retained); 2145 malloc_mutex_postfork_child(tsdn, &arena->decay_dirty.mtx); 2146 malloc_mutex_postfork_child(tsdn, &arena->decay_muzzy.mtx); 2147 if (config_stats) { 2148 malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx); 2149 } 2150 } 2151