#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

purge_mode_t opt_purge = PURGE_DEFAULT;
const char *purge_mode_names[] = {
	"ratio",
	"decay",
	"N/A"
};
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t lg_dirty_mult_default;
ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t decay_time_default;

arena_bin_info_t arena_bin_info[NBINS];

size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun;		/* Max run size for arenas. */
size_t large_maxclass;		/* Max large size class. */
unsigned nlclasses;		/* Number of large size classes. */
unsigned nhclasses;		/* Number of huge size classes. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk);
static void arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena,
    size_t ndirty_limit);
static void arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run,
    bool dirty, bool cleaned, bool decommitted);
static void arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena,
    arena_chunk_t *chunk, arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_INLINE_C int
arena_run_addr_comp(const arena_chunk_map_misc_t *a,
    const arena_chunk_map_misc_t *b)
{
	uintptr_t a_miscelm = (uintptr_t)a;
	uintptr_t b_miscelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

/* Generate pairing heap functions. */
ph_gen(static UNUSED, arena_run_heap_, arena_run_heap_t, arena_chunk_map_misc_t,
    ph_link, arena_run_addr_comp)
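/*
 * Run quantization maps arbitrary page-aligned run sizes onto the psz size
 * class lattice so that the runs_avail heaps can be indexed by pszind_t.
 * run_quantize_floor() rounds down to the largest quantized size that does not
 * exceed the input, and run_quantize_ceil() rounds up to the smallest
 * quantized size guaranteed to satisfy it.
 */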
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(n_run_quantize_floor)
#endif
static size_t
run_quantize_floor(size_t size)
{
	size_t ret;
	pszind_t pind;

	assert(size > 0);
	assert(size <= HUGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	pind = psz2ind(size - large_pad + 1);
	if (pind == 0) {
		/*
		 * Avoid underflow.  This short-circuit would also do the right
		 * thing for all sizes in the range for which there are
		 * PAGE-spaced size classes, but it's simplest to just handle
		 * the one case that would cause erroneous results.
		 */
		return (size);
	}
	ret = pind2sz(pind - 1) + large_pad;
	assert(ret <= size);
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(n_run_quantize_floor);
#endif

#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define run_quantize_ceil JEMALLOC_N(n_run_quantize_ceil)
#endif
static size_t
run_quantize_ceil(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= HUGE_MAXCLASS);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_floor(size);
	if (ret < size) {
		/*
		 * Skip a quantization that may have an adequately large run,
		 * because under-sized runs may be mixed in.  This only happens
		 * when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		ret = pind2sz(psz2ind(ret - large_pad + 1)) + large_pad;
	}
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
run_quantize_t *run_quantize_ceil = JEMALLOC_N(n_run_quantize_ceil);
#endif

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert((npages << LG_PAGE) < chunksize);
	assert(pind2sz(pind) <= chunksize);
	arena_run_heap_insert(&arena->runs_avail[pind],
	    arena_miscelm_get_mutable(chunk, pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	pszind_t pind = psz2ind(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get_const(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert((npages << LG_PAGE) < chunksize);
	assert(pind2sz(pind) <= chunksize);
	arena_run_heap_remove(&arena->runs_avail[pind],
	    arena_miscelm_get_mutable(chunk, pageind));
}
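/*
 * Dirty page accounting: unallocated dirty runs are linked into the
 * arena->runs_dirty ring (and cached chunks into chunks_cache), and
 * arena->ndirty tracks the total number of dirty pages that the purging
 * heuristics below operate on.
 */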
static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_new(&miscelm->rd, rd_link);
	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
	arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk,
	    pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_remove(&miscelm->rd, rd_link);
	assert(arena->ndirty >= npages);
	arena->ndirty -= npages;
}

static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

	return (extent_node_size_get(node) >> LG_PAGE);
}

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

	if (cache) {
		extent_node_dirty_linkage_init(node);
		extent_node_dirty_insert(node, &arena->runs_dirty,
		    &arena->chunks_cache);
		arena->ndirty += arena_chunk_dirty_npages(node);
	}
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

	if (dirty) {
		extent_node_dirty_remove(node);
		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
		arena->ndirty -= arena_chunk_dirty_npages(node);
	}
}
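/*
 * Small regions are carved out of a run via the run's bitmap: bitmap_sfu()
 * finds and sets the first unset bit, and the region address is computed as
 * rpages + reg0_offset + reg_interval * regind.  For example, with
 * reg0_offset == 0 and reg_interval == 32, region 3 starts 96 bytes into the
 * run's usable pages.
 */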
JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	size_t regind;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(run->nfree > 0);
	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);
	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	return (ret);
}

JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	size_t regind = arena_run_regind(run, bin_info, ptr);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr -
	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >=
	    (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
	    << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages)
{

	if (config_stats) {
		size_t cactive_add = CHUNK_CEILING((arena->nactive +
		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_add != 0)
			stats_cactive_add(cactive_add);
	}
	arena->nactive += add_pages;
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{

	if (config_stats) {
		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
		if (cactive_sub != 0)
			stats_cactive_sub(cactive_sub);
	}
	arena->nactive -= sub_pages;
}
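/*
 * Splitting removes the containing free run from runs_avail (and runs_dirty
 * if it is dirty), marks the leading need_pages as allocated, and re-inserts
 * any trailing remainder as a smaller free run.
 */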
static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
	size_t total_pages, rem_pages;

	assert(flag_dirty == 0 || flag_decommitted == 0);

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages);
	if (flag_dirty != 0)
		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
	arena_nactive_add(arena, need_pages);

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		size_t flags = flag_dirty | flag_decommitted;
		size_t flag_unzeroed_mask = (flags == 0) ? CHUNK_MAP_UNZEROED :
		    0;

		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
		    flag_unzeroed_mask));
		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
		    flag_unzeroed_mask));
		if (flag_dirty != 0) {
			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
			    rem_pages);
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
	}
}
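/*
 * When zeroed memory is requested from a split, three cases arise: a
 * decommitted run is untouched and therefore already zeroed once committed; a
 * dirty run must be zeroed in full; and a clean run may mix zeroed and
 * touched pages, so each page's CHUNK_MAP_UNZEROED mapbit is consulted
 * individually.
 */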
static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
	size_t flag_unzeroed_mask;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    flag_decommitted, need_pages);
	}

	if (zero) {
		if (flag_decommitted != 0) {
			/* The run is untouched, and therefore zeroed. */
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
			    (need_pages << LG_PAGE));
		} else if (flag_dirty != 0) {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		} else {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			size_t i;
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1)));
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
	return (false);
}

static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, true, zero));
}

static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, false, zero));
}

static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
	    flag_decommitted, need_pages);

	for (i = 0; i < need_pages; i++) {
		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
		    run_ind+i);
		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
		    flag_unzeroed);
		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	return (false);
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}
static bool
arena_chunk_register(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk,
    bool zero)
{

	/*
	 * The extent node notion of "committed" doesn't directly apply to
	 * arena chunks.  Arbitrarily mark them as committed.  The commit state
	 * of runs is tracked individually, and upon chunk deallocation the
	 * entire chunk is in a consistent commit state.
	 */
	extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
	extent_node_achunk_set(&chunk->node, true);
	return (chunk_register(tsdn, chunk, &chunk->node));
}

static arena_chunk_t *
arena_chunk_alloc_internal_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;

	malloc_mutex_unlock(tsdn, &arena->lock);

	chunk = (arena_chunk_t *)chunk_alloc_wrapper(tsdn, arena, chunk_hooks,
	    NULL, chunksize, chunksize, zero, commit);
	if (chunk != NULL && !*commit) {
		/* Commit header. */
		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
		    LG_PAGE, arena->ind)) {
			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks,
			    (void *)chunk, chunksize, *zero, *commit);
			chunk = NULL;
		}
	}
	if (chunk != NULL && arena_chunk_register(tsdn, arena, chunk, *zero)) {
		if (!*commit) {
			/* Undo commit of header. */
			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}
		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, (void *)chunk,
		    chunksize, *zero, *commit);
		chunk = NULL;
	}

	malloc_mutex_lock(tsdn, &arena->lock);
	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc_internal(tsdn_t *tsdn, arena_t *arena, bool *zero,
    bool *commit)
{
	arena_chunk_t *chunk;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, chunksize,
	    chunksize, zero, commit, true);
	if (chunk != NULL) {
		if (arena_chunk_register(tsdn, arena, chunk, *zero)) {
			chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk,
			    chunksize, true);
			return (NULL);
		}
	}
	if (chunk == NULL) {
		chunk = arena_chunk_alloc_internal_hard(tsdn, arena,
		    &chunk_hooks, zero, commit);
	}

	if (config_stats && chunk != NULL) {
		arena->stats.mapped += chunksize;
		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
	}

	return (chunk);
}
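/*
 * The first map_bias pages of each arena chunk hold the chunk header
 * (including the page map), so usable run space begins at page map_bias; a
 * freshly initialized chunk contains a single maximal free run of
 * arena_maxrun bytes.
 */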
static arena_chunk_t *
arena_chunk_init_hard(tsdn_t *tsdn, arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero, commit;
	size_t flag_unzeroed, flag_decommitted, i;

	assert(arena->spare == NULL);

	zero = false;
	commit = false;
	chunk = arena_chunk_alloc_internal(tsdn, arena, &zero, &commit);
	if (chunk == NULL)
		return (NULL);

	/*
	 * Initialize the map to contain one maximal free untouched run.  Mark
	 * the pages as zeroed if arena_chunk_alloc_internal() returned a zeroed
	 * or decommitted chunk.
	 */
	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
	    flag_unzeroed | flag_decommitted);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (!zero) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
		    *)arena_bitselm_get_const(chunk, map_bias+1),
		    (size_t)((uintptr_t)arena_bitselm_get_const(chunk,
		    chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get_const(chunk, map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    flag_unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
	    flag_unzeroed);

	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(tsdn_t *tsdn, arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(tsdn, arena);
		if (chunk == NULL)
			return (NULL);
	}

	ql_elm_new(&chunk->node, ql_link);
	ql_tail_insert(&arena->achunks, &chunk->node, ql_link);
	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

	return (chunk);
}

static void
arena_chunk_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
	bool committed;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk_deregister(chunk, &chunk->node);

	committed = (arena_mapbits_decommitted_get(chunk, map_bias) == 0);
	if (!committed) {
		/*
		 * Decommit the header.  Mark the chunk as decommitted even if
		 * header decommit fails, since treating a partially committed
		 * chunk as committed has a high potential for causing later
		 * access of decommitted memory.
		 */
		chunk_hooks = chunk_hooks_get(tsdn, arena);
		chunk_hooks.decommit(chunk, chunksize, 0, map_bias << LG_PAGE,
		    arena->ind);
	}

	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, (void *)chunk, chunksize,
	    committed);

	if (config_stats) {
		arena->stats.mapped -= chunksize;
		arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
	}
}

static void
arena_spare_discard(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *spare)
{

	assert(arena->spare != spare);

	if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
		arena_run_dirty_remove(arena, spare, map_bias,
		    chunk_npages-map_bias);
	}

	arena_chunk_discard(tsdn, arena, spare);
}
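/*
 * Each arena caches its most recently freed chunk as arena->spare in order to
 * avoid chunk allocation churn when usage hovers near a chunk boundary;
 * installing a new spare displaces, and discards, the previous one.
 */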
static void
arena_chunk_dalloc(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk)
{
	arena_chunk_t *spare;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));

	/* Remove run from runs_avail, so that the arena does not use it. */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

	ql_remove(&arena->achunks, &chunk->node, ql_link);
	spare = arena->spare;
	arena->spare = chunk;
	if (spare != NULL)
		arena_spare_discard(tsdn, arena, spare);
}

static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge++;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].nmalloc++;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge--;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].nmalloc--;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].ndalloc++;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_reset_stats_cancel(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.hstats[index].ndalloc--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge--;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].ndalloc--;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

	arena_huge_dalloc_stats_update(arena, oldsize);
	arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

	arena_huge_dalloc_stats_update_undo(arena, oldsize);
	arena_huge_malloc_stats_update_undo(arena, usize);
}

extent_node_t *
arena_node_alloc(tsdn_t *tsdn, arena_t *arena)
{
	extent_node_t *node;

	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
	node = ql_last(&arena->node_cache, ql_link);
	if (node == NULL) {
		malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
		return (base_alloc(tsdn, sizeof(extent_node_t)));
	}
	ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
	return (node);
}

void
arena_node_dalloc(tsdn_t *tsdn, arena_t *arena, extent_node_t *node)
{

	malloc_mutex_lock(tsdn, &arena->node_cache_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->node_cache, node, ql_link);
	malloc_mutex_unlock(tsdn, &arena->node_cache_mtx);
}
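/*
 * Huge allocation stats and nactive are updated optimistically while
 * arena->lock is held, since the lock is dropped around the potentially slow
 * chunk allocation; the *_hard paths reacquire the lock and undo the updates
 * on failure.
 */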
static void *
arena_chunk_alloc_huge_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, size_t usize, size_t alignment, bool *zero,
    size_t csize)
{
	void *ret;
	bool commit = true;

	ret = chunk_alloc_wrapper(tsdn, arena, chunk_hooks, NULL, csize,
	    alignment, zero, &commit);
	if (ret == NULL) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsdn, &arena->lock);
		if (config_stats) {
			arena_huge_malloc_stats_update_undo(arena, usize);
			arena->stats.mapped -= usize;
		}
		arena_nactive_sub(arena, usize >> LG_PAGE);
		malloc_mutex_unlock(tsdn, &arena->lock);
	}

	return (ret);
}

void *
arena_chunk_alloc_huge(tsdn_t *tsdn, arena_t *arena, size_t usize,
    size_t alignment, bool *zero)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize = CHUNK_CEILING(usize);
	bool commit = true;

	malloc_mutex_lock(tsdn, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_malloc_stats_update(arena, usize);
		arena->stats.mapped += usize;
	}
	arena_nactive_add(arena, usize >> LG_PAGE);

	ret = chunk_alloc_cache(tsdn, arena, &chunk_hooks, NULL, csize,
	    alignment, zero, &commit, true);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (ret == NULL) {
		ret = arena_chunk_alloc_huge_hard(tsdn, arena, &chunk_hooks,
		    usize, alignment, zero, csize);
	}

	return (ret);
}

void
arena_chunk_dalloc_huge(tsdn_t *tsdn, arena_t *arena, void *chunk, size_t usize)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize;

	csize = CHUNK_CEILING(usize);
	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_dalloc_stats_update(arena, usize);
		arena->stats.mapped -= usize;
	}
	arena_nactive_sub(arena, usize >> LG_PAGE);

	chunk_dalloc_cache(tsdn, arena, &chunk_hooks, chunk, csize, true);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_chunk_ralloc_huge_similar(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{

	assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
	assert(oldsize != usize);

	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats)
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
	if (oldsize < usize)
		arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
	else
		arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
	malloc_mutex_unlock(tsdn, &arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize)
{
	size_t udiff = oldsize - usize;
	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

	malloc_mutex_lock(tsdn, &arena->lock);
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		if (cdiff != 0)
			arena->stats.mapped -= cdiff;
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);

	if (cdiff != 0) {
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		void *nchunk = (void *)((uintptr_t)chunk +
		    CHUNK_CEILING(usize));

		chunk_dalloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
		    true);
	}
	malloc_mutex_unlock(tsdn, &arena->lock);
}
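/*
 * In-place huge expansion allocates the chunk space immediately following the
 * existing allocation (nchunk) and merges the two regions via the merge chunk
 * hook; if merging fails, the newly allocated trailing space is released and
 * the expansion is reported as failed.
 */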
static bool
arena_chunk_ralloc_huge_expand_hard(tsdn_t *tsdn, arena_t *arena,
    chunk_hooks_t *chunk_hooks, void *chunk, size_t oldsize, size_t usize,
    bool *zero, void *nchunk, size_t udiff, size_t cdiff)
{
	bool err;
	bool commit = true;

	err = (chunk_alloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
	    chunksize, zero, &commit) == NULL);
	if (err) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(tsdn, &arena->lock);
		if (config_stats) {
			arena_huge_ralloc_stats_update_undo(arena, oldsize,
			    usize);
			arena->stats.mapped -= cdiff;
		}
		arena_nactive_sub(arena, udiff >> LG_PAGE);
		malloc_mutex_unlock(tsdn, &arena->lock);
	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, nchunk, cdiff,
		    *zero, true);
		err = true;
	}
	return (err);
}

bool
arena_chunk_ralloc_huge_expand(tsdn_t *tsdn, arena_t *arena, void *chunk,
    size_t oldsize, size_t usize, bool *zero)
{
	bool err;
	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
	size_t udiff = usize - oldsize;
	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);
	bool commit = true;

	malloc_mutex_lock(tsdn, &arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		arena->stats.mapped += cdiff;
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);

	err = (chunk_alloc_cache(tsdn, arena, &chunk_hooks, nchunk, cdiff,
	    chunksize, zero, &commit, true) == NULL);
	malloc_mutex_unlock(tsdn, &arena->lock);
	if (err) {
		err = arena_chunk_ralloc_huge_expand_hard(tsdn, arena,
		    &chunk_hooks, chunk, oldsize, usize, zero, nchunk, udiff,
		    cdiff);
	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_wrapper(tsdn, arena, &chunk_hooks, nchunk, cdiff,
		    *zero, true);
		err = true;
	}

	return (err);
}

/*
 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
 * same size.
 */
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
	pszind_t pind, i;

	pind = psz2ind(run_quantize_ceil(size));

	for (i = pind; pind2sz(i) <= chunksize; i++) {
		arena_chunk_map_misc_t *miscelm = arena_run_heap_first(
		    &arena->runs_avail[i]);
		if (miscelm != NULL)
			return (&miscelm->run);
	}

	return (NULL);
}
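/*
 * The helpers below return NULL both when no suitable run exists and when
 * splitting the chosen run fails (which can happen if committing decommitted
 * pages fails), in which case the callers allocate a fresh chunk and, failing
 * that, search one more time.
 */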
static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
	arena_run_t *run = arena_run_first_best_fit(arena, size);
	if (run != NULL) {
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_large(tsdn_t *tsdn, arena_t *arena, size_t size, bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_large_helper(arena, size, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(tsdn, arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
	arena_run_t *run = arena_run_first_best_fit(arena, size);
	if (run != NULL) {
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_small(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t binind)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));
	assert(binind != BININD_INVALID);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_small_helper(arena, size, binind);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs.  Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(tsdn, arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get_mutable(chunk, map_bias)->run;
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_small_helper(arena, size, binind));
}

static bool
arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
{

	return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
	    << 3));
}

ssize_t
arena_lg_dirty_mult_get(tsdn_t *tsdn, arena_t *arena)
{
	ssize_t lg_dirty_mult;

	malloc_mutex_lock(tsdn, &arena->lock);
	lg_dirty_mult = arena->lg_dirty_mult;
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (lg_dirty_mult);
}

bool
arena_lg_dirty_mult_set(tsdn_t *tsdn, arena_t *arena, ssize_t lg_dirty_mult)
{

	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);

	malloc_mutex_lock(tsdn, &arena->lock);
	arena->lg_dirty_mult = lg_dirty_mult;
	arena_maybe_purge(tsdn, arena);
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (false);
}
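/*
 * Decay-based purging: decay.time is divided into SMOOTHSTEP_NSTEPS equal
 * epochs, and decay.backlog records how many pages were dirtied during each
 * of the most recent epochs.  The dirty page limit is the backlog weighted by
 * the precomputed fixed-point smoothstep table:
 *
 *	limit = (sum over i of backlog[i] * h_steps[i]) >> SMOOTHSTEP_BFP
 *
 * Since h_steps[] ramps from ~0 (oldest epoch) to ~1 (newest), recently
 * dirtied pages count almost fully against the limit, while pages dirtied
 * close to decay.time ago count for almost nothing and so become purgeable.
 */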
static void
arena_decay_deadline_init(arena_t *arena)
{

	assert(opt_purge == purge_mode_decay);

	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&arena->decay.deadline, &arena->decay.epoch);
	nstime_add(&arena->decay.deadline, &arena->decay.interval);
	if (arena->decay.time > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range_u64(&arena->decay.jitter_state,
		    nstime_ns(&arena->decay.interval)));
		nstime_add(&arena->decay.deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
{

	assert(opt_purge == purge_mode_decay);

	return (nstime_compare(&arena->decay.deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_t *arena)
{
	static const uint64_t h_steps[] = {
#define STEP(step, h, x, y) \
		h,
		SMOOTHSTEP
#undef STEP
	};
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	assert(opt_purge == purge_mode_decay);

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor.  Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
		sum += arena->decay.backlog[i] * h_steps[i];
	npages_limit_backlog = (size_t)(sum >> SMOOTHSTEP_BFP);

	return (npages_limit_backlog);
}

static void
arena_decay_backlog_update_last(arena_t *arena)
{
	size_t ndirty_delta = (arena->ndirty > arena->decay.ndirty) ?
	    arena->ndirty - arena->decay.ndirty : 0;
	arena->decay.backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
}

static void
arena_decay_backlog_update(arena_t *arena, uint64_t nadvance_u64)
{

	if (nadvance_u64 >= SMOOTHSTEP_NSTEPS) {
		memset(arena->decay.backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		size_t nadvance_z = (size_t)nadvance_u64;

		assert((uint64_t)nadvance_z == nadvance_u64);

		memmove(arena->decay.backlog, &arena->decay.backlog[nadvance_z],
		    (SMOOTHSTEP_NSTEPS - nadvance_z) * sizeof(size_t));
		if (nadvance_z > 1) {
			memset(&arena->decay.backlog[SMOOTHSTEP_NSTEPS -
			    nadvance_z], 0, (nadvance_z-1) * sizeof(size_t));
		}
	}

	arena_decay_backlog_update_last(arena);
}
static void
arena_decay_epoch_advance_helper(arena_t *arena, const nstime_t *time)
{
	uint64_t nadvance_u64;
	nstime_t delta;

	assert(opt_purge == purge_mode_decay);
	assert(arena_decay_deadline_reached(arena, time));

	nstime_copy(&delta, time);
	nstime_subtract(&delta, &arena->decay.epoch);
	nadvance_u64 = nstime_divide(&delta, &arena->decay.interval);
	assert(nadvance_u64 > 0);

	/* Add nadvance_u64 decay intervals to epoch. */
	nstime_copy(&delta, &arena->decay.interval);
	nstime_imultiply(&delta, nadvance_u64);
	nstime_add(&arena->decay.epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(arena);

	/* Update the backlog. */
	arena_decay_backlog_update(arena, nadvance_u64);
}

static void
arena_decay_epoch_advance_purge(tsdn_t *tsdn, arena_t *arena)
{
	size_t ndirty_limit = arena_decay_backlog_npages_limit(arena);

	if (arena->ndirty > ndirty_limit)
		arena_purge_to_limit(tsdn, arena, ndirty_limit);
	arena->decay.ndirty = arena->ndirty;
}

static void
arena_decay_epoch_advance(tsdn_t *tsdn, arena_t *arena, const nstime_t *time)
{

	arena_decay_epoch_advance_helper(arena, time);
	arena_decay_epoch_advance_purge(tsdn, arena);
}

static void
arena_decay_init(arena_t *arena, ssize_t decay_time)
{

	arena->decay.time = decay_time;
	if (decay_time > 0) {
		nstime_init2(&arena->decay.interval, decay_time, 0);
		nstime_idivide(&arena->decay.interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&arena->decay.epoch, 0);
	nstime_update(&arena->decay.epoch);
	arena->decay.jitter_state = (uint64_t)(uintptr_t)arena;
	arena_decay_deadline_init(arena);
	arena->decay.ndirty = arena->ndirty;
	memset(arena->decay.backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_time_valid(ssize_t decay_time)
{

	if (decay_time < -1)
		return (false);
	if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
		return (true);
	return (false);
}

ssize_t
arena_decay_time_get(tsdn_t *tsdn, arena_t *arena)
{
	ssize_t decay_time;

	malloc_mutex_lock(tsdn, &arena->lock);
	decay_time = arena->decay.time;
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (decay_time);
}

bool
arena_decay_time_set(tsdn_t *tsdn, arena_t *arena, ssize_t decay_time)
{

	if (!arena_decay_time_valid(decay_time))
		return (true);

	malloc_mutex_lock(tsdn, &arena->lock);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged.  It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_time changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_init(arena, decay_time);
	arena_maybe_purge(tsdn, arena);
	malloc_mutex_unlock(tsdn, &arena->lock);

	return (false);
}
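/*
 * Ratio-based purging keeps the dirty page count at or below the larger of
 * nactive >> lg_dirty_mult and chunk_npages; e.g. with lg_dirty_mult == 3, at
 * most one dirty page is retained per eight active pages.
 */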
static void
arena_maybe_purge_ratio(tsdn_t *tsdn, arena_t *arena)
{

	assert(opt_purge == purge_mode_ratio);

	/* Don't purge if the option is disabled. */
	if (arena->lg_dirty_mult < 0)
		return;

	/*
	 * Iterate, since preventing recursive purging could otherwise leave too
	 * many dirty pages.
	 */
	while (true) {
		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
		if (threshold < chunk_npages)
			threshold = chunk_npages;
		/*
		 * Don't purge unless the number of purgeable pages exceeds the
		 * threshold.
		 */
		if (arena->ndirty <= threshold)
			return;
		arena_purge_to_limit(tsdn, arena, threshold);
	}
}

static void
arena_maybe_purge_decay(tsdn_t *tsdn, arena_t *arena)
{
	nstime_t time;

	assert(opt_purge == purge_mode_decay);

	/* Purge all or nothing if the option is disabled. */
	if (arena->decay.time <= 0) {
		if (arena->decay.time == 0)
			arena_purge_to_limit(tsdn, arena, 0);
		return;
	}

	nstime_init(&time, 0);
	nstime_update(&time);
	if (unlikely(!nstime_monotonic() && nstime_compare(&arena->decay.epoch,
	    &time) > 0)) {
		/*
		 * Time went backwards.  Move the epoch back in time and
		 * generate a new deadline, with the expectation that time
		 * typically flows forward for long enough periods of time that
		 * epochs complete.  Unfortunately, this strategy is susceptible
		 * to clock jitter triggering premature epoch advances, but
		 * clock jitter estimation and compensation isn't feasible here
		 * because calls into this code are event-driven.
		 */
		nstime_copy(&arena->decay.epoch, &time);
		arena_decay_deadline_init(arena);
	} else {
		/* Verify that time does not go backwards. */
		assert(nstime_compare(&arena->decay.epoch, &time) <= 0);
	}

	/*
	 * If the deadline has been reached, advance to the current epoch and
	 * purge to the new limit if necessary.  Note that dirty pages created
	 * during the current epoch are not subject to purge until a future
	 * epoch, so as a result purging only happens during epoch advances.
	 */
	if (arena_decay_deadline_reached(arena, &time))
		arena_decay_epoch_advance(tsdn, arena, &time);
}

void
arena_maybe_purge(tsdn_t *tsdn, arena_t *arena)
{

	/* Don't recursively purge. */
	if (arena->purging)
		return;

	if (opt_purge == purge_mode_ratio)
		arena_maybe_purge_ratio(tsdn, arena);
	else
		arena_maybe_purge_decay(tsdn, arena);
}

static size_t
arena_dirty_count(arena_t *arena)
{
	size_t ndirty = 0;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
			    rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			assert(arena_mapbits_allocated_get(chunk, pageind) ==
			    0);
			assert(arena_mapbits_large_get(chunk, pageind) == 0);
			assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
			npages = arena_mapbits_unallocated_size_get(chunk,
			    pageind) >> LG_PAGE;
		}
		ndirty += npages;
	}

	return (ndirty);
}
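/*
 * Purging runs in three phases: arena_stash_dirty() moves dirty runs/chunks
 * onto caller-provided sentinel lists and marks them allocated,
 * arena_purge_stashed() temporarily drops arena->lock while invoking the
 * decommit/purge chunk hooks, and arena_unstash_purged() deallocates the
 * stashed runs and chunks.
 */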
static size_t
arena_stash_dirty(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;
	size_t nstashed = 0;

	/* Stash runs/chunks according to ndirty_limit. */
	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
		size_t npages;
		rdelm_next = qr_next(rdelm, rd_link);

		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next;
			bool zero, commit;
			UNUSED void *chunk;

			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			chunkselm_next = qr_next(chunkselm, cc_link);
			/*
			 * Allocate.  chunkselm remains valid due to the
			 * dalloc_node=false argument to chunk_alloc_cache().
			 */
			zero = false;
			commit = false;
			chunk = chunk_alloc_cache(tsdn, arena, chunk_hooks,
			    extent_node_addr_get(chunkselm),
			    extent_node_size_get(chunkselm), chunksize, &zero,
			    &commit, false);
			assert(chunk == extent_node_addr_get(chunkselm));
			assert(zero == extent_node_zeroed_get(chunkselm));
			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
			    purge_chunks_sentinel);
			assert(npages == (extent_node_size_get(chunkselm) >>
			    LG_PAGE));
			chunkselm = chunkselm_next;
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			arena_run_t *run = &miscelm->run;
			size_t run_size =
			    arena_mapbits_unallocated_size_get(chunk, pageind);

			npages = run_size >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));

			/*
			 * If purging the spare chunk's run, make it available
			 * prior to allocation.
			 */
			if (chunk == arena->spare)
				arena_chunk_alloc(tsdn, arena);

			/* Temporarily allocate the free dirty run. */
			arena_run_split_large(arena, run, run_size, false);
			/* Stash. */
			if (false)
				qr_new(rdelm, rd_link); /* Redundant. */
			else {
				assert(qr_next(rdelm, rd_link) == rdelm);
				assert(qr_prev(rdelm, rd_link) == rdelm);
			}
			qr_meld(purge_runs_sentinel, rdelm, rd_link);
		}

		nstashed += npages;
		if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
		    ndirty_limit)
			break;
	}

	return (nstashed);
}
static size_t
arena_purge_stashed(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	size_t npurged, nmadvise;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	if (config_stats)
		nmadvise = 0;
	npurged = 0;

	malloc_mutex_unlock(tsdn, &arena->lock);
	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
	    rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			/*
			 * Don't actually purge the chunk here because 1)
			 * chunkselm is embedded in the chunk and must remain
			 * valid, and 2) we deallocate the chunk in
			 * arena_unstash_purged(), where it is destroyed,
			 * decommitted, or purged, depending on chunk
			 * deallocation policy.
			 */
			size_t size = extent_node_size_get(chunkselm);
			npages = size >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			size_t pageind, run_size, flag_unzeroed, flags, i;
			bool decommitted;
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			pageind = arena_miscelm_to_pageind(miscelm);
			run_size = arena_mapbits_large_size_get(chunk, pageind);
			npages = run_size >> LG_PAGE;

			assert(pageind + npages <= chunk_npages);
			assert(!arena_mapbits_decommitted_get(chunk, pageind));
			assert(!arena_mapbits_decommitted_get(chunk,
			    pageind+npages-1));
			decommitted = !chunk_hooks->decommit(chunk, chunksize,
			    pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
			if (decommitted) {
				flag_unzeroed = 0;
				flags = CHUNK_MAP_DECOMMITTED;
			} else {
				flag_unzeroed = chunk_purge_wrapper(tsdn, arena,
				    chunk_hooks, chunk, chunksize, pageind <<
				    LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
				flags = flag_unzeroed;
			}
			arena_mapbits_large_set(chunk, pageind+npages-1, 0,
			    flags);
			arena_mapbits_large_set(chunk, pageind, run_size,
			    flags);

			/*
			 * Set the unzeroed flag for internal pages, now that
			 * chunk_purge_wrapper() has returned whether the pages
			 * were zeroed as a side effect of purging.  This chunk
			 * map modification is safe even though the arena mutex
			 * isn't currently owned by this thread, because the run
			 * is marked as allocated, thus protecting it from being
			 * modified by any other thread.  As long as these
			 * writes don't perturb the first and last elements'
			 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
			 */
			for (i = 1; i < npages-1; i++) {
				arena_mapbits_internal_set(chunk, pageind+i,
				    flag_unzeroed);
			}
		}

		npurged += npages;
		if (config_stats)
			nmadvise++;
	}
	malloc_mutex_lock(tsdn, &arena->lock);

	if (config_stats) {
		arena->stats.nmadvise += nmadvise;
		arena->stats.purged += npurged;
	}

	return (npurged);
}
static void
arena_unstash_purged(tsdn_t *tsdn, arena_t *arena, chunk_hooks_t *chunk_hooks,
    arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;

	/* Deallocate chunks/runs. */
	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
	    rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
		rdelm_next = qr_next(rdelm, rd_link);
		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next = qr_next(chunkselm,
			    cc_link);
			void *addr = extent_node_addr_get(chunkselm);
			size_t size = extent_node_size_get(chunkselm);
			bool zeroed = extent_node_zeroed_get(chunkselm);
			bool committed = extent_node_committed_get(chunkselm);
			extent_node_dirty_remove(chunkselm);
			arena_node_dalloc(tsdn, arena, chunkselm);
			chunkselm = chunkselm_next;
			chunk_dalloc_wrapper(tsdn, arena, chunk_hooks, addr,
			    size, zeroed, committed);
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			bool decommitted = (arena_mapbits_decommitted_get(chunk,
			    pageind) != 0);
			arena_run_t *run = &miscelm->run;
			qr_remove(rdelm, rd_link);
			arena_run_dalloc(tsdn, arena, run, false, true,
			    decommitted);
		}
	}
}

/*
 * NB: ndirty_limit is interpreted differently depending on opt_purge:
 * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
 *   desired state:
 *   (arena->ndirty <= ndirty_limit)
 * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
 *   violating the invariant:
 *   (arena->ndirty >= ndirty_limit)
 */
static void
arena_purge_to_limit(tsdn_t *tsdn, arena_t *arena, size_t ndirty_limit)
{
	chunk_hooks_t chunk_hooks = chunk_hooks_get(tsdn, arena);
	size_t npurge, npurged;
	arena_runs_dirty_link_t purge_runs_sentinel;
	extent_node_t purge_chunks_sentinel;

	arena->purging = true;

	/*
	 * Calls to arena_dirty_count() are disabled even for debug builds
	 * because overhead grows nonlinearly as memory usage increases.
	 */
	if (false && config_debug) {
		size_t ndirty = arena_dirty_count(arena);
		assert(ndirty == arena->ndirty);
	}
	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
	    arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);

	qr_new(&purge_runs_sentinel, rd_link);
	extent_node_dirty_linkage_init(&purge_chunks_sentinel);

	npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit,
	    &purge_runs_sentinel, &purge_chunks_sentinel);
	if (npurge == 0)
		goto label_return;
	npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks,
	    &purge_runs_sentinel, &purge_chunks_sentinel);
	assert(npurged == npurge);
	arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel,
	    &purge_chunks_sentinel);

	if (config_stats)
		arena->stats.npurge++;

label_return:
	arena->purging = false;
}
1751 */ 1752 if (false && config_debug) { 1753 size_t ndirty = arena_dirty_count(arena); 1754 assert(ndirty == arena->ndirty); 1755 } 1756 assert(opt_purge != purge_mode_ratio || (arena->nactive >> 1757 arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0); 1758 1759 qr_new(&purge_runs_sentinel, rd_link); 1760 extent_node_dirty_linkage_init(&purge_chunks_sentinel); 1761 1762 npurge = arena_stash_dirty(tsdn, arena, &chunk_hooks, ndirty_limit, 1763 &purge_runs_sentinel, &purge_chunks_sentinel); 1764 if (npurge == 0) 1765 goto label_return; 1766 npurged = arena_purge_stashed(tsdn, arena, &chunk_hooks, 1767 &purge_runs_sentinel, &purge_chunks_sentinel); 1768 assert(npurged == npurge); 1769 arena_unstash_purged(tsdn, arena, &chunk_hooks, &purge_runs_sentinel, 1770 &purge_chunks_sentinel); 1771 1772 if (config_stats) 1773 arena->stats.npurge++; 1774 1775 label_return: 1776 arena->purging = false; 1777 } 1778 1779 void 1780 arena_purge(tsdn_t *tsdn, arena_t *arena, bool all) 1781 { 1782 1783 malloc_mutex_lock(tsdn, &arena->lock); 1784 if (all) 1785 arena_purge_to_limit(tsdn, arena, 0); 1786 else 1787 arena_maybe_purge(tsdn, arena); 1788 malloc_mutex_unlock(tsdn, &arena->lock); 1789 } 1790 1791 static void 1792 arena_achunk_prof_reset(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk) 1793 { 1794 size_t pageind, npages; 1795 1796 cassert(config_prof); 1797 assert(opt_prof); 1798 1799 /* 1800 * Iterate over the allocated runs and remove profiled allocations from 1801 * the sample set. 1802 */ 1803 for (pageind = map_bias; pageind < chunk_npages; pageind += npages) { 1804 if (arena_mapbits_allocated_get(chunk, pageind) != 0) { 1805 if (arena_mapbits_large_get(chunk, pageind) != 0) { 1806 void *ptr = (void *)((uintptr_t)chunk + (pageind 1807 << LG_PAGE)); 1808 size_t usize = isalloc(tsd_tsdn(tsd), ptr, 1809 config_prof); 1810 1811 prof_free(tsd, ptr, usize); 1812 npages = arena_mapbits_large_size_get(chunk, 1813 pageind) >> LG_PAGE; 1814 } else { 1815 /* Skip small run. */ 1816 size_t binind = arena_mapbits_binind_get(chunk, 1817 pageind); 1818 arena_bin_info_t *bin_info = 1819 &arena_bin_info[binind]; 1820 npages = bin_info->run_size >> LG_PAGE; 1821 } 1822 } else { 1823 /* Skip unallocated run. */ 1824 npages = arena_mapbits_unallocated_size_get(chunk, 1825 pageind) >> LG_PAGE; 1826 } 1827 assert(pageind + npages <= chunk_npages); 1828 } 1829 } 1830 1831 void 1832 arena_reset(tsd_t *tsd, arena_t *arena) 1833 { 1834 unsigned i; 1835 extent_node_t *node; 1836 1837 /* 1838 * Locking in this function is unintuitive. The caller guarantees that 1839 * no concurrent operations are happening in this arena, but there are 1840 * still reasons that some locking is necessary: 1841 * 1842 * - Some of the functions in the transitive closure of calls assume 1843 * appropriate locks are held, and in some cases these locks are 1844 * temporarily dropped to avoid lock order reversal or deadlock due to 1845 * reentry. 1846 * - mallctl("epoch", ...) may concurrently refresh stats. While 1847 * strictly speaking this is a "concurrent operation", disallowing 1848 * stats refreshes would impose an inconvenient burden. 1849 */ 1850 1851 /* Remove large allocations from prof sample set. */ 1852 if (config_prof && opt_prof) { 1853 ql_foreach(node, &arena->achunks, ql_link) { 1854 arena_achunk_prof_reset(tsd, arena, 1855 extent_node_addr_get(node)); 1856 } 1857 } 1858 1859 /* Reset curruns for large size classes. 
*/ 1860 if (config_stats) { 1861 for (i = 0; i < nlclasses; i++) 1862 arena->stats.lstats[i].curruns = 0; 1863 } 1864 1865 /* Huge allocations. */ 1866 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); 1867 for (node = ql_last(&arena->huge, ql_link); node != NULL; node = 1868 ql_last(&arena->huge, ql_link)) { 1869 void *ptr = extent_node_addr_get(node); 1870 size_t usize; 1871 1872 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); 1873 if (config_stats || (config_prof && opt_prof)) 1874 usize = isalloc(tsd_tsdn(tsd), ptr, config_prof); 1875 /* Remove huge allocation from prof sample set. */ 1876 if (config_prof && opt_prof) 1877 prof_free(tsd, ptr, usize); 1878 huge_dalloc(tsd_tsdn(tsd), ptr); 1879 malloc_mutex_lock(tsd_tsdn(tsd), &arena->huge_mtx); 1880 /* Cancel out unwanted effects on stats. */ 1881 if (config_stats) 1882 arena_huge_reset_stats_cancel(arena, usize); 1883 } 1884 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->huge_mtx); 1885 1886 malloc_mutex_lock(tsd_tsdn(tsd), &arena->lock); 1887 1888 /* Bins. */ 1889 for (i = 0; i < NBINS; i++) { 1890 arena_bin_t *bin = &arena->bins[i]; 1891 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock); 1892 bin->runcur = NULL; 1893 arena_run_heap_new(&bin->runs); 1894 if (config_stats) { 1895 bin->stats.curregs = 0; 1896 bin->stats.curruns = 0; 1897 } 1898 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock); 1899 } 1900 1901 /* 1902 * Re-initialize runs_dirty such that the chunks_cache and runs_dirty 1903 * chains directly correspond. 1904 */ 1905 qr_new(&arena->runs_dirty, rd_link); 1906 for (node = qr_next(&arena->chunks_cache, cc_link); 1907 node != &arena->chunks_cache; node = qr_next(node, cc_link)) { 1908 qr_new(&node->rd, rd_link); 1909 qr_meld(&arena->runs_dirty, &node->rd, rd_link); 1910 } 1911 1912 /* Arena chunks. */ 1913 for (node = ql_last(&arena->achunks, ql_link); node != NULL; node = 1914 ql_last(&arena->achunks, ql_link)) { 1915 ql_remove(&arena->achunks, node, ql_link); 1916 arena_chunk_discard(tsd_tsdn(tsd), arena, 1917 extent_node_addr_get(node)); 1918 } 1919 1920 /* Spare. */ 1921 if (arena->spare != NULL) { 1922 arena_chunk_discard(tsd_tsdn(tsd), arena, arena->spare); 1923 arena->spare = NULL; 1924 } 1925 1926 assert(!arena->purging); 1927 arena->nactive = 0; 1928 1929 for (i = 0; i < NPSIZES; i++) 1930 arena_run_heap_new(&arena->runs_avail[i]); 1931 1932 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->lock); 1933 } 1934 1935 static void 1936 arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, 1937 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty, 1938 size_t flag_decommitted) 1939 { 1940 size_t size = *p_size; 1941 size_t run_ind = *p_run_ind; 1942 size_t run_pages = *p_run_pages; 1943 1944 /* Try to coalesce forward. */ 1945 if (run_ind + run_pages < chunk_npages && 1946 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && 1947 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty && 1948 arena_mapbits_decommitted_get(chunk, run_ind+run_pages) == 1949 flag_decommitted) { 1950 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, 1951 run_ind+run_pages); 1952 size_t nrun_pages = nrun_size >> LG_PAGE; 1953 1954 /* 1955 * Remove successor from runs_avail; the coalesced run is 1956 * inserted later. 
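 * (Worked example, added for illustration: freeing a 4-page run at
 * pageind 10 next to an unallocated 8-page run at pageind 14 yields a
 * single 12-page run at pageind 10; as the code below shows, only the
 * first and last map elements of the merged run are updated with the
 * new size.)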
1957 */ 1958 assert(arena_mapbits_unallocated_size_get(chunk, 1959 run_ind+run_pages+nrun_pages-1) == nrun_size); 1960 assert(arena_mapbits_dirty_get(chunk, 1961 run_ind+run_pages+nrun_pages-1) == flag_dirty); 1962 assert(arena_mapbits_decommitted_get(chunk, 1963 run_ind+run_pages+nrun_pages-1) == flag_decommitted); 1964 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages); 1965 1966 /* 1967 * If the successor is dirty, remove it from the set of dirty 1968 * pages. 1969 */ 1970 if (flag_dirty != 0) { 1971 arena_run_dirty_remove(arena, chunk, run_ind+run_pages, 1972 nrun_pages); 1973 } 1974 1975 size += nrun_size; 1976 run_pages += nrun_pages; 1977 1978 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 1979 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 1980 size); 1981 } 1982 1983 /* Try to coalesce backward. */ 1984 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, 1985 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == 1986 flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) == 1987 flag_decommitted) { 1988 size_t prun_size = arena_mapbits_unallocated_size_get(chunk, 1989 run_ind-1); 1990 size_t prun_pages = prun_size >> LG_PAGE; 1991 1992 run_ind -= prun_pages; 1993 1994 /* 1995 * Remove predecessor from runs_avail; the coalesced run is 1996 * inserted later. 1997 */ 1998 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 1999 prun_size); 2000 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); 2001 assert(arena_mapbits_decommitted_get(chunk, run_ind) == 2002 flag_decommitted); 2003 arena_avail_remove(arena, chunk, run_ind, prun_pages); 2004 2005 /* 2006 * If the predecessor is dirty, remove it from the set of dirty 2007 * pages. 2008 */ 2009 if (flag_dirty != 0) { 2010 arena_run_dirty_remove(arena, chunk, run_ind, 2011 prun_pages); 2012 } 2013 2014 size += prun_size; 2015 run_pages += prun_pages; 2016 2017 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 2018 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 2019 size); 2020 } 2021 2022 *p_size = size; 2023 *p_run_ind = run_ind; 2024 *p_run_pages = run_pages; 2025 } 2026 2027 static size_t 2028 arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 2029 size_t run_ind) 2030 { 2031 size_t size; 2032 2033 assert(run_ind >= map_bias); 2034 assert(run_ind < chunk_npages); 2035 2036 if (arena_mapbits_large_get(chunk, run_ind) != 0) { 2037 size = arena_mapbits_large_size_get(chunk, run_ind); 2038 assert(size == PAGE || arena_mapbits_large_size_get(chunk, 2039 run_ind+(size>>LG_PAGE)-1) == 0); 2040 } else { 2041 arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; 2042 size = bin_info->run_size; 2043 } 2044 2045 return (size); 2046 } 2047 2048 static void 2049 arena_run_dalloc(tsdn_t *tsdn, arena_t *arena, arena_run_t *run, bool dirty, 2050 bool cleaned, bool decommitted) 2051 { 2052 arena_chunk_t *chunk; 2053 arena_chunk_map_misc_t *miscelm; 2054 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted; 2055 2056 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 2057 miscelm = arena_run_to_miscelm(run); 2058 run_ind = arena_miscelm_to_pageind(miscelm); 2059 assert(run_ind >= map_bias); 2060 assert(run_ind < chunk_npages); 2061 size = arena_run_size_get(arena, chunk, run, run_ind); 2062 run_pages = (size >> LG_PAGE); 2063 arena_nactive_sub(arena, run_pages); 2064 2065 /* 2066 * The run is dirty if the caller claims to have dirtied it, as well as 2067 * if it was already dirty before being allocated and 
the caller 2068 * doesn't claim to have cleaned it. 2069 */ 2070 assert(arena_mapbits_dirty_get(chunk, run_ind) == 2071 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 2072 if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind) 2073 != 0) 2074 dirty = true; 2075 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; 2076 flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0; 2077 2078 /* Mark pages as unallocated in the chunk map. */ 2079 if (dirty || decommitted) { 2080 size_t flags = flag_dirty | flag_decommitted; 2081 arena_mapbits_unallocated_set(chunk, run_ind, size, flags); 2082 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 2083 flags); 2084 } else { 2085 arena_mapbits_unallocated_set(chunk, run_ind, size, 2086 arena_mapbits_unzeroed_get(chunk, run_ind)); 2087 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 2088 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); 2089 } 2090 2091 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, 2092 flag_dirty, flag_decommitted); 2093 2094 /* Insert into runs_avail, now that coalescing is complete. */ 2095 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 2096 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); 2097 assert(arena_mapbits_dirty_get(chunk, run_ind) == 2098 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 2099 assert(arena_mapbits_decommitted_get(chunk, run_ind) == 2100 arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1)); 2101 arena_avail_insert(arena, chunk, run_ind, run_pages); 2102 2103 if (dirty) 2104 arena_run_dirty_insert(arena, chunk, run_ind, run_pages); 2105 2106 /* Deallocate chunk if it is now completely unused. */ 2107 if (size == arena_maxrun) { 2108 assert(run_ind == map_bias); 2109 assert(run_pages == (arena_maxrun >> LG_PAGE)); 2110 arena_chunk_dalloc(tsdn, arena, chunk); 2111 } 2112 2113 /* 2114 * It is okay to do dirty page processing here even if the chunk was 2115 * deallocated above, since in that case it is the spare. Waiting 2116 * until after possible chunk deallocation to do dirty processing 2117 * allows for an old spare to be fully deallocated, thus decreasing the 2118 * chances of spuriously crossing the dirty page purging threshold. 2119 */ 2120 if (dirty) 2121 arena_maybe_purge(tsdn, arena); 2122 } 2123 2124 static void 2125 arena_run_trim_head(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, 2126 arena_run_t *run, size_t oldsize, size_t newsize) 2127 { 2128 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 2129 size_t pageind = arena_miscelm_to_pageind(miscelm); 2130 size_t head_npages = (oldsize - newsize) >> LG_PAGE; 2131 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 2132 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); 2133 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? 2134 CHUNK_MAP_UNZEROED : 0; 2135 2136 assert(oldsize > newsize); 2137 2138 /* 2139 * Update the chunk map so that arena_run_dalloc() can treat the 2140 * leading run as separately allocated. Set the last element of each 2141 * run first, in case of single-page runs. 
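 * (Editorial note: when head_npages == 1 the "first" and "last" head
 * elements are the same map entry, so writing the size-0 last element
 * first guarantees that the subsequent write of the run size at
 * pageind is the one that sticks.)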
2142 */ 2143 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 2144 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | 2145 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2146 pageind+head_npages-1))); 2147 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty | 2148 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); 2149 2150 if (config_debug) { 2151 UNUSED size_t tail_npages = newsize >> LG_PAGE; 2152 assert(arena_mapbits_large_size_get(chunk, 2153 pageind+head_npages+tail_npages-1) == 0); 2154 assert(arena_mapbits_dirty_get(chunk, 2155 pageind+head_npages+tail_npages-1) == flag_dirty); 2156 } 2157 arena_mapbits_large_set(chunk, pageind+head_npages, newsize, 2158 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2159 pageind+head_npages))); 2160 2161 arena_run_dalloc(tsdn, arena, run, false, false, (flag_decommitted != 2162 0)); 2163 } 2164 2165 static void 2166 arena_run_trim_tail(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, 2167 arena_run_t *run, size_t oldsize, size_t newsize, bool dirty) 2168 { 2169 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 2170 size_t pageind = arena_miscelm_to_pageind(miscelm); 2171 size_t head_npages = newsize >> LG_PAGE; 2172 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 2173 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); 2174 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? 2175 CHUNK_MAP_UNZEROED : 0; 2176 arena_chunk_map_misc_t *tail_miscelm; 2177 arena_run_t *tail_run; 2178 2179 assert(oldsize > newsize); 2180 2181 /* 2182 * Update the chunk map so that arena_run_dalloc() can treat the 2183 * trailing run as separately allocated. Set the last element of each 2184 * run first, in case of single-page runs. 
2185 */ 2186 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 2187 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | 2188 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2189 pageind+head_npages-1))); 2190 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty | 2191 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); 2192 2193 if (config_debug) { 2194 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; 2195 assert(arena_mapbits_large_size_get(chunk, 2196 pageind+head_npages+tail_npages-1) == 0); 2197 assert(arena_mapbits_dirty_get(chunk, 2198 pageind+head_npages+tail_npages-1) == flag_dirty); 2199 } 2200 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, 2201 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2202 pageind+head_npages))); 2203 2204 tail_miscelm = arena_miscelm_get_mutable(chunk, pageind + head_npages); 2205 tail_run = &tail_miscelm->run; 2206 arena_run_dalloc(tsdn, arena, tail_run, dirty, false, (flag_decommitted 2207 != 0)); 2208 } 2209 2210 static void 2211 arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) 2212 { 2213 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 2214 2215 arena_run_heap_insert(&bin->runs, miscelm); 2216 } 2217 2218 static arena_run_t * 2219 arena_bin_nonfull_run_tryget(arena_bin_t *bin) 2220 { 2221 arena_chunk_map_misc_t *miscelm; 2222 2223 miscelm = arena_run_heap_remove_first(&bin->runs); 2224 if (miscelm == NULL) 2225 return (NULL); 2226 if (config_stats) 2227 bin->stats.reruns++; 2228 2229 return (&miscelm->run); 2230 } 2231 2232 static arena_run_t * 2233 arena_bin_nonfull_run_get(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) 2234 { 2235 arena_run_t *run; 2236 szind_t binind; 2237 arena_bin_info_t *bin_info; 2238 2239 /* Look for a usable run. */ 2240 run = arena_bin_nonfull_run_tryget(bin); 2241 if (run != NULL) 2242 return (run); 2243 /* No existing runs have any space available. */ 2244 2245 binind = arena_bin_index(arena, bin); 2246 bin_info = &arena_bin_info[binind]; 2247 2248 /* Allocate a new run. */ 2249 malloc_mutex_unlock(tsdn, &bin->lock); 2250 /******************************/ 2251 malloc_mutex_lock(tsdn, &arena->lock); 2252 run = arena_run_alloc_small(tsdn, arena, bin_info->run_size, binind); 2253 if (run != NULL) { 2254 /* Initialize run internals. */ 2255 run->binind = binind; 2256 run->nfree = bin_info->nregs; 2257 bitmap_init(run->bitmap, &bin_info->bitmap_info); 2258 } 2259 malloc_mutex_unlock(tsdn, &arena->lock); 2260 /********************************/ 2261 malloc_mutex_lock(tsdn, &bin->lock); 2262 if (run != NULL) { 2263 if (config_stats) { 2264 bin->stats.nruns++; 2265 bin->stats.curruns++; 2266 } 2267 return (run); 2268 } 2269 2270 /* 2271 * arena_run_alloc_small() failed, but another thread may have made 2272 * sufficient memory available while this one dropped bin->lock above, 2273 * so search one more time. 2274 */ 2275 run = arena_bin_nonfull_run_tryget(bin); 2276 if (run != NULL) 2277 return (run); 2278 2279 return (NULL); 2280 } 2281 2282 /* Re-fill bin->runcur, then call arena_run_reg_alloc(). 
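 * (Illustrative sketch of the lock hand-off performed in
 * arena_bin_nonfull_run_get() above, assuming no allocation failure:
 *
 *   malloc_mutex_unlock(tsdn, &bin->lock);     drop bin lock
 *   malloc_mutex_lock(tsdn, &arena->lock);     take arena lock
 *   run = arena_run_alloc_small(...);          allocate a fresh run
 *   malloc_mutex_unlock(tsdn, &arena->lock);
 *   malloc_mutex_lock(tsdn, &bin->lock);       retake bin lock
 *
 * Because both locks are dropped in between, another thread may have
 * refilled bin->runcur meanwhile, which is why the function below
 * re-checks runcur before using the freshly obtained run.)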
*/ 2283 static void * 2284 arena_bin_malloc_hard(tsdn_t *tsdn, arena_t *arena, arena_bin_t *bin) 2285 { 2286 szind_t binind; 2287 arena_bin_info_t *bin_info; 2288 arena_run_t *run; 2289 2290 binind = arena_bin_index(arena, bin); 2291 bin_info = &arena_bin_info[binind]; 2292 bin->runcur = NULL; 2293 run = arena_bin_nonfull_run_get(tsdn, arena, bin); 2294 if (bin->runcur != NULL && bin->runcur->nfree > 0) { 2295 /* 2296 * Another thread updated runcur while this one ran without the 2297 * bin lock in arena_bin_nonfull_run_get(). 2298 */ 2299 void *ret; 2300 assert(bin->runcur->nfree > 0); 2301 ret = arena_run_reg_alloc(bin->runcur, bin_info); 2302 if (run != NULL) { 2303 arena_chunk_t *chunk; 2304 2305 /* 2306 * arena_run_alloc_small() may have allocated run, or 2307 * it may have pulled run from the bin's run heap. 2308 * Therefore it is unsafe to make any assumptions about 2309 * how run has previously been used, and 2310 * arena_bin_lower_run() must be called, as if a region 2311 * were just deallocated from the run. 2312 */ 2313 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 2314 if (run->nfree == bin_info->nregs) { 2315 arena_dalloc_bin_run(tsdn, arena, chunk, run, 2316 bin); 2317 } else 2318 arena_bin_lower_run(arena, chunk, run, bin); 2319 } 2320 return (ret); 2321 } 2322 2323 if (run == NULL) 2324 return (NULL); 2325 2326 bin->runcur = run; 2327 2328 assert(bin->runcur->nfree > 0); 2329 2330 return (arena_run_reg_alloc(bin->runcur, bin_info)); 2331 } 2332 2333 void 2334 arena_tcache_fill_small(tsdn_t *tsdn, arena_t *arena, tcache_bin_t *tbin, 2335 szind_t binind, uint64_t prof_accumbytes) 2336 { 2337 unsigned i, nfill; 2338 arena_bin_t *bin; 2339 2340 assert(tbin->ncached == 0); 2341 2342 if (config_prof && arena_prof_accum(tsdn, arena, prof_accumbytes)) 2343 prof_idump(tsdn); 2344 bin = &arena->bins[binind]; 2345 malloc_mutex_lock(tsdn, &bin->lock); 2346 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> 2347 tbin->lg_fill_div); i < nfill; i++) { 2348 arena_run_t *run; 2349 void *ptr; 2350 if ((run = bin->runcur) != NULL && run->nfree > 0) 2351 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); 2352 else 2353 ptr = arena_bin_malloc_hard(tsdn, arena, bin); 2354 if (ptr == NULL) { 2355 /* 2356 * OOM. tbin->avail isn't yet filled down to its first 2357 * element, so the successful allocations (if any) must 2358 * be moved just before tbin->avail before bailing out. 2359 */ 2360 if (i > 0) { 2361 memmove(tbin->avail - i, tbin->avail - nfill, 2362 i * sizeof(void *)); 2363 } 2364 break; 2365 } 2366 if (config_fill && unlikely(opt_junk_alloc)) { 2367 arena_alloc_junk_small(ptr, &arena_bin_info[binind], 2368 true); 2369 } 2370 /* Insert such that low regions get used first.
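 * (Illustrative example: with nfill == 8, the i-th pointer is stored
 * at tbin->avail[-8 + i]. If OOM strikes after three successes (i ==
 * 3 at failure), the memmove above shifts those three pointers from
 * avail[-8 .. -6] to avail[-3 .. -1] so they sit directly before
 * tbin->avail.)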
*/ 2371 *(tbin->avail - nfill + i) = ptr; 2372 } 2373 if (config_stats) { 2374 bin->stats.nmalloc += i; 2375 bin->stats.nrequests += tbin->tstats.nrequests; 2376 bin->stats.curregs += i; 2377 bin->stats.nfills++; 2378 tbin->tstats.nrequests = 0; 2379 } 2380 malloc_mutex_unlock(tsdn, &bin->lock); 2381 tbin->ncached = i; 2382 arena_decay_tick(tsdn, arena); 2383 } 2384 2385 void 2386 arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) 2387 { 2388 2389 size_t redzone_size = bin_info->redzone_size; 2390 2391 if (zero) { 2392 memset((void *)((uintptr_t)ptr - redzone_size), 2393 JEMALLOC_ALLOC_JUNK, redzone_size); 2394 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 2395 JEMALLOC_ALLOC_JUNK, redzone_size); 2396 } else { 2397 memset((void *)((uintptr_t)ptr - redzone_size), 2398 JEMALLOC_ALLOC_JUNK, bin_info->reg_interval); 2399 } 2400 } 2401 2402 #ifdef JEMALLOC_JET 2403 #undef arena_redzone_corruption 2404 #define arena_redzone_corruption JEMALLOC_N(n_arena_redzone_corruption) 2405 #endif 2406 static void 2407 arena_redzone_corruption(void *ptr, size_t usize, bool after, 2408 size_t offset, uint8_t byte) 2409 { 2410 2411 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p " 2412 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", 2413 after ? "after" : "before", ptr, usize, byte); 2414 } 2415 #ifdef JEMALLOC_JET 2416 #undef arena_redzone_corruption 2417 #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) 2418 arena_redzone_corruption_t *arena_redzone_corruption = 2419 JEMALLOC_N(n_arena_redzone_corruption); 2420 #endif 2421 2422 static void 2423 arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) 2424 { 2425 bool error = false; 2426 2427 if (opt_junk_alloc) { 2428 size_t size = bin_info->reg_size; 2429 size_t redzone_size = bin_info->redzone_size; 2430 size_t i; 2431 2432 for (i = 1; i <= redzone_size; i++) { 2433 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); 2434 if (*byte != JEMALLOC_ALLOC_JUNK) { 2435 error = true; 2436 arena_redzone_corruption(ptr, size, false, i, 2437 *byte); 2438 if (reset) 2439 *byte = JEMALLOC_ALLOC_JUNK; 2440 } 2441 } 2442 for (i = 0; i < redzone_size; i++) { 2443 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); 2444 if (*byte != JEMALLOC_ALLOC_JUNK) { 2445 error = true; 2446 arena_redzone_corruption(ptr, size, true, i, 2447 *byte); 2448 if (reset) 2449 *byte = JEMALLOC_ALLOC_JUNK; 2450 } 2451 } 2452 } 2453 2454 if (opt_abort && error) 2455 abort(); 2456 } 2457 2458 #ifdef JEMALLOC_JET 2459 #undef arena_dalloc_junk_small 2460 #define arena_dalloc_junk_small JEMALLOC_N(n_arena_dalloc_junk_small) 2461 #endif 2462 void 2463 arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) 2464 { 2465 size_t redzone_size = bin_info->redzone_size; 2466 2467 arena_redzones_validate(ptr, bin_info, false); 2468 memset((void *)((uintptr_t)ptr - redzone_size), JEMALLOC_FREE_JUNK, 2469 bin_info->reg_interval); 2470 } 2471 #ifdef JEMALLOC_JET 2472 #undef arena_dalloc_junk_small 2473 #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) 2474 arena_dalloc_junk_small_t *arena_dalloc_junk_small = 2475 JEMALLOC_N(n_arena_dalloc_junk_small); 2476 #endif 2477 2478 void 2479 arena_quarantine_junk_small(void *ptr, size_t usize) 2480 { 2481 szind_t binind; 2482 arena_bin_info_t *bin_info; 2483 cassert(config_fill); 2484 assert(opt_junk_free); 2485 assert(opt_quarantine); 2486 assert(usize <= SMALL_MAXCLASS); 2487 2488 binind = size2index(usize); 2489 bin_info = &arena_bin_info[binind]; 
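	/*
	 * (Illustrative layout, added for clarity: each region is stored
	 * as [redzone | reg_size bytes | redzone], so reg_interval ==
	 * reg_size + 2 * redzone_size, and the validation below checks
	 * redzone_size bytes on each side of ptr for
	 * JEMALLOC_ALLOC_JUNK.)
	 */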
2490 arena_redzones_validate(ptr, bin_info, true); 2491 } 2492 2493 static void * 2494 arena_malloc_small(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) 2495 { 2496 void *ret; 2497 arena_bin_t *bin; 2498 size_t usize; 2499 arena_run_t *run; 2500 2501 assert(binind < NBINS); 2502 bin = &arena->bins[binind]; 2503 usize = index2size(binind); 2504 2505 malloc_mutex_lock(tsdn, &bin->lock); 2506 if ((run = bin->runcur) != NULL && run->nfree > 0) 2507 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); 2508 else 2509 ret = arena_bin_malloc_hard(tsdn, arena, bin); 2510 2511 if (ret == NULL) { 2512 malloc_mutex_unlock(tsdn, &bin->lock); 2513 return (NULL); 2514 } 2515 2516 if (config_stats) { 2517 bin->stats.nmalloc++; 2518 bin->stats.nrequests++; 2519 bin->stats.curregs++; 2520 } 2521 malloc_mutex_unlock(tsdn, &bin->lock); 2522 if (config_prof && !isthreaded && arena_prof_accum(tsdn, arena, usize)) 2523 prof_idump(tsdn); 2524 2525 if (!zero) { 2526 if (config_fill) { 2527 if (unlikely(opt_junk_alloc)) { 2528 arena_alloc_junk_small(ret, 2529 &arena_bin_info[binind], false); 2530 } else if (unlikely(opt_zero)) 2531 memset(ret, 0, usize); 2532 } 2533 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); 2534 } else { 2535 if (config_fill && unlikely(opt_junk_alloc)) { 2536 arena_alloc_junk_small(ret, &arena_bin_info[binind], 2537 true); 2538 } 2539 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); 2540 memset(ret, 0, usize); 2541 } 2542 2543 arena_decay_tick(tsdn, arena); 2544 return (ret); 2545 } 2546 2547 void * 2548 arena_malloc_large(tsdn_t *tsdn, arena_t *arena, szind_t binind, bool zero) 2549 { 2550 void *ret; 2551 size_t usize; 2552 uintptr_t random_offset; 2553 arena_run_t *run; 2554 arena_chunk_map_misc_t *miscelm; 2555 UNUSED bool idump JEMALLOC_CC_SILENCE_INIT(false); 2556 2557 /* Large allocation. */ 2558 usize = index2size(binind); 2559 malloc_mutex_lock(tsdn, &arena->lock); 2560 if (config_cache_oblivious) { 2561 uint64_t r; 2562 2563 /* 2564 * Compute a uniformly distributed offset within the first page 2565 * that is a multiple of the cacheline size, e.g. [0 .. 64) * 64 2566 * for 4 KiB pages and 64-byte cachelines.
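 * (Worked example, assuming LG_PAGE == 12 and LG_CACHELINE == 6:
 * prng_lg_range_zu() below draws r uniformly from [0, 2^6), so
 * random_offset == r << 6 takes one of the 64 values 0, 64, 128,
 * ..., 4032.)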
2567 */ 2568 r = prng_lg_range_zu(&arena->offset_state, LG_PAGE - 2569 LG_CACHELINE, false); 2570 random_offset = ((uintptr_t)r) << LG_CACHELINE; 2571 } else 2572 random_offset = 0; 2573 run = arena_run_alloc_large(tsdn, arena, usize + large_pad, zero); 2574 if (run == NULL) { 2575 malloc_mutex_unlock(tsdn, &arena->lock); 2576 return (NULL); 2577 } 2578 miscelm = arena_run_to_miscelm(run); 2579 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + 2580 random_offset); 2581 if (config_stats) { 2582 szind_t index = binind - NBINS; 2583 2584 arena->stats.nmalloc_large++; 2585 arena->stats.nrequests_large++; 2586 arena->stats.allocated_large += usize; 2587 arena->stats.lstats[index].nmalloc++; 2588 arena->stats.lstats[index].nrequests++; 2589 arena->stats.lstats[index].curruns++; 2590 } 2591 if (config_prof) 2592 idump = arena_prof_accum_locked(arena, usize); 2593 malloc_mutex_unlock(tsdn, &arena->lock); 2594 if (config_prof && idump) 2595 prof_idump(tsdn); 2596 2597 if (!zero) { 2598 if (config_fill) { 2599 if (unlikely(opt_junk_alloc)) 2600 memset(ret, JEMALLOC_ALLOC_JUNK, usize); 2601 else if (unlikely(opt_zero)) 2602 memset(ret, 0, usize); 2603 } 2604 } 2605 2606 arena_decay_tick(tsdn, arena); 2607 return (ret); 2608 } 2609 2610 void * 2611 arena_malloc_hard(tsdn_t *tsdn, arena_t *arena, size_t size, szind_t ind, 2612 bool zero) 2613 { 2614 2615 assert(!tsdn_null(tsdn) || arena != NULL); 2616 2617 if (likely(!tsdn_null(tsdn))) 2618 arena = arena_choose(tsdn_tsd(tsdn), arena); 2619 if (unlikely(arena == NULL)) 2620 return (NULL); 2621 2622 if (likely(size <= SMALL_MAXCLASS)) 2623 return (arena_malloc_small(tsdn, arena, ind, zero)); 2624 if (likely(size <= large_maxclass)) 2625 return (arena_malloc_large(tsdn, arena, ind, zero)); 2626 return (huge_malloc(tsdn, arena, index2size(ind), zero)); 2627 } 2628 2629 /* Only handles large allocations that require more than page alignment. 
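 * (Worked example, assuming PAGE == 4 KiB and large_pad == PAGE as in
 * cache-oblivious builds: for usize == 8 KiB and alignment == 16 KiB,
 * alloc_size == 8K + 4K + 16K - 4K == 24 KiB. The run is then
 * trimmed: leadsize bytes come off the head so the result is 16
 * KiB-aligned, and trailsize bytes come off the tail.)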
*/ 2630 static void * 2631 arena_palloc_large(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, 2632 bool zero) 2633 { 2634 void *ret; 2635 size_t alloc_size, leadsize, trailsize; 2636 arena_run_t *run; 2637 arena_chunk_t *chunk; 2638 arena_chunk_map_misc_t *miscelm; 2639 void *rpages; 2640 2641 assert(!tsdn_null(tsdn) || arena != NULL); 2642 assert(usize == PAGE_CEILING(usize)); 2643 2644 if (likely(!tsdn_null(tsdn))) 2645 arena = arena_choose(tsdn_tsd(tsdn), arena); 2646 if (unlikely(arena == NULL)) 2647 return (NULL); 2648 2649 alignment = PAGE_CEILING(alignment); 2650 alloc_size = usize + large_pad + alignment - PAGE; 2651 2652 malloc_mutex_lock(tsdn, &arena->lock); 2653 run = arena_run_alloc_large(tsdn, arena, alloc_size, false); 2654 if (run == NULL) { 2655 malloc_mutex_unlock(tsdn, &arena->lock); 2656 return (NULL); 2657 } 2658 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 2659 miscelm = arena_run_to_miscelm(run); 2660 rpages = arena_miscelm_to_rpages(miscelm); 2661 2662 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - 2663 (uintptr_t)rpages; 2664 assert(alloc_size >= leadsize + usize); 2665 trailsize = alloc_size - leadsize - usize - large_pad; 2666 if (leadsize != 0) { 2667 arena_chunk_map_misc_t *head_miscelm = miscelm; 2668 arena_run_t *head_run = run; 2669 2670 miscelm = arena_miscelm_get_mutable(chunk, 2671 arena_miscelm_to_pageind(head_miscelm) + (leadsize >> 2672 LG_PAGE)); 2673 run = &miscelm->run; 2674 2675 arena_run_trim_head(tsdn, arena, chunk, head_run, alloc_size, 2676 alloc_size - leadsize); 2677 } 2678 if (trailsize != 0) { 2679 arena_run_trim_tail(tsdn, arena, chunk, run, usize + large_pad + 2680 trailsize, usize + large_pad, false); 2681 } 2682 if (arena_run_init_large(arena, run, usize + large_pad, zero)) { 2683 size_t run_ind = 2684 arena_miscelm_to_pageind(arena_run_to_miscelm(run)); 2685 bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0); 2686 bool decommitted = (arena_mapbits_decommitted_get(chunk, 2687 run_ind) != 0); 2688 2689 assert(decommitted); /* Cause of OOM. */ 2690 arena_run_dalloc(tsdn, arena, run, dirty, false, decommitted); 2691 malloc_mutex_unlock(tsdn, &arena->lock); 2692 return (NULL); 2693 } 2694 ret = arena_miscelm_to_rpages(miscelm); 2695 2696 if (config_stats) { 2697 szind_t index = size2index(usize) - NBINS; 2698 2699 arena->stats.nmalloc_large++; 2700 arena->stats.nrequests_large++; 2701 arena->stats.allocated_large += usize; 2702 arena->stats.lstats[index].nmalloc++; 2703 arena->stats.lstats[index].nrequests++; 2704 arena->stats.lstats[index].curruns++; 2705 } 2706 malloc_mutex_unlock(tsdn, &arena->lock); 2707 2708 if (config_fill && !zero) { 2709 if (unlikely(opt_junk_alloc)) 2710 memset(ret, JEMALLOC_ALLOC_JUNK, usize); 2711 else if (unlikely(opt_zero)) 2712 memset(ret, 0, usize); 2713 } 2714 arena_decay_tick(tsdn, arena); 2715 return (ret); 2716 } 2717 2718 void * 2719 arena_palloc(tsdn_t *tsdn, arena_t *arena, size_t usize, size_t alignment, 2720 bool zero, tcache_t *tcache) 2721 { 2722 void *ret; 2723 2724 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE 2725 && (usize & PAGE_MASK) == 0))) { 2726 /* Small; alignment doesn't require special run placement. */ 2727 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, 2728 tcache, true); 2729 } else if (usize <= large_maxclass && alignment <= PAGE) { 2730 /* 2731 * Large; alignment doesn't require special run placement. 
2732 * However, the cached pointer may be at a random offset from 2733 * the base of the run, so do some bit manipulation to retrieve 2734 * the base. 2735 */ 2736 ret = arena_malloc(tsdn, arena, usize, size2index(usize), zero, 2737 tcache, true); 2738 if (config_cache_oblivious) 2739 ret = (void *)((uintptr_t)ret & ~PAGE_MASK); 2740 } else { 2741 if (likely(usize <= large_maxclass)) { 2742 ret = arena_palloc_large(tsdn, arena, usize, alignment, 2743 zero); 2744 } else if (likely(alignment <= chunksize)) 2745 ret = huge_malloc(tsdn, arena, usize, zero); 2746 else { 2747 ret = huge_palloc(tsdn, arena, usize, alignment, zero); 2748 } 2749 } 2750 return (ret); 2751 } 2752 2753 void 2754 arena_prof_promoted(tsdn_t *tsdn, const void *ptr, size_t size) 2755 { 2756 arena_chunk_t *chunk; 2757 size_t pageind; 2758 szind_t binind; 2759 2760 cassert(config_prof); 2761 assert(ptr != NULL); 2762 assert(CHUNK_ADDR2BASE(ptr) != ptr); 2763 assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS); 2764 assert(isalloc(tsdn, ptr, true) == LARGE_MINCLASS); 2765 assert(size <= SMALL_MAXCLASS); 2766 2767 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 2768 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2769 binind = size2index(size); 2770 assert(binind < NBINS); 2771 arena_mapbits_large_binind_set(chunk, pageind, binind); 2772 2773 assert(isalloc(tsdn, ptr, false) == LARGE_MINCLASS); 2774 assert(isalloc(tsdn, ptr, true) == size); 2775 } 2776 2777 static void 2778 arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, 2779 arena_bin_t *bin) 2780 { 2781 2782 /* Dissociate run from bin. */ 2783 if (run == bin->runcur) 2784 bin->runcur = NULL; 2785 else { 2786 szind_t binind = arena_bin_index(extent_node_arena_get( 2787 &chunk->node), bin); 2788 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 2789 2790 /* 2791 * The following block's conditional is necessary because if the 2792 * run only contains one region, then it never gets inserted 2793 * into the non-full runs heap. 2794 */ 2795 if (bin_info->nregs != 1) { 2796 arena_chunk_map_misc_t *miscelm = 2797 arena_run_to_miscelm(run); 2798 2799 arena_run_heap_remove(&bin->runs, miscelm); 2800 } 2801 } 2802 } 2803 2804 static void 2805 arena_dalloc_bin_run(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, 2806 arena_run_t *run, arena_bin_t *bin) 2807 { 2808 2809 assert(run != bin->runcur); 2810 2811 malloc_mutex_unlock(tsdn, &bin->lock); 2812 /******************************/ 2813 malloc_mutex_lock(tsdn, &arena->lock); 2814 arena_run_dalloc(tsdn, arena, run, true, false, false); 2815 malloc_mutex_unlock(tsdn, &arena->lock); 2816 /****************************/ 2817 malloc_mutex_lock(tsdn, &bin->lock); 2818 if (config_stats) 2819 bin->stats.curruns--; 2820 } 2821 2822 static void 2823 arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 2824 arena_bin_t *bin) 2825 { 2826 2827 /* 2828 * Make sure that if bin->runcur is non-NULL, it refers to the lowest 2829 * non-full run. It is okay to NULL runcur out rather than proactively 2830 * keeping it pointing at the lowest non-full run. 2831 */ 2832 if ((uintptr_t)run < (uintptr_t)bin->runcur) { 2833 /* Switch runcur.
*/ 2834 if (bin->runcur->nfree > 0) 2835 arena_bin_runs_insert(bin, bin->runcur); 2836 bin->runcur = run; 2837 if (config_stats) 2838 bin->stats.reruns++; 2839 } else 2840 arena_bin_runs_insert(bin, run); 2841 } 2842 2843 static void 2844 arena_dalloc_bin_locked_impl(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, 2845 void *ptr, arena_chunk_map_bits_t *bitselm, bool junked) 2846 { 2847 size_t pageind, rpages_ind; 2848 arena_run_t *run; 2849 arena_bin_t *bin; 2850 arena_bin_info_t *bin_info; 2851 szind_t binind; 2852 2853 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2854 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); 2855 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; 2856 binind = run->binind; 2857 bin = &arena->bins[binind]; 2858 bin_info = &arena_bin_info[binind]; 2859 2860 if (!junked && config_fill && unlikely(opt_junk_free)) 2861 arena_dalloc_junk_small(ptr, bin_info); 2862 2863 arena_run_reg_dalloc(run, ptr); 2864 if (run->nfree == bin_info->nregs) { 2865 arena_dissociate_bin_run(chunk, run, bin); 2866 arena_dalloc_bin_run(tsdn, arena, chunk, run, bin); 2867 } else if (run->nfree == 1 && run != bin->runcur) 2868 arena_bin_lower_run(arena, chunk, run, bin); 2869 2870 if (config_stats) { 2871 bin->stats.ndalloc++; 2872 bin->stats.curregs--; 2873 } 2874 } 2875 2876 void 2877 arena_dalloc_bin_junked_locked(tsdn_t *tsdn, arena_t *arena, 2878 arena_chunk_t *chunk, void *ptr, arena_chunk_map_bits_t *bitselm) 2879 { 2880 2881 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, true); 2882 } 2883 2884 void 2885 arena_dalloc_bin(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, void *ptr, 2886 size_t pageind, arena_chunk_map_bits_t *bitselm) 2887 { 2888 arena_run_t *run; 2889 arena_bin_t *bin; 2890 size_t rpages_ind; 2891 2892 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); 2893 run = &arena_miscelm_get_mutable(chunk, rpages_ind)->run; 2894 bin = &arena->bins[run->binind]; 2895 malloc_mutex_lock(tsdn, &bin->lock); 2896 arena_dalloc_bin_locked_impl(tsdn, arena, chunk, ptr, bitselm, false); 2897 malloc_mutex_unlock(tsdn, &bin->lock); 2898 } 2899 2900 void 2901 arena_dalloc_small(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, 2902 void *ptr, size_t pageind) 2903 { 2904 arena_chunk_map_bits_t *bitselm; 2905 2906 if (config_debug) { 2907 /* arena_ptr_small_binind_get() does extra sanity checking. 
*/ 2908 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, 2909 pageind)) != BININD_INVALID); 2910 } 2911 bitselm = arena_bitselm_get_mutable(chunk, pageind); 2912 arena_dalloc_bin(tsdn, arena, chunk, ptr, pageind, bitselm); 2913 arena_decay_tick(tsdn, arena); 2914 } 2915 2916 #ifdef JEMALLOC_JET 2917 #undef arena_dalloc_junk_large 2918 #define arena_dalloc_junk_large JEMALLOC_N(n_arena_dalloc_junk_large) 2919 #endif 2920 void 2921 arena_dalloc_junk_large(void *ptr, size_t usize) 2922 { 2923 2924 if (config_fill && unlikely(opt_junk_free)) 2925 memset(ptr, JEMALLOC_FREE_JUNK, usize); 2926 } 2927 #ifdef JEMALLOC_JET 2928 #undef arena_dalloc_junk_large 2929 #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) 2930 arena_dalloc_junk_large_t *arena_dalloc_junk_large = 2931 JEMALLOC_N(n_arena_dalloc_junk_large); 2932 #endif 2933 2934 static void 2935 arena_dalloc_large_locked_impl(tsdn_t *tsdn, arena_t *arena, 2936 arena_chunk_t *chunk, void *ptr, bool junked) 2937 { 2938 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2939 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, 2940 pageind); 2941 arena_run_t *run = &miscelm->run; 2942 2943 if (config_fill || config_stats) { 2944 size_t usize = arena_mapbits_large_size_get(chunk, pageind) - 2945 large_pad; 2946 2947 if (!junked) 2948 arena_dalloc_junk_large(ptr, usize); 2949 if (config_stats) { 2950 szind_t index = size2index(usize) - NBINS; 2951 2952 arena->stats.ndalloc_large++; 2953 arena->stats.allocated_large -= usize; 2954 arena->stats.lstats[index].ndalloc++; 2955 arena->stats.lstats[index].curruns--; 2956 } 2957 } 2958 2959 arena_run_dalloc(tsdn, arena, run, true, false, false); 2960 } 2961 2962 void 2963 arena_dalloc_large_junked_locked(tsdn_t *tsdn, arena_t *arena, 2964 arena_chunk_t *chunk, void *ptr) 2965 { 2966 2967 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, true); 2968 } 2969 2970 void 2971 arena_dalloc_large(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, 2972 void *ptr) 2973 { 2974 2975 malloc_mutex_lock(tsdn, &arena->lock); 2976 arena_dalloc_large_locked_impl(tsdn, arena, chunk, ptr, false); 2977 malloc_mutex_unlock(tsdn, &arena->lock); 2978 arena_decay_tick(tsdn, arena); 2979 } 2980 2981 static void 2982 arena_ralloc_large_shrink(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, 2983 void *ptr, size_t oldsize, size_t size) 2984 { 2985 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2986 arena_chunk_map_misc_t *miscelm = arena_miscelm_get_mutable(chunk, 2987 pageind); 2988 arena_run_t *run = &miscelm->run; 2989 2990 assert(size < oldsize); 2991 2992 /* 2993 * Shrink the run, and make trailing pages available for other 2994 * allocations. 
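 * (Illustrative example: shrinking a 24 KiB large run to 8 KiB hands
 * the trailing 16 KiB back via arena_run_trim_tail() below, and the
 * stats block moves one run's worth of accounting from the old size
 * class to the new one.)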
2995 */ 2996 malloc_mutex_lock(tsdn, &arena->lock); 2997 arena_run_trim_tail(tsdn, arena, chunk, run, oldsize + large_pad, size + 2998 large_pad, true); 2999 if (config_stats) { 3000 szind_t oldindex = size2index(oldsize) - NBINS; 3001 szind_t index = size2index(size) - NBINS; 3002 3003 arena->stats.ndalloc_large++; 3004 arena->stats.allocated_large -= oldsize; 3005 arena->stats.lstats[oldindex].ndalloc++; 3006 arena->stats.lstats[oldindex].curruns--; 3007 3008 arena->stats.nmalloc_large++; 3009 arena->stats.nrequests_large++; 3010 arena->stats.allocated_large += size; 3011 arena->stats.lstats[index].nmalloc++; 3012 arena->stats.lstats[index].nrequests++; 3013 arena->stats.lstats[index].curruns++; 3014 } 3015 malloc_mutex_unlock(tsdn, &arena->lock); 3016 } 3017 3018 static bool 3019 arena_ralloc_large_grow(tsdn_t *tsdn, arena_t *arena, arena_chunk_t *chunk, 3020 void *ptr, size_t oldsize, size_t usize_min, size_t usize_max, bool zero) 3021 { 3022 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 3023 size_t npages = (oldsize + large_pad) >> LG_PAGE; 3024 size_t followsize; 3025 3026 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) - 3027 large_pad); 3028 3029 /* Try to extend the run. */ 3030 malloc_mutex_lock(tsdn, &arena->lock); 3031 if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk, 3032 pageind+npages) != 0) 3033 goto label_fail; 3034 followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages); 3035 if (oldsize + followsize >= usize_min) { 3036 /* 3037 * The next run is available and sufficiently large. Split the 3038 * following run, then merge the first part with the existing 3039 * allocation. 3040 */ 3041 arena_run_t *run; 3042 size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask; 3043 3044 usize = usize_max; 3045 while (oldsize + followsize < usize) 3046 usize = index2size(size2index(usize)-1); 3047 assert(usize >= usize_min); 3048 assert(usize >= oldsize); 3049 splitsize = usize - oldsize; 3050 if (splitsize == 0) 3051 goto label_fail; 3052 3053 run = &arena_miscelm_get_mutable(chunk, pageind+npages)->run; 3054 if (arena_run_split_large(arena, run, splitsize, zero)) 3055 goto label_fail; 3056 3057 if (config_cache_oblivious && zero) { 3058 /* 3059 * Zero the trailing bytes of the original allocation's 3060 * last page, since they are in an indeterminate state. 3061 * There will always be trailing bytes, because ptr's 3062 * offset from the beginning of the run is a multiple of 3063 * CACHELINE in [0 .. PAGE). 3064 */ 3065 void *zbase = (void *)((uintptr_t)ptr + oldsize); 3066 void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + 3067 PAGE)); 3068 size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; 3069 assert(nzero > 0); 3070 memset(zbase, 0, nzero); 3071 } 3072 3073 size = oldsize + splitsize; 3074 npages = (size + large_pad) >> LG_PAGE; 3075 3076 /* 3077 * Mark the extended run as dirty if either portion of the run 3078 * was dirty before allocation. This is rather pedantic, 3079 * because there's not actually any sequence of events that 3080 * could cause the resulting run to be passed to 3081 * arena_run_dalloc() with the dirty argument set to false 3082 * (which is when dirty flag consistency would really matter). 3083 */ 3084 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | 3085 arena_mapbits_dirty_get(chunk, pageind+npages-1); 3086 flag_unzeroed_mask = flag_dirty == 0 ? 
CHUNK_MAP_UNZEROED : 0; 3087 arena_mapbits_large_set(chunk, pageind, size + large_pad, 3088 flag_dirty | (flag_unzeroed_mask & 3089 arena_mapbits_unzeroed_get(chunk, pageind))); 3090 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty | 3091 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 3092 pageind+npages-1))); 3093 3094 if (config_stats) { 3095 szind_t oldindex = size2index(oldsize) - NBINS; 3096 szind_t index = size2index(size) - NBINS; 3097 3098 arena->stats.ndalloc_large++; 3099 arena->stats.allocated_large -= oldsize; 3100 arena->stats.lstats[oldindex].ndalloc++; 3101 arena->stats.lstats[oldindex].curruns--; 3102 3103 arena->stats.nmalloc_large++; 3104 arena->stats.nrequests_large++; 3105 arena->stats.allocated_large += size; 3106 arena->stats.lstats[index].nmalloc++; 3107 arena->stats.lstats[index].nrequests++; 3108 arena->stats.lstats[index].curruns++; 3109 } 3110 malloc_mutex_unlock(tsdn, &arena->lock); 3111 return (false); 3112 } 3113 label_fail: 3114 malloc_mutex_unlock(tsdn, &arena->lock); 3115 return (true); 3116 } 3117 3118 #ifdef JEMALLOC_JET 3119 #undef arena_ralloc_junk_large 3120 #define arena_ralloc_junk_large JEMALLOC_N(n_arena_ralloc_junk_large) 3121 #endif 3122 static void 3123 arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) 3124 { 3125 3126 if (config_fill && unlikely(opt_junk_free)) { 3127 memset((void *)((uintptr_t)ptr + usize), JEMALLOC_FREE_JUNK, 3128 old_usize - usize); 3129 } 3130 } 3131 #ifdef JEMALLOC_JET 3132 #undef arena_ralloc_junk_large 3133 #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) 3134 arena_ralloc_junk_large_t *arena_ralloc_junk_large = 3135 JEMALLOC_N(n_arena_ralloc_junk_large); 3136 #endif 3137 3138 /* 3139 * Try to resize a large allocation, in order to avoid copying. This will 3140 * always fail when growing an object if the run that follows it is already in use. 3141 */ 3142 static bool 3143 arena_ralloc_large(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t usize_min, 3144 size_t usize_max, bool zero) 3145 { 3146 arena_chunk_t *chunk; 3147 arena_t *arena; 3148 3149 if (oldsize == usize_max) { 3150 /* Current size class is compatible and maximal. */ 3151 return (false); 3152 } 3153 3154 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 3155 arena = extent_node_arena_get(&chunk->node); 3156 3157 if (oldsize < usize_max) { 3158 bool ret = arena_ralloc_large_grow(tsdn, arena, chunk, ptr, 3159 oldsize, usize_min, usize_max, zero); 3160 if (config_fill && !ret && !zero) { 3161 if (unlikely(opt_junk_alloc)) { 3162 memset((void *)((uintptr_t)ptr + oldsize), 3163 JEMALLOC_ALLOC_JUNK, 3164 isalloc(tsdn, ptr, config_prof) - oldsize); 3165 } else if (unlikely(opt_zero)) { 3166 memset((void *)((uintptr_t)ptr + oldsize), 0, 3167 isalloc(tsdn, ptr, config_prof) - oldsize); 3168 } 3169 } 3170 return (ret); 3171 } 3172 3173 assert(oldsize > usize_max); 3174 /* Fill before shrinking in order to avoid a race. */ 3175 arena_ralloc_junk_large(ptr, oldsize, usize_max); 3176 arena_ralloc_large_shrink(tsdn, arena, chunk, ptr, oldsize, usize_max); 3177 return (false); 3178 } 3179 3180 bool 3181 arena_ralloc_no_move(tsdn_t *tsdn, void *ptr, size_t oldsize, size_t size, 3182 size_t extra, bool zero) 3183 { 3184 size_t usize_min, usize_max; 3185 3186 /* Calls with non-zero extra had to clamp extra.
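 * (Illustrative example of the in-place fast path below, assuming
 * hypothetical class boundaries: if oldsize and s2u(size) map to the
 * same small size class, the allocation is left where it is; growing
 * across a class boundary, e.g. from an 8-byte class to a 16-byte
 * class, forces the caller to move the object.)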
*/ 3187 assert(extra == 0 || size + extra <= HUGE_MAXCLASS); 3188 3189 if (unlikely(size > HUGE_MAXCLASS)) 3190 return (true); 3191 3192 usize_min = s2u(size); 3193 usize_max = s2u(size + extra); 3194 if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) { 3195 arena_chunk_t *chunk; 3196 3197 /* 3198 * Avoid moving the allocation if the size class can be left the 3199 * same. 3200 */ 3201 if (oldsize <= SMALL_MAXCLASS) { 3202 assert(arena_bin_info[size2index(oldsize)].reg_size == 3203 oldsize); 3204 if ((usize_max > SMALL_MAXCLASS || 3205 size2index(usize_max) != size2index(oldsize)) && 3206 (size > oldsize || usize_max < oldsize)) 3207 return (true); 3208 } else { 3209 if (usize_max <= SMALL_MAXCLASS) 3210 return (true); 3211 if (arena_ralloc_large(tsdn, ptr, oldsize, usize_min, 3212 usize_max, zero)) 3213 return (true); 3214 } 3215 3216 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 3217 arena_decay_tick(tsdn, extent_node_arena_get(&chunk->node)); 3218 return (false); 3219 } else { 3220 return (huge_ralloc_no_move(tsdn, ptr, oldsize, usize_min, 3221 usize_max, zero)); 3222 } 3223 } 3224 3225 static void * 3226 arena_ralloc_move_helper(tsdn_t *tsdn, arena_t *arena, size_t usize, 3227 size_t alignment, bool zero, tcache_t *tcache) 3228 { 3229 3230 if (alignment == 0) 3231 return (arena_malloc(tsdn, arena, usize, size2index(usize), 3232 zero, tcache, true)); 3233 usize = sa2u(usize, alignment); 3234 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) 3235 return (NULL); 3236 return (ipalloct(tsdn, usize, alignment, zero, tcache, arena)); 3237 } 3238 3239 void * 3240 arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, 3241 size_t alignment, bool zero, tcache_t *tcache) 3242 { 3243 void *ret; 3244 size_t usize; 3245 3246 usize = s2u(size); 3247 if (unlikely(usize == 0 || size > HUGE_MAXCLASS)) 3248 return (NULL); 3249 3250 if (likely(usize <= large_maxclass)) { 3251 size_t copysize; 3252 3253 /* Try to avoid moving the allocation. */ 3254 if (!arena_ralloc_no_move(tsd_tsdn(tsd), ptr, oldsize, usize, 0, 3255 zero)) 3256 return (ptr); 3257 3258 /* 3259 * size and oldsize are different enough that we need to move 3260 * the object. In that case, fall back to allocating new space 3261 * and copying. 3262 */ 3263 ret = arena_ralloc_move_helper(tsd_tsdn(tsd), arena, usize, 3264 alignment, zero, tcache); 3265 if (ret == NULL) 3266 return (NULL); 3267 3268 /* 3269 * Junk/zero-filling were already done by 3270 * ipalloc()/arena_malloc(). 3271 */ 3272 3273 copysize = (usize < oldsize) ? 
usize : oldsize; 3274 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); 3275 memcpy(ret, ptr, copysize); 3276 isqalloc(tsd, ptr, oldsize, tcache, true); 3277 } else { 3278 ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, 3279 zero, tcache); 3280 } 3281 return (ret); 3282 } 3283 3284 dss_prec_t 3285 arena_dss_prec_get(tsdn_t *tsdn, arena_t *arena) 3286 { 3287 dss_prec_t ret; 3288 3289 malloc_mutex_lock(tsdn, &arena->lock); 3290 ret = arena->dss_prec; 3291 malloc_mutex_unlock(tsdn, &arena->lock); 3292 return (ret); 3293 } 3294 3295 bool 3296 arena_dss_prec_set(tsdn_t *tsdn, arena_t *arena, dss_prec_t dss_prec) 3297 { 3298 3299 if (!have_dss) 3300 return (dss_prec != dss_prec_disabled); 3301 malloc_mutex_lock(tsdn, &arena->lock); 3302 arena->dss_prec = dss_prec; 3303 malloc_mutex_unlock(tsdn, &arena->lock); 3304 return (false); 3305 } 3306 3307 ssize_t 3308 arena_lg_dirty_mult_default_get(void) 3309 { 3310 3311 return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); 3312 } 3313 3314 bool 3315 arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) 3316 { 3317 3318 if (opt_purge != purge_mode_ratio) 3319 return (true); 3320 if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) 3321 return (true); 3322 atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); 3323 return (false); 3324 } 3325 3326 ssize_t 3327 arena_decay_time_default_get(void) 3328 { 3329 3330 return ((ssize_t)atomic_read_z((size_t *)&decay_time_default)); 3331 } 3332 3333 bool 3334 arena_decay_time_default_set(ssize_t decay_time) 3335 { 3336 3337 if (opt_purge != purge_mode_decay) 3338 return (true); 3339 if (!arena_decay_time_valid(decay_time)) 3340 return (true); 3341 atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time); 3342 return (false); 3343 } 3344 3345 static void 3346 arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads, 3347 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, 3348 size_t *nactive, size_t *ndirty) 3349 { 3350 3351 *nthreads += arena_nthreads_get(arena, false); 3352 *dss = dss_prec_names[arena->dss_prec]; 3353 *lg_dirty_mult = arena->lg_dirty_mult; 3354 *decay_time = arena->decay.time; 3355 *nactive += arena->nactive; 3356 *ndirty += arena->ndirty; 3357 } 3358 3359 void 3360 arena_basic_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, 3361 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, 3362 size_t *nactive, size_t *ndirty) 3363 { 3364 3365 malloc_mutex_lock(tsdn, &arena->lock); 3366 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, 3367 decay_time, nactive, ndirty); 3368 malloc_mutex_unlock(tsdn, &arena->lock); 3369 } 3370 3371 void 3372 arena_stats_merge(tsdn_t *tsdn, arena_t *arena, unsigned *nthreads, 3373 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, 3374 size_t *nactive, size_t *ndirty, arena_stats_t *astats, 3375 malloc_bin_stats_t *bstats, malloc_large_stats_t *lstats, 3376 malloc_huge_stats_t *hstats) 3377 { 3378 unsigned i; 3379 3380 cassert(config_stats); 3381 3382 malloc_mutex_lock(tsdn, &arena->lock); 3383 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, 3384 decay_time, nactive, ndirty); 3385 3386 astats->mapped += arena->stats.mapped; 3387 astats->retained += arena->stats.retained; 3388 astats->npurge += arena->stats.npurge; 3389 astats->nmadvise += arena->stats.nmadvise; 3390 astats->purged += arena->stats.purged; 3391 astats->metadata_mapped += arena->stats.metadata_mapped; 3392 astats->metadata_allocated += 
arena_metadata_allocated_get(arena); 3393 astats->allocated_large += arena->stats.allocated_large; 3394 astats->nmalloc_large += arena->stats.nmalloc_large; 3395 astats->ndalloc_large += arena->stats.ndalloc_large; 3396 astats->nrequests_large += arena->stats.nrequests_large; 3397 astats->allocated_huge += arena->stats.allocated_huge; 3398 astats->nmalloc_huge += arena->stats.nmalloc_huge; 3399 astats->ndalloc_huge += arena->stats.ndalloc_huge; 3400 3401 for (i = 0; i < nlclasses; i++) { 3402 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; 3403 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; 3404 lstats[i].nrequests += arena->stats.lstats[i].nrequests; 3405 lstats[i].curruns += arena->stats.lstats[i].curruns; 3406 } 3407 3408 for (i = 0; i < nhclasses; i++) { 3409 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; 3410 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; 3411 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; 3412 } 3413 malloc_mutex_unlock(tsdn, &arena->lock); 3414 3415 for (i = 0; i < NBINS; i++) { 3416 arena_bin_t *bin = &arena->bins[i]; 3417 3418 malloc_mutex_lock(tsdn, &bin->lock); 3419 bstats[i].nmalloc += bin->stats.nmalloc; 3420 bstats[i].ndalloc += bin->stats.ndalloc; 3421 bstats[i].nrequests += bin->stats.nrequests; 3422 bstats[i].curregs += bin->stats.curregs; 3423 if (config_tcache) { 3424 bstats[i].nfills += bin->stats.nfills; 3425 bstats[i].nflushes += bin->stats.nflushes; 3426 } 3427 bstats[i].nruns += bin->stats.nruns; 3428 bstats[i].reruns += bin->stats.reruns; 3429 bstats[i].curruns += bin->stats.curruns; 3430 malloc_mutex_unlock(tsdn, &bin->lock); 3431 } 3432 } 3433 3434 unsigned 3435 arena_nthreads_get(arena_t *arena, bool internal) 3436 { 3437 3438 return (atomic_read_u(&arena->nthreads[internal])); 3439 } 3440 3441 void 3442 arena_nthreads_inc(arena_t *arena, bool internal) 3443 { 3444 3445 atomic_add_u(&arena->nthreads[internal], 1); 3446 } 3447 3448 void 3449 arena_nthreads_dec(arena_t *arena, bool internal) 3450 { 3451 3452 atomic_sub_u(&arena->nthreads[internal], 1); 3453 } 3454 3455 arena_t * 3456 arena_new(tsdn_t *tsdn, unsigned ind) 3457 { 3458 arena_t *arena; 3459 unsigned i; 3460 3461 /* 3462 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly 3463 * because there is no way to clean up if base_alloc() OOMs. 
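 * (Illustrative layout of the single base_alloc() block in stats
 * builds, matching the pointer arithmetic below:
 *
 *   [arena_t | pad to CACHELINE | lstats: nlclasses entries |
 *    pad to QUANTUM | hstats: nhclasses entries]
 * )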
3464 */ 3465 if (config_stats) { 3466 arena = (arena_t *)base_alloc(tsdn, 3467 CACHELINE_CEILING(sizeof(arena_t)) + 3468 QUANTUM_CEILING((nlclasses * sizeof(malloc_large_stats_t))) 3469 + (nhclasses * sizeof(malloc_huge_stats_t))); 3470 } else 3471 arena = (arena_t *)base_alloc(tsdn, sizeof(arena_t)); 3472 if (arena == NULL) 3473 return (NULL); 3474 3475 arena->ind = ind; 3476 arena->nthreads[0] = arena->nthreads[1] = 0; 3477 if (malloc_mutex_init(&arena->lock, "arena", WITNESS_RANK_ARENA)) 3478 return (NULL); 3479 3480 if (config_stats) { 3481 memset(&arena->stats, 0, sizeof(arena_stats_t)); 3482 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena 3483 + CACHELINE_CEILING(sizeof(arena_t))); 3484 memset(arena->stats.lstats, 0, nlclasses * 3485 sizeof(malloc_large_stats_t)); 3486 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena 3487 + CACHELINE_CEILING(sizeof(arena_t)) + 3488 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); 3489 memset(arena->stats.hstats, 0, nhclasses * 3490 sizeof(malloc_huge_stats_t)); 3491 if (config_tcache) 3492 ql_new(&arena->tcache_ql); 3493 } 3494 3495 if (config_prof) 3496 arena->prof_accumbytes = 0; 3497 3498 if (config_cache_oblivious) { 3499 /* 3500 * A nondeterministic seed based on the address of arena reduces 3501 * the likelihood of lockstep non-uniform cache index 3502 * utilization among identical concurrent processes, but at the 3503 * cost of test repeatability. For debug builds, instead use a 3504 * deterministic seed. 3505 */ 3506 arena->offset_state = config_debug ? ind : 3507 (size_t)(uintptr_t)arena; 3508 } 3509 3510 arena->dss_prec = chunk_dss_prec_get(); 3511 3512 ql_new(&arena->achunks); 3513 3514 arena->spare = NULL; 3515 3516 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); 3517 arena->purging = false; 3518 arena->nactive = 0; 3519 arena->ndirty = 0; 3520 3521 for (i = 0; i < NPSIZES; i++) 3522 arena_run_heap_new(&arena->runs_avail[i]); 3523 3524 qr_new(&arena->runs_dirty, rd_link); 3525 qr_new(&arena->chunks_cache, cc_link); 3526 3527 if (opt_purge == purge_mode_decay) 3528 arena_decay_init(arena, arena_decay_time_default_get()); 3529 3530 ql_new(&arena->huge); 3531 if (malloc_mutex_init(&arena->huge_mtx, "arena_huge", 3532 WITNESS_RANK_ARENA_HUGE)) 3533 return (NULL); 3534 3535 extent_tree_szad_new(&arena->chunks_szad_cached); 3536 extent_tree_ad_new(&arena->chunks_ad_cached); 3537 extent_tree_szad_new(&arena->chunks_szad_retained); 3538 extent_tree_ad_new(&arena->chunks_ad_retained); 3539 if (malloc_mutex_init(&arena->chunks_mtx, "arena_chunks", 3540 WITNESS_RANK_ARENA_CHUNKS)) 3541 return (NULL); 3542 ql_new(&arena->node_cache); 3543 if (malloc_mutex_init(&arena->node_cache_mtx, "arena_node_cache", 3544 WITNESS_RANK_ARENA_NODE_CACHE)) 3545 return (NULL); 3546 3547 arena->chunk_hooks = chunk_hooks_default; 3548 3549 /* Initialize bins. 
*/ 3550 for (i = 0; i < NBINS; i++) { 3551 arena_bin_t *bin = &arena->bins[i]; 3552 if (malloc_mutex_init(&bin->lock, "arena_bin", 3553 WITNESS_RANK_ARENA_BIN)) 3554 return (NULL); 3555 bin->runcur = NULL; 3556 arena_run_heap_new(&bin->runs); 3557 if (config_stats) 3558 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); 3559 } 3560 3561 return (arena); 3562 } 3563 3564 /* 3565 * Calculate bin_info->run_size such that it meets the following constraints: 3566 * 3567 * *) bin_info->run_size <= arena_maxrun 3568 * *) bin_info->nregs <= RUN_MAXREGS 3569 * 3570 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since 3571 * these settings are all interdependent. 3572 */ 3573 static void 3574 bin_info_run_size_calc(arena_bin_info_t *bin_info) 3575 { 3576 size_t pad_size; 3577 size_t try_run_size, perfect_run_size, actual_run_size; 3578 uint32_t try_nregs, perfect_nregs, actual_nregs; 3579 3580 /* 3581 * Determine redzone size based on minimum alignment and minimum 3582 * redzone size. Add padding to the end of the run if it is needed to 3583 * align the regions. The padding allows each redzone to be half the 3584 * minimum alignment; without the padding, each redzone would have to 3585 * be twice as large in order to maintain alignment. 3586 */ 3587 if (config_fill && unlikely(opt_redzone)) { 3588 size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1); 3589 if (align_min <= REDZONE_MINSIZE) { 3590 bin_info->redzone_size = REDZONE_MINSIZE; 3591 pad_size = 0; 3592 } else { 3593 bin_info->redzone_size = align_min >> 1; 3594 pad_size = bin_info->redzone_size; 3595 } 3596 } else { 3597 bin_info->redzone_size = 0; 3598 pad_size = 0; 3599 } 3600 bin_info->reg_interval = bin_info->reg_size + 3601 (bin_info->redzone_size << 1); 3602 3603 /* 3604 * Compute run size under ideal conditions (no redzones, no limit on run 3605 * size). 3606 */ 3607 try_run_size = PAGE; 3608 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); 3609 do { 3610 perfect_run_size = try_run_size; 3611 perfect_nregs = try_nregs; 3612 3613 try_run_size += PAGE; 3614 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); 3615 } while (perfect_run_size != perfect_nregs * bin_info->reg_size); 3616 assert(perfect_nregs <= RUN_MAXREGS); 3617 3618 actual_run_size = perfect_run_size; 3619 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3620 bin_info->reg_interval); 3621 3622 /* 3623 * Redzones can require enough padding that not even a single region can 3624 * fit within the number of pages that would normally be dedicated to a 3625 * run for this size class. Increase the run size until at least one 3626 * region fits. 3627 */ 3628 while (actual_nregs == 0) { 3629 assert(config_fill && unlikely(opt_redzone)); 3630 3631 actual_run_size += PAGE; 3632 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3633 bin_info->reg_interval); 3634 } 3635 3636 /* 3637 * Make sure that the run will fit within an arena chunk. 3638 */ 3639 while (actual_run_size > arena_maxrun) { 3640 actual_run_size -= PAGE; 3641 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3642 bin_info->reg_interval); 3643 } 3644 assert(actual_nregs > 0); 3645 assert(actual_run_size == s2u(actual_run_size)); 3646 3647 /* Copy final settings. 
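 * (Worked example, ignoring redzones: for reg_size == 80 the loop
 * above settles on perfect_run_size == 20480, the smallest multiple
 * of a 4 KiB page that 80 divides evenly, so run_size == 20480 (five
 * pages), nregs == 256, and reg0_offset == 0 are copied below.)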
static void
bin_info_init(void)
{
	arena_bin_info_t *bin_info;

#define	BIN_INFO_INIT_bin_yes(index, size)				\
	bin_info = &arena_bin_info[index];				\
	bin_info->reg_size = size;					\
	bin_info_run_size_calc(bin_info);				\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
#define	BIN_INFO_INIT_bin_no(index, size)
#define	SC(index, lg_grp, lg_delta, ndelta, psz, bin, lg_delta_lookup)	\
	BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta))
	SIZE_CLASSES
#undef BIN_INFO_INIT_bin_yes
#undef BIN_INFO_INIT_bin_no
#undef SC
}

void
arena_boot(void)
{
	unsigned i;

	arena_lg_dirty_mult_default_set(opt_lg_dirty_mult);
	arena_decay_time_default_set(opt_decay_time);

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 *
	 * (A worked example with hypothetical sizes follows this function.)
	 */
	map_bias = 0;
	for (i = 0; i < 3; i++) {
		size_t header_size = offsetof(arena_chunk_t, map_bits) +
		    ((sizeof(arena_chunk_map_bits_t) +
		    sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias));
		map_bias = (header_size + PAGE_MASK) >> LG_PAGE;
	}
	assert(map_bias > 0);

	map_misc_offset = offsetof(arena_chunk_t, map_bits) +
	    sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias);

	arena_maxrun = chunksize - (map_bias << LG_PAGE);
	assert(arena_maxrun > 0);
	large_maxclass = index2size(size2index(chunksize)-1);
	if (large_maxclass > arena_maxrun) {
		/*
		 * For small chunk sizes it's possible for there to be fewer
		 * non-header pages available than are necessary to serve the
		 * size classes just below chunksize.
		 */
		large_maxclass = arena_maxrun;
	}
	assert(large_maxclass > 0);
	nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS);
	nhclasses = NSIZES - nlclasses - NBINS;

	bin_info_init();
}
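/*
 * Worked example of the map_bias fixed point computed in arena_boot() above,
 * using purely hypothetical sizes: offsetof(arena_chunk_t, map_bits) == 88,
 * sizeof(arena_chunk_map_bits_t) == 8, sizeof(arena_chunk_map_misc_t) == 104
 * (so 112 bytes of page map per page), chunk_npages == 512, 4 KiB pages:
 *
 *	pass 1: header = 88 + 112*(512-0)  = 57432 -> map_bias = 15
 *	pass 2: header = 88 + 112*(512-15) = 55752 -> map_bias = 14
 *	pass 3: header = 88 + 112*(512-14) = 55864 -> map_bias = 14
 *
 * Pass 2 shrinks the header because the header pages no longer need map
 * entries; pass 3 checks that the smaller estimate still covers the header,
 * converging (as it must) by the third pass.
 */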
void
arena_prefork0(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->lock);
}

void
arena_prefork1(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->chunks_mtx);
}

void
arena_prefork2(tsdn_t *tsdn, arena_t *arena)
{

	malloc_mutex_prefork(tsdn, &arena->node_cache_mtx);
}

void
arena_prefork3(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(tsdn, &arena->bins[i].lock);
	malloc_mutex_prefork(tsdn, &arena->huge_mtx);
}

void
arena_postfork_parent(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	malloc_mutex_postfork_parent(tsdn, &arena->huge_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(tsdn, &arena->bins[i].lock);
	malloc_mutex_postfork_parent(tsdn, &arena->node_cache_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->chunks_mtx);
	malloc_mutex_postfork_parent(tsdn, &arena->lock);
}

void
arena_postfork_child(tsdn_t *tsdn, arena_t *arena)
{
	unsigned i;

	malloc_mutex_postfork_child(tsdn, &arena->huge_mtx);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(tsdn, &arena->bins[i].lock);
	malloc_mutex_postfork_child(tsdn, &arena->node_cache_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->chunks_mtx);
	malloc_mutex_postfork_child(tsdn, &arena->lock);
}
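/*
 * Sketch of how the fork hooks above compose (illustrative; the actual
 * registration happens elsewhere, via pthread_atfork() during jemalloc
 * initialization):
 *
 *	arena_prefork0(tsdn, arena);		acquires arena->lock
 *	arena_prefork1(tsdn, arena);		acquires chunks_mtx
 *	arena_prefork2(tsdn, arena);		acquires node_cache_mtx
 *	arena_prefork3(tsdn, arena);		acquires bin locks, huge_mtx
 *	... fork() ...
 *	arena_postfork_parent(tsdn, arena);	releases all of the above in
 *						reverse acquisition order
 *	arena_postfork_child(tsdn, arena);	releases (or, on some
 *						platforms, reinitializes) the
 *						same locks in the child
 *
 * Holding every arena mutex across fork() ensures that no arena lock is left
 * held by a thread that does not exist in the child process.
 */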