#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

purge_mode_t opt_purge = PURGE_DEFAULT;
const char *purge_mode_names[] = {
	"ratio",
	"decay",
	"N/A"
};
ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
static ssize_t lg_dirty_mult_default;
ssize_t opt_decay_time = DECAY_TIME_DEFAULT;
static ssize_t decay_time_default;

arena_bin_info_t arena_bin_info[NBINS];

size_t map_bias;
size_t map_misc_offset;
size_t arena_maxrun; /* Max run size for arenas. */
size_t large_maxclass; /* Max large size class. */
size_t run_quantize_max; /* Max run_quantize_*() input. */
static size_t small_maxrun; /* Max run size for small size classes. */
static bool *small_run_tab; /* Valid small run page multiples. */
static size_t *run_quantize_floor_tab; /* run_quantize_floor() memoization. */
static size_t *run_quantize_ceil_tab; /* run_quantize_ceil() memoization. */
unsigned nlclasses; /* Number of large size classes. */
unsigned nhclasses; /* Number of huge size classes. */
static szind_t runs_avail_bias; /* Size index for first runs_avail tree. */
static szind_t runs_avail_nclasses; /* Number of runs_avail trees. */

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_purge_to_limit(arena_t *arena, size_t ndirty_limit);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned, bool decommitted);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

JEMALLOC_INLINE_C size_t
arena_miscelm_size_get(const arena_chunk_map_misc_t *miscelm)
{
	arena_chunk_t *chunk;
	size_t pageind, mapbits;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(miscelm);
	pageind = arena_miscelm_to_pageind(miscelm);
	mapbits = arena_mapbits_get(chunk, pageind);
	return (arena_mapbits_size_decode(mapbits));
}

JEMALLOC_INLINE_C int
arena_run_addr_comp(const arena_chunk_map_misc_t *a,
    const arena_chunk_map_misc_t *b)
{
	uintptr_t a_miscelm = (uintptr_t)a;
	uintptr_t b_miscelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_miscelm > b_miscelm) - (a_miscelm < b_miscelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_misc_t,
    rb_link, arena_run_addr_comp)

static size_t
run_quantize_floor_compute(size_t size)
{
	size_t qsize;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/* Don't change sizes that are valid small run sizes. */
	if (size <= small_maxrun && small_run_tab[size >> LG_PAGE])
		return (size);

	/*
	 * Round down to the nearest run size that can actually be requested
	 * during normal large allocation. Add large_pad so that cache index
	 * randomization can offset the allocation from the page boundary.
	 */
	qsize = index2size(size2index(size - large_pad + 1) - 1) + large_pad;
	if (qsize <= SMALL_MAXCLASS + large_pad)
		return (run_quantize_floor_compute(size - large_pad));
	assert(qsize <= size);
	return (qsize);
}

static size_t
run_quantize_ceil_compute_hard(size_t size)
{
	size_t large_run_size_next;

	assert(size != 0);
	assert(size == PAGE_CEILING(size));

	/*
	 * Return the next quantized size greater than the input size.
	 * Quantized sizes comprise the union of run sizes that back small
	 * region runs, and run sizes that back large regions with no explicit
	 * alignment constraints.
	 */

	if (size > SMALL_MAXCLASS) {
		large_run_size_next = PAGE_CEILING(index2size(size2index(size -
		    large_pad) + 1) + large_pad);
	} else
		large_run_size_next = SIZE_T_MAX;
	if (size >= small_maxrun)
		return (large_run_size_next);

	while (true) {
		size += PAGE;
		assert(size <= small_maxrun);
		if (small_run_tab[size >> LG_PAGE]) {
			if (large_run_size_next < size)
				return (large_run_size_next);
			return (size);
		}
	}
}

static size_t
run_quantize_ceil_compute(size_t size)
{
	size_t qsize = run_quantize_floor_compute(size);

	if (qsize < size) {
		/*
		 * Skip a quantization that may have an adequately large run,
		 * because under-sized runs may be mixed in. This only happens
		 * when an unusual size is requested, i.e. for aligned
		 * allocation, and is just one of several places where linear
		 * search would potentially find sufficiently aligned available
		 * memory somewhere lower.
		 */
		qsize = run_quantize_ceil_compute_hard(qsize);
	}
	return (qsize);
}

#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(run_quantize_floor_impl)
#endif
static size_t
run_quantize_floor(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_floor_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_floor_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_floor
#define run_quantize_floor JEMALLOC_N(run_quantize_floor)
run_quantize_t *run_quantize_floor = JEMALLOC_N(run_quantize_floor_impl);
#endif

#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil_impl)
#endif
static size_t
run_quantize_ceil(size_t size)
{
	size_t ret;

	assert(size > 0);
	assert(size <= run_quantize_max);
	assert((size & PAGE_MASK) == 0);

	ret = run_quantize_ceil_tab[(size >> LG_PAGE) - 1];
	assert(ret == run_quantize_ceil_compute(size));
	return (ret);
}
#ifdef JEMALLOC_JET
#undef run_quantize_ceil
#define run_quantize_ceil JEMALLOC_N(run_quantize_ceil)
run_quantize_t *run_quantize_ceil = JEMALLOC_N(run_quantize_ceil_impl);
#endif

static arena_run_tree_t *
arena_runs_avail_get(arena_t *arena, szind_t ind)
{

	assert(ind >= runs_avail_bias);
	assert(ind - runs_avail_bias < runs_avail_nclasses);

	return (&arena->runs_avail[ind - runs_avail_bias]);
}

static void
arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_tree_insert(arena_runs_avail_get(arena, ind),
	    arena_miscelm_get(chunk, pageind));
}

static void
arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	szind_t ind = size2index(run_quantize_floor(arena_miscelm_size_get(
	    arena_miscelm_get(chunk, pageind))));
	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	arena_run_tree_remove(arena_runs_avail_get(arena, ind),
	    arena_miscelm_get(chunk, pageind));
}

static void
arena_run_dirty_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_new(&miscelm->rd, rd_link);
	qr_meld(&arena->runs_dirty, &miscelm->rd, rd_link);
	arena->ndirty += npages;
}

static void
arena_run_dirty_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind,
    size_t npages)
{
	arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind);

	assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >>
	    LG_PAGE));
	assert(arena_mapbits_dirty_get(chunk, pageind) == CHUNK_MAP_DIRTY);
	assert(arena_mapbits_dirty_get(chunk, pageind+npages-1) ==
	    CHUNK_MAP_DIRTY);

	qr_remove(&miscelm->rd, rd_link);
	assert(arena->ndirty >= npages);
	arena->ndirty -= npages;
}

static size_t
arena_chunk_dirty_npages(const extent_node_t *node)
{

	return (extent_node_size_get(node) >> LG_PAGE);
}

void
arena_chunk_cache_maybe_insert(arena_t *arena, extent_node_t *node, bool cache)
{

	if (cache) {
		extent_node_dirty_linkage_init(node);
		extent_node_dirty_insert(node, &arena->runs_dirty,
		    &arena->chunks_cache);
		arena->ndirty += arena_chunk_dirty_npages(node);
	}
}

void
arena_chunk_cache_maybe_remove(arena_t *arena, extent_node_t *node, bool dirty)
{

	if (dirty) {
		extent_node_dirty_remove(node);
		assert(arena->ndirty >= arena_chunk_dirty_npages(node));
		arena->ndirty -= arena_chunk_dirty_npages(node);
	}
}

JEMALLOC_INLINE_C void *
arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info)
{
	void *ret;
	size_t regind;
	arena_chunk_map_misc_t *miscelm;
	void *rpages;

	assert(run->nfree > 0);
	assert(!bitmap_full(run->bitmap, &bin_info->bitmap_info));

	regind = (unsigned)bitmap_sfu(run->bitmap, &bin_info->bitmap_info);
	miscelm = arena_run_to_miscelm(run);
	rpages = arena_miscelm_to_rpages(miscelm);
	ret = (void *)((uintptr_t)rpages + (uintptr_t)bin_info->reg0_offset +
	    (uintptr_t)(bin_info->reg_interval * regind));
	run->nfree--;
	return (ret);
}

JEMALLOC_INLINE_C void
arena_run_reg_dalloc(arena_run_t *run, void *ptr)
{
	arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t mapbits = arena_mapbits_get(chunk, pageind);
	szind_t binind = arena_ptr_small_binind_get(ptr, mapbits);
	arena_bin_info_t *bin_info = &arena_bin_info[binind];
	size_t regind =
	    arena_run_regind(run, bin_info, ptr);

	assert(run->nfree < bin_info->nregs);
	/* Freeing an interior pointer can cause assertion failure. */
	assert(((uintptr_t)ptr -
	    ((uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset)) %
	    (uintptr_t)bin_info->reg_interval == 0);
	assert((uintptr_t)ptr >=
	    (uintptr_t)arena_miscelm_to_rpages(arena_run_to_miscelm(run)) +
	    (uintptr_t)bin_info->reg0_offset);
	/* Freeing an unallocated pointer can cause assertion failure. */
	assert(bitmap_get(run->bitmap, &bin_info->bitmap_info, regind));

	bitmap_unset(run->bitmap, &bin_info->bitmap_info, regind);
	run->nfree++;
}

JEMALLOC_INLINE_C void
arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages)
{

	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (npages << LG_PAGE));
	memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0,
	    (npages << LG_PAGE));
}

JEMALLOC_INLINE_C void
arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind)
{

	JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind
	    << LG_PAGE)), PAGE);
}

JEMALLOC_INLINE_C void
arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind)
{
	size_t i;
	UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE));

	arena_run_page_mark_zeroed(chunk, run_ind);
	for (i = 0; i < PAGE / sizeof(size_t); i++)
		assert(p[i] == 0);
}

static void
arena_nactive_add(arena_t *arena, size_t add_pages)
{

	if (config_stats) {
		size_t cactive_add = CHUNK_CEILING((arena->nactive +
		    add_pages) << LG_PAGE) - CHUNK_CEILING(arena->nactive <<
		    LG_PAGE);
		if (cactive_add != 0)
			stats_cactive_add(cactive_add);
	}
	arena->nactive += add_pages;
}

static void
arena_nactive_sub(arena_t *arena, size_t sub_pages)
{

	if (config_stats) {
		size_t cactive_sub = CHUNK_CEILING(arena->nactive << LG_PAGE) -
		    CHUNK_CEILING((arena->nactive - sub_pages) << LG_PAGE);
		if (cactive_sub != 0)
			stats_cactive_sub(cactive_sub);
	}
	arena->nactive -= sub_pages;
}

static void
arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind,
    size_t flag_dirty, size_t flag_decommitted, size_t need_pages)
{
	size_t total_pages, rem_pages;

	assert(flag_dirty == 0 || flag_decommitted == 0);

	total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >>
	    LG_PAGE;
	assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) ==
	    flag_dirty);
	assert(need_pages <= total_pages);
	rem_pages = total_pages - need_pages;

	arena_avail_remove(arena, chunk, run_ind, total_pages);
	if (flag_dirty != 0)
		arena_run_dirty_remove(arena, chunk, run_ind, total_pages);
	arena_nactive_add(arena, need_pages);

	/* Keep track of trailing unused pages for later use. */
	if (rem_pages > 0) {
		size_t flags = flag_dirty | flag_decommitted;
		size_t flag_unzeroed_mask = (flags == 0) ?
		    CHUNK_MAP_UNZEROED : 0;

		arena_mapbits_unallocated_set(chunk, run_ind+need_pages,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+need_pages) &
		    flag_unzeroed_mask));
		arena_mapbits_unallocated_set(chunk, run_ind+total_pages-1,
		    (rem_pages << LG_PAGE), flags |
		    (arena_mapbits_unzeroed_get(chunk, run_ind+total_pages-1) &
		    flag_unzeroed_mask));
		if (flag_dirty != 0) {
			arena_run_dirty_insert(arena, chunk, run_ind+need_pages,
			    rem_pages);
		}
		arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages);
	}
}

static bool
arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size,
    bool remove, bool zero)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages;
	size_t flag_unzeroed_mask;

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	if (remove) {
		arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
		    flag_decommitted, need_pages);
	}

	if (zero) {
		if (flag_decommitted != 0) {
			/* The run is untouched, and therefore zeroed. */
			JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
			    *)((uintptr_t)chunk + (run_ind << LG_PAGE)),
			    (need_pages << LG_PAGE));
		} else if (flag_dirty != 0) {
			/* The run is dirty, so all pages must be zeroed. */
			arena_run_zero(chunk, run_ind, need_pages);
		} else {
			/*
			 * The run is clean, so some pages may be zeroed (i.e.
			 * never before touched).
			 */
			size_t i;
			for (i = 0; i < need_pages; i++) {
				if (arena_mapbits_unzeroed_get(chunk, run_ind+i)
				    != 0)
					arena_run_zero(chunk, run_ind+i, 1);
				else if (config_debug) {
					arena_run_page_validate_zeroed(chunk,
					    run_ind+i);
				} else {
					arena_run_page_mark_zeroed(chunk,
					    run_ind+i);
				}
			}
		}
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
		    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	}

	/*
	 * Set the last element first, in case the run only contains one page
	 * (i.e. both statements set the same element).
	 */
	flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ?
	    CHUNK_MAP_UNZEROED : 0;
	arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk,
	    run_ind+need_pages-1)));
	arena_mapbits_large_set(chunk, run_ind, size, flag_dirty |
	    (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, run_ind)));
	return (false);
}

static bool
arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, true, zero));
}

static bool
arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero)
{

	return (arena_run_split_large_helper(arena, run, size, false, zero));
}

static bool
arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size,
    szind_t binind)
{
	arena_chunk_t *chunk;
	arena_chunk_map_misc_t *miscelm;
	size_t flag_dirty, flag_decommitted, run_ind, need_pages, i;

	assert(binind != BININD_INVALID);

	chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run);
	miscelm = arena_run_to_miscelm(run);
	run_ind = arena_miscelm_to_pageind(miscelm);
	flag_dirty = arena_mapbits_dirty_get(chunk, run_ind);
	flag_decommitted = arena_mapbits_decommitted_get(chunk, run_ind);
	need_pages = (size >> LG_PAGE);
	assert(need_pages > 0);

	if (flag_decommitted != 0 && arena->chunk_hooks.commit(chunk, chunksize,
	    run_ind << LG_PAGE, size, arena->ind))
		return (true);

	arena_run_split_remove(arena, chunk, run_ind, flag_dirty,
	    flag_decommitted, need_pages);

	for (i = 0; i < need_pages; i++) {
		size_t flag_unzeroed = arena_mapbits_unzeroed_get(chunk,
		    run_ind+i);
		arena_mapbits_small_set(chunk, run_ind+i, i, binind,
		    flag_unzeroed);
		if (config_debug && flag_dirty == 0 && flag_unzeroed == 0)
			arena_run_page_validate_zeroed(chunk, run_ind+i);
	}
	JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk +
	    (run_ind << LG_PAGE)), (need_pages << LG_PAGE));
	return (false);
}

static arena_chunk_t *
arena_chunk_init_spare(arena_t *arena)
{
	arena_chunk_t *chunk;

	assert(arena->spare != NULL);

	chunk = arena->spare;
	arena->spare = NULL;

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));

	return (chunk);
}

static bool
arena_chunk_register(arena_t *arena, arena_chunk_t *chunk, bool zero)
{

	/*
	 * The extent node notion of "committed" doesn't directly apply to
	 * arena chunks. Arbitrarily mark them as committed. The commit state
	 * of runs is tracked individually, and upon chunk deallocation the
	 * entire chunk is in a consistent commit state.
	 */
	extent_node_init(&chunk->node, arena, chunk, chunksize, zero, true);
	extent_node_achunk_set(&chunk->node, true);
	return (chunk_register(chunk, &chunk->node));
}

static arena_chunk_t *
arena_chunk_alloc_internal_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    bool *zero, bool *commit)
{
	arena_chunk_t *chunk;

	malloc_mutex_unlock(&arena->lock);

	chunk = (arena_chunk_t *)chunk_alloc_wrapper(arena, chunk_hooks, NULL,
	    chunksize, chunksize, zero, commit);
	if (chunk != NULL && !*commit) {
		/* Commit header. */
		if (chunk_hooks->commit(chunk, chunksize, 0, map_bias <<
		    LG_PAGE, arena->ind)) {
			chunk_dalloc_wrapper(arena, chunk_hooks,
			    (void *)chunk, chunksize, *commit);
			chunk = NULL;
		}
	}
	if (chunk != NULL && arena_chunk_register(arena, chunk, *zero)) {
		if (!*commit) {
			/* Undo commit of header. */
			chunk_hooks->decommit(chunk, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}
		chunk_dalloc_wrapper(arena, chunk_hooks, (void *)chunk,
		    chunksize, *commit);
		chunk = NULL;
	}

	malloc_mutex_lock(&arena->lock);
	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc_internal(arena_t *arena, bool *zero, bool *commit)
{
	arena_chunk_t *chunk;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;

	chunk = chunk_alloc_cache(arena, &chunk_hooks, NULL, chunksize,
	    chunksize, zero, true);
	if (chunk != NULL) {
		if (arena_chunk_register(arena, chunk, *zero)) {
			chunk_dalloc_cache(arena, &chunk_hooks, chunk,
			    chunksize, true);
			return (NULL);
		}
		*commit = true;
	}
	if (chunk == NULL) {
		chunk = arena_chunk_alloc_internal_hard(arena, &chunk_hooks,
		    zero, commit);
	}

	if (config_stats && chunk != NULL) {
		arena->stats.mapped += chunksize;
		arena->stats.metadata_mapped += (map_bias << LG_PAGE);
	}

	return (chunk);
}

static arena_chunk_t *
arena_chunk_init_hard(arena_t *arena)
{
	arena_chunk_t *chunk;
	bool zero, commit;
	size_t flag_unzeroed, flag_decommitted, i;

	assert(arena->spare == NULL);

	zero = false;
	commit = false;
	chunk = arena_chunk_alloc_internal(arena, &zero, &commit);
	if (chunk == NULL)
		return (NULL);

	/*
	 * Initialize the map to contain one maximal free untouched run. Mark
	 * the pages as zeroed if chunk_alloc() returned a zeroed or decommitted
	 * chunk.
	 */
	flag_unzeroed = (zero || !commit) ? 0 : CHUNK_MAP_UNZEROED;
	flag_decommitted = commit ? 0 : CHUNK_MAP_DECOMMITTED;
	arena_mapbits_unallocated_set(chunk, map_bias, arena_maxrun,
	    flag_unzeroed | flag_decommitted);
	/*
	 * There is no need to initialize the internal page map entries unless
	 * the chunk is not zeroed.
	 */
	if (!zero) {
		JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(
		    (void *)arena_bitselm_get(chunk, map_bias+1),
		    (size_t)((uintptr_t) arena_bitselm_get(chunk,
		    chunk_npages-1) - (uintptr_t)arena_bitselm_get(chunk,
		    map_bias+1)));
		for (i = map_bias+1; i < chunk_npages-1; i++)
			arena_mapbits_internal_set(chunk, i, flag_unzeroed);
	} else {
		JEMALLOC_VALGRIND_MAKE_MEM_DEFINED((void
		    *)arena_bitselm_get(chunk, map_bias+1), (size_t)((uintptr_t)
		    arena_bitselm_get(chunk, chunk_npages-1) -
		    (uintptr_t)arena_bitselm_get(chunk, map_bias+1)));
		if (config_debug) {
			for (i = map_bias+1; i < chunk_npages-1; i++) {
				assert(arena_mapbits_unzeroed_get(chunk, i) ==
				    flag_unzeroed);
			}
		}
	}
	arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxrun,
	    flag_unzeroed);

	return (chunk);
}

static arena_chunk_t *
arena_chunk_alloc(arena_t *arena)
{
	arena_chunk_t *chunk;

	if (arena->spare != NULL)
		chunk = arena_chunk_init_spare(arena);
	else {
		chunk = arena_chunk_init_hard(arena);
		if (chunk == NULL)
			return (NULL);
	}

	arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias);

	return (chunk);
}

static void
arena_chunk_dalloc(arena_t *arena, arena_chunk_t *chunk)
{

	assert(arena_mapbits_allocated_get(chunk, map_bias) == 0);
	assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0);
	assert(arena_mapbits_unallocated_size_get(chunk, map_bias) ==
	    arena_maxrun);
	assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) ==
	    arena_maxrun);
	assert(arena_mapbits_dirty_get(chunk, map_bias) ==
	    arena_mapbits_dirty_get(chunk, chunk_npages-1));
	assert(arena_mapbits_decommitted_get(chunk, map_bias) ==
	    arena_mapbits_decommitted_get(chunk, chunk_npages-1));

	/* Remove run from runs_avail, so that the arena does not use it. */
	arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias);

	if (arena->spare != NULL) {
		arena_chunk_t *spare = arena->spare;
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		bool committed;

		arena->spare = chunk;
		if (arena_mapbits_dirty_get(spare, map_bias) != 0) {
			arena_run_dirty_remove(arena, spare, map_bias,
			    chunk_npages-map_bias);
		}

		chunk_deregister(spare, &spare->node);

		committed = (arena_mapbits_decommitted_get(spare, map_bias) ==
		    0);
		if (!committed) {
			/*
			 * Decommit the header. Mark the chunk as decommitted
			 * even if header decommit fails, since treating a
			 * partially committed chunk as committed has a high
			 * potential for causing later access of decommitted
			 * memory.
			 */
			chunk_hooks = chunk_hooks_get(arena);
			chunk_hooks.decommit(spare, chunksize, 0, map_bias <<
			    LG_PAGE, arena->ind);
		}

		chunk_dalloc_cache(arena, &chunk_hooks, (void *)spare,
		    chunksize, committed);

		if (config_stats) {
			arena->stats.mapped -= chunksize;
			arena->stats.metadata_mapped -= (map_bias << LG_PAGE);
		}
	} else
		arena->spare = chunk;
}

static void
arena_huge_malloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge++;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].nmalloc++;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_malloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.nmalloc_huge--;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].nmalloc--;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge++;
	arena->stats.allocated_huge -= usize;
	arena->stats.hstats[index].ndalloc++;
	arena->stats.hstats[index].curhchunks--;
}

static void
arena_huge_dalloc_stats_update_undo(arena_t *arena, size_t usize)
{
	szind_t index = size2index(usize) - nlclasses - NBINS;

	cassert(config_stats);

	arena->stats.ndalloc_huge--;
	arena->stats.allocated_huge += usize;
	arena->stats.hstats[index].ndalloc--;
	arena->stats.hstats[index].curhchunks++;
}

static void
arena_huge_ralloc_stats_update(arena_t *arena, size_t oldsize, size_t usize)
{

	arena_huge_dalloc_stats_update(arena, oldsize);
	arena_huge_malloc_stats_update(arena, usize);
}

static void
arena_huge_ralloc_stats_update_undo(arena_t *arena, size_t oldsize,
    size_t usize)
{

	arena_huge_dalloc_stats_update_undo(arena, oldsize);
	arena_huge_malloc_stats_update_undo(arena, usize);
}

extent_node_t *
arena_node_alloc(arena_t *arena)
{
	extent_node_t *node;

	malloc_mutex_lock(&arena->node_cache_mtx);
	node = ql_last(&arena->node_cache, ql_link);
	if (node == NULL) {
		malloc_mutex_unlock(&arena->node_cache_mtx);
		return (base_alloc(sizeof(extent_node_t)));
	}
	ql_tail_remove(&arena->node_cache, extent_node_t, ql_link);
	malloc_mutex_unlock(&arena->node_cache_mtx);
	return (node);
}

void
arena_node_dalloc(arena_t *arena, extent_node_t *node)
{

	malloc_mutex_lock(&arena->node_cache_mtx);
	ql_elm_new(node, ql_link);
	ql_tail_insert(&arena->node_cache, node, ql_link);
	malloc_mutex_unlock(&arena->node_cache_mtx);
}

static void *
arena_chunk_alloc_huge_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t usize, size_t alignment, bool *zero, size_t csize)
{
	void *ret;
	bool commit = true;

	ret = chunk_alloc_wrapper(arena, chunk_hooks, NULL, csize, alignment,
	    zero, &commit);
	if (ret == NULL) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(&arena->lock);
		if (config_stats) {
			arena_huge_malloc_stats_update_undo(arena, usize);
			arena->stats.mapped -= usize;
		}
		arena_nactive_sub(arena, usize >> LG_PAGE);
		malloc_mutex_unlock(&arena->lock);
	}

	return (ret);
}

void *
arena_chunk_alloc_huge(arena_t *arena, size_t usize, size_t alignment,
    bool *zero)
{
	void *ret;
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize = CHUNK_CEILING(usize);

	malloc_mutex_lock(&arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_malloc_stats_update(arena, usize);
		arena->stats.mapped += usize;
	}
	arena_nactive_add(arena, usize >> LG_PAGE);

	ret = chunk_alloc_cache(arena, &chunk_hooks, NULL, csize, alignment,
	    zero, true);
	malloc_mutex_unlock(&arena->lock);
	if (ret == NULL) {
		ret = arena_chunk_alloc_huge_hard(arena, &chunk_hooks, usize,
		    alignment, zero, csize);
	}

	return (ret);
}

void
arena_chunk_dalloc_huge(arena_t *arena, void *chunk, size_t usize)
{
	chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
	size_t csize;

	csize = CHUNK_CEILING(usize);
	malloc_mutex_lock(&arena->lock);
	if (config_stats) {
		arena_huge_dalloc_stats_update(arena, usize);
		arena->stats.mapped -= usize;
	}
	arena_nactive_sub(arena, usize >> LG_PAGE);

	chunk_dalloc_cache(arena, &chunk_hooks, chunk, csize, true);
	malloc_mutex_unlock(&arena->lock);
}

void
arena_chunk_ralloc_huge_similar(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{

	assert(CHUNK_CEILING(oldsize) == CHUNK_CEILING(usize));
	assert(oldsize != usize);

	malloc_mutex_lock(&arena->lock);
	if (config_stats)
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
	if (oldsize < usize)
		arena_nactive_add(arena, (usize - oldsize) >> LG_PAGE);
	else
		arena_nactive_sub(arena, (oldsize - usize) >> LG_PAGE);
	malloc_mutex_unlock(&arena->lock);
}

void
arena_chunk_ralloc_huge_shrink(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize)
{
	size_t udiff = oldsize - usize;
	size_t cdiff = CHUNK_CEILING(oldsize) - CHUNK_CEILING(usize);

	malloc_mutex_lock(&arena->lock);
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		if (cdiff != 0)
			arena->stats.mapped -= cdiff;
	}
	arena_nactive_sub(arena, udiff >> LG_PAGE);

	if (cdiff != 0) {
		chunk_hooks_t chunk_hooks = CHUNK_HOOKS_INITIALIZER;
		void *nchunk = (void *)((uintptr_t)chunk +
		    CHUNK_CEILING(usize));

		chunk_dalloc_cache(arena, &chunk_hooks, nchunk, cdiff, true);
	}
	malloc_mutex_unlock(&arena->lock);
}

static bool
arena_chunk_ralloc_huge_expand_hard(arena_t *arena, chunk_hooks_t *chunk_hooks,
    void *chunk, size_t oldsize, size_t usize, bool *zero, void *nchunk,
    size_t udiff, size_t cdiff)
{
	bool err;
	bool commit = true;

	err = (chunk_alloc_wrapper(arena, chunk_hooks, nchunk, cdiff, chunksize,
	    zero, &commit) == NULL);
	if (err) {
		/* Revert optimistic stats updates. */
		malloc_mutex_lock(&arena->lock);
		if (config_stats) {
			arena_huge_ralloc_stats_update_undo(arena, oldsize,
			    usize);
			arena->stats.mapped -= cdiff;
		}
		arena_nactive_sub(arena, udiff >> LG_PAGE);
		malloc_mutex_unlock(&arena->lock);
	} else if (chunk_hooks->merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_arena(arena, chunk_hooks, nchunk, cdiff, *zero,
		    true);
		err = true;
	}
	return (err);
}

bool
arena_chunk_ralloc_huge_expand(arena_t *arena, void *chunk, size_t oldsize,
    size_t usize, bool *zero)
{
	bool err;
	chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
	void *nchunk = (void *)((uintptr_t)chunk + CHUNK_CEILING(oldsize));
	size_t udiff = usize - oldsize;
	size_t cdiff = CHUNK_CEILING(usize) - CHUNK_CEILING(oldsize);

	malloc_mutex_lock(&arena->lock);

	/* Optimistically update stats. */
	if (config_stats) {
		arena_huge_ralloc_stats_update(arena, oldsize, usize);
		arena->stats.mapped += cdiff;
	}
	arena_nactive_add(arena, udiff >> LG_PAGE);

	err = (chunk_alloc_cache(arena, &arena->chunk_hooks, nchunk, cdiff,
	    chunksize, zero, true) == NULL);
	malloc_mutex_unlock(&arena->lock);
	if (err) {
		err = arena_chunk_ralloc_huge_expand_hard(arena, &chunk_hooks,
		    chunk, oldsize, usize, zero, nchunk, udiff, cdiff);
	} else if (chunk_hooks.merge(chunk, CHUNK_CEILING(oldsize), nchunk,
	    cdiff, true, arena->ind)) {
		chunk_dalloc_arena(arena, &chunk_hooks, nchunk, cdiff, *zero,
		    true);
		err = true;
	}

	return (err);
}

/*
 * Do first-best-fit run selection, i.e. select the lowest run that best fits.
 * Run sizes are indexed, so not all candidate runs are necessarily exactly the
 * same size.
 */
static arena_run_t *
arena_run_first_best_fit(arena_t *arena, size_t size)
{
	szind_t ind, i;

	ind = size2index(run_quantize_ceil(size));
	for (i = ind; i < runs_avail_nclasses + runs_avail_bias; i++) {
		arena_chunk_map_misc_t *miscelm = arena_run_tree_first(
		    arena_runs_avail_get(arena, i));
		if (miscelm != NULL)
			return (&miscelm->run);
	}

	return (NULL);
}

static arena_run_t *
arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero)
{
	arena_run_t *run = arena_run_first_best_fit(arena, s2u(size));
	if (run != NULL) {
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_large(arena_t *arena, size_t size, bool zero)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_large_helper(arena, size, zero);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs. Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get(chunk, map_bias)->run;
		if (arena_run_split_large(arena, run, size, zero))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_large_helper(arena, size, zero));
}

static arena_run_t *
arena_run_alloc_small_helper(arena_t *arena, size_t size, szind_t binind)
{
	arena_run_t *run = arena_run_first_best_fit(arena, size);
	if (run != NULL) {
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
	}
	return (run);
}

static arena_run_t *
arena_run_alloc_small(arena_t *arena, size_t size, szind_t binind)
{
	arena_chunk_t *chunk;
	arena_run_t *run;

	assert(size <= arena_maxrun);
	assert(size == PAGE_CEILING(size));
	assert(binind != BININD_INVALID);

	/* Search the arena's chunks for the lowest best fit. */
	run = arena_run_alloc_small_helper(arena, size, binind);
	if (run != NULL)
		return (run);

	/*
	 * No usable runs. Create a new chunk from which to allocate the run.
	 */
	chunk = arena_chunk_alloc(arena);
	if (chunk != NULL) {
		run = &arena_miscelm_get(chunk, map_bias)->run;
		if (arena_run_split_small(arena, run, size, binind))
			run = NULL;
		return (run);
	}

	/*
	 * arena_chunk_alloc() failed, but another thread may have made
	 * sufficient memory available while this one dropped arena->lock in
	 * arena_chunk_alloc(), so search one more time.
	 */
	return (arena_run_alloc_small_helper(arena, size, binind));
}

static bool
arena_lg_dirty_mult_valid(ssize_t lg_dirty_mult)
{

	return (lg_dirty_mult >= -1 && lg_dirty_mult < (ssize_t)(sizeof(size_t)
	    << 3));
}

ssize_t
arena_lg_dirty_mult_get(arena_t *arena)
{
	ssize_t lg_dirty_mult;

	malloc_mutex_lock(&arena->lock);
	lg_dirty_mult = arena->lg_dirty_mult;
	malloc_mutex_unlock(&arena->lock);

	return (lg_dirty_mult);
}

bool
arena_lg_dirty_mult_set(arena_t *arena, ssize_t lg_dirty_mult)
{

	if (!arena_lg_dirty_mult_valid(lg_dirty_mult))
		return (true);

	malloc_mutex_lock(&arena->lock);
	arena->lg_dirty_mult = lg_dirty_mult;
	arena_maybe_purge(arena);
	malloc_mutex_unlock(&arena->lock);

	return (false);
}

static void
arena_decay_deadline_init(arena_t *arena)
{

	assert(opt_purge == purge_mode_decay);

	/*
	 * Generate a new deadline that is uniformly random within the next
	 * epoch after the current one.
	 */
	nstime_copy(&arena->decay_deadline, &arena->decay_epoch);
	nstime_add(&arena->decay_deadline, &arena->decay_interval);
	if (arena->decay_time > 0) {
		nstime_t jitter;

		nstime_init(&jitter, prng_range(&arena->decay_jitter_state,
		    nstime_ns(&arena->decay_interval)));
		nstime_add(&arena->decay_deadline, &jitter);
	}
}

static bool
arena_decay_deadline_reached(const arena_t *arena, const nstime_t *time)
{

	assert(opt_purge == purge_mode_decay);

	return (nstime_compare(&arena->decay_deadline, time) <= 0);
}

static size_t
arena_decay_backlog_npages_limit(const arena_t *arena)
{
	static const uint64_t h_steps[] = {
#define STEP(step, h, x, y) \
		h,
		SMOOTHSTEP
#undef STEP
	};
	uint64_t sum;
	size_t npages_limit_backlog;
	unsigned i;

	assert(opt_purge == purge_mode_decay);

	/*
	 * For each element of decay_backlog, multiply by the corresponding
	 * fixed-point smoothstep decay factor. Sum the products, then divide
	 * to round down to the nearest whole number of pages.
	 */
	sum = 0;
	for (i = 0; i < SMOOTHSTEP_NSTEPS; i++)
		sum += arena->decay_backlog[i] * h_steps[i];
	npages_limit_backlog = (sum >> SMOOTHSTEP_BFP);

	return (npages_limit_backlog);
}

static void
arena_decay_epoch_advance(arena_t *arena, const nstime_t *time)
{
	uint64_t nadvance;
	nstime_t delta;
	size_t ndirty_delta;

	assert(opt_purge == purge_mode_decay);
	assert(arena_decay_deadline_reached(arena, time));

	nstime_copy(&delta, time);
	nstime_subtract(&delta, &arena->decay_epoch);
	nadvance = nstime_divide(&delta, &arena->decay_interval);
	assert(nadvance > 0);

	/* Add nadvance decay intervals to epoch. */
	nstime_copy(&delta, &arena->decay_interval);
	nstime_imultiply(&delta, nadvance);
	nstime_add(&arena->decay_epoch, &delta);

	/* Set a new deadline. */
	arena_decay_deadline_init(arena);

	/* Update the backlog. */
	if (nadvance >= SMOOTHSTEP_NSTEPS) {
		memset(arena->decay_backlog, 0, (SMOOTHSTEP_NSTEPS-1) *
		    sizeof(size_t));
	} else {
		memmove(arena->decay_backlog, &arena->decay_backlog[nadvance],
		    (SMOOTHSTEP_NSTEPS - nadvance) * sizeof(size_t));
		if (nadvance > 1) {
			memset(&arena->decay_backlog[SMOOTHSTEP_NSTEPS -
			    nadvance], 0, (nadvance-1) * sizeof(size_t));
		}
	}
	ndirty_delta = (arena->ndirty > arena->decay_ndirty) ? arena->ndirty -
	    arena->decay_ndirty : 0;
	arena->decay_ndirty = arena->ndirty;
	arena->decay_backlog[SMOOTHSTEP_NSTEPS-1] = ndirty_delta;
	arena->decay_backlog_npages_limit =
	    arena_decay_backlog_npages_limit(arena);
}

static size_t
arena_decay_npages_limit(arena_t *arena)
{
	size_t npages_limit;

	assert(opt_purge == purge_mode_decay);

	npages_limit = arena->decay_backlog_npages_limit;

	/* Add in any dirty pages created during the current epoch. */
	if (arena->ndirty > arena->decay_ndirty)
		npages_limit += arena->ndirty - arena->decay_ndirty;

	return (npages_limit);
}

static void
arena_decay_init(arena_t *arena, ssize_t decay_time)
{

	arena->decay_time = decay_time;
	if (decay_time > 0) {
		nstime_init2(&arena->decay_interval, decay_time, 0);
		nstime_idivide(&arena->decay_interval, SMOOTHSTEP_NSTEPS);
	}

	nstime_init(&arena->decay_epoch, 0);
	nstime_update(&arena->decay_epoch);
	arena->decay_jitter_state = (uint64_t)(uintptr_t)arena;
	arena_decay_deadline_init(arena);
	arena->decay_ndirty = arena->ndirty;
	arena->decay_backlog_npages_limit = 0;
	memset(arena->decay_backlog, 0, SMOOTHSTEP_NSTEPS * sizeof(size_t));
}

static bool
arena_decay_time_valid(ssize_t decay_time)
{

	if (decay_time < -1)
		return (false);
	if (decay_time == -1 || (uint64_t)decay_time <= NSTIME_SEC_MAX)
		return (true);
	return (false);
}

ssize_t
arena_decay_time_get(arena_t *arena)
{
	ssize_t decay_time;

	malloc_mutex_lock(&arena->lock);
	decay_time = arena->decay_time;
	malloc_mutex_unlock(&arena->lock);

	return (decay_time);
}

bool
arena_decay_time_set(arena_t *arena, ssize_t decay_time)
{

	if (!arena_decay_time_valid(decay_time))
		return (true);

	malloc_mutex_lock(&arena->lock);
	/*
	 * Restart decay backlog from scratch, which may cause many dirty pages
	 * to be immediately purged. It would conceptually be possible to map
	 * the old backlog onto the new backlog, but there is no justification
	 * for such complexity since decay_time changes are intended to be
	 * infrequent, either between the {-1, 0, >0} states, or a one-time
	 * arbitrary change during initial arena configuration.
	 */
	arena_decay_init(arena, decay_time);
	arena_maybe_purge(arena);
	malloc_mutex_unlock(&arena->lock);

	return (false);
}

static void
arena_maybe_purge_ratio(arena_t *arena)
{

	assert(opt_purge == purge_mode_ratio);

	/* Don't purge if the option is disabled. */
	if (arena->lg_dirty_mult < 0)
		return;

	/*
	 * Iterate, since preventing recursive purging could otherwise leave too
	 * many dirty pages.
	 */
	while (true) {
		size_t threshold = (arena->nactive >> arena->lg_dirty_mult);
		if (threshold < chunk_npages)
			threshold = chunk_npages;
		/*
		 * Don't purge unless the number of purgeable pages exceeds the
		 * threshold.
		 */
		if (arena->ndirty <= threshold)
			return;
		arena_purge_to_limit(arena, threshold);
	}
}

static void
arena_maybe_purge_decay(arena_t *arena)
{
	nstime_t time;
	size_t ndirty_limit;

	assert(opt_purge == purge_mode_decay);

	/* Purge all or nothing if the option is disabled. */
	if (arena->decay_time <= 0) {
		if (arena->decay_time == 0)
			arena_purge_to_limit(arena, 0);
		return;
	}

	nstime_copy(&time, &arena->decay_epoch);
	if (unlikely(nstime_update(&time))) {
		/* Time went backwards. Force an epoch advance. */
		nstime_copy(&time, &arena->decay_deadline);
	}

	if (arena_decay_deadline_reached(arena, &time))
		arena_decay_epoch_advance(arena, &time);

	ndirty_limit = arena_decay_npages_limit(arena);

	/*
	 * Don't try to purge unless the number of purgeable pages exceeds the
	 * current limit.
	 */
	if (arena->ndirty <= ndirty_limit)
		return;
	arena_purge_to_limit(arena, ndirty_limit);
}

void
arena_maybe_purge(arena_t *arena)
{

	/* Don't recursively purge. */
	if (arena->purging)
		return;

	if (opt_purge == purge_mode_ratio)
		arena_maybe_purge_ratio(arena);
	else
		arena_maybe_purge_decay(arena);
}

static size_t
arena_dirty_count(arena_t *arena)
{
	size_t ndirty = 0;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(
			    rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			assert(arena_mapbits_allocated_get(chunk, pageind) ==
			    0);
			assert(arena_mapbits_large_get(chunk, pageind) == 0);
			assert(arena_mapbits_dirty_get(chunk, pageind) != 0);
			npages = arena_mapbits_unallocated_size_get(chunk,
			    pageind) >> LG_PAGE;
		}
		ndirty += npages;
	}

	return (ndirty);
}

static size_t
arena_stash_dirty(arena_t *arena, chunk_hooks_t *chunk_hooks,
    size_t ndirty_limit, arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;
	size_t nstashed = 0;

	/* Stash runs/chunks according to ndirty_limit. */
	for (rdelm = qr_next(&arena->runs_dirty, rd_link),
	    chunkselm = qr_next(&arena->chunks_cache, cc_link);
	    rdelm != &arena->runs_dirty; rdelm = rdelm_next) {
		size_t npages;
		rdelm_next = qr_next(rdelm, rd_link);

		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next;
			bool zero;
			UNUSED void *chunk;

			npages = extent_node_size_get(chunkselm) >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			chunkselm_next = qr_next(chunkselm, cc_link);
			/*
			 * Allocate. chunkselm remains valid due to the
			 * dalloc_node=false argument to chunk_alloc_cache().
			 */
			zero = false;
			chunk = chunk_alloc_cache(arena, chunk_hooks,
			    extent_node_addr_get(chunkselm),
			    extent_node_size_get(chunkselm), chunksize, &zero,
			    false);
			assert(chunk == extent_node_addr_get(chunkselm));
			assert(zero == extent_node_zeroed_get(chunkselm));
			extent_node_dirty_insert(chunkselm, purge_runs_sentinel,
			    purge_chunks_sentinel);
			assert(npages == (extent_node_size_get(chunkselm) >>
			    LG_PAGE));
			chunkselm = chunkselm_next;
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			arena_run_t *run = &miscelm->run;
			size_t run_size =
			    arena_mapbits_unallocated_size_get(chunk, pageind);

			npages = run_size >> LG_PAGE;
			if (opt_purge == purge_mode_decay && arena->ndirty -
			    (nstashed + npages) < ndirty_limit)
				break;

			assert(pageind + npages <= chunk_npages);
			assert(arena_mapbits_dirty_get(chunk, pageind) ==
			    arena_mapbits_dirty_get(chunk, pageind+npages-1));

			/*
			 * If purging the spare chunk's run, make it available
			 * prior to allocation.
			 */
			if (chunk == arena->spare)
				arena_chunk_alloc(arena);

			/* Temporarily allocate the free dirty run. */
			arena_run_split_large(arena, run, run_size, false);
			/* Stash. */
			if (false)
				qr_new(rdelm, rd_link); /* Redundant. */
			else {
				assert(qr_next(rdelm, rd_link) == rdelm);
				assert(qr_prev(rdelm, rd_link) == rdelm);
			}
			qr_meld(purge_runs_sentinel, rdelm, rd_link);
		}

		nstashed += npages;
		if (opt_purge == purge_mode_ratio && arena->ndirty - nstashed <=
		    ndirty_limit)
			break;
	}

	return (nstashed);
}

static size_t
arena_purge_stashed(arena_t *arena, chunk_hooks_t *chunk_hooks,
    arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	size_t npurged, nmadvise;
	arena_runs_dirty_link_t *rdelm;
	extent_node_t *chunkselm;

	if (config_stats)
		nmadvise = 0;
	npurged = 0;

	malloc_mutex_unlock(&arena->lock);
	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
	    rdelm != purge_runs_sentinel; rdelm = qr_next(rdelm, rd_link)) {
		size_t npages;

		if (rdelm == &chunkselm->rd) {
			/*
			 * Don't actually purge the chunk here because 1)
			 * chunkselm is embedded in the chunk and must remain
			 * valid, and 2) we deallocate the chunk in
			 * arena_unstash_purged(), where it is destroyed,
			 * decommitted, or purged, depending on chunk
			 * deallocation policy.
			 */
			size_t size = extent_node_size_get(chunkselm);
			npages = size >> LG_PAGE;
			chunkselm = qr_next(chunkselm, cc_link);
		} else {
			size_t pageind, run_size, flag_unzeroed, flags, i;
			bool decommitted;
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			pageind = arena_miscelm_to_pageind(miscelm);
			run_size = arena_mapbits_large_size_get(chunk, pageind);
			npages = run_size >> LG_PAGE;

			assert(pageind + npages <= chunk_npages);
			assert(!arena_mapbits_decommitted_get(chunk, pageind));
			assert(!arena_mapbits_decommitted_get(chunk,
			    pageind+npages-1));
			decommitted = !chunk_hooks->decommit(chunk, chunksize,
			    pageind << LG_PAGE, npages << LG_PAGE, arena->ind);
			if (decommitted) {
				flag_unzeroed = 0;
				flags = CHUNK_MAP_DECOMMITTED;
			} else {
				flag_unzeroed = chunk_purge_wrapper(arena,
				    chunk_hooks, chunk, chunksize, pageind <<
				    LG_PAGE, run_size) ? CHUNK_MAP_UNZEROED : 0;
				flags = flag_unzeroed;
			}
			arena_mapbits_large_set(chunk, pageind+npages-1, 0,
			    flags);
			arena_mapbits_large_set(chunk, pageind, run_size,
			    flags);

			/*
			 * Set the unzeroed flag for internal pages, now that
			 * chunk_purge_wrapper() has returned whether the pages
			 * were zeroed as a side effect of purging. This chunk
			 * map modification is safe even though the arena mutex
			 * isn't currently owned by this thread, because the run
			 * is marked as allocated, thus protecting it from being
			 * modified by any other thread. As long as these
			 * writes don't perturb the first and last elements'
			 * CHUNK_MAP_ALLOCATED bits, behavior is well defined.
			 */
			for (i = 1; i < npages-1; i++) {
				arena_mapbits_internal_set(chunk, pageind+i,
				    flag_unzeroed);
			}
		}

		npurged += npages;
		if (config_stats)
			nmadvise++;
	}
	malloc_mutex_lock(&arena->lock);

	if (config_stats) {
		arena->stats.nmadvise += nmadvise;
		arena->stats.purged += npurged;
	}

	return (npurged);
}

static void
arena_unstash_purged(arena_t *arena, chunk_hooks_t *chunk_hooks,
    arena_runs_dirty_link_t *purge_runs_sentinel,
    extent_node_t *purge_chunks_sentinel)
{
	arena_runs_dirty_link_t *rdelm, *rdelm_next;
	extent_node_t *chunkselm;

	/* Deallocate chunks/runs. */
	for (rdelm = qr_next(purge_runs_sentinel, rd_link),
	    chunkselm = qr_next(purge_chunks_sentinel, cc_link);
	    rdelm != purge_runs_sentinel; rdelm = rdelm_next) {
		rdelm_next = qr_next(rdelm, rd_link);
		if (rdelm == &chunkselm->rd) {
			extent_node_t *chunkselm_next = qr_next(chunkselm,
			    cc_link);
			void *addr = extent_node_addr_get(chunkselm);
			size_t size = extent_node_size_get(chunkselm);
			bool zeroed = extent_node_zeroed_get(chunkselm);
			bool committed = extent_node_committed_get(chunkselm);
			extent_node_dirty_remove(chunkselm);
			arena_node_dalloc(arena, chunkselm);
			chunkselm = chunkselm_next;
			chunk_dalloc_arena(arena, chunk_hooks, addr, size,
			    zeroed, committed);
		} else {
			arena_chunk_t *chunk =
			    (arena_chunk_t *)CHUNK_ADDR2BASE(rdelm);
			arena_chunk_map_misc_t *miscelm =
			    arena_rd_to_miscelm(rdelm);
			size_t pageind = arena_miscelm_to_pageind(miscelm);
			bool decommitted = (arena_mapbits_decommitted_get(chunk,
			    pageind) != 0);
			arena_run_t *run = &miscelm->run;
			qr_remove(rdelm, rd_link);
			arena_run_dalloc(arena, run, false, true, decommitted);
		}
	}
}

/*
 * NB: ndirty_limit is interpreted differently depending on opt_purge:
 * - purge_mode_ratio: Purge as few dirty run/chunks as possible to reach the
 *   desired state:
 *       (arena->ndirty <= ndirty_limit)
 * - purge_mode_decay: Purge as many dirty runs/chunks as possible without
 *   violating the invariant:
 *       (arena->ndirty >= ndirty_limit)
 */
static void
arena_purge_to_limit(arena_t *arena, size_t ndirty_limit)
{
	chunk_hooks_t chunk_hooks = chunk_hooks_get(arena);
	size_t npurge, npurged;
	arena_runs_dirty_link_t purge_runs_sentinel;
	extent_node_t purge_chunks_sentinel;

	arena->purging = true;

	/*
	 * Calls to arena_dirty_count() are disabled even for debug builds
	 * because overhead grows nonlinearly as memory usage increases.
	 */
	if (false && config_debug) {
		size_t ndirty = arena_dirty_count(arena);
		assert(ndirty == arena->ndirty);
	}
	assert(opt_purge != purge_mode_ratio || (arena->nactive >>
	    arena->lg_dirty_mult) < arena->ndirty || ndirty_limit == 0);

	qr_new(&purge_runs_sentinel, rd_link);
	extent_node_dirty_linkage_init(&purge_chunks_sentinel);

	npurge = arena_stash_dirty(arena, &chunk_hooks, ndirty_limit,
	    &purge_runs_sentinel, &purge_chunks_sentinel);
	if (npurge == 0)
		goto label_return;
	npurged = arena_purge_stashed(arena, &chunk_hooks, &purge_runs_sentinel,
	    &purge_chunks_sentinel);
	assert(npurged == npurge);
	arena_unstash_purged(arena, &chunk_hooks, &purge_runs_sentinel,
	    &purge_chunks_sentinel);

	if (config_stats)
		arena->stats.npurge++;

label_return:
	arena->purging = false;
}

void
arena_purge(arena_t *arena, bool all)
{

	malloc_mutex_lock(&arena->lock);
	if (all)
		arena_purge_to_limit(arena, 0);
	else
		arena_maybe_purge(arena);
	malloc_mutex_unlock(&arena->lock);
}

static void
arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size,
    size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty,
    size_t flag_decommitted)
{
	size_t size = *p_size;
	size_t run_ind = *p_run_ind;
	size_t run_pages = *p_run_pages;

	/* Try to coalesce forward. */
	if (run_ind + run_pages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 &&
	    arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty &&
	    arena_mapbits_decommitted_get(chunk, run_ind+run_pages) ==
	    flag_decommitted) {
		size_t nrun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages);
		size_t nrun_pages = nrun_size >> LG_PAGE;

		/*
		 * Remove successor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == nrun_size);
		assert(arena_mapbits_dirty_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_dirty);
		assert(arena_mapbits_decommitted_get(chunk,
		    run_ind+run_pages+nrun_pages-1) == flag_decommitted);
		arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages);

		/*
		 * If the successor is dirty, remove it from the set of dirty
		 * pages.
		 */
		if (flag_dirty != 0) {
			arena_run_dirty_remove(arena, chunk, run_ind+run_pages,
			    nrun_pages);
		}

		size += nrun_size;
		run_pages += nrun_pages;

		arena_mapbits_unallocated_size_set(chunk, run_ind, size);
		arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1,
		    size);
	}

	/* Try to coalesce backward. */
	if (run_ind > map_bias && arena_mapbits_allocated_get(chunk,
	    run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) ==
	    flag_dirty && arena_mapbits_decommitted_get(chunk, run_ind-1) ==
	    flag_decommitted) {
		size_t prun_size = arena_mapbits_unallocated_size_get(chunk,
		    run_ind-1);
		size_t prun_pages = prun_size >> LG_PAGE;

		run_ind -= prun_pages;

		/*
		 * Remove predecessor from runs_avail; the coalesced run is
		 * inserted later.
		 */
		assert(arena_mapbits_unallocated_size_get(chunk, run_ind) ==
		    prun_size);
		assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty);
		assert(arena_mapbits_decommitted_get(chunk, run_ind) ==
		    flag_decommitted);
		arena_avail_remove(arena, chunk, run_ind, prun_pages);

		/*
		 * If the predecessor is dirty, remove it from the set of dirty
		 * pages.
1866 */ 1867 if (flag_dirty != 0) { 1868 arena_run_dirty_remove(arena, chunk, run_ind, 1869 prun_pages); 1870 } 1871 1872 size += prun_size; 1873 run_pages += prun_pages; 1874 1875 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 1876 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 1877 size); 1878 } 1879 1880 *p_size = size; 1881 *p_run_ind = run_ind; 1882 *p_run_pages = run_pages; 1883 } 1884 1885 static size_t 1886 arena_run_size_get(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1887 size_t run_ind) 1888 { 1889 size_t size; 1890 1891 assert(run_ind >= map_bias); 1892 assert(run_ind < chunk_npages); 1893 1894 if (arena_mapbits_large_get(chunk, run_ind) != 0) { 1895 size = arena_mapbits_large_size_get(chunk, run_ind); 1896 assert(size == PAGE || arena_mapbits_large_size_get(chunk, 1897 run_ind+(size>>LG_PAGE)-1) == 0); 1898 } else { 1899 arena_bin_info_t *bin_info = &arena_bin_info[run->binind]; 1900 size = bin_info->run_size; 1901 } 1902 1903 return (size); 1904 } 1905 1906 static void 1907 arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned, 1908 bool decommitted) 1909 { 1910 arena_chunk_t *chunk; 1911 arena_chunk_map_misc_t *miscelm; 1912 size_t size, run_ind, run_pages, flag_dirty, flag_decommitted; 1913 1914 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1915 miscelm = arena_run_to_miscelm(run); 1916 run_ind = arena_miscelm_to_pageind(miscelm); 1917 assert(run_ind >= map_bias); 1918 assert(run_ind < chunk_npages); 1919 size = arena_run_size_get(arena, chunk, run, run_ind); 1920 run_pages = (size >> LG_PAGE); 1921 arena_nactive_sub(arena, run_pages); 1922 1923 /* 1924 * The run is dirty if the caller claims to have dirtied it, as well as 1925 * if it was already dirty before being allocated and the caller 1926 * doesn't claim to have cleaned it. 1927 */ 1928 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1929 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 1930 if (!cleaned && !decommitted && arena_mapbits_dirty_get(chunk, run_ind) 1931 != 0) 1932 dirty = true; 1933 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; 1934 flag_decommitted = decommitted ? CHUNK_MAP_DECOMMITTED : 0; 1935 1936 /* Mark pages as unallocated in the chunk map. */ 1937 if (dirty || decommitted) { 1938 size_t flags = flag_dirty | flag_decommitted; 1939 arena_mapbits_unallocated_set(chunk, run_ind, size, flags); 1940 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 1941 flags); 1942 } else { 1943 arena_mapbits_unallocated_set(chunk, run_ind, size, 1944 arena_mapbits_unzeroed_get(chunk, run_ind)); 1945 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 1946 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); 1947 } 1948 1949 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, 1950 flag_dirty, flag_decommitted); 1951 1952 /* Insert into runs_avail, now that coalescing is complete. */ 1953 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 1954 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); 1955 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1956 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 1957 assert(arena_mapbits_decommitted_get(chunk, run_ind) == 1958 arena_mapbits_decommitted_get(chunk, run_ind+run_pages-1)); 1959 arena_avail_insert(arena, chunk, run_ind, run_pages); 1960 1961 if (dirty) 1962 arena_run_dirty_insert(arena, chunk, run_ind, run_pages); 1963 1964 /* Deallocate chunk if it is now completely unused. 
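 * The chunk is completely unused when the coalesced run covers every
 * non-header page, i.e. its size equals arena_maxrun and it starts at
 * map_bias, which is exactly what the assertions below verify.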
*/ 1965 if (size == arena_maxrun) { 1966 assert(run_ind == map_bias); 1967 assert(run_pages == (arena_maxrun >> LG_PAGE)); 1968 arena_chunk_dalloc(arena, chunk); 1969 } 1970 1971 /* 1972 * It is okay to do dirty page processing here even if the chunk was 1973 * deallocated above, since in that case it is the spare. Waiting 1974 * until after possible chunk deallocation to do dirty processing 1975 * allows for an old spare to be fully deallocated, thus decreasing the 1976 * chances of spuriously crossing the dirty page purging threshold. 1977 */ 1978 if (dirty) 1979 arena_maybe_purge(arena); 1980 } 1981 1982 static void 1983 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1984 size_t oldsize, size_t newsize) 1985 { 1986 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 1987 size_t pageind = arena_miscelm_to_pageind(miscelm); 1988 size_t head_npages = (oldsize - newsize) >> LG_PAGE; 1989 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 1990 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); 1991 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? 1992 CHUNK_MAP_UNZEROED : 0; 1993 1994 assert(oldsize > newsize); 1995 1996 /* 1997 * Update the chunk map so that arena_run_dalloc() can treat the 1998 * leading run as separately allocated. Set the last element of each 1999 * run first, in case of single-page runs. 2000 */ 2001 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 2002 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | 2003 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2004 pageind+head_npages-1))); 2005 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty | 2006 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); 2007 2008 if (config_debug) { 2009 UNUSED size_t tail_npages = newsize >> LG_PAGE; 2010 assert(arena_mapbits_large_size_get(chunk, 2011 pageind+head_npages+tail_npages-1) == 0); 2012 assert(arena_mapbits_dirty_get(chunk, 2013 pageind+head_npages+tail_npages-1) == flag_dirty); 2014 } 2015 arena_mapbits_large_set(chunk, pageind+head_npages, newsize, 2016 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2017 pageind+head_npages))); 2018 2019 arena_run_dalloc(arena, run, false, false, (flag_decommitted != 0)); 2020 } 2021 2022 static void 2023 arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 2024 size_t oldsize, size_t newsize, bool dirty) 2025 { 2026 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 2027 size_t pageind = arena_miscelm_to_pageind(miscelm); 2028 size_t head_npages = newsize >> LG_PAGE; 2029 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 2030 size_t flag_decommitted = arena_mapbits_decommitted_get(chunk, pageind); 2031 size_t flag_unzeroed_mask = (flag_dirty | flag_decommitted) == 0 ? 2032 CHUNK_MAP_UNZEROED : 0; 2033 arena_chunk_map_misc_t *tail_miscelm; 2034 arena_run_t *tail_run; 2035 2036 assert(oldsize > newsize); 2037 2038 /* 2039 * Update the chunk map so that arena_run_dalloc() can treat the 2040 * trailing run as separately allocated. Set the last element of each 2041 * run first, in case of single-page runs. 
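 * (When a run is only one page long its first and last map elements
 * coincide, so ordering the writes this way ensures the write that
 * records the run size lands last.)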
2042 */ 2043 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 2044 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty | 2045 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2046 pageind+head_npages-1))); 2047 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty | 2048 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, pageind))); 2049 2050 if (config_debug) { 2051 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; 2052 assert(arena_mapbits_large_size_get(chunk, 2053 pageind+head_npages+tail_npages-1) == 0); 2054 assert(arena_mapbits_dirty_get(chunk, 2055 pageind+head_npages+tail_npages-1) == flag_dirty); 2056 } 2057 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, 2058 flag_dirty | (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2059 pageind+head_npages))); 2060 2061 tail_miscelm = arena_miscelm_get(chunk, pageind + head_npages); 2062 tail_run = &tail_miscelm->run; 2063 arena_run_dalloc(arena, tail_run, dirty, false, (flag_decommitted != 2064 0)); 2065 } 2066 2067 static arena_run_t * 2068 arena_bin_runs_first(arena_bin_t *bin) 2069 { 2070 arena_chunk_map_misc_t *miscelm = arena_run_tree_first(&bin->runs); 2071 if (miscelm != NULL) 2072 return (&miscelm->run); 2073 2074 return (NULL); 2075 } 2076 2077 static void 2078 arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) 2079 { 2080 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 2081 2082 assert(arena_run_tree_search(&bin->runs, miscelm) == NULL); 2083 2084 arena_run_tree_insert(&bin->runs, miscelm); 2085 } 2086 2087 static void 2088 arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) 2089 { 2090 arena_chunk_map_misc_t *miscelm = arena_run_to_miscelm(run); 2091 2092 assert(arena_run_tree_search(&bin->runs, miscelm) != NULL); 2093 2094 arena_run_tree_remove(&bin->runs, miscelm); 2095 } 2096 2097 static arena_run_t * 2098 arena_bin_nonfull_run_tryget(arena_bin_t *bin) 2099 { 2100 arena_run_t *run = arena_bin_runs_first(bin); 2101 if (run != NULL) { 2102 arena_bin_runs_remove(bin, run); 2103 if (config_stats) 2104 bin->stats.reruns++; 2105 } 2106 return (run); 2107 } 2108 2109 static arena_run_t * 2110 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) 2111 { 2112 arena_run_t *run; 2113 szind_t binind; 2114 arena_bin_info_t *bin_info; 2115 2116 /* Look for a usable run. */ 2117 run = arena_bin_nonfull_run_tryget(bin); 2118 if (run != NULL) 2119 return (run); 2120 /* No existing runs have any space available. */ 2121 2122 binind = arena_bin_index(arena, bin); 2123 bin_info = &arena_bin_info[binind]; 2124 2125 /* Allocate a new run. */ 2126 malloc_mutex_unlock(&bin->lock); 2127 /******************************/ 2128 malloc_mutex_lock(&arena->lock); 2129 run = arena_run_alloc_small(arena, bin_info->run_size, binind); 2130 if (run != NULL) { 2131 /* Initialize run internals. */ 2132 run->binind = binind; 2133 run->nfree = bin_info->nregs; 2134 bitmap_init(run->bitmap, &bin_info->bitmap_info); 2135 } 2136 malloc_mutex_unlock(&arena->lock); 2137 /********************************/ 2138 malloc_mutex_lock(&bin->lock); 2139 if (run != NULL) { 2140 if (config_stats) { 2141 bin->stats.nruns++; 2142 bin->stats.curruns++; 2143 } 2144 return (run); 2145 } 2146 2147 /* 2148 * arena_run_alloc_small() failed, but another thread may have made 2149 * sufficient memory available while this one dropped bin->lock above, 2150 * so search one more time. 
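 * If another thread freed a run back into bin->runs while bin->lock was
 * dropped, the retry below will find it without allocating a new run.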
2151 */ 2152 run = arena_bin_nonfull_run_tryget(bin); 2153 if (run != NULL) 2154 return (run); 2155 2156 return (NULL); 2157 } 2158 2159 /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ 2160 static void * 2161 arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) 2162 { 2163 szind_t binind; 2164 arena_bin_info_t *bin_info; 2165 arena_run_t *run; 2166 2167 binind = arena_bin_index(arena, bin); 2168 bin_info = &arena_bin_info[binind]; 2169 bin->runcur = NULL; 2170 run = arena_bin_nonfull_run_get(arena, bin); 2171 if (bin->runcur != NULL && bin->runcur->nfree > 0) { 2172 /* 2173 * Another thread updated runcur while this one ran without the 2174 * bin lock in arena_bin_nonfull_run_get(). 2175 */ 2176 void *ret; 2177 assert(bin->runcur->nfree > 0); 2178 ret = arena_run_reg_alloc(bin->runcur, bin_info); 2179 if (run != NULL) { 2180 arena_chunk_t *chunk; 2181 2182 /* 2183 * arena_run_alloc_small() may have allocated run, or 2184 * it may have pulled run from the bin's run tree. 2185 * Therefore it is unsafe to make any assumptions about 2186 * how run has previously been used, and 2187 * arena_bin_lower_run() must be called, as if a region 2188 * were just deallocated from the run. 2189 */ 2190 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 2191 if (run->nfree == bin_info->nregs) 2192 arena_dalloc_bin_run(arena, chunk, run, bin); 2193 else 2194 arena_bin_lower_run(arena, chunk, run, bin); 2195 } 2196 return (ret); 2197 } 2198 2199 if (run == NULL) 2200 return (NULL); 2201 2202 bin->runcur = run; 2203 2204 assert(bin->runcur->nfree > 0); 2205 2206 return (arena_run_reg_alloc(bin->runcur, bin_info)); 2207 } 2208 2209 void 2210 arena_tcache_fill_small(tsd_t *tsd, arena_t *arena, tcache_bin_t *tbin, 2211 szind_t binind, uint64_t prof_accumbytes) 2212 { 2213 unsigned i, nfill; 2214 arena_bin_t *bin; 2215 2216 assert(tbin->ncached == 0); 2217 2218 if (config_prof && arena_prof_accum(arena, prof_accumbytes)) 2219 prof_idump(); 2220 bin = &arena->bins[binind]; 2221 malloc_mutex_lock(&bin->lock); 2222 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> 2223 tbin->lg_fill_div); i < nfill; i++) { 2224 arena_run_t *run; 2225 void *ptr; 2226 if ((run = bin->runcur) != NULL && run->nfree > 0) 2227 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); 2228 else 2229 ptr = arena_bin_malloc_hard(arena, bin); 2230 if (ptr == NULL) { 2231 /* 2232 * OOM. tbin->avail isn't yet filled down to its first 2233 * element, so the successful allocations (if any) must 2234 * be moved just before tbin->avail before bailing out. 2235 */ 2236 if (i > 0) { 2237 memmove(tbin->avail - i, tbin->avail - nfill, 2238 i * sizeof(void *)); 2239 } 2240 break; 2241 } 2242 if (config_fill && unlikely(opt_junk_alloc)) { 2243 arena_alloc_junk_small(ptr, &arena_bin_info[binind], 2244 true); 2245 } 2246 /* Insert such that low regions get used first. 
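 * arena_run_reg_alloc() returns the lowest free region in a run, and
 * storing the i-th pointer at tbin->avail - nfill + i preserves that
 * ordering for the thread that drains the cache.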
*/ 2247 *(tbin->avail - nfill + i) = ptr; 2248 } 2249 if (config_stats) { 2250 bin->stats.nmalloc += i; 2251 bin->stats.nrequests += tbin->tstats.nrequests; 2252 bin->stats.curregs += i; 2253 bin->stats.nfills++; 2254 tbin->tstats.nrequests = 0; 2255 } 2256 malloc_mutex_unlock(&bin->lock); 2257 tbin->ncached = i; 2258 arena_decay_tick(tsd, arena); 2259 } 2260 2261 void 2262 arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) 2263 { 2264 2265 if (zero) { 2266 size_t redzone_size = bin_info->redzone_size; 2267 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, 2268 redzone_size); 2269 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, 2270 redzone_size); 2271 } else { 2272 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, 2273 bin_info->reg_interval); 2274 } 2275 } 2276 2277 #ifdef JEMALLOC_JET 2278 #undef arena_redzone_corruption 2279 #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) 2280 #endif 2281 static void 2282 arena_redzone_corruption(void *ptr, size_t usize, bool after, 2283 size_t offset, uint8_t byte) 2284 { 2285 2286 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p " 2287 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", 2288 after ? "after" : "before", ptr, usize, byte); 2289 } 2290 #ifdef JEMALLOC_JET 2291 #undef arena_redzone_corruption 2292 #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) 2293 arena_redzone_corruption_t *arena_redzone_corruption = 2294 JEMALLOC_N(arena_redzone_corruption_impl); 2295 #endif 2296 2297 static void 2298 arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) 2299 { 2300 bool error = false; 2301 2302 if (opt_junk_alloc) { 2303 size_t size = bin_info->reg_size; 2304 size_t redzone_size = bin_info->redzone_size; 2305 size_t i; 2306 2307 for (i = 1; i <= redzone_size; i++) { 2308 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); 2309 if (*byte != 0xa5) { 2310 error = true; 2311 arena_redzone_corruption(ptr, size, false, i, 2312 *byte); 2313 if (reset) 2314 *byte = 0xa5; 2315 } 2316 } 2317 for (i = 0; i < redzone_size; i++) { 2318 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); 2319 if (*byte != 0xa5) { 2320 error = true; 2321 arena_redzone_corruption(ptr, size, true, i, 2322 *byte); 2323 if (reset) 2324 *byte = 0xa5; 2325 } 2326 } 2327 } 2328 2329 if (opt_abort && error) 2330 abort(); 2331 } 2332 2333 #ifdef JEMALLOC_JET 2334 #undef arena_dalloc_junk_small 2335 #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) 2336 #endif 2337 void 2338 arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) 2339 { 2340 size_t redzone_size = bin_info->redzone_size; 2341 2342 arena_redzones_validate(ptr, bin_info, false); 2343 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, 2344 bin_info->reg_interval); 2345 } 2346 #ifdef JEMALLOC_JET 2347 #undef arena_dalloc_junk_small 2348 #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) 2349 arena_dalloc_junk_small_t *arena_dalloc_junk_small = 2350 JEMALLOC_N(arena_dalloc_junk_small_impl); 2351 #endif 2352 2353 void 2354 arena_quarantine_junk_small(void *ptr, size_t usize) 2355 { 2356 szind_t binind; 2357 arena_bin_info_t *bin_info; 2358 cassert(config_fill); 2359 assert(opt_junk_free); 2360 assert(opt_quarantine); 2361 assert(usize <= SMALL_MAXCLASS); 2362 2363 binind = size2index(usize); 2364 bin_info = &arena_bin_info[binind]; 2365 arena_redzones_validate(ptr, bin_info, true); 2366 } 2367 2368 static void * 2369 arena_malloc_small(tsd_t 
*tsd, arena_t *arena, szind_t binind, bool zero) 2370 { 2371 void *ret; 2372 arena_bin_t *bin; 2373 size_t usize; 2374 arena_run_t *run; 2375 2376 assert(binind < NBINS); 2377 bin = &arena->bins[binind]; 2378 usize = index2size(binind); 2379 2380 malloc_mutex_lock(&bin->lock); 2381 if ((run = bin->runcur) != NULL && run->nfree > 0) 2382 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); 2383 else 2384 ret = arena_bin_malloc_hard(arena, bin); 2385 2386 if (ret == NULL) { 2387 malloc_mutex_unlock(&bin->lock); 2388 return (NULL); 2389 } 2390 2391 if (config_stats) { 2392 bin->stats.nmalloc++; 2393 bin->stats.nrequests++; 2394 bin->stats.curregs++; 2395 } 2396 malloc_mutex_unlock(&bin->lock); 2397 if (config_prof && !isthreaded && arena_prof_accum(arena, usize)) 2398 prof_idump(); 2399 2400 if (!zero) { 2401 if (config_fill) { 2402 if (unlikely(opt_junk_alloc)) { 2403 arena_alloc_junk_small(ret, 2404 &arena_bin_info[binind], false); 2405 } else if (unlikely(opt_zero)) 2406 memset(ret, 0, usize); 2407 } 2408 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); 2409 } else { 2410 if (config_fill && unlikely(opt_junk_alloc)) { 2411 arena_alloc_junk_small(ret, &arena_bin_info[binind], 2412 true); 2413 } 2414 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, usize); 2415 memset(ret, 0, usize); 2416 } 2417 2418 arena_decay_tick(tsd, arena); 2419 return (ret); 2420 } 2421 2422 void * 2423 arena_malloc_large(tsd_t *tsd, arena_t *arena, szind_t binind, bool zero) 2424 { 2425 void *ret; 2426 size_t usize; 2427 uintptr_t random_offset; 2428 arena_run_t *run; 2429 arena_chunk_map_misc_t *miscelm; 2430 UNUSED bool idump; 2431 2432 /* Large allocation. */ 2433 usize = index2size(binind); 2434 malloc_mutex_lock(&arena->lock); 2435 if (config_cache_oblivious) { 2436 uint64_t r; 2437 2438 /* 2439 * Compute a uniformly distributed offset within the first page 2440 * that is a multiple of the cacheline size, e.g. [0 .. 63) * 64 2441 * for 4 KiB pages and 64-byte cachelines. 
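 * prng_lg_range() below yields r in [0, 2^(LG_PAGE - LG_CACHELINE)),
 * so after shifting by LG_CACHELINE the offset is cacheline-aligned
 * and always smaller than a page.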
2442 */ 2443 r = prng_lg_range(&arena->offset_state, LG_PAGE - LG_CACHELINE); 2444 random_offset = ((uintptr_t)r) << LG_CACHELINE; 2445 } else 2446 random_offset = 0; 2447 run = arena_run_alloc_large(arena, usize + large_pad, zero); 2448 if (run == NULL) { 2449 malloc_mutex_unlock(&arena->lock); 2450 return (NULL); 2451 } 2452 miscelm = arena_run_to_miscelm(run); 2453 ret = (void *)((uintptr_t)arena_miscelm_to_rpages(miscelm) + 2454 random_offset); 2455 if (config_stats) { 2456 szind_t index = binind - NBINS; 2457 2458 arena->stats.nmalloc_large++; 2459 arena->stats.nrequests_large++; 2460 arena->stats.allocated_large += usize; 2461 arena->stats.lstats[index].nmalloc++; 2462 arena->stats.lstats[index].nrequests++; 2463 arena->stats.lstats[index].curruns++; 2464 } 2465 if (config_prof) 2466 idump = arena_prof_accum_locked(arena, usize); 2467 malloc_mutex_unlock(&arena->lock); 2468 if (config_prof && idump) 2469 prof_idump(); 2470 2471 if (!zero) { 2472 if (config_fill) { 2473 if (unlikely(opt_junk_alloc)) 2474 memset(ret, 0xa5, usize); 2475 else if (unlikely(opt_zero)) 2476 memset(ret, 0, usize); 2477 } 2478 } 2479 2480 arena_decay_tick(tsd, arena); 2481 return (ret); 2482 } 2483 2484 void * 2485 arena_malloc_hard(tsd_t *tsd, arena_t *arena, size_t size, szind_t ind, 2486 bool zero, tcache_t *tcache) 2487 { 2488 2489 arena = arena_choose(tsd, arena); 2490 if (unlikely(arena == NULL)) 2491 return (NULL); 2492 2493 if (likely(size <= SMALL_MAXCLASS)) 2494 return (arena_malloc_small(tsd, arena, ind, zero)); 2495 if (likely(size <= large_maxclass)) 2496 return (arena_malloc_large(tsd, arena, ind, zero)); 2497 return (huge_malloc(tsd, arena, index2size(ind), zero, tcache)); 2498 } 2499 2500 /* Only handles large allocations that require more than page alignment. 
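 * The run is over-allocated by (alignment - PAGE) bytes, and the
 * leading and trailing excess is trimmed off so that the returned
 * region starts at an address that is a multiple of alignment.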
*/ 2501 static void * 2502 arena_palloc_large(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, 2503 bool zero) 2504 { 2505 void *ret; 2506 size_t alloc_size, leadsize, trailsize; 2507 arena_run_t *run; 2508 arena_chunk_t *chunk; 2509 arena_chunk_map_misc_t *miscelm; 2510 void *rpages; 2511 2512 assert(usize == PAGE_CEILING(usize)); 2513 2514 arena = arena_choose(tsd, arena); 2515 if (unlikely(arena == NULL)) 2516 return (NULL); 2517 2518 alignment = PAGE_CEILING(alignment); 2519 alloc_size = usize + large_pad + alignment - PAGE; 2520 2521 malloc_mutex_lock(&arena->lock); 2522 run = arena_run_alloc_large(arena, alloc_size, false); 2523 if (run == NULL) { 2524 malloc_mutex_unlock(&arena->lock); 2525 return (NULL); 2526 } 2527 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 2528 miscelm = arena_run_to_miscelm(run); 2529 rpages = arena_miscelm_to_rpages(miscelm); 2530 2531 leadsize = ALIGNMENT_CEILING((uintptr_t)rpages, alignment) - 2532 (uintptr_t)rpages; 2533 assert(alloc_size >= leadsize + usize); 2534 trailsize = alloc_size - leadsize - usize - large_pad; 2535 if (leadsize != 0) { 2536 arena_chunk_map_misc_t *head_miscelm = miscelm; 2537 arena_run_t *head_run = run; 2538 2539 miscelm = arena_miscelm_get(chunk, 2540 arena_miscelm_to_pageind(head_miscelm) + (leadsize >> 2541 LG_PAGE)); 2542 run = &miscelm->run; 2543 2544 arena_run_trim_head(arena, chunk, head_run, alloc_size, 2545 alloc_size - leadsize); 2546 } 2547 if (trailsize != 0) { 2548 arena_run_trim_tail(arena, chunk, run, usize + large_pad + 2549 trailsize, usize + large_pad, false); 2550 } 2551 if (arena_run_init_large(arena, run, usize + large_pad, zero)) { 2552 size_t run_ind = 2553 arena_miscelm_to_pageind(arena_run_to_miscelm(run)); 2554 bool dirty = (arena_mapbits_dirty_get(chunk, run_ind) != 0); 2555 bool decommitted = (arena_mapbits_decommitted_get(chunk, 2556 run_ind) != 0); 2557 2558 assert(decommitted); /* Cause of OOM. */ 2559 arena_run_dalloc(arena, run, dirty, false, decommitted); 2560 malloc_mutex_unlock(&arena->lock); 2561 return (NULL); 2562 } 2563 ret = arena_miscelm_to_rpages(miscelm); 2564 2565 if (config_stats) { 2566 szind_t index = size2index(usize) - NBINS; 2567 2568 arena->stats.nmalloc_large++; 2569 arena->stats.nrequests_large++; 2570 arena->stats.allocated_large += usize; 2571 arena->stats.lstats[index].nmalloc++; 2572 arena->stats.lstats[index].nrequests++; 2573 arena->stats.lstats[index].curruns++; 2574 } 2575 malloc_mutex_unlock(&arena->lock); 2576 2577 if (config_fill && !zero) { 2578 if (unlikely(opt_junk_alloc)) 2579 memset(ret, 0xa5, usize); 2580 else if (unlikely(opt_zero)) 2581 memset(ret, 0, usize); 2582 } 2583 arena_decay_tick(tsd, arena); 2584 return (ret); 2585 } 2586 2587 void * 2588 arena_palloc(tsd_t *tsd, arena_t *arena, size_t usize, size_t alignment, 2589 bool zero, tcache_t *tcache) 2590 { 2591 void *ret; 2592 2593 if (usize <= SMALL_MAXCLASS && (alignment < PAGE || (alignment == PAGE 2594 && (usize & PAGE_MASK) == 0))) { 2595 /* Small; alignment doesn't require special run placement. */ 2596 ret = arena_malloc(tsd, arena, usize, size2index(usize), zero, 2597 tcache, true); 2598 } else if (usize <= large_maxclass && alignment <= PAGE) { 2599 /* 2600 * Large; alignment doesn't require special run placement. 2601 * However, the cached pointer may be at a random offset from 2602 * the base of the run, so do some bit manipulation to retrieve 2603 * the base. 
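 * Masking off the low LG_PAGE bits recovers the run base, since the
 * random offset applied by arena_malloc_large() is always smaller than
 * a page.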
2604 */ 2605 ret = arena_malloc(tsd, arena, usize, size2index(usize), zero, 2606 tcache, true); 2607 if (config_cache_oblivious) 2608 ret = (void *)((uintptr_t)ret & ~PAGE_MASK); 2609 } else { 2610 if (likely(usize <= large_maxclass)) { 2611 ret = arena_palloc_large(tsd, arena, usize, alignment, 2612 zero); 2613 } else if (likely(alignment <= chunksize)) 2614 ret = huge_malloc(tsd, arena, usize, zero, tcache); 2615 else { 2616 ret = huge_palloc(tsd, arena, usize, alignment, zero, 2617 tcache); 2618 } 2619 } 2620 return (ret); 2621 } 2622 2623 void 2624 arena_prof_promoted(const void *ptr, size_t size) 2625 { 2626 arena_chunk_t *chunk; 2627 size_t pageind; 2628 szind_t binind; 2629 2630 cassert(config_prof); 2631 assert(ptr != NULL); 2632 assert(CHUNK_ADDR2BASE(ptr) != ptr); 2633 assert(isalloc(ptr, false) == LARGE_MINCLASS); 2634 assert(isalloc(ptr, true) == LARGE_MINCLASS); 2635 assert(size <= SMALL_MAXCLASS); 2636 2637 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 2638 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2639 binind = size2index(size); 2640 assert(binind < NBINS); 2641 arena_mapbits_large_binind_set(chunk, pageind, binind); 2642 2643 assert(isalloc(ptr, false) == LARGE_MINCLASS); 2644 assert(isalloc(ptr, true) == size); 2645 } 2646 2647 static void 2648 arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, 2649 arena_bin_t *bin) 2650 { 2651 2652 /* Dissociate run from bin. */ 2653 if (run == bin->runcur) 2654 bin->runcur = NULL; 2655 else { 2656 szind_t binind = arena_bin_index(extent_node_arena_get( 2657 &chunk->node), bin); 2658 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 2659 2660 if (bin_info->nregs != 1) { 2661 /* 2662 * This block's conditional is necessary because if the 2663 * run only contains one region, then it never gets 2664 * inserted into the non-full runs tree. 2665 */ 2666 arena_bin_runs_remove(bin, run); 2667 } 2668 } 2669 } 2670 2671 static void 2672 arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 2673 arena_bin_t *bin) 2674 { 2675 2676 assert(run != bin->runcur); 2677 assert(arena_run_tree_search(&bin->runs, arena_run_to_miscelm(run)) == 2678 NULL); 2679 2680 malloc_mutex_unlock(&bin->lock); 2681 /******************************/ 2682 malloc_mutex_lock(&arena->lock); 2683 arena_run_dalloc(arena, run, true, false, false); 2684 malloc_mutex_unlock(&arena->lock); 2685 /****************************/ 2686 malloc_mutex_lock(&bin->lock); 2687 if (config_stats) 2688 bin->stats.curruns--; 2689 } 2690 2691 static void 2692 arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 2693 arena_bin_t *bin) 2694 { 2695 2696 /* 2697 * Make sure that if bin->runcur is non-NULL, it refers to the lowest 2698 * non-full run. It is okay to NULL runcur out rather than proactively 2699 * keeping it pointing at the lowest non-full run. 2700 */ 2701 if ((uintptr_t)run < (uintptr_t)bin->runcur) { 2702 /* Switch runcur. 
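 * The old runcur is returned to the non-full runs tree first (provided
 * it still has free regions) so that it remains findable after the
 * lower run takes its place.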
*/ 2703 if (bin->runcur->nfree > 0) 2704 arena_bin_runs_insert(bin, bin->runcur); 2705 bin->runcur = run; 2706 if (config_stats) 2707 bin->stats.reruns++; 2708 } else 2709 arena_bin_runs_insert(bin, run); 2710 } 2711 2712 static void 2713 arena_dalloc_bin_locked_impl(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2714 arena_chunk_map_bits_t *bitselm, bool junked) 2715 { 2716 size_t pageind, rpages_ind; 2717 arena_run_t *run; 2718 arena_bin_t *bin; 2719 arena_bin_info_t *bin_info; 2720 szind_t binind; 2721 2722 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2723 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); 2724 run = &arena_miscelm_get(chunk, rpages_ind)->run; 2725 binind = run->binind; 2726 bin = &arena->bins[binind]; 2727 bin_info = &arena_bin_info[binind]; 2728 2729 if (!junked && config_fill && unlikely(opt_junk_free)) 2730 arena_dalloc_junk_small(ptr, bin_info); 2731 2732 arena_run_reg_dalloc(run, ptr); 2733 if (run->nfree == bin_info->nregs) { 2734 arena_dissociate_bin_run(chunk, run, bin); 2735 arena_dalloc_bin_run(arena, chunk, run, bin); 2736 } else if (run->nfree == 1 && run != bin->runcur) 2737 arena_bin_lower_run(arena, chunk, run, bin); 2738 2739 if (config_stats) { 2740 bin->stats.ndalloc++; 2741 bin->stats.curregs--; 2742 } 2743 } 2744 2745 void 2746 arena_dalloc_bin_junked_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2747 arena_chunk_map_bits_t *bitselm) 2748 { 2749 2750 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, true); 2751 } 2752 2753 void 2754 arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2755 size_t pageind, arena_chunk_map_bits_t *bitselm) 2756 { 2757 arena_run_t *run; 2758 arena_bin_t *bin; 2759 size_t rpages_ind; 2760 2761 rpages_ind = pageind - arena_mapbits_small_runind_get(chunk, pageind); 2762 run = &arena_miscelm_get(chunk, rpages_ind)->run; 2763 bin = &arena->bins[run->binind]; 2764 malloc_mutex_lock(&bin->lock); 2765 arena_dalloc_bin_locked_impl(arena, chunk, ptr, bitselm, false); 2766 malloc_mutex_unlock(&bin->lock); 2767 } 2768 2769 void 2770 arena_dalloc_small(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr, 2771 size_t pageind) 2772 { 2773 arena_chunk_map_bits_t *bitselm; 2774 2775 if (config_debug) { 2776 /* arena_ptr_small_binind_get() does extra sanity checking. 
*/ 2777 assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk, 2778 pageind)) != BININD_INVALID); 2779 } 2780 bitselm = arena_bitselm_get(chunk, pageind); 2781 arena_dalloc_bin(arena, chunk, ptr, pageind, bitselm); 2782 arena_decay_tick(tsd, arena); 2783 } 2784 2785 #ifdef JEMALLOC_JET 2786 #undef arena_dalloc_junk_large 2787 #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl) 2788 #endif 2789 void 2790 arena_dalloc_junk_large(void *ptr, size_t usize) 2791 { 2792 2793 if (config_fill && unlikely(opt_junk_free)) 2794 memset(ptr, 0x5a, usize); 2795 } 2796 #ifdef JEMALLOC_JET 2797 #undef arena_dalloc_junk_large 2798 #define arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large) 2799 arena_dalloc_junk_large_t *arena_dalloc_junk_large = 2800 JEMALLOC_N(arena_dalloc_junk_large_impl); 2801 #endif 2802 2803 static void 2804 arena_dalloc_large_locked_impl(arena_t *arena, arena_chunk_t *chunk, 2805 void *ptr, bool junked) 2806 { 2807 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2808 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); 2809 arena_run_t *run = &miscelm->run; 2810 2811 if (config_fill || config_stats) { 2812 size_t usize = arena_mapbits_large_size_get(chunk, pageind) - 2813 large_pad; 2814 2815 if (!junked) 2816 arena_dalloc_junk_large(ptr, usize); 2817 if (config_stats) { 2818 szind_t index = size2index(usize) - NBINS; 2819 2820 arena->stats.ndalloc_large++; 2821 arena->stats.allocated_large -= usize; 2822 arena->stats.lstats[index].ndalloc++; 2823 arena->stats.lstats[index].curruns--; 2824 } 2825 } 2826 2827 arena_run_dalloc(arena, run, true, false, false); 2828 } 2829 2830 void 2831 arena_dalloc_large_junked_locked(arena_t *arena, arena_chunk_t *chunk, 2832 void *ptr) 2833 { 2834 2835 arena_dalloc_large_locked_impl(arena, chunk, ptr, true); 2836 } 2837 2838 void 2839 arena_dalloc_large(tsd_t *tsd, arena_t *arena, arena_chunk_t *chunk, void *ptr) 2840 { 2841 2842 malloc_mutex_lock(&arena->lock); 2843 arena_dalloc_large_locked_impl(arena, chunk, ptr, false); 2844 malloc_mutex_unlock(&arena->lock); 2845 arena_decay_tick(tsd, arena); 2846 } 2847 2848 static void 2849 arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2850 size_t oldsize, size_t size) 2851 { 2852 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2853 arena_chunk_map_misc_t *miscelm = arena_miscelm_get(chunk, pageind); 2854 arena_run_t *run = &miscelm->run; 2855 2856 assert(size < oldsize); 2857 2858 /* 2859 * Shrink the run, and make trailing pages available for other 2860 * allocations. 
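 * arena_run_trim_tail() is called with dirty=true, so the freed tail
 * pages immediately become candidates for purging.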
2861 */ 2862 malloc_mutex_lock(&arena->lock); 2863 arena_run_trim_tail(arena, chunk, run, oldsize + large_pad, size + 2864 large_pad, true); 2865 if (config_stats) { 2866 szind_t oldindex = size2index(oldsize) - NBINS; 2867 szind_t index = size2index(size) - NBINS; 2868 2869 arena->stats.ndalloc_large++; 2870 arena->stats.allocated_large -= oldsize; 2871 arena->stats.lstats[oldindex].ndalloc++; 2872 arena->stats.lstats[oldindex].curruns--; 2873 2874 arena->stats.nmalloc_large++; 2875 arena->stats.nrequests_large++; 2876 arena->stats.allocated_large += size; 2877 arena->stats.lstats[index].nmalloc++; 2878 arena->stats.lstats[index].nrequests++; 2879 arena->stats.lstats[index].curruns++; 2880 } 2881 malloc_mutex_unlock(&arena->lock); 2882 } 2883 2884 static bool 2885 arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr, 2886 size_t oldsize, size_t usize_min, size_t usize_max, bool zero) 2887 { 2888 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 2889 size_t npages = (oldsize + large_pad) >> LG_PAGE; 2890 size_t followsize; 2891 2892 assert(oldsize == arena_mapbits_large_size_get(chunk, pageind) - 2893 large_pad); 2894 2895 /* Try to extend the run. */ 2896 malloc_mutex_lock(&arena->lock); 2897 if (pageind+npages >= chunk_npages || arena_mapbits_allocated_get(chunk, 2898 pageind+npages) != 0) 2899 goto label_fail; 2900 followsize = arena_mapbits_unallocated_size_get(chunk, pageind+npages); 2901 if (oldsize + followsize >= usize_min) { 2902 /* 2903 * The next run is available and sufficiently large. Split the 2904 * following run, then merge the first part with the existing 2905 * allocation. 2906 */ 2907 arena_run_t *run; 2908 size_t usize, splitsize, size, flag_dirty, flag_unzeroed_mask; 2909 2910 usize = usize_max; 2911 while (oldsize + followsize < usize) 2912 usize = index2size(size2index(usize)-1); 2913 assert(usize >= usize_min); 2914 assert(usize >= oldsize); 2915 splitsize = usize - oldsize; 2916 if (splitsize == 0) 2917 goto label_fail; 2918 2919 run = &arena_miscelm_get(chunk, pageind+npages)->run; 2920 if (arena_run_split_large(arena, run, splitsize, zero)) 2921 goto label_fail; 2922 2923 if (config_cache_oblivious && zero) { 2924 /* 2925 * Zero the trailing bytes of the original allocation's 2926 * last page, since they are in an indeterminate state. 2927 * There will always be trailing bytes, because ptr's 2928 * offset from the beginning of the run is a multiple of 2929 * CACHELINE in [0 .. PAGE). 2930 */ 2931 void *zbase = (void *)((uintptr_t)ptr + oldsize); 2932 void *zpast = PAGE_ADDR2BASE((void *)((uintptr_t)zbase + 2933 PAGE)); 2934 size_t nzero = (uintptr_t)zpast - (uintptr_t)zbase; 2935 assert(nzero > 0); 2936 memset(zbase, 0, nzero); 2937 } 2938 2939 size = oldsize + splitsize; 2940 npages = (size + large_pad) >> LG_PAGE; 2941 2942 /* 2943 * Mark the extended run as dirty if either portion of the run 2944 * was dirty before allocation. This is rather pedantic, 2945 * because there's not actually any sequence of events that 2946 * could cause the resulting run to be passed to 2947 * arena_run_dalloc() with the dirty argument set to false 2948 * (which is when dirty flag consistency would really matter). 2949 */ 2950 flag_dirty = arena_mapbits_dirty_get(chunk, pageind) | 2951 arena_mapbits_dirty_get(chunk, pageind+npages-1); 2952 flag_unzeroed_mask = flag_dirty == 0 ? 
CHUNK_MAP_UNZEROED : 0; 2953 arena_mapbits_large_set(chunk, pageind, size + large_pad, 2954 flag_dirty | (flag_unzeroed_mask & 2955 arena_mapbits_unzeroed_get(chunk, pageind))); 2956 arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty | 2957 (flag_unzeroed_mask & arena_mapbits_unzeroed_get(chunk, 2958 pageind+npages-1))); 2959 2960 if (config_stats) { 2961 szind_t oldindex = size2index(oldsize) - NBINS; 2962 szind_t index = size2index(size) - NBINS; 2963 2964 arena->stats.ndalloc_large++; 2965 arena->stats.allocated_large -= oldsize; 2966 arena->stats.lstats[oldindex].ndalloc++; 2967 arena->stats.lstats[oldindex].curruns--; 2968 2969 arena->stats.nmalloc_large++; 2970 arena->stats.nrequests_large++; 2971 arena->stats.allocated_large += size; 2972 arena->stats.lstats[index].nmalloc++; 2973 arena->stats.lstats[index].nrequests++; 2974 arena->stats.lstats[index].curruns++; 2975 } 2976 malloc_mutex_unlock(&arena->lock); 2977 return (false); 2978 } 2979 label_fail: 2980 malloc_mutex_unlock(&arena->lock); 2981 return (true); 2982 } 2983 2984 #ifdef JEMALLOC_JET 2985 #undef arena_ralloc_junk_large 2986 #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl) 2987 #endif 2988 static void 2989 arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize) 2990 { 2991 2992 if (config_fill && unlikely(opt_junk_free)) { 2993 memset((void *)((uintptr_t)ptr + usize), 0x5a, 2994 old_usize - usize); 2995 } 2996 } 2997 #ifdef JEMALLOC_JET 2998 #undef arena_ralloc_junk_large 2999 #define arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large) 3000 arena_ralloc_junk_large_t *arena_ralloc_junk_large = 3001 JEMALLOC_N(arena_ralloc_junk_large_impl); 3002 #endif 3003 3004 /* 3005 * Try to resize a large allocation, in order to avoid copying. This will 3006 * always fail if growing an object, and the following run is already in use. 3007 */ 3008 static bool 3009 arena_ralloc_large(void *ptr, size_t oldsize, size_t usize_min, 3010 size_t usize_max, bool zero) 3011 { 3012 arena_chunk_t *chunk; 3013 arena_t *arena; 3014 3015 if (oldsize == usize_max) { 3016 /* Current size class is compatible and maximal. */ 3017 return (false); 3018 } 3019 3020 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 3021 arena = extent_node_arena_get(&chunk->node); 3022 3023 if (oldsize < usize_max) { 3024 bool ret = arena_ralloc_large_grow(arena, chunk, ptr, oldsize, 3025 usize_min, usize_max, zero); 3026 if (config_fill && !ret && !zero) { 3027 if (unlikely(opt_junk_alloc)) { 3028 memset((void *)((uintptr_t)ptr + oldsize), 0xa5, 3029 isalloc(ptr, config_prof) - oldsize); 3030 } else if (unlikely(opt_zero)) { 3031 memset((void *)((uintptr_t)ptr + oldsize), 0, 3032 isalloc(ptr, config_prof) - oldsize); 3033 } 3034 } 3035 return (ret); 3036 } 3037 3038 assert(oldsize > usize_max); 3039 /* Fill before shrinking in order avoid a race. */ 3040 arena_ralloc_junk_large(ptr, oldsize, usize_max); 3041 arena_ralloc_large_shrink(arena, chunk, ptr, oldsize, usize_max); 3042 return (false); 3043 } 3044 3045 bool 3046 arena_ralloc_no_move(tsd_t *tsd, void *ptr, size_t oldsize, size_t size, 3047 size_t extra, bool zero) 3048 { 3049 size_t usize_min, usize_max; 3050 3051 /* Calls with non-zero extra had to clamp extra. 
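 * In other words, the caller is responsible for ensuring that
 * size + extra does not exceed HUGE_MAXCLASS; the assertion below
 * re-checks that contract.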
*/ 3052 assert(extra == 0 || size + extra <= HUGE_MAXCLASS); 3053 3054 if (unlikely(size > HUGE_MAXCLASS)) 3055 return (true); 3056 3057 usize_min = s2u(size); 3058 usize_max = s2u(size + extra); 3059 if (likely(oldsize <= large_maxclass && usize_min <= large_maxclass)) { 3060 arena_chunk_t *chunk; 3061 3062 /* 3063 * Avoid moving the allocation if the size class can be left the 3064 * same. 3065 */ 3066 if (oldsize <= SMALL_MAXCLASS) { 3067 assert(arena_bin_info[size2index(oldsize)].reg_size == 3068 oldsize); 3069 if ((usize_max > SMALL_MAXCLASS || 3070 size2index(usize_max) != size2index(oldsize)) && 3071 (size > oldsize || usize_max < oldsize)) 3072 return (true); 3073 } else { 3074 if (usize_max <= SMALL_MAXCLASS) 3075 return (true); 3076 if (arena_ralloc_large(ptr, oldsize, usize_min, 3077 usize_max, zero)) 3078 return (true); 3079 } 3080 3081 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 3082 arena_decay_tick(tsd, extent_node_arena_get(&chunk->node)); 3083 return (false); 3084 } else { 3085 return (huge_ralloc_no_move(tsd, ptr, oldsize, usize_min, 3086 usize_max, zero)); 3087 } 3088 } 3089 3090 static void * 3091 arena_ralloc_move_helper(tsd_t *tsd, arena_t *arena, size_t usize, 3092 size_t alignment, bool zero, tcache_t *tcache) 3093 { 3094 3095 if (alignment == 0) 3096 return (arena_malloc(tsd, arena, usize, size2index(usize), zero, 3097 tcache, true)); 3098 usize = sa2u(usize, alignment); 3099 if (unlikely(usize == 0 || usize > HUGE_MAXCLASS)) 3100 return (NULL); 3101 return (ipalloct(tsd, usize, alignment, zero, tcache, arena)); 3102 } 3103 3104 void * 3105 arena_ralloc(tsd_t *tsd, arena_t *arena, void *ptr, size_t oldsize, size_t size, 3106 size_t alignment, bool zero, tcache_t *tcache) 3107 { 3108 void *ret; 3109 size_t usize; 3110 3111 usize = s2u(size); 3112 if (unlikely(usize == 0 || size > HUGE_MAXCLASS)) 3113 return (NULL); 3114 3115 if (likely(usize <= large_maxclass)) { 3116 size_t copysize; 3117 3118 /* Try to avoid moving the allocation. */ 3119 if (!arena_ralloc_no_move(tsd, ptr, oldsize, usize, 0, zero)) 3120 return (ptr); 3121 3122 /* 3123 * size and oldsize are different enough that we need to move 3124 * the object. In that case, fall back to allocating new space 3125 * and copying. 3126 */ 3127 ret = arena_ralloc_move_helper(tsd, arena, usize, alignment, 3128 zero, tcache); 3129 if (ret == NULL) 3130 return (NULL); 3131 3132 /* 3133 * Junk/zero-filling were already done by 3134 * ipalloc()/arena_malloc(). 3135 */ 3136 3137 copysize = (usize < oldsize) ? 
usize : oldsize; 3138 JEMALLOC_VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize); 3139 memcpy(ret, ptr, copysize); 3140 isqalloc(tsd, ptr, oldsize, tcache); 3141 } else { 3142 ret = huge_ralloc(tsd, arena, ptr, oldsize, usize, alignment, 3143 zero, tcache); 3144 } 3145 return (ret); 3146 } 3147 3148 dss_prec_t 3149 arena_dss_prec_get(arena_t *arena) 3150 { 3151 dss_prec_t ret; 3152 3153 malloc_mutex_lock(&arena->lock); 3154 ret = arena->dss_prec; 3155 malloc_mutex_unlock(&arena->lock); 3156 return (ret); 3157 } 3158 3159 bool 3160 arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec) 3161 { 3162 3163 if (!have_dss) 3164 return (dss_prec != dss_prec_disabled); 3165 malloc_mutex_lock(&arena->lock); 3166 arena->dss_prec = dss_prec; 3167 malloc_mutex_unlock(&arena->lock); 3168 return (false); 3169 } 3170 3171 ssize_t 3172 arena_lg_dirty_mult_default_get(void) 3173 { 3174 3175 return ((ssize_t)atomic_read_z((size_t *)&lg_dirty_mult_default)); 3176 } 3177 3178 bool 3179 arena_lg_dirty_mult_default_set(ssize_t lg_dirty_mult) 3180 { 3181 3182 if (opt_purge != purge_mode_ratio) 3183 return (true); 3184 if (!arena_lg_dirty_mult_valid(lg_dirty_mult)) 3185 return (true); 3186 atomic_write_z((size_t *)&lg_dirty_mult_default, (size_t)lg_dirty_mult); 3187 return (false); 3188 } 3189 3190 ssize_t 3191 arena_decay_time_default_get(void) 3192 { 3193 3194 return ((ssize_t)atomic_read_z((size_t *)&decay_time_default)); 3195 } 3196 3197 bool 3198 arena_decay_time_default_set(ssize_t decay_time) 3199 { 3200 3201 if (opt_purge != purge_mode_decay) 3202 return (true); 3203 if (!arena_decay_time_valid(decay_time)) 3204 return (true); 3205 atomic_write_z((size_t *)&decay_time_default, (size_t)decay_time); 3206 return (false); 3207 } 3208 3209 static void 3210 arena_basic_stats_merge_locked(arena_t *arena, unsigned *nthreads, 3211 const char **dss, ssize_t *lg_dirty_mult, ssize_t *decay_time, 3212 size_t *nactive, size_t *ndirty) 3213 { 3214 3215 *nthreads += arena_nthreads_get(arena); 3216 *dss = dss_prec_names[arena->dss_prec]; 3217 *lg_dirty_mult = arena->lg_dirty_mult; 3218 *decay_time = arena->decay_time; 3219 *nactive += arena->nactive; 3220 *ndirty += arena->ndirty; 3221 } 3222 3223 void 3224 arena_basic_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss, 3225 ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive, 3226 size_t *ndirty) 3227 { 3228 3229 malloc_mutex_lock(&arena->lock); 3230 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, 3231 decay_time, nactive, ndirty); 3232 malloc_mutex_unlock(&arena->lock); 3233 } 3234 3235 void 3236 arena_stats_merge(arena_t *arena, unsigned *nthreads, const char **dss, 3237 ssize_t *lg_dirty_mult, ssize_t *decay_time, size_t *nactive, 3238 size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats, 3239 malloc_large_stats_t *lstats, malloc_huge_stats_t *hstats) 3240 { 3241 unsigned i; 3242 3243 cassert(config_stats); 3244 3245 malloc_mutex_lock(&arena->lock); 3246 arena_basic_stats_merge_locked(arena, nthreads, dss, lg_dirty_mult, 3247 decay_time, nactive, ndirty); 3248 3249 astats->mapped += arena->stats.mapped; 3250 astats->npurge += arena->stats.npurge; 3251 astats->nmadvise += arena->stats.nmadvise; 3252 astats->purged += arena->stats.purged; 3253 astats->metadata_mapped += arena->stats.metadata_mapped; 3254 astats->metadata_allocated += arena_metadata_allocated_get(arena); 3255 astats->allocated_large += arena->stats.allocated_large; 3256 astats->nmalloc_large += arena->stats.nmalloc_large; 3257 astats->ndalloc_large += 
arena->stats.ndalloc_large; 3258 astats->nrequests_large += arena->stats.nrequests_large; 3259 astats->allocated_huge += arena->stats.allocated_huge; 3260 astats->nmalloc_huge += arena->stats.nmalloc_huge; 3261 astats->ndalloc_huge += arena->stats.ndalloc_huge; 3262 3263 for (i = 0; i < nlclasses; i++) { 3264 lstats[i].nmalloc += arena->stats.lstats[i].nmalloc; 3265 lstats[i].ndalloc += arena->stats.lstats[i].ndalloc; 3266 lstats[i].nrequests += arena->stats.lstats[i].nrequests; 3267 lstats[i].curruns += arena->stats.lstats[i].curruns; 3268 } 3269 3270 for (i = 0; i < nhclasses; i++) { 3271 hstats[i].nmalloc += arena->stats.hstats[i].nmalloc; 3272 hstats[i].ndalloc += arena->stats.hstats[i].ndalloc; 3273 hstats[i].curhchunks += arena->stats.hstats[i].curhchunks; 3274 } 3275 malloc_mutex_unlock(&arena->lock); 3276 3277 for (i = 0; i < NBINS; i++) { 3278 arena_bin_t *bin = &arena->bins[i]; 3279 3280 malloc_mutex_lock(&bin->lock); 3281 bstats[i].nmalloc += bin->stats.nmalloc; 3282 bstats[i].ndalloc += bin->stats.ndalloc; 3283 bstats[i].nrequests += bin->stats.nrequests; 3284 bstats[i].curregs += bin->stats.curregs; 3285 if (config_tcache) { 3286 bstats[i].nfills += bin->stats.nfills; 3287 bstats[i].nflushes += bin->stats.nflushes; 3288 } 3289 bstats[i].nruns += bin->stats.nruns; 3290 bstats[i].reruns += bin->stats.reruns; 3291 bstats[i].curruns += bin->stats.curruns; 3292 malloc_mutex_unlock(&bin->lock); 3293 } 3294 } 3295 3296 unsigned 3297 arena_nthreads_get(arena_t *arena) 3298 { 3299 3300 return (atomic_read_u(&arena->nthreads)); 3301 } 3302 3303 void 3304 arena_nthreads_inc(arena_t *arena) 3305 { 3306 3307 atomic_add_u(&arena->nthreads, 1); 3308 } 3309 3310 void 3311 arena_nthreads_dec(arena_t *arena) 3312 { 3313 3314 atomic_sub_u(&arena->nthreads, 1); 3315 } 3316 3317 arena_t * 3318 arena_new(unsigned ind) 3319 { 3320 arena_t *arena; 3321 size_t arena_size; 3322 unsigned i; 3323 arena_bin_t *bin; 3324 3325 /* Compute arena size to incorporate sufficient runs_avail elements. */ 3326 arena_size = offsetof(arena_t, runs_avail) + (sizeof(arena_run_tree_t) * 3327 runs_avail_nclasses); 3328 /* 3329 * Allocate arena, arena->lstats, and arena->hstats contiguously, mainly 3330 * because there is no way to clean up if base_alloc() OOMs. 
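 * The layout is the arena itself (rounded up to a cacheline boundary),
 * followed by nlclasses large-stats slots and then nhclasses huge-stats
 * slots; the stats pointers are wired up to match below.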
3331 */ 3332 if (config_stats) { 3333 arena = (arena_t *)base_alloc(CACHELINE_CEILING(arena_size) + 3334 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t) + 3335 nhclasses) * sizeof(malloc_huge_stats_t)); 3336 } else 3337 arena = (arena_t *)base_alloc(arena_size); 3338 if (arena == NULL) 3339 return (NULL); 3340 3341 arena->ind = ind; 3342 arena->nthreads = 0; 3343 if (malloc_mutex_init(&arena->lock)) 3344 return (NULL); 3345 3346 if (config_stats) { 3347 memset(&arena->stats, 0, sizeof(arena_stats_t)); 3348 arena->stats.lstats = (malloc_large_stats_t *)((uintptr_t)arena 3349 + CACHELINE_CEILING(arena_size)); 3350 memset(arena->stats.lstats, 0, nlclasses * 3351 sizeof(malloc_large_stats_t)); 3352 arena->stats.hstats = (malloc_huge_stats_t *)((uintptr_t)arena 3353 + CACHELINE_CEILING(arena_size) + 3354 QUANTUM_CEILING(nlclasses * sizeof(malloc_large_stats_t))); 3355 memset(arena->stats.hstats, 0, nhclasses * 3356 sizeof(malloc_huge_stats_t)); 3357 if (config_tcache) 3358 ql_new(&arena->tcache_ql); 3359 } 3360 3361 if (config_prof) 3362 arena->prof_accumbytes = 0; 3363 3364 if (config_cache_oblivious) { 3365 /* 3366 * A nondeterministic seed based on the address of arena reduces 3367 * the likelihood of lockstep non-uniform cache index 3368 * utilization among identical concurrent processes, but at the 3369 * cost of test repeatability. For debug builds, instead use a 3370 * deterministic seed. 3371 */ 3372 arena->offset_state = config_debug ? ind : 3373 (uint64_t)(uintptr_t)arena; 3374 } 3375 3376 arena->dss_prec = chunk_dss_prec_get(); 3377 3378 arena->spare = NULL; 3379 3380 arena->lg_dirty_mult = arena_lg_dirty_mult_default_get(); 3381 arena->purging = false; 3382 arena->nactive = 0; 3383 arena->ndirty = 0; 3384 3385 for(i = 0; i < runs_avail_nclasses; i++) 3386 arena_run_tree_new(&arena->runs_avail[i]); 3387 qr_new(&arena->runs_dirty, rd_link); 3388 qr_new(&arena->chunks_cache, cc_link); 3389 3390 if (opt_purge == purge_mode_decay) 3391 arena_decay_init(arena, arena_decay_time_default_get()); 3392 3393 ql_new(&arena->huge); 3394 if (malloc_mutex_init(&arena->huge_mtx)) 3395 return (NULL); 3396 3397 extent_tree_szad_new(&arena->chunks_szad_cached); 3398 extent_tree_ad_new(&arena->chunks_ad_cached); 3399 extent_tree_szad_new(&arena->chunks_szad_retained); 3400 extent_tree_ad_new(&arena->chunks_ad_retained); 3401 if (malloc_mutex_init(&arena->chunks_mtx)) 3402 return (NULL); 3403 ql_new(&arena->node_cache); 3404 if (malloc_mutex_init(&arena->node_cache_mtx)) 3405 return (NULL); 3406 3407 arena->chunk_hooks = chunk_hooks_default; 3408 3409 /* Initialize bins. */ 3410 for (i = 0; i < NBINS; i++) { 3411 bin = &arena->bins[i]; 3412 if (malloc_mutex_init(&bin->lock)) 3413 return (NULL); 3414 bin->runcur = NULL; 3415 arena_run_tree_new(&bin->runs); 3416 if (config_stats) 3417 memset(&bin->stats, 0, sizeof(malloc_bin_stats_t)); 3418 } 3419 3420 return (arena); 3421 } 3422 3423 /* 3424 * Calculate bin_info->run_size such that it meets the following constraints: 3425 * 3426 * *) bin_info->run_size <= arena_maxrun 3427 * *) bin_info->nregs <= RUN_MAXREGS 3428 * 3429 * bin_info->nregs and bin_info->reg0_offset are also calculated here, since 3430 * these settings are all interdependent. 
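 * reg_interval is derived from the redzone size, nregs from run_size
 * and reg_interval, and reg0_offset from all of the above, which is why
 * they are computed together in this function.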
3431 */ 3432 static void 3433 bin_info_run_size_calc(arena_bin_info_t *bin_info) 3434 { 3435 size_t pad_size; 3436 size_t try_run_size, perfect_run_size, actual_run_size; 3437 uint32_t try_nregs, perfect_nregs, actual_nregs; 3438 3439 /* 3440 * Determine redzone size based on minimum alignment and minimum 3441 * redzone size. Add padding to the end of the run if it is needed to 3442 * align the regions. The padding allows each redzone to be half the 3443 * minimum alignment; without the padding, each redzone would have to 3444 * be twice as large in order to maintain alignment. 3445 */ 3446 if (config_fill && unlikely(opt_redzone)) { 3447 size_t align_min = ZU(1) << (ffs_zu(bin_info->reg_size) - 1); 3448 if (align_min <= REDZONE_MINSIZE) { 3449 bin_info->redzone_size = REDZONE_MINSIZE; 3450 pad_size = 0; 3451 } else { 3452 bin_info->redzone_size = align_min >> 1; 3453 pad_size = bin_info->redzone_size; 3454 } 3455 } else { 3456 bin_info->redzone_size = 0; 3457 pad_size = 0; 3458 } 3459 bin_info->reg_interval = bin_info->reg_size + 3460 (bin_info->redzone_size << 1); 3461 3462 /* 3463 * Compute run size under ideal conditions (no redzones, no limit on run 3464 * size). 3465 */ 3466 try_run_size = PAGE; 3467 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); 3468 do { 3469 perfect_run_size = try_run_size; 3470 perfect_nregs = try_nregs; 3471 3472 try_run_size += PAGE; 3473 try_nregs = (uint32_t)(try_run_size / bin_info->reg_size); 3474 } while (perfect_run_size != perfect_nregs * bin_info->reg_size); 3475 assert(perfect_nregs <= RUN_MAXREGS); 3476 3477 actual_run_size = perfect_run_size; 3478 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3479 bin_info->reg_interval); 3480 3481 /* 3482 * Redzones can require enough padding that not even a single region can 3483 * fit within the number of pages that would normally be dedicated to a 3484 * run for this size class. Increase the run size until at least one 3485 * region fits. 3486 */ 3487 while (actual_nregs == 0) { 3488 assert(config_fill && unlikely(opt_redzone)); 3489 3490 actual_run_size += PAGE; 3491 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3492 bin_info->reg_interval); 3493 } 3494 3495 /* 3496 * Make sure that the run will fit within an arena chunk. 3497 */ 3498 while (actual_run_size > arena_maxrun) { 3499 actual_run_size -= PAGE; 3500 actual_nregs = (uint32_t)((actual_run_size - pad_size) / 3501 bin_info->reg_interval); 3502 } 3503 assert(actual_nregs > 0); 3504 assert(actual_run_size == s2u(actual_run_size)); 3505 3506 /* Copy final settings. 
*/ 3507 bin_info->run_size = actual_run_size; 3508 bin_info->nregs = actual_nregs; 3509 bin_info->reg0_offset = (uint32_t)(actual_run_size - (actual_nregs * 3510 bin_info->reg_interval) - pad_size + bin_info->redzone_size); 3511 3512 if (actual_run_size > small_maxrun) 3513 small_maxrun = actual_run_size; 3514 3515 assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs 3516 * bin_info->reg_interval) + pad_size == bin_info->run_size); 3517 } 3518 3519 static void 3520 bin_info_init(void) 3521 { 3522 arena_bin_info_t *bin_info; 3523 3524 #define BIN_INFO_INIT_bin_yes(index, size) \ 3525 bin_info = &arena_bin_info[index]; \ 3526 bin_info->reg_size = size; \ 3527 bin_info_run_size_calc(bin_info); \ 3528 bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs); 3529 #define BIN_INFO_INIT_bin_no(index, size) 3530 #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ 3531 BIN_INFO_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) 3532 SIZE_CLASSES 3533 #undef BIN_INFO_INIT_bin_yes 3534 #undef BIN_INFO_INIT_bin_no 3535 #undef SC 3536 } 3537 3538 static bool 3539 small_run_size_init(void) 3540 { 3541 3542 assert(small_maxrun != 0); 3543 3544 small_run_tab = (bool *)base_alloc(sizeof(bool) * (small_maxrun >> 3545 LG_PAGE)); 3546 if (small_run_tab == NULL) 3547 return (true); 3548 3549 #define TAB_INIT_bin_yes(index, size) { \ 3550 arena_bin_info_t *bin_info = &arena_bin_info[index]; \ 3551 small_run_tab[bin_info->run_size >> LG_PAGE] = true; \ 3552 } 3553 #define TAB_INIT_bin_no(index, size) 3554 #define SC(index, lg_grp, lg_delta, ndelta, bin, lg_delta_lookup) \ 3555 TAB_INIT_bin_##bin(index, (ZU(1)<<lg_grp) + (ZU(ndelta)<<lg_delta)) 3556 SIZE_CLASSES 3557 #undef TAB_INIT_bin_yes 3558 #undef TAB_INIT_bin_no 3559 #undef SC 3560 3561 return (false); 3562 } 3563 3564 static bool 3565 run_quantize_init(void) 3566 { 3567 unsigned i; 3568 3569 run_quantize_max = chunksize + large_pad; 3570 3571 run_quantize_floor_tab = (size_t *)base_alloc(sizeof(size_t) * 3572 (run_quantize_max >> LG_PAGE)); 3573 if (run_quantize_floor_tab == NULL) 3574 return (true); 3575 3576 run_quantize_ceil_tab = (size_t *)base_alloc(sizeof(size_t) * 3577 (run_quantize_max >> LG_PAGE)); 3578 if (run_quantize_ceil_tab == NULL) 3579 return (true); 3580 3581 for (i = 1; i <= run_quantize_max >> LG_PAGE; i++) { 3582 size_t run_size = i << LG_PAGE; 3583 3584 run_quantize_floor_tab[i-1] = 3585 run_quantize_floor_compute(run_size); 3586 run_quantize_ceil_tab[i-1] = 3587 run_quantize_ceil_compute(run_size); 3588 } 3589 3590 return (false); 3591 } 3592 3593 bool 3594 arena_boot(void) 3595 { 3596 unsigned i; 3597 3598 arena_lg_dirty_mult_default_set(opt_lg_dirty_mult); 3599 arena_decay_time_default_set(opt_decay_time); 3600 3601 /* 3602 * Compute the header size such that it is large enough to contain the 3603 * page map. The page map is biased to omit entries for the header 3604 * itself, so some iteration is necessary to compute the map bias. 3605 * 3606 * 1) Compute safe header_size and map_bias values that include enough 3607 * space for an unbiased page map. 3608 * 2) Refine map_bias based on (1) to omit the header pages in the page 3609 * map. The resulting map_bias may be one too small. 3610 * 3) Refine map_bias based on (2). The result will be >= the result 3611 * from (2), and will always be correct. 
3612 */ 3613 map_bias = 0; 3614 for (i = 0; i < 3; i++) { 3615 size_t header_size = offsetof(arena_chunk_t, map_bits) + 3616 ((sizeof(arena_chunk_map_bits_t) + 3617 sizeof(arena_chunk_map_misc_t)) * (chunk_npages-map_bias)); 3618 map_bias = (header_size + PAGE_MASK) >> LG_PAGE; 3619 } 3620 assert(map_bias > 0); 3621 3622 map_misc_offset = offsetof(arena_chunk_t, map_bits) + 3623 sizeof(arena_chunk_map_bits_t) * (chunk_npages-map_bias); 3624 3625 arena_maxrun = chunksize - (map_bias << LG_PAGE); 3626 assert(arena_maxrun > 0); 3627 large_maxclass = index2size(size2index(chunksize)-1); 3628 if (large_maxclass > arena_maxrun) { 3629 /* 3630 * For small chunk sizes it's possible for there to be fewer 3631 * non-header pages available than are necessary to serve the 3632 * size classes just below chunksize. 3633 */ 3634 large_maxclass = arena_maxrun; 3635 } 3636 assert(large_maxclass > 0); 3637 nlclasses = size2index(large_maxclass) - size2index(SMALL_MAXCLASS); 3638 nhclasses = NSIZES - nlclasses - NBINS; 3639 3640 bin_info_init(); 3641 if (small_run_size_init()) 3642 return (true); 3643 if (run_quantize_init()) 3644 return (true); 3645 3646 runs_avail_bias = size2index(PAGE); 3647 runs_avail_nclasses = size2index(run_quantize_max)+1 - runs_avail_bias; 3648 3649 return (false); 3650 } 3651 3652 void 3653 arena_prefork(arena_t *arena) 3654 { 3655 unsigned i; 3656 3657 malloc_mutex_prefork(&arena->lock); 3658 malloc_mutex_prefork(&arena->huge_mtx); 3659 malloc_mutex_prefork(&arena->chunks_mtx); 3660 malloc_mutex_prefork(&arena->node_cache_mtx); 3661 for (i = 0; i < NBINS; i++) 3662 malloc_mutex_prefork(&arena->bins[i].lock); 3663 } 3664 3665 void 3666 arena_postfork_parent(arena_t *arena) 3667 { 3668 unsigned i; 3669 3670 for (i = 0; i < NBINS; i++) 3671 malloc_mutex_postfork_parent(&arena->bins[i].lock); 3672 malloc_mutex_postfork_parent(&arena->node_cache_mtx); 3673 malloc_mutex_postfork_parent(&arena->chunks_mtx); 3674 malloc_mutex_postfork_parent(&arena->huge_mtx); 3675 malloc_mutex_postfork_parent(&arena->lock); 3676 } 3677 3678 void 3679 arena_postfork_child(arena_t *arena) 3680 { 3681 unsigned i; 3682 3683 for (i = 0; i < NBINS; i++) 3684 malloc_mutex_postfork_child(&arena->bins[i].lock); 3685 malloc_mutex_postfork_child(&arena->node_cache_mtx); 3686 malloc_mutex_postfork_child(&arena->chunks_mtx); 3687 malloc_mutex_postfork_child(&arena->huge_mtx); 3688 malloc_mutex_postfork_child(&arena->lock); 3689 } 3690