#define JEMALLOC_ARENA_C_
#include "jemalloc/internal/jemalloc_internal.h"

/******************************************************************************/
/* Data. */

ssize_t opt_lg_dirty_mult = LG_DIRTY_MULT_DEFAULT;
arena_bin_info_t arena_bin_info[NBINS];

JEMALLOC_ALIGNED(CACHELINE)
const uint8_t small_size2bin[] = {
#define S2B_8(i) i,
#define S2B_16(i) S2B_8(i) S2B_8(i)
#define S2B_32(i) S2B_16(i) S2B_16(i)
#define S2B_64(i) S2B_32(i) S2B_32(i)
#define S2B_128(i) S2B_64(i) S2B_64(i)
#define S2B_256(i) S2B_128(i) S2B_128(i)
#define S2B_512(i) S2B_256(i) S2B_256(i)
#define S2B_1024(i) S2B_512(i) S2B_512(i)
#define S2B_2048(i) S2B_1024(i) S2B_1024(i)
#define S2B_4096(i) S2B_2048(i) S2B_2048(i)
#define S2B_8192(i) S2B_4096(i) S2B_4096(i)
#define SIZE_CLASS(bin, delta, size) \
	S2B_##delta(bin)
	SIZE_CLASSES
#undef S2B_8
#undef S2B_16
#undef S2B_32
#undef S2B_64
#undef S2B_128
#undef S2B_256
#undef S2B_512
#undef S2B_1024
#undef S2B_2048
#undef S2B_4096
#undef S2B_8192
#undef SIZE_CLASS
};

/******************************************************************************/
/*
 * Function prototypes for static functions that are referenced prior to
 * definition.
 */

static void arena_purge(arena_t *arena, bool all);
static void arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty,
    bool cleaned);
static void arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);
static void arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk,
    arena_run_t *run, arena_bin_t *bin);

/******************************************************************************/

static inline int
arena_run_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	uintptr_t a_mapelm = (uintptr_t)a;
	uintptr_t b_mapelm = (uintptr_t)b;

	assert(a != NULL);
	assert(b != NULL);

	return ((a_mapelm > b_mapelm) - (a_mapelm < b_mapelm));
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_run_tree_, arena_run_tree_t, arena_chunk_map_t,
    u.rb_link, arena_run_comp)

static inline int
arena_avail_comp(arena_chunk_map_t *a, arena_chunk_map_t *b)
{
	int ret;
	size_t a_size = a->bits & ~PAGE_MASK;
	size_t b_size = b->bits & ~PAGE_MASK;

	ret = (a_size > b_size) - (a_size < b_size);
	if (ret == 0) {
		uintptr_t a_mapelm, b_mapelm;

		if ((a->bits & CHUNK_MAP_KEY) != CHUNK_MAP_KEY)
			a_mapelm = (uintptr_t)a;
		else {
			/*
			 * Treat keys as though they are lower than anything
			 * else.
			 */
			a_mapelm = 0;
		}
		b_mapelm = (uintptr_t)b;

		ret = (a_mapelm > b_mapelm) - (a_mapelm < b_mapelm);
	}

	return (ret);
}

/* Generate red-black tree functions. */
rb_gen(static UNUSED, arena_avail_tree_, arena_avail_tree_t, arena_chunk_map_t,
    u.rb_link, arena_avail_comp)

static inline int
arena_chunk_dirty_comp(arena_chunk_t *a, arena_chunk_t *b)
{

	assert(a != NULL);
	assert(b != NULL);

	/*
	 * Short-circuit for self comparison.  The following comparison code
	 * would come to the same result, but at the cost of executing the slow
	 * path.
	 */
	if (a == b)
		return (0);

	/*
	 * Order such that chunks with higher fragmentation are "less than"
	 * those with lower fragmentation -- purging order is from "least" to
	 * "greatest".
Fragmentation is measured as: 123 * 124 * mean current avail run size 125 * -------------------------------- 126 * mean defragmented avail run size 127 * 128 * navail 129 * ----------- 130 * nruns_avail nruns_avail-nruns_adjac 131 * = ========================= = ----------------------- 132 * navail nruns_avail 133 * ----------------------- 134 * nruns_avail-nruns_adjac 135 * 136 * The following code multiplies away the denominator prior to 137 * comparison, in order to avoid division. 138 * 139 */ 140 { 141 size_t a_val = (a->nruns_avail - a->nruns_adjac) * 142 b->nruns_avail; 143 size_t b_val = (b->nruns_avail - b->nruns_adjac) * 144 a->nruns_avail; 145 146 if (a_val < b_val) 147 return (1); 148 if (a_val > b_val) 149 return (-1); 150 } 151 /* 152 * Break ties by chunk address. For fragmented chunks, report lower 153 * addresses as "lower", so that fragmentation reduction happens first 154 * at lower addresses. However, use the opposite ordering for 155 * unfragmented chunks, in order to increase the chances of 156 * re-allocating dirty runs. 157 */ 158 { 159 uintptr_t a_chunk = (uintptr_t)a; 160 uintptr_t b_chunk = (uintptr_t)b; 161 int ret = ((a_chunk > b_chunk) - (a_chunk < b_chunk)); 162 if (a->nruns_adjac == 0) { 163 assert(b->nruns_adjac == 0); 164 ret = -ret; 165 } 166 return (ret); 167 } 168 } 169 170 /* Generate red-black tree functions. */ 171 rb_gen(static UNUSED, arena_chunk_dirty_, arena_chunk_tree_t, arena_chunk_t, 172 dirty_link, arena_chunk_dirty_comp) 173 174 static inline bool 175 arena_avail_adjac_pred(arena_chunk_t *chunk, size_t pageind) 176 { 177 bool ret; 178 179 if (pageind-1 < map_bias) 180 ret = false; 181 else { 182 ret = (arena_mapbits_allocated_get(chunk, pageind-1) == 0); 183 assert(ret == false || arena_mapbits_dirty_get(chunk, 184 pageind-1) != arena_mapbits_dirty_get(chunk, pageind)); 185 } 186 return (ret); 187 } 188 189 static inline bool 190 arena_avail_adjac_succ(arena_chunk_t *chunk, size_t pageind, size_t npages) 191 { 192 bool ret; 193 194 if (pageind+npages == chunk_npages) 195 ret = false; 196 else { 197 assert(pageind+npages < chunk_npages); 198 ret = (arena_mapbits_allocated_get(chunk, pageind+npages) == 0); 199 assert(ret == false || arena_mapbits_dirty_get(chunk, pageind) 200 != arena_mapbits_dirty_get(chunk, pageind+npages)); 201 } 202 return (ret); 203 } 204 205 static inline bool 206 arena_avail_adjac(arena_chunk_t *chunk, size_t pageind, size_t npages) 207 { 208 209 return (arena_avail_adjac_pred(chunk, pageind) || 210 arena_avail_adjac_succ(chunk, pageind, npages)); 211 } 212 213 static void 214 arena_avail_insert(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 215 size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ) 216 { 217 218 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 219 LG_PAGE)); 220 221 /* 222 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be 223 * removed and reinserted even if the run to be inserted is clean. 
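	 *
	 * Illustrative note (numbers are hypothetical): the dirty-chunk
	 * comparator above compares chunks by the ratio
	 * (nruns_avail-nruns_adjac)/nruns_avail with the division
	 * multiplied away, so a chunk with nruns_avail=4, nruns_adjac=2
	 * is weighed against one with nruns_avail=3, nruns_adjac=0 as
	 *
	 *   a_val = (4-2)*3 = 6  versus  b_val = (3-0)*4 = 12
	 *
	 * rather than as 2/4 versus 3/3.  Because nruns_avail and
	 * nruns_adjac are the comparator's keys, they must not change
	 * while the chunk is linked into chunks_dirty, hence the
	 * remove/reinsert below even when the run being inserted is
	 * clean.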
224 */ 225 if (chunk->ndirty != 0) 226 arena_chunk_dirty_remove(&arena->chunks_dirty, chunk); 227 228 if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind)) 229 chunk->nruns_adjac++; 230 if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages)) 231 chunk->nruns_adjac++; 232 chunk->nruns_avail++; 233 assert(chunk->nruns_avail > chunk->nruns_adjac); 234 235 if (arena_mapbits_dirty_get(chunk, pageind) != 0) { 236 arena->ndirty += npages; 237 chunk->ndirty += npages; 238 } 239 if (chunk->ndirty != 0) 240 arena_chunk_dirty_insert(&arena->chunks_dirty, chunk); 241 242 arena_avail_tree_insert(&arena->runs_avail, arena_mapp_get(chunk, 243 pageind)); 244 } 245 246 static void 247 arena_avail_remove(arena_t *arena, arena_chunk_t *chunk, size_t pageind, 248 size_t npages, bool maybe_adjac_pred, bool maybe_adjac_succ) 249 { 250 251 assert(npages == (arena_mapbits_unallocated_size_get(chunk, pageind) >> 252 LG_PAGE)); 253 254 /* 255 * chunks_dirty is keyed by nruns_{avail,adjac}, so the chunk must be 256 * removed and reinserted even if the run to be removed is clean. 257 */ 258 if (chunk->ndirty != 0) 259 arena_chunk_dirty_remove(&arena->chunks_dirty, chunk); 260 261 if (maybe_adjac_pred && arena_avail_adjac_pred(chunk, pageind)) 262 chunk->nruns_adjac--; 263 if (maybe_adjac_succ && arena_avail_adjac_succ(chunk, pageind, npages)) 264 chunk->nruns_adjac--; 265 chunk->nruns_avail--; 266 assert(chunk->nruns_avail > chunk->nruns_adjac || (chunk->nruns_avail 267 == 0 && chunk->nruns_adjac == 0)); 268 269 if (arena_mapbits_dirty_get(chunk, pageind) != 0) { 270 arena->ndirty -= npages; 271 chunk->ndirty -= npages; 272 } 273 if (chunk->ndirty != 0) 274 arena_chunk_dirty_insert(&arena->chunks_dirty, chunk); 275 276 arena_avail_tree_remove(&arena->runs_avail, arena_mapp_get(chunk, 277 pageind)); 278 } 279 280 static inline void * 281 arena_run_reg_alloc(arena_run_t *run, arena_bin_info_t *bin_info) 282 { 283 void *ret; 284 unsigned regind; 285 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + 286 (uintptr_t)bin_info->bitmap_offset); 287 288 assert(run->nfree > 0); 289 assert(bitmap_full(bitmap, &bin_info->bitmap_info) == false); 290 291 regind = bitmap_sfu(bitmap, &bin_info->bitmap_info); 292 ret = (void *)((uintptr_t)run + (uintptr_t)bin_info->reg0_offset + 293 (uintptr_t)(bin_info->reg_interval * regind)); 294 run->nfree--; 295 if (regind == run->nextind) 296 run->nextind++; 297 assert(regind < run->nextind); 298 return (ret); 299 } 300 301 static inline void 302 arena_run_reg_dalloc(arena_run_t *run, void *ptr) 303 { 304 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 305 size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 306 size_t mapbits = arena_mapbits_get(chunk, pageind); 307 size_t binind = arena_ptr_small_binind_get(ptr, mapbits); 308 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 309 unsigned regind = arena_run_regind(run, bin_info, ptr); 310 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + 311 (uintptr_t)bin_info->bitmap_offset); 312 313 assert(run->nfree < bin_info->nregs); 314 /* Freeing an interior pointer can cause assertion failure. */ 315 assert(((uintptr_t)ptr - ((uintptr_t)run + 316 (uintptr_t)bin_info->reg0_offset)) % 317 (uintptr_t)bin_info->reg_interval == 0); 318 assert((uintptr_t)ptr >= (uintptr_t)run + 319 (uintptr_t)bin_info->reg0_offset); 320 /* Freeing an unallocated pointer can cause assertion failure. 
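	 *
	 * Illustrative note (sizes are hypothetical): regions live at
	 * run + reg0_offset + regind*reg_interval, so regind is
	 * recovered by inverting that formula.  For a bin with
	 * reg0_offset=32 and reg_interval=64, a pointer 224 bytes past
	 * the run maps to regind = (224-32)/64 = 3, and the bitmap bit
	 * for region 3 must currently be set; a double free or a
	 * pointer into a never-allocated region trips the assertion
	 * below.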
*/ 321 assert(bitmap_get(bitmap, &bin_info->bitmap_info, regind)); 322 323 bitmap_unset(bitmap, &bin_info->bitmap_info, regind); 324 run->nfree++; 325 } 326 327 static inline void 328 arena_run_zero(arena_chunk_t *chunk, size_t run_ind, size_t npages) 329 { 330 331 VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + (run_ind << 332 LG_PAGE)), (npages << LG_PAGE)); 333 memset((void *)((uintptr_t)chunk + (run_ind << LG_PAGE)), 0, 334 (npages << LG_PAGE)); 335 } 336 337 static inline void 338 arena_run_page_mark_zeroed(arena_chunk_t *chunk, size_t run_ind) 339 { 340 341 VALGRIND_MAKE_MEM_DEFINED((void *)((uintptr_t)chunk + (run_ind << 342 LG_PAGE)), PAGE); 343 } 344 345 static inline void 346 arena_run_page_validate_zeroed(arena_chunk_t *chunk, size_t run_ind) 347 { 348 size_t i; 349 UNUSED size_t *p = (size_t *)((uintptr_t)chunk + (run_ind << LG_PAGE)); 350 351 arena_run_page_mark_zeroed(chunk, run_ind); 352 for (i = 0; i < PAGE / sizeof(size_t); i++) 353 assert(p[i] == 0); 354 } 355 356 static void 357 arena_cactive_update(arena_t *arena, size_t add_pages, size_t sub_pages) 358 { 359 360 if (config_stats) { 361 ssize_t cactive_diff = CHUNK_CEILING((arena->nactive + 362 add_pages) << LG_PAGE) - CHUNK_CEILING((arena->nactive - 363 sub_pages) << LG_PAGE); 364 if (cactive_diff != 0) 365 stats_cactive_add(cactive_diff); 366 } 367 } 368 369 static void 370 arena_run_split_remove(arena_t *arena, arena_chunk_t *chunk, size_t run_ind, 371 size_t flag_dirty, size_t need_pages) 372 { 373 size_t total_pages, rem_pages; 374 375 total_pages = arena_mapbits_unallocated_size_get(chunk, run_ind) >> 376 LG_PAGE; 377 assert(arena_mapbits_dirty_get(chunk, run_ind+total_pages-1) == 378 flag_dirty); 379 assert(need_pages <= total_pages); 380 rem_pages = total_pages - need_pages; 381 382 arena_avail_remove(arena, chunk, run_ind, total_pages, true, true); 383 arena_cactive_update(arena, need_pages, 0); 384 arena->nactive += need_pages; 385 386 /* Keep track of trailing unused pages for later use. */ 387 if (rem_pages > 0) { 388 if (flag_dirty != 0) { 389 arena_mapbits_unallocated_set(chunk, 390 run_ind+need_pages, (rem_pages << LG_PAGE), 391 flag_dirty); 392 arena_mapbits_unallocated_set(chunk, 393 run_ind+total_pages-1, (rem_pages << LG_PAGE), 394 flag_dirty); 395 } else { 396 arena_mapbits_unallocated_set(chunk, run_ind+need_pages, 397 (rem_pages << LG_PAGE), 398 arena_mapbits_unzeroed_get(chunk, 399 run_ind+need_pages)); 400 arena_mapbits_unallocated_set(chunk, 401 run_ind+total_pages-1, (rem_pages << LG_PAGE), 402 arena_mapbits_unzeroed_get(chunk, 403 run_ind+total_pages-1)); 404 } 405 arena_avail_insert(arena, chunk, run_ind+need_pages, rem_pages, 406 false, true); 407 } 408 } 409 410 static void 411 arena_run_split_large_helper(arena_t *arena, arena_run_t *run, size_t size, 412 bool remove, bool zero) 413 { 414 arena_chunk_t *chunk; 415 size_t flag_dirty, run_ind, need_pages, i; 416 417 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 418 run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); 419 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); 420 need_pages = (size >> LG_PAGE); 421 assert(need_pages > 0); 422 423 if (remove) { 424 arena_run_split_remove(arena, chunk, run_ind, flag_dirty, 425 need_pages); 426 } 427 428 if (zero) { 429 if (flag_dirty == 0) { 430 /* 431 * The run is clean, so some pages may be zeroed (i.e. 432 * never before touched). 
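			 *
			 * Each page is handled individually below:
			 * pages whose CHUNK_MAP_UNZEROED bit is set are
			 * memset to zero, while pages already known to
			 * be zeroed are merely validated (debug builds)
			 * or marked defined for Valgrind, so a clean
			 * run is never redundantly zeroed page by page.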
433 */ 434 for (i = 0; i < need_pages; i++) { 435 if (arena_mapbits_unzeroed_get(chunk, run_ind+i) 436 != 0) 437 arena_run_zero(chunk, run_ind+i, 1); 438 else if (config_debug) { 439 arena_run_page_validate_zeroed(chunk, 440 run_ind+i); 441 } else { 442 arena_run_page_mark_zeroed(chunk, 443 run_ind+i); 444 } 445 } 446 } else { 447 /* The run is dirty, so all pages must be zeroed. */ 448 arena_run_zero(chunk, run_ind, need_pages); 449 } 450 } else { 451 VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + 452 (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); 453 } 454 455 /* 456 * Set the last element first, in case the run only contains one page 457 * (i.e. both statements set the same element). 458 */ 459 arena_mapbits_large_set(chunk, run_ind+need_pages-1, 0, flag_dirty); 460 arena_mapbits_large_set(chunk, run_ind, size, flag_dirty); 461 } 462 463 static void 464 arena_run_split_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) 465 { 466 467 arena_run_split_large_helper(arena, run, size, true, zero); 468 } 469 470 static void 471 arena_run_init_large(arena_t *arena, arena_run_t *run, size_t size, bool zero) 472 { 473 474 arena_run_split_large_helper(arena, run, size, false, zero); 475 } 476 477 static void 478 arena_run_split_small(arena_t *arena, arena_run_t *run, size_t size, 479 size_t binind) 480 { 481 arena_chunk_t *chunk; 482 size_t flag_dirty, run_ind, need_pages, i; 483 484 assert(binind != BININD_INVALID); 485 486 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 487 run_ind = (unsigned)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); 488 flag_dirty = arena_mapbits_dirty_get(chunk, run_ind); 489 need_pages = (size >> LG_PAGE); 490 assert(need_pages > 0); 491 492 arena_run_split_remove(arena, chunk, run_ind, flag_dirty, need_pages); 493 494 /* 495 * Propagate the dirty and unzeroed flags to the allocated small run, 496 * so that arena_dalloc_bin_run() has the ability to conditionally trim 497 * clean pages. 498 */ 499 arena_mapbits_small_set(chunk, run_ind, 0, binind, flag_dirty); 500 /* 501 * The first page will always be dirtied during small run 502 * initialization, so a validation failure here would not actually 503 * cause an observable failure. 
504 */ 505 if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk, 506 run_ind) == 0) 507 arena_run_page_validate_zeroed(chunk, run_ind); 508 for (i = 1; i < need_pages - 1; i++) { 509 arena_mapbits_small_set(chunk, run_ind+i, i, binind, 0); 510 if (config_debug && flag_dirty == 0 && 511 arena_mapbits_unzeroed_get(chunk, run_ind+i) == 0) 512 arena_run_page_validate_zeroed(chunk, run_ind+i); 513 } 514 arena_mapbits_small_set(chunk, run_ind+need_pages-1, need_pages-1, 515 binind, flag_dirty); 516 if (config_debug && flag_dirty == 0 && arena_mapbits_unzeroed_get(chunk, 517 run_ind+need_pages-1) == 0) 518 arena_run_page_validate_zeroed(chunk, run_ind+need_pages-1); 519 VALGRIND_MAKE_MEM_UNDEFINED((void *)((uintptr_t)chunk + 520 (run_ind << LG_PAGE)), (need_pages << LG_PAGE)); 521 } 522 523 static arena_chunk_t * 524 arena_chunk_init_spare(arena_t *arena) 525 { 526 arena_chunk_t *chunk; 527 528 assert(arena->spare != NULL); 529 530 chunk = arena->spare; 531 arena->spare = NULL; 532 533 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); 534 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); 535 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == 536 arena_maxclass); 537 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == 538 arena_maxclass); 539 assert(arena_mapbits_dirty_get(chunk, map_bias) == 540 arena_mapbits_dirty_get(chunk, chunk_npages-1)); 541 542 return (chunk); 543 } 544 545 static arena_chunk_t * 546 arena_chunk_init_hard(arena_t *arena) 547 { 548 arena_chunk_t *chunk; 549 bool zero; 550 size_t unzeroed, i; 551 552 assert(arena->spare == NULL); 553 554 zero = false; 555 malloc_mutex_unlock(&arena->lock); 556 chunk = (arena_chunk_t *)chunk_alloc(chunksize, chunksize, false, 557 &zero, arena->dss_prec); 558 malloc_mutex_lock(&arena->lock); 559 if (chunk == NULL) 560 return (NULL); 561 if (config_stats) 562 arena->stats.mapped += chunksize; 563 564 chunk->arena = arena; 565 566 /* 567 * Claim that no pages are in use, since the header is merely overhead. 568 */ 569 chunk->ndirty = 0; 570 571 chunk->nruns_avail = 0; 572 chunk->nruns_adjac = 0; 573 574 /* 575 * Initialize the map to contain one maximal free untouched run. Mark 576 * the pages as zeroed iff chunk_alloc() returned a zeroed chunk. 577 */ 578 unzeroed = zero ? 0 : CHUNK_MAP_UNZEROED; 579 arena_mapbits_unallocated_set(chunk, map_bias, arena_maxclass, 580 unzeroed); 581 /* 582 * There is no need to initialize the internal page map entries unless 583 * the chunk is not zeroed. 
584 */ 585 if (zero == false) { 586 VALGRIND_MAKE_MEM_UNDEFINED((void *)arena_mapp_get(chunk, 587 map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk, 588 chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, 589 map_bias+1))); 590 for (i = map_bias+1; i < chunk_npages-1; i++) 591 arena_mapbits_unzeroed_set(chunk, i, unzeroed); 592 } else { 593 VALGRIND_MAKE_MEM_DEFINED((void *)arena_mapp_get(chunk, 594 map_bias+1), (size_t)((uintptr_t) arena_mapp_get(chunk, 595 chunk_npages-1) - (uintptr_t)arena_mapp_get(chunk, 596 map_bias+1))); 597 if (config_debug) { 598 for (i = map_bias+1; i < chunk_npages-1; i++) { 599 assert(arena_mapbits_unzeroed_get(chunk, i) == 600 unzeroed); 601 } 602 } 603 } 604 arena_mapbits_unallocated_set(chunk, chunk_npages-1, arena_maxclass, 605 unzeroed); 606 607 return (chunk); 608 } 609 610 static arena_chunk_t * 611 arena_chunk_alloc(arena_t *arena) 612 { 613 arena_chunk_t *chunk; 614 615 if (arena->spare != NULL) 616 chunk = arena_chunk_init_spare(arena); 617 else { 618 chunk = arena_chunk_init_hard(arena); 619 if (chunk == NULL) 620 return (NULL); 621 } 622 623 /* Insert the run into the runs_avail tree. */ 624 arena_avail_insert(arena, chunk, map_bias, chunk_npages-map_bias, 625 false, false); 626 627 return (chunk); 628 } 629 630 static void 631 arena_chunk_dealloc(arena_t *arena, arena_chunk_t *chunk) 632 { 633 assert(arena_mapbits_allocated_get(chunk, map_bias) == 0); 634 assert(arena_mapbits_allocated_get(chunk, chunk_npages-1) == 0); 635 assert(arena_mapbits_unallocated_size_get(chunk, map_bias) == 636 arena_maxclass); 637 assert(arena_mapbits_unallocated_size_get(chunk, chunk_npages-1) == 638 arena_maxclass); 639 assert(arena_mapbits_dirty_get(chunk, map_bias) == 640 arena_mapbits_dirty_get(chunk, chunk_npages-1)); 641 642 /* 643 * Remove run from the runs_avail tree, so that the arena does not use 644 * it. 645 */ 646 arena_avail_remove(arena, chunk, map_bias, chunk_npages-map_bias, 647 false, false); 648 649 if (arena->spare != NULL) { 650 arena_chunk_t *spare = arena->spare; 651 652 arena->spare = chunk; 653 malloc_mutex_unlock(&arena->lock); 654 chunk_dealloc((void *)spare, chunksize, true); 655 malloc_mutex_lock(&arena->lock); 656 if (config_stats) 657 arena->stats.mapped -= chunksize; 658 } else 659 arena->spare = chunk; 660 } 661 662 static arena_run_t * 663 arena_run_alloc_large_helper(arena_t *arena, size_t size, bool zero) 664 { 665 arena_run_t *run; 666 arena_chunk_map_t *mapelm, key; 667 668 key.bits = size | CHUNK_MAP_KEY; 669 mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key); 670 if (mapelm != NULL) { 671 arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); 672 size_t pageind = (((uintptr_t)mapelm - 673 (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) 674 + map_bias; 675 676 run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << 677 LG_PAGE)); 678 arena_run_split_large(arena, run, size, zero); 679 return (run); 680 } 681 682 return (NULL); 683 } 684 685 static arena_run_t * 686 arena_run_alloc_large(arena_t *arena, size_t size, bool zero) 687 { 688 arena_chunk_t *chunk; 689 arena_run_t *run; 690 691 assert(size <= arena_maxclass); 692 assert((size & PAGE_MASK) == 0); 693 694 /* Search the arena's chunks for the lowest best fit. */ 695 run = arena_run_alloc_large_helper(arena, size, zero); 696 if (run != NULL) 697 return (run); 698 699 /* 700 * No usable runs. Create a new chunk from which to allocate the run. 
701 */ 702 chunk = arena_chunk_alloc(arena); 703 if (chunk != NULL) { 704 run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); 705 arena_run_split_large(arena, run, size, zero); 706 return (run); 707 } 708 709 /* 710 * arena_chunk_alloc() failed, but another thread may have made 711 * sufficient memory available while this one dropped arena->lock in 712 * arena_chunk_alloc(), so search one more time. 713 */ 714 return (arena_run_alloc_large_helper(arena, size, zero)); 715 } 716 717 static arena_run_t * 718 arena_run_alloc_small_helper(arena_t *arena, size_t size, size_t binind) 719 { 720 arena_run_t *run; 721 arena_chunk_map_t *mapelm, key; 722 723 key.bits = size | CHUNK_MAP_KEY; 724 mapelm = arena_avail_tree_nsearch(&arena->runs_avail, &key); 725 if (mapelm != NULL) { 726 arena_chunk_t *run_chunk = CHUNK_ADDR2BASE(mapelm); 727 size_t pageind = (((uintptr_t)mapelm - 728 (uintptr_t)run_chunk->map) / sizeof(arena_chunk_map_t)) 729 + map_bias; 730 731 run = (arena_run_t *)((uintptr_t)run_chunk + (pageind << 732 LG_PAGE)); 733 arena_run_split_small(arena, run, size, binind); 734 return (run); 735 } 736 737 return (NULL); 738 } 739 740 static arena_run_t * 741 arena_run_alloc_small(arena_t *arena, size_t size, size_t binind) 742 { 743 arena_chunk_t *chunk; 744 arena_run_t *run; 745 746 assert(size <= arena_maxclass); 747 assert((size & PAGE_MASK) == 0); 748 assert(binind != BININD_INVALID); 749 750 /* Search the arena's chunks for the lowest best fit. */ 751 run = arena_run_alloc_small_helper(arena, size, binind); 752 if (run != NULL) 753 return (run); 754 755 /* 756 * No usable runs. Create a new chunk from which to allocate the run. 757 */ 758 chunk = arena_chunk_alloc(arena); 759 if (chunk != NULL) { 760 run = (arena_run_t *)((uintptr_t)chunk + (map_bias << LG_PAGE)); 761 arena_run_split_small(arena, run, size, binind); 762 return (run); 763 } 764 765 /* 766 * arena_chunk_alloc() failed, but another thread may have made 767 * sufficient memory available while this one dropped arena->lock in 768 * arena_chunk_alloc(), so search one more time. 769 */ 770 return (arena_run_alloc_small_helper(arena, size, binind)); 771 } 772 773 static inline void 774 arena_maybe_purge(arena_t *arena) 775 { 776 size_t npurgeable, threshold; 777 778 /* Don't purge if the option is disabled. */ 779 if (opt_lg_dirty_mult < 0) 780 return; 781 /* Don't purge if all dirty pages are already being purged. */ 782 if (arena->ndirty <= arena->npurgatory) 783 return; 784 npurgeable = arena->ndirty - arena->npurgatory; 785 threshold = (arena->nactive >> opt_lg_dirty_mult); 786 /* 787 * Don't purge unless the number of purgeable pages exceeds the 788 * threshold. 789 */ 790 if (npurgeable <= threshold) 791 return; 792 793 arena_purge(arena, false); 794 } 795 796 static arena_chunk_t * 797 chunks_dirty_iter_cb(arena_chunk_tree_t *tree, arena_chunk_t *chunk, void *arg) 798 { 799 size_t *ndirty = (size_t *)arg; 800 801 assert(chunk->ndirty != 0); 802 *ndirty += chunk->ndirty; 803 return (NULL); 804 } 805 806 static size_t 807 arena_compute_npurgatory(arena_t *arena, bool all) 808 { 809 size_t npurgatory, npurgeable; 810 811 /* 812 * Compute the minimum number of pages that this thread should try to 813 * purge. 
814 */ 815 npurgeable = arena->ndirty - arena->npurgatory; 816 817 if (all == false) { 818 size_t threshold = (arena->nactive >> opt_lg_dirty_mult); 819 820 npurgatory = npurgeable - threshold; 821 } else 822 npurgatory = npurgeable; 823 824 return (npurgatory); 825 } 826 827 static void 828 arena_chunk_stash_dirty(arena_t *arena, arena_chunk_t *chunk, bool all, 829 arena_chunk_mapelms_t *mapelms) 830 { 831 size_t pageind, npages; 832 833 /* 834 * Temporarily allocate free dirty runs within chunk. If all is false, 835 * only operate on dirty runs that are fragments; otherwise operate on 836 * all dirty runs. 837 */ 838 for (pageind = map_bias; pageind < chunk_npages; pageind += npages) { 839 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); 840 if (arena_mapbits_allocated_get(chunk, pageind) == 0) { 841 size_t run_size = 842 arena_mapbits_unallocated_size_get(chunk, pageind); 843 844 npages = run_size >> LG_PAGE; 845 assert(pageind + npages <= chunk_npages); 846 assert(arena_mapbits_dirty_get(chunk, pageind) == 847 arena_mapbits_dirty_get(chunk, pageind+npages-1)); 848 849 if (arena_mapbits_dirty_get(chunk, pageind) != 0 && 850 (all || arena_avail_adjac(chunk, pageind, 851 npages))) { 852 arena_run_t *run = (arena_run_t *)((uintptr_t) 853 chunk + (uintptr_t)(pageind << LG_PAGE)); 854 855 arena_run_split_large(arena, run, run_size, 856 false); 857 /* Append to list for later processing. */ 858 ql_elm_new(mapelm, u.ql_link); 859 ql_tail_insert(mapelms, mapelm, u.ql_link); 860 } 861 } else { 862 /* Skip run. */ 863 if (arena_mapbits_large_get(chunk, pageind) != 0) { 864 npages = arena_mapbits_large_size_get(chunk, 865 pageind) >> LG_PAGE; 866 } else { 867 size_t binind; 868 arena_bin_info_t *bin_info; 869 arena_run_t *run = (arena_run_t *)((uintptr_t) 870 chunk + (uintptr_t)(pageind << LG_PAGE)); 871 872 assert(arena_mapbits_small_runind_get(chunk, 873 pageind) == 0); 874 binind = arena_bin_index(arena, run->bin); 875 bin_info = &arena_bin_info[binind]; 876 npages = bin_info->run_size >> LG_PAGE; 877 } 878 } 879 } 880 assert(pageind == chunk_npages); 881 assert(chunk->ndirty == 0 || all == false); 882 assert(chunk->nruns_adjac == 0); 883 } 884 885 static size_t 886 arena_chunk_purge_stashed(arena_t *arena, arena_chunk_t *chunk, 887 arena_chunk_mapelms_t *mapelms) 888 { 889 size_t npurged, pageind, npages, nmadvise; 890 arena_chunk_map_t *mapelm; 891 892 malloc_mutex_unlock(&arena->lock); 893 if (config_stats) 894 nmadvise = 0; 895 npurged = 0; 896 ql_foreach(mapelm, mapelms, u.ql_link) { 897 bool unzeroed; 898 size_t flag_unzeroed, i; 899 900 pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / 901 sizeof(arena_chunk_map_t)) + map_bias; 902 npages = arena_mapbits_large_size_get(chunk, pageind) >> 903 LG_PAGE; 904 assert(pageind + npages <= chunk_npages); 905 unzeroed = pages_purge((void *)((uintptr_t)chunk + (pageind << 906 LG_PAGE)), (npages << LG_PAGE)); 907 flag_unzeroed = unzeroed ? CHUNK_MAP_UNZEROED : 0; 908 /* 909 * Set the unzeroed flag for all pages, now that pages_purge() 910 * has returned whether the pages were zeroed as a side effect 911 * of purging. This chunk map modification is safe even though 912 * the arena mutex isn't currently owned by this thread, 913 * because the run is marked as allocated, thus protecting it 914 * from being modified by any other thread. As long as these 915 * writes don't perturb the first and last elements' 916 * CHUNK_MAP_ALLOCATED bits, behavior is well defined. 
917 */ 918 for (i = 0; i < npages; i++) { 919 arena_mapbits_unzeroed_set(chunk, pageind+i, 920 flag_unzeroed); 921 } 922 npurged += npages; 923 if (config_stats) 924 nmadvise++; 925 } 926 malloc_mutex_lock(&arena->lock); 927 if (config_stats) 928 arena->stats.nmadvise += nmadvise; 929 930 return (npurged); 931 } 932 933 static void 934 arena_chunk_unstash_purged(arena_t *arena, arena_chunk_t *chunk, 935 arena_chunk_mapelms_t *mapelms) 936 { 937 arena_chunk_map_t *mapelm; 938 size_t pageind; 939 940 /* Deallocate runs. */ 941 for (mapelm = ql_first(mapelms); mapelm != NULL; 942 mapelm = ql_first(mapelms)) { 943 arena_run_t *run; 944 945 pageind = (((uintptr_t)mapelm - (uintptr_t)chunk->map) / 946 sizeof(arena_chunk_map_t)) + map_bias; 947 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)(pageind << 948 LG_PAGE)); 949 ql_remove(mapelms, mapelm, u.ql_link); 950 arena_run_dalloc(arena, run, false, true); 951 } 952 } 953 954 static inline size_t 955 arena_chunk_purge(arena_t *arena, arena_chunk_t *chunk, bool all) 956 { 957 size_t npurged; 958 arena_chunk_mapelms_t mapelms; 959 960 ql_new(&mapelms); 961 962 /* 963 * If chunk is the spare, temporarily re-allocate it, 1) so that its 964 * run is reinserted into runs_avail, and 2) so that it cannot be 965 * completely discarded by another thread while arena->lock is dropped 966 * by this thread. Note that the arena_run_dalloc() call will 967 * implicitly deallocate the chunk, so no explicit action is required 968 * in this function to deallocate the chunk. 969 * 970 * Note that once a chunk contains dirty pages, it cannot again contain 971 * a single run unless 1) it is a dirty run, or 2) this function purges 972 * dirty pages and causes the transition to a single clean run. Thus 973 * (chunk == arena->spare) is possible, but it is not possible for 974 * this function to be called on the spare unless it contains a dirty 975 * run. 976 */ 977 if (chunk == arena->spare) { 978 assert(arena_mapbits_dirty_get(chunk, map_bias) != 0); 979 assert(arena_mapbits_dirty_get(chunk, chunk_npages-1) != 0); 980 981 arena_chunk_alloc(arena); 982 } 983 984 if (config_stats) 985 arena->stats.purged += chunk->ndirty; 986 987 /* 988 * Operate on all dirty runs if there is no clean/dirty run 989 * fragmentation. 990 */ 991 if (chunk->nruns_adjac == 0) 992 all = true; 993 994 arena_chunk_stash_dirty(arena, chunk, all, &mapelms); 995 npurged = arena_chunk_purge_stashed(arena, chunk, &mapelms); 996 arena_chunk_unstash_purged(arena, chunk, &mapelms); 997 998 return (npurged); 999 } 1000 1001 static void 1002 arena_purge(arena_t *arena, bool all) 1003 { 1004 arena_chunk_t *chunk; 1005 size_t npurgatory; 1006 if (config_debug) { 1007 size_t ndirty = 0; 1008 1009 arena_chunk_dirty_iter(&arena->chunks_dirty, NULL, 1010 chunks_dirty_iter_cb, (void *)&ndirty); 1011 assert(ndirty == arena->ndirty); 1012 } 1013 assert(arena->ndirty > arena->npurgatory || all); 1014 assert((arena->nactive >> opt_lg_dirty_mult) < (arena->ndirty - 1015 arena->npurgatory) || all); 1016 1017 if (config_stats) 1018 arena->stats.npurge++; 1019 1020 /* 1021 * Add the minimum number of pages this thread should try to purge to 1022 * arena->npurgatory. This will keep multiple threads from racing to 1023 * reduce ndirty below the threshold. 1024 */ 1025 npurgatory = arena_compute_npurgatory(arena, all); 1026 arena->npurgatory += npurgatory; 1027 1028 while (npurgatory > 0) { 1029 size_t npurgeable, npurged, nunpurged; 1030 1031 /* Get next chunk with dirty pages. 
*/ 1032 chunk = arena_chunk_dirty_first(&arena->chunks_dirty); 1033 if (chunk == NULL) { 1034 /* 1035 * This thread was unable to purge as many pages as 1036 * originally intended, due to races with other threads 1037 * that either did some of the purging work, or re-used 1038 * dirty pages. 1039 */ 1040 arena->npurgatory -= npurgatory; 1041 return; 1042 } 1043 npurgeable = chunk->ndirty; 1044 assert(npurgeable != 0); 1045 1046 if (npurgeable > npurgatory && chunk->nruns_adjac == 0) { 1047 /* 1048 * This thread will purge all the dirty pages in chunk, 1049 * so set npurgatory to reflect this thread's intent to 1050 * purge the pages. This tends to reduce the chances 1051 * of the following scenario: 1052 * 1053 * 1) This thread sets arena->npurgatory such that 1054 * (arena->ndirty - arena->npurgatory) is at the 1055 * threshold. 1056 * 2) This thread drops arena->lock. 1057 * 3) Another thread causes one or more pages to be 1058 * dirtied, and immediately determines that it must 1059 * purge dirty pages. 1060 * 1061 * If this scenario *does* play out, that's okay, 1062 * because all of the purging work being done really 1063 * needs to happen. 1064 */ 1065 arena->npurgatory += npurgeable - npurgatory; 1066 npurgatory = npurgeable; 1067 } 1068 1069 /* 1070 * Keep track of how many pages are purgeable, versus how many 1071 * actually get purged, and adjust counters accordingly. 1072 */ 1073 arena->npurgatory -= npurgeable; 1074 npurgatory -= npurgeable; 1075 npurged = arena_chunk_purge(arena, chunk, all); 1076 nunpurged = npurgeable - npurged; 1077 arena->npurgatory += nunpurged; 1078 npurgatory += nunpurged; 1079 } 1080 } 1081 1082 void 1083 arena_purge_all(arena_t *arena) 1084 { 1085 1086 malloc_mutex_lock(&arena->lock); 1087 arena_purge(arena, true); 1088 malloc_mutex_unlock(&arena->lock); 1089 } 1090 1091 static void 1092 arena_run_coalesce(arena_t *arena, arena_chunk_t *chunk, size_t *p_size, 1093 size_t *p_run_ind, size_t *p_run_pages, size_t flag_dirty) 1094 { 1095 size_t size = *p_size; 1096 size_t run_ind = *p_run_ind; 1097 size_t run_pages = *p_run_pages; 1098 1099 /* Try to coalesce forward. */ 1100 if (run_ind + run_pages < chunk_npages && 1101 arena_mapbits_allocated_get(chunk, run_ind+run_pages) == 0 && 1102 arena_mapbits_dirty_get(chunk, run_ind+run_pages) == flag_dirty) { 1103 size_t nrun_size = arena_mapbits_unallocated_size_get(chunk, 1104 run_ind+run_pages); 1105 size_t nrun_pages = nrun_size >> LG_PAGE; 1106 1107 /* 1108 * Remove successor from runs_avail; the coalesced run is 1109 * inserted later. 1110 */ 1111 assert(arena_mapbits_unallocated_size_get(chunk, 1112 run_ind+run_pages+nrun_pages-1) == nrun_size); 1113 assert(arena_mapbits_dirty_get(chunk, 1114 run_ind+run_pages+nrun_pages-1) == flag_dirty); 1115 arena_avail_remove(arena, chunk, run_ind+run_pages, nrun_pages, 1116 false, true); 1117 1118 size += nrun_size; 1119 run_pages += nrun_pages; 1120 1121 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 1122 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 1123 size); 1124 } 1125 1126 /* Try to coalesce backward. */ 1127 if (run_ind > map_bias && arena_mapbits_allocated_get(chunk, 1128 run_ind-1) == 0 && arena_mapbits_dirty_get(chunk, run_ind-1) == 1129 flag_dirty) { 1130 size_t prun_size = arena_mapbits_unallocated_size_get(chunk, 1131 run_ind-1); 1132 size_t prun_pages = prun_size >> LG_PAGE; 1133 1134 run_ind -= prun_pages; 1135 1136 /* 1137 * Remove predecessor from runs_avail; the coalesced run is 1138 * inserted later. 
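		 *
		 * Example (page indices are hypothetical): freeing a
		 * 2-page run at run_ind=10 next to a free 3-page
		 * predecessor occupying pages 7..9 rewinds run_ind to 7
		 * and grows run_pages to 5; the caller then reinserts
		 * the single coalesced 5-page run into runs_avail.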
1139 */ 1140 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 1141 prun_size); 1142 assert(arena_mapbits_dirty_get(chunk, run_ind) == flag_dirty); 1143 arena_avail_remove(arena, chunk, run_ind, prun_pages, true, 1144 false); 1145 1146 size += prun_size; 1147 run_pages += prun_pages; 1148 1149 arena_mapbits_unallocated_size_set(chunk, run_ind, size); 1150 arena_mapbits_unallocated_size_set(chunk, run_ind+run_pages-1, 1151 size); 1152 } 1153 1154 *p_size = size; 1155 *p_run_ind = run_ind; 1156 *p_run_pages = run_pages; 1157 } 1158 1159 static void 1160 arena_run_dalloc(arena_t *arena, arena_run_t *run, bool dirty, bool cleaned) 1161 { 1162 arena_chunk_t *chunk; 1163 size_t size, run_ind, run_pages, flag_dirty; 1164 1165 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1166 run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); 1167 assert(run_ind >= map_bias); 1168 assert(run_ind < chunk_npages); 1169 if (arena_mapbits_large_get(chunk, run_ind) != 0) { 1170 size = arena_mapbits_large_size_get(chunk, run_ind); 1171 assert(size == PAGE || 1172 arena_mapbits_large_size_get(chunk, 1173 run_ind+(size>>LG_PAGE)-1) == 0); 1174 } else { 1175 size_t binind = arena_bin_index(arena, run->bin); 1176 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 1177 size = bin_info->run_size; 1178 } 1179 run_pages = (size >> LG_PAGE); 1180 arena_cactive_update(arena, 0, run_pages); 1181 arena->nactive -= run_pages; 1182 1183 /* 1184 * The run is dirty if the caller claims to have dirtied it, as well as 1185 * if it was already dirty before being allocated and the caller 1186 * doesn't claim to have cleaned it. 1187 */ 1188 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1189 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 1190 if (cleaned == false && arena_mapbits_dirty_get(chunk, run_ind) != 0) 1191 dirty = true; 1192 flag_dirty = dirty ? CHUNK_MAP_DIRTY : 0; 1193 1194 /* Mark pages as unallocated in the chunk map. */ 1195 if (dirty) { 1196 arena_mapbits_unallocated_set(chunk, run_ind, size, 1197 CHUNK_MAP_DIRTY); 1198 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 1199 CHUNK_MAP_DIRTY); 1200 } else { 1201 arena_mapbits_unallocated_set(chunk, run_ind, size, 1202 arena_mapbits_unzeroed_get(chunk, run_ind)); 1203 arena_mapbits_unallocated_set(chunk, run_ind+run_pages-1, size, 1204 arena_mapbits_unzeroed_get(chunk, run_ind+run_pages-1)); 1205 } 1206 1207 arena_run_coalesce(arena, chunk, &size, &run_ind, &run_pages, 1208 flag_dirty); 1209 1210 /* Insert into runs_avail, now that coalescing is complete. */ 1211 assert(arena_mapbits_unallocated_size_get(chunk, run_ind) == 1212 arena_mapbits_unallocated_size_get(chunk, run_ind+run_pages-1)); 1213 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1214 arena_mapbits_dirty_get(chunk, run_ind+run_pages-1)); 1215 arena_avail_insert(arena, chunk, run_ind, run_pages, true, true); 1216 1217 /* Deallocate chunk if it is now completely unused. */ 1218 if (size == arena_maxclass) { 1219 assert(run_ind == map_bias); 1220 assert(run_pages == (arena_maxclass >> LG_PAGE)); 1221 arena_chunk_dealloc(arena, chunk); 1222 } 1223 1224 /* 1225 * It is okay to do dirty page processing here even if the chunk was 1226 * deallocated above, since in that case it is the spare. Waiting 1227 * until after possible chunk deallocation to do dirty processing 1228 * allows for an old spare to be fully deallocated, thus decreasing the 1229 * chances of spuriously crossing the dirty page purging threshold. 
1230 */ 1231 if (dirty) 1232 arena_maybe_purge(arena); 1233 } 1234 1235 static void 1236 arena_run_trim_head(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1237 size_t oldsize, size_t newsize) 1238 { 1239 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; 1240 size_t head_npages = (oldsize - newsize) >> LG_PAGE; 1241 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 1242 1243 assert(oldsize > newsize); 1244 1245 /* 1246 * Update the chunk map so that arena_run_dalloc() can treat the 1247 * leading run as separately allocated. Set the last element of each 1248 * run first, in case of single-page runs. 1249 */ 1250 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 1251 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); 1252 arena_mapbits_large_set(chunk, pageind, oldsize-newsize, flag_dirty); 1253 1254 if (config_debug) { 1255 UNUSED size_t tail_npages = newsize >> LG_PAGE; 1256 assert(arena_mapbits_large_size_get(chunk, 1257 pageind+head_npages+tail_npages-1) == 0); 1258 assert(arena_mapbits_dirty_get(chunk, 1259 pageind+head_npages+tail_npages-1) == flag_dirty); 1260 } 1261 arena_mapbits_large_set(chunk, pageind+head_npages, newsize, 1262 flag_dirty); 1263 1264 arena_run_dalloc(arena, run, false, false); 1265 } 1266 1267 static void 1268 arena_run_trim_tail(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1269 size_t oldsize, size_t newsize, bool dirty) 1270 { 1271 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; 1272 size_t head_npages = newsize >> LG_PAGE; 1273 size_t flag_dirty = arena_mapbits_dirty_get(chunk, pageind); 1274 1275 assert(oldsize > newsize); 1276 1277 /* 1278 * Update the chunk map so that arena_run_dalloc() can treat the 1279 * trailing run as separately allocated. Set the last element of each 1280 * run first, in case of single-page runs. 
1281 */ 1282 assert(arena_mapbits_large_size_get(chunk, pageind) == oldsize); 1283 arena_mapbits_large_set(chunk, pageind+head_npages-1, 0, flag_dirty); 1284 arena_mapbits_large_set(chunk, pageind, newsize, flag_dirty); 1285 1286 if (config_debug) { 1287 UNUSED size_t tail_npages = (oldsize - newsize) >> LG_PAGE; 1288 assert(arena_mapbits_large_size_get(chunk, 1289 pageind+head_npages+tail_npages-1) == 0); 1290 assert(arena_mapbits_dirty_get(chunk, 1291 pageind+head_npages+tail_npages-1) == flag_dirty); 1292 } 1293 arena_mapbits_large_set(chunk, pageind+head_npages, oldsize-newsize, 1294 flag_dirty); 1295 1296 arena_run_dalloc(arena, (arena_run_t *)((uintptr_t)run + newsize), 1297 dirty, false); 1298 } 1299 1300 static arena_run_t * 1301 arena_bin_runs_first(arena_bin_t *bin) 1302 { 1303 arena_chunk_map_t *mapelm = arena_run_tree_first(&bin->runs); 1304 if (mapelm != NULL) { 1305 arena_chunk_t *chunk; 1306 size_t pageind; 1307 arena_run_t *run; 1308 1309 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(mapelm); 1310 pageind = ((((uintptr_t)mapelm - (uintptr_t)chunk->map) / 1311 sizeof(arena_chunk_map_t))) + map_bias; 1312 run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind - 1313 arena_mapbits_small_runind_get(chunk, pageind)) << 1314 LG_PAGE)); 1315 return (run); 1316 } 1317 1318 return (NULL); 1319 } 1320 1321 static void 1322 arena_bin_runs_insert(arena_bin_t *bin, arena_run_t *run) 1323 { 1324 arena_chunk_t *chunk = CHUNK_ADDR2BASE(run); 1325 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; 1326 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); 1327 1328 assert(arena_run_tree_search(&bin->runs, mapelm) == NULL); 1329 1330 arena_run_tree_insert(&bin->runs, mapelm); 1331 } 1332 1333 static void 1334 arena_bin_runs_remove(arena_bin_t *bin, arena_run_t *run) 1335 { 1336 arena_chunk_t *chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1337 size_t pageind = ((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE; 1338 arena_chunk_map_t *mapelm = arena_mapp_get(chunk, pageind); 1339 1340 assert(arena_run_tree_search(&bin->runs, mapelm) != NULL); 1341 1342 arena_run_tree_remove(&bin->runs, mapelm); 1343 } 1344 1345 static arena_run_t * 1346 arena_bin_nonfull_run_tryget(arena_bin_t *bin) 1347 { 1348 arena_run_t *run = arena_bin_runs_first(bin); 1349 if (run != NULL) { 1350 arena_bin_runs_remove(bin, run); 1351 if (config_stats) 1352 bin->stats.reruns++; 1353 } 1354 return (run); 1355 } 1356 1357 static arena_run_t * 1358 arena_bin_nonfull_run_get(arena_t *arena, arena_bin_t *bin) 1359 { 1360 arena_run_t *run; 1361 size_t binind; 1362 arena_bin_info_t *bin_info; 1363 1364 /* Look for a usable run. */ 1365 run = arena_bin_nonfull_run_tryget(bin); 1366 if (run != NULL) 1367 return (run); 1368 /* No existing runs have any space available. */ 1369 1370 binind = arena_bin_index(arena, bin); 1371 bin_info = &arena_bin_info[binind]; 1372 1373 /* Allocate a new run. */ 1374 malloc_mutex_unlock(&bin->lock); 1375 /******************************/ 1376 malloc_mutex_lock(&arena->lock); 1377 run = arena_run_alloc_small(arena, bin_info->run_size, binind); 1378 if (run != NULL) { 1379 bitmap_t *bitmap = (bitmap_t *)((uintptr_t)run + 1380 (uintptr_t)bin_info->bitmap_offset); 1381 1382 /* Initialize run internals. 
*/ 1383 run->bin = bin; 1384 run->nextind = 0; 1385 run->nfree = bin_info->nregs; 1386 bitmap_init(bitmap, &bin_info->bitmap_info); 1387 } 1388 malloc_mutex_unlock(&arena->lock); 1389 /********************************/ 1390 malloc_mutex_lock(&bin->lock); 1391 if (run != NULL) { 1392 if (config_stats) { 1393 bin->stats.nruns++; 1394 bin->stats.curruns++; 1395 } 1396 return (run); 1397 } 1398 1399 /* 1400 * arena_run_alloc_small() failed, but another thread may have made 1401 * sufficient memory available while this one dropped bin->lock above, 1402 * so search one more time. 1403 */ 1404 run = arena_bin_nonfull_run_tryget(bin); 1405 if (run != NULL) 1406 return (run); 1407 1408 return (NULL); 1409 } 1410 1411 /* Re-fill bin->runcur, then call arena_run_reg_alloc(). */ 1412 static void * 1413 arena_bin_malloc_hard(arena_t *arena, arena_bin_t *bin) 1414 { 1415 void *ret; 1416 size_t binind; 1417 arena_bin_info_t *bin_info; 1418 arena_run_t *run; 1419 1420 binind = arena_bin_index(arena, bin); 1421 bin_info = &arena_bin_info[binind]; 1422 bin->runcur = NULL; 1423 run = arena_bin_nonfull_run_get(arena, bin); 1424 if (bin->runcur != NULL && bin->runcur->nfree > 0) { 1425 /* 1426 * Another thread updated runcur while this one ran without the 1427 * bin lock in arena_bin_nonfull_run_get(). 1428 */ 1429 assert(bin->runcur->nfree > 0); 1430 ret = arena_run_reg_alloc(bin->runcur, bin_info); 1431 if (run != NULL) { 1432 arena_chunk_t *chunk; 1433 1434 /* 1435 * arena_run_alloc_small() may have allocated run, or 1436 * it may have pulled run from the bin's run tree. 1437 * Therefore it is unsafe to make any assumptions about 1438 * how run has previously been used, and 1439 * arena_bin_lower_run() must be called, as if a region 1440 * were just deallocated from the run. 1441 */ 1442 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1443 if (run->nfree == bin_info->nregs) 1444 arena_dalloc_bin_run(arena, chunk, run, bin); 1445 else 1446 arena_bin_lower_run(arena, chunk, run, bin); 1447 } 1448 return (ret); 1449 } 1450 1451 if (run == NULL) 1452 return (NULL); 1453 1454 bin->runcur = run; 1455 1456 assert(bin->runcur->nfree > 0); 1457 1458 return (arena_run_reg_alloc(bin->runcur, bin_info)); 1459 } 1460 1461 void 1462 arena_tcache_fill_small(arena_t *arena, tcache_bin_t *tbin, size_t binind, 1463 uint64_t prof_accumbytes) 1464 { 1465 unsigned i, nfill; 1466 arena_bin_t *bin; 1467 arena_run_t *run; 1468 void *ptr; 1469 1470 assert(tbin->ncached == 0); 1471 1472 if (config_prof && arena_prof_accum(arena, prof_accumbytes)) 1473 prof_idump(); 1474 bin = &arena->bins[binind]; 1475 malloc_mutex_lock(&bin->lock); 1476 for (i = 0, nfill = (tcache_bin_info[binind].ncached_max >> 1477 tbin->lg_fill_div); i < nfill; i++) { 1478 if ((run = bin->runcur) != NULL && run->nfree > 0) 1479 ptr = arena_run_reg_alloc(run, &arena_bin_info[binind]); 1480 else 1481 ptr = arena_bin_malloc_hard(arena, bin); 1482 if (ptr == NULL) 1483 break; 1484 if (config_fill && opt_junk) { 1485 arena_alloc_junk_small(ptr, &arena_bin_info[binind], 1486 true); 1487 } 1488 /* Insert such that low regions get used first. 
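		 *
		 * The loop hands out regions in ascending address order
		 * (the run bitmap always yields the lowest free
		 * region), and the store just below places the i-th
		 * pointer at avail[nfill-1-i], reversing that order, so
		 * a consumer that pops from the tail of avail sees the
		 * lowest addresses first.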
*/ 1489 tbin->avail[nfill - 1 - i] = ptr; 1490 } 1491 if (config_stats) { 1492 bin->stats.allocated += i * arena_bin_info[binind].reg_size; 1493 bin->stats.nmalloc += i; 1494 bin->stats.nrequests += tbin->tstats.nrequests; 1495 bin->stats.nfills++; 1496 tbin->tstats.nrequests = 0; 1497 } 1498 malloc_mutex_unlock(&bin->lock); 1499 tbin->ncached = i; 1500 } 1501 1502 void 1503 arena_alloc_junk_small(void *ptr, arena_bin_info_t *bin_info, bool zero) 1504 { 1505 1506 if (zero) { 1507 size_t redzone_size = bin_info->redzone_size; 1508 memset((void *)((uintptr_t)ptr - redzone_size), 0xa5, 1509 redzone_size); 1510 memset((void *)((uintptr_t)ptr + bin_info->reg_size), 0xa5, 1511 redzone_size); 1512 } else { 1513 memset((void *)((uintptr_t)ptr - bin_info->redzone_size), 0xa5, 1514 bin_info->reg_interval); 1515 } 1516 } 1517 1518 #ifdef JEMALLOC_JET 1519 #undef arena_redzone_corruption 1520 #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption_impl) 1521 #endif 1522 static void 1523 arena_redzone_corruption(void *ptr, size_t usize, bool after, 1524 size_t offset, uint8_t byte) 1525 { 1526 1527 malloc_printf("<jemalloc>: Corrupt redzone %zu byte%s %s %p " 1528 "(size %zu), byte=%#x\n", offset, (offset == 1) ? "" : "s", 1529 after ? "after" : "before", ptr, usize, byte); 1530 } 1531 #ifdef JEMALLOC_JET 1532 #undef arena_redzone_corruption 1533 #define arena_redzone_corruption JEMALLOC_N(arena_redzone_corruption) 1534 arena_redzone_corruption_t *arena_redzone_corruption = 1535 JEMALLOC_N(arena_redzone_corruption_impl); 1536 #endif 1537 1538 static void 1539 arena_redzones_validate(void *ptr, arena_bin_info_t *bin_info, bool reset) 1540 { 1541 size_t size = bin_info->reg_size; 1542 size_t redzone_size = bin_info->redzone_size; 1543 size_t i; 1544 bool error = false; 1545 1546 for (i = 1; i <= redzone_size; i++) { 1547 uint8_t *byte = (uint8_t *)((uintptr_t)ptr - i); 1548 if (*byte != 0xa5) { 1549 error = true; 1550 arena_redzone_corruption(ptr, size, false, i, *byte); 1551 if (reset) 1552 *byte = 0xa5; 1553 } 1554 } 1555 for (i = 0; i < redzone_size; i++) { 1556 uint8_t *byte = (uint8_t *)((uintptr_t)ptr + size + i); 1557 if (*byte != 0xa5) { 1558 error = true; 1559 arena_redzone_corruption(ptr, size, true, i, *byte); 1560 if (reset) 1561 *byte = 0xa5; 1562 } 1563 } 1564 if (opt_abort && error) 1565 abort(); 1566 } 1567 1568 #ifdef JEMALLOC_JET 1569 #undef arena_dalloc_junk_small 1570 #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small_impl) 1571 #endif 1572 void 1573 arena_dalloc_junk_small(void *ptr, arena_bin_info_t *bin_info) 1574 { 1575 size_t redzone_size = bin_info->redzone_size; 1576 1577 arena_redzones_validate(ptr, bin_info, false); 1578 memset((void *)((uintptr_t)ptr - redzone_size), 0x5a, 1579 bin_info->reg_interval); 1580 } 1581 #ifdef JEMALLOC_JET 1582 #undef arena_dalloc_junk_small 1583 #define arena_dalloc_junk_small JEMALLOC_N(arena_dalloc_junk_small) 1584 arena_dalloc_junk_small_t *arena_dalloc_junk_small = 1585 JEMALLOC_N(arena_dalloc_junk_small_impl); 1586 #endif 1587 1588 void 1589 arena_quarantine_junk_small(void *ptr, size_t usize) 1590 { 1591 size_t binind; 1592 arena_bin_info_t *bin_info; 1593 cassert(config_fill); 1594 assert(opt_junk); 1595 assert(opt_quarantine); 1596 assert(usize <= SMALL_MAXCLASS); 1597 1598 binind = SMALL_SIZE2BIN(usize); 1599 bin_info = &arena_bin_info[binind]; 1600 arena_redzones_validate(ptr, bin_info, true); 1601 } 1602 1603 void * 1604 arena_malloc_small(arena_t *arena, size_t size, bool zero) 1605 { 1606 void *ret; 1607 
arena_bin_t *bin; 1608 arena_run_t *run; 1609 size_t binind; 1610 1611 binind = SMALL_SIZE2BIN(size); 1612 assert(binind < NBINS); 1613 bin = &arena->bins[binind]; 1614 size = arena_bin_info[binind].reg_size; 1615 1616 malloc_mutex_lock(&bin->lock); 1617 if ((run = bin->runcur) != NULL && run->nfree > 0) 1618 ret = arena_run_reg_alloc(run, &arena_bin_info[binind]); 1619 else 1620 ret = arena_bin_malloc_hard(arena, bin); 1621 1622 if (ret == NULL) { 1623 malloc_mutex_unlock(&bin->lock); 1624 return (NULL); 1625 } 1626 1627 if (config_stats) { 1628 bin->stats.allocated += size; 1629 bin->stats.nmalloc++; 1630 bin->stats.nrequests++; 1631 } 1632 malloc_mutex_unlock(&bin->lock); 1633 if (config_prof && isthreaded == false && arena_prof_accum(arena, size)) 1634 prof_idump(); 1635 1636 if (zero == false) { 1637 if (config_fill) { 1638 if (opt_junk) { 1639 arena_alloc_junk_small(ret, 1640 &arena_bin_info[binind], false); 1641 } else if (opt_zero) 1642 memset(ret, 0, size); 1643 } 1644 VALGRIND_MAKE_MEM_UNDEFINED(ret, size); 1645 } else { 1646 if (config_fill && opt_junk) { 1647 arena_alloc_junk_small(ret, &arena_bin_info[binind], 1648 true); 1649 } 1650 VALGRIND_MAKE_MEM_UNDEFINED(ret, size); 1651 memset(ret, 0, size); 1652 } 1653 1654 return (ret); 1655 } 1656 1657 void * 1658 arena_malloc_large(arena_t *arena, size_t size, bool zero) 1659 { 1660 void *ret; 1661 UNUSED bool idump; 1662 1663 /* Large allocation. */ 1664 size = PAGE_CEILING(size); 1665 malloc_mutex_lock(&arena->lock); 1666 ret = (void *)arena_run_alloc_large(arena, size, zero); 1667 if (ret == NULL) { 1668 malloc_mutex_unlock(&arena->lock); 1669 return (NULL); 1670 } 1671 if (config_stats) { 1672 arena->stats.nmalloc_large++; 1673 arena->stats.nrequests_large++; 1674 arena->stats.allocated_large += size; 1675 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; 1676 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; 1677 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; 1678 } 1679 if (config_prof) 1680 idump = arena_prof_accum_locked(arena, size); 1681 malloc_mutex_unlock(&arena->lock); 1682 if (config_prof && idump) 1683 prof_idump(); 1684 1685 if (zero == false) { 1686 if (config_fill) { 1687 if (opt_junk) 1688 memset(ret, 0xa5, size); 1689 else if (opt_zero) 1690 memset(ret, 0, size); 1691 } 1692 } 1693 1694 return (ret); 1695 } 1696 1697 /* Only handles large allocations that require more than page alignment. 
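 *
 * Sketch of the trimming math in arena_palloc() (page size and sizes below
 * are hypothetical): with PAGE=4096, size=8192 and alignment=16384, the
 * function over-allocates alloc_size = 8192 + 16384 - 4096 = 20480 bytes,
 * which is guaranteed to contain a 16384-aligned address.  If the run
 * happens to start 8192 bytes past an alignment boundary, leadsize is 8192
 * and trailsize is 20480 - 8192 - 8192 = 4096; the lead and trail are
 * trimmed off and returned to the arena, leaving an aligned size-byte run.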
*/ 1698 void * 1699 arena_palloc(arena_t *arena, size_t size, size_t alignment, bool zero) 1700 { 1701 void *ret; 1702 size_t alloc_size, leadsize, trailsize; 1703 arena_run_t *run; 1704 arena_chunk_t *chunk; 1705 1706 assert((size & PAGE_MASK) == 0); 1707 1708 alignment = PAGE_CEILING(alignment); 1709 alloc_size = size + alignment - PAGE; 1710 1711 malloc_mutex_lock(&arena->lock); 1712 run = arena_run_alloc_large(arena, alloc_size, false); 1713 if (run == NULL) { 1714 malloc_mutex_unlock(&arena->lock); 1715 return (NULL); 1716 } 1717 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(run); 1718 1719 leadsize = ALIGNMENT_CEILING((uintptr_t)run, alignment) - 1720 (uintptr_t)run; 1721 assert(alloc_size >= leadsize + size); 1722 trailsize = alloc_size - leadsize - size; 1723 ret = (void *)((uintptr_t)run + leadsize); 1724 if (leadsize != 0) { 1725 arena_run_trim_head(arena, chunk, run, alloc_size, alloc_size - 1726 leadsize); 1727 } 1728 if (trailsize != 0) { 1729 arena_run_trim_tail(arena, chunk, ret, size + trailsize, size, 1730 false); 1731 } 1732 arena_run_init_large(arena, (arena_run_t *)ret, size, zero); 1733 1734 if (config_stats) { 1735 arena->stats.nmalloc_large++; 1736 arena->stats.nrequests_large++; 1737 arena->stats.allocated_large += size; 1738 arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++; 1739 arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++; 1740 arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++; 1741 } 1742 malloc_mutex_unlock(&arena->lock); 1743 1744 if (config_fill && zero == false) { 1745 if (opt_junk) 1746 memset(ret, 0xa5, size); 1747 else if (opt_zero) 1748 memset(ret, 0, size); 1749 } 1750 return (ret); 1751 } 1752 1753 void 1754 arena_prof_promoted(const void *ptr, size_t size) 1755 { 1756 arena_chunk_t *chunk; 1757 size_t pageind, binind; 1758 1759 cassert(config_prof); 1760 assert(ptr != NULL); 1761 assert(CHUNK_ADDR2BASE(ptr) != ptr); 1762 assert(isalloc(ptr, false) == PAGE); 1763 assert(isalloc(ptr, true) == PAGE); 1764 assert(size <= SMALL_MAXCLASS); 1765 1766 chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr); 1767 pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE; 1768 binind = SMALL_SIZE2BIN(size); 1769 assert(binind < NBINS); 1770 arena_mapbits_large_binind_set(chunk, pageind, binind); 1771 1772 assert(isalloc(ptr, false) == PAGE); 1773 assert(isalloc(ptr, true) == size); 1774 } 1775 1776 static void 1777 arena_dissociate_bin_run(arena_chunk_t *chunk, arena_run_t *run, 1778 arena_bin_t *bin) 1779 { 1780 1781 /* Dissociate run from bin. */ 1782 if (run == bin->runcur) 1783 bin->runcur = NULL; 1784 else { 1785 size_t binind = arena_bin_index(chunk->arena, bin); 1786 arena_bin_info_t *bin_info = &arena_bin_info[binind]; 1787 1788 if (bin_info->nregs != 1) { 1789 /* 1790 * This block's conditional is necessary because if the 1791 * run only contains one region, then it never gets 1792 * inserted into the non-full runs tree. 
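			 *
			 * With a single region the run is either
			 * completely full or completely empty, so it
			 * never exists in the partially-full state that
			 * bin->runs tracks, and removing it here would
			 * touch a tree node that was never linked in.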
1793 */ 1794 arena_bin_runs_remove(bin, run); 1795 } 1796 } 1797 } 1798 1799 static void 1800 arena_dalloc_bin_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1801 arena_bin_t *bin) 1802 { 1803 size_t binind; 1804 arena_bin_info_t *bin_info; 1805 size_t npages, run_ind, past; 1806 1807 assert(run != bin->runcur); 1808 assert(arena_run_tree_search(&bin->runs, 1809 arena_mapp_get(chunk, ((uintptr_t)run-(uintptr_t)chunk)>>LG_PAGE)) 1810 == NULL); 1811 1812 binind = arena_bin_index(chunk->arena, run->bin); 1813 bin_info = &arena_bin_info[binind]; 1814 1815 malloc_mutex_unlock(&bin->lock); 1816 /******************************/ 1817 npages = bin_info->run_size >> LG_PAGE; 1818 run_ind = (size_t)(((uintptr_t)run - (uintptr_t)chunk) >> LG_PAGE); 1819 past = (size_t)(PAGE_CEILING((uintptr_t)run + 1820 (uintptr_t)bin_info->reg0_offset + (uintptr_t)(run->nextind * 1821 bin_info->reg_interval - bin_info->redzone_size) - 1822 (uintptr_t)chunk) >> LG_PAGE); 1823 malloc_mutex_lock(&arena->lock); 1824 1825 /* 1826 * If the run was originally clean, and some pages were never touched, 1827 * trim the clean pages before deallocating the dirty portion of the 1828 * run. 1829 */ 1830 assert(arena_mapbits_dirty_get(chunk, run_ind) == 1831 arena_mapbits_dirty_get(chunk, run_ind+npages-1)); 1832 if (arena_mapbits_dirty_get(chunk, run_ind) == 0 && past - run_ind < 1833 npages) { 1834 /* Trim clean pages. Convert to large run beforehand. */ 1835 assert(npages > 0); 1836 arena_mapbits_large_set(chunk, run_ind, bin_info->run_size, 0); 1837 arena_mapbits_large_set(chunk, run_ind+npages-1, 0, 0); 1838 arena_run_trim_tail(arena, chunk, run, (npages << LG_PAGE), 1839 ((past - run_ind) << LG_PAGE), false); 1840 /* npages = past - run_ind; */ 1841 } 1842 arena_run_dalloc(arena, run, true, false); 1843 malloc_mutex_unlock(&arena->lock); 1844 /****************************/ 1845 malloc_mutex_lock(&bin->lock); 1846 if (config_stats) 1847 bin->stats.curruns--; 1848 } 1849 1850 static void 1851 arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run, 1852 arena_bin_t *bin) 1853 { 1854 1855 /* 1856 * Make sure that if bin->runcur is non-NULL, it refers to the lowest 1857 * non-full run. It is okay to NULL runcur out rather than proactively 1858 * keeping it pointing at the lowest non-full run. 1859 */ 1860 if ((uintptr_t)run < (uintptr_t)bin->runcur) { 1861 /* Switch runcur. 

static void
arena_bin_lower_run(arena_t *arena, arena_chunk_t *chunk, arena_run_t *run,
    arena_bin_t *bin)
{

	/*
	 * Make sure that if bin->runcur is non-NULL, it refers to the lowest
	 * non-full run.  It is okay to NULL runcur out rather than proactively
	 * keeping it pointing at the lowest non-full run.
	 */
	if ((uintptr_t)run < (uintptr_t)bin->runcur) {
		/* Switch runcur. */
		if (bin->runcur->nfree > 0)
			arena_bin_runs_insert(bin, bin->runcur);
		bin->runcur = run;
		if (config_stats)
			bin->stats.reruns++;
	} else
		arena_bin_runs_insert(bin, run);
}

void
arena_dalloc_bin_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    arena_chunk_map_t *mapelm)
{
	size_t pageind;
	arena_run_t *run;
	arena_bin_t *bin;
	arena_bin_info_t *bin_info;
	size_t size, binind;

	pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
	    arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
	bin = run->bin;
	binind = arena_ptr_small_binind_get(ptr, mapelm->bits);
	bin_info = &arena_bin_info[binind];
	if (config_fill || config_stats)
		size = bin_info->reg_size;

	if (config_fill && opt_junk)
		arena_dalloc_junk_small(ptr, bin_info);

	arena_run_reg_dalloc(run, ptr);
	if (run->nfree == bin_info->nregs) {
		arena_dissociate_bin_run(chunk, run, bin);
		arena_dalloc_bin_run(arena, chunk, run, bin);
	} else if (run->nfree == 1 && run != bin->runcur)
		arena_bin_lower_run(arena, chunk, run, bin);

	if (config_stats) {
		bin->stats.allocated -= size;
		bin->stats.ndalloc++;
	}
}
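
/*
 * For example, if ptr falls in page 7 of its chunk and the map entry for that
 * page records a small run index of 3 (i.e. the page is the fourth page of
 * its run), the run header is located at page 7 - 3 == 4 of the chunk; the
 * bin index is then recovered from the same map bits.
 */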

void
arena_dalloc_bin(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind, arena_chunk_map_t *mapelm)
{
	arena_run_t *run;
	arena_bin_t *bin;

	run = (arena_run_t *)((uintptr_t)chunk + (uintptr_t)((pageind -
	    arena_mapbits_small_runind_get(chunk, pageind)) << LG_PAGE));
	bin = run->bin;
	malloc_mutex_lock(&bin->lock);
	arena_dalloc_bin_locked(arena, chunk, ptr, mapelm);
	malloc_mutex_unlock(&bin->lock);
}

void
arena_dalloc_small(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t pageind)
{
	arena_chunk_map_t *mapelm;

	if (config_debug) {
		/* arena_ptr_small_binind_get() does extra sanity checking. */
		assert(arena_ptr_small_binind_get(ptr, arena_mapbits_get(chunk,
		    pageind)) != BININD_INVALID);
	}
	mapelm = arena_mapp_get(chunk, pageind);
	arena_dalloc_bin(arena, chunk, ptr, pageind, mapelm);
}

#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define	arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large_impl)
#endif
static void
arena_dalloc_junk_large(void *ptr, size_t usize)
{

	if (config_fill && opt_junk)
		memset(ptr, 0x5a, usize);
}
#ifdef JEMALLOC_JET
#undef arena_dalloc_junk_large
#define	arena_dalloc_junk_large JEMALLOC_N(arena_dalloc_junk_large)
arena_dalloc_junk_large_t *arena_dalloc_junk_large =
    JEMALLOC_N(arena_dalloc_junk_large_impl);
#endif
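
/*
 * The #undef/#define pair above compiles the static implementation under an
 * *_impl name when JEMALLOC_JET is defined, and the function pointer then
 * re-exports it under the public name, which lets tests interpose their own
 * junk-filling hook; in non-JET builds the calls bind directly to the static
 * function.
 */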

void
arena_dalloc_large_locked(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{

	if (config_fill || config_stats) {
		size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
		size_t usize = arena_mapbits_large_size_get(chunk, pageind);

		arena_dalloc_junk_large(ptr, usize);
		if (config_stats) {
			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= usize;
			arena->stats.lstats[(usize >> LG_PAGE) - 1].ndalloc++;
			arena->stats.lstats[(usize >> LG_PAGE) - 1].curruns--;
		}
	}

	arena_run_dalloc(arena, (arena_run_t *)ptr, true, false);
}

void
arena_dalloc_large(arena_t *arena, arena_chunk_t *chunk, void *ptr)
{

	malloc_mutex_lock(&arena->lock);
	arena_dalloc_large_locked(arena, chunk, ptr);
	malloc_mutex_unlock(&arena->lock);
}

static void
arena_ralloc_large_shrink(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size)
{

	assert(size < oldsize);

	/*
	 * Shrink the run, and make trailing pages available for other
	 * allocations.
	 */
	malloc_mutex_lock(&arena->lock);
	arena_run_trim_tail(arena, chunk, (arena_run_t *)ptr, oldsize, size,
	    true);
	if (config_stats) {
		arena->stats.ndalloc_large++;
		arena->stats.allocated_large -= oldsize;
		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
		arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;

		arena->stats.nmalloc_large++;
		arena->stats.nrequests_large++;
		arena->stats.allocated_large += size;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
		arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
	}
	malloc_mutex_unlock(&arena->lock);
}

static bool
arena_ralloc_large_grow(arena_t *arena, arena_chunk_t *chunk, void *ptr,
    size_t oldsize, size_t size, size_t extra, bool zero)
{
	size_t pageind = ((uintptr_t)ptr - (uintptr_t)chunk) >> LG_PAGE;
	size_t npages = oldsize >> LG_PAGE;
	size_t followsize;

	assert(oldsize == arena_mapbits_large_size_get(chunk, pageind));

	/* Try to extend the run. */
	assert(size + extra > oldsize);
	malloc_mutex_lock(&arena->lock);
	if (pageind + npages < chunk_npages &&
	    arena_mapbits_allocated_get(chunk, pageind+npages) == 0 &&
	    (followsize = arena_mapbits_unallocated_size_get(chunk,
	    pageind+npages)) >= size - oldsize) {
		/*
		 * The next run is available and sufficiently large.  Split the
		 * following run, then merge the first part with the existing
		 * allocation.
		 */
		size_t flag_dirty;
		size_t splitsize = (oldsize + followsize <= size + extra)
		    ? followsize : size + extra - oldsize;
		arena_run_split_large(arena, (arena_run_t *)((uintptr_t)chunk +
		    ((pageind+npages) << LG_PAGE)), splitsize, zero);

		size = oldsize + splitsize;
		npages = size >> LG_PAGE;

		/*
		 * Mark the extended run as dirty if either portion of the run
		 * was dirty before allocation.  This is rather pedantic,
		 * because there's not actually any sequence of events that
		 * could cause the resulting run to be passed to
		 * arena_run_dalloc() with the dirty argument set to false
		 * (which is when dirty flag consistency would really matter).
		 */
		flag_dirty = arena_mapbits_dirty_get(chunk, pageind) |
		    arena_mapbits_dirty_get(chunk, pageind+npages-1);
		arena_mapbits_large_set(chunk, pageind, size, flag_dirty);
		arena_mapbits_large_set(chunk, pageind+npages-1, 0, flag_dirty);

		if (config_stats) {
			arena->stats.ndalloc_large++;
			arena->stats.allocated_large -= oldsize;
			arena->stats.lstats[(oldsize >> LG_PAGE) - 1].ndalloc++;
			arena->stats.lstats[(oldsize >> LG_PAGE) - 1].curruns--;

			arena->stats.nmalloc_large++;
			arena->stats.nrequests_large++;
			arena->stats.allocated_large += size;
			arena->stats.lstats[(size >> LG_PAGE) - 1].nmalloc++;
			arena->stats.lstats[(size >> LG_PAGE) - 1].nrequests++;
			arena->stats.lstats[(size >> LG_PAGE) - 1].curruns++;
		}
		malloc_mutex_unlock(&arena->lock);
		return (false);
	}
	malloc_mutex_unlock(&arena->lock);

	return (true);
}
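
/*
 * For example, with oldsize == 2 pages, size == 5 pages, extra == 1 page, and
 * a following free run of followsize == 8 pages: oldsize + followsize exceeds
 * size + extra, so splitsize == size + extra - oldsize == 4 pages and the run
 * grows to 6 pages.  Had followsize been only 3 pages (still >= size -
 * oldsize), the entire following run would have been consumed, yielding a
 * 5-page run.
 */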

#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define	arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large_impl)
#endif
static void
arena_ralloc_junk_large(void *ptr, size_t old_usize, size_t usize)
{

	if (config_fill && opt_junk) {
		memset((void *)((uintptr_t)ptr + usize), 0x5a,
		    old_usize - usize);
	}
}
#ifdef JEMALLOC_JET
#undef arena_ralloc_junk_large
#define	arena_ralloc_junk_large JEMALLOC_N(arena_ralloc_junk_large)
arena_ralloc_junk_large_t *arena_ralloc_junk_large =
    JEMALLOC_N(arena_ralloc_junk_large_impl);
#endif

/*
 * Try to resize a large allocation, in order to avoid copying.  This always
 * fails when growing an object whose following run is already in use.
 */
static bool
arena_ralloc_large(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{
	size_t psize;

	psize = PAGE_CEILING(size + extra);
	if (psize == oldsize) {
		/* Same size class. */
		return (false);
	} else {
		arena_chunk_t *chunk;
		arena_t *arena;

		chunk = (arena_chunk_t *)CHUNK_ADDR2BASE(ptr);
		arena = chunk->arena;

		if (psize < oldsize) {
			/* Fill before shrinking in order to avoid a race. */
			arena_ralloc_junk_large(ptr, oldsize, psize);
			arena_ralloc_large_shrink(arena, chunk, ptr, oldsize,
			    psize);
			return (false);
		} else {
			bool ret = arena_ralloc_large_grow(arena, chunk, ptr,
			    oldsize, PAGE_CEILING(size),
			    psize - PAGE_CEILING(size), zero);
			if (config_fill && ret == false && zero == false) {
				if (opt_junk) {
					memset((void *)((uintptr_t)ptr +
					    oldsize), 0xa5, isalloc(ptr,
					    config_prof) - oldsize);
				} else if (opt_zero) {
					memset((void *)((uintptr_t)ptr +
					    oldsize), 0, isalloc(ptr,
					    config_prof) - oldsize);
				}
			}
			return (ret);
		}
	}
}

bool
arena_ralloc_no_move(void *ptr, size_t oldsize, size_t size, size_t extra,
    bool zero)
{

	/*
	 * Avoid moving the allocation if the size class can be left the same.
	 */
	if (oldsize <= arena_maxclass) {
		if (oldsize <= SMALL_MAXCLASS) {
			assert(arena_bin_info[SMALL_SIZE2BIN(oldsize)].reg_size
			    == oldsize);
			if ((size + extra <= SMALL_MAXCLASS &&
			    SMALL_SIZE2BIN(size + extra) ==
			    SMALL_SIZE2BIN(oldsize)) || (size <= oldsize &&
			    size + extra >= oldsize))
				return (false);
		} else {
			assert(size <= arena_maxclass);
			if (size + extra > SMALL_MAXCLASS) {
				if (arena_ralloc_large(ptr, oldsize, size,
				    extra, zero) == false)
					return (false);
			}
		}
	}

	/* Reallocation would require a move. */
	return (true);
}
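
/*
 * For example, with the usual small size classes (..., 32, 48, 64, 80, ...):
 * shrinking a 48-byte region to size == 40 with extra == 0 stays in the
 * 48-byte bin, so no move is needed, whereas growing it to size == 80 maps to
 * a different bin and neither condition above holds, so the caller must
 * reallocate and copy.
 */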

void *
arena_ralloc(arena_t *arena, void *ptr, size_t oldsize, size_t size,
    size_t extra, size_t alignment, bool zero, bool try_tcache_alloc,
    bool try_tcache_dalloc)
{
	void *ret;
	size_t copysize;

	/* Try to avoid moving the allocation. */
	if (arena_ralloc_no_move(ptr, oldsize, size, extra, zero) == false)
		return (ptr);

	/*
	 * size and oldsize are different enough that we need to move the
	 * object.  In that case, fall back to allocating new space and
	 * copying.
	 */
	if (alignment != 0) {
		size_t usize = sa2u(size + extra, alignment);
		if (usize == 0)
			return (NULL);
		ret = ipalloct(usize, alignment, zero, try_tcache_alloc, arena);
	} else
		ret = arena_malloc(arena, size + extra, zero, try_tcache_alloc);

	if (ret == NULL) {
		if (extra == 0)
			return (NULL);
		/* Try again, this time without extra. */
		if (alignment != 0) {
			size_t usize = sa2u(size, alignment);
			if (usize == 0)
				return (NULL);
			ret = ipalloct(usize, alignment, zero, try_tcache_alloc,
			    arena);
		} else
			ret = arena_malloc(arena, size, zero, try_tcache_alloc);

		if (ret == NULL)
			return (NULL);
	}

	/* Junk/zero-filling were already done by ipalloc()/arena_malloc(). */

	/*
	 * Copy at most size bytes (not size+extra), since the caller has no
	 * expectation that the extra bytes will be reliably preserved.
	 */
	copysize = (size < oldsize) ? size : oldsize;
	VALGRIND_MAKE_MEM_UNDEFINED(ret, copysize);
	memcpy(ret, ptr, copysize);
	iqalloct(ptr, try_tcache_dalloc);
	return (ret);
}
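
/*
 * For example, growing a 1-page large allocation to 4 pages with alignment ==
 * 0: when arena_ralloc_no_move() cannot extend the run in place, a new size +
 * extra allocation is attempted (retrying without extra on failure), copysize
 * == oldsize == 1 page is copied, and the old region is freed through
 * iqalloct().
 */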

dss_prec_t
arena_dss_prec_get(arena_t *arena)
{
	dss_prec_t ret;

	malloc_mutex_lock(&arena->lock);
	ret = arena->dss_prec;
	malloc_mutex_unlock(&arena->lock);
	return (ret);
}

void
arena_dss_prec_set(arena_t *arena, dss_prec_t dss_prec)
{

	malloc_mutex_lock(&arena->lock);
	arena->dss_prec = dss_prec;
	malloc_mutex_unlock(&arena->lock);
}

void
arena_stats_merge(arena_t *arena, const char **dss, size_t *nactive,
    size_t *ndirty, arena_stats_t *astats, malloc_bin_stats_t *bstats,
    malloc_large_stats_t *lstats)
{
	unsigned i;

	malloc_mutex_lock(&arena->lock);
	*dss = dss_prec_names[arena->dss_prec];
	*nactive += arena->nactive;
	*ndirty += arena->ndirty;

	astats->mapped += arena->stats.mapped;
	astats->npurge += arena->stats.npurge;
	astats->nmadvise += arena->stats.nmadvise;
	astats->purged += arena->stats.purged;
	astats->allocated_large += arena->stats.allocated_large;
	astats->nmalloc_large += arena->stats.nmalloc_large;
	astats->ndalloc_large += arena->stats.ndalloc_large;
	astats->nrequests_large += arena->stats.nrequests_large;

	for (i = 0; i < nlclasses; i++) {
		lstats[i].nmalloc += arena->stats.lstats[i].nmalloc;
		lstats[i].ndalloc += arena->stats.lstats[i].ndalloc;
		lstats[i].nrequests += arena->stats.lstats[i].nrequests;
		lstats[i].curruns += arena->stats.lstats[i].curruns;
	}
	malloc_mutex_unlock(&arena->lock);

	for (i = 0; i < NBINS; i++) {
		arena_bin_t *bin = &arena->bins[i];

		malloc_mutex_lock(&bin->lock);
		bstats[i].allocated += bin->stats.allocated;
		bstats[i].nmalloc += bin->stats.nmalloc;
		bstats[i].ndalloc += bin->stats.ndalloc;
		bstats[i].nrequests += bin->stats.nrequests;
		if (config_tcache) {
			bstats[i].nfills += bin->stats.nfills;
			bstats[i].nflushes += bin->stats.nflushes;
		}
		bstats[i].nruns += bin->stats.nruns;
		bstats[i].reruns += bin->stats.reruns;
		bstats[i].curruns += bin->stats.curruns;
		malloc_mutex_unlock(&bin->lock);
	}
}

bool
arena_new(arena_t *arena, unsigned ind)
{
	unsigned i;
	arena_bin_t *bin;

	arena->ind = ind;
	arena->nthreads = 0;

	if (malloc_mutex_init(&arena->lock))
		return (true);

	if (config_stats) {
		memset(&arena->stats, 0, sizeof(arena_stats_t));
		arena->stats.lstats =
		    (malloc_large_stats_t *)base_alloc(nlclasses *
		    sizeof(malloc_large_stats_t));
		if (arena->stats.lstats == NULL)
			return (true);
		memset(arena->stats.lstats, 0, nlclasses *
		    sizeof(malloc_large_stats_t));
		if (config_tcache)
			ql_new(&arena->tcache_ql);
	}

	if (config_prof)
		arena->prof_accumbytes = 0;

	arena->dss_prec = chunk_dss_prec_get();

	/* Initialize chunks. */
	arena_chunk_dirty_new(&arena->chunks_dirty);
	arena->spare = NULL;

	arena->nactive = 0;
	arena->ndirty = 0;
	arena->npurgatory = 0;

	arena_avail_tree_new(&arena->runs_avail);

	/* Initialize bins. */
	for (i = 0; i < NBINS; i++) {
		bin = &arena->bins[i];
		if (malloc_mutex_init(&bin->lock))
			return (true);
		bin->runcur = NULL;
		arena_run_tree_new(&bin->runs);
		if (config_stats)
			memset(&bin->stats, 0, sizeof(malloc_bin_stats_t));
	}

	return (false);
}

/*
 * Calculate bin_info->run_size such that it meets the following constraints:
 *
 *   *) bin_info->run_size >= min_run_size
 *   *) bin_info->run_size <= arena_maxclass
 *   *) run header overhead <= RUN_MAX_OVRHD (or header overhead relaxed).
 *   *) bin_info->nregs <= RUN_MAXREGS
 *
 * bin_info->nregs, bin_info->bitmap_offset, and bin_info->reg0_offset are also
 * calculated here, since these settings are all interdependent.
 */
static size_t
bin_info_run_size_calc(arena_bin_info_t *bin_info, size_t min_run_size)
{
	size_t pad_size;
	size_t try_run_size, good_run_size;
	uint32_t try_nregs, good_nregs;
	uint32_t try_hdr_size, good_hdr_size;
	uint32_t try_bitmap_offset, good_bitmap_offset;
	uint32_t try_ctx0_offset, good_ctx0_offset;
	uint32_t try_redzone0_offset, good_redzone0_offset;

	assert(min_run_size >= PAGE);
	assert(min_run_size <= arena_maxclass);

	/*
	 * Determine redzone size based on minimum alignment and minimum
	 * redzone size.  Add padding to the end of the run if it is needed to
	 * align the regions.  The padding allows each redzone to be half the
	 * minimum alignment; without the padding, each redzone would have to
	 * be twice as large in order to maintain alignment.
	 */
	if (config_fill && opt_redzone) {
		size_t align_min = ZU(1) << (ffs(bin_info->reg_size) - 1);
		if (align_min <= REDZONE_MINSIZE) {
			bin_info->redzone_size = REDZONE_MINSIZE;
			pad_size = 0;
		} else {
			bin_info->redzone_size = align_min >> 1;
			pad_size = bin_info->redzone_size;
		}
	} else {
		bin_info->redzone_size = 0;
		pad_size = 0;
	}
	bin_info->reg_interval = bin_info->reg_size +
	    (bin_info->redzone_size << 1);
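
	/*
	 * For example, for reg_size == 192 == 3 * 64, the lowest set bit
	 * yields align_min == 64; assuming REDZONE_MINSIZE is smaller than
	 * that, redzone_size becomes 32 (half the minimum alignment),
	 * pad_size == 32, and reg_interval == 192 + 2 * 32 == 256, which
	 * preserves 64-byte alignment between consecutive regions.
	 */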

	/*
	 * Calculate known-valid settings before entering the run_size
	 * expansion loop, so that the first part of the loop always copies
	 * valid settings.
	 *
	 * The do..while loop iteratively reduces the number of regions until
	 * the run header and the regions no longer overlap.  A closed formula
	 * would be quite messy, since there is an interdependency between the
	 * header's mask length and the number of regions.
	 */
	try_run_size = min_run_size;
	try_nregs = ((try_run_size - sizeof(arena_run_t)) /
	    bin_info->reg_interval)
	    + 1; /* Counter-act try_nregs-- in loop. */
	if (try_nregs > RUN_MAXREGS) {
		try_nregs = RUN_MAXREGS
		    + 1; /* Counter-act try_nregs-- in loop. */
	}
	do {
		try_nregs--;
		try_hdr_size = sizeof(arena_run_t);
		/* Pad to a long boundary. */
		try_hdr_size = LONG_CEILING(try_hdr_size);
		try_bitmap_offset = try_hdr_size;
		/* Add space for bitmap. */
		try_hdr_size += bitmap_size(try_nregs);
		if (config_prof && opt_prof && prof_promote == false) {
			/* Pad to a quantum boundary. */
			try_hdr_size = QUANTUM_CEILING(try_hdr_size);
			try_ctx0_offset = try_hdr_size;
			/* Add space for one (prof_ctx_t *) per region. */
			try_hdr_size += try_nregs * sizeof(prof_ctx_t *);
		} else
			try_ctx0_offset = 0;
		try_redzone0_offset = try_run_size - (try_nregs *
		    bin_info->reg_interval) - pad_size;
	} while (try_hdr_size > try_redzone0_offset);

	/* run_size expansion loop. */
	do {
		/*
		 * Copy valid settings before trying more aggressive settings.
		 */
		good_run_size = try_run_size;
		good_nregs = try_nregs;
		good_hdr_size = try_hdr_size;
		good_bitmap_offset = try_bitmap_offset;
		good_ctx0_offset = try_ctx0_offset;
		good_redzone0_offset = try_redzone0_offset;

		/* Try more aggressive settings. */
		try_run_size += PAGE;
		try_nregs = ((try_run_size - sizeof(arena_run_t) - pad_size) /
		    bin_info->reg_interval)
		    + 1; /* Counter-act try_nregs-- in loop. */
		if (try_nregs > RUN_MAXREGS) {
			try_nregs = RUN_MAXREGS
			    + 1; /* Counter-act try_nregs-- in loop. */
		}
		do {
			try_nregs--;
			try_hdr_size = sizeof(arena_run_t);
			/* Pad to a long boundary. */
			try_hdr_size = LONG_CEILING(try_hdr_size);
			try_bitmap_offset = try_hdr_size;
			/* Add space for bitmap. */
			try_hdr_size += bitmap_size(try_nregs);
			if (config_prof && opt_prof && prof_promote == false) {
				/* Pad to a quantum boundary. */
				try_hdr_size = QUANTUM_CEILING(try_hdr_size);
				try_ctx0_offset = try_hdr_size;
				/*
				 * Add space for one (prof_ctx_t *) per region.
				 */
				try_hdr_size += try_nregs *
				    sizeof(prof_ctx_t *);
			}
			try_redzone0_offset = try_run_size - (try_nregs *
			    bin_info->reg_interval) - pad_size;
		} while (try_hdr_size > try_redzone0_offset);
	} while (try_run_size <= arena_maxclass
	    && RUN_MAX_OVRHD * (bin_info->reg_interval << 3) >
	    RUN_MAX_OVRHD_RELAX
	    && (try_redzone0_offset << RUN_BFP) > RUN_MAX_OVRHD * try_run_size
	    && try_nregs < RUN_MAXREGS);

	assert(good_hdr_size <= good_redzone0_offset);

	/* Copy final settings. */
	bin_info->run_size = good_run_size;
	bin_info->nregs = good_nregs;
	bin_info->bitmap_offset = good_bitmap_offset;
	bin_info->ctx0_offset = good_ctx0_offset;
	bin_info->reg0_offset = good_redzone0_offset + bin_info->redzone_size;

	assert(bin_info->reg0_offset - bin_info->redzone_size + (bin_info->nregs
	    * bin_info->reg_interval) + pad_size == bin_info->run_size);

	return (good_run_size);
}

static void
bin_info_init(void)
{
	arena_bin_info_t *bin_info;
	size_t prev_run_size = PAGE;

#define	SIZE_CLASS(bin, delta, size)					\
	bin_info = &arena_bin_info[bin];				\
	bin_info->reg_size = size;					\
	prev_run_size = bin_info_run_size_calc(bin_info, prev_run_size);\
	bitmap_info_init(&bin_info->bitmap_info, bin_info->nregs);
	SIZE_CLASSES
#undef SIZE_CLASS
}

void
arena_boot(void)
{
	size_t header_size;
	unsigned i;

	/*
	 * Compute the header size such that it is large enough to contain the
	 * page map.  The page map is biased to omit entries for the header
	 * itself, so some iteration is necessary to compute the map bias.
	 *
	 * 1) Compute safe header_size and map_bias values that include enough
	 *    space for an unbiased page map.
	 * 2) Refine map_bias based on (1) to omit the header pages in the page
	 *    map.  The resulting map_bias may be one too small.
	 * 3) Refine map_bias based on (2).  The result will be >= the result
	 *    from (2), and will always be correct.
	 */
	map_bias = 0;
	for (i = 0; i < 3; i++) {
		header_size = offsetof(arena_chunk_t, map) +
		    (sizeof(arena_chunk_map_t) * (chunk_npages-map_bias));
		map_bias = (header_size >> LG_PAGE) + ((header_size & PAGE_MASK)
		    != 0);
	}
	assert(map_bias > 0);

	arena_maxclass = chunksize - (map_bias << LG_PAGE);

	bin_info_init();
}
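
/*
 * Worked example with assumed sizes (4 KiB pages, chunk_npages == 1024, a
 * 128-byte fixed header, and 32-byte map entries): pass 1 computes
 * header_size == 128 + 32 * 1024 == 32896 bytes, so map_bias == 9 pages;
 * pass 2 recomputes with 1024 - 9 map entries, giving 32608 bytes and
 * map_bias == 8; pass 3 confirms 8, the fixed point.
 */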

void
arena_prefork(arena_t *arena)
{
	unsigned i;

	malloc_mutex_prefork(&arena->lock);
	for (i = 0; i < NBINS; i++)
		malloc_mutex_prefork(&arena->bins[i].lock);
}

void
arena_postfork_parent(arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_parent(&arena->bins[i].lock);
	malloc_mutex_postfork_parent(&arena->lock);
}

void
arena_postfork_child(arena_t *arena)
{
	unsigned i;

	for (i = 0; i < NBINS; i++)
		malloc_mutex_postfork_child(&arena->bins[i].lock);
	malloc_mutex_postfork_child(&arena->lock);
}
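
/*
 * The fork handlers above preserve lock ordering: arena_prefork() acquires
 * the arena lock before the per-bin locks, and the postfork functions release
 * the bin locks first and the arena lock last.
 */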