Lines Matching +full:delta +full:- +full:y +full:- +full:threshold
81 pa_shard_basic_stats_merge(&arena->pa_shard, nactive, ndirty, nmuzzy);
96 base_stats_get(tsdn, arena->base, &base_allocated, &base_resident,
98 size_t pac_mapped_sz = pac_mapped(&arena->pa_shard.pac);
99 astats->mapped += base_mapped + pac_mapped_sz;
100 astats->resident += base_resident;
102 LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
104 astats->base += base_allocated;
105 atomic_load_add_store_zu(&astats->internal, arena_internal_get(arena));
106 astats->metadata_thp += metadata_thp;
108 for (szind_t i = 0; i < SC_NSIZES - SC_NBINS; i++) {
110 LOCKEDINT_MTX(arena->stats.mtx),
111 &arena->stats.lstats[i].nmalloc);
113 astats->nmalloc_large += nmalloc;
116 LOCKEDINT_MTX(arena->stats.mtx),
117 &arena->stats.lstats[i].ndalloc);
119 astats->ndalloc_large += ndalloc;
122 LOCKEDINT_MTX(arena->stats.mtx),
123 &arena->stats.lstats[i].nrequests);
126 astats->nrequests_large += nmalloc + nrequests;
130 astats->nfills_large += nmalloc;
133 LOCKEDINT_MTX(arena->stats.mtx),
134 &arena->stats.lstats[i].nflushes);
136 astats->nflushes_large += nflush;
139 assert(nmalloc - ndalloc <= SIZE_T_MAX);
140 size_t curlextents = (size_t)(nmalloc - ndalloc);
142 astats->allocated_large +=
146 pa_shard_stats_merge(tsdn, &arena->pa_shard, &astats->pa_shard_stats,
147 estats, hpastats, secstats, &astats->resident);
149 LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
151 /* Currently cached bytes and sanitizer-stashed bytes in tcache. */
152 astats->tcache_bytes = 0;
153 astats->tcache_stashed_bytes = 0;
154 malloc_mutex_lock(tsdn, &arena->tcache_ql_mtx);
156 ql_foreach(descriptor, &arena->cache_bin_array_descriptor_ql, link) {
158 cache_bin_t *cache_bin = &descriptor->bins[i];
163 astats->tcache_bytes += ncached * sz_index2size(i);
164 astats->tcache_stashed_bytes += nstashed *
169 &astats->mutex_prof_data[arena_prof_mutex_tcache_list],
170 &arena->tcache_ql_mtx);
171 malloc_mutex_unlock(tsdn, &arena->tcache_ql_mtx);
174 malloc_mutex_lock(tsdn, &arena->mtx); \
175 malloc_mutex_prof_read(tsdn, &astats->mutex_prof_data[ind], \
176 &arena->mtx); \
177 malloc_mutex_unlock(tsdn, &arena->mtx);
181 READ_ARENA_MUTEX_PROF_DATA(base->mtx,
184 pa_shard_mtx_stats_read(tsdn, &arena->pa_shard,
185 astats->mutex_prof_data);
187 nstime_copy(&astats->uptime, &arena->create_time);
188 nstime_update(&astats->uptime);
189 nstime_subtract(&astats->uptime, &arena->create_time);
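The merge path above folds base, pa_shard, large, and tcache counters into astats and finally stamps an uptime (now minus the arena's create_time). From application code these merged counters are normally read through mallctl after refreshing the stats epoch. A minimal sketch using jemalloc's documented mallctl names, assuming an unprefixed public API and arena index 0:

#include <jemalloc/jemalloc.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
    /* Bumping "epoch" asks jemalloc to re-merge per-arena stats. */
    uint64_t epoch = 1;
    size_t len = sizeof(epoch);
    mallctl("epoch", &epoch, &len, &epoch, len);

    /* Read one of the merged counters produced by the code above. */
    size_t resident;
    size_t sz = sizeof(resident);
    if (mallctl("stats.arenas.0.resident", &resident, &sz, NULL, 0) == 0) {
        printf("arena 0 resident: %zu bytes\n", resident);
    }
    return 0;
}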
209 &arena->pa_shard.pac.decay_dirty, 0);
220 if (decay_immediately(&arena->pa_shard.pac.decay_dirty)) {
233 assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
235 regind = bitmap_sfu(slab_data->bitmap, &bin_info->bitmap_info);
237 (uintptr_t)(bin_info->reg_size * regind));
248 assert(!bitmap_full(slab_data->bitmap, &bin_info->bitmap_info));
252 size_t regind = bitmap_sfu(slab_data->bitmap,
253 &bin_info->bitmap_info);
255 (uintptr_t)(bin_info->reg_size * regind));
259 bitmap_t g = slab_data->bitmap[group];
263 g = slab_data->bitmap[++group];
267 if (pop > (cnt - i)) {
268 pop = cnt - i;
276 uintptr_t regsize = (uintptr_t)bin_info->reg_size;
277 while (pop--) {
284 slab_data->bitmap[group] = g;
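arena_slab_reg_alloc and its batch variant pick free regions by finding unset bits in the slab bitmap (bitmap_sfu, i.e. set-first-unset) and converting each bit index into a reg_size-sized offset. A simplified, self-contained sketch of that idea over a single 64-bit word, using the GCC/Clang __builtin_ctzll builtin; this is not jemalloc's bitmap_t API, and the region size is illustrative:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* Find the first clear bit in *word, set it, and return its index. */
static unsigned set_first_unset(uint64_t *word) {
    assert(~*word != 0);                              /* at least one free region */
    unsigned bit = (unsigned)__builtin_ctzll(~*word); /* lowest zero bit */
    *word |= (uint64_t)1 << bit;                      /* mark it allocated */
    return bit;
}

int main(void) {
    uint64_t bitmap = 0x7;        /* regions 0..2 already in use */
    size_t reg_size = 64;         /* hypothetical region size */
    unsigned regind = set_first_unset(&bitmap);
    size_t offset = (size_t)regind * reg_size;
    assert(regind == 3 && offset == 192);
    return 0;
}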
300 hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
302 locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
303 &arena->stats.lstats[hindex].nmalloc, 1);
316 hindex = (index >= SC_NBINS) ? index - SC_NBINS : 0;
318 locked_inc_u64(tsdn, LOCKEDINT_MTX(arena->stats.mtx),
319 &arena->stats.lstats[hindex].ndalloc, 1);
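The two fragments above convert a size-class index into a slot in the large-allocation stats array by subtracting the number of small bin classes. A standalone illustration of that mapping, using an illustrative stand-in for SC_NBINS rather than jemalloc's configured value:

#include <assert.h>

int main(void) {
    unsigned nbins = 36;   /* stand-in for SC_NBINS, not the real value */
    unsigned index = 40;   /* some large size-class index */
    unsigned hindex = (index >= nbins) ? index - nbins : 0;
    assert(hindex == 4);   /* 40 - 36: fifth slot in stats.lstats[] */
    return 0;
}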
338 edata_t *edata = pa_alloc(tsdn, &arena->pa_shard, esize, alignment,
344 LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
346 LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
360 LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
363 LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
373 LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
375 LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
385 LOCKEDINT_MTX_LOCK(tsdn, arena->stats.mtx);
387 LOCKEDINT_MTX_UNLOCK(tsdn, arena->stats.mtx);
412 return pa_decay_ms_set(tsdn, &arena->pa_shard, state, decay_ms,
418 return pa_decay_ms_get(&arena->pa_shard, state);
426 malloc_mutex_lock(tsdn, &decay->mtx);
427 pac_decay_all(tsdn, &arena->pa_shard.pac, decay, decay_stats,
429 malloc_mutex_unlock(tsdn, &decay->mtx);
433 if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
439 bool epoch_advanced = pac_maybe_decay_purge(tsdn, &arena->pa_shard.pac,
446 malloc_mutex_unlock(tsdn, &decay->mtx);
459 return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_dirty,
460 &arena->pa_shard.pac.stats->decay_dirty,
461 &arena->pa_shard.pac.ecache_dirty, is_background_thread, all);
467 if (pa_shard_dont_decay_muzzy(&arena->pa_shard)) {
470 return arena_decay_impl(tsdn, arena, &arena->pa_shard.pac.decay_muzzy,
471 &arena->pa_shard.pac.stats->decay_muzzy,
472 &arena->pa_shard.pac.ecache_muzzy, is_background_thread, all);
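pa_decay_ms_set/get and the dirty/muzzy decay paths above back the per-arena decay knobs; from outside the allocator they are usually driven through mallctl. A sketch using the documented arena.<i>.dirty_decay_ms control, assuming an unprefixed mallctl symbol and arena index 0:

#include <jemalloc/jemalloc.h>
#include <stdio.h>
#include <sys/types.h>

int main(void) {
    /* Ask arena 0 to purge dirty pages after roughly 5 seconds of disuse. */
    ssize_t decay_ms = 5000;
    if (mallctl("arena.0.dirty_decay_ms", NULL, NULL,
        &decay_ms, sizeof(decay_ms)) != 0) {
        fprintf(stderr, "setting dirty_decay_ms failed\n");
    }
    return 0;
}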
483 sec_flush(tsdn, &arena->pa_shard.hpa_sec);
495 malloc_mutex_assert_owner(tsdn, &info->mtx);
497 if (malloc_mutex_trylock(tsdn, &decay->mtx)) {
502 malloc_mutex_unlock(tsdn, &decay->mtx);
507 if (nstime_compare(remaining_sleep, &decay->epoch) <= 0) {
508 malloc_mutex_unlock(tsdn, &decay->mtx);
511 nstime_subtract(remaining_sleep, &decay->epoch);
515 info->npages_to_purge_new += npurge_new;
517 malloc_mutex_unlock(tsdn, &decay->mtx);
518 return info->npages_to_purge_new >
535 if (malloc_mutex_trylock(tsdn, &info->mtx)) {
539 * threads. So keep this non-blocking, and leave the work to a
553 info->npages_to_purge_new = 0;
557 malloc_mutex_unlock(tsdn, &info->mtx);
564 pa_shard_do_deferred_work(tsdn, &arena->pa_shard);
570 pa_dalloc(tsdn, &arena->pa_shard, slab, &deferred_work_generated);
579 edata_heap_insert(&bin->slabs_nonfull, slab);
581 bin->stats.nonfull_slabs++;
587 edata_heap_remove(&bin->slabs_nonfull, slab);
589 bin->stats.nonfull_slabs--;
595 edata_t *slab = edata_heap_remove_first(&bin->slabs_nonfull);
600 bin->stats.reslabs++;
601 bin->stats.nonfull_slabs--;
617 edata_list_active_append(&bin->slabs_full, slab);
625 edata_list_active_remove(&bin->slabs_full, slab);
632 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
633 if (bin->slabcur != NULL) {
634 slab = bin->slabcur;
635 bin->slabcur = NULL;
636 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
638 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
640 while ((slab = edata_heap_remove_first(&bin->slabs_nonfull)) != NULL) {
641 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
643 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
645 for (slab = edata_list_active_first(&bin->slabs_full); slab != NULL;
646 slab = edata_list_active_first(&bin->slabs_full)) {
648 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
650 malloc_mutex_lock(tsd_tsdn(tsd), &bin->lock);
653 bin->stats.curregs = 0;
654 bin->stats.curslabs = 0;
656 malloc_mutex_unlock(tsd_tsdn(tsd), &bin->lock);
666 * - Some of the functions in the transitive closure of calls assume
670 * - mallctl("epoch", ...) may concurrently refresh stats. While
676 malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
678 for (edata_t *edata = edata_list_active_first(&arena->large);
679 edata != NULL; edata = edata_list_active_first(&arena->large)) {
683 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
698 malloc_mutex_lock(tsd_tsdn(tsd), &arena->large_mtx);
700 malloc_mutex_unlock(tsd_tsdn(tsd), &arena->large_mtx);
708 pa_shard_reset(tsd_tsdn(tsd), &arena->pa_shard);
757 * meaning read only cross-arena metadata access is possible. The
783 pac_t *pac = &arena->pa_shard.pac;
784 arena_prepare_base_deletion_sync(tsd, &pac->ecache_dirty.mtx,
786 arena_prepare_base_deletion_sync(tsd, &pac->ecache_muzzy.mtx,
788 arena_prepare_base_deletion_sync(tsd, &pac->ecache_retained.mtx,
797 assert(base_ind_get(arena->base) >= narenas_auto);
807 pa_shard_destroy(tsd_tsdn(tsd), &arena->pa_shard);
819 arena_set(base_ind_get(arena->base), NULL);
826 arena_prepare_base_deletion(tsd, arena->base);
827 base_delete(tsd_tsdn(tsd), arena->base);
839 edata_t *slab = pa_alloc(tsdn, &arena->pa_shard, bin_info->slab_size,
854 edata_nfree_binshard_set(slab, bin_info->nregs, binshard);
855 bitmap_init(slab_data->bitmap, &bin_info->bitmap_info, false);
867 malloc_mutex_assert_owner(tsdn, &bin->lock);
869 assert(bin->slabcur == NULL);
870 assert(edata_heap_first(&bin->slabs_nonfull) == NULL);
876 bin->stats.nslabs++;
877 bin->stats.curslabs++;
879 bin->slabcur = fresh_slab;
886 malloc_mutex_assert_owner(tsdn, &bin->lock);
890 return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
896 malloc_mutex_assert_owner(tsdn, &bin->lock);
898 assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0);
900 if (bin->slabcur != NULL) {
901 arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
905 bin->slabcur = arena_bin_slabs_nonfull_tryget(bin);
906 assert(bin->slabcur == NULL || edata_nfree_get(bin->slabcur) > 0);
908 return (bin->slabcur == NULL);
918 binshard = tsd_binshardsp_get(tsdn_tsd(tsdn))->binshard[binind];
939 * Bin-local resources are used first: 1) bin->slabcur, and 2) nonfull
949 * (e.g. hugepage-backed or non-overcommit arenas), each fill-iteration
961 * local exhausted, b) unlock and slab_alloc returns null, c) re-lock
972 malloc_mutex_lock(tsdn, &bin->lock);
975 /* Try batch-fill from slabcur first. */
976 edata_t *slabcur = bin->slabcur;
978 unsigned tofill = nfill - filled;
990 assert(bin->slabcur != NULL);
998 assert(bin->slabcur != NULL);
1005 assert(bin->slabcur == NULL);
1020 bin->stats.nmalloc += filled;
1021 bin->stats.nrequests += cache_bin->tstats.nrequests;
1022 bin->stats.curregs += filled;
1023 bin->stats.nfills++;
1024 cache_bin->tstats.nrequests = 0;
1027 malloc_mutex_unlock(tsdn, &bin->lock);
1046 assert(edata_nfree_get(fresh_slab) == bin_info->nregs);
1060 const size_t nregs = bin_info->nregs;
1062 const size_t usize = bin_info->reg_size;
1078 size_t batch = nfill - filled;
1098 malloc_mutex_lock(tsdn, &bin->lock);
1100 * Only the last slab can be non-empty, and the last slab is non-empty
1107 edata_list_active_concat(&bin->slabs_full, &fulls);
1111 bin->stats.nslabs += nslab;
1112 bin->stats.curslabs += nslab;
1113 bin->stats.nmalloc += filled;
1114 bin->stats.nrequests += filled;
1115 bin->stats.curregs += filled;
1117 malloc_mutex_unlock(tsdn, &bin->lock);
1124 * Without allocating a new slab, try arena_slab_reg_alloc() and re-fill
1125 * bin->slabcur if necessary.
1130 malloc_mutex_assert_owner(tsdn, &bin->lock);
1131 if (bin->slabcur == NULL || edata_nfree_get(bin->slabcur) == 0) {
1137 assert(bin->slabcur != NULL && edata_nfree_get(bin->slabcur) > 0);
1138 return arena_slab_reg_alloc(bin->slabcur, &bin_infos[binind]);
1149 malloc_mutex_lock(tsdn, &bin->lock);
1153 malloc_mutex_unlock(tsdn, &bin->lock);
1158 malloc_mutex_lock(tsdn, &bin->lock);
1164 malloc_mutex_unlock(tsdn, &bin->lock);
1173 bin->stats.nmalloc++;
1174 bin->stats.nrequests++;
1175 bin->stats.curregs++;
1177 malloc_mutex_unlock(tsdn, &bin->lock);
1217 assert((usize & (alignment - 1)) == 0);
1297 if (slab == bin->slabcur) {
1298 bin->slabcur = NULL;
1306 * into the non-full slabs heap.
1308 if (bin_info->nregs == 1) {
1322 * Make sure that if bin->slabcur is non-NULL, it refers to the
1323 * oldest/lowest non-full slab. It is okay to NULL slabcur out rather
1324 * than proactively keeping it pointing at the oldest/lowest non-full
1327 if (bin->slabcur != NULL && edata_snad_comp(bin->slabcur, slab) > 0) {
1329 if (edata_nfree_get(bin->slabcur) > 0) {
1330 arena_bin_slabs_nonfull_insert(bin, bin->slabcur);
1332 arena_bin_slabs_full_insert(arena, bin, bin->slabcur);
1334 bin->slabcur = slab;
1336 bin->stats.reslabs++;
1345 malloc_mutex_assert_owner(tsdn, &bin->lock);
1347 assert(slab != bin->slabcur);
1349 bin->stats.curslabs--;
1373 malloc_mutex_lock(tsdn, &bin->lock);
1379 malloc_mutex_unlock(tsdn, &bin->lock);
1399 /* Calls with non-zero extra had to clamp extra. */
1470 hook_invoke_expand(hook_args->is_realloc
1473 hook_args->args);
1494 hook_invoke_alloc(hook_args->is_realloc
1496 hook_args->args);
1497 hook_invoke_dalloc(hook_args->is_realloc
1498 ? hook_dalloc_realloc : hook_dalloc_rallocx, ptr, hook_args->args);
1501 * Junk/zero-filling were already done by
1512 return base_ehooks_get(arena->base);
1521 malloc_mutex_lock(tsd_tsdn(tsd), &info->mtx);
1524 pa_shard_disable_hpa(tsd_tsdn(tsd), &arena->pa_shard);
1525 extent_hooks_t *ret = base_extent_hooks_set(arena->base, extent_hooks);
1527 malloc_mutex_unlock(tsd_tsdn(tsd), &info->mtx);
1535 return (dss_prec_t)atomic_load_u(&arena->dss_prec, ATOMIC_ACQUIRE);
1543 atomic_store_u(&arena->dss_prec, (unsigned)dss_prec, ATOMIC_RELEASE);
1580 &arena->pa_shard.pac, old_limit, new_limit);
1585 return atomic_load_u(&arena->nthreads[internal], ATOMIC_RELAXED);
1590 atomic_fetch_add_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
1595 atomic_fetch_sub_u(&arena->nthreads[internal], 1, ATOMIC_RELAXED);
1607 base = base_new(tsdn, ind, config->extent_hooks,
1608 config->metadata_use_hooks);
1620 atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
1621 atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
1622 arena->last_thd = NULL;
1625 if (arena_stats_init(tsdn, &arena->stats)) {
1629 ql_new(&arena->tcache_ql);
1630 ql_new(&arena->cache_bin_array_descriptor_ql);
1631 if (malloc_mutex_init(&arena->tcache_ql_mtx, "tcache_ql",
1637 atomic_store_u(&arena->dss_prec, (unsigned)extent_dss_prec_get(),
1640 edata_list_active_init(&arena->large);
1641 if (malloc_mutex_init(&arena->large_mtx, "arena_large",
1648 if (pa_shard_init(tsdn, &arena->pa_shard, &arena_pa_central_global,
1649 &arena_emap_global, base, ind, &arena->stats.pa_shard_stats,
1650 LOCKEDINT_MTX(arena->stats.mtx), &cur_time, oversize_threshold,
1657 atomic_store_u(&arena->binshard_next, 0, ATOMIC_RELEASE);
1659 bool err = bin_init(&arena->bins[i]);
1665 arena->base = base;
1668 arena->ind = ind;
1670 nstime_init_update(&arena->create_time);
1674 * - Custom extent hooks (we should only return memory allocated from
1676 * - Arena 0 initialization. In this case, we're mid-bootstrapping, and
1682 if (pa_shard_enable_hpa(tsdn, &arena->pa_shard,
1748 /* The threshold should be large size class. */
1777 sc_t *sc = &sc_data->sc[i];
1779 (1U << sc->lg_base) + (sc->ndelta << sc->lg_delta));
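The expression above reconstructs a size class from its (lg_base, ndelta, lg_delta) triple: size = 2^lg_base + ndelta * 2^lg_delta. A standalone check of that arithmetic with illustrative values (not taken from jemalloc's size-class tables):

#include <assert.h>
#include <stdint.h>

int main(void) {
    unsigned lg_base = 7, ndelta = 1, lg_delta = 5;
    uint64_t size = ((uint64_t)1 << lg_base) + ((uint64_t)ndelta << lg_delta);
    assert(size == 128 + 32 && size == 160);
    return 0;
}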
1794 pa_shard_prefork0(tsdn, &arena->pa_shard);
1800 malloc_mutex_prefork(tsdn, &arena->tcache_ql_mtx);
1806 pa_shard_prefork2(tsdn, &arena->pa_shard);
1811 pa_shard_prefork3(tsdn, &arena->pa_shard);
1816 pa_shard_prefork4(tsdn, &arena->pa_shard);
1821 pa_shard_prefork5(tsdn, &arena->pa_shard);
1826 base_prefork(tsdn, arena->base);
1831 malloc_mutex_prefork(tsdn, &arena->large_mtx);
1837 bin_prefork(tsdn, &arena->bins[i]);
1844 bin_postfork_parent(tsdn, &arena->bins[i]);
1847 malloc_mutex_postfork_parent(tsdn, &arena->large_mtx);
1848 base_postfork_parent(tsdn, arena->base);
1849 pa_shard_postfork_parent(tsdn, &arena->pa_shard);
1851 malloc_mutex_postfork_parent(tsdn, &arena->tcache_ql_mtx);
1857 atomic_store_u(&arena->nthreads[0], 0, ATOMIC_RELAXED);
1858 atomic_store_u(&arena->nthreads[1], 0, ATOMIC_RELAXED);
1866 ql_new(&arena->tcache_ql);
1867 ql_new(&arena->cache_bin_array_descriptor_ql);
1869 if (tcache_slow != NULL && tcache_slow->arena == arena) {
1870 tcache_t *tcache = tcache_slow->tcache;
1872 ql_tail_insert(&arena->tcache_ql, tcache_slow, link);
1874 &tcache_slow->cache_bin_array_descriptor,
1875 tcache->bins);
1876 ql_tail_insert(&arena->cache_bin_array_descriptor_ql,
1877 &tcache_slow->cache_bin_array_descriptor, link);
1882 bin_postfork_child(tsdn, &arena->bins[i]);
1885 malloc_mutex_postfork_child(tsdn, &arena->large_mtx);
1886 base_postfork_child(tsdn, arena->base);
1887 pa_shard_postfork_child(tsdn, &arena->pa_shard);
1889 malloc_mutex_postfork_child(tsdn, &arena->tcache_ql_mtx);
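The prefork/postfork_parent/postfork_child hooks above follow the standard pthread_atfork discipline: take every arena lock in a fixed order before fork(), then release it in both the parent and the child so no lock is left held by a thread that does not exist in the child. A minimal sketch of that pattern with a hypothetical lock, not jemalloc's actual registration code:

#include <pthread.h>

static pthread_mutex_t demo_mtx = PTHREAD_MUTEX_INITIALIZER;

/* Acquire before fork() so no other thread holds the lock across the fork. */
static void demo_prefork(void) { pthread_mutex_lock(&demo_mtx); }
/* Release again in both parent and child. */
static void demo_postfork(void) { pthread_mutex_unlock(&demo_mtx); }

int main(void) {
    pthread_atfork(demo_prefork, demo_postfork, demo_postfork);
    return 0;
}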