/linux/include/trace/events/ |
oom.h
   38  unsigned long reclaimable,
   44  TP_ARGS(zoneref, order, reclaimable, available, min_wmark, no_progress_loops, wmark_check),
   50  __field( unsigned long, reclaimable)
   61  __entry->reclaimable = reclaimable;
   71  __entry->reclaimable, __entry->available, __entry->min_wmark,
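These hits are the reclaim_retry_zone tracepoint, which records the page allocator's retry decision. A minimal standalone C model of the record it emits, not the kernel's TRACE_EVENT machinery; the struct name and emit_record() helper are illustrative, only the field names come from the hits above:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative record mirroring the tracepoint's fields. */
struct reclaim_retry_record {
	int order;                 /* allocation order being retried      */
	unsigned long reclaimable; /* pages reclaim could still free      */
	unsigned long available;   /* free + reclaimable estimate         */
	unsigned long min_wmark;   /* zone's min watermark                */
	int no_progress_loops;     /* consecutive rounds with no progress */
	bool wmark_check;          /* would the min watermark be met?     */
};

static void emit_record(const struct reclaim_retry_record *r)
{
	printf("order=%d reclaimable=%lu available=%lu min_wmark=%lu "
	       "no_progress_loops=%d wmark_check=%d\n",
	       r->order, r->reclaimable, r->available, r->min_wmark,
	       r->no_progress_loops, r->wmark_check);
}

int main(void)
{
	struct reclaim_retry_record r = {
		.order = 2, .reclaimable = 5120, .available = 7168,
		.min_wmark = 4096, .no_progress_loops = 3, .wmark_check = true,
	};

	emit_record(&r);
	return 0;
}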
/linux/mm/ |
show_mem.c
   37  unsigned long reclaimable;  in si_mem_available() local
   60  * Part of the reclaimable slab and other kernel memory consists of  in si_mem_available()
   64  reclaimable = global_node_page_state_pages(NR_SLAB_RECLAIMABLE_B) +  in si_mem_available()
   66  reclaimable -= min(reclaimable / 2, wmark_low);  in si_mem_available()
   67  available += reclaimable;  in si_mem_available()
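These lines are the heart of si_mem_available()'s slab heuristic: only part of reclaimable slab and kernel memory can really be freed, so it discounts the smaller of half the reclaimable total or the low watermark before counting the rest as available. A runnable userspace sketch of that arithmetic; the numbers and the min_ul() helper are illustrative assumptions, not kernel code:

#include <stdio.h>

static unsigned long min_ul(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long available = 100000;   /* free + usable page cache, in pages */
	unsigned long slab_reclaimable = 40000;
	unsigned long kreclaimable = 5000;  /* other reclaimable kernel memory    */
	unsigned long wmark_low = 12000;

	/* Discount the part of reclaimable memory we likely cannot free. */
	unsigned long reclaimable = slab_reclaimable + kreclaimable;
	reclaimable -= min_ul(reclaimable / 2, wmark_low);
	available += reclaimable;

	printf("estimated available: %lu pages\n", available); /* 133000 */
	return 0;
}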
vmscan.c
   489  int reclaimable = 0, write_pending = 0;  in skip_throttle_noprogress() local
   510  reclaimable += zone_reclaimable_pages(zone);  in skip_throttle_noprogress()
   514  if (2 * write_pending <= reclaimable)  in skip_throttle_noprogress()
  3922  unsigned long reclaimable;  in set_initial_priority() local
  3931  reclaimable = node_page_state(pgdat, NR_INACTIVE_FILE);  in set_initial_priority()
  3933  reclaimable += node_page_state(pgdat, NR_INACTIVE_ANON);  in set_initial_priority()
  3936  priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1);  in set_initial_priority()
  3998  bool reclaimable = !min_ttl;  in lru_gen_age_node() local
  4010  if (!reclaimable)  in lru_gen_age_node()
  4011  reclaimable = lruvec_is_reclaimable(lruvec, sc, min_ttl);  in lru_gen_age_node()
  [all …]
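Of these, the hit at 3936 is the interesting arithmetic: set_initial_priority() picks a scan priority so that reclaimable >> priority lands near sc->nr_to_reclaim, using the position of the highest set bit as a cheap log2. A standalone sketch; the fls_long() re-implementation, the clamp, and the sample numbers are illustrative:

#include <stdio.h>

static int fls_long(unsigned long x)	/* 1-based index of top bit, 0 if x == 0 */
{
	int r = 0;

	while (x) {
		x >>= 1;
		r++;
	}
	return r;
}

int main(void)
{
	unsigned long reclaimable = 1UL << 20;	/* ~1M reclaimable pages */
	unsigned long nr_to_reclaim = 32;	/* per-round reclaim goal */

	int priority = fls_long(reclaimable) - 1 - fls_long(nr_to_reclaim - 1);

	if (priority < 0)	/* illustrative guard for tiny reclaimable counts */
		priority = 0;
	printf("initial priority: %d (scan ~%lu pages per round)\n",
	       priority, reclaimable >> priority);	/* 15, ~32 pages */
	return 0;
}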
page_alloc.c
  4127  unsigned long reclaimable;  in should_reclaim_retry() local
  4136  available = reclaimable = zone_reclaimable_pages(zone);  in should_reclaim_retry()
  4145  trace_reclaim_retry_zone(z, order, reclaimable,  in should_reclaim_retry()
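should_reclaim_retry() asks whether retrying reclaim can plausibly help: it adds what reclaim could still free to the free pages and checks that sum against the min watermark, feeding the verdict to the reclaim_retry_zone tracepoint from oom.h above. A rough standalone model; watermark_ok() is a crude stand-in for the kernel's __zone_watermark_ok(), not its real logic:

#include <stdbool.h>
#include <stdio.h>

/* Crude stand-in: would 'available' pages satisfy an order-'order'
 * request above the min watermark? */
static bool watermark_ok(unsigned long available, unsigned long min_wmark,
			 int order)
{
	return available > min_wmark + (1UL << order);
}

static bool should_reclaim_retry(unsigned long free, unsigned long reclaimable,
				 unsigned long min_wmark, int order)
{
	unsigned long available = free + reclaimable;

	/* Retry only if reclaiming everything reclaimable could pass. */
	return watermark_ok(available, min_wmark, order);
}

int main(void)
{
	printf("retry: %d\n", should_reclaim_retry(1000, 50000, 12000, 0));
	return 0;
}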
/linux/drivers/net/ethernet/chelsio/cxgb4vf/ |
sge.c
   411  static inline int reclaimable(const struct sge_txq *tq)  in reclaimable() function
   414  int reclaimable = hw_cidx - tq->cidx;  in reclaimable() local
   415  if (reclaimable < 0)  in reclaimable()
   416  reclaimable += tq->size;  in reclaimable()
   417  return reclaimable;  in reclaimable()
   434  int avail = reclaimable(tq);  in reclaim_completed_tx()
  2133  if (reclaimable(&txq->q) && __netif_tx_trylock(txq->txq)) {  in sge_tx_timer_cb()
  2134  int avail = reclaimable(&txq->q);  in sge_tx_timer_cb()
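Here reclaimable() is plain ring-buffer arithmetic: the hardware's consumer index may have wrapped around the ring relative to the driver's, so a negative difference is corrected by adding the ring size. A self-contained model of the same calculation; struct txq_model is an illustrative stand-in for struct sge_txq:

#include <stdio.h>

struct txq_model {
	unsigned int size;	/* number of descriptors in the ring */
	unsigned int cidx;	/* software consumer index           */
	unsigned int hw_cidx;	/* consumer index reported by HW     */
};

static int reclaimable(const struct txq_model *tq)
{
	int n = (int)tq->hw_cidx - (int)tq->cidx;

	/* Hardware index wrapped past ours: unwrap by one ring size. */
	return n < 0 ? n + (int)tq->size : n;
}

int main(void)
{
	struct txq_model tq = { .size = 1024, .cidx = 1000, .hw_cidx = 8 };

	printf("%d descriptors reclaimable\n", reclaimable(&tq));	/* 32 */
	return 0;
}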
/linux/tools/testing/selftests/cgroup/ |
memcg_protection.m
   10  % reclaim) and then the reclaim starts, all memory is reclaimable, i.e. treated
/linux/Documentation/admin-guide/mm/ |
concepts.rst
  166  disk, are called `reclaimable`. The most notable categories of the
  167  reclaimable pages are page cache and anonymous memory.
  178  The process of freeing the reclaimable physical memory pages and
idle_page_tracking.rst
   63  are not reclaimable, he or she can filter them out using
/linux/fs/xfs/ |
xfs_icache.c
  1116  long reclaimable = 0;  in xfs_reclaim_inodes_count() local
  1122  reclaimable += pag->pag_ici_reclaimable;  in xfs_reclaim_inodes_count()
  1126  return reclaimable;  in xfs_reclaim_inodes_count()
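xfs_reclaim_inodes_count() just sums the per-allocation-group reclaimable-inode counters. The kernel walks the xfs_perag structures; a plain array stands in for that here, so treat this as a sketch rather than XFS internals:

#include <stdio.h>

struct perag_model {
	long pag_ici_reclaimable;	/* reclaimable inodes in this AG */
};

static long reclaim_inodes_count(const struct perag_model *pags, int nr)
{
	long reclaimable = 0;

	for (int i = 0; i < nr; i++)
		reclaimable += pags[i].pag_ici_reclaimable;
	return reclaimable;
}

int main(void)
{
	struct perag_model pags[] = { { 12 }, { 0 }, { 31 } };

	printf("%ld reclaimable inodes\n", reclaim_inodes_count(pags, 3)); /* 43 */
	return 0;
}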
/linux/drivers/md/ |
raid5-cache.c
  1502  sector_t reclaimable;  in r5l_do_reclaim() local
  1515  reclaimable = r5l_reclaimable_space(log);  in r5l_do_reclaim()
  1516  if (reclaimable >= reclaim_target ||  in r5l_do_reclaim()
  1525  r5l_reclaimable_space(log) > reclaimable,  in r5l_do_reclaim()
  1532  if (reclaimable == 0 || !write_super)  in r5l_do_reclaim()
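r5l_do_reclaim() keeps re-sampling the reclaimable log space until it reaches the target; the real code sleeps on a waitqueue until r5l_reclaimable_space() grows, where this busy-poll model of the same control flow uses a simulated counter standing in for stripe completions:

#include <stdio.h>

typedef unsigned long long sector_t;	/* userspace stand-in */

static sector_t space;	/* simulated reclaimable log space */

static sector_t reclaimable_space_model(void)
{
	space += 128;	/* pretend stripes keep completing */
	return space;
}

int main(void)
{
	sector_t reclaim_target = 1024, reclaimable;

	/* Re-sample until enough log space has become reclaimable. */
	do {
		reclaimable = reclaimable_space_model();
	} while (reclaimable < reclaim_target);

	printf("reclaiming %llu sectors of log space\n", reclaimable);
	return 0;
}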
/linux/Documentation/admin-guide/sysctl/ |
vm.rst
  166  and reclaimable pages, the number of pages at which the background kernel
  201  and reclaimable pages, the number of pages at which a process which is
  233  reclaimable slab objects like dentries and inodes. Once dropped, their
  240  To free reclaimable slab objects (includes dentries and inodes)::
  372  And on large highmem machines this lack of reclaimable lowmem memory
  543  than this percentage of pages in a zone are reclaimable slab pages.
/linux/include/net/ |
sock.h
  1562  int reclaimable;  in sk_mem_reclaim() local
  1567  reclaimable = sk->sk_forward_alloc - sk_unused_reserved_mem(sk);  in sk_mem_reclaim()
  1569  if (reclaimable >= (int)PAGE_SIZE)  in sk_mem_reclaim()
  1570  __sk_mem_reclaim(sk, reclaimable);  in sk_mem_reclaim()
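sk_mem_reclaim() returns a socket's forward-allocated memory to the global accounting only once at least one page's worth beyond the reserved amount has built up, so the shared counters are not bounced on every small uncharge. A sketch of that policy; struct sock_model and its 'reserved' field are illustrative, only the sk_forward_alloc name and the PAGE_SIZE threshold come from the hits above:

#include <stdio.h>

#define PAGE_SIZE 4096

struct sock_model {
	int sk_forward_alloc;	/* bytes pre-charged to this socket */
	int reserved;		/* bytes that must stay charged     */
};

static void mem_reclaim_model(struct sock_model *sk)
{
	int reclaimable = sk->sk_forward_alloc - sk->reserved;

	/* Only bother the global accounting for a page or more. */
	if (reclaimable >= PAGE_SIZE) {
		sk->sk_forward_alloc -= reclaimable;
		printf("returned %d bytes\n", reclaimable);
	}
}

int main(void)
{
	struct sock_model sk = { .sk_forward_alloc = 9000, .reserved = 2048 };

	mem_reclaim_model(&sk);	/* 6952 >= 4096, so it reclaims */
	return 0;
}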
/linux/net/mptcp/ |
protocol.c
  177  int reclaimable;  in mptcp_rmem_uncharge() local
  180  reclaimable = msk->rmem_fwd_alloc - sk_unused_reserved_mem(sk);  in mptcp_rmem_uncharge()
  183  if (unlikely(reclaimable >= PAGE_SIZE))  in mptcp_rmem_uncharge()
  184  __mptcp_rmem_reclaim(sk, reclaimable);  in mptcp_rmem_uncharge()
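As the hits show, mptcp_rmem_uncharge() applies the same page-sized hysteresis as sk_mem_reclaim() above, only against the MPTCP-level rmem_fwd_alloc counter rather than sk_forward_alloc, so the sock.h sketch covers this pattern as well.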
/linux/Documentation/scheduler/ |
sched-deadline.rst
  219  - Umax is the maximum reclaimable utilization (subjected to RT throttling
  223  - Uextra is the (per runqueue) extra reclaimable utilization
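These two symbols belong to the GRUB bandwidth-reclaiming description in that document. For context, in recent kernels the documented runtime-decrease rule has roughly the form below, with u_i the task's own utilization and Uinact the runqueue's inactive utilization; treat the exact form as an assumption here, the authoritative statement is in the surrounding document:

    dq = -(max{ u_i, (Umax - Uinact - Uextra) } / Umax) dt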
/linux/Documentation/ABI/testing/ |
sysfs-kernel-slab
  393  are reclaimable (and grouped by their mobility).
/linux/Documentation/mm/ |
unevictable-lru.rst
  442  whole of the huge page, we want the rest of the page to be reclaimable.