Lines Matching +full:delta +full:- +full:y +full:- +full:threshold

1 // SPDX-License-Identifier: GPL-2.0
31 #include <linux/backing-dev.h>
45 #include <linux/memory-tiers.h>
188 if ((_folio)->lru.prev != _base) { \
191 prev = lru_to_folio(&(_folio->lru)); \
192 prefetchw(&prev->_field); \
209 return sc->target_mem_cgroup; in cgroup_reclaim()
218 return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup); in root_reclaim()
222 * writeback_throttling_sane - is the usual dirty throttling mechanism available?
247 if (sc->proactive && sc->proactive_swappiness) in sc_swappiness()
248 return *sc->proactive_swappiness; in sc_swappiness()
273 /* for_each_managed_zone_pgdat - helper macro to iterate over all managed zones in a pgdat up to
285 for ((idx) = 0, (zone) = (pgdat)->node_zones; \
296 WARN_ON_ONCE(rs && task->reclaim_state); in set_task_reclaim_state()
298 /* Check for the nulling of an already-nulled member */ in set_task_reclaim_state()
299 WARN_ON_ONCE(!rs && !task->reclaim_state); in set_task_reclaim_state()
301 task->reclaim_state = rs; in set_task_reclaim_state()
305 * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to
306 * scan_control->nr_reclaimed.
311 * Currently, reclaim_state->reclaimed includes three types of pages in flush_reclaim_state()
318 * single memcg. For example, a memcg-aware shrinker can free one object in flush_reclaim_state()
321 * overestimating the reclaimed amount (potentially under-reclaiming). in flush_reclaim_state()
323 * Only count such pages for global reclaim to prevent under-reclaiming in flush_reclaim_state()
338 if (current->reclaim_state && root_reclaim(sc)) { in flush_reclaim_state()
339 sc->nr_reclaimed += current->reclaim_state->reclaimed; in flush_reclaim_state()
340 current->reclaim_state->reclaimed = 0; in flush_reclaim_state()
351 if (sc && sc->no_demotion) in can_demote()
368 * For non-memcg reclaim, is there in can_reclaim_anon_pages()
406 * lruvec_lru_size - Returns the number of pages on the given LRU list.
409 * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list)
459 BUILD_BUG_ON(PGSTEAL_##type - PGSTEAL_KSWAPD != \
460 PGDEMOTE_##type - PGDEMOTE_KSWAPD); \
461 BUILD_BUG_ON(PGSTEAL_##type - PGSTEAL_KSWAPD != \
462 PGSCAN_##type - PGSCAN_KSWAPD); \
474 return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD; in reclaimer_offset()
475 if (sc->proactive) in reclaimer_offset()
476 return PGSTEAL_PROACTIVE - PGSTEAL_KSWAPD; in reclaimer_offset()
477 return PGSTEAL_DIRECT - PGSTEAL_KSWAPD; in reclaimer_offset()
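The BUILD_BUG_ON()s above only hold because the PGSTEAL_*, PGSCAN_* and PGDEMOTE_* counters list the reclaimers in the same order, so the single offset returned by reclaimer_offset() indexes all three families. A minimal, self-contained sketch of that parallel-enum pattern; the enum names and main() below are hypothetical stand-ins, not the kernel's vm_event_item values:

```c
#include <stdio.h>

/* Hypothetical stand-ins for PGSTEAL_* and PGSCAN_*: both families
 * deliberately list the reclaimers in the same order. */
enum { STEAL_KSWAPD, STEAL_DIRECT, STEAL_KHUGEPAGED, STEAL_PROACTIVE,
       SCAN_KSWAPD,  SCAN_DIRECT,  SCAN_KHUGEPAGED,  SCAN_PROACTIVE,
       NR_EVENTS };

/* Compile-time layout check, playing the role of the BUILD_BUG_ON()s. */
_Static_assert(STEAL_DIRECT - STEAL_KSWAPD == SCAN_DIRECT - SCAN_KSWAPD,
	       "parallel enums out of sync");

static unsigned long events[NR_EVENTS];

/* One offset identifies the reclaimer and is valid for both families. */
static void count_reclaim(int offset, unsigned long stolen, unsigned long scanned)
{
	events[STEAL_KSWAPD + offset] += stolen;
	events[SCAN_KSWAPD + offset] += scanned;
}

int main(void)
{
	count_reclaim(STEAL_DIRECT - STEAL_KSWAPD, 32, 128);
	printf("direct reclaim: stolen=%lu scanned=%lu\n",
	       events[STEAL_DIRECT], events[SCAN_DIRECT]);
	return 0;
}
```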
485 * private data at folio->private. in is_page_cache_freeable()
487 return folio_ref_count(folio) - folio_test_private(folio) == in is_page_cache_freeable()
493 * -ENOSPC. We need to propagate that into the address_space for a subsequent
521 if (atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES) in skip_throttle_noprogress()
529 for_each_managed_zone_pgdat(zone, pgdat, i, MAX_NR_ZONES - 1) { in skip_throttle_noprogress()
542 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason]; in reclaim_throttle()
552 current->flags & (PF_USER_WORKER|PF_KTHREAD)) { in reclaim_throttle()
560 * parallel reclaimers which is a short-lived event so the timeout is in reclaim_throttle()
562 * potentially long-lived events so use a longer timeout. This is shaky in reclaim_throttle()
571 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) { in reclaim_throttle()
572 WRITE_ONCE(pgdat->nr_reclaim_start, in reclaim_throttle()
602 atomic_dec(&pgdat->nr_writeback_throttled); in reclaim_throttle()
604 trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout), in reclaim_throttle()
605 jiffies_to_usecs(timeout - ret), in reclaim_throttle()
622 * This is an inaccurate read as the per-cpu deltas may not in __acct_reclaim_writeback()
628 nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) - in __acct_reclaim_writeback()
629 READ_ONCE(pgdat->nr_reclaim_start); in __acct_reclaim_writeback()
632 wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]); in __acct_reclaim_writeback()
688 * than tmpfs/shmem. That's taken care of in page-writeback. in pageout()
705 * folio->mapping == NULL while being dirty with clean buffers. in pageout()
741 spin_lock(&mapping->host->i_lock); in __remove_mapping()
742 xa_lock_irq(&mapping->i_pages); in __remove_mapping()
764 * escape unnoticed. The smp_rmb is needed to ensure the folio->flags in __remove_mapping()
765 * load is not satisfied before that of folio->_refcount. in __remove_mapping()
780 swp_entry_t swap = folio->swap; in __remove_mapping()
791 free_folio = mapping->a_ops->free_folio; in __remove_mapping()
812 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
814 inode_add_lru(mapping->host); in __remove_mapping()
815 spin_unlock(&mapping->host->i_lock); in __remove_mapping()
827 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
828 spin_unlock(&mapping->host->i_lock); in __remove_mapping()
834 * remove_mapping() - Attempt to remove a folio from its mapping.
860 * folio_putback_lru - Put previously isolated folio onto appropriate LRU list.
892 set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced)); in lru_gen_set_refs()
896 set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS, BIT(PG_workingset)); in lru_gen_set_refs()
912 referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup, in folio_check_references()
925 * 2) Skip the non-shared swapbacked folio mapped solely by in folio_check_references()
926 * the exiting or OOM-reaped process. in folio_check_references()
928 if (referenced_ptes == -1) in folio_check_references()
961 * Activate file-backed executable folios after first usage. in folio_check_references()
1005 if (mapping && mapping->a_ops->is_dirty_writeback) in folio_check_dirty_writeback()
1006 mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); in folio_check_dirty_writeback()
1018 allowed_mask = mtc->nmask; in alloc_demote_folio()
1028 mtc->nmask = NULL; in alloc_demote_folio()
1029 mtc->gfp_mask |= __GFP_THISNODE; in alloc_demote_folio()
1034 mtc->gfp_mask &= ~__GFP_THISNODE; in alloc_demote_folio()
1035 mtc->nmask = allowed_mask; in alloc_demote_folio()
1047 int target_nid = next_demotion_node(pgdat->node_id); in demote_folio_list()
1087 * We can "enter_fs" for swap-cache with only __GFP_IO in may_enter_fs()
1089 * ->flags can be updated non-atomicially (scan_swap_map_slots), in may_enter_fs()
1115 do_demote_pass = can_demote(pgdat->node_id, sc, memcg); in shrink_folio_list()
1128 list_del(&folio->lru); in shrink_folio_list()
1153 sc->nr_scanned += nr_pages; in shrink_folio_list()
1158 if (!sc->may_unmap && folio_mapped(folio)) in shrink_folio_list()
1168 stat->nr_dirty += nr_pages; in shrink_folio_list()
1171 stat->nr_unqueued_dirty += nr_pages; in shrink_folio_list()
1180 stat->nr_congested += nr_pages; in shrink_folio_list()
1234 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { in shrink_folio_list()
1235 stat->nr_immediate += nr_pages; in shrink_folio_list()
1241 !may_enter_fs(folio, sc->gfp_mask) || in shrink_folio_list()
1245 * This is slightly racy - in shrink_folio_list()
1249 * interpreted as the readahead flag - but in shrink_folio_list()
1259 stat->nr_writeback += nr_pages; in shrink_folio_list()
1267 list_add_tail(&folio->lru, folio_list); in shrink_folio_list()
1279 stat->nr_ref_keep += nr_pages; in shrink_folio_list()
1292 list_add(&folio->lru, &demote_folios); in shrink_folio_list()
1304 if (!(sc->gfp_mask & __GFP_IO)) in shrink_folio_list()
1316 if (data_race(!list_empty(&folio->_deferred_list) && in shrink_folio_list()
1361 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
1392 stat->nr_unmap_fail += nr_pages; in shrink_folio_list()
1395 stat->nr_lazyfree_fail += nr_pages; in shrink_folio_list()
1415 * injecting inefficient single-folio I/O into in shrink_folio_list()
1426 !test_bit(PGDAT_DIRTY, &pgdat->flags))) { in shrink_folio_list()
1442 if (!may_enter_fs(folio, sc->gfp_mask)) in shrink_folio_list()
1444 if (!sc->may_writepage) in shrink_folio_list()
1463 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
1469 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
1472 stat->nr_pageout += nr_pages; in shrink_folio_list()
1480 * A synchronous write - probably a ramdisk. Go in shrink_folio_list()
1508 * and mark the folio clean - it can be freed. in shrink_folio_list()
1510 * Rarely, folios can have buffers and no ->mapping. in shrink_folio_list()
1519 if (!filemap_release_folio(folio, sc->gfp_mask)) in shrink_folio_list()
1554 sc->target_mem_cgroup)) in shrink_folio_list()
1579 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
1591 stat->nr_activate[type] += nr_pages; in shrink_folio_list()
1597 list_add(&folio->lru, &ret_folios); in shrink_folio_list()
1606 stat->nr_demoted += nr_demoted; in shrink_folio_list()
1628 if (!sc->proactive) { in shrink_folio_list()
1634 pgactivate = stat->nr_activate[0] + stat->nr_activate[1]; in shrink_folio_list()
1663 if (page_has_movable_ops(&folio->page)) in reclaim_clean_pages_from_list()
1668 list_move(&folio->lru, &clean_folios); in reclaim_clean_pages_from_list()
1679 nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc, in reclaim_clean_pages_from_list()
1684 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, in reclaim_clean_pages_from_list()
1685 -(long)nr_reclaimed); in reclaim_clean_pages_from_list()
1692 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, in reclaim_clean_pages_from_list()
1694 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, in reclaim_clean_pages_from_list()
1695 -(long)stat.nr_lazyfree_fail); in reclaim_clean_pages_from_list()
1712 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); in update_lru_sizes()
1720 * lruvec->lru_lock is heavily contended. Some of the functions that
1743 struct list_head *src = &lruvec->lists[lru]; in isolate_lru_folios()
1764 (folio_zonenum(folio) > sc->reclaim_idx)) { in isolate_lru_folios()
1782 if (!sc->may_unmap && folio_mapped(folio)) in isolate_lru_folios()
1787 * sure the folio is not being freed elsewhere -- the in isolate_lru_folios()
1803 list_move(&folio->lru, move_to); in isolate_lru_folios()
1826 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, in isolate_lru_folios()
1833 * folio_isolate_lru() - Try to isolate a folio from its LRU list.
1903 * won't get blocked by normal direct-reclaimers, forming a circular in too_many_isolated()
1906 if (gfp_has_io_fs(sc->gfp_mask)) in too_many_isolated()
1934 list_del(&folio->lru); in move_folios_to_lru()
1936 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1938 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1950 * list_add(&folio->lru,) in move_folios_to_lru()
1951 * list_add(&folio->lru,) in move_folios_to_lru()
1960 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1963 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1982 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1985 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1992 * If a kernel thread (such as nfsd for loop-back mounts) services a backing
1998 return !(current->flags & PF_LOCAL_THROTTLE); in current_may_throttle()
2034 spin_lock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2046 spin_unlock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2054 spin_lock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2059 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_inactive_list()
2067 nr_scanned - nr_reclaimed); in shrink_inactive_list()
2095 sc->nr.dirty += stat.nr_dirty; in shrink_inactive_list()
2096 sc->nr.congested += stat.nr_congested; in shrink_inactive_list()
2097 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; in shrink_inactive_list()
2098 sc->nr.writeback += stat.nr_writeback; in shrink_inactive_list()
2099 sc->nr.immediate += stat.nr_immediate; in shrink_inactive_list()
2100 sc->nr.taken += nr_taken; in shrink_inactive_list()
2102 sc->nr.file_taken += nr_taken; in shrink_inactive_list()
2104 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in shrink_inactive_list()
2105 nr_scanned, nr_reclaimed, &stat, sc->priority, file); in shrink_inactive_list()
2120 * It is safe to rely on the active flag against the non-LRU folios in here
2121 * because nobody will play with that bit on a non-LRU folio.
2123 * The downside is that we have to touch folio->_refcount against each folio.
2124 * But we had to alter folio->flags anyway.
2144 spin_lock_irq(&lruvec->lru_lock); in shrink_active_list()
2155 spin_unlock_irq(&lruvec->lru_lock); in shrink_active_list()
2162 list_del(&folio->lru); in shrink_active_list()
2178 if (folio_referenced(folio, 0, sc->target_mem_cgroup, in shrink_active_list()
2181 * Identify referenced, file-backed active folios and in shrink_active_list()
2185 * are not likely to be evicted by use-once streaming in shrink_active_list()
2191 list_add(&folio->lru, &l_active); in shrink_active_list()
2196 folio_clear_active(folio); /* we are de-activating */ in shrink_active_list()
2198 list_add(&folio->lru, &l_inactive); in shrink_active_list()
2204 spin_lock_irq(&lruvec->lru_lock); in shrink_active_list()
2212 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_active_list()
2215 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, in shrink_active_list()
2216 nr_deactivate, nr_rotated, sc->priority, file); in shrink_active_list()
2236 list_del(&folio->lru); in reclaim_folio_list()
2239 trace_mm_vmscan_reclaim_pages(pgdat->node_id, sc.nr_scanned, nr_reclaimed, &stat); in reclaim_folio_list()
2262 list_move(&folio->lru, &node_folio_list); in reclaim_pages()
2281 if (sc->may_deactivate & (1 << is_file_lru(lru))) in shrink_list()
2284 sc->skipped_deactivate = 1; in shrink_list()
2296 * to the established workingset on the scan-resistant active list,
2310 * -------------------------------------
2329 gb = (inactive + active) >> (30 - PAGE_SHIFT); in inactive_is_low()
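The shift above converts the combined LRU size to gigabytes; the target inactive:active ratio then grows roughly with the square root of that size (about 3:1 of active to inactive at 1GB, 10:1 at 10GB). A standalone sketch of the heuristic, assuming the kernel's int_sqrt() can be approximated with a floating-point sqrt():

```c
#include <math.h>
#include <stdio.h>

/* Sketch of the inactive_is_low() ratio heuristic: ratio ~= sqrt(10 * GB),
 * with a floor of 1. Build with -lm. */
static unsigned long inactive_ratio(unsigned long total_gb)
{
	unsigned long ratio = (unsigned long)sqrt(10.0 * total_gb);

	return ratio ? ratio : 1;
}

int main(void)
{
	unsigned long sizes_gb[] = { 1, 10, 100, 1024 };

	for (int i = 0; i < 4; i++)
		printf("%5luGB -> active:inactive target %lu:1\n",
		       sizes_gb[i], inactive_ratio(sizes_gb[i]));
	return 0;
}
```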
2353 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in prepare_scan_control()
2356 * Flush the memory cgroup stats in rate-limited way as we don't need in prepare_scan_control()
2360 mem_cgroup_flush_stats_ratelimited(sc->target_mem_cgroup); in prepare_scan_control()
2365 spin_lock_irq(&target_lruvec->lru_lock); in prepare_scan_control()
2366 sc->anon_cost = target_lruvec->anon_cost; in prepare_scan_control()
2367 sc->file_cost = target_lruvec->file_cost; in prepare_scan_control()
2368 spin_unlock_irq(&target_lruvec->lru_lock); in prepare_scan_control()
2374 if (!sc->force_deactivate) { in prepare_scan_control()
2384 if (refaults != target_lruvec->refaults[WORKINGSET_ANON] || in prepare_scan_control()
2386 sc->may_deactivate |= DEACTIVATE_ANON; in prepare_scan_control()
2388 sc->may_deactivate &= ~DEACTIVATE_ANON; in prepare_scan_control()
2392 if (refaults != target_lruvec->refaults[WORKINGSET_FILE] || in prepare_scan_control()
2394 sc->may_deactivate |= DEACTIVATE_FILE; in prepare_scan_control()
2396 sc->may_deactivate &= ~DEACTIVATE_FILE; in prepare_scan_control()
2398 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; in prepare_scan_control()
2406 if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE) && in prepare_scan_control()
2407 !sc->no_cache_trim_mode) in prepare_scan_control()
2408 sc->cache_trim_mode = 1; in prepare_scan_control()
2410 sc->cache_trim_mode = 0; in prepare_scan_control()
2427 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); in prepare_scan_control()
2431 for_each_managed_zone_pgdat(zone, pgdat, z, MAX_NR_ZONES - 1) { in prepare_scan_control()
2442 sc->file_is_tiny = in prepare_scan_control()
2444 !(sc->may_deactivate & DEACTIVATE_ANON) && in prepare_scan_control()
2445 anon >> sc->priority; in prepare_scan_control()
2470 total_cost = sc->anon_cost + sc->file_cost; in calculate_pressure_balance()
2471 anon_cost = total_cost + sc->anon_cost; in calculate_pressure_balance()
2472 file_cost = total_cost + sc->file_cost; in calculate_pressure_balance()
2478 fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1); in calculate_pressure_balance()
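The fp line above is half of the swappiness-weighted split that calculate_pressure_balance() derives from the relative anon/file reclaim cost. A worked example with made-up costs, assuming MAX_SWAPPINESS is 200 as in current kernels:

```c
#include <stdio.h>

#define MAX_SWAPPINESS 200	/* assumption: matches current kernels */

int main(void)
{
	/* hypothetical recent reclaim costs */
	unsigned long sc_anon_cost = 300, sc_file_cost = 100;
	int swappiness = 60;

	unsigned long total_cost = sc_anon_cost + sc_file_cost;
	unsigned long anon_cost = total_cost + sc_anon_cost;
	unsigned long file_cost = total_cost + sc_file_cost;

	/* same arithmetic as the ap/fp lines in calculate_pressure_balance() */
	unsigned long ap = swappiness * (total_cost + 1) / (anon_cost + 1);
	unsigned long fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1) /
			   (file_cost + 1);

	printf("anon fraction %lu / %lu, file fraction %lu / %lu\n",
	       ap, ap + fp, fp, ap + fp);
	return 0;
}
```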
2491 mem_cgroup_protection(sc->target_mem_cgroup, memcg, &min, &low); in apply_proportional_protection()
2500 * becomes extremely binary -- from nothing as we in apply_proportional_protection()
2501 * approach the memory protection threshold, to totally in apply_proportional_protection()
2515 * the best-effort low protection. However, we still in apply_proportional_protection()
2516 * ideally want to honor how well-behaved groups are in in apply_proportional_protection()
2527 if (!sc->memcg_low_reclaim && low > min) { in apply_proportional_protection()
2529 sc->memcg_low_skipped = 1; in apply_proportional_protection()
2537 scan -= scan * protection / (cgroup_size + 1); in apply_proportional_protection()
2542 * sc->priority further than desirable. in apply_proportional_protection()
2568 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) { in get_scan_count()
2587 WARN_ON_ONCE(!sc->proactive); in get_scan_count()
2597 if (!sc->priority && swappiness) { in get_scan_count()
2603 * If the system is almost out of file pages, force-scan anon. in get_scan_count()
2605 if (sc->file_is_tiny) { in get_scan_count()
2615 if (sc->cache_trim_mode) { in get_scan_count()
2629 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); in get_scan_count()
2631 scan >>= sc->priority; in get_scan_count()
2650 * round-off error. in get_scan_count()
2684 return can_demote(lruvec_pgdat(lruvec)->node_id, sc, in can_age_anon_pages()
2713 unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
2717 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
2718 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
2745 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec; in get_lruvec()
2748 if (!lruvec->pgdat) in get_lruvec()
2749 lruvec->pgdat = pgdat; in get_lruvec()
2756 return &pgdat->__lruvec; in get_lruvec()
2764 if (!sc->may_swap) in get_swappiness()
2767 if (!can_demote(pgdat->node_id, sc, memcg) && in get_swappiness()
2776 return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1; in get_nr_gens()
2804 * To get rid of non-leaf entries that no longer have enough leaf entries, the
2805 * aging uses the double-buffering technique to flip to the other filter each
2806 * time it produces a new generation. For non-leaf entries that have enough
2832 key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1); in get_item_key()
2843 filter = READ_ONCE(mm_state->filters[gen]); in test_bloom_filter()
2859 filter = READ_ONCE(mm_state->filters[gen]); in update_bloom_filter()
2876 filter = mm_state->filters[gen]; in reset_bloom_filter()
2884 WRITE_ONCE(mm_state->filters[gen], filter); in reset_bloom_filter()
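The helpers above implement the double-buffered Bloom filter described in the comment block: one hash is split into two bit indices, and an item is reported present only if both bits are set, so false positives are possible but false negatives are not. A compact, self-contained sketch of the single-filter part (the sizes, hash and names are illustrative, and the per-generation flip between the two filters is omitted):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define FILTER_SHIFT	15
#define FILTER_BITS	(1UL << FILTER_SHIFT)
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static unsigned long filter[FILTER_BITS / BITS_PER_LONG];

/* Split one hash value into two bit indices, as get_item_key() does. */
static void get_keys(const void *item, unsigned int key[2])
{
	uint64_t hash = (uintptr_t)item * 0x9e3779b97f4a7c15ULL; /* toy hash */

	key[0] = hash & (FILTER_BITS - 1);
	key[1] = (hash >> FILTER_SHIFT) & (FILTER_BITS - 1);
}

static void filter_add(const void *item)
{
	unsigned int key[2];

	get_keys(item, key);
	filter[key[0] / BITS_PER_LONG] |= 1UL << (key[0] % BITS_PER_LONG);
	filter[key[1] / BITS_PER_LONG] |= 1UL << (key[1] % BITS_PER_LONG);
}

static bool filter_test(const void *item)
{
	unsigned int key[2];

	get_keys(item, key);
	return ((filter[key[0] / BITS_PER_LONG] >> (key[0] % BITS_PER_LONG)) & 1) &&
	       ((filter[key[1] / BITS_PER_LONG] >> (key[1] % BITS_PER_LONG)) & 1);
}

int main(void)
{
	int pmd;	/* stands in for a pmd_t * being remembered */

	printf("before add: %d\n", filter_test(&pmd));
	filter_add(&pmd);
	printf("after add:  %d\n", filter_test(&pmd));
	return 0;
}
```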
2902 return &memcg->mm_list; in get_mm_list()
2911 return &lruvec->mm_state; in get_mm_state()
2918 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in get_next_mm()
2919 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); in get_next_mm()
2921 mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); in get_next_mm()
2922 key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap); in get_next_mm()
2924 if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap)) in get_next_mm()
2927 clear_bit(key, &mm->lru_gen.bitmap); in get_next_mm()
2938 VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list)); in lru_gen_add_mm()
2940 VM_WARN_ON_ONCE(mm->lru_gen.memcg); in lru_gen_add_mm()
2941 mm->lru_gen.memcg = memcg; in lru_gen_add_mm()
2943 spin_lock(&mm_list->lock); in lru_gen_add_mm()
2950 if (mm_state->tail == &mm_list->fifo) in lru_gen_add_mm()
2951 mm_state->tail = &mm->lru_gen.list; in lru_gen_add_mm()
2954 list_add_tail(&mm->lru_gen.list, &mm_list->fifo); in lru_gen_add_mm()
2956 spin_unlock(&mm_list->lock); in lru_gen_add_mm()
2965 if (list_empty(&mm->lru_gen.list)) in lru_gen_del_mm()
2969 memcg = mm->lru_gen.memcg; in lru_gen_del_mm()
2973 spin_lock(&mm_list->lock); in lru_gen_del_mm()
2980 if (mm_state->head == &mm->lru_gen.list) in lru_gen_del_mm()
2981 mm_state->head = mm_state->head->prev; in lru_gen_del_mm()
2984 if (mm_state->tail == &mm->lru_gen.list) in lru_gen_del_mm()
2985 mm_state->tail = mm_state->tail->next; in lru_gen_del_mm()
2988 list_del_init(&mm->lru_gen.list); in lru_gen_del_mm()
2990 spin_unlock(&mm_list->lock); in lru_gen_del_mm()
2993 mem_cgroup_put(mm->lru_gen.memcg); in lru_gen_del_mm()
2994 mm->lru_gen.memcg = NULL; in lru_gen_del_mm()
3002 struct task_struct *task = rcu_dereference_protected(mm->owner, true); in lru_gen_migrate_mm()
3004 VM_WARN_ON_ONCE(task->mm != mm); in lru_gen_migrate_mm()
3005 lockdep_assert_held(&task->alloc_lock); in lru_gen_migrate_mm()
3012 if (!mm->lru_gen.memcg) in lru_gen_migrate_mm()
3018 if (memcg == mm->lru_gen.memcg) in lru_gen_migrate_mm()
3021 VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list)); in lru_gen_migrate_mm()
3051 struct lruvec *lruvec = walk->lruvec; in reset_mm_stats()
3054 lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); in reset_mm_stats()
3056 hist = lru_hist_from_seq(walk->seq); in reset_mm_stats()
3059 WRITE_ONCE(mm_state->stats[hist][i], in reset_mm_stats()
3060 mm_state->stats[hist][i] + walk->mm_stats[i]); in reset_mm_stats()
3061 walk->mm_stats[i] = 0; in reset_mm_stats()
3065 hist = lru_hist_from_seq(walk->seq + 1); in reset_mm_stats()
3068 WRITE_ONCE(mm_state->stats[hist][i], 0); in reset_mm_stats()
3077 struct lruvec *lruvec = walk->lruvec; in iterate_mm_list()
3083 * mm_state->seq is incremented after each iteration of mm_list. There in iterate_mm_list()
3092 spin_lock(&mm_list->lock); in iterate_mm_list()
3094 VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->seq); in iterate_mm_list()
3096 if (walk->seq <= mm_state->seq) in iterate_mm_list()
3099 if (!mm_state->head) in iterate_mm_list()
3100 mm_state->head = &mm_list->fifo; in iterate_mm_list()
3102 if (mm_state->head == &mm_list->fifo) in iterate_mm_list()
3106 mm_state->head = mm_state->head->next; in iterate_mm_list()
3107 if (mm_state->head == &mm_list->fifo) { in iterate_mm_list()
3108 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); in iterate_mm_list()
3114 if (!mm_state->tail || mm_state->tail == mm_state->head) { in iterate_mm_list()
3115 mm_state->tail = mm_state->head->next; in iterate_mm_list()
3116 walk->force_scan = true; in iterate_mm_list()
3123 spin_unlock(&mm_list->lock); in iterate_mm_list()
3126 reset_bloom_filter(mm_state, walk->seq + 1); in iterate_mm_list()
3143 spin_lock(&mm_list->lock); in iterate_mm_list_nowalk()
3145 VM_WARN_ON_ONCE(mm_state->seq + 1 < seq); in iterate_mm_list_nowalk()
3147 if (seq > mm_state->seq) { in iterate_mm_list_nowalk()
3148 mm_state->head = NULL; in iterate_mm_list_nowalk()
3149 mm_state->tail = NULL; in iterate_mm_list_nowalk()
3150 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); in iterate_mm_list_nowalk()
3154 spin_unlock(&mm_list->lock); in iterate_mm_list_nowalk()
3164 * A feedback loop based on Proportional-Integral-Derivative (PID) controller.
3179 * 1. The D term may discount the other two terms over time so that long-lived
3192 struct lru_gen_folio *lrugen = &lruvec->lrugen; in read_ctrl_pos()
3193 int hist = lru_hist_from_seq(lrugen->min_seq[type]); in read_ctrl_pos()
3195 pos->gain = gain; in read_ctrl_pos()
3196 pos->refaulted = pos->total = 0; in read_ctrl_pos()
3198 for (i = tier % MAX_NR_TIERS; i <= min(tier, MAX_NR_TIERS - 1); i++) { in read_ctrl_pos()
3199 pos->refaulted += lrugen->avg_refaulted[type][i] + in read_ctrl_pos()
3200 atomic_long_read(&lrugen->refaulted[hist][type][i]); in read_ctrl_pos()
3201 pos->total += lrugen->avg_total[type][i] + in read_ctrl_pos()
3202 lrugen->protected[hist][type][i] + in read_ctrl_pos()
3203 atomic_long_read(&lrugen->evicted[hist][type][i]); in read_ctrl_pos()
3210 struct lru_gen_folio *lrugen = &lruvec->lrugen; in reset_ctrl_pos()
3212 unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1; in reset_ctrl_pos()
3214 lockdep_assert_held(&lruvec->lru_lock); in reset_ctrl_pos()
3225 sum = lrugen->avg_refaulted[type][tier] + in reset_ctrl_pos()
3226 atomic_long_read(&lrugen->refaulted[hist][type][tier]); in reset_ctrl_pos()
3227 WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2); in reset_ctrl_pos()
3229 sum = lrugen->avg_total[type][tier] + in reset_ctrl_pos()
3230 lrugen->protected[hist][type][tier] + in reset_ctrl_pos()
3231 atomic_long_read(&lrugen->evicted[hist][type][tier]); in reset_ctrl_pos()
3232 WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2); in reset_ctrl_pos()
3236 atomic_long_set(&lrugen->refaulted[hist][type][tier], 0); in reset_ctrl_pos()
3237 atomic_long_set(&lrugen->evicted[hist][type][tier], 0); in reset_ctrl_pos()
3238 WRITE_ONCE(lrugen->protected[hist][type][tier], 0); in reset_ctrl_pos()
3249 return pv->refaulted < MIN_LRU_BATCH || in positive_ctrl_err()
3250 pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <= in positive_ctrl_err()
3251 (sp->refaulted + 1) * pv->total * pv->gain; in positive_ctrl_err()
3261 unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f); in folio_update_gen()
3267 set_mask_bits(&folio->flags.f, LRU_REFS_MASK, BIT(PG_referenced)); in folio_update_gen()
3268 return -1; in folio_update_gen()
3274 return -1; in folio_update_gen()
3278 } while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags)); in folio_update_gen()
3280 return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; in folio_update_gen()
3287 struct lru_gen_folio *lrugen = &lruvec->lrugen; in folio_inc_gen()
3288 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); in folio_inc_gen()
3289 unsigned long new_flags, old_flags = READ_ONCE(folio->flags.f); in folio_inc_gen()
3294 new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; in folio_inc_gen()
3306 } while (!try_cmpxchg(&folio->flags.f, &old_flags, new_flags)); in folio_inc_gen()
3318 int delta = folio_nr_pages(folio); in update_batch_size() local
3323 walk->batched++; in update_batch_size()
3325 walk->nr_pages[old_gen][type][zone] -= delta; in update_batch_size()
3326 walk->nr_pages[new_gen][type][zone] += delta; in update_batch_size()
3332 struct lruvec *lruvec = walk->lruvec; in reset_batch_size()
3333 struct lru_gen_folio *lrugen = &lruvec->lrugen; in reset_batch_size()
3335 walk->batched = 0; in reset_batch_size()
3339 int delta = walk->nr_pages[gen][type][zone]; in reset_batch_size() local
3341 if (!delta) in reset_batch_size()
3344 walk->nr_pages[gen][type][zone] = 0; in reset_batch_size()
3345 WRITE_ONCE(lrugen->nr_pages[gen][type][zone], in reset_batch_size()
3346 lrugen->nr_pages[gen][type][zone] + delta); in reset_batch_size()
3350 __update_lru_size(lruvec, lru, zone, delta); in reset_batch_size()
3357 struct vm_area_struct *vma = args->vma; in should_skip_vma()
3358 struct lru_gen_mm_walk *walk = args->private; in should_skip_vma()
3369 if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) in should_skip_vma()
3372 if (vma == get_gate_vma(vma->vm_mm)) in should_skip_vma()
3376 return !walk->swappiness; in should_skip_vma()
3378 if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping)) in should_skip_vma()
3381 mapping = vma->vm_file->f_mapping; in should_skip_vma()
3386 return !walk->swappiness; in should_skip_vma()
3388 if (walk->swappiness > MAX_SWAPPINESS) in should_skip_vma()
3392 return !mapping->a_ops->read_folio; in should_skip_vma()
3396 * Some userspace memory allocators map many single-page VMAs. Instead of
3405 VMA_ITERATOR(vmi, args->mm, start); in get_next_vma()
3410 for_each_vma(vmi, args->vma) { in get_next_vma()
3411 if (end && end <= args->vma->vm_start) in get_next_vma()
3414 if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args)) in get_next_vma()
3417 *vm_start = max(start, args->vma->vm_start); in get_next_vma()
3418 *vm_end = min(end - 1, args->vma->vm_end - 1) + 1; in get_next_vma()
3431 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); in get_pte_pfn()
3434 return -1; in get_pte_pfn()
3437 return -1; in get_pte_pfn()
3439 if (!pte_young(pte) && !mm_has_notifiers(vma->vm_mm)) in get_pte_pfn()
3440 return -1; in get_pte_pfn()
3443 return -1; in get_pte_pfn()
3445 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) in get_pte_pfn()
3446 return -1; in get_pte_pfn()
3456 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); in get_pmd_pfn()
3459 return -1; in get_pmd_pfn()
3461 if (!pmd_young(pmd) && !mm_has_notifiers(vma->vm_mm)) in get_pmd_pfn()
3462 return -1; in get_pmd_pfn()
3465 return -1; in get_pmd_pfn()
3467 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) in get_pmd_pfn()
3468 return -1; in get_pmd_pfn()
3481 if (folio_nid(folio) != pgdat->node_id) in get_pfn_folio()
3533 struct lru_gen_mm_walk *walk = args->private; in walk_pte_range()
3534 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); in walk_pte_range()
3535 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pte_range()
3536 DEFINE_MAX_SEQ(walk->lruvec); in walk_pte_range()
3540 pte = pte_offset_map_rw_nolock(args->mm, pmd, start & PMD_MASK, &pmdval, &ptl); in walk_pte_range()
3562 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pte_range()
3564 pfn = get_pte_pfn(ptent, args->vma, addr, pgdat); in walk_pte_range()
3565 if (pfn == -1) in walk_pte_range()
3572 if (!ptep_clear_young_notify(args->vma, addr, pte + i)) in walk_pte_range()
3586 walk->mm_stats[MM_LEAF_YOUNG]++; in walk_pte_range()
3609 struct lru_gen_mm_walk *walk = args->private; in walk_pmd_range_locked()
3610 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); in walk_pmd_range_locked()
3611 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range_locked()
3612 DEFINE_MAX_SEQ(walk->lruvec); in walk_pmd_range_locked()
3618 if (*first == -1) { in walk_pmd_range_locked()
3624 i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first); in walk_pmd_range_locked()
3626 __set_bit(i - 1, bitmap); in walk_pmd_range_locked()
3632 ptl = pmd_lockptr(args->mm, pmd); in walk_pmd_range_locked()
3649 if (!walk->force_scan && should_clear_pmd_young() && in walk_pmd_range_locked()
3650 !mm_has_notifiers(args->mm)) in walk_pmd_range_locked()
3656 if (pfn == -1) in walk_pmd_range_locked()
3676 walk->mm_stats[MM_LEAF_YOUNG]++; in walk_pmd_range_locked()
3686 *first = -1; in walk_pmd_range_locked()
3698 unsigned long first = -1; in walk_pmd_range()
3699 struct lru_gen_mm_walk *walk = args->private; in walk_pmd_range()
3700 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); in walk_pmd_range()
3712 vma = args->vma; in walk_pmd_range()
3719 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pmd_range()
3724 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range()
3727 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pmd_range()
3729 if (pfn != -1) in walk_pmd_range()
3734 if (!walk->force_scan && should_clear_pmd_young() && in walk_pmd_range()
3735 !mm_has_notifiers(args->mm)) { in walk_pmd_range()
3742 if (!walk->force_scan && !test_bloom_filter(mm_state, walk->seq, pmd + i)) in walk_pmd_range()
3745 walk->mm_stats[MM_NONLEAF_FOUND]++; in walk_pmd_range()
3750 walk->mm_stats[MM_NONLEAF_ADDED]++; in walk_pmd_range()
3753 update_bloom_filter(mm_state, walk->seq + 1, pmd + i); in walk_pmd_range()
3756 walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first); in walk_pmd_range()
3769 struct lru_gen_mm_walk *walk = args->private; in walk_pud_range()
3785 if (need_resched() || walk->batched >= MAX_LRU_BATCH) { in walk_pud_range()
3796 if (!end || !args->vma) in walk_pud_range()
3799 walk->next_addr = max(end, args->vma->vm_start); in walk_pud_range()
3801 return -EAGAIN; in walk_pud_range()
3812 struct lruvec *lruvec = walk->lruvec; in walk_mm()
3814 walk->next_addr = FIRST_USER_ADDRESS; in walk_mm()
3819 err = -EBUSY; in walk_mm()
3822 if (walk->seq != max_seq) in walk_mm()
3827 err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk); in walk_mm()
3832 if (walk->batched) { in walk_mm()
3833 spin_lock_irq(&lruvec->lru_lock); in walk_mm()
3835 spin_unlock_irq(&lruvec->lru_lock); in walk_mm()
3839 } while (err == -EAGAIN); in walk_mm()
3844 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; in set_mm_walk()
3849 walk = &pgdat->mm_walk; in set_mm_walk()
3856 current->reclaim_state->mm_walk = walk; in set_mm_walk()
3863 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; in clear_mm_walk()
3865 VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages))); in clear_mm_walk()
3866 VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats))); in clear_mm_walk()
3868 current->reclaim_state->mm_walk = NULL; in clear_mm_walk()
3878 struct lru_gen_folio *lrugen = &lruvec->lrugen; in inc_min_seq()
3879 int hist = lru_hist_from_seq(lrugen->min_seq[type]); in inc_min_seq()
3880 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); in inc_min_seq()
3892 struct list_head *head = &lrugen->folios[old_gen][type][zone]; in inc_min_seq()
3905 list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]); in inc_min_seq()
3910 int delta = folio_nr_pages(folio); in inc_min_seq() local
3912 WRITE_ONCE(lrugen->protected[hist][type][tier], in inc_min_seq()
3913 lrugen->protected[hist][type][tier] + delta); in inc_min_seq()
3916 if (!--remaining) in inc_min_seq()
3922 WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1); in inc_min_seq()
3932 struct lru_gen_folio *lrugen = &lruvec->lrugen; in try_to_inc_min_seq()
3939 while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) { in try_to_inc_min_seq()
3943 if (!list_empty(&lrugen->folios[gen][type][zone])) in try_to_inc_min_seq()
3964 unsigned long seq = lrugen->max_seq - MIN_NR_GENS; in try_to_inc_min_seq()
3973 if (min_seq[type] <= lrugen->min_seq[type]) in try_to_inc_min_seq()
3977 WRITE_ONCE(lrugen->min_seq[type], min_seq[type]); in try_to_inc_min_seq()
3989 struct lru_gen_folio *lrugen = &lruvec->lrugen; in inc_max_seq()
3991 if (seq < READ_ONCE(lrugen->max_seq)) in inc_max_seq()
3994 spin_lock_irq(&lruvec->lru_lock); in inc_max_seq()
3998 success = seq == lrugen->max_seq; in inc_max_seq()
4009 spin_unlock_irq(&lruvec->lru_lock); in inc_max_seq()
4020 prev = lru_gen_from_seq(lrugen->max_seq - 1); in inc_max_seq()
4021 next = lru_gen_from_seq(lrugen->max_seq + 1); in inc_max_seq()
4026 long delta = lrugen->nr_pages[prev][type][zone] - in inc_max_seq() local
4027 lrugen->nr_pages[next][type][zone]; in inc_max_seq()
4029 if (!delta) in inc_max_seq()
4032 __update_lru_size(lruvec, lru, zone, delta); in inc_max_seq()
4033 __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta); in inc_max_seq()
4040 WRITE_ONCE(lrugen->timestamps[next], jiffies); in inc_max_seq()
4042 smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); in inc_max_seq()
4044 spin_unlock_irq(&lruvec->lru_lock); in inc_max_seq()
4055 struct lru_gen_folio *lrugen = &lruvec->lrugen; in try_to_inc_max_seq()
4058 VM_WARN_ON_ONCE(seq > READ_ONCE(lrugen->max_seq)); in try_to_inc_max_seq()
4064 if (seq <= READ_ONCE(mm_state->seq)) in try_to_inc_max_seq()
4084 walk->lruvec = lruvec; in try_to_inc_max_seq()
4085 walk->seq = seq; in try_to_inc_max_seq()
4086 walk->swappiness = swappiness; in try_to_inc_max_seq()
4087 walk->force_scan = force_scan; in try_to_inc_max_seq()
4112 if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH) in set_initial_priority()
4120 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) in set_initial_priority()
4123 /* round down reclaimable and round up sc->nr_to_reclaim */ in set_initial_priority()
4124 priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1); in set_initial_priority()
4130 sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY); in set_initial_priority()
4138 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lruvec_is_sizable()
4150 total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in lruvec_is_sizable()
4155 return mem_cgroup_online(memcg) ? (total >> sc->priority) : total; in lruvec_is_sizable()
4174 birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); in lruvec_is_reclaimable()
4209 .gfp_mask = sc->gfp_mask, in lru_gen_age_node()
4238 pte_t *pte = pvmw->pte; in lru_gen_look_around()
4239 unsigned long addr = pvmw->address; in lru_gen_look_around()
4240 struct vm_area_struct *vma = pvmw->vma; in lru_gen_look_around()
4241 struct folio *folio = pfn_folio(pvmw->pfn); in lru_gen_look_around()
4249 lockdep_assert_held(pvmw->ptl); in lru_gen_look_around()
4255 if (spin_is_contended(pvmw->ptl)) in lru_gen_look_around()
4259 if (vma->vm_flags & VM_SPECIAL) in lru_gen_look_around()
4263 walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL; in lru_gen_look_around()
4265 start = max(addr & PMD_MASK, vma->vm_start); in lru_gen_look_around()
4266 end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1; in lru_gen_look_around()
4268 if (end - start == PAGE_SIZE) in lru_gen_look_around()
4271 if (end - start > MIN_LRU_BATCH * PAGE_SIZE) { in lru_gen_look_around()
4272 if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2) in lru_gen_look_around()
4274 else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2) in lru_gen_look_around()
4275 start = end - MIN_LRU_BATCH * PAGE_SIZE; in lru_gen_look_around()
4277 start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2; in lru_gen_look_around()
4284 pte -= (addr - start) / PAGE_SIZE; in lru_gen_look_around()
4291 if (pfn == -1) in lru_gen_look_around()
4320 update_bloom_filter(mm_state, max_seq, pvmw->pmd); in lru_gen_look_around()
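The start/end arithmetic above clips the look-around window to the current PMD and VMA, then caps it at MIN_LRU_BATCH PTEs roughly centred on the faulting address. A standalone sketch of just that window selection, with illustrative addresses and the usual 4KiB page / 2MiB PMD sizes assumed:

```c
#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(512 * PAGE_SIZE)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define MIN_LRU_BATCH	64UL	/* assumption: BITS_PER_LONG on 64-bit */

int main(void)
{
	unsigned long vm_start = 0x400000, vm_end = 0x1000000; /* made up VMA */
	unsigned long addr = 0x7ff000;	/* faulting address */

	/* clip to the PMD, then to the VMA */
	unsigned long start = addr & PMD_MASK;
	unsigned long end = (addr | ~PMD_MASK) + 1;

	if (start < vm_start)
		start = vm_start;
	if (end > vm_end)
		end = vm_end;

	/* cap at MIN_LRU_BATCH pages, keeping the faulting address inside */
	if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
		if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
			end = start + MIN_LRU_BATCH * PAGE_SIZE;
		else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2)
			start = end - MIN_LRU_BATCH * PAGE_SIZE;
		else {
			start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2;
			end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2;
		}
	}

	printf("scan [%#lx, %#lx), %lu ptes\n", start, end,
	       (end - start) / PAGE_SIZE);
	return 0;
}
```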
4346 spin_lock_irqsave(&pgdat->memcg_lru.lock, flags); in lru_gen_rotate_memcg()
4348 VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); in lru_gen_rotate_memcg()
4351 new = old = lruvec->lrugen.gen; in lru_gen_rotate_memcg()
4359 new = get_memcg_gen(pgdat->memcg_lru.seq); in lru_gen_rotate_memcg()
4361 new = get_memcg_gen(pgdat->memcg_lru.seq + 1); in lru_gen_rotate_memcg()
4365 WRITE_ONCE(lruvec->lrugen.seg, seg); in lru_gen_rotate_memcg()
4366 WRITE_ONCE(lruvec->lrugen.gen, new); in lru_gen_rotate_memcg()
4368 hlist_nulls_del_rcu(&lruvec->lrugen.list); in lru_gen_rotate_memcg()
4371 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4373 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4375 pgdat->memcg_lru.nr_memcgs[old]--; in lru_gen_rotate_memcg()
4376 pgdat->memcg_lru.nr_memcgs[new]++; in lru_gen_rotate_memcg()
4378 if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq)) in lru_gen_rotate_memcg()
4379 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); in lru_gen_rotate_memcg()
4381 spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags); in lru_gen_rotate_memcg()
4396 spin_lock_irq(&pgdat->memcg_lru.lock); in lru_gen_online_memcg()
4398 VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list)); in lru_gen_online_memcg()
4400 gen = get_memcg_gen(pgdat->memcg_lru.seq); in lru_gen_online_memcg()
4402 lruvec->lrugen.gen = gen; in lru_gen_online_memcg()
4404 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]); in lru_gen_online_memcg()
4405 pgdat->memcg_lru.nr_memcgs[gen]++; in lru_gen_online_memcg()
4407 spin_unlock_irq(&pgdat->memcg_lru.lock); in lru_gen_online_memcg()
4431 spin_lock_irq(&pgdat->memcg_lru.lock); in lru_gen_release_memcg()
4433 if (hlist_nulls_unhashed(&lruvec->lrugen.list)) in lru_gen_release_memcg()
4436 gen = lruvec->lrugen.gen; in lru_gen_release_memcg()
4438 hlist_nulls_del_init_rcu(&lruvec->lrugen.list); in lru_gen_release_memcg()
4439 pgdat->memcg_lru.nr_memcgs[gen]--; in lru_gen_release_memcg()
4441 if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq)) in lru_gen_release_memcg()
4442 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); in lru_gen_release_memcg()
4444 spin_unlock_irq(&pgdat->memcg_lru.lock); in lru_gen_release_memcg()
4453 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_HEAD) in lru_gen_soft_reclaim()
4471 int delta = folio_nr_pages(folio); in sort_folio() local
4475 struct lru_gen_folio *lrugen = &lruvec->lrugen; in sort_folio()
4485 __count_vm_events(UNEVICTABLE_PGCULLED, delta); in sort_folio()
4490 if (gen != lru_gen_from_seq(lrugen->min_seq[type])) { in sort_folio()
4491 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4498 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4502 int hist = lru_hist_from_seq(lrugen->min_seq[type]); in sort_folio()
4504 WRITE_ONCE(lrugen->protected[hist][type][tier], in sort_folio()
4505 lrugen->protected[hist][type][tier] + delta); in sort_folio()
4511 if (zone > sc->reclaim_idx) { in sort_folio()
4513 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4520 sc->nr.file_taken += delta; in sort_folio()
4522 sc->nr.unqueued_dirty += delta; in sort_folio()
4528 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4540 if (!(sc->gfp_mask & __GFP_IO) && in isolate_folio()
4557 set_mask_bits(&folio->flags.f, LRU_REFS_MASK, 0); in isolate_folio()
4580 struct lru_gen_folio *lrugen = &lruvec->lrugen; in scan_folios()
4588 gen = lru_gen_from_seq(lrugen->min_seq[type]); in scan_folios()
4590 for (i = MAX_NR_ZONES; i > 0; i--) { in scan_folios()
4593 int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES; in scan_folios()
4594 struct list_head *head = &lrugen->folios[gen][type][zone]; in scan_folios()
4598 int delta = folio_nr_pages(folio); in scan_folios() local
4605 scanned += delta; in scan_folios()
4608 sorted += delta; in scan_folios()
4610 list_add(&folio->lru, list); in scan_folios()
4611 isolated += delta; in scan_folios()
4613 list_move(&folio->lru, &moved); in scan_folios()
4614 skipped_zone += delta; in scan_folios()
4617 if (!--remaining || max(isolated, skipped_zone) >= MIN_LRU_BATCH) in scan_folios()
4639 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH, in scan_folios()
4643 sc->nr.file_taken += isolated; in scan_folios()
4668 return tier - 1; in get_tier_idx()
4685 read_ctrl_pos(lruvec, LRU_GEN_FILE, MAX_NR_TIERS, MAX_SWAPPINESS - swappiness, &pv); in get_type_to_scan()
4727 struct lru_gen_folio *lrugen = &lruvec->lrugen; in evict_folios()
4731 spin_lock_irq(&lruvec->lru_lock); in evict_folios()
4737 if (evictable_min_seq(lrugen->min_seq, swappiness) + MIN_NR_GENS > lrugen->max_seq) in evict_folios()
4740 spin_unlock_irq(&lruvec->lru_lock); in evict_folios()
4746 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; in evict_folios()
4747 sc->nr_reclaimed += reclaimed; in evict_folios()
4748 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in evict_folios()
4749 scanned, reclaimed, &stat, sc->priority, in evict_folios()
4756 list_del(&folio->lru); in evict_folios()
4764 list_move(&folio->lru, &clean); in evict_folios()
4770 set_mask_bits(&folio->flags.f, LRU_REFS_FLAGS, BIT(PG_active)); in evict_folios()
4773 spin_lock_irq(&lruvec->lru_lock); in evict_folios()
4777 walk = current->reclaim_state->mm_walk; in evict_folios()
4778 if (walk && walk->batched) { in evict_folios()
4779 walk->lruvec = lruvec; in evict_folios()
4792 spin_unlock_irq(&lruvec->lru_lock); in evict_folios()
4809 struct lru_gen_folio *lrugen = &lruvec->lrugen; in should_run_aging()
4824 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in should_run_aging()
4845 if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg)) in get_nr_to_scan()
4846 return -1; in get_nr_to_scan()
4857 if (!success || sc->priority == DEF_PRIORITY) in get_nr_to_scan()
4858 return nr_to_scan >> sc->priority; in get_nr_to_scan()
4861 return try_to_inc_max_seq(lruvec, max_seq, swappiness, false) ? -1 : 0; in get_nr_to_scan()
4873 if (sc->nr_reclaimed >= max(sc->nr_to_reclaim, compact_gap(sc->order))) in should_abort_scan()
4876 /* check the order to exclude compaction-induced reclaim */ in should_abort_scan()
4877 if (!current_is_kswapd() || sc->order) in should_abort_scan()
4883 for (i = 0; i <= sc->reclaim_idx; i++) { in should_abort_scan()
4884 struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i; in should_abort_scan()
4887 if (managed_zone(zone) && !zone_watermark_ok(zone, 0, size, sc->reclaim_idx, 0)) in should_abort_scan()
4902 int delta; in try_to_shrink_lruvec() local
4908 delta = evict_folios(nr_to_scan, lruvec, sc, swappiness); in try_to_shrink_lruvec()
4909 if (!delta) in try_to_shrink_lruvec()
4912 scanned += delta; in try_to_shrink_lruvec()
4926 if (sc->nr.unqueued_dirty && sc->nr.unqueued_dirty == sc->nr.file_taken) in try_to_shrink_lruvec()
4936 unsigned long scanned = sc->nr_scanned; in shrink_one()
4937 unsigned long reclaimed = sc->nr_reclaimed; in shrink_one()
4947 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL) in shrink_one()
4955 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority); in shrink_one()
4957 if (!sc->proactive) in shrink_one()
4958 vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned, in shrink_one()
4959 sc->nr_reclaimed - reclaimed); in shrink_one()
4970 return READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL ? in shrink_one()
4985 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq)); in shrink_many()
4993 hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) { in shrink_many()
5002 if (gen != READ_ONCE(lrugen->gen)) in shrink_many()
5049 VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap); in lru_gen_shrink_lruvec()
5055 set_mm_walk(NULL, sc->proactive); in lru_gen_shrink_lruvec()
5068 unsigned long reclaimed = sc->nr_reclaimed; in lru_gen_shrink_node()
5077 if (!sc->may_writepage || !sc->may_unmap) in lru_gen_shrink_node()
5084 set_mm_walk(pgdat, sc->proactive); in lru_gen_shrink_node()
5089 sc->nr_reclaimed = 0; in lru_gen_shrink_node()
5092 shrink_one(&pgdat->__lruvec, sc); in lru_gen_shrink_node()
5097 sc->nr_reclaimed += reclaimed; in lru_gen_shrink_node()
5103 if (sc->nr_reclaimed > reclaimed) in lru_gen_shrink_node()
5104 atomic_set(&pgdat->kswapd_failures, 0); in lru_gen_shrink_node()
5113 struct lru_gen_folio *lrugen = &lruvec->lrugen; in state_is_valid()
5115 if (lrugen->enabled) { in state_is_valid()
5119 if (!list_empty(&lruvec->lists[lru])) in state_is_valid()
5126 if (!list_empty(&lrugen->folios[gen][type][zone])) in state_is_valid()
5142 struct list_head *head = &lruvec->lists[lru]; in fill_evictable()
5151 VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio); in fill_evictable()
5157 if (!--remaining) in fill_evictable()
5171 struct list_head *head = &lruvec->lrugen.folios[gen][type][zone]; in drain_evictable()
5186 if (!--remaining) in drain_evictable()
5220 spin_lock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5225 lruvec->lrugen.enabled = enabled; in lru_gen_change_state()
5228 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5230 spin_lock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5233 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5254 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5261 return -EINVAL; in min_ttl_ms_store()
5286 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5295 else if (tolower(*buf) == 'y') in enabled_store()
5296 caps = -1; in enabled_store()
5298 return -EINVAL; in enabled_store()
5336 m->private = kvmalloc(PATH_MAX, GFP_KERNEL); in lru_gen_seq_start()
5337 if (!m->private) in lru_gen_seq_start()
5338 return ERR_PTR(-ENOMEM); in lru_gen_seq_start()
5345 if (!nr_to_skip--) in lru_gen_seq_start()
5358 kvfree(m->private); in lru_gen_seq_stop()
5359 m->private = NULL; in lru_gen_seq_stop()
5364 int nid = lruvec_pgdat(v)->node_id; in lru_gen_seq_next()
5388 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_seq_show_full()
5399 n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]); in lru_gen_seq_show_full()
5400 n[1] = READ_ONCE(lrugen->avg_total[type][tier]); in lru_gen_seq_show_full()
5403 n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]); in lru_gen_seq_show_full()
5404 n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]); in lru_gen_seq_show_full()
5405 n[2] = READ_ONCE(lrugen->protected[hist][type][tier]); in lru_gen_seq_show_full()
5424 n = READ_ONCE(mm_state->stats[hist][i]); in lru_gen_seq_show_full()
5427 n = READ_ONCE(mm_state->stats[hist][i]); in lru_gen_seq_show_full()
5435 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5439 bool full = debugfs_get_aux_num(m->file); in lru_gen_seq_show()
5441 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_seq_show()
5442 int nid = lruvec_pgdat(lruvec)->node_id; in lru_gen_seq_show()
5448 const char *path = memcg ? m->private : ""; in lru_gen_seq_show()
5452 cgroup_path(memcg->css.cgroup, m->private, PATH_MAX); in lru_gen_seq_show()
5462 seq = max_seq - MAX_NR_GENS + 1; in lru_gen_seq_show()
5469 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); in lru_gen_seq_show()
5471 seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth)); in lru_gen_seq_show()
5478 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in lru_gen_seq_show()
5505 return -EINVAL; in run_aging()
5507 return try_to_inc_max_seq(lruvec, max_seq, swappiness, force_scan) ? 0 : -EEXIST; in run_aging()
5516 return -EINVAL; in run_eviction()
5518 sc->nr_reclaimed = 0; in run_eviction()
5526 if (sc->nr_reclaimed >= nr_to_reclaim) in run_eviction()
5529 if (!evict_folios(nr_to_reclaim - sc->nr_reclaimed, lruvec, sc, in run_eviction()
5536 return -EINTR; in run_eviction()
5543 int err = -EINVAL; in run_cmd()
5547 return -EINVAL; in run_cmd()
5559 return -EINVAL; in run_cmd()
5565 sc->target_mem_cgroup = memcg; in run_cmd()
5577 case '-': in run_cmd()
5587 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5595 int err = -EINVAL; in lru_gen_seq_write()
5600 .reclaim_idx = MAX_NR_ZONES - 1, in lru_gen_seq_write()
5607 return -ENOMEM; in lru_gen_seq_write()
5611 return -EFAULT; in lru_gen_seq_write()
5618 err = -ENOMEM; in lru_gen_seq_write()
5633 unsigned long opt = -1; in lru_gen_seq_write()
5642 err = -EINVAL; in lru_gen_seq_write()
5647 swappiness = -1; in lru_gen_seq_write()
5700 spin_lock_init(&pgdat->memcg_lru.lock); in lru_gen_init_pgdat()
5704 INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i); in lru_gen_init_pgdat()
5712 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_init_lruvec()
5715 lrugen->max_seq = MIN_NR_GENS + 1; in lru_gen_init_lruvec()
5716 lrugen->enabled = lru_gen_enabled(); in lru_gen_init_lruvec()
5719 lrugen->timestamps[i] = jiffies; in lru_gen_init_lruvec()
5722 INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]); in lru_gen_init_lruvec()
5725 mm_state->seq = MIN_NR_GENS; in lru_gen_init_lruvec()
5737 INIT_LIST_HEAD(&mm_list->fifo); in lru_gen_init_memcg()
5738 spin_lock_init(&mm_list->lock); in lru_gen_init_memcg()
5747 VM_WARN_ON_ONCE(mm_list && !list_empty(&mm_list->fifo)); in lru_gen_exit_memcg()
5753 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, in lru_gen_exit_memcg()
5754 sizeof(lruvec->lrugen.nr_pages))); in lru_gen_exit_memcg()
5756 lruvec->lrugen.list.next = LIST_POISON1; in lru_gen_exit_memcg()
5762 bitmap_free(mm_state->filters[i]); in lru_gen_exit_memcg()
5763 mm_state->filters[i] = NULL; in lru_gen_exit_memcg()
5813 unsigned long nr_to_reclaim = sc->nr_to_reclaim; in shrink_lruvec()
5839 sc->priority == DEF_PRIORITY); in shrink_lruvec()
5850 nr[lru] -= nr_to_scan; in shrink_lruvec()
5902 nr_scanned = targets[lru] - nr[lru]; in shrink_lruvec()
5903 nr[lru] = targets[lru] * (100 - percentage) / 100; in shrink_lruvec()
5904 nr[lru] -= min(nr[lru], nr_scanned); in shrink_lruvec()
5907 nr_scanned = targets[lru] - nr[lru]; in shrink_lruvec()
5908 nr[lru] = targets[lru] * (100 - percentage) / 100; in shrink_lruvec()
5909 nr[lru] -= min(nr[lru], nr_scanned); in shrink_lruvec()
5912 sc->nr_reclaimed += nr_reclaimed; in shrink_lruvec()
5927 if (gfp_compaction_allowed(sc->gfp_mask) && sc->order && in in_reclaim_compaction()
5928 (sc->order > PAGE_ALLOC_COSTLY_ORDER || in in_reclaim_compaction()
5929 sc->priority < DEF_PRIORITY - 2)) in in_reclaim_compaction()
5936 * Reclaim/compaction is used for high-order allocation requests. It reclaims
5937 * order-0 pages before compacting the zone. should_continue_reclaim() returns
5961 * first, by assuming that zero delta of sc->nr_scanned means full LRU in should_continue_reclaim()
5963 * where always a non-zero amount of pages were scanned. in should_continue_reclaim()
5969 for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) { in should_continue_reclaim()
5973 if (zone_watermark_ok(zone, sc->order, watermark, in should_continue_reclaim()
5974 sc->reclaim_idx, 0)) in should_continue_reclaim()
5977 if (compaction_suitable(zone, sc->order, watermark, in should_continue_reclaim()
5978 sc->reclaim_idx)) in should_continue_reclaim()
5986 pages_for_compaction = compact_gap(sc->order); in should_continue_reclaim()
5988 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) in should_continue_reclaim()
5996 struct mem_cgroup *target_memcg = sc->target_mem_cgroup; in shrink_node_memcgs()
6012 if (current_is_kswapd() || sc->memcg_full_walk) in shrink_node_memcgs()
6022 * This loop can become CPU-bound when target memcgs in shrink_node_memcgs()
6023 * aren't eligible for reclaim - either because they in shrink_node_memcgs()
6044 if (!sc->memcg_low_reclaim) { in shrink_node_memcgs()
6045 sc->memcg_low_skipped = 1; in shrink_node_memcgs()
6051 reclaimed = sc->nr_reclaimed; in shrink_node_memcgs()
6052 scanned = sc->nr_scanned; in shrink_node_memcgs()
6056 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, in shrink_node_memcgs()
6057 sc->priority); in shrink_node_memcgs()
6060 if (!sc->proactive) in shrink_node_memcgs()
6061 vmpressure(sc->gfp_mask, memcg, false, in shrink_node_memcgs()
6062 sc->nr_scanned - scanned, in shrink_node_memcgs()
6063 sc->nr_reclaimed - reclaimed); in shrink_node_memcgs()
6066 if (partial && sc->nr_reclaimed >= sc->nr_to_reclaim) { in shrink_node_memcgs()
6080 memset(&sc->nr, 0, sizeof(sc->nr)); in shrink_node()
6085 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in shrink_node()
6088 memset(&sc->nr, 0, sizeof(sc->nr)); in shrink_node()
6090 nr_reclaimed = sc->nr_reclaimed; in shrink_node()
6091 nr_scanned = sc->nr_scanned; in shrink_node()
6099 nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed; in shrink_node()
6102 if (!sc->proactive) in shrink_node()
6103 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, in shrink_node()
6104 sc->nr_scanned - nr_scanned, nr_node_reclaimed); in shrink_node()
6112 * it implies that the long-lived page allocation rate in shrink_node()
6127 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) in shrink_node()
6128 set_bit(PGDAT_WRITEBACK, &pgdat->flags); in shrink_node()
6131 if (sc->nr.unqueued_dirty && in shrink_node()
6132 sc->nr.unqueued_dirty == sc->nr.file_taken) in shrink_node()
6133 set_bit(PGDAT_DIRTY, &pgdat->flags); in shrink_node()
6142 if (sc->nr.immediate) in shrink_node()
6153 if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) { in shrink_node()
6155 set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags); in shrink_node()
6158 set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags); in shrink_node()
6168 !sc->hibernation_mode && in shrink_node()
6169 (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) || in shrink_node()
6170 test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags))) in shrink_node()
6183 atomic_set(&pgdat->kswapd_failures, 0); in shrink_node()
6184 else if (sc->cache_trim_mode) in shrink_node()
6185 sc->cache_trim_mode_failed = 1; in shrink_node()
6189 * Returns true if compaction should go ahead for a costly-order request, or
6197 if (!gfp_compaction_allowed(sc->gfp_mask)) in compaction_ready()
6201 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), in compaction_ready()
6202 sc->reclaim_idx, 0)) in compaction_ready()
6216 if (compaction_suitable(zone, sc->order, watermark, sc->reclaim_idx)) in compaction_ready()
6228 if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) { in consider_reclaim_throttle()
6231 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS]; in consider_reclaim_throttle()
6248 if (sc->priority == 1 && !sc->nr_reclaimed) in consider_reclaim_throttle()
6253 * This is the direct reclaim path, for page-allocating processes. We only
6275 orig_mask = sc->gfp_mask; in shrink_zones()
6277 sc->gfp_mask |= __GFP_HIGHMEM; in shrink_zones()
6278 sc->reclaim_idx = gfp_zone(sc->gfp_mask); in shrink_zones()
6282 sc->reclaim_idx, sc->nodemask) { in shrink_zones()
6296 * non-zero order, only frequent costly order in shrink_zones()
6302 sc->order > PAGE_ALLOC_COSTLY_ORDER && in shrink_zones()
6304 sc->compaction_ready = true; in shrink_zones()
6314 if (zone->zone_pgdat == last_pgdat) in shrink_zones()
6324 nr_soft_reclaimed = memcg1_soft_limit_reclaim(zone->zone_pgdat, in shrink_zones()
6325 sc->order, sc->gfp_mask, in shrink_zones()
6327 sc->nr_reclaimed += nr_soft_reclaimed; in shrink_zones()
6328 sc->nr_scanned += nr_soft_scanned; in shrink_zones()
6333 first_pgdat = zone->zone_pgdat; in shrink_zones()
6336 if (zone->zone_pgdat == last_pgdat) in shrink_zones()
6338 last_pgdat = zone->zone_pgdat; in shrink_zones()
6339 shrink_node(zone->zone_pgdat, sc); in shrink_zones()
6349 sc->gfp_mask = orig_mask; in shrink_zones()
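
The zonelist walk above performs its per-node work at most once by remembering the last pgdat it visited, relying on zones from the same node appearing consecutively in the iteration. A generic sketch of that last-seen deduplication pattern, with placeholder types:

#include <stddef.h>

struct node_like;
struct zone_like { struct node_like *owner; };

static void for_each_node_once(struct zone_like **zones, size_t n,
			       void (*shrink)(struct node_like *node))
{
	struct node_like *last = NULL;
	size_t i;

	for (i = 0; i < n; i++) {
		if (zones[i]->owner == last)
			continue;	/* this node was just handled */
		last = zones[i]->owner;
		shrink(last);
	}
}
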
6362 target_lruvec->refaults[WORKINGSET_ANON] = refaults; in snapshot_refaults()
6364 target_lruvec->refaults[WORKINGSET_FILE] = refaults; in snapshot_refaults()
6374 * high - the zone may be full of dirty or under-writeback pages, which this
6386 int initial_priority = sc->priority; in do_try_to_free_pages()
6394 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); in do_try_to_free_pages()
6397 if (!sc->proactive) in do_try_to_free_pages()
6398 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, in do_try_to_free_pages()
6399 sc->priority); in do_try_to_free_pages()
6400 sc->nr_scanned = 0; in do_try_to_free_pages()
6403 if (sc->nr_reclaimed >= sc->nr_to_reclaim) in do_try_to_free_pages()
6406 if (sc->compaction_ready) in do_try_to_free_pages()
6413 if (sc->priority < DEF_PRIORITY - 2) in do_try_to_free_pages()
6414 sc->may_writepage = 1; in do_try_to_free_pages()
6415 } while (--sc->priority >= 0); in do_try_to_free_pages()
6418 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, in do_try_to_free_pages()
6419 sc->nodemask) { in do_try_to_free_pages()
6420 if (zone->zone_pgdat == last_pgdat) in do_try_to_free_pages()
6422 last_pgdat = zone->zone_pgdat; in do_try_to_free_pages()
6424 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); in do_try_to_free_pages()
6429 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, in do_try_to_free_pages()
6430 zone->zone_pgdat); in do_try_to_free_pages()
6431 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); in do_try_to_free_pages()
6437 if (sc->nr_reclaimed) in do_try_to_free_pages()
6438 return sc->nr_reclaimed; in do_try_to_free_pages()
6441 if (sc->compaction_ready) in do_try_to_free_pages()
6453 if (!sc->memcg_full_walk) { in do_try_to_free_pages()
6454 sc->priority = initial_priority; in do_try_to_free_pages()
6455 sc->memcg_full_walk = 1; in do_try_to_free_pages()
6468 if (sc->skipped_deactivate) { in do_try_to_free_pages()
6469 sc->priority = initial_priority; in do_try_to_free_pages()
6470 sc->force_deactivate = 1; in do_try_to_free_pages()
6471 sc->skipped_deactivate = 0; in do_try_to_free_pages()
6476 if (sc->memcg_low_skipped) { in do_try_to_free_pages()
6477 sc->priority = initial_priority; in do_try_to_free_pages()
6478 sc->force_deactivate = 0; in do_try_to_free_pages()
6479 sc->memcg_low_reclaim = 1; in do_try_to_free_pages()
6480 sc->memcg_low_skipped = 0; in do_try_to_free_pages()
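
When the priority loop above ends without progress, do_try_to_free_pages() retries from the initial priority with one restriction lifted at a time: first a full memcg walk, then forced deactivation if deactivation had been skipped, and finally reclaim from memory.low-protected cgroups. An illustrative state machine of that escalation order (a simplification; the priority reset and the surrounding retry control flow are omitted):

#include <stdbool.h>

struct retry_state {
	bool memcg_full_walk;
	bool skipped_deactivate, force_deactivate;
	bool memcg_low_skipped, memcg_low_reclaim;
};

static bool escalate_and_retry(struct retry_state *s)
{
	if (!s->memcg_full_walk) {		/* widen the memcg walk first */
		s->memcg_full_walk = true;
		return true;
	}
	if (s->skipped_deactivate) {		/* then force deactivation */
		s->force_deactivate = true;
		s->skipped_deactivate = false;
		return true;
	}
	if (s->memcg_low_skipped) {		/* finally dip below memory.low */
		s->force_deactivate = false;
		s->memcg_low_reclaim = true;
		s->memcg_low_skipped = false;
		return true;
	}
	return false;				/* out of fallbacks */
}
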
6495 if (atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES) in allow_direct_reclaim()
6513 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { in allow_direct_reclaim()
6514 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) in allow_direct_reclaim()
6515 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); in allow_direct_reclaim()
6517 wake_up_interruptible(&pgdat->kswapd_wait); in allow_direct_reclaim()
6546 if (current->flags & PF_KTHREAD) in throttle_direct_reclaim()
6576 pgdat = zone->zone_pgdat; in throttle_direct_reclaim()
6598 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
6602 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
6669 .reclaim_idx = MAX_NR_ZONES - 1, in mem_cgroup_shrink_node()
6673 WARN_ON_ONCE(!current->reclaim_state); in mem_cgroup_shrink_node()
6710 .reclaim_idx = MAX_NR_ZONES - 1, in try_to_free_mem_cgroup_pages()
6780 * Check for watermark boosts top-down as the higher zones in pgdat_watermark_boosted()
6786 for (i = highest_zoneidx; i >= 0; i--) { in pgdat_watermark_boosted()
6787 zone = pgdat->node_zones + i; in pgdat_watermark_boosted()
6791 if (zone->watermark_boost) in pgdat_watermark_boosted()
6805 unsigned long mark = -1; in pgdat_balanced()
6809 * Check watermarks bottom-up as lower zones are more likely to in pgdat_balanced()
6838 * the cumulative error from the vmstat per-cpu cache in pgdat_balanced()
6843 * pageblock_nr_pages, while the vmstat pcp threshold in pgdat_balanced()
6845 * counter won't actually be per-cpu cached. But keep in pgdat_balanced()
6849 if (zone->percpu_drift_mark && free_pages < zone->percpu_drift_mark) in pgdat_balanced()
6859 * need balancing by definition. This can happen if a zone-restricted in pgdat_balanced()
6862 if (mark == -1) in pgdat_balanced()
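
The watermark scan above falls back to an exact counter read when free pages drop below zone->percpu_drift_mark, since the cheap per-CPU-cached reading may lag the true value by more than the remaining headroom; a mark left at -1 (no eligible managed zone) is treated, per the comment fragment above, as needing no balancing. A sketch of the drift-mark idea only; the reader callbacks are placeholders, not kernel APIs:

static unsigned long reliable_free_pages(unsigned long cheap_read,
					 unsigned long drift_mark,
					 unsigned long (*exact_read)(void))
{
	/* only pay for the exact (summed) read when the error could matter */
	if (drift_mark && cheap_read < drift_mark)
		return exact_read();
	return cheap_read;
}
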
6873 clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags); in clear_pgdat_congested()
6874 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); in clear_pgdat_congested()
6875 clear_bit(PGDAT_DIRTY, &pgdat->flags); in clear_pgdat_congested()
6876 clear_bit(PGDAT_WRITEBACK, &pgdat->flags); in clear_pgdat_congested()
6901 if (waitqueue_active(&pgdat->pfmemalloc_wait)) in prepare_kswapd_sleep()
6902 wake_up_all(&pgdat->pfmemalloc_wait); in prepare_kswapd_sleep()
6905 if (atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES) in prepare_kswapd_sleep()
6929 unsigned long nr_reclaimed = sc->nr_reclaimed; in kswapd_shrink_node()
6932 sc->nr_to_reclaim = 0; in kswapd_shrink_node()
6933 for_each_managed_zone_pgdat(zone, pgdat, z, sc->reclaim_idx) { in kswapd_shrink_node()
6934 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); in kswapd_shrink_node()
6945 * high-order allocations. If twice the allocation size has been in kswapd_shrink_node()
6946 * reclaimed then recheck watermarks only at order-0 to prevent in kswapd_shrink_node()
6947 * excessive reclaim. Assume that a process requested a high-order in kswapd_shrink_node()
6950 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order)) in kswapd_shrink_node()
6951 sc->order = 0; in kswapd_shrink_node()
6954 return max(sc->nr_scanned, sc->nr_reclaimed - nr_reclaimed) >= sc->nr_to_reclaim; in kswapd_shrink_node()
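
Per the comment above, once roughly twice the requested allocation size has been reclaimed, kswapd drops back to rechecking watermarks at order-0 to avoid over-reclaiming for a single high-order request. A worked sketch of that fallback, assuming a gap of 2 << order pages (for example, order-9, a 2 MB THP with 4 KB pages, needs 1024 reclaimed pages before the order is dropped):

static unsigned int maybe_drop_order(unsigned int order, unsigned long nr_reclaimed)
{
	unsigned long gap = 2UL << order;	/* twice the allocation size */

	return (order && nr_reclaimed >= gap) ? 0 : order;
}
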
6966 set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); in update_reclaim_active()
6968 clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); in update_reclaim_active()
6991 * kswapd scans the zones in the highmem->normal->dma direction. It skips
7026 nr_boost_reclaim += zone->watermark_boost; in balance_pgdat()
7027 zone_boosts[i] = zone->watermark_boost; in balance_pgdat()
7046 * purpose -- on 64-bit systems it is expected that in balance_pgdat()
7047 * buffer_heads are stripped during active rotation. On 32-bit in balance_pgdat()
7054 for (i = MAX_NR_ZONES - 1; i >= 0; i--) { in balance_pgdat()
7055 zone = pgdat->node_zones + i; in balance_pgdat()
7069 * re-evaluate if boosting is required when kswapd next wakes. in balance_pgdat()
7086 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2) in balance_pgdat()
7091 * intent is to relieve pressure not issue sub-optimal IO in balance_pgdat()
7109 if (sc.priority < DEF_PRIORITY - 2) in balance_pgdat()
7132 if (waitqueue_active(&pgdat->pfmemalloc_wait) && in balance_pgdat()
7134 wake_up_all(&pgdat->pfmemalloc_wait); in balance_pgdat()
7147 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; in balance_pgdat()
7148 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed); in balance_pgdat()
7159 sc.priority--; in balance_pgdat()
7173 atomic_inc(&pgdat->kswapd_failures); in balance_pgdat()
7187 zone = pgdat->node_zones + i; in balance_pgdat()
7188 spin_lock_irqsave(&zone->lock, flags); in balance_pgdat()
7189 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); in balance_pgdat()
7190 spin_unlock_irqrestore(&zone->lock, flags); in balance_pgdat()
7215 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to
7224 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in kswapd_highest_zoneidx()
7238 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
7270 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, in kswapd_try_to_sleep()
7274 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) in kswapd_try_to_sleep()
7275 WRITE_ONCE(pgdat->kswapd_order, reclaim_order); in kswapd_try_to_sleep()
7278 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
7279 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
7288 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); in kswapd_try_to_sleep()
7293 * true value by nr_online_cpus * threshold. To avoid the zone in kswapd_try_to_sleep()
7295 * per-cpu vmstat threshold while kswapd is awake and restore in kswapd_try_to_sleep()
7310 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
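
The comment fragment above notes that zone counters read through the per-CPU vmstat cache can differ from the true value by up to nr_online_cpus * threshold, which is why kswapd tightens the per-CPU threshold while it is awake and restores it before sleeping. A small worked example of that error bound; the 64-CPU and 125-page numbers are purely illustrative:

static unsigned long max_vmstat_drift(unsigned int nr_online_cpus,
				      unsigned int per_cpu_threshold)
{
	/* e.g. 64 CPUs * 125 pages = 8000 pages, ~31 MiB with 4 KiB pages */
	return (unsigned long)nr_online_cpus * per_cpu_threshold;
}
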
7323 * If there are applications that are active memory-allocators
7329 unsigned int highest_zoneidx = MAX_NR_ZONES - 1; in kswapd()
7345 tsk->flags |= PF_MEMALLOC | PF_KSWAPD; in kswapd()
7348 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
7349 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
7350 atomic_set(&pgdat->nr_writeback_throttled, 0); in kswapd()
7354 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
7363 alloc_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
7366 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
7367 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
7380 * Reclaim begins at the requested order but if a high-order in kswapd()
7382 * order-0. If that happens, kswapd will consider sleeping in kswapd()
7387 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, in kswapd()
7395 tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD); in kswapd()
7401 * A zone is low on free memory or too fragmented for high-order memory. If
7419 pgdat = zone->zone_pgdat; in wakeup_kswapd()
7420 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in wakeup_kswapd()
7423 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); in wakeup_kswapd()
7425 if (READ_ONCE(pgdat->kswapd_order) < order) in wakeup_kswapd()
7426 WRITE_ONCE(pgdat->kswapd_order, order); in wakeup_kswapd()
7428 if (!waitqueue_active(&pgdat->kswapd_wait)) in wakeup_kswapd()
7432 if (atomic_read(&pgdat->kswapd_failures) >= MAX_RECLAIM_RETRIES || in wakeup_kswapd()
7437 * fragmented for high-order allocations. Wake up kcompactd in wakeup_kswapd()
7447 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, in wakeup_kswapd()
7449 wake_up_interruptible(&pgdat->kswapd_wait); in wakeup_kswapd()
7454 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
7466 .reclaim_idx = MAX_NR_ZONES - 1, in shrink_all_memory()
7492 * This kswapd start function will be called by init and node-hot-add.
7499 if (!pgdat->kswapd) { in kswapd_run()
7500 pgdat->kswapd = kthread_create_on_node(kswapd, pgdat, nid, "kswapd%d", nid); in kswapd_run()
7501 if (IS_ERR(pgdat->kswapd)) { in kswapd_run()
7504 nid, PTR_ERR(pgdat->kswapd)); in kswapd_run()
7506 pgdat->kswapd = NULL; in kswapd_run()
7508 wake_up_process(pgdat->kswapd); in kswapd_run()
7524 kswapd = pgdat->kswapd; in kswapd_stop()
7527 pgdat->kswapd = NULL; in kswapd_stop()
7571 * If non-zero call node_reclaim when the number of free pages falls below
7606 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; in node_unmapped_file_pages()
7613 unsigned long delta = 0; in node_pagecache_reclaimable() local
7628 delta += node_page_state(pgdat, NR_FILE_DIRTY); in node_pagecache_reclaimable()
7630 /* Watch for any possible underflows due to delta */ in node_pagecache_reclaimable()
7631 if (unlikely(delta > nr_pagecache_reclaimable)) in node_pagecache_reclaimable()
7632 delta = nr_pagecache_reclaimable; in node_pagecache_reclaimable()
7634 return nr_pagecache_reclaimable - delta; in node_pagecache_reclaimable()
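
node_unmapped_file_pages() and the delta clamp above both guard against unsigned underflow by saturating the subtraction at zero. The generic form of that guard:

static unsigned long sub_sat(unsigned long a, unsigned long b)
{
	/* never wrap below zero on unsigned subtraction */
	return (a > b) ? (a - b) : 0;
}

With it, the final return above is equivalent to sub_sat(nr_pagecache_reclaimable, delta); the helper name is hypothetical and only meant to show the pattern, not a suggested kernel change.
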
7648 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, sc->order, in __node_reclaim()
7649 sc->gfp_mask); in __node_reclaim()
7654 fs_reclaim_acquire(sc->gfp_mask); in __node_reclaim()
7659 set_task_reclaim_state(p, &sc->reclaim_state); in __node_reclaim()
7661 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages || in __node_reclaim()
7662 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) { in __node_reclaim()
7669 } while (sc->nr_reclaimed < nr_pages && --sc->priority >= 0); in __node_reclaim()
7674 fs_reclaim_release(sc->gfp_mask); in __node_reclaim()
7678 trace_mm_vmscan_node_reclaim_end(sc->nr_reclaimed); in __node_reclaim()
7680 return sc->nr_reclaimed; in __node_reclaim()
7709 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && in node_reclaim()
7711 pgdat->min_slab_pages) in node_reclaim()
7717 if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC)) in node_reclaim()
7726 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) in node_reclaim()
7729 if (test_and_set_bit_lock(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) in node_reclaim()
7733 clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags); in node_reclaim()
7759 int swappiness = -1; in user_proactive_reclaim()
7765 return -EINVAL; in user_proactive_reclaim()
7772 return -EINVAL; in user_proactive_reclaim()
7782 return -EINVAL; in user_proactive_reclaim()
7785 return -EINVAL; in user_proactive_reclaim()
7791 return -EINVAL; in user_proactive_reclaim()
7797 unsigned long batch_size = (nr_to_reclaim - nr_reclaimed) / 4; in user_proactive_reclaim()
7801 return -EINTR; in user_proactive_reclaim()
7818 swappiness == -1 ? NULL : &swappiness); in user_proactive_reclaim()
7823 .proactive_swappiness = swappiness == -1 ? NULL : &swappiness, in user_proactive_reclaim()
7833 &pgdat->flags)) in user_proactive_reclaim()
7834 return -EBUSY; in user_proactive_reclaim()
7838 clear_bit_unlock(PGDAT_RECLAIM_LOCKED, &pgdat->flags); in user_proactive_reclaim()
7841 if (!reclaimed && !nr_retries--) in user_proactive_reclaim()
7842 return -EAGAIN; in user_proactive_reclaim()
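
The proactive-reclaim loop above asks for a quarter of the outstanding target per pass, returns -EINTR if a fatal signal arrives, and gives up with -EAGAIN after a bounded number of passes that reclaim nothing. A rough userspace sketch of that batching loop; signal handling is omitted and the per-node/memcg reclaim call is abstracted behind a callback with a hypothetical name:

#include <errno.h>

static int proactive_loop(unsigned long nr_to_reclaim, int nr_retries,
			  unsigned long (*try_reclaim)(unsigned long batch))
{
	unsigned long nr_reclaimed = 0;

	while (nr_reclaimed < nr_to_reclaim) {
		/* ask for a quarter of the remainder each pass */
		unsigned long batch = (nr_to_reclaim - nr_reclaimed) / 4;
		unsigned long got = try_reclaim(batch);

		nr_reclaimed += got;
		if (!got && !nr_retries--)
			return -EAGAIN;	/* repeated passes made no progress */
	}
	return 0;
}
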
7853 * check_move_unevictable_folios - Move evictable folios to appropriate zone
7868 for (i = 0; i < fbatch->nr; i++) { in check_move_unevictable_folios()
7869 struct folio *folio = fbatch->folios[i]; in check_move_unevictable_folios()
7903 int ret, nid = dev->id; in reclaim_store()
7906 return ret ? -EAGAIN : count; in reclaim_store()
7912 return device_create_file(&node->dev, &dev_attr_reclaim); in reclaim_register_node()
7917 return device_remove_file(&node->dev, &dev_attr_reclaim); in reclaim_unregister_node()