
1 // SPDX-License-Identifier: GPL-2.0
31 #include <linux/backing-dev.h>
45 #include <linux/memory-tiers.h>
189 if ((_folio)->lru.prev != _base) { \
192 prev = lru_to_folio(&(_folio->lru)); \
193 prefetchw(&prev->_field); \
210 return sc->target_mem_cgroup; in cgroup_reclaim()
219 return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup); in root_reclaim()
223 * writeback_throttling_sane - is the usual dirty throttling mechanism available?
248 if (sc->proactive && sc->proactive_swappiness) in sc_swappiness()
249 return *sc->proactive_swappiness; in sc_swappiness()
278 WARN_ON_ONCE(rs && task->reclaim_state); in set_task_reclaim_state()
280 /* Check for the nulling of an already-nulled member */ in set_task_reclaim_state()
281 WARN_ON_ONCE(!rs && !task->reclaim_state); in set_task_reclaim_state()
283 task->reclaim_state = rs; in set_task_reclaim_state()
287 * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to
288 * scan_control->nr_reclaimed.
293 * Currently, reclaim_state->reclaimed includes three types of pages in flush_reclaim_state()
300 * single memcg. For example, a memcg-aware shrinker can free one object in flush_reclaim_state()
303 * overestimating the reclaimed amount (potentially under-reclaiming). in flush_reclaim_state()
305 * Only count such pages for global reclaim to prevent under-reclaiming in flush_reclaim_state()
320 if (current->reclaim_state && root_reclaim(sc)) { in flush_reclaim_state()
321 sc->nr_reclaimed += current->reclaim_state->reclaimed; in flush_reclaim_state()
322 current->reclaim_state->reclaimed = 0; in flush_reclaim_state()
330 if (sc && sc->no_demotion) in can_demote()
344 * For non-memcg reclaim, is there in can_reclaim_anon_pages()
378 * If there are no reclaimable file-backed or anonymous pages, in zone_reclaimable_pages()
389 * lruvec_lru_size - Returns the number of pages on the given LRU list.
392 * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list)
401 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; in lruvec_lru_size()
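
lruvec_lru_size(), as the doc comment above says, just accumulates one LRU list's page count over the zones up to @zone_idx. A minimal userspace sketch of that accumulation, with a made-up per-zone counter array standing in for the kernel's lruvec and memcg plumbing:

#include <stdio.h>

#define MAX_NR_ZONES 4  /* assumption: a small DMA/DMA32/NORMAL/MOVABLE-style layout */

/* hypothetical per-zone page counts for one LRU list */
static unsigned long zone_lru_pages[MAX_NR_ZONES] = { 100, 2000, 30000, 0 };

/* sum the list's pages over zones 0..zone_idx, like lruvec_lru_size() */
static unsigned long lru_size_upto(int zone_idx)
{
	unsigned long size = 0;

	for (int zid = 0; zid <= zone_idx; zid++)
		size += zone_lru_pages[zid];

	return size;
}

int main(void)
{
	/* MAX_NR_ZONES - 1 covers the whole LRU list */
	printf("whole list: %lu pages\n", lru_size_upto(MAX_NR_ZONES - 1));
	/* a lowmem-constrained reclaim would only consider the lower zones */
	printf("zones <= 1: %lu pages\n", lru_size_upto(1));
	return 0;
}
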
446 BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != in reclaimer_offset()
447 PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD); in reclaimer_offset()
448 BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != in reclaimer_offset()
449 PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD); in reclaimer_offset()
450 BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != in reclaimer_offset()
451 PGSCAN_DIRECT - PGSCAN_KSWAPD); in reclaimer_offset()
452 BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != in reclaimer_offset()
453 PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD); in reclaimer_offset()
458 return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD; in reclaimer_offset()
459 return PGSTEAL_DIRECT - PGSTEAL_KSWAPD; in reclaimer_offset()
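
The BUILD_BUG_ON()s above only hold because the vmstat counters are declared so that the kswapd, direct and khugepaged variants of each event sit at the same distance from one another; reclaimer_offset() can then return one delta that is valid against any of the PGSTEAL/PGSCAN/PGDEMOTE bases. A standalone sketch of that enum-spacing pattern (the enum values here are hypothetical, not the kernel's vm_event_item list):

#include <assert.h>
#include <stdio.h>

/* hypothetical counters laid out in matching kswapd/direct/khugepaged order */
enum counter {
	STEAL_KSWAPD, STEAL_DIRECT, STEAL_KHUGEPAGED,
	SCAN_KSWAPD,  SCAN_DIRECT,  SCAN_KHUGEPAGED,
	NR_COUNTERS,
};

/* the spacing the BUILD_BUG_ON()s assert at compile time */
_Static_assert(STEAL_DIRECT - STEAL_KSWAPD == SCAN_DIRECT - SCAN_KSWAPD,
	       "direct offset must match across counter groups");
_Static_assert(STEAL_KHUGEPAGED - STEAL_KSWAPD == SCAN_KHUGEPAGED - SCAN_KSWAPD,
	       "khugepaged offset must match across counter groups");

static unsigned long events[NR_COUNTERS];

/* one offset, applied to whichever base counter the caller uses */
static int reclaimer_offset_demo(int is_kswapd, int is_khugepaged)
{
	if (is_kswapd)
		return 0;
	if (is_khugepaged)
		return STEAL_KHUGEPAGED - STEAL_KSWAPD;
	return STEAL_DIRECT - STEAL_KSWAPD;
}

int main(void)
{
	int off = reclaimer_offset_demo(0, 0);	/* direct reclaim */

	events[STEAL_KSWAPD + off] += 32;	/* lands on STEAL_DIRECT */
	events[SCAN_KSWAPD + off] += 128;	/* lands on SCAN_DIRECT */

	printf("steal[direct]=%lu scan[direct]=%lu\n",
	       events[STEAL_DIRECT], events[SCAN_DIRECT]);
	return 0;
}
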
467 * private data at folio->private. in is_page_cache_freeable()
469 return folio_ref_count(folio) - folio_test_private(folio) == in is_page_cache_freeable()
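
The arithmetic above counts the references a freeable page-cache folio is expected to hold: one per page from the page cache, one from the isolating caller, and possibly one more tracked by the private flag for attached filesystem data. A small hedged model of that check, assuming those are the only reference sources:

#include <stdbool.h>
#include <stdio.h>

struct demo_folio {
	long refcount;		/* total references */
	bool has_private;	/* buffers / fs-private data hold one extra ref */
	long nr_pages;		/* 1 for a base page, 512 for a 2MB folio, ... */
};

/* mirrors: refcount - has_private == 1 (caller) + nr_pages (page cache) */
static bool demo_is_freeable(const struct demo_folio *f)
{
	return f->refcount - (f->has_private ? 1 : 0) == 1 + f->nr_pages;
}

int main(void)
{
	/* order-0 folio: cache ref + our isolation ref, no buffers */
	struct demo_folio clean = { .refcount = 2, .has_private = false, .nr_pages = 1 };
	/* same folio while some other path still holds an extra reference */
	struct demo_folio busy  = { .refcount = 3, .has_private = false, .nr_pages = 1 };

	printf("clean: %s\n", demo_is_freeable(&clean) ? "freeable" : "busy");
	printf("busy:  %s\n", demo_is_freeable(&busy) ? "freeable" : "busy");
	return 0;
}
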
475 * -ENOSPC. We need to propagate that into the address_space for a subsequent
503 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in skip_throttle_noprogress()
512 struct zone *zone = pgdat->node_zones + i; in skip_throttle_noprogress()
529 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason]; in reclaim_throttle()
539 current->flags & (PF_USER_WORKER|PF_KTHREAD)) { in reclaim_throttle()
547 * parallel reclaimers which is a short-lived event so the timeout is in reclaim_throttle()
549 * potentially long-lived events so use a longer timeout. This is shaky in reclaim_throttle()
558 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) { in reclaim_throttle()
559 WRITE_ONCE(pgdat->nr_reclaim_start, in reclaim_throttle()
589 atomic_dec(&pgdat->nr_writeback_throttled); in reclaim_throttle()
591 trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout), in reclaim_throttle()
592 jiffies_to_usecs(timeout - ret), in reclaim_throttle()
609 * This is an inaccurate read as the per-cpu deltas may not in __acct_reclaim_writeback()
615 nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) - in __acct_reclaim_writeback()
616 READ_ONCE(pgdat->nr_reclaim_start); in __acct_reclaim_writeback()
619 wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]); in __acct_reclaim_writeback()
636 * Calls ->writepage().
643 * will be non-blocking. To prevent this allocation from being in pageout()
662 * folio->mapping == NULL while being dirty with clean buffers. in pageout()
673 if (mapping->a_ops->writepage == NULL) in pageout()
696 res = mapping->a_ops->writepage(&folio->page, &wbc); in pageout()
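
pageout() drives ->writepage() with a writeback_control tuned for reclaim: asynchronous, one SWAP_CLUSTER_MAX-sized batch, whole-file range, and flagged as coming from reclaim. A trimmed stand-alone model of that setup (struct wbc_demo is a stand-in with only the relevant fields, not the kernel's struct writeback_control, and the error handling around the call is omitted):

#include <limits.h>
#include <stdio.h>

#define WB_SYNC_NONE		0
#define SWAP_CLUSTER_MAX	32L

/* stand-in for the handful of writeback_control fields pageout() cares about */
struct wbc_demo {
	int sync_mode;			/* WB_SYNC_NONE: don't wait for writeback */
	long nr_to_write;		/* batch size */
	long long range_start, range_end;
	int for_reclaim;		/* tells the fs this write is from reclaim */
};

int main(void)
{
	struct wbc_demo wbc = {
		.sync_mode = WB_SYNC_NONE,
		.nr_to_write = SWAP_CLUSTER_MAX,
		.range_start = 0,
		.range_end = LLONG_MAX,
		.for_reclaim = 1,
	};

	printf("async=%d batch=%ld whole-file=%d from-reclaim=%d\n",
	       wbc.sync_mode == WB_SYNC_NONE, wbc.nr_to_write,
	       wbc.range_end == LLONG_MAX, wbc.for_reclaim);
	return 0;
}
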
730 spin_lock(&mapping->host->i_lock); in __remove_mapping()
731 xa_lock_irq(&mapping->i_pages); in __remove_mapping()
751 * escape unnoticed. The smp_rmb is needed to ensure the folio->flags in __remove_mapping()
752 * load is not satisfied before that of folio->_refcount. in __remove_mapping()
767 swp_entry_t swap = folio->swap; in __remove_mapping()
773 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
778 free_folio = mapping->a_ops->free_folio; in __remove_mapping()
799 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
801 inode_add_lru(mapping->host); in __remove_mapping()
802 spin_unlock(&mapping->host->i_lock); in __remove_mapping()
811 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
813 spin_unlock(&mapping->host->i_lock); in __remove_mapping()
818 * remove_mapping() - Attempt to remove a folio from its mapping.
844 * folio_putback_lru - Put previously isolated folio onto appropriate LRU list.
876 set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced)); in lru_gen_set_refs()
880 set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_workingset)); in lru_gen_set_refs()
896 referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup, in folio_check_references()
909 * 2) Skip the non-shared swapbacked folio mapped solely by in folio_check_references()
910 * the exiting or OOM-reaped process. in folio_check_references()
912 if (referenced_ptes == -1) in folio_check_references()
945 * Activate file-backed executable folios after first usage. in folio_check_references()
989 if (mapping && mapping->a_ops->is_dirty_writeback) in folio_check_dirty_writeback()
990 mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); in folio_check_dirty_writeback()
1001 allowed_mask = mtc->nmask; in alloc_migrate_folio()
1011 mtc->nmask = NULL; in alloc_migrate_folio()
1012 mtc->gfp_mask |= __GFP_THISNODE; in alloc_migrate_folio()
1017 mtc->gfp_mask &= ~__GFP_THISNODE; in alloc_migrate_folio()
1018 mtc->nmask = allowed_mask; in alloc_migrate_folio()
1030 int target_nid = next_demotion_node(pgdat->node_id); in demote_folio_list()
1070 * We can "enter_fs" for swap-cache with only __GFP_IO in may_enter_fs()
1072 * ->flags can be updated non-atomically (scan_swap_map_slots), in may_enter_fs()
1097 do_demote_pass = can_demote(pgdat->node_id, sc); in shrink_folio_list()
1110 list_del(&folio->lru); in shrink_folio_list()
1120 sc->nr_scanned += nr_pages; in shrink_folio_list()
1125 if (!sc->may_unmap && folio_mapped(folio)) in shrink_folio_list()
1135 stat->nr_dirty += nr_pages; in shrink_folio_list()
1138 stat->nr_unqueued_dirty += nr_pages; in shrink_folio_list()
1147 stat->nr_congested += nr_pages; in shrink_folio_list()
1197 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { in shrink_folio_list()
1198 stat->nr_immediate += nr_pages; in shrink_folio_list()
1204 !may_enter_fs(folio, sc->gfp_mask)) { in shrink_folio_list()
1206 * This is slightly racy - in shrink_folio_list()
1210 * interpreted as the readahead flag - but in shrink_folio_list()
1220 stat->nr_writeback += nr_pages; in shrink_folio_list()
1228 list_add_tail(&folio->lru, folio_list); in shrink_folio_list()
1240 stat->nr_ref_keep += nr_pages; in shrink_folio_list()
1253 list_add(&folio->lru, &demote_folios); in shrink_folio_list()
1265 if (!(sc->gfp_mask & __GFP_IO)) in shrink_folio_list()
1277 if (data_race(!list_empty(&folio->_deferred_list) && in shrink_folio_list()
1310 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
1341 stat->nr_unmap_fail += nr_pages; in shrink_folio_list()
1344 stat->nr_lazyfree_fail += nr_pages; in shrink_folio_list()
1364 * injecting inefficient single-folio I/O into in shrink_folio_list()
1375 !test_bit(PGDAT_DIRTY, &pgdat->flags))) { in shrink_folio_list()
1391 if (!may_enter_fs(folio, sc->gfp_mask)) in shrink_folio_list()
1393 if (!sc->may_writepage) in shrink_folio_list()
1412 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
1418 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
1421 stat->nr_pageout += nr_pages; in shrink_folio_list()
1429 * A synchronous write - probably a ramdisk. Go in shrink_folio_list()
1457 * and mark the folio clean - it can be freed. in shrink_folio_list()
1459 * Rarely, folios can have buffers and no ->mapping. in shrink_folio_list()
1468 if (!filemap_release_folio(folio, sc->gfp_mask)) in shrink_folio_list()
1503 sc->target_mem_cgroup)) in shrink_folio_list()
1528 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
1540 stat->nr_activate[type] += nr_pages; in shrink_folio_list()
1546 list_add(&folio->lru, &ret_folios); in shrink_folio_list()
1555 stat->nr_demoted += nr_demoted; in shrink_folio_list()
1577 if (!sc->proactive) { in shrink_folio_list()
1583 pgactivate = stat->nr_activate[0] + stat->nr_activate[1]; in shrink_folio_list()
1615 list_move(&folio->lru, &clean_folios); in reclaim_clean_pages_from_list()
1626 nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc, in reclaim_clean_pages_from_list()
1631 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, in reclaim_clean_pages_from_list()
1632 -(long)nr_reclaimed); in reclaim_clean_pages_from_list()
1639 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, in reclaim_clean_pages_from_list()
1641 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, in reclaim_clean_pages_from_list()
1642 -(long)stat.nr_lazyfree_fail); in reclaim_clean_pages_from_list()
1659 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); in update_lru_sizes()
1667 * lruvec->lru_lock is heavily contended. Some of the functions that
1690 struct list_head *src = &lruvec->lists[lru]; in isolate_lru_folios()
1713 (folio_zonenum(folio) > sc->reclaim_idx)) { in isolate_lru_folios()
1731 if (!sc->may_unmap && folio_mapped(folio)) in isolate_lru_folios()
1736 * sure the folio is not being freed elsewhere -- the in isolate_lru_folios()
1752 list_move(&folio->lru, move_to); in isolate_lru_folios()
1775 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, in isolate_lru_folios()
1782 * folio_isolate_lru() - Try to isolate a folio from its LRU list.
1852 * won't get blocked by normal direct-reclaimers, forming a circular in too_many_isolated()
1855 if (gfp_has_io_fs(sc->gfp_mask)) in too_many_isolated()
1883 list_del(&folio->lru); in move_folios_to_lru()
1885 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1887 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1899 * list_add(&folio->lru,) in move_folios_to_lru()
1900 * list_add(&folio->lru,) in move_folios_to_lru()
1909 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1912 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1931 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1934 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1941 * If a kernel thread (such as nfsd for loop-back mounts) services a backing
1947 return !(current->flags & PF_LOCAL_THROTTLE); in current_may_throttle()
1983 spin_lock_irq(&lruvec->lru_lock); in shrink_inactive_list()
1995 spin_unlock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2002 spin_lock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2007 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_inactive_list()
2013 spin_unlock_irq(&lruvec->lru_lock); in shrink_inactive_list()
2015 lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed); in shrink_inactive_list()
2043 sc->nr.dirty += stat.nr_dirty; in shrink_inactive_list()
2044 sc->nr.congested += stat.nr_congested; in shrink_inactive_list()
2045 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; in shrink_inactive_list()
2046 sc->nr.writeback += stat.nr_writeback; in shrink_inactive_list()
2047 sc->nr.immediate += stat.nr_immediate; in shrink_inactive_list()
2048 sc->nr.taken += nr_taken; in shrink_inactive_list()
2050 sc->nr.file_taken += nr_taken; in shrink_inactive_list()
2052 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in shrink_inactive_list()
2053 nr_scanned, nr_reclaimed, &stat, sc->priority, file); in shrink_inactive_list()
2068 * It is safe to rely on the active flag against the non-LRU folios in here
2069 * because nobody will play with that bit on a non-LRU folio.
2071 * The downside is that we have to touch folio->_refcount against each folio.
2072 * But we had to alter folio->flags anyway.
2092 spin_lock_irq(&lruvec->lru_lock); in shrink_active_list()
2103 spin_unlock_irq(&lruvec->lru_lock); in shrink_active_list()
2110 list_del(&folio->lru); in shrink_active_list()
2126 if (folio_referenced(folio, 0, sc->target_mem_cgroup, in shrink_active_list()
2129 * Identify referenced, file-backed active folios and in shrink_active_list()
2133 * are not likely to be evicted by use-once streaming in shrink_active_list()
2139 list_add(&folio->lru, &l_active); in shrink_active_list()
2144 folio_clear_active(folio); /* we are de-activating */ in shrink_active_list()
2146 list_add(&folio->lru, &l_inactive); in shrink_active_list()
2152 spin_lock_irq(&lruvec->lru_lock); in shrink_active_list()
2160 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_active_list()
2161 spin_unlock_irq(&lruvec->lru_lock); in shrink_active_list()
2165 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, in shrink_active_list()
2166 nr_deactivate, nr_rotated, sc->priority, file); in shrink_active_list()
2186 list_del(&folio->lru); in reclaim_folio_list()
2189 trace_mm_vmscan_reclaim_pages(pgdat->node_id, sc.nr_scanned, nr_reclaimed, &stat); in reclaim_folio_list()
2212 list_move(&folio->lru, &node_folio_list); in reclaim_pages()
2231 if (sc->may_deactivate & (1 << is_file_lru(lru))) in shrink_list()
2234 sc->skipped_deactivate = 1; in shrink_list()
2246 * to the established workingset on the scan-resistant active list,
2260 * -------------------------------------
2279 gb = (inactive + active) >> (30 - PAGE_SHIFT); in inactive_is_low()
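
The separator line above is what remains of the comment's total-memory / ratio / max-inactive table; the ratio the kernel actually applies grows with the square root of the list size, so lists below 1GB are kept roughly equal while a 10GB list only needs about a tenth of its pages inactive. A worked sketch of that calculation, assuming the int_sqrt(10 * gb) formula used by inactive_is_low() and 4KB pages:

#include <stdio.h>

/* integer square root, standing in for the kernel's int_sqrt() */
static unsigned long int_sqrt_demo(unsigned long x)
{
	unsigned long r = 0;

	while ((r + 1) * (r + 1) <= x)
		r++;
	return r;
}

/* inactive_is_low(): is the inactive list small relative to the active one? */
static int inactive_is_low_demo(unsigned long inactive, unsigned long active,
				unsigned long page_shift)
{
	unsigned long gb = (inactive + active) >> (30 - page_shift);
	unsigned long ratio = gb ? int_sqrt_demo(10 * gb) : 1;

	return inactive * ratio < active;
}

int main(void)
{
	unsigned long pages_per_gb = 1UL << (30 - 12);	/* 4KB pages */

	/* 1GB total: ratio 3, so ~250MB inactive is already enough */
	printf("1GB, 250MB inactive:  low=%d\n",
	       inactive_is_low_demo(pages_per_gb / 4, 3 * pages_per_gb / 4, 12));
	/* 10GB total: ratio 10, so 0.5GB inactive is too little */
	printf("10GB, 0.5GB inactive: low=%d\n",
	       inactive_is_low_demo(pages_per_gb / 2,
				    10 * pages_per_gb - pages_per_gb / 2, 12));
	return 0;
}
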
2303 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in prepare_scan_control()
2306 * Flush the memory cgroup stats in rate-limited way as we don't need in prepare_scan_control()
2310 mem_cgroup_flush_stats_ratelimited(sc->target_mem_cgroup); in prepare_scan_control()
2315 spin_lock_irq(&target_lruvec->lru_lock); in prepare_scan_control()
2316 sc->anon_cost = target_lruvec->anon_cost; in prepare_scan_control()
2317 sc->file_cost = target_lruvec->file_cost; in prepare_scan_control()
2318 spin_unlock_irq(&target_lruvec->lru_lock); in prepare_scan_control()
2324 if (!sc->force_deactivate) { in prepare_scan_control()
2334 if (refaults != target_lruvec->refaults[WORKINGSET_ANON] || in prepare_scan_control()
2336 sc->may_deactivate |= DEACTIVATE_ANON; in prepare_scan_control()
2338 sc->may_deactivate &= ~DEACTIVATE_ANON; in prepare_scan_control()
2342 if (refaults != target_lruvec->refaults[WORKINGSET_FILE] || in prepare_scan_control()
2344 sc->may_deactivate |= DEACTIVATE_FILE; in prepare_scan_control()
2346 sc->may_deactivate &= ~DEACTIVATE_FILE; in prepare_scan_control()
2348 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; in prepare_scan_control()
2356 if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE) && in prepare_scan_control()
2357 !sc->no_cache_trim_mode) in prepare_scan_control()
2358 sc->cache_trim_mode = 1; in prepare_scan_control()
2360 sc->cache_trim_mode = 0; in prepare_scan_control()
2376 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); in prepare_scan_control()
2381 struct zone *zone = &pgdat->node_zones[z]; in prepare_scan_control()
2396 sc->file_is_tiny = in prepare_scan_control()
2398 !(sc->may_deactivate & DEACTIVATE_ANON) && in prepare_scan_control()
2399 anon >> sc->priority; in prepare_scan_control()
2424 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) { in get_scan_count()
2446 if (!sc->priority && swappiness) { in get_scan_count()
2452 * If the system is almost out of file pages, force-scan anon. in get_scan_count()
2454 if (sc->file_is_tiny) { in get_scan_count()
2463 if (sc->cache_trim_mode) { in get_scan_count()
2484 total_cost = sc->anon_cost + sc->file_cost; in get_scan_count()
2485 anon_cost = total_cost + sc->anon_cost; in get_scan_count()
2486 file_cost = total_cost + sc->file_cost; in get_scan_count()
2492 fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1); in get_scan_count()
2505 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); in get_scan_count()
2506 mem_cgroup_protection(sc->target_mem_cgroup, memcg, in get_scan_count()
2516 * becomes extremely binary -- from nothing as we in get_scan_count()
2531 * the best-effort low protection. However, we still in get_scan_count()
2532 * ideally want to honor how well-behaved groups are in in get_scan_count()
2543 if (!sc->memcg_low_reclaim && low > min) { in get_scan_count()
2545 sc->memcg_low_skipped = 1; in get_scan_count()
2553 scan = lruvec_size - lruvec_size * protection / in get_scan_count()
2559 * sc->priority further than desirable. in get_scan_count()
2566 scan >>= sc->priority; in get_scan_count()
2585 * round-off error. in get_scan_count()
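
With swappiness in hand, get_scan_count() splits scan pressure between anon and file in proportion to the swappiness weights divided by each list's recent reclaim cost; anon_cost and file_cost above are deliberately inflated by total_cost so neither fraction collapses to zero. A hedged numeric sketch of that fractional split, assuming MAX_SWAPPINESS of 200, sample cost values, and ignoring the memcg protection scaling that follows in the function:

#include <stdio.h>

#define MAX_SWAPPINESS 200UL

int main(void)
{
	unsigned long swappiness = 60;
	unsigned long sc_anon_cost = 100, sc_file_cost = 300;

	unsigned long total_cost = sc_anon_cost + sc_file_cost;	/* 400 */
	unsigned long anon_cost = total_cost + sc_anon_cost;		/* 500 */
	unsigned long file_cost = total_cost + sc_file_cost;		/* 700 */

	unsigned long ap = swappiness * (total_cost + 1) / (anon_cost + 1);
	unsigned long fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1) / (file_cost + 1);

	unsigned long lru_size = 100000, priority = 4;
	unsigned long scan = lru_size >> priority;	/* 6250 pages this round */

	/* the anon share of this round's scan target */
	unsigned long scan_anon = scan * ap / (ap + fp + 1);

	printf("ap=%lu fp=%lu -> scan %lu anon pages of %lu\n",
	       ap, fp, scan_anon, scan);
	return 0;
}

With these sample costs the anon list, being cheaper to reclaim relative to its weight, gets roughly a third of the scan target rather than the 60/200 a naive swappiness split would give.
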
2619 return can_demote(pgdat->node_id, sc); in can_age_anon_pages()
2647 unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
2651 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
2652 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
2675 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec; in get_lruvec()
2678 if (!lruvec->pgdat) in get_lruvec()
2679 lruvec->pgdat = pgdat; in get_lruvec()
2686 return &pgdat->__lruvec; in get_lruvec()
2694 if (!sc->may_swap) in get_swappiness()
2697 if (!can_demote(pgdat->node_id, sc) && in get_swappiness()
2706 return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1; in get_nr_gens()
2734 * To get rid of non-leaf entries that no longer have enough leaf entries, the
2735 * aging uses the double-buffering technique to flip to the other filter each
2736 * time it produces a new generation. For non-leaf entries that have enough
2762 key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1); in get_item_key()
2773 filter = READ_ONCE(mm_state->filters[gen]); in test_bloom_filter()
2789 filter = READ_ONCE(mm_state->filters[gen]); in update_bloom_filter()
2806 filter = mm_state->filters[gen]; in reset_bloom_filter()
2814 WRITE_ONCE(mm_state->filters[gen], filter); in reset_bloom_filter()
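
The comment above describes the double-buffering: two Bloom filters indexed by the generation's low bit, so the aging can populate the filter for the next generation while the current walk still tests against the current one, and each item sets or tests two bits derived from one hash. A compact userspace sketch of that scheme, using a simple 64-bit mix in place of the kernel's hash_ptr() and a fixed 2^15-bit filter:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define BLOOM_FILTER_SHIFT	15
#define BLOOM_BITS		(1UL << BLOOM_FILTER_SHIFT)
#define NR_BLOOM_FILTERS	2

static unsigned long filters[NR_BLOOM_FILTERS][BLOOM_BITS / (8 * sizeof(unsigned long))];

/* stand-in for the kernel's hash_ptr(): any decent 64-bit mix works here */
static uint64_t mix64(uint64_t x)
{
	x ^= x >> 33; x *= 0xff51afd7ed558ccdULL;
	x ^= x >> 33; x *= 0xc4ceb9fe1a85ec53ULL;
	x ^= x >> 33;
	return x;
}

/* derive two bit indices from one hash, like get_item_key() */
static void get_item_key(const void *item, unsigned int *key)
{
	uint64_t hash = mix64((uintptr_t)item);

	key[0] = hash & (BLOOM_BITS - 1);
	key[1] = (hash >> BLOOM_FILTER_SHIFT) & (BLOOM_BITS - 1);
}

static void set_bit_demo(unsigned long *map, unsigned int nr)
{
	map[nr / (8 * sizeof(unsigned long))] |= 1UL << (nr % (8 * sizeof(unsigned long)));
}

static bool test_bit_demo(const unsigned long *map, unsigned int nr)
{
	return map[nr / (8 * sizeof(unsigned long))] >> (nr % (8 * sizeof(unsigned long))) & 1;
}

/* the generation's low bit picks which of the two filters to use */
static void update_filter(unsigned long gen, const void *item)
{
	unsigned int key[2];

	get_item_key(item, key);
	set_bit_demo(filters[gen & 1], key[0]);
	set_bit_demo(filters[gen & 1], key[1]);
}

static bool test_filter(unsigned long gen, const void *item)
{
	unsigned int key[2];

	get_item_key(item, key);
	return test_bit_demo(filters[gen & 1], key[0]) &&
	       test_bit_demo(filters[gen & 1], key[1]);
}

/* flipping to a new generation starts from an empty filter */
static void reset_filter(unsigned long gen)
{
	memset(filters[gen & 1], 0, sizeof(filters[0]));
}

int main(void)
{
	int dummy_pmd;
	unsigned long seq = 7;

	reset_filter(seq + 1);			/* prepare the next generation */
	update_filter(seq + 1, &dummy_pmd);	/* remember a "dense" PMD entry */

	printf("current gen sees it: %d\n", test_filter(seq, &dummy_pmd));
	printf("next gen sees it:    %d\n", test_filter(seq + 1, &dummy_pmd));
	return 0;
}
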
2832 return &memcg->mm_list; in get_mm_list()
2841 return &lruvec->mm_state; in get_mm_state()
2848 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in get_next_mm()
2849 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); in get_next_mm()
2851 mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); in get_next_mm()
2852 key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap); in get_next_mm()
2854 if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap)) in get_next_mm()
2857 clear_bit(key, &mm->lru_gen.bitmap); in get_next_mm()
2868 VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list)); in lru_gen_add_mm()
2870 VM_WARN_ON_ONCE(mm->lru_gen.memcg); in lru_gen_add_mm()
2871 mm->lru_gen.memcg = memcg; in lru_gen_add_mm()
2873 spin_lock(&mm_list->lock); in lru_gen_add_mm()
2880 if (mm_state->tail == &mm_list->fifo) in lru_gen_add_mm()
2881 mm_state->tail = &mm->lru_gen.list; in lru_gen_add_mm()
2884 list_add_tail(&mm->lru_gen.list, &mm_list->fifo); in lru_gen_add_mm()
2886 spin_unlock(&mm_list->lock); in lru_gen_add_mm()
2895 if (list_empty(&mm->lru_gen.list)) in lru_gen_del_mm()
2899 memcg = mm->lru_gen.memcg; in lru_gen_del_mm()
2903 spin_lock(&mm_list->lock); in lru_gen_del_mm()
2910 if (mm_state->head == &mm->lru_gen.list) in lru_gen_del_mm()
2911 mm_state->head = mm_state->head->prev; in lru_gen_del_mm()
2914 if (mm_state->tail == &mm->lru_gen.list) in lru_gen_del_mm()
2915 mm_state->tail = mm_state->tail->next; in lru_gen_del_mm()
2918 list_del_init(&mm->lru_gen.list); in lru_gen_del_mm()
2920 spin_unlock(&mm_list->lock); in lru_gen_del_mm()
2923 mem_cgroup_put(mm->lru_gen.memcg); in lru_gen_del_mm()
2924 mm->lru_gen.memcg = NULL; in lru_gen_del_mm()
2932 struct task_struct *task = rcu_dereference_protected(mm->owner, true); in lru_gen_migrate_mm()
2934 VM_WARN_ON_ONCE(task->mm != mm); in lru_gen_migrate_mm()
2935 lockdep_assert_held(&task->alloc_lock); in lru_gen_migrate_mm()
2942 if (!mm->lru_gen.memcg) in lru_gen_migrate_mm()
2948 if (memcg == mm->lru_gen.memcg) in lru_gen_migrate_mm()
2951 VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list)); in lru_gen_migrate_mm()
2981 struct lruvec *lruvec = walk->lruvec; in reset_mm_stats()
2984 lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); in reset_mm_stats()
2986 hist = lru_hist_from_seq(walk->seq); in reset_mm_stats()
2989 WRITE_ONCE(mm_state->stats[hist][i], in reset_mm_stats()
2990 mm_state->stats[hist][i] + walk->mm_stats[i]); in reset_mm_stats()
2991 walk->mm_stats[i] = 0; in reset_mm_stats()
2995 hist = lru_hist_from_seq(walk->seq + 1); in reset_mm_stats()
2998 WRITE_ONCE(mm_state->stats[hist][i], 0); in reset_mm_stats()
3007 struct lruvec *lruvec = walk->lruvec; in iterate_mm_list()
3013 * mm_state->seq is incremented after each iteration of mm_list. There in iterate_mm_list()
3022 spin_lock(&mm_list->lock); in iterate_mm_list()
3024 VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->seq); in iterate_mm_list()
3026 if (walk->seq <= mm_state->seq) in iterate_mm_list()
3029 if (!mm_state->head) in iterate_mm_list()
3030 mm_state->head = &mm_list->fifo; in iterate_mm_list()
3032 if (mm_state->head == &mm_list->fifo) in iterate_mm_list()
3036 mm_state->head = mm_state->head->next; in iterate_mm_list()
3037 if (mm_state->head == &mm_list->fifo) { in iterate_mm_list()
3038 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); in iterate_mm_list()
3044 if (!mm_state->tail || mm_state->tail == mm_state->head) { in iterate_mm_list()
3045 mm_state->tail = mm_state->head->next; in iterate_mm_list()
3046 walk->force_scan = true; in iterate_mm_list()
3053 spin_unlock(&mm_list->lock); in iterate_mm_list()
3056 reset_bloom_filter(mm_state, walk->seq + 1); in iterate_mm_list()
3073 spin_lock(&mm_list->lock); in iterate_mm_list_nowalk()
3075 VM_WARN_ON_ONCE(mm_state->seq + 1 < seq); in iterate_mm_list_nowalk()
3077 if (seq > mm_state->seq) { in iterate_mm_list_nowalk()
3078 mm_state->head = NULL; in iterate_mm_list_nowalk()
3079 mm_state->tail = NULL; in iterate_mm_list_nowalk()
3080 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); in iterate_mm_list_nowalk()
3084 spin_unlock(&mm_list->lock); in iterate_mm_list_nowalk()
3094 * A feedback loop based on Proportional-Integral-Derivative (PID) controller.
3109 * 1. The D term may discount the other two terms over time so that long-lived
3122 struct lru_gen_folio *lrugen = &lruvec->lrugen; in read_ctrl_pos()
3123 int hist = lru_hist_from_seq(lrugen->min_seq[type]); in read_ctrl_pos()
3125 pos->gain = gain; in read_ctrl_pos()
3126 pos->refaulted = pos->total = 0; in read_ctrl_pos()
3128 for (i = tier % MAX_NR_TIERS; i <= min(tier, MAX_NR_TIERS - 1); i++) { in read_ctrl_pos()
3129 pos->refaulted += lrugen->avg_refaulted[type][i] + in read_ctrl_pos()
3130 atomic_long_read(&lrugen->refaulted[hist][type][i]); in read_ctrl_pos()
3131 pos->total += lrugen->avg_total[type][i] + in read_ctrl_pos()
3132 lrugen->protected[hist][type][i] + in read_ctrl_pos()
3133 atomic_long_read(&lrugen->evicted[hist][type][i]); in read_ctrl_pos()
3140 struct lru_gen_folio *lrugen = &lruvec->lrugen; in reset_ctrl_pos()
3142 unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1; in reset_ctrl_pos()
3144 lockdep_assert_held(&lruvec->lru_lock); in reset_ctrl_pos()
3155 sum = lrugen->avg_refaulted[type][tier] + in reset_ctrl_pos()
3156 atomic_long_read(&lrugen->refaulted[hist][type][tier]); in reset_ctrl_pos()
3157 WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2); in reset_ctrl_pos()
3159 sum = lrugen->avg_total[type][tier] + in reset_ctrl_pos()
3160 lrugen->protected[hist][type][tier] + in reset_ctrl_pos()
3161 atomic_long_read(&lrugen->evicted[hist][type][tier]); in reset_ctrl_pos()
3162 WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2); in reset_ctrl_pos()
3166 atomic_long_set(&lrugen->refaulted[hist][type][tier], 0); in reset_ctrl_pos()
3167 atomic_long_set(&lrugen->evicted[hist][type][tier], 0); in reset_ctrl_pos()
3168 WRITE_ONCE(lrugen->protected[hist][type][tier], 0); in reset_ctrl_pos()
3179 return pv->refaulted < MIN_LRU_BATCH || in positive_ctrl_err()
3180 pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <= in positive_ctrl_err()
3181 (sp->refaulted + 1) * pv->total * pv->gain; in positive_ctrl_err()
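
read_ctrl_pos() above fills a position with (refaulted, total, gain), and positive_ctrl_err() compares two such positions by cross-multiplying, i.e. it checks whether the candidate's refault rate, weighted by gain, is no worse than the baseline's, with a MIN_LRU_BATCH floor so tiny samples are ignored. A small numeric sketch of that comparison, assuming MIN_LRU_BATCH of 64 and made-up refault counts:

#include <stdbool.h>
#include <stdio.h>

#define MIN_LRU_BATCH 64UL

struct ctrl_pos {
	unsigned long refaulted;
	unsigned long total;
	int gain;
};

/* "error is positive": the candidate (pv) refaults no harder than the set point (sp) */
static bool positive_ctrl_err(const struct ctrl_pos *sp, const struct ctrl_pos *pv)
{
	return pv->refaulted < MIN_LRU_BATCH ||
	       pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <=
	       (sp->refaulted + 1) * pv->total * pv->gain;
}

int main(void)
{
	/* baseline: refaults at 500/10000 = 5%, gain 1 */
	struct ctrl_pos sp = { .refaulted = 500, .total = 10000, .gain = 1 };
	/* candidate refaulting at 300/20000 = 1.5% */
	struct ctrl_pos cold = { .refaulted = 300, .total = 20000, .gain = 1 };
	/* candidate refaulting at 4000/20000 = 20% */
	struct ctrl_pos hot = { .refaulted = 4000, .total = 20000, .gain = 1 };

	printf("evict the 1.5%% list first? %d\n", positive_ctrl_err(&sp, &cold));
	printf("evict the 20%%  list first? %d\n", positive_ctrl_err(&sp, &hot));
	return 0;
}
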
3191 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_update_gen()
3197 set_mask_bits(&folio->flags, LRU_REFS_MASK, BIT(PG_referenced)); in folio_update_gen()
3198 return -1; in folio_update_gen()
3204 return -1; in folio_update_gen()
3208 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_update_gen()
3210 return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; in folio_update_gen()
3217 struct lru_gen_folio *lrugen = &lruvec->lrugen; in folio_inc_gen()
3218 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); in folio_inc_gen()
3219 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_inc_gen()
3224 new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; in folio_inc_gen()
3236 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_inc_gen()
3253 walk->batched++; in update_batch_size()
3255 walk->nr_pages[old_gen][type][zone] -= delta; in update_batch_size()
3256 walk->nr_pages[new_gen][type][zone] += delta; in update_batch_size()
3262 struct lruvec *lruvec = walk->lruvec; in reset_batch_size()
3263 struct lru_gen_folio *lrugen = &lruvec->lrugen; in reset_batch_size()
3265 walk->batched = 0; in reset_batch_size()
3269 int delta = walk->nr_pages[gen][type][zone]; in reset_batch_size()
3274 walk->nr_pages[gen][type][zone] = 0; in reset_batch_size()
3275 WRITE_ONCE(lrugen->nr_pages[gen][type][zone], in reset_batch_size()
3276 lrugen->nr_pages[gen][type][zone] + delta); in reset_batch_size()
3287 struct vm_area_struct *vma = args->vma; in should_skip_vma()
3288 struct lru_gen_mm_walk *walk = args->private; in should_skip_vma()
3299 if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) in should_skip_vma()
3302 if (vma == get_gate_vma(vma->vm_mm)) in should_skip_vma()
3306 return !walk->swappiness; in should_skip_vma()
3308 if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping)) in should_skip_vma()
3311 mapping = vma->vm_file->f_mapping; in should_skip_vma()
3316 return !walk->swappiness; in should_skip_vma()
3318 if (walk->swappiness > MAX_SWAPPINESS) in should_skip_vma()
3322 return !mapping->a_ops->read_folio; in should_skip_vma()
3326 * Some userspace memory allocators map many single-page VMAs. Instead of
3335 VMA_ITERATOR(vmi, args->mm, start); in get_next_vma()
3340 for_each_vma(vmi, args->vma) { in get_next_vma()
3341 if (end && end <= args->vma->vm_start) in get_next_vma()
3344 if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args)) in get_next_vma()
3347 *vm_start = max(start, args->vma->vm_start); in get_next_vma()
3348 *vm_end = min(end - 1, args->vma->vm_end - 1) + 1; in get_next_vma()
3361 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); in get_pte_pfn()
3364 return -1; in get_pte_pfn()
3367 return -1; in get_pte_pfn()
3369 if (!pte_young(pte) && !mm_has_notifiers(vma->vm_mm)) in get_pte_pfn()
3370 return -1; in get_pte_pfn()
3373 return -1; in get_pte_pfn()
3375 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) in get_pte_pfn()
3376 return -1; in get_pte_pfn()
3386 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); in get_pmd_pfn()
3389 return -1; in get_pmd_pfn()
3392 return -1; in get_pmd_pfn()
3394 if (!pmd_young(pmd) && !mm_has_notifiers(vma->vm_mm)) in get_pmd_pfn()
3395 return -1; in get_pmd_pfn()
3398 return -1; in get_pmd_pfn()
3400 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) in get_pmd_pfn()
3401 return -1; in get_pmd_pfn()
3414 if (folio_nid(folio) != pgdat->node_id) in get_pfn_folio()
3466 struct lru_gen_mm_walk *walk = args->private; in walk_pte_range()
3467 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); in walk_pte_range()
3468 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pte_range()
3469 DEFINE_MAX_SEQ(walk->lruvec); in walk_pte_range()
3473 pte = pte_offset_map_rw_nolock(args->mm, pmd, start & PMD_MASK, &pmdval, &ptl); in walk_pte_range()
3495 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pte_range()
3497 pfn = get_pte_pfn(ptent, args->vma, addr, pgdat); in walk_pte_range()
3498 if (pfn == -1) in walk_pte_range()
3505 if (!ptep_clear_young_notify(args->vma, addr, pte + i)) in walk_pte_range()
3519 walk->mm_stats[MM_LEAF_YOUNG]++; in walk_pte_range()
3542 struct lru_gen_mm_walk *walk = args->private; in walk_pmd_range_locked()
3543 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); in walk_pmd_range_locked()
3544 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range_locked()
3545 DEFINE_MAX_SEQ(walk->lruvec); in walk_pmd_range_locked()
3551 if (*first == -1) { in walk_pmd_range_locked()
3557 i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first); in walk_pmd_range_locked()
3559 __set_bit(i - 1, bitmap); in walk_pmd_range_locked()
3565 ptl = pmd_lockptr(args->mm, pmd); in walk_pmd_range_locked()
3582 if (!walk->force_scan && should_clear_pmd_young() && in walk_pmd_range_locked()
3583 !mm_has_notifiers(args->mm)) in walk_pmd_range_locked()
3589 if (pfn == -1) in walk_pmd_range_locked()
3609 walk->mm_stats[MM_LEAF_YOUNG]++; in walk_pmd_range_locked()
3619 *first = -1; in walk_pmd_range_locked()
3631 unsigned long first = -1; in walk_pmd_range()
3632 struct lru_gen_mm_walk *walk = args->private; in walk_pmd_range()
3633 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); in walk_pmd_range()
3645 vma = args->vma; in walk_pmd_range()
3652 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pmd_range()
3657 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range()
3660 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pmd_range()
3662 if (pfn != -1) in walk_pmd_range()
3667 if (!walk->force_scan && should_clear_pmd_young() && in walk_pmd_range()
3668 !mm_has_notifiers(args->mm)) { in walk_pmd_range()
3675 if (!walk->force_scan && !test_bloom_filter(mm_state, walk->seq, pmd + i)) in walk_pmd_range()
3678 walk->mm_stats[MM_NONLEAF_FOUND]++; in walk_pmd_range()
3683 walk->mm_stats[MM_NONLEAF_ADDED]++; in walk_pmd_range()
3686 update_bloom_filter(mm_state, walk->seq + 1, pmd + i); in walk_pmd_range()
3689 walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first); in walk_pmd_range()
3702 struct lru_gen_mm_walk *walk = args->private; in walk_pud_range()
3718 if (need_resched() || walk->batched >= MAX_LRU_BATCH) { in walk_pud_range()
3729 if (!end || !args->vma) in walk_pud_range()
3732 walk->next_addr = max(end, args->vma->vm_start); in walk_pud_range()
3734 return -EAGAIN; in walk_pud_range()
3745 struct lruvec *lruvec = walk->lruvec; in walk_mm()
3747 walk->next_addr = FIRST_USER_ADDRESS; in walk_mm()
3752 err = -EBUSY; in walk_mm()
3755 if (walk->seq != max_seq) in walk_mm()
3760 err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk); in walk_mm()
3765 if (walk->batched) { in walk_mm()
3766 spin_lock_irq(&lruvec->lru_lock); in walk_mm()
3768 spin_unlock_irq(&lruvec->lru_lock); in walk_mm()
3772 } while (err == -EAGAIN); in walk_mm()
3777 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; in set_mm_walk()
3782 walk = &pgdat->mm_walk; in set_mm_walk()
3789 current->reclaim_state->mm_walk = walk; in set_mm_walk()
3796 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; in clear_mm_walk()
3798 VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages))); in clear_mm_walk()
3799 VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats))); in clear_mm_walk()
3801 current->reclaim_state->mm_walk = NULL; in clear_mm_walk()
3811 struct lru_gen_folio *lrugen = &lruvec->lrugen; in inc_min_seq()
3812 int hist = lru_hist_from_seq(lrugen->min_seq[type]); in inc_min_seq()
3813 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); in inc_min_seq()
3820 struct list_head *head = &lrugen->folios[old_gen][type][zone]; in inc_min_seq()
3833 list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]); in inc_min_seq()
3840 WRITE_ONCE(lrugen->protected[hist][type][tier], in inc_min_seq()
3841 lrugen->protected[hist][type][tier] + delta); in inc_min_seq()
3844 if (!--remaining) in inc_min_seq()
3850 WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1); in inc_min_seq()
3859 struct lru_gen_folio *lrugen = &lruvec->lrugen; in try_to_inc_min_seq()
3866 while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) { in try_to_inc_min_seq()
3870 if (!list_empty(&lrugen->folios[gen][type][zone])) in try_to_inc_min_seq()
3882 unsigned long seq = lrugen->max_seq - MIN_NR_GENS; in try_to_inc_min_seq()
3891 if (min_seq[type] <= lrugen->min_seq[type]) in try_to_inc_min_seq()
3895 WRITE_ONCE(lrugen->min_seq[type], min_seq[type]); in try_to_inc_min_seq()
3907 struct lru_gen_folio *lrugen = &lruvec->lrugen; in inc_max_seq()
3909 if (seq < READ_ONCE(lrugen->max_seq)) in inc_max_seq()
3912 spin_lock_irq(&lruvec->lru_lock); in inc_max_seq()
3916 success = seq == lrugen->max_seq; in inc_max_seq()
3927 spin_unlock_irq(&lruvec->lru_lock); in inc_max_seq()
3938 prev = lru_gen_from_seq(lrugen->max_seq - 1); in inc_max_seq()
3939 next = lru_gen_from_seq(lrugen->max_seq + 1); in inc_max_seq()
3944 long delta = lrugen->nr_pages[prev][type][zone] - in inc_max_seq()
3945 lrugen->nr_pages[next][type][zone]; in inc_max_seq()
3951 __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta); in inc_max_seq()
3958 WRITE_ONCE(lrugen->timestamps[next], jiffies); in inc_max_seq()
3960 smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); in inc_max_seq()
3962 spin_unlock_irq(&lruvec->lru_lock); in inc_max_seq()
3973 struct lru_gen_folio *lrugen = &lruvec->lrugen; in try_to_inc_max_seq()
3976 VM_WARN_ON_ONCE(seq > READ_ONCE(lrugen->max_seq)); in try_to_inc_max_seq()
3982 if (seq <= READ_ONCE(mm_state->seq)) in try_to_inc_max_seq()
4002 walk->lruvec = lruvec; in try_to_inc_max_seq()
4003 walk->seq = seq; in try_to_inc_max_seq()
4004 walk->swappiness = swappiness; in try_to_inc_max_seq()
4005 walk->force_scan = force_scan; in try_to_inc_max_seq()
4030 if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH) in set_initial_priority()
4038 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) in set_initial_priority()
4041 /* round down reclaimable and round up sc->nr_to_reclaim */ in set_initial_priority()
4042 priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1); in set_initial_priority()
4048 sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY); in set_initial_priority()
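
The priority arithmetic above amounts to: give each pass roughly reclaimable >> priority pages of scan budget, so pick the priority where that budget is just at or above nr_to_reclaim, then clamp it into [DEF_PRIORITY/2, DEF_PRIORITY]. A worked example of the fls-based calculation, assuming 64-bit longs and DEF_PRIORITY of 12:

#include <stdio.h>

#define DEF_PRIORITY 12

/* fls_long(): index of the most significant set bit (1-based), 0 for 0 */
static int fls_long(unsigned long x)
{
	return x ? 64 - __builtin_clzl(x) : 0;
}

static int clampi(int v, int lo, int hi)
{
	return v < lo ? lo : v > hi ? hi : v;
}

int main(void)
{
	unsigned long reclaimable = 4096;	/* ~16MB of reclaimable pages */
	unsigned long nr_to_reclaim = 32;	/* SWAP_CLUSTER_MAX */

	/* round reclaimable down and nr_to_reclaim up to powers of two */
	int priority = fls_long(reclaimable) - 1 - fls_long(nr_to_reclaim - 1);

	priority = clampi(priority, DEF_PRIORITY / 2, DEF_PRIORITY);

	/* reclaimable >> priority is the per-pass scan budget this chooses */
	printf("priority=%d, first-pass budget ~%lu pages\n",
	       priority, reclaimable >> priority);
	return 0;
}

Here fls_long(4096) - 1 = 12 and fls_long(31) = 5, so the chosen priority is 7 and the first pass scans about 32 pages, matching nr_to_reclaim.
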
4056 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lruvec_is_sizable()
4068 total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in lruvec_is_sizable()
4073 return mem_cgroup_online(memcg) ? (total >> sc->priority) : total; in lruvec_is_sizable()
4092 birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); in lruvec_is_reclaimable()
4127 .gfp_mask = sc->gfp_mask, in lru_gen_age_node()
4156 pte_t *pte = pvmw->pte; in lru_gen_look_around()
4157 unsigned long addr = pvmw->address; in lru_gen_look_around()
4158 struct vm_area_struct *vma = pvmw->vma; in lru_gen_look_around()
4159 struct folio *folio = pfn_folio(pvmw->pfn); in lru_gen_look_around()
4167 lockdep_assert_held(pvmw->ptl); in lru_gen_look_around()
4173 if (spin_is_contended(pvmw->ptl)) in lru_gen_look_around()
4177 if (vma->vm_flags & VM_SPECIAL) in lru_gen_look_around()
4181 walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL; in lru_gen_look_around()
4183 start = max(addr & PMD_MASK, vma->vm_start); in lru_gen_look_around()
4184 end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1; in lru_gen_look_around()
4186 if (end - start == PAGE_SIZE) in lru_gen_look_around()
4189 if (end - start > MIN_LRU_BATCH * PAGE_SIZE) { in lru_gen_look_around()
4190 if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2) in lru_gen_look_around()
4192 else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2) in lru_gen_look_around()
4193 start = end - MIN_LRU_BATCH * PAGE_SIZE; in lru_gen_look_around()
4195 start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2; in lru_gen_look_around()
4202 pte -= (addr - start) / PAGE_SIZE; in lru_gen_look_around()
4209 if (pfn == -1) in lru_gen_look_around()
4238 update_bloom_filter(mm_state, max_seq, pvmw->pmd); in lru_gen_look_around()
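
The start/end arithmetic earlier in lru_gen_look_around() clips the look-around window to the VMA and to the PMD containing the faulting address, then, if the window is still larger than MIN_LRU_BATCH pages, recenters a MIN_LRU_BATCH-sized window on the address. A standalone sketch of that clamping, assuming 4KB pages, 2MB PMDs, MIN_LRU_BATCH of 64, and skipping the single-page early return:

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define PMD_SIZE	(2UL << 20)
#define PMD_MASK	(~(PMD_SIZE - 1))
#define MIN_LRU_BATCH	64UL

static void clamp_window(unsigned long addr, unsigned long vm_start,
			 unsigned long vm_end, unsigned long *startp,
			 unsigned long *endp)
{
	unsigned long start = addr & PMD_MASK;
	unsigned long end = (addr | ~PMD_MASK) + 1;

	if (start < vm_start)
		start = vm_start;
	if (end > vm_end)
		end = vm_end;

	if (end - start > MIN_LRU_BATCH * PAGE_SIZE) {
		if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2)
			end = start + MIN_LRU_BATCH * PAGE_SIZE;
		else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2)
			start = end - MIN_LRU_BATCH * PAGE_SIZE;
		else {
			start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2;
			end = addr + MIN_LRU_BATCH * PAGE_SIZE / 2;
		}
	}

	*startp = start;
	*endp = end;
}

int main(void)
{
	unsigned long start, end;
	unsigned long vm_start = 0x7f0000000000UL;
	unsigned long vm_end = vm_start + (8UL << 20);		/* 8MB VMA */
	unsigned long addr = vm_start + (3UL << 20) + 0x5000;	/* mid-PMD fault */

	clamp_window(addr, vm_start, vm_end, &start, &end);
	printf("window: %#lx-%#lx (%lu pages around %#lx)\n",
	       start, end, (end - start) / PAGE_SIZE, addr);
	return 0;
}

For a fault in the middle of a PMD the 2MB window shrinks to 64 pages centered on the faulting address, which is what bounds the cost of the look-around.
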
4264 spin_lock_irqsave(&pgdat->memcg_lru.lock, flags); in lru_gen_rotate_memcg()
4266 VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); in lru_gen_rotate_memcg()
4269 new = old = lruvec->lrugen.gen; in lru_gen_rotate_memcg()
4277 new = get_memcg_gen(pgdat->memcg_lru.seq); in lru_gen_rotate_memcg()
4279 new = get_memcg_gen(pgdat->memcg_lru.seq + 1); in lru_gen_rotate_memcg()
4283 WRITE_ONCE(lruvec->lrugen.seg, seg); in lru_gen_rotate_memcg()
4284 WRITE_ONCE(lruvec->lrugen.gen, new); in lru_gen_rotate_memcg()
4286 hlist_nulls_del_rcu(&lruvec->lrugen.list); in lru_gen_rotate_memcg()
4289 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4291 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4293 pgdat->memcg_lru.nr_memcgs[old]--; in lru_gen_rotate_memcg()
4294 pgdat->memcg_lru.nr_memcgs[new]++; in lru_gen_rotate_memcg()
4296 if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq)) in lru_gen_rotate_memcg()
4297 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); in lru_gen_rotate_memcg()
4299 spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags); in lru_gen_rotate_memcg()
4314 spin_lock_irq(&pgdat->memcg_lru.lock); in lru_gen_online_memcg()
4316 VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list)); in lru_gen_online_memcg()
4318 gen = get_memcg_gen(pgdat->memcg_lru.seq); in lru_gen_online_memcg()
4320 lruvec->lrugen.gen = gen; in lru_gen_online_memcg()
4322 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]); in lru_gen_online_memcg()
4323 pgdat->memcg_lru.nr_memcgs[gen]++; in lru_gen_online_memcg()
4325 spin_unlock_irq(&pgdat->memcg_lru.lock); in lru_gen_online_memcg()
4349 spin_lock_irq(&pgdat->memcg_lru.lock); in lru_gen_release_memcg()
4351 if (hlist_nulls_unhashed(&lruvec->lrugen.list)) in lru_gen_release_memcg()
4354 gen = lruvec->lrugen.gen; in lru_gen_release_memcg()
4356 hlist_nulls_del_init_rcu(&lruvec->lrugen.list); in lru_gen_release_memcg()
4357 pgdat->memcg_lru.nr_memcgs[gen]--; in lru_gen_release_memcg()
4359 if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq)) in lru_gen_release_memcg()
4360 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); in lru_gen_release_memcg()
4362 spin_unlock_irq(&pgdat->memcg_lru.lock); in lru_gen_release_memcg()
4371 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_HEAD) in lru_gen_soft_reclaim()
4393 struct lru_gen_folio *lrugen = &lruvec->lrugen; in sort_folio()
4408 if (gen != lru_gen_from_seq(lrugen->min_seq[type])) { in sort_folio()
4409 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4416 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4420 int hist = lru_hist_from_seq(lrugen->min_seq[type]); in sort_folio()
4422 WRITE_ONCE(lrugen->protected[hist][type][tier], in sort_folio()
4423 lrugen->protected[hist][type][tier] + delta); in sort_folio()
4429 if (!folio_test_lru(folio) || zone > sc->reclaim_idx) { in sort_folio()
4431 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4438 sc->nr.file_taken += delta; in sort_folio()
4440 sc->nr.unqueued_dirty += delta; in sort_folio()
4446 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4458 if (!(sc->gfp_mask & __GFP_IO) && in isolate_folio()
4475 set_mask_bits(&folio->flags, LRU_REFS_MASK, 0); in isolate_folio()
4497 struct lru_gen_folio *lrugen = &lruvec->lrugen; in scan_folios()
4505 gen = lru_gen_from_seq(lrugen->min_seq[type]); in scan_folios()
4507 for (i = MAX_NR_ZONES; i > 0; i--) { in scan_folios()
4510 int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES; in scan_folios()
4511 struct list_head *head = &lrugen->folios[gen][type][zone]; in scan_folios()
4527 list_add(&folio->lru, list); in scan_folios()
4530 list_move(&folio->lru, &moved); in scan_folios()
4534 if (!--remaining || max(isolated, skipped_zone) >= MIN_LRU_BATCH) in scan_folios()
4556 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH, in scan_folios()
4560 sc->nr.file_taken += isolated; in scan_folios()
4585 return tier - 1; in get_tier_idx()
4602 read_ctrl_pos(lruvec, LRU_GEN_FILE, MAX_NR_TIERS, MAX_SWAPPINESS - swappiness, &pv); in get_type_to_scan()
4642 struct lru_gen_folio *lrugen = &lruvec->lrugen; in evict_folios()
4646 spin_lock_irq(&lruvec->lru_lock); in evict_folios()
4652 if (evictable_min_seq(lrugen->min_seq, swappiness) + MIN_NR_GENS > lrugen->max_seq) in evict_folios()
4655 spin_unlock_irq(&lruvec->lru_lock); in evict_folios()
4661 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; in evict_folios()
4662 sc->nr_reclaimed += reclaimed; in evict_folios()
4663 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in evict_folios()
4664 scanned, reclaimed, &stat, sc->priority, in evict_folios()
4671 list_del(&folio->lru); in evict_folios()
4679 list_move(&folio->lru, &clean); in evict_folios()
4685 set_mask_bits(&folio->flags, LRU_REFS_FLAGS, BIT(PG_active)); in evict_folios()
4688 spin_lock_irq(&lruvec->lru_lock); in evict_folios()
4692 walk = current->reclaim_state->mm_walk; in evict_folios()
4693 if (walk && walk->batched) { in evict_folios()
4694 walk->lruvec = lruvec; in evict_folios()
4707 spin_unlock_irq(&lruvec->lru_lock); in evict_folios()
4724 struct lru_gen_folio *lrugen = &lruvec->lrugen; in should_run_aging()
4739 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in should_run_aging()
4760 if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg)) in get_nr_to_scan()
4761 return -1; in get_nr_to_scan()
4770 if (!success || sc->priority == DEF_PRIORITY) in get_nr_to_scan()
4771 return nr_to_scan >> sc->priority; in get_nr_to_scan()
4774 return try_to_inc_max_seq(lruvec, max_seq, swappiness, false) ? -1 : 0; in get_nr_to_scan()
4786 if (sc->nr_reclaimed >= max(sc->nr_to_reclaim, compact_gap(sc->order))) in should_abort_scan()
4789 /* check the order to exclude compaction-induced reclaim */ in should_abort_scan()
4790 if (!current_is_kswapd() || sc->order) in should_abort_scan()
4796 for (i = 0; i <= sc->reclaim_idx; i++) { in should_abort_scan()
4797 struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i; in should_abort_scan()
4800 if (managed_zone(zone) && !zone_watermark_ok(zone, 0, size, sc->reclaim_idx, 0)) in should_abort_scan()
4839 if (sc->nr.unqueued_dirty && sc->nr.unqueued_dirty == sc->nr.file_taken) in try_to_shrink_lruvec()
4849 unsigned long scanned = sc->nr_scanned; in shrink_one()
4850 unsigned long reclaimed = sc->nr_reclaimed; in shrink_one()
4860 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL) in shrink_one()
4868 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority); in shrink_one()
4870 if (!sc->proactive) in shrink_one()
4871 vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned, in shrink_one()
4872 sc->nr_reclaimed - reclaimed); in shrink_one()
4883 return READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL ? in shrink_one()
4898 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq)); in shrink_many()
4906 hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) { in shrink_many()
4915 if (gen != READ_ONCE(lrugen->gen)) in shrink_many()
4951 /* try the rest of the bins of the current generation */ in shrink_many()
4962 VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap); in lru_gen_shrink_lruvec()
4968 set_mm_walk(NULL, sc->proactive); in lru_gen_shrink_lruvec()
4981 unsigned long reclaimed = sc->nr_reclaimed; in lru_gen_shrink_node()
4990 if (!sc->may_writepage || !sc->may_unmap) in lru_gen_shrink_node()
4997 set_mm_walk(pgdat, sc->proactive); in lru_gen_shrink_node()
5002 sc->nr_reclaimed = 0; in lru_gen_shrink_node()
5005 shrink_one(&pgdat->__lruvec, sc); in lru_gen_shrink_node()
5010 sc->nr_reclaimed += reclaimed; in lru_gen_shrink_node()
5016 if (sc->nr_reclaimed > reclaimed) in lru_gen_shrink_node()
5017 pgdat->kswapd_failures = 0; in lru_gen_shrink_node()
5026 struct lru_gen_folio *lrugen = &lruvec->lrugen; in state_is_valid()
5028 if (lrugen->enabled) { in state_is_valid()
5032 if (!list_empty(&lruvec->lists[lru])) in state_is_valid()
5039 if (!list_empty(&lrugen->folios[gen][type][zone])) in state_is_valid()
5055 struct list_head *head = &lruvec->lists[lru]; in fill_evictable()
5064 VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio); in fill_evictable()
5070 if (!--remaining) in fill_evictable()
5084 struct list_head *head = &lruvec->lrugen.folios[gen][type][zone]; in drain_evictable()
5099 if (!--remaining) in drain_evictable()
5133 spin_lock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5138 lruvec->lrugen.enabled = enabled; in lru_gen_change_state()
5141 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5143 spin_lock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5146 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5167 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5174 return -EINVAL; in min_ttl_ms_store()
5199 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5209 caps = -1; in enabled_store()
5211 return -EINVAL; in enabled_store()
5249 m->private = kvmalloc(PATH_MAX, GFP_KERNEL); in lru_gen_seq_start()
5250 if (!m->private) in lru_gen_seq_start()
5251 return ERR_PTR(-ENOMEM); in lru_gen_seq_start()
5258 if (!nr_to_skip--) in lru_gen_seq_start()
5271 kvfree(m->private); in lru_gen_seq_stop()
5272 m->private = NULL; in lru_gen_seq_stop()
5277 int nid = lruvec_pgdat(v)->node_id; in lru_gen_seq_next()
5301 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_seq_show_full()
5312 n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]); in lru_gen_seq_show_full()
5313 n[1] = READ_ONCE(lrugen->avg_total[type][tier]); in lru_gen_seq_show_full()
5316 n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]); in lru_gen_seq_show_full()
5317 n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]); in lru_gen_seq_show_full()
5318 n[2] = READ_ONCE(lrugen->protected[hist][type][tier]); in lru_gen_seq_show_full()
5337 n = READ_ONCE(mm_state->stats[hist][i]); in lru_gen_seq_show_full()
5340 n = READ_ONCE(mm_state->stats[hist][i]); in lru_gen_seq_show_full()
5348 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5352 bool full = !debugfs_real_fops(m->file)->write; in lru_gen_seq_show()
5354 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_seq_show()
5355 int nid = lruvec_pgdat(lruvec)->node_id; in lru_gen_seq_show()
5361 const char *path = memcg ? m->private : ""; in lru_gen_seq_show()
5365 cgroup_path(memcg->css.cgroup, m->private, PATH_MAX); in lru_gen_seq_show()
5375 seq = max_seq - MAX_NR_GENS + 1; in lru_gen_seq_show()
5382 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); in lru_gen_seq_show()
5384 seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth)); in lru_gen_seq_show()
5391 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in lru_gen_seq_show()
5418 return -EINVAL; in run_aging()
5420 return try_to_inc_max_seq(lruvec, max_seq, swappiness, force_scan) ? 0 : -EEXIST; in run_aging()
5429 return -EINVAL; in run_eviction()
5431 sc->nr_reclaimed = 0; in run_eviction()
5439 if (sc->nr_reclaimed >= nr_to_reclaim) in run_eviction()
5448 return -EINTR; in run_eviction()
5455 int err = -EINVAL; in run_cmd()
5459 return -EINVAL; in run_cmd()
5471 return -EINVAL; in run_cmd()
5488 case '-': in run_cmd()
5498 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5506 int err = -EINVAL; in lru_gen_seq_write()
5511 .reclaim_idx = MAX_NR_ZONES - 1, in lru_gen_seq_write()
5517 return -ENOMEM; in lru_gen_seq_write()
5521 return -EFAULT; in lru_gen_seq_write()
5528 err = -ENOMEM; in lru_gen_seq_write()
5542 unsigned int swappiness = -1; in lru_gen_seq_write()
5543 unsigned long opt = -1; in lru_gen_seq_write()
5552 err = -EINVAL; in lru_gen_seq_write()
5599 spin_lock_init(&pgdat->memcg_lru.lock); in lru_gen_init_pgdat()
5603 INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i); in lru_gen_init_pgdat()
5611 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_init_lruvec()
5614 lrugen->max_seq = MIN_NR_GENS + 1; in lru_gen_init_lruvec()
5615 lrugen->enabled = lru_gen_enabled(); in lru_gen_init_lruvec()
5618 lrugen->timestamps[i] = jiffies; in lru_gen_init_lruvec()
5621 INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]); in lru_gen_init_lruvec()
5624 mm_state->seq = MIN_NR_GENS; in lru_gen_init_lruvec()
5636 INIT_LIST_HEAD(&mm_list->fifo); in lru_gen_init_memcg()
5637 spin_lock_init(&mm_list->lock); in lru_gen_init_memcg()
5646 VM_WARN_ON_ONCE(mm_list && !list_empty(&mm_list->fifo)); in lru_gen_exit_memcg()
5652 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, in lru_gen_exit_memcg()
5653 sizeof(lruvec->lrugen.nr_pages))); in lru_gen_exit_memcg()
5655 lruvec->lrugen.list.next = LIST_POISON1; in lru_gen_exit_memcg()
5661 bitmap_free(mm_state->filters[i]); in lru_gen_exit_memcg()
5662 mm_state->filters[i] = NULL; in lru_gen_exit_memcg()
5710 unsigned long nr_to_reclaim = sc->nr_to_reclaim; in shrink_lruvec()
5736 sc->priority == DEF_PRIORITY); in shrink_lruvec()
5747 nr[lru] -= nr_to_scan; in shrink_lruvec()
5799 nr_scanned = targets[lru] - nr[lru]; in shrink_lruvec()
5800 nr[lru] = targets[lru] * (100 - percentage) / 100; in shrink_lruvec()
5801 nr[lru] -= min(nr[lru], nr_scanned); in shrink_lruvec()
5804 nr_scanned = targets[lru] - nr[lru]; in shrink_lruvec()
5805 nr[lru] = targets[lru] * (100 - percentage) / 100; in shrink_lruvec()
5806 nr[lru] -= min(nr[lru], nr_scanned); in shrink_lruvec()
5809 sc->nr_reclaimed += nr_reclaimed; in shrink_lruvec()
5824 if (gfp_compaction_allowed(sc->gfp_mask) && sc->order && in in_reclaim_compaction()
5825 (sc->order > PAGE_ALLOC_COSTLY_ORDER || in in_reclaim_compaction()
5826 sc->priority < DEF_PRIORITY - 2)) in in_reclaim_compaction()
5833 * Reclaim/compaction is used for high-order allocation requests. It reclaims
5834 * order-0 pages before compacting the zone. should_continue_reclaim() returns
5857 * first, by assuming that zero delta of sc->nr_scanned means full LRU in should_continue_reclaim()
5859 * where always a non-zero amount of pages were scanned. in should_continue_reclaim()
5865 for (z = 0; z <= sc->reclaim_idx; z++) { in should_continue_reclaim()
5866 struct zone *zone = &pgdat->node_zones[z]; in should_continue_reclaim()
5871 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), in should_continue_reclaim()
5872 sc->reclaim_idx, 0)) in should_continue_reclaim()
5875 if (compaction_suitable(zone, sc->order, sc->reclaim_idx)) in should_continue_reclaim()
5883 pages_for_compaction = compact_gap(sc->order); in should_continue_reclaim()
5885 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) in should_continue_reclaim()
5893 struct mem_cgroup *target_memcg = sc->target_mem_cgroup; in shrink_node_memcgs()
5909 if (current_is_kswapd() || sc->memcg_full_walk) in shrink_node_memcgs()
5919 * This loop can become CPU-bound when target memcgs in shrink_node_memcgs()
5920 * aren't eligible for reclaim - either because they in shrink_node_memcgs()
5941 if (!sc->memcg_low_reclaim) { in shrink_node_memcgs()
5942 sc->memcg_low_skipped = 1; in shrink_node_memcgs()
5948 reclaimed = sc->nr_reclaimed; in shrink_node_memcgs()
5949 scanned = sc->nr_scanned; in shrink_node_memcgs()
5953 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, in shrink_node_memcgs()
5954 sc->priority); in shrink_node_memcgs()
5957 if (!sc->proactive) in shrink_node_memcgs()
5958 vmpressure(sc->gfp_mask, memcg, false, in shrink_node_memcgs()
5959 sc->nr_scanned - scanned, in shrink_node_memcgs()
5960 sc->nr_reclaimed - reclaimed); in shrink_node_memcgs()
5963 if (partial && sc->nr_reclaimed >= sc->nr_to_reclaim) { in shrink_node_memcgs()
5977 memset(&sc->nr, 0, sizeof(sc->nr)); in shrink_node()
5982 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in shrink_node()
5985 memset(&sc->nr, 0, sizeof(sc->nr)); in shrink_node()
5987 nr_reclaimed = sc->nr_reclaimed; in shrink_node()
5988 nr_scanned = sc->nr_scanned; in shrink_node()
5996 nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed; in shrink_node()
5999 if (!sc->proactive) in shrink_node()
6000 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, in shrink_node()
6001 sc->nr_scanned - nr_scanned, nr_node_reclaimed); in shrink_node()
6009 * it implies that the long-lived page allocation rate in shrink_node()
6024 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) in shrink_node()
6025 set_bit(PGDAT_WRITEBACK, &pgdat->flags); in shrink_node()
6028 if (sc->nr.unqueued_dirty && in shrink_node()
6029 sc->nr.unqueued_dirty == sc->nr.file_taken) in shrink_node()
6030 set_bit(PGDAT_DIRTY, &pgdat->flags); in shrink_node()
6039 if (sc->nr.immediate) in shrink_node()
6050 if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) { in shrink_node()
6052 set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags); in shrink_node()
6055 set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags); in shrink_node()
6065 !sc->hibernation_mode && in shrink_node()
6066 (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) || in shrink_node()
6067 test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags))) in shrink_node()
6080 pgdat->kswapd_failures = 0; in shrink_node()
6081 else if (sc->cache_trim_mode) in shrink_node()
6082 sc->cache_trim_mode_failed = 1; in shrink_node()
6086 * Returns true if compaction should go ahead for a costly-order request, or
6094 if (!gfp_compaction_allowed(sc->gfp_mask)) in compaction_ready()
6098 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), in compaction_ready()
6099 sc->reclaim_idx, 0)) in compaction_ready()
6103 if (!compaction_suitable(zone, sc->order, sc->reclaim_idx)) in compaction_ready()
6115 watermark = high_wmark_pages(zone) + compact_gap(sc->order); in compaction_ready()
6117 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); in compaction_ready()
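
compaction_ready() stops reclaim for a costly order once a zone has a cushion of compact_gap(order) free pages above the high watermark, where compact_gap() is twice the allocation size so both the migration sources and targets fit. A small worked example of that watermark, assuming compact_gap(order) == 2UL << order, 4KB pages, and a hypothetical high watermark:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* room for the allocation itself plus an equal amount of migration targets */
static unsigned long compact_gap(unsigned int order)
{
	return 2UL << order;
}

int main(void)
{
	unsigned long high_wmark = 12800;	/* hypothetical: 50MB high watermark */
	unsigned int order = 9;			/* one 2MB THP */

	unsigned long watermark = high_wmark + compact_gap(order);

	printf("compact_gap(%u) = %lu pages (%lu MB)\n", order, compact_gap(order),
	       compact_gap(order) * PAGE_SIZE >> 20);
	printf("reclaim keeps going until the zone has %lu free pages\n", watermark);
	return 0;
}
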
6126 if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) { in consider_reclaim_throttle()
6129 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS]; in consider_reclaim_throttle()
6146 if (sc->priority == 1 && !sc->nr_reclaimed) in consider_reclaim_throttle()
6151 * This is the direct reclaim path, for page-allocating processes. We only
6173 orig_mask = sc->gfp_mask; in shrink_zones()
6175 sc->gfp_mask |= __GFP_HIGHMEM; in shrink_zones()
6176 sc->reclaim_idx = gfp_zone(sc->gfp_mask); in shrink_zones()
6180 sc->reclaim_idx, sc->nodemask) { in shrink_zones()
6194 * non-zero order, only frequent costly order in shrink_zones()
6200 sc->order > PAGE_ALLOC_COSTLY_ORDER && in shrink_zones()
6202 sc->compaction_ready = true; in shrink_zones()
6212 if (zone->zone_pgdat == last_pgdat) in shrink_zones()
6222 nr_soft_reclaimed = memcg1_soft_limit_reclaim(zone->zone_pgdat, in shrink_zones()
6223 sc->order, sc->gfp_mask, in shrink_zones()
6225 sc->nr_reclaimed += nr_soft_reclaimed; in shrink_zones()
6226 sc->nr_scanned += nr_soft_scanned; in shrink_zones()
6231 first_pgdat = zone->zone_pgdat; in shrink_zones()
6234 if (zone->zone_pgdat == last_pgdat) in shrink_zones()
6236 last_pgdat = zone->zone_pgdat; in shrink_zones()
6237 shrink_node(zone->zone_pgdat, sc); in shrink_zones()
6247 sc->gfp_mask = orig_mask; in shrink_zones()
6260 target_lruvec->refaults[WORKINGSET_ANON] = refaults; in snapshot_refaults()
6262 target_lruvec->refaults[WORKINGSET_FILE] = refaults; in snapshot_refaults()
6272 * high - the zone may be full of dirty or under-writeback pages, which this
6284 int initial_priority = sc->priority; in do_try_to_free_pages()
6292 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); in do_try_to_free_pages()
6295 if (!sc->proactive) in do_try_to_free_pages()
6296 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, in do_try_to_free_pages()
6297 sc->priority); in do_try_to_free_pages()
6298 sc->nr_scanned = 0; in do_try_to_free_pages()
6301 if (sc->nr_reclaimed >= sc->nr_to_reclaim) in do_try_to_free_pages()
6304 if (sc->compaction_ready) in do_try_to_free_pages()
6311 if (sc->priority < DEF_PRIORITY - 2) in do_try_to_free_pages()
6312 sc->may_writepage = 1; in do_try_to_free_pages()
6313 } while (--sc->priority >= 0); in do_try_to_free_pages()
6316 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, in do_try_to_free_pages()
6317 sc->nodemask) { in do_try_to_free_pages()
6318 if (zone->zone_pgdat == last_pgdat) in do_try_to_free_pages()
6320 last_pgdat = zone->zone_pgdat; in do_try_to_free_pages()
6322 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); in do_try_to_free_pages()
6327 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, in do_try_to_free_pages()
6328 zone->zone_pgdat); in do_try_to_free_pages()
6329 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); in do_try_to_free_pages()
6335 if (sc->nr_reclaimed) in do_try_to_free_pages()
6336 return sc->nr_reclaimed; in do_try_to_free_pages()
6339 if (sc->compaction_ready) in do_try_to_free_pages()
6351 if (!sc->memcg_full_walk) { in do_try_to_free_pages()
6352 sc->priority = initial_priority; in do_try_to_free_pages()
6353 sc->memcg_full_walk = 1; in do_try_to_free_pages()
6366 if (sc->skipped_deactivate) { in do_try_to_free_pages()
6367 sc->priority = initial_priority; in do_try_to_free_pages()
6368 sc->force_deactivate = 1; in do_try_to_free_pages()
6369 sc->skipped_deactivate = 0; in do_try_to_free_pages()
6374 if (sc->memcg_low_skipped) { in do_try_to_free_pages()
6375 sc->priority = initial_priority; in do_try_to_free_pages()
6376 sc->force_deactivate = 0; in do_try_to_free_pages()
6377 sc->memcg_low_reclaim = 1; in do_try_to_free_pages()
6378 sc->memcg_low_skipped = 0; in do_try_to_free_pages()
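
The tail of do_try_to_free_pages() is a retry ladder: when nothing was reclaimed it restarts at the initial priority with progressively more aggressive settings, first a full memcg walk, then forced deactivation, and finally reclaim below memory.low protection. A condensed sketch of that control flow over an invented sc_model struct (the real function also returns early for compaction readiness and other cases not shown here):

#include <stdbool.h>

struct sc_model {
    int priority, initial_priority;
    unsigned long nr_reclaimed;
    bool memcg_full_walk, force_deactivate, skipped_deactivate;
    bool memcg_low_reclaim, memcg_low_skipped;
};

/* Returns true if the caller should run the priority loop again with the
 * adjusted settings, mirroring the retry ladder above. */
static bool retry_reclaim(struct sc_model *sc)
{
    if (sc->nr_reclaimed)
        return false;                    /* made progress, done */

    if (!sc->memcg_full_walk) {          /* 1st retry: walk every memcg */
        sc->priority = sc->initial_priority;
        sc->memcg_full_walk = true;
        return true;
    }
    if (sc->skipped_deactivate) {        /* 2nd retry: force deactivation */
        sc->priority = sc->initial_priority;
        sc->force_deactivate = true;
        sc->skipped_deactivate = false;
        return true;
    }
    if (sc->memcg_low_skipped) {         /* last resort: dip below memory.low */
        sc->priority = sc->initial_priority;
        sc->force_deactivate = false;
        sc->memcg_low_reclaim = true;
        sc->memcg_low_skipped = false;
        return true;
    }
    return false;
}
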
6393 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in allow_direct_reclaim()
6397 zone = &pgdat->node_zones[i]; in allow_direct_reclaim()
6415 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { in allow_direct_reclaim()
6416 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) in allow_direct_reclaim()
6417 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); in allow_direct_reclaim()
6419 wake_up_interruptible(&pgdat->kswapd_wait); in allow_direct_reclaim()
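
allow_direct_reclaim() sums free pages and min watermarks across the node's usable zones and lets direct reclaim proceed only while free memory stays above half of that combined reserve, waking kswapd otherwise. A rough stand-alone approximation; zone_stat is invented, and the kernel additionally skips zones with no reclaimable pages and caps the zone index at ZONE_NORMAL:

#include <stdbool.h>
#include <stddef.h>

/* Invented per-zone snapshot; only the fields the check needs. */
struct zone_stat {
    unsigned long managed_pages;
    unsigned long free_pages;
    unsigned long min_wmark;
};

/* Direct reclaim may proceed while free memory across the node exceeds
 * half of the summed min watermarks, the same 50% rule applied before
 * throttling callers on pfmemalloc_wait. */
static bool direct_reclaim_allowed(const struct zone_stat *zones, size_t n)
{
    unsigned long reserve = 0, free = 0;

    for (size_t i = 0; i < n; i++) {
        if (!zones[i].managed_pages)
            continue;                   /* empty zones contribute nothing */
        reserve += zones[i].min_wmark;
        free    += zones[i].free_pages;
    }
    if (!reserve)
        return true;                    /* no reserves, nothing to protect */
    return free > reserve / 2;
}
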
6448 if (current->flags & PF_KTHREAD) in throttle_direct_reclaim()
6478 pgdat = zone->zone_pgdat; in throttle_direct_reclaim()
6500 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
6504 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
6571 .reclaim_idx = MAX_NR_ZONES - 1, in mem_cgroup_shrink_node()
6575 WARN_ON_ONCE(!current->reclaim_state); in mem_cgroup_shrink_node()
6612 .reclaim_idx = MAX_NR_ZONES - 1, in try_to_free_mem_cgroup_pages()
6673 * Check for watermark boosts top-down as the higher zones in pgdat_watermark_boosted()
6679 for (i = highest_zoneidx; i >= 0; i--) { in pgdat_watermark_boosted()
6680 zone = pgdat->node_zones + i; in pgdat_watermark_boosted()
6684 if (zone->watermark_boost) in pgdat_watermark_boosted()
6698 unsigned long mark = -1; in pgdat_balanced()
6702 * Check watermarks bottom-up as lower zones are more likely to in pgdat_balanced()
6706 zone = pgdat->node_zones + i; in pgdat_balanced()
6721 * need balancing by definition. This can happen if a zone-restricted in pgdat_balanced()
6724 if (mark == -1) in pgdat_balanced()
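
pgdat_balanced() walks the zones bottom-up and declares the node balanced as soon as one populated zone meets its high watermark; if no populated zone exists up to highest_zoneidx (mark stays -1), the node is balanced by definition. A simplified model of that loop; zone_info and the order-0 watermark check are invented stand-ins for the real zone_watermark_ok_safe() test:

#include <stdbool.h>

/* Invented per-zone snapshot for the balance check. */
struct zone_info {
    unsigned long managed_pages;
    unsigned long free_pages;
    unsigned long high_wmark;
};

/* Simplified order-0 watermark test; the kernel also accounts for the
 * lowmem reserve and, with memory tiering, a promotion watermark. */
static bool zone_ok(const struct zone_info *z)
{
    return z->free_pages >= z->high_wmark;
}

static bool pgdat_balanced_model(const struct zone_info *zones,
                                 int highest_zoneidx)
{
    bool checked_any = false;

    for (int i = 0; i <= highest_zoneidx; i++) {
        if (!zones[i].managed_pages)
            continue;                   /* unpopulated zones are skipped */
        checked_any = true;
        if (zone_ok(&zones[i]))
            return true;                /* one balanced zone is enough */
    }
    /* No eligible zone at all: the node needs no balancing by definition. */
    return !checked_any;
}
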
6735 clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags); in clear_pgdat_congested()
6736 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); in clear_pgdat_congested()
6737 clear_bit(PGDAT_DIRTY, &pgdat->flags); in clear_pgdat_congested()
6738 clear_bit(PGDAT_WRITEBACK, &pgdat->flags); in clear_pgdat_congested()
6763 if (waitqueue_active(&pgdat->pfmemalloc_wait)) in prepare_kswapd_sleep()
6764 wake_up_all(&pgdat->pfmemalloc_wait); in prepare_kswapd_sleep()
6767 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in prepare_kswapd_sleep()
6791 unsigned long nr_reclaimed = sc->nr_reclaimed; in kswapd_shrink_node()
6794 sc->nr_to_reclaim = 0; in kswapd_shrink_node()
6795 for (z = 0; z <= sc->reclaim_idx; z++) { in kswapd_shrink_node()
6796 zone = pgdat->node_zones + z; in kswapd_shrink_node()
6800 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); in kswapd_shrink_node()
6811 * high-order allocations. If twice the allocation size has been in kswapd_shrink_node()
6812 * reclaimed then recheck watermarks only at order-0 to prevent in kswapd_shrink_node()
6813 * excessive reclaim. Assume that a process requested a high-order in kswapd_shrink_node()
6816 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order)) in kswapd_shrink_node()
6817 sc->order = 0; in kswapd_shrink_node()
6820 return max(sc->nr_scanned, sc->nr_reclaimed - nr_reclaimed) >= sc->nr_to_reclaim; in kswapd_shrink_node()
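
kswapd_shrink_node() builds its target by summing, per eligible zone, the larger of the high watermark and SWAP_CLUSTER_MAX, and drops back to order-0 once twice the requested allocation size has been reclaimed so a high-order wakeup cannot drive excessive reclaim. A sketch of both rules; kzone and the helpers are invented, and SWAP_CLUSTER_MAX is 32 pages in the kernel:

/* Kernel's SWAP_CLUSTER_MAX batch size, in pages. */
#define SWAP_CLUSTER_MAX_MODEL 32UL

/* Invented per-zone snapshot. */
struct kzone {
    unsigned long managed_pages;
    unsigned long high_wmark;
};

static unsigned long max_ul(unsigned long a, unsigned long b)
{
    return a > b ? a : b;
}

/* Sum a per-zone floor into one node-wide reclaim target. */
static unsigned long kswapd_reclaim_target(const struct kzone *zones, int reclaim_idx)
{
    unsigned long target = 0;

    for (int z = 0; z <= reclaim_idx; z++) {
        if (!zones[z].managed_pages)
            continue;
        target += max_ul(zones[z].high_wmark, SWAP_CLUSTER_MAX_MODEL);
    }
    return target;
}

/* Once compact_gap(order) = 2UL << order pages have been reclaimed for a
 * high-order wakeup, fall back to order-0 watermark checks only. */
static unsigned int maybe_reset_order(unsigned int order, unsigned long nr_reclaimed)
{
    return (order && nr_reclaimed >= (2UL << order)) ? 0 : order;
}
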
6831 zone = pgdat->node_zones + i; in update_reclaim_active()
6837 set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); in update_reclaim_active()
6839 clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); in update_reclaim_active()
6862 * kswapd scans the zones in the highmem->normal->dma direction. It skips
6897 zone = pgdat->node_zones + i; in balance_pgdat()
6901 nr_boost_reclaim += zone->watermark_boost; in balance_pgdat()
6902 zone_boosts[i] = zone->watermark_boost; in balance_pgdat()
6921 * purpose -- on 64-bit systems it is expected that in balance_pgdat()
6922 * buffer_heads are stripped during active rotation. On 32-bit in balance_pgdat()
6929 for (i = MAX_NR_ZONES - 1; i >= 0; i--) { in balance_pgdat()
6930 zone = pgdat->node_zones + i; in balance_pgdat()
6944 * re-evaluate if boosting is required when kswapd next wakes. in balance_pgdat()
6961 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2) in balance_pgdat()
6966 * intent is to relieve pressure not issue sub-optimal IO in balance_pgdat()
6984 if (sc.priority < DEF_PRIORITY - 2) in balance_pgdat()
7007 if (waitqueue_active(&pgdat->pfmemalloc_wait) && in balance_pgdat()
7009 wake_up_all(&pgdat->pfmemalloc_wait); in balance_pgdat()
7022 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; in balance_pgdat()
7023 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed); in balance_pgdat()
7034 sc.priority--; in balance_pgdat()
7048 pgdat->kswapd_failures++; in balance_pgdat()
7062 zone = pgdat->node_zones + i; in balance_pgdat()
7063 spin_lock_irqsave(&zone->lock, flags); in balance_pgdat()
7064 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); in balance_pgdat()
7065 spin_unlock_irqrestore(&zone->lock, flags); in balance_pgdat()
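
The boost handling in balance_pgdat() snapshots each zone's watermark_boost up front, credits every reclaim pass against the outstanding boost, and finally subtracts the snapshotted amount from the zones (under the zone spinlock in the kernel). A lock-free sketch of that bookkeeping with invented names:

#include <stddef.h>

/* Invented stand-in: only the boost field matters here. */
struct boost_zone {
    unsigned long watermark_boost;
};

static unsigned long min_ul(unsigned long a, unsigned long b)
{
    return a < b ? a : b;
}

/* Snapshot the per-zone boosts and return the total left to reclaim. */
static unsigned long snapshot_boosts(const struct boost_zone *zones,
                                     unsigned long *saved, size_t n)
{
    unsigned long total = 0;

    for (size_t i = 0; i < n; i++) {
        saved[i] = zones[i].watermark_boost;
        total += saved[i];
    }
    return total;
}

/* After each pass, credit what was reclaimed against the outstanding boost. */
static unsigned long account_boost_reclaim(unsigned long nr_boost_reclaim,
                                           unsigned long nr_reclaimed)
{
    return nr_boost_reclaim - min_ul(nr_boost_reclaim, nr_reclaimed);
}

/* Finally, drop each zone's boost by at most the amount snapshotted. */
static void clear_boosts(struct boost_zone *zones, const unsigned long *saved,
                         size_t n)
{
    for (size_t i = 0; i < n; i++)
        zones[i].watermark_boost -= min_ul(zones[i].watermark_boost, saved[i]);
}
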
7090 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to
7099 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in kswapd_highest_zoneidx()
7113 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
7145 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, in kswapd_try_to_sleep()
7149 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) in kswapd_try_to_sleep()
7150 WRITE_ONCE(pgdat->kswapd_order, reclaim_order); in kswapd_try_to_sleep()
7153 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
7154 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
7163 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); in kswapd_try_to_sleep()
7170 * per-cpu vmstat threshold while kswapd is awake and restore in kswapd_try_to_sleep()
7185 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
7198 * If there are applications that are active memory-allocators
7204 unsigned int highest_zoneidx = MAX_NR_ZONES - 1; in kswapd()
7220 tsk->flags |= PF_MEMALLOC | PF_KSWAPD; in kswapd()
7223 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
7224 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
7225 atomic_set(&pgdat->nr_writeback_throttled, 0); in kswapd()
7229 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
7238 alloc_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
7241 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
7242 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
7248 * We can speed up thawing tasks if we don't call balance_pgdat in kswapd()
7255 * Reclaim begins at the requested order but if a high-order in kswapd()
7257 * order-0. If that happens, kswapd will consider sleeping in kswapd()
7262 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, in kswapd()
7270 tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD); in kswapd()
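
Stripped of freezer, tracing and sleep details, each kswapd iteration consumes the wakeup parameters stored in the pgdat, resets them so a wakeup arriving during reclaim is recorded afresh, and balances the node; balancing may stop at a lower order than requested, which the caller compares against the requested order when deciding whether to sleep. A schematic, single-node model with invented stand-ins for the pgdat fields:

/* Invented sentinel: "no zone index recorded". */
#define MAX_NR_ZONES_MODEL 5

/* Invented stand-in for the wakeup parameters kept in the pgdat. */
struct kswapd_state {
    unsigned int kswapd_order;
    int kswapd_highest_zoneidx;
};

/* One schematic pass: consume the wakeup parameters, reset them, then
 * balance.  The returned order may be lower than the requested one. */
static unsigned int kswapd_once(struct kswapd_state *st,
        unsigned int (*balance)(unsigned int order, int highest_zoneidx))
{
    unsigned int alloc_order = st->kswapd_order;
    int highest = (st->kswapd_highest_zoneidx == MAX_NR_ZONES_MODEL)
                        ? MAX_NR_ZONES_MODEL - 1
                        : st->kswapd_highest_zoneidx;

    st->kswapd_order = 0;
    st->kswapd_highest_zoneidx = MAX_NR_ZONES_MODEL;

    return balance(alloc_order, highest);
}
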
7276 * A zone is low on free memory or too fragmented for high-order memory. If
7294 pgdat = zone->zone_pgdat; in wakeup_kswapd()
7295 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in wakeup_kswapd()
7298 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); in wakeup_kswapd()
7300 if (READ_ONCE(pgdat->kswapd_order) < order) in wakeup_kswapd()
7301 WRITE_ONCE(pgdat->kswapd_order, order); in wakeup_kswapd()
7303 if (!waitqueue_active(&pgdat->kswapd_wait)) in wakeup_kswapd()
7307 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || in wakeup_kswapd()
7312 * fragmented for high-order allocations. Wake up kcompactd in wakeup_kswapd()
7322 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, in wakeup_kswapd()
7324 wake_up_interruptible(&pgdat->kswapd_wait); in wakeup_kswapd()
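
wakeup_kswapd() only ever ratchets the stored order and zone index upward, and skips the wakeup when nobody is waiting, when kswapd has already failed too many times, or when the node is balanced and not watermark-boosted (fragmentation is then left to kcompactd). A sketch of that gate, ignoring the MAX_NR_ZONES "unset" sentinel for brevity; wake_state and the helper names are invented:

#include <stdbool.h>

#define MAX_RECLAIM_RETRIES_MODEL 16    /* kernel's MAX_RECLAIM_RETRIES */

/* Invented stand-in for the pgdat state the wakeup path touches. */
struct wake_state {
    unsigned int kswapd_order;
    int kswapd_highest_zoneidx;
    int kswapd_failures;
    bool waiter_present;                /* waitqueue_active() stand-in */
};

/* Returns true if kswapd should actually be woken.  The stored order and
 * zone index are only ever raised, mirroring the READ_ONCE/WRITE_ONCE
 * ratcheting in the fragment above. */
static bool wakeup_kswapd_model(struct wake_state *st, unsigned int order,
                                int highest_zoneidx, bool balanced, bool boosted)
{
    if (st->kswapd_highest_zoneidx < highest_zoneidx)
        st->kswapd_highest_zoneidx = highest_zoneidx;
    if (st->kswapd_order < order)
        st->kswapd_order = order;

    if (!st->waiter_present)
        return false;                   /* kswapd is already running */
    if (st->kswapd_failures >= MAX_RECLAIM_RETRIES_MODEL)
        return false;                   /* hopeless node, leave to direct reclaim */
    if (balanced && !boosted)
        return false;                   /* nothing to do; kcompactd handles fragmentation */
    return true;
}
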
7329 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
7341 .reclaim_idx = MAX_NR_ZONES - 1, in shrink_all_memory()
7367 * This kswapd start function will be called by init and node-hot-add.
7374 if (!pgdat->kswapd) { in kswapd_run()
7375 pgdat->kswapd = kthread_create_on_node(kswapd, pgdat, nid, "kswapd%d", nid); in kswapd_run()
7376 if (IS_ERR(pgdat->kswapd)) { in kswapd_run()
7379 nid, PTR_ERR(pgdat->kswapd)); in kswapd_run()
7381 pgdat->kswapd = NULL; in kswapd_run()
7383 wake_up_process(pgdat->kswapd); in kswapd_run()
7399 kswapd = pgdat->kswapd; in kswapd_stop()
7402 pgdat->kswapd = NULL; in kswapd_stop()
7423 * If non-zero call node_reclaim when the number of free pages falls below
7458 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; in node_unmapped_file_pages()
7486 return nr_pagecache_reclaimable - delta; in node_pagecache_reclaimable()
7510 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, in __node_reclaim()
7523 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages || in __node_reclaim()
7524 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) { in __node_reclaim()
7531 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); in __node_reclaim()
7559 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && in node_reclaim()
7561 pgdat->min_slab_pages) in node_reclaim()
7567 if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC)) in node_reclaim()
7576 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) in node_reclaim()
7579 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) in node_reclaim()
7583 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); in node_reclaim()
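
node_reclaim() bails out early unless there is enough reclaimable page cache or slab on the node, the allocation may block, the caller is not itself in reclaim, and the node is local to the caller or has no CPUs of its own; a per-node bit then serializes concurrent attempts. A userspace sketch of the same gating, with a C11 atomic_flag standing in for the PGDAT_RECLAIM_LOCKED bit; all names are invented:

#include <stdatomic.h>
#include <stdbool.h>

/* Invented stand-in for the node state the gate inspects.  Initialise
 * reclaim_locked with ATOMIC_FLAG_INIT. */
struct node_reclaim_gate {
    unsigned long reclaimable_pagecache;
    unsigned long reclaimable_slab;
    unsigned long min_unmapped_pages;
    unsigned long min_slab_pages;
    atomic_flag   reclaim_locked;       /* models PGDAT_RECLAIM_LOCKED */
};

/* Mirrors the early-return ladder above and runs reclaim_fn() for at most
 * one caller per node at a time. */
static bool try_node_reclaim(struct node_reclaim_gate *g, bool may_block,
                             bool in_reclaim, bool node_allowed,
                             bool (*reclaim_fn)(void))
{
    bool progress;

    if (g->reclaimable_pagecache <= g->min_unmapped_pages &&
        g->reclaimable_slab <= g->min_slab_pages)
        return false;                   /* not enough worth reclaiming locally */
    if (!may_block || in_reclaim)
        return false;                   /* atomic context or nested reclaim */
    if (!node_allowed)
        return false;                   /* remote node with its own CPUs */
    if (atomic_flag_test_and_set(&g->reclaim_locked))
        return false;                   /* another caller is already reclaiming */

    progress = reclaim_fn();
    atomic_flag_clear(&g->reclaim_locked);
    return progress;
}
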
7595 * check_move_unevictable_folios - Move evictable folios to appropriate zone
7610 for (i = 0; i < fbatch->nr; i++) { in check_move_unevictable_folios()
7611 struct folio *folio = fbatch->folios[i]; in check_move_unevictable_folios()