Lines Matching +full:no +full:- +full:ref +full:- +full:high +full:- +full:z

1 // SPDX-License-Identifier: GPL-2.0
8 * to bring the system back to freepages.high: 2.4.97, Rik van Riel.
31 #include <linux/backing-dev.h>
45 #include <linux/memory-tiers.h>
189 if ((_folio)->lru.prev != _base) { \
192 prev = lru_to_folio(&(_folio->lru)); \
193 prefetchw(&prev->_field); \
210 return sc->target_mem_cgroup; in cgroup_reclaim()
219 return !sc->target_mem_cgroup || mem_cgroup_is_root(sc->target_mem_cgroup); in root_reclaim()
223 * writeback_throttling_sane - is the usual dirty throttling mechanism available?
248 if (sc->proactive && sc->proactive_swappiness) in sc_swappiness()
249 return *sc->proactive_swappiness; in sc_swappiness()
278 WARN_ON_ONCE(rs && task->reclaim_state); in set_task_reclaim_state()
280 /* Check for the nulling of an already-nulled member */ in set_task_reclaim_state()
281 WARN_ON_ONCE(!rs && !task->reclaim_state); in set_task_reclaim_state()
283 task->reclaim_state = rs; in set_task_reclaim_state()
287 * flush_reclaim_state(): add pages reclaimed outside of LRU-based reclaim to
288 * scan_control->nr_reclaimed.
293 * Currently, reclaim_state->reclaimed includes three types of pages in flush_reclaim_state()
300 * single memcg. For example, a memcg-aware shrinker can free one object in flush_reclaim_state()
303 * overestimating the reclaimed amount (potentially under-reclaiming). in flush_reclaim_state()
305 * Only count such pages for global reclaim to prevent under-reclaiming in flush_reclaim_state()
320 if (current->reclaim_state && root_reclaim(sc)) { in flush_reclaim_state()
321 sc->nr_reclaimed += current->reclaim_state->reclaimed; in flush_reclaim_state()
322 current->reclaim_state->reclaimed = 0; in flush_reclaim_state()
330 if (sc && sc->no_demotion) in can_demote()
344 * For non-memcg reclaim, is there in can_reclaim_anon_pages()
382 * lruvec_lru_size - Returns the number of pages on the given LRU list.
385 * @zone_idx: zones to consider (use MAX_NR_ZONES - 1 for the whole LRU list)
394 struct zone *zone = &lruvec_pgdat(lruvec)->node_zones[zid]; in lruvec_lru_size()
439 BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != in reclaimer_offset()
440 PGDEMOTE_DIRECT - PGDEMOTE_KSWAPD); in reclaimer_offset()
441 BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != in reclaimer_offset()
442 PGDEMOTE_KHUGEPAGED - PGDEMOTE_KSWAPD); in reclaimer_offset()
443 BUILD_BUG_ON(PGSTEAL_DIRECT - PGSTEAL_KSWAPD != in reclaimer_offset()
444 PGSCAN_DIRECT - PGSCAN_KSWAPD); in reclaimer_offset()
445 BUILD_BUG_ON(PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD != in reclaimer_offset()
446 PGSCAN_KHUGEPAGED - PGSCAN_KSWAPD); in reclaimer_offset()
451 return PGSTEAL_KHUGEPAGED - PGSTEAL_KSWAPD; in reclaimer_offset()
452 return PGSTEAL_DIRECT - PGSTEAL_KSWAPD; in reclaimer_offset()
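
The BUILD_BUG_ON() lines above (439-446) pin down an enum-layout invariant: the distance from the KSWAPD variant to the DIRECT and KHUGEPAGED variants must be identical across the PGSTEAL, PGSCAN and PGDEMOTE counter families, so reclaimer_offset() can return one offset that indexes all of them. A minimal standalone sketch of that pattern, using made-up counter names rather than the kernel's vmstat items:

    #include <stdio.h>

    /* Hypothetical counter families laid out with identical spacing. */
    enum counters {
            STEAL_KSWAPD, STEAL_DIRECT, STEAL_KHUGEPAGED,
            SCAN_KSWAPD,  SCAN_DIRECT,  SCAN_KHUGEPAGED,
    };

    /* Compile-time layout check, mirroring the BUILD_BUG_ON()s. */
    _Static_assert(STEAL_DIRECT - STEAL_KSWAPD == SCAN_DIRECT - SCAN_KSWAPD,
                   "counter families must share the same offsets");

    /* Pretend "direct reclaim vs. kswapd": one offset serves every family. */
    static int reclaimer_offset_example(int direct)
    {
            return direct ? STEAL_DIRECT - STEAL_KSWAPD : 0;
    }

    int main(void)
    {
            int off = reclaimer_offset_example(1);

            printf("steal=%d scan=%d\n", STEAL_KSWAPD + off, SCAN_KSWAPD + off);
            return 0;
    }
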
460 * private data at folio->private. in is_page_cache_freeable()
462 return folio_ref_count(folio) - folio_test_private(folio) == in is_page_cache_freeable()
468 * -ENOSPC. We need to propagate that into the address_space for a subsequent
472 * prevents it from being freed up. But we have a ref on the folio and once
496 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in skip_throttle_noprogress()
505 struct zone *zone = pgdat->node_zones + i; in skip_throttle_noprogress()
522 wait_queue_head_t *wqh = &pgdat->reclaim_wait[reason]; in reclaim_throttle()
532 current->flags & (PF_USER_WORKER|PF_KTHREAD)) { in reclaim_throttle()
540 * parallel reclaimers which is a short-lived event so the timeout is in reclaim_throttle()
542 * potentially long-lived events so use a longer timeout. This is shaky in reclaim_throttle()
551 if (atomic_inc_return(&pgdat->nr_writeback_throttled) == 1) { in reclaim_throttle()
552 WRITE_ONCE(pgdat->nr_reclaim_start, in reclaim_throttle()
582 atomic_dec(&pgdat->nr_writeback_throttled); in reclaim_throttle()
584 trace_mm_vmscan_throttled(pgdat->node_id, jiffies_to_usecs(timeout), in reclaim_throttle()
585 jiffies_to_usecs(timeout - ret), in reclaim_throttle()
602 * This is an inaccurate read as the per-cpu deltas may not in __acct_reclaim_writeback()
608 nr_written = node_page_state(pgdat, NR_THROTTLED_WRITTEN) - in __acct_reclaim_writeback()
609 READ_ONCE(pgdat->nr_reclaim_start); in __acct_reclaim_writeback()
612 wake_up(&pgdat->reclaim_wait[VMSCAN_THROTTLE_WRITEBACK]); in __acct_reclaim_writeback()
629 * Calls ->writepage().
636 * will be non-blocking. To prevent this allocation from being in pageout()
655 * folio->mapping == NULL while being dirty with clean buffers. in pageout()
666 if (mapping->a_ops->writepage == NULL) in pageout()
689 res = mapping->a_ops->writepage(&folio->page, &wbc); in pageout()
723 spin_lock(&mapping->host->i_lock); in __remove_mapping()
724 xa_lock_irq(&mapping->i_pages); in __remove_mapping()
729 * a ref to the folio, it may be possible that they dirty it then in __remove_mapping()
744 * escape unnoticed. The smp_rmb is needed to ensure the folio->flags in __remove_mapping()
745 * load is not satisfied before that of folio->_refcount. in __remove_mapping()
760 swp_entry_t swap = folio->swap; in __remove_mapping()
766 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
771 free_folio = mapping->a_ops->free_folio; in __remove_mapping()
792 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
794 inode_add_lru(mapping->host); in __remove_mapping()
795 spin_unlock(&mapping->host->i_lock); in __remove_mapping()
804 xa_unlock_irq(&mapping->i_pages); in __remove_mapping()
806 spin_unlock(&mapping->host->i_lock); in __remove_mapping()
811 * remove_mapping() - Attempt to remove a folio from its mapping.
815 * If the folio is dirty, under writeback or if someone else has a ref
827 * drops the pagecache ref for us without requiring another in remove_mapping()
837 * folio_putback_lru - Put previously isolated folio onto appropriate LRU list.
848 folio_put(folio); /* drop ref from isolate */ in folio_putback_lru()
864 referenced_ptes = folio_referenced(folio, 1, sc->target_mem_cgroup, in folio_check_references()
878 * 2) Skip the non-shared swapbacked folio mapped solely by in folio_check_references()
879 * the exiting or OOM-reaped process. in folio_check_references()
881 if (referenced_ptes == -1) in folio_check_references()
905 * Activate file-backed executable folios after first usage. in folio_check_references()
949 if (mapping && mapping->a_ops->is_dirty_writeback) in folio_check_dirty_writeback()
950 mapping->a_ops->is_dirty_writeback(folio, dirty, writeback); in folio_check_dirty_writeback()
961 allowed_mask = mtc->nmask; in alloc_migrate_folio()
971 mtc->nmask = NULL; in alloc_migrate_folio()
972 mtc->gfp_mask |= __GFP_THISNODE; in alloc_migrate_folio()
977 mtc->gfp_mask &= ~__GFP_THISNODE; in alloc_migrate_folio()
978 mtc->nmask = allowed_mask; in alloc_migrate_folio()
990 int target_nid = next_demotion_node(pgdat->node_id); in demote_folio_list()
1030 * We can "enter_fs" for swap-cache with only __GFP_IO in may_enter_fs()
1032 * ->flags can be updated non-atomically (scan_swap_map_slots), in may_enter_fs()
1057 do_demote_pass = can_demote(pgdat->node_id, sc); in shrink_folio_list()
1070 list_del(&folio->lru); in shrink_folio_list()
1080 sc->nr_scanned += nr_pages; in shrink_folio_list()
1085 if (!sc->may_unmap && folio_mapped(folio)) in shrink_folio_list()
1100 stat->nr_dirty += nr_pages; in shrink_folio_list()
1103 stat->nr_unqueued_dirty += nr_pages; in shrink_folio_list()
1112 stat->nr_congested += nr_pages; in shrink_folio_list()
1162 test_bit(PGDAT_WRITEBACK, &pgdat->flags)) { in shrink_folio_list()
1163 stat->nr_immediate += nr_pages; in shrink_folio_list()
1169 !may_enter_fs(folio, sc->gfp_mask)) { in shrink_folio_list()
1171 * This is slightly racy - in shrink_folio_list()
1175 * interpreted as the readahead flag - but in shrink_folio_list()
1185 stat->nr_writeback += nr_pages; in shrink_folio_list()
1193 list_add_tail(&folio->lru, folio_list); in shrink_folio_list()
1205 stat->nr_ref_keep += nr_pages; in shrink_folio_list()
1218 list_add(&folio->lru, &demote_folios); in shrink_folio_list()
1230 if (!(sc->gfp_mask & __GFP_IO)) in shrink_folio_list()
1242 if (data_race(!list_empty(&folio->_deferred_list) && in shrink_folio_list()
1275 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
1306 stat->nr_unmap_fail += nr_pages; in shrink_folio_list()
1309 stat->nr_lazyfree_fail += nr_pages; in shrink_folio_list()
1316 * No point in trying to reclaim folio if it is pinned. in shrink_folio_list()
1329 * injecting inefficient single-folio I/O into in shrink_folio_list()
1340 !test_bit(PGDAT_DIRTY, &pgdat->flags))) { in shrink_folio_list()
1356 if (!may_enter_fs(folio, sc->gfp_mask)) in shrink_folio_list()
1358 if (!sc->may_writepage) in shrink_folio_list()
1377 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
1383 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
1386 stat->nr_pageout += nr_pages; in shrink_folio_list()
1394 * A synchronous write - probably a ramdisk. Go in shrink_folio_list()
1422 * and mark the folio clean - it can be freed. in shrink_folio_list()
1424 * Rarely, folios can have buffers and no ->mapping. in shrink_folio_list()
1428 * folio is no longer mapped into process address space in shrink_folio_list()
1433 if (!filemap_release_folio(folio, sc->gfp_mask)) in shrink_folio_list()
1468 sc->target_mem_cgroup)) in shrink_folio_list()
1493 sc->nr_scanned -= (nr_pages - 1); in shrink_folio_list()
1505 stat->nr_activate[type] += nr_pages; in shrink_folio_list()
1511 list_add(&folio->lru, &ret_folios); in shrink_folio_list()
1518 stat->nr_demoted = demote_folio_list(&demote_folios, pgdat); in shrink_folio_list()
1519 nr_reclaimed += stat->nr_demoted; in shrink_folio_list()
1541 if (!sc->proactive) { in shrink_folio_list()
1547 pgactivate = stat->nr_activate[0] + stat->nr_activate[1]; in shrink_folio_list()
1579 list_move(&folio->lru, &clean_folios); in reclaim_clean_pages_from_list()
1590 nr_reclaimed = shrink_folio_list(&clean_folios, zone->zone_pgdat, &sc, in reclaim_clean_pages_from_list()
1595 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, in reclaim_clean_pages_from_list()
1596 -(long)nr_reclaimed); in reclaim_clean_pages_from_list()
1603 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_ANON, in reclaim_clean_pages_from_list()
1605 mod_node_page_state(zone->zone_pgdat, NR_ISOLATED_FILE, in reclaim_clean_pages_from_list()
1606 -(long)stat.nr_lazyfree_fail); in reclaim_clean_pages_from_list()
1623 update_lru_size(lruvec, lru, zid, -nr_zone_taken[zid]); in update_lru_sizes()
1631 * lruvec->lru_lock is heavily contended. Some of the functions that
1654 struct list_head *src = &lruvec->lists[lru]; in isolate_lru_folios()
1674 if (folio_zonenum(folio) > sc->reclaim_idx) { in isolate_lru_folios()
1682 * return with no isolated folios if the LRU mostly contains in isolate_lru_folios()
1691 if (!sc->may_unmap && folio_mapped(folio)) in isolate_lru_folios()
1696 * sure the folio is not being freed elsewhere -- the in isolate_lru_folios()
1712 list_move(&folio->lru, move_to); in isolate_lru_folios()
1735 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, nr_to_scan, in isolate_lru_folios()
1742 * folio_isolate_lru() - Try to isolate a folio from its LRU list.
1812 * won't get blocked by normal direct-reclaimers, forming a circular in too_many_isolated()
1815 if (gfp_has_io_fs(sc->gfp_mask)) in too_many_isolated()
1843 list_del(&folio->lru); in move_folios_to_lru()
1845 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1847 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1859 * list_add(&folio->lru,) in move_folios_to_lru()
1860 * list_add(&folio->lru,) in move_folios_to_lru()
1869 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1872 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1891 spin_unlock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1894 spin_lock_irq(&lruvec->lru_lock); in move_folios_to_lru()
1901 * If a kernel thread (such as nfsd for loop-back mounts) services a backing
1907 return !(current->flags & PF_LOCAL_THROTTLE); in current_may_throttle()
1943 spin_lock_irq(&lruvec->lru_lock); in shrink_inactive_list()
1955 spin_unlock_irq(&lruvec->lru_lock); in shrink_inactive_list()
1962 spin_lock_irq(&lruvec->lru_lock); in shrink_inactive_list()
1967 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_inactive_list()
1973 spin_unlock_irq(&lruvec->lru_lock); in shrink_inactive_list()
1975 lru_note_cost(lruvec, file, stat.nr_pageout, nr_scanned - nr_reclaimed); in shrink_inactive_list()
2003 sc->nr.dirty += stat.nr_dirty; in shrink_inactive_list()
2004 sc->nr.congested += stat.nr_congested; in shrink_inactive_list()
2005 sc->nr.unqueued_dirty += stat.nr_unqueued_dirty; in shrink_inactive_list()
2006 sc->nr.writeback += stat.nr_writeback; in shrink_inactive_list()
2007 sc->nr.immediate += stat.nr_immediate; in shrink_inactive_list()
2008 sc->nr.taken += nr_taken; in shrink_inactive_list()
2010 sc->nr.file_taken += nr_taken; in shrink_inactive_list()
2012 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in shrink_inactive_list()
2013 nr_scanned, nr_reclaimed, &stat, sc->priority, file); in shrink_inactive_list()
2028 * It is safe to rely on the active flag against the non-LRU folios in here
2029 * because nobody will play with that bit on a non-LRU folio.
2031 * The downside is that we have to touch folio->_refcount against each folio.
2032 * But we had to alter folio->flags anyway.
2052 spin_lock_irq(&lruvec->lru_lock); in shrink_active_list()
2063 spin_unlock_irq(&lruvec->lru_lock); in shrink_active_list()
2070 list_del(&folio->lru); in shrink_active_list()
2086 if (folio_referenced(folio, 0, sc->target_mem_cgroup, in shrink_active_list()
2089 * Identify referenced, file-backed active folios and in shrink_active_list()
2093 * are not likely to be evicted by use-once streaming in shrink_active_list()
2099 list_add(&folio->lru, &l_active); in shrink_active_list()
2104 folio_clear_active(folio); /* we are de-activating */ in shrink_active_list()
2106 list_add(&folio->lru, &l_inactive); in shrink_active_list()
2112 spin_lock_irq(&lruvec->lru_lock); in shrink_active_list()
2120 __mod_node_page_state(pgdat, NR_ISOLATED_ANON + file, -nr_taken); in shrink_active_list()
2121 spin_unlock_irq(&lruvec->lru_lock); in shrink_active_list()
2125 trace_mm_vmscan_lru_shrink_active(pgdat->node_id, nr_taken, nr_activate, in shrink_active_list()
2126 nr_deactivate, nr_rotated, sc->priority, file); in shrink_active_list()
2146 list_del(&folio->lru); in reclaim_folio_list()
2171 list_move(&folio->lru, &node_folio_list); in reclaim_pages()
2190 if (sc->may_deactivate & (1 << is_file_lru(lru))) in shrink_list()
2193 sc->skipped_deactivate = 1; in shrink_list()
2205 * to the established workingset on the scan-resistant active list,
2219 * -------------------------------------
2238 gb = (inactive + active) >> (30 - PAGE_SHIFT); in inactive_is_low()
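
Fragments 2205-2238 come from the comment above inactive_is_low() and its size conversion: the table maps total list size to a target inactive:active ratio, and line 2238 turns pages into gigabytes before the ratio is derived. A userspace sketch of that calculation, assuming 4KiB pages and the int_sqrt(10 * gb) rule the surrounding code uses:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assumed 4KiB pages */

    static unsigned long int_sqrt(unsigned long x)  /* floor(sqrt(x)) */
    {
            unsigned long r = 0;

            while ((r + 1) * (r + 1) <= x)
                    r++;
            return r;
    }

    /* Target inactive:active ratio, as in the table above. */
    static unsigned long inactive_ratio(unsigned long inactive, unsigned long active)
    {
            unsigned long gb = (inactive + active) >> (30 - PAGE_SHIFT);

            return gb ? int_sqrt(10 * gb) : 1;
    }

    int main(void)
    {
            unsigned long gb_pages = 1UL << (30 - PAGE_SHIFT);      /* pages per GiB */

            /* 1GiB total -> ratio 3; 100GiB total -> ratio 31, matching the table. */
            printf("%lu %lu\n", inactive_ratio(gb_pages / 2, gb_pages / 2),
                   inactive_ratio(50 * gb_pages, 50 * gb_pages));
            return 0;
    }
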
2262 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in prepare_scan_control()
2265 * Flush the memory cgroup stats in rate-limited way as we don't need in prepare_scan_control()
2269 mem_cgroup_flush_stats_ratelimited(sc->target_mem_cgroup); in prepare_scan_control()
2274 spin_lock_irq(&target_lruvec->lru_lock); in prepare_scan_control()
2275 sc->anon_cost = target_lruvec->anon_cost; in prepare_scan_control()
2276 sc->file_cost = target_lruvec->file_cost; in prepare_scan_control()
2277 spin_unlock_irq(&target_lruvec->lru_lock); in prepare_scan_control()
2283 if (!sc->force_deactivate) { in prepare_scan_control()
2293 if (refaults != target_lruvec->refaults[WORKINGSET_ANON] || in prepare_scan_control()
2295 sc->may_deactivate |= DEACTIVATE_ANON; in prepare_scan_control()
2297 sc->may_deactivate &= ~DEACTIVATE_ANON; in prepare_scan_control()
2301 if (refaults != target_lruvec->refaults[WORKINGSET_FILE] || in prepare_scan_control()
2303 sc->may_deactivate |= DEACTIVATE_FILE; in prepare_scan_control()
2305 sc->may_deactivate &= ~DEACTIVATE_FILE; in prepare_scan_control()
2307 sc->may_deactivate = DEACTIVATE_ANON | DEACTIVATE_FILE; in prepare_scan_control()
2315 if (file >> sc->priority && !(sc->may_deactivate & DEACTIVATE_FILE) && in prepare_scan_control()
2316 !sc->no_cache_trim_mode) in prepare_scan_control()
2317 sc->cache_trim_mode = 1; in prepare_scan_control()
2319 sc->cache_trim_mode = 0; in prepare_scan_control()
2333 int z; in prepare_scan_control() local
2335 free = sum_zone_node_page_state(pgdat->node_id, NR_FREE_PAGES); in prepare_scan_control()
2339 for (z = 0; z < MAX_NR_ZONES; z++) { in prepare_scan_control()
2340 struct zone *zone = &pgdat->node_zones[z]; in prepare_scan_control()
2355 sc->file_is_tiny = in prepare_scan_control()
2357 !(sc->may_deactivate & DEACTIVATE_ANON) && in prepare_scan_control()
2358 anon >> sc->priority; in prepare_scan_control()
2382 /* If we have no swap space, do not bother scanning anon folios. */ in get_scan_count()
2383 if (!sc->may_swap || !can_reclaim_anon_pages(memcg, pgdat->node_id, sc)) { in get_scan_count()
2389 * Global reclaim will swap to prevent OOM even with no in get_scan_count()
2405 if (!sc->priority && swappiness) { in get_scan_count()
2411 * If the system is almost out of file pages, force-scan anon. in get_scan_count()
2413 if (sc->file_is_tiny) { in get_scan_count()
2422 if (sc->cache_trim_mode) { in get_scan_count()
2437 * Although we limit that influence to ensure no list gets in get_scan_count()
2443 total_cost = sc->anon_cost + sc->file_cost; in get_scan_count()
2444 anon_cost = total_cost + sc->anon_cost; in get_scan_count()
2445 file_cost = total_cost + sc->file_cost; in get_scan_count()
2451 fp = (MAX_SWAPPINESS - swappiness) * (total_cost + 1); in get_scan_count()
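
Fragments 2443-2451 are get_scan_count()'s cost model: the recent reclaim cost of each LRU type, combined with swappiness, becomes a pair of weights, and each type is then scanned in proportion to its weight, so the costlier list gets a smaller share. A standalone sketch of that split; MAX_SWAPPINESS is assumed to be 200 (the vm.swappiness scale) and the helper name is illustrative:

    #include <stdio.h>

    #define MAX_SWAPPINESS 200      /* assumed scale of vm.swappiness */

    /* Split a scan target between anon and file by recent reclaim cost. */
    static void split_scan(unsigned long anon_cost, unsigned long file_cost,
                           int swappiness, unsigned long scan_target)
    {
            unsigned long total = anon_cost + file_cost;
            unsigned long ap = swappiness * (total + 1) / (total + anon_cost + 1);
            unsigned long fp = (MAX_SWAPPINESS - swappiness) * (total + 1) /
                               (total + file_cost + 1);

            printf("anon scan: %lu, file scan: %lu\n",
                   scan_target * ap / (ap + fp),
                   scan_target * fp / (ap + fp));
    }

    int main(void)
    {
            /* Recent anon reclaim was costly, file cheap: bias toward file. */
            split_scan(300, 100, 60, 1024);
            return 0;
    }
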
2464 lruvec_size = lruvec_lru_size(lruvec, lru, sc->reclaim_idx); in get_scan_count()
2465 mem_cgroup_protection(sc->target_mem_cgroup, memcg, in get_scan_count()
2475 * becomes extremely binary -- from nothing as we in get_scan_count()
2479 * also means we simply get no protection at all if we in get_scan_count()
2490 * the best-effort low protection. However, we still in get_scan_count()
2491 * ideally want to honor how well-behaved groups are in in get_scan_count()
2502 if (!sc->memcg_low_reclaim && low > min) { in get_scan_count()
2504 sc->memcg_low_skipped = 1; in get_scan_count()
2512 scan = lruvec_size - lruvec_size * protection / in get_scan_count()
2518 * sc->priority further than desirable. in get_scan_count()
2525 scan >>= sc->priority; in get_scan_count()
2544 * round-off error. in get_scan_count()
2558 /* Look ma, no brain */ in get_scan_count()
2568 * ultimately no way to reclaim the memory.
2578 return can_demote(pgdat->node_id, sc); in can_age_anon_pages()
2608 unsigned long max_seq = READ_ONCE((lruvec)->lrugen.max_seq)
2612 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_ANON]), \
2613 READ_ONCE((lruvec)->lrugen.min_seq[LRU_GEN_FILE]), \
2630 struct lruvec *lruvec = &memcg->nodeinfo[nid]->lruvec; in get_lruvec()
2633 if (!lruvec->pgdat) in get_lruvec()
2634 lruvec->pgdat = pgdat; in get_lruvec()
2641 return &pgdat->__lruvec; in get_lruvec()
2649 if (!sc->may_swap) in get_swappiness()
2652 if (!can_demote(pgdat->node_id, sc) && in get_swappiness()
2661 return lruvec->lrugen.max_seq - lruvec->lrugen.min_seq[type] + 1; in get_nr_gens()
2683 * To get rid of non-leaf entries that no longer have enough leaf entries, the
2684 * aging uses the double-buffering technique to flip to the other filter each
2685 * time it produces a new generation. For non-leaf entries that have enough
2711 key[0] = hash & (BIT(BLOOM_FILTER_SHIFT) - 1); in get_item_key()
2722 filter = READ_ONCE(mm_state->filters[gen]); in test_bloom_filter()
2738 filter = READ_ONCE(mm_state->filters[gen]); in update_bloom_filter()
2755 filter = mm_state->filters[gen]; in reset_bloom_filter()
2763 WRITE_ONCE(mm_state->filters[gen], filter); in reset_bloom_filter()
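
Fragments 2683-2763 describe the mm-walk Bloom filters: non-leaf entries worth revisiting are remembered in one of two filters, tested and set with two hash-derived keys, and the stale filter is reset when the aging flips to the other filter for a new generation. A toy userspace sketch of that double-buffered test/set; the filter size and hash below are made up, not the kernel's:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define SHIFT   15                              /* assumed: 2^15-bit filters */
    #define NBITS   (1UL << SHIFT)

    static unsigned char filters[2][NBITS / 8];     /* double-buffered filters */

    static void get_keys(const void *item, uint32_t key[2])
    {
            uint64_t hash = (uintptr_t)item * 0x9E3779B97F4A7C15ULL;  /* toy hash */

            key[0] = hash & (NBITS - 1);
            key[1] = (hash >> SHIFT) & (NBITS - 1);
    }

    static void filter_set(unsigned long seq, const void *item)
    {
            unsigned char *f = filters[seq & 1];    /* filter for this generation */
            uint32_t key[2];

            get_keys(item, key);
            f[key[0] / 8] |= 1 << (key[0] % 8);
            f[key[1] / 8] |= 1 << (key[1] % 8);
    }

    static bool filter_test(unsigned long seq, const void *item)
    {
            unsigned char *f = filters[seq & 1];
            uint32_t key[2];

            get_keys(item, key);
            return (f[key[0] / 8] & (1 << (key[0] % 8))) &&
                   (f[key[1] / 8] & (1 << (key[1] % 8)));
    }

    int main(void)
    {
            int pmd;                                /* stand-in for a non-leaf PMD */

            filter_set(4, &pmd);                    /* remembered for generation 4 */
            memset(filters[5 & 1], 0, NBITS / 8);   /* flip: the other filter starts clean */
            printf("gen 4: %d, gen 5: %d\n", filter_test(4, &pmd), filter_test(5, &pmd));
            return 0;
    }
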
2781 return &memcg->mm_list; in get_mm_list()
2790 return &lruvec->mm_state; in get_mm_state()
2797 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in get_next_mm()
2798 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); in get_next_mm()
2800 mm = list_entry(mm_state->head, struct mm_struct, lru_gen.list); in get_next_mm()
2801 key = pgdat->node_id % BITS_PER_TYPE(mm->lru_gen.bitmap); in get_next_mm()
2803 if (!walk->force_scan && !test_bit(key, &mm->lru_gen.bitmap)) in get_next_mm()
2806 clear_bit(key, &mm->lru_gen.bitmap); in get_next_mm()
2817 VM_WARN_ON_ONCE(!list_empty(&mm->lru_gen.list)); in lru_gen_add_mm()
2819 VM_WARN_ON_ONCE(mm->lru_gen.memcg); in lru_gen_add_mm()
2820 mm->lru_gen.memcg = memcg; in lru_gen_add_mm()
2822 spin_lock(&mm_list->lock); in lru_gen_add_mm()
2829 if (mm_state->tail == &mm_list->fifo) in lru_gen_add_mm()
2830 mm_state->tail = &mm->lru_gen.list; in lru_gen_add_mm()
2833 list_add_tail(&mm->lru_gen.list, &mm_list->fifo); in lru_gen_add_mm()
2835 spin_unlock(&mm_list->lock); in lru_gen_add_mm()
2844 if (list_empty(&mm->lru_gen.list)) in lru_gen_del_mm()
2848 memcg = mm->lru_gen.memcg; in lru_gen_del_mm()
2852 spin_lock(&mm_list->lock); in lru_gen_del_mm()
2859 if (mm_state->head == &mm->lru_gen.list) in lru_gen_del_mm()
2860 mm_state->head = mm_state->head->prev; in lru_gen_del_mm()
2863 if (mm_state->tail == &mm->lru_gen.list) in lru_gen_del_mm()
2864 mm_state->tail = mm_state->tail->next; in lru_gen_del_mm()
2867 list_del_init(&mm->lru_gen.list); in lru_gen_del_mm()
2869 spin_unlock(&mm_list->lock); in lru_gen_del_mm()
2872 mem_cgroup_put(mm->lru_gen.memcg); in lru_gen_del_mm()
2873 mm->lru_gen.memcg = NULL; in lru_gen_del_mm()
2881 struct task_struct *task = rcu_dereference_protected(mm->owner, true); in lru_gen_migrate_mm()
2883 VM_WARN_ON_ONCE(task->mm != mm); in lru_gen_migrate_mm()
2884 lockdep_assert_held(&task->alloc_lock); in lru_gen_migrate_mm()
2891 if (!mm->lru_gen.memcg) in lru_gen_migrate_mm()
2897 if (memcg == mm->lru_gen.memcg) in lru_gen_migrate_mm()
2900 VM_WARN_ON_ONCE(list_empty(&mm->lru_gen.list)); in lru_gen_migrate_mm()
2930 struct lruvec *lruvec = walk->lruvec; in reset_mm_stats()
2933 lockdep_assert_held(&get_mm_list(lruvec_memcg(lruvec))->lock); in reset_mm_stats()
2935 hist = lru_hist_from_seq(walk->seq); in reset_mm_stats()
2938 WRITE_ONCE(mm_state->stats[hist][i], in reset_mm_stats()
2939 mm_state->stats[hist][i] + walk->mm_stats[i]); in reset_mm_stats()
2940 walk->mm_stats[i] = 0; in reset_mm_stats()
2944 hist = lru_hist_from_seq(walk->seq + 1); in reset_mm_stats()
2947 WRITE_ONCE(mm_state->stats[hist][i], 0); in reset_mm_stats()
2956 struct lruvec *lruvec = walk->lruvec; in iterate_mm_list()
2962 * mm_state->seq is incremented after each iteration of mm_list. There in iterate_mm_list()
2971 spin_lock(&mm_list->lock); in iterate_mm_list()
2973 VM_WARN_ON_ONCE(mm_state->seq + 1 < walk->seq); in iterate_mm_list()
2975 if (walk->seq <= mm_state->seq) in iterate_mm_list()
2978 if (!mm_state->head) in iterate_mm_list()
2979 mm_state->head = &mm_list->fifo; in iterate_mm_list()
2981 if (mm_state->head == &mm_list->fifo) in iterate_mm_list()
2985 mm_state->head = mm_state->head->next; in iterate_mm_list()
2986 if (mm_state->head == &mm_list->fifo) { in iterate_mm_list()
2987 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); in iterate_mm_list()
2993 if (!mm_state->tail || mm_state->tail == mm_state->head) { in iterate_mm_list()
2994 mm_state->tail = mm_state->head->next; in iterate_mm_list()
2995 walk->force_scan = true; in iterate_mm_list()
3002 spin_unlock(&mm_list->lock); in iterate_mm_list()
3005 reset_bloom_filter(mm_state, walk->seq + 1); in iterate_mm_list()
3022 spin_lock(&mm_list->lock); in iterate_mm_list_nowalk()
3024 VM_WARN_ON_ONCE(mm_state->seq + 1 < seq); in iterate_mm_list_nowalk()
3026 if (seq > mm_state->seq) { in iterate_mm_list_nowalk()
3027 mm_state->head = NULL; in iterate_mm_list_nowalk()
3028 mm_state->tail = NULL; in iterate_mm_list_nowalk()
3029 WRITE_ONCE(mm_state->seq, mm_state->seq + 1); in iterate_mm_list_nowalk()
3033 spin_unlock(&mm_list->lock); in iterate_mm_list_nowalk()
3043 * A feedback loop based on Proportional-Integral-Derivative (PID) controller.
3058 * 1. The D term may discount the other two terms over time so that long-lived
3070 struct lru_gen_folio *lrugen = &lruvec->lrugen; in read_ctrl_pos()
3071 int hist = lru_hist_from_seq(lrugen->min_seq[type]); in read_ctrl_pos()
3073 pos->refaulted = lrugen->avg_refaulted[type][tier] + in read_ctrl_pos()
3074 atomic_long_read(&lrugen->refaulted[hist][type][tier]); in read_ctrl_pos()
3075 pos->total = lrugen->avg_total[type][tier] + in read_ctrl_pos()
3076 atomic_long_read(&lrugen->evicted[hist][type][tier]); in read_ctrl_pos()
3078 pos->total += lrugen->protected[hist][type][tier - 1]; in read_ctrl_pos()
3079 pos->gain = gain; in read_ctrl_pos()
3085 struct lru_gen_folio *lrugen = &lruvec->lrugen; in reset_ctrl_pos()
3087 unsigned long seq = carryover ? lrugen->min_seq[type] : lrugen->max_seq + 1; in reset_ctrl_pos()
3089 lockdep_assert_held(&lruvec->lru_lock); in reset_ctrl_pos()
3100 sum = lrugen->avg_refaulted[type][tier] + in reset_ctrl_pos()
3101 atomic_long_read(&lrugen->refaulted[hist][type][tier]); in reset_ctrl_pos()
3102 WRITE_ONCE(lrugen->avg_refaulted[type][tier], sum / 2); in reset_ctrl_pos()
3104 sum = lrugen->avg_total[type][tier] + in reset_ctrl_pos()
3105 atomic_long_read(&lrugen->evicted[hist][type][tier]); in reset_ctrl_pos()
3107 sum += lrugen->protected[hist][type][tier - 1]; in reset_ctrl_pos()
3108 WRITE_ONCE(lrugen->avg_total[type][tier], sum / 2); in reset_ctrl_pos()
3112 atomic_long_set(&lrugen->refaulted[hist][type][tier], 0); in reset_ctrl_pos()
3113 atomic_long_set(&lrugen->evicted[hist][type][tier], 0); in reset_ctrl_pos()
3115 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], 0); in reset_ctrl_pos()
3126 return pv->refaulted < MIN_LRU_BATCH || in positive_ctrl_err()
3127 pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <= in positive_ctrl_err()
3128 (sp->refaulted + 1) * pv->total * pv->gain; in positive_ctrl_err()
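
Fragments 3043-3128 are the PID-style refault feedback: read_ctrl_pos() samples refaulted and evicted counts for a tier, and positive_ctrl_err() decides whether the challenger position (pv) refaults no more, per unit of gain, than the set point (sp). The inequality is that rate comparison cross-multiplied to stay in integer arithmetic, with slack for small samples. A small sketch mirroring the fragment shown, assuming MIN_LRU_BATCH is 64 (BITS_PER_LONG on 64-bit):

    #include <stdbool.h>
    #include <stdio.h>

    #define MIN_LRU_BATCH 64        /* assumed: BITS_PER_LONG on 64-bit */

    struct ctrl_pos {
            unsigned long refaulted;
            unsigned long total;
            int gain;
    };

    /*
     * True when pv's refault rate, discounted by its gain, is no higher
     * than sp's, i.e. roughly:
     *   pv->refaulted / (pv->total * pv->gain) <=
     *   sp->refaulted / (sp->total * sp->gain)
     * rewritten without division and with slack for small samples.
     */
    static bool positive_ctrl_err(struct ctrl_pos *sp, struct ctrl_pos *pv)
    {
            return pv->refaulted < MIN_LRU_BATCH ||
                   pv->refaulted * (sp->total + MIN_LRU_BATCH) * sp->gain <=
                   (sp->refaulted + 1) * pv->total * pv->gain;
    }

    int main(void)
    {
            struct ctrl_pos sp = { .refaulted = 400, .total = 1000, .gain = 1 };
            struct ctrl_pos pv = { .refaulted = 100, .total = 1000, .gain = 2 };

            /* pv refaults less often, so eviction can lean on it: prints 1. */
            printf("%d\n", positive_ctrl_err(&sp, &pv));
            return 0;
    }
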
3138 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_update_gen()
3153 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_update_gen()
3155 return ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; in folio_update_gen()
3162 struct lru_gen_folio *lrugen = &lruvec->lrugen; in folio_inc_gen()
3163 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); in folio_inc_gen()
3164 unsigned long new_flags, old_flags = READ_ONCE(folio->flags); in folio_inc_gen()
3169 new_gen = ((old_flags & LRU_GEN_MASK) >> LRU_GEN_PGOFF) - 1; in folio_inc_gen()
3181 } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags)); in folio_inc_gen()
3198 walk->batched++; in update_batch_size()
3200 walk->nr_pages[old_gen][type][zone] -= delta; in update_batch_size()
3201 walk->nr_pages[new_gen][type][zone] += delta; in update_batch_size()
3207 struct lruvec *lruvec = walk->lruvec; in reset_batch_size()
3208 struct lru_gen_folio *lrugen = &lruvec->lrugen; in reset_batch_size()
3210 walk->batched = 0; in reset_batch_size()
3214 int delta = walk->nr_pages[gen][type][zone]; in reset_batch_size()
3219 walk->nr_pages[gen][type][zone] = 0; in reset_batch_size()
3220 WRITE_ONCE(lrugen->nr_pages[gen][type][zone], in reset_batch_size()
3221 lrugen->nr_pages[gen][type][zone] + delta); in reset_batch_size()
3232 struct vm_area_struct *vma = args->vma; in should_skip_vma()
3233 struct lru_gen_mm_walk *walk = args->private; in should_skip_vma()
3244 if (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) in should_skip_vma()
3247 if (vma == get_gate_vma(vma->vm_mm)) in should_skip_vma()
3251 return !walk->can_swap; in should_skip_vma()
3253 if (WARN_ON_ONCE(!vma->vm_file || !vma->vm_file->f_mapping)) in should_skip_vma()
3256 mapping = vma->vm_file->f_mapping; in should_skip_vma()
3261 return !walk->can_swap; in should_skip_vma()
3264 return !mapping->a_ops->read_folio; in should_skip_vma()
3268 * Some userspace memory allocators map many single-page VMAs. Instead of
3277 VMA_ITERATOR(vmi, args->mm, start); in get_next_vma()
3282 for_each_vma(vmi, args->vma) { in get_next_vma()
3283 if (end && end <= args->vma->vm_start) in get_next_vma()
3286 if (should_skip_vma(args->vma->vm_start, args->vma->vm_end, args)) in get_next_vma()
3289 *vm_start = max(start, args->vma->vm_start); in get_next_vma()
3290 *vm_end = min(end - 1, args->vma->vm_end - 1) + 1; in get_next_vma()
3303 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); in get_pte_pfn()
3306 return -1; in get_pte_pfn()
3309 return -1; in get_pte_pfn()
3311 if (!pte_young(pte) && !mm_has_notifiers(vma->vm_mm)) in get_pte_pfn()
3312 return -1; in get_pte_pfn()
3315 return -1; in get_pte_pfn()
3317 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) in get_pte_pfn()
3318 return -1; in get_pte_pfn()
3328 VM_WARN_ON_ONCE(addr < vma->vm_start || addr >= vma->vm_end); in get_pmd_pfn()
3331 return -1; in get_pmd_pfn()
3334 return -1; in get_pmd_pfn()
3336 if (!pmd_young(pmd) && !mm_has_notifiers(vma->vm_mm)) in get_pmd_pfn()
3337 return -1; in get_pmd_pfn()
3340 return -1; in get_pmd_pfn()
3342 if (pfn < pgdat->node_start_pfn || pfn >= pgdat_end_pfn(pgdat)) in get_pmd_pfn()
3343 return -1; in get_pmd_pfn()
3354 if (folio_nid(folio) != pgdat->node_id) in get_pfn_folio()
3384 struct lru_gen_mm_walk *walk = args->private; in walk_pte_range()
3385 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); in walk_pte_range()
3386 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pte_range()
3387 DEFINE_MAX_SEQ(walk->lruvec); in walk_pte_range()
3390 pte = pte_offset_map_nolock(args->mm, pmd, start & PMD_MASK, &ptl); in walk_pte_range()
3406 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pte_range()
3408 pfn = get_pte_pfn(ptent, args->vma, addr, pgdat); in walk_pte_range()
3409 if (pfn == -1) in walk_pte_range()
3412 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); in walk_pte_range()
3416 if (!ptep_clear_young_notify(args->vma, addr, pte + i)) in walk_pte_range()
3420 walk->mm_stats[MM_LEAF_YOUNG]++; in walk_pte_range()
3447 struct lru_gen_mm_walk *walk = args->private; in walk_pmd_range_locked()
3448 struct mem_cgroup *memcg = lruvec_memcg(walk->lruvec); in walk_pmd_range_locked()
3449 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range_locked()
3450 DEFINE_MAX_SEQ(walk->lruvec); in walk_pmd_range_locked()
3456 if (*first == -1) { in walk_pmd_range_locked()
3462 i = addr == -1 ? 0 : pmd_index(addr) - pmd_index(*first); in walk_pmd_range_locked()
3464 __set_bit(i - 1, bitmap); in walk_pmd_range_locked()
3470 ptl = pmd_lockptr(args->mm, pmd); in walk_pmd_range_locked()
3487 if (!walk->force_scan && should_clear_pmd_young() && in walk_pmd_range_locked()
3488 !mm_has_notifiers(args->mm)) in walk_pmd_range_locked()
3494 if (pfn == -1) in walk_pmd_range_locked()
3497 folio = get_pfn_folio(pfn, memcg, pgdat, walk->can_swap); in walk_pmd_range_locked()
3504 walk->mm_stats[MM_LEAF_YOUNG]++; in walk_pmd_range_locked()
3521 *first = -1; in walk_pmd_range_locked()
3533 unsigned long first = -1; in walk_pmd_range()
3534 struct lru_gen_mm_walk *walk = args->private; in walk_pmd_range()
3535 struct lru_gen_mm_state *mm_state = get_mm_state(walk->lruvec); in walk_pmd_range()
3547 vma = args->vma; in walk_pmd_range()
3554 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pmd_range()
3559 struct pglist_data *pgdat = lruvec_pgdat(walk->lruvec); in walk_pmd_range()
3562 walk->mm_stats[MM_LEAF_TOTAL]++; in walk_pmd_range()
3564 if (pfn != -1) in walk_pmd_range()
3569 if (!walk->force_scan && should_clear_pmd_young() && in walk_pmd_range()
3570 !mm_has_notifiers(args->mm)) { in walk_pmd_range()
3577 if (!walk->force_scan && !test_bloom_filter(mm_state, walk->seq, pmd + i)) in walk_pmd_range()
3580 walk->mm_stats[MM_NONLEAF_FOUND]++; in walk_pmd_range()
3585 walk->mm_stats[MM_NONLEAF_ADDED]++; in walk_pmd_range()
3588 update_bloom_filter(mm_state, walk->seq + 1, pmd + i); in walk_pmd_range()
3591 walk_pmd_range_locked(pud, -1, vma, args, bitmap, &first); in walk_pmd_range()
3604 struct lru_gen_mm_walk *walk = args->private; in walk_pud_range()
3620 if (need_resched() || walk->batched >= MAX_LRU_BATCH) { in walk_pud_range()
3631 if (!end || !args->vma) in walk_pud_range()
3634 walk->next_addr = max(end, args->vma->vm_start); in walk_pud_range()
3636 return -EAGAIN; in walk_pud_range()
3648 struct lruvec *lruvec = walk->lruvec; in walk_mm()
3651 walk->next_addr = FIRST_USER_ADDRESS; in walk_mm()
3656 err = -EBUSY; in walk_mm()
3659 if (walk->seq != max_seq) in walk_mm()
3668 err = walk_page_range(mm, walk->next_addr, ULONG_MAX, &mm_walk_ops, walk); in walk_mm()
3675 if (walk->batched) { in walk_mm()
3676 spin_lock_irq(&lruvec->lru_lock); in walk_mm()
3678 spin_unlock_irq(&lruvec->lru_lock); in walk_mm()
3682 } while (err == -EAGAIN); in walk_mm()
3687 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; in set_mm_walk()
3692 walk = &pgdat->mm_walk; in set_mm_walk()
3699 current->reclaim_state->mm_walk = walk; in set_mm_walk()
3706 struct lru_gen_mm_walk *walk = current->reclaim_state->mm_walk; in clear_mm_walk()
3708 VM_WARN_ON_ONCE(walk && memchr_inv(walk->nr_pages, 0, sizeof(walk->nr_pages))); in clear_mm_walk()
3709 VM_WARN_ON_ONCE(walk && memchr_inv(walk->mm_stats, 0, sizeof(walk->mm_stats))); in clear_mm_walk()
3711 current->reclaim_state->mm_walk = NULL; in clear_mm_walk()
3721 struct lru_gen_folio *lrugen = &lruvec->lrugen; in inc_min_seq()
3722 int new_gen, old_gen = lru_gen_from_seq(lrugen->min_seq[type]); in inc_min_seq()
3729 struct list_head *head = &lrugen->folios[old_gen][type][zone]; in inc_min_seq()
3740 list_move_tail(&folio->lru, &lrugen->folios[new_gen][type][zone]); in inc_min_seq()
3742 if (!--remaining) in inc_min_seq()
3748 WRITE_ONCE(lrugen->min_seq[type], lrugen->min_seq[type] + 1); in inc_min_seq()
3757 struct lru_gen_folio *lrugen = &lruvec->lrugen; in try_to_inc_min_seq()
3764 while (min_seq[type] + MIN_NR_GENS <= lrugen->max_seq) { in try_to_inc_min_seq()
3768 if (!list_empty(&lrugen->folios[gen][type][zone])) in try_to_inc_min_seq()
3781 min_seq[LRU_GEN_FILE] = max(min_seq[LRU_GEN_ANON], lrugen->min_seq[LRU_GEN_FILE]); in try_to_inc_min_seq()
3785 if (min_seq[type] == lrugen->min_seq[type]) in try_to_inc_min_seq()
3789 WRITE_ONCE(lrugen->min_seq[type], min_seq[type]); in try_to_inc_min_seq()
3802 struct lru_gen_folio *lrugen = &lruvec->lrugen; in inc_max_seq()
3804 if (seq < READ_ONCE(lrugen->max_seq)) in inc_max_seq()
3807 spin_lock_irq(&lruvec->lru_lock); in inc_max_seq()
3811 success = seq == lrugen->max_seq; in inc_max_seq()
3815 for (type = ANON_AND_FILE - 1; type >= 0; type--) { in inc_max_seq()
3824 spin_unlock_irq(&lruvec->lru_lock); in inc_max_seq()
3835 prev = lru_gen_from_seq(lrugen->max_seq - 1); in inc_max_seq()
3836 next = lru_gen_from_seq(lrugen->max_seq + 1); in inc_max_seq()
3841 long delta = lrugen->nr_pages[prev][type][zone] - in inc_max_seq()
3842 lrugen->nr_pages[next][type][zone]; in inc_max_seq()
3848 __update_lru_size(lruvec, lru + LRU_ACTIVE, zone, -delta); in inc_max_seq()
3855 WRITE_ONCE(lrugen->timestamps[next], jiffies); in inc_max_seq()
3857 smp_store_release(&lrugen->max_seq, lrugen->max_seq + 1); in inc_max_seq()
3859 spin_unlock_irq(&lruvec->lru_lock); in inc_max_seq()
3870 struct lru_gen_folio *lrugen = &lruvec->lrugen; in try_to_inc_max_seq()
3873 VM_WARN_ON_ONCE(seq > READ_ONCE(lrugen->max_seq)); in try_to_inc_max_seq()
3879 if (seq <= READ_ONCE(mm_state->seq)) in try_to_inc_max_seq()
3899 walk->lruvec = lruvec; in try_to_inc_max_seq()
3900 walk->seq = seq; in try_to_inc_max_seq()
3901 walk->can_swap = can_swap; in try_to_inc_max_seq()
3902 walk->force_scan = force_scan; in try_to_inc_max_seq()
3927 if (sc->priority != DEF_PRIORITY || sc->nr_to_reclaim < MIN_LRU_BATCH) in set_initial_priority()
3935 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) in set_initial_priority()
3938 /* round down reclaimable and round up sc->nr_to_reclaim */ in set_initial_priority()
3939 priority = fls_long(reclaimable) - 1 - fls_long(sc->nr_to_reclaim - 1); in set_initial_priority()
3945 sc->priority = clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY); in set_initial_priority()
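
Fragments 3927-3945 are set_initial_priority(): the starting priority is picked so that reclaimable >> priority is roughly nr_to_reclaim (rounding reclaimable down and the target up, per the comment at 3938), then clamped. A worked userspace sketch, assuming DEF_PRIORITY is 12; fls_long() here is a small stand-in built on a GCC/Clang builtin:

    #include <stdio.h>

    #define DEF_PRIORITY 12         /* assumed, as in the kernel */

    static int fls_long(unsigned long x)    /* highest set bit, 1-based */
    {
            return x ? 8 * (int)sizeof(long) - __builtin_clzl(x) : 0;
    }

    static int clamp(int v, int lo, int hi)
    {
            return v < lo ? lo : v > hi ? hi : v;
    }

    /* Pick priority so that reclaimable >> priority roughly equals the target. */
    static int initial_priority(unsigned long reclaimable, unsigned long nr_to_reclaim)
    {
            int priority = fls_long(reclaimable) - 1 - fls_long(nr_to_reclaim - 1);

            return clamp(priority, DEF_PRIORITY / 2, DEF_PRIORITY);
    }

    int main(void)
    {
            /* ~512MiB reclaimable (4KiB pages), target 1024 pages -> priority 7. */
            printf("%d\n", initial_priority(131072, 1024));
            return 0;
    }
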
3953 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lruvec_is_sizable()
3965 total += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in lruvec_is_sizable()
3970 return mem_cgroup_online(memcg) ? (total >> sc->priority) : total; in lruvec_is_sizable()
3989 birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); in lruvec_is_reclaimable()
4024 .gfp_mask = sc->gfp_mask, in lru_gen_age_node()
4051 pte_t *pte = pvmw->pte; in lru_gen_look_around()
4052 unsigned long addr = pvmw->address; in lru_gen_look_around()
4053 struct vm_area_struct *vma = pvmw->vma; in lru_gen_look_around()
4054 struct folio *folio = pfn_folio(pvmw->pfn); in lru_gen_look_around()
4063 lockdep_assert_held(pvmw->ptl); in lru_gen_look_around()
4069 if (spin_is_contended(pvmw->ptl)) in lru_gen_look_around()
4073 if (vma->vm_flags & VM_SPECIAL) in lru_gen_look_around()
4077 walk = current->reclaim_state ? current->reclaim_state->mm_walk : NULL; in lru_gen_look_around()
4079 start = max(addr & PMD_MASK, vma->vm_start); in lru_gen_look_around()
4080 end = min(addr | ~PMD_MASK, vma->vm_end - 1) + 1; in lru_gen_look_around()
4082 if (end - start == PAGE_SIZE) in lru_gen_look_around()
4085 if (end - start > MIN_LRU_BATCH * PAGE_SIZE) { in lru_gen_look_around()
4086 if (addr - start < MIN_LRU_BATCH * PAGE_SIZE / 2) in lru_gen_look_around()
4088 else if (end - addr < MIN_LRU_BATCH * PAGE_SIZE / 2) in lru_gen_look_around()
4089 start = end - MIN_LRU_BATCH * PAGE_SIZE; in lru_gen_look_around()
4091 start = addr - MIN_LRU_BATCH * PAGE_SIZE / 2; in lru_gen_look_around()
4102 pte -= (addr - start) / PAGE_SIZE; in lru_gen_look_around()
4109 if (pfn == -1) in lru_gen_look_around()
4146 update_bloom_filter(mm_state, max_seq, pvmw->pmd); in lru_gen_look_around()
4172 spin_lock_irqsave(&pgdat->memcg_lru.lock, flags); in lru_gen_rotate_memcg()
4174 VM_WARN_ON_ONCE(hlist_nulls_unhashed(&lruvec->lrugen.list)); in lru_gen_rotate_memcg()
4177 new = old = lruvec->lrugen.gen; in lru_gen_rotate_memcg()
4185 new = get_memcg_gen(pgdat->memcg_lru.seq); in lru_gen_rotate_memcg()
4187 new = get_memcg_gen(pgdat->memcg_lru.seq + 1); in lru_gen_rotate_memcg()
4191 WRITE_ONCE(lruvec->lrugen.seg, seg); in lru_gen_rotate_memcg()
4192 WRITE_ONCE(lruvec->lrugen.gen, new); in lru_gen_rotate_memcg()
4194 hlist_nulls_del_rcu(&lruvec->lrugen.list); in lru_gen_rotate_memcg()
4197 hlist_nulls_add_head_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4199 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[new][bin]); in lru_gen_rotate_memcg()
4201 pgdat->memcg_lru.nr_memcgs[old]--; in lru_gen_rotate_memcg()
4202 pgdat->memcg_lru.nr_memcgs[new]++; in lru_gen_rotate_memcg()
4204 if (!pgdat->memcg_lru.nr_memcgs[old] && old == get_memcg_gen(pgdat->memcg_lru.seq)) in lru_gen_rotate_memcg()
4205 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); in lru_gen_rotate_memcg()
4207 spin_unlock_irqrestore(&pgdat->memcg_lru.lock, flags); in lru_gen_rotate_memcg()
4222 spin_lock_irq(&pgdat->memcg_lru.lock); in lru_gen_online_memcg()
4224 VM_WARN_ON_ONCE(!hlist_nulls_unhashed(&lruvec->lrugen.list)); in lru_gen_online_memcg()
4226 gen = get_memcg_gen(pgdat->memcg_lru.seq); in lru_gen_online_memcg()
4228 lruvec->lrugen.gen = gen; in lru_gen_online_memcg()
4230 hlist_nulls_add_tail_rcu(&lruvec->lrugen.list, &pgdat->memcg_lru.fifo[gen][bin]); in lru_gen_online_memcg()
4231 pgdat->memcg_lru.nr_memcgs[gen]++; in lru_gen_online_memcg()
4233 spin_unlock_irq(&pgdat->memcg_lru.lock); in lru_gen_online_memcg()
4257 spin_lock_irq(&pgdat->memcg_lru.lock); in lru_gen_release_memcg()
4259 if (hlist_nulls_unhashed(&lruvec->lrugen.list)) in lru_gen_release_memcg()
4262 gen = lruvec->lrugen.gen; in lru_gen_release_memcg()
4264 hlist_nulls_del_init_rcu(&lruvec->lrugen.list); in lru_gen_release_memcg()
4265 pgdat->memcg_lru.nr_memcgs[gen]--; in lru_gen_release_memcg()
4267 if (!pgdat->memcg_lru.nr_memcgs[gen] && gen == get_memcg_gen(pgdat->memcg_lru.seq)) in lru_gen_release_memcg()
4268 WRITE_ONCE(pgdat->memcg_lru.seq, pgdat->memcg_lru.seq + 1); in lru_gen_release_memcg()
4270 spin_unlock_irq(&pgdat->memcg_lru.lock); in lru_gen_release_memcg()
4279 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_HEAD) in lru_gen_soft_reclaim()
4299 struct lru_gen_folio *lrugen = &lruvec->lrugen; in sort_folio()
4314 if (gen != lru_gen_from_seq(lrugen->min_seq[type])) { in sort_folio()
4315 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4321 int hist = lru_hist_from_seq(lrugen->min_seq[type]); in sort_folio()
4324 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4326 WRITE_ONCE(lrugen->protected[hist][type][tier - 1], in sort_folio()
4327 lrugen->protected[hist][type][tier - 1] + delta); in sort_folio()
4332 if (!folio_test_lru(folio) || zone > sc->reclaim_idx) { in sort_folio()
4334 list_move_tail(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4342 list_move(&folio->lru, &lrugen->folios[gen][type][zone]); in sort_folio()
4354 if (!(sc->gfp_mask & __GFP_IO) && in isolate_folio()
4371 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, 0); in isolate_folio()
4394 struct lru_gen_folio *lrugen = &lruvec->lrugen; in scan_folios()
4402 gen = lru_gen_from_seq(lrugen->min_seq[type]); in scan_folios()
4404 for (i = MAX_NR_ZONES; i > 0; i--) { in scan_folios()
4407 int zone = (sc->reclaim_idx + i) % MAX_NR_ZONES; in scan_folios()
4408 struct list_head *head = &lrugen->folios[gen][type][zone]; in scan_folios()
4424 list_add(&folio->lru, list); in scan_folios()
4427 list_move(&folio->lru, &moved); in scan_folios()
4431 if (!--remaining || max(isolated, skipped_zone) >= MIN_LRU_BATCH) in scan_folios()
4453 trace_mm_vmscan_lru_isolate(sc->reclaim_idx, sc->order, MAX_LRU_BATCH, in scan_folios()
4481 return tier - 1; in get_tier_idx()
4488 int gain[ANON_AND_FILE] = { swappiness, MAX_SWAPPINESS - swappiness }; in get_type_to_scan()
4507 *tier_idx = tier - 1; in get_type_to_scan()
4518 int tier = -1; in isolate_folios()
4537 else if (!(sc->gfp_mask & __GFP_IO)) in isolate_folios()
4551 tier = -1; in isolate_folios()
4575 spin_lock_irq(&lruvec->lru_lock); in evict_folios()
4584 spin_unlock_irq(&lruvec->lru_lock); in evict_folios()
4590 sc->nr_reclaimed += reclaimed; in evict_folios()
4591 trace_mm_vmscan_lru_shrink_inactive(pgdat->node_id, in evict_folios()
4592 scanned, reclaimed, &stat, sc->priority, in evict_folios()
4597 list_del(&folio->lru); in evict_folios()
4614 set_mask_bits(&folio->flags, LRU_REFS_MASK | LRU_REFS_FLAGS, in evict_folios()
4620 list_move(&folio->lru, &clean); in evict_folios()
4623 spin_lock_irq(&lruvec->lru_lock); in evict_folios()
4627 walk = current->reclaim_state->mm_walk; in evict_folios()
4628 if (walk && walk->batched) { in evict_folios()
4629 walk->lruvec = lruvec; in evict_folios()
4639 spin_unlock_irq(&lruvec->lru_lock); in evict_folios()
4658 struct lru_gen_folio *lrugen = &lruvec->lrugen; in should_run_aging()
4676 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in should_run_aging()
4723 if (mem_cgroup_below_min(sc->target_mem_cgroup, memcg)) in get_nr_to_scan()
4724 return -1; in get_nr_to_scan()
4733 if (!success || sc->priority == DEF_PRIORITY) in get_nr_to_scan()
4734 return nr_to_scan >> sc->priority; in get_nr_to_scan()
4737 return try_to_inc_max_seq(lruvec, max_seq, can_swap, false) ? -1 : 0; in get_nr_to_scan()
4749 if (sc->nr_reclaimed >= max(sc->nr_to_reclaim, compact_gap(sc->order))) in should_abort_scan()
4752 /* check the order to exclude compaction-induced reclaim */ in should_abort_scan()
4753 if (!current_is_kswapd() || sc->order) in should_abort_scan()
4759 for (i = 0; i <= sc->reclaim_idx; i++) { in should_abort_scan()
4760 struct zone *zone = lruvec_pgdat(lruvec)->node_zones + i; in should_abort_scan()
4763 if (managed_zone(zone) && !zone_watermark_ok(zone, 0, size, sc->reclaim_idx, 0)) in should_abort_scan()
4805 unsigned long scanned = sc->nr_scanned; in shrink_one()
4806 unsigned long reclaimed = sc->nr_reclaimed; in shrink_one()
4816 if (READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL) in shrink_one()
4824 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, sc->priority); in shrink_one()
4826 if (!sc->proactive) in shrink_one()
4827 vmpressure(sc->gfp_mask, memcg, false, sc->nr_scanned - scanned, in shrink_one()
4828 sc->nr_reclaimed - reclaimed); in shrink_one()
4839 return READ_ONCE(lruvec->lrugen.seg) != MEMCG_LRU_TAIL ? in shrink_one()
4854 gen = get_memcg_gen(READ_ONCE(pgdat->memcg_lru.seq)); in shrink_many()
4862 hlist_nulls_for_each_entry_rcu(lrugen, pos, &pgdat->memcg_lru.fifo[gen][bin], list) { in shrink_many()
4871 if (gen != READ_ONCE(lrugen->gen)) in shrink_many()
4918 VM_WARN_ON_ONCE(!sc->may_writepage || !sc->may_unmap); in lru_gen_shrink_lruvec()
4924 set_mm_walk(NULL, sc->proactive); in lru_gen_shrink_lruvec()
4937 unsigned long reclaimed = sc->nr_reclaimed; in lru_gen_shrink_node()
4943 * them is likely futile and can cause high reclaim latency when there in lru_gen_shrink_node()
4946 if (!sc->may_writepage || !sc->may_unmap) in lru_gen_shrink_node()
4953 set_mm_walk(pgdat, sc->proactive); in lru_gen_shrink_node()
4958 sc->nr_reclaimed = 0; in lru_gen_shrink_node()
4961 shrink_one(&pgdat->__lruvec, sc); in lru_gen_shrink_node()
4966 sc->nr_reclaimed += reclaimed; in lru_gen_shrink_node()
4972 if (sc->nr_reclaimed > reclaimed) in lru_gen_shrink_node()
4973 pgdat->kswapd_failures = 0; in lru_gen_shrink_node()
4982 struct lru_gen_folio *lrugen = &lruvec->lrugen; in state_is_valid()
4984 if (lrugen->enabled) { in state_is_valid()
4988 if (!list_empty(&lruvec->lists[lru])) in state_is_valid()
4995 if (!list_empty(&lrugen->folios[gen][type][zone])) in state_is_valid()
5011 struct list_head *head = &lruvec->lists[lru]; in fill_evictable()
5020 VM_WARN_ON_ONCE_FOLIO(folio_lru_gen(folio) != -1, folio); in fill_evictable()
5026 if (!--remaining) in fill_evictable()
5040 struct list_head *head = &lruvec->lrugen.folios[gen][type][zone]; in drain_evictable()
5055 if (!--remaining) in drain_evictable()
5089 spin_lock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5094 lruvec->lrugen.enabled = enabled; in lru_gen_change_state()
5097 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5099 spin_lock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5102 spin_unlock_irq(&lruvec->lru_lock); in lru_gen_change_state()
5123 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5130 return -EINVAL; in min_ttl_ms_store()
5155 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5165 caps = -1; in enabled_store()
5167 return -EINVAL; in enabled_store()
5205 m->private = kvmalloc(PATH_MAX, GFP_KERNEL); in lru_gen_seq_start()
5206 if (!m->private) in lru_gen_seq_start()
5207 return ERR_PTR(-ENOMEM); in lru_gen_seq_start()
5214 if (!nr_to_skip--) in lru_gen_seq_start()
5227 kvfree(m->private); in lru_gen_seq_stop()
5228 m->private = NULL; in lru_gen_seq_stop()
5233 int nid = lruvec_pgdat(v)->node_id; in lru_gen_seq_next()
5257 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_seq_show_full()
5268 n[0] = READ_ONCE(lrugen->avg_refaulted[type][tier]); in lru_gen_seq_show_full()
5269 n[1] = READ_ONCE(lrugen->avg_total[type][tier]); in lru_gen_seq_show_full()
5272 n[0] = atomic_long_read(&lrugen->refaulted[hist][type][tier]); in lru_gen_seq_show_full()
5273 n[1] = atomic_long_read(&lrugen->evicted[hist][type][tier]); in lru_gen_seq_show_full()
5275 n[2] = READ_ONCE(lrugen->protected[hist][type][tier - 1]); in lru_gen_seq_show_full()
5294 n = READ_ONCE(mm_state->stats[hist][i]); in lru_gen_seq_show_full()
5297 n = READ_ONCE(mm_state->stats[hist][i]); in lru_gen_seq_show_full()
5305 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5309 bool full = !debugfs_real_fops(m->file)->write; in lru_gen_seq_show()
5311 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_seq_show()
5312 int nid = lruvec_pgdat(lruvec)->node_id; in lru_gen_seq_show()
5318 const char *path = memcg ? m->private : ""; in lru_gen_seq_show()
5322 cgroup_path(memcg->css.cgroup, m->private, PATH_MAX); in lru_gen_seq_show()
5332 seq = max_seq - MAX_NR_GENS + 1; in lru_gen_seq_show()
5339 unsigned long birth = READ_ONCE(lruvec->lrugen.timestamps[gen]); in lru_gen_seq_show()
5341 seq_printf(m, " %10lu %10u", seq, jiffies_to_msecs(jiffies - birth)); in lru_gen_seq_show()
5348 size += max(READ_ONCE(lrugen->nr_pages[gen][type][zone]), 0L); in lru_gen_seq_show()
5379 return -EINVAL; in run_aging()
5381 if (!force_scan && min_seq[!can_swap] + MAX_NR_GENS - 1 <= max_seq) in run_aging()
5382 return -ERANGE; in run_aging()
5395 return -EINVAL; in run_eviction()
5397 sc->nr_reclaimed = 0; in run_eviction()
5405 if (sc->nr_reclaimed >= nr_to_reclaim) in run_eviction()
5414 return -EINTR; in run_eviction()
5421 int err = -EINVAL; in run_cmd()
5425 return -EINVAL; in run_cmd()
5437 return -EINVAL; in run_cmd()
5454 case '-': in run_cmd()
5464 /* see Documentation/admin-guide/mm/multigen_lru.rst for details */
5472 int err = -EINVAL; in lru_gen_seq_write()
5477 .reclaim_idx = MAX_NR_ZONES - 1, in lru_gen_seq_write()
5483 return -ENOMEM; in lru_gen_seq_write()
5487 return -EFAULT; in lru_gen_seq_write()
5494 err = -ENOMEM; in lru_gen_seq_write()
5508 unsigned int swappiness = -1; in lru_gen_seq_write()
5509 unsigned long opt = -1; in lru_gen_seq_write()
5518 err = -EINVAL; in lru_gen_seq_write()
5565 spin_lock_init(&pgdat->memcg_lru.lock); in lru_gen_init_pgdat()
5569 INIT_HLIST_NULLS_HEAD(&pgdat->memcg_lru.fifo[i][j], i); in lru_gen_init_pgdat()
5577 struct lru_gen_folio *lrugen = &lruvec->lrugen; in lru_gen_init_lruvec()
5580 lrugen->max_seq = MIN_NR_GENS + 1; in lru_gen_init_lruvec()
5581 lrugen->enabled = lru_gen_enabled(); in lru_gen_init_lruvec()
5584 lrugen->timestamps[i] = jiffies; in lru_gen_init_lruvec()
5587 INIT_LIST_HEAD(&lrugen->folios[gen][type][zone]); in lru_gen_init_lruvec()
5590 mm_state->seq = MIN_NR_GENS; in lru_gen_init_lruvec()
5602 INIT_LIST_HEAD(&mm_list->fifo); in lru_gen_init_memcg()
5603 spin_lock_init(&mm_list->lock); in lru_gen_init_memcg()
5612 VM_WARN_ON_ONCE(mm_list && !list_empty(&mm_list->fifo)); in lru_gen_exit_memcg()
5618 VM_WARN_ON_ONCE(memchr_inv(lruvec->lrugen.nr_pages, 0, in lru_gen_exit_memcg()
5619 sizeof(lruvec->lrugen.nr_pages))); in lru_gen_exit_memcg()
5621 lruvec->lrugen.list.next = LIST_POISON1; in lru_gen_exit_memcg()
5627 bitmap_free(mm_state->filters[i]); in lru_gen_exit_memcg()
5628 mm_state->filters[i] = NULL; in lru_gen_exit_memcg()
5676 unsigned long nr_to_reclaim = sc->nr_to_reclaim; in shrink_lruvec()
5702 sc->priority == DEF_PRIORITY); in shrink_lruvec()
5713 nr[lru] -= nr_to_scan; in shrink_lruvec()
5765 nr_scanned = targets[lru] - nr[lru]; in shrink_lruvec()
5766 nr[lru] = targets[lru] * (100 - percentage) / 100; in shrink_lruvec()
5767 nr[lru] -= min(nr[lru], nr_scanned); in shrink_lruvec()
5770 nr_scanned = targets[lru] - nr[lru]; in shrink_lruvec()
5771 nr[lru] = targets[lru] * (100 - percentage) / 100; in shrink_lruvec()
5772 nr[lru] -= min(nr[lru], nr_scanned); in shrink_lruvec()
5775 sc->nr_reclaimed += nr_reclaimed; in shrink_lruvec()
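
Fragments 5765-5772 are shrink_lruvec()'s proportional rebalancing: once enough has been reclaimed, scanning of one LRU type is stopped, 'percentage' records how much of that type's target was still unscanned, and the surviving type's remaining work is cut by the same share so the ratio chosen by get_scan_count() is preserved. A small sketch of that rescaling step (names are illustrative):

    #include <stdio.h>

    static unsigned long min_ul(unsigned long a, unsigned long b)
    {
            return a < b ? a : b;
    }

    /*
     * 'percentage' is the share of the stopped type's target left unscanned;
     * the surviving type's remaining work shrinks by the same share.
     */
    static unsigned long rescale(unsigned long target, unsigned long remaining,
                                 unsigned long percentage)
    {
            unsigned long scanned = target - remaining;
            unsigned long new_remaining = target * (100 - percentage) / 100;

            return new_remaining - min_ul(new_remaining, scanned);
    }

    int main(void)
    {
            /*
             * File target 1000 with 400 left; the anon side stopped with 20%
             * of its target unscanned, so file stops at 80% of 1000 = 800,
             * i.e. 200 more pages after the 600 already scanned.
             */
            printf("%lu\n", rescale(1000, 400, 20));
            return 0;
    }
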
5790 if (gfp_compaction_allowed(sc->gfp_mask) && sc->order && in in_reclaim_compaction()
5791 (sc->order > PAGE_ALLOC_COSTLY_ORDER || in in_reclaim_compaction()
5792 sc->priority < DEF_PRIORITY - 2)) in in_reclaim_compaction()
5799 * Reclaim/compaction is used for high-order allocation requests. It reclaims
5800 * order-0 pages before compacting the zone. should_continue_reclaim() returns
5811 int z; in should_continue_reclaim() local
5823 * first, by assuming that zero delta of sc->nr_scanned means full LRU in should_continue_reclaim()
5825 * where always a non-zero amount of pages were scanned. in should_continue_reclaim()
5831 for (z = 0; z <= sc->reclaim_idx; z++) { in should_continue_reclaim()
5832 struct zone *zone = &pgdat->node_zones[z]; in should_continue_reclaim()
5837 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), in should_continue_reclaim()
5838 sc->reclaim_idx, 0)) in should_continue_reclaim()
5841 if (compaction_suitable(zone, sc->order, sc->reclaim_idx)) in should_continue_reclaim()
5849 pages_for_compaction = compact_gap(sc->order); in should_continue_reclaim()
5851 if (can_reclaim_anon_pages(NULL, pgdat->node_id, sc)) in should_continue_reclaim()
5859 struct mem_cgroup *target_memcg = sc->target_mem_cgroup; in shrink_node_memcgs()
5875 if (current_is_kswapd() || sc->memcg_full_walk) in shrink_node_memcgs()
5885 * This loop can become CPU-bound when target memcgs in shrink_node_memcgs()
5886 * aren't eligible for reclaim - either because they in shrink_node_memcgs()
5897 * If there is no reclaimable memory, OOM. in shrink_node_memcgs()
5907 if (!sc->memcg_low_reclaim) { in shrink_node_memcgs()
5908 sc->memcg_low_skipped = 1; in shrink_node_memcgs()
5914 reclaimed = sc->nr_reclaimed; in shrink_node_memcgs()
5915 scanned = sc->nr_scanned; in shrink_node_memcgs()
5919 shrink_slab(sc->gfp_mask, pgdat->node_id, memcg, in shrink_node_memcgs()
5920 sc->priority); in shrink_node_memcgs()
5923 if (!sc->proactive) in shrink_node_memcgs()
5924 vmpressure(sc->gfp_mask, memcg, false, in shrink_node_memcgs()
5925 sc->nr_scanned - scanned, in shrink_node_memcgs()
5926 sc->nr_reclaimed - reclaimed); in shrink_node_memcgs()
5929 if (partial && sc->nr_reclaimed >= sc->nr_to_reclaim) { in shrink_node_memcgs()
5947 target_lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, pgdat); in shrink_node()
5950 memset(&sc->nr, 0, sizeof(sc->nr)); in shrink_node()
5952 nr_reclaimed = sc->nr_reclaimed; in shrink_node()
5953 nr_scanned = sc->nr_scanned; in shrink_node()
5961 nr_node_reclaimed = sc->nr_reclaimed - nr_reclaimed; in shrink_node()
5964 if (!sc->proactive) in shrink_node()
5965 vmpressure(sc->gfp_mask, sc->target_mem_cgroup, true, in shrink_node()
5966 sc->nr_scanned - nr_scanned, nr_node_reclaimed); in shrink_node()
5974 * it implies that the long-lived page allocation rate in shrink_node()
5980 * context which is not ideal as there is no guarantee in shrink_node()
5989 if (sc->nr.writeback && sc->nr.writeback == sc->nr.taken) in shrink_node()
5990 set_bit(PGDAT_WRITEBACK, &pgdat->flags); in shrink_node()
5993 if (sc->nr.unqueued_dirty == sc->nr.file_taken) in shrink_node()
5994 set_bit(PGDAT_DIRTY, &pgdat->flags); in shrink_node()
6003 if (sc->nr.immediate) in shrink_node()
6014 if (sc->nr.dirty && sc->nr.dirty == sc->nr.congested) { in shrink_node()
6016 set_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags); in shrink_node()
6019 set_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags); in shrink_node()
6029 !sc->hibernation_mode && in shrink_node()
6030 (test_bit(LRUVEC_CGROUP_CONGESTED, &target_lruvec->flags) || in shrink_node()
6031 test_bit(LRUVEC_NODE_CONGESTED, &target_lruvec->flags))) in shrink_node()
6044 pgdat->kswapd_failures = 0; in shrink_node()
6045 else if (sc->cache_trim_mode) in shrink_node()
6046 sc->cache_trim_mode_failed = 1; in shrink_node()
6050 * Returns true if compaction should go ahead for a costly-order request, or
6058 if (!gfp_compaction_allowed(sc->gfp_mask)) in compaction_ready()
6062 if (zone_watermark_ok(zone, sc->order, min_wmark_pages(zone), in compaction_ready()
6063 sc->reclaim_idx, 0)) in compaction_ready()
6067 if (!compaction_suitable(zone, sc->order, sc->reclaim_idx)) in compaction_ready()
6077 * we are already above the high+gap watermark, don't reclaim at all. in compaction_ready()
6079 watermark = high_wmark_pages(zone) + compact_gap(sc->order); in compaction_ready()
6081 return zone_watermark_ok_safe(zone, 0, watermark, sc->reclaim_idx); in compaction_ready()
6090 if (sc->nr_reclaimed > (sc->nr_scanned >> 3)) { in consider_reclaim_throttle()
6093 wqh = &pgdat->reclaim_wait[VMSCAN_THROTTLE_NOPROGRESS]; in consider_reclaim_throttle()
6109 /* Throttle if making no progress at high priorities. */ in consider_reclaim_throttle()
6110 if (sc->priority == 1 && !sc->nr_reclaimed) in consider_reclaim_throttle()
6115 * This is the direct reclaim path, for page-allocating processes. We only
6124 struct zoneref *z; in shrink_zones() local
6137 orig_mask = sc->gfp_mask; in shrink_zones()
6139 sc->gfp_mask |= __GFP_HIGHMEM; in shrink_zones()
6140 sc->reclaim_idx = gfp_zone(sc->gfp_mask); in shrink_zones()
6143 for_each_zone_zonelist_nodemask(zone, z, zonelist, in shrink_zones()
6144 sc->reclaim_idx, sc->nodemask) { in shrink_zones()
6158 * non-zero order, only frequent costly order in shrink_zones()
6164 sc->order > PAGE_ALLOC_COSTLY_ORDER && in shrink_zones()
6166 sc->compaction_ready = true; in shrink_zones()
6176 if (zone->zone_pgdat == last_pgdat) in shrink_zones()
6186 nr_soft_reclaimed = memcg1_soft_limit_reclaim(zone->zone_pgdat, in shrink_zones()
6187 sc->order, sc->gfp_mask, in shrink_zones()
6189 sc->nr_reclaimed += nr_soft_reclaimed; in shrink_zones()
6190 sc->nr_scanned += nr_soft_scanned; in shrink_zones()
6195 first_pgdat = zone->zone_pgdat; in shrink_zones()
6198 if (zone->zone_pgdat == last_pgdat) in shrink_zones()
6200 last_pgdat = zone->zone_pgdat; in shrink_zones()
6201 shrink_node(zone->zone_pgdat, sc); in shrink_zones()
6211 sc->gfp_mask = orig_mask; in shrink_zones()
6224 target_lruvec->refaults[WORKINGSET_ANON] = refaults; in snapshot_refaults()
6226 target_lruvec->refaults[WORKINGSET_FILE] = refaults; in snapshot_refaults()
6236 * high - the zone may be full of dirty or under-writeback pages, which this
6242 * returns: 0, if no pages reclaimed
6248 int initial_priority = sc->priority; in do_try_to_free_pages()
6250 struct zoneref *z; in do_try_to_free_pages() local
6256 __count_zid_vm_events(ALLOCSTALL, sc->reclaim_idx, 1); in do_try_to_free_pages()
6259 if (!sc->proactive) in do_try_to_free_pages()
6260 vmpressure_prio(sc->gfp_mask, sc->target_mem_cgroup, in do_try_to_free_pages()
6261 sc->priority); in do_try_to_free_pages()
6262 sc->nr_scanned = 0; in do_try_to_free_pages()
6265 if (sc->nr_reclaimed >= sc->nr_to_reclaim) in do_try_to_free_pages()
6268 if (sc->compaction_ready) in do_try_to_free_pages()
6275 if (sc->priority < DEF_PRIORITY - 2) in do_try_to_free_pages()
6276 sc->may_writepage = 1; in do_try_to_free_pages()
6277 } while (--sc->priority >= 0); in do_try_to_free_pages()
6280 for_each_zone_zonelist_nodemask(zone, z, zonelist, sc->reclaim_idx, in do_try_to_free_pages()
6281 sc->nodemask) { in do_try_to_free_pages()
6282 if (zone->zone_pgdat == last_pgdat) in do_try_to_free_pages()
6284 last_pgdat = zone->zone_pgdat; in do_try_to_free_pages()
6286 snapshot_refaults(sc->target_mem_cgroup, zone->zone_pgdat); in do_try_to_free_pages()
6291 lruvec = mem_cgroup_lruvec(sc->target_mem_cgroup, in do_try_to_free_pages()
6292 zone->zone_pgdat); in do_try_to_free_pages()
6293 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); in do_try_to_free_pages()
6299 if (sc->nr_reclaimed) in do_try_to_free_pages()
6300 return sc->nr_reclaimed; in do_try_to_free_pages()
6303 if (sc->compaction_ready) in do_try_to_free_pages()
6311 * among cgroups), though, high concurrency can result in in do_try_to_free_pages()
6315 if (!sc->memcg_full_walk) { in do_try_to_free_pages()
6316 sc->priority = initial_priority; in do_try_to_free_pages()
6317 sc->memcg_full_walk = 1; in do_try_to_free_pages()
6330 if (sc->skipped_deactivate) { in do_try_to_free_pages()
6331 sc->priority = initial_priority; in do_try_to_free_pages()
6332 sc->force_deactivate = 1; in do_try_to_free_pages()
6333 sc->skipped_deactivate = 0; in do_try_to_free_pages()
6338 if (sc->memcg_low_skipped) { in do_try_to_free_pages()
6339 sc->priority = initial_priority; in do_try_to_free_pages()
6340 sc->force_deactivate = 0; in do_try_to_free_pages()
6341 sc->memcg_low_reclaim = 1; in do_try_to_free_pages()
6342 sc->memcg_low_skipped = 0; in do_try_to_free_pages()
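The tail of do_try_to_free_pages() above is a retry ladder: each failed full run relaxes one constraint, resets the priority, and tries again. A sketch of that shape under stated assumptions: model_scan_control and reclaim_pass() are illustrative stand-ins (the latter a stub for the descending-priority loop), and any conditions elided between the matched lines are not modelled.

#include <stdbool.h>

struct model_scan_control {
        int priority, initial_priority;
        bool memcg_full_walk;
        bool force_deactivate, skipped_deactivate;
        bool memcg_low_reclaim, memcg_low_skipped;
};

/* Stub standing in for the priority loop above. */
static unsigned long reclaim_pass(struct model_scan_control *sc)
{
        (void)sc;
        return 0;
}

static unsigned long model_try_to_free_pages(struct model_scan_control *sc)
{
        unsigned long reclaimed;

retry:
        reclaimed = reclaim_pass(sc);
        if (reclaimed)
                return reclaimed;

        /* 1: repeat with a full memcg walk instead of a partial one. */
        if (!sc->memcg_full_walk) {
                sc->priority = sc->initial_priority;
                sc->memcg_full_walk = true;
                goto retry;
        }

        /* 2: repeat with deactivation of active lists forced on. */
        if (sc->skipped_deactivate) {
                sc->priority = sc->initial_priority;
                sc->force_deactivate = true;
                sc->skipped_deactivate = false;
                goto retry;
        }

        /* 3: as a last resort, reclaim below memory.low protection. */
        if (sc->memcg_low_skipped) {
                sc->priority = sc->initial_priority;
                sc->force_deactivate = false;
                sc->memcg_low_reclaim = true;
                sc->memcg_low_skipped = false;
                goto retry;
        }

        return 0;
}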
6357 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in allow_direct_reclaim()
6361 zone = &pgdat->node_zones[i]; in allow_direct_reclaim()
6372 /* If there are no reserves (unexpected config) then do not throttle */ in allow_direct_reclaim()
6379 if (!wmark_ok && waitqueue_active(&pgdat->kswapd_wait)) { in allow_direct_reclaim()
6380 if (READ_ONCE(pgdat->kswapd_highest_zoneidx) > ZONE_NORMAL) in allow_direct_reclaim()
6381 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, ZONE_NORMAL); in allow_direct_reclaim()
6383 wake_up_interruptible(&pgdat->kswapd_wait); in allow_direct_reclaim()
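allow_direct_reclaim() sums free pages and min-watermark reserves over the low zones and, when the node looks starved, wakes kswapd clamped to ZONE_NORMAL instead of letting the caller reclaim. A sketch of the watermark half of that test, assuming per-zone counts are passed in as plain arrays; the comparison against half the summed reserves is taken from the in-tree code and is not visible in the matched lines.

#include <stdbool.h>

static bool model_allow_direct_reclaim(const unsigned long *free_pages,
                                       const unsigned long *min_wmark,
                                       int nr_zones)
{
        unsigned long free = 0, reserve = 0;
        int i;

        for (i = 0; i < nr_zones; i++) {
                free += free_pages[i];
                reserve += min_wmark[i];
        }

        /* No reserves (unexpected config): do not throttle. */
        if (!reserve)
                return true;

        /* Throttle direct reclaim once free pages fall to half the reserves. */
        return free > reserve / 2;
}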
6401 struct zoneref *z; in throttle_direct_reclaim() local
6412 if (current->flags & PF_KTHREAD) in throttle_direct_reclaim()
6436 for_each_zone_zonelist_nodemask(zone, z, zonelist, in throttle_direct_reclaim()
6442 pgdat = zone->zone_pgdat; in throttle_direct_reclaim()
6448 /* If no zone was usable by the allocation flags then do not throttle */ in throttle_direct_reclaim()
6464 wait_event_interruptible_timeout(pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
6468 wait_event_killable(zone->zone_pgdat->pfmemalloc_wait, in throttle_direct_reclaim()
6535 .reclaim_idx = MAX_NR_ZONES - 1, in mem_cgroup_shrink_node()
6539 WARN_ON_ONCE(!current->reclaim_state); in mem_cgroup_shrink_node()
6576 .reclaim_idx = MAX_NR_ZONES - 1, in try_to_free_mem_cgroup_pages()
6637 * Check for watermark boosts top-down as the higher zones in pgdat_watermark_boosted()
6640 * start prematurely when there is no boosting and a lower in pgdat_watermark_boosted()
6643 for (i = highest_zoneidx; i >= 0; i--) { in pgdat_watermark_boosted()
6644 zone = pgdat->node_zones + i; in pgdat_watermark_boosted()
6648 if (zone->watermark_boost) in pgdat_watermark_boosted()
6662 unsigned long mark = -1; in pgdat_balanced()
6666 * Check watermarks bottom-up as lower zones are more likely to in pgdat_balanced()
6670 zone = pgdat->node_zones + i; in pgdat_balanced()
6684 * If a node has no managed zone within highest_zoneidx, it does not in pgdat_balanced()
6685 * need balancing by definition. This can happen if a zone-restricted in pgdat_balanced()
6688 if (mark == -1) in pgdat_balanced()
6699 clear_bit(LRUVEC_NODE_CONGESTED, &lruvec->flags); in clear_pgdat_congested()
6700 clear_bit(LRUVEC_CGROUP_CONGESTED, &lruvec->flags); in clear_pgdat_congested()
6701 clear_bit(PGDAT_DIRTY, &pgdat->flags); in clear_pgdat_congested()
6702 clear_bit(PGDAT_WRITEBACK, &pgdat->flags); in clear_pgdat_congested()
6706 * Prepare kswapd for sleeping. This verifies that there are no processes
6721 * the wake up checks. If kswapd is going to sleep, no process should in prepare_kswapd_sleep()
6727 if (waitqueue_active(&pgdat->pfmemalloc_wait)) in prepare_kswapd_sleep()
6728 wake_up_all(&pgdat->pfmemalloc_wait); in prepare_kswapd_sleep()
6731 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES) in prepare_kswapd_sleep()
6754 int z; in kswapd_shrink_node() local
6755 unsigned long nr_reclaimed = sc->nr_reclaimed; in kswapd_shrink_node()
6758 sc->nr_to_reclaim = 0; in kswapd_shrink_node()
6759 for (z = 0; z <= sc->reclaim_idx; z++) { in kswapd_shrink_node()
6760 zone = pgdat->node_zones + z; in kswapd_shrink_node()
6764 sc->nr_to_reclaim += max(high_wmark_pages(zone), SWAP_CLUSTER_MAX); in kswapd_shrink_node()
6775 * high-order allocations. If twice the allocation size has been in kswapd_shrink_node()
6776 * reclaimed then recheck watermarks only at order-0 to prevent in kswapd_shrink_node()
6777 * excessive reclaim. Assume that a process requested a high-order in kswapd_shrink_node()
6780 if (sc->order && sc->nr_reclaimed >= compact_gap(sc->order)) in kswapd_shrink_node()
6781 sc->order = 0; in kswapd_shrink_node()
6784 return max(sc->nr_scanned, sc->nr_reclaimed - nr_reclaimed) >= sc->nr_to_reclaim; in kswapd_shrink_node()
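kswapd_shrink_node() sets its per-pass target by summing, over the eligible zones, the larger of each zone's high watermark and one swap cluster, and the pass counts as sufficient once either the scanned or the reclaimed total reaches that target. A sketch of the target computation, assuming SWAP_CLUSTER_MAX is 32 (its usual value) and plain arrays in place of struct zone; the zone-eligibility checks elided in the listing are omitted here, and the order reset reuses the compact_gap() threshold sketched earlier.

#define MODEL_SWAP_CLUSTER_MAX 32UL

static unsigned long model_kswapd_target(const unsigned long *high_wmark,
                                         int reclaim_idx)
{
        unsigned long nr_to_reclaim = 0;
        int z;

        for (z = 0; z <= reclaim_idx; z++) {
                unsigned long want = high_wmark[z];

                /* Never ask for less than one swap cluster per zone. */
                if (want < MODEL_SWAP_CLUSTER_MAX)
                        want = MODEL_SWAP_CLUSTER_MAX;

                nr_to_reclaim += want;
        }

        return nr_to_reclaim;
}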
6787 /* Page allocator PCP high watermark is lowered if reclaim is active. */
6795 zone = pgdat->node_zones + i; in update_reclaim_active()
6801 set_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); in update_reclaim_active()
6803 clear_bit(ZONE_RECLAIM_ACTIVE, &zone->flags); in update_reclaim_active()
6826 * kswapd scans the zones in the highmem->normal->dma direction. It skips
6861 zone = pgdat->node_zones + i; in balance_pgdat()
6865 nr_boost_reclaim += zone->watermark_boost; in balance_pgdat()
6866 zone_boosts[i] = zone->watermark_boost; in balance_pgdat()
6885 * purpose -- on 64-bit systems it is expected that in balance_pgdat()
6886 * buffer_heads are stripped during active rotation. On 32-bit in balance_pgdat()
6893 for (i = MAX_NR_ZONES - 1; i >= 0; i--) { in balance_pgdat()
6894 zone = pgdat->node_zones + i; in balance_pgdat()
6908 * re-evaluate if boosting is required when kswapd next wakes. in balance_pgdat()
6917 * If boosting is not active then only reclaim if there are no in balance_pgdat()
6925 if (nr_boost_reclaim && sc.priority == DEF_PRIORITY - 2) in balance_pgdat()
6930 * intent is to relieve pressure, not issue sub-optimal IO in balance_pgdat()
6931 * from reclaim context. If no pages are reclaimed, the in balance_pgdat()
6948 if (sc.priority < DEF_PRIORITY - 2) in balance_pgdat()
6959 * There should be no need to raise the scanning priority if in balance_pgdat()
6960 * enough pages are already being scanned that that high in balance_pgdat()
6967 * If the low watermark is met there is no need for processes in balance_pgdat()
6971 if (waitqueue_active(&pgdat->pfmemalloc_wait) && in balance_pgdat()
6973 wake_up_all(&pgdat->pfmemalloc_wait); in balance_pgdat()
6983 * Raise priority if scanning rate is too low or there was no in balance_pgdat()
6986 nr_reclaimed = sc.nr_reclaimed - nr_reclaimed; in balance_pgdat()
6987 nr_boost_reclaim -= min(nr_boost_reclaim, nr_reclaimed); in balance_pgdat()
6990 * If reclaim made no progress for a boost, stop reclaim as in balance_pgdat()
6998 sc.priority--; in balance_pgdat()
7012 pgdat->kswapd_failures++; in balance_pgdat()
7026 zone = pgdat->node_zones + i; in balance_pgdat()
7027 spin_lock_irqsave(&zone->lock, flags); in balance_pgdat()
7028 zone->watermark_boost -= min(zone->watermark_boost, zone_boosts[i]); in balance_pgdat()
7029 spin_unlock_irqrestore(&zone->lock, flags); in balance_pgdat()
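At the end of balance_pgdat(), each zone hands back only the watermark boost that was snapshotted on entry, clamped so a boost consumed or reset in the meantime can never underflow. A tiny sketch of that clamp, with the zone lock and the surrounding bookkeeping omitted.

/* Return the new boost after giving back at most the entry-time snapshot. */
static unsigned long model_unboost(unsigned long current_boost,
                                   unsigned long snapshot_boost)
{
        unsigned long give_back = current_boost < snapshot_boost ?
                                  current_boost : snapshot_boost;

        return current_boost - give_back;
}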
7054 * The pgdat->kswapd_highest_zoneidx is used to pass the highest zone index to
7063 enum zone_type curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in kswapd_highest_zoneidx()
7077 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
7109 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, in kswapd_try_to_sleep()
7113 if (READ_ONCE(pgdat->kswapd_order) < reclaim_order) in kswapd_try_to_sleep()
7114 WRITE_ONCE(pgdat->kswapd_order, reclaim_order); in kswapd_try_to_sleep()
7117 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
7118 prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE); in kswapd_try_to_sleep()
7127 trace_mm_vmscan_kswapd_sleep(pgdat->node_id); in kswapd_try_to_sleep()
7134 * per-cpu vmstat threshold while kswapd is awake and restore in kswapd_try_to_sleep()
7149 finish_wait(&pgdat->kswapd_wait, &wait); in kswapd_try_to_sleep()
7157 * free memory available even if there is no other activity
7162 * If there are applications that are active memory-allocators
7168 unsigned int highest_zoneidx = MAX_NR_ZONES - 1; in kswapd()
7171 const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id); in kswapd()
7188 tsk->flags |= PF_MEMALLOC | PF_KSWAPD; in kswapd()
7191 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
7192 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
7193 atomic_set(&pgdat->nr_writeback_throttled, 0); in kswapd()
7197 alloc_order = reclaim_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
7206 alloc_order = READ_ONCE(pgdat->kswapd_order); in kswapd()
7209 WRITE_ONCE(pgdat->kswapd_order, 0); in kswapd()
7210 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, MAX_NR_ZONES); in kswapd()
7223 * Reclaim begins at the requested order but if a high-order in kswapd()
7225 * order-0. If that happens, kswapd will consider sleeping in kswapd()
7230 trace_mm_vmscan_kswapd_wake(pgdat->node_id, highest_zoneidx, in kswapd()
7238 tsk->flags &= ~(PF_MEMALLOC | PF_KSWAPD); in kswapd()
7244 * A zone is low on free memory or too fragmented for high-order memory. If
7262 pgdat = zone->zone_pgdat; in wakeup_kswapd()
7263 curr_idx = READ_ONCE(pgdat->kswapd_highest_zoneidx); in wakeup_kswapd()
7266 WRITE_ONCE(pgdat->kswapd_highest_zoneidx, highest_zoneidx); in wakeup_kswapd()
7268 if (READ_ONCE(pgdat->kswapd_order) < order) in wakeup_kswapd()
7269 WRITE_ONCE(pgdat->kswapd_order, order); in wakeup_kswapd()
7271 if (!waitqueue_active(&pgdat->kswapd_wait)) in wakeup_kswapd()
7275 if (pgdat->kswapd_failures >= MAX_RECLAIM_RETRIES || in wakeup_kswapd()
7280 * fragmented for high-order allocations. Wake up kcompactd in wakeup_kswapd()
7290 trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, highest_zoneidx, order, in wakeup_kswapd()
7292 wake_up_interruptible(&pgdat->kswapd_wait); in wakeup_kswapd()
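wakeup_kswapd() coalesces concurrent requests by only ever widening the pending hints: the stored order takes the maximum over all callers and the highest zone index grows toward the widest request, so kswapd reads one pair of values covering everything since it last slept. A sketch of that coalescing, using -1 as the "no request yet" sentinel where the listing uses MAX_NR_ZONES; the sentinel check sits between the matched lines and is reproduced here from the in-tree code.

#define MODEL_NO_REQUEST (-1)

struct model_kswapd_hint {
        int order;              /* highest order requested since kswapd last slept */
        int highest_zoneidx;    /* widest zone index requested since kswapd last slept */
};

static void model_coalesce_wakeup(struct model_kswapd_hint *hint,
                                  int order, int highest_zoneidx)
{
        if (hint->highest_zoneidx == MODEL_NO_REQUEST ||
            hint->highest_zoneidx < highest_zoneidx)
                hint->highest_zoneidx = highest_zoneidx;

        if (hint->order < order)
                hint->order = order;
}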
7297 * Try to free `nr_to_reclaim' of memory, system-wide, and return the number of
7309 .reclaim_idx = MAX_NR_ZONES - 1, in shrink_all_memory()
7335 * This kswapd start function will be called by init and node-hot-add.
7342 if (!pgdat->kswapd) { in kswapd_run()
7343 pgdat->kswapd = kthread_run(kswapd, pgdat, "kswapd%d", nid); in kswapd_run()
7344 if (IS_ERR(pgdat->kswapd)) { in kswapd_run()
7347 nid, PTR_ERR(pgdat->kswapd)); in kswapd_run()
7349 pgdat->kswapd = NULL; in kswapd_run()
7365 kswapd = pgdat->kswapd; in kswapd_stop()
7368 pgdat->kswapd = NULL; in kswapd_stop()
7389 * If non-zero call node_reclaim when the number of free pages falls below
7424 return (file_lru > file_mapped) ? (file_lru - file_mapped) : 0; in node_unmapped_file_pages()
7452 return nr_pagecache_reclaimable - delta; in node_pagecache_reclaimable()
7476 trace_mm_vmscan_node_reclaim_begin(pgdat->node_id, order, in __node_reclaim()
7489 if (node_pagecache_reclaimable(pgdat) > pgdat->min_unmapped_pages || in __node_reclaim()
7490 node_page_state_pages(pgdat, NR_SLAB_RECLAIMABLE_B) > pgdat->min_slab_pages) { in __node_reclaim()
7497 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0); in __node_reclaim()
7525 if (node_pagecache_reclaimable(pgdat) <= pgdat->min_unmapped_pages && in node_reclaim()
7527 pgdat->min_slab_pages) in node_reclaim()
7533 if (!gfpflags_allow_blocking(gfp_mask) || (current->flags & PF_MEMALLOC)) in node_reclaim()
7542 if (node_state(pgdat->node_id, N_CPU) && pgdat->node_id != numa_node_id()) in node_reclaim()
7545 if (test_and_set_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags)) in node_reclaim()
7549 clear_bit(PGDAT_RECLAIM_LOCKED, &pgdat->flags); in node_reclaim()
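node_reclaim() serializes per-node reclaim with a test-and-set bit: a loser simply backs off instead of waiting, and the winner clears the bit when done. A sketch of that handshake using a C11 atomic_flag in place of the pgdat flag bit; the reclaim body is a placeholder.

#include <stdatomic.h>
#include <stdbool.h>

static atomic_flag model_node_reclaim_locked = ATOMIC_FLAG_INIT;

static bool model_node_reclaim(void)
{
        bool progress;

        /* Someone else is already reclaiming this node: back off, don't wait. */
        if (atomic_flag_test_and_set(&model_node_reclaim_locked))
                return false;

        /* ... __node_reclaim() would run here ... */
        progress = true;        /* placeholder result */

        atomic_flag_clear(&model_node_reclaim_locked);

        return progress;
}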
7561 * check_move_unevictable_folios - Move evictable folios to appropriate zone
7576 for (i = 0; i < fbatch->nr; i++) { in check_move_unevictable_folios()
7577 struct folio *folio = fbatch->folios[i]; in check_move_unevictable_folios()