// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/swap.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/admin-guide/sysctl/vm.rst.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>
#include <linux/buffer_head.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? As a power of 2 */
int page_cluster;
const int page_cluster_max = 31;

/* Protecting only lru_rotate.fbatch which requires disabling interrupts */
struct lru_rotate {
        local_lock_t lock;
        struct folio_batch fbatch;
};
static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

/*
 * The following folio batches are grouped together because they are protected
 * by disabling preemption (and interrupts remain enabled).
 */
struct cpu_fbatches {
        local_lock_t lock;
        struct folio_batch lru_add;
        struct folio_batch lru_deactivate_file;
        struct folio_batch lru_deactivate;
        struct folio_batch lru_lazyfree;
#ifdef CONFIG_SMP
        struct folio_batch activate;
#endif
};
static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
        .lock = INIT_LOCAL_LOCK(lock),
};
/*
 * This path almost never happens for VM activity - pages are normally freed
 * via pagevecs.  But it gets used by networking - and for compound pages.
 */
static void __page_cache_release(struct folio *folio)
{
        if (folio_test_lru(folio)) {
                struct lruvec *lruvec;
                unsigned long flags;

                lruvec = folio_lruvec_lock_irqsave(folio, &flags);
                lruvec_del_folio(lruvec, folio);
                __folio_clear_lru_flags(folio);
                unlock_page_lruvec_irqrestore(lruvec, flags);
        }
        /* See comment on folio_test_mlocked in release_pages() */
        if (unlikely(folio_test_mlocked(folio))) {
                long nr_pages = folio_nr_pages(folio);

                __folio_clear_mlocked(folio);
                zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
                count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
        }
}

static void __folio_put_small(struct folio *folio)
{
        __page_cache_release(folio);
        mem_cgroup_uncharge(folio);
        free_unref_page(&folio->page, 0);
}

static void __folio_put_large(struct folio *folio)
{
        /*
         * __page_cache_release() is supposed to be called for thp, not for
         * hugetlb. This is because a hugetlb page never has PageLRU set
         * (it is never put on any LRU list) and no memcg routines should
         * be called for hugetlb (it has a separate hugetlb_cgroup.)
         */
        if (!folio_test_hugetlb(folio))
                __page_cache_release(folio);
        destroy_large_folio(folio);
}

void __folio_put(struct folio *folio)
{
        if (unlikely(folio_is_zone_device(folio)))
                free_zone_device_page(&folio->page);
        else if (unlikely(folio_test_large(folio)))
                __folio_put_large(folio);
        else
                __folio_put_small(folio);
}
EXPORT_SYMBOL(__folio_put);
/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.
 */
void put_pages_list(struct list_head *pages)
{
        struct folio *folio, *next;

        list_for_each_entry_safe(folio, next, pages, lru) {
                if (!folio_put_testzero(folio)) {
                        list_del(&folio->lru);
                        continue;
                }
                if (folio_test_large(folio)) {
                        list_del(&folio->lru);
                        __folio_put_large(folio);
                        continue;
                }
                /* LRU flag must be clear because it's passed using the lru */
        }

        free_unref_page_list(pages);
        INIT_LIST_HEAD(pages);
}
EXPORT_SYMBOL(put_pages_list);
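/*
 * Illustrative sketch (added commentary, not part of the original file):
 * a caller that batch-frees a privately collected list of folios, the way
 * networking code uses put_pages_list(). The free_list name and collection
 * step are hypothetical; the pattern is what the function expects.
 *
 *	LIST_HEAD(free_list);
 *
 *	// ...collect folios with list_add(&folio->lru, &free_list)...
 *	put_pages_list(&free_list);
 *	// free_list is reinitialised to empty on return
 */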
typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);

static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)
{
        int was_unevictable = folio_test_clear_unevictable(folio);
        long nr_pages = folio_nr_pages(folio);

        VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

        /*
         * Is an smp_mb__after_atomic() still required here, before
         * folio_evictable() tests the mlocked flag, to rule out the
         * possibility of stranding an evictable folio on an unevictable
         * LRU?  I think not, because __munlock_folio() only clears the
         * mlocked flag while the LRU lock is held.
         *
         * (That is not true of __page_cache_release(), and not necessarily
         * true of release_pages(): but those only clear the mlocked flag after
         * folio_put_testzero() has excluded any other users of the folio.)
         */
        if (folio_evictable(folio)) {
                if (was_unevictable)
                        __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
        } else {
                folio_clear_active(folio);
                folio_set_unevictable(folio);
                /*
                 * folio->mlock_count = !!folio_test_mlocked(folio)?
                 * But that leaves __mlock_folio() in doubt whether another
                 * actor has already counted the mlock or not.  Err on the
                 * safe side, underestimate, let page reclaim fix it, rather
                 * than leaving a page on the unevictable LRU indefinitely.
                 */
                folio->mlock_count = 0;
                if (!was_unevictable)
                        __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
        }

        lruvec_add_folio(lruvec, folio);
        trace_mm_lru_insertion(folio);
}

static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
{
        int i;
        struct lruvec *lruvec = NULL;
        unsigned long flags = 0;

        for (i = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];

                /* block memcg migration while the folio moves between lru */
                if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
                        continue;

                lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
                move_fn(lruvec, folio);

                folio_set_lru(folio);
        }

        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
        folios_put(fbatch->folios, folio_batch_count(fbatch));
        folio_batch_reinit(fbatch);
}
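/*
 * Illustrative note (added commentary): each lru_*_fn in this file is a
 * move_fn_t invoked under the lruvec lock, so a batch-based LRU operation
 * only has to supply the per-folio callback; folio_batch_move_lru() handles
 * the lock juggling, the final folios_put() and the batch reinit. A minimal
 * hypothetical callback, in the shape of lru_move_tail_fn() below:
 *
 *	static void my_move_fn(struct lruvec *lruvec, struct folio *folio)
 *	{
 *		lruvec_del_folio(lruvec, folio);
 *		lruvec_add_folio_tail(lruvec, folio);
 *	}
 *
 *	folio_batch_move_lru(fbatch, my_move_fn);
 */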
static void folio_batch_add_and_move(struct folio_batch *fbatch,
                struct folio *folio, move_fn_t move_fn)
{
        if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
            !lru_cache_disabled())
                return;
        folio_batch_move_lru(fbatch, move_fn);
}

static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
{
        if (!folio_test_unevictable(folio)) {
                lruvec_del_folio(lruvec, folio);
                folio_clear_active(folio);
                lruvec_add_folio_tail(lruvec, folio);
                __count_vm_events(PGROTATED, folio_nr_pages(folio));
        }
}

/*
 * Writeback is about to end against a folio which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it
 * to the tail of the inactive list.
 *
 * folio_rotate_reclaimable() must disable IRQs, to prevent nasty races.
 */
void folio_rotate_reclaimable(struct folio *folio)
{
        if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
            !folio_test_unevictable(folio) && folio_test_lru(folio)) {
                struct folio_batch *fbatch;
                unsigned long flags;

                folio_get(folio);
                local_lock_irqsave(&lru_rotate.lock, flags);
                fbatch = this_cpu_ptr(&lru_rotate.fbatch);
                folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
                local_unlock_irqrestore(&lru_rotate.lock, flags);
        }
}
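/*
 * Usage sketch (added commentary): the main caller is the writeback
 * completion path, roughly as in folio_end_writeback():
 *
 *	if (folio_test_reclaim(folio)) {
 *		folio_clear_reclaim(folio);
 *		folio_rotate_reclaimable(folio);
 *	}
 *
 * i.e. a folio that reclaim tagged with the reclaim flag is queued for the
 * tail of the inactive list as soon as its writeback finishes.
 */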
void lru_note_cost(struct lruvec *lruvec, bool file,
                   unsigned int nr_io, unsigned int nr_rotated)
{
        unsigned long cost;

        /*
         * Reflect the relative cost of incurring IO and spending CPU
         * time on rotations. This doesn't attempt to make a precise
         * comparison, it just says: if reloads are about comparable
         * between the LRU lists, or rotations are overwhelmingly
         * different between them, adjust scan balance for CPU work.
         */
        cost = nr_io * SWAP_CLUSTER_MAX + nr_rotated;

        do {
                unsigned long lrusize;

                /*
                 * Holding lruvec->lru_lock is safe here, since we are either
                 * 1) on a lruvec pinned by reclaim, or
                 * 2) coming from a pre-LRU page during refault (which also
                 *    holds the rcu lock, so it would be safe even if the page
                 *    was on the LRU and could move simultaneously to a new
                 *    lruvec).
                 */
                spin_lock_irq(&lruvec->lru_lock);
                /* Record cost event */
                if (file)
                        lruvec->file_cost += cost;
                else
                        lruvec->anon_cost += cost;

                /*
                 * Decay previous events
                 *
                 * Because workloads change over time (and to avoid
                 * overflow) we keep these statistics as a floating
                 * average, which ends up weighing recent refaults
                 * more than old ones.
                 */
                lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
                          lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
                          lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
                          lruvec_page_state(lruvec, NR_ACTIVE_FILE);

                if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
                        lruvec->file_cost /= 2;
                        lruvec->anon_cost /= 2;
                }
                spin_unlock_irq(&lruvec->lru_lock);
        } while ((lruvec = parent_lruvec(lruvec)));
}

void lru_note_cost_refault(struct folio *folio)
{
        lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
                      folio_nr_pages(folio), 0);
}
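/*
 * Worked example (added commentary, numbers are illustrative): with
 * SWAP_CLUSTER_MAX == 32, one refaulted folio (nr_io = 1, nr_rotated = 0)
 * contributes cost = 1 * 32 + 0 = 32, the same as rotating 32 folios
 * without any IO (nr_io = 0, nr_rotated = 32). Once file_cost + anon_cost
 * exceeds a quarter of the LRU size, both are halved, which is what makes
 * the statistic a decaying average dominated by recent refaults.
 */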
static void folio_activate_fn(struct lruvec *lruvec, struct folio *folio)
{
        if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
                long nr_pages = folio_nr_pages(folio);

                lruvec_del_folio(lruvec, folio);
                folio_set_active(folio);
                lruvec_add_folio(lruvec, folio);
                trace_mm_lru_activate(folio);

                __count_vm_events(PGACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
                                     nr_pages);
        }
}

#ifdef CONFIG_SMP
static void folio_activate_drain(int cpu)
{
        struct folio_batch *fbatch = &per_cpu(cpu_fbatches.activate, cpu);

        if (folio_batch_count(fbatch))
                folio_batch_move_lru(fbatch, folio_activate_fn);
}

void folio_activate(struct folio *folio)
{
        if (folio_test_lru(folio) && !folio_test_active(folio) &&
            !folio_test_unevictable(folio)) {
                struct folio_batch *fbatch;

                folio_get(folio);
                local_lock(&cpu_fbatches.lock);
                fbatch = this_cpu_ptr(&cpu_fbatches.activate);
                folio_batch_add_and_move(fbatch, folio, folio_activate_fn);
                local_unlock(&cpu_fbatches.lock);
        }
}

#else
static inline void folio_activate_drain(int cpu)
{
}

void folio_activate(struct folio *folio)
{
        struct lruvec *lruvec;

        if (folio_test_clear_lru(folio)) {
                lruvec = folio_lruvec_lock_irq(folio);
                folio_activate_fn(lruvec, folio);
                unlock_page_lruvec_irq(lruvec);
                folio_set_lru(folio);
        }
}
#endif
static void __lru_cache_activate_folio(struct folio *folio)
{
        struct folio_batch *fbatch;
        int i;

        local_lock(&cpu_fbatches.lock);
        fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);

        /*
         * Search backwards on the optimistic assumption that the folio being
         * activated has just been added to this batch. Note that only
         * the local batch is examined as a !LRU folio could be in the
         * process of being released, reclaimed, migrated or on a remote
         * batch that is currently being drained. Furthermore, marking
         * a remote batch's folio active potentially hits a race where
         * a folio is marked active just after it is added to the inactive
         * list causing accounting errors and BUG_ON checks to trigger.
         */
        for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) {
                struct folio *batch_folio = fbatch->folios[i];

                if (batch_folio == folio) {
                        folio_set_active(folio);
                        break;
                }
        }

        local_unlock(&cpu_fbatches.lock);
}

#ifdef CONFIG_LRU_GEN
static void folio_inc_refs(struct folio *folio)
{
        unsigned long new_flags, old_flags = READ_ONCE(folio->flags);

        if (folio_test_unevictable(folio))
                return;

        if (!folio_test_referenced(folio)) {
                folio_set_referenced(folio);
                return;
        }

        if (!folio_test_workingset(folio)) {
                folio_set_workingset(folio);
                return;
        }

        /* see the comment on MAX_NR_TIERS */
        do {
                new_flags = old_flags & LRU_REFS_MASK;
                if (new_flags == LRU_REFS_MASK)
                        break;

                new_flags += BIT(LRU_REFS_PGOFF);
                new_flags |= old_flags & ~LRU_REFS_MASK;
        } while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
}
#else
static void folio_inc_refs(struct folio *folio)
{
}
#endif /* CONFIG_LRU_GEN */
/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void folio_mark_accessed(struct folio *folio)
{
        if (lru_gen_enabled()) {
                folio_inc_refs(folio);
                return;
        }

        if (!folio_test_referenced(folio)) {
                folio_set_referenced(folio);
        } else if (folio_test_unevictable(folio)) {
                /*
                 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
                 * this list is never rotated or maintained, so marking an
                 * unevictable page accessed has no effect.
                 */
        } else if (!folio_test_active(folio)) {
                /*
                 * If the folio is on the LRU, queue it for activation via
                 * cpu_fbatches.activate. Otherwise, assume the folio is in a
                 * folio_batch, mark it active and it'll be moved to the active
                 * LRU on the next drain.
                 */
                if (folio_test_lru(folio))
                        folio_activate(folio);
                else
                        __lru_cache_activate_folio(folio);
                folio_clear_referenced(folio);
                workingset_activation(folio);
        }
        if (folio_test_idle(folio))
                folio_clear_idle(folio);
}
EXPORT_SYMBOL(folio_mark_accessed);

/**
 * folio_add_lru - Add a folio to an LRU list.
 * @folio: The folio to be added to the LRU.
 *
 * Queue the folio for addition to the LRU. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * folio_batch is drained. This gives a chance for the caller of
 * folio_add_lru() to have the folio added to the active list using
 * folio_mark_accessed().
 */
void folio_add_lru(struct folio *folio)
{
        struct folio_batch *fbatch;

        VM_BUG_ON_FOLIO(folio_test_active(folio) &&
                        folio_test_unevictable(folio), folio);
        VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

        /* see the comment in lru_gen_add_folio() */
        if (lru_gen_enabled() && !folio_test_unevictable(folio) &&
            lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
                folio_set_active(folio);

        folio_get(folio);
        local_lock(&cpu_fbatches.lock);
        fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
        folio_batch_add_and_move(fbatch, folio, lru_add_fn);
        local_unlock(&cpu_fbatches.lock);
}
EXPORT_SYMBOL(folio_add_lru);
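/*
 * Usage sketch (added commentary): the usual pattern, roughly what
 * filemap_add_folio() does for the page cache - insert the folio into its
 * mapping first, then queue it for the LRU:
 *
 *	folio = filemap_alloc_folio(GFP_KERNEL, 0);
 *	if (!folio)
 *		return -ENOMEM;
 *	// ...insert into the mapping (__filemap_add_folio())...
 *	folio_add_lru(folio);
 *
 * The folio reaches a real LRU list only once the per-CPU lru_add batch
 * is drained, e.g. by lru_add_drain().
 */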
/**
 * folio_add_lru_vma() - Add a folio to the appropriate LRU list for this VMA.
 * @folio: The folio to be added to the LRU.
 * @vma: VMA in which the folio is mapped.
 *
 * If the VMA is mlocked, @folio is added to the unevictable list.
 * Otherwise, it is treated the same way as folio_add_lru().
 */
void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
{
        VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

        if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
                mlock_new_folio(folio);
        else
                folio_add_lru(folio);
}
/*
 * If the folio cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the folio isn't mapped and dirty/writeback, the folio
 * could be reclaimed asap using the reclaim flag.
 *
 * 1. active, mapped folio -> none
 * 2. active, dirty/writeback folio -> inactive, head, reclaim
 * 3. inactive, mapped folio -> none
 * 4. inactive, dirty/writeback folio -> inactive, head, reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In 4, it moves to the head of the inactive list so the folio is
 * written out by flusher threads as this is much more efficient
 * than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
{
        bool active = folio_test_active(folio);
        long nr_pages = folio_nr_pages(folio);

        if (folio_test_unevictable(folio))
                return;

        /* Some processes are using the folio */
        if (folio_mapped(folio))
                return;

        lruvec_del_folio(lruvec, folio);
        folio_clear_active(folio);
        folio_clear_referenced(folio);

        if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
                /*
                 * Setting the reclaim flag could race with
                 * folio_end_writeback() and confuse readahead.  But the
                 * race window is _really_ small and it's not a critical
                 * problem.
                 */
                lruvec_add_folio(lruvec, folio);
                folio_set_reclaim(folio);
        } else {
                /*
                 * The folio's writeback ended while it was in the batch.
                 * We move that folio to the tail of the inactive list.
                 */
                lruvec_add_folio_tail(lruvec, folio);
                __count_vm_events(PGROTATED, nr_pages);
        }

        if (active) {
                __count_vm_events(PGDEACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
                                     nr_pages);
        }
}

static void lru_deactivate_fn(struct lruvec *lruvec, struct folio *folio)
{
        if (!folio_test_unevictable(folio) &&
            (folio_test_active(folio) || lru_gen_enabled())) {
                long nr_pages = folio_nr_pages(folio);

                lruvec_del_folio(lruvec, folio);
                folio_clear_active(folio);
                folio_clear_referenced(folio);
                lruvec_add_folio(lruvec, folio);

                __count_vm_events(PGDEACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
                                     nr_pages);
        }
}

static void lru_lazyfree_fn(struct lruvec *lruvec, struct folio *folio)
{
        if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
            !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
                long nr_pages = folio_nr_pages(folio);

                lruvec_del_folio(lruvec, folio);
                folio_clear_active(folio);
                folio_clear_referenced(folio);
                /*
                 * Lazyfree folios are clean anonymous folios.  They have
                 * the swapbacked flag cleared, to distinguish them from normal
                 * anonymous folios
                 */
                folio_clear_swapbacked(folio);
                lruvec_add_folio(lruvec, folio);

                __count_vm_events(PGLAZYFREE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
                                     nr_pages);
        }
}
/*
 * Drain pages out of the cpu's folio_batch.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
        struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
        struct folio_batch *fbatch = &fbatches->lru_add;

        if (folio_batch_count(fbatch))
                folio_batch_move_lru(fbatch, lru_add_fn);

        fbatch = &per_cpu(lru_rotate.fbatch, cpu);
        /* Disabling interrupts below acts as a compiler barrier. */
        if (data_race(folio_batch_count(fbatch))) {
                unsigned long flags;

                /* No harm done if a racing interrupt already did this */
                local_lock_irqsave(&lru_rotate.lock, flags);
                folio_batch_move_lru(fbatch, lru_move_tail_fn);
                local_unlock_irqrestore(&lru_rotate.lock, flags);
        }

        fbatch = &fbatches->lru_deactivate_file;
        if (folio_batch_count(fbatch))
                folio_batch_move_lru(fbatch, lru_deactivate_file_fn);

        fbatch = &fbatches->lru_deactivate;
        if (folio_batch_count(fbatch))
                folio_batch_move_lru(fbatch, lru_deactivate_fn);

        fbatch = &fbatches->lru_lazyfree;
        if (folio_batch_count(fbatch))
                folio_batch_move_lru(fbatch, lru_lazyfree_fn);

        folio_activate_drain(cpu);
}
/**
 * deactivate_file_folio() - Deactivate a file folio.
 * @folio: Folio to deactivate.
 *
 * This function hints to the VM that @folio is a good reclaim candidate,
 * for example if its invalidation fails due to the folio being dirty
 * or under writeback.
 *
 * Context: Caller holds a reference on the folio.
 */
void deactivate_file_folio(struct folio *folio)
{
        struct folio_batch *fbatch;

        /* Deactivating an unevictable folio will not accelerate reclaim */
        if (folio_test_unevictable(folio))
                return;

        folio_get(folio);
        local_lock(&cpu_fbatches.lock);
        fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file);
        folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
        local_unlock(&cpu_fbatches.lock);
}

/*
 * folio_deactivate - deactivate a folio
 * @folio: folio to deactivate
 *
 * folio_deactivate() moves @folio to the inactive list if @folio was on the
 * active list and was not unevictable. This is done to accelerate the
 * reclaim of @folio.
 */
void folio_deactivate(struct folio *folio)
{
        if (folio_test_lru(folio) && !folio_test_unevictable(folio) &&
            (folio_test_active(folio) || lru_gen_enabled())) {
                struct folio_batch *fbatch;

                folio_get(folio);
                local_lock(&cpu_fbatches.lock);
                fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
                folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
                local_unlock(&cpu_fbatches.lock);
        }
}
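/*
 * Usage sketch (added commentary): invalidation paths are the main callers
 * of the two helpers above. The shape, with a hypothetical try-drop helper
 * standing in for the real invalidation logic:
 *
 *	if (!my_try_drop_folio(mapping, folio))	// hypothetical helper
 *		deactivate_file_folio(folio);
 *
 * A folio that could not be dropped (dirty, under writeback, still mapped)
 * is at least nudged towards the inactive list and faster reclaim.
 */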
/**
 * folio_mark_lazyfree - make an anon folio lazyfree
 * @folio: folio to deactivate
 *
 * folio_mark_lazyfree() moves @folio to the inactive file list.
 * This is done to accelerate the reclaim of @folio.
 */
void folio_mark_lazyfree(struct folio *folio)
{
        if (folio_test_lru(folio) && folio_test_anon(folio) &&
            folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
            !folio_test_unevictable(folio)) {
                struct folio_batch *fbatch;

                folio_get(folio);
                local_lock(&cpu_fbatches.lock);
                fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
                folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
                local_unlock(&cpu_fbatches.lock);
        }
}

void lru_add_drain(void)
{
        local_lock(&cpu_fbatches.lock);
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&cpu_fbatches.lock);
        mlock_drain_local();
}

/*
 * It's called from per-cpu workqueue context in SMP case so
 * lru_add_drain_cpu and invalidate_bh_lrus_cpu should run on
 * the same cpu. It shouldn't be a problem in the !SMP case since
 * there is only one core and the locks disable preemption.
 */
static void lru_add_and_bh_lrus_drain(void)
{
        local_lock(&cpu_fbatches.lock);
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&cpu_fbatches.lock);
        invalidate_bh_lrus_cpu();
        mlock_drain_local();
}

void lru_add_drain_cpu_zone(struct zone *zone)
{
        local_lock(&cpu_fbatches.lock);
        lru_add_drain_cpu(smp_processor_id());
        drain_local_pages(zone);
        local_unlock(&cpu_fbatches.lock);
        mlock_drain_local();
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
        lru_add_and_bh_lrus_drain();
}
static bool cpu_needs_drain(unsigned int cpu)
{
        struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);

        /* Check these in order of likelihood that they're not zero */
        return folio_batch_count(&fbatches->lru_add) ||
                data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
                folio_batch_count(&fbatches->lru_deactivate_file) ||
                folio_batch_count(&fbatches->lru_deactivate) ||
                folio_batch_count(&fbatches->lru_lazyfree) ||
                folio_batch_count(&fbatches->activate) ||
                need_mlock_drain(cpu) ||
                has_bh_in_lru(cpu, NULL);
}
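/*
 * Worked example for the generation scheme in __lru_add_drain_all() below
 * (added commentary): say lru_drain_gen == 5 and CPUs A and B call
 * lru_add_drain_all() almost together. A reads this_gen == 5, wins the
 * mutex, bumps lru_drain_gen to 6 and flushes every CPU that needs it.
 * B also read this_gen == 5 but blocked on the mutex; when it finally gets
 * in, it finds lru_drain_gen == 6 != this_gen and exits early at (C):
 * the batches B cared about were already published before its read at (B),
 * so generation 6 has drained them on B's behalf.
 */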
/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
static inline void __lru_add_drain_all(bool force_all_cpus)
{
        /*
         * lru_drain_gen - Global pages generation number
         *
         * (A) Definition: global lru_drain_gen = x implies that all generations
         *     0 < n <= x are already *scheduled* for draining.
         *
         * This is an optimization for the highly-contended use case where a
         * user space workload keeps constantly generating a flow of pages for
         * each CPU.
         */
        static unsigned int lru_drain_gen;
        static struct cpumask has_work;
        static DEFINE_MUTEX(lock);
        unsigned cpu, this_gen;

        /*
         * Make sure nobody triggers this path before mm_percpu_wq is fully
         * initialized.
         */
        if (WARN_ON(!mm_percpu_wq))
                return;

        /*
         * Guarantee folio_batch counter stores visible by this CPU
         * are visible to other CPUs before loading the current drain
         * generation.
         */
        smp_mb();

        /*
         * (B) Locally cache global LRU draining generation number
         *
         * The read barrier ensures that the counter is loaded before the mutex
         * is taken. It pairs with smp_mb() inside the mutex critical section
         * at (D).
         */
        this_gen = smp_load_acquire(&lru_drain_gen);

        mutex_lock(&lock);

        /*
         * (C) Exit the draining operation if a newer generation, from another
         *     lru_add_drain_all(), was already scheduled for draining. Check (A).
         */
        if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
                goto done;

        /*
         * (D) Increment global generation number
         *
         * Pairs with smp_load_acquire() at (B), outside of the critical
         * section. Use a full memory barrier to guarantee that the
         * new global drain generation number is stored before loading
         * folio_batch counters.
         *
         * This pairing must be done here, before the for_each_online_cpu loop
         * below which drains the page vectors.
         *
         * Let x, y, and z represent some system CPU numbers, where x < y < z.
         * Assume CPU #z is in the middle of the for_each_online_cpu loop
         * below and has already reached CPU #y's per-cpu data. CPU #x comes
         * along, adds some pages to its per-cpu vectors, then calls
         * lru_add_drain_all().
         *
         * If the paired barrier is done at any later step, e.g. after the
         * loop, CPU #x will just exit at (C) and miss flushing out all of its
         * added pages.
         */
        WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
        smp_mb();

        cpumask_clear(&has_work);
        for_each_online_cpu(cpu) {
                struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

                if (cpu_needs_drain(cpu)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
                        queue_work_on(cpu, mm_percpu_wq, work);
                        __cpumask_set_cpu(cpu, &has_work);
                }
        }

        for_each_cpu(cpu, &has_work)
                flush_work(&per_cpu(lru_add_drain_work, cpu));

done:
        mutex_unlock(&lock);
}

void lru_add_drain_all(void)
{
        __lru_add_drain_all(false);
}
#else
void lru_add_drain_all(void)
{
        lru_add_drain();
}
#endif /* CONFIG_SMP */

atomic_t lru_disable_count = ATOMIC_INIT(0);
/*
 * lru_cache_disable() needs to be called before we start compiling
 * a list of pages to be migrated using isolate_lru_page().
 * It drains the pages on the LRU caches and then disables batching on
 * all cpus until lru_cache_enable() is called.
 *
 * Must be paired with a call to lru_cache_enable().
 */
void lru_cache_disable(void)
{
        atomic_inc(&lru_disable_count);
        /*
         * Readers of lru_disable_count are protected by either disabling
         * preemption or rcu_read_lock:
         *
         * preempt_disable, local_irq_disable  [bh_lru_lock()]
         * rcu_read_lock                       [rt_spin_lock CONFIG_PREEMPT_RT]
         * preempt_disable                     [local_lock !CONFIG_PREEMPT_RT]
         *
         * Since v5.1 kernel, synchronize_rcu() is guaranteed to wait on
         * preempt_disable() regions of code. So any CPU which sees
         * lru_disable_count = 0 will have exited the critical
         * section when synchronize_rcu() returns.
         */
        synchronize_rcu_expedited();
#ifdef CONFIG_SMP
        __lru_add_drain_all(true);
#else
        lru_add_and_bh_lrus_drain();
#endif
}
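/*
 * Usage sketch (added commentary): callers bracket migration like this,
 * with lru_cache_enable() from linux/swap.h undoing the count:
 *
 *	lru_cache_disable();
 *	// ...isolate and migrate the target pages...
 *	lru_cache_enable();
 *
 * While disabled, folio_batch_add_and_move() flushes immediately instead
 * of batching, so no folio can hide in a per-CPU batch during migration.
 */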
/**
 * release_pages - batched put_page()
 * @arg: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @arg.  If it
 * fell to zero, remove the page from the LRU and free it.
 *
 * Note that the argument can be an array of pages, encoded pages,
 * or folio pointers. We ignore any encoded bits, and turn any of
 * them into just a folio that gets free'd.
 */
void release_pages(release_pages_arg arg, int nr)
{
        int i;
        struct encoded_page **encoded = arg.encoded_pages;
        LIST_HEAD(pages_to_free);
        struct lruvec *lruvec = NULL;
        unsigned long flags = 0;
        unsigned int lock_batch;

        for (i = 0; i < nr; i++) {
                struct folio *folio;

                /* Turn any of the argument types into a folio */
                folio = page_folio(encoded_page_ptr(encoded[i]));

                /*
                 * Make sure the IRQ-safe lock-holding time does not get
                 * excessive with a continuous string of pages from the
                 * same lruvec. The lock is held only if lruvec != NULL.
                 */
                if (lruvec && ++lock_batch == SWAP_CLUSTER_MAX) {
                        unlock_page_lruvec_irqrestore(lruvec, flags);
                        lruvec = NULL;
                }

                if (is_huge_zero_page(&folio->page))
                        continue;

                if (folio_is_zone_device(folio)) {
                        if (lruvec) {
                                unlock_page_lruvec_irqrestore(lruvec, flags);
                                lruvec = NULL;
                        }
                        if (put_devmap_managed_page(&folio->page))
                                continue;
                        if (folio_put_testzero(folio))
                                free_zone_device_page(&folio->page);
                        continue;
                }

                if (!folio_put_testzero(folio))
                        continue;

                if (folio_test_large(folio)) {
                        if (lruvec) {
                                unlock_page_lruvec_irqrestore(lruvec, flags);
                                lruvec = NULL;
                        }
                        __folio_put_large(folio);
                        continue;
                }

                if (folio_test_lru(folio)) {
                        struct lruvec *prev_lruvec = lruvec;

                        lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
                                                             &flags);
                        if (prev_lruvec != lruvec)
                                lock_batch = 0;

                        lruvec_del_folio(lruvec, folio);
                        __folio_clear_lru_flags(folio);
                }

                /*
                 * In rare cases, when truncation or holepunching raced with
                 * munlock after VM_LOCKED was cleared, Mlocked may still be
                 * found set here.  This does not indicate a problem, unless
                 * "unevictable_pgs_cleared" appears worryingly large.
                 */
                if (unlikely(folio_test_mlocked(folio))) {
                        __folio_clear_mlocked(folio);
                        zone_stat_sub_folio(folio, NR_MLOCK);
                        count_vm_event(UNEVICTABLE_PGCLEARED);
                }

                list_add(&folio->lru, &pages_to_free);
        }
        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);

        mem_cgroup_uncharge_list(&pages_to_free);
        free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);
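/*
 * Usage sketch (added commentary): release_pages() accepts plain page
 * pointer arrays thanks to the release_pages_arg union, e.g. to drop a
 * get_user_pages_fast() result (NPAGES is hypothetical):
 *
 *	struct page *pages[NPAGES];
 *	int got = get_user_pages_fast(addr, NPAGES, 0, pages);
 *
 *	// ...use the pages...
 *	release_pages(pages, got);
 *
 * This takes one lruvec lock per run of same-lruvec pages instead of
 * paying for an independent put_page() on every entry.
 */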
/*
 * The folios which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those folios may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __folio_batch_release() will drain those queues here.
 * folio_batch_move_lru() calls folios_put() directly to avoid
 * mutual recursion.
 */
void __folio_batch_release(struct folio_batch *fbatch)
{
        if (!fbatch->percpu_pvec_drained) {
                lru_add_drain();
                fbatch->percpu_pvec_drained = true;
        }
        release_pages(fbatch->folios, folio_batch_count(fbatch));
        folio_batch_reinit(fbatch);
}
EXPORT_SYMBOL(__folio_batch_release);

/**
 * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
 * @fbatch: The batch to prune
 *
 * find_get_entries() fills a batch with both folios and shadow/swap/DAX
 * entries.  This function prunes all the non-folio entries from @fbatch
 * without leaving holes, so that it can be passed on to folio-only batch
 * operations.
 */
void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
{
        unsigned int i, j;

        for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];
                if (!xa_is_value(folio))
                        fbatch->folios[j++] = folio;
        }
        fbatch->nr = j;
}

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
        unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
                page_cluster = 2;
        else
                page_cluster = 3;
        /*
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more
         */
}
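/*
 * Added note: page_cluster is consumed as a power of two - swap readahead
 * brings in up to 1 << page_cluster pages - so the values chosen above mean
 * 4-page clusters on machines under 16MB and 8-page clusters otherwise,
 * and page_cluster_max == 31 simply caps what the vm.page-cluster sysctl
 * will accept.
 */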