// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/admin-guide/sysctl/vm.rst.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>
#include <linux/buffer_head.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? As a power of 2 */
int page_cluster;
const int page_cluster_max = 31;

/* Protecting only lru_rotate.fbatch which requires disabling interrupts */
struct lru_rotate {
	local_lock_t lock;
	struct folio_batch fbatch;
};
static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
	.lock = INIT_LOCAL_LOCK(lock),
};
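
/*
 * Note: lru_rotate is kept separate from cpu_fbatches below because its
 * batch is filled from folio_rotate_reclaimable(), which can run from
 * interrupt context when writeback completes, so it needs the IRQ-safe
 * local lock above rather than the preemption-only protection used for
 * cpu_fbatches.
 */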

/*
 * The following folio batches are grouped together because they are protected
 * by disabling preemption (and interrupts remain enabled).
 */
struct cpu_fbatches {
	local_lock_t lock;
	struct folio_batch lru_add;
	struct folio_batch lru_deactivate_file;
	struct folio_batch lru_deactivate;
	struct folio_batch lru_lazyfree;
#ifdef CONFIG_SMP
	struct folio_batch activate;
#endif
};
static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

static void __page_cache_release(struct folio *folio, struct lruvec **lruvecp,
		unsigned long *flagsp)
{
	if (folio_test_lru(folio)) {
		folio_lruvec_relock_irqsave(folio, lruvecp, flagsp);
		lruvec_del_folio(*lruvecp, folio);
		__folio_clear_lru_flags(folio);
	}

	/*
	 * In rare cases, when truncation or holepunching raced with
	 * munlock after VM_LOCKED was cleared, Mlocked may still be
	 * found set here.  This does not indicate a problem, unless
	 * "unevictable_pgs_cleared" appears worryingly large.
	 */
	if (unlikely(folio_test_mlocked(folio))) {
		long nr_pages = folio_nr_pages(folio);

		__folio_clear_mlocked(folio);
		zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
		count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
	}
}

/*
 * This path almost never happens for VM activity - pages are normally freed
 * in batches.  But it gets used by networking - and for compound pages.
 */
static void page_cache_release(struct folio *folio)
{
	struct lruvec *lruvec = NULL;
	unsigned long flags;

	__page_cache_release(folio, &lruvec, &flags);
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
}

static void __folio_put_small(struct folio *folio)
{
	page_cache_release(folio);
	mem_cgroup_uncharge(folio);
	free_unref_page(&folio->page, 0);
}

static void __folio_put_large(struct folio *folio)
{
	/*
	 * __page_cache_release() is supposed to be called for thp, not for
	 * hugetlb. This is because a hugetlb page never has PageLRU set
	 * (it's never listed on any LRU lists) and no memcg routines should
	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
	 */
	if (!folio_test_hugetlb(folio))
		page_cache_release(folio);
	destroy_large_folio(folio);
}

void __folio_put(struct folio *folio)
{
	if (unlikely(folio_is_zone_device(folio)))
		free_zone_device_page(&folio->page);
	else if (unlikely(folio_test_large(folio)))
		__folio_put_large(folio);
	else
		__folio_put_small(folio);
}
EXPORT_SYMBOL(__folio_put);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru.
 */
void put_pages_list(struct list_head *pages)
{
	struct folio_batch fbatch;
	struct folio *folio, *next;

	folio_batch_init(&fbatch);
	list_for_each_entry_safe(folio, next, pages, lru) {
		if (!folio_put_testzero(folio))
			continue;
		if (folio_test_large(folio)) {
			__folio_put_large(folio);
			continue;
		}
		/* LRU flag must be clear because it's passed using the lru */
		if (folio_batch_add(&fbatch, folio) > 0)
			continue;
		free_unref_folios(&fbatch);
	}

	if (fbatch.nr)
		free_unref_folios(&fbatch);
	INIT_LIST_HEAD(pages);
}
EXPORT_SYMBOL(put_pages_list);

typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);

static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)
{
	int was_unevictable = folio_test_clear_unevictable(folio);
	long nr_pages = folio_nr_pages(folio);

	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	/*
	 * Is an smp_mb__after_atomic() still required here, before
	 * folio_evictable() tests the mlocked flag, to rule out the possibility
	 * of stranding an evictable folio on an unevictable LRU?  I think
	 * not, because __munlock_folio() only clears the mlocked flag
	 * while the LRU lock is held.
	 *
	 * (That is not true of __page_cache_release(), and not necessarily
	 * true of folios_put(): but those only clear the mlocked flag after
	 * folio_put_testzero() has excluded any other users of the folio.)
	 */
	if (folio_evictable(folio)) {
		if (was_unevictable)
			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	} else {
		folio_clear_active(folio);
		folio_set_unevictable(folio);
		/*
		 * folio->mlock_count = !!folio_test_mlocked(folio)?
		 * But that leaves __mlock_folio() in doubt whether another
		 * actor has already counted the mlock or not.  Err on the
		 * safe side, underestimate, let page reclaim fix it, rather
		 * than leaving a page on the unevictable LRU indefinitely.
		 */
		folio->mlock_count = 0;
		if (!was_unevictable)
			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
	}

	lruvec_add_folio(lruvec, folio);
	trace_mm_lru_insertion(folio);
}

static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
{
	int i;
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		/* block memcg migration while the folio moves between lru */
		if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
			continue;

		folio_lruvec_relock_irqsave(folio, &lruvec, &flags);
		move_fn(lruvec, folio);

		folio_set_lru(folio);
	}

	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
	folios_put(fbatch);
}
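
/*
 * Queue @folio on @fbatch and, if the batch is now full, the folio is
 * large, or the per-CPU LRU caches have been disabled, drain the whole
 * batch through @move_fn immediately.
 */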
static void folio_batch_add_and_move(struct folio_batch *fbatch,
		struct folio *folio, move_fn_t move_fn)
{
	if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
	    !lru_cache_disabled())
		return;
	folio_batch_move_lru(fbatch, move_fn);
}

static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (!folio_test_unevictable(folio)) {
		lruvec_del_folio(lruvec, folio);
		folio_clear_active(folio);
		lruvec_add_folio_tail(lruvec, folio);
		__count_vm_events(PGROTATED, folio_nr_pages(folio));
	}
}

/*
 * Writeback is about to end against a folio which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it
 * to the tail of the inactive list.
 *
 * folio_rotate_reclaimable() must disable IRQs, to prevent nasty races.
 */
void folio_rotate_reclaimable(struct folio *folio)
{
	if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
	    !folio_test_unevictable(folio) && folio_test_lru(folio)) {
		struct folio_batch *fbatch;
		unsigned long flags;

		folio_get(folio);
		local_lock_irqsave(&lru_rotate.lock, flags);
		fbatch = this_cpu_ptr(&lru_rotate.fbatch);
		folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}
}

void lru_note_cost(struct lruvec *lruvec, bool file,
		   unsigned int nr_io, unsigned int nr_rotated)
{
	unsigned long cost;

	/*
	 * Reflect the relative cost of incurring IO and spending CPU
	 * time on rotations. This doesn't attempt to make a precise
	 * comparison, it just says: if reloads are about comparable
	 * between the LRU lists, or rotations are overwhelmingly
	 * different between them, adjust scan balance for CPU work.
	 */
	cost = nr_io * SWAP_CLUSTER_MAX + nr_rotated;
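
	/*
	 * Charge the cost to this lruvec and to every ancestor lruvec in
	 * the memcg hierarchy, so parents see the pressure generated by
	 * their children.
	 */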
	do {
		unsigned long lrusize;

		/*
		 * Hold lruvec->lru_lock is safe here, since
		 * 1) The pinned lruvec in reclaim, or
		 * 2) From a pre-LRU page during refault (which also holds the
		 *    rcu lock, so would be safe even if the page was on the LRU
		 *    and could move simultaneously to a new lruvec).
		 */
		spin_lock_irq(&lruvec->lru_lock);
		/* Record cost event */
		if (file)
			lruvec->file_cost += cost;
		else
			lruvec->anon_cost += cost;

		/*
		 * Decay previous events
		 *
		 * Because workloads change over time (and to avoid
		 * overflow) we keep these statistics as a floating
		 * average, which ends up weighing recent refaults
		 * more than old ones.
		 */
		lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
			  lruvec_page_state(lruvec, NR_ACTIVE_FILE);

		if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
			lruvec->file_cost /= 2;
			lruvec->anon_cost /= 2;
		}
		spin_unlock_irq(&lruvec->lru_lock);
	} while ((lruvec = parent_lruvec(lruvec)));
}

void lru_note_cost_refault(struct folio *folio)
{
	lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
		      folio_nr_pages(folio), 0);
}

static void folio_activate_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
		long nr_pages = folio_nr_pages(folio);

		lruvec_del_folio(lruvec, folio);
		folio_set_active(folio);
		lruvec_add_folio(lruvec, folio);
		trace_mm_lru_activate(folio);

		__count_vm_events(PGACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
				     nr_pages);
	}
}

#ifdef CONFIG_SMP
static void folio_activate_drain(int cpu)
{
	struct folio_batch *fbatch = &per_cpu(cpu_fbatches.activate, cpu);

	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, folio_activate_fn);
}

void folio_activate(struct folio *folio)
{
	if (folio_test_lru(folio) && !folio_test_active(folio) &&
	    !folio_test_unevictable(folio)) {
		struct folio_batch *fbatch;

		folio_get(folio);
		local_lock(&cpu_fbatches.lock);
		fbatch = this_cpu_ptr(&cpu_fbatches.activate);
		folio_batch_add_and_move(fbatch, folio, folio_activate_fn);
		local_unlock(&cpu_fbatches.lock);
	}
}

#else
static inline void folio_activate_drain(int cpu)
{
}
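
/*
 * Without SMP there is no per-CPU activate batch: activate the folio
 * directly under the lruvec lock.
 */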
void folio_activate(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio_test_clear_lru(folio)) {
		lruvec = folio_lruvec_lock_irq(folio);
		folio_activate_fn(lruvec, folio);
		unlock_page_lruvec_irq(lruvec);
		folio_set_lru(folio);
	}
}
#endif

static void __lru_cache_activate_folio(struct folio *folio)
{
	struct folio_batch *fbatch;
	int i;

	local_lock(&cpu_fbatches.lock);
	fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);

	/*
	 * Search backwards on the optimistic assumption that the folio being
	 * activated has just been added to this batch. Note that only
	 * the local batch is examined as a !LRU folio could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * batch that is currently being drained. Furthermore, marking
	 * a remote batch's folio active potentially hits a race where
	 * a folio is marked active just after it is added to the inactive
	 * list causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) {
		struct folio *batch_folio = fbatch->folios[i];

		if (batch_folio == folio) {
			folio_set_active(folio);
			break;
		}
	}

	local_unlock(&cpu_fbatches.lock);
}
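
/*
 * With the multi-gen LRU, an access is recorded by bumping the LRU_REFS
 * counter in folio->flags instead of moving the folio between LRU lists;
 * folio_mark_accessed() uses this in place of the activation path when
 * lru_gen_enabled().
 */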
#ifdef CONFIG_LRU_GEN
static void folio_inc_refs(struct folio *folio)
{
	unsigned long new_flags, old_flags = READ_ONCE(folio->flags);

	if (folio_test_unevictable(folio))
		return;

	if (!folio_test_referenced(folio)) {
		folio_set_referenced(folio);
		return;
	}

	if (!folio_test_workingset(folio)) {
		folio_set_workingset(folio);
		return;
	}

	/* see the comment on MAX_NR_TIERS */
	do {
		new_flags = old_flags & LRU_REFS_MASK;
		if (new_flags == LRU_REFS_MASK)
			break;

		new_flags += BIT(LRU_REFS_PGOFF);
		new_flags |= old_flags & ~LRU_REFS_MASK;
	} while (!try_cmpxchg(&folio->flags, &old_flags, new_flags));
}
#else
static void folio_inc_refs(struct folio *folio)
{
}
#endif /* CONFIG_LRU_GEN */

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void folio_mark_accessed(struct folio *folio)
{
	if (lru_gen_enabled()) {
		folio_inc_refs(folio);
		return;
	}

	if (!folio_test_referenced(folio)) {
		folio_set_referenced(folio);
	} else if (folio_test_unevictable(folio)) {
		/*
		 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
		 * this list is never rotated or maintained, so marking an
		 * unevictable page accessed has no effect.
		 */
	} else if (!folio_test_active(folio)) {
		/*
		 * If the folio is on the LRU, queue it for activation via
		 * cpu_fbatches.activate. Otherwise, assume the folio is in a
		 * folio_batch, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (folio_test_lru(folio))
			folio_activate(folio);
		else
			__lru_cache_activate_folio(folio);
		folio_clear_referenced(folio);
		workingset_activation(folio);
	}
	if (folio_test_idle(folio))
		folio_clear_idle(folio);
}
EXPORT_SYMBOL(folio_mark_accessed);

/**
 * folio_add_lru - Add a folio to an LRU list.
 * @folio: The folio to be added to the LRU.
 *
 * Queue the folio for addition to the LRU. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * folio_batch is drained. This gives a chance for the caller of folio_add_lru()
 * to have the folio added to the active list using folio_mark_accessed().
 */
void folio_add_lru(struct folio *folio)
{
	struct folio_batch *fbatch;

	VM_BUG_ON_FOLIO(folio_test_active(folio) &&
			folio_test_unevictable(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	/* see the comment in lru_gen_add_folio() */
	if (lru_gen_enabled() && !folio_test_unevictable(folio) &&
	    lru_gen_in_fault() && !(current->flags & PF_MEMALLOC))
		folio_set_active(folio);

	folio_get(folio);
	local_lock(&cpu_fbatches.lock);
	fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
	folio_batch_add_and_move(fbatch, folio, lru_add_fn);
	local_unlock(&cpu_fbatches.lock);
}
EXPORT_SYMBOL(folio_add_lru);

/**
 * folio_add_lru_vma() - Add a folio to the appropriate LRU list for this VMA.
 * @folio: The folio to be added to the LRU.
 * @vma: VMA in which the folio is mapped.
 *
 * If the VMA is mlocked, @folio is added to the unevictable list.
 * Otherwise, it is treated the same way as folio_add_lru().
 */
void folio_add_lru_vma(struct folio *folio, struct vm_area_struct *vma)
{
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
		mlock_new_folio(folio);
	else
		folio_add_lru(folio);
}

/*
 * If the folio cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the folio isn't mapped and dirty/writeback, the folio
 * could be reclaimed asap using the reclaim flag.
 *
 * 1. active, mapped folio     -> none
 * 2. active, dirty/writeback folio -> inactive, head, reclaim
 * 3. inactive, mapped folio   -> none
 * 4. inactive, dirty/writeback folio -> inactive, head, reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In 4, it moves to the head of the inactive list so the folio is
 * written out by flusher threads as this is much more efficient
 * than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
{
	bool active = folio_test_active(folio);
	long nr_pages = folio_nr_pages(folio);

	if (folio_test_unevictable(folio))
		return;

	/* Some processes are using the folio */
	if (folio_mapped(folio))
		return;

	lruvec_del_folio(lruvec, folio);
	folio_clear_active(folio);
	folio_clear_referenced(folio);

	if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
		/*
		 * Setting the reclaim flag could race with
		 * folio_end_writeback() and confuse readahead.  But the
		 * race window is _really_ small and it's not a critical
		 * problem.
		 */
		lruvec_add_folio(lruvec, folio);
		folio_set_reclaim(folio);
	} else {
		/*
		 * The folio's writeback ended while it was in the batch.
		 * We move that folio to the tail of the inactive list.
		 */
		lruvec_add_folio_tail(lruvec, folio);
		__count_vm_events(PGROTATED, nr_pages);
	}

	if (active) {
		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_deactivate_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (!folio_test_unevictable(folio) && (folio_test_active(folio) || lru_gen_enabled())) {
		long nr_pages = folio_nr_pages(folio);

		lruvec_del_folio(lruvec, folio);
		folio_clear_active(folio);
		folio_clear_referenced(folio);
		lruvec_add_folio(lruvec, folio);

		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_lazyfree_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
	    !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
		long nr_pages = folio_nr_pages(folio);

		lruvec_del_folio(lruvec, folio);
		folio_clear_active(folio);
		folio_clear_referenced(folio);
		/*
		 * Lazyfree folios are clean anonymous folios.  They have
		 * the swapbacked flag cleared, to distinguish them from normal
		 * anonymous folios
		 */
		folio_clear_swapbacked(folio);
		lruvec_add_folio(lruvec, folio);

		__count_vm_events(PGLAZYFREE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
				     nr_pages);
	}
}

/*
 * Drain pages out of the cpu's folio_batch.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
	struct folio_batch *fbatch = &fbatches->lru_add;

	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, lru_add_fn);

	fbatch = &per_cpu(lru_rotate.fbatch, cpu);
	/* Disabling interrupts below acts as a compiler barrier. */
	if (data_race(folio_batch_count(fbatch))) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_lock_irqsave(&lru_rotate.lock, flags);
		folio_batch_move_lru(fbatch, lru_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}

	fbatch = &fbatches->lru_deactivate_file;
	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, lru_deactivate_file_fn);

	fbatch = &fbatches->lru_deactivate;
	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, lru_deactivate_fn);

	fbatch = &fbatches->lru_lazyfree;
	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, lru_lazyfree_fn);

	folio_activate_drain(cpu);
}

/**
 * deactivate_file_folio() - Deactivate a file folio.
 * @folio: Folio to deactivate.
 *
 * This function hints to the VM that @folio is a good reclaim candidate,
 * for example if its invalidation fails due to the folio being dirty
 * or under writeback.
 *
 * Context: Caller holds a reference on the folio.
 */
void deactivate_file_folio(struct folio *folio)
{
	struct folio_batch *fbatch;

	/* Deactivating an unevictable folio will not accelerate reclaim */
	if (folio_test_unevictable(folio))
		return;

	folio_get(folio);
	local_lock(&cpu_fbatches.lock);
	fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file);
	folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
	local_unlock(&cpu_fbatches.lock);
}

/*
 * folio_deactivate - deactivate a folio
 * @folio: folio to deactivate
 *
 * folio_deactivate() moves @folio to the inactive list if @folio was on the
 * active list and was not unevictable. This is done to accelerate the
 * reclaim of @folio.
 */
void folio_deactivate(struct folio *folio)
{
	if (folio_test_lru(folio) && !folio_test_unevictable(folio) &&
	    (folio_test_active(folio) || lru_gen_enabled())) {
		struct folio_batch *fbatch;

		folio_get(folio);
		local_lock(&cpu_fbatches.lock);
		fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
		folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
		local_unlock(&cpu_fbatches.lock);
	}
}

/**
 * folio_mark_lazyfree - make an anon folio lazyfree
 * @folio: folio to deactivate
 *
 * folio_mark_lazyfree() moves @folio to the inactive file list.
 * This is done to accelerate the reclaim of @folio.
 */
void folio_mark_lazyfree(struct folio *folio)
{
	if (folio_test_lru(folio) && folio_test_anon(folio) &&
	    folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
	    !folio_test_unevictable(folio)) {
		struct folio_batch *fbatch;

		folio_get(folio);
		local_lock(&cpu_fbatches.lock);
		fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
		folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
		local_unlock(&cpu_fbatches.lock);
	}
}

void lru_add_drain(void)
{
	local_lock(&cpu_fbatches.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&cpu_fbatches.lock);
	mlock_drain_local();
}

/*
 * It's called from per-cpu workqueue context in SMP case so
 * lru_add_drain_cpu and invalidate_bh_lrus_cpu should run on
 * the same cpu. It shouldn't be a problem in !SMP case since
 * the core is only one and the locks will disable preemption.
 */
static void lru_add_and_bh_lrus_drain(void)
{
	local_lock(&cpu_fbatches.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&cpu_fbatches.lock);
	invalidate_bh_lrus_cpu();
	mlock_drain_local();
}

void lru_add_drain_cpu_zone(struct zone *zone)
{
	local_lock(&cpu_fbatches.lock);
	lru_add_drain_cpu(smp_processor_id());
	drain_local_pages(zone);
	local_unlock(&cpu_fbatches.lock);
	mlock_drain_local();
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_and_bh_lrus_drain();
}
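
/*
 * Return true if any of this CPU's LRU folio batches, its pending mlock
 * batch, or its buffer_head LRUs still hold entries that a drain would
 * flush.
 */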
static bool cpu_needs_drain(unsigned int cpu)
{
	struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);

	/* Check these in order of likelihood that they're not zero */
	return folio_batch_count(&fbatches->lru_add) ||
		data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
		folio_batch_count(&fbatches->lru_deactivate_file) ||
		folio_batch_count(&fbatches->lru_deactivate) ||
		folio_batch_count(&fbatches->lru_lazyfree) ||
		folio_batch_count(&fbatches->activate) ||
		need_mlock_drain(cpu) ||
		has_bh_in_lru(cpu, NULL);
}

/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
static inline void __lru_add_drain_all(bool force_all_cpus)
{
	/*
	 * lru_drain_gen - Global pages generation number
	 *
	 * (A) Definition: global lru_drain_gen = x implies that all generations
	 *     0 < n <= x are already *scheduled* for draining.
	 *
	 * This is an optimization for the highly-contended use case where a
	 * user space workload keeps constantly generating a flow of pages for
	 * each CPU.
	 */
	static unsigned int lru_drain_gen;
	static struct cpumask has_work;
	static DEFINE_MUTEX(lock);
	unsigned cpu, this_gen;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	/*
	 * Guarantee folio_batch counter stores visible by this CPU
	 * are visible to other CPUs before loading the current drain
	 * generation.
	 */
	smp_mb();

	/*
	 * (B) Locally cache global LRU draining generation number
	 *
	 * The read barrier ensures that the counter is loaded before the mutex
	 * is taken. It pairs with smp_mb() inside the mutex critical section
	 * at (D).
	 */
	this_gen = smp_load_acquire(&lru_drain_gen);

	mutex_lock(&lock);

	/*
	 * (C) Exit the draining operation if a newer generation, from another
	 * lru_add_drain_all(), was already scheduled for draining. Check (A).
	 */
	if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
		goto done;

	/*
	 * (D) Increment global generation number
	 *
	 * Pairs with smp_load_acquire() at (B), outside of the critical
	 * section. Use a full memory barrier to guarantee that the
	 * new global drain generation number is stored before loading
	 * folio_batch counters.
	 *
	 * This pairing must be done here, before the for_each_online_cpu loop
	 * below which drains the page vectors.
	 *
	 * Let x, y, and z represent some system CPU numbers, where x < y < z.
	 * Assume CPU #z is in the middle of the for_each_online_cpu loop
	 * below and has already reached CPU #y's per-cpu data. CPU #x comes
	 * along, adds some pages to its per-cpu vectors, then calls
	 * lru_add_drain_all().
	 *
	 * If the paired barrier is done at any later step, e.g. after the
	 * loop, CPU #x will just exit at (C) and miss flushing out all of its
	 * added pages.
	 */
	WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
	smp_mb();

	cpumask_clear(&has_work);
	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (cpu_needs_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			__cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

done:
	mutex_unlock(&lock);
}

void lru_add_drain_all(void)
{
	__lru_add_drain_all(false);
}
#else
void lru_add_drain_all(void)
{
	lru_add_drain();
}
#endif /* CONFIG_SMP */

atomic_t lru_disable_count = ATOMIC_INIT(0);

/*
 * lru_cache_disable() needs to be called before we start compiling
 * a list of pages to be migrated using isolate_lru_page().
 * It drains pages on the LRU cache and then disables it on all cpus until
 * lru_cache_enable() is called.
 *
 * Must be paired with a call to lru_cache_enable().
 */
void lru_cache_disable(void)
{
	atomic_inc(&lru_disable_count);
	/*
	 * Readers of lru_disable_count are protected by either disabling
	 * preemption or rcu_read_lock:
	 *
	 * preempt_disable, local_irq_disable  [bh_lru_lock()]
	 * rcu_read_lock		       [rt_spin_lock CONFIG_PREEMPT_RT]
	 * preempt_disable		       [local_lock !CONFIG_PREEMPT_RT]
	 *
	 * Since v5.1 kernel, synchronize_rcu() is guaranteed to wait on
	 * preempt_disable() regions of code. So any CPU which sees
	 * lru_disable_count = 0 will have exited the critical
	 * section when synchronize_rcu() returns.
	 */
	synchronize_rcu_expedited();
#ifdef CONFIG_SMP
	__lru_add_drain_all(true);
#else
	lru_add_and_bh_lrus_drain();
#endif
}

/**
 * folios_put_refs - Reduce the reference count on a batch of folios.
 * @folios: The folios.
 * @refs: The number of refs to subtract from each folio.
 *
 * Like folio_put(), but for a batch of folios.  This is more efficient
 * than writing the loop yourself as it will optimise the locks which need
 * to be taken if the folios are freed.  The folios batch is returned
 * empty and ready to be reused for another batch; there is no need
 * to reinitialise it.  If @refs is NULL, we subtract one from each
 * folio refcount.
 *
 * Context: May be called in process or interrupt context, but not in NMI
 * context.  May be called while holding a spinlock.
 */
void folios_put_refs(struct folio_batch *folios, unsigned int *refs)
{
	int i, j;
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;

	for (i = 0, j = 0; i < folios->nr; i++) {
		struct folio *folio = folios->folios[i];
		unsigned int nr_refs = refs ? refs[i] : 1;

		if (is_huge_zero_page(&folio->page))
			continue;

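		/*
		 * Zone device (e.g. device private) folios are never on the
		 * LRU and are freed through their own path; drop any held
		 * lruvec lock before handling them.
		 */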
		if (folio_is_zone_device(folio)) {
			if (lruvec) {
				unlock_page_lruvec_irqrestore(lruvec, flags);
				lruvec = NULL;
			}
			if (put_devmap_managed_page_refs(&folio->page, nr_refs))
				continue;
			if (folio_ref_sub_and_test(folio, nr_refs))
				free_zone_device_page(&folio->page);
			continue;
		}

		if (!folio_ref_sub_and_test(folio, nr_refs))
			continue;

		/* hugetlb has its own memcg */
		if (folio_test_hugetlb(folio)) {
			if (lruvec) {
				unlock_page_lruvec_irqrestore(lruvec, flags);
				lruvec = NULL;
			}
			free_huge_folio(folio);
			continue;
		}
		if (folio_test_large(folio) &&
		    folio_test_large_rmappable(folio))
			folio_undo_large_rmappable(folio);

		__page_cache_release(folio, &lruvec, &flags);

		if (j != i)
			folios->folios[j] = folio;
		j++;
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
	if (!j) {
		folio_batch_reinit(folios);
		return;
	}

	folios->nr = j;
	mem_cgroup_uncharge_folios(folios);
	free_unref_folios(folios);
}
EXPORT_SYMBOL(folios_put_refs);

/**
 * release_pages - batched put_page()
 * @arg: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @arg.  If it
 * fell to zero, remove the page from the LRU and free it.
 *
 * Note that the argument can be an array of pages, encoded pages,
 * or folio pointers. We ignore any encoded bits, and turn any of
 * them into just a folio that gets free'd.
103899fbb6bfSMatthew Wilcox (Oracle) /**
103999fbb6bfSMatthew Wilcox (Oracle)  * release_pages - batched put_page()
104099fbb6bfSMatthew Wilcox (Oracle)  * @arg: array of pages to release
104199fbb6bfSMatthew Wilcox (Oracle)  * @nr: number of pages
104299fbb6bfSMatthew Wilcox (Oracle)  *
104399fbb6bfSMatthew Wilcox (Oracle)  * Decrement the reference count on all the pages in @arg. If it
104499fbb6bfSMatthew Wilcox (Oracle)  * fell to zero, remove the page from the LRU and free it.
104599fbb6bfSMatthew Wilcox (Oracle)  *
104699fbb6bfSMatthew Wilcox (Oracle)  * Note that the argument can be an array of pages, encoded pages,
104799fbb6bfSMatthew Wilcox (Oracle)  * or folio pointers. We ignore any encoded bits, and turn any of
104899fbb6bfSMatthew Wilcox (Oracle)  * them into just a folio that gets free'd.
104999fbb6bfSMatthew Wilcox (Oracle)  */
105099fbb6bfSMatthew Wilcox (Oracle) void release_pages(release_pages_arg arg, int nr)
105199fbb6bfSMatthew Wilcox (Oracle) {
105299fbb6bfSMatthew Wilcox (Oracle) 	struct folio_batch fbatch;
105399fbb6bfSMatthew Wilcox (Oracle) 	int refs[PAGEVEC_SIZE];
105499fbb6bfSMatthew Wilcox (Oracle) 	struct encoded_page **encoded = arg.encoded_pages;
105599fbb6bfSMatthew Wilcox (Oracle) 	int i;
105699fbb6bfSMatthew Wilcox (Oracle) 
105799fbb6bfSMatthew Wilcox (Oracle) 	folio_batch_init(&fbatch);
105899fbb6bfSMatthew Wilcox (Oracle) 	for (i = 0; i < nr; i++) {
105999fbb6bfSMatthew Wilcox (Oracle) 		/* Turn any of the argument types into a folio */
106099fbb6bfSMatthew Wilcox (Oracle) 		struct folio *folio = page_folio(encoded_page_ptr(encoded[i]));
106199fbb6bfSMatthew Wilcox (Oracle) 
106299fbb6bfSMatthew Wilcox (Oracle) 		/* Is our next entry actually "nr_pages" -> "nr_refs" ? */
106399fbb6bfSMatthew Wilcox (Oracle) 		refs[fbatch.nr] = 1;
106499fbb6bfSMatthew Wilcox (Oracle) 		if (unlikely(encoded_page_flags(encoded[i]) &
106599fbb6bfSMatthew Wilcox (Oracle) 			     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
106699fbb6bfSMatthew Wilcox (Oracle) 			refs[fbatch.nr] = encoded_nr_pages(encoded[++i]);
106799fbb6bfSMatthew Wilcox (Oracle) 
106899fbb6bfSMatthew Wilcox (Oracle) 		if (folio_batch_add(&fbatch, folio) > 0)
106999fbb6bfSMatthew Wilcox (Oracle) 			continue;
107099fbb6bfSMatthew Wilcox (Oracle) 		folios_put_refs(&fbatch, refs);
107199fbb6bfSMatthew Wilcox (Oracle) 	}
107299fbb6bfSMatthew Wilcox (Oracle) 
107399fbb6bfSMatthew Wilcox (Oracle) 	if (fbatch.nr)
107499fbb6bfSMatthew Wilcox (Oracle) 		folios_put_refs(&fbatch, refs);
10751da177e4SLinus Torvalds }
10760be8557bSMiklos Szeredi EXPORT_SYMBOL(release_pages);
10771da177e4SLinus Torvalds 
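/*
 * A minimal usage sketch, not from the original file: the common caller
 * has a plain array of page pointers, and release_pages_arg is a
 * transparent union, so struct page **, struct folio ** and
 * struct encoded_page ** callers all make the same call.
 * example_release_page_array() is a hypothetical helper, not a kernel API.
 */
static void example_release_page_array(struct page **pages, int nr)
{
	/* Drop one reference per page; pages that reach zero are freed. */
	release_pages(pages, nr);
}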
10781da177e4SLinus Torvalds /*
10791e0877d5SMatthew Wilcox (Oracle)  * The folios which we're about to release may be in the deferred lru-addition
10801da177e4SLinus Torvalds  * queues. That would prevent them from really being freed right now. That's
10811e0877d5SMatthew Wilcox (Oracle)  * OK from a correctness point of view but is inefficient - those folios may be
10821da177e4SLinus Torvalds  * cache-warm and we want to give them back to the page allocator ASAP.
10831da177e4SLinus Torvalds  *
10841e0877d5SMatthew Wilcox (Oracle)  * So __folio_batch_release() will drain those queues here.
108570dea534SMatthew Wilcox (Oracle)  * folio_batch_move_lru() calls folios_put() directly to avoid
10861da177e4SLinus Torvalds  * mutual recursion.
10871da177e4SLinus Torvalds  */
10881e0877d5SMatthew Wilcox (Oracle) void __folio_batch_release(struct folio_batch *fbatch)
10891da177e4SLinus Torvalds {
10901e0877d5SMatthew Wilcox (Oracle) 	if (!fbatch->percpu_pvec_drained) {
10911da177e4SLinus Torvalds 		lru_add_drain();
10921e0877d5SMatthew Wilcox (Oracle) 		fbatch->percpu_pvec_drained = true;
1093d9ed0d08SMel Gorman 	}
10946871cc57SMatthew Wilcox (Oracle) 	folios_put(fbatch);
10951da177e4SLinus Torvalds }
10961e0877d5SMatthew Wilcox (Oracle) EXPORT_SYMBOL(__folio_batch_release);
10977f285701SSteve French 
10981da177e4SLinus Torvalds /**
10991613fac9SMatthew Wilcox (Oracle)  * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
11001613fac9SMatthew Wilcox (Oracle)  * @fbatch: The batch to prune
11010cd6144aSJohannes Weiner  *
11021613fac9SMatthew Wilcox (Oracle)  * find_get_entries() fills a batch with both folios and shadow/swap/DAX
11031613fac9SMatthew Wilcox (Oracle)  * entries. This function prunes all the non-folio entries from @fbatch
11041613fac9SMatthew Wilcox (Oracle)  * without leaving holes, so that it can be passed on to folio-only batch
11051613fac9SMatthew Wilcox (Oracle)  * operations.
11060cd6144aSJohannes Weiner  */
11071613fac9SMatthew Wilcox (Oracle) void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
11080cd6144aSJohannes Weiner {
11091613fac9SMatthew Wilcox (Oracle) 	unsigned int i, j;
11100cd6144aSJohannes Weiner 
11111613fac9SMatthew Wilcox (Oracle) 	for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
11121613fac9SMatthew Wilcox (Oracle) 		struct folio *folio = fbatch->folios[i];
11131613fac9SMatthew Wilcox (Oracle) 		if (!xa_is_value(folio))
11141613fac9SMatthew Wilcox (Oracle) 			fbatch->folios[j++] = folio;
11150cd6144aSJohannes Weiner 	}
11161613fac9SMatthew Wilcox (Oracle) 	fbatch->nr = j;
11170cd6144aSJohannes Weiner }
11180cd6144aSJohannes Weiner 
11191da177e4SLinus Torvalds /*
11201da177e4SLinus Torvalds  * Perform any setup for the swap system
11211da177e4SLinus Torvalds  */
11221da177e4SLinus Torvalds void __init swap_setup(void)
11231da177e4SLinus Torvalds {
1124ca79b0c2SArun KS 	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);
1125e0bf68ddSPeter Zijlstra 
11261da177e4SLinus Torvalds 	/* Use a smaller cluster for small-memory machines */
11271da177e4SLinus Torvalds 	if (megs < 16)
11281da177e4SLinus Torvalds 		page_cluster = 2;
11291da177e4SLinus Torvalds 	else
11301da177e4SLinus Torvalds 		page_cluster = 3;
11311da177e4SLinus Torvalds 	/*
11321da177e4SLinus Torvalds 	 * Right now other parts of the system mean that we
11331da177e4SLinus Torvalds 	 * _really_ don't want to cluster much more
11341da177e4SLinus Torvalds 	 */
11351da177e4SLinus Torvalds }
1136
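/*
 * An illustrative worked example, not from the original file: with 4KiB
 * pages (PAGE_SHIFT == 12), the shift in swap_setup() converts the page
 * count to MiB, so a 512MiB machine gets page_cluster = 3 and swap
 * readahead then works on up to 1 << 3 = 8 consecutive pages at a time.
 * The value can later be tuned via /proc/sys/vm/page-cluster.
 * example_swap_readahead_pages() is a hypothetical helper, not a kernel API.
 */
static inline unsigned int example_swap_readahead_pages(void)
{
	/* Pages per swap readahead cluster, e.g. 8 when page_cluster == 3. */
	return 1U << page_cluster;
}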