// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/swap.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem.  Fine-tuning documentation can be found in
 * Documentation/admin-guide/sysctl/vm.rst.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>
#include <linux/buffer_head.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

/* Protecting only lru_rotate.fbatch which requires disabling interrupts */
struct lru_rotate {
	local_lock_t lock;
	struct folio_batch fbatch;
};
static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
	.lock = INIT_LOCAL_LOCK(lock),
};
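/*
 * Illustrative sketch (it mirrors folio_rotate_reclaimable() below, and is
 * not an additional API): rotation may be requested from the writeback
 * completion path, i.e. from interrupt context, which is why lru_rotate
 * gets its own IRQ-safe local lock while the cpu_fbatches below only need
 * preemption disabled:
 *
 *	unsigned long flags;
 *	struct folio_batch *fbatch;
 *
 *	local_lock_irqsave(&lru_rotate.lock, flags);
 *	fbatch = this_cpu_ptr(&lru_rotate.fbatch);
 *	folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
 *	local_unlock_irqrestore(&lru_rotate.lock, flags);
 */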
/*
 * The following folio batches are grouped together because they are protected
 * by disabling preemption (and interrupts remain enabled).
 */
struct cpu_fbatches {
	local_lock_t lock;
	struct folio_batch lru_add;
	struct folio_batch lru_deactivate_file;
	struct folio_batch lru_deactivate;
	struct folio_batch lru_lazyfree;
#ifdef CONFIG_SMP
	struct folio_batch activate;
#endif
};
static DEFINE_PER_CPU(struct cpu_fbatches, cpu_fbatches) = {
	.lock = INIT_LOCAL_LOCK(lock),
};
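/*
 * Sketch of the usage pattern shared by all cpu_fbatches members, as used
 * by folio_add_lru(), deactivate_page() and friends below: take the
 * per-CPU lock (which only disables preemption), add the folio to this
 * CPU's batch, and let folio_batch_add_and_move() flush the whole batch
 * under the LRU lock once it fills up:
 *
 *	struct folio_batch *fbatch;
 *
 *	folio_get(folio);
 *	local_lock(&cpu_fbatches.lock);
 *	fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
 *	folio_batch_add_and_move(fbatch, folio, lru_add_fn);
 *	local_unlock(&cpu_fbatches.lock);
 */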
/*
 * This path almost never happens for VM activity - pages are normally freed
 * via pagevecs.  But it gets used by networking - and for compound pages.
 */
static void __page_cache_release(struct folio *folio)
{
	if (folio_test_lru(folio)) {
		struct lruvec *lruvec;
		unsigned long flags;

		lruvec = folio_lruvec_lock_irqsave(folio, &flags);
		lruvec_del_folio(lruvec, folio);
		__folio_clear_lru_flags(folio);
		unlock_page_lruvec_irqrestore(lruvec, flags);
	}
	/* See comment on folio_test_mlocked in release_pages() */
	if (unlikely(folio_test_mlocked(folio))) {
		long nr_pages = folio_nr_pages(folio);

		__folio_clear_mlocked(folio);
		zone_stat_mod_folio(folio, NR_MLOCK, -nr_pages);
		count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);
	}
}

static void __folio_put_small(struct folio *folio)
{
	__page_cache_release(folio);
	mem_cgroup_uncharge(folio);
	free_unref_page(&folio->page, 0);
}

static void __folio_put_large(struct folio *folio)
{
	/*
	 * __page_cache_release() is supposed to be called for thp, not for
	 * hugetlb.  This is because a hugetlb page never has the LRU flag set
	 * (it is never put on any LRU list) and no memcg routines should be
	 * called for hugetlb (it has a separate hugetlb_cgroup).
	 */
	if (!folio_test_hugetlb(folio))
		__page_cache_release(folio);
	destroy_compound_page(&folio->page);
}

void __folio_put(struct folio *folio)
{
	if (unlikely(folio_is_zone_device(folio)))
		free_zone_device_page(&folio->page);
	else if (unlikely(folio_test_large(folio)))
		__folio_put_large(folio);
	else
		__folio_put_small(folio);
}
EXPORT_SYMBOL(__folio_put);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.
 */
void put_pages_list(struct list_head *pages)
{
	struct folio *folio, *next;

	list_for_each_entry_safe(folio, next, pages, lru) {
		if (!folio_put_testzero(folio)) {
			list_del(&folio->lru);
			continue;
		}
		if (folio_test_large(folio)) {
			list_del(&folio->lru);
			__folio_put_large(folio);
			continue;
		}
		/* LRU flag must be clear because it's passed using the lru */
	}

	free_unref_page_list(pages);
	INIT_LIST_HEAD(pages);
}
EXPORT_SYMBOL(put_pages_list);
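/*
 * Example caller of put_pages_list() (hypothetical; assumes the caller holds
 * one reference per folio, threaded on folio->lru):
 *
 *	LIST_HEAD(pages);
 *
 *	list_add(&folio->lru, &pages);
 *	...
 *	put_pages_list(&pages);
 *
 * The list is reinitialised to empty on return, so it can be reused.
 */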
/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned.  This may be fewer than the number
 * requested.  If nr_segs is 0 or negative, returns 0.  If no pages were
 * pinned, returns 0.  Each page returned must be released with a put_page()
 * call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		get_page(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);
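/*
 * Example use of get_kernel_pages() (hypothetical; "buf" is assumed to be a
 * page-aligned kernel address): each kvec segment has to describe exactly
 * one page, and every page returned must eventually be released with
 * put_page():
 *
 *	struct kvec kiov = { .iov_base = buf, .iov_len = PAGE_SIZE };
 *	struct page *page;
 *
 *	if (get_kernel_pages(&kiov, 1, 0, &page) == 1) {
 *		... use the page ...
 *		put_page(page);
 *	}
 */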
typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio);

static void lru_add_fn(struct lruvec *lruvec, struct folio *folio)
{
	int was_unevictable = folio_test_clear_unevictable(folio);
	long nr_pages = folio_nr_pages(folio);

	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	/*
	 * Is an smp_mb__after_atomic() still required here, before
	 * folio_evictable() tests the mlocked flag, to rule out the
	 * possibility of stranding an evictable folio on an unevictable
	 * LRU?  I think not, because __munlock_page() only clears the
	 * mlocked flag while the LRU lock is held.
	 *
	 * (That is not true of __page_cache_release(), and not necessarily
	 * true of release_pages(): but those only clear the mlocked flag after
	 * folio_put_testzero() has excluded any other users of the folio.)
	 */
	if (folio_evictable(folio)) {
		if (was_unevictable)
			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	} else {
		folio_clear_active(folio);
		folio_set_unevictable(folio);
		/*
		 * folio->mlock_count = !!folio_test_mlocked(folio)?
		 * But that leaves __mlock_page() in doubt whether another
		 * actor has already counted the mlock or not.  Err on the
		 * safe side, underestimate; let page reclaim fix it, rather
		 * than leaving a page on the unevictable LRU indefinitely.
		 */
		folio->mlock_count = 0;
		if (!was_unevictable)
			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
	}

	lruvec_add_folio(lruvec, folio);
	trace_mm_lru_insertion(folio);
}

static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn)
{
	int i;
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;

	for (i = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];

		/* block memcg migration while the folio moves between lru */
		if (move_fn != lru_add_fn && !folio_test_clear_lru(folio))
			continue;

		lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags);
		move_fn(lruvec, folio);

		folio_set_lru(folio);
	}

	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);
	folios_put(fbatch->folios, folio_batch_count(fbatch));
	folio_batch_init(fbatch);
}

static void folio_batch_add_and_move(struct folio_batch *fbatch,
		struct folio *folio, move_fn_t move_fn)
{
	if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) &&
	    !lru_cache_disabled())
		return;
	folio_batch_move_lru(fbatch, move_fn);
}

static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (!folio_test_unevictable(folio)) {
		lruvec_del_folio(lruvec, folio);
		folio_clear_active(folio);
		lruvec_add_folio_tail(lruvec, folio);
		__count_vm_events(PGROTATED, folio_nr_pages(folio));
	}
}
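/*
 * Contract for move_fn callbacks, as implemented by lru_move_tail_fn()
 * above and the other *_fn helpers below: folio_batch_move_lru() invokes
 * them with lruvec->lru_lock held and IRQs disabled, and - except for
 * lru_add_fn(), whose folios are not yet on any LRU - only after
 * folio_test_clear_lru() succeeded, so the callback owns the folio's LRU
 * state until folio_set_lru() restores it afterwards.
 */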
/*
 * Writeback is about to end against a folio which has been marked for
 * immediate reclaim.  If it still appears to be reclaimable, move it
 * to the tail of the inactive list.
 *
 * folio_rotate_reclaimable() must disable IRQs, to prevent nasty races.
 */
void folio_rotate_reclaimable(struct folio *folio)
{
	if (!folio_test_locked(folio) && !folio_test_dirty(folio) &&
	    !folio_test_unevictable(folio) && folio_test_lru(folio)) {
		struct folio_batch *fbatch;
		unsigned long flags;

		folio_get(folio);
		local_lock_irqsave(&lru_rotate.lock, flags);
		fbatch = this_cpu_ptr(&lru_rotate.fbatch);
		folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}
}
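/*
 * Sketch of the expected caller (modelled on folio_end_writeback() in
 * mm/filemap.c): when writeback finishes on a folio that reclaim marked
 * with the reclaim flag, the folio is rotated so reclaim finds it first:
 *
 *	if (folio_test_reclaim(folio)) {
 *		folio_clear_reclaim(folio);
 *		folio_rotate_reclaimable(folio);
 *	}
 */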
void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
{
	do {
		unsigned long lrusize;

		/*
		 * Taking lruvec->lru_lock is safe here, because we come
		 * either
		 * 1) from reclaim, where the lruvec is already pinned, or
		 * 2) from a pre-LRU page during refault (which also holds the
		 *    rcu lock, so it would be safe even if the page was on the
		 *    LRU and could move simultaneously to a new lruvec).
		 */
		spin_lock_irq(&lruvec->lru_lock);
		/* Record cost event */
		if (file)
			lruvec->file_cost += nr_pages;
		else
			lruvec->anon_cost += nr_pages;

		/*
		 * Decay previous events
		 *
		 * Because workloads change over time (and to avoid
		 * overflow) we keep these statistics as a floating
		 * average, which ends up weighing recent refaults
		 * more than old ones.
		 */
		lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
			  lruvec_page_state(lruvec, NR_ACTIVE_FILE);

		if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
			lruvec->file_cost /= 2;
			lruvec->anon_cost /= 2;
		}
		spin_unlock_irq(&lruvec->lru_lock);
	} while ((lruvec = parent_lruvec(lruvec)));
}

void lru_note_cost_folio(struct folio *folio)
{
	lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio),
			folio_nr_pages(folio));
}
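/*
 * Worked example of the decay in lru_note_cost() (hypothetical numbers):
 * with lrusize = 4000 pages, both costs are halved once file_cost +
 * anon_cost exceeds lrusize / 4 = 1000, e.g. file_cost = 800 and
 * anon_cost = 300 become 400 and 150.  Events noted after that point
 * therefore carry twice the weight of the decayed history, and the sums
 * can never overflow.
 */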
static void folio_activate_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (!folio_test_active(folio) && !folio_test_unevictable(folio)) {
		long nr_pages = folio_nr_pages(folio);

		lruvec_del_folio(lruvec, folio);
		folio_set_active(folio);
		lruvec_add_folio(lruvec, folio);
		trace_mm_lru_activate(folio);

		__count_vm_events(PGACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
				     nr_pages);
	}
}

#ifdef CONFIG_SMP
static void folio_activate_drain(int cpu)
{
	struct folio_batch *fbatch = &per_cpu(cpu_fbatches.activate, cpu);

	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, folio_activate_fn);
}

static void folio_activate(struct folio *folio)
{
	if (folio_test_lru(folio) && !folio_test_active(folio) &&
	    !folio_test_unevictable(folio)) {
		struct folio_batch *fbatch;

		folio_get(folio);
		local_lock(&cpu_fbatches.lock);
		fbatch = this_cpu_ptr(&cpu_fbatches.activate);
		folio_batch_add_and_move(fbatch, folio, folio_activate_fn);
		local_unlock(&cpu_fbatches.lock);
	}
}

#else
static inline void folio_activate_drain(int cpu)
{
}

static void folio_activate(struct folio *folio)
{
	struct lruvec *lruvec;

	if (folio_test_clear_lru(folio)) {
		lruvec = folio_lruvec_lock_irq(folio);
		folio_activate_fn(lruvec, folio);
		unlock_page_lruvec_irq(lruvec);
		folio_set_lru(folio);
	}
}
#endif

static void __lru_cache_activate_folio(struct folio *folio)
{
	struct folio_batch *fbatch;
	int i;

	local_lock(&cpu_fbatches.lock);
	fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);

	/*
	 * Search backwards on the optimistic assumption that the folio being
	 * activated has just been added to this batch.  Note that only
	 * the local batch is examined as a !LRU folio could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * batch that is currently being drained.  Furthermore, marking
	 * a remote batch's folio active potentially hits a race where
	 * a folio is marked active just after it is added to the inactive
	 * list, causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = folio_batch_count(fbatch) - 1; i >= 0; i--) {
		struct folio *batch_folio = fbatch->folios[i];

		if (batch_folio == folio) {
			folio_set_active(folio);
			break;
		}
	}

	local_unlock(&cpu_fbatches.lock);
}
/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void folio_mark_accessed(struct folio *folio)
{
	if (!folio_test_referenced(folio)) {
		folio_set_referenced(folio);
	} else if (folio_test_unevictable(folio)) {
		/*
		 * Unevictable pages are on the "LRU_UNEVICTABLE" list.  But
		 * this list is never rotated or maintained, so marking an
		 * unevictable page accessed has no effect.
		 */
	} else if (!folio_test_active(folio)) {
		/*
		 * If the folio is on the LRU, queue it for activation via
		 * cpu_fbatches.activate.  Otherwise, assume the folio is in a
		 * folio_batch, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (folio_test_lru(folio))
			folio_activate(folio);
		else
			__lru_cache_activate_folio(folio);
		folio_clear_referenced(folio);
		workingset_activation(folio);
	}
	if (folio_test_idle(folio))
		folio_clear_idle(folio);
}
EXPORT_SYMBOL(folio_mark_accessed);

/**
 * folio_add_lru - Add a folio to an LRU list.
 * @folio: The folio to be added to the LRU.
 *
 * Queue the folio for addition to the LRU.  The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * folio_batch is drained.  This gives a chance for the caller of
 * folio_add_lru() to have the folio added to the active list using
 * folio_mark_accessed().
 */
void folio_add_lru(struct folio *folio)
{
	struct folio_batch *fbatch;

	VM_BUG_ON_FOLIO(folio_test_active(folio) &&
			folio_test_unevictable(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_lru(folio), folio);

	folio_get(folio);
	local_lock(&cpu_fbatches.lock);
	fbatch = this_cpu_ptr(&cpu_fbatches.lru_add);
	folio_batch_add_and_move(fbatch, folio, lru_add_fn);
	local_unlock(&cpu_fbatches.lock);
}
EXPORT_SYMBOL(folio_add_lru);
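/*
 * Example of the deferral described above (hypothetical caller): a folio
 * that is queued for the LRU and referenced twice before the batch drains
 * reaches the active list with a single LRU insertion:
 *
 *	folio_add_lru(folio);		queued in cpu_fbatches.lru_add
 *	folio_mark_accessed(folio);	inactive,unreferenced -> inactive,referenced
 *	folio_mark_accessed(folio);	found in the local batch, active flag set
 *	lru_add_drain();		lru_add_fn() inserts it as active
 */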
/**
 * lru_cache_add_inactive_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the inactive or unevictable LRU list, depending on its
 * evictability.
 */
void lru_cache_add_inactive_or_unevictable(struct page *page,
		struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED))
		mlock_new_page(page);
	else
		lru_cache_add(page);
}
/*
 * If the folio cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the folio isn't mapped and dirty/writeback, the folio
 * could be reclaimed asap using the reclaim flag.
 *
 * 1. active, mapped folio		-> none
 * 2. active, dirty/writeback folio	-> inactive, head, reclaim
 * 3. inactive, mapped folio		-> none
 * 4. inactive, dirty/writeback folio	-> inactive, head, reclaim
 * 5. inactive, clean			-> inactive, tail
 * 6. Others				-> none
 *
 * In 4, it moves to the head of the inactive list so the folio is
 * written out by flusher threads as this is much more efficient
 * than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct lruvec *lruvec, struct folio *folio)
{
	bool active = folio_test_active(folio);
	long nr_pages = folio_nr_pages(folio);

	if (folio_test_unevictable(folio))
		return;

	/* Some processes are using the folio */
	if (folio_mapped(folio))
		return;

	lruvec_del_folio(lruvec, folio);
	folio_clear_active(folio);
	folio_clear_referenced(folio);

	if (folio_test_writeback(folio) || folio_test_dirty(folio)) {
		/*
		 * Setting the reclaim flag could race with
		 * folio_end_writeback() and confuse readahead.  But the
		 * race window is _really_ small and it's not a critical
		 * problem.
		 */
		lruvec_add_folio(lruvec, folio);
		folio_set_reclaim(folio);
	} else {
		/*
		 * The folio's writeback ended while it was in the batch.
		 * We move that folio to the tail of the inactive list.
		 */
		lruvec_add_folio_tail(lruvec, folio);
		__count_vm_events(PGROTATED, nr_pages);
	}

	if (active) {
		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_deactivate_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (folio_test_active(folio) && !folio_test_unevictable(folio)) {
		long nr_pages = folio_nr_pages(folio);

		lruvec_del_folio(lruvec, folio);
		folio_clear_active(folio);
		folio_clear_referenced(folio);
		lruvec_add_folio(lruvec, folio);

		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_lazyfree_fn(struct lruvec *lruvec, struct folio *folio)
{
	if (folio_test_anon(folio) && folio_test_swapbacked(folio) &&
	    !folio_test_swapcache(folio) && !folio_test_unevictable(folio)) {
		long nr_pages = folio_nr_pages(folio);

		lruvec_del_folio(lruvec, folio);
		folio_clear_active(folio);
		folio_clear_referenced(folio);
		/*
		 * Lazyfree folios are clean anonymous folios.  They have
		 * the swapbacked flag cleared, to distinguish them from normal
		 * anonymous folios.
		 */
		folio_clear_swapbacked(folio);
		lruvec_add_folio(lruvec, folio);

		__count_vm_events(PGLAZYFREE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
				     nr_pages);
	}
}
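/*
 * Lifecycle sketch for lazyfree folios (hypothetical sequence; the usual
 * driver is the madvise(MADV_FREE) path): lru_lazyfree_fn() clears the
 * swapbacked flag on a clean anonymous folio, so reclaim may discard it
 * outright instead of swapping it out; a later write re-dirties the folio
 * and cancels the lazy free.
 */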
/*
 * Drain pages out of the cpu's folio_batches.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);
	struct folio_batch *fbatch = &fbatches->lru_add;

	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, lru_add_fn);

	fbatch = &per_cpu(lru_rotate.fbatch, cpu);
	/* Disabling interrupts below acts as a compiler barrier. */
	if (data_race(folio_batch_count(fbatch))) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_lock_irqsave(&lru_rotate.lock, flags);
		folio_batch_move_lru(fbatch, lru_move_tail_fn);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}

	fbatch = &fbatches->lru_deactivate_file;
	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, lru_deactivate_file_fn);

	fbatch = &fbatches->lru_deactivate;
	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, lru_deactivate_fn);

	fbatch = &fbatches->lru_lazyfree;
	if (folio_batch_count(fbatch))
		folio_batch_move_lru(fbatch, lru_lazyfree_fn);

	folio_activate_drain(cpu);
}
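/*
 * Usage note (illustrative): code that needs folios to really be on the
 * LRU - page migration, for instance - calls lru_add_drain() (or
 * lru_add_drain_all() below for every CPU) first, so that folios still
 * parked in these per-CPU batches become visible to isolate_lru_page().
 */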
/**
 * deactivate_file_folio() - Deactivate a file folio.
 * @folio: Folio to deactivate.
 *
 * This function hints to the VM that @folio is a good reclaim candidate,
 * for example if its invalidation fails due to the folio being dirty
 * or under writeback.
 *
 * Context: Caller holds a reference on the folio.
 */
void deactivate_file_folio(struct folio *folio)
{
	struct folio_batch *fbatch;

	/* Deactivating an unevictable folio will not accelerate reclaim */
	if (folio_test_unevictable(folio))
		return;

	folio_get(folio);
	local_lock(&cpu_fbatches.lock);
	fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate_file);
	folio_batch_add_and_move(fbatch, folio, lru_deactivate_file_fn);
	local_unlock(&cpu_fbatches.lock);
}

/*
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the
 * active list and was not an unevictable page.  This is done to accelerate
 * the reclaim of @page.
 */
void deactivate_page(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (folio_test_lru(folio) && folio_test_active(folio) &&
	    !folio_test_unevictable(folio)) {
		struct folio_batch *fbatch;

		folio_get(folio);
		local_lock(&cpu_fbatches.lock);
		fbatch = this_cpu_ptr(&cpu_fbatches.lru_deactivate);
		folio_batch_add_and_move(fbatch, folio, lru_deactivate_fn);
		local_unlock(&cpu_fbatches.lock);
	}
}
/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: the anon page to be marked lazyfree
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
	struct folio *folio = page_folio(page);

	if (folio_test_lru(folio) && folio_test_anon(folio) &&
	    folio_test_swapbacked(folio) && !folio_test_swapcache(folio) &&
	    !folio_test_unevictable(folio)) {
		struct folio_batch *fbatch;

		folio_get(folio);
		local_lock(&cpu_fbatches.lock);
		fbatch = this_cpu_ptr(&cpu_fbatches.lru_lazyfree);
		folio_batch_add_and_move(fbatch, folio, lru_lazyfree_fn);
		local_unlock(&cpu_fbatches.lock);
	}
}

void lru_add_drain(void)
{
	local_lock(&cpu_fbatches.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&cpu_fbatches.lock);
	mlock_page_drain_local();
}

/*
 * This is called from per-cpu workqueue context in the SMP case, so
 * lru_add_drain_cpu() and invalidate_bh_lrus_cpu() run on the same CPU.
 * That is not a problem in the !SMP case, since there is only one core
 * and the locks disable preemption.
 */
static void lru_add_and_bh_lrus_drain(void)
{
	local_lock(&cpu_fbatches.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&cpu_fbatches.lock);
	invalidate_bh_lrus_cpu();
	mlock_page_drain_local();
}

void lru_add_drain_cpu_zone(struct zone *zone)
{
	local_lock(&cpu_fbatches.lock);
	lru_add_drain_cpu(smp_processor_id());
	drain_local_pages(zone);
	local_unlock(&cpu_fbatches.lock);
	mlock_page_drain_local();
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_and_bh_lrus_drain();
}

static bool cpu_needs_drain(unsigned int cpu)
{
	struct cpu_fbatches *fbatches = &per_cpu(cpu_fbatches, cpu);

	/* Check these in order of likelihood that they're not zero */
	return folio_batch_count(&fbatches->lru_add) ||
		data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) ||
		folio_batch_count(&fbatches->lru_deactivate_file) ||
		folio_batch_count(&fbatches->lru_deactivate) ||
		folio_batch_count(&fbatches->lru_lazyfree) ||
		folio_batch_count(&fbatches->activate) ||
		need_mlock_page_drain(cpu) ||
		has_bh_in_lru(cpu, NULL);
}

/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
static inline void __lru_add_drain_all(bool force_all_cpus)
{
	/*
	 * lru_drain_gen - Global pages generation number
	 *
	 * (A) Definition: global lru_drain_gen = x implies that all generations
	 *     0 < n <= x are already *scheduled* for draining.
	 *
	 * This is an optimization for the highly-contended use case where a
	 * user space workload keeps constantly generating a flow of pages for
	 * each CPU.
	 */
	static unsigned int lru_drain_gen;
	static struct cpumask has_work;
	static DEFINE_MUTEX(lock);
	unsigned cpu, this_gen;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	/*
	 * Guarantee that folio_batch counter stores made by this CPU
	 * are visible to other CPUs before loading the current drain
	 * generation.
	 */
	smp_mb();

	/*
	 * (B) Locally cache global LRU draining generation number
	 *
	 * The read barrier ensures that the counter is loaded before the mutex
	 * is taken.  It pairs with smp_mb() inside the mutex critical section
	 * at (D).
	 */
	this_gen = smp_load_acquire(&lru_drain_gen);

	mutex_lock(&lock);

	/*
	 * (C) Exit the draining operation if a newer generation, from another
	 *     lru_add_drain_all(), was already scheduled for draining.  Check (A).
	 */
	if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
		goto done;

	/*
	 * (D) Increment global generation number
	 *
	 * Pairs with smp_load_acquire() at (B), outside of the critical
	 * section.  Use a full memory barrier to guarantee that the
	 * new global drain generation number is stored before loading
	 * folio_batch counters.
	 *
	 * This pairing must be done here, before the for_each_online_cpu loop
	 * below which drains the page vectors.
	 *
	 * Let x, y, and z represent some system CPU numbers, where x < y < z.
	 * Assume CPU #z is in the middle of the for_each_online_cpu loop
	 * below and has already reached CPU #y's per-cpu data.  CPU #x comes
	 * along, adds some pages to its per-cpu vectors, then calls
	 * lru_add_drain_all().
	 *
	 * If the paired barrier is done at any later step, e.g. after the
	 * loop, CPU #x will just exit at (C) and miss flushing out all of its
	 * added pages.
	 */
	WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
	smp_mb();

	cpumask_clear(&has_work);
	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (cpu_needs_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			__cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

done:
	mutex_unlock(&lock);
}

void lru_add_drain_all(void)
{
	__lru_add_drain_all(false);
}
#else
void lru_add_drain_all(void)
{
	lru_add_drain();
}
#endif /* CONFIG_SMP */

atomic_t lru_disable_count = ATOMIC_INIT(0);

/*
 * lru_cache_disable() needs to be called before we start compiling
 * a list of pages to be migrated using isolate_lru_page().  It drains
 * the pages on the LRU cache and then disables the cache on all CPUs
 * until lru_cache_enable() is called.
 *
 * Must be paired with a call to lru_cache_enable().
 */
void lru_cache_disable(void)
{
	atomic_inc(&lru_disable_count);
	/*
	 * Readers of lru_disable_count are protected by either disabling
	 * preemption or rcu_read_lock:
	 *
	 * preempt_disable, local_irq_disable	[bh_lru_lock()]
	 * rcu_read_lock			[rt_spin_lock CONFIG_PREEMPT_RT]
	 * preempt_disable			[local_lock !CONFIG_PREEMPT_RT]
	 *
	 * Since v5.1 kernel, synchronize_rcu() is guaranteed to wait on
	 * preempt_disable() regions of code.  So any CPU which sees
	 * lru_disable_count = 0 will have exited the critical
	 * section when synchronize_rcu() returns.
	 */
	synchronize_rcu_expedited();
#ifdef CONFIG_SMP
	__lru_add_drain_all(true);
#else
	lru_add_and_bh_lrus_drain();
#endif
}
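/*
 * Example pairing (sketch of a migration-style caller):
 *
 *	lru_cache_disable();
 *	... isolate_lru_page() and migrate the isolated pages ...
 *	lru_cache_enable();
 *
 * While disabled, folio_batch_add_and_move() flushes each folio
 * immediately (see the lru_cache_disabled() test above), so no new pages
 * can hide in the per-CPU batches.
 */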
/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct lruvec *lruvec = NULL;
	unsigned long flags = 0;
	unsigned int lock_batch;

	for (i = 0; i < nr; i++) {
		struct folio *folio = page_folio(pages[i]);

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same lruvec.  The lock is held only if lruvec != NULL.
		 */
		if (lruvec && ++lock_batch == SWAP_CLUSTER_MAX) {
			unlock_page_lruvec_irqrestore(lruvec, flags);
			lruvec = NULL;
		}

		if (is_huge_zero_page(&folio->page))
			continue;

		if (folio_is_zone_device(folio)) {
			if (lruvec) {
				unlock_page_lruvec_irqrestore(lruvec, flags);
				lruvec = NULL;
			}
			if (put_devmap_managed_page(&folio->page))
				continue;
			if (folio_put_testzero(folio))
				free_zone_device_page(&folio->page);
			continue;
		}

		if (!folio_put_testzero(folio))
			continue;

		if (folio_test_large(folio)) {
			if (lruvec) {
				unlock_page_lruvec_irqrestore(lruvec, flags);
				lruvec = NULL;
			}
			__folio_put_large(folio);
			continue;
		}

		if (folio_test_lru(folio)) {
			struct lruvec *prev_lruvec = lruvec;

			lruvec = folio_lruvec_relock_irqsave(folio, lruvec,
								&flags);
			if (prev_lruvec != lruvec)
				lock_batch = 0;

			lruvec_del_folio(lruvec, folio);
			__folio_clear_lru_flags(folio);
		}
		/*
		 * In rare cases, when truncation or holepunching raced with
		 * munlock after VM_LOCKED was cleared, Mlocked may still be
		 * found set here.  This does not indicate a problem, unless
		 * "unevictable_pgs_cleared" appears worryingly large.
		 */
		if (unlikely(folio_test_mlocked(folio))) {
			__folio_clear_mlocked(folio);
			zone_stat_sub_folio(folio, NR_MLOCK);
			count_vm_event(UNEVICTABLE_PGCLEARED);
		}

		list_add(&folio->lru, &pages_to_free);
	}
	if (lruvec)
		unlock_page_lruvec_irqrestore(lruvec, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.
 * folio_batch_move_lru() calls folios_put() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	if (!pvec->percpu_pvec_drained) {
		lru_add_drain();
		pvec->percpu_pvec_drained = true;
	}
	release_pages(pvec->pages, pagevec_count(pvec));
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);
/**
 * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
 * @fbatch: The batch to prune
 *
 * find_get_entries() fills a batch with both folios and shadow/swap/DAX
 * entries.  This function prunes all the non-folio entries from @fbatch
 * without leaving holes, so that it can be passed on to folio-only batch
 * operations.
 */
void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
{
	unsigned int i, j;

	for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
		struct folio *folio = fbatch->folios[i];
		if (!xa_is_value(folio))
			fbatch->folios[j++] = folio;
	}
	fbatch->nr = j;
}
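/*
 * Example caller (sketch following the pattern used in mm/truncate.c; the
 * exact find_get_entries() signature may differ by kernel version): prune
 * the shadow entries that the lookup mixed in before doing folio-only work
 * on the batch:
 *
 *	struct folio_batch fbatch;
 *	pgoff_t indices[PAGEVEC_SIZE];
 *
 *	folio_batch_init(&fbatch);
 *	if (find_get_entries(mapping, start, end, &fbatch, indices)) {
 *		folio_batch_remove_exceptionals(&fbatch);
 *		... every remaining fbatch.folios[i] is a real folio ...
 *	}
 */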
/**
 * pagevec_lookup_range - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index
 *
 * pagevec_lookup_range() will search for & return a group of up to
 * PAGEVEC_SIZE pages in the mapping starting from index @start and up to
 * index @end (inclusive).  The pages are placed in @pvec.
 * pagevec_lookup() takes a reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.  We
 * also update @start to index the next page for the traversal.
 *
 * pagevec_lookup_range() returns the number of pages which were found.  If
 * this number is smaller than PAGEVEC_SIZE, the end of the specified range
 * has been reached.
 */
unsigned pagevec_lookup_range(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *start, pgoff_t end)
{
	pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
					pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range);

unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
		struct address_space *mapping, pgoff_t *index, pgoff_t end,
		xa_mark_t tag)
{
	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
					PAGEVEC_SIZE, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_tag);

/*
 * Perform any setup for the swap system.
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}