1457c8996SThomas Gleixner // SPDX-License-Identifier: GPL-2.0-only 21da177e4SLinus Torvalds /* 31da177e4SLinus Torvalds * linux/mm/swap.c 41da177e4SLinus Torvalds * 51da177e4SLinus Torvalds * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 61da177e4SLinus Torvalds */ 71da177e4SLinus Torvalds 81da177e4SLinus Torvalds /* 9183ff22bSSimon Arlott * This file contains the default values for the operation of the 101da177e4SLinus Torvalds * Linux VM subsystem. Fine-tuning documentation can be found in 1157043247SMauro Carvalho Chehab * Documentation/admin-guide/sysctl/vm.rst. 121da177e4SLinus Torvalds * Started 18.12.91 131da177e4SLinus Torvalds * Swap aging added 23.2.95, Stephen Tweedie. 141da177e4SLinus Torvalds * Buffermem limits added 12.3.98, Rik van Riel. 151da177e4SLinus Torvalds */ 161da177e4SLinus Torvalds 171da177e4SLinus Torvalds #include <linux/mm.h> 181da177e4SLinus Torvalds #include <linux/sched.h> 191da177e4SLinus Torvalds #include <linux/kernel_stat.h> 201da177e4SLinus Torvalds #include <linux/swap.h> 211da177e4SLinus Torvalds #include <linux/mman.h> 221da177e4SLinus Torvalds #include <linux/pagemap.h> 231da177e4SLinus Torvalds #include <linux/pagevec.h> 241da177e4SLinus Torvalds #include <linux/init.h> 25b95f1b31SPaul Gortmaker #include <linux/export.h> 261da177e4SLinus Torvalds #include <linux/mm_inline.h> 271da177e4SLinus Torvalds #include <linux/percpu_counter.h> 283565fce3SDan Williams #include <linux/memremap.h> 291da177e4SLinus Torvalds #include <linux/percpu.h> 301da177e4SLinus Torvalds #include <linux/cpu.h> 311da177e4SLinus Torvalds #include <linux/notifier.h> 32e0bf68ddSPeter Zijlstra #include <linux/backing-dev.h> 3366e1707bSBalbir Singh #include <linux/memcontrol.h> 345a0e3ad6STejun Heo #include <linux/gfp.h> 35a27bb332SKent Overstreet #include <linux/uio.h> 36822fc613SNaoya Horiguchi #include <linux/hugetlb.h> 3733c3fc71SVladimir Davydov #include <linux/page_idle.h> 38b01b2141SIngo Molnar #include <linux/local_lock.h> 398cc621d2SMinchan Kim #include <linux/buffer_head.h> 401da177e4SLinus Torvalds 4164d6519dSLee Schermerhorn #include "internal.h" 4264d6519dSLee Schermerhorn 43c6286c98SMel Gorman #define CREATE_TRACE_POINTS 44c6286c98SMel Gorman #include <trace/events/pagemap.h> 45c6286c98SMel Gorman 461da177e4SLinus Torvalds /* How many pages do we try to swap or page in/out together? */ 471da177e4SLinus Torvalds int page_cluster; 481da177e4SLinus Torvalds 49c2bc1681SMatthew Wilcox (Oracle) /* Protecting only lru_rotate.fbatch which requires disabling interrupts */ 50b01b2141SIngo Molnar struct lru_rotate { 51b01b2141SIngo Molnar local_lock_t lock; 52c2bc1681SMatthew Wilcox (Oracle) struct folio_batch fbatch; 53b01b2141SIngo Molnar }; 54b01b2141SIngo Molnar static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = { 55b01b2141SIngo Molnar .lock = INIT_LOCAL_LOCK(lock), 56b01b2141SIngo Molnar }; 57b01b2141SIngo Molnar 58b01b2141SIngo Molnar /* 59b01b2141SIngo Molnar * The following struct pagevec are grouped together because they are protected 60b01b2141SIngo Molnar * by disabling preemption (and interrupts remain enabled). 
61b01b2141SIngo Molnar */ 62b01b2141SIngo Molnar struct lru_pvecs { 63b01b2141SIngo Molnar local_lock_t lock; 64b01b2141SIngo Molnar struct pagevec lru_add; 65b01b2141SIngo Molnar struct pagevec lru_deactivate_file; 66b01b2141SIngo Molnar struct pagevec lru_deactivate; 67b01b2141SIngo Molnar struct pagevec lru_lazyfree; 68a4a921aaSMing Li #ifdef CONFIG_SMP 69b01b2141SIngo Molnar struct pagevec activate_page; 70a4a921aaSMing Li #endif 71b01b2141SIngo Molnar }; 72b01b2141SIngo Molnar static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = { 73b01b2141SIngo Molnar .lock = INIT_LOCAL_LOCK(lock), 74b01b2141SIngo Molnar }; 75902aaed0SHisashi Hifumi 76b221385bSAdrian Bunk /* 77b109b870SHugh Dickins * This path almost never happens for VM activity - pages are normally freed 78b109b870SHugh Dickins * via pagevecs. But it gets used by networking - and for compound pages. 79b221385bSAdrian Bunk */ 80920c7a5dSHarvey Harrison static void __page_cache_release(struct page *page) 81b221385bSAdrian Bunk { 82b221385bSAdrian Bunk if (PageLRU(page)) { 83e809c3feSMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 84fa9add64SHugh Dickins struct lruvec *lruvec; 85fa9add64SHugh Dickins unsigned long flags; 86b221385bSAdrian Bunk 87e809c3feSMatthew Wilcox (Oracle) lruvec = folio_lruvec_lock_irqsave(folio, &flags); 8846ae6b2cSYu Zhao del_page_from_lru_list(page, lruvec); 8987560179SYu Zhao __clear_page_lru_flags(page); 906168d0daSAlex Shi unlock_page_lruvec_irqrestore(lruvec, flags); 91b221385bSAdrian Bunk } 92b109b870SHugh Dickins /* See comment on PageMlocked in release_pages() */ 93b109b870SHugh Dickins if (unlikely(PageMlocked(page))) { 94b109b870SHugh Dickins int nr_pages = thp_nr_pages(page); 95b109b870SHugh Dickins 96b109b870SHugh Dickins __ClearPageMlocked(page); 97b109b870SHugh Dickins mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); 98b109b870SHugh Dickins count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages); 99b109b870SHugh Dickins } 10091807063SAndrea Arcangeli } 10191807063SAndrea Arcangeli 10291807063SAndrea Arcangeli static void __put_single_page(struct page *page) 10391807063SAndrea Arcangeli { 10491807063SAndrea Arcangeli __page_cache_release(page); 105bbc6b703SMatthew Wilcox (Oracle) mem_cgroup_uncharge(page_folio(page)); 10644042b44SMel Gorman free_unref_page(page, 0); 107b221385bSAdrian Bunk } 108b221385bSAdrian Bunk 10991807063SAndrea Arcangeli static void __put_compound_page(struct page *page) 11091807063SAndrea Arcangeli { 111822fc613SNaoya Horiguchi /* 112822fc613SNaoya Horiguchi * __page_cache_release() is supposed to be called for thp, not for 113822fc613SNaoya Horiguchi * hugetlb. This is because a hugetlb page never has PageLRU set 114822fc613SNaoya Horiguchi * (it is never added to any LRU list) and no memcg routines should 115822fc613SNaoya Horiguchi * be called for hugetlb (it has a separate hugetlb_cgroup.) 116822fc613SNaoya Horiguchi */ 117822fc613SNaoya Horiguchi if (!PageHuge(page)) 11891807063SAndrea Arcangeli __page_cache_release(page); 119ff45fc3cSMatthew Wilcox (Oracle) destroy_compound_page(page); 12091807063SAndrea Arcangeli } 12191807063SAndrea Arcangeli 122ddc58f27SKirill A.
Shutemov void __put_page(struct page *page) 123c747ce79SJianyu Zhan { 12427674ef6SChristoph Hellwig if (unlikely(is_zone_device_page(page))) 12527674ef6SChristoph Hellwig free_zone_device_page(page); 12627674ef6SChristoph Hellwig else if (unlikely(PageCompound(page))) 12726296ad2SAndrew Morton __put_compound_page(page); 12826296ad2SAndrew Morton else 12926296ad2SAndrew Morton __put_single_page(page); 13026296ad2SAndrew Morton } 131ddc58f27SKirill A. Shutemov EXPORT_SYMBOL(__put_page); 13270b50f94SAndrea Arcangeli 1331d7ea732SAlexander Zarochentsev /** 1347682486bSRandy Dunlap * put_pages_list() - release a list of pages 1357682486bSRandy Dunlap * @pages: list of pages threaded on page->lru 1361d7ea732SAlexander Zarochentsev * 137988c69f1SMatthew Wilcox (Oracle) * Release a list of pages which are strung together on page.lru. 1381d7ea732SAlexander Zarochentsev */ 1391d7ea732SAlexander Zarochentsev void put_pages_list(struct list_head *pages) 1401d7ea732SAlexander Zarochentsev { 141988c69f1SMatthew Wilcox (Oracle) struct page *page, *next; 1421d7ea732SAlexander Zarochentsev 143988c69f1SMatthew Wilcox (Oracle) list_for_each_entry_safe(page, next, pages, lru) { 144988c69f1SMatthew Wilcox (Oracle) if (!put_page_testzero(page)) { 145988c69f1SMatthew Wilcox (Oracle) list_del(&page->lru); 146988c69f1SMatthew Wilcox (Oracle) continue; 1471d7ea732SAlexander Zarochentsev } 148988c69f1SMatthew Wilcox (Oracle) if (PageHead(page)) { 149988c69f1SMatthew Wilcox (Oracle) list_del(&page->lru); 150988c69f1SMatthew Wilcox (Oracle) __put_compound_page(page); 151988c69f1SMatthew Wilcox (Oracle) continue; 152988c69f1SMatthew Wilcox (Oracle) } 153988c69f1SMatthew Wilcox (Oracle) /* Cannot be PageLRU because it's passed to us using the lru */ 154988c69f1SMatthew Wilcox (Oracle) } 155988c69f1SMatthew Wilcox (Oracle) 156988c69f1SMatthew Wilcox (Oracle) free_unref_page_list(pages); 1573cd018b4SMatthew Wilcox INIT_LIST_HEAD(pages); 1581d7ea732SAlexander Zarochentsev } 1591d7ea732SAlexander Zarochentsev EXPORT_SYMBOL(put_pages_list); 1601d7ea732SAlexander Zarochentsev 16118022c5dSMel Gorman /* 16218022c5dSMel Gorman * get_kernel_pages() - pin kernel pages in memory 16318022c5dSMel Gorman * @kiov: An array of struct kvec structures 16418022c5dSMel Gorman * @nr_segs: number of segments to pin 16518022c5dSMel Gorman * @write: pinning for read/write, currently ignored 16618022c5dSMel Gorman * @pages: array that receives pointers to the pages pinned. 16718022c5dSMel Gorman * Should be at least nr_segs long. 16818022c5dSMel Gorman * 169133d2743SMiaohe Lin * Returns number of pages pinned. This may be fewer than the number requested. 170133d2743SMiaohe Lin * If nr_segs is 0 or negative, returns 0. If no pages were pinned, returns 0. 171133d2743SMiaohe Lin * Each page returned must be released with a put_page() call when it is 172133d2743SMiaohe Lin * finished with. 17318022c5dSMel Gorman */ 17418022c5dSMel Gorman int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write, 17518022c5dSMel Gorman struct page **pages) 17618022c5dSMel Gorman { 17718022c5dSMel Gorman int seg; 17818022c5dSMel Gorman 17918022c5dSMel Gorman for (seg = 0; seg < nr_segs; seg++) { 18018022c5dSMel Gorman if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE)) 18118022c5dSMel Gorman return seg; 18218022c5dSMel Gorman 1835a178119SMel Gorman pages[seg] = kmap_to_page(kiov[seg].iov_base); 18409cbfeafSKirill A. 
Shutemov get_page(pages[seg]); 18518022c5dSMel Gorman } 18618022c5dSMel Gorman 18718022c5dSMel Gorman return seg; 18818022c5dSMel Gorman } 18918022c5dSMel Gorman EXPORT_SYMBOL_GPL(get_kernel_pages); 19018022c5dSMel Gorman 1913dd7ae8eSShaohua Li static void pagevec_lru_move_fn(struct pagevec *pvec, 192c7c7b80cSAlex Shi void (*move_fn)(struct page *page, struct lruvec *lruvec)) 193902aaed0SHisashi Hifumi { 194902aaed0SHisashi Hifumi int i; 1956168d0daSAlex Shi struct lruvec *lruvec = NULL; 1963dd7ae8eSShaohua Li unsigned long flags = 0; 197902aaed0SHisashi Hifumi 198902aaed0SHisashi Hifumi for (i = 0; i < pagevec_count(pvec); i++) { 199902aaed0SHisashi Hifumi struct page *page = pvec->pages[i]; 2000de340cbSMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 2013dd7ae8eSShaohua Li 202fc574c23SAlex Shi /* block memcg migration during page moving between lru */ 203fc574c23SAlex Shi if (!TestClearPageLRU(page)) 204fc574c23SAlex Shi continue; 205fc574c23SAlex Shi 2060de340cbSMatthew Wilcox (Oracle) lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags); 207c7c7b80cSAlex Shi (*move_fn)(page, lruvec); 208fc574c23SAlex Shi 209fc574c23SAlex Shi SetPageLRU(page); 2103dd7ae8eSShaohua Li } 2116168d0daSAlex Shi if (lruvec) 2126168d0daSAlex Shi unlock_page_lruvec_irqrestore(lruvec, flags); 213c6f92f9fSMel Gorman release_pages(pvec->pages, pvec->nr); 2143dd7ae8eSShaohua Li pagevec_reinit(pvec); 2153dd7ae8eSShaohua Li } 2163dd7ae8eSShaohua Li 217d479960eSMinchan Kim /* return true if pagevec needs to drain */ 218d479960eSMinchan Kim static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page) 219d479960eSMinchan Kim { 220d479960eSMinchan Kim bool ret = false; 221d479960eSMinchan Kim 222d479960eSMinchan Kim if (!pagevec_add(pvec, page) || PageCompound(page) || 223d479960eSMinchan Kim lru_cache_disabled()) 224d479960eSMinchan Kim ret = true; 225d479960eSMinchan Kim 226d479960eSMinchan Kim return ret; 227d479960eSMinchan Kim } 228d479960eSMinchan Kim 229c2bc1681SMatthew Wilcox (Oracle) typedef void (*move_fn_t)(struct lruvec *lruvec, struct folio *folio); 230c2bc1681SMatthew Wilcox (Oracle) 231*7d80dd09SMatthew Wilcox (Oracle) static void __pagevec_lru_add_fn(struct folio *folio, struct lruvec *lruvec) 232*7d80dd09SMatthew Wilcox (Oracle) { 233*7d80dd09SMatthew Wilcox (Oracle) int was_unevictable = folio_test_clear_unevictable(folio); 234*7d80dd09SMatthew Wilcox (Oracle) long nr_pages = folio_nr_pages(folio); 235*7d80dd09SMatthew Wilcox (Oracle) 236*7d80dd09SMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 237*7d80dd09SMatthew Wilcox (Oracle) 238*7d80dd09SMatthew Wilcox (Oracle) folio_set_lru(folio); 239*7d80dd09SMatthew Wilcox (Oracle) /* 240*7d80dd09SMatthew Wilcox (Oracle) * Is an smp_mb__after_atomic() still required here, before 241*7d80dd09SMatthew Wilcox (Oracle) * folio_evictable() tests PageMlocked, to rule out the possibility 242*7d80dd09SMatthew Wilcox (Oracle) * of stranding an evictable folio on an unevictable LRU? I think 243*7d80dd09SMatthew Wilcox (Oracle) * not, because __munlock_page() only clears PageMlocked while the LRU 244*7d80dd09SMatthew Wilcox (Oracle) * lock is held. 245*7d80dd09SMatthew Wilcox (Oracle) * 246*7d80dd09SMatthew Wilcox (Oracle) * (That is not true of __page_cache_release(), and not necessarily 247*7d80dd09SMatthew Wilcox (Oracle) * true of release_pages(): but those only clear PageMlocked after 248*7d80dd09SMatthew Wilcox (Oracle) * put_page_testzero() has excluded any other users of the page.) 
249*7d80dd09SMatthew Wilcox (Oracle) */ 250*7d80dd09SMatthew Wilcox (Oracle) if (folio_evictable(folio)) { 251*7d80dd09SMatthew Wilcox (Oracle) if (was_unevictable) 252*7d80dd09SMatthew Wilcox (Oracle) __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages); 253*7d80dd09SMatthew Wilcox (Oracle) } else { 254*7d80dd09SMatthew Wilcox (Oracle) folio_clear_active(folio); 255*7d80dd09SMatthew Wilcox (Oracle) folio_set_unevictable(folio); 256*7d80dd09SMatthew Wilcox (Oracle) /* 257*7d80dd09SMatthew Wilcox (Oracle) * folio->mlock_count = !!folio_test_mlocked(folio)? 258*7d80dd09SMatthew Wilcox (Oracle) * But that leaves __mlock_page() in doubt whether another 259*7d80dd09SMatthew Wilcox (Oracle) * actor has already counted the mlock or not. Err on the 260*7d80dd09SMatthew Wilcox (Oracle) * safe side, underestimate, let page reclaim fix it, rather 261*7d80dd09SMatthew Wilcox (Oracle) * than leaving a page on the unevictable LRU indefinitely. 262*7d80dd09SMatthew Wilcox (Oracle) */ 263*7d80dd09SMatthew Wilcox (Oracle) folio->mlock_count = 0; 264*7d80dd09SMatthew Wilcox (Oracle) if (!was_unevictable) 265*7d80dd09SMatthew Wilcox (Oracle) __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages); 266*7d80dd09SMatthew Wilcox (Oracle) } 267*7d80dd09SMatthew Wilcox (Oracle) 268*7d80dd09SMatthew Wilcox (Oracle) lruvec_add_folio(lruvec, folio); 269*7d80dd09SMatthew Wilcox (Oracle) trace_mm_lru_insertion(folio); 270*7d80dd09SMatthew Wilcox (Oracle) } 271*7d80dd09SMatthew Wilcox (Oracle) 272*7d80dd09SMatthew Wilcox (Oracle) /* 273*7d80dd09SMatthew Wilcox (Oracle) * Add the passed pages to the LRU, then drop the caller's refcount 274*7d80dd09SMatthew Wilcox (Oracle) * on them. Reinitialises the caller's pagevec. 275*7d80dd09SMatthew Wilcox (Oracle) */ 276*7d80dd09SMatthew Wilcox (Oracle) static void __pagevec_lru_add(struct pagevec *pvec) 277*7d80dd09SMatthew Wilcox (Oracle) { 278*7d80dd09SMatthew Wilcox (Oracle) int i; 279*7d80dd09SMatthew Wilcox (Oracle) struct lruvec *lruvec = NULL; 280*7d80dd09SMatthew Wilcox (Oracle) unsigned long flags = 0; 281*7d80dd09SMatthew Wilcox (Oracle) 282*7d80dd09SMatthew Wilcox (Oracle) for (i = 0; i < pagevec_count(pvec); i++) { 283*7d80dd09SMatthew Wilcox (Oracle) struct folio *folio = page_folio(pvec->pages[i]); 284*7d80dd09SMatthew Wilcox (Oracle) 285*7d80dd09SMatthew Wilcox (Oracle) lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags); 286*7d80dd09SMatthew Wilcox (Oracle) __pagevec_lru_add_fn(folio, lruvec); 287*7d80dd09SMatthew Wilcox (Oracle) } 288*7d80dd09SMatthew Wilcox (Oracle) if (lruvec) 289*7d80dd09SMatthew Wilcox (Oracle) unlock_page_lruvec_irqrestore(lruvec, flags); 290*7d80dd09SMatthew Wilcox (Oracle) release_pages(pvec->pages, pvec->nr); 291*7d80dd09SMatthew Wilcox (Oracle) pagevec_reinit(pvec); 292*7d80dd09SMatthew Wilcox (Oracle) } 293*7d80dd09SMatthew Wilcox (Oracle) 294c2bc1681SMatthew Wilcox (Oracle) static void folio_batch_move_lru(struct folio_batch *fbatch, move_fn_t move_fn) 295c2bc1681SMatthew Wilcox (Oracle) { 296c2bc1681SMatthew Wilcox (Oracle) int i; 297c2bc1681SMatthew Wilcox (Oracle) struct lruvec *lruvec = NULL; 298c2bc1681SMatthew Wilcox (Oracle) unsigned long flags = 0; 299c2bc1681SMatthew Wilcox (Oracle) 300c2bc1681SMatthew Wilcox (Oracle) for (i = 0; i < folio_batch_count(fbatch); i++) { 301c2bc1681SMatthew Wilcox (Oracle) struct folio *folio = fbatch->folios[i]; 302c2bc1681SMatthew Wilcox (Oracle) 303c2bc1681SMatthew Wilcox (Oracle) /* block memcg migration while the folio moves between lru */ 304c2bc1681SMatthew Wilcox (Oracle) if 
(!folio_test_clear_lru(folio)) 305c2bc1681SMatthew Wilcox (Oracle) continue; 306c2bc1681SMatthew Wilcox (Oracle) 307c2bc1681SMatthew Wilcox (Oracle) lruvec = folio_lruvec_relock_irqsave(folio, lruvec, &flags); 308c2bc1681SMatthew Wilcox (Oracle) move_fn(lruvec, folio); 309c2bc1681SMatthew Wilcox (Oracle) 310c2bc1681SMatthew Wilcox (Oracle) folio_set_lru(folio); 311c2bc1681SMatthew Wilcox (Oracle) } 312c2bc1681SMatthew Wilcox (Oracle) 313c2bc1681SMatthew Wilcox (Oracle) if (lruvec) 314c2bc1681SMatthew Wilcox (Oracle) unlock_page_lruvec_irqrestore(lruvec, flags); 315c2bc1681SMatthew Wilcox (Oracle) folios_put(fbatch->folios, folio_batch_count(fbatch)); 316c2bc1681SMatthew Wilcox (Oracle) folio_batch_init(fbatch); 317c2bc1681SMatthew Wilcox (Oracle) } 318c2bc1681SMatthew Wilcox (Oracle) 319c2bc1681SMatthew Wilcox (Oracle) static void folio_batch_add_and_move(struct folio_batch *fbatch, 320c2bc1681SMatthew Wilcox (Oracle) struct folio *folio, move_fn_t move_fn) 321c2bc1681SMatthew Wilcox (Oracle) { 322c2bc1681SMatthew Wilcox (Oracle) if (folio_batch_add(fbatch, folio) && !folio_test_large(folio) && 323c2bc1681SMatthew Wilcox (Oracle) !lru_cache_disabled()) 324c2bc1681SMatthew Wilcox (Oracle) return; 325c2bc1681SMatthew Wilcox (Oracle) folio_batch_move_lru(fbatch, move_fn); 326c2bc1681SMatthew Wilcox (Oracle) } 327c2bc1681SMatthew Wilcox (Oracle) 328c2bc1681SMatthew Wilcox (Oracle) static void lru_move_tail_fn(struct lruvec *lruvec, struct folio *folio) 329c2bc1681SMatthew Wilcox (Oracle) { 330c2bc1681SMatthew Wilcox (Oracle) if (!folio_test_unevictable(folio)) { 331c2bc1681SMatthew Wilcox (Oracle) lruvec_del_folio(lruvec, folio); 332c2bc1681SMatthew Wilcox (Oracle) folio_clear_active(folio); 333c2bc1681SMatthew Wilcox (Oracle) lruvec_add_folio_tail(lruvec, folio); 334c2bc1681SMatthew Wilcox (Oracle) __count_vm_events(PGROTATED, folio_nr_pages(folio)); 335c2bc1681SMatthew Wilcox (Oracle) } 336c2bc1681SMatthew Wilcox (Oracle) } 337c2bc1681SMatthew Wilcox (Oracle) 3383dd7ae8eSShaohua Li /* 339575ced1cSMatthew Wilcox (Oracle) * Writeback is about to end against a folio which has been marked for 340575ced1cSMatthew Wilcox (Oracle) * immediate reclaim. If it still appears to be reclaimable, move it 341575ced1cSMatthew Wilcox (Oracle) * to the tail of the inactive list. 342c7c7b80cSAlex Shi * 343575ced1cSMatthew Wilcox (Oracle) * folio_rotate_reclaimable() must disable IRQs, to prevent nasty races. 
3441da177e4SLinus Torvalds */ 345575ced1cSMatthew Wilcox (Oracle) void folio_rotate_reclaimable(struct folio *folio) 3461da177e4SLinus Torvalds { 347575ced1cSMatthew Wilcox (Oracle) if (!folio_test_locked(folio) && !folio_test_dirty(folio) && 348575ced1cSMatthew Wilcox (Oracle) !folio_test_unevictable(folio) && folio_test_lru(folio)) { 349c2bc1681SMatthew Wilcox (Oracle) struct folio_batch *fbatch; 3501da177e4SLinus Torvalds unsigned long flags; 3511da177e4SLinus Torvalds 352575ced1cSMatthew Wilcox (Oracle) folio_get(folio); 353b01b2141SIngo Molnar local_lock_irqsave(&lru_rotate.lock, flags); 354c2bc1681SMatthew Wilcox (Oracle) fbatch = this_cpu_ptr(&lru_rotate.fbatch); 355c2bc1681SMatthew Wilcox (Oracle) folio_batch_add_and_move(fbatch, folio, lru_move_tail_fn); 356b01b2141SIngo Molnar local_unlock_irqrestore(&lru_rotate.lock, flags); 357ac6aadb2SMiklos Szeredi } 3581da177e4SLinus Torvalds } 3591da177e4SLinus Torvalds 36096f8bf4fSJohannes Weiner void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages) 3613e2f41f1SKOSAKI Motohiro { 3627cf111bcSJohannes Weiner do { 3637cf111bcSJohannes Weiner unsigned long lrusize; 3647cf111bcSJohannes Weiner 3656168d0daSAlex Shi /* 3666168d0daSAlex Shi * Hold lruvec->lru_lock is safe here, since 3676168d0daSAlex Shi * 1) The pinned lruvec in reclaim, or 3686168d0daSAlex Shi * 2) From a pre-LRU page during refault (which also holds the 3696168d0daSAlex Shi * rcu lock, so would be safe even if the page was on the LRU 3706168d0daSAlex Shi * and could move simultaneously to a new lruvec). 3716168d0daSAlex Shi */ 3726168d0daSAlex Shi spin_lock_irq(&lruvec->lru_lock); 3737cf111bcSJohannes Weiner /* Record cost event */ 37496f8bf4fSJohannes Weiner if (file) 37596f8bf4fSJohannes Weiner lruvec->file_cost += nr_pages; 3761431d4d1SJohannes Weiner else 37796f8bf4fSJohannes Weiner lruvec->anon_cost += nr_pages; 3787cf111bcSJohannes Weiner 3797cf111bcSJohannes Weiner /* 3807cf111bcSJohannes Weiner * Decay previous events 3817cf111bcSJohannes Weiner * 3827cf111bcSJohannes Weiner * Because workloads change over time (and to avoid 3837cf111bcSJohannes Weiner * overflow) we keep these statistics as a floating 3847cf111bcSJohannes Weiner * average, which ends up weighing recent refaults 3857cf111bcSJohannes Weiner * more than old ones. 
3867cf111bcSJohannes Weiner */ 3877cf111bcSJohannes Weiner lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) + 3887cf111bcSJohannes Weiner lruvec_page_state(lruvec, NR_ACTIVE_ANON) + 3897cf111bcSJohannes Weiner lruvec_page_state(lruvec, NR_INACTIVE_FILE) + 3907cf111bcSJohannes Weiner lruvec_page_state(lruvec, NR_ACTIVE_FILE); 3917cf111bcSJohannes Weiner 3927cf111bcSJohannes Weiner if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) { 3937cf111bcSJohannes Weiner lruvec->file_cost /= 2; 3947cf111bcSJohannes Weiner lruvec->anon_cost /= 2; 3957cf111bcSJohannes Weiner } 3966168d0daSAlex Shi spin_unlock_irq(&lruvec->lru_lock); 3977cf111bcSJohannes Weiner } while ((lruvec = parent_lruvec(lruvec))); 3983e2f41f1SKOSAKI Motohiro } 3993e2f41f1SKOSAKI Motohiro 4000995d7e5SMatthew Wilcox (Oracle) void lru_note_cost_folio(struct folio *folio) 40196f8bf4fSJohannes Weiner { 4020995d7e5SMatthew Wilcox (Oracle) lru_note_cost(folio_lruvec(folio), folio_is_file_lru(folio), 4030995d7e5SMatthew Wilcox (Oracle) folio_nr_pages(folio)); 40496f8bf4fSJohannes Weiner } 40596f8bf4fSJohannes Weiner 406f2d27392SMatthew Wilcox (Oracle) static void __folio_activate(struct folio *folio, struct lruvec *lruvec) 407744ed144SShaohua Li { 408f2d27392SMatthew Wilcox (Oracle) if (!folio_test_active(folio) && !folio_test_unevictable(folio)) { 409f2d27392SMatthew Wilcox (Oracle) long nr_pages = folio_nr_pages(folio); 410744ed144SShaohua Li 411f2d27392SMatthew Wilcox (Oracle) lruvec_del_folio(lruvec, folio); 412f2d27392SMatthew Wilcox (Oracle) folio_set_active(folio); 413f2d27392SMatthew Wilcox (Oracle) lruvec_add_folio(lruvec, folio); 414f2d27392SMatthew Wilcox (Oracle) trace_mm_lru_activate(folio); 4157a608572SLinus Torvalds 41621e330fcSShakeel Butt __count_vm_events(PGACTIVATE, nr_pages); 41721e330fcSShakeel Butt __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE, 41821e330fcSShakeel Butt nr_pages); 419744ed144SShaohua Li } 420eb709b0dSShaohua Li } 421eb709b0dSShaohua Li 422eb709b0dSShaohua Li #ifdef CONFIG_SMP 423f2d27392SMatthew Wilcox (Oracle) static void __activate_page(struct page *page, struct lruvec *lruvec) 424f2d27392SMatthew Wilcox (Oracle) { 425f2d27392SMatthew Wilcox (Oracle) return __folio_activate(page_folio(page), lruvec); 426f2d27392SMatthew Wilcox (Oracle) } 427f2d27392SMatthew Wilcox (Oracle) 428eb709b0dSShaohua Li static void activate_page_drain(int cpu) 429eb709b0dSShaohua Li { 430b01b2141SIngo Molnar struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu); 431eb709b0dSShaohua Li 432eb709b0dSShaohua Li if (pagevec_count(pvec)) 433c7c7b80cSAlex Shi pagevec_lru_move_fn(pvec, __activate_page); 434eb709b0dSShaohua Li } 435eb709b0dSShaohua Li 4365fbc4616SChris Metcalf static bool need_activate_page_drain(int cpu) 4375fbc4616SChris Metcalf { 438b01b2141SIngo Molnar return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0; 4395fbc4616SChris Metcalf } 4405fbc4616SChris Metcalf 441f2d27392SMatthew Wilcox (Oracle) static void folio_activate(struct folio *folio) 442eb709b0dSShaohua Li { 443f2d27392SMatthew Wilcox (Oracle) if (folio_test_lru(folio) && !folio_test_active(folio) && 444f2d27392SMatthew Wilcox (Oracle) !folio_test_unevictable(folio)) { 445b01b2141SIngo Molnar struct pagevec *pvec; 446eb709b0dSShaohua Li 447f2d27392SMatthew Wilcox (Oracle) folio_get(folio); 448b01b2141SIngo Molnar local_lock(&lru_pvecs.lock); 449b01b2141SIngo Molnar pvec = this_cpu_ptr(&lru_pvecs.activate_page); 450f2d27392SMatthew Wilcox (Oracle) if (pagevec_add_and_need_flush(pvec, &folio->page)) 451c7c7b80cSAlex 
Shi pagevec_lru_move_fn(pvec, __activate_page); 452b01b2141SIngo Molnar local_unlock(&lru_pvecs.lock); 453eb709b0dSShaohua Li } 454eb709b0dSShaohua Li } 455eb709b0dSShaohua Li 456eb709b0dSShaohua Li #else 457eb709b0dSShaohua Li static inline void activate_page_drain(int cpu) 458eb709b0dSShaohua Li { 459eb709b0dSShaohua Li } 460eb709b0dSShaohua Li 461f2d27392SMatthew Wilcox (Oracle) static void folio_activate(struct folio *folio) 462eb709b0dSShaohua Li { 4636168d0daSAlex Shi struct lruvec *lruvec; 464eb709b0dSShaohua Li 465f2d27392SMatthew Wilcox (Oracle) if (folio_test_clear_lru(folio)) { 466e809c3feSMatthew Wilcox (Oracle) lruvec = folio_lruvec_lock_irq(folio); 467f2d27392SMatthew Wilcox (Oracle) __folio_activate(folio, lruvec); 4686168d0daSAlex Shi unlock_page_lruvec_irq(lruvec); 469f2d27392SMatthew Wilcox (Oracle) folio_set_lru(folio); 4706168d0daSAlex Shi } 4711da177e4SLinus Torvalds } 472eb709b0dSShaohua Li #endif 4731da177e4SLinus Torvalds 47476580b65SMatthew Wilcox (Oracle) static void __lru_cache_activate_folio(struct folio *folio) 475059285a2SMel Gorman { 476b01b2141SIngo Molnar struct pagevec *pvec; 477059285a2SMel Gorman int i; 478059285a2SMel Gorman 479b01b2141SIngo Molnar local_lock(&lru_pvecs.lock); 480b01b2141SIngo Molnar pvec = this_cpu_ptr(&lru_pvecs.lru_add); 481b01b2141SIngo Molnar 482059285a2SMel Gorman /* 483059285a2SMel Gorman * Search backwards on the optimistic assumption that the page being 484059285a2SMel Gorman * activated has just been added to this pagevec. Note that only 485059285a2SMel Gorman * the local pagevec is examined as a !PageLRU page could be in the 486059285a2SMel Gorman * process of being released, reclaimed, migrated or on a remote 487059285a2SMel Gorman * pagevec that is currently being drained. Furthermore, marking 488059285a2SMel Gorman * a remote pagevec's page PageActive potentially hits a race where 489059285a2SMel Gorman * a page is marked PageActive just after it is added to the inactive 490059285a2SMel Gorman * list causing accounting errors and BUG_ON checks to trigger. 491059285a2SMel Gorman */ 492059285a2SMel Gorman for (i = pagevec_count(pvec) - 1; i >= 0; i--) { 493059285a2SMel Gorman struct page *pagevec_page = pvec->pages[i]; 494059285a2SMel Gorman 49576580b65SMatthew Wilcox (Oracle) if (pagevec_page == &folio->page) { 49676580b65SMatthew Wilcox (Oracle) folio_set_active(folio); 497059285a2SMel Gorman break; 498059285a2SMel Gorman } 499059285a2SMel Gorman } 500059285a2SMel Gorman 501b01b2141SIngo Molnar local_unlock(&lru_pvecs.lock); 502059285a2SMel Gorman } 503059285a2SMel Gorman 5041da177e4SLinus Torvalds /* 5051da177e4SLinus Torvalds * Mark a page as having seen activity. 5061da177e4SLinus Torvalds * 5071da177e4SLinus Torvalds * inactive,unreferenced -> inactive,referenced 5081da177e4SLinus Torvalds * inactive,referenced -> active,unreferenced 5091da177e4SLinus Torvalds * active,unreferenced -> active,referenced 510eb39d618SHugh Dickins * 511eb39d618SHugh Dickins * When a newly allocated page is not yet visible, so safe for non-atomic ops, 512eb39d618SHugh Dickins * __SetPageReferenced(page) may be substituted for mark_page_accessed(page). 
5131da177e4SLinus Torvalds */ 51476580b65SMatthew Wilcox (Oracle) void folio_mark_accessed(struct folio *folio) 5151da177e4SLinus Torvalds { 51676580b65SMatthew Wilcox (Oracle) if (!folio_test_referenced(folio)) { 51776580b65SMatthew Wilcox (Oracle) folio_set_referenced(folio); 51876580b65SMatthew Wilcox (Oracle) } else if (folio_test_unevictable(folio)) { 519a1100a74SFengguang Wu /* 520a1100a74SFengguang Wu * Unevictable pages are on the "LRU_UNEVICTABLE" list. But, 521a1100a74SFengguang Wu * this list is never rotated or maintained, so marking an 522914c32e4SBang Li * unevictable page accessed has no effect. 523a1100a74SFengguang Wu */ 52476580b65SMatthew Wilcox (Oracle) } else if (!folio_test_active(folio)) { 525059285a2SMel Gorman /* 526059285a2SMel Gorman * If the page is on the LRU, queue it for activation via 527b01b2141SIngo Molnar * lru_pvecs.activate_page. Otherwise, assume the page is on a 528059285a2SMel Gorman * pagevec, mark it active and it'll be moved to the active 529059285a2SMel Gorman * LRU on the next drain. 530059285a2SMel Gorman */ 53176580b65SMatthew Wilcox (Oracle) if (folio_test_lru(folio)) 53276580b65SMatthew Wilcox (Oracle) folio_activate(folio); 533059285a2SMel Gorman else 53476580b65SMatthew Wilcox (Oracle) __lru_cache_activate_folio(folio); 53576580b65SMatthew Wilcox (Oracle) folio_clear_referenced(folio); 53676580b65SMatthew Wilcox (Oracle) workingset_activation(folio); 5371da177e4SLinus Torvalds } 53876580b65SMatthew Wilcox (Oracle) if (folio_test_idle(folio)) 53976580b65SMatthew Wilcox (Oracle) folio_clear_idle(folio); 5401da177e4SLinus Torvalds } 54176580b65SMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_mark_accessed); 5421da177e4SLinus Torvalds 543f04e9ebbSKOSAKI Motohiro /** 5440d31125dSMatthew Wilcox (Oracle) * folio_add_lru - Add a folio to an LRU list. 5450d31125dSMatthew Wilcox (Oracle) * @folio: The folio to be added to the LRU. 5462329d375SJianyu Zhan * 5470d31125dSMatthew Wilcox (Oracle) * Queue the folio for addition to the LRU. The decision on whether 5482329d375SJianyu Zhan * to add the page to the [in]active [file|anon] list is deferred until the 5490d31125dSMatthew Wilcox (Oracle) * pagevec is drained. This gives a chance for the caller of folio_add_lru() 5500d31125dSMatthew Wilcox (Oracle) * to have the folio added to the active list using folio_mark_accessed().
551f04e9ebbSKOSAKI Motohiro */ 5520d31125dSMatthew Wilcox (Oracle) void folio_add_lru(struct folio *folio) 5531da177e4SLinus Torvalds { 5546058eaecSJohannes Weiner struct pagevec *pvec; 5556058eaecSJohannes Weiner 5560d31125dSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_active(folio) && folio_test_unevictable(folio), folio); 5570d31125dSMatthew Wilcox (Oracle) VM_BUG_ON_FOLIO(folio_test_lru(folio), folio); 5586058eaecSJohannes Weiner 5590d31125dSMatthew Wilcox (Oracle) folio_get(folio); 5606058eaecSJohannes Weiner local_lock(&lru_pvecs.lock); 5616058eaecSJohannes Weiner pvec = this_cpu_ptr(&lru_pvecs.lru_add); 5620d31125dSMatthew Wilcox (Oracle) if (pagevec_add_and_need_flush(pvec, &folio->page)) 5636058eaecSJohannes Weiner __pagevec_lru_add(pvec); 5646058eaecSJohannes Weiner local_unlock(&lru_pvecs.lock); 5651da177e4SLinus Torvalds } 5660d31125dSMatthew Wilcox (Oracle) EXPORT_SYMBOL(folio_add_lru); 5671da177e4SLinus Torvalds 568894bc310SLee Schermerhorn /** 569b518154eSJoonsoo Kim * lru_cache_add_inactive_or_unevictable 57000501b53SJohannes Weiner * @page: the page to be added to LRU 57100501b53SJohannes Weiner * @vma: vma in which page is mapped for determining reclaimability 57200501b53SJohannes Weiner * 573b518154eSJoonsoo Kim * Place @page on the inactive or unevictable LRU list, depending on its 57412eab428SMiaohe Lin * evictability. 57500501b53SJohannes Weiner */ 576b518154eSJoonsoo Kim void lru_cache_add_inactive_or_unevictable(struct page *page, 57700501b53SJohannes Weiner struct vm_area_struct *vma) 57800501b53SJohannes Weiner { 57900501b53SJohannes Weiner VM_BUG_ON_PAGE(PageLRU(page), page); 58000501b53SJohannes Weiner 5812fbb0c10SHugh Dickins if (unlikely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED)) 5822fbb0c10SHugh Dickins mlock_new_page(page); 5832fbb0c10SHugh Dickins else 5849c4e6b1aSShakeel Butt lru_cache_add(page); 58500501b53SJohannes Weiner } 58600501b53SJohannes Weiner 587902aaed0SHisashi Hifumi /* 58831560180SMinchan Kim * If the page cannot be invalidated, it is moved to the 58931560180SMinchan Kim * inactive list to speed up its reclaim. It is moved to the 59031560180SMinchan Kim * head of the list, rather than the tail, to give the flusher 59131560180SMinchan Kim * threads some time to write it out, as this is much more 59231560180SMinchan Kim * effective than the single-page writeout from reclaim. 593278df9f4SMinchan Kim * 594278df9f4SMinchan Kim * If the page isn't page_mapped and dirty/writeback, the page 595278df9f4SMinchan Kim * could be reclaimed asap using PG_reclaim. 596278df9f4SMinchan Kim * 597278df9f4SMinchan Kim * 1. active, mapped page -> none 598278df9f4SMinchan Kim * 2. active, dirty/writeback page -> inactive, head, PG_reclaim 599278df9f4SMinchan Kim * 3. inactive, mapped page -> none 600278df9f4SMinchan Kim * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim 601278df9f4SMinchan Kim * 5. inactive, clean -> inactive, tail 602278df9f4SMinchan Kim * 6. Others -> none 603278df9f4SMinchan Kim * 604278df9f4SMinchan Kim * In 4, the page is moved to the head of the inactive list because the VM 605278df9f4SMinchan Kim * expects it to be written out by flusher threads, as this is much more 606278df9f4SMinchan Kim * effective than the single-page writeout from reclaim.
60731560180SMinchan Kim */ 608c7c7b80cSAlex Shi static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec) 60931560180SMinchan Kim { 61046ae6b2cSYu Zhao bool active = PageActive(page); 6116c357848SMatthew Wilcox (Oracle) int nr_pages = thp_nr_pages(page); 61231560180SMinchan Kim 613bad49d9cSMinchan Kim if (PageUnevictable(page)) 614bad49d9cSMinchan Kim return; 615bad49d9cSMinchan Kim 61631560180SMinchan Kim /* Some processes are using the page */ 61731560180SMinchan Kim if (page_mapped(page)) 61831560180SMinchan Kim return; 61931560180SMinchan Kim 62046ae6b2cSYu Zhao del_page_from_lru_list(page, lruvec); 62131560180SMinchan Kim ClearPageActive(page); 62231560180SMinchan Kim ClearPageReferenced(page); 62331560180SMinchan Kim 624278df9f4SMinchan Kim if (PageWriteback(page) || PageDirty(page)) { 625278df9f4SMinchan Kim /* 626278df9f4SMinchan Kim * PG_reclaim can race with end_page_writeback(). 627278df9f4SMinchan Kim * That can confuse readahead, but the race window 628278df9f4SMinchan Kim * is _really_ small and it's a non-critical problem. 629278df9f4SMinchan Kim */ 6303a9c9788SYu Zhao add_page_to_lru_list(page, lruvec); 631278df9f4SMinchan Kim SetPageReclaim(page); 632278df9f4SMinchan Kim } else { 633278df9f4SMinchan Kim /* 634278df9f4SMinchan Kim * The page's writeback ended while it was in the pagevec; 635c4ffefd1SHyeonggon Yoo * move that page to the tail of the inactive list. 636278df9f4SMinchan Kim */ 6373a9c9788SYu Zhao add_page_to_lru_list_tail(page, lruvec); 6385d91f31fSShakeel Butt __count_vm_events(PGROTATED, nr_pages); 639278df9f4SMinchan Kim } 640278df9f4SMinchan Kim 64121e330fcSShakeel Butt if (active) { 6425d91f31fSShakeel Butt __count_vm_events(PGDEACTIVATE, nr_pages); 64321e330fcSShakeel Butt __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, 64421e330fcSShakeel Butt nr_pages); 64521e330fcSShakeel Butt } 64631560180SMinchan Kim } 64731560180SMinchan Kim 648c7c7b80cSAlex Shi static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec) 6499c276cc6SMinchan Kim { 650fc574c23SAlex Shi if (PageActive(page) && !PageUnevictable(page)) { 6516c357848SMatthew Wilcox (Oracle) int nr_pages = thp_nr_pages(page); 6529c276cc6SMinchan Kim 65346ae6b2cSYu Zhao del_page_from_lru_list(page, lruvec); 6549c276cc6SMinchan Kim ClearPageActive(page); 6559c276cc6SMinchan Kim ClearPageReferenced(page); 6563a9c9788SYu Zhao add_page_to_lru_list(page, lruvec); 6579c276cc6SMinchan Kim 65821e330fcSShakeel Butt __count_vm_events(PGDEACTIVATE, nr_pages); 65921e330fcSShakeel Butt __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE, 66021e330fcSShakeel Butt nr_pages); 6619c276cc6SMinchan Kim } 6629c276cc6SMinchan Kim } 66310853a03SMinchan Kim 664c7c7b80cSAlex Shi static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec) 66510853a03SMinchan Kim { 666fc574c23SAlex Shi if (PageAnon(page) && PageSwapBacked(page) && 66724c92eb7SShaohua Li !PageSwapCache(page) && !PageUnevictable(page)) { 6686c357848SMatthew Wilcox (Oracle) int nr_pages = thp_nr_pages(page); 66910853a03SMinchan Kim 67046ae6b2cSYu Zhao del_page_from_lru_list(page, lruvec); 67110853a03SMinchan Kim ClearPageActive(page); 67210853a03SMinchan Kim ClearPageReferenced(page); 673f7ad2a6cSShaohua Li /* 6749de4f22aSHuang Ying * Lazyfree pages are clean anonymous pages.
They have 6759de4f22aSHuang Ying * PG_swapbacked flag cleared, to distinguish them from normal 6769de4f22aSHuang Ying * anonymous pages 677f7ad2a6cSShaohua Li */ 678f7ad2a6cSShaohua Li ClearPageSwapBacked(page); 6793a9c9788SYu Zhao add_page_to_lru_list(page, lruvec); 68010853a03SMinchan Kim 68121e330fcSShakeel Butt __count_vm_events(PGLAZYFREE, nr_pages); 68221e330fcSShakeel Butt __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE, 68321e330fcSShakeel Butt nr_pages); 68410853a03SMinchan Kim } 68510853a03SMinchan Kim } 68610853a03SMinchan Kim 68731560180SMinchan Kim /* 688902aaed0SHisashi Hifumi * Drain pages out of the cpu's pagevecs. 689902aaed0SHisashi Hifumi * Either "cpu" is the current CPU, and preemption has already been 690902aaed0SHisashi Hifumi * disabled; or "cpu" is being hot-unplugged, and is already dead. 691902aaed0SHisashi Hifumi */ 692f0cb3c76SKonstantin Khlebnikov void lru_add_drain_cpu(int cpu) 6931da177e4SLinus Torvalds { 694c2bc1681SMatthew Wilcox (Oracle) struct folio_batch *fbatch; 695b01b2141SIngo Molnar struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu); 6961da177e4SLinus Torvalds 6971da177e4SLinus Torvalds if (pagevec_count(pvec)) 698a0b8cab3SMel Gorman __pagevec_lru_add(pvec); 699902aaed0SHisashi Hifumi 700c2bc1681SMatthew Wilcox (Oracle) fbatch = &per_cpu(lru_rotate.fbatch, cpu); 7017e0cc01eSQian Cai /* Disabling interrupts below acts as a compiler barrier. */ 702c2bc1681SMatthew Wilcox (Oracle) if (data_race(folio_batch_count(fbatch))) { 703902aaed0SHisashi Hifumi unsigned long flags; 704902aaed0SHisashi Hifumi 705902aaed0SHisashi Hifumi /* No harm done if a racing interrupt already did this */ 706b01b2141SIngo Molnar local_lock_irqsave(&lru_rotate.lock, flags); 707c2bc1681SMatthew Wilcox (Oracle) folio_batch_move_lru(fbatch, lru_move_tail_fn); 708b01b2141SIngo Molnar local_unlock_irqrestore(&lru_rotate.lock, flags); 709902aaed0SHisashi Hifumi } 71031560180SMinchan Kim 711b01b2141SIngo Molnar pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu); 71231560180SMinchan Kim if (pagevec_count(pvec)) 713c7c7b80cSAlex Shi pagevec_lru_move_fn(pvec, lru_deactivate_file_fn); 714eb709b0dSShaohua Li 715b01b2141SIngo Molnar pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu); 7169c276cc6SMinchan Kim if (pagevec_count(pvec)) 717c7c7b80cSAlex Shi pagevec_lru_move_fn(pvec, lru_deactivate_fn); 7189c276cc6SMinchan Kim 719b01b2141SIngo Molnar pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu); 72010853a03SMinchan Kim if (pagevec_count(pvec)) 721c7c7b80cSAlex Shi pagevec_lru_move_fn(pvec, lru_lazyfree_fn); 72210853a03SMinchan Kim 723eb709b0dSShaohua Li activate_page_drain(cpu); 72431560180SMinchan Kim } 72531560180SMinchan Kim 72631560180SMinchan Kim /** 727261b6840SMatthew Wilcox (Oracle) * deactivate_file_folio() - Forcefully deactivate a file folio. 728261b6840SMatthew Wilcox (Oracle) * @folio: Folio to deactivate. 72931560180SMinchan Kim * 730261b6840SMatthew Wilcox (Oracle) * This function hints to the VM that @folio is a good reclaim candidate, 731261b6840SMatthew Wilcox (Oracle) * for example if its invalidation fails due to the folio being dirty 73231560180SMinchan Kim * or under writeback. 733261b6840SMatthew Wilcox (Oracle) * 734261b6840SMatthew Wilcox (Oracle) * Context: Caller holds a reference on the page. 
73531560180SMinchan Kim */ 736261b6840SMatthew Wilcox (Oracle) void deactivate_file_folio(struct folio *folio) 73731560180SMinchan Kim { 738b01b2141SIngo Molnar struct pagevec *pvec; 739b01b2141SIngo Molnar 740261b6840SMatthew Wilcox (Oracle) /* 741261b6840SMatthew Wilcox (Oracle) * In a workload with many unevictable pages such as mprotect, 742261b6840SMatthew Wilcox (Oracle) * unevictable folio deactivation for accelerating reclaim is pointless. 743261b6840SMatthew Wilcox (Oracle) */ 744261b6840SMatthew Wilcox (Oracle) if (folio_test_unevictable(folio)) 745261b6840SMatthew Wilcox (Oracle) return; 746261b6840SMatthew Wilcox (Oracle) 747261b6840SMatthew Wilcox (Oracle) folio_get(folio); 748b01b2141SIngo Molnar local_lock(&lru_pvecs.lock); 749b01b2141SIngo Molnar pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file); 75031560180SMinchan Kim 751261b6840SMatthew Wilcox (Oracle) if (pagevec_add_and_need_flush(pvec, &folio->page)) 752c7c7b80cSAlex Shi pagevec_lru_move_fn(pvec, lru_deactivate_file_fn); 753b01b2141SIngo Molnar local_unlock(&lru_pvecs.lock); 75431560180SMinchan Kim } 75580bfed90SAndrew Morton 7569c276cc6SMinchan Kim /* 7579c276cc6SMinchan Kim * deactivate_page - deactivate a page 7589c276cc6SMinchan Kim * @page: page to deactivate 7599c276cc6SMinchan Kim * 7609c276cc6SMinchan Kim * deactivate_page() moves @page to the inactive list if @page was on the active 7619c276cc6SMinchan Kim * list and was not an unevictable page. This is done to accelerate the reclaim 7629c276cc6SMinchan Kim * of @page. 7639c276cc6SMinchan Kim */ 7649c276cc6SMinchan Kim void deactivate_page(struct page *page) 7659c276cc6SMinchan Kim { 7669c276cc6SMinchan Kim if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { 767b01b2141SIngo Molnar struct pagevec *pvec; 7689c276cc6SMinchan Kim 769b01b2141SIngo Molnar local_lock(&lru_pvecs.lock); 770b01b2141SIngo Molnar pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate); 7719c276cc6SMinchan Kim get_page(page); 772d479960eSMinchan Kim if (pagevec_add_and_need_flush(pvec, page)) 773c7c7b80cSAlex Shi pagevec_lru_move_fn(pvec, lru_deactivate_fn); 774b01b2141SIngo Molnar local_unlock(&lru_pvecs.lock); 7759c276cc6SMinchan Kim } 7769c276cc6SMinchan Kim } 7779c276cc6SMinchan Kim 77810853a03SMinchan Kim /** 779f7ad2a6cSShaohua Li * mark_page_lazyfree - make an anon page lazyfree 78010853a03SMinchan Kim * @page: page to deactivate 78110853a03SMinchan Kim * 782f7ad2a6cSShaohua Li * mark_page_lazyfree() moves @page to the inactive file list. 783f7ad2a6cSShaohua Li * This is done to accelerate the reclaim of @page. 78410853a03SMinchan Kim */ 785f7ad2a6cSShaohua Li void mark_page_lazyfree(struct page *page) 78610853a03SMinchan Kim { 787f7ad2a6cSShaohua Li if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && 78824c92eb7SShaohua Li !PageSwapCache(page) && !PageUnevictable(page)) { 789b01b2141SIngo Molnar struct pagevec *pvec; 79010853a03SMinchan Kim 791b01b2141SIngo Molnar local_lock(&lru_pvecs.lock); 792b01b2141SIngo Molnar pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree); 79309cbfeafSKirill A. 
Shutemov get_page(page); 794d479960eSMinchan Kim if (pagevec_add_and_need_flush(pvec, page)) 795c7c7b80cSAlex Shi pagevec_lru_move_fn(pvec, lru_lazyfree_fn); 796b01b2141SIngo Molnar local_unlock(&lru_pvecs.lock); 79710853a03SMinchan Kim } 79810853a03SMinchan Kim } 79910853a03SMinchan Kim 80080bfed90SAndrew Morton void lru_add_drain(void) 80180bfed90SAndrew Morton { 802b01b2141SIngo Molnar local_lock(&lru_pvecs.lock); 803b01b2141SIngo Molnar lru_add_drain_cpu(smp_processor_id()); 804b01b2141SIngo Molnar local_unlock(&lru_pvecs.lock); 805adb11e78SSebastian Andrzej Siewior mlock_page_drain_local(); 806b01b2141SIngo Molnar } 807b01b2141SIngo Molnar 808243418e3SMinchan Kim /* 809243418e3SMinchan Kim * It's called from per-cpu workqueue context in SMP case so 810243418e3SMinchan Kim * lru_add_drain_cpu and invalidate_bh_lrus_cpu should run on 811243418e3SMinchan Kim * the same cpu. It shouldn't be a problem in !SMP case since 812243418e3SMinchan Kim * the core is only one and the locks will disable preemption. 813243418e3SMinchan Kim */ 814243418e3SMinchan Kim static void lru_add_and_bh_lrus_drain(void) 815243418e3SMinchan Kim { 816243418e3SMinchan Kim local_lock(&lru_pvecs.lock); 817243418e3SMinchan Kim lru_add_drain_cpu(smp_processor_id()); 818243418e3SMinchan Kim local_unlock(&lru_pvecs.lock); 819243418e3SMinchan Kim invalidate_bh_lrus_cpu(); 820adb11e78SSebastian Andrzej Siewior mlock_page_drain_local(); 821243418e3SMinchan Kim } 822243418e3SMinchan Kim 823b01b2141SIngo Molnar void lru_add_drain_cpu_zone(struct zone *zone) 824b01b2141SIngo Molnar { 825b01b2141SIngo Molnar local_lock(&lru_pvecs.lock); 826b01b2141SIngo Molnar lru_add_drain_cpu(smp_processor_id()); 827b01b2141SIngo Molnar drain_local_pages(zone); 828b01b2141SIngo Molnar local_unlock(&lru_pvecs.lock); 829adb11e78SSebastian Andrzej Siewior mlock_page_drain_local(); 8301da177e4SLinus Torvalds } 8311da177e4SLinus Torvalds 8326ea183d6SMichal Hocko #ifdef CONFIG_SMP 8336ea183d6SMichal Hocko 8346ea183d6SMichal Hocko static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work); 8356ea183d6SMichal Hocko 836c4028958SDavid Howells static void lru_add_drain_per_cpu(struct work_struct *dummy) 837053837fcSNick Piggin { 838243418e3SMinchan Kim lru_add_and_bh_lrus_drain(); 839053837fcSNick Piggin } 840053837fcSNick Piggin 8419852a721SMichal Hocko /* 8429852a721SMichal Hocko * Doesn't need any cpu hotplug locking because we do rely on per-cpu 8439852a721SMichal Hocko * kworkers being shut down before our page_alloc_cpu_dead callback is 8449852a721SMichal Hocko * executed on the offlined cpu. 8459852a721SMichal Hocko * Calling this function with cpu hotplug locks held can actually lead 8469852a721SMichal Hocko * to obscure indirect dependencies via WQ context. 8479852a721SMichal Hocko */ 8483db3264dSMiaohe Lin static inline void __lru_add_drain_all(bool force_all_cpus) 849053837fcSNick Piggin { 8506446a513SAhmed S. Darwish /* 8516446a513SAhmed S. Darwish * lru_drain_gen - Global pages generation number 8526446a513SAhmed S. Darwish * 8536446a513SAhmed S. Darwish * (A) Definition: global lru_drain_gen = x implies that all generations 8546446a513SAhmed S. Darwish * 0 < n <= x are already *scheduled* for draining. 8556446a513SAhmed S. Darwish * 8566446a513SAhmed S. Darwish * This is an optimization for the highly-contended use case where a 8576446a513SAhmed S. Darwish * user space workload keeps constantly generating a flow of pages for 8586446a513SAhmed S. Darwish * each CPU. 8596446a513SAhmed S. Darwish */ 8606446a513SAhmed S. 
Darwish static unsigned int lru_drain_gen; 8615fbc4616SChris Metcalf static struct cpumask has_work; 8626446a513SAhmed S. Darwish static DEFINE_MUTEX(lock); 8636446a513SAhmed S. Darwish unsigned cpu, this_gen; 8645fbc4616SChris Metcalf 865ce612879SMichal Hocko /* 866ce612879SMichal Hocko * Make sure nobody triggers this path before mm_percpu_wq is fully 867ce612879SMichal Hocko * initialized. 868ce612879SMichal Hocko */ 869ce612879SMichal Hocko if (WARN_ON(!mm_percpu_wq)) 870ce612879SMichal Hocko return; 871ce612879SMichal Hocko 8726446a513SAhmed S. Darwish /* 8736446a513SAhmed S. Darwish * Guarantee pagevec counter stores visible by this CPU are visible to 8746446a513SAhmed S. Darwish * other CPUs before loading the current drain generation. 8756446a513SAhmed S. Darwish */ 8766446a513SAhmed S. Darwish smp_mb(); 8776446a513SAhmed S. Darwish 8786446a513SAhmed S. Darwish /* 8796446a513SAhmed S. Darwish * (B) Locally cache global LRU draining generation number 8806446a513SAhmed S. Darwish * 8816446a513SAhmed S. Darwish * The read barrier ensures that the counter is loaded before the mutex 8826446a513SAhmed S. Darwish * is taken. It pairs with smp_mb() inside the mutex critical section 8836446a513SAhmed S. Darwish * at (D). 8846446a513SAhmed S. Darwish */ 8856446a513SAhmed S. Darwish this_gen = smp_load_acquire(&lru_drain_gen); 886eef1a429SKonstantin Khlebnikov 8875fbc4616SChris Metcalf mutex_lock(&lock); 888eef1a429SKonstantin Khlebnikov 889eef1a429SKonstantin Khlebnikov /* 8906446a513SAhmed S. Darwish * (C) Exit the draining operation if a newer generation, from another 8916446a513SAhmed S. Darwish * lru_add_drain_all(), was already scheduled for draining. Check (A). 892eef1a429SKonstantin Khlebnikov */ 893d479960eSMinchan Kim if (unlikely(this_gen != lru_drain_gen && !force_all_cpus)) 894eef1a429SKonstantin Khlebnikov goto done; 895eef1a429SKonstantin Khlebnikov 8966446a513SAhmed S. Darwish /* 8976446a513SAhmed S. Darwish * (D) Increment global generation number 8986446a513SAhmed S. Darwish * 8996446a513SAhmed S. Darwish * Pairs with smp_load_acquire() at (B), outside of the critical 9006446a513SAhmed S. Darwish * section. Use a full memory barrier to guarantee that the new global 9016446a513SAhmed S. Darwish * drain generation number is stored before loading pagevec counters. 9026446a513SAhmed S. Darwish * 9036446a513SAhmed S. Darwish * This pairing must be done here, before the for_each_online_cpu loop 9046446a513SAhmed S. Darwish * below which drains the page vectors. 9056446a513SAhmed S. Darwish * 9066446a513SAhmed S. Darwish * Let x, y, and z represent some system CPU numbers, where x < y < z. 907cb152a1aSShijie Luo * Assume CPU #z is in the middle of the for_each_online_cpu loop 9086446a513SAhmed S. Darwish * below and has already reached CPU #y's per-cpu data. CPU #x comes 9096446a513SAhmed S. Darwish * along, adds some pages to its per-cpu vectors, then calls 9106446a513SAhmed S. Darwish * lru_add_drain_all(). 9116446a513SAhmed S. Darwish * 9126446a513SAhmed S. Darwish * If the paired barrier is done at any later step, e.g. after the 9136446a513SAhmed S. Darwish * loop, CPU #x will just exit at (C) and miss flushing out all of its 9146446a513SAhmed S. Darwish * added pages. 9156446a513SAhmed S. Darwish */ 9166446a513SAhmed S. Darwish WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1); 9176446a513SAhmed S. 
Darwish smp_mb(); 918eef1a429SKonstantin Khlebnikov 9195fbc4616SChris Metcalf cpumask_clear(&has_work); 9205fbc4616SChris Metcalf for_each_online_cpu(cpu) { 9215fbc4616SChris Metcalf struct work_struct *work = &per_cpu(lru_add_drain_work, cpu); 9225fbc4616SChris Metcalf 923ff042f4aSMarcelo Tosatti if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) || 924c2bc1681SMatthew Wilcox (Oracle) data_race(folio_batch_count(&per_cpu(lru_rotate.fbatch, cpu))) || 925b01b2141SIngo Molnar pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) || 926b01b2141SIngo Molnar pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) || 927b01b2141SIngo Molnar pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) || 9288cc621d2SMinchan Kim need_activate_page_drain(cpu) || 9292fbb0c10SHugh Dickins need_mlock_page_drain(cpu) || 9308cc621d2SMinchan Kim has_bh_in_lru(cpu, NULL)) { 9315fbc4616SChris Metcalf INIT_WORK(work, lru_add_drain_per_cpu); 932ce612879SMichal Hocko queue_work_on(cpu, mm_percpu_wq, work); 9336446a513SAhmed S. Darwish __cpumask_set_cpu(cpu, &has_work); 9345fbc4616SChris Metcalf } 9355fbc4616SChris Metcalf } 9365fbc4616SChris Metcalf 9375fbc4616SChris Metcalf for_each_cpu(cpu, &has_work) 9385fbc4616SChris Metcalf flush_work(&per_cpu(lru_add_drain_work, cpu)); 9395fbc4616SChris Metcalf 940eef1a429SKonstantin Khlebnikov done: 9415fbc4616SChris Metcalf mutex_unlock(&lock); 942053837fcSNick Piggin } 943d479960eSMinchan Kim 944d479960eSMinchan Kim void lru_add_drain_all(void) 945d479960eSMinchan Kim { 946d479960eSMinchan Kim __lru_add_drain_all(false); 947d479960eSMinchan Kim } 9486ea183d6SMichal Hocko #else 9496ea183d6SMichal Hocko void lru_add_drain_all(void) 9506ea183d6SMichal Hocko { 9516ea183d6SMichal Hocko lru_add_drain(); 9526ea183d6SMichal Hocko } 9536446a513SAhmed S. Darwish #endif /* CONFIG_SMP */ 954053837fcSNick Piggin 955d479960eSMinchan Kim atomic_t lru_disable_count = ATOMIC_INIT(0); 956d479960eSMinchan Kim 957d479960eSMinchan Kim /* 958d479960eSMinchan Kim * lru_cache_disable() needs to be called before we start compiling 959d479960eSMinchan Kim * a list of pages to be migrated using isolate_lru_page(). 960d479960eSMinchan Kim * It drains pages on LRU cache and then disable on all cpus until 961d479960eSMinchan Kim * lru_cache_enable is called. 962d479960eSMinchan Kim * 963d479960eSMinchan Kim * Must be paired with a call to lru_cache_enable(). 964d479960eSMinchan Kim */ 965d479960eSMinchan Kim void lru_cache_disable(void) 966d479960eSMinchan Kim { 967d479960eSMinchan Kim atomic_inc(&lru_disable_count); 968d479960eSMinchan Kim /* 969ff042f4aSMarcelo Tosatti * Readers of lru_disable_count are protected by either disabling 970ff042f4aSMarcelo Tosatti * preemption or rcu_read_lock: 971ff042f4aSMarcelo Tosatti * 972ff042f4aSMarcelo Tosatti * preempt_disable, local_irq_disable [bh_lru_lock()] 973ff042f4aSMarcelo Tosatti * rcu_read_lock [rt_spin_lock CONFIG_PREEMPT_RT] 974ff042f4aSMarcelo Tosatti * preempt_disable [local_lock !CONFIG_PREEMPT_RT] 975ff042f4aSMarcelo Tosatti * 976ff042f4aSMarcelo Tosatti * Since v5.1 kernel, synchronize_rcu() is guaranteed to wait on 977ff042f4aSMarcelo Tosatti * preempt_disable() regions of code. So any CPU which sees 978ff042f4aSMarcelo Tosatti * lru_disable_count = 0 will have exited the critical 979ff042f4aSMarcelo Tosatti * section when synchronize_rcu() returns. 
980d479960eSMinchan Kim */ 98131733463SMarcelo Tosatti synchronize_rcu_expedited(); 982ff042f4aSMarcelo Tosatti #ifdef CONFIG_SMP 983d479960eSMinchan Kim __lru_add_drain_all(true); 984d479960eSMinchan Kim #else 985243418e3SMinchan Kim lru_add_and_bh_lrus_drain(); 986d479960eSMinchan Kim #endif 987d479960eSMinchan Kim } 988d479960eSMinchan Kim 989aabfb572SMichal Hocko /** 990ea1754a0SKirill A. Shutemov * release_pages - batched put_page() 991aabfb572SMichal Hocko * @pages: array of pages to release 992aabfb572SMichal Hocko * @nr: number of pages 9931da177e4SLinus Torvalds * 994aabfb572SMichal Hocko * Decrement the reference count on all the pages in @pages. If it 995aabfb572SMichal Hocko * fell to zero, remove the page from the LRU and free it. 9961da177e4SLinus Torvalds */ 997c6f92f9fSMel Gorman void release_pages(struct page **pages, int nr) 9981da177e4SLinus Torvalds { 9991da177e4SLinus Torvalds int i; 1000cc59850eSKonstantin Khlebnikov LIST_HEAD(pages_to_free); 10016168d0daSAlex Shi struct lruvec *lruvec = NULL; 10020de340cbSMatthew Wilcox (Oracle) unsigned long flags = 0; 10033f649ab7SKees Cook unsigned int lock_batch; 10041da177e4SLinus Torvalds 10051da177e4SLinus Torvalds for (i = 0; i < nr; i++) { 10061da177e4SLinus Torvalds struct page *page = pages[i]; 10070de340cbSMatthew Wilcox (Oracle) struct folio *folio = page_folio(page); 10081da177e4SLinus Torvalds 1009aabfb572SMichal Hocko /* 1010aabfb572SMichal Hocko * Make sure the IRQ-safe lock-holding time does not get 1011aabfb572SMichal Hocko * excessive with a continuous string of pages from the 10126168d0daSAlex Shi * same lruvec. The lock is held only if lruvec != NULL. 1013aabfb572SMichal Hocko */ 10146168d0daSAlex Shi if (lruvec && ++lock_batch == SWAP_CLUSTER_MAX) { 10156168d0daSAlex Shi unlock_page_lruvec_irqrestore(lruvec, flags); 10166168d0daSAlex Shi lruvec = NULL; 1017aabfb572SMichal Hocko } 1018aabfb572SMichal Hocko 10190de340cbSMatthew Wilcox (Oracle) page = &folio->page; 10206fcb52a5SAaron Lu if (is_huge_zero_page(page)) 1021aa88b68cSKirill A. Shutemov continue; 1022aa88b68cSKirill A. Shutemov 1023c5d6c45eSIra Weiny if (is_zone_device_page(page)) { 10246168d0daSAlex Shi if (lruvec) { 10256168d0daSAlex Shi unlock_page_lruvec_irqrestore(lruvec, flags); 10266168d0daSAlex Shi lruvec = NULL; 1027df6ad698SJérôme Glisse } 102889574945SChristoph Hellwig if (put_devmap_managed_page(page)) 1029df6ad698SJérôme Glisse continue; 103043fbdeb3SRalph Campbell if (put_page_testzero(page)) 103127674ef6SChristoph Hellwig free_zone_device_page(page); 103243fbdeb3SRalph Campbell continue; 103307d80269SJohn Hubbard } 1034df6ad698SJérôme Glisse 1035b5810039SNick Piggin if (!put_page_testzero(page)) 10361da177e4SLinus Torvalds continue; 10371da177e4SLinus Torvalds 1038ddc58f27SKirill A. Shutemov if (PageCompound(page)) { 10396168d0daSAlex Shi if (lruvec) { 10406168d0daSAlex Shi unlock_page_lruvec_irqrestore(lruvec, flags); 10416168d0daSAlex Shi lruvec = NULL; 1042ddc58f27SKirill A. Shutemov } 1043ddc58f27SKirill A. Shutemov __put_compound_page(page); 1044ddc58f27SKirill A. Shutemov continue; 1045ddc58f27SKirill A. Shutemov } 1046ddc58f27SKirill A. 
Shutemov 104746453a6eSNick Piggin if (PageLRU(page)) { 10482a5e4e34SAlexander Duyck struct lruvec *prev_lruvec = lruvec; 1049894bc310SLee Schermerhorn 10500de340cbSMatthew Wilcox (Oracle) lruvec = folio_lruvec_relock_irqsave(folio, lruvec, 10512a5e4e34SAlexander Duyck &flags); 10522a5e4e34SAlexander Duyck if (prev_lruvec != lruvec) 1053aabfb572SMichal Hocko lock_batch = 0; 1054fa9add64SHugh Dickins 105546ae6b2cSYu Zhao del_page_from_lru_list(page, lruvec); 105687560179SYu Zhao __clear_page_lru_flags(page); 105746453a6eSNick Piggin } 105846453a6eSNick Piggin 1059b109b870SHugh Dickins /* 1060b109b870SHugh Dickins * In rare cases, when truncation or holepunching raced with 1061b109b870SHugh Dickins * munlock after VM_LOCKED was cleared, Mlocked may still be 1062b109b870SHugh Dickins * found set here. This does not indicate a problem, unless 1063b109b870SHugh Dickins * "unevictable_pgs_cleared" appears worryingly large. 1064b109b870SHugh Dickins */ 1065b109b870SHugh Dickins if (unlikely(PageMlocked(page))) { 1066b109b870SHugh Dickins __ClearPageMlocked(page); 1067b109b870SHugh Dickins dec_zone_page_state(page, NR_MLOCK); 1068b109b870SHugh Dickins count_vm_event(UNEVICTABLE_PGCLEARED); 1069b109b870SHugh Dickins } 1070b109b870SHugh Dickins 1071cc59850eSKonstantin Khlebnikov list_add(&page->lru, &pages_to_free); 10721da177e4SLinus Torvalds } 10736168d0daSAlex Shi if (lruvec) 10746168d0daSAlex Shi unlock_page_lruvec_irqrestore(lruvec, flags); 10751da177e4SLinus Torvalds 1076747db954SJohannes Weiner mem_cgroup_uncharge_list(&pages_to_free); 10772d4894b5SMel Gorman free_unref_page_list(&pages_to_free); 10781da177e4SLinus Torvalds } 10790be8557bSMiklos Szeredi EXPORT_SYMBOL(release_pages); 10801da177e4SLinus Torvalds 10811da177e4SLinus Torvalds /* 10821da177e4SLinus Torvalds * The pages which we're about to release may be in the deferred lru-addition 10831da177e4SLinus Torvalds * queues. That would prevent them from really being freed right now. That's 10841da177e4SLinus Torvalds * OK from a correctness point of view but is inefficient - those pages may be 10851da177e4SLinus Torvalds * cache-warm and we want to give them back to the page allocator ASAP. 10861da177e4SLinus Torvalds * 10871da177e4SLinus Torvalds * So __pagevec_release() will drain those queues here. __pagevec_lru_add() 10881da177e4SLinus Torvalds * and __pagevec_lru_add_active() call release_pages() directly to avoid 10891da177e4SLinus Torvalds * mutual recursion. 10901da177e4SLinus Torvalds */ 10911da177e4SLinus Torvalds void __pagevec_release(struct pagevec *pvec) 10921da177e4SLinus Torvalds { 10937f0b5fb9SMel Gorman if (!pvec->percpu_pvec_drained) { 10941da177e4SLinus Torvalds lru_add_drain(); 10957f0b5fb9SMel Gorman pvec->percpu_pvec_drained = true; 1096d9ed0d08SMel Gorman } 1097c6f92f9fSMel Gorman release_pages(pvec->pages, pagevec_count(pvec)); 10981da177e4SLinus Torvalds pagevec_reinit(pvec); 10991da177e4SLinus Torvalds } 11007f285701SSteve French EXPORT_SYMBOL(__pagevec_release); 11017f285701SSteve French 11021da177e4SLinus Torvalds /** 11031613fac9SMatthew Wilcox (Oracle) * folio_batch_remove_exceptionals() - Prune non-folios from a batch. 11041613fac9SMatthew Wilcox (Oracle) * @fbatch: The batch to prune 11050cd6144aSJohannes Weiner * 11061613fac9SMatthew Wilcox (Oracle) * find_get_entries() fills a batch with both folios and shadow/swap/DAX 11071613fac9SMatthew Wilcox (Oracle) * entries. 
/**
 * folio_batch_remove_exceptionals() - Prune non-folios from a batch.
 * @fbatch: The batch to prune
 *
 * find_get_entries() fills a batch with both folios and shadow/swap/DAX
 * entries.  This function prunes all the non-folio entries from @fbatch
 * without leaving holes, so that it can be passed on to folio-only batch
 * operations.
 */
void folio_batch_remove_exceptionals(struct folio_batch *fbatch)
{
        unsigned int i, j;

        for (i = 0, j = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];
                if (!xa_is_value(folio))
                        fbatch->folios[j++] = folio;
        }
        fbatch->nr = j;
}
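
/*
 * Illustrative sketch, not part of the upstream file: a batch filled by a
 * lookup such as find_get_entries() may mix real folios with shadow/swap/DAX
 * value entries.  The hypothetical helper below prunes the value entries
 * first, after which every remaining slot can be treated as a folio.
 */
static void __maybe_unused example_process_mixed_batch(struct folio_batch *fbatch)
{
        unsigned int i;

        /* Drop the xa_is_value() entries; real folios are packed to the front. */
        folio_batch_remove_exceptionals(fbatch);

        for (i = 0; i < folio_batch_count(fbatch); i++) {
                struct folio *folio = fbatch->folios[i];

                /* Each remaining entry is a genuine folio with a reference held. */
                folio_mark_accessed(folio);
        }
        /* Drop the references taken by the lookup and reset the batch. */
        folio_batch_release(fbatch);
}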
/**
 * pagevec_lookup_range - gang pagecache lookup
 * @pvec: Where the resulting pages are placed
 * @mapping: The address_space to search
 * @start: The starting page index
 * @end: The final page index
 *
 * pagevec_lookup_range() will search for & return a group of up to PAGEVEC_SIZE
 * pages in the mapping starting from index @start and up to index @end
 * (inclusive).  The pages are placed in @pvec.  pagevec_lookup_range() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.  We
 * also update @start to index the next page for the traversal.
 *
 * pagevec_lookup_range() returns the number of pages which were found.  If this
 * number is smaller than PAGEVEC_SIZE, the end of the specified range has been
 * reached.
 */
unsigned pagevec_lookup_range(struct pagevec *pvec,
                struct address_space *mapping, pgoff_t *start, pgoff_t end)
{
        pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
                                        pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range);

unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
                struct address_space *mapping, pgoff_t *index, pgoff_t end,
                xa_mark_t tag)
{
        pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
                                        PAGEVEC_SIZE, pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
        unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
                page_cluster = 2;
        else
                page_cluster = 3;
        /*
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more
         */
}
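
/*
 * Illustrative sketch, not part of the upstream file: a typical user of the
 * lookup helpers above walks a mapping in PAGEVEC_SIZE chunks, for example
 * collecting dirty pages for writeback.  The helper name is hypothetical;
 * @start is advanced by the lookup itself, so the loop simply repeats until
 * the range is exhausted.  Real writeback would be done under the page lock.
 */
static void __maybe_unused example_walk_dirty_range(struct address_space *mapping,
                pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        unsigned int i, nr;

        pagevec_init(&pvec);
        while ((nr = pagevec_lookup_range_tag(&pvec, mapping, &start, end,
                                              PAGECACHE_TAG_DIRTY))) {
                for (i = 0; i < nr; i++) {
                        struct page *page = pvec.pages[i];

                        lock_page(page);
                        /* ... a real caller would write the page back here ... */
                        unlock_page(page);
                }
                /* Drop the references taken by the lookup. */
                pagevec_release(&pvec);
                cond_resched();
        }
}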