// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/admin-guide/sysctl/vm.rst.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>
#include <linux/buffer_head.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

/* Protecting only lru_rotate.pvec which requires disabling interrupts */
struct lru_rotate {
        local_lock_t lock;
        struct pagevec pvec;
};
static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

/*
 * The following struct pagevec are grouped together because they are protected
 * by disabling preemption (and interrupts remain enabled).
 */
struct lru_pvecs {
        local_lock_t lock;
        struct pagevec lru_add;
        struct pagevec lru_deactivate_file;
        struct pagevec lru_deactivate;
        struct pagevec lru_lazyfree;
#ifdef CONFIG_SMP
        struct pagevec activate_page;
#endif
};
static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
        .lock = INIT_LOCAL_LOCK(lock),
};

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
        if (PageLRU(page)) {
                struct lruvec *lruvec;
                unsigned long flags;

                lruvec = lock_page_lruvec_irqsave(page, &flags);
                del_page_from_lru_list(page, lruvec);
                __clear_page_lru_flags(page);
                unlock_page_lruvec_irqrestore(lruvec, flags);
        }
        __ClearPageWaiters(page);
}

static void __put_single_page(struct page *page)
{
        __page_cache_release(page);
        mem_cgroup_uncharge(page);
        free_unref_page(page, 0);
}

static void __put_compound_page(struct page *page)
{
        /*
         * __page_cache_release() is supposed to be called for thp, not for
         * hugetlb. This is because a hugetlb page never has PageLRU set
         * (it is never placed on any LRU list) and no memcg routines should
         * be called for hugetlb (it has a separate hugetlb_cgroup.)
         */
        if (!PageHuge(page))
                __page_cache_release(page);
        destroy_compound_page(page);
}

void __put_page(struct page *page)
{
        if (is_zone_device_page(page)) {
                put_dev_pagemap(page->pgmap);

                /*
                 * The page belongs to the device that created pgmap. Do
                 * not return it to the page allocator.
                 */
                return;
        }

        if (unlikely(PageCompound(page)))
                __put_compound_page(page);
        else
                __put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
        while (!list_empty(pages)) {
                struct page *victim;

                victim = lru_to_page(pages);
                list_del(&victim->lru);
                put_page(victim);
        }
}
EXPORT_SYMBOL(put_pages_list);
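
/*
 * Illustrative sketch (not code from this file): a typical caller chains
 * pages through page->lru and hands the whole list to put_pages_list()
 * instead of issuing one put_page() per page.  The helper below is
 * hypothetical and only shows the calling convention:
 *
 *	static void example_drop_pages(struct page *a, struct page *b)
 *	{
 *		LIST_HEAD(pages);
 *
 *		list_add(&a->lru, &pages);
 *		list_add(&b->lru, &pages);
 *		put_pages_list(&pages);	// list is empty afterwards
 *	}
 */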

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
                struct page **pages)
{
        int seg;

        for (seg = 0; seg < nr_segs; seg++) {
                if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
                        return seg;

                pages[seg] = kmap_to_page(kiov[seg].iov_base);
                get_page(pages[seg]);
        }

        return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);
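
/*
 * Illustrative sketch (an assumed caller, not one from this file): pinning
 * a single page-sized kernel buffer.  Each page returned must later be
 * released with put_page():
 *
 *	struct kvec kiov = { .iov_base = buf, .iov_len = PAGE_SIZE };
 *	struct page *page;
 *
 *	if (get_kernel_pages(&kiov, 1, 0, &page) == 1) {
 *		... use page ...
 *		put_page(page);
 *	}
 */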

static void pagevec_lru_move_fn(struct pagevec *pvec,
        void (*move_fn)(struct page *page, struct lruvec *lruvec))
{
        int i;
        struct lruvec *lruvec = NULL;
        unsigned long flags = 0;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];

                /* block memcg migration during page moving between lru */
                if (!TestClearPageLRU(page))
                        continue;

                lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
                (*move_fn)(page, lruvec);

                SetPageLRU(page);
        }
        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
        release_pages(pvec->pages, pvec->nr);
        pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec)
{
        if (!PageUnevictable(page)) {
                del_page_from_lru_list(page, lruvec);
                ClearPageActive(page);
                add_page_to_lru_list_tail(page, lruvec);
                __count_vm_events(PGROTATED, thp_nr_pages(page));
        }
}

/* return true if pagevec needs to drain */
static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page)
{
        bool ret = false;

        if (!pagevec_add(pvec, page) || PageCompound(page) ||
                        lru_cache_disabled())
                ret = true;

        return ret;
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 *
 * rotate_reclaimable_page() must disable IRQs, to prevent nasty races.
 */
void rotate_reclaimable_page(struct page *page)
{
        if (!PageLocked(page) && !PageDirty(page) &&
            !PageUnevictable(page) && PageLRU(page)) {
                struct pagevec *pvec;
                unsigned long flags;

                get_page(page);
                local_lock_irqsave(&lru_rotate.lock, flags);
                pvec = this_cpu_ptr(&lru_rotate.pvec);
                if (pagevec_add_and_need_flush(pvec, page))
                        pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
                local_unlock_irqrestore(&lru_rotate.lock, flags);
        }
}

void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
{
        do {
                unsigned long lrusize;

                /*
                 * Holding lruvec->lru_lock is safe here, since
                 * 1) the lruvec is pinned by the caller in reclaim, or
                 * 2) we come from a pre-LRU page during refault (which also
                 *    holds the rcu lock, so it would be safe even if the page
                 *    was on the LRU and could move simultaneously to a new
                 *    lruvec).
                 */
                spin_lock_irq(&lruvec->lru_lock);
                /* Record cost event */
                if (file)
                        lruvec->file_cost += nr_pages;
                else
                        lruvec->anon_cost += nr_pages;

                /*
                 * Decay previous events
                 *
                 * Because workloads change over time (and to avoid
                 * overflow) we keep these statistics as a floating
                 * average, which ends up weighing recent refaults
                 * more than old ones.
                 */
                lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
                          lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
                          lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
                          lruvec_page_state(lruvec, NR_ACTIVE_FILE);

                if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
                        lruvec->file_cost /= 2;
                        lruvec->anon_cost /= 2;
                }
                spin_unlock_irq(&lruvec->lru_lock);
        } while ((lruvec = parent_lruvec(lruvec)));
}

void lru_note_cost_page(struct page *page)
{
        lru_note_cost(mem_cgroup_page_lruvec(page),
                      page_is_file_lru(page), thp_nr_pages(page));
}
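
/*
 * Worked example of the decay in lru_note_cost() above (illustrative
 * numbers): with lrusize = 4000 pages, costs are halved once
 * file_cost + anon_cost exceeds lrusize / 4 = 1000.  Recording 600 file
 * refaults and then 500 anon refaults trips the threshold (1100 > 1000)
 * and decays the costs to 300 and 250, so older refaults end up weighing
 * half as much as the most recent batch.
 */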

static void __activate_page(struct page *page, struct lruvec *lruvec)
{
        if (!PageActive(page) && !PageUnevictable(page)) {
                int nr_pages = thp_nr_pages(page);

                del_page_from_lru_list(page, lruvec);
                SetPageActive(page);
                add_page_to_lru_list(page, lruvec);
                trace_mm_lru_activate(page);

                __count_vm_events(PGACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
                                     nr_pages);
        }
}

#ifdef CONFIG_SMP
static void activate_page_drain(int cpu)
{
        struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);

        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, __activate_page);
}

static bool need_activate_page_drain(int cpu)
{
        return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
}

static void activate_page(struct page *page)
{
        page = compound_head(page);
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                struct pagevec *pvec;

                local_lock(&lru_pvecs.lock);
                pvec = this_cpu_ptr(&lru_pvecs.activate_page);
                get_page(page);
                if (pagevec_add_and_need_flush(pvec, page))
                        pagevec_lru_move_fn(pvec, __activate_page);
                local_unlock(&lru_pvecs.lock);
        }
}

#else
static inline void activate_page_drain(int cpu)
{
}

static void activate_page(struct page *page)
{
        struct lruvec *lruvec;

        page = compound_head(page);
        if (TestClearPageLRU(page)) {
                lruvec = lock_page_lruvec_irq(page);
                __activate_page(page, lruvec);
                unlock_page_lruvec_irq(lruvec);
                SetPageLRU(page);
        }
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
        struct pagevec *pvec;
        int i;

        local_lock(&lru_pvecs.lock);
        pvec = this_cpu_ptr(&lru_pvecs.lru_add);

        /*
         * Search backwards on the optimistic assumption that the page being
         * activated has just been added to this pagevec. Note that only
         * the local pagevec is examined as a !PageLRU page could be in the
         * process of being released, reclaimed, migrated or on a remote
         * pagevec that is currently being drained. Furthermore, marking
         * a remote pagevec's page PageActive potentially hits a race where
         * a page is marked PageActive just after it is added to the inactive
         * list, causing accounting errors and BUG_ON checks to trigger.
         */
        for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
                struct page *pagevec_page = pvec->pages[i];

                if (pagevec_page == page) {
                        SetPageActive(page);
                        break;
                }
        }

        local_unlock(&lru_pvecs.lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
        page = compound_head(page);

        if (!PageReferenced(page)) {
                SetPageReferenced(page);
        } else if (PageUnevictable(page)) {
                /*
                 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
                 * this list is never rotated or maintained, so marking an
                 * unevictable page accessed has no effect.
                 */
        } else if (!PageActive(page)) {
                /*
                 * If the page is on the LRU, queue it for activation via
                 * lru_pvecs.activate_page. Otherwise, assume the page is on a
                 * pagevec, mark it active and it'll be moved to the active
                 * LRU on the next drain.
                 */
                if (PageLRU(page))
                        activate_page(page);
                else
                        __lru_cache_activate_page(page);
                ClearPageReferenced(page);
                workingset_activation(page);
        }
        if (page_is_idle(page))
                clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
        struct pagevec *pvec;

        VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
        VM_BUG_ON_PAGE(PageLRU(page), page);

        get_page(page);
        local_lock(&lru_pvecs.lock);
        pvec = this_cpu_ptr(&lru_pvecs.lru_add);
        if (pagevec_add_and_need_flush(pvec, page))
                __pagevec_lru_add(pvec);
        local_unlock(&lru_pvecs.lock);
}
EXPORT_SYMBOL(lru_cache_add);
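
/*
 * Illustrative sketch (an assumed caller, not one from this file): a
 * fault-path user allocates a page, populates it, and batches it onto the
 * local pagevec; the page only reaches the real LRU lists once the
 * pagevec is drained:
 *
 *	struct page *page = alloc_page(GFP_HIGHUSER_MOVABLE);
 *
 *	if (page) {
 *		... map or populate the page ...
 *		lru_cache_add(page);	// LRU placement deferred to drain
 *	}
 */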

/**
 * lru_cache_add_inactive_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the inactive or unevictable LRU list, depending on its
 * evictability.
 */
void lru_cache_add_inactive_or_unevictable(struct page *page,
                struct vm_area_struct *vma)
{
        bool unevictable;

        VM_BUG_ON_PAGE(PageLRU(page), page);

        unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
        if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
                int nr_pages = thp_nr_pages(page);
                /*
                 * We use the irq-unsafe __mod_zone_page_state because this
                 * counter is not modified from interrupt context, and the pte
                 * lock is held (a spinlock), which implies preemption is
                 * disabled.
                 */
                __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
                count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
        }
        lru_cache_add(page);
}

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't mapped and is dirty/under writeback, it can be
 * reclaimed ASAP by using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the head of the inactive list because
 * the VM expects it to be written out by the flusher threads, which is
 * much more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec)
{
        bool active = PageActive(page);
        int nr_pages = thp_nr_pages(page);

        if (PageUnevictable(page))
                return;

        /* Some processes are using the page */
        if (page_mapped(page))
                return;

        del_page_from_lru_list(page, lruvec);
        ClearPageActive(page);
        ClearPageReferenced(page);

        if (PageWriteback(page) || PageDirty(page)) {
                /*
                 * PG_reclaim can race with end_page_writeback, which can
                 * confuse readahead.  But the race window is _really_ small,
                 * and it's a non-critical problem.
                 */
                add_page_to_lru_list(page, lruvec);
                SetPageReclaim(page);
        } else {
                /*
                 * The page's writeback already ended while it sat in the
                 * pagevec; move the page to the tail of the inactive list.
                 */
                add_page_to_lru_list_tail(page, lruvec);
                __count_vm_events(PGROTATED, nr_pages);
        }

        if (active) {
                __count_vm_events(PGDEACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
                                     nr_pages);
        }
}

static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec)
{
        if (PageActive(page) && !PageUnevictable(page)) {
                int nr_pages = thp_nr_pages(page);

                del_page_from_lru_list(page, lruvec);
                ClearPageActive(page);
                ClearPageReferenced(page);
                add_page_to_lru_list(page, lruvec);

                __count_vm_events(PGDEACTIVATE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
                                     nr_pages);
        }
}

static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec)
{
        if (PageAnon(page) && PageSwapBacked(page) &&
            !PageSwapCache(page) && !PageUnevictable(page)) {
                int nr_pages = thp_nr_pages(page);

                del_page_from_lru_list(page, lruvec);
                ClearPageActive(page);
                ClearPageReferenced(page);
                /*
                 * Lazyfree pages are clean anonymous pages.  They have
                 * the PG_swapbacked flag cleared, to distinguish them from
                 * normal anonymous pages.
                 */
                ClearPageSwapBacked(page);
                add_page_to_lru_list(page, lruvec);

                __count_vm_events(PGLAZYFREE, nr_pages);
                __count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
                                     nr_pages);
        }
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
        struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);

        if (pagevec_count(pvec))
                __pagevec_lru_add(pvec);

        pvec = &per_cpu(lru_rotate.pvec, cpu);
        /* Disabling interrupts below acts as a compiler barrier. */
        if (data_race(pagevec_count(pvec))) {
                unsigned long flags;

                /* No harm done if a racing interrupt already did this */
                local_lock_irqsave(&lru_rotate.lock, flags);
                pagevec_lru_move_fn(pvec, pagevec_move_tail_fn);
                local_unlock_irqrestore(&lru_rotate.lock, flags);
        }

        pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);

        pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_deactivate_fn);

        pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_lazyfree_fn);

        activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
        /*
         * In a workload with many unevictable pages, such as one using
         * mprotect heavily, deactivating unevictable pages to accelerate
         * reclaim is pointless.
         */
        if (PageUnevictable(page))
                return;

        if (likely(get_page_unless_zero(page))) {
                struct pagevec *pvec;

                local_lock(&lru_pvecs.lock);
                pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);

                if (pagevec_add_and_need_flush(pvec, page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_file_fn);
                local_unlock(&lru_pvecs.lock);
        }
}
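
/*
 * Illustrative sketch (an assumed caller, mirroring an invalidation path):
 * when a page cannot be dropped immediately, e.g. because it is dirty or
 * under writeback, the caller falls back to deactivation as a reclaim
 * hint:
 *
 *	if (!invalidate_inode_page(page))
 *		deactivate_file_page(page);	// hint: reclaim this soon
 */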

/*
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the
 * active list and was not an unevictable page.  This is done to accelerate
 * the reclaim of @page.
 */
void deactivate_page(struct page *page)
{
        if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
                struct pagevec *pvec;

                local_lock(&lru_pvecs.lock);
                pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
                get_page(page);
                if (pagevec_add_and_need_flush(pvec, page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_fn);
                local_unlock(&lru_pvecs.lock);
        }
}

/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
        if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
            !PageSwapCache(page) && !PageUnevictable(page)) {
                struct pagevec *pvec;

                local_lock(&lru_pvecs.lock);
                pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
                get_page(page);
                if (pagevec_add_and_need_flush(pvec, page))
                        pagevec_lru_move_fn(pvec, lru_lazyfree_fn);
                local_unlock(&lru_pvecs.lock);
        }
}

void lru_add_drain(void)
{
        local_lock(&lru_pvecs.lock);
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&lru_pvecs.lock);
}
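
/*
 * Illustrative sketch (an assumption, simplified from the MADV_FREE path):
 * madvise(MADV_FREE) walks the range's ptes and, for each clean anonymous
 * page it can lock, hands the page to mark_page_lazyfree() so reclaim may
 * later discard it without swapout:
 *
 *	if (trylock_page(page)) {
 *		...
 *		mark_page_lazyfree(page);
 *		unlock_page(page);
 *	}
 */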

/*
 * In the SMP case this is called from per-cpu workqueue context, so
 * lru_add_drain_cpu() and invalidate_bh_lrus_cpu() run on the same CPU.
 * The !SMP case is not a problem either, since there is only one core
 * and the locks disable preemption.
 */
static void lru_add_and_bh_lrus_drain(void)
{
        local_lock(&lru_pvecs.lock);
        lru_add_drain_cpu(smp_processor_id());
        local_unlock(&lru_pvecs.lock);
        invalidate_bh_lrus_cpu();
}

void lru_add_drain_cpu_zone(struct zone *zone)
{
        local_lock(&lru_pvecs.lock);
        lru_add_drain_cpu(smp_processor_id());
        drain_local_pages(zone);
        local_unlock(&lru_pvecs.lock);
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
        lru_add_and_bh_lrus_drain();
}

/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
inline void __lru_add_drain_all(bool force_all_cpus)
{
        /*
         * lru_drain_gen - Global pages generation number
         *
         * (A) Definition: global lru_drain_gen = x implies that all generations
         *     0 < n <= x are already *scheduled* for draining.
         *
         * This is an optimization for the highly-contended use case where a
         * user space workload keeps constantly generating a flow of pages for
         * each CPU.
         */
        static unsigned int lru_drain_gen;
        static struct cpumask has_work;
        static DEFINE_MUTEX(lock);
        unsigned cpu, this_gen;

        /*
         * Make sure nobody triggers this path before mm_percpu_wq is fully
         * initialized.
         */
        if (WARN_ON(!mm_percpu_wq))
                return;

        /*
         * Guarantee pagevec counter stores visible by this CPU are visible to
         * other CPUs before loading the current drain generation.
         */
        smp_mb();

        /*
         * (B) Locally cache global LRU draining generation number
         *
         * The read barrier ensures that the counter is loaded before the mutex
         * is taken.  It pairs with smp_mb() inside the mutex critical section
         * at (D).
         */
        this_gen = smp_load_acquire(&lru_drain_gen);

        mutex_lock(&lock);

        /*
         * (C) Exit the draining operation if a newer generation, from another
         * lru_add_drain_all(), was already scheduled for draining. Check (A).
         */
        if (unlikely(this_gen != lru_drain_gen && !force_all_cpus))
                goto done;

        /*
         * (D) Increment global generation number
         *
         * Pairs with smp_load_acquire() at (B), outside of the critical
         * section. Use a full memory barrier to guarantee that the new global
         * drain generation number is stored before loading pagevec counters.
         *
         * This pairing must be done here, before the for_each_online_cpu loop
         * below which drains the page vectors.
         *
         * Let x, y, and z represent some system CPU numbers, where x < y < z.
         * Assume CPU #z is in the middle of the for_each_online_cpu loop
         * below and has already reached CPU #y's per-cpu data. CPU #x comes
         * along, adds some pages to its per-cpu vectors, then calls
         * lru_add_drain_all().
         *
         * If the paired barrier is done at any later step, e.g. after the
         * loop, CPU #x will just exit at (C) and miss flushing out all of its
         * added pages.
         */
        WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
        smp_mb();

        cpumask_clear(&has_work);
        for_each_online_cpu(cpu) {
                struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

                if (force_all_cpus ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
                    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
                    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
                    need_activate_page_drain(cpu) ||
                    has_bh_in_lru(cpu, NULL)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
                        queue_work_on(cpu, mm_percpu_wq, work);
                        __cpumask_set_cpu(cpu, &has_work);
                }
        }

        for_each_cpu(cpu, &has_work)
                flush_work(&per_cpu(lru_add_drain_work, cpu));

done:
        mutex_unlock(&lock);
}

void lru_add_drain_all(void)
{
        __lru_add_drain_all(false);
}
#else
void lru_add_drain_all(void)
{
        lru_add_drain();
}
#endif /* CONFIG_SMP */

atomic_t lru_disable_count = ATOMIC_INIT(0);

/*
 * lru_cache_disable() needs to be called before we start compiling
 * a list of pages to be migrated using isolate_lru_page().
 * It drains the pages sitting in the LRU pagevecs and then disables the
 * LRU caches on all cpus until lru_cache_enable() is called.
 *
 * Must be paired with a call to lru_cache_enable().
 */
void lru_cache_disable(void)
{
        atomic_inc(&lru_disable_count);
#ifdef CONFIG_SMP
        /*
         * lru_add_drain_all in the force mode will schedule draining on
         * all online CPUs so any calls of lru_cache_disabled wrapped by
         * local_lock or preemption disabled would be ordered by that.
         * The atomic operation doesn't need to have stronger ordering
         * requirements because that is enforced by the scheduling
         * guarantees.
         */
        __lru_add_drain_all(true);
#else
        lru_add_and_bh_lrus_drain();
#endif
}
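
/*
 * Illustrative sketch (an assumption, mirroring how migration callers pair
 * the two calls): LRU caching is disabled around the isolate/migrate
 * sequence so pages cannot hide in per-cpu pagevecs while the range is
 * being processed:
 *
 *	lru_cache_disable();
 *	... isolate_lru_page() / migrate_pages() on the target range ...
 *	lru_cache_enable();
 */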

/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr)
{
        int i;
        LIST_HEAD(pages_to_free);
        struct lruvec *lruvec = NULL;
        unsigned long flags;
        unsigned int lock_batch;

        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];

                /*
                 * Make sure the IRQ-safe lock-holding time does not get
                 * excessive with a continuous string of pages from the
                 * same lruvec. The lock is held only if lruvec != NULL.
                 */
                if (lruvec && ++lock_batch == SWAP_CLUSTER_MAX) {
                        unlock_page_lruvec_irqrestore(lruvec, flags);
                        lruvec = NULL;
                }

                page = compound_head(page);
                if (is_huge_zero_page(page))
                        continue;

                if (is_zone_device_page(page)) {
                        if (lruvec) {
                                unlock_page_lruvec_irqrestore(lruvec, flags);
                                lruvec = NULL;
                        }
                        /*
                         * ZONE_DEVICE pages that return 'false' from
                         * page_is_devmap_managed() do not require special
                         * processing, and instead, expect a call to
                         * put_page_testzero().
                         */
                        if (page_is_devmap_managed(page)) {
                                put_devmap_managed_page(page);
                                continue;
                        }
                        if (put_page_testzero(page))
                                put_dev_pagemap(page->pgmap);
                        continue;
                }

                if (!put_page_testzero(page))
                        continue;

                if (PageCompound(page)) {
                        if (lruvec) {
                                unlock_page_lruvec_irqrestore(lruvec, flags);
                                lruvec = NULL;
                        }
                        __put_compound_page(page);
                        continue;
                }

                if (PageLRU(page)) {
                        struct lruvec *prev_lruvec = lruvec;

                        lruvec = relock_page_lruvec_irqsave(page, lruvec,
                                                            &flags);
                        if (prev_lruvec != lruvec)
                                lock_batch = 0;

                        del_page_from_lru_list(page, lruvec);
                        __clear_page_lru_flags(page);
                }

                __ClearPageWaiters(page);

                list_add(&page->lru, &pages_to_free);
        }
        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);

        mem_cgroup_uncharge_list(&pages_to_free);
        free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);
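
/*
 * Illustrative sketch (an assumed caller, with a hypothetical helper):
 * dropping the references on a batch of pages in one call instead of a
 * put_page() loop, which also bounds how long the lruvec lock is held:
 *
 *	struct page *pages[16];
 *	int nr = collect_pages(pages, 16);	// hypothetical helper
 *
 *	release_pages(pages, nr);
 */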

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
        if (!pvec->percpu_pvec_drained) {
                lru_add_drain();
                pvec->percpu_pvec_drained = true;
        }
        release_pages(pvec->pages, pagevec_count(pvec));
        pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec)
{
        int was_unevictable = TestClearPageUnevictable(page);
        int nr_pages = thp_nr_pages(page);

        VM_BUG_ON_PAGE(PageLRU(page), page);

        /*
         * Page becomes evictable in two ways:
         * 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
         * 2) Before acquiring LRU lock to put the page to correct LRU and then
         *    a) do PageLRU check with lock [check_move_unevictable_pages]
         *    b) do PageLRU check before lock [clear_page_mlock]
         *
         * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need
         * following strict ordering:
         *
         * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
         *
         * SetPageLRU()				TestClearPageMlocked()
         * smp_mb() // explicit ordering	// above provides strict
         *					// ordering
         * PageMlocked()			PageLRU()
         *
         *
         * if '#1' does not observe setting of PG_lru by '#0' and fails
         * isolation, the explicit barrier will make sure that page_evictable
         * check will put the page in correct LRU. Without smp_mb(), SetPageLRU
         * can be reordered after PageMlocked check and can make '#1' to fail
         * the isolation of the page whose Mlocked bit is cleared (#0 is also
         * looking at the same page) and the evictable page will be stranded
         * in an unevictable LRU.
         */
        SetPageLRU(page);
        smp_mb__after_atomic();

        if (page_evictable(page)) {
                if (was_unevictable)
                        __count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
        } else {
                ClearPageActive(page);
                SetPageUnevictable(page);
                if (!was_unevictable)
                        __count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
        }

        add_page_to_lru_list(page, lruvec);
        trace_mm_lru_insertion(page);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
        int i;
        struct lruvec *lruvec = NULL;
        unsigned long flags = 0;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];

                lruvec = relock_page_lruvec_irqsave(page, lruvec, &flags);
                __pagevec_lru_add_fn(page, lruvec);
        }
        if (lruvec)
                unlock_page_lruvec_irqrestore(lruvec, flags);
        release_pages(pvec->pages, pvec->nr);
        pagevec_reinit(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * find_get_entries() fills both pages and XArray value entries (aka
 * exceptional entries) into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
        int i, j;

        for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                if (!xa_is_value(page))
                        pvec->pages[j++] = page;
        }
        pvec->nr = j;
}
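
/*
 * Illustrative sketch (an assumption, mirroring a truncate-style caller):
 * after find_get_entries() the pagevec may hold shadow/swap value entries;
 * pruning them first makes the pagevec safe for page-only helpers such as
 * pagevec_release():
 *
 *	if (find_get_entries(mapping, index, end, &pvec, indices)) {
 *		pagevec_remove_exceptionals(&pvec);
 *		pagevec_release(&pvec);
 *	}
 */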

/**
 * pagevec_lookup_range - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @end:	The final page index
 *
 * pagevec_lookup_range() will search for & return a group of up to
 * PAGEVEC_SIZE pages in the mapping starting from index @start and up to
 * index @end (inclusive).  The pages are placed in @pvec.  pagevec_lookup()
 * takes a reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages. We
 * also update @start to index the next page for the traversal.
 *
 * pagevec_lookup_range() returns the number of pages which were found. If this
 * number is smaller than PAGEVEC_SIZE, the end of the specified range has been
 * reached.
 */
unsigned pagevec_lookup_range(struct pagevec *pvec,
                struct address_space *mapping, pgoff_t *start, pgoff_t end)
{
        pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
                                        pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range);
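
/*
 * Illustrative sketch (an assumed caller, using the usual batched-iteration
 * pattern): walking every page of a mapping in PAGEVEC_SIZE batches; @index
 * is advanced by the lookup itself, and each batch's references are dropped
 * with pagevec_release():
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *
 *	pagevec_init(&pvec);
 *	while (pagevec_lookup_range(&pvec, mapping, &index, (pgoff_t)-1)) {
 *		int i;
 *
 *		for (i = 0; i < pagevec_count(&pvec); i++)
 *			... inspect pvec.pages[i] ...
 *		pagevec_release(&pvec);
 *	}
 */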

unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
                struct address_space *mapping, pgoff_t *index, pgoff_t end,
                xa_mark_t tag)
{
        pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
                                        PAGEVEC_SIZE, pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_range_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
        unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);

        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
                page_cluster = 2;
        else
                page_cluster = 3;
        /*
         * Right now, other parts of the system mean that we
         * _really_ don't want to cluster much more.
         */
}

#ifdef CONFIG_DEV_PAGEMAP_OPS
void put_devmap_managed_page(struct page *page)
{
        int count;

        if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
                return;

        count = page_ref_dec_return(page);

        /*
         * devmap page refcounts are 1-based, rather than 0-based: if
         * refcount is 1, then the page is free and the refcount is
         * stable because nobody holds a reference on the page.
         */
        if (count == 1)
                free_devmap_managed_page(page);
        else if (!count)
                __put_page(page);
}
EXPORT_SYMBOL(put_devmap_managed_page);
#endif