// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/admin-guide/sysctl/vm.rst.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>
#include <linux/local_lock.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

/* Protecting only lru_rotate.pvec which requires disabling interrupts */
struct lru_rotate {
	local_lock_t lock;
	struct pagevec pvec;
};
static DEFINE_PER_CPU(struct lru_rotate, lru_rotate) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/*
 * The following struct pagevec are grouped together because they are protected
 * by disabling preemption (and interrupts remain enabled).
 */
struct lru_pvecs {
	local_lock_t lock;
	struct pagevec lru_add;
	struct pagevec lru_deactivate_file;
	struct pagevec lru_deactivate;
	struct pagevec lru_lazyfree;
#ifdef CONFIG_SMP
	struct pagevec activate_page;
#endif
};
static DEFINE_PER_CPU(struct lru_pvecs, lru_pvecs) = {
	.lock = INIT_LOCAL_LOCK(lock),
};

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		pg_data_t *pgdat = page_pgdat(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&pgdat->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	}
	__ClearPageWaiters(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	mem_cgroup_uncharge(page);
	free_unref_page(page);
}

static void __put_compound_page(struct page *page)
{
	/*
	 * __page_cache_release() is supposed to be called for thp, not for
	 * hugetlb. This is because a hugetlb page never has PageLRU set
	 * (it is never added to any LRU list) and no memcg routines should
	 * be called for hugetlb (it has a separate hugetlb_cgroup.)
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	destroy_compound_page(page);
}

void __put_page(struct page *page)
{
	if (is_zone_device_page(page)) {
		put_dev_pagemap(page->pgmap);

		/*
		 * The page belongs to the device that created pgmap. Do
		 * not return it to page allocator.
		 */
		return;
	}

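	/*
	 * Compound pages (e.g. THP) take the compound teardown path;
	 * order-0 pages go straight back to the page allocator.
	 */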
	if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = lru_to_page(pages);
		list_del(&victim->lru);
		put_page(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		get_page(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointer to the page pinned.
 *		Must be at least nr_segs long.
 *
 * Returns 1 if page is pinned. If the page was not pinned, returns
 * -errno. The page returned must be released with a put_page() call
 * when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct pglist_data *pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct pglist_data *pagepgdat = page_pgdat(page);

		if (pagepgdat != pgdat) {
			if (pgdat)
				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
			pgdat = pagepgdat;
			spin_lock_irqsave(&pgdat->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		(*move_fn)(page, lruvec, arg);
	}
	if (pgdat)
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageUnevictable(page)) {
		del_page_from_lru_list(page, lruvec, page_lru(page));
		ClearPageActive(page);
		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
		(*pgmoved) += thp_nr_pages(page);
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		get_page(page);
		local_lock_irqsave(&lru_rotate.lock, flags);
		pvec = this_cpu_ptr(&lru_rotate.pvec);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_move_tail(pvec);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}
}

void lru_note_cost(struct lruvec *lruvec, bool file, unsigned int nr_pages)
{
	do {
		unsigned long lrusize;

		/* Record cost event */
		if (file)
			lruvec->file_cost += nr_pages;
		else
			lruvec->anon_cost += nr_pages;

		/*
		 * Decay previous events
		 *
		 * Because workloads change over time (and to avoid
		 * overflow) we keep these statistics as a floating
		 * average, which ends up weighing recent refaults
		 * more than old ones.
		 */
		lrusize = lruvec_page_state(lruvec, NR_INACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_ACTIVE_ANON) +
			  lruvec_page_state(lruvec, NR_INACTIVE_FILE) +
			  lruvec_page_state(lruvec, NR_ACTIVE_FILE);

		if (lruvec->file_cost + lruvec->anon_cost > lrusize / 4) {
			lruvec->file_cost /= 2;
			lruvec->anon_cost /= 2;
		}
	} while ((lruvec = parent_lruvec(lruvec)));
}

void lru_note_cost_page(struct page *page)
{
	lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)),
		      page_is_file_lru(page), thp_nr_pages(page));
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int lru = page_lru_base_type(page);
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page);

		__count_vm_events(PGACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGACTIVATE,
				     nr_pages);
	}
}

#ifdef CONFIG_SMP
static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_pvecs.activate_page, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(lru_pvecs.activate_page, cpu)) != 0;
}

static void activate_page(struct page *page)
{
	page = compound_head(page);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.activate_page);
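		/* Hold a reference while the page sits in this CPU's pagevec. */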
		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		local_unlock(&lru_pvecs.lock);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

static void activate_page(struct page *page)
{
	pg_data_t *pgdat = page_pgdat(page);

	page = compound_head(page);
	spin_lock_irq(&pgdat->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL);
	spin_unlock_irq(&pgdat->lru_lock);
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec;
	int i;

	local_lock(&lru_pvecs.lock);
	pvec = this_cpu_ptr(&lru_pvecs.lru_add);

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	local_unlock(&lru_pvecs.lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
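	/* Page flags tested below live on the head page of a compound page. */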
	page = compound_head(page);

	if (!PageReferenced(page)) {
		SetPageReferenced(page);
	} else if (PageUnevictable(page)) {
		/*
		 * Unevictable pages are on the "LRU_UNEVICTABLE" list. But,
		 * this list is never rotated or maintained, so marking an
		 * unevictable page accessed has no effect.
		 */
	} else if (!PageActive(page)) {
		/*
		 * If the page is on the LRU, queue it for activation via
		 * lru_pvecs.activate_page. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		workingset_activation(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);

/**
 * lru_cache_add - add a page to the LRU lists
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	struct pagevec *pvec;

	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);

	get_page(page);
	local_lock(&lru_pvecs.lock);
	pvec = this_cpu_ptr(&lru_pvecs.lru_add);
	if (!pagevec_add(pvec, page) || PageCompound(page))
		__pagevec_lru_add(pvec);
	local_unlock(&lru_pvecs.lock);
}
EXPORT_SYMBOL(lru_cache_add);

/**
 * lru_cache_add_inactive_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the inactive or unevictable LRU list, depending on its
 * evictability.
 */
void lru_cache_add_inactive_or_unevictable(struct page *page,
					   struct vm_area_struct *vma)
{
	bool unevictable;

	VM_BUG_ON_PAGE(PageLRU(page), page);

	unevictable = (vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) == VM_LOCKED;
	if (unlikely(unevictable) && !TestSetPageMlocked(page)) {
		int nr_pages = thp_nr_pages(page);
		/*
		 * We use the irq-unsafe __mod_zone_page_state because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (a spinlock), which implies preemption disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
		count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);
	}
	lru_cache_add(page);
}

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * could be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In 4, the page is moved to the head of the inactive list because the VM
 * expects it to be written out by the flusher threads; this is much more
 * effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
				   void *arg)
{
	int lru;
	bool active;
	int nr_pages = thp_nr_pages(page);

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim could be raced with end_page_writeback
		 * It can make readahead confusing.  But the race window
		 * is _really_ small and it's a non-critical problem.
		 */
		add_page_to_lru_list(page, lruvec, lru);
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it was in the pagevec,
		 * so move the page to the tail of the inactive list.
		 */
		add_page_to_lru_list_tail(page, lruvec, lru);
		__count_vm_events(PGROTATED, nr_pages);
	}

	if (active) {
		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		int lru = page_lru_base_type(page);
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
		ClearPageActive(page);
		ClearPageReferenced(page);
		add_page_to_lru_list(page, lruvec, lru);

		__count_vm_events(PGDEACTIVATE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGDEACTIVATE,
				     nr_pages);
	}
}

static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		bool active = PageActive(page);
		int nr_pages = thp_nr_pages(page);

		del_page_from_lru_list(page, lruvec,
				       LRU_INACTIVE_ANON + active);
		ClearPageActive(page);
		ClearPageReferenced(page);
		/*
		 * Lazyfree pages are clean anonymous pages.  They have
		 * the PG_swapbacked flag cleared, to distinguish them from
		 * normal anonymous pages.
		 */
		ClearPageSwapBacked(page);
		add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE);

		__count_vm_events(PGLAZYFREE, nr_pages);
		__count_memcg_events(lruvec_memcg(lruvec), PGLAZYFREE,
				     nr_pages);
	}
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_pvecs.lru_add, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate.pvec, cpu);
	/* Disabling interrupts below acts as a compiler barrier. */
	if (data_race(pagevec_count(pvec))) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_lock_irqsave(&lru_rotate.lock, flags);
		pagevec_move_tail(pvec);
		local_unlock_irqrestore(&lru_rotate.lock, flags);
	}

	pvec = &per_cpu(lru_pvecs.lru_deactivate_file, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

	pvec = &per_cpu(lru_pvecs.lru_deactivate, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	pvec = &per_cpu(lru_pvecs.lru_lazyfree, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (such as one using
	 * mprotect), deactivating unevictable pages to accelerate reclaim
	 * is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate_file);

		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
		local_unlock(&lru_pvecs.lock);
	}
}

/*
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the active
 * list and was not an unevictable page.  This is done to accelerate the reclaim
 * of @page.
 */
void deactivate_page(struct page *page)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_deactivate);
		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		local_unlock(&lru_pvecs.lock);
	}
}

/**
 * mark_page_lazyfree - make an anon page lazyfree
 * @page: page to deactivate
 *
 * mark_page_lazyfree() moves @page to the inactive file list.
 * This is done to accelerate the reclaim of @page.
 */
void mark_page_lazyfree(struct page *page)
{
	if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) &&
	    !PageSwapCache(page) && !PageUnevictable(page)) {
		struct pagevec *pvec;

		local_lock(&lru_pvecs.lock);
		pvec = this_cpu_ptr(&lru_pvecs.lru_lazyfree);
		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_lazyfree_fn, NULL);
		local_unlock(&lru_pvecs.lock);
	}
}

void lru_add_drain(void)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	local_unlock(&lru_pvecs.lock);
}

void lru_add_drain_cpu_zone(struct zone *zone)
{
	local_lock(&lru_pvecs.lock);
	lru_add_drain_cpu(smp_processor_id());
	drain_local_pages(zone);
	local_unlock(&lru_pvecs.lock);
}

#ifdef CONFIG_SMP

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Doesn't need any cpu hotplug locking because we do rely on per-cpu
 * kworkers being shut down before our page_alloc_cpu_dead callback is
 * executed on the offlined cpu.
 * Calling this function with cpu hotplug locks held can actually lead
 * to obscure indirect dependencies via WQ context.
 */
void lru_add_drain_all(void)
{
	/*
	 * lru_drain_gen - Global pages generation number
	 *
	 * (A) Definition: global lru_drain_gen = x implies that all generations
	 *     0 < n <= x are already *scheduled* for draining.
	 *
	 * This is an optimization for the highly-contended use case where a
	 * user space workload keeps constantly generating a flow of pages for
	 * each CPU.
	 */
	static unsigned int lru_drain_gen;
	static struct cpumask has_work;
	static DEFINE_MUTEX(lock);
	unsigned cpu, this_gen;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	/*
	 * Guarantee pagevec counter stores visible by this CPU are visible to
	 * other CPUs before loading the current drain generation.
	 */
	smp_mb();

	/*
	 * (B) Locally cache global LRU draining generation number
	 *
	 * The read barrier ensures that the counter is loaded before the mutex
	 * is taken. It pairs with smp_mb() inside the mutex critical section
	 * at (D).
	 */
	this_gen = smp_load_acquire(&lru_drain_gen);

	mutex_lock(&lock);

	/*
	 * (C) Exit the draining operation if a newer generation, from another
	 * lru_add_drain_all(), was already scheduled for draining. Check (A).
	 */
	if (unlikely(this_gen != lru_drain_gen))
		goto done;

	/*
	 * (D) Increment global generation number
	 *
	 * Pairs with smp_load_acquire() at (B), outside of the critical
	 * section. Use a full memory barrier to guarantee that the new global
	 * drain generation number is stored before loading pagevec counters.
	 *
	 * This pairing must be done here, before the for_each_online_cpu loop
	 * below which drains the page vectors.
	 *
	 * Let x, y, and z represent some system CPU numbers, where x < y < z.
	 * Assume CPU #z is in the middle of the for_each_online_cpu loop
	 * below and has already reached CPU #y's per-cpu data. CPU #x comes
	 * along, adds some pages to its per-cpu vectors, then calls
	 * lru_add_drain_all().
	 *
	 * If the paired barrier is done at any later step, e.g. after the
	 * loop, CPU #x will just exit at (C) and miss flushing out all of its
	 * added pages.
	 */
	WRITE_ONCE(lru_drain_gen, lru_drain_gen + 1);
	smp_mb();

	cpumask_clear(&has_work);
	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_pvecs.lru_add, cpu)) ||
		    data_race(pagevec_count(&per_cpu(lru_rotate.pvec, cpu))) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate_file, cpu)) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_deactivate, cpu)) ||
		    pagevec_count(&per_cpu(lru_pvecs.lru_lazyfree, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
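			/* Remember this CPU so the flush loop below waits for its work. */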
			__cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

done:
	mutex_unlock(&lock);
}
#else
void lru_add_drain_all(void)
{
	lru_add_drain();
}
#endif /* CONFIG_SMP */

/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct pglist_data *locked_pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long flags;
	unsigned int lock_batch;

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same pgdat. The lock is held only if pgdat != NULL.
		 */
		if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
			spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
			locked_pgdat = NULL;
		}

		page = compound_head(page);
		if (is_huge_zero_page(page))
			continue;

		if (is_zone_device_page(page)) {
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock,
						       flags);
				locked_pgdat = NULL;
			}
			/*
			 * ZONE_DEVICE pages that return 'false' from
			 * page_is_devmap_managed() do not require special
			 * processing, and instead, expect a call to
			 * put_page_testzero().
			 */
			if (page_is_devmap_managed(page)) {
				put_devmap_managed_page(page);
				continue;
			}
			if (put_page_testzero(page))
				put_dev_pagemap(page->pgmap);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

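		/*
		 * The refcount reached zero: compound pages are freed right
		 * away, other pages are taken off the LRU and batched onto
		 * pages_to_free below.
		 */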
		if (PageCompound(page)) {
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
				locked_pgdat = NULL;
			}
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct pglist_data *pgdat = page_pgdat(page);

			if (pgdat != locked_pgdat) {
				if (locked_pgdat)
					spin_unlock_irqrestore(&locked_pgdat->lru_lock,
									flags);
				lock_batch = 0;
				locked_pgdat = pgdat;
				spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		__ClearPageWaiters(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (locked_pgdat)
		spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_unref_page_list(&pages_to_free);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	if (!pvec->percpu_pvec_drained) {
		lru_add_drain();
		pvec->percpu_pvec_drained = true;
	}
	release_pages(pvec->pages, pagevec_count(pvec));
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	lockdep_assert_held(&lruvec_pgdat(lruvec)->lru_lock);

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Put page_tail on the list at the correct position
		 * so they all end up in order.
		 */
		add_page_to_lru_list_tail(page_tail, lruvec,
					  page_lru(page_tail));
	}
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	enum lru_list lru;
	int was_unevictable = TestClearPageUnevictable(page);
	int nr_pages = thp_nr_pages(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	/*
	 * Page becomes evictable in two ways:
	 * 1) Within LRU lock [munlock_vma_page() and __munlock_pagevec()].
	 * 2) Before acquiring LRU lock to put the page to correct LRU and then
	 *    a) do PageLRU check with lock [check_move_unevictable_pages]
	 *    b) do PageLRU check before lock [clear_page_mlock]
	 *
	 * (1) & (2a) are ok as LRU lock will serialize them. For (2b), we need
	 * the following strict ordering:
	 *
	 * #0: __pagevec_lru_add_fn		#1: clear_page_mlock
	 *
	 * SetPageLRU()				TestClearPageMlocked()
	 * smp_mb() // explicit ordering	// above provides strict
	 *					// ordering
	 * PageMlocked()			PageLRU()
	 *
	 *
	 * If '#1' does not observe the setting of PG_lru by '#0' and fails
	 * isolation, the explicit barrier makes sure that the page_evictable
	 * check puts the page on the correct LRU. Without smp_mb(), SetPageLRU
	 * can be reordered after the PageMlocked check and can make '#1' fail
	 * the isolation of the page whose Mlocked bit is cleared (#0 is also
	 * looking at the same page), and the evictable page would be stranded
	 * on an unevictable LRU.
	 */
	SetPageLRU(page);
	smp_mb__after_atomic();

	if (page_evictable(page)) {
		lru = page_lru(page);
		if (was_unevictable)
			__count_vm_events(UNEVICTABLE_PGRESCUED, nr_pages);
	} else {
		lru = LRU_UNEVICTABLE;
		ClearPageActive(page);
		SetPageUnevictable(page);
		if (!was_unevictable)
			__count_vm_events(UNEVICTABLE_PGCULLED, nr_pages);
	}

	add_page_to_lru_list(page, lruvec, lru);
	trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_entries:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_entries pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
10761da177e4SLinus Torvalds /**
10770cd6144aSJohannes Weiner  * pagevec_lookup_entries - gang pagecache lookup
10780cd6144aSJohannes Weiner  * @pvec:	Where the resulting entries are placed
10790cd6144aSJohannes Weiner  * @mapping:	The address_space to search
10800cd6144aSJohannes Weiner  * @start:	The starting entry index
1081cb6f0f34SMike Rapoport  * @nr_entries:	The maximum number of entries
10820cd6144aSJohannes Weiner  * @indices:	The cache indices corresponding to the entries in @pvec
10830cd6144aSJohannes Weiner  *
10840cd6144aSJohannes Weiner  * pagevec_lookup_entries() will search for and return a group of up
1085f144c390SMike Rapoport  * to @nr_entries pages and shadow entries in the mapping.  All
10860cd6144aSJohannes Weiner  * entries are placed in @pvec.  pagevec_lookup_entries() takes a
10870cd6144aSJohannes Weiner  * reference against actual pages in @pvec.
10880cd6144aSJohannes Weiner  *
10890cd6144aSJohannes Weiner  * The search returns a group of mapping-contiguous entries with
10900cd6144aSJohannes Weiner  * ascending indexes.  There may be holes in the indices due to
10910cd6144aSJohannes Weiner  * not-present entries.
10920cd6144aSJohannes Weiner  *
109371725ed1SHugh Dickins  * Only one subpage of a Transparent Huge Page is returned in one call:
109471725ed1SHugh Dickins  * allowing truncate_inode_pages_range() to evict the whole THP without
109571725ed1SHugh Dickins  * cycling through a pagevec of extra references.
109671725ed1SHugh Dickins  *
10970cd6144aSJohannes Weiner  * pagevec_lookup_entries() returns the number of entries which were
10980cd6144aSJohannes Weiner  * found.
10990cd6144aSJohannes Weiner  */
11000cd6144aSJohannes Weiner unsigned pagevec_lookup_entries(struct pagevec *pvec,
11010cd6144aSJohannes Weiner 				struct address_space *mapping,
1102e02a9f04SRandy Dunlap 				pgoff_t start, unsigned nr_entries,
11030cd6144aSJohannes Weiner 				pgoff_t *indices)
11040cd6144aSJohannes Weiner {
1105e02a9f04SRandy Dunlap 	pvec->nr = find_get_entries(mapping, start, nr_entries,
11060cd6144aSJohannes Weiner 				    pvec->pages, indices);
11070cd6144aSJohannes Weiner 	return pagevec_count(pvec);
11080cd6144aSJohannes Weiner }
11090cd6144aSJohannes Weiner 
11100cd6144aSJohannes Weiner /**
11110cd6144aSJohannes Weiner  * pagevec_remove_exceptionals - pagevec exceptionals pruning
11120cd6144aSJohannes Weiner  * @pvec:	The pagevec to prune
11130cd6144aSJohannes Weiner  *
11140cd6144aSJohannes Weiner  * pagevec_lookup_entries() fills both pages and exceptional radix
11150cd6144aSJohannes Weiner  * tree entries into the pagevec.  This function prunes all
11160cd6144aSJohannes Weiner  * exceptionals from @pvec without leaving holes, so that it can be
11170cd6144aSJohannes Weiner  * passed on to page-only pagevec operations.
11180cd6144aSJohannes Weiner  */
11190cd6144aSJohannes Weiner void pagevec_remove_exceptionals(struct pagevec *pvec)
11200cd6144aSJohannes Weiner {
11210cd6144aSJohannes Weiner 	int i, j;
11220cd6144aSJohannes Weiner 
11230cd6144aSJohannes Weiner 	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
11240cd6144aSJohannes Weiner 		struct page *page = pvec->pages[i];
11253159f943SMatthew Wilcox 		if (!xa_is_value(page))
11260cd6144aSJohannes Weiner 			pvec->pages[j++] = page;
11270cd6144aSJohannes Weiner 	}
11280cd6144aSJohannes Weiner 	pvec->nr = j;
11290cd6144aSJohannes Weiner }
11300cd6144aSJohannes Weiner 
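/*
 * Illustrative sketch of the truncate-style loop these two helpers exist
 * for (compare truncate_inode_pages_range()); example_scan_mapping() is a
 * hypothetical caller and the per-page work is elided.  Shadow entries are
 * pruned before pagevec_release() so that only real pages, which hold a
 * reference from the lookup, reach release_pages().
 */
static __maybe_unused void example_scan_mapping(struct address_space *mapping)
{
	pgoff_t indices[PAGEVEC_SIZE];
	struct pagevec pvec;
	pgoff_t index = 0;
	int i;

	pagevec_init(&pvec);
	while (pagevec_lookup_entries(&pvec, mapping, index,
				      PAGEVEC_SIZE, indices)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			index = indices[i];
			if (xa_is_value(page))
				continue;	/* shadow entry: no page, no reference held */
			/* ... lock and operate on the real page here ... */
		}
		/* drop shadow entries before the page-only release below */
		pagevec_remove_exceptionals(&pvec);
		pagevec_release(&pvec);
		cond_resched();
		index++;
	}
}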
11310cd6144aSJohannes Weiner /**
1132b947cee4SJan Kara  * pagevec_lookup_range - gang pagecache lookup
11331da177e4SLinus Torvalds  * @pvec:	Where the resulting pages are placed
11341da177e4SLinus Torvalds  * @mapping:	The address_space to search
11351da177e4SLinus Torvalds  * @start:	The starting page index
1136b947cee4SJan Kara  * @end:	The final page index
11371da177e4SLinus Torvalds  *
1138e02a9f04SRandy Dunlap  * pagevec_lookup_range() will search for & return a group of up to PAGEVEC_SIZE
1139b947cee4SJan Kara  * pages in the mapping starting from index @start and up to index @end
1140b947cee4SJan Kara  * (inclusive).  The pages are placed in @pvec.  pagevec_lookup_range() takes a
11411da177e4SLinus Torvalds  * reference against the pages in @pvec.
11421da177e4SLinus Torvalds  *
11431da177e4SLinus Torvalds  * The search returns a group of mapping-contiguous pages with ascending
1144d72dc8a2SJan Kara  * indexes.  There may be holes in the indices due to not-present pages.  We
1145d72dc8a2SJan Kara  * also update @start to index the next page for the traversal.
11461da177e4SLinus Torvalds  *
1147b947cee4SJan Kara  * pagevec_lookup_range() returns the number of pages which were found.  If this
1148e02a9f04SRandy Dunlap  * number is smaller than PAGEVEC_SIZE, the end of the specified range has been
1149b947cee4SJan Kara  * reached.
11501da177e4SLinus Torvalds  */
1151b947cee4SJan Kara unsigned pagevec_lookup_range(struct pagevec *pvec,
1152397162ffSJan Kara 		struct address_space *mapping, pgoff_t *start, pgoff_t end)
11531da177e4SLinus Torvalds {
1154397162ffSJan Kara 	pvec->nr = find_get_pages_range(mapping, start, end, PAGEVEC_SIZE,
1155b947cee4SJan Kara 					pvec->pages);
11561da177e4SLinus Torvalds 	return pagevec_count(pvec);
11571da177e4SLinus Torvalds }
1158b947cee4SJan Kara EXPORT_SYMBOL(pagevec_lookup_range);
115978539fdfSChristoph Hellwig 
116072b045aeSJan Kara unsigned pagevec_lookup_range_tag(struct pagevec *pvec,
116172b045aeSJan Kara 		struct address_space *mapping, pgoff_t *index, pgoff_t end,
116210bbd235SMatthew Wilcox 		xa_mark_t tag)
11631da177e4SLinus Torvalds {
116472b045aeSJan Kara 	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
116567fd707fSJan Kara 					PAGEVEC_SIZE, pvec->pages);
11661da177e4SLinus Torvalds 	return pagevec_count(pvec);
11671da177e4SLinus Torvalds }
116872b045aeSJan Kara EXPORT_SYMBOL(pagevec_lookup_range_tag);
11691da177e4SLinus Torvalds 
117093d3b714SJan Kara unsigned pagevec_lookup_range_nr_tag(struct pagevec *pvec,
117193d3b714SJan Kara 		struct address_space *mapping, pgoff_t *index, pgoff_t end,
117210bbd235SMatthew Wilcox 		xa_mark_t tag, unsigned max_pages)
117393d3b714SJan Kara {
117493d3b714SJan Kara 	pvec->nr = find_get_pages_range_tag(mapping, index, end, tag,
117593d3b714SJan Kara 		min_t(unsigned int, max_pages, PAGEVEC_SIZE), pvec->pages);
117693d3b714SJan Kara 	return pagevec_count(pvec);
117793d3b714SJan Kara }
117893d3b714SJan Kara EXPORT_SYMBOL(pagevec_lookup_range_nr_tag);
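/*
 * Illustrative sketch of a writeback-style walk using the tagged lookup
 * (compare write_cache_pages()); example_walk_dirty() is hypothetical and
 * page locking and error handling are elided.  @index is advanced by
 * pagevec_lookup_range_tag() itself, so the loop simply repeats until the
 * range is exhausted.
 */
static __maybe_unused void example_walk_dirty(struct address_space *mapping,
					      pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned nr;
	int i;

	pagevec_init(&pvec);
	while (index <= end) {
		nr = pagevec_lookup_range_tag(&pvec, mapping, &index, end,
					      PAGECACHE_TAG_DIRTY);
		if (!nr)
			break;	/* no dirty pages left in the range */
		for (i = 0; i < nr; i++) {
			struct page *page = pvec.pages[i];

			/* ... lock @page and start writeback on it here ... */
			(void)page;
		}
		pagevec_release(&pvec);	/* drop the references taken by the lookup */
		cond_resched();
	}
}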
11791da177e4SLinus Torvalds /*
11801da177e4SLinus Torvalds  * Perform any setup for the swap system
11811da177e4SLinus Torvalds  */
11821da177e4SLinus Torvalds void __init swap_setup(void)
11831da177e4SLinus Torvalds {
1184ca79b0c2SArun KS 	unsigned long megs = totalram_pages() >> (20 - PAGE_SHIFT);
1185e0bf68ddSPeter Zijlstra 
11861da177e4SLinus Torvalds 	/* Use a smaller cluster for small-memory machines */
11871da177e4SLinus Torvalds 	if (megs < 16)
11881da177e4SLinus Torvalds 		page_cluster = 2;
11891da177e4SLinus Torvalds 	else
11901da177e4SLinus Torvalds 		page_cluster = 3;
11911da177e4SLinus Torvalds 	/*
11921da177e4SLinus Torvalds 	 * Right now other parts of the system mean that we
11931da177e4SLinus Torvalds 	 * _really_ don't want to cluster much more
11941da177e4SLinus Torvalds 	 */
11951da177e4SLinus Torvalds }
119607d80269SJohn Hubbard 
119707d80269SJohn Hubbard #ifdef CONFIG_DEV_PAGEMAP_OPS
119807d80269SJohn Hubbard void put_devmap_managed_page(struct page *page)
119907d80269SJohn Hubbard {
120007d80269SJohn Hubbard 	int count;
120107d80269SJohn Hubbard 
120207d80269SJohn Hubbard 	if (WARN_ON_ONCE(!page_is_devmap_managed(page)))
120307d80269SJohn Hubbard 		return;
120407d80269SJohn Hubbard 
120507d80269SJohn Hubbard 	count = page_ref_dec_return(page);
120607d80269SJohn Hubbard 
120707d80269SJohn Hubbard 	/*
120807d80269SJohn Hubbard 	 * devmap page refcounts are 1-based, rather than 0-based: if
120907d80269SJohn Hubbard 	 * refcount is 1, then the page is free and the refcount is
121007d80269SJohn Hubbard 	 * stable because nobody holds a reference on the page.
121107d80269SJohn Hubbard 	 */
121207d80269SJohn Hubbard 	if (count == 1)
121307d80269SJohn Hubbard 		free_devmap_managed_page(page);
121407d80269SJohn Hubbard 	else if (!count)
121507d80269SJohn Hubbard 		__put_page(page);
121607d80269SJohn Hubbard }
121707d80269SJohn Hubbard EXPORT_SYMBOL(put_devmap_managed_page);
121807d80269SJohn Hubbard #endif
1219
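/*
 * Illustrative sketch of how the generic put_page() wrapper is expected to
 * reach put_devmap_managed_page(); it mirrors the inline helper in
 * include/linux/mm.h rather than defining anything new, and the example_
 * name is hypothetical.
 */
static __maybe_unused void example_put_page(struct page *page)
{
	page = compound_head(page);

	/*
	 * ZONE_DEVICE pages idle at refcount 1 (see the comment above), so
	 * they are routed to put_devmap_managed_page() instead of the
	 * ordinary put_page_testzero() path.
	 */
	if (page_is_devmap_managed(page)) {
		put_devmap_managed_page(page);
		return;
	}

	if (put_page_testzero(page))
		__put_page(page);
}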