/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
        if (PageLRU(page)) {
                struct zone *zone = page_zone(page);
                struct lruvec *lruvec;
                unsigned long flags;

                spin_lock_irqsave(&zone->lru_lock, flags);
                lruvec = mem_cgroup_page_lruvec(page, zone);
                VM_BUG_ON_PAGE(!PageLRU(page), page);
                __ClearPageLRU(page);
                del_page_from_lru_list(page, lruvec, page_off_lru(page));
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        }
        mem_cgroup_uncharge(page);
}

static void __put_single_page(struct page *page)
{
        __page_cache_release(page);
        free_hot_cold_page(page, false);
}

static void __put_compound_page(struct page *page)
{
        compound_page_dtor *dtor;

        /*
         * __page_cache_release() is supposed to be called for thp, not for
         * hugetlb. This is because hugetlb pages never have PageLRU set
         * (they are never put on any LRU list) and no memcg routines should
         * be called for hugetlb (it has a separate hugetlb_cgroup.)
         */
        if (!PageHuge(page))
                __page_cache_release(page);
        dtor = get_compound_page_dtor(page);
        (*dtor)(page);
}

void __put_page(struct page *page)
{
        if (unlikely(PageCompound(page)))
                __put_compound_page(page);
        else
                __put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page.lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
        while (!list_empty(pages)) {
                struct page *victim;

                victim = list_entry(pages->prev, struct page, lru);
                list_del(&victim->lru);
                page_cache_release(victim);
        }
}
EXPORT_SYMBOL(put_pages_list);

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:     An array of struct kvec structures
 * @nr_segs:  number of segments to pin
 * @write:    pinning for read/write, currently ignored
 * @pages:    array that receives pointers to the pages pinned.
 *            Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
                struct page **pages)
{
        int seg;

        for (seg = 0; seg < nr_segs; seg++) {
                if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
                        return seg;

                pages[seg] = kmap_to_page(kiov[seg].iov_base);
                page_cache_get(pages[seg]);
        }

        return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);
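
/*
 * Illustrative sketch only, not a kernel code path: a hypothetical caller
 * pinning two kernel buffers.  buf0, buf1 and use_page() are made-up names;
 * the pattern is fill the kvecs, pin, use, then drop each reference with
 * put_page():
 *
 *      struct kvec kiov[2] = {
 *              { .iov_base = buf0, .iov_len = PAGE_SIZE },
 *              { .iov_base = buf1, .iov_len = PAGE_SIZE },
 *      };
 *      struct page *pages[2];
 *      int i, nr = get_kernel_pages(kiov, 2, 0, pages);
 *
 *      for (i = 0; i < nr; i++) {
 *              use_page(pages[i]);
 *              put_page(pages[i]);
 *      }
 */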

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:  starting kernel address
 * @write:  pinning for read/write, currently ignored
 * @pages:  array that receives a pointer to the page pinned.
 *          Must have room for one page.
 *
 * Returns 1 if page is pinned. If the page was not pinned, returns
 * -errno. The page returned must be released with a put_page() call
 * when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
        const struct kvec kiov = {
                .iov_base = (void *)start,
                .iov_len = PAGE_SIZE
        };

        return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);

static void pagevec_lru_move_fn(struct pagevec *pvec,
        void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
        void *arg)
{
        int i;
        struct zone *zone = NULL;
        struct lruvec *lruvec;
        unsigned long flags = 0;

        for (i = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                struct zone *pagezone = page_zone(page);

                if (pagezone != zone) {
                        if (zone)
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                        zone = pagezone;
                        spin_lock_irqsave(&zone->lru_lock, flags);
                }

                lruvec = mem_cgroup_page_lruvec(page, zone);
                (*move_fn)(page, lruvec, arg);
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);
        release_pages(pvec->pages, pvec->nr, pvec->cold);
        pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
                                 void *arg)
{
        int *pgmoved = arg;

        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                enum lru_list lru = page_lru_base_type(page);
                list_move_tail(&page->lru, &lruvec->lists[lru]);
                (*pgmoved)++;
        }
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
        int pgmoved = 0;

        pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
        __count_vm_events(PGROTATED, pgmoved);
}
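
/*
 * Illustrative sketch: pagevec_lru_move_fn() factors out the "walk a pagevec
 * under the correct zone->lru_lock" boilerplate, so a batched LRU operation
 * only needs a per-page callback plus an opaque argument, as
 * pagevec_move_tail() above shows.  A hypothetical callback that merely
 * counted unevictable pages would follow the same shape:
 *
 *      static void count_unevictable_fn(struct page *page,
 *                                       struct lruvec *lruvec, void *arg)
 *      {
 *              int *nr = arg;
 *
 *              if (PageUnevictable(page))
 *                      (*nr)++;
 *      }
 *
 *      int nr = 0;
 *      pagevec_lru_move_fn(pvec, count_unevictable_fn, &nr);
 */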

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
        if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
            !PageUnevictable(page) && PageLRU(page)) {
                struct pagevec *pvec;
                unsigned long flags;

                page_cache_get(page);
                local_irq_save(flags);
                pvec = this_cpu_ptr(&lru_rotate_pvecs);
                if (!pagevec_add(pvec, page))
                        pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }
}

static void update_page_reclaim_stat(struct lruvec *lruvec,
                                     int file, int rotated)
{
        struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

        reclaim_stat->recent_scanned[file]++;
        if (rotated)
                reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
                            void *arg)
{
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                int file = page_is_file_cache(page);
                int lru = page_lru_base_type(page);

                del_page_from_lru_list(page, lruvec, lru);
                SetPageActive(page);
                lru += LRU_ACTIVE;
                add_page_to_lru_list(page, lruvec, lru);
                trace_mm_lru_activate(page);

                __count_vm_event(PGACTIVATE);
                update_page_reclaim_stat(lruvec, file, 1);
        }
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
        struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
        return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
}

void activate_page(struct page *page)
{
        if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
                struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

                page_cache_get(page);
                if (!pagevec_add(pvec, page))
                        pagevec_lru_move_fn(pvec, __activate_page, NULL);
                put_cpu_var(activate_page_pvecs);
        }
}

#else
static inline void activate_page_drain(int cpu)
{
}

static bool need_activate_page_drain(int cpu)
{
        return false;
}

void activate_page(struct page *page)
{
        struct zone *zone = page_zone(page);

        spin_lock_irq(&zone->lru_lock);
        __activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
        spin_unlock_irq(&zone->lru_lock);
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
        struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
        int i;

        /*
         * Search backwards on the optimistic assumption that the page being
         * activated has just been added to this pagevec. Note that only
         * the local pagevec is examined as a !PageLRU page could be in the
         * process of being released, reclaimed, migrated or on a remote
         * pagevec that is currently being drained. Furthermore, marking
         * a remote pagevec's page PageActive potentially hits a race where
         * a page is marked PageActive just after it is added to the inactive
         * list causing accounting errors and BUG_ON checks to trigger.
         */
        for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
                struct page *pagevec_page = pvec->pages[i];

                if (pagevec_page == page) {
                        SetPageActive(page);
                        break;
                }
        }

        put_cpu_var(lru_add_pvec);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced  ->  inactive,referenced
 * inactive,referenced    ->  active,unreferenced
 * active,unreferenced    ->  active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
        if (!PageActive(page) && !PageUnevictable(page) &&
                        PageReferenced(page)) {

                /*
                 * If the page is on the LRU, queue it for activation via
                 * activate_page_pvecs. Otherwise, assume the page is on a
                 * pagevec, mark it active and it'll be moved to the active
                 * LRU on the next drain.
                 */
                if (PageLRU(page))
                        activate_page(page);
                else
                        __lru_cache_activate_page(page);
                ClearPageReferenced(page);
                if (page_is_file_cache(page))
                        workingset_activation(page);
        } else if (!PageReferenced(page)) {
                SetPageReferenced(page);
        }
        if (page_is_idle(page))
                clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);
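
/*
 * Illustrative sketch of the state machine documented above: two accesses
 * promote a freshly added (inactive, unreferenced) page to the active list.
 *
 *      mark_page_accessed(page);  // inactive,unreferenced -> inactive,referenced
 *      mark_page_accessed(page);  // inactive,referenced   -> active,unreferenced
 */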

static void __lru_cache_add(struct page *page)
{
        struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

        page_cache_get(page);
        if (!pagevec_space(pvec))
                __pagevec_lru_add(pvec);
        pagevec_add(pvec, page);
        put_cpu_var(lru_add_pvec);
}

/**
 * lru_cache_add_anon - add a page to the page lists
 * @page: the page to add
 */
void lru_cache_add_anon(struct page *page)
{
        if (PageActive(page))
                ClearPageActive(page);
        __lru_cache_add(page);
}

void lru_cache_add_file(struct page *page)
{
        if (PageActive(page))
                ClearPageActive(page);
        __lru_cache_add(page);
}
EXPORT_SYMBOL(lru_cache_add_file);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
        VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
        VM_BUG_ON_PAGE(PageLRU(page), page);
        __lru_cache_add(page);
}
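
/*
 * Illustrative sketch: the usual page-cache instantiation order.  A page is
 * inserted into the mapping first and only then queued for the LRU, which is
 * roughly what add_to_page_cache_lru() in mm/filemap.c does:
 *
 *      error = add_to_page_cache_locked(page, mapping, offset, gfp_mask);
 *      if (!error)
 *              lru_cache_add(page);
 */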

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
        struct zone *zone = page_zone(page);
        struct lruvec *lruvec;

        spin_lock_irq(&zone->lru_lock);
        lruvec = mem_cgroup_page_lruvec(page, zone);
        ClearPageActive(page);
        SetPageUnevictable(page);
        SetPageLRU(page);
        add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
        spin_unlock_irq(&zone->lru_lock);
}

/**
 * lru_cache_add_active_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the active or unevictable LRU list, depending on its
 * evictability.  Note that if the page is not evictable, it goes
 * directly back onto its zone's unevictable list; it does NOT use a
 * per-cpu pagevec.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
                                         struct vm_area_struct *vma)
{
        VM_BUG_ON_PAGE(PageLRU(page), page);

        if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
                SetPageActive(page);
                lru_cache_add(page);
                return;
        }

        if (!TestSetPageMlocked(page)) {
                /*
                 * We use the irq-unsafe __mod_zone_page_stat because this
                 * counter is not modified from interrupt context, and the pte
                 * lock is held (spinlock), which implies preemption disabled.
                 */
                __mod_zone_page_state(page_zone(page), NR_MLOCK,
                                      hpage_nr_pages(page));
                count_vm_event(UNEVICTABLE_PGMLOCKED);
        }
        add_page_to_unevictable_list(page);
}

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * could be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the inactive list's head because the VM
 * expects it to be written out by flusher threads, as this is much more
 * effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
                                   void *arg)
{
        int lru, file;
        bool active;

        if (!PageLRU(page))
                return;

        if (PageUnevictable(page))
                return;

        /* Some processes are using the page */
        if (page_mapped(page))
                return;

        active = PageActive(page);
        file = page_is_file_cache(page);
        lru = page_lru_base_type(page);

        del_page_from_lru_list(page, lruvec, lru + active);
        ClearPageActive(page);
        ClearPageReferenced(page);
        add_page_to_lru_list(page, lruvec, lru);

        if (PageWriteback(page) || PageDirty(page)) {
                /*
                 * PG_reclaim could be raced with end_page_writeback
                 * It can make readahead confusing.  But race window
                 * is _really_ small and it's non-critical problem.
                 */
                SetPageReclaim(page);
        } else {
                /*
                 * The page's writeback ended while it was in the pagevec;
                 * move the page to the tail of the inactive list.
                 */
                list_move_tail(&page->lru, &lruvec->lists[lru]);
                __count_vm_event(PGROTATED);
        }

        if (active)
                __count_vm_event(PGDEACTIVATE);
        update_page_reclaim_stat(lruvec, file, 0);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
        struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);

        if (pagevec_count(pvec))
                __pagevec_lru_add(pvec);

        pvec = &per_cpu(lru_rotate_pvecs, cpu);
        if (pagevec_count(pvec)) {
                unsigned long flags;

                /* No harm done if a racing interrupt already did this */
                local_irq_save(flags);
                pagevec_move_tail(pvec);
                local_irq_restore(flags);
        }

        pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
        if (pagevec_count(pvec))
                pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

        activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
        /*
         * In a workload with many unevictable pages (such as one using
         * mprotect), deactivating unevictable pages to accelerate reclaim
         * is pointless.
         */
        if (PageUnevictable(page))
                return;

        if (likely(get_page_unless_zero(page))) {
                struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);

                if (!pagevec_add(pvec, page))
                        pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
                put_cpu_var(lru_deactivate_file_pvecs);
        }
}

void lru_add_drain(void)
{
        lru_add_drain_cpu(get_cpu());
        put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
        lru_add_drain();
}

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

void lru_add_drain_all(void)
{
        static DEFINE_MUTEX(lock);
        static struct cpumask has_work;
        int cpu;

        mutex_lock(&lock);
        get_online_cpus();
        cpumask_clear(&has_work);

        for_each_online_cpu(cpu) {
                struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

                if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
                    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
                    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
                    need_activate_page_drain(cpu)) {
                        INIT_WORK(work, lru_add_drain_per_cpu);
                        schedule_work_on(cpu, work);
                        cpumask_set_cpu(cpu, &has_work);
                }
        }

        for_each_cpu(cpu, &has_work)
                flush_work(&per_cpu(lru_add_drain_work, cpu));

        put_online_cpus();
        mutex_unlock(&lock);
}
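
/*
 * Illustrative sketch: callers that must observe pages on the LRU lists,
 * e.g. (hypothetically) a migration or memory-offlining path, drain the
 * per-cpu pagevecs first so recently added pages become isolatable:
 *
 *      lru_add_drain_all();
 *      // pages parked in per-cpu pagevecs are now on the LRU lists,
 *      // so isolate_lru_page() has a chance to find them
 */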

/**
 * release_pages - batched page_cache_release()
 * @pages: array of pages to release
 * @nr: number of pages
 * @cold: whether the pages are cache cold
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr, bool cold)
{
        int i;
        LIST_HEAD(pages_to_free);
        struct zone *zone = NULL;
        struct lruvec *lruvec;
        unsigned long uninitialized_var(flags);
        unsigned int uninitialized_var(lock_batch);

        for (i = 0; i < nr; i++) {
                struct page *page = pages[i];

                /*
                 * Make sure the IRQ-safe lock-holding time does not get
                 * excessive with a continuous string of pages from the
                 * same zone. The lock is held only if zone != NULL.
                 */
                if (zone && ++lock_batch == SWAP_CLUSTER_MAX) {
                        spin_unlock_irqrestore(&zone->lru_lock, flags);
                        zone = NULL;
                }

                page = compound_head(page);
                if (!put_page_testzero(page))
                        continue;

                if (PageCompound(page)) {
                        if (zone) {
                                spin_unlock_irqrestore(&zone->lru_lock, flags);
                                zone = NULL;
                        }
                        __put_compound_page(page);
                        continue;
                }

                if (PageLRU(page)) {
                        struct zone *pagezone = page_zone(page);

                        if (pagezone != zone) {
                                if (zone)
                                        spin_unlock_irqrestore(&zone->lru_lock,
                                                               flags);
                                lock_batch = 0;
                                zone = pagezone;
                                spin_lock_irqsave(&zone->lru_lock, flags);
                        }

                        lruvec = mem_cgroup_page_lruvec(page, zone);
                        VM_BUG_ON_PAGE(!PageLRU(page), page);
                        __ClearPageLRU(page);
                        del_page_from_lru_list(page, lruvec, page_off_lru(page));
                }

                /* Clear Active bit in case of parallel mark_page_accessed */
                __ClearPageActive(page);

                list_add(&page->lru, &pages_to_free);
        }
        if (zone)
                spin_unlock_irqrestore(&zone->lru_lock, flags);

        mem_cgroup_uncharge_list(&pages_to_free);
        free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);
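
/*
 * Illustrative sketch: dropping the references taken by a gang lookup in one
 * batched call, rather than one page_cache_release() per page:
 *
 *      struct page *pages[PAGEVEC_SIZE];
 *      unsigned nr;
 *
 *      nr = find_get_pages(mapping, start, PAGEVEC_SIZE, pages);
 *      // ... use the pages ...
 *      release_pages(pages, nr, false);
 */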

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
        lru_add_drain();
        release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
        pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
                       struct lruvec *lruvec, struct list_head *list)
{
        const int file = 0;

        VM_BUG_ON_PAGE(!PageHead(page), page);
        VM_BUG_ON_PAGE(PageCompound(page_tail), page);
        VM_BUG_ON_PAGE(PageLRU(page_tail), page);
        VM_BUG_ON(NR_CPUS != 1 &&
                  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

        if (!list)
                SetPageLRU(page_tail);

        if (likely(PageLRU(page)))
                list_add_tail(&page_tail->lru, &page->lru);
        else if (list) {
                /* page reclaim is reclaiming a huge page */
                get_page(page_tail);
                list_add_tail(&page_tail->lru, list);
        } else {
                struct list_head *list_head;
                /*
                 * Head page has not yet been counted, as an hpage,
                 * so we must account for each subpage individually.
                 *
                 * Use the standard add function to put page_tail on the list,
                 * but then correct its position so they all end up in order.
                 */
                add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
                list_head = page_tail->lru.prev;
                list_move_tail(&page_tail->lru, list_head);
        }

        if (!PageUnevictable(page))
                update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
                                 void *arg)
{
        int file = page_is_file_cache(page);
        int active = PageActive(page);
        enum lru_list lru = page_lru(page);

        VM_BUG_ON_PAGE(PageLRU(page), page);

        SetPageLRU(page);
        add_page_to_lru_list(page, lruvec, lru);
        update_page_reclaim_stat(lruvec, file, active);
        trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
        pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:     Where the resulting entries are placed
 * @mapping:  The address_space to search
 * @start:    The starting entry index
 * @nr_pages: The maximum number of entries
 * @indices:  The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_pages pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
                                struct address_space *mapping,
                                pgoff_t start, unsigned nr_pages,
                                pgoff_t *indices)
{
        pvec->nr = find_get_entries(mapping, start, nr_pages,
                                    pvec->pages, indices);
        return pagevec_count(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:  The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
        int i, j;

        for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
                struct page *page = pvec->pages[i];
                if (!radix_tree_exceptional_entry(page))
                        pvec->pages[j++] = page;
        }
        pvec->nr = j;
}
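
/*
 * Illustrative sketch of how the two helpers above combine, loosely
 * modelled on the truncate/invalidate loops in mm/truncate.c:
 *
 *      struct pagevec pvec;
 *      pgoff_t indices[PAGEVEC_SIZE];
 *
 *      pagevec_init(&pvec, 0);
 *      while (pagevec_lookup_entries(&pvec, mapping, index,
 *                                    PAGEVEC_SIZE, indices)) {
 *              // shadow entries are visible here via indices[]
 *              pagevec_remove_exceptionals(&pvec);
 *              // pvec now holds real, referenced pages only
 *              pagevec_release(&pvec);
 *      }
 */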

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:     Where the resulting pages are placed
 * @mapping:  The address_space to search
 * @start:    The starting page index
 * @nr_pages: The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
                        pgoff_t start, unsigned nr_pages)
{
        pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
                            pgoff_t *index, int tag, unsigned nr_pages)
{
        pvec->nr = find_get_pages_tag(mapping, index, tag,
                                      nr_pages, pvec->pages);
        return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
        unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
#ifdef CONFIG_SWAP
        int i;

        for (i = 0; i < MAX_SWAPFILES; i++)
                spin_lock_init(&swapper_spaces[i].tree_lock);
#endif

        /* Use a smaller cluster for small-memory machines */
        if (megs < 16)
                page_cluster = 2;
        else
                page_cluster = 3;
        /*
         * Right now other parts of the system mean that we
         * _really_ don't want to cluster much more.
         */
}