/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);
#endif

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(zone_lru_lock(zone), flags);
		lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
	}
	__ClearPageWaiters(page);
	mem_cgroup_uncharge(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, false);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	/*
	 * __page_cache_release() is supposed to be called for thp, not for
	 * hugetlb. This is because a hugetlb page never has PageLRU set
	 * (it is never added to any LRU list) and no memcg routines should
	 * be called for hugetlb (it has a separate hugetlb_cgroup).
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

void __put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		put_page(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);
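
/*
 * Illustrative sketch (not part of the original file): a caller that has
 * gathered pages on a private list - each holding one reference, and none
 * of them on an LRU list, so page->lru is free for our use - can hand the
 * whole list back in one put_pages_list() call.  The "example_" name is
 * hypothetical.
 */
#if 0
static void example_release_collected_pages(struct page **pages, int nr)
{
	LIST_HEAD(list);
	int i;

	/* thread the pages onto a local list via page->lru */
	for (i = 0; i < nr; i++)
		list_add(&pages[i]->lru, &list);

	/* unlinks every page and drops the caller's reference on each */
	put_pages_list(&list);
}
#endif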

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		get_page(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointer to the page pinned.
 *		Must be able to hold at least one page.
 *
 * Returns 1 if page is pinned. If the page was not pinned, returns
 * -errno. The page returned must be released with a put_page() call
 * when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);
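
/*
 * Illustrative sketch (not part of the original file): pinning the single
 * page behind a page-aligned kernel address and dropping the pin again.
 * The "example_" name is hypothetical.
 */
#if 0
static int example_pin_kernel_buffer(unsigned long addr)
{
	struct page *page;
	int ret;

	ret = get_kernel_page(addr, 0, &page);	/* write flag is ignored */
	if (ret < 1)
		return ret < 0 ? ret : -EFAULT;

	/* ... use page while it is pinned ... */

	put_page(page);				/* release the pin */
	return 0;
}
#endif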

static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct pglist_data *pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct pglist_data *pagepgdat = page_pgdat(page);

		if (pagepgdat != pgdat) {
			if (pgdat)
				spin_unlock_irqrestore(&pgdat->lru_lock, flags);
			pgdat = pagepgdat;
			spin_lock_irqsave(&pgdat->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, pgdat);
		(*move_fn)(page, lruvec, arg);
	}
	if (pgdat)
		spin_unlock_irqrestore(&pgdat->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageUnevictable(page)) {
		del_page_from_lru_list(page, lruvec, page_lru(page));
		ClearPageActive(page);
		add_page_to_lru_list_tail(page, lruvec, page_lru(page));
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		get_page(page);
		local_irq_save(flags);
		pvec = this_cpu_ptr(&lru_rotate_pvecs);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}
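
/*
 * Illustrative sketch (not part of the original file): the kind of caller
 * this helper serves.  When writeback finishes on a page that reclaim had
 * tagged with PG_reclaim, the completion path can rotate it to the tail of
 * the inactive list so the next reclaim scan finds it quickly.  This mirrors
 * the pattern used by end_page_writeback(); the "example_" name is
 * hypothetical.
 */
#if 0
static void example_writeback_done(struct page *page)
{
	if (PageReclaim(page)) {
		/* clear the hint before rotating, as the real path does */
		ClearPageReclaim(page);
		rotate_reclaimable_page(page);
	}
}
#endif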

static void update_page_reclaim_stat(struct lruvec *lruvec,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page);

		__count_vm_event(PGACTIVATE);
		update_page_reclaim_stat(lruvec, file, 1);
	}
}

#ifdef CONFIG_SMP
static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
}

void activate_page(struct page *page)
{
	page = compound_head(page);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

static bool need_activate_page_drain(int cpu)
{
	return false;
}

void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	page = compound_head(page);
	spin_lock_irq(zone_lru_lock(zone));
	__activate_page(page, mem_cgroup_page_lruvec(page, zone->zone_pgdat), NULL);
	spin_unlock_irq(zone_lru_lock(zone));
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
	int i;

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	put_cpu_var(lru_add_pvec);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	page = compound_head(page);
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page)) {

		/*
		 * If the page is on the LRU, queue it for activation via
		 * activate_page_pvecs. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		if (page_is_file_cache(page))
			workingset_activation(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);
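
/*
 * Illustrative sketch (not part of the original file): a read-side path
 * that has just used a page cache page reports the access so that a
 * repeated touch promotes the page to the active list.  The "example_"
 * name is hypothetical.
 */
#if 0
static void example_after_read(struct page *page)
{
	/*
	 * The first call only sets PageReferenced; a later call on a
	 * still-referenced page activates it (and clears the idle bit
	 * used by page_idle tracking).
	 */
	mark_page_accessed(page);
}
#endif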

static void __lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

	get_page(page);
	if (!pagevec_add(pvec, page) || PageCompound(page))
		__pagevec_lru_add(pvec);
	put_cpu_var(lru_add_pvec);
}

/**
 * lru_cache_add_anon - add a page to the page lists
 * @page: the page to add
 */
void lru_cache_add_anon(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}

void lru_cache_add_file(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}
EXPORT_SYMBOL(lru_cache_add_file);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	__lru_cache_add(page);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through e.g. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct pglist_data *pgdat = page_pgdat(page);
	struct lruvec *lruvec;

	spin_lock_irq(&pgdat->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, pgdat);
	ClearPageActive(page);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
	spin_unlock_irq(&pgdat->lru_lock);
}

/**
 * lru_cache_add_active_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the active or unevictable LRU list, depending on its
 * evictability.  Note that if the page is not evictable, it goes
 * directly back onto its zone's unevictable list, it does NOT use a
 * per cpu pagevec.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
		SetPageActive(page);
		lru_cache_add(page);
		return;
	}

	if (!TestSetPageMlocked(page)) {
		/*
		 * We use the irq-unsafe __mod_zone_page_state because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (a spinlock), which implies preemption is
		 * disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				    hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	add_page_to_unevictable_list(page);
}
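
/*
 * Illustrative sketch (not part of the original file): how an anonymous
 * page fault path typically publishes a freshly allocated page.  After the
 * page has been mapped into the VMA, it is added either to the active LRU
 * or, for an mlocked VMA, straight to the unevictable list.  The "example_"
 * name and the surrounding fault plumbing are hypothetical and heavily
 * simplified.
 */
#if 0
static void example_publish_new_anon_page(struct page *page,
					  struct vm_area_struct *vma)
{
	/* page is newly allocated and not yet on any LRU list */
	SetPageSwapBacked(page);
	lru_cache_add_active_or_unevictable(page, vma);
}
#endif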

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * could be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the head of the inactive list because
 * the VM expects it to be written out by the flusher threads; that is much
 * more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	int lru, file;
	bool active;

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(page, lruvec, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * Setting PG_reclaim can race with end_page_writeback(),
		 * which can confuse readahead.  But the race window is
		 * really small and it is a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback has already completed, so move
		 * the page to the tail of the inactive list.
		 */
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(lruvec, file, 0);
}

static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
		ClearPageActive(page);
		ClearPageReferenced(page);
		add_page_to_lru_list(page, lruvec, lru);

		__count_vm_event(PGDEACTIVATE);
		update_page_reclaim_stat(lruvec, file, 0);
	}
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (such as one that uses
	 * mprotect heavily), deactivating unevictable pages to accelerate
	 * reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);

		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
		put_cpu_var(lru_deactivate_file_pvecs);
	}
}

/**
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the active
 * list and was not an unevictable page.  This is done to accelerate the reclaim
 * of @page.
 */
void deactivate_page(struct page *page)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		get_page(page);
		if (!pagevec_add(pvec, page) || PageCompound(page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}

void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	/*
	 * Make sure nobody triggers this path before mm_percpu_wq is fully
	 * initialized.
	 */
	if (WARN_ON(!mm_percpu_wq))
		return;

	mutex_lock(&lock);
	get_online_cpus();
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			queue_work_on(cpu, mm_percpu_wq, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	put_online_cpus();
	mutex_unlock(&lock);
}
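
/*
 * Illustrative sketch (not part of the original file): callers that are
 * about to isolate pages from the LRU (migration, memory offlining, mlock
 * and friends) typically drain the per-CPU pagevecs first, so pages parked
 * in those caches become visible on the LRU lists and can actually be
 * isolated.  The "example_" name is hypothetical.
 */
#if 0
static void example_prepare_to_isolate(void)
{
	/* flush this CPU's and every other CPU's pending pagevecs */
	lru_add_drain_all();
}
#endif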

/**
 * release_pages - batched put_page()
 * @pages: array of pages to release
 * @nr: number of pages
 * @cold: whether the pages are cache cold
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr, bool cold)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct pglist_data *locked_pgdat = NULL;
	struct lruvec *lruvec;
	unsigned long uninitialized_var(flags);
	unsigned int uninitialized_var(lock_batch);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same pgdat. The lock is held only if pgdat != NULL.
		 */
		if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
			spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
			locked_pgdat = NULL;
		}

		if (is_huge_zero_page(page))
			continue;

		page = compound_head(page);
		if (!put_page_testzero(page))
			continue;

		if (PageCompound(page)) {
			if (locked_pgdat) {
				spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
				locked_pgdat = NULL;
			}
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct pglist_data *pgdat = page_pgdat(page);

			if (pgdat != locked_pgdat) {
				if (locked_pgdat)
					spin_unlock_irqrestore(&locked_pgdat->lru_lock,
									flags);
				lock_batch = 0;
				locked_pgdat = pgdat;
				spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);
		__ClearPageWaiters(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (locked_pgdat)
		spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);
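
/*
 * Illustrative sketch (not part of the original file): dropping a batch of
 * page references in one call, e.g. after a get_user_pages() style
 * operation has finished with the pages.  The "example_" name is
 * hypothetical; "false" means the pages are not considered cache cold.
 */
#if 0
static void example_unpin_pages(struct page **pages, int nr)
{
	release_pages(pages, nr, false);
}
#endif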

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	const int file = 0;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	VM_BUG_ON(NR_CPUS != 1 &&
		  !spin_is_locked(&lruvec_pgdat(lruvec)->lru_lock));

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int file = page_is_file_cache(page);
	int active = PageActive(page);
	enum lru_list lru = page_lru(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, lru);
	update_page_reclaim_stat(lruvec, file, active);
	trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_pages:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_pages pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_pages,
				pgoff_t *indices)
{
	pvec->nr = find_get_entries(mapping, start, nr_pages,
				    pvec->pages, indices);
	return pagevec_count(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);
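
/*
 * Illustrative sketch (not part of the original file): the usual way to
 * walk every page currently present in a mapping with pagevec_lookup().
 * Each batch returns pages with references held, so the loop releases
 * them with pagevec_release() before looking up the next batch.  The
 * "example_" name is hypothetical.
 */
#if 0
static void example_walk_mapping(struct address_space *mapping)
{
	struct pagevec pvec;
	pgoff_t next = 0;

	pagevec_init(&pvec, 0);
	while (pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		int i;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* remember where to continue from next time */
			next = page->index + 1;
			/* ... inspect page ... */
		}
		/* drops the references taken by pagevec_lookup() */
		pagevec_release(&pvec);
		cond_resched();
	}
}
#endif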

unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}