/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/export.h>
#include <linux/mm_inline.h>
#include <linux/percpu_counter.h>
#include <linux/memremap.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>
#include <linux/uio.h>
#include <linux/hugetlb.h>
#include <linux/page_idle.h>

#include "internal.h"

#define CREATE_TRACE_POINTS
#include <trace/events/pagemap.h>

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec, lru_add_pvec);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_file_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		struct zone *zone = page_zone(page);
		struct lruvec *lruvec;
		unsigned long flags;

		spin_lock_irqsave(&zone->lru_lock, flags);
		lruvec = mem_cgroup_page_lruvec(page, zone);
		VM_BUG_ON_PAGE(!PageLRU(page), page);
		__ClearPageLRU(page);
		del_page_from_lru_list(page, lruvec, page_off_lru(page));
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	mem_cgroup_uncharge(page);
}

static void __put_single_page(struct page *page)
{
	__page_cache_release(page);
	free_hot_cold_page(page, false);
}

static void __put_compound_page(struct page *page)
{
	compound_page_dtor *dtor;

	/*
	 * __page_cache_release() is supposed to be called for thp, not for
	 * hugetlb.  This is because a hugetlb page never has PageLRU set
	 * (it is never added to any LRU list) and no memcg routines should
	 * be called for hugetlb (it has a separate hugetlb_cgroup).
	 */
	if (!PageHuge(page))
		__page_cache_release(page);
	dtor = get_compound_page_dtor(page);
	(*dtor)(page);
}

void __put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		__put_compound_page(page);
	else
		__put_single_page(page);
}
EXPORT_SYMBOL(__put_page);

/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);

/*
 * get_kernel_pages() - pin kernel pages in memory
 * @kiov:	An array of struct kvec structures
 * @nr_segs:	number of segments to pin
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointers to the pages pinned.
 *		Should be at least nr_segs long.
 *
 * Returns number of pages pinned. This may be fewer than the number
 * requested. If nr_segs is 0 or negative, returns 0. If no pages
 * were pinned, returns -errno. Each page returned must be released
 * with a put_page() call when it is finished with.
 */
int get_kernel_pages(const struct kvec *kiov, int nr_segs, int write,
		struct page **pages)
{
	int seg;

	for (seg = 0; seg < nr_segs; seg++) {
		if (WARN_ON(kiov[seg].iov_len != PAGE_SIZE))
			return seg;

		pages[seg] = kmap_to_page(kiov[seg].iov_base);
		page_cache_get(pages[seg]);
	}

	return seg;
}
EXPORT_SYMBOL_GPL(get_kernel_pages);

/*
 * get_kernel_page() - pin a kernel page in memory
 * @start:	starting kernel address
 * @write:	pinning for read/write, currently ignored
 * @pages:	array that receives pointer to the page pinned.
 *		Must be at least nr_segs long.
 *
 * Returns 1 if page is pinned. If the page was not pinned, returns
 * -errno. The page returned must be released with a put_page() call
 * when it is finished with.
 */
int get_kernel_page(unsigned long start, int write, struct page **pages)
{
	const struct kvec kiov = {
		.iov_base = (void *)start,
		.iov_len = PAGE_SIZE
	};

	return get_kernel_pages(&kiov, 1, write, pages);
}
EXPORT_SYMBOL_GPL(get_kernel_page);

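/*
 * Illustrative (hypothetical) caller of the helpers above, not part of this
 * file: pin the pages backing two page-sized kernel buffers so that code
 * expecting struct page pointers can operate on them, then drop the
 * references with put_page() when finished:
 *
 *	struct kvec kiov[2] = {
 *		{ .iov_base = buf0, .iov_len = PAGE_SIZE },
 *		{ .iov_base = buf1, .iov_len = PAGE_SIZE },
 *	};
 *	struct page *pages[2];
 *	int pinned = get_kernel_pages(kiov, 2, 0, pages);
 */
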
static void pagevec_lru_move_fn(struct pagevec *pvec,
	void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
	void *arg)
{
	int i;
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long flags = 0;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = pagezone;
			spin_lock_irqsave(&zone->lru_lock, flags);
		}

		lruvec = mem_cgroup_page_lruvec(page, zone);
		(*move_fn)(page, lruvec, arg);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int *pgmoved = arg;

	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		enum lru_list lru = page_lru_base_type(page);
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		(*pgmoved)++;
	}
}

/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int pgmoved = 0;

	pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved);
	__count_vm_events(PGROTATED, pgmoved);
}

/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = this_cpu_ptr(&lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

static void update_page_reclaim_stat(struct lruvec *lruvec,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;
}

static void __activate_page(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru);
		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(page, lruvec, lru);
		trace_mm_lru_activate(page);

		__count_vm_event(PGACTIVATE);
		update_page_reclaim_stat(lruvec, file, 1);
	}
}

#ifdef CONFIG_SMP
static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs);

static void activate_page_drain(int cpu)
{
	struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu);

	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, __activate_page, NULL);
}

static bool need_activate_page_drain(int cpu)
{
	return pagevec_count(&per_cpu(activate_page_pvecs, cpu)) != 0;
}

void activate_page(struct page *page)
{
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(activate_page_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, __activate_page, NULL);
		put_cpu_var(activate_page_pvecs);
	}
}

#else
static inline void activate_page_drain(int cpu)
{
}

static bool need_activate_page_drain(int cpu)
{
	return false;
}

void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
	spin_unlock_irq(&zone->lru_lock);
}
#endif

static void __lru_cache_activate_page(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);
	int i;

	/*
	 * Search backwards on the optimistic assumption that the page being
	 * activated has just been added to this pagevec. Note that only
	 * the local pagevec is examined as a !PageLRU page could be in the
	 * process of being released, reclaimed, migrated or on a remote
	 * pagevec that is currently being drained. Furthermore, marking
	 * a remote pagevec's page PageActive potentially hits a race where
	 * a page is marked PageActive just after it is added to the inactive
	 * list causing accounting errors and BUG_ON checks to trigger.
	 */
	for (i = pagevec_count(pvec) - 1; i >= 0; i--) {
		struct page *pagevec_page = pvec->pages[i];

		if (pagevec_page == page) {
			SetPageActive(page);
			break;
		}
	}

	put_cpu_var(lru_add_pvec);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 *
 * When a newly allocated page is not yet visible, so safe for non-atomic ops,
 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
 */
void mark_page_accessed(struct page *page)
{
	page = compound_head(page);
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page)) {

		/*
		 * If the page is on the LRU, queue it for activation via
		 * activate_page_pvecs. Otherwise, assume the page is on a
		 * pagevec, mark it active and it'll be moved to the active
		 * LRU on the next drain.
		 */
		if (PageLRU(page))
			activate_page(page);
		else
			__lru_cache_activate_page(page);
		ClearPageReferenced(page);
		if (page_is_file_cache(page))
			workingset_activation(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
	if (page_is_idle(page))
		clear_page_idle(page);
}
EXPORT_SYMBOL(mark_page_accessed);

static void __lru_cache_add(struct page *page)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvec);

	page_cache_get(page);
	if (!pagevec_space(pvec))
		__pagevec_lru_add(pvec);
	pagevec_add(pvec, page);
	put_cpu_var(lru_add_pvec);
}

/**
 * lru_cache_add_anon - add a page to the page lists
 * @page: the page to add
 */
void lru_cache_add_anon(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}

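/**
 * lru_cache_add_file - add a file-backed page to the page lists
 * @page: the page to add
 *
 * Like lru_cache_add_anon() above: the active flag is cleared so the page
 * is queued for the inactive list via the per-cpu pagevec.
 */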
void lru_cache_add_file(struct page *page)
{
	if (PageActive(page))
		ClearPageActive(page);
	__lru_cache_add(page);
}
EXPORT_SYMBOL(lru_cache_add_file);

/**
 * lru_cache_add - add a page to a page list
 * @page: the page to be added to the LRU.
 *
 * Queue the page for addition to the LRU via pagevec. The decision on whether
 * to add the page to the [in]active [file|anon] list is deferred until the
 * pagevec is drained. This gives a chance for the caller of lru_cache_add()
 * to have the page added to the active list using mark_page_accessed().
 */
void lru_cache_add(struct page *page)
{
	VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page);
	VM_BUG_ON_PAGE(PageLRU(page), page);
	__lru_cache_add(page);
}

/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through eg. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);
	struct lruvec *lruvec;

	spin_lock_irq(&zone->lru_lock);
	lruvec = mem_cgroup_page_lruvec(page, zone);
	ClearPageActive(page);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/**
 * lru_cache_add_active_or_unevictable
 * @page:  the page to be added to LRU
 * @vma:   vma in which page is mapped for determining reclaimability
 *
 * Place @page on the active or unevictable LRU list, depending on its
 * evictability.  Note that if the page is not evictable, it goes
 * directly back onto its zone's unevictable list; it does NOT use a
 * per-cpu pagevec.
 */
void lru_cache_add_active_or_unevictable(struct page *page,
					 struct vm_area_struct *vma)
{
	VM_BUG_ON_PAGE(PageLRU(page), page);

	if (likely((vma->vm_flags & (VM_LOCKED | VM_SPECIAL)) != VM_LOCKED)) {
		SetPageActive(page);
		lru_cache_add(page);
		return;
	}

	if (!TestSetPageMlocked(page)) {
		/*
		 * We use the irq-unsafe __mod_zone_page_stat because this
		 * counter is not modified from interrupt context, and the pte
		 * lock is held (a spinlock), which implies preemption is
		 * disabled.
		 */
		__mod_zone_page_state(page_zone(page), NR_MLOCK,
				      hpage_nr_pages(page));
		count_vm_event(UNEVICTABLE_PGMLOCKED);
	}
	add_page_to_unevictable_list(page);
}

/*
 * If the page cannot be invalidated, it is moved to the
 * inactive list to speed up its reclaim.  It is moved to the
 * head of the list, rather than the tail, to give the flusher
 * threads some time to write it out, as this is much more
 * effective than the single-page writeout from reclaim.
 *
 * If the page isn't page_mapped and dirty/writeback, the page
 * could be reclaimed asap using PG_reclaim.
 *
 * 1. active, mapped page -> none
 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
 * 3. inactive, mapped page -> none
 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
 * 5. inactive, clean -> inactive, tail
 * 6. Others -> none
 *
 * In case 4, the page is moved to the head of the inactive list because
 * the VM expects it to be written out by flusher threads, which is much
 * more effective than the single-page writeout from reclaim.
 */
static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec,
			      void *arg)
{
	int lru, file;
	bool active;

	if (!PageLRU(page))
		return;

	if (PageUnevictable(page))
		return;

	/* Some processes are using the page */
	if (page_mapped(page))
		return;

	active = PageActive(page);
	file = page_is_file_cache(page);
	lru = page_lru_base_type(page);

	del_page_from_lru_list(page, lruvec, lru + active);
	ClearPageActive(page);
	ClearPageReferenced(page);
	add_page_to_lru_list(page, lruvec, lru);

	if (PageWriteback(page) || PageDirty(page)) {
		/*
		 * PG_reclaim can race with end_page_writeback, which can
		 * make readahead confusing.  But the race window is
		 * _really_ small and it's a non-critical problem.
		 */
		SetPageReclaim(page);
	} else {
		/*
		 * The page's writeback ended while it was on the pagevec;
		 * move the page to the tail of the inactive list.
		 */
		list_move_tail(&page->lru, &lruvec->lists[lru]);
		__count_vm_event(PGROTATED);
	}

	if (active)
		__count_vm_event(PGDEACTIVATE);
	update_page_reclaim_stat(lruvec, file, 0);
}


static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
			    void *arg)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);

		del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE);
		ClearPageActive(page);
		ClearPageReferenced(page);
		add_page_to_lru_list(page, lruvec, lru);

		__count_vm_event(PGDEACTIVATE);
		update_page_reclaim_stat(lruvec, file, 0);
	}
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
void lru_add_drain_cpu(int cpu)
{
	struct pagevec *pvec = &per_cpu(lru_add_pvec, cpu);

	if (pagevec_count(pvec))
		__pagevec_lru_add(pvec);

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}

	pvec = &per_cpu(lru_deactivate_file_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);

	pvec = &per_cpu(lru_deactivate_pvecs, cpu);
	if (pagevec_count(pvec))
		pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);

	activate_page_drain(cpu);
}

/**
 * deactivate_file_page - forcefully deactivate a file page
 * @page: page to deactivate
 *
 * This function hints the VM that @page is a good reclaim candidate,
 * for example if its invalidation fails due to the page being dirty
 * or under writeback.
 */
void deactivate_file_page(struct page *page)
{
	/*
	 * In a workload with many unevictable pages (e.g. under mprotect),
	 * deactivating unevictable pages to accelerate reclaim is pointless.
	 */
	if (PageUnevictable(page))
		return;

	if (likely(get_page_unless_zero(page))) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_file_pvecs);

		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_file_fn, NULL);
		put_cpu_var(lru_deactivate_file_pvecs);
	}
}

/**
 * deactivate_page - deactivate a page
 * @page: page to deactivate
 *
 * deactivate_page() moves @page to the inactive list if @page was on the active
 * list and was not an unevictable page.  This is done to accelerate the reclaim
 * of @page.
 */
void deactivate_page(struct page *page)
{
	if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) {
		struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs);

		page_cache_get(page);
		if (!pagevec_add(pvec, page))
			pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL);
		put_cpu_var(lru_deactivate_pvecs);
	}
}

void lru_add_drain(void)
{
	lru_add_drain_cpu(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

static DEFINE_PER_CPU(struct work_struct, lru_add_drain_work);

void lru_add_drain_all(void)
{
	static DEFINE_MUTEX(lock);
	static struct cpumask has_work;
	int cpu;

	mutex_lock(&lock);
	get_online_cpus();
	cpumask_clear(&has_work);

	for_each_online_cpu(cpu) {
		struct work_struct *work = &per_cpu(lru_add_drain_work, cpu);

		if (pagevec_count(&per_cpu(lru_add_pvec, cpu)) ||
		    pagevec_count(&per_cpu(lru_rotate_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_file_pvecs, cpu)) ||
		    pagevec_count(&per_cpu(lru_deactivate_pvecs, cpu)) ||
		    need_activate_page_drain(cpu)) {
			INIT_WORK(work, lru_add_drain_per_cpu);
			schedule_work_on(cpu, work);
			cpumask_set_cpu(cpu, &has_work);
		}
	}

	for_each_cpu(cpu, &has_work)
		flush_work(&per_cpu(lru_add_drain_work, cpu));

	put_online_cpus();
	mutex_unlock(&lock);
}

/**
 * release_pages - batched page_cache_release()
 * @pages: array of pages to release
 * @nr: number of pages
 * @cold: whether the pages are cache cold
 *
 * Decrement the reference count on all the pages in @pages.  If it
 * fell to zero, remove the page from the LRU and free it.
 */
void release_pages(struct page **pages, int nr, bool cold)
{
	int i;
	LIST_HEAD(pages_to_free);
	struct zone *zone = NULL;
	struct lruvec *lruvec;
	unsigned long uninitialized_var(flags);
	unsigned int uninitialized_var(lock_batch);

	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		/*
		 * Make sure the IRQ-safe lock-holding time does not get
		 * excessive with a continuous string of pages from the
		 * same zone. The lock is held only if zone != NULL.
		 */
		if (zone && ++lock_batch == SWAP_CLUSTER_MAX) {
			spin_unlock_irqrestore(&zone->lru_lock, flags);
			zone = NULL;
		}

		page = compound_head(page);
		if (!put_page_testzero(page))
			continue;

		if (PageCompound(page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			__put_compound_page(page);
			continue;
		}

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
								flags);
				lock_batch = 0;
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}

			lruvec = mem_cgroup_page_lruvec(page, zone);
			VM_BUG_ON_PAGE(!PageLRU(page), page);
			__ClearPageLRU(page);
			del_page_from_lru_list(page, lruvec, page_off_lru(page));
		}

		/* Clear Active bit in case of parallel mark_page_accessed */
		__ClearPageActive(page);

		list_add(&page->lru, &pages_to_free);
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	mem_cgroup_uncharge_list(&pages_to_free);
	free_hot_cold_page_list(&pages_to_free, cold);
}
EXPORT_SYMBOL(release_pages);

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  __pagevec_lru_add()
 * and __pagevec_lru_add_active() call release_pages() directly to avoid
 * mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/* used by __split_huge_page_refcount() */
void lru_add_page_tail(struct page *page, struct page *page_tail,
		       struct lruvec *lruvec, struct list_head *list)
{
	const int file = 0;

	VM_BUG_ON_PAGE(!PageHead(page), page);
	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
	VM_BUG_ON(NR_CPUS != 1 &&
		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));

	if (!list)
		SetPageLRU(page_tail);

	if (likely(PageLRU(page)))
		list_add_tail(&page_tail->lru, &page->lru);
	else if (list) {
		/* page reclaim is reclaiming a huge page */
		get_page(page_tail);
		list_add_tail(&page_tail->lru, list);
	} else {
		struct list_head *list_head;
		/*
		 * Head page has not yet been counted, as an hpage,
		 * so we must account for each subpage individually.
		 *
		 * Use the standard add function to put page_tail on the list,
		 * but then correct its position so they all end up in order.
		 */
		add_page_to_lru_list(page_tail, lruvec, page_lru(page_tail));
		list_head = page_tail->lru.prev;
		list_move_tail(&page_tail->lru, list_head);
	}

	if (!PageUnevictable(page))
		update_page_reclaim_stat(lruvec, file, PageActive(page_tail));
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
				 void *arg)
{
	int file = page_is_file_cache(page);
	int active = PageActive(page);
	enum lru_list lru = page_lru(page);

	VM_BUG_ON_PAGE(PageLRU(page), page);

	SetPageLRU(page);
	add_page_to_lru_list(page, lruvec, lru);
	update_page_reclaim_stat(lruvec, file, active);
	trace_mm_lru_insertion(page, lru);
}

/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void __pagevec_lru_add(struct pagevec *pvec)
{
	pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, NULL);
}
EXPORT_SYMBOL(__pagevec_lru_add);

/**
 * pagevec_lookup_entries - gang pagecache lookup
 * @pvec:	Where the resulting entries are placed
 * @mapping:	The address_space to search
 * @start:	The starting entry index
 * @nr_entries:	The maximum number of entries
 * @indices:	The cache indices corresponding to the entries in @pvec
 *
 * pagevec_lookup_entries() will search for and return a group of up
 * to @nr_entries pages and shadow entries in the mapping.  All
 * entries are placed in @pvec.  pagevec_lookup_entries() takes a
 * reference against actual pages in @pvec.
 *
 * The search returns a group of mapping-contiguous entries with
 * ascending indexes.  There may be holes in the indices due to
 * not-present entries.
 *
 * pagevec_lookup_entries() returns the number of entries which were
 * found.
 */
unsigned pagevec_lookup_entries(struct pagevec *pvec,
				struct address_space *mapping,
				pgoff_t start, unsigned nr_entries,
				pgoff_t *indices)
{
	pvec->nr = find_get_entries(mapping, start, nr_entries,
				    pvec->pages, indices);
	return pagevec_count(pvec);
}

/**
 * pagevec_remove_exceptionals - pagevec exceptionals pruning
 * @pvec:	The pagevec to prune
 *
 * pagevec_lookup_entries() fills both pages and exceptional radix
 * tree entries into the pagevec.  This function prunes all
 * exceptionals from @pvec without leaving holes, so that it can be
 * passed on to page-only pagevec operations.
 */
void pagevec_remove_exceptionals(struct pagevec *pvec)
{
	int i, j;

	for (i = 0, j = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		if (!radix_tree_exceptional_entry(page))
			pvec->pages[j++] = page;
	}
	pvec->nr = j;
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);

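/**
 * pagevec_lookup_tag - gang tagged pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @index:	The starting page index, updated for the next traversal
 * @tag:	The radix tree tag to match (e.g. PAGECACHE_TAG_DIRTY)
 * @nr_pages:	The maximum number of pages
 *
 * Like pagevec_lookup(), but only returns pages that have @tag set in the
 * mapping's radix tree.  Returns the number of pages which were found.
 */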
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);
#ifdef CONFIG_SWAP
	int i;

	for (i = 0; i < MAX_SWAPFILES; i++)
		spin_lock_init(&swapper_spaces[i].tree_lock);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}