/*
 *  linux/mm/swap.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 */

/*
 * This file contains the default values for the operation of the
 * Linux VM subsystem. Fine-tuning documentation can be found in
 * Documentation/sysctl/vm.txt.
 * Started 18.12.91
 * Swap aging added 23.2.95, Stephen Tweedie.
 * Buffermem limits added 12.3.98, Rik van Riel.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/mm_inline.h>
#include <linux/buffer_head.h>	/* for try_to_release_page() */
#include <linux/percpu_counter.h>
#include <linux/percpu.h>
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/backing-dev.h>
#include <linux/memcontrol.h>
#include <linux/gfp.h>

#include "internal.h"

/* How many pages do we try to swap or page in/out together? */
int page_cluster;

static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs);
static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs);

/*
 * This path almost never happens for VM activity - pages are normally
 * freed via pagevecs.  But it gets used by networking.
 */
static void __page_cache_release(struct page *page)
{
	if (PageLRU(page)) {
		unsigned long flags;
		struct zone *zone = page_zone(page);

		spin_lock_irqsave(&zone->lru_lock, flags);
		VM_BUG_ON(!PageLRU(page));
		__ClearPageLRU(page);
		del_page_from_lru(zone, page);
		spin_unlock_irqrestore(&zone->lru_lock, flags);
	}
	free_hot_cold_page(page, 0);
}

static void put_compound_page(struct page *page)
{
	page = compound_head(page);
	if (put_page_testzero(page)) {
		compound_page_dtor *dtor;

		dtor = get_compound_page_dtor(page);
		(*dtor)(page);
	}
}

void put_page(struct page *page)
{
	if (unlikely(PageCompound(page)))
		put_compound_page(page);
	else if (put_page_testzero(page))
		__page_cache_release(page);
}
EXPORT_SYMBOL(put_page);

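/*
 * Usage sketch (illustrative only, not taken from any caller in this
 * file): every reference taken with get_page() must be balanced by a
 * put_page(); the final put frees the page, handling compound pages via
 * their destructor.
 *
 *	struct page *page = alloc_page(GFP_KERNEL);
 *	if (page) {
 *		get_page(page);		take an extra reference
 *		...			hand the page to another user
 *		put_page(page);		the other user is done
 *		put_page(page);		final reference: page is freed
 *	}
 */
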
/**
 * put_pages_list() - release a list of pages
 * @pages: list of pages threaded on page->lru
 *
 * Release a list of pages which are strung together on page->lru.  Currently
 * used by read_cache_pages() and related error recovery code.
 */
void put_pages_list(struct list_head *pages)
{
	while (!list_empty(pages)) {
		struct page *victim;

		victim = list_entry(pages->prev, struct page, lru);
		list_del(&victim->lru);
		page_cache_release(victim);
	}
}
EXPORT_SYMBOL(put_pages_list);

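/*
 * Usage sketch (hypothetical caller): pages we hold references on are
 * collected on a local list through their lru field, then released in
 * one call, which also empties the list.
 *
 *	LIST_HEAD(pages);
 *
 *	list_add(&page->lru, &pages);	once per page, while assembling
 *	...
 *	put_pages_list(&pages);		drops one reference per page
 */
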
/*
 * pagevec_move_tail() must be called with IRQ disabled.
 * Otherwise this may cause nasty races.
 */
static void pagevec_move_tail(struct pagevec *pvec)
{
	int i;
	int pgmoved = 0;
	struct zone *zone = NULL;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);

		if (pagezone != zone) {
			if (zone)
				spin_unlock(&zone->lru_lock);
			zone = pagezone;
			spin_lock(&zone->lru_lock);
		}
		if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
			int lru = page_lru_base_type(page);
			list_move_tail(&page->lru, &zone->lru[lru].list);
			pgmoved++;
		}
	}
	if (zone)
		spin_unlock(&zone->lru_lock);
	__count_vm_events(PGROTATED, pgmoved);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}

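/*
 * Note the locking pattern above: zone->lru_lock is only dropped and
 * retaken when consecutive pages belong to different zones, so a pagevec
 * full of same-zone pages costs a single lock round trip rather than one
 * per page.  release_pages() and ____pagevec_lru_add() below batch their
 * lock acquisitions the same way.
 */
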
/*
 * Writeback is about to end against a page which has been marked for immediate
 * reclaim.  If it still appears to be reclaimable, move it to the tail of the
 * inactive list.
 */
void rotate_reclaimable_page(struct page *page)
{
	if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) &&
	    !PageUnevictable(page) && PageLRU(page)) {
		struct pagevec *pvec;
		unsigned long flags;

		page_cache_get(page);
		local_irq_save(flags);
		pvec = &__get_cpu_var(lru_rotate_pvecs);
		if (!pagevec_add(pvec, page))
			pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

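/*
 * Call-site sketch (paraphrased from the writeback completion path in
 * mm/filemap.c): a page flagged PG_reclaim is rotated as soon as its
 * writeback ends, so reclaim finds the now-clean page quickly.
 *
 *	void end_page_writeback(struct page *page)
 *	{
 *		if (TestClearPageReclaim(page))
 *			rotate_reclaimable_page(page);
 *		...
 *	}
 */
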
static void update_page_reclaim_stat(struct zone *zone, struct page *page,
				     int file, int rotated)
{
	struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat;
	struct zone_reclaim_stat *memcg_reclaim_stat;

	memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);

	reclaim_stat->recent_scanned[file]++;
	if (rotated)
		reclaim_stat->recent_rotated[file]++;

	if (!memcg_reclaim_stat)
		return;

	memcg_reclaim_stat->recent_scanned[file]++;
	if (rotated)
		memcg_reclaim_stat->recent_rotated[file]++;
}

/*
 * FIXME: speed this up?
 */
void activate_page(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
		int file = page_is_file_cache(page);
		int lru = page_lru_base_type(page);
		del_page_from_lru_list(zone, page, lru);

		SetPageActive(page);
		lru += LRU_ACTIVE;
		add_page_to_lru_list(zone, page, lru);
		__count_vm_event(PGACTIVATE);

		update_page_reclaim_stat(zone, page, file, 1);
	}
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Mark a page as having seen activity.
 *
 * inactive,unreferenced	->	inactive,referenced
 * inactive,referenced		->	active,unreferenced
 * active,unreferenced		->	active,referenced
 */
void mark_page_accessed(struct page *page)
{
	if (!PageActive(page) && !PageUnevictable(page) &&
			PageReferenced(page) && PageLRU(page)) {
		activate_page(page);
		ClearPageReferenced(page);
	} else if (!PageReferenced(page)) {
		SetPageReferenced(page);
	}
}
EXPORT_SYMBOL(mark_page_accessed);

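/*
 * Usage sketch (hypothetical caller): two touches promote an inactive
 * page, following the state diagram above.
 *
 *	mark_page_accessed(page);	inactive,unreferenced -> inactive,referenced
 *	mark_page_accessed(page);	inactive,referenced   -> active,unreferenced
 *
 * The page cache read path, for one, calls this when a read() first
 * touches a page.
 */
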
void __lru_cache_add(struct page *page, enum lru_list lru)
{
	struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru];

	page_cache_get(page);
	if (!pagevec_add(pvec, page))
		____pagevec_lru_add(pvec, lru);
	put_cpu_var(lru_add_pvecs);
}

/**
 * lru_cache_add_lru - add a page to an LRU list
 * @page: the page to be added to the LRU.
 * @lru: the LRU list to which the page is added.
 */
void lru_cache_add_lru(struct page *page, enum lru_list lru)
{
	if (PageActive(page)) {
		VM_BUG_ON(PageUnevictable(page));
		ClearPageActive(page);
	} else if (PageUnevictable(page)) {
		VM_BUG_ON(PageActive(page));
		ClearPageUnevictable(page);
	}

	VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page));
	__lru_cache_add(page, lru);
}

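/*
 * Usage sketch (assuming the lru_cache_add_* wrappers declared in
 * linux/swap.h): callers usually select the target list through a
 * convenience wrapper rather than passing an enum lru_list directly.
 *
 *	lru_cache_add_file(page);	page goes to an inactive file list
 *	lru_cache_add_anon(page);	page goes to an inactive anon list
 */
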
/**
 * add_page_to_unevictable_list - add a page to the unevictable list
 * @page:  the page to be added to the unevictable list
 *
 * Add page directly to its zone's unevictable list.  To avoid races with
 * tasks that might be making the page evictable, through e.g. munlock,
 * munmap or exit, while it's not on the lru, we want to add the page
 * while it's locked or otherwise "invisible" to other tasks.  This is
 * difficult to do when using the pagevec cache, so bypass that.
 */
void add_page_to_unevictable_list(struct page *page)
{
	struct zone *zone = page_zone(page);

	spin_lock_irq(&zone->lru_lock);
	SetPageUnevictable(page);
	SetPageLRU(page);
	add_page_to_lru_list(zone, page, LRU_UNEVICTABLE);
	spin_unlock_irq(&zone->lru_lock);
}

/*
 * Drain pages out of the cpu's pagevecs.
 * Either "cpu" is the current CPU, and preemption has already been
 * disabled; or "cpu" is being hot-unplugged, and is already dead.
 */
static void drain_cpu_pagevecs(int cpu)
{
	struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu);
	struct pagevec *pvec;
	int lru;

	for_each_lru(lru) {
		pvec = &pvecs[lru - LRU_BASE];
		if (pagevec_count(pvec))
			____pagevec_lru_add(pvec, lru);
	}

	pvec = &per_cpu(lru_rotate_pvecs, cpu);
	if (pagevec_count(pvec)) {
		unsigned long flags;

		/* No harm done if a racing interrupt already did this */
		local_irq_save(flags);
		pagevec_move_tail(pvec);
		local_irq_restore(flags);
	}
}

void lru_add_drain(void)
{
	drain_cpu_pagevecs(get_cpu());
	put_cpu();
}

static void lru_add_drain_per_cpu(struct work_struct *dummy)
{
	lru_add_drain();
}

/*
 * Returns 0 for success
 */
int lru_add_drain_all(void)
{
	return schedule_on_each_cpu(lru_add_drain_per_cpu);
}

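/*
 * Usage sketch: lru_add_drain_all() runs lru_add_drain() on every CPU via
 * the workqueues and waits for them, so it may sleep.  Callers that need
 * specific pages to actually be on the LRU (page migration, mlock and
 * friends) flush the per-cpu pagevecs first:
 *
 *	lru_add_drain_all();	pages cached in pagevecs reach the LRU
 *	...			now isolate_lru_page() etc. can find them
 */
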
/*
 * Batched page_cache_release().  Decrement the reference count on all the
 * passed pages.  If it fell to zero then remove the page from the LRU and
 * free it.
 *
 * Avoid taking zone->lru_lock if possible, but if it is taken, retain it
 * for the remainder of the operation.
 *
 * The locking in this function is against shrink_inactive_list(): we recheck
 * the page count inside the lock to see whether shrink_inactive_list()
 * grabbed the page via the LRU.  If it did, give up: shrink_inactive_list()
 * will free it.
 */
void release_pages(struct page **pages, int nr, int cold)
{
	int i;
	struct pagevec pages_to_free;
	struct zone *zone = NULL;
	unsigned long uninitialized_var(flags);

	pagevec_init(&pages_to_free, cold);
	for (i = 0; i < nr; i++) {
		struct page *page = pages[i];

		if (unlikely(PageCompound(page))) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			put_compound_page(page);
			continue;
		}

		if (!put_page_testzero(page))
			continue;

		if (PageLRU(page)) {
			struct zone *pagezone = page_zone(page);

			if (pagezone != zone) {
				if (zone)
					spin_unlock_irqrestore(&zone->lru_lock,
									flags);
				zone = pagezone;
				spin_lock_irqsave(&zone->lru_lock, flags);
			}
			VM_BUG_ON(!PageLRU(page));
			__ClearPageLRU(page);
			del_page_from_lru(zone, page);
		}

		if (!pagevec_add(&pages_to_free, page)) {
			if (zone) {
				spin_unlock_irqrestore(&zone->lru_lock, flags);
				zone = NULL;
			}
			__pagevec_free(&pages_to_free);
			pagevec_reinit(&pages_to_free);
		}
	}
	if (zone)
		spin_unlock_irqrestore(&zone->lru_lock, flags);

	pagevec_free(&pages_to_free);
}

/*
 * The pages which we're about to release may be in the deferred lru-addition
 * queues.  That would prevent them from really being freed right now.  That's
 * OK from a correctness point of view but is inefficient - those pages may be
 * cache-warm and we want to give them back to the page allocator ASAP.
 *
 * So __pagevec_release() will drain those queues here.  ____pagevec_lru_add()
 * calls release_pages() directly to avoid mutual recursion.
 */
void __pagevec_release(struct pagevec *pvec)
{
	lru_add_drain();
	release_pages(pvec->pages, pagevec_count(pvec), pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(__pagevec_release);

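/*
 * Usage sketch (assuming the pagevec_release() wrapper in
 * linux/pagevec.h): most callers use the wrapper, which only drops into
 * this function when the pagevec is non-empty.
 *
 *	struct pagevec pvec;
 *
 *	pagevec_init(&pvec, 0);
 *	...fill pvec with pages we hold references on...
 *	pagevec_release(&pvec);		releases the refs, reinits pvec
 */
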
/*
 * Add the passed pages to the LRU, then drop the caller's refcount
 * on them.  Reinitialises the caller's pagevec.
 */
void ____pagevec_lru_add(struct pagevec *pvec, enum lru_list lru)
{
	int i;
	struct zone *zone = NULL;

	VM_BUG_ON(is_unevictable_lru(lru));

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];
		struct zone *pagezone = page_zone(page);
		int file;
		int active;

		if (pagezone != zone) {
			if (zone)
				spin_unlock_irq(&zone->lru_lock);
			zone = pagezone;
			spin_lock_irq(&zone->lru_lock);
		}
		VM_BUG_ON(PageActive(page));
		VM_BUG_ON(PageUnevictable(page));
		VM_BUG_ON(PageLRU(page));
		SetPageLRU(page);
		active = is_active_lru(lru);
		file = is_file_lru(lru);
		if (active)
			SetPageActive(page);
		update_page_reclaim_stat(zone, page, file, active);
		add_page_to_lru_list(zone, page, lru);
	}
	if (zone)
		spin_unlock_irq(&zone->lru_lock);
	release_pages(pvec->pages, pvec->nr, pvec->cold);
	pagevec_reinit(pvec);
}
EXPORT_SYMBOL(____pagevec_lru_add);

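/*
 * Usage sketch (assuming the __pagevec_lru_add_* wrappers in
 * linux/pagevec.h; names are from memory and may differ): callers pick
 * the destination list through a wrapper, after which the pagevec is
 * empty and can be reused.
 *
 *	__pagevec_lru_add_file(&pvec);
 *		equivalent to ____pagevec_lru_add(&pvec, LRU_INACTIVE_FILE)
 */
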
/*
 * Try to drop buffers from the pages in a pagevec
 */
void pagevec_strip(struct pagevec *pvec)
{
	int i;

	for (i = 0; i < pagevec_count(pvec); i++) {
		struct page *page = pvec->pages[i];

		if (page_has_private(page) && trylock_page(page)) {
			/* recheck under the page lock: the private data
			 * may already have been released by a racing task */
			if (page_has_private(page))
				try_to_release_page(page, 0);
			unlock_page(page);
		}
	}
}

/**
 * pagevec_lookup - gang pagecache lookup
 * @pvec:	Where the resulting pages are placed
 * @mapping:	The address_space to search
 * @start:	The starting page index
 * @nr_pages:	The maximum number of pages
 *
 * pagevec_lookup() will search for and return a group of up to @nr_pages pages
 * in the mapping.  The pages are placed in @pvec.  pagevec_lookup() takes a
 * reference against the pages in @pvec.
 *
 * The search returns a group of mapping-contiguous pages with ascending
 * indexes.  There may be holes in the indices due to not-present pages.
 *
 * pagevec_lookup() returns the number of pages which were found.
 */
unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t start, unsigned nr_pages)
{
	pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup);

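/*
 * Usage sketch (hypothetical loop): the usual way to walk a mapping is to
 * advance the start index past the last page of each batch and release
 * the batch before looking up the next one.
 *
 *	struct pagevec pvec;
 *	pgoff_t index = 0;
 *
 *	pagevec_init(&pvec, 0);
 *	while (pagevec_lookup(&pvec, mapping, index, PAGEVEC_SIZE)) {
 *		index = pvec.pages[pagevec_count(&pvec) - 1]->index + 1;
 *		...process pvec.pages[0 .. pagevec_count(&pvec) - 1]...
 *		pagevec_release(&pvec);
 *	}
 */
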
unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping,
		pgoff_t *index, int tag, unsigned nr_pages)
{
	pvec->nr = find_get_pages_tag(mapping, index, tag,
					nr_pages, pvec->pages);
	return pagevec_count(pvec);
}
EXPORT_SYMBOL(pagevec_lookup_tag);

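/*
 * Usage sketch (hypothetical walk of dirty pages): unlike
 * pagevec_lookup(), the tagged variant advances *index itself, so the
 * caller's loop needs no index arithmetic.
 *
 *	pgoff_t index = 0;
 *
 *	while (pagevec_lookup_tag(&pvec, mapping, &index,
 *				  PAGECACHE_TAG_DIRTY, PAGEVEC_SIZE)) {
 *		...write back pvec.pages...
 *		pagevec_release(&pvec);
 *	}
 */
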
/*
 * Perform any setup for the swap system
 */
void __init swap_setup(void)
{
	unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT);

#ifdef CONFIG_SWAP
	bdi_init(swapper_space.backing_dev_info);
#endif

	/* Use a smaller cluster for small-memory machines */
	if (megs < 16)
		page_cluster = 2;
	else
		page_cluster = 3;
	/*
	 * Right now other parts of the system mean that we
	 * _really_ don't want to cluster much more.
	 */
}