xref: /linux/mm/swap_state.c (revision aae466b0052e1888edd1d7f473d4310d64936196)
// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include "internal.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* The initial readahead hit count is 4, to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
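
/*
 * Worked example of the encoding above (illustrative, assuming a 4KiB
 * PAGE_SIZE, so SWAP_RA_WIN_SHIFT == 6 and each mask covers 6 bits):
 *
 *	SWAP_RA_VAL(0x7f0000201000, 8, 3)
 *	  == 0x7f0000201000 | ((8 << 6) & 0xfc0) | (3 & 0x3f)
 *	  == 0x7f0000201203
 *
 * and decoding gives back SWAP_RA_ADDR == 0x7f0000201000,
 * SWAP_RA_WIN == 8 and SWAP_RA_HITS == 3.
 */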

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)
#define ADD_CACHE_INFO(x, nr)	do { swap_cache_info.x += (nr); } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	unsigned int i, j, nr;
	unsigned long ret = 0;
	struct address_space *spaces;
	struct swap_info_struct *si;

	for (i = 0; i < MAX_SWAPFILES; i++) {
		swp_entry_t entry = swp_entry(i, 1);

		/* Avoid a get_swap_device() warning for a bad swap entry */
		if (!swp_swap_info(entry))
			continue;
		/* Prevent swapoff from freeing swapper_spaces */
		si = get_swap_device(entry);
		if (!si)
			continue;
		nr = nr_swapper_spaces[i];
		spaces = swapper_spaces[i];
		for (j = 0; j < nr; j++)
			ret += spaces[j].nrpages;
		put_swap_device(si);
	}
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

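/*
 * Return the workingset shadow entry cached at @entry's slot, or NULL if
 * the slot is empty or holds a real page (whose reference is dropped).
 */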
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = find_get_entry(address_space, idx);
	if (xa_is_value(page))
		return page;
	if (page)
		put_page(page);
	return NULL;
}

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
	unsigned long i, nr = hpage_nr_pages(page);
	void *old;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	do {
		unsigned long nr_shadows = 0;

		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				nr_shadows++;
				if (shadowp)
					*shadowp = old;
			}
			set_page_private(page + i, entry.val + i);
			xas_store(&xas, page);
			xas_next(&xas);
		}
		address_space->nrexceptional -= nr_shadows;
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		ADD_CACHE_INFO(add_total, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));
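	/*
	 * The do/while above is the usual XArray retry idiom: after the
	 * lock is dropped, xas_nomem() preallocates a node with @gfp on
	 * -ENOMEM and returns true so the store is retried; any other
	 * error falls through to the cleanup below.
	 */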

	if (!xas_error(&xas))
		return 0;

	ClearPageSwapCache(page);
	page_ref_sub(page, nr);
	return xas_error(&xas);
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i, nr = hpage_nr_pages(page);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != page, entry);
		set_page_private(page + i, 0);
		xas_next(&xas);
	}
	ClearPageSwapCache(page);
	if (shadow)
		address_space->nrexceptional += nr;
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 *
 * Return: 1 on success, 0 on failure.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the page will be dirtied in unmap because its pte should be
	 * dirty. A special case is an MADV_FREE page: its pte could have the
	 * dirty bit cleared while its SwapBacked bit is still set, because
	 * clearing the dirty bit and the SwapBacked bit is not protected by
	 * any lock. For such a page, unmap will not set the dirty bit, so
	 * page reclaim will not write the page out. This can cause data
	 * corruption when the page is swapped in later. Always setting the
	 * dirty bit for the page solves the problem.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list;
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry = { .val = page_private(page) };
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(page, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(page, entry);
	page_ref_sub(page, hpage_nr_pages(page));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		unsigned long nr_shadows = 0;
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
			nr_shadows++;
		}
		address_space->nrexceptional -= nr_shadows;
		xa_unlock_irq(&address_space->i_pages);

		/* advance to the next swap address space until we pass end */
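		/*
		 * Round curr up to the start of the next swap address
		 * space; e.g. (illustrative) with SWAP_ADDRESS_SPACE_SHIFT
		 * == 14, curr == 20000 becomes ((20000 >> 14) + 1) << 14
		 * == 32768.
		 */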
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	INC_CACHE_INFO(find_total);
	if (page) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		INC_CACHE_INFO(find_success);
		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(PageTransCompound(page)))
			return page;

		readahead = TestClearPageReadahead(page);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return page;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct page *page;
	void *shadow = NULL;

	*new_page_allocated = false;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			return NULL;
		page = find_get_page(swap_address_space(entry),
				     swp_offset(entry));
		put_swap_device(si);
		if (page)
			return page;

		/*
		 * Just skip readahead for an unused swap slot.
		 * During swapoff, when swap_slot_cache is disabled,
		 * we have to handle the race between putting the
		 * swap entry in the swap cache and marking the swap slot
		 * as SWAP_HAS_CACHE.  That's done in a later part of the
		 * code, or else swapoff will be aborted if we return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			return NULL;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		page = alloc_page_vma(gfp_mask, vma, addr);
		if (!page)
			return NULL;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		put_page(page);
		if (err != -EEXIST)
			return NULL;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		cond_resched();
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__SetPageLocked(page);
	__SetPageSwapBacked(page);

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
		put_swap_page(page, entry);
		goto fail_unlock;
	}

	if (mem_cgroup_charge(page, NULL, gfp_mask)) {
		delete_from_swap_cache(page);
		goto fail_unlock;
	}

	if (shadow)
		workingset_refault(page, shadow);

	/* Caller will initiate read into locked page */
	SetPageWorkingset(page);
	lru_cache_add(page);
	*new_page_allocated = true;
	return page;

fail_unlock:
	unlock_page(page);
	put_page(page);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}
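
/*
 * Worked example of the heuristic above (illustrative): with hits == 5,
 * pages starts at 7 and is rounded up to the next power of two >= 4,
 * i.e. 8; with max_pages == 8 it stays 8.  With hits == 0 and an offset
 * not adjacent to prev_offset, pages drops to 1; but if the previous
 * window was 16, the "don't shrink too fast" clamp raises it back to 8.
 */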

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Test swap type to make sure the dereference is safe */
	if (likely(si->flags & (SWP_BLKDEV | SWP_FS))) {
		struct inode *inode = si->swap_file->f_mapping->host;
		if (inode_read_congested(inode))
			goto skip;
	}

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
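	/* e.g. (illustrative) mask == 7, offset == 43: read offsets 40..47 */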
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

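/*
 * Carve the swap cache for a device into SWAP_ADDRESS_SPACE_PAGES-sized
 * address spaces to reduce lock contention; e.g. (illustrative, with
 * SWAP_ADDRESS_SPACE_PAGES == 16384) a 1 GiB device with 4 KiB pages
 * (262144 slots) gets DIV_ROUND_UP(262144, 16384) == 16 spaces.
 */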
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	kvfree(swapper_spaces[type]);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}
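
/*
 * The clamp above keeps the readahead window [lpfn, rpfn) inside both
 * the VMA and the PMD covering faddr; e.g. (illustrative, 4 KiB pages,
 * 2 MiB PMDs) a window of pfns 0x1fe..0x20e around faddr == 0x200000
 * is clipped on the left to 0x200, the first pfn of faddr's PMD.
 */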

static void swap_ra_info(struct vm_fault *vmf,
			struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	swp_entry_t entry;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
	entry = pte_to_swp_entry(*pte);
	if ((unlikely(non_swap_entry(entry)))) {
		pte_unmap(orig_pte);
		return;
	}

	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 *
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {0,};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry))
			continue;
		if (pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();
skip:
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on
 * configuration, it reads ahead blocks using cluster-based (i.e.,
 * physical disk based) or vma-based (i.e., virtual address based on
 * the fault address) readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
		enable_vma_readahead = true;
	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
		enable_vma_readahead = false;
	else
		return -EINVAL;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr =
	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
	       vma_ra_enabled_store);
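
/*
 * The attribute above is exposed as /sys/kernel/mm/swap/vma_ra_enabled
 * (the "swap" kobject is created under mm_kobj below); e.g.
 *
 *	echo false > /sys/kernel/mm/swap/vma_ra_enabled
 *
 * falls back to cluster-based readahead.
 */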

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif