// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

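/*
 * Per-VMA readahead state is packed into a single long in
 * vma->swap_readahead_info: the page-aligned bits hold the last fault
 * address, and the low PAGE_SHIFT bits are split between the hit count
 * (low half) and the readahead window size (high half), as encoded by
 * the SWAP_RA_* macros below.
 */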
#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

#define INC_CACHE_INFO(x)	data_race(swap_cache_info.x++)
#define ADD_CACHE_INFO(x, nr)	data_race(swap_cache_info.x += (nr))

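/*
 * Lifetime counters for the swap cache, reported by show_swap_cache_info().
 * They are updated without locking, hence the data_race() annotations above.
 */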
static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

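/*
 * Return the workingset shadow entry stored at @entry's slot in the swap
 * cache, or NULL if the slot is empty or holds a real page.
 */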
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(page))
		return page;
	return NULL;
}

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
	unsigned long i, nr = thp_nr_pages(page);
	void *old;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				if (shadowp)
					*shadowp = old;
			}
			set_page_private(page + i, entry.val + i);
			xas_store(&xas, page);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		__mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
		ADD_CACHE_INFO(add_total, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	ClearPageSwapCache(page);
	page_ref_sub(page, nr);
	return xas_error(&xas);
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i, nr = thp_nr_pages(page);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != page, entry);
		set_page_private(page + i, 0);
		xas_next(&xas);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	__mod_lruvec_page_state(page, NR_SWAPCACHE, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(&folio->page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * pte should be dirty. A special case is MADV_FREE pages: their
	 * pte can have the dirty bit cleared while the folio's SwapBacked
	 * flag is still set, because clearing the dirty bit and the
	 * SwapBacked flag is not protected by any lock. For such a folio,
	 * unmap will not set the dirty bit, so folio reclaim will not
	 * write the folio out. This can cause data corruption when the
	 * folio is swapped in later. Always setting the dirty flag for
	 * the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_page(&folio->page, entry);
	return false;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry = { .val = page_private(page) };
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(page, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(page, entry);
	page_ref_sub(page, thp_nr_pages(page));
}

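/*
 * Remove any workingset shadow entries left in the swap cache for swap
 * offsets [begin, end] of device @type.  The range may span several of
 * the per-device swap cache chunks (each covering SWAP_ADDRESS_SPACE_PAGES
 * slots), so walk the chunks one at a time.
 */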
void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* move on to the next swap cache chunk until we pass end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

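/*
 * VMA based readahead is used unless it has been disabled via sysfs or a
 * rotational swap device is in use, in which case cluster based readahead
 * is used instead.
 */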
static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	INC_CACHE_INFO(find_total);
	if (page) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		INC_CACHE_INFO(find_success);
		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(PageTransCompound(page)))
			return page;

		readahead = TestClearPageReadahead(page);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return page;
}

/**
 * find_get_incore_page - Find and get a page from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from find_get_page() in that it will also look for the
 * page in the swap cache.
 *
 * Return: The found page or %NULL.
 */
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct page *page = pagecache_get_page(mapping, index,
						FGP_ENTRY | FGP_HEAD, 0);

	if (!page)
		return page;
	if (!xa_is_value(page))
		return find_subpage(page, index);
	if (!shmem_mapping(mapping))
		return NULL;

	swp = radix_to_swp_entry(page);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(swp), swp_offset(swp));
	put_swap_device(si);
	return page;
}

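/*
 * Look up @entry in the swap cache; if it is not there, allocate a new
 * page, claim the entry with swapcache_prepare(), charge it, insert it
 * into the swap cache and the LRU, and set *@new_page_allocated so the
 * caller knows it must start the actual read.  Returns the page with a
 * reference held (locked if newly allocated), or NULL if the entry is no
 * longer in use or the allocation failed.
 */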
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct page *page;
	void *shadow = NULL;

	*new_page_allocated = false;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			return NULL;
		page = find_get_page(swap_address_space(entry),
				     swp_offset(entry));
		put_swap_device(si);
		if (page)
			return page;

		/*
		 * Just skip readahead for an unused swap slot.
		 * During swap_off, when swap_slot_cache is disabled,
		 * we have to handle the race between putting the
		 * swap entry into the swap cache and marking the swap
		 * slot as SWAP_HAS_CACHE.  That is done later in this
		 * function, or else swap_off would be aborted if we
		 * returned NULL here.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			return NULL;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		page = alloc_page_vma(gfp_mask, vma, addr);
		if (!page)
			return NULL;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		put_page(page);
		if (err != -EEXIST)
			return NULL;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__SetPageLocked(page);
	__SetPageSwapBacked(page);

	if (mem_cgroup_swapin_charge_page(page, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
		workingset_refault(page_folio(page), shadow);

	/* Caller will initiate read into locked page */
	lru_cache_add(page);
	*new_page_allocated = true;
	return page;

fail_unlock:
	put_swap_page(page, entry);
	unlock_page(page);
	put_page(page);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
				   struct vm_area_struct *vma,
				   unsigned long addr, bool do_poll,
				   struct swap_iocb **plug)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll, plug);

	return retpage;
}

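/*
 * Compute the readahead window.  When there are readahead hits the window
 * is hits + 2, rounded up to a power of two; with no hits it shrinks to a
 * single page unless this fault is adjacent to the previous one.  The
 * result is capped at @max_pages and never drops below half of the
 * previous window, so a momentary miss does not collapse it at once.
 */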
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

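/*
 * Cluster readahead keeps its state in globals: the swapin_readahead_hits
 * counter, the last faulting offset and the last window size.  The window
 * is bounded by 1 << page_cluster (the vm.page-cluster sysctl).
 */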
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
}

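/*
 * The swap cache for one swap device is not a single address_space: it is
 * split into chunks of SWAP_ADDRESS_SPACE_PAGES slots, each with its own
 * xarray and lock, primarily so that parallel swap cache operations on one
 * device do not all contend on a single i_pages lock.
 */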
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

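/*
 * Clamp the readahead window [lpfn, rpfn) so that it stays inside the VMA
 * and inside the PMD (page table page) containing the faulting address,
 * since only that page table is mapped by the caller.
 */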
static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

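/*
 * Work out the VMA based readahead window around the faulting address:
 * size it with __swapin_nr_pages() from the per-VMA hit statistics, clamp
 * it to the VMA and the current page table, and remember the PTEs covering
 * it so swap_vma_readahead() can walk them.
 */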
static void swap_ra_info(struct vm_fault *vmf,
			struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);

	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 *
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {
		.win = 1,
	};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1, NULL);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead either cluster-based (i.e. by physical
 * disk offset) or VMA-based (i.e. by virtual addresses around the
 * faulting address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}

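/*
 * Expose the VMA readahead switch as /sys/kernel/mm/swap/vma_ra_enabled
 * (a "swap" kobject under mm_kobj); writing false falls back to cluster
 * based readahead for all swap faults.
 */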
#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif