xref: /linux/mm/swap_state.c (revision 1b0975ee3bdd3eb19a47371c26fd7ef8f7f6b599)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/mm/swap_state.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  *  Swap reorganised 29.12.95, Stephen Tweedie
7  *
8  *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
9  */
10 #include <linux/mm.h>
11 #include <linux/gfp.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/swap.h>
14 #include <linux/swapops.h>
15 #include <linux/init.h>
16 #include <linux/pagemap.h>
17 #include <linux/backing-dev.h>
18 #include <linux/blkdev.h>
19 #include <linux/migrate.h>
20 #include <linux/vmalloc.h>
21 #include <linux/swap_slots.h>
22 #include <linux/huge_mm.h>
23 #include <linux/shmem_fs.h>
24 #include "internal.h"
25 #include "swap.h"
26 
27 /*
28  * swapper_space is a fiction, retained to simplify the path through
29  * vmscan's shrink_page_list.
30  */
31 static const struct address_space_operations swap_aops = {
32 	.writepage	= swap_writepage,
33 	.dirty_folio	= noop_dirty_folio,
34 #ifdef CONFIG_MIGRATION
35 	.migrate_folio	= migrate_folio,
36 #endif
37 };
38 
39 struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
40 static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
41 static bool enable_vma_readahead __read_mostly = true;
42 
43 #define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
44 #define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
45 #define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
46 #define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)
47 
48 #define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
49 #define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
50 #define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)
51 
52 #define SWAP_RA_VAL(addr, win, hits)				\
53 	(((addr) & PAGE_MASK) |					\
54 	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
55 	 ((hits) & SWAP_RA_HITS_MASK))
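/*
 * The per-VMA readahead state is packed into a single value in
 * vma->swap_readahead_info: the page-aligned fault address in the upper
 * bits, the readahead window in the upper half of the page-offset bits,
 * and the hit count in the lower half.  For example, assuming 4K pages
 * (PAGE_SHIFT == 12, so SWAP_RA_WIN_SHIFT == 6):
 *
 *	v = SWAP_RA_VAL(0x7f0000001000, 8, 3) == 0x7f0000001000 | (8 << 6) | 3
 *	SWAP_RA_ADDR(v) == 0x7f0000001000, SWAP_RA_WIN(v) == 8, SWAP_RA_HITS(v) == 3
 */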
56 
57 /* The initial readahead hit count is 4, to start up with a small window */
58 #define GET_SWAP_RA_VAL(vma)					\
59 	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
60 
61 static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);
62 
63 void show_swap_cache_info(void)
64 {
65 	printk("%lu pages in swap cache\n", total_swapcache_pages());
66 	printk("Free swap  = %ldkB\n",
67 		get_nr_swap_pages() << (PAGE_SHIFT - 10));
68 	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
69 }
70 
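/*
 * Return the workingset shadow entry stored in the swap cache slot for
 * @entry, or NULL if the slot is empty or holds a real folio.
 */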
71 void *get_shadow_from_swap_cache(swp_entry_t entry)
72 {
73 	struct address_space *address_space = swap_address_space(entry);
74 	pgoff_t idx = swp_offset(entry);
75 	struct page *page;
76 
77 	page = xa_load(&address_space->i_pages, idx);
78 	if (xa_is_value(page))
79 		return page;
80 	return NULL;
81 }
82 
83 /*
84  * add_to_swap_cache resembles filemap_add_folio on swapper_space,
85  * but sets SwapCache flag and private instead of mapping and index.
86  */
87 int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
88 			gfp_t gfp, void **shadowp)
89 {
90 	struct address_space *address_space = swap_address_space(entry);
91 	pgoff_t idx = swp_offset(entry);
92 	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
93 	unsigned long i, nr = folio_nr_pages(folio);
94 	void *old;
95 
96 	xas_set_update(&xas, workingset_update_node);
97 
98 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
99 	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
100 	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);
101 
102 	folio_ref_add(folio, nr);
103 	folio_set_swapcache(folio);
104 
105 	do {
106 		xas_lock_irq(&xas);
107 		xas_create_range(&xas);
108 		if (xas_error(&xas))
109 			goto unlock;
110 		for (i = 0; i < nr; i++) {
111 			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
112 			old = xas_load(&xas);
113 			if (xa_is_value(old)) {
114 				if (shadowp)
115 					*shadowp = old;
116 			}
117 			set_page_private(folio_page(folio, i), entry.val + i);
118 			xas_store(&xas, folio);
119 			xas_next(&xas);
120 		}
121 		address_space->nrpages += nr;
122 		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
123 		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
124 unlock:
125 		xas_unlock_irq(&xas);
126 	} while (xas_nomem(&xas, gfp));
127 
128 	if (!xas_error(&xas))
129 		return 0;
130 
131 	folio_clear_swapcache(folio);
132 	folio_ref_sub(folio, nr);
133 	return xas_error(&xas);
134 }
135 
136 /*
137  * This must be called only on folios that have
138  * been verified to be in the swap cache.
139  */
140 void __delete_from_swap_cache(struct folio *folio,
141 			swp_entry_t entry, void *shadow)
142 {
143 	struct address_space *address_space = swap_address_space(entry);
144 	int i;
145 	long nr = folio_nr_pages(folio);
146 	pgoff_t idx = swp_offset(entry);
147 	XA_STATE(xas, &address_space->i_pages, idx);
148 
149 	xas_set_update(&xas, workingset_update_node);
150 
151 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
152 	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
153 	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
154 
155 	for (i = 0; i < nr; i++) {
156 		void *entry = xas_store(&xas, shadow);
157 		VM_BUG_ON_PAGE(entry != folio, entry);
158 		set_page_private(folio_page(folio, i), 0);
159 		xas_next(&xas);
160 	}
161 	folio_clear_swapcache(folio);
162 	address_space->nrpages -= nr;
163 	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
164 	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
165 }
166 
167 /**
168  * add_to_swap - allocate swap space for a folio
169  * @folio: folio we want to move to swap
170  *
171  * Allocate swap space for the folio and add the folio to the
172  * swap cache.
173  *
174  * Context: Caller needs to hold the folio lock.
175  * Return: Whether the folio was added to the swap cache.
176  */
177 bool add_to_swap(struct folio *folio)
178 {
179 	swp_entry_t entry;
180 	int err;
181 
182 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
183 	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);
184 
185 	entry = folio_alloc_swap(folio);
186 	if (!entry.val)
187 		return false;
188 
189 	/*
190 	 * XArray node allocations from PF_MEMALLOC contexts could
191 	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
192 	 * stops emergency reserves from being allocated.
193 	 *
194 	 * TODO: this could cause a theoretical memory reclaim
195 	 * deadlock in the swap out path.
196 	 */
197 	/*
198 	 * Add it to the swap cache.
199 	 */
200 	err = add_to_swap_cache(folio, entry,
201 			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
202 	if (err)
203 		/*
204 		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
205 		 * clear the SWAP_HAS_CACHE flag.
206 		 */
207 		goto fail;
208 	/*
209 	 * Normally the folio will be dirtied during unmap because its
210 	 * pte should be dirty. A special case is an MADV_FREE page: its
211 	 * pte could have the dirty bit cleared while the folio's
212 	 * SwapBacked flag is still set, because clearing the dirty bit
213 	 * and the SwapBacked flag is not protected by a lock. For such a
214 	 * folio, unmap will not set the dirty bit, so folio reclaim will
215 	 * not write the folio out. This can cause data corruption when
216 	 * the folio is swapped in later. Always setting the dirty flag
217 	 * for the folio solves the problem.
218 	 */
219 	folio_mark_dirty(folio);
220 
221 	return true;
222 
223 fail:
224 	put_swap_folio(folio, entry);
225 	return false;
226 }
227 
228 /*
229  * This must be called only on folios that have
230  * been verified to be in the swap cache and locked.
231  * It will never put the folio into the free list,
232  * the caller has a reference on the folio.
233  */
234 void delete_from_swap_cache(struct folio *folio)
235 {
236 	swp_entry_t entry = folio_swap_entry(folio);
237 	struct address_space *address_space = swap_address_space(entry);
238 
239 	xa_lock_irq(&address_space->i_pages);
240 	__delete_from_swap_cache(folio, entry, NULL);
241 	xa_unlock_irq(&address_space->i_pages);
242 
243 	put_swap_folio(folio, entry);
244 	folio_ref_sub(folio, folio_nr_pages(folio));
245 }
246 
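/*
 * Drop any workingset shadow entries left in the swap cache for swap
 * device @type over the offset range [@begin, @end], walking one
 * SWAP_ADDRESS_SPACE_PAGES-sized address space at a time.
 */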
247 void clear_shadow_from_swap_cache(int type, unsigned long begin,
248 				unsigned long end)
249 {
250 	unsigned long curr = begin;
251 	void *old;
252 
253 	for (;;) {
254 		swp_entry_t entry = swp_entry(type, curr);
255 		struct address_space *address_space = swap_address_space(entry);
256 		XA_STATE(xas, &address_space->i_pages, curr);
257 
258 		xas_set_update(&xas, workingset_update_node);
259 
260 		xa_lock_irq(&address_space->i_pages);
261 		xas_for_each(&xas, old, end) {
262 			if (!xa_is_value(old))
263 				continue;
264 			xas_store(&xas, NULL);
265 		}
266 		xa_unlock_irq(&address_space->i_pages);
267 
268 		/* Advance to the next swap address space chunk; stop once past end. */
269 		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
270 		curr++;
271 		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
272 		if (curr > end)
273 			break;
274 	}
275 }
276 
277 /*
278  * If we are the only user, then try to free up the swap cache.
279  *
280  * It's OK to check the swapcache flag without the folio lock
281  * here because we are going to recheck again inside
282  * folio_free_swap() _with_ the lock.
283  * 					- Marcelo
284  */
285 void free_swap_cache(struct page *page)
286 {
287 	struct folio *folio = page_folio(page);
288 
289 	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
290 	    folio_trylock(folio)) {
291 		folio_free_swap(folio);
292 		folio_unlock(folio);
293 	}
294 }
295 
296 /*
297  * Perform a free_page(), also freeing any swap cache associated with
298  * this page if it is the last user of the page.
299  */
300 void free_page_and_swap_cache(struct page *page)
301 {
302 	free_swap_cache(page);
303 	if (!is_huge_zero_page(page))
304 		put_page(page);
305 }
306 
307 /*
308  * Passed an array of pages, drop them all from swapcache and then release
309  * them.  They are removed from the LRU and freed if this is their last use.
310  */
311 void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
312 {
313 	lru_add_drain();
314 	for (int i = 0; i < nr; i++)
315 		free_swap_cache(encoded_page_ptr(pages[i]));
316 	release_pages(pages, nr);
317 }
318 
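/*
 * VMA readahead is used unless it has been disabled via sysfs
 * (vma_ra_enabled) or at least one rotational swap device is in use.
 */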
319 static inline bool swap_use_vma_readahead(void)
320 {
321 	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
322 }
323 
324 /*
325  * Look up a swap entry in the swap cache. A found folio will be returned
326  * unlocked and with its refcount incremented - we rely on the kernel
327  * lock to keep page table operations atomic even if we drop the folio
328  * lock before returning.
329  *
330  * Caller must lock the swap device or hold a reference to keep it valid.
331  */
332 struct folio *swap_cache_get_folio(swp_entry_t entry,
333 		struct vm_area_struct *vma, unsigned long addr)
334 {
335 	struct folio *folio;
336 
337 	folio = filemap_get_folio(swap_address_space(entry), swp_offset(entry));
338 	if (!IS_ERR(folio)) {
339 		bool vma_ra = swap_use_vma_readahead();
340 		bool readahead;
341 
342 		/*
343 		 * At the moment, we don't support PG_readahead for anon THP
344 		 * so let's bail out rather than confusing the readahead stat.
345 		 */
346 		if (unlikely(folio_test_large(folio)))
347 			return folio;
348 
349 		readahead = folio_test_clear_readahead(folio);
350 		if (vma && vma_ra) {
351 			unsigned long ra_val;
352 			int win, hits;
353 
354 			ra_val = GET_SWAP_RA_VAL(vma);
355 			win = SWAP_RA_WIN(ra_val);
356 			hits = SWAP_RA_HITS(ra_val);
357 			if (readahead)
358 				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
359 			atomic_long_set(&vma->swap_readahead_info,
360 					SWAP_RA_VAL(addr, win, hits));
361 		}
362 
363 		if (readahead) {
364 			count_vm_event(SWAP_RA_HIT);
365 			if (!vma || !vma_ra)
366 				atomic_inc(&swapin_readahead_hits);
367 		}
368 	} else {
369 		folio = NULL;
370 	}
371 
372 	return folio;
373 }
374 
375 /**
376  * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
377  * @mapping: The address_space to search.
378  * @index: The page cache index.
379  *
380  * This differs from filemap_get_folio() in that it will also look for the
381  * folio in the swap cache.
382  *
383  * Return: The found folio or %NULL.
384  */
385 struct folio *filemap_get_incore_folio(struct address_space *mapping,
386 		pgoff_t index)
387 {
388 	swp_entry_t swp;
389 	struct swap_info_struct *si;
390 	struct folio *folio = filemap_get_entry(mapping, index);
391 
392 	if (!folio)
393 		return ERR_PTR(-ENOENT);
394 	if (!xa_is_value(folio))
395 		return folio;
396 	if (!shmem_mapping(mapping))
397 		return ERR_PTR(-ENOENT);
398 
399 	swp = radix_to_swp_entry(folio);
400 	/* There might be swapin error entries in shmem mapping. */
401 	if (non_swap_entry(swp))
402 		return ERR_PTR(-ENOENT);
403 	/* Prevent swapoff from happening to us */
404 	si = get_swap_device(swp);
405 	if (!si)
406 		return ERR_PTR(-ENOENT);
407 	index = swp_offset(swp);
408 	folio = filemap_get_folio(swap_address_space(swp), index);
409 	put_swap_device(si);
410 	return folio;
411 }
412 
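/*
 * Look up @entry in the swap cache; on a miss, allocate a new folio,
 * claim the swap slot with swapcache_prepare() (setting SWAP_HAS_CACHE),
 * charge it and add it to the swap cache, then return it locked with
 * *new_page_allocated set to true so the caller can start the read.
 * On a hit the cached page is returned and *new_page_allocated stays
 * false.  Returns NULL if the entry is no longer in use or if the
 * allocation or cache insertion fails.
 */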
413 struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
414 			struct vm_area_struct *vma, unsigned long addr,
415 			bool *new_page_allocated)
416 {
417 	struct swap_info_struct *si;
418 	struct folio *folio;
419 	struct page *page;
420 	void *shadow = NULL;
421 
422 	*new_page_allocated = false;
423 	si = get_swap_device(entry);
424 	if (!si)
425 		return NULL;
426 
427 	for (;;) {
428 		int err;
429 		/*
430 		 * First check the swap cache.  Since this is normally
431 		 * called after swap_cache_get_folio() failed, re-calling
432 		 * that would confuse statistics.
433 		 */
434 		folio = filemap_get_folio(swap_address_space(entry),
435 						swp_offset(entry));
436 		if (!IS_ERR(folio)) {
437 			page = folio_file_page(folio, swp_offset(entry));
438 			goto got_page;
439 		}
440 
441 		/*
442 		 * Just skip readahead for an unused swap slot.
443 		 * During swapoff, when swap_slot_cache is disabled,
444 		 * we have to handle the race between putting a
445 		 * swap entry in the swap cache and marking the swap slot
446 		 * as SWAP_HAS_CACHE.  That's done in a later part of the code,
447 		 * or else swapoff will be aborted if we return NULL.
448 		 */
449 		if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
450 			goto fail_put_swap;
451 
452 		/*
453 		 * Get a new folio to read into from swap.  Allocate it now,
454 		 * before marking swap_map SWAP_HAS_CACHE; once that is set,
455 		 * -EEXIST will make any racers loop until we add it to the cache.
456 		 */
457 		folio = vma_alloc_folio(gfp_mask, 0, vma, addr, false);
458 		if (!folio)
459 			goto fail_put_swap;
460 
461 		/*
462 		 * Swap entry may have been freed since our caller observed it.
463 		 */
464 		err = swapcache_prepare(entry);
465 		if (!err)
466 			break;
467 
468 		folio_put(folio);
469 		if (err != -EEXIST)
470 			goto fail_put_swap;
471 
472 		/*
473 		 * We might race against __delete_from_swap_cache(), and
474 		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
475 		 * has not yet been cleared.  Or race against another
476 		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
477 		 * in swap_map, but not yet added its page to swap cache.
478 		 */
479 		schedule_timeout_uninterruptible(1);
480 	}
481 
482 	/*
483 	 * The swap entry is ours to swap in. Prepare the new page.
484 	 */
485 
486 	__folio_set_locked(folio);
487 	__folio_set_swapbacked(folio);
488 
489 	if (mem_cgroup_swapin_charge_folio(folio, NULL, gfp_mask, entry))
490 		goto fail_unlock;
491 
492 	/* May fail (-ENOMEM) if XArray node allocation failed. */
493 	if (add_to_swap_cache(folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
494 		goto fail_unlock;
495 
496 	mem_cgroup_swapin_uncharge_swap(entry);
497 
498 	if (shadow)
499 		workingset_refault(folio, shadow);
500 
501 	/* Caller will initiate read into locked folio */
502 	folio_add_lru(folio);
503 	*new_page_allocated = true;
504 	page = &folio->page;
505 got_page:
506 	put_swap_device(si);
507 	return page;
508 
509 fail_unlock:
510 	put_swap_folio(folio, entry);
511 	folio_unlock(folio);
512 	folio_put(folio);
513 fail_put_swap:
514 	put_swap_device(si);
515 	return NULL;
516 }
517 
518 /*
519  * Locate a page of swap in physical memory, reserving swap cache space
520  * and reading the disk if it is not already cached.
521  * A failure return means that either the page allocation failed or that
522  * the swap entry is no longer in use.
523  *
524  * get/put_swap_device() aren't needed to call this function, because
525  * __read_swap_cache_async() calls them and swap_readpage() holds the
526  * swap cache folio lock.
527  */
528 struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
529 				   struct vm_area_struct *vma,
530 				   unsigned long addr, bool do_poll,
531 				   struct swap_iocb **plug)
532 {
533 	bool page_was_allocated;
534 	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
535 			vma, addr, &page_was_allocated);
536 
537 	if (page_was_allocated)
538 		swap_readpage(retpage, do_poll, plug);
539 
540 	return retpage;
541 }
542 
543 static unsigned int __swapin_nr_pages(unsigned long prev_offset,
544 				      unsigned long offset,
545 				      int hits,
546 				      int max_pages,
547 				      int prev_win)
548 {
549 	unsigned int pages, last_ra;
550 
551 	/*
552 	 * This heuristic has been found to work well on both sequential and
553 	 * random loads, swapping to hard disk or to SSD: please don't ask
554 	 * what the "+ 2" means, it just happens to work well, that's all.
555 	 */
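	/*
	 * For example, before clamping to max_pages and prev_win / 2:
	 * 0 hits gives 2 pages (or just 1 if the fault is not adjacent to
	 * the previous one), 1-2 hits round up to 4 pages, 3-6 hits to 8,
	 * and so on, doubling as hits grow.
	 */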
556 	pages = hits + 2;
557 	if (pages == 2) {
558 		/*
559 		 * We can have no readahead hits to judge by: but must not get
560 		 * stuck here forever, so check for an adjacent offset instead
561 		 * (and don't even bother to check whether the swap type is the same).
562 		 */
563 		if (offset != prev_offset + 1 && offset != prev_offset - 1)
564 			pages = 1;
565 	} else {
566 		unsigned int roundup = 4;
567 		while (roundup < pages)
568 			roundup <<= 1;
569 		pages = roundup;
570 	}
571 
572 	if (pages > max_pages)
573 		pages = max_pages;
574 
575 	/* Don't shrink readahead too fast */
576 	last_ra = prev_win / 2;
577 	if (pages < last_ra)
578 		pages = last_ra;
579 
580 	return pages;
581 }
582 
583 static unsigned long swapin_nr_pages(unsigned long offset)
584 {
585 	static unsigned long prev_offset;
586 	unsigned int hits, pages, max_pages;
587 	static atomic_t last_readahead_pages;
588 
589 	max_pages = 1 << READ_ONCE(page_cluster);
590 	if (max_pages <= 1)
591 		return 1;
592 
593 	hits = atomic_xchg(&swapin_readahead_hits, 0);
594 	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
595 				  max_pages,
596 				  atomic_read(&last_readahead_pages));
597 	if (!hits)
598 		WRITE_ONCE(prev_offset, offset);
599 	atomic_set(&last_readahead_pages, pages);
600 
601 	return pages;
602 }
603 
604 /**
605  * swap_cluster_readahead - swap in pages in hope we need them soon
606  * @entry: swap entry of this memory
607  * @gfp_mask: memory allocation flags
608  * @vmf: fault information
609  *
610  * Returns the struct page for entry and addr, after queueing swapin.
611  *
612  * Primitive swap readahead code. We simply read an aligned block of
613  * (1 << page_cluster) entries in the swap area. This method is chosen
614  * because it doesn't cost us any seek time.  We also make sure to queue
615  * the 'original' request together with the readahead ones...
616  *
617  * This has been extended to use the NUMA policies from the mm triggering
618  * the readahead.
619  *
620  * Caller must hold read mmap_lock if vmf->vma is not NULL.
621  */
622 struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
623 				struct vm_fault *vmf)
624 {
625 	struct page *page;
626 	unsigned long entry_offset = swp_offset(entry);
627 	unsigned long offset = entry_offset;
628 	unsigned long start_offset, end_offset;
629 	unsigned long mask;
630 	struct swap_info_struct *si = swp_swap_info(entry);
631 	struct blk_plug plug;
632 	struct swap_iocb *splug = NULL;
633 	bool do_poll = true, page_allocated;
634 	struct vm_area_struct *vma = vmf->vma;
635 	unsigned long addr = vmf->address;
636 
637 	mask = swapin_nr_pages(offset) - 1;
638 	if (!mask)
639 		goto skip;
640 
641 	do_poll = false;
642 	/* Read a page_cluster sized and aligned cluster around offset. */
643 	start_offset = offset & ~mask;
644 	end_offset = offset | mask;
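	/*
	 * E.g. a window of 8 pages (mask == 7) around a fault at offset
	 * 0x12 reads the aligned cluster of offsets 0x10 through 0x17.
	 */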
645 	if (!start_offset)	/* First page is swap header. */
646 		start_offset++;
647 	if (end_offset >= si->max)
648 		end_offset = si->max - 1;
649 
650 	blk_start_plug(&plug);
651 	for (offset = start_offset; offset <= end_offset ; offset++) {
652 		/* Ok, do the async read-ahead now */
653 		page = __read_swap_cache_async(
654 			swp_entry(swp_type(entry), offset),
655 			gfp_mask, vma, addr, &page_allocated);
656 		if (!page)
657 			continue;
658 		if (page_allocated) {
659 			swap_readpage(page, false, &splug);
660 			if (offset != entry_offset) {
661 				SetPageReadahead(page);
662 				count_vm_event(SWAP_RA);
663 			}
664 		}
665 		put_page(page);
666 	}
667 	blk_finish_plug(&plug);
668 	swap_read_unplug(splug);
669 
670 	lru_add_drain();	/* Push any new pages onto the LRU now */
671 skip:
672 	/* The page was likely read above, so no need for plugging here */
673 	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
674 }
675 
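/*
 * Set up one swap cache address_space per SWAP_ADDRESS_SPACE_PAGES chunk
 * of the swap device, so that cache lookups and updates for different
 * parts of the device do not contend on a single xarray lock.
 */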
676 int init_swap_address_space(unsigned int type, unsigned long nr_pages)
677 {
678 	struct address_space *spaces, *space;
679 	unsigned int i, nr;
680 
681 	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
682 	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
683 	if (!spaces)
684 		return -ENOMEM;
685 	for (i = 0; i < nr; i++) {
686 		space = spaces + i;
687 		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
688 		atomic_set(&space->i_mmap_writable, 0);
689 		space->a_ops = &swap_aops;
690 		/* swap cache doesn't use writeback related tags */
691 		mapping_set_no_writeback_tags(space);
692 	}
693 	nr_swapper_spaces[type] = nr;
694 	swapper_spaces[type] = spaces;
695 
696 	return 0;
697 }
698 
699 void exit_swap_address_space(unsigned int type)
700 {
701 	int i;
702 	struct address_space *spaces = swapper_spaces[type];
703 
704 	for (i = 0; i < nr_swapper_spaces[type]; i++)
705 		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
706 	kvfree(spaces);
707 	nr_swapper_spaces[type] = 0;
708 	swapper_spaces[type] = NULL;
709 }
710 
711 #define SWAP_RA_ORDER_CEILING	5
712 
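/*
 * Per-fault VMA readahead decision: @win is the readahead window size in
 * pages, @nr_pte the number of PTEs to scan around the fault, and
 * @offset the index of the faulting page within that range.
 */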
713 struct vma_swap_readahead {
714 	unsigned short win;
715 	unsigned short offset;
716 	unsigned short nr_pte;
717 };
718 
719 static void swap_ra_info(struct vm_fault *vmf,
720 			 struct vma_swap_readahead *ra_info)
721 {
722 	struct vm_area_struct *vma = vmf->vma;
723 	unsigned long ra_val;
724 	unsigned long faddr, pfn, fpfn, lpfn, rpfn;
725 	unsigned long start, end;
726 	unsigned int max_win, hits, prev_win, win;
727 
728 	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
729 			     SWAP_RA_ORDER_CEILING);
730 	if (max_win == 1) {
731 		ra_info->win = 1;
732 		return;
733 	}
734 
735 	faddr = vmf->address;
736 	fpfn = PFN_DOWN(faddr);
737 	ra_val = GET_SWAP_RA_VAL(vma);
738 	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
739 	prev_win = SWAP_RA_WIN(ra_val);
740 	hits = SWAP_RA_HITS(ra_val);
741 	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
742 					       max_win, prev_win);
743 	atomic_long_set(&vma->swap_readahead_info,
744 			SWAP_RA_VAL(faddr, win, 0));
745 	if (win == 1)
746 		return;
747 
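	/*
	 * Place the window around the faulting pfn according to the apparent
	 * access direction: extend forward if the previous fault hit the
	 * preceding page, backward if it hit the following page, otherwise
	 * roughly center the window on the fault.
	 */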
748 	if (fpfn == pfn + 1) {
749 		lpfn = fpfn;
750 		rpfn = fpfn + win;
751 	} else if (pfn == fpfn + 1) {
752 		lpfn = fpfn - win + 1;
753 		rpfn = fpfn + 1;
754 	} else {
755 		unsigned int left = (win - 1) / 2;
756 
757 		lpfn = fpfn - left;
758 		rpfn = fpfn + win - left;
759 	}
760 	start = max3(lpfn, PFN_DOWN(vma->vm_start),
761 		     PFN_DOWN(faddr & PMD_MASK));
762 	end = min3(rpfn, PFN_DOWN(vma->vm_end),
763 		   PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
764 
765 	ra_info->nr_pte = end - start;
766 	ra_info->offset = fpfn - start;
767 }
768 
769 /**
770  * swap_vma_readahead - swap in pages in hope we need them soon
771  * @fentry: swap entry of this memory
772  * @gfp_mask: memory allocation flags
773  * @vmf: fault information
774  *
775  * Returns the struct page for entry and addr, after queueing swapin.
776  *
777  * Primitive swap readahead code. We simply read in a few pages whose
778  * virtual addresses are around the fault address in the same vma.
779  *
780  * Caller must hold read mmap_lock if vmf->vma is not NULL.
781  *
782  */
783 static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
784 				       struct vm_fault *vmf)
785 {
786 	struct blk_plug plug;
787 	struct swap_iocb *splug = NULL;
788 	struct vm_area_struct *vma = vmf->vma;
789 	struct page *page;
790 	pte_t *pte = NULL, pentry;
791 	unsigned long addr;
792 	swp_entry_t entry;
793 	unsigned int i;
794 	bool page_allocated;
795 	struct vma_swap_readahead ra_info = {
796 		.win = 1,
797 	};
798 
799 	swap_ra_info(vmf, &ra_info);
800 	if (ra_info.win == 1)
801 		goto skip;
802 
803 	addr = vmf->address - (ra_info.offset * PAGE_SIZE);
804 
805 	blk_start_plug(&plug);
806 	for (i = 0; i < ra_info.nr_pte; i++, addr += PAGE_SIZE) {
807 		if (!pte++) {
808 			pte = pte_offset_map(vmf->pmd, addr);
809 			if (!pte)
810 				break;
811 		}
812 		pentry = ptep_get_lockless(pte);
813 		if (!is_swap_pte(pentry))
814 			continue;
815 		entry = pte_to_swp_entry(pentry);
816 		if (unlikely(non_swap_entry(entry)))
817 			continue;
818 		pte_unmap(pte);
819 		pte = NULL;
820 		page = __read_swap_cache_async(entry, gfp_mask, vma,
821 					       addr, &page_allocated);
822 		if (!page)
823 			continue;
824 		if (page_allocated) {
825 			swap_readpage(page, false, &splug);
826 			if (i != ra_info.offset) {
827 				SetPageReadahead(page);
828 				count_vm_event(SWAP_RA);
829 			}
830 		}
831 		put_page(page);
832 	}
833 	if (pte)
834 		pte_unmap(pte);
835 	blk_finish_plug(&plug);
836 	swap_read_unplug(splug);
837 	lru_add_drain();
838 skip:
839 	/* The page was likely read above, so no need for plugging here */
840 	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
841 				     ra_info.win == 1, NULL);
842 }
843 
844 /**
845  * swapin_readahead - swap in pages in hope we need them soon
846  * @entry: swap entry of this memory
847  * @gfp_mask: memory allocation flags
848  * @vmf: fault information
849  *
850  * Returns the struct page for entry and addr, after queueing swapin.
851  *
852  * This is the main entry point for swap readahead. Depending on the
853  * configuration, it reads ahead blocks using either cluster-based (i.e.
854  * physical disk based) or VMA-based (i.e. faulting address based) readahead.
855  */
856 struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
857 				struct vm_fault *vmf)
858 {
859 	return swap_use_vma_readahead() ?
860 			swap_vma_readahead(entry, gfp_mask, vmf) :
861 			swap_cluster_readahead(entry, gfp_mask, vmf);
862 }
863 
864 #ifdef CONFIG_SYSFS
865 static ssize_t vma_ra_enabled_show(struct kobject *kobj,
866 				     struct kobj_attribute *attr, char *buf)
867 {
868 	return sysfs_emit(buf, "%s\n",
869 			  enable_vma_readahead ? "true" : "false");
870 }
871 static ssize_t vma_ra_enabled_store(struct kobject *kobj,
872 				      struct kobj_attribute *attr,
873 				      const char *buf, size_t count)
874 {
875 	ssize_t ret;
876 
877 	ret = kstrtobool(buf, &enable_vma_readahead);
878 	if (ret)
879 		return ret;
880 
881 	return count;
882 }
883 static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);
884 
885 static struct attribute *swap_attrs[] = {
886 	&vma_ra_enabled_attr.attr,
887 	NULL,
888 };
889 
890 static const struct attribute_group swap_attr_group = {
891 	.attrs = swap_attrs,
892 };
893 
894 static int __init swap_init_sysfs(void)
895 {
896 	int err;
897 	struct kobject *swap_kobj;
898 
899 	swap_kobj = kobject_create_and_add("swap", mm_kobj);
900 	if (!swap_kobj) {
901 		pr_err("failed to create swap kobject\n");
902 		return -ENOMEM;
903 	}
904 	err = sysfs_create_group(swap_kobj, &swap_attr_group);
905 	if (err) {
906 		pr_err("failed to register swap group\n");
907 		goto delete_obj;
908 	}
909 	return 0;
910 
911 delete_obj:
912 	kobject_put(swap_kobj);
913 	return err;
914 }
915 subsys_initcall(swap_init_sysfs);
916 #endif
917