// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/mempolicy.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_folio_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_ORDER_CEILING	5

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Free swap  = %ldkB\n", K(get_nr_swap_pages()));
	printk("Total swap = %lukB\n", K(total_swap_pages));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swap_cache_index(entry);
	void *shadow;

	shadow = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(shadow))
		return shadow;
	return NULL;
}

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
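 *
 * A sketch of the resulting state on success (derived from the code
 * below, not separate documentation): the folio gains one reference per
 * subpage, has its swapcache flag set, records the swap entry in
 * folio->swap, and fills folio_nr_pages() consecutive XArray slots
 * starting at swap_cache_index(entry); any shadow entry found in those
 * slots is reported back through @shadowp.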
 */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swap_cache_index(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));
	unsigned long i, nr = folio_nr_pages(folio);
	void *old;

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapbacked(folio), folio);

	folio_ref_add(folio, nr);
	folio_set_swapcache(folio);
	folio->swap = entry;

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_FOLIO(xas.xa_index != idx + i, folio);
			if (shadowp) {
				old = xas_load(&xas);
				if (xa_is_value(old))
					*shadowp = old;
			}
			xas_store(&xas, folio);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__node_stat_mod_folio(folio, NR_FILE_PAGES, nr);
		__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	folio_clear_swapcache(folio);
	folio_ref_sub(folio, nr);
	return xas_error(&xas);
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i;
	long nr = folio_nr_pages(folio);
	pgoff_t idx = swap_cache_index(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	xas_set_update(&xas, workingset_update_node);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != folio, entry);
		xas_next(&xas);
	}
	folio->swap.val = 0;
	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(folio, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
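		 * Any error means the folio was never published in the swap
		 * cache, so put_swap_folio() in the fail path below can drop
		 * SWAP_HAS_CACHE without racing a concurrent lookup.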
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * pte should be dirty. A special case is an MADV_FREE page: its
	 * pte could have the dirty bit cleared while the folio's
	 * SwapBacked flag is still set, because clearing the dirty bit
	 * and the SwapBacked flag is not done under any lock. For such a
	 * folio, unmap will not set the dirty bit, so folio reclaim will
	 * not write the folio out. This can cause data corruption when
	 * the folio is swapped in later. Always setting the dirty flag
	 * for the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_folio(folio, entry);
	return false;
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
	swp_entry_t entry = folio->swap;
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_folio(folio, entry);
	folio_ref_sub(folio, folio_nr_pages(folio));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		unsigned long index = curr & SWAP_ADDRESS_SPACE_MASK;
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, index);

		xas_set_update(&xas, workingset_update_node);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, min(index + (end - curr), SWAP_ADDRESS_SPACE_PAGES)) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check the swapcache flag without the folio lock
 * here because we are going to recheck again inside
 * folio_free_swap() _with_ the lock.
 *					- Marcelo
 */
void free_swap_cache(struct folio *folio)
{
	if (folio_test_swapcache(folio) && !folio_mapped(folio) &&
	    folio_trylock(folio)) {
		folio_free_swap(folio);
		folio_unlock(folio);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	struct folio *folio = page_folio(page);

	free_swap_cache(folio);
	if (!is_huge_zero_folio(folio))
		folio_put(folio);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
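 *
 * An entry flagged ENCODED_PAGE_BIT_NR_PAGES_NEXT is followed by an
 * array slot that encodes the number of references to drop for that
 * folio, rather than another page pointer (see the loop below).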
 */
void free_pages_and_swap_cache(struct encoded_page **pages, int nr)
{
	struct folio_batch folios;
	unsigned int refs[PAGEVEC_SIZE];

	folio_batch_init(&folios);
	for (int i = 0; i < nr; i++) {
		struct folio *folio = page_folio(encoded_page_ptr(pages[i]));

		free_swap_cache(folio);
		refs[folios.nr] = 1;
		if (unlikely(encoded_page_flags(pages[i]) &
			     ENCODED_PAGE_BIT_NR_PAGES_NEXT))
			refs[folios.nr] = encoded_nr_pages(pages[++i]);

		if (folio_batch_add(&folios, folio) == 0)
			folios_put_refs(&folios, refs);
	}
	if (folios.nr)
		folios_put_refs(&folios, refs);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

/*
 * Lookup a swap entry in the swap cache. A found folio will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the folio
 * lock before returning.
 *
 * Caller must lock the swap device or hold a reference to keep it valid.
 */
struct folio *swap_cache_get_folio(swp_entry_t entry,
		struct vm_area_struct *vma, unsigned long addr)
{
	struct folio *folio;

	folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
	if (!IS_ERR(folio)) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(folio_test_large(folio)))
			return folio;

		readahead = folio_test_clear_readahead(folio);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	} else {
		folio = NULL;
	}

	return folio;
}

/**
 * filemap_get_incore_folio - Find and get a folio from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from filemap_get_folio() in that it will also look for the
 * folio in the swap cache.
 *
 * Return: The found folio or an ERR_PTR() on failure.
 */
struct folio *filemap_get_incore_folio(struct address_space *mapping,
		pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct folio *folio = filemap_get_entry(mapping, index);

	if (!folio)
		return ERR_PTR(-ENOENT);
	if (!xa_is_value(folio))
		return folio;
	if (!shmem_mapping(mapping))
		return ERR_PTR(-ENOENT);

	swp = radix_to_swp_entry(folio);
	/* There might be swapin error entries in shmem mapping.
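	 * Those are encoded as non-swap entries, so report them as absent
	 * rather than looking them up in the swap cache.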
	 */
	if (non_swap_entry(swp))
		return ERR_PTR(-ENOENT);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return ERR_PTR(-ENOENT);
	index = swap_cache_index(swp);
	folio = filemap_get_folio(swap_address_space(swp), index);
	put_swap_device(si);
	return folio;
}

struct folio *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t ilx, bool *new_page_allocated,
		bool skip_if_exists)
{
	struct swap_info_struct *si;
	struct folio *folio;
	struct folio *new_folio = NULL;
	struct folio *result = NULL;
	void *shadow = NULL;

	*new_page_allocated = false;
	si = get_swap_device(entry);
	if (!si)
		return NULL;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after swap_cache_get_folio() failed, re-calling
		 * that would confuse statistics.
		 */
		folio = filemap_get_folio(swap_address_space(entry),
					  swap_cache_index(entry));
		if (!IS_ERR(folio))
			goto got_folio;

		/*
		 * Just skip readahead for an unused swap slot.
		 * During swap_off, when swap_slot_cache is disabled,
		 * we have to handle the race between putting a
		 * swap entry in the swap cache and marking the swap slot
		 * as SWAP_HAS_CACHE.  That's handled later in this function,
		 * or else swap_off will be aborted if we return NULL.
		 */
		if (!swap_swapcount(si, entry) && swap_slot_cache_enabled)
			goto put_and_return;

		/*
		 * Get a new folio to read into from swap.  Allocate it now if
		 * new_folio doesn't exist, before marking swap_map
		 * SWAP_HAS_CACHE, since -EEXIST will cause any racers to loop
		 * around until we add it to the cache.
		 */
		if (!new_folio) {
			new_folio = folio_alloc_mpol(gfp_mask, 0, mpol, ilx, numa_node_id());
			if (!new_folio)
				goto put_and_return;
		}

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry, 1);
		if (!err)
			break;
		else if (err != -EEXIST)
			goto put_and_return;

		/*
		 * Protect against a recursive call to __read_swap_cache_async()
		 * on the same entry waiting forever here because SWAP_HAS_CACHE
		 * is set but the folio is not in the swap cache yet. This can
		 * happen today if mem_cgroup_swapin_charge_folio() below
		 * triggers reclaim through zswap, which may call
		 * __read_swap_cache_async() in the writeback path.
		 */
		if (skip_if_exists)
			goto put_and_return;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its folio to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new folio.
	 */
	__folio_set_locked(new_folio);
	__folio_set_swapbacked(new_folio);

	if (mem_cgroup_swapin_charge_folio(new_folio, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed.
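	 * The fail_unlock path below undoes swapcache_prepare(), so any
	 * racer spinning on -EEXIST above can make progress.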
	 */
	if (add_to_swap_cache(new_folio, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry, 1);

	if (shadow)
		workingset_refault(new_folio, shadow);

	/* Caller will initiate read into locked new_folio */
	folio_add_lru(new_folio);
	*new_page_allocated = true;
	folio = new_folio;
got_folio:
	result = folio;
	goto put_and_return;

fail_unlock:
	put_swap_folio(new_folio, entry);
	folio_unlock(new_folio);

put_and_return:
	put_swap_device(si);
	if (!(*new_page_allocated) && new_folio)
		folio_put(new_folio);
	return result;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 *
 * get/put_swap_device() aren't needed to call this function, because
 * __read_swap_cache_async() calls them and swap_read_folio() holds the
 * swap cache folio lock.
 */
struct folio *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr,
		struct swap_iocb **plug)
{
	bool page_allocated;
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct folio *folio;

	mpol = get_vma_policy(vma, addr, 0, &ilx);
	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	mpol_cond_put(mpol);

	if (page_allocated)
		swap_read_folio(folio, plug);
	return folio;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
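		 *
		 * Worked example: with hits == 0 and a non-adjacent offset
		 * the window collapses to a single page; with hits == 3 the
		 * "hits + 2" rule gives 5, which the roundup loop below
		 * raises to the next power of two, 8.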
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * Note: it is intentional that the same NUMA policy and interleave index
 * are used for every page of the readahead: neighbouring pages on swap
 * are fairly likely to have been swapped out from the same node.
 */
struct folio *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				    struct mempolicy *mpol, pgoff_t ilx)
{
	struct folio *folio;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool page_allocated;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)		/* First page is swap header */
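		/* slot 0 holds the swap signature, so start the window at 1 */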
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		folio = __read_swap_cache_async(
				swp_entry(swp_type(entry), offset),
				gfp_mask, mpol, ilx, &page_allocated, false);
		if (!folio)
			continue;
		if (page_allocated) {
			swap_read_folio(folio, &splug);
			if (offset != entry_offset) {
				folio_set_readahead(folio);
				count_vm_event(SWAP_RA);
			}
		}
		folio_put(folio);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
					&page_allocated, false);
	if (unlikely(page_allocated))
		swap_read_folio(folio, NULL);
	return folio;
}

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

static int swap_vma_ra_win(struct vm_fault *vmf, unsigned long *start,
			   unsigned long *end)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, prev_faddr, left, right;
	unsigned int max_win, hits, prev_win, win;

	max_win = 1 << min(READ_ONCE(page_cluster), SWAP_RA_ORDER_CEILING);
	if (max_win == 1)
		return 1;

	faddr = vmf->address;
	ra_val = GET_SWAP_RA_VAL(vma);
	prev_faddr = SWAP_RA_ADDR(ra_val);
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	win = __swapin_nr_pages(PFN_DOWN(prev_faddr), PFN_DOWN(faddr), hits,
				max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info, SWAP_RA_VAL(faddr, win, 0));
	if (win == 1)
		return 1;

	if (faddr == prev_faddr + PAGE_SIZE)
		left = faddr;
	else if (prev_faddr == faddr + PAGE_SIZE)
		left = faddr - (win << PAGE_SHIFT) + PAGE_SIZE;
	else
		left = faddr - (((win - 1) / 2) << PAGE_SHIFT);
	right = left + (win << PAGE_SHIFT);
	if ((long)left < 0)
		left = 0;
	*start = max3(left, vma->vm_start, faddr & PMD_MASK);
	*end = min3(right, vma->vm_end, (faddr & PMD_MASK) + PMD_SIZE);

	return win;
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @targ_entry: swap entry of the targeted memory
 * @gfp_mask: memory allocation flags
 * @mpol: NUMA memory allocation policy to be applied
 * @targ_ilx: NUMA interleave index, for use only when MPOL_INTERLEAVE
 * @vmf: fault information
 *
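 * The readahead window is computed by swap_vma_ra_win() from the
 * per-VMA hit counter in vma->swap_readahead_info, then clamped to the
 * VMA bounds and to the PMD containing the faulting address.
 *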
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct folio *swap_vma_readahead(swp_entry_t targ_entry, gfp_t gfp_mask,
		struct mempolicy *mpol, pgoff_t targ_ilx, struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct folio *folio;
	pte_t *pte = NULL, pentry;
	int win;
	unsigned long start, end, addr;
	swp_entry_t entry;
	pgoff_t ilx;
	bool page_allocated;

	win = swap_vma_ra_win(vmf, &start, &end);
	if (win == 1)
		goto skip;

	ilx = targ_ilx - PFN_DOWN(vmf->address - start);

	blk_start_plug(&plug);
	for (addr = start; addr < end; ilx++, addr += PAGE_SIZE) {
		/* pte is NULL initially and after each pte_unmap() below */
		if (!pte++) {
			pte = pte_offset_map(vmf->pmd, addr);
			if (!pte)
				break;
		}
		pentry = ptep_get_lockless(pte);
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		pte_unmap(pte);
		pte = NULL;
		folio = __read_swap_cache_async(entry, gfp_mask, mpol, ilx,
						&page_allocated, false);
		if (!folio)
			continue;
		if (page_allocated) {
			swap_read_folio(folio, &splug);
			if (addr != vmf->address) {
				folio_set_readahead(folio);
				count_vm_event(SWAP_RA);
			}
		}
		folio_put(folio);
	}
	if (pte)
		pte_unmap(pte);
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The folio was likely read above, so no need for plugging here */
	folio = __read_swap_cache_async(targ_entry, gfp_mask, mpol, targ_ilx,
					&page_allocated, false);
	if (unlikely(page_allocated))
		swap_read_folio(folio, NULL);
	return folio;
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct folio for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on
 * configuration, it reads ahead either by cluster (i.e. physical,
 * disk-offset based) or by VMA (i.e. based on virtual addresses
 * around the faulting address).
 */
struct folio *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			       struct vm_fault *vmf)
{
	struct mempolicy *mpol;
	pgoff_t ilx;
	struct folio *folio;

	mpol = get_vma_policy(vmf->vma, vmf->address, 0, &ilx);
	folio = swap_use_vma_readahead() ?
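		/* VMA readahead unless disabled via sysfs or a rotational
		 * swap device is in use (see swap_use_vma_readahead()) */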
		swap_vma_readahead(entry, gfp_mask, mpol, ilx, vmf) :
		swap_cluster_readahead(entry, gfp_mask, mpol, ilx);
	mpol_cond_put(mpol);

	return folio;
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				   struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n", str_true_false(enable_vma_readahead));
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				    struct kobj_attribute *attr,
				    const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif