// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"
#include "swap.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.dirty_folio	= noop_dirty_folio,
#ifdef CONFIG_MIGRATION
	.migrate_folio	= migrate_folio,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))
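/*
 * Illustrative layout of the value packed above, assuming PAGE_SHIFT == 12
 * (so SWAP_RA_WIN_SHIFT == 6 and SWAP_RA_HITS_MAX == 63):
 *
 *	bits [5:0]	readahead hit count
 *	bits [11:6]	readahead window size
 *	higher bits	page-aligned address of the last readahead
 *
 * e.g. SWAP_RA_VAL(addr, 8, 3) == (addr & PAGE_MASK) | (8 << 6) | 3
 */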

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(page))
		return page;
	return NULL;
}

/*
 * add_to_swap_cache resembles filemap_add_folio on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
	unsigned long i, nr = thp_nr_pages(page);
	void *old;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	do {
		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				if (shadowp)
					*shadowp = old;
			}
			set_page_private(page + i, entry.val + i);
			xas_store(&xas, page);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		__mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	ClearPageSwapCache(page);
	page_ref_sub(page, nr);
	return xas_error(&xas);
}
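/*
 * Note on the loop above: if xas_create_range() cannot allocate an
 * XArray node under the lock, xas_error() is set to -ENOMEM;
 * xas_nomem() then allocates a node outside the critical section with
 * the caller's gfp mask and returns true to retry, so the loop only
 * terminates on success or on a hard allocation failure.
 */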

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct folio *folio,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i;
	long nr = folio_nr_pages(folio);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_FOLIO(entry != folio, folio);
		set_page_private(folio_page(folio, i), 0);
		xas_next(&xas);
	}
	folio_clear_swapcache(folio);
	address_space->nrpages -= nr;
	__node_stat_mod_folio(folio, NR_FILE_PAGES, -nr);
	__lruvec_stat_mod_folio(folio, NR_SWAPCACHE, -nr);
}

/**
 * add_to_swap - allocate swap space for a folio
 * @folio: folio we want to move to swap
 *
 * Allocate swap space for the folio and add the folio to the
 * swap cache.
 *
 * Context: Caller needs to hold the folio lock.
 * Return: Whether the folio was added to the swap cache.
 */
bool add_to_swap(struct folio *folio)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio);

	entry = folio_alloc_swap(folio);
	if (!entry.val)
		return false;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(&folio->page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the folio will be dirtied in unmap because its
	 * pte should be dirty. A special case is an MADV_FREE page: its
	 * pte could have the dirty bit cleared while the folio's
	 * SwapBacked flag is still set, because clearing the dirty bit
	 * and the SwapBacked flag is not protected by any lock. For such
	 * a folio, unmap will not set the dirty bit, so folio reclaim
	 * will not write the folio out. This can cause data corruption
	 * when the folio is swapped in later. Always setting the dirty
	 * flag for the folio solves the problem.
	 */
	folio_mark_dirty(folio);

	return true;

fail:
	put_swap_page(&folio->page, entry);
	return false;
}

/*
 * This must be called only on folios that have
 * been verified to be in the swap cache and locked.
 * It will never put the folio into the free list,
 * the caller has a reference on the folio.
 */
void delete_from_swap_cache(struct folio *folio)
{
	swp_entry_t entry = folio_swap_entry(folio);
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(folio, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(&folio->page, entry);
	folio_ref_sub(folio, folio_nr_pages(folio));
}
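/*
 * Remove the shadow entries for swap offsets [begin, end] of a swap
 * device. Offsets are sharded into one address space per
 * SWAP_ADDRESS_SPACE_PAGES slots (see swap_address_space()), so each
 * loop iteration below walks one shard and then rounds curr up to the
 * start of the next one.
 */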
void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
		}
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *					- Marcelo
 */
void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}
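/*
 * VMA-based readahead is used unless at least one rotational swap
 * device is in use (nr_rotate_swap) or it has been disabled through
 * /sys/kernel/mm/swap/vma_ra_enabled (see the sysfs code at the bottom
 * of this file), e.g.:
 *
 *	echo 0 > /sys/kernel/mm/swap/vma_ra_enabled
 *
 * Otherwise swapin_readahead() falls back to cluster-based readahead.
 */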

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock to keep page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	if (page) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(PageTransCompound(page)))
			return page;

		readahead = TestClearPageReadahead(page);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return page;
}

/**
 * find_get_incore_page - Find and get a page from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from find_get_page() in that it will also look for the
 * page in the swap cache.
 *
 * Return: The found page or %NULL.
 */
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct page *page = pagecache_get_page(mapping, index,
						FGP_ENTRY | FGP_HEAD, 0);

	if (!page)
		return page;
	if (!xa_is_value(page))
		return find_subpage(page, index);
	if (!shmem_mapping(mapping))
		return NULL;

	swp = radix_to_swp_entry(page);
	/* There might be swapin error entries in shmem mapping. */
	if (non_swap_entry(swp))
		return NULL;
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(swp), swp_offset(swp));
	put_swap_device(si);
	return page;
}
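/*
 * Find or create a page for the given swap entry. Three outcomes are
 * possible: a page already in the swap cache is returned with
 * *new_page_allocated false; a freshly allocated, locked page that won
 * the SWAP_HAS_CACHE race is returned with *new_page_allocated true,
 * already in the swap cache and on the LRU (the caller must start the
 * read); or NULL if the entry is no longer in use or allocation failed.
 */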
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct page *page;
	void *shadow = NULL;

	*new_page_allocated = false;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			return NULL;
		page = find_get_page(swap_address_space(entry),
				     swp_offset(entry));
		put_swap_device(si);
		if (page)
			return page;

		/*
		 * Just skip readahead for an unused swap slot.
		 * During swapoff, when swap_slot_cache is disabled,
		 * we have to handle the race between putting the
		 * swap entry in the swap cache and marking the swap slot
		 * as SWAP_HAS_CACHE.  That's handled later in this
		 * function, or else swapoff will be aborted if we
		 * return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			return NULL;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		page = alloc_page_vma(gfp_mask, vma, addr);
		if (!page)
			return NULL;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		put_page(page);
		if (err != -EEXIST)
			return NULL;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		schedule_timeout_uninterruptible(1);
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__SetPageLocked(page);
	__SetPageSwapBacked(page);

	if (mem_cgroup_swapin_charge_page(page, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
		workingset_refault(page_folio(page), shadow);

	/* Caller will initiate read into locked page */
	lru_cache_add(page);
	*new_page_allocated = true;
	return page;

fail_unlock:
	put_swap_page(page, entry);
	unlock_page(page);
	put_page(page);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
				   struct vm_area_struct *vma,
				   unsigned long addr, bool do_poll,
				   struct swap_iocb **plug)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll, plug);

	return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}
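/*
 * Worked example for the heuristic above: with hits == 5 the initial
 * estimate is 7 pages, rounded up to the next power of two, 8, then
 * clamped to max_pages (1 << page_cluster, typically 8). With hits == 0
 * the window collapses to a single page unless the faulting offset is
 * adjacent to the previous one, and the window never shrinks below half
 * of the previous one.
 */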
static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll, NULL);
}
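/*
 * Worked example for swap_cluster_readahead() above: with a window of
 * 8 pages, mask == 7, so a fault on swap offset 21 reads the aligned
 * block of offsets 16 (21 & ~7) through 23 (21 | 7). All eight pages
 * are queued, but only the seven that were not asked for are marked
 * PageReadahead and counted as SWAP_RA.
 */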
int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	int i;
	struct address_space *spaces = swapper_spaces[type];

	for (i = 0; i < nr_swapper_spaces[type]; i++)
		VM_WARN_ON_ONCE(!mapping_empty(&spaces[i]));
	kvfree(spaces);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

static void swap_ra_info(struct vm_fault *vmf,
			 struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);

	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct swap_iocb *splug = NULL;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {
		.win = 1,
	};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (!is_swap_pte(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false, &splug);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	swap_read_unplug(splug);
	lru_add_drain();
skip:
	/* The page was likely read above, so no need for plugging here */
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1, NULL);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * This is the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead either by cluster (i.e. physical,
 * disk-based blocks) or by VMA (i.e. virtual addresses around the
 * faulting address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	ssize_t ret;

	ret = kstrtobool(buf, &enable_vma_readahead);
	if (ret)
		return ret;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr = __ATTR_RW(vma_ra_enabled);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif