// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include "internal.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
        .writepage      = swap_writepage,
        .set_page_dirty = swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
        .migratepage    = migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT       (PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK       ((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX        SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK        (~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)         ((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)          (((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)         ((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)                            \
        (((addr) & PAGE_MASK) |                                 \
         (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |    \
         ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)                                    \
        (atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
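
/*
 * Illustrative layout of the packed readahead word, assuming PAGE_SHIFT
 * is 12 so that SWAP_RA_WIN_SHIFT is 6: bits 0-5 hold the hit count,
 * bits 6-11 hold the window size, and bits 12 and up hold the
 * page-aligned fault address.  SWAP_RA_VAL(0x7f0000001000, 8, 3) would
 * then pack to 0x7f0000001000 | (8 << 6) | 3, so address, window and
 * hits can all be updated with a single atomic_long_set().
 */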

#define INC_CACHE_INFO(x)       data_race(swap_cache_info.x++)
#define ADD_CACHE_INFO(x, nr)   data_race(swap_cache_info.x += (nr))

static struct {
        unsigned long add_total;
        unsigned long del_total;
        unsigned long find_success;
        unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
        unsigned int i, j, nr;
        unsigned long ret = 0;
        struct address_space *spaces;
        struct swap_info_struct *si;

        for (i = 0; i < MAX_SWAPFILES; i++) {
                swp_entry_t entry = swp_entry(i, 1);

                /* Don't let get_swap_device() warn about a bad swap entry */
                if (!swp_swap_info(entry))
                        continue;
                /* Prevent swapoff from freeing swapper_spaces */
                si = get_swap_device(entry);
                if (!si)
                        continue;
                nr = nr_swapper_spaces[i];
                spaces = swapper_spaces[i];
                for (j = 0; j < nr; j++)
                        ret += spaces[j].nrpages;
                put_swap_device(si);
        }
        return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
        printk("%lu pages in swap cache\n", total_swapcache_pages());
        printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
                swap_cache_info.add_total, swap_cache_info.del_total,
                swap_cache_info.find_success, swap_cache_info.find_total);
        printk("Free swap  = %ldkB\n",
                get_nr_swap_pages() << (PAGE_SHIFT - 10));
        printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        struct page *page;

        page = find_get_entry(address_space, idx);
        if (xa_is_value(page))
                return page;
        if (page)
                put_page(page);
        return NULL;
}

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry,
                        gfp_t gfp, void **shadowp)
{
        struct address_space *address_space = swap_address_space(entry);
        pgoff_t idx = swp_offset(entry);
        XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
        unsigned long i, nr = thp_nr_pages(page);
        void *old;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(PageSwapCache(page), page);
        VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

        page_ref_add(page, nr);
        SetPageSwapCache(page);

        do {
                unsigned long nr_shadows = 0;

                xas_lock_irq(&xas);
                xas_create_range(&xas);
                if (xas_error(&xas))
                        goto unlock;
                for (i = 0; i < nr; i++) {
                        VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
                        old = xas_load(&xas);
                        if (xa_is_value(old)) {
                                nr_shadows++;
                                if (shadowp)
                                        *shadowp = old;
                        }
                        set_page_private(page + i, entry.val + i);
                        xas_store(&xas, page);
                        xas_next(&xas);
                }
                address_space->nrexceptional -= nr_shadows;
                address_space->nrpages += nr;
                __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
                ADD_CACHE_INFO(add_total, nr);
unlock:
                xas_unlock_irq(&xas);
        } while (xas_nomem(&xas, gfp));

        if (!xas_error(&xas))
                return 0;

        ClearPageSwapCache(page);
        page_ref_sub(page, nr);
        return xas_error(&xas);
}
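
/*
 * For example (illustrative): caching a 2MB THP with 4KB base pages via
 * the function above fills 512 consecutive XArray slots, all pointing at
 * the head page, while page_private() of each subpage records that
 * subpage's own swap entry.  Any shadow (workingset) entries displaced
 * from those slots are subtracted from nrexceptional, and the last one
 * seen is reported through @shadowp for use by workingset_refault().
 */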

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page,
                        swp_entry_t entry, void *shadow)
{
        struct address_space *address_space = swap_address_space(entry);
        int i, nr = thp_nr_pages(page);
        pgoff_t idx = swp_offset(entry);
        XA_STATE(xas, &address_space->i_pages, idx);

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageSwapCache(page), page);
        VM_BUG_ON_PAGE(PageWriteback(page), page);

        for (i = 0; i < nr; i++) {
                void *entry = xas_store(&xas, shadow);
                VM_BUG_ON_PAGE(entry != page, entry);
                set_page_private(page + i, 0);
                xas_next(&xas);
        }
        ClearPageSwapCache(page);
        if (shadow)
                address_space->nrexceptional += nr;
        address_space->nrpages -= nr;
        __mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
        ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
        swp_entry_t entry;
        int err;

        VM_BUG_ON_PAGE(!PageLocked(page), page);
        VM_BUG_ON_PAGE(!PageUptodate(page), page);

        entry = get_swap_page(page);
        if (!entry.val)
                return 0;

        /*
         * XArray node allocations from PF_MEMALLOC contexts could
         * completely exhaust the page allocator. __GFP_NOMEMALLOC
         * stops emergency reserves from being allocated.
         *
         * TODO: this could cause a theoretical memory reclaim
         * deadlock in the swap out path.
         */
        /*
         * Add it to the swap cache.
         */
        err = add_to_swap_cache(page, entry,
                        __GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
        if (err)
                /*
                 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
                 * clear SWAP_HAS_CACHE flag.
                 */
                goto fail;
        /*
         * Normally the page will be dirtied in unmap because its pte should
         * be dirty.  A special case is an MADV_FREE page: its pte could have
         * the dirty bit cleared while the page's SwapBacked bit is still set,
         * because clearing the dirty bit and the SwapBacked bit is not done
         * under a common lock.  For such a page, unmap will not set the dirty
         * bit, so page reclaim will not write the page out.  This can cause
         * data corruption when the page is swapped in later.  Always setting
         * the dirty bit for the page solves the problem.
         */
        set_page_dirty(page);

        return 1;

fail:
        put_swap_page(page, entry);
        return 0;
}
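
/*
 * Typical call path (sketch): vmscan's shrink_page_list() invokes
 * add_to_swap() on an anonymous page it wants to reclaim.  A return of 1
 * means the page now owns a swap slot and sits dirty in the swap cache,
 * ready to be written out by pageout(); 0 tells reclaim to keep the page
 * in memory instead.
 */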

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
        swp_entry_t entry = { .val = page_private(page) };
        struct address_space *address_space = swap_address_space(entry);

        xa_lock_irq(&address_space->i_pages);
        __delete_from_swap_cache(page, entry, NULL);
        xa_unlock_irq(&address_space->i_pages);

        put_swap_page(page, entry);
        page_ref_sub(page, thp_nr_pages(page));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
                                unsigned long end)
{
        unsigned long curr = begin;
        void *old;

        for (;;) {
                unsigned long nr_shadows = 0;
                swp_entry_t entry = swp_entry(type, curr);
                struct address_space *address_space = swap_address_space(entry);
                XA_STATE(xas, &address_space->i_pages, curr);

                xa_lock_irq(&address_space->i_pages);
                xas_for_each(&xas, old, end) {
                        if (!xa_is_value(old))
                                continue;
                        xas_store(&xas, NULL);
                        nr_shadows++;
                }
                address_space->nrexceptional -= nr_shadows;
                xa_unlock_irq(&address_space->i_pages);

                /* advance to the next swap address space until we pass end */
                curr >>= SWAP_ADDRESS_SPACE_SHIFT;
                curr++;
                curr <<= SWAP_ADDRESS_SPACE_SHIFT;
                if (curr > end)
                        break;
        }
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *                                      - Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
        if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
                try_to_free_swap(page);
                unlock_page(page);
        }
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
        free_swap_cache(page);
        if (!is_huge_zero_page(page))
                put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
        struct page **pagep = pages;
        int i;

        lru_add_drain();
        for (i = 0; i < nr; i++)
                free_swap_cache(pagep[i]);
        release_pages(pagep, nr);
}

static inline bool swap_use_vma_readahead(void)
{
        return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}
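
/*
 * Rough decision flow (illustrative): swapin_readahead() below picks the
 * VMA-based policy only when the sysfs knob vma_ra_enabled is true and
 * no rotating (seek-bound) swap device is in use; otherwise faults fall
 * back to the physical cluster readahead in swap_cluster_readahead().
 */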

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
                               unsigned long addr)
{
        struct page *page;
        struct swap_info_struct *si;

        si = get_swap_device(entry);
        if (!si)
                return NULL;
        page = find_get_page(swap_address_space(entry), swp_offset(entry));
        put_swap_device(si);

        INC_CACHE_INFO(find_total);
        if (page) {
                bool vma_ra = swap_use_vma_readahead();
                bool readahead;

                INC_CACHE_INFO(find_success);
                /*
                 * At the moment, we don't support PG_readahead for anon THP
                 * so let's bail out rather than confusing the readahead stat.
                 */
                if (unlikely(PageTransCompound(page)))
                        return page;

                readahead = TestClearPageReadahead(page);
                if (vma && vma_ra) {
                        unsigned long ra_val;
                        int win, hits;

                        ra_val = GET_SWAP_RA_VAL(vma);
                        win = SWAP_RA_WIN(ra_val);
                        hits = SWAP_RA_HITS(ra_val);
                        if (readahead)
                                hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
                        atomic_long_set(&vma->swap_readahead_info,
                                        SWAP_RA_VAL(addr, win, hits));
                }

                if (readahead) {
                        count_vm_event(SWAP_RA_HIT);
                        if (!vma || !vma_ra)
                                atomic_inc(&swapin_readahead_hits);
                }
        }

        return page;
}

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                        struct vm_area_struct *vma, unsigned long addr,
                        bool *new_page_allocated)
{
        struct swap_info_struct *si;
        struct page *page;
        void *shadow = NULL;

        *new_page_allocated = false;

        for (;;) {
                int err;
                /*
                 * First check the swap cache.  Since this is normally
                 * called after lookup_swap_cache() failed, re-calling
                 * that would confuse statistics.
                 */
                si = get_swap_device(entry);
                if (!si)
                        return NULL;
                page = find_get_page(swap_address_space(entry),
                                     swp_offset(entry));
                put_swap_device(si);
                if (page)
                        return page;

                /*
                 * Just skip readahead for an unused swap slot.
                 * During swap_off, when swap_slot_cache is disabled,
                 * we have to handle the race between putting the
                 * swap entry in the swap cache and marking the swap slot
                 * as SWAP_HAS_CACHE.  That's done in a later part of the
                 * code, or else swap_off will be aborted if we return NULL.
                 */
                if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
                        return NULL;

                /*
                 * Get a new page to read into from swap.  Allocate it now,
                 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
                 * cause any racers to loop around until we add it to cache.
                 */
                page = alloc_page_vma(gfp_mask, vma, addr);
                if (!page)
                        return NULL;

                /*
                 * Swap entry may have been freed since our caller observed it.
                 */
                err = swapcache_prepare(entry);
                if (!err)
                        break;

                put_page(page);
                if (err != -EEXIST)
                        return NULL;

                /*
                 * We might race against __delete_from_swap_cache(), and
                 * stumble across a swap_map entry whose SWAP_HAS_CACHE
                 * has not yet been cleared.  Or race against another
                 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
                 * in swap_map, but not yet added its page to swap cache.
                 */
                cond_resched();
        }

        /*
         * The swap entry is ours to swap in. Prepare the new page.
         */

        __SetPageLocked(page);
        __SetPageSwapBacked(page);

        /* May fail (-ENOMEM) if XArray node allocation failed. */
        if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
                put_swap_page(page, entry);
                goto fail_unlock;
        }

        if (mem_cgroup_charge(page, NULL, gfp_mask)) {
                delete_from_swap_cache(page);
                goto fail_unlock;
        }

        if (shadow)
                workingset_refault(page, shadow);

        /* Caller will initiate read into locked page */
        SetPageWorkingset(page);
        lru_cache_add(page);
        *new_page_allocated = true;
        return page;

fail_unlock:
        unlock_page(page);
        put_page(page);
        return NULL;
}
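
/*
 * Usage note (illustrative): on success the page returned above is in
 * the swap cache with its refcount raised.  *new_page_allocated tells
 * the caller whether it won the race and must still issue the read
 * (swap_readpage() on the locked page), or whether another task already
 * cached the page so no I/O needs to be started here.
 */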

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
                struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
        bool page_was_allocated;
        struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
                        vma, addr, &page_was_allocated);

        if (page_was_allocated)
                swap_readpage(retpage, do_poll);

        return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
                                      unsigned long offset,
                                      int hits,
                                      int max_pages,
                                      int prev_win)
{
        unsigned int pages, last_ra;

        /*
         * This heuristic has been found to work well on both sequential and
         * random loads, swapping to hard disk or to SSD: please don't ask
         * what the "+ 2" means, it just happens to work well, that's all.
         */
        pages = hits + 2;
        if (pages == 2) {
                /*
                 * We can have no readahead hits to judge by: but must not get
                 * stuck here forever, so check for an adjacent offset instead
                 * (and don't even bother to check whether swap type is same).
                 */
                if (offset != prev_offset + 1 && offset != prev_offset - 1)
                        pages = 1;
        } else {
                unsigned int roundup = 4;
                while (roundup < pages)
                        roundup <<= 1;
                pages = roundup;
        }

        if (pages > max_pages)
                pages = max_pages;

        /* Don't shrink readahead too fast */
        last_ra = prev_win / 2;
        if (pages < last_ra)
                pages = last_ra;

        return pages;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
        static unsigned long prev_offset;
        unsigned int hits, pages, max_pages;
        static atomic_t last_readahead_pages;

        max_pages = 1 << READ_ONCE(page_cluster);
        if (max_pages <= 1)
                return 1;

        hits = atomic_xchg(&swapin_readahead_hits, 0);
        pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
                                  max_pages,
                                  atomic_read(&last_readahead_pages));
        if (!hits)
                WRITE_ONCE(prev_offset, offset);
        atomic_set(&last_readahead_pages, pages);

        return pages;
}
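
/*
 * Worked example (illustrative): with page_cluster == 3, max_pages is 8.
 * A streak of 5 readahead hits gives pages = 5 + 2 = 7, rounded up to
 * the next power of two, so 8 pages are read.  With no hits and an
 * offset that is not adjacent to the previous one, the window collapses
 * to a single page (assuming the previous window does not force a
 * larger floor via last_ra).
 */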

/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        struct page *page;
        unsigned long entry_offset = swp_offset(entry);
        unsigned long offset = entry_offset;
        unsigned long start_offset, end_offset;
        unsigned long mask;
        struct swap_info_struct *si = swp_swap_info(entry);
        struct blk_plug plug;
        bool do_poll = true, page_allocated;
        struct vm_area_struct *vma = vmf->vma;
        unsigned long addr = vmf->address;

        mask = swapin_nr_pages(offset) - 1;
        if (!mask)
                goto skip;

        /* Test swap type to make sure the dereference is safe */
        if (likely(si->flags & (SWP_BLKDEV | SWP_FS))) {
                struct inode *inode = si->swap_file->f_mapping->host;
                if (inode_read_congested(inode))
                        goto skip;
        }

        do_poll = false;
        /* Read a page_cluster sized and aligned cluster around offset. */
        start_offset = offset & ~mask;
        end_offset = offset | mask;
        if (!start_offset)      /* First page is swap header. */
                start_offset++;
        if (end_offset >= si->max)
                end_offset = si->max - 1;

        blk_start_plug(&plug);
        for (offset = start_offset; offset <= end_offset ; offset++) {
                /* Ok, do the async read-ahead now */
                page = __read_swap_cache_async(
                        swp_entry(swp_type(entry), offset),
                        gfp_mask, vma, addr, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false);
                        if (offset != entry_offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);

        lru_add_drain();        /* Push any new pages onto the LRU now */
skip:
        return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
        struct address_space *spaces, *space;
        unsigned int i, nr;

        nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
        spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
        if (!spaces)
                return -ENOMEM;
        for (i = 0; i < nr; i++) {
                space = spaces + i;
                xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
                atomic_set(&space->i_mmap_writable, 0);
                space->a_ops = &swap_aops;
                /* swap cache doesn't use writeback related tags */
                mapping_set_no_writeback_tags(space);
        }
        nr_swapper_spaces[type] = nr;
        swapper_spaces[type] = spaces;

        return 0;
}

void exit_swap_address_space(unsigned int type)
{
        kvfree(swapper_spaces[type]);
        nr_swapper_spaces[type] = 0;
        swapper_spaces[type] = NULL;
}

static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
                                     unsigned long faddr,
                                     unsigned long lpfn,
                                     unsigned long rpfn,
                                     unsigned long *start,
                                     unsigned long *end)
{
        *start = max3(lpfn, PFN_DOWN(vma->vm_start),
                      PFN_DOWN(faddr & PMD_MASK));
        *end = min3(rpfn, PFN_DOWN(vma->vm_end),
                    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}
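
/*
 * Illustrative effect of the clamp above: a candidate window around the
 * faulting address is trimmed so that it never reaches outside the VMA
 * and never crosses the PMD containing the fault, because only that
 * PMD's page table is mapped when the PTEs are copied in swap_ra_info().
 */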

static void swap_ra_info(struct vm_fault *vmf,
                        struct vma_swap_readahead *ra_info)
{
        struct vm_area_struct *vma = vmf->vma;
        unsigned long ra_val;
        swp_entry_t entry;
        unsigned long faddr, pfn, fpfn;
        unsigned long start, end;
        pte_t *pte, *orig_pte;
        unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
        pte_t *tpte;
#endif

        max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
                             SWAP_RA_ORDER_CEILING);
        if (max_win == 1) {
                ra_info->win = 1;
                return;
        }

        faddr = vmf->address;
        orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
        entry = pte_to_swp_entry(*pte);
        if (unlikely(non_swap_entry(entry))) {
                pte_unmap(orig_pte);
                return;
        }

        fpfn = PFN_DOWN(faddr);
        ra_val = GET_SWAP_RA_VAL(vma);
        pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
        prev_win = SWAP_RA_WIN(ra_val);
        hits = SWAP_RA_HITS(ra_val);
        ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
                                               max_win, prev_win);
        atomic_long_set(&vma->swap_readahead_info,
                        SWAP_RA_VAL(faddr, win, 0));

        if (win == 1) {
                pte_unmap(orig_pte);
                return;
        }

        /* Copy the PTEs because the page table may be unmapped */
        if (fpfn == pfn + 1)
                swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
        else if (pfn == fpfn + 1)
                swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
                                  &start, &end);
        else {
                left = (win - 1) / 2;
                swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
                                  &start, &end);
        }
        ra_info->nr_pte = end - start;
        ra_info->offset = fpfn - start;
        pte -= ra_info->offset;
#ifdef CONFIG_64BIT
        ra_info->ptes = pte;
#else
        tpte = ra_info->ptes;
        for (pfn = start; pfn != end; pfn++)
                *tpte++ = *pte++;
#endif
        pte_unmap(orig_pte);
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 *
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
                                       struct vm_fault *vmf)
{
        struct blk_plug plug;
        struct vm_area_struct *vma = vmf->vma;
        struct page *page;
        pte_t *pte, pentry;
        swp_entry_t entry;
        unsigned int i;
        bool page_allocated;
        struct vma_swap_readahead ra_info = {0,};

        swap_ra_info(vmf, &ra_info);
        if (ra_info.win == 1)
                goto skip;

        blk_start_plug(&plug);
        for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
             i++, pte++) {
                pentry = *pte;
                if (pte_none(pentry))
                        continue;
                if (pte_present(pentry))
                        continue;
                entry = pte_to_swp_entry(pentry);
                if (unlikely(non_swap_entry(entry)))
                        continue;
                page = __read_swap_cache_async(entry, gfp_mask, vma,
                                               vmf->address, &page_allocated);
                if (!page)
                        continue;
                if (page_allocated) {
                        swap_readpage(page, false);
                        if (i != ra_info.offset) {
                                SetPageReadahead(page);
                                count_vm_event(SWAP_RA);
                        }
                }
                put_page(page);
        }
        blk_finish_plug(&plug);
        lru_add_drain();
skip:
        return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
                                     ra_info.win == 1);
}
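
/*
 * Example (illustrative): for a fault at virtual page V with a window of
 * 8 and a downward access pattern (the previous fault was at V + 1),
 * swap_ra_info() selects the PTEs covering V - 7 .. V, so readahead
 * follows the direction of the virtual scan rather than the physical
 * swap-slot order used by swap_cluster_readahead().
 */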

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead either cluster-based (i.e. by physical
 * disk offset) or VMA-based (i.e. by virtual addresses around the
 * faulting address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
                                struct vm_fault *vmf)
{
        return swap_use_vma_readahead() ?
               swap_vma_readahead(entry, gfp_mask, vmf) :
               swap_cluster_readahead(entry, gfp_mask, vmf);
}

#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
                                   struct kobj_attribute *attr, char *buf)
{
        return sprintf(buf, "%s\n", enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
                                    struct kobj_attribute *attr,
                                    const char *buf, size_t count)
{
        if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
                enable_vma_readahead = true;
        else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
                enable_vma_readahead = false;
        else
                return -EINVAL;

        return count;
}
static struct kobj_attribute vma_ra_enabled_attr =
        __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
               vma_ra_enabled_store);

static struct attribute *swap_attrs[] = {
        &vma_ra_enabled_attr.attr,
        NULL,
};

static struct attribute_group swap_attr_group = {
        .attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
        int err;
        struct kobject *swap_kobj;

        swap_kobj = kobject_create_and_add("swap", mm_kobj);
        if (!swap_kobj) {
                pr_err("failed to create swap kobject\n");
                return -ENOMEM;
        }
        err = sysfs_create_group(swap_kobj, &swap_attr_group);
        if (err) {
                pr_err("failed to register swap group\n");
                goto delete_obj;
        }
        return 0;

delete_obj:
        kobject_put(swap_kobj);
        return err;
}
subsys_initcall(swap_init_sysfs);
#endif