// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)

#define INC_CACHE_INFO(x)	data_race(swap_cache_info.x++)
#define ADD_CACHE_INFO(x, nr)	data_race(swap_cache_info.x += (nr))

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

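/*
 * A note on the lookups below: the swap cache is not one address_space but an
 * array of them per swap type.  swap_address_space(entry) (from
 * <linux/swap.h>) selects a chunk based on the entry's offset, and
 * init_swap_address_space() later in this file sizes that array as
 * DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES).  Assuming the usual
 * SWAP_ADDRESS_SPACE_SHIFT of 14 with 4KiB pages, each address_space covers
 * 16384 slots (64MiB of swap), which keeps each XArray and its lock small.
 */
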
void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(page))
		return page;
	return NULL;
}

/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
	unsigned long i, nr = thp_nr_pages(page);
	void *old;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	do {
		unsigned long nr_shadows = 0;

		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				nr_shadows++;
				if (shadowp)
					*shadowp = old;
			}
			set_page_private(page + i, entry.val + i);
			xas_store(&xas, page);
			xas_next(&xas);
		}
		address_space->nrexceptional -= nr_shadows;
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		__mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
		ADD_CACHE_INFO(add_total, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	ClearPageSwapCache(page);
	page_ref_sub(page, nr);
	return xas_error(&xas);
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i, nr = thp_nr_pages(page);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != page, entry);
		set_page_private(page + i, 0);
		xas_next(&xas);
	}
	ClearPageSwapCache(page);
	if (shadow)
		address_space->nrexceptional += nr;
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	__mod_lruvec_page_state(page, NR_SWAPCACHE, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the page will be dirtied in unmap because its pte should be
	 * dirty. A special case is MADV_FREE page. The page's pte could have
	 * the dirty bit cleared but the page's SwapBacked bit is still set
	 * because clearing the dirty bit and the SwapBacked bit is not
	 * protected by a lock. For such a page, unmap will not set the dirty
	 * bit, so page reclaim will not write the page out. This can cause
	 * data corruption when the page is swapped in later. Always setting
	 * the dirty bit for the page solves the problem.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry = { .val = page_private(page) };
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(page, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(page, entry);
	page_ref_sub(page, thp_nr_pages(page));
}

void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		unsigned long nr_shadows = 0;
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
			nr_shadows++;
		}
		address_space->nrexceptional -= nr_shadows;
		xa_unlock_irq(&address_space->i_pages);

		/* search the next swapcache until we meet end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}

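/*
 * The per-VMA readahead state read below via GET_SWAP_RA_VAL() packs three
 * fields into one long using the SWAP_RA_* macros defined above.  As a rough
 * worked example (assuming 4KiB pages, i.e. PAGE_SHIFT == 12, so
 * SWAP_RA_WIN_SHIFT == 6):
 *
 *	bits  0..5	hits  (saturates at SWAP_RA_HITS_MAX == 63)
 *	bits  6..11	win   (last readahead window size)
 *	bits 12..	addr  (page-aligned fault address)
 *
 * so SWAP_RA_VAL(0x7f1234567000, 8, 3) == 0x7f1234567000 | (8 << 6) | 3,
 * and SWAP_RA_WIN()/SWAP_RA_HITS() simply mask the value back apart.  The
 * exact bit widths depend on PAGE_SHIFT; the layout is what matters.
 */
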
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	INC_CACHE_INFO(find_total);
	if (page) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		INC_CACHE_INFO(find_success);
		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(PageTransCompound(page)))
			return page;

		readahead = TestClearPageReadahead(page);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return page;
}

/**
 * find_get_incore_page - Find and get a page from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from find_get_page() in that it will also look for the
 * page in the swap cache.
 *
 * Return: The found page or %NULL.
 */
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct page *page = pagecache_get_page(mapping, index,
						FGP_ENTRY | FGP_HEAD, 0);

	if (!page)
		return page;
	if (!xa_is_value(page))
		return find_subpage(page, index);
	if (!shmem_mapping(mapping))
		return NULL;

	swp = radix_to_swp_entry(page);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(swp), swp_offset(swp));
	put_swap_device(si);
	return page;
}

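/*
 * A short map of the retry protocol used by __read_swap_cache_async() below,
 * summarising the inline comments in the function body:
 *
 *	1. look the entry up in the swap cache; if present, return that page;
 *	2. allocate a page and try swapcache_prepare() to claim SWAP_HAS_CACHE;
 *	3. on -EEXIST another task owns (or is dropping) the cache entry, so
 *	   drop the page, cond_resched() and retry from step 1;
 *	4. once claimed, insert into the swap cache, charge to the memcg,
 *	   hand any workingset shadow to workingset_refault() and put the
 *	   locked page on the LRU so the caller can start the read.
 */
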
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct page *page;
	void *shadow = NULL;

	*new_page_allocated = false;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			return NULL;
		page = find_get_page(swap_address_space(entry),
				     swp_offset(entry));
		put_swap_device(si);
		if (page)
			return page;

		/*
		 * Just skip read ahead for unused swap slot.
		 * During swap_off when swap_slot_cache is disabled,
		 * we have to handle the race between putting
		 * swap entry in swap cache and marking swap slot
		 * as SWAP_HAS_CACHE.  That's done in a later part of this
		 * function, or else swap_off will be aborted if we return
		 * NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			return NULL;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		page = alloc_page_vma(gfp_mask, vma, addr);
		if (!page)
			return NULL;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		put_page(page);
		if (err != -EEXIST)
			return NULL;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared.  Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		cond_resched();
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__SetPageLocked(page);
	__SetPageSwapBacked(page);

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow)) {
		put_swap_page(page, entry);
		goto fail_unlock;
	}

	if (mem_cgroup_charge(page, NULL, gfp_mask)) {
		delete_from_swap_cache(page);
		goto fail_unlock;
	}

	if (shadow)
		workingset_refault(page, shadow);

	/* Caller will initiate read into locked page */
	lru_cache_add(page);
	*new_page_allocated = true;
	return page;

fail_unlock:
	unlock_page(page);
	put_page(page);
	return NULL;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}

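/*
 * Rough worked example of the window heuristic implemented below (the
 * max_pages figures assume the common vm.page-cluster default of 3, i.e.
 * max_pages == 8):
 *
 *	hits == 0: start from pages = 2, but fall back to 1 unless the
 *		   faulting offset is adjacent to the previous one;
 *	hits == 3: pages = 5, rounded up to the next power of two -> 8;
 *	hits == 7: pages = 9, rounded up to 16, then clamped to max_pages = 8.
 *
 * Whatever comes out is never allowed to drop below half of the previous
 * window, so the readahead window shrinks gradually rather than collapsing.
 */
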
static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}

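/*
 * Illustration of the cluster window computed below: mask is the readahead
 * window size minus one, so with a window of 8 and a faulting swap offset of
 * 0x123 we get start_offset = 0x123 & ~7 = 0x120 and end_offset = 0x123 | 7
 * = 0x127, i.e. an aligned block of 8 slots containing the original entry.
 * Offset 0 (the swap header) and anything past si->max are then excluded.
 */
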
/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Test swap type to make sure the dereference is safe */
	if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) {
		struct inode *inode = si->swap_file->f_mapping->host;
		if (inode_read_congested(inode))
			goto skip;
	}

	do_poll = false;
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	kvfree(swapper_spaces[type]);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}

static void swap_ra_info(struct vm_fault *vmf,
			struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	swp_entry_t entry;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
	entry = pte_to_swp_entry(*pte);
	if ((unlikely(non_swap_entry(entry)))) {
		pte_unmap(orig_pte);
		return;
	}

	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}

/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 *
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {
		.win = 1,
	};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry))
			continue;
		if (pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();
skip:
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry point for swap readahead. Depending on the
 * configuration, it reads ahead pages using cluster-based (i.e. physical
 * disk based) or VMA-based (i.e. virtual address based on the faulting
 * address) readahead.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}

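/*
 * The sysfs plumbing below exposes enable_vma_readahead as a boolean knob,
 * normally visible as /sys/kernel/mm/swap/vma_ra_enabled (mm_kobj is the
 * "mm" kobject under /sys/kernel).  For example, a typical way to switch
 * back to cluster-based readahead:
 *
 *	echo false > /sys/kernel/mm/swap/vma_ra_enabled
 *
 * The store handler accepts "true"/"1" and "false"/"0"; anything else
 * returns -EINVAL.
 */
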
"true" : "false"); 881 } 882 static ssize_t vma_ra_enabled_store(struct kobject *kobj, 883 struct kobj_attribute *attr, 884 const char *buf, size_t count) 885 { 886 if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1)) 887 enable_vma_readahead = true; 888 else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1)) 889 enable_vma_readahead = false; 890 else 891 return -EINVAL; 892 893 return count; 894 } 895 static struct kobj_attribute vma_ra_enabled_attr = 896 __ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show, 897 vma_ra_enabled_store); 898 899 static struct attribute *swap_attrs[] = { 900 &vma_ra_enabled_attr.attr, 901 NULL, 902 }; 903 904 static const struct attribute_group swap_attr_group = { 905 .attrs = swap_attrs, 906 }; 907 908 static int __init swap_init_sysfs(void) 909 { 910 int err; 911 struct kobject *swap_kobj; 912 913 swap_kobj = kobject_create_and_add("swap", mm_kobj); 914 if (!swap_kobj) { 915 pr_err("failed to create swap kobject\n"); 916 return -ENOMEM; 917 } 918 err = sysfs_create_group(swap_kobj, &swap_attr_group); 919 if (err) { 920 pr_err("failed to register swap group\n"); 921 goto delete_obj; 922 } 923 return 0; 924 925 delete_obj: 926 kobject_put(swap_kobj); 927 return err; 928 } 929 subsys_initcall(swap_init_sysfs); 930 #endif 931