// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/vmalloc.h>
#include <linux/swap_slots.h>
#include <linux/huge_mm.h>
#include <linux/shmem_fs.h>
#include "internal.h"

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space *swapper_spaces[MAX_SWAPFILES] __read_mostly;
static unsigned int nr_swapper_spaces[MAX_SWAPFILES] __read_mostly;
static bool enable_vma_readahead __read_mostly = true;

#define SWAP_RA_WIN_SHIFT	(PAGE_SHIFT / 2)
#define SWAP_RA_HITS_MASK	((1UL << SWAP_RA_WIN_SHIFT) - 1)
#define SWAP_RA_HITS_MAX	SWAP_RA_HITS_MASK
#define SWAP_RA_WIN_MASK	(~PAGE_MASK & ~SWAP_RA_HITS_MASK)

#define SWAP_RA_HITS(v)		((v) & SWAP_RA_HITS_MASK)
#define SWAP_RA_WIN(v)		(((v) & SWAP_RA_WIN_MASK) >> SWAP_RA_WIN_SHIFT)
#define SWAP_RA_ADDR(v)		((v) & PAGE_MASK)

#define SWAP_RA_VAL(addr, win, hits)				\
	(((addr) & PAGE_MASK) |					\
	 (((win) << SWAP_RA_WIN_SHIFT) & SWAP_RA_WIN_MASK) |	\
	 ((hits) & SWAP_RA_HITS_MASK))

/* Initial readahead hits is 4 to start up with a small window */
#define GET_SWAP_RA_VAL(vma)					\
	(atomic_long_read(&(vma)->swap_readahead_info) ? : 4)
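/*
 * Worked example of the encoding above (illustrative, assuming 4K pages,
 * i.e. PAGE_SHIFT == 12, so SWAP_RA_WIN_SHIFT == 6):
 *
 *   bits  0-5   readahead hits     (SWAP_RA_HITS_MASK == 0x3f)
 *   bits  6-11  readahead window   (SWAP_RA_WIN_MASK  == 0xfc0)
 *   bits 12+    last fault address (page aligned)
 *
 * SWAP_RA_VAL(0x7f0000, 8, 3) == 0x7f0000 | (8 << 6) | 3 == 0x7f0203.
 * GET_SWAP_RA_VAL() uses the GNU "?:" extension: an all-zero (never
 * initialised) value decodes as addr 0, win 0, hits 4, which is what
 * the "initial hits is 4" comment refers to.
 */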

#define INC_CACHE_INFO(x)	data_race(swap_cache_info.x++)
#define ADD_CACHE_INFO(x, nr)	data_race(swap_cache_info.x += (nr))

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

void *get_shadow_from_swap_cache(swp_entry_t entry)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	struct page *page;

	page = xa_load(&address_space->i_pages, idx);
	if (xa_is_value(page))
		return page;
	return NULL;
}
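/*
 * Note: a "shadow" here is not a struct page but an XArray value entry
 * (xa_is_value()) left behind by reclaim; it encodes workingset
 * eviction information and is consumed by workingset_refault() when
 * the entry is swapped back in (see __read_swap_cache_async() below).
 */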
/*
 * add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int add_to_swap_cache(struct page *page, swp_entry_t entry,
			gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, compound_order(page));
	unsigned long i, nr = thp_nr_pages(page);
	void *old;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_ref_add(page, nr);
	SetPageSwapCache(page);

	do {
		unsigned long nr_shadows = 0;

		xas_lock_irq(&xas);
		xas_create_range(&xas);
		if (xas_error(&xas))
			goto unlock;
		for (i = 0; i < nr; i++) {
			VM_BUG_ON_PAGE(xas.xa_index != idx + i, page);
			old = xas_load(&xas);
			if (xa_is_value(old)) {
				nr_shadows++;
				if (shadowp)
					*shadowp = old;
			}
			set_page_private(page + i, entry.val + i);
			xas_store(&xas, page);
			xas_next(&xas);
		}
		address_space->nrpages += nr;
		__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, nr);
		__mod_lruvec_page_state(page, NR_SWAPCACHE, nr);
		ADD_CACHE_INFO(add_total, nr);
unlock:
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));

	if (!xas_error(&xas))
		return 0;

	ClearPageSwapCache(page);
	page_ref_sub(page, nr);
	return xas_error(&xas);
}
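/*
 * The do/while above is the standard XArray retry idiom: if the store
 * needs a node that cannot be allocated under the spinlock, xas_error()
 * becomes -ENOMEM, xas_nomem() allocates the node with the caller's gfp
 * mask outside the lock and returns true, and the whole locked section
 * is retried from scratch.
 */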
/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page,
			swp_entry_t entry, void *shadow)
{
	struct address_space *address_space = swap_address_space(entry);
	int i, nr = thp_nr_pages(page);
	pgoff_t idx = swp_offset(entry);
	XA_STATE(xas, &address_space->i_pages, idx);

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	for (i = 0; i < nr; i++) {
		void *entry = xas_store(&xas, shadow);
		VM_BUG_ON_PAGE(entry != page, entry);
		set_page_private(page + i, 0);
		xas_next(&xas);
	}
	ClearPageSwapCache(page);
	address_space->nrpages -= nr;
	__mod_node_page_state(page_pgdat(page), NR_FILE_PAGES, -nr);
	__mod_lruvec_page_state(page, NR_SWAPCACHE, -nr);
	ADD_CACHE_INFO(del_total, nr);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page(page);
	if (!entry.val)
		return 0;

	/*
	 * XArray node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator. __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN, NULL);
	if (err)
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		goto fail;
	/*
	 * Normally the page will be dirtied in unmap because its pte should be
	 * dirty. A special case is an MADV_FREE page: its pte could have the
	 * dirty bit cleared while its SwapBacked bit is still set, because
	 * clearing the dirty bit and the SwapBacked bit is not done under a
	 * lock. For such a page, unmap will not set the dirty bit, so page
	 * reclaim will not write the page out. This can cause data corruption
	 * when the page is swapped in later. Always setting the dirty bit for
	 * the page solves the problem.
	 */
	set_page_dirty(page);

	return 1;

fail:
	put_swap_page(page, entry);
	return 0;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry = { .val = page_private(page) };
	struct address_space *address_space = swap_address_space(entry);

	xa_lock_irq(&address_space->i_pages);
	__delete_from_swap_cache(page, entry, NULL);
	xa_unlock_irq(&address_space->i_pages);

	put_swap_page(page, entry);
	page_ref_sub(page, thp_nr_pages(page));
}
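/*
 * The swap cache is split into per-trunk address_spaces: each
 * SWAP_ADDRESS_SPACE_PAGES-sized slice of a swap device (defined in
 * <linux/swap.h>; 1 << 14 entries, i.e. 64MB of swap assuming 4K pages)
 * gets its own address_space and hence its own XArray lock.  That is
 * why the function below must re-resolve swap_address_space() and
 * re-take the lock at every SWAP_ADDRESS_SPACE_SHIFT-aligned boundary.
 */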
void clear_shadow_from_swap_cache(int type, unsigned long begin,
				unsigned long end)
{
	unsigned long curr = begin;
	void *old;

	for (;;) {
		unsigned long nr_shadows = 0;
		swp_entry_t entry = swp_entry(type, curr);
		struct address_space *address_space = swap_address_space(entry);
		XA_STATE(xas, &address_space->i_pages, curr);

		xa_lock_irq(&address_space->i_pages);
		xas_for_each(&xas, old, end) {
			if (!xa_is_value(old))
				continue;
			xas_store(&xas, NULL);
			nr_shadows++;
		}
		xa_unlock_irq(&address_space->i_pages);

		/* advance to the next address-space trunk until we pass end */
		curr >>= SWAP_ADDRESS_SPACE_SHIFT;
		curr++;
		curr <<= SWAP_ADDRESS_SPACE_SHIFT;
		if (curr > end)
			break;
	}
}
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 * 					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr);
}

static inline bool swap_use_vma_readahead(void)
{
	return READ_ONCE(enable_vma_readahead) && !atomic_read(&nr_rotate_swap);
}
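/*
 * VMA-based readahead is used only while it is enabled and no rotating
 * swap device is in use (nr_rotate_swap tracks those): on spinning
 * disks, the physically contiguous reads issued by cluster readahead
 * are what keep seek costs down, so cluster readahead stays the
 * default there.
 */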
/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry, struct vm_area_struct *vma,
			       unsigned long addr)
{
	struct page *page;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(entry), swp_offset(entry));
	put_swap_device(si);

	INC_CACHE_INFO(find_total);
	if (page) {
		bool vma_ra = swap_use_vma_readahead();
		bool readahead;

		INC_CACHE_INFO(find_success);
		/*
		 * At the moment, we don't support PG_readahead for anon THP
		 * so let's bail out rather than confusing the readahead stat.
		 */
		if (unlikely(PageTransCompound(page)))
			return page;

		readahead = TestClearPageReadahead(page);
		if (vma && vma_ra) {
			unsigned long ra_val;
			int win, hits;

			ra_val = GET_SWAP_RA_VAL(vma);
			win = SWAP_RA_WIN(ra_val);
			hits = SWAP_RA_HITS(ra_val);
			if (readahead)
				hits = min_t(int, hits + 1, SWAP_RA_HITS_MAX);
			atomic_long_set(&vma->swap_readahead_info,
					SWAP_RA_VAL(addr, win, hits));
		}

		if (readahead) {
			count_vm_event(SWAP_RA_HIT);
			if (!vma || !vma_ra)
				atomic_inc(&swapin_readahead_hits);
		}
	}

	return page;
}
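/*
 * Besides the lookup itself, lookup_swap_cache() is where the readahead
 * feedback loop closes: hitting a PG_readahead page bumps the hit count
 * (per-VMA or global), which later widens the window computed by
 * __swapin_nr_pages().  This is also why __read_swap_cache_async()
 * avoids calling it again - doing so would double-count find_total.
 */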
/**
 * find_get_incore_page - Find and get a page from the page or swap caches.
 * @mapping: The address_space to search.
 * @index: The page cache index.
 *
 * This differs from find_get_page() in that it will also look for the
 * page in the swap cache.
 *
 * Return: The found page or %NULL.
 */
struct page *find_get_incore_page(struct address_space *mapping, pgoff_t index)
{
	swp_entry_t swp;
	struct swap_info_struct *si;
	struct page *page = pagecache_get_page(mapping, index,
						FGP_ENTRY | FGP_HEAD, 0);

	if (!page)
		return page;
	if (!xa_is_value(page))
		return find_subpage(page, index);
	if (!shmem_mapping(mapping))
		return NULL;

	swp = radix_to_swp_entry(page);
	/* Prevent swapoff from happening to us */
	si = get_swap_device(swp);
	if (!si)
		return NULL;
	page = find_get_page(swap_address_space(swp), swp_offset(swp));
	put_swap_device(si);
	return page;
}
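/*
 * FGP_ENTRY makes pagecache_get_page() hand back value entries instead
 * of hiding them; for a shmem mapping whose page was swapped out, that
 * value entry encodes the swap entry (decoded by radix_to_swp_entry()
 * above), which lets the second lookup probe the swap cache directly.
 */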
struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct swap_info_struct *si;
	struct page *page;
	void *shadow = NULL;

	*new_page_allocated = false;

	for (;;) {
		int err;
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		si = get_swap_device(entry);
		if (!si)
			return NULL;
		page = find_get_page(swap_address_space(entry),
				     swp_offset(entry));
		put_swap_device(si);
		if (page)
			return page;

		/*
		 * Just skip readahead for an unused swap slot.
		 * During swapoff, when swap_slot_cache is disabled,
		 * we have to handle the race between putting a
		 * swap entry into the swap cache and marking the swap slot
		 * as SWAP_HAS_CACHE.  That's done in a later part of the
		 * code, or else swapoff will be aborted if we return NULL.
		 */
		if (!__swp_swapcount(entry) && swap_slot_cache_enabled)
			return NULL;

		/*
		 * Get a new page to read into from swap.  Allocate it now,
		 * before marking swap_map SWAP_HAS_CACHE, when -EEXIST will
		 * cause any racers to loop around until we add it to cache.
		 */
		page = alloc_page_vma(gfp_mask, vma, addr);
		if (!page)
			return NULL;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (!err)
			break;

		put_page(page);
		if (err != -EEXIST)
			return NULL;

		/*
		 * We might race against __delete_from_swap_cache(), and
		 * stumble across a swap_map entry whose SWAP_HAS_CACHE
		 * has not yet been cleared. Or race against another
		 * __read_swap_cache_async(), which has set SWAP_HAS_CACHE
		 * in swap_map, but not yet added its page to swap cache.
		 */
		cond_resched();
	}

	/*
	 * The swap entry is ours to swap in. Prepare the new page.
	 */

	__SetPageLocked(page);
	__SetPageSwapBacked(page);

	if (mem_cgroup_swapin_charge_page(page, NULL, gfp_mask, entry))
		goto fail_unlock;

	/* May fail (-ENOMEM) if XArray node allocation failed. */
	if (add_to_swap_cache(page, entry, gfp_mask & GFP_RECLAIM_MASK, &shadow))
		goto fail_unlock;

	mem_cgroup_swapin_uncharge_swap(entry);

	if (shadow)
		workingset_refault(page, shadow);

	/* Caller will initiate read into locked page */
	lru_cache_add(page);
	*new_page_allocated = true;
	return page;

fail_unlock:
	put_swap_page(page, entry);
	unlock_page(page);
	put_page(page);
	return NULL;
}
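/*
 * In short, the function above claims a swap slot in four steps: look
 * up the swap cache; bail out if the slot is free; allocate a page and
 * atomically set SWAP_HAS_CACHE via swapcache_prepare(), looping on
 * -EEXIST while a racer holds the slot; then charge the page, insert it
 * into the swap cache and the LRU, and let the caller start the read.
 */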
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
		struct vm_area_struct *vma, unsigned long addr, bool do_poll)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage, do_poll);

	return retpage;
}

static unsigned int __swapin_nr_pages(unsigned long prev_offset,
				      unsigned long offset,
				      int hits,
				      int max_pages,
				      int prev_win)
{
	unsigned int pages, last_ra;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = hits + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = prev_win / 2;
	if (pages < last_ra)
		pages = last_ra;

	return pages;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int hits, pages, max_pages;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	hits = atomic_xchg(&swapin_readahead_hits, 0);
	pages = __swapin_nr_pages(READ_ONCE(prev_offset), offset, hits,
				  max_pages,
				  atomic_read(&last_readahead_pages));
	if (!hits)
		WRITE_ONCE(prev_offset, offset);
	atomic_set(&last_readahead_pages, pages);

	return pages;
}
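/*
 * Worked example of the window heuristic above (illustrative numbers):
 * with the default page_cluster of 3, max_pages is 8.  Three recent
 * readahead hits give pages = 3 + 2 = 5, rounded up to the next power
 * of two, 8.  With no hits and a non-adjacent offset the window
 * collapses to 1, but never below half the previous window
 * (prev_win / 2), so shrinking takes several faults.
 */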
/**
 * swap_cluster_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 */
struct page *swap_cluster_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct swap_info_struct *si = swp_swap_info(entry);
	struct blk_plug plug;
	bool do_poll = true, page_allocated;
	struct vm_area_struct *vma = vmf->vma;
	unsigned long addr = vmf->address;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Test swap type to make sure the dereference is safe */
	if (likely(si->flags & (SWP_BLKDEV | SWP_FS_OPS))) {
		struct inode *inode = si->swap_file->f_mapping->host;
		if (inode_read_congested(inode))
			goto skip;
	}

	do_poll = false;
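	/*
	 * The window is aligned, not centred on the fault: e.g. (purely
	 * illustrative) with a window of 8 pages, mask is 7, so a fault
	 * at offset 0x12 reads offsets 0x10-0x17.
	 */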
	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;
	if (end_offset >= si->max)
		end_offset = si->max - 1;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset ; offset++) {
		/* Ok, do the async read-ahead now */
		page = __read_swap_cache_async(
			swp_entry(swp_type(entry), offset),
			gfp_mask, vma, addr, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (offset != entry_offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr, do_poll);
}

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* swap cache doesn't use writeback related tags */
		mapping_set_no_writeback_tags(space);
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}

void exit_swap_address_space(unsigned int type)
{
	kvfree(swapper_spaces[type]);
	nr_swapper_spaces[type] = 0;
	swapper_spaces[type] = NULL;
}

static inline void swap_ra_clamp_pfn(struct vm_area_struct *vma,
				     unsigned long faddr,
				     unsigned long lpfn,
				     unsigned long rpfn,
				     unsigned long *start,
				     unsigned long *end)
{
	*start = max3(lpfn, PFN_DOWN(vma->vm_start),
		      PFN_DOWN(faddr & PMD_MASK));
	*end = min3(rpfn, PFN_DOWN(vma->vm_end),
		    PFN_DOWN((faddr & PMD_MASK) + PMD_SIZE));
}
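/*
 * swap_ra_info() below decides where to place the readahead window
 * relative to the faulting pfn, based on the previously recorded fault
 * address: growing forward if the access pattern moved forward
 * (fpfn == pfn + 1), backward if it moved backward (pfn == fpfn + 1),
 * and roughly centred on the fault otherwise.  swap_ra_clamp_pfn()
 * then clips the window to the VMA and to the surrounding PMD, so a
 * single page-table page covers all the PTEs it will inspect.
 */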
static void swap_ra_info(struct vm_fault *vmf,
			struct vma_swap_readahead *ra_info)
{
	struct vm_area_struct *vma = vmf->vma;
	unsigned long ra_val;
	swp_entry_t entry;
	unsigned long faddr, pfn, fpfn;
	unsigned long start, end;
	pte_t *pte, *orig_pte;
	unsigned int max_win, hits, prev_win, win, left;
#ifndef CONFIG_64BIT
	pte_t *tpte;
#endif

	max_win = 1 << min_t(unsigned int, READ_ONCE(page_cluster),
			     SWAP_RA_ORDER_CEILING);
	if (max_win == 1) {
		ra_info->win = 1;
		return;
	}

	faddr = vmf->address;
	orig_pte = pte = pte_offset_map(vmf->pmd, faddr);
	entry = pte_to_swp_entry(*pte);
	if ((unlikely(non_swap_entry(entry)))) {
		pte_unmap(orig_pte);
		return;
	}

	fpfn = PFN_DOWN(faddr);
	ra_val = GET_SWAP_RA_VAL(vma);
	pfn = PFN_DOWN(SWAP_RA_ADDR(ra_val));
	prev_win = SWAP_RA_WIN(ra_val);
	hits = SWAP_RA_HITS(ra_val);
	ra_info->win = win = __swapin_nr_pages(pfn, fpfn, hits,
					       max_win, prev_win);
	atomic_long_set(&vma->swap_readahead_info,
			SWAP_RA_VAL(faddr, win, 0));

	if (win == 1) {
		pte_unmap(orig_pte);
		return;
	}

	/* Copy the PTEs because the page table may be unmapped */
	if (fpfn == pfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn, fpfn + win, &start, &end);
	else if (pfn == fpfn + 1)
		swap_ra_clamp_pfn(vma, faddr, fpfn - win + 1, fpfn + 1,
				  &start, &end);
	else {
		left = (win - 1) / 2;
		swap_ra_clamp_pfn(vma, faddr, fpfn - left, fpfn + win - left,
				  &start, &end);
	}
	ra_info->nr_pte = end - start;
	ra_info->offset = fpfn - start;
	pte -= ra_info->offset;
#ifdef CONFIG_64BIT
	ra_info->ptes = pte;
#else
	tpte = ra_info->ptes;
	for (pfn = start; pfn != end; pfn++)
		*tpte++ = *pte++;
#endif
	pte_unmap(orig_pte);
}
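/*
 * On the 32-bit (!CONFIG_64BIT) path above the PTE values are copied
 * into ra_info->ptes: with highmem page tables, pte_offset_map() may be
 * a temporary kmap that becomes invalid after pte_unmap(), so the
 * pointer cannot be kept.  On 64-bit the page table is permanently
 * mapped, and keeping the pointer avoids the copy.
 */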
/**
 * swap_vma_readahead - swap in pages in hope we need them soon
 * @fentry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read in a few pages whose
 * virtual addresses are around the fault address in the same vma.
 *
 * Caller must hold read mmap_lock if vmf->vma is not NULL.
 *
 */
static struct page *swap_vma_readahead(swp_entry_t fentry, gfp_t gfp_mask,
				       struct vm_fault *vmf)
{
	struct blk_plug plug;
	struct vm_area_struct *vma = vmf->vma;
	struct page *page;
	pte_t *pte, pentry;
	swp_entry_t entry;
	unsigned int i;
	bool page_allocated;
	struct vma_swap_readahead ra_info = {
		.win = 1,
	};

	swap_ra_info(vmf, &ra_info);
	if (ra_info.win == 1)
		goto skip;

	blk_start_plug(&plug);
	for (i = 0, pte = ra_info.ptes; i < ra_info.nr_pte;
	     i++, pte++) {
		pentry = *pte;
		if (pte_none(pentry))
			continue;
		if (pte_present(pentry))
			continue;
		entry = pte_to_swp_entry(pentry);
		if (unlikely(non_swap_entry(entry)))
			continue;
		page = __read_swap_cache_async(entry, gfp_mask, vma,
					       vmf->address, &page_allocated);
		if (!page)
			continue;
		if (page_allocated) {
			swap_readpage(page, false);
			if (i != ra_info.offset) {
				SetPageReadahead(page);
				count_vm_event(SWAP_RA);
			}
		}
		put_page(page);
	}
	blk_finish_plug(&plug);
	lru_add_drain();
skip:
	return read_swap_cache_async(fentry, gfp_mask, vma, vmf->address,
				     ra_info.win == 1);
}

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vmf: fault information
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * It's the main entry function for swap readahead. Depending on the
 * configuration, it reads ahead either cluster-based (i.e. aligned
 * blocks of the physical swap area) or VMA-based (i.e. virtual
 * addresses around the faulting address).
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
				struct vm_fault *vmf)
{
	return swap_use_vma_readahead() ?
			swap_vma_readahead(entry, gfp_mask, vmf) :
			swap_cluster_readahead(entry, gfp_mask, vmf);
}
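/*
 * The knob below appears as /sys/kernel/mm/swap/vma_ra_enabled (the
 * "swap" kobject is created under mm_kobj); e.g. writing "false" or
 * "0" to it falls back to cluster-based readahead for all faults.
 */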
#ifdef CONFIG_SYSFS
static ssize_t vma_ra_enabled_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sysfs_emit(buf, "%s\n",
			  enable_vma_readahead ? "true" : "false");
}
static ssize_t vma_ra_enabled_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	if (!strncmp(buf, "true", 4) || !strncmp(buf, "1", 1))
		enable_vma_readahead = true;
	else if (!strncmp(buf, "false", 5) || !strncmp(buf, "0", 1))
		enable_vma_readahead = false;
	else
		return -EINVAL;

	return count;
}
static struct kobj_attribute vma_ra_enabled_attr =
	__ATTR(vma_ra_enabled, 0644, vma_ra_enabled_show,
	       vma_ra_enabled_store);

static struct attribute *swap_attrs[] = {
	&vma_ra_enabled_attr.attr,
	NULL,
};

static const struct attribute_group swap_attr_group = {
	.attrs = swap_attrs,
};

static int __init swap_init_sysfs(void)
{
	int err;
	struct kobject *swap_kobj;

	swap_kobj = kobject_create_and_add("swap", mm_kobj);
	if (!swap_kobj) {
		pr_err("failed to create swap kobject\n");
		return -ENOMEM;
	}
	err = sysfs_create_group(swap_kobj, &swap_attr_group);
	if (err) {
		pr_err("failed to register swap group\n");
		goto delete_obj;
	}
	return 0;

delete_obj:
	kobject_put(swap_kobj);
	return err;
}
subsys_initcall(swap_init_sysfs);
#endif