/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
#ifdef CONFIG_MIGRATION
	.migratepage	= migrate_page,
#endif
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.i_mmap_writable = ATOMIC_INIT(0),
		.a_ops		= &swap_aops,
		/* swap cache doesn't use writeback related tags */
		.flags		= 1 << AS_NO_WRITEBACK_TAGS,
	}
};

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	get_page(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
				  swp_offset(entry), page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_node_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only the context which has set the SWAP_HAS_CACHE flag
		 * would call add_to_swap_cache().
		 * So add_to_swap_cache() doesn't return -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		put_page(page);
	}

	return error;
}
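
/*
 * Illustrative example (assuming swap_address_space() selects
 * swapper_spaces[swp_type(entry)], as the per-type array above suggests):
 * an entry with type 1 and offset 0x2a is inserted into
 * swapper_spaces[1].page_tree at index 0x2a, with page_private() holding
 * the packed entry.val and PG_swapcache standing in for the usual
 * page->mapping / page->index pair.
 */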

int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, swp_offset(entry));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_node_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list passed to split_huge_page_to_list() if the page is
 *	  transparent huge and has to be split first
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (mem_cgroup_try_charge_swap(page, entry)) {
		swapcache_free(entry);
		return 0;
	}

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache.
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
		return 0;
	}
}
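
/*
 * Rough sketch of the expected swap-out path (an assumption based on the
 * shrink_page_list reference and swap_aops at the top of this file, not a
 * guarantee): vmscan locks an anonymous page, add_to_swap() reserves an
 * entry and puts the page in the swap cache, and the dirty page is then
 * written out via swap_aops.writepage == swap_writepage().  A later fault
 * brings the data back in through swapin_readahead() at the bottom of
 * this file.
 */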

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list,
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry);
	put_page(page);
}

/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's ok to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	if (!is_huge_zero_page(page))
		put_page(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;
	int i;

	lru_add_drain();
	for (i = 0; i < nr; i++)
		free_swap_cache(pagep[i]);
	release_pages(pagep, nr, false);
}

/*
 * Lookup a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), swp_offset(entry));

	if (page) {
		INC_CACHE_INFO(find_success);
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}
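
/*
 * Note on the readahead accounting above: swapin_readahead() below tags
 * each page it reads speculatively (any offset other than the one actually
 * faulted on) with PG_readahead.  When lookup_swap_cache() later finds such
 * a page, TestClearPageReadahead() fires and swapin_readahead_hits is
 * incremented; swapin_nr_pages() then consumes that counter to grow or
 * shrink the next readahead window.
 */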

struct page *__read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr,
			bool *new_page_allocated)
{
	struct page *found_page, *new_page = NULL;
	struct address_space *swapper_space = swap_address_space(entry);
	int err;
	*new_page_allocated = false;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swapper_space, swp_offset(entry));
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * call radix_tree_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete ? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__SetPageLocked(new_page);
		__SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			*new_page_allocated = true;
			return new_page;
		}
		radix_tree_preload_end();
		__ClearPageLocked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry);
	} while (err != -ENOMEM);

	if (new_page)
		put_page(new_page);
	return found_page;
}

/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	bool page_was_allocated;
	struct page *retpage = __read_swap_cache_async(entry, gfp_mask,
			vma, addr, &page_was_allocated);

	if (page_was_allocated)
		swap_readpage(retpage);

	return retpage;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << READ_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}
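
/*
 * Worked example of the sizing heuristic above (illustrative numbers):
 * with page_cluster == 3, max_pages == 8.  If the previous window scored
 * 5 readahead hits, pages = 5 + 2 = 7, rounded up to the next power of
 * two (8) and capped at max_pages.  With no hits and a non-adjacent
 * offset the window collapses to a single page, except that the
 * last_readahead_pages check never lets it drop below half of the
 * previously used window.
 */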

/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		put_page(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}