/*
 *  linux/mm/swap_state.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 *
 *  Rewritten to use page cache, (C) 1998 Stephen Tweedie
 */
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/pagevec.h>
#include <linux/migrate.h>
#include <linux/page_cgroup.h>

#include <asm/pgtable.h>

/*
 * swapper_space is a fiction, retained to simplify the path through
 * vmscan's shrink_page_list.
 */
static const struct address_space_operations swap_aops = {
	.writepage	= swap_writepage,
	.set_page_dirty	= swap_set_page_dirty,
	.migratepage	= migrate_page,
};

static struct backing_dev_info swap_backing_dev_info = {
	.name		= "swap",
	.capabilities	= BDI_CAP_NO_ACCT_AND_WRITEBACK | BDI_CAP_SWAP_BACKED,
};

struct address_space swapper_spaces[MAX_SWAPFILES] = {
	[0 ... MAX_SWAPFILES - 1] = {
		.page_tree	= RADIX_TREE_INIT(GFP_ATOMIC|__GFP_NOWARN),
		.a_ops		= &swap_aops,
		.backing_dev_info = &swap_backing_dev_info,
	}
};

#define INC_CACHE_INFO(x)	do { swap_cache_info.x++; } while (0)

static struct {
	unsigned long add_total;
	unsigned long del_total;
	unsigned long find_success;
	unsigned long find_total;
} swap_cache_info;

unsigned long total_swapcache_pages(void)
{
	int i;
	unsigned long ret = 0;

	for (i = 0; i < MAX_SWAPFILES; i++)
		ret += swapper_spaces[i].nrpages;
	return ret;
}

static atomic_t swapin_readahead_hits = ATOMIC_INIT(4);

void show_swap_cache_info(void)
{
	printk("%lu pages in swap cache\n", total_swapcache_pages());
	printk("Swap cache stats: add %lu, delete %lu, find %lu/%lu\n",
		swap_cache_info.add_total, swap_cache_info.del_total,
		swap_cache_info.find_success, swap_cache_info.find_total);
	printk("Free swap  = %ldkB\n",
		get_nr_swap_pages() << (PAGE_SHIFT - 10));
	printk("Total swap = %lukB\n", total_swap_pages << (PAGE_SHIFT - 10));
}

/*
 * __add_to_swap_cache resembles add_to_page_cache_locked on swapper_space,
 * but sets SwapCache flag and private instead of mapping and index.
 */
int __add_to_swap_cache(struct page *page, swp_entry_t entry)
{
	int error;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(PageSwapCache(page), page);
	VM_BUG_ON_PAGE(!PageSwapBacked(page), page);

	page_cache_get(page);
	SetPageSwapCache(page);
	set_page_private(page, entry.val);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	error = radix_tree_insert(&address_space->page_tree,
					entry.val, page);
	if (likely(!error)) {
		address_space->nrpages++;
		__inc_zone_page_state(page, NR_FILE_PAGES);
		INC_CACHE_INFO(add_total);
	}
	spin_unlock_irq(&address_space->tree_lock);

	if (unlikely(error)) {
		/*
		 * Only the context which has set the SWAP_HAS_CACHE flag
		 * calls add_to_swap_cache(), so add_to_swap_cache() never
		 * returns -EEXIST.
		 */
		VM_BUG_ON(error == -EEXIST);
		set_page_private(page, 0UL);
		ClearPageSwapCache(page);
		page_cache_release(page);
	}

	return error;
}
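
/*
 * Illustrative sketch (hypothetical helper, not used elsewhere): the
 * entry.val stored in page_private above doubles as the radix-tree
 * index, so a swap-cache page can be re-found from nothing but the
 * bare entry.  swp_entry()/swp_type()/swp_offset() encode and decode
 * the (type, offset) pair packed into that value.
 */
static inline struct page *example_find_swap_page(swp_entry_t entry)
{
	/* Returns the page with its refcount raised, or NULL on a miss. */
	return find_get_page(swap_address_space(entry), entry.val);
}
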
int add_to_swap_cache(struct page *page, swp_entry_t entry, gfp_t gfp_mask)
{
	int error;

	error = radix_tree_maybe_preload(gfp_mask);
	if (!error) {
		error = __add_to_swap_cache(page, entry);
		radix_tree_preload_end();
	}
	return error;
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache.
 */
void __delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
	VM_BUG_ON_PAGE(PageWriteback(page), page);

	entry.val = page_private(page);
	address_space = swap_address_space(entry);
	radix_tree_delete(&address_space->page_tree, page_private(page));
	set_page_private(page, 0);
	ClearPageSwapCache(page);
	address_space->nrpages--;
	__dec_zone_page_state(page, NR_FILE_PAGES);
	INC_CACHE_INFO(del_total);
}

/**
 * add_to_swap - allocate swap space for a page
 * @page: page we want to move to swap
 * @list: list to pass to split_huge_page_to_list() if @page is a THP
 *
 * Allocate swap space for the page and add the page to the
 * swap cache.  Caller needs to hold the page lock.
 */
int add_to_swap(struct page *page, struct list_head *list)
{
	swp_entry_t entry;
	int err;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	VM_BUG_ON_PAGE(!PageUptodate(page), page);

	entry = get_swap_page();
	if (!entry.val)
		return 0;

	if (unlikely(PageTransHuge(page)))
		if (unlikely(split_huge_page_to_list(page, list))) {
			swapcache_free(entry, NULL);
			return 0;
		}

	/*
	 * Radix-tree node allocations from PF_MEMALLOC contexts could
	 * completely exhaust the page allocator.  __GFP_NOMEMALLOC
	 * stops emergency reserves from being allocated.
	 *
	 * TODO: this could cause a theoretical memory reclaim
	 * deadlock in the swap out path.
	 */
	/*
	 * Add it to the swap cache and mark it dirty
	 */
	err = add_to_swap_cache(page, entry,
			__GFP_HIGH|__GFP_NOMEMALLOC|__GFP_NOWARN);

	if (!err) {	/* Success */
		SetPageDirty(page);
		return 1;
	} else {	/* -ENOMEM radix-tree allocation failure */
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear the SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
		return 0;
	}
}

/*
 * This must be called only on pages that have
 * been verified to be in the swap cache and locked.
 * It will never put the page into the free list;
 * the caller has a reference on the page.
 */
void delete_from_swap_cache(struct page *page)
{
	swp_entry_t entry;
	struct address_space *address_space;

	entry.val = page_private(page);

	address_space = swap_address_space(entry);
	spin_lock_irq(&address_space->tree_lock);
	__delete_from_swap_cache(page);
	spin_unlock_irq(&address_space->tree_lock);

	swapcache_free(entry, page);
	page_cache_release(page);
}
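
/*
 * Illustrative sketch (hypothetical helper, not used elsewhere):
 * delete_from_swap_cache() expects a locked page that is known to be
 * in the swap cache, so a cautious caller pairs it with the page lock
 * and a PageSwapCache check.
 */
static inline void example_remove_from_swap_cache(struct page *page)
{
	lock_page(page);
	/* Recheck under the lock: the page may have been removed already. */
	if (PageSwapCache(page))
		delete_from_swap_cache(page);
	unlock_page(page);
}
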
/*
 * If we are the only user, then try to free up the swap cache.
 *
 * It's OK to check for PageSwapCache without the page lock
 * here because we are going to recheck again inside
 * try_to_free_swap() _with_ the lock.
 *					- Marcelo
 */
static inline void free_swap_cache(struct page *page)
{
	if (PageSwapCache(page) && !page_mapped(page) && trylock_page(page)) {
		try_to_free_swap(page);
		unlock_page(page);
	}
}

/*
 * Perform a free_page(), also freeing any swap cache associated with
 * this page if it is the last user of the page.
 */
void free_page_and_swap_cache(struct page *page)
{
	free_swap_cache(page);
	page_cache_release(page);
}

/*
 * Passed an array of pages, drop them all from swapcache and then release
 * them.  They are removed from the LRU and freed if this is their last use.
 */
void free_pages_and_swap_cache(struct page **pages, int nr)
{
	struct page **pagep = pages;

	lru_add_drain();
	while (nr) {
		int todo = min(nr, PAGEVEC_SIZE);
		int i;

		for (i = 0; i < todo; i++)
			free_swap_cache(pagep[i]);
		release_pages(pagep, todo, 0);
		pagep += todo;
		nr -= todo;
	}
}

/*
 * Look up a swap entry in the swap cache. A found page will be returned
 * unlocked and with its refcount incremented - we rely on the kernel
 * lock getting page table operations atomic even if we drop the page
 * lock before returning.
 */
struct page *lookup_swap_cache(swp_entry_t entry)
{
	struct page *page;

	page = find_get_page(swap_address_space(entry), entry.val);

	if (page) {
		INC_CACHE_INFO(find_success);
		if (TestClearPageReadahead(page))
			atomic_inc(&swapin_readahead_hits);
	}

	INC_CACHE_INFO(find_total);
	return page;
}
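
/*
 * Illustrative sketch of the usual fault-path usage (a condensed,
 * hypothetical helper; cf. do_swap_page()): try the swap cache first,
 * and only go to the backing device on a miss.
 */
static inline struct page *example_swapin_page(swp_entry_t entry,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;

	page = lookup_swap_cache(entry);
	if (!page)
		page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
					vma, addr);
	return page;
}
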
/*
 * Locate a page of swap in physical memory, reserving swap cache space
 * and reading the disk if it is not already cached.
 * A failure return means that either the page allocation failed or that
 * the swap entry is no longer in use.
 */
struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *found_page, *new_page = NULL;
	int err;

	do {
		/*
		 * First check the swap cache.  Since this is normally
		 * called after lookup_swap_cache() failed, re-calling
		 * that would confuse statistics.
		 */
		found_page = find_get_page(swap_address_space(entry),
					entry.val);
		if (found_page)
			break;

		/*
		 * Get a new page to read into from swap.
		 */
		if (!new_page) {
			new_page = alloc_page_vma(gfp_mask, vma, addr);
			if (!new_page)
				break;		/* Out of memory */
		}

		/*
		 * Call radix_tree_maybe_preload() while we can wait.
		 */
		err = radix_tree_maybe_preload(gfp_mask & GFP_KERNEL);
		if (err)
			break;

		/*
		 * Swap entry may have been freed since our caller observed it.
		 */
		err = swapcache_prepare(entry);
		if (err == -EEXIST) {
			radix_tree_preload_end();
			/*
			 * We might race against get_swap_page() and stumble
			 * across a SWAP_HAS_CACHE swap_map entry whose page
			 * has not been brought into the swapcache yet, while
			 * the other end is scheduled away waiting on discard
			 * I/O completion at scan_swap_map().
			 *
			 * In order to avoid turning this transitory state
			 * into a permanent loop around this -EEXIST case
			 * if !CONFIG_PREEMPT and the I/O completion happens
			 * to be waiting on the CPU waitqueue where we are now
			 * busy looping, we just conditionally invoke the
			 * scheduler here, if there are some more important
			 * tasks to run.
			 */
			cond_resched();
			continue;
		}
		if (err) {		/* swp entry is obsolete? */
			radix_tree_preload_end();
			break;
		}

		/* May fail (-ENOMEM) if radix-tree node allocation failed. */
		__set_page_locked(new_page);
		SetPageSwapBacked(new_page);
		err = __add_to_swap_cache(new_page, entry);
		if (likely(!err)) {
			radix_tree_preload_end();
			/*
			 * Initiate read into locked page and return.
			 */
			lru_cache_add_anon(new_page);
			swap_readpage(new_page);
			return new_page;
		}
		radix_tree_preload_end();
		ClearPageSwapBacked(new_page);
		__clear_page_locked(new_page);
		/*
		 * add_to_swap_cache() doesn't return -EEXIST, so we can safely
		 * clear the SWAP_HAS_CACHE flag.
		 */
		swapcache_free(entry, NULL);
	} while (err != -ENOMEM);

	if (new_page)
		page_cache_release(new_page);
	return found_page;
}

static unsigned long swapin_nr_pages(unsigned long offset)
{
	static unsigned long prev_offset;
	unsigned int pages, max_pages, last_ra;
	static atomic_t last_readahead_pages;

	max_pages = 1 << ACCESS_ONCE(page_cluster);
	if (max_pages <= 1)
		return 1;

	/*
	 * This heuristic has been found to work well on both sequential and
	 * random loads, swapping to hard disk or to SSD: please don't ask
	 * what the "+ 2" means, it just happens to work well, that's all.
	 */
	pages = atomic_xchg(&swapin_readahead_hits, 0) + 2;
	if (pages == 2) {
		/*
		 * We can have no readahead hits to judge by: but must not get
		 * stuck here forever, so check for an adjacent offset instead
		 * (and don't even bother to check whether swap type is same).
		 */
		if (offset != prev_offset + 1 && offset != prev_offset - 1)
			pages = 1;
		prev_offset = offset;
	} else {
		unsigned int roundup = 4;
		while (roundup < pages)
			roundup <<= 1;
		pages = roundup;
	}

	if (pages > max_pages)
		pages = max_pages;

	/* Don't shrink readahead too fast */
	last_ra = atomic_read(&last_readahead_pages) / 2;
	if (pages < last_ra)
		pages = last_ra;
	atomic_set(&last_readahead_pages, pages);

	return pages;
}
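
/*
 * Worked example (illustrative): with page_cluster == 3, max_pages is 8.
 * If 5 readahead hits were recorded since the last call, pages = 5 + 2
 * = 7, rounded up to the next power of two (8) and capped at max_pages,
 * so an 8-page window is used.  With no hits and a non-adjacent offset,
 * pages drops to 1, subject to the "don't shrink too fast" floor of
 * half the previous window.
 */
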
/**
 * swapin_readahead - swap in pages in hope we need them soon
 * @entry: swap entry of this memory
 * @gfp_mask: memory allocation flags
 * @vma: user vma this address belongs to
 * @addr: target address for mempolicy
 *
 * Returns the struct page for entry and addr, after queueing swapin.
 *
 * Primitive swap readahead code. We simply read an aligned block of
 * (1 << page_cluster) entries in the swap area. This method is chosen
 * because it doesn't cost us any seek time.  We also make sure to queue
 * the 'original' request together with the readahead ones...
 *
 * This has been extended to use the NUMA policies from the mm triggering
 * the readahead.
 *
 * Caller must hold down_read on the vma->vm_mm if vma is not NULL.
 */
struct page *swapin_readahead(swp_entry_t entry, gfp_t gfp_mask,
			struct vm_area_struct *vma, unsigned long addr)
{
	struct page *page;
	unsigned long entry_offset = swp_offset(entry);
	unsigned long offset = entry_offset;
	unsigned long start_offset, end_offset;
	unsigned long mask;
	struct blk_plug plug;

	mask = swapin_nr_pages(offset) - 1;
	if (!mask)
		goto skip;

	/* Read a page_cluster sized and aligned cluster around offset. */
	start_offset = offset & ~mask;
	end_offset = offset | mask;
	if (!start_offset)	/* First page is swap header. */
		start_offset++;

	blk_start_plug(&plug);
	for (offset = start_offset; offset <= end_offset; offset++) {
		/* Ok, do the async read-ahead now */
		page = read_swap_cache_async(swp_entry(swp_type(entry), offset),
						gfp_mask, vma, addr);
		if (!page)
			continue;
		if (offset != entry_offset)
			SetPageReadahead(page);
		page_cache_release(page);
	}
	blk_finish_plug(&plug);

	lru_add_drain();	/* Push any new pages onto the LRU now */
skip:
	return read_swap_cache_async(entry, gfp_mask, vma, addr);
}
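
/*
 * Worked example for the window arithmetic above (illustrative): with
 * swapin_nr_pages() returning 8, mask = 7.  For offset 0x2d,
 * start_offset = 0x2d & ~7 = 0x28 and end_offset = 0x2d | 7 = 0x2f,
 * i.e. the aligned 8-slot cluster containing the faulting entry is
 * read.  Every page except the target one is tagged with
 * SetPageReadahead(), so a later hit in lookup_swap_cache() bumps
 * swapin_readahead_hits and lets the next window grow.
 */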