/*
 *  linux/mm/swapfile.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shmem_fs.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>

#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/page_cgroup.h>

static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
				 unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
static sector_t map_swap_entry(swp_entry_t, struct block_device**);

static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
long nr_swap_pages;
long total_swap_pages;
static int least_priority;

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

static struct swap_list_t swap_list = {-1, -1};

static struct swap_info_struct *swap_info[MAX_SWAPFILES];

static DEFINE_MUTEX(swapon_mutex);

static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
/* Activity counter to indicate that a swapon or swapoff has occurred */
static atomic_t proc_poll_event = ATOMIC_INIT(0);

static inline unsigned char swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* may include SWAP_HAS_CONT flag */
}

/* returns 1 if swap entry is freed */
static int
__try_to_reclaim_swap(struct swap_info_struct *si, unsigned long offset)
{
	swp_entry_t entry = swp_entry(si->type, offset);
	struct page *page;
	int ret = 0;

	page = find_get_page(&swapper_space, entry.val);
	if (!page)
		return 0;
	/*
	 * This function is called from scan_swap_map() and it's called
	 * by vmscan.c at reclaiming pages. So, we hold a lock on a page, here.
	 * We have to use trylock for avoiding deadlock. This is a special
	 * case and you should use try_to_free_swap() with explicit lock_page()
	 * in usual operations.
	 */
	if (trylock_page(page)) {
		ret = try_to_free_swap(page);
		unlock_page(page);
	}
	page_cache_release(page);
	return ret;
}

/*
 * swapon tells the device that all the old swap contents can be discarded,
 * to allow the swap device to optimize its wear-levelling.
 */
static int discard_swap(struct swap_info_struct *si)
{
	struct swap_extent *se;
	sector_t start_block;
	sector_t nr_blocks;
	int err = 0;

	/* Do not discard the swap header page! */
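	/*
	 * For example, assuming the usual PAGE_SHIFT of 12: the shifts below
	 * convert page numbers into 512-byte sectors, so each page covers
	 * 1 << (12 - 9) = 8 sectors.  An extent starting at swap page 100
	 * with 256 pages then yields, once the header page is skipped:
	 *
	 *	start_block = (100 + 1) << 3 = 808 sectors
	 *	nr_blocks   = (256 - 1) << 3 = 2040 sectors
	 */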
	se = &si->first_swap_extent;
	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
	if (nr_blocks) {
		err = blkdev_issue_discard(si->bdev, start_block,
				nr_blocks, GFP_KERNEL, 0);
		if (err)
			return err;
		cond_resched();
	}

	list_for_each_entry(se, &si->first_swap_extent.list, list) {
		start_block = se->start_block << (PAGE_SHIFT - 9);
		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);

		err = blkdev_issue_discard(si->bdev, start_block,
				nr_blocks, GFP_KERNEL, 0);
		if (err)
			break;

		cond_resched();
	}
	return err;		/* That will often be -EOPNOTSUPP */
}

/*
 * swap allocation tells the device that a cluster of swap can now be
 * discarded, to allow the swap device to optimize its wear-levelling.
 */
static void discard_swap_cluster(struct swap_info_struct *si,
				 pgoff_t start_page, pgoff_t nr_pages)
{
	struct swap_extent *se = si->curr_swap_extent;
	int found_extent = 0;

	while (nr_pages) {
		struct list_head *lh;

		if (se->start_page <= start_page &&
		    start_page < se->start_page + se->nr_pages) {
			pgoff_t offset = start_page - se->start_page;
			sector_t start_block = se->start_block + offset;
			sector_t nr_blocks = se->nr_pages - offset;

			if (nr_blocks > nr_pages)
				nr_blocks = nr_pages;
			start_page += nr_blocks;
			nr_pages -= nr_blocks;

			if (!found_extent++)
				si->curr_swap_extent = se;

			start_block <<= PAGE_SHIFT - 9;
			nr_blocks <<= PAGE_SHIFT - 9;
			if (blkdev_issue_discard(si->bdev, start_block,
				    nr_blocks, GFP_NOIO, 0))
				break;
		}

		lh = se->list.next;
		se = list_entry(lh, struct swap_extent, list);
	}
}

static int wait_for_discard(void *word)
{
	schedule();
	return 0;
}

#define SWAPFILE_CLUSTER	256
#define LATENCY_LIMIT		256

static unsigned long scan_swap_map(struct swap_info_struct *si,
				   unsigned char usage)
{
	unsigned long offset;
	unsigned long scan_base;
	unsigned long last_in_cluster = 0;
	int latency_ration = LATENCY_LIMIT;
	int found_free_cluster = 0;

	/*
	 * We try to cluster swap pages by allocating them sequentially
	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
	 * way, however, we resort to first-free allocation, starting
	 * a new cluster.  This prevents us from scattering swap pages
	 * all over the entire swap partition, so that we reduce
	 * overall disk seek times between swap pages.  -- sct
	 * But we do now try to find an empty cluster.  -Andrea
	 * And we let swap pages go all over an SSD partition.  Hugh
	 */

	si->flags += SWP_SCANNING;
	scan_base = offset = si->cluster_next;

	if (unlikely(!si->cluster_nr--)) {
		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
			si->cluster_nr = SWAPFILE_CLUSTER - 1;
			goto checks;
		}
		if (si->flags & SWP_DISCARDABLE) {
			/*
			 * Start range check on racing allocations, in case
			 * they overlap the cluster we eventually decide on
			 * (we scan without swap_lock to allow preemption).
			 * It's hardly conceivable that cluster_nr could be
			 * wrapped during our scan, but don't depend on it.
218 */ 219 if (si->lowest_alloc) 220 goto checks; 221 si->lowest_alloc = si->max; 222 si->highest_alloc = 0; 223 } 224 spin_unlock(&swap_lock); 225 226 /* 227 * If seek is expensive, start searching for new cluster from 228 * start of partition, to minimize the span of allocated swap. 229 * But if seek is cheap, search from our current position, so 230 * that swap is allocated from all over the partition: if the 231 * Flash Translation Layer only remaps within limited zones, 232 * we don't want to wear out the first zone too quickly. 233 */ 234 if (!(si->flags & SWP_SOLIDSTATE)) 235 scan_base = offset = si->lowest_bit; 236 last_in_cluster = offset + SWAPFILE_CLUSTER - 1; 237 238 /* Locate the first empty (unaligned) cluster */ 239 for (; last_in_cluster <= si->highest_bit; offset++) { 240 if (si->swap_map[offset]) 241 last_in_cluster = offset + SWAPFILE_CLUSTER; 242 else if (offset == last_in_cluster) { 243 spin_lock(&swap_lock); 244 offset -= SWAPFILE_CLUSTER - 1; 245 si->cluster_next = offset; 246 si->cluster_nr = SWAPFILE_CLUSTER - 1; 247 found_free_cluster = 1; 248 goto checks; 249 } 250 if (unlikely(--latency_ration < 0)) { 251 cond_resched(); 252 latency_ration = LATENCY_LIMIT; 253 } 254 } 255 256 offset = si->lowest_bit; 257 last_in_cluster = offset + SWAPFILE_CLUSTER - 1; 258 259 /* Locate the first empty (unaligned) cluster */ 260 for (; last_in_cluster < scan_base; offset++) { 261 if (si->swap_map[offset]) 262 last_in_cluster = offset + SWAPFILE_CLUSTER; 263 else if (offset == last_in_cluster) { 264 spin_lock(&swap_lock); 265 offset -= SWAPFILE_CLUSTER - 1; 266 si->cluster_next = offset; 267 si->cluster_nr = SWAPFILE_CLUSTER - 1; 268 found_free_cluster = 1; 269 goto checks; 270 } 271 if (unlikely(--latency_ration < 0)) { 272 cond_resched(); 273 latency_ration = LATENCY_LIMIT; 274 } 275 } 276 277 offset = scan_base; 278 spin_lock(&swap_lock); 279 si->cluster_nr = SWAPFILE_CLUSTER - 1; 280 si->lowest_alloc = 0; 281 } 282 283 checks: 284 if (!(si->flags & SWP_WRITEOK)) 285 goto no_page; 286 if (!si->highest_bit) 287 goto no_page; 288 if (offset > si->highest_bit) 289 scan_base = offset = si->lowest_bit; 290 291 /* reuse swap entry of cache-only swap if not busy. */ 292 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { 293 int swap_was_freed; 294 spin_unlock(&swap_lock); 295 swap_was_freed = __try_to_reclaim_swap(si, offset); 296 spin_lock(&swap_lock); 297 /* entry was freed successfully, try to use this again */ 298 if (swap_was_freed) 299 goto checks; 300 goto scan; /* check next one */ 301 } 302 303 if (si->swap_map[offset]) 304 goto scan; 305 306 if (offset == si->lowest_bit) 307 si->lowest_bit++; 308 if (offset == si->highest_bit) 309 si->highest_bit--; 310 si->inuse_pages++; 311 if (si->inuse_pages == si->pages) { 312 si->lowest_bit = si->max; 313 si->highest_bit = 0; 314 } 315 si->swap_map[offset] = usage; 316 si->cluster_next = offset + 1; 317 si->flags -= SWP_SCANNING; 318 319 if (si->lowest_alloc) { 320 /* 321 * Only set when SWP_DISCARDABLE, and there's a scan 322 * for a free cluster in progress or just completed. 323 */ 324 if (found_free_cluster) { 325 /* 326 * To optimize wear-levelling, discard the 327 * old data of the cluster, taking care not to 328 * discard any of its pages that have already 329 * been allocated by racing tasks (offset has 330 * already stepped over any at the beginning). 
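			 *
			 * For instance (illustrative numbers): if the new
			 * cluster spans offsets 512..767 and racing tasks
			 * already took 700 and 760 while swap_lock was
			 * dropped, lowest_alloc ends up at 700, so
			 * last_in_cluster is pulled back to 699 and only
			 * offsets 512..699 are discarded below.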
331 */ 332 if (offset < si->highest_alloc && 333 si->lowest_alloc <= last_in_cluster) 334 last_in_cluster = si->lowest_alloc - 1; 335 si->flags |= SWP_DISCARDING; 336 spin_unlock(&swap_lock); 337 338 if (offset < last_in_cluster) 339 discard_swap_cluster(si, offset, 340 last_in_cluster - offset + 1); 341 342 spin_lock(&swap_lock); 343 si->lowest_alloc = 0; 344 si->flags &= ~SWP_DISCARDING; 345 346 smp_mb(); /* wake_up_bit advises this */ 347 wake_up_bit(&si->flags, ilog2(SWP_DISCARDING)); 348 349 } else if (si->flags & SWP_DISCARDING) { 350 /* 351 * Delay using pages allocated by racing tasks 352 * until the whole discard has been issued. We 353 * could defer that delay until swap_writepage, 354 * but it's easier to keep this self-contained. 355 */ 356 spin_unlock(&swap_lock); 357 wait_on_bit(&si->flags, ilog2(SWP_DISCARDING), 358 wait_for_discard, TASK_UNINTERRUPTIBLE); 359 spin_lock(&swap_lock); 360 } else { 361 /* 362 * Note pages allocated by racing tasks while 363 * scan for a free cluster is in progress, so 364 * that its final discard can exclude them. 365 */ 366 if (offset < si->lowest_alloc) 367 si->lowest_alloc = offset; 368 if (offset > si->highest_alloc) 369 si->highest_alloc = offset; 370 } 371 } 372 return offset; 373 374 scan: 375 spin_unlock(&swap_lock); 376 while (++offset <= si->highest_bit) { 377 if (!si->swap_map[offset]) { 378 spin_lock(&swap_lock); 379 goto checks; 380 } 381 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { 382 spin_lock(&swap_lock); 383 goto checks; 384 } 385 if (unlikely(--latency_ration < 0)) { 386 cond_resched(); 387 latency_ration = LATENCY_LIMIT; 388 } 389 } 390 offset = si->lowest_bit; 391 while (++offset < scan_base) { 392 if (!si->swap_map[offset]) { 393 spin_lock(&swap_lock); 394 goto checks; 395 } 396 if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) { 397 spin_lock(&swap_lock); 398 goto checks; 399 } 400 if (unlikely(--latency_ration < 0)) { 401 cond_resched(); 402 latency_ration = LATENCY_LIMIT; 403 } 404 } 405 spin_lock(&swap_lock); 406 407 no_page: 408 si->flags -= SWP_SCANNING; 409 return 0; 410 } 411 412 swp_entry_t get_swap_page(void) 413 { 414 struct swap_info_struct *si; 415 pgoff_t offset; 416 int type, next; 417 int wrapped = 0; 418 419 spin_lock(&swap_lock); 420 if (nr_swap_pages <= 0) 421 goto noswap; 422 nr_swap_pages--; 423 424 for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { 425 si = swap_info[type]; 426 next = si->next; 427 if (next < 0 || 428 (!wrapped && si->prio != swap_info[next]->prio)) { 429 next = swap_list.head; 430 wrapped++; 431 } 432 433 if (!si->highest_bit) 434 continue; 435 if (!(si->flags & SWP_WRITEOK)) 436 continue; 437 438 swap_list.next = next; 439 /* This is called for allocating swap entry for cache */ 440 offset = scan_swap_map(si, SWAP_HAS_CACHE); 441 if (offset) { 442 spin_unlock(&swap_lock); 443 return swp_entry(type, offset); 444 } 445 next = swap_list.next; 446 } 447 448 nr_swap_pages++; 449 noswap: 450 spin_unlock(&swap_lock); 451 return (swp_entry_t) {0}; 452 } 453 454 /* The only caller of this function is now susupend routine */ 455 swp_entry_t get_swap_page_of_type(int type) 456 { 457 struct swap_info_struct *si; 458 pgoff_t offset; 459 460 spin_lock(&swap_lock); 461 si = swap_info[type]; 462 if (si && (si->flags & SWP_WRITEOK)) { 463 nr_swap_pages--; 464 /* This is called for allocating swap entry, not cache */ 465 offset = scan_swap_map(si, 1); 466 if (offset) { 467 spin_unlock(&swap_lock); 468 return swp_entry(type, offset); 469 } 470 
nr_swap_pages++; 471 } 472 spin_unlock(&swap_lock); 473 return (swp_entry_t) {0}; 474 } 475 476 static struct swap_info_struct *swap_info_get(swp_entry_t entry) 477 { 478 struct swap_info_struct *p; 479 unsigned long offset, type; 480 481 if (!entry.val) 482 goto out; 483 type = swp_type(entry); 484 if (type >= nr_swapfiles) 485 goto bad_nofile; 486 p = swap_info[type]; 487 if (!(p->flags & SWP_USED)) 488 goto bad_device; 489 offset = swp_offset(entry); 490 if (offset >= p->max) 491 goto bad_offset; 492 if (!p->swap_map[offset]) 493 goto bad_free; 494 spin_lock(&swap_lock); 495 return p; 496 497 bad_free: 498 printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val); 499 goto out; 500 bad_offset: 501 printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val); 502 goto out; 503 bad_device: 504 printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val); 505 goto out; 506 bad_nofile: 507 printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val); 508 out: 509 return NULL; 510 } 511 512 static unsigned char swap_entry_free(struct swap_info_struct *p, 513 swp_entry_t entry, unsigned char usage) 514 { 515 unsigned long offset = swp_offset(entry); 516 unsigned char count; 517 unsigned char has_cache; 518 519 count = p->swap_map[offset]; 520 has_cache = count & SWAP_HAS_CACHE; 521 count &= ~SWAP_HAS_CACHE; 522 523 if (usage == SWAP_HAS_CACHE) { 524 VM_BUG_ON(!has_cache); 525 has_cache = 0; 526 } else if (count == SWAP_MAP_SHMEM) { 527 /* 528 * Or we could insist on shmem.c using a special 529 * swap_shmem_free() and free_shmem_swap_and_cache()... 530 */ 531 count = 0; 532 } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) { 533 if (count == COUNT_CONTINUED) { 534 if (swap_count_continued(p, offset, count)) 535 count = SWAP_MAP_MAX | COUNT_CONTINUED; 536 else 537 count = SWAP_MAP_MAX; 538 } else 539 count--; 540 } 541 542 if (!count) 543 mem_cgroup_uncharge_swap(entry); 544 545 usage = count | has_cache; 546 p->swap_map[offset] = usage; 547 548 /* free if no reference */ 549 if (!usage) { 550 struct gendisk *disk = p->bdev->bd_disk; 551 if (offset < p->lowest_bit) 552 p->lowest_bit = offset; 553 if (offset > p->highest_bit) 554 p->highest_bit = offset; 555 if (swap_list.next >= 0 && 556 p->prio > swap_info[swap_list.next]->prio) 557 swap_list.next = p->type; 558 nr_swap_pages++; 559 p->inuse_pages--; 560 if ((p->flags & SWP_BLKDEV) && 561 disk->fops->swap_slot_free_notify) 562 disk->fops->swap_slot_free_notify(p->bdev, offset); 563 } 564 565 return usage; 566 } 567 568 /* 569 * Caller has made sure that the swapdevice corresponding to entry 570 * is still around or has not been recycled. 571 */ 572 void swap_free(swp_entry_t entry) 573 { 574 struct swap_info_struct *p; 575 576 p = swap_info_get(entry); 577 if (p) { 578 swap_entry_free(p, entry, 1); 579 spin_unlock(&swap_lock); 580 } 581 } 582 583 /* 584 * Called after dropping swapcache to decrease refcnt to swap entries. 585 */ 586 void swapcache_free(swp_entry_t entry, struct page *page) 587 { 588 struct swap_info_struct *p; 589 unsigned char count; 590 591 p = swap_info_get(entry); 592 if (p) { 593 count = swap_entry_free(p, entry, SWAP_HAS_CACHE); 594 if (page) 595 mem_cgroup_uncharge_swapcache(page, entry, count != 0); 596 spin_unlock(&swap_lock); 597 } 598 } 599 600 /* 601 * How many references to page are currently swapped out? 602 * This does not give an exact answer when swap count is continued, 603 * but does include the high COUNT_CONTINUED flag to allow for that. 
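 *
 * For example (with the usual swap_map encoding): an entry of
 * (SWAP_HAS_CACHE | 3) means three pte references plus the swap cache,
 * so page_swapcount() reports 3; an entry with COUNT_CONTINUED set
 * reports at least SWAP_MAP_MAX, the remainder living in continuation
 * pages.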
604 */ 605 static inline int page_swapcount(struct page *page) 606 { 607 int count = 0; 608 struct swap_info_struct *p; 609 swp_entry_t entry; 610 611 entry.val = page_private(page); 612 p = swap_info_get(entry); 613 if (p) { 614 count = swap_count(p->swap_map[swp_offset(entry)]); 615 spin_unlock(&swap_lock); 616 } 617 return count; 618 } 619 620 /* 621 * We can write to an anon page without COW if there are no other references 622 * to it. And as a side-effect, free up its swap: because the old content 623 * on disk will never be read, and seeking back there to write new content 624 * later would only waste time away from clustering. 625 */ 626 int reuse_swap_page(struct page *page) 627 { 628 int count; 629 630 VM_BUG_ON(!PageLocked(page)); 631 if (unlikely(PageKsm(page))) 632 return 0; 633 count = page_mapcount(page); 634 if (count <= 1 && PageSwapCache(page)) { 635 count += page_swapcount(page); 636 if (count == 1 && !PageWriteback(page)) { 637 delete_from_swap_cache(page); 638 SetPageDirty(page); 639 } 640 } 641 return count <= 1; 642 } 643 644 /* 645 * If swap is getting full, or if there are no more mappings of this page, 646 * then try_to_free_swap is called to free its swap space. 647 */ 648 int try_to_free_swap(struct page *page) 649 { 650 VM_BUG_ON(!PageLocked(page)); 651 652 if (!PageSwapCache(page)) 653 return 0; 654 if (PageWriteback(page)) 655 return 0; 656 if (page_swapcount(page)) 657 return 0; 658 659 /* 660 * Once hibernation has begun to create its image of memory, 661 * there's a danger that one of the calls to try_to_free_swap() 662 * - most probably a call from __try_to_reclaim_swap() while 663 * hibernation is allocating its own swap pages for the image, 664 * but conceivably even a call from memory reclaim - will free 665 * the swap from a page which has already been recorded in the 666 * image as a clean swapcache page, and then reuse its swap for 667 * another page of the image. On waking from hibernation, the 668 * original page might be freed under memory pressure, then 669 * later read back in from swap, now with the wrong data. 670 * 671 * Hibernation clears bits from gfp_allowed_mask to prevent 672 * memory reclaim from writing to disk, so check that here. 673 */ 674 if (!(gfp_allowed_mask & __GFP_IO)) 675 return 0; 676 677 delete_from_swap_cache(page); 678 SetPageDirty(page); 679 return 1; 680 } 681 682 /* 683 * Free the swap entry like above, but also try to 684 * free the page cache entry if it is the last user. 685 */ 686 int free_swap_and_cache(swp_entry_t entry) 687 { 688 struct swap_info_struct *p; 689 struct page *page = NULL; 690 691 if (non_swap_entry(entry)) 692 return 1; 693 694 p = swap_info_get(entry); 695 if (p) { 696 if (swap_entry_free(p, entry, 1) == SWAP_HAS_CACHE) { 697 page = find_get_page(&swapper_space, entry.val); 698 if (page && !trylock_page(page)) { 699 page_cache_release(page); 700 page = NULL; 701 } 702 } 703 spin_unlock(&swap_lock); 704 } 705 if (page) { 706 /* 707 * Not mapped elsewhere, or swap space full? Free it! 708 * Also recheck PageSwapCache now page is locked (above). 
709 */ 710 if (PageSwapCache(page) && !PageWriteback(page) && 711 (!page_mapped(page) || vm_swap_full())) { 712 delete_from_swap_cache(page); 713 SetPageDirty(page); 714 } 715 unlock_page(page); 716 page_cache_release(page); 717 } 718 return p != NULL; 719 } 720 721 #ifdef CONFIG_CGROUP_MEM_RES_CTLR 722 /** 723 * mem_cgroup_count_swap_user - count the user of a swap entry 724 * @ent: the swap entry to be checked 725 * @pagep: the pointer for the swap cache page of the entry to be stored 726 * 727 * Returns the number of the user of the swap entry. The number is valid only 728 * for swaps of anonymous pages. 729 * If the entry is found on swap cache, the page is stored to pagep with 730 * refcount of it being incremented. 731 */ 732 int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep) 733 { 734 struct page *page; 735 struct swap_info_struct *p; 736 int count = 0; 737 738 page = find_get_page(&swapper_space, ent.val); 739 if (page) 740 count += page_mapcount(page); 741 p = swap_info_get(ent); 742 if (p) { 743 count += swap_count(p->swap_map[swp_offset(ent)]); 744 spin_unlock(&swap_lock); 745 } 746 747 *pagep = page; 748 return count; 749 } 750 #endif 751 752 #ifdef CONFIG_HIBERNATION 753 /* 754 * Find the swap type that corresponds to given device (if any). 755 * 756 * @offset - number of the PAGE_SIZE-sized block of the device, starting 757 * from 0, in which the swap header is expected to be located. 758 * 759 * This is needed for the suspend to disk (aka swsusp). 760 */ 761 int swap_type_of(dev_t device, sector_t offset, struct block_device **bdev_p) 762 { 763 struct block_device *bdev = NULL; 764 int type; 765 766 if (device) 767 bdev = bdget(device); 768 769 spin_lock(&swap_lock); 770 for (type = 0; type < nr_swapfiles; type++) { 771 struct swap_info_struct *sis = swap_info[type]; 772 773 if (!(sis->flags & SWP_WRITEOK)) 774 continue; 775 776 if (!bdev) { 777 if (bdev_p) 778 *bdev_p = bdgrab(sis->bdev); 779 780 spin_unlock(&swap_lock); 781 return type; 782 } 783 if (bdev == sis->bdev) { 784 struct swap_extent *se = &sis->first_swap_extent; 785 786 if (se->start_block == offset) { 787 if (bdev_p) 788 *bdev_p = bdgrab(sis->bdev); 789 790 spin_unlock(&swap_lock); 791 bdput(bdev); 792 return type; 793 } 794 } 795 } 796 spin_unlock(&swap_lock); 797 if (bdev) 798 bdput(bdev); 799 800 return -ENODEV; 801 } 802 803 /* 804 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev 805 * corresponding to given index in swap_info (swap type). 
806 */ 807 sector_t swapdev_block(int type, pgoff_t offset) 808 { 809 struct block_device *bdev; 810 811 if ((unsigned int)type >= nr_swapfiles) 812 return 0; 813 if (!(swap_info[type]->flags & SWP_WRITEOK)) 814 return 0; 815 return map_swap_entry(swp_entry(type, offset), &bdev); 816 } 817 818 /* 819 * Return either the total number of swap pages of given type, or the number 820 * of free pages of that type (depending on @free) 821 * 822 * This is needed for software suspend 823 */ 824 unsigned int count_swap_pages(int type, int free) 825 { 826 unsigned int n = 0; 827 828 spin_lock(&swap_lock); 829 if ((unsigned int)type < nr_swapfiles) { 830 struct swap_info_struct *sis = swap_info[type]; 831 832 if (sis->flags & SWP_WRITEOK) { 833 n = sis->pages; 834 if (free) 835 n -= sis->inuse_pages; 836 } 837 } 838 spin_unlock(&swap_lock); 839 return n; 840 } 841 #endif /* CONFIG_HIBERNATION */ 842 843 /* 844 * No need to decide whether this PTE shares the swap entry with others, 845 * just let do_wp_page work it out if a write is requested later - to 846 * force COW, vm_page_prot omits write permission from any private vma. 847 */ 848 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, 849 unsigned long addr, swp_entry_t entry, struct page *page) 850 { 851 struct mem_cgroup *ptr; 852 spinlock_t *ptl; 853 pte_t *pte; 854 int ret = 1; 855 856 if (mem_cgroup_try_charge_swapin(vma->vm_mm, page, GFP_KERNEL, &ptr)) { 857 ret = -ENOMEM; 858 goto out_nolock; 859 } 860 861 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 862 if (unlikely(!pte_same(*pte, swp_entry_to_pte(entry)))) { 863 if (ret > 0) 864 mem_cgroup_cancel_charge_swapin(ptr); 865 ret = 0; 866 goto out; 867 } 868 869 dec_mm_counter(vma->vm_mm, MM_SWAPENTS); 870 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); 871 get_page(page); 872 set_pte_at(vma->vm_mm, addr, pte, 873 pte_mkold(mk_pte(page, vma->vm_page_prot))); 874 page_add_anon_rmap(page, vma, addr); 875 mem_cgroup_commit_charge_swapin(page, ptr); 876 swap_free(entry); 877 /* 878 * Move the page to the active list so it is not 879 * immediately swapped out again after swapon. 880 */ 881 activate_page(page); 882 out: 883 pte_unmap_unlock(pte, ptl); 884 out_nolock: 885 return ret; 886 } 887 888 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, 889 unsigned long addr, unsigned long end, 890 swp_entry_t entry, struct page *page) 891 { 892 pte_t swp_pte = swp_entry_to_pte(entry); 893 pte_t *pte; 894 int ret = 0; 895 896 /* 897 * We don't actually need pte lock while scanning for swp_pte: since 898 * we hold page lock and mmap_sem, swp_pte cannot be inserted into the 899 * page table while we're scanning; though it could get zapped, and on 900 * some architectures (e.g. x86_32 with PAE) we might catch a glimpse 901 * of unmatched parts which look like swp_pte, so unuse_pte must 902 * recheck under pte lock. Scanning without pte lock lets it be 903 * preemptible whenever CONFIG_PREEMPT but not CONFIG_HIGHPTE. 904 */ 905 pte = pte_offset_map(pmd, addr); 906 do { 907 /* 908 * swapoff spends a _lot_ of time in this loop! 909 * Test inline before going to call unuse_pte. 
910 */ 911 if (unlikely(pte_same(*pte, swp_pte))) { 912 pte_unmap(pte); 913 ret = unuse_pte(vma, pmd, addr, entry, page); 914 if (ret) 915 goto out; 916 pte = pte_offset_map(pmd, addr); 917 } 918 } while (pte++, addr += PAGE_SIZE, addr != end); 919 pte_unmap(pte - 1); 920 out: 921 return ret; 922 } 923 924 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, 925 unsigned long addr, unsigned long end, 926 swp_entry_t entry, struct page *page) 927 { 928 pmd_t *pmd; 929 unsigned long next; 930 int ret; 931 932 pmd = pmd_offset(pud, addr); 933 do { 934 next = pmd_addr_end(addr, end); 935 if (unlikely(pmd_trans_huge(*pmd))) 936 continue; 937 if (pmd_none_or_clear_bad(pmd)) 938 continue; 939 ret = unuse_pte_range(vma, pmd, addr, next, entry, page); 940 if (ret) 941 return ret; 942 } while (pmd++, addr = next, addr != end); 943 return 0; 944 } 945 946 static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd, 947 unsigned long addr, unsigned long end, 948 swp_entry_t entry, struct page *page) 949 { 950 pud_t *pud; 951 unsigned long next; 952 int ret; 953 954 pud = pud_offset(pgd, addr); 955 do { 956 next = pud_addr_end(addr, end); 957 if (pud_none_or_clear_bad(pud)) 958 continue; 959 ret = unuse_pmd_range(vma, pud, addr, next, entry, page); 960 if (ret) 961 return ret; 962 } while (pud++, addr = next, addr != end); 963 return 0; 964 } 965 966 static int unuse_vma(struct vm_area_struct *vma, 967 swp_entry_t entry, struct page *page) 968 { 969 pgd_t *pgd; 970 unsigned long addr, end, next; 971 int ret; 972 973 if (page_anon_vma(page)) { 974 addr = page_address_in_vma(page, vma); 975 if (addr == -EFAULT) 976 return 0; 977 else 978 end = addr + PAGE_SIZE; 979 } else { 980 addr = vma->vm_start; 981 end = vma->vm_end; 982 } 983 984 pgd = pgd_offset(vma->vm_mm, addr); 985 do { 986 next = pgd_addr_end(addr, end); 987 if (pgd_none_or_clear_bad(pgd)) 988 continue; 989 ret = unuse_pud_range(vma, pgd, addr, next, entry, page); 990 if (ret) 991 return ret; 992 } while (pgd++, addr = next, addr != end); 993 return 0; 994 } 995 996 static int unuse_mm(struct mm_struct *mm, 997 swp_entry_t entry, struct page *page) 998 { 999 struct vm_area_struct *vma; 1000 int ret = 0; 1001 1002 if (!down_read_trylock(&mm->mmap_sem)) { 1003 /* 1004 * Activate page so shrink_inactive_list is unlikely to unmap 1005 * its ptes while lock is dropped, so swapoff can make progress. 1006 */ 1007 activate_page(page); 1008 unlock_page(page); 1009 down_read(&mm->mmap_sem); 1010 lock_page(page); 1011 } 1012 for (vma = mm->mmap; vma; vma = vma->vm_next) { 1013 if (vma->anon_vma && (ret = unuse_vma(vma, entry, page))) 1014 break; 1015 } 1016 up_read(&mm->mmap_sem); 1017 return (ret < 0)? ret: 0; 1018 } 1019 1020 /* 1021 * Scan swap_map from current position to next entry still in use. 1022 * Recycle to start on reaching the end, returning 0 when empty. 1023 */ 1024 static unsigned int find_next_to_unuse(struct swap_info_struct *si, 1025 unsigned int prev) 1026 { 1027 unsigned int max = si->max; 1028 unsigned int i = prev; 1029 unsigned char count; 1030 1031 /* 1032 * No need for swap_lock here: we're just looking 1033 * for whether an entry is in use, not modifying it; false 1034 * hits are okay, and sys_swapoff() has already prevented new 1035 * allocations from this area (while holding swap_lock). 1036 */ 1037 for (;;) { 1038 if (++i >= max) { 1039 if (!prev) { 1040 i = 0; 1041 break; 1042 } 1043 /* 1044 * No entries in use at top of swap_map, 1045 * loop back to start and recheck there. 
1046 */ 1047 max = prev + 1; 1048 prev = 0; 1049 i = 1; 1050 } 1051 count = si->swap_map[i]; 1052 if (count && swap_count(count) != SWAP_MAP_BAD) 1053 break; 1054 } 1055 return i; 1056 } 1057 1058 /* 1059 * We completely avoid races by reading each swap page in advance, 1060 * and then search for the process using it. All the necessary 1061 * page table adjustments can then be made atomically. 1062 */ 1063 static int try_to_unuse(unsigned int type) 1064 { 1065 struct swap_info_struct *si = swap_info[type]; 1066 struct mm_struct *start_mm; 1067 unsigned char *swap_map; 1068 unsigned char swcount; 1069 struct page *page; 1070 swp_entry_t entry; 1071 unsigned int i = 0; 1072 int retval = 0; 1073 1074 /* 1075 * When searching mms for an entry, a good strategy is to 1076 * start at the first mm we freed the previous entry from 1077 * (though actually we don't notice whether we or coincidence 1078 * freed the entry). Initialize this start_mm with a hold. 1079 * 1080 * A simpler strategy would be to start at the last mm we 1081 * freed the previous entry from; but that would take less 1082 * advantage of mmlist ordering, which clusters forked mms 1083 * together, child after parent. If we race with dup_mmap(), we 1084 * prefer to resolve parent before child, lest we miss entries 1085 * duplicated after we scanned child: using last mm would invert 1086 * that. 1087 */ 1088 start_mm = &init_mm; 1089 atomic_inc(&init_mm.mm_users); 1090 1091 /* 1092 * Keep on scanning until all entries have gone. Usually, 1093 * one pass through swap_map is enough, but not necessarily: 1094 * there are races when an instance of an entry might be missed. 1095 */ 1096 while ((i = find_next_to_unuse(si, i)) != 0) { 1097 if (signal_pending(current)) { 1098 retval = -EINTR; 1099 break; 1100 } 1101 1102 /* 1103 * Get a page for the entry, using the existing swap 1104 * cache page if there is one. Otherwise, get a clean 1105 * page and read the swap into it. 1106 */ 1107 swap_map = &si->swap_map[i]; 1108 entry = swp_entry(type, i); 1109 page = read_swap_cache_async(entry, 1110 GFP_HIGHUSER_MOVABLE, NULL, 0); 1111 if (!page) { 1112 /* 1113 * Either swap_duplicate() failed because entry 1114 * has been freed independently, and will not be 1115 * reused since sys_swapoff() already disabled 1116 * allocation from here, or alloc_page() failed. 1117 */ 1118 if (!*swap_map) 1119 continue; 1120 retval = -ENOMEM; 1121 break; 1122 } 1123 1124 /* 1125 * Don't hold on to start_mm if it looks like exiting. 1126 */ 1127 if (atomic_read(&start_mm->mm_users) == 1) { 1128 mmput(start_mm); 1129 start_mm = &init_mm; 1130 atomic_inc(&init_mm.mm_users); 1131 } 1132 1133 /* 1134 * Wait for and lock page. When do_swap_page races with 1135 * try_to_unuse, do_swap_page can handle the fault much 1136 * faster than try_to_unuse can locate the entry. This 1137 * apparently redundant "wait_on_page_locked" lets try_to_unuse 1138 * defer to do_swap_page in such a case - in some tests, 1139 * do_swap_page and try_to_unuse repeatedly compete. 1140 */ 1141 wait_on_page_locked(page); 1142 wait_on_page_writeback(page); 1143 lock_page(page); 1144 wait_on_page_writeback(page); 1145 1146 /* 1147 * Remove all references to entry. 
1148 */ 1149 swcount = *swap_map; 1150 if (swap_count(swcount) == SWAP_MAP_SHMEM) { 1151 retval = shmem_unuse(entry, page); 1152 /* page has already been unlocked and released */ 1153 if (retval < 0) 1154 break; 1155 continue; 1156 } 1157 if (swap_count(swcount) && start_mm != &init_mm) 1158 retval = unuse_mm(start_mm, entry, page); 1159 1160 if (swap_count(*swap_map)) { 1161 int set_start_mm = (*swap_map >= swcount); 1162 struct list_head *p = &start_mm->mmlist; 1163 struct mm_struct *new_start_mm = start_mm; 1164 struct mm_struct *prev_mm = start_mm; 1165 struct mm_struct *mm; 1166 1167 atomic_inc(&new_start_mm->mm_users); 1168 atomic_inc(&prev_mm->mm_users); 1169 spin_lock(&mmlist_lock); 1170 while (swap_count(*swap_map) && !retval && 1171 (p = p->next) != &start_mm->mmlist) { 1172 mm = list_entry(p, struct mm_struct, mmlist); 1173 if (!atomic_inc_not_zero(&mm->mm_users)) 1174 continue; 1175 spin_unlock(&mmlist_lock); 1176 mmput(prev_mm); 1177 prev_mm = mm; 1178 1179 cond_resched(); 1180 1181 swcount = *swap_map; 1182 if (!swap_count(swcount)) /* any usage ? */ 1183 ; 1184 else if (mm == &init_mm) 1185 set_start_mm = 1; 1186 else 1187 retval = unuse_mm(mm, entry, page); 1188 1189 if (set_start_mm && *swap_map < swcount) { 1190 mmput(new_start_mm); 1191 atomic_inc(&mm->mm_users); 1192 new_start_mm = mm; 1193 set_start_mm = 0; 1194 } 1195 spin_lock(&mmlist_lock); 1196 } 1197 spin_unlock(&mmlist_lock); 1198 mmput(prev_mm); 1199 mmput(start_mm); 1200 start_mm = new_start_mm; 1201 } 1202 if (retval) { 1203 unlock_page(page); 1204 page_cache_release(page); 1205 break; 1206 } 1207 1208 /* 1209 * If a reference remains (rare), we would like to leave 1210 * the page in the swap cache; but try_to_unmap could 1211 * then re-duplicate the entry once we drop page lock, 1212 * so we might loop indefinitely; also, that page could 1213 * not be swapped out to other storage meanwhile. So: 1214 * delete from cache even if there's another reference, 1215 * after ensuring that the data has been saved to disk - 1216 * since if the reference remains (rarer), it will be 1217 * read from disk into another page. Splitting into two 1218 * pages would be incorrect if swap supported "shared 1219 * private" pages, but they are handled by tmpfs files. 1220 * 1221 * Given how unuse_vma() targets one particular offset 1222 * in an anon_vma, once the anon_vma has been determined, 1223 * this splitting happens to be just what is needed to 1224 * handle where KSM pages have been swapped out: re-reading 1225 * is unnecessarily slow, but we can fix that later on. 1226 */ 1227 if (swap_count(*swap_map) && 1228 PageDirty(page) && PageSwapCache(page)) { 1229 struct writeback_control wbc = { 1230 .sync_mode = WB_SYNC_NONE, 1231 }; 1232 1233 swap_writepage(page, &wbc); 1234 lock_page(page); 1235 wait_on_page_writeback(page); 1236 } 1237 1238 /* 1239 * It is conceivable that a racing task removed this page from 1240 * swap cache just before we acquired the page lock at the top, 1241 * or while we dropped it in unuse_mm(). The page might even 1242 * be back in swap cache on another swap area: that we must not 1243 * delete, since it may not have been written out to swap yet. 1244 */ 1245 if (PageSwapCache(page) && 1246 likely(page_private(page) == entry.val)) 1247 delete_from_swap_cache(page); 1248 1249 /* 1250 * So we could skip searching mms once swap count went 1251 * to 1, we did not mark any present ptes as dirty: must 1252 * mark page dirty so shrink_page_list will preserve it. 
1253 */ 1254 SetPageDirty(page); 1255 unlock_page(page); 1256 page_cache_release(page); 1257 1258 /* 1259 * Make sure that we aren't completely killing 1260 * interactive performance. 1261 */ 1262 cond_resched(); 1263 } 1264 1265 mmput(start_mm); 1266 return retval; 1267 } 1268 1269 /* 1270 * After a successful try_to_unuse, if no swap is now in use, we know 1271 * we can empty the mmlist. swap_lock must be held on entry and exit. 1272 * Note that mmlist_lock nests inside swap_lock, and an mm must be 1273 * added to the mmlist just after page_duplicate - before would be racy. 1274 */ 1275 static void drain_mmlist(void) 1276 { 1277 struct list_head *p, *next; 1278 unsigned int type; 1279 1280 for (type = 0; type < nr_swapfiles; type++) 1281 if (swap_info[type]->inuse_pages) 1282 return; 1283 spin_lock(&mmlist_lock); 1284 list_for_each_safe(p, next, &init_mm.mmlist) 1285 list_del_init(p); 1286 spin_unlock(&mmlist_lock); 1287 } 1288 1289 /* 1290 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which 1291 * corresponds to page offset for the specified swap entry. 1292 * Note that the type of this function is sector_t, but it returns page offset 1293 * into the bdev, not sector offset. 1294 */ 1295 static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev) 1296 { 1297 struct swap_info_struct *sis; 1298 struct swap_extent *start_se; 1299 struct swap_extent *se; 1300 pgoff_t offset; 1301 1302 sis = swap_info[swp_type(entry)]; 1303 *bdev = sis->bdev; 1304 1305 offset = swp_offset(entry); 1306 start_se = sis->curr_swap_extent; 1307 se = start_se; 1308 1309 for ( ; ; ) { 1310 struct list_head *lh; 1311 1312 if (se->start_page <= offset && 1313 offset < (se->start_page + se->nr_pages)) { 1314 return se->start_block + (offset - se->start_page); 1315 } 1316 lh = se->list.next; 1317 se = list_entry(lh, struct swap_extent, list); 1318 sis->curr_swap_extent = se; 1319 BUG_ON(se == start_se); /* It *must* be present */ 1320 } 1321 } 1322 1323 /* 1324 * Returns the page offset into bdev for the specified page's swap entry. 1325 */ 1326 sector_t map_swap_page(struct page *page, struct block_device **bdev) 1327 { 1328 swp_entry_t entry; 1329 entry.val = page_private(page); 1330 return map_swap_entry(entry, bdev); 1331 } 1332 1333 /* 1334 * Free all of a swapdev's extent information 1335 */ 1336 static void destroy_swap_extents(struct swap_info_struct *sis) 1337 { 1338 while (!list_empty(&sis->first_swap_extent.list)) { 1339 struct swap_extent *se; 1340 1341 se = list_entry(sis->first_swap_extent.list.next, 1342 struct swap_extent, list); 1343 list_del(&se->list); 1344 kfree(se); 1345 } 1346 } 1347 1348 /* 1349 * Add a block range (and the corresponding page range) into this swapdev's 1350 * extent list. The extent list is kept sorted in page order. 1351 * 1352 * This function rather assumes that it is called in ascending page order. 
1353 */ 1354 static int 1355 add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, 1356 unsigned long nr_pages, sector_t start_block) 1357 { 1358 struct swap_extent *se; 1359 struct swap_extent *new_se; 1360 struct list_head *lh; 1361 1362 if (start_page == 0) { 1363 se = &sis->first_swap_extent; 1364 sis->curr_swap_extent = se; 1365 se->start_page = 0; 1366 se->nr_pages = nr_pages; 1367 se->start_block = start_block; 1368 return 1; 1369 } else { 1370 lh = sis->first_swap_extent.list.prev; /* Highest extent */ 1371 se = list_entry(lh, struct swap_extent, list); 1372 BUG_ON(se->start_page + se->nr_pages != start_page); 1373 if (se->start_block + se->nr_pages == start_block) { 1374 /* Merge it */ 1375 se->nr_pages += nr_pages; 1376 return 0; 1377 } 1378 } 1379 1380 /* 1381 * No merge. Insert a new extent, preserving ordering. 1382 */ 1383 new_se = kmalloc(sizeof(*se), GFP_KERNEL); 1384 if (new_se == NULL) 1385 return -ENOMEM; 1386 new_se->start_page = start_page; 1387 new_se->nr_pages = nr_pages; 1388 new_se->start_block = start_block; 1389 1390 list_add_tail(&new_se->list, &sis->first_swap_extent.list); 1391 return 1; 1392 } 1393 1394 /* 1395 * A `swap extent' is a simple thing which maps a contiguous range of pages 1396 * onto a contiguous range of disk blocks. An ordered list of swap extents 1397 * is built at swapon time and is then used at swap_writepage/swap_readpage 1398 * time for locating where on disk a page belongs. 1399 * 1400 * If the swapfile is an S_ISBLK block device, a single extent is installed. 1401 * This is done so that the main operating code can treat S_ISBLK and S_ISREG 1402 * swap files identically. 1403 * 1404 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap 1405 * extent list operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK 1406 * swapfiles are handled *identically* after swapon time. 1407 * 1408 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks 1409 * and will parse them into an ordered extent list, in PAGE_SIZE chunks. If 1410 * some stray blocks are found which do not fall within the PAGE_SIZE alignment 1411 * requirements, they are simply tossed out - we will never use those blocks 1412 * for swapping. 1413 * 1414 * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon. This 1415 * prevents root from shooting her foot off by ftruncating an in-use swapfile, 1416 * which will scribble on the fs. 1417 * 1418 * The amount of disk space which a single swap extent represents varies. 1419 * Typically it is in the 1-4 megabyte range. So we can have hundreds of 1420 * extents in the list. To avoid much list walking, we cache the previous 1421 * search location in `curr_swap_extent', and start new searches from there. 1422 * This is extremely effective. The average number of iterations in 1423 * map_swap_page() has been measured at about 0.3 per page. - akpm. 
1424 */ 1425 static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span) 1426 { 1427 struct inode *inode; 1428 unsigned blocks_per_page; 1429 unsigned long page_no; 1430 unsigned blkbits; 1431 sector_t probe_block; 1432 sector_t last_block; 1433 sector_t lowest_block = -1; 1434 sector_t highest_block = 0; 1435 int nr_extents = 0; 1436 int ret; 1437 1438 inode = sis->swap_file->f_mapping->host; 1439 if (S_ISBLK(inode->i_mode)) { 1440 ret = add_swap_extent(sis, 0, sis->max, 0); 1441 *span = sis->pages; 1442 goto out; 1443 } 1444 1445 blkbits = inode->i_blkbits; 1446 blocks_per_page = PAGE_SIZE >> blkbits; 1447 1448 /* 1449 * Map all the blocks into the extent list. This code doesn't try 1450 * to be very smart. 1451 */ 1452 probe_block = 0; 1453 page_no = 0; 1454 last_block = i_size_read(inode) >> blkbits; 1455 while ((probe_block + blocks_per_page) <= last_block && 1456 page_no < sis->max) { 1457 unsigned block_in_page; 1458 sector_t first_block; 1459 1460 first_block = bmap(inode, probe_block); 1461 if (first_block == 0) 1462 goto bad_bmap; 1463 1464 /* 1465 * It must be PAGE_SIZE aligned on-disk 1466 */ 1467 if (first_block & (blocks_per_page - 1)) { 1468 probe_block++; 1469 goto reprobe; 1470 } 1471 1472 for (block_in_page = 1; block_in_page < blocks_per_page; 1473 block_in_page++) { 1474 sector_t block; 1475 1476 block = bmap(inode, probe_block + block_in_page); 1477 if (block == 0) 1478 goto bad_bmap; 1479 if (block != first_block + block_in_page) { 1480 /* Discontiguity */ 1481 probe_block++; 1482 goto reprobe; 1483 } 1484 } 1485 1486 first_block >>= (PAGE_SHIFT - blkbits); 1487 if (page_no) { /* exclude the header page */ 1488 if (first_block < lowest_block) 1489 lowest_block = first_block; 1490 if (first_block > highest_block) 1491 highest_block = first_block; 1492 } 1493 1494 /* 1495 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks 1496 */ 1497 ret = add_swap_extent(sis, page_no, 1, first_block); 1498 if (ret < 0) 1499 goto out; 1500 nr_extents += ret; 1501 page_no++; 1502 probe_block += blocks_per_page; 1503 reprobe: 1504 continue; 1505 } 1506 ret = nr_extents; 1507 *span = 1 + highest_block - lowest_block; 1508 if (page_no == 0) 1509 page_no = 1; /* force Empty message */ 1510 sis->max = page_no; 1511 sis->pages = page_no - 1; 1512 sis->highest_bit = page_no - 1; 1513 out: 1514 return ret; 1515 bad_bmap: 1516 printk(KERN_ERR "swapon: swapfile has holes\n"); 1517 ret = -EINVAL; 1518 goto out; 1519 } 1520 1521 static void enable_swap_info(struct swap_info_struct *p, int prio, 1522 unsigned char *swap_map) 1523 { 1524 int i, prev; 1525 1526 spin_lock(&swap_lock); 1527 if (prio >= 0) 1528 p->prio = prio; 1529 else 1530 p->prio = --least_priority; 1531 p->swap_map = swap_map; 1532 p->flags |= SWP_WRITEOK; 1533 nr_swap_pages += p->pages; 1534 total_swap_pages += p->pages; 1535 1536 /* insert swap space into swap_list: */ 1537 prev = -1; 1538 for (i = swap_list.head; i >= 0; i = swap_info[i]->next) { 1539 if (p->prio >= swap_info[i]->prio) 1540 break; 1541 prev = i; 1542 } 1543 p->next = i; 1544 if (prev < 0) 1545 swap_list.head = swap_list.next = p->type; 1546 else 1547 swap_info[prev]->next = p->type; 1548 spin_unlock(&swap_lock); 1549 } 1550 1551 SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) 1552 { 1553 struct swap_info_struct *p = NULL; 1554 unsigned char *swap_map; 1555 struct file *swap_file, *victim; 1556 struct address_space *mapping; 1557 struct inode *inode; 1558 char *pathname; 1559 int oom_score_adj; 1560 int i, type, prev; 1561 int 
err; 1562 1563 if (!capable(CAP_SYS_ADMIN)) 1564 return -EPERM; 1565 1566 pathname = getname(specialfile); 1567 err = PTR_ERR(pathname); 1568 if (IS_ERR(pathname)) 1569 goto out; 1570 1571 victim = filp_open(pathname, O_RDWR|O_LARGEFILE, 0); 1572 putname(pathname); 1573 err = PTR_ERR(victim); 1574 if (IS_ERR(victim)) 1575 goto out; 1576 1577 mapping = victim->f_mapping; 1578 prev = -1; 1579 spin_lock(&swap_lock); 1580 for (type = swap_list.head; type >= 0; type = swap_info[type]->next) { 1581 p = swap_info[type]; 1582 if (p->flags & SWP_WRITEOK) { 1583 if (p->swap_file->f_mapping == mapping) 1584 break; 1585 } 1586 prev = type; 1587 } 1588 if (type < 0) { 1589 err = -EINVAL; 1590 spin_unlock(&swap_lock); 1591 goto out_dput; 1592 } 1593 if (!security_vm_enough_memory(p->pages)) 1594 vm_unacct_memory(p->pages); 1595 else { 1596 err = -ENOMEM; 1597 spin_unlock(&swap_lock); 1598 goto out_dput; 1599 } 1600 if (prev < 0) 1601 swap_list.head = p->next; 1602 else 1603 swap_info[prev]->next = p->next; 1604 if (type == swap_list.next) { 1605 /* just pick something that's safe... */ 1606 swap_list.next = swap_list.head; 1607 } 1608 if (p->prio < 0) { 1609 for (i = p->next; i >= 0; i = swap_info[i]->next) 1610 swap_info[i]->prio = p->prio--; 1611 least_priority++; 1612 } 1613 nr_swap_pages -= p->pages; 1614 total_swap_pages -= p->pages; 1615 p->flags &= ~SWP_WRITEOK; 1616 spin_unlock(&swap_lock); 1617 1618 oom_score_adj = test_set_oom_score_adj(OOM_SCORE_ADJ_MAX); 1619 err = try_to_unuse(type); 1620 test_set_oom_score_adj(oom_score_adj); 1621 1622 if (err) { 1623 /* 1624 * reading p->prio and p->swap_map outside the lock is 1625 * safe here because only sys_swapon and sys_swapoff 1626 * change them, and there can be no other sys_swapon or 1627 * sys_swapoff for this swap_info_struct at this point. 
1628 */ 1629 /* re-insert swap space back into swap_list */ 1630 enable_swap_info(p, p->prio, p->swap_map); 1631 goto out_dput; 1632 } 1633 1634 destroy_swap_extents(p); 1635 if (p->flags & SWP_CONTINUED) 1636 free_swap_count_continuations(p); 1637 1638 mutex_lock(&swapon_mutex); 1639 spin_lock(&swap_lock); 1640 drain_mmlist(); 1641 1642 /* wait for anyone still in scan_swap_map */ 1643 p->highest_bit = 0; /* cuts scans short */ 1644 while (p->flags >= SWP_SCANNING) { 1645 spin_unlock(&swap_lock); 1646 schedule_timeout_uninterruptible(1); 1647 spin_lock(&swap_lock); 1648 } 1649 1650 swap_file = p->swap_file; 1651 p->swap_file = NULL; 1652 p->max = 0; 1653 swap_map = p->swap_map; 1654 p->swap_map = NULL; 1655 p->flags = 0; 1656 spin_unlock(&swap_lock); 1657 mutex_unlock(&swapon_mutex); 1658 vfree(swap_map); 1659 /* Destroy swap account informatin */ 1660 swap_cgroup_swapoff(type); 1661 1662 inode = mapping->host; 1663 if (S_ISBLK(inode->i_mode)) { 1664 struct block_device *bdev = I_BDEV(inode); 1665 set_blocksize(bdev, p->old_block_size); 1666 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 1667 } else { 1668 mutex_lock(&inode->i_mutex); 1669 inode->i_flags &= ~S_SWAPFILE; 1670 mutex_unlock(&inode->i_mutex); 1671 } 1672 filp_close(swap_file, NULL); 1673 err = 0; 1674 atomic_inc(&proc_poll_event); 1675 wake_up_interruptible(&proc_poll_wait); 1676 1677 out_dput: 1678 filp_close(victim, NULL); 1679 out: 1680 return err; 1681 } 1682 1683 #ifdef CONFIG_PROC_FS 1684 static unsigned swaps_poll(struct file *file, poll_table *wait) 1685 { 1686 struct seq_file *seq = file->private_data; 1687 1688 poll_wait(file, &proc_poll_wait, wait); 1689 1690 if (seq->poll_event != atomic_read(&proc_poll_event)) { 1691 seq->poll_event = atomic_read(&proc_poll_event); 1692 return POLLIN | POLLRDNORM | POLLERR | POLLPRI; 1693 } 1694 1695 return POLLIN | POLLRDNORM; 1696 } 1697 1698 /* iterator */ 1699 static void *swap_start(struct seq_file *swap, loff_t *pos) 1700 { 1701 struct swap_info_struct *si; 1702 int type; 1703 loff_t l = *pos; 1704 1705 mutex_lock(&swapon_mutex); 1706 1707 if (!l) 1708 return SEQ_START_TOKEN; 1709 1710 for (type = 0; type < nr_swapfiles; type++) { 1711 smp_rmb(); /* read nr_swapfiles before swap_info[type] */ 1712 si = swap_info[type]; 1713 if (!(si->flags & SWP_USED) || !si->swap_map) 1714 continue; 1715 if (!--l) 1716 return si; 1717 } 1718 1719 return NULL; 1720 } 1721 1722 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) 1723 { 1724 struct swap_info_struct *si = v; 1725 int type; 1726 1727 if (v == SEQ_START_TOKEN) 1728 type = 0; 1729 else 1730 type = si->type + 1; 1731 1732 for (; type < nr_swapfiles; type++) { 1733 smp_rmb(); /* read nr_swapfiles before swap_info[type] */ 1734 si = swap_info[type]; 1735 if (!(si->flags & SWP_USED) || !si->swap_map) 1736 continue; 1737 ++*pos; 1738 return si; 1739 } 1740 1741 return NULL; 1742 } 1743 1744 static void swap_stop(struct seq_file *swap, void *v) 1745 { 1746 mutex_unlock(&swapon_mutex); 1747 } 1748 1749 static int swap_show(struct seq_file *swap, void *v) 1750 { 1751 struct swap_info_struct *si = v; 1752 struct file *file; 1753 int len; 1754 1755 if (si == SEQ_START_TOKEN) { 1756 seq_puts(swap,"Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n"); 1757 return 0; 1758 } 1759 1760 file = si->swap_file; 1761 len = seq_path(swap, &file->f_path, " \t\n\\"); 1762 seq_printf(swap, "%*s%s\t%u\t%u\t%d\n", 1763 len < 40 ? 40 - len : 1, " ", 1764 S_ISBLK(file->f_path.dentry->d_inode->i_mode) ? 
1765 "partition" : "file\t", 1766 si->pages << (PAGE_SHIFT - 10), 1767 si->inuse_pages << (PAGE_SHIFT - 10), 1768 si->prio); 1769 return 0; 1770 } 1771 1772 static const struct seq_operations swaps_op = { 1773 .start = swap_start, 1774 .next = swap_next, 1775 .stop = swap_stop, 1776 .show = swap_show 1777 }; 1778 1779 static int swaps_open(struct inode *inode, struct file *file) 1780 { 1781 struct seq_file *seq; 1782 int ret; 1783 1784 ret = seq_open(file, &swaps_op); 1785 if (ret) 1786 return ret; 1787 1788 seq = file->private_data; 1789 seq->poll_event = atomic_read(&proc_poll_event); 1790 return 0; 1791 } 1792 1793 static const struct file_operations proc_swaps_operations = { 1794 .open = swaps_open, 1795 .read = seq_read, 1796 .llseek = seq_lseek, 1797 .release = seq_release, 1798 .poll = swaps_poll, 1799 }; 1800 1801 static int __init procswaps_init(void) 1802 { 1803 proc_create("swaps", 0, NULL, &proc_swaps_operations); 1804 return 0; 1805 } 1806 __initcall(procswaps_init); 1807 #endif /* CONFIG_PROC_FS */ 1808 1809 #ifdef MAX_SWAPFILES_CHECK 1810 static int __init max_swapfiles_check(void) 1811 { 1812 MAX_SWAPFILES_CHECK(); 1813 return 0; 1814 } 1815 late_initcall(max_swapfiles_check); 1816 #endif 1817 1818 static struct swap_info_struct *alloc_swap_info(void) 1819 { 1820 struct swap_info_struct *p; 1821 unsigned int type; 1822 1823 p = kzalloc(sizeof(*p), GFP_KERNEL); 1824 if (!p) 1825 return ERR_PTR(-ENOMEM); 1826 1827 spin_lock(&swap_lock); 1828 for (type = 0; type < nr_swapfiles; type++) { 1829 if (!(swap_info[type]->flags & SWP_USED)) 1830 break; 1831 } 1832 if (type >= MAX_SWAPFILES) { 1833 spin_unlock(&swap_lock); 1834 kfree(p); 1835 return ERR_PTR(-EPERM); 1836 } 1837 if (type >= nr_swapfiles) { 1838 p->type = type; 1839 swap_info[type] = p; 1840 /* 1841 * Write swap_info[type] before nr_swapfiles, in case a 1842 * racing procfs swap_start() or swap_next() is reading them. 1843 * (We never shrink nr_swapfiles, we never free this entry.) 1844 */ 1845 smp_wmb(); 1846 nr_swapfiles++; 1847 } else { 1848 kfree(p); 1849 p = swap_info[type]; 1850 /* 1851 * Do not memset this entry: a racing procfs swap_next() 1852 * would be relying on p->type to remain valid. 1853 */ 1854 } 1855 INIT_LIST_HEAD(&p->first_swap_extent.list); 1856 p->flags = SWP_USED; 1857 p->next = -1; 1858 spin_unlock(&swap_lock); 1859 1860 return p; 1861 } 1862 1863 static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) 1864 { 1865 int error; 1866 1867 if (S_ISBLK(inode->i_mode)) { 1868 p->bdev = bdgrab(I_BDEV(inode)); 1869 error = blkdev_get(p->bdev, 1870 FMODE_READ | FMODE_WRITE | FMODE_EXCL, 1871 sys_swapon); 1872 if (error < 0) { 1873 p->bdev = NULL; 1874 return -EINVAL; 1875 } 1876 p->old_block_size = block_size(p->bdev); 1877 error = set_blocksize(p->bdev, PAGE_SIZE); 1878 if (error < 0) 1879 return error; 1880 p->flags |= SWP_BLKDEV; 1881 } else if (S_ISREG(inode->i_mode)) { 1882 p->bdev = inode->i_sb->s_bdev; 1883 mutex_lock(&inode->i_mutex); 1884 if (IS_SWAPFILE(inode)) 1885 return -EBUSY; 1886 } else 1887 return -EINVAL; 1888 1889 return 0; 1890 } 1891 1892 static unsigned long read_swap_header(struct swap_info_struct *p, 1893 union swap_header *swap_header, 1894 struct inode *inode) 1895 { 1896 int i; 1897 unsigned long maxpages; 1898 unsigned long swapfilepages; 1899 1900 if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) { 1901 printk(KERN_ERR "Unable to find swap-space signature\n"); 1902 return 0; 1903 } 1904 1905 /* swap partition endianess hack... 
*/ 1906 if (swab32(swap_header->info.version) == 1) { 1907 swab32s(&swap_header->info.version); 1908 swab32s(&swap_header->info.last_page); 1909 swab32s(&swap_header->info.nr_badpages); 1910 for (i = 0; i < swap_header->info.nr_badpages; i++) 1911 swab32s(&swap_header->info.badpages[i]); 1912 } 1913 /* Check the swap header's sub-version */ 1914 if (swap_header->info.version != 1) { 1915 printk(KERN_WARNING 1916 "Unable to handle swap header version %d\n", 1917 swap_header->info.version); 1918 return 0; 1919 } 1920 1921 p->lowest_bit = 1; 1922 p->cluster_next = 1; 1923 p->cluster_nr = 0; 1924 1925 /* 1926 * Find out how many pages are allowed for a single swap 1927 * device. There are three limiting factors: 1) the number 1928 * of bits for the swap offset in the swp_entry_t type, and 1929 * 2) the number of bits in the swap pte as defined by the 1930 * the different architectures, and 3) the number of free bits 1931 * in an exceptional radix_tree entry. In order to find the 1932 * largest possible bit mask, a swap entry with swap type 0 1933 * and swap offset ~0UL is created, encoded to a swap pte, 1934 * decoded to a swp_entry_t again, and finally the swap 1935 * offset is extracted. This will mask all the bits from 1936 * the initial ~0UL mask that can't be encoded in either 1937 * the swp_entry_t or the architecture definition of a 1938 * swap pte. Then the same is done for a radix_tree entry. 1939 */ 1940 maxpages = swp_offset(pte_to_swp_entry( 1941 swp_entry_to_pte(swp_entry(0, ~0UL)))); 1942 maxpages = swp_offset(radix_to_swp_entry( 1943 swp_to_radix_entry(swp_entry(0, maxpages)))) + 1; 1944 1945 if (maxpages > swap_header->info.last_page) { 1946 maxpages = swap_header->info.last_page + 1; 1947 /* p->max is an unsigned int: don't overflow it */ 1948 if ((unsigned int)maxpages == 0) 1949 maxpages = UINT_MAX; 1950 } 1951 p->highest_bit = maxpages - 1; 1952 1953 if (!maxpages) 1954 return 0; 1955 swapfilepages = i_size_read(inode) >> PAGE_SHIFT; 1956 if (swapfilepages && maxpages > swapfilepages) { 1957 printk(KERN_WARNING 1958 "Swap area shorter than signature indicates\n"); 1959 return 0; 1960 } 1961 if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode)) 1962 return 0; 1963 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) 1964 return 0; 1965 1966 return maxpages; 1967 } 1968 1969 static int setup_swap_map_and_extents(struct swap_info_struct *p, 1970 union swap_header *swap_header, 1971 unsigned char *swap_map, 1972 unsigned long maxpages, 1973 sector_t *span) 1974 { 1975 int i; 1976 unsigned int nr_good_pages; 1977 int nr_extents; 1978 1979 nr_good_pages = maxpages - 1; /* omit header page */ 1980 1981 for (i = 0; i < swap_header->info.nr_badpages; i++) { 1982 unsigned int page_nr = swap_header->info.badpages[i]; 1983 if (page_nr == 0 || page_nr > swap_header->info.last_page) 1984 return -EINVAL; 1985 if (page_nr < maxpages) { 1986 swap_map[page_nr] = SWAP_MAP_BAD; 1987 nr_good_pages--; 1988 } 1989 } 1990 1991 if (nr_good_pages) { 1992 swap_map[0] = SWAP_MAP_BAD; 1993 p->max = maxpages; 1994 p->pages = nr_good_pages; 1995 nr_extents = setup_swap_extents(p, span); 1996 if (nr_extents < 0) 1997 return nr_extents; 1998 nr_good_pages = p->pages; 1999 } 2000 if (!nr_good_pages) { 2001 printk(KERN_WARNING "Empty swap-file\n"); 2002 return -EINVAL; 2003 } 2004 2005 return nr_extents; 2006 } 2007 2008 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) 2009 { 2010 struct swap_info_struct *p; 2011 char *name; 2012 struct file *swap_file = NULL; 2013 struct 
address_space *mapping; 2014 int i; 2015 int prio; 2016 int error; 2017 union swap_header *swap_header; 2018 int nr_extents; 2019 sector_t span; 2020 unsigned long maxpages; 2021 unsigned char *swap_map = NULL; 2022 struct page *page = NULL; 2023 struct inode *inode = NULL; 2024 2025 if (!capable(CAP_SYS_ADMIN)) 2026 return -EPERM; 2027 2028 p = alloc_swap_info(); 2029 if (IS_ERR(p)) 2030 return PTR_ERR(p); 2031 2032 name = getname(specialfile); 2033 if (IS_ERR(name)) { 2034 error = PTR_ERR(name); 2035 name = NULL; 2036 goto bad_swap; 2037 } 2038 swap_file = filp_open(name, O_RDWR|O_LARGEFILE, 0); 2039 if (IS_ERR(swap_file)) { 2040 error = PTR_ERR(swap_file); 2041 swap_file = NULL; 2042 goto bad_swap; 2043 } 2044 2045 p->swap_file = swap_file; 2046 mapping = swap_file->f_mapping; 2047 2048 for (i = 0; i < nr_swapfiles; i++) { 2049 struct swap_info_struct *q = swap_info[i]; 2050 2051 if (q == p || !q->swap_file) 2052 continue; 2053 if (mapping == q->swap_file->f_mapping) { 2054 error = -EBUSY; 2055 goto bad_swap; 2056 } 2057 } 2058 2059 inode = mapping->host; 2060 /* If S_ISREG(inode->i_mode) will do mutex_lock(&inode->i_mutex); */ 2061 error = claim_swapfile(p, inode); 2062 if (unlikely(error)) 2063 goto bad_swap; 2064 2065 /* 2066 * Read the swap header. 2067 */ 2068 if (!mapping->a_ops->readpage) { 2069 error = -EINVAL; 2070 goto bad_swap; 2071 } 2072 page = read_mapping_page(mapping, 0, swap_file); 2073 if (IS_ERR(page)) { 2074 error = PTR_ERR(page); 2075 goto bad_swap; 2076 } 2077 swap_header = kmap(page); 2078 2079 maxpages = read_swap_header(p, swap_header, inode); 2080 if (unlikely(!maxpages)) { 2081 error = -EINVAL; 2082 goto bad_swap; 2083 } 2084 2085 /* OK, set up the swap map and apply the bad block list */ 2086 swap_map = vzalloc(maxpages); 2087 if (!swap_map) { 2088 error = -ENOMEM; 2089 goto bad_swap; 2090 } 2091 2092 error = swap_cgroup_swapon(p->type, maxpages); 2093 if (error) 2094 goto bad_swap; 2095 2096 nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map, 2097 maxpages, &span); 2098 if (unlikely(nr_extents < 0)) { 2099 error = nr_extents; 2100 goto bad_swap; 2101 } 2102 2103 if (p->bdev) { 2104 if (blk_queue_nonrot(bdev_get_queue(p->bdev))) { 2105 p->flags |= SWP_SOLIDSTATE; 2106 p->cluster_next = 1 + (random32() % p->highest_bit); 2107 } 2108 if (discard_swap(p) == 0 && (swap_flags & SWAP_FLAG_DISCARD)) 2109 p->flags |= SWP_DISCARDABLE; 2110 } 2111 2112 mutex_lock(&swapon_mutex); 2113 prio = -1; 2114 if (swap_flags & SWAP_FLAG_PREFER) 2115 prio = 2116 (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; 2117 enable_swap_info(p, prio, swap_map); 2118 2119 printk(KERN_INFO "Adding %uk swap on %s. " 2120 "Priority:%d extents:%d across:%lluk %s%s\n", 2121 p->pages<<(PAGE_SHIFT-10), name, p->prio, 2122 nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), 2123 (p->flags & SWP_SOLIDSTATE) ? "SS" : "", 2124 (p->flags & SWP_DISCARDABLE) ? 
"D" : ""); 2125 2126 mutex_unlock(&swapon_mutex); 2127 atomic_inc(&proc_poll_event); 2128 wake_up_interruptible(&proc_poll_wait); 2129 2130 if (S_ISREG(inode->i_mode)) 2131 inode->i_flags |= S_SWAPFILE; 2132 error = 0; 2133 goto out; 2134 bad_swap: 2135 if (inode && S_ISBLK(inode->i_mode) && p->bdev) { 2136 set_blocksize(p->bdev, p->old_block_size); 2137 blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 2138 } 2139 destroy_swap_extents(p); 2140 swap_cgroup_swapoff(p->type); 2141 spin_lock(&swap_lock); 2142 p->swap_file = NULL; 2143 p->flags = 0; 2144 spin_unlock(&swap_lock); 2145 vfree(swap_map); 2146 if (swap_file) { 2147 if (inode && S_ISREG(inode->i_mode)) { 2148 mutex_unlock(&inode->i_mutex); 2149 inode = NULL; 2150 } 2151 filp_close(swap_file, NULL); 2152 } 2153 out: 2154 if (page && !IS_ERR(page)) { 2155 kunmap(page); 2156 page_cache_release(page); 2157 } 2158 if (name) 2159 putname(name); 2160 if (inode && S_ISREG(inode->i_mode)) 2161 mutex_unlock(&inode->i_mutex); 2162 return error; 2163 } 2164 2165 void si_swapinfo(struct sysinfo *val) 2166 { 2167 unsigned int type; 2168 unsigned long nr_to_be_unused = 0; 2169 2170 spin_lock(&swap_lock); 2171 for (type = 0; type < nr_swapfiles; type++) { 2172 struct swap_info_struct *si = swap_info[type]; 2173 2174 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) 2175 nr_to_be_unused += si->inuse_pages; 2176 } 2177 val->freeswap = nr_swap_pages + nr_to_be_unused; 2178 val->totalswap = total_swap_pages + nr_to_be_unused; 2179 spin_unlock(&swap_lock); 2180 } 2181 2182 /* 2183 * Verify that a swap entry is valid and increment its swap map count. 2184 * 2185 * Returns error code in following case. 2186 * - success -> 0 2187 * - swp_entry is invalid -> EINVAL 2188 * - swp_entry is migration entry -> EINVAL 2189 * - swap-cache reference is requested but there is already one. -> EEXIST 2190 * - swap-cache reference is requested but the entry is not used. -> ENOENT 2191 * - swap-mapped reference requested but needs continued swap count. 
-> ENOMEM 2192 */ 2193 static int __swap_duplicate(swp_entry_t entry, unsigned char usage) 2194 { 2195 struct swap_info_struct *p; 2196 unsigned long offset, type; 2197 unsigned char count; 2198 unsigned char has_cache; 2199 int err = -EINVAL; 2200 2201 if (non_swap_entry(entry)) 2202 goto out; 2203 2204 type = swp_type(entry); 2205 if (type >= nr_swapfiles) 2206 goto bad_file; 2207 p = swap_info[type]; 2208 offset = swp_offset(entry); 2209 2210 spin_lock(&swap_lock); 2211 if (unlikely(offset >= p->max)) 2212 goto unlock_out; 2213 2214 count = p->swap_map[offset]; 2215 has_cache = count & SWAP_HAS_CACHE; 2216 count &= ~SWAP_HAS_CACHE; 2217 err = 0; 2218 2219 if (usage == SWAP_HAS_CACHE) { 2220 2221 /* set SWAP_HAS_CACHE if there is no cache and entry is used */ 2222 if (!has_cache && count) 2223 has_cache = SWAP_HAS_CACHE; 2224 else if (has_cache) /* someone else added cache */ 2225 err = -EEXIST; 2226 else /* no users remaining */ 2227 err = -ENOENT; 2228 2229 } else if (count || has_cache) { 2230 2231 if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX) 2232 count += usage; 2233 else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) 2234 err = -EINVAL; 2235 else if (swap_count_continued(p, offset, count)) 2236 count = COUNT_CONTINUED; 2237 else 2238 err = -ENOMEM; 2239 } else 2240 err = -ENOENT; /* unused swap entry */ 2241 2242 p->swap_map[offset] = count | has_cache; 2243 2244 unlock_out: 2245 spin_unlock(&swap_lock); 2246 out: 2247 return err; 2248 2249 bad_file: 2250 printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val); 2251 goto out; 2252 } 2253 2254 /* 2255 * Help swapoff by noting that swap entry belongs to shmem/tmpfs 2256 * (in which case its reference count is never incremented). 2257 */ 2258 void swap_shmem_alloc(swp_entry_t entry) 2259 { 2260 __swap_duplicate(entry, SWAP_MAP_SHMEM); 2261 } 2262 2263 /* 2264 * Increase reference count of swap entry by 1. 2265 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required 2266 * but could not be atomically allocated. Returns 0, just as if it succeeded, 2267 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which 2268 * might occur if a page table entry has got corrupted. 2269 */ 2270 int swap_duplicate(swp_entry_t entry) 2271 { 2272 int err = 0; 2273 2274 while (!err && __swap_duplicate(entry, 1) == -ENOMEM) 2275 err = add_swap_count_continuation(entry, GFP_ATOMIC); 2276 return err; 2277 } 2278 2279 /* 2280 * @entry: swap entry for which we allocate swap cache. 2281 * 2282 * Called when allocating swap cache for existing swap entry, 2283 * This can return error codes. Returns 0 at success. 2284 * -EBUSY means there is a swap cache. 2285 * Note: return code is different from swap_duplicate(). 2286 */ 2287 int swapcache_prepare(swp_entry_t entry) 2288 { 2289 return __swap_duplicate(entry, SWAP_HAS_CACHE); 2290 } 2291 2292 /* 2293 * swap_lock prevents swap_map being freed. Don't grab an extra 2294 * reference on the swaphandle, it doesn't matter if it becomes unused. 
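* valid_swaphandles() computes the swap readahead window around @entry: the target offset is rounded down to a 1 << page_cluster boundary (skipping the header page), the contiguous in-use slots on either side of the target are counted, stopping at the first free or bad slot; *offset is set to the start of that run and the return value is the number of pages worth reading (0 when the target has no allocated neighbours, since a single page needs no readahead).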
2295 */ 2296 int valid_swaphandles(swp_entry_t entry, unsigned long *offset) 2297 { 2298 struct swap_info_struct *si; 2299 int our_page_cluster = page_cluster; 2300 pgoff_t target, toff; 2301 pgoff_t base, end; 2302 int nr_pages = 0; 2303 2304 if (!our_page_cluster) /* no readahead */ 2305 return 0; 2306 2307 si = swap_info[swp_type(entry)]; 2308 target = swp_offset(entry); 2309 base = (target >> our_page_cluster) << our_page_cluster; 2310 end = base + (1 << our_page_cluster); 2311 if (!base) /* first page is swap header */ 2312 base++; 2313 2314 spin_lock(&swap_lock); 2315 if (end > si->max) /* don't go beyond end of map */ 2316 end = si->max; 2317 2318 /* Count contiguous allocated slots above our target */ 2319 for (toff = target; ++toff < end; nr_pages++) { 2320 /* Don't read in free or bad pages */ 2321 if (!si->swap_map[toff]) 2322 break; 2323 if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD) 2324 break; 2325 } 2326 /* Count contiguous allocated slots below our target */ 2327 for (toff = target; --toff >= base; nr_pages++) { 2328 /* Don't read in free or bad pages */ 2329 if (!si->swap_map[toff]) 2330 break; 2331 if (swap_count(si->swap_map[toff]) == SWAP_MAP_BAD) 2332 break; 2333 } 2334 spin_unlock(&swap_lock); 2335 2336 /* 2337 * Indicate starting offset, and return number of pages to get: 2338 * if only 1, say 0, since there's then no readahead to be done. 2339 */ 2340 *offset = ++toff; 2341 return nr_pages? ++nr_pages: 0; 2342 } 2343 2344 /* 2345 * add_swap_count_continuation - called when a swap count is duplicated 2346 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's 2347 * page of the original vmalloc'ed swap_map, to hold the continuation count 2348 * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called 2349 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc. 2350 * 2351 * These continuation pages are seldom referenced: the common paths all work 2352 * on the original swap_map, only referring to a continuation page when the 2353 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX. 2354 * 2355 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding 2356 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL) 2357 * can be called after dropping locks. 2358 */ 2359 int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) 2360 { 2361 struct swap_info_struct *si; 2362 struct page *head; 2363 struct page *page; 2364 struct page *list_page; 2365 pgoff_t offset; 2366 unsigned char count; 2367 2368 /* 2369 * When debugging, it's easier to use __GFP_ZERO here; but it's better 2370 * for latency not to zero a page while GFP_ATOMIC and holding locks. 2371 */ 2372 page = alloc_page(gfp_mask | __GFP_HIGHMEM); 2373 2374 si = swap_info_get(entry); 2375 if (!si) { 2376 /* 2377 * An acceptable race has occurred since the failing 2378 * __swap_duplicate(): the swap entry has been freed, 2379 * perhaps even the whole swap_map cleared for swapoff. 2380 */ 2381 goto outer; 2382 } 2383 2384 offset = swp_offset(entry); 2385 count = si->swap_map[offset] & ~SWAP_HAS_CACHE; 2386 2387 if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) { 2388 /* 2389 * The higher the swap count, the more likely it is that tasks 2390 * will race to add swap count continuation: we need to avoid 2391 * over-provisioning. 
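* If the count has moved on since the failing __swap_duplicate() (the entry may have been freed, or another task may already have installed the continuation), there is nothing to do here: jump to out, drop swap_lock, free any page we allocated and let the caller retry __swap_duplicate().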
2392 */ 2393 goto out; 2394 } 2395 2396 if (!page) { 2397 spin_unlock(&swap_lock); 2398 return -ENOMEM; 2399 } 2400 2401 /* 2402 * We are fortunate that although vmalloc_to_page uses pte_offset_map, 2403 * no architecture is using highmem pages for kernel pagetables: so it 2404 * will not corrupt the GFP_ATOMIC caller's atomic pagetable kmaps. 2405 */ 2406 head = vmalloc_to_page(si->swap_map + offset); 2407 offset &= ~PAGE_MASK; 2408 2409 /* 2410 * Page allocation does not initialize the page's lru field, 2411 * but it does always reset its private field. 2412 */ 2413 if (!page_private(head)) { 2414 BUG_ON(count & COUNT_CONTINUED); 2415 INIT_LIST_HEAD(&head->lru); 2416 set_page_private(head, SWP_CONTINUED); 2417 si->flags |= SWP_CONTINUED; 2418 } 2419 2420 list_for_each_entry(list_page, &head->lru, lru) { 2421 unsigned char *map; 2422 2423 /* 2424 * If the previous map said no continuation, but we've found 2425 * a continuation page, free our allocation and use this one. 2426 */ 2427 if (!(count & COUNT_CONTINUED)) 2428 goto out; 2429 2430 map = kmap_atomic(list_page, KM_USER0) + offset; 2431 count = *map; 2432 kunmap_atomic(map, KM_USER0); 2433 2434 /* 2435 * If this continuation count now has some space in it, 2436 * free our allocation and use this one. 2437 */ 2438 if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX) 2439 goto out; 2440 } 2441 2442 list_add_tail(&page->lru, &head->lru); 2443 page = NULL; /* now it's attached, don't free it */ 2444 out: 2445 spin_unlock(&swap_lock); 2446 outer: 2447 if (page) 2448 __free_page(page); 2449 return 0; 2450 } 2451 2452 /* 2453 * swap_count_continued - when the original swap_map count is incremented 2454 * from SWAP_MAP_MAX, check if there is already a continuation page to carry 2455 * into, carry if so, or else fail until a new continuation page is allocated; 2456 * when the original swap_map count is decremented from 0 with continuation, 2457 * borrow from the continuation and report whether it still holds more. 2458 * Called while __swap_duplicate() or swap_entry_free() holds swap_lock. 
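* Each continuation page holds one more "digit" of the count for every offset within the same page of the swap_map, and COUNT_CONTINUED in a digit means the next continuation page holds a still higher digit. A carry therefore ripples up the chain much like adding 1 to 999, and a borrow ripples back down like subtracting 1 from 1000.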
2459 */ 2460 static bool swap_count_continued(struct swap_info_struct *si, 2461 pgoff_t offset, unsigned char count) 2462 { 2463 struct page *head; 2464 struct page *page; 2465 unsigned char *map; 2466 2467 head = vmalloc_to_page(si->swap_map + offset); 2468 if (page_private(head) != SWP_CONTINUED) { 2469 BUG_ON(count & COUNT_CONTINUED); 2470 return false; /* need to add count continuation */ 2471 } 2472 2473 offset &= ~PAGE_MASK; 2474 page = list_entry(head->lru.next, struct page, lru); 2475 map = kmap_atomic(page, KM_USER0) + offset; 2476 2477 if (count == SWAP_MAP_MAX) /* initial increment from swap_map */ 2478 goto init_map; /* jump over SWAP_CONT_MAX checks */ 2479 2480 if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */ 2481 /* 2482 * Think of how you add 1 to 999 2483 */ 2484 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) { 2485 kunmap_atomic(map, KM_USER0); 2486 page = list_entry(page->lru.next, struct page, lru); 2487 BUG_ON(page == head); 2488 map = kmap_atomic(page, KM_USER0) + offset; 2489 } 2490 if (*map == SWAP_CONT_MAX) { 2491 kunmap_atomic(map, KM_USER0); 2492 page = list_entry(page->lru.next, struct page, lru); 2493 if (page == head) 2494 return false; /* add count continuation */ 2495 map = kmap_atomic(page, KM_USER0) + offset; 2496 init_map: *map = 0; /* we didn't zero the page */ 2497 } 2498 *map += 1; 2499 kunmap_atomic(map, KM_USER0); 2500 page = list_entry(page->lru.prev, struct page, lru); 2501 while (page != head) { 2502 map = kmap_atomic(page, KM_USER0) + offset; 2503 *map = COUNT_CONTINUED; 2504 kunmap_atomic(map, KM_USER0); 2505 page = list_entry(page->lru.prev, struct page, lru); 2506 } 2507 return true; /* incremented */ 2508 2509 } else { /* decrementing */ 2510 /* 2511 * Think of how you subtract 1 from 1000 2512 */ 2513 BUG_ON(count != COUNT_CONTINUED); 2514 while (*map == COUNT_CONTINUED) { 2515 kunmap_atomic(map, KM_USER0); 2516 page = list_entry(page->lru.next, struct page, lru); 2517 BUG_ON(page == head); 2518 map = kmap_atomic(page, KM_USER0) + offset; 2519 } 2520 BUG_ON(*map == 0); 2521 *map -= 1; 2522 if (*map == 0) 2523 count = 0; 2524 kunmap_atomic(map, KM_USER0); 2525 page = list_entry(page->lru.prev, struct page, lru); 2526 while (page != head) { 2527 map = kmap_atomic(page, KM_USER0) + offset; 2528 *map = SWAP_CONT_MAX | count; 2529 count = COUNT_CONTINUED; 2530 kunmap_atomic(map, KM_USER0); 2531 page = list_entry(page->lru.prev, struct page, lru); 2532 } 2533 return count == COUNT_CONTINUED; 2534 } 2535 } 2536 2537 /* 2538 * free_swap_count_continuations - swapoff free all the continuation pages 2539 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it. 2540 */ 2541 static void free_swap_count_continuations(struct swap_info_struct *si) 2542 { 2543 pgoff_t offset; 2544 2545 for (offset = 0; offset < si->max; offset += PAGE_SIZE) { 2546 struct page *head; 2547 head = vmalloc_to_page(si->swap_map + offset); 2548 if (page_private(head)) { 2549 struct list_head *this, *next; 2550 list_for_each_safe(this, next, &head->lru) { 2551 struct page *page; 2552 page = list_entry(this, struct page, lru); 2553 list_del(this); 2554 __free_page(page); 2555 } 2556 } 2557 } 2558 } 2559
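/*
 * Illustrative userspace sketch, not part of swapfile.c: how a caller of
 * swapon(2) encodes a priority into swap_flags, matching the decoding in
 * sys_swapon() above (SWAP_FLAG_PREFER set, priority carried in
 * SWAP_FLAG_PRIO_MASK shifted by SWAP_FLAG_PRIO_SHIFT). It assumes
 * <sys/swap.h> exposes these constants and the swapon() wrapper, as glibc's
 * does; the priority value 5 is only an example and error handling is minimal.
 */
#include <stdio.h>
#include <sys/swap.h>

int main(int argc, char **argv)
{
	int prio = 5;	/* example priority to request */
	int flags = SWAP_FLAG_PREFER |
		    ((prio << SWAP_FLAG_PRIO_SHIFT) & SWAP_FLAG_PRIO_MASK);

	if (argc < 2) {
		fprintf(stderr, "usage: %s <swap file or partition>\n", argv[0]);
		return 1;
	}
	if (swapon(argv[1], flags) != 0) {	/* needs CAP_SYS_ADMIN */
		perror("swapon");
		return 1;
	}
	return 0;
}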