1 /* 2 * linux/mm/swapfile.c 3 * 4 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds 5 * Swap reorganised 29.12.95, Stephen Tweedie 6 */ 7 8 #include <linux/config.h> 9 #include <linux/mm.h> 10 #include <linux/hugetlb.h> 11 #include <linux/mman.h> 12 #include <linux/slab.h> 13 #include <linux/kernel_stat.h> 14 #include <linux/swap.h> 15 #include <linux/vmalloc.h> 16 #include <linux/pagemap.h> 17 #include <linux/namei.h> 18 #include <linux/shm.h> 19 #include <linux/blkdev.h> 20 #include <linux/writeback.h> 21 #include <linux/proc_fs.h> 22 #include <linux/seq_file.h> 23 #include <linux/init.h> 24 #include <linux/module.h> 25 #include <linux/rmap.h> 26 #include <linux/security.h> 27 #include <linux/backing-dev.h> 28 #include <linux/syscalls.h> 29 30 #include <asm/pgtable.h> 31 #include <asm/tlbflush.h> 32 #include <linux/swapops.h> 33 34 DEFINE_SPINLOCK(swap_lock); 35 unsigned int nr_swapfiles; 36 long total_swap_pages; 37 static int swap_overflow; 38 39 static const char Bad_file[] = "Bad swap file entry "; 40 static const char Unused_file[] = "Unused swap file entry "; 41 static const char Bad_offset[] = "Bad swap offset entry "; 42 static const char Unused_offset[] = "Unused swap offset entry "; 43 44 struct swap_list_t swap_list = {-1, -1}; 45 46 struct swap_info_struct swap_info[MAX_SWAPFILES]; 47 48 static DECLARE_MUTEX(swapon_sem); 49 50 /* 51 * We need this because the bdev->unplug_fn can sleep and we cannot 52 * hold swap_lock while calling the unplug_fn. And swap_lock 53 * cannot be turned into a semaphore. 54 */ 55 static DECLARE_RWSEM(swap_unplug_sem); 56 57 void swap_unplug_io_fn(struct backing_dev_info *unused_bdi, struct page *page) 58 { 59 swp_entry_t entry; 60 61 down_read(&swap_unplug_sem); 62 entry.val = page_private(page); 63 if (PageSwapCache(page)) { 64 struct block_device *bdev = swap_info[swp_type(entry)].bdev; 65 struct backing_dev_info *bdi; 66 67 /* 68 * If the page is removed from swapcache from under us (with a 69 * racy try_to_unuse/swapoff) we need an additional reference 70 * count to avoid reading garbage from page_private(page) above. 71 * If the WARN_ON triggers during a swapoff it maybe the race 72 * condition and it's harmless. However if it triggers without 73 * swapoff it signals a problem. 74 */ 75 WARN_ON(page_count(page) <= 1); 76 77 bdi = bdev->bd_inode->i_mapping->backing_dev_info; 78 blk_run_backing_dev(bdi, page); 79 } 80 up_read(&swap_unplug_sem); 81 } 82 83 #define SWAPFILE_CLUSTER 256 84 #define LATENCY_LIMIT 256 85 86 static inline unsigned long scan_swap_map(struct swap_info_struct *si) 87 { 88 unsigned long offset, last_in_cluster; 89 int latency_ration = LATENCY_LIMIT; 90 91 /* 92 * We try to cluster swap pages by allocating them sequentially 93 * in swap. Once we've allocated SWAPFILE_CLUSTER pages this 94 * way, however, we resort to first-free allocation, starting 95 * a new cluster. This prevents us from scattering swap pages 96 * all over the entire swap partition, so that we reduce 97 * overall disk seek times between swap pages. -- sct 98 * But we do now try to find an empty cluster. -Andrea 99 */ 100 101 si->flags += SWP_SCANNING; 102 if (unlikely(!si->cluster_nr)) { 103 si->cluster_nr = SWAPFILE_CLUSTER - 1; 104 if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) 105 goto lowest; 106 spin_unlock(&swap_lock); 107 108 offset = si->lowest_bit; 109 last_in_cluster = offset + SWAPFILE_CLUSTER - 1; 110 111 /* Locate the first empty (unaligned) cluster */ 112 for (; last_in_cluster <= si->highest_bit; offset++) { 113 if (si->swap_map[offset]) 114 last_in_cluster = offset + SWAPFILE_CLUSTER; 115 else if (offset == last_in_cluster) { 116 spin_lock(&swap_lock); 117 si->cluster_next = offset-SWAPFILE_CLUSTER-1; 118 goto cluster; 119 } 120 if (unlikely(--latency_ration < 0)) { 121 cond_resched(); 122 latency_ration = LATENCY_LIMIT; 123 } 124 } 125 spin_lock(&swap_lock); 126 goto lowest; 127 } 128 129 si->cluster_nr--; 130 cluster: 131 offset = si->cluster_next; 132 if (offset > si->highest_bit) 133 lowest: offset = si->lowest_bit; 134 checks: if (!(si->flags & SWP_WRITEOK)) 135 goto no_page; 136 if (!si->highest_bit) 137 goto no_page; 138 if (!si->swap_map[offset]) { 139 if (offset == si->lowest_bit) 140 si->lowest_bit++; 141 if (offset == si->highest_bit) 142 si->highest_bit--; 143 si->inuse_pages++; 144 if (si->inuse_pages == si->pages) { 145 si->lowest_bit = si->max; 146 si->highest_bit = 0; 147 } 148 si->swap_map[offset] = 1; 149 si->cluster_next = offset + 1; 150 si->flags -= SWP_SCANNING; 151 return offset; 152 } 153 154 spin_unlock(&swap_lock); 155 while (++offset <= si->highest_bit) { 156 if (!si->swap_map[offset]) { 157 spin_lock(&swap_lock); 158 goto checks; 159 } 160 if (unlikely(--latency_ration < 0)) { 161 cond_resched(); 162 latency_ration = LATENCY_LIMIT; 163 } 164 } 165 spin_lock(&swap_lock); 166 goto lowest; 167 168 no_page: 169 si->flags -= SWP_SCANNING; 170 return 0; 171 } 172 173 swp_entry_t get_swap_page(void) 174 { 175 struct swap_info_struct *si; 176 pgoff_t offset; 177 int type, next; 178 int wrapped = 0; 179 180 spin_lock(&swap_lock); 181 if (nr_swap_pages <= 0) 182 goto noswap; 183 nr_swap_pages--; 184 185 for (type = swap_list.next; type >= 0 && wrapped < 2; type = next) { 186 si = swap_info + type; 187 next = si->next; 188 if (next < 0 || 189 (!wrapped && si->prio != swap_info[next].prio)) { 190 next = swap_list.head; 191 wrapped++; 192 } 193 194 if (!si->highest_bit) 195 continue; 196 if (!(si->flags & SWP_WRITEOK)) 197 continue; 198 199 swap_list.next = next; 200 offset = scan_swap_map(si); 201 if (offset) { 202 spin_unlock(&swap_lock); 203 return swp_entry(type, offset); 204 } 205 next = swap_list.next; 206 } 207 208 nr_swap_pages++; 209 noswap: 210 spin_unlock(&swap_lock); 211 return (swp_entry_t) {0}; 212 } 213 214 swp_entry_t get_swap_page_of_type(int type) 215 { 216 struct swap_info_struct *si; 217 pgoff_t offset; 218 219 spin_lock(&swap_lock); 220 si = swap_info + type; 221 if (si->flags & SWP_WRITEOK) { 222 nr_swap_pages--; 223 offset = scan_swap_map(si); 224 if (offset) { 225 spin_unlock(&swap_lock); 226 return swp_entry(type, offset); 227 } 228 nr_swap_pages++; 229 } 230 spin_unlock(&swap_lock); 231 return (swp_entry_t) {0}; 232 } 233 234 static struct swap_info_struct * swap_info_get(swp_entry_t entry) 235 { 236 struct swap_info_struct * p; 237 unsigned long offset, type; 238 239 if (!entry.val) 240 goto out; 241 type = swp_type(entry); 242 if (type >= nr_swapfiles) 243 goto bad_nofile; 244 p = & swap_info[type]; 245 if (!(p->flags & SWP_USED)) 246 goto bad_device; 247 offset = swp_offset(entry); 248 if (offset >= p->max) 249 goto bad_offset; 250 if (!p->swap_map[offset]) 251 goto bad_free; 252 spin_lock(&swap_lock); 253 return p; 254 255 bad_free: 256 printk(KERN_ERR "swap_free: %s%08lx\n", Unused_offset, entry.val); 257 goto out; 258 bad_offset: 259 printk(KERN_ERR "swap_free: %s%08lx\n", Bad_offset, entry.val); 260 goto out; 261 bad_device: 262 printk(KERN_ERR "swap_free: %s%08lx\n", Unused_file, entry.val); 263 goto out; 264 bad_nofile: 265 printk(KERN_ERR "swap_free: %s%08lx\n", Bad_file, entry.val); 266 out: 267 return NULL; 268 } 269 270 static int swap_entry_free(struct swap_info_struct *p, unsigned long offset) 271 { 272 int count = p->swap_map[offset]; 273 274 if (count < SWAP_MAP_MAX) { 275 count--; 276 p->swap_map[offset] = count; 277 if (!count) { 278 if (offset < p->lowest_bit) 279 p->lowest_bit = offset; 280 if (offset > p->highest_bit) 281 p->highest_bit = offset; 282 if (p->prio > swap_info[swap_list.next].prio) 283 swap_list.next = p - swap_info; 284 nr_swap_pages++; 285 p->inuse_pages--; 286 } 287 } 288 return count; 289 } 290 291 /* 292 * Caller has made sure that the swapdevice corresponding to entry 293 * is still around or has not been recycled. 294 */ 295 void swap_free(swp_entry_t entry) 296 { 297 struct swap_info_struct * p; 298 299 p = swap_info_get(entry); 300 if (p) { 301 swap_entry_free(p, swp_offset(entry)); 302 spin_unlock(&swap_lock); 303 } 304 } 305 306 /* 307 * How many references to page are currently swapped out? 308 */ 309 static inline int page_swapcount(struct page *page) 310 { 311 int count = 0; 312 struct swap_info_struct *p; 313 swp_entry_t entry; 314 315 entry.val = page_private(page); 316 p = swap_info_get(entry); 317 if (p) { 318 /* Subtract the 1 for the swap cache itself */ 319 count = p->swap_map[swp_offset(entry)] - 1; 320 spin_unlock(&swap_lock); 321 } 322 return count; 323 } 324 325 /* 326 * We can use this swap cache entry directly 327 * if there are no other references to it. 328 */ 329 int can_share_swap_page(struct page *page) 330 { 331 int count; 332 333 BUG_ON(!PageLocked(page)); 334 count = page_mapcount(page); 335 if (count <= 1 && PageSwapCache(page)) 336 count += page_swapcount(page); 337 return count == 1; 338 } 339 340 /* 341 * Work out if there are any other processes sharing this 342 * swap cache page. Free it if you can. Return success. 343 */ 344 int remove_exclusive_swap_page(struct page *page) 345 { 346 int retval; 347 struct swap_info_struct * p; 348 swp_entry_t entry; 349 350 BUG_ON(PagePrivate(page)); 351 BUG_ON(!PageLocked(page)); 352 353 if (!PageSwapCache(page)) 354 return 0; 355 if (PageWriteback(page)) 356 return 0; 357 if (page_count(page) != 2) /* 2: us + cache */ 358 return 0; 359 360 entry.val = page_private(page); 361 p = swap_info_get(entry); 362 if (!p) 363 return 0; 364 365 /* Is the only swap cache user the cache itself? */ 366 retval = 0; 367 if (p->swap_map[swp_offset(entry)] == 1) { 368 /* Recheck the page count with the swapcache lock held.. */ 369 write_lock_irq(&swapper_space.tree_lock); 370 if ((page_count(page) == 2) && !PageWriteback(page)) { 371 __delete_from_swap_cache(page); 372 SetPageDirty(page); 373 retval = 1; 374 } 375 write_unlock_irq(&swapper_space.tree_lock); 376 } 377 spin_unlock(&swap_lock); 378 379 if (retval) { 380 swap_free(entry); 381 page_cache_release(page); 382 } 383 384 return retval; 385 } 386 387 /* 388 * Free the swap entry like above, but also try to 389 * free the page cache entry if it is the last user. 390 */ 391 void free_swap_and_cache(swp_entry_t entry) 392 { 393 struct swap_info_struct * p; 394 struct page *page = NULL; 395 396 p = swap_info_get(entry); 397 if (p) { 398 if (swap_entry_free(p, swp_offset(entry)) == 1) 399 page = find_trylock_page(&swapper_space, entry.val); 400 spin_unlock(&swap_lock); 401 } 402 if (page) { 403 int one_user; 404 405 BUG_ON(PagePrivate(page)); 406 page_cache_get(page); 407 one_user = (page_count(page) == 2); 408 /* Only cache user (+us), or swap space full? Free it! */ 409 if (!PageWriteback(page) && (one_user || vm_swap_full())) { 410 delete_from_swap_cache(page); 411 SetPageDirty(page); 412 } 413 unlock_page(page); 414 page_cache_release(page); 415 } 416 } 417 418 /* 419 * No need to decide whether this PTE shares the swap entry with others, 420 * just let do_wp_page work it out if a write is requested later - to 421 * force COW, vm_page_prot omits write permission from any private vma. 422 */ 423 static void unuse_pte(struct vm_area_struct *vma, pte_t *pte, 424 unsigned long addr, swp_entry_t entry, struct page *page) 425 { 426 inc_mm_counter(vma->vm_mm, anon_rss); 427 get_page(page); 428 set_pte_at(vma->vm_mm, addr, pte, 429 pte_mkold(mk_pte(page, vma->vm_page_prot))); 430 page_add_anon_rmap(page, vma, addr); 431 swap_free(entry); 432 /* 433 * Move the page to the active list so it is not 434 * immediately swapped out again after swapon. 435 */ 436 activate_page(page); 437 } 438 439 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, 440 unsigned long addr, unsigned long end, 441 swp_entry_t entry, struct page *page) 442 { 443 pte_t swp_pte = swp_entry_to_pte(entry); 444 pte_t *pte; 445 spinlock_t *ptl; 446 int found = 0; 447 448 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 449 do { 450 /* 451 * swapoff spends a _lot_ of time in this loop! 452 * Test inline before going to call unuse_pte. 453 */ 454 if (unlikely(pte_same(*pte, swp_pte))) { 455 unuse_pte(vma, pte++, addr, entry, page); 456 found = 1; 457 break; 458 } 459 } while (pte++, addr += PAGE_SIZE, addr != end); 460 pte_unmap_unlock(pte - 1, ptl); 461 return found; 462 } 463 464 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, 465 unsigned long addr, unsigned long end, 466 swp_entry_t entry, struct page *page) 467 { 468 pmd_t *pmd; 469 unsigned long next; 470 471 pmd = pmd_offset(pud, addr); 472 do { 473 next = pmd_addr_end(addr, end); 474 if (pmd_none_or_clear_bad(pmd)) 475 continue; 476 if (unuse_pte_range(vma, pmd, addr, next, entry, page)) 477 return 1; 478 } while (pmd++, addr = next, addr != end); 479 return 0; 480 } 481 482 static inline int unuse_pud_range(struct vm_area_struct *vma, pgd_t *pgd, 483 unsigned long addr, unsigned long end, 484 swp_entry_t entry, struct page *page) 485 { 486 pud_t *pud; 487 unsigned long next; 488 489 pud = pud_offset(pgd, addr); 490 do { 491 next = pud_addr_end(addr, end); 492 if (pud_none_or_clear_bad(pud)) 493 continue; 494 if (unuse_pmd_range(vma, pud, addr, next, entry, page)) 495 return 1; 496 } while (pud++, addr = next, addr != end); 497 return 0; 498 } 499 500 static int unuse_vma(struct vm_area_struct *vma, 501 swp_entry_t entry, struct page *page) 502 { 503 pgd_t *pgd; 504 unsigned long addr, end, next; 505 506 if (page->mapping) { 507 addr = page_address_in_vma(page, vma); 508 if (addr == -EFAULT) 509 return 0; 510 else 511 end = addr + PAGE_SIZE; 512 } else { 513 addr = vma->vm_start; 514 end = vma->vm_end; 515 } 516 517 pgd = pgd_offset(vma->vm_mm, addr); 518 do { 519 next = pgd_addr_end(addr, end); 520 if (pgd_none_or_clear_bad(pgd)) 521 continue; 522 if (unuse_pud_range(vma, pgd, addr, next, entry, page)) 523 return 1; 524 } while (pgd++, addr = next, addr != end); 525 return 0; 526 } 527 528 static int unuse_mm(struct mm_struct *mm, 529 swp_entry_t entry, struct page *page) 530 { 531 struct vm_area_struct *vma; 532 533 if (!down_read_trylock(&mm->mmap_sem)) { 534 /* 535 * Activate page so shrink_cache is unlikely to unmap its 536 * ptes while lock is dropped, so swapoff can make progress. 537 */ 538 activate_page(page); 539 unlock_page(page); 540 down_read(&mm->mmap_sem); 541 lock_page(page); 542 } 543 for (vma = mm->mmap; vma; vma = vma->vm_next) { 544 if (vma->anon_vma && unuse_vma(vma, entry, page)) 545 break; 546 } 547 up_read(&mm->mmap_sem); 548 /* 549 * Currently unuse_mm cannot fail, but leave error handling 550 * at call sites for now, since we change it from time to time. 551 */ 552 return 0; 553 } 554 555 /* 556 * Scan swap_map from current position to next entry still in use. 557 * Recycle to start on reaching the end, returning 0 when empty. 558 */ 559 static unsigned int find_next_to_unuse(struct swap_info_struct *si, 560 unsigned int prev) 561 { 562 unsigned int max = si->max; 563 unsigned int i = prev; 564 int count; 565 566 /* 567 * No need for swap_lock here: we're just looking 568 * for whether an entry is in use, not modifying it; false 569 * hits are okay, and sys_swapoff() has already prevented new 570 * allocations from this area (while holding swap_lock). 571 */ 572 for (;;) { 573 if (++i >= max) { 574 if (!prev) { 575 i = 0; 576 break; 577 } 578 /* 579 * No entries in use at top of swap_map, 580 * loop back to start and recheck there. 581 */ 582 max = prev + 1; 583 prev = 0; 584 i = 1; 585 } 586 count = si->swap_map[i]; 587 if (count && count != SWAP_MAP_BAD) 588 break; 589 } 590 return i; 591 } 592 593 /* 594 * We completely avoid races by reading each swap page in advance, 595 * and then search for the process using it. All the necessary 596 * page table adjustments can then be made atomically. 597 */ 598 static int try_to_unuse(unsigned int type) 599 { 600 struct swap_info_struct * si = &swap_info[type]; 601 struct mm_struct *start_mm; 602 unsigned short *swap_map; 603 unsigned short swcount; 604 struct page *page; 605 swp_entry_t entry; 606 unsigned int i = 0; 607 int retval = 0; 608 int reset_overflow = 0; 609 int shmem; 610 611 /* 612 * When searching mms for an entry, a good strategy is to 613 * start at the first mm we freed the previous entry from 614 * (though actually we don't notice whether we or coincidence 615 * freed the entry). Initialize this start_mm with a hold. 616 * 617 * A simpler strategy would be to start at the last mm we 618 * freed the previous entry from; but that would take less 619 * advantage of mmlist ordering, which clusters forked mms 620 * together, child after parent. If we race with dup_mmap(), we 621 * prefer to resolve parent before child, lest we miss entries 622 * duplicated after we scanned child: using last mm would invert 623 * that. Though it's only a serious concern when an overflowed 624 * swap count is reset from SWAP_MAP_MAX, preventing a rescan. 625 */ 626 start_mm = &init_mm; 627 atomic_inc(&init_mm.mm_users); 628 629 /* 630 * Keep on scanning until all entries have gone. Usually, 631 * one pass through swap_map is enough, but not necessarily: 632 * there are races when an instance of an entry might be missed. 633 */ 634 while ((i = find_next_to_unuse(si, i)) != 0) { 635 if (signal_pending(current)) { 636 retval = -EINTR; 637 break; 638 } 639 640 /* 641 * Get a page for the entry, using the existing swap 642 * cache page if there is one. Otherwise, get a clean 643 * page and read the swap into it. 644 */ 645 swap_map = &si->swap_map[i]; 646 entry = swp_entry(type, i); 647 page = read_swap_cache_async(entry, NULL, 0); 648 if (!page) { 649 /* 650 * Either swap_duplicate() failed because entry 651 * has been freed independently, and will not be 652 * reused since sys_swapoff() already disabled 653 * allocation from here, or alloc_page() failed. 654 */ 655 if (!*swap_map) 656 continue; 657 retval = -ENOMEM; 658 break; 659 } 660 661 /* 662 * Don't hold on to start_mm if it looks like exiting. 663 */ 664 if (atomic_read(&start_mm->mm_users) == 1) { 665 mmput(start_mm); 666 start_mm = &init_mm; 667 atomic_inc(&init_mm.mm_users); 668 } 669 670 /* 671 * Wait for and lock page. When do_swap_page races with 672 * try_to_unuse, do_swap_page can handle the fault much 673 * faster than try_to_unuse can locate the entry. This 674 * apparently redundant "wait_on_page_locked" lets try_to_unuse 675 * defer to do_swap_page in such a case - in some tests, 676 * do_swap_page and try_to_unuse repeatedly compete. 677 */ 678 wait_on_page_locked(page); 679 wait_on_page_writeback(page); 680 lock_page(page); 681 wait_on_page_writeback(page); 682 683 /* 684 * Remove all references to entry. 685 * Whenever we reach init_mm, there's no address space 686 * to search, but use it as a reminder to search shmem. 687 */ 688 shmem = 0; 689 swcount = *swap_map; 690 if (swcount > 1) { 691 if (start_mm == &init_mm) 692 shmem = shmem_unuse(entry, page); 693 else 694 retval = unuse_mm(start_mm, entry, page); 695 } 696 if (*swap_map > 1) { 697 int set_start_mm = (*swap_map >= swcount); 698 struct list_head *p = &start_mm->mmlist; 699 struct mm_struct *new_start_mm = start_mm; 700 struct mm_struct *prev_mm = start_mm; 701 struct mm_struct *mm; 702 703 atomic_inc(&new_start_mm->mm_users); 704 atomic_inc(&prev_mm->mm_users); 705 spin_lock(&mmlist_lock); 706 while (*swap_map > 1 && !retval && 707 (p = p->next) != &start_mm->mmlist) { 708 mm = list_entry(p, struct mm_struct, mmlist); 709 if (atomic_inc_return(&mm->mm_users) == 1) { 710 atomic_dec(&mm->mm_users); 711 continue; 712 } 713 spin_unlock(&mmlist_lock); 714 mmput(prev_mm); 715 prev_mm = mm; 716 717 cond_resched(); 718 719 swcount = *swap_map; 720 if (swcount <= 1) 721 ; 722 else if (mm == &init_mm) { 723 set_start_mm = 1; 724 shmem = shmem_unuse(entry, page); 725 } else 726 retval = unuse_mm(mm, entry, page); 727 if (set_start_mm && *swap_map < swcount) { 728 mmput(new_start_mm); 729 atomic_inc(&mm->mm_users); 730 new_start_mm = mm; 731 set_start_mm = 0; 732 } 733 spin_lock(&mmlist_lock); 734 } 735 spin_unlock(&mmlist_lock); 736 mmput(prev_mm); 737 mmput(start_mm); 738 start_mm = new_start_mm; 739 } 740 if (retval) { 741 unlock_page(page); 742 page_cache_release(page); 743 break; 744 } 745 746 /* 747 * How could swap count reach 0x7fff when the maximum 748 * pid is 0x7fff, and there's no way to repeat a swap 749 * page within an mm (except in shmem, where it's the 750 * shared object which takes the reference count)? 751 * We believe SWAP_MAP_MAX cannot occur in Linux 2.4. 752 * 753 * If that's wrong, then we should worry more about 754 * exit_mmap() and do_munmap() cases described above: 755 * we might be resetting SWAP_MAP_MAX too early here. 756 * We know "Undead"s can happen, they're okay, so don't 757 * report them; but do report if we reset SWAP_MAP_MAX. 758 */ 759 if (*swap_map == SWAP_MAP_MAX) { 760 spin_lock(&swap_lock); 761 *swap_map = 1; 762 spin_unlock(&swap_lock); 763 reset_overflow = 1; 764 } 765 766 /* 767 * If a reference remains (rare), we would like to leave 768 * the page in the swap cache; but try_to_unmap could 769 * then re-duplicate the entry once we drop page lock, 770 * so we might loop indefinitely; also, that page could 771 * not be swapped out to other storage meanwhile. So: 772 * delete from cache even if there's another reference, 773 * after ensuring that the data has been saved to disk - 774 * since if the reference remains (rarer), it will be 775 * read from disk into another page. Splitting into two 776 * pages would be incorrect if swap supported "shared 777 * private" pages, but they are handled by tmpfs files. 778 * 779 * Note shmem_unuse already deleted a swappage from 780 * the swap cache, unless the move to filepage failed: 781 * in which case it left swappage in cache, lowered its 782 * swap count to pass quickly through the loops above, 783 * and now we must reincrement count to try again later. 784 */ 785 if ((*swap_map > 1) && PageDirty(page) && PageSwapCache(page)) { 786 struct writeback_control wbc = { 787 .sync_mode = WB_SYNC_NONE, 788 }; 789 790 swap_writepage(page, &wbc); 791 lock_page(page); 792 wait_on_page_writeback(page); 793 } 794 if (PageSwapCache(page)) { 795 if (shmem) 796 swap_duplicate(entry); 797 else 798 delete_from_swap_cache(page); 799 } 800 801 /* 802 * So we could skip searching mms once swap count went 803 * to 1, we did not mark any present ptes as dirty: must 804 * mark page dirty so shrink_list will preserve it. 805 */ 806 SetPageDirty(page); 807 unlock_page(page); 808 page_cache_release(page); 809 810 /* 811 * Make sure that we aren't completely killing 812 * interactive performance. 813 */ 814 cond_resched(); 815 } 816 817 mmput(start_mm); 818 if (reset_overflow) { 819 printk(KERN_WARNING "swapoff: cleared swap entry overflow\n"); 820 swap_overflow = 0; 821 } 822 return retval; 823 } 824 825 /* 826 * After a successful try_to_unuse, if no swap is now in use, we know 827 * we can empty the mmlist. swap_lock must be held on entry and exit. 828 * Note that mmlist_lock nests inside swap_lock, and an mm must be 829 * added to the mmlist just after page_duplicate - before would be racy. 830 */ 831 static void drain_mmlist(void) 832 { 833 struct list_head *p, *next; 834 unsigned int i; 835 836 for (i = 0; i < nr_swapfiles; i++) 837 if (swap_info[i].inuse_pages) 838 return; 839 spin_lock(&mmlist_lock); 840 list_for_each_safe(p, next, &init_mm.mmlist) 841 list_del_init(p); 842 spin_unlock(&mmlist_lock); 843 } 844 845 /* 846 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which 847 * corresponds to page offset `offset'. 848 */ 849 sector_t map_swap_page(struct swap_info_struct *sis, pgoff_t offset) 850 { 851 struct swap_extent *se = sis->curr_swap_extent; 852 struct swap_extent *start_se = se; 853 854 for ( ; ; ) { 855 struct list_head *lh; 856 857 if (se->start_page <= offset && 858 offset < (se->start_page + se->nr_pages)) { 859 return se->start_block + (offset - se->start_page); 860 } 861 lh = se->list.next; 862 if (lh == &sis->extent_list) 863 lh = lh->next; 864 se = list_entry(lh, struct swap_extent, list); 865 sis->curr_swap_extent = se; 866 BUG_ON(se == start_se); /* It *must* be present */ 867 } 868 } 869 870 /* 871 * Free all of a swapdev's extent information 872 */ 873 static void destroy_swap_extents(struct swap_info_struct *sis) 874 { 875 while (!list_empty(&sis->extent_list)) { 876 struct swap_extent *se; 877 878 se = list_entry(sis->extent_list.next, 879 struct swap_extent, list); 880 list_del(&se->list); 881 kfree(se); 882 } 883 } 884 885 /* 886 * Add a block range (and the corresponding page range) into this swapdev's 887 * extent list. The extent list is kept sorted in page order. 888 * 889 * This function rather assumes that it is called in ascending page order. 890 */ 891 static int 892 add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, 893 unsigned long nr_pages, sector_t start_block) 894 { 895 struct swap_extent *se; 896 struct swap_extent *new_se; 897 struct list_head *lh; 898 899 lh = sis->extent_list.prev; /* The highest page extent */ 900 if (lh != &sis->extent_list) { 901 se = list_entry(lh, struct swap_extent, list); 902 BUG_ON(se->start_page + se->nr_pages != start_page); 903 if (se->start_block + se->nr_pages == start_block) { 904 /* Merge it */ 905 se->nr_pages += nr_pages; 906 return 0; 907 } 908 } 909 910 /* 911 * No merge. Insert a new extent, preserving ordering. 912 */ 913 new_se = kmalloc(sizeof(*se), GFP_KERNEL); 914 if (new_se == NULL) 915 return -ENOMEM; 916 new_se->start_page = start_page; 917 new_se->nr_pages = nr_pages; 918 new_se->start_block = start_block; 919 920 list_add_tail(&new_se->list, &sis->extent_list); 921 return 1; 922 } 923 924 /* 925 * A `swap extent' is a simple thing which maps a contiguous range of pages 926 * onto a contiguous range of disk blocks. An ordered list of swap extents 927 * is built at swapon time and is then used at swap_writepage/swap_readpage 928 * time for locating where on disk a page belongs. 929 * 930 * If the swapfile is an S_ISBLK block device, a single extent is installed. 931 * This is done so that the main operating code can treat S_ISBLK and S_ISREG 932 * swap files identically. 933 * 934 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap 935 * extent list operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK 936 * swapfiles are handled *identically* after swapon time. 937 * 938 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks 939 * and will parse them into an ordered extent list, in PAGE_SIZE chunks. If 940 * some stray blocks are found which do not fall within the PAGE_SIZE alignment 941 * requirements, they are simply tossed out - we will never use those blocks 942 * for swapping. 943 * 944 * For S_ISREG swapfiles we set S_SWAPFILE across the life of the swapon. This 945 * prevents root from shooting her foot off by ftruncating an in-use swapfile, 946 * which will scribble on the fs. 947 * 948 * The amount of disk space which a single swap extent represents varies. 949 * Typically it is in the 1-4 megabyte range. So we can have hundreds of 950 * extents in the list. To avoid much list walking, we cache the previous 951 * search location in `curr_swap_extent', and start new searches from there. 952 * This is extremely effective. The average number of iterations in 953 * map_swap_page() has been measured at about 0.3 per page. - akpm. 954 */ 955 static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span) 956 { 957 struct inode *inode; 958 unsigned blocks_per_page; 959 unsigned long page_no; 960 unsigned blkbits; 961 sector_t probe_block; 962 sector_t last_block; 963 sector_t lowest_block = -1; 964 sector_t highest_block = 0; 965 int nr_extents = 0; 966 int ret; 967 968 inode = sis->swap_file->f_mapping->host; 969 if (S_ISBLK(inode->i_mode)) { 970 ret = add_swap_extent(sis, 0, sis->max, 0); 971 *span = sis->pages; 972 goto done; 973 } 974 975 blkbits = inode->i_blkbits; 976 blocks_per_page = PAGE_SIZE >> blkbits; 977 978 /* 979 * Map all the blocks into the extent list. This code doesn't try 980 * to be very smart. 981 */ 982 probe_block = 0; 983 page_no = 0; 984 last_block = i_size_read(inode) >> blkbits; 985 while ((probe_block + blocks_per_page) <= last_block && 986 page_no < sis->max) { 987 unsigned block_in_page; 988 sector_t first_block; 989 990 first_block = bmap(inode, probe_block); 991 if (first_block == 0) 992 goto bad_bmap; 993 994 /* 995 * It must be PAGE_SIZE aligned on-disk 996 */ 997 if (first_block & (blocks_per_page - 1)) { 998 probe_block++; 999 goto reprobe; 1000 } 1001 1002 for (block_in_page = 1; block_in_page < blocks_per_page; 1003 block_in_page++) { 1004 sector_t block; 1005 1006 block = bmap(inode, probe_block + block_in_page); 1007 if (block == 0) 1008 goto bad_bmap; 1009 if (block != first_block + block_in_page) { 1010 /* Discontiguity */ 1011 probe_block++; 1012 goto reprobe; 1013 } 1014 } 1015 1016 first_block >>= (PAGE_SHIFT - blkbits); 1017 if (page_no) { /* exclude the header page */ 1018 if (first_block < lowest_block) 1019 lowest_block = first_block; 1020 if (first_block > highest_block) 1021 highest_block = first_block; 1022 } 1023 1024 /* 1025 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks 1026 */ 1027 ret = add_swap_extent(sis, page_no, 1, first_block); 1028 if (ret < 0) 1029 goto out; 1030 nr_extents += ret; 1031 page_no++; 1032 probe_block += blocks_per_page; 1033 reprobe: 1034 continue; 1035 } 1036 ret = nr_extents; 1037 *span = 1 + highest_block - lowest_block; 1038 if (page_no == 0) 1039 page_no = 1; /* force Empty message */ 1040 sis->max = page_no; 1041 sis->pages = page_no - 1; 1042 sis->highest_bit = page_no - 1; 1043 done: 1044 sis->curr_swap_extent = list_entry(sis->extent_list.prev, 1045 struct swap_extent, list); 1046 goto out; 1047 bad_bmap: 1048 printk(KERN_ERR "swapon: swapfile has holes\n"); 1049 ret = -EINVAL; 1050 out: 1051 return ret; 1052 } 1053 1054 #if 0 /* We don't need this yet */ 1055 #include <linux/backing-dev.h> 1056 int page_queue_congested(struct page *page) 1057 { 1058 struct backing_dev_info *bdi; 1059 1060 BUG_ON(!PageLocked(page)); /* It pins the swap_info_struct */ 1061 1062 if (PageSwapCache(page)) { 1063 swp_entry_t entry = { .val = page_private(page) }; 1064 struct swap_info_struct *sis; 1065 1066 sis = get_swap_info_struct(swp_type(entry)); 1067 bdi = sis->bdev->bd_inode->i_mapping->backing_dev_info; 1068 } else 1069 bdi = page->mapping->backing_dev_info; 1070 return bdi_write_congested(bdi); 1071 } 1072 #endif 1073 1074 asmlinkage long sys_swapoff(const char __user * specialfile) 1075 { 1076 struct swap_info_struct * p = NULL; 1077 unsigned short *swap_map; 1078 struct file *swap_file, *victim; 1079 struct address_space *mapping; 1080 struct inode *inode; 1081 char * pathname; 1082 int i, type, prev; 1083 int err; 1084 1085 if (!capable(CAP_SYS_ADMIN)) 1086 return -EPERM; 1087 1088 pathname = getname(specialfile); 1089 err = PTR_ERR(pathname); 1090 if (IS_ERR(pathname)) 1091 goto out; 1092 1093 victim = filp_open(pathname, O_RDWR|O_LARGEFILE, 0); 1094 putname(pathname); 1095 err = PTR_ERR(victim); 1096 if (IS_ERR(victim)) 1097 goto out; 1098 1099 mapping = victim->f_mapping; 1100 prev = -1; 1101 spin_lock(&swap_lock); 1102 for (type = swap_list.head; type >= 0; type = swap_info[type].next) { 1103 p = swap_info + type; 1104 if ((p->flags & SWP_ACTIVE) == SWP_ACTIVE) { 1105 if (p->swap_file->f_mapping == mapping) 1106 break; 1107 } 1108 prev = type; 1109 } 1110 if (type < 0) { 1111 err = -EINVAL; 1112 spin_unlock(&swap_lock); 1113 goto out_dput; 1114 } 1115 if (!security_vm_enough_memory(p->pages)) 1116 vm_unacct_memory(p->pages); 1117 else { 1118 err = -ENOMEM; 1119 spin_unlock(&swap_lock); 1120 goto out_dput; 1121 } 1122 if (prev < 0) { 1123 swap_list.head = p->next; 1124 } else { 1125 swap_info[prev].next = p->next; 1126 } 1127 if (type == swap_list.next) { 1128 /* just pick something that's safe... */ 1129 swap_list.next = swap_list.head; 1130 } 1131 nr_swap_pages -= p->pages; 1132 total_swap_pages -= p->pages; 1133 p->flags &= ~SWP_WRITEOK; 1134 spin_unlock(&swap_lock); 1135 1136 current->flags |= PF_SWAPOFF; 1137 err = try_to_unuse(type); 1138 current->flags &= ~PF_SWAPOFF; 1139 1140 if (err) { 1141 /* re-insert swap space back into swap_list */ 1142 spin_lock(&swap_lock); 1143 for (prev = -1, i = swap_list.head; i >= 0; prev = i, i = swap_info[i].next) 1144 if (p->prio >= swap_info[i].prio) 1145 break; 1146 p->next = i; 1147 if (prev < 0) 1148 swap_list.head = swap_list.next = p - swap_info; 1149 else 1150 swap_info[prev].next = p - swap_info; 1151 nr_swap_pages += p->pages; 1152 total_swap_pages += p->pages; 1153 p->flags |= SWP_WRITEOK; 1154 spin_unlock(&swap_lock); 1155 goto out_dput; 1156 } 1157 1158 /* wait for any unplug function to finish */ 1159 down_write(&swap_unplug_sem); 1160 up_write(&swap_unplug_sem); 1161 1162 destroy_swap_extents(p); 1163 down(&swapon_sem); 1164 spin_lock(&swap_lock); 1165 drain_mmlist(); 1166 1167 /* wait for anyone still in scan_swap_map */ 1168 p->highest_bit = 0; /* cuts scans short */ 1169 while (p->flags >= SWP_SCANNING) { 1170 spin_unlock(&swap_lock); 1171 schedule_timeout_uninterruptible(1); 1172 spin_lock(&swap_lock); 1173 } 1174 1175 swap_file = p->swap_file; 1176 p->swap_file = NULL; 1177 p->max = 0; 1178 swap_map = p->swap_map; 1179 p->swap_map = NULL; 1180 p->flags = 0; 1181 spin_unlock(&swap_lock); 1182 up(&swapon_sem); 1183 vfree(swap_map); 1184 inode = mapping->host; 1185 if (S_ISBLK(inode->i_mode)) { 1186 struct block_device *bdev = I_BDEV(inode); 1187 set_blocksize(bdev, p->old_block_size); 1188 bd_release(bdev); 1189 } else { 1190 mutex_lock(&inode->i_mutex); 1191 inode->i_flags &= ~S_SWAPFILE; 1192 mutex_unlock(&inode->i_mutex); 1193 } 1194 filp_close(swap_file, NULL); 1195 err = 0; 1196 1197 out_dput: 1198 filp_close(victim, NULL); 1199 out: 1200 return err; 1201 } 1202 1203 #ifdef CONFIG_PROC_FS 1204 /* iterator */ 1205 static void *swap_start(struct seq_file *swap, loff_t *pos) 1206 { 1207 struct swap_info_struct *ptr = swap_info; 1208 int i; 1209 loff_t l = *pos; 1210 1211 down(&swapon_sem); 1212 1213 for (i = 0; i < nr_swapfiles; i++, ptr++) { 1214 if (!(ptr->flags & SWP_USED) || !ptr->swap_map) 1215 continue; 1216 if (!l--) 1217 return ptr; 1218 } 1219 1220 return NULL; 1221 } 1222 1223 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) 1224 { 1225 struct swap_info_struct *ptr = v; 1226 struct swap_info_struct *endptr = swap_info + nr_swapfiles; 1227 1228 for (++ptr; ptr < endptr; ptr++) { 1229 if (!(ptr->flags & SWP_USED) || !ptr->swap_map) 1230 continue; 1231 ++*pos; 1232 return ptr; 1233 } 1234 1235 return NULL; 1236 } 1237 1238 static void swap_stop(struct seq_file *swap, void *v) 1239 { 1240 up(&swapon_sem); 1241 } 1242 1243 static int swap_show(struct seq_file *swap, void *v) 1244 { 1245 struct swap_info_struct *ptr = v; 1246 struct file *file; 1247 int len; 1248 1249 if (v == swap_info) 1250 seq_puts(swap, "Filename\t\t\t\tType\t\tSize\tUsed\tPriority\n"); 1251 1252 file = ptr->swap_file; 1253 len = seq_path(swap, file->f_vfsmnt, file->f_dentry, " \t\n\\"); 1254 seq_printf(swap, "%*s%s\t%u\t%u\t%d\n", 1255 len < 40 ? 40 - len : 1, " ", 1256 S_ISBLK(file->f_dentry->d_inode->i_mode) ? 1257 "partition" : "file\t", 1258 ptr->pages << (PAGE_SHIFT - 10), 1259 ptr->inuse_pages << (PAGE_SHIFT - 10), 1260 ptr->prio); 1261 return 0; 1262 } 1263 1264 static struct seq_operations swaps_op = { 1265 .start = swap_start, 1266 .next = swap_next, 1267 .stop = swap_stop, 1268 .show = swap_show 1269 }; 1270 1271 static int swaps_open(struct inode *inode, struct file *file) 1272 { 1273 return seq_open(file, &swaps_op); 1274 } 1275 1276 static struct file_operations proc_swaps_operations = { 1277 .open = swaps_open, 1278 .read = seq_read, 1279 .llseek = seq_lseek, 1280 .release = seq_release, 1281 }; 1282 1283 static int __init procswaps_init(void) 1284 { 1285 struct proc_dir_entry *entry; 1286 1287 entry = create_proc_entry("swaps", 0, NULL); 1288 if (entry) 1289 entry->proc_fops = &proc_swaps_operations; 1290 return 0; 1291 } 1292 __initcall(procswaps_init); 1293 #endif /* CONFIG_PROC_FS */ 1294 1295 /* 1296 * Written 01/25/92 by Simmule Turner, heavily changed by Linus. 1297 * 1298 * The swapon system call 1299 */ 1300 asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags) 1301 { 1302 struct swap_info_struct * p; 1303 char *name = NULL; 1304 struct block_device *bdev = NULL; 1305 struct file *swap_file = NULL; 1306 struct address_space *mapping; 1307 unsigned int type; 1308 int i, prev; 1309 int error; 1310 static int least_priority; 1311 union swap_header *swap_header = NULL; 1312 int swap_header_version; 1313 unsigned int nr_good_pages = 0; 1314 int nr_extents = 0; 1315 sector_t span; 1316 unsigned long maxpages = 1; 1317 int swapfilesize; 1318 unsigned short *swap_map; 1319 struct page *page = NULL; 1320 struct inode *inode = NULL; 1321 int did_down = 0; 1322 1323 if (!capable(CAP_SYS_ADMIN)) 1324 return -EPERM; 1325 spin_lock(&swap_lock); 1326 p = swap_info; 1327 for (type = 0 ; type < nr_swapfiles ; type++,p++) 1328 if (!(p->flags & SWP_USED)) 1329 break; 1330 error = -EPERM; 1331 /* 1332 * Test if adding another swap device is possible. There are 1333 * two limiting factors: 1) the number of bits for the swap 1334 * type swp_entry_t definition and 2) the number of bits for 1335 * the swap type in the swap ptes as defined by the different 1336 * architectures. To honor both limitations a swap entry 1337 * with swap offset 0 and swap type ~0UL is created, encoded 1338 * to a swap pte, decoded to a swp_entry_t again and finally 1339 * the swap type part is extracted. This will mask all bits 1340 * from the initial ~0UL that can't be encoded in either the 1341 * swp_entry_t or the architecture definition of a swap pte. 1342 */ 1343 if (type > swp_type(pte_to_swp_entry(swp_entry_to_pte(swp_entry(~0UL,0))))) { 1344 spin_unlock(&swap_lock); 1345 goto out; 1346 } 1347 if (type >= nr_swapfiles) 1348 nr_swapfiles = type+1; 1349 INIT_LIST_HEAD(&p->extent_list); 1350 p->flags = SWP_USED; 1351 p->swap_file = NULL; 1352 p->old_block_size = 0; 1353 p->swap_map = NULL; 1354 p->lowest_bit = 0; 1355 p->highest_bit = 0; 1356 p->cluster_nr = 0; 1357 p->inuse_pages = 0; 1358 p->next = -1; 1359 if (swap_flags & SWAP_FLAG_PREFER) { 1360 p->prio = 1361 (swap_flags & SWAP_FLAG_PRIO_MASK)>>SWAP_FLAG_PRIO_SHIFT; 1362 } else { 1363 p->prio = --least_priority; 1364 } 1365 spin_unlock(&swap_lock); 1366 name = getname(specialfile); 1367 error = PTR_ERR(name); 1368 if (IS_ERR(name)) { 1369 name = NULL; 1370 goto bad_swap_2; 1371 } 1372 swap_file = filp_open(name, O_RDWR|O_LARGEFILE, 0); 1373 error = PTR_ERR(swap_file); 1374 if (IS_ERR(swap_file)) { 1375 swap_file = NULL; 1376 goto bad_swap_2; 1377 } 1378 1379 p->swap_file = swap_file; 1380 mapping = swap_file->f_mapping; 1381 inode = mapping->host; 1382 1383 error = -EBUSY; 1384 for (i = 0; i < nr_swapfiles; i++) { 1385 struct swap_info_struct *q = &swap_info[i]; 1386 1387 if (i == type || !q->swap_file) 1388 continue; 1389 if (mapping == q->swap_file->f_mapping) 1390 goto bad_swap; 1391 } 1392 1393 error = -EINVAL; 1394 if (S_ISBLK(inode->i_mode)) { 1395 bdev = I_BDEV(inode); 1396 error = bd_claim(bdev, sys_swapon); 1397 if (error < 0) { 1398 bdev = NULL; 1399 error = -EINVAL; 1400 goto bad_swap; 1401 } 1402 p->old_block_size = block_size(bdev); 1403 error = set_blocksize(bdev, PAGE_SIZE); 1404 if (error < 0) 1405 goto bad_swap; 1406 p->bdev = bdev; 1407 } else if (S_ISREG(inode->i_mode)) { 1408 p->bdev = inode->i_sb->s_bdev; 1409 mutex_lock(&inode->i_mutex); 1410 did_down = 1; 1411 if (IS_SWAPFILE(inode)) { 1412 error = -EBUSY; 1413 goto bad_swap; 1414 } 1415 } else { 1416 goto bad_swap; 1417 } 1418 1419 swapfilesize = i_size_read(inode) >> PAGE_SHIFT; 1420 1421 /* 1422 * Read the swap header. 1423 */ 1424 if (!mapping->a_ops->readpage) { 1425 error = -EINVAL; 1426 goto bad_swap; 1427 } 1428 page = read_cache_page(mapping, 0, 1429 (filler_t *)mapping->a_ops->readpage, swap_file); 1430 if (IS_ERR(page)) { 1431 error = PTR_ERR(page); 1432 goto bad_swap; 1433 } 1434 wait_on_page_locked(page); 1435 if (!PageUptodate(page)) 1436 goto bad_swap; 1437 kmap(page); 1438 swap_header = page_address(page); 1439 1440 if (!memcmp("SWAP-SPACE",swap_header->magic.magic,10)) 1441 swap_header_version = 1; 1442 else if (!memcmp("SWAPSPACE2",swap_header->magic.magic,10)) 1443 swap_header_version = 2; 1444 else { 1445 printk("Unable to find swap-space signature\n"); 1446 error = -EINVAL; 1447 goto bad_swap; 1448 } 1449 1450 switch (swap_header_version) { 1451 case 1: 1452 printk(KERN_ERR "version 0 swap is no longer supported. " 1453 "Use mkswap -v1 %s\n", name); 1454 error = -EINVAL; 1455 goto bad_swap; 1456 case 2: 1457 /* Check the swap header's sub-version and the size of 1458 the swap file and bad block lists */ 1459 if (swap_header->info.version != 1) { 1460 printk(KERN_WARNING 1461 "Unable to handle swap header version %d\n", 1462 swap_header->info.version); 1463 error = -EINVAL; 1464 goto bad_swap; 1465 } 1466 1467 p->lowest_bit = 1; 1468 p->cluster_next = 1; 1469 1470 /* 1471 * Find out how many pages are allowed for a single swap 1472 * device. There are two limiting factors: 1) the number of 1473 * bits for the swap offset in the swp_entry_t type and 1474 * 2) the number of bits in the a swap pte as defined by 1475 * the different architectures. In order to find the 1476 * largest possible bit mask a swap entry with swap type 0 1477 * and swap offset ~0UL is created, encoded to a swap pte, 1478 * decoded to a swp_entry_t again and finally the swap 1479 * offset is extracted. This will mask all the bits from 1480 * the initial ~0UL mask that can't be encoded in either 1481 * the swp_entry_t or the architecture definition of a 1482 * swap pte. 1483 */ 1484 maxpages = swp_offset(pte_to_swp_entry(swp_entry_to_pte(swp_entry(0,~0UL)))) - 1; 1485 if (maxpages > swap_header->info.last_page) 1486 maxpages = swap_header->info.last_page; 1487 p->highest_bit = maxpages - 1; 1488 1489 error = -EINVAL; 1490 if (!maxpages) 1491 goto bad_swap; 1492 if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode)) 1493 goto bad_swap; 1494 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) 1495 goto bad_swap; 1496 1497 /* OK, set up the swap map and apply the bad block list */ 1498 if (!(p->swap_map = vmalloc(maxpages * sizeof(short)))) { 1499 error = -ENOMEM; 1500 goto bad_swap; 1501 } 1502 1503 error = 0; 1504 memset(p->swap_map, 0, maxpages * sizeof(short)); 1505 for (i = 0; i < swap_header->info.nr_badpages; i++) { 1506 int page_nr = swap_header->info.badpages[i]; 1507 if (page_nr <= 0 || page_nr >= swap_header->info.last_page) 1508 error = -EINVAL; 1509 else 1510 p->swap_map[page_nr] = SWAP_MAP_BAD; 1511 } 1512 nr_good_pages = swap_header->info.last_page - 1513 swap_header->info.nr_badpages - 1514 1 /* header page */; 1515 if (error) 1516 goto bad_swap; 1517 } 1518 1519 if (swapfilesize && maxpages > swapfilesize) { 1520 printk(KERN_WARNING 1521 "Swap area shorter than signature indicates\n"); 1522 error = -EINVAL; 1523 goto bad_swap; 1524 } 1525 if (nr_good_pages) { 1526 p->swap_map[0] = SWAP_MAP_BAD; 1527 p->max = maxpages; 1528 p->pages = nr_good_pages; 1529 nr_extents = setup_swap_extents(p, &span); 1530 if (nr_extents < 0) { 1531 error = nr_extents; 1532 goto bad_swap; 1533 } 1534 nr_good_pages = p->pages; 1535 } 1536 if (!nr_good_pages) { 1537 printk(KERN_WARNING "Empty swap-file\n"); 1538 error = -EINVAL; 1539 goto bad_swap; 1540 } 1541 1542 down(&swapon_sem); 1543 spin_lock(&swap_lock); 1544 p->flags = SWP_ACTIVE; 1545 nr_swap_pages += nr_good_pages; 1546 total_swap_pages += nr_good_pages; 1547 1548 printk(KERN_INFO "Adding %uk swap on %s. " 1549 "Priority:%d extents:%d across:%lluk\n", 1550 nr_good_pages<<(PAGE_SHIFT-10), name, p->prio, 1551 nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10)); 1552 1553 /* insert swap space into swap_list: */ 1554 prev = -1; 1555 for (i = swap_list.head; i >= 0; i = swap_info[i].next) { 1556 if (p->prio >= swap_info[i].prio) { 1557 break; 1558 } 1559 prev = i; 1560 } 1561 p->next = i; 1562 if (prev < 0) { 1563 swap_list.head = swap_list.next = p - swap_info; 1564 } else { 1565 swap_info[prev].next = p - swap_info; 1566 } 1567 spin_unlock(&swap_lock); 1568 up(&swapon_sem); 1569 error = 0; 1570 goto out; 1571 bad_swap: 1572 if (bdev) { 1573 set_blocksize(bdev, p->old_block_size); 1574 bd_release(bdev); 1575 } 1576 destroy_swap_extents(p); 1577 bad_swap_2: 1578 spin_lock(&swap_lock); 1579 swap_map = p->swap_map; 1580 p->swap_file = NULL; 1581 p->swap_map = NULL; 1582 p->flags = 0; 1583 if (!(swap_flags & SWAP_FLAG_PREFER)) 1584 ++least_priority; 1585 spin_unlock(&swap_lock); 1586 vfree(swap_map); 1587 if (swap_file) 1588 filp_close(swap_file, NULL); 1589 out: 1590 if (page && !IS_ERR(page)) { 1591 kunmap(page); 1592 page_cache_release(page); 1593 } 1594 if (name) 1595 putname(name); 1596 if (did_down) { 1597 if (!error) 1598 inode->i_flags |= S_SWAPFILE; 1599 mutex_unlock(&inode->i_mutex); 1600 } 1601 return error; 1602 } 1603 1604 void si_swapinfo(struct sysinfo *val) 1605 { 1606 unsigned int i; 1607 unsigned long nr_to_be_unused = 0; 1608 1609 spin_lock(&swap_lock); 1610 for (i = 0; i < nr_swapfiles; i++) { 1611 if (!(swap_info[i].flags & SWP_USED) || 1612 (swap_info[i].flags & SWP_WRITEOK)) 1613 continue; 1614 nr_to_be_unused += swap_info[i].inuse_pages; 1615 } 1616 val->freeswap = nr_swap_pages + nr_to_be_unused; 1617 val->totalswap = total_swap_pages + nr_to_be_unused; 1618 spin_unlock(&swap_lock); 1619 } 1620 1621 /* 1622 * Verify that a swap entry is valid and increment its swap map count. 1623 * 1624 * Note: if swap_map[] reaches SWAP_MAP_MAX the entries are treated as 1625 * "permanent", but will be reclaimed by the next swapoff. 1626 */ 1627 int swap_duplicate(swp_entry_t entry) 1628 { 1629 struct swap_info_struct * p; 1630 unsigned long offset, type; 1631 int result = 0; 1632 1633 type = swp_type(entry); 1634 if (type >= nr_swapfiles) 1635 goto bad_file; 1636 p = type + swap_info; 1637 offset = swp_offset(entry); 1638 1639 spin_lock(&swap_lock); 1640 if (offset < p->max && p->swap_map[offset]) { 1641 if (p->swap_map[offset] < SWAP_MAP_MAX - 1) { 1642 p->swap_map[offset]++; 1643 result = 1; 1644 } else if (p->swap_map[offset] <= SWAP_MAP_MAX) { 1645 if (swap_overflow++ < 5) 1646 printk(KERN_WARNING "swap_dup: swap entry overflow\n"); 1647 p->swap_map[offset] = SWAP_MAP_MAX; 1648 result = 1; 1649 } 1650 } 1651 spin_unlock(&swap_lock); 1652 out: 1653 return result; 1654 1655 bad_file: 1656 printk(KERN_ERR "swap_dup: %s%08lx\n", Bad_file, entry.val); 1657 goto out; 1658 } 1659 1660 struct swap_info_struct * 1661 get_swap_info_struct(unsigned type) 1662 { 1663 return &swap_info[type]; 1664 } 1665 1666 /* 1667 * swap_lock prevents swap_map being freed. Don't grab an extra 1668 * reference on the swaphandle, it doesn't matter if it becomes unused. 1669 */ 1670 int valid_swaphandles(swp_entry_t entry, unsigned long *offset) 1671 { 1672 int ret = 0, i = 1 << page_cluster; 1673 unsigned long toff; 1674 struct swap_info_struct *swapdev = swp_type(entry) + swap_info; 1675 1676 if (!page_cluster) /* no readahead */ 1677 return 0; 1678 toff = (swp_offset(entry) >> page_cluster) << page_cluster; 1679 if (!toff) /* first page is swap header */ 1680 toff++, i--; 1681 *offset = toff; 1682 1683 spin_lock(&swap_lock); 1684 do { 1685 /* Don't read-ahead past the end of the swap area */ 1686 if (toff >= swapdev->max) 1687 break; 1688 /* Don't read in free or bad pages */ 1689 if (!swapdev->swap_map[toff]) 1690 break; 1691 if (swapdev->swap_map[toff] == SWAP_MAP_BAD) 1692 break; 1693 toff++; 1694 ret++; 1695 } while (--i); 1696 spin_unlock(&swap_lock); 1697 return ret; 1698 } 1699