// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/mm/swapfile.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *  Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shmem_fs.h>
#include <linux/blkdev.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>
#include <linux/frontswap.h>
#include <linux/swapfile.h>
#include <linux/export.h>
#include <linux/swap_slots.h>
#include <linux/sort.h>

#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>

static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
				 unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
static sector_t map_swap_entry(swp_entry_t, struct block_device **);

DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
atomic_long_t nr_swap_pages;
/*
 * Some modules use swappable objects and may try to swap them out under
 * memory pressure (via the shrinker). Before doing so, they may wish to
 * check to see if any swap space is available.
 */
EXPORT_SYMBOL_GPL(nr_swap_pages);
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
long total_swap_pages;
static int least_priority = -1;

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

/*
 * all active swap_info_structs
 * protected with swap_lock, and ordered by priority.
 */
PLIST_HEAD(swap_active_head);

/*
 * all available (active, not full) swap_info_structs
 * protected with swap_avail_lock, ordered by priority.
 * This is used by get_swap_page() instead of swap_active_head
 * because swap_active_head includes all swap_info_structs,
 * but get_swap_page() doesn't need to look at full ones.
 * This uses its own lock instead of swap_lock because when a
 * swap_info_struct changes between not-full/full, it needs to
 * add/remove itself to/from this list, but the swap_info_struct->lock
 * is held and the locking order requires swap_lock to be taken
 * before any swap_info_struct->lock.
 */
static struct plist_head *swap_avail_heads;
static DEFINE_SPINLOCK(swap_avail_lock);

struct swap_info_struct *swap_info[MAX_SWAPFILES];

static DEFINE_MUTEX(swapon_mutex);

static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
/* Activity counter to indicate that a swapon or swapoff has occurred */
static atomic_t proc_poll_event = ATOMIC_INIT(0);

atomic_t nr_rotate_swap = ATOMIC_INIT(0);

static struct swap_info_struct *swap_type_to_swap_info(int type)
{
	if (type >= READ_ONCE(nr_swapfiles))
		return NULL;

	smp_rmb();	/* Pairs with smp_wmb in alloc_swap_info. */
	return READ_ONCE(swap_info[type]);
}

static inline unsigned char swap_count(unsigned char ent)
{
	return ent & ~SWAP_HAS_CACHE;	/* may include COUNT_CONTINUED flag */
}

/* Reclaim the swap entry anyway if possible */
#define TTRS_ANYWAY		0x1
/*
 * Reclaim the swap entry if there are no more mappings of the
 * corresponding page
 */
#define TTRS_UNMAPPED		0x2
/* Reclaim the swap entry if swap is getting full */
#define TTRS_FULL		0x4

/* returns 1 if swap entry is freed */
static int __try_to_reclaim_swap(struct swap_info_struct *si,
				 unsigned long offset, unsigned long flags)
{
	swp_entry_t entry = swp_entry(si->type, offset);
	struct page *page;
	int ret = 0;

	page = find_get_page(swap_address_space(entry), offset);
	if (!page)
		return 0;
	/*
	 * This function may be called from scan_swap_map_slots() while
	 * vmscan.c is reclaiming pages, so a page lock may already be
	 * held here.  We have to use trylock to avoid deadlock.  This is
	 * a special case; in usual operations use try_to_free_swap()
	 * with an explicit lock_page() instead.
	 */
	if (trylock_page(page)) {
		if ((flags & TTRS_ANYWAY) ||
		    ((flags & TTRS_UNMAPPED) && !page_mapped(page)) ||
		    ((flags & TTRS_FULL) && mem_cgroup_swap_full(page)))
			ret = try_to_free_swap(page);
		unlock_page(page);
	}
	put_page(page);
	return ret;
}

static inline struct swap_extent *first_se(struct swap_info_struct *sis)
{
	struct rb_node *rb = rb_first(&sis->swap_extent_root);
	return rb_entry(rb, struct swap_extent, rb_node);
}

static inline struct swap_extent *next_se(struct swap_extent *se)
{
	struct rb_node *rb = rb_next(&se->rb_node);
	return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
}
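/*
 * Illustrative note (added for exposition, not in the original source):
 * swap extents map a contiguous range of swap page offsets to a
 * contiguous range of disk blocks, kept in an rbtree ordered by
 * start_page.  A whole-device walk therefore looks like
 *
 *	for (se = first_se(sis); se; se = next_se(se))
 *		... use se->start_page, se->start_block, se->nr_pages ...
 *
 * which is essentially the pattern discard_swap() below uses (it
 * special-cases the first extent to skip the header page).
 */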
/*
 * swapon tells the device that all the old swap contents can be discarded,
 * to allow the swap device to optimize its wear-levelling.
 */
static int discard_swap(struct swap_info_struct *si)
{
	struct swap_extent *se;
	sector_t start_block;
	sector_t nr_blocks;
	int err = 0;

	/* Do not discard the swap header page! */
	se = first_se(si);
	start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
	nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
	if (nr_blocks) {
		err = blkdev_issue_discard(si->bdev, start_block,
				nr_blocks, GFP_KERNEL, 0);
		if (err)
			return err;
		cond_resched();
	}

	for (se = next_se(se); se; se = next_se(se)) {
		start_block = se->start_block << (PAGE_SHIFT - 9);
		nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);

		err = blkdev_issue_discard(si->bdev, start_block,
				nr_blocks, GFP_KERNEL, 0);
		if (err)
			break;

		cond_resched();
	}
	return err;		/* That will often be -EOPNOTSUPP */
}

static struct swap_extent *
offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
{
	struct swap_extent *se;
	struct rb_node *rb;

	rb = sis->swap_extent_root.rb_node;
	while (rb) {
		se = rb_entry(rb, struct swap_extent, rb_node);
		if (offset < se->start_page)
			rb = rb->rb_left;
		else if (offset >= se->start_page + se->nr_pages)
			rb = rb->rb_right;
		else
			return se;
	}
	/* It *must* be present */
	BUG();
}

/*
 * swap allocation tells the device that a cluster of swap can now be
 * discarded, to allow the swap device to optimize its wear-levelling.
 */
static void discard_swap_cluster(struct swap_info_struct *si,
				 pgoff_t start_page, pgoff_t nr_pages)
{
	struct swap_extent *se = offset_to_swap_extent(si, start_page);

	while (nr_pages) {
		pgoff_t offset = start_page - se->start_page;
		sector_t start_block = se->start_block + offset;
		sector_t nr_blocks = se->nr_pages - offset;

		if (nr_blocks > nr_pages)
			nr_blocks = nr_pages;
		start_page += nr_blocks;
		nr_pages -= nr_blocks;

		start_block <<= PAGE_SHIFT - 9;
		nr_blocks <<= PAGE_SHIFT - 9;
		if (blkdev_issue_discard(si->bdev, start_block,
					nr_blocks, GFP_NOIO, 0))
			break;

		se = next_se(se);
	}
}

#ifdef CONFIG_THP_SWAP
#define SWAPFILE_CLUSTER	HPAGE_PMD_NR

#define swap_entry_size(size)	(size)
#else
#define SWAPFILE_CLUSTER	256

/*
 * Define swap_entry_size() as constant to let the compiler optimize
 * out some code if !CONFIG_THP_SWAP
 */
#define swap_entry_size(size)	1
#endif
#define LATENCY_LIMIT		256

static inline void cluster_set_flag(struct swap_cluster_info *info,
	unsigned int flag)
{
	info->flags = flag;
}

static inline unsigned int cluster_count(struct swap_cluster_info *info)
{
	return info->data;
}

static inline void cluster_set_count(struct swap_cluster_info *info,
				     unsigned int c)
{
	info->data = c;
}

static inline void cluster_set_count_flag(struct swap_cluster_info *info,
					  unsigned int c, unsigned int f)
{
	info->flags = f;
	info->data = c;
}

static inline unsigned int cluster_next(struct swap_cluster_info *info)
{
	return info->data;
}

static inline void cluster_set_next(struct swap_cluster_info *info,
				    unsigned int n)
{
	info->data = n;
}

static inline void cluster_set_next_flag(struct swap_cluster_info *info,
					 unsigned int n, unsigned int f)
{
	info->flags = f;
	info->data = n;
}

static inline bool cluster_is_free(struct swap_cluster_info *info)
{
	return info->flags & CLUSTER_FLAG_FREE;
}
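/*
 * Illustrative note (added for exposition): the accessors above overload
 * the single swap_cluster_info::data field.  For an allocated cluster it
 * holds the usage count (cluster_count()/cluster_set_count()); for a
 * cluster sitting on a free or discard list it holds the index of the
 * next cluster in that list (cluster_next()/cluster_set_next()), with
 * the flags field saying which interpretation currently applies.
 */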
static inline bool cluster_is_null(struct swap_cluster_info *info)
{
	return info->flags & CLUSTER_FLAG_NEXT_NULL;
}

static inline void cluster_set_null(struct swap_cluster_info *info)
{
	info->flags = CLUSTER_FLAG_NEXT_NULL;
	info->data = 0;
}

static inline bool cluster_is_huge(struct swap_cluster_info *info)
{
	if (IS_ENABLED(CONFIG_THP_SWAP))
		return info->flags & CLUSTER_FLAG_HUGE;
	return false;
}

static inline void cluster_clear_huge(struct swap_cluster_info *info)
{
	info->flags &= ~CLUSTER_FLAG_HUGE;
}

static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
						     unsigned long offset)
{
	struct swap_cluster_info *ci;

	ci = si->cluster_info;
	if (ci) {
		ci += offset / SWAPFILE_CLUSTER;
		spin_lock(&ci->lock);
	}
	return ci;
}

static inline void unlock_cluster(struct swap_cluster_info *ci)
{
	if (ci)
		spin_unlock(&ci->lock);
}

/*
 * Determine the locking method in use for this device.  Return
 * swap_cluster_info if SSD-style cluster-based locking is in place.
 */
static inline struct swap_cluster_info *lock_cluster_or_swap_info(
		struct swap_info_struct *si, unsigned long offset)
{
	struct swap_cluster_info *ci;

	/* Try to use fine-grained SSD-style locking if available: */
	ci = lock_cluster(si, offset);
	/* Otherwise, fall back to traditional, coarse locking: */
	if (!ci)
		spin_lock(&si->lock);

	return ci;
}

static inline void unlock_cluster_or_swap_info(struct swap_info_struct *si,
					       struct swap_cluster_info *ci)
{
	if (ci)
		unlock_cluster(ci);
	else
		spin_unlock(&si->lock);
}

static inline bool cluster_list_empty(struct swap_cluster_list *list)
{
	return cluster_is_null(&list->head);
}

static inline unsigned int cluster_list_first(struct swap_cluster_list *list)
{
	return cluster_next(&list->head);
}

static void cluster_list_init(struct swap_cluster_list *list)
{
	cluster_set_null(&list->head);
	cluster_set_null(&list->tail);
}

static void cluster_list_add_tail(struct swap_cluster_list *list,
				  struct swap_cluster_info *ci,
				  unsigned int idx)
{
	if (cluster_list_empty(list)) {
		cluster_set_next_flag(&list->head, idx, 0);
		cluster_set_next_flag(&list->tail, idx, 0);
	} else {
		struct swap_cluster_info *ci_tail;
		unsigned int tail = cluster_next(&list->tail);

		/*
		 * Nested cluster lock, but both cluster locks are
		 * only acquired while swap_info_struct->lock is held.
		 */
		ci_tail = ci + tail;
		spin_lock_nested(&ci_tail->lock, SINGLE_DEPTH_NESTING);
		cluster_set_next(ci_tail, idx);
		spin_unlock(&ci_tail->lock);
		cluster_set_next_flag(&list->tail, idx, 0);
	}
}

static unsigned int cluster_list_del_first(struct swap_cluster_list *list,
					   struct swap_cluster_info *ci)
{
	unsigned int idx;

	idx = cluster_next(&list->head);
	if (cluster_next(&list->tail) == idx) {
		cluster_set_null(&list->head);
		cluster_set_null(&list->tail);
	} else
		cluster_set_next_flag(&list->head,
				      cluster_next(&ci[idx]), 0);

	return idx;
}
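/*
 * Illustrative overview (added for exposition): when a cluster becomes
 * empty on a device using per-cluster discard (SWP_PAGE_DISCARD), it is
 * not freed directly.  Instead free_cluster() calls
 * swap_cluster_schedule_discard(), which parks the cluster on
 * si->discard_clusters and kicks si->discard_work; the worker then
 * issues the block discard and only afterwards moves the cluster to
 * si->free_clusters via __free_cluster().
 */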
/* Add a cluster to discard list and schedule it to do discard */
static void swap_cluster_schedule_discard(struct swap_info_struct *si,
		unsigned int idx)
{
	/*
	 * If scan_swap_map() can't find a free cluster, it will check
	 * si->swap_map directly.  To make sure the discarding cluster isn't
	 * taken by scan_swap_map(), mark the swap entries bad (occupied).
	 * They will be cleared after discard.
	 */
	memset(si->swap_map + idx * SWAPFILE_CLUSTER,
			SWAP_MAP_BAD, SWAPFILE_CLUSTER);

	cluster_list_add_tail(&si->discard_clusters, si->cluster_info, idx);

	schedule_work(&si->discard_work);
}

static void __free_cluster(struct swap_info_struct *si, unsigned long idx)
{
	struct swap_cluster_info *ci = si->cluster_info;

	cluster_set_flag(ci + idx, CLUSTER_FLAG_FREE);
	cluster_list_add_tail(&si->free_clusters, ci, idx);
}

/*
 * Actually do the discards.  After a cluster discard is finished, the
 * cluster will be added to the free cluster list.  Caller should hold
 * si->lock.
 */
static void swap_do_scheduled_discard(struct swap_info_struct *si)
{
	struct swap_cluster_info *info, *ci;
	unsigned int idx;

	info = si->cluster_info;

	while (!cluster_list_empty(&si->discard_clusters)) {
		idx = cluster_list_del_first(&si->discard_clusters, info);
		spin_unlock(&si->lock);

		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
				SWAPFILE_CLUSTER);

		spin_lock(&si->lock);
		ci = lock_cluster(si, idx * SWAPFILE_CLUSTER);
		__free_cluster(si, idx);
		memset(si->swap_map + idx * SWAPFILE_CLUSTER,
				0, SWAPFILE_CLUSTER);
		unlock_cluster(ci);
	}
}

static void swap_discard_work(struct work_struct *work)
{
	struct swap_info_struct *si;

	si = container_of(work, struct swap_info_struct, discard_work);

	spin_lock(&si->lock);
	swap_do_scheduled_discard(si);
	spin_unlock(&si->lock);
}

static void alloc_cluster(struct swap_info_struct *si, unsigned long idx)
{
	struct swap_cluster_info *ci = si->cluster_info;

	VM_BUG_ON(cluster_list_first(&si->free_clusters) != idx);
	cluster_list_del_first(&si->free_clusters, ci);
	cluster_set_count_flag(ci + idx, 0, 0);
}

static void free_cluster(struct swap_info_struct *si, unsigned long idx)
{
	struct swap_cluster_info *ci = si->cluster_info + idx;

	VM_BUG_ON(cluster_count(ci) != 0);
	/*
	 * If the swap is discardable, schedule a discard of the cluster
	 * instead of freeing it immediately.  The cluster will be freed
	 * after discard.
	 */
	if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
	    (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
		swap_cluster_schedule_discard(si, idx);
		return;
	}

	__free_cluster(si, idx);
}

/*
 * The cluster corresponding to page_nr will be used.  The cluster will be
 * removed from free cluster list and its usage counter will be increased.
 */
static void inc_cluster_info_page(struct swap_info_struct *p,
	struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
	unsigned long idx = page_nr / SWAPFILE_CLUSTER;

	if (!cluster_info)
		return;
	if (cluster_is_free(&cluster_info[idx]))
		alloc_cluster(p, idx);

	VM_BUG_ON(cluster_count(&cluster_info[idx]) >= SWAPFILE_CLUSTER);
	cluster_set_count(&cluster_info[idx],
		cluster_count(&cluster_info[idx]) + 1);
}
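/*
 * Illustrative note (added for exposition): inc_cluster_info_page() above
 * and dec_cluster_info_page() below form a simple refcount on each
 * cluster.  The first increment pulls the cluster off the free list; when
 * the last user goes away the count hits zero and the cluster is handed
 * back via free_cluster(), possibly taking the discard detour described
 * earlier.
 */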
/*
 * The cluster corresponding to page_nr decreases one usage.  If the usage
 * counter becomes 0, which means no page in the cluster is in use, we can
 * optionally discard the cluster and add it to the free cluster list.
 */
static void dec_cluster_info_page(struct swap_info_struct *p,
	struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
	unsigned long idx = page_nr / SWAPFILE_CLUSTER;

	if (!cluster_info)
		return;

	VM_BUG_ON(cluster_count(&cluster_info[idx]) == 0);
	cluster_set_count(&cluster_info[idx],
		cluster_count(&cluster_info[idx]) - 1);

	if (cluster_count(&cluster_info[idx]) == 0)
		free_cluster(p, idx);
}

/*
 * It's possible scan_swap_map() uses a free cluster in the middle of the
 * free cluster list.  Avoid such abuse to prevent list corruption.
 */
static bool
scan_swap_map_ssd_cluster_conflict(struct swap_info_struct *si,
	unsigned long offset)
{
	struct percpu_cluster *percpu_cluster;
	bool conflict;

	offset /= SWAPFILE_CLUSTER;
	conflict = !cluster_list_empty(&si->free_clusters) &&
		offset != cluster_list_first(&si->free_clusters) &&
		cluster_is_free(&si->cluster_info[offset]);

	if (!conflict)
		return false;

	percpu_cluster = this_cpu_ptr(si->percpu_cluster);
	cluster_set_null(&percpu_cluster->index);
	return true;
}

/*
 * Try to get a swap entry from current cpu's swap entry pool (a cluster).
 * This might involve allocating a new cluster for current CPU too.
 */
static bool scan_swap_map_try_ssd_cluster(struct swap_info_struct *si,
	unsigned long *offset, unsigned long *scan_base)
{
	struct percpu_cluster *cluster;
	struct swap_cluster_info *ci;
	unsigned long tmp, max;

new_cluster:
	cluster = this_cpu_ptr(si->percpu_cluster);
	if (cluster_is_null(&cluster->index)) {
		if (!cluster_list_empty(&si->free_clusters)) {
			cluster->index = si->free_clusters.head;
			cluster->next = cluster_next(&cluster->index) *
					SWAPFILE_CLUSTER;
		} else if (!cluster_list_empty(&si->discard_clusters)) {
			/*
			 * We don't have a free cluster but have some
			 * clusters in discarding: do the discard now and
			 * reclaim them, then reread cluster_next_cpu
			 * since we dropped si->lock.
			 */
			swap_do_scheduled_discard(si);
			*scan_base = this_cpu_read(*si->cluster_next_cpu);
			*offset = *scan_base;
			goto new_cluster;
		} else
			return false;
	}

	/*
	 * Other CPUs can use our cluster if they can't find a free cluster;
	 * check if there is still a free entry in the cluster.
	 */
	tmp = cluster->next;
	max = min_t(unsigned long, si->max,
		    (cluster_next(&cluster->index) + 1) * SWAPFILE_CLUSTER);
	if (tmp < max) {
		ci = lock_cluster(si, tmp);
		while (tmp < max) {
			if (!si->swap_map[tmp])
				break;
			tmp++;
		}
		unlock_cluster(ci);
	}
	if (tmp >= max) {
		cluster_set_null(&cluster->index);
		goto new_cluster;
	}
	cluster->next = tmp + 1;
	*offset = tmp;
	*scan_base = tmp;
	return true;
}

static void __del_from_avail_list(struct swap_info_struct *p)
{
	int nid;

	for_each_node(nid)
		plist_del(&p->avail_lists[nid], &swap_avail_heads[nid]);
}

static void del_from_avail_list(struct swap_info_struct *p)
{
	spin_lock(&swap_avail_lock);
	__del_from_avail_list(p);
	spin_unlock(&swap_avail_lock);
}
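/*
 * Illustrative note (added for exposition): a device sits on the
 * per-node swap_avail_heads plists only while it still has free slots
 * and is writable.  swap_range_alloc() below drops it from every node's
 * list the moment it fills up, and swap_range_free() puts it back the
 * moment a slot is released, so get_swap_page() never has to scan full
 * devices.
 */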
static void swap_range_alloc(struct swap_info_struct *si, unsigned long offset,
			     unsigned int nr_entries)
{
	unsigned int end = offset + nr_entries - 1;

	if (offset == si->lowest_bit)
		si->lowest_bit += nr_entries;
	if (end == si->highest_bit)
		WRITE_ONCE(si->highest_bit, si->highest_bit - nr_entries);
	si->inuse_pages += nr_entries;
	if (si->inuse_pages == si->pages) {
		si->lowest_bit = si->max;
		si->highest_bit = 0;
		del_from_avail_list(si);
	}
}

static void add_to_avail_list(struct swap_info_struct *p)
{
	int nid;

	spin_lock(&swap_avail_lock);
	for_each_node(nid) {
		WARN_ON(!plist_node_empty(&p->avail_lists[nid]));
		plist_add(&p->avail_lists[nid], &swap_avail_heads[nid]);
	}
	spin_unlock(&swap_avail_lock);
}

static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
			    unsigned int nr_entries)
{
	unsigned long begin = offset;
	unsigned long end = offset + nr_entries - 1;
	void (*swap_slot_free_notify)(struct block_device *, unsigned long);

	if (offset < si->lowest_bit)
		si->lowest_bit = offset;
	if (end > si->highest_bit) {
		bool was_full = !si->highest_bit;

		WRITE_ONCE(si->highest_bit, end);
		if (was_full && (si->flags & SWP_WRITEOK))
			add_to_avail_list(si);
	}
	atomic_long_add(nr_entries, &nr_swap_pages);
	si->inuse_pages -= nr_entries;
	if (si->flags & SWP_BLKDEV)
		swap_slot_free_notify =
			si->bdev->bd_disk->fops->swap_slot_free_notify;
	else
		swap_slot_free_notify = NULL;
	while (offset <= end) {
		arch_swap_invalidate_page(si->type, offset);
		frontswap_invalidate_page(si->type, offset);
		if (swap_slot_free_notify)
			swap_slot_free_notify(si->bdev, offset);
		offset++;
	}
	clear_shadow_from_swap_cache(si->type, begin, end);
}

static void set_cluster_next(struct swap_info_struct *si, unsigned long next)
{
	unsigned long prev;

	if (!(si->flags & SWP_SOLIDSTATE)) {
		si->cluster_next = next;
		return;
	}

	prev = this_cpu_read(*si->cluster_next_cpu);
	/*
	 * When we cross into another swap-address-space-size aligned
	 * chunk, choose another chunk at random, to avoid lock contention
	 * on the swap address space if possible.
	 */
	if ((prev >> SWAP_ADDRESS_SPACE_SHIFT) !=
	    (next >> SWAP_ADDRESS_SPACE_SHIFT)) {
		/* No free swap slots available */
		if (si->highest_bit <= si->lowest_bit)
			return;
		next = si->lowest_bit +
			prandom_u32_max(si->highest_bit - si->lowest_bit + 1);
		next = ALIGN_DOWN(next, SWAP_ADDRESS_SPACE_PAGES);
		next = max_t(unsigned int, next, si->lowest_bit);
	}
	this_cpu_write(*si->cluster_next_cpu, next);
}
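/*
 * Illustrative overview (added for exposition): scan_swap_map_slots()
 * below is the slot allocator proper.  On SSDs (si->cluster_info set) it
 * first asks scan_swap_map_try_ssd_cluster() for a slot from this CPU's
 * current cluster; on rotating disks it instead allocates sequentially,
 * starting a fresh SWAPFILE_CLUSTER-sized run once the current one is
 * exhausted.  Only when these fast paths fail does it fall back to the
 * byte-by-byte scan of si->swap_map at the "scan:" label.
 */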
static int scan_swap_map_slots(struct swap_info_struct *si,
			       unsigned char usage, int nr,
			       swp_entry_t slots[])
{
	struct swap_cluster_info *ci;
	unsigned long offset;
	unsigned long scan_base;
	unsigned long last_in_cluster = 0;
	int latency_ration = LATENCY_LIMIT;
	int n_ret = 0;
	bool scanned_many = false;

	/*
	 * We try to cluster swap pages by allocating them sequentially
	 * in swap.  Once we've allocated SWAPFILE_CLUSTER pages this
	 * way, however, we resort to first-free allocation, starting
	 * a new cluster.  This prevents us from scattering swap pages
	 * all over the entire swap partition, so that we reduce
	 * overall disk seek times between swap pages.  -- sct
	 * But we do now try to find an empty cluster.  -Andrea
	 * And we let swap pages go all over an SSD partition.  Hugh
	 */

	si->flags += SWP_SCANNING;
	/*
	 * Use percpu scan base for SSD to reduce lock contention on
	 * cluster and swap cache.  For HDD, sequential access is more
	 * important.
	 */
	if (si->flags & SWP_SOLIDSTATE)
		scan_base = this_cpu_read(*si->cluster_next_cpu);
	else
		scan_base = si->cluster_next;
	offset = scan_base;

	/* SSD algorithm */
	if (si->cluster_info) {
		if (!scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
			goto scan;
	} else if (unlikely(!si->cluster_nr--)) {
		if (si->pages - si->inuse_pages < SWAPFILE_CLUSTER) {
			si->cluster_nr = SWAPFILE_CLUSTER - 1;
			goto checks;
		}

		spin_unlock(&si->lock);

		/*
		 * If seek is expensive, start searching for new cluster from
		 * start of partition, to minimize the span of allocated swap.
		 * If seek is cheap, that is the SWP_SOLIDSTATE si->cluster_info
		 * case, just handled by scan_swap_map_try_ssd_cluster() above.
		 */
		scan_base = offset = si->lowest_bit;
		last_in_cluster = offset + SWAPFILE_CLUSTER - 1;

		/* Locate the first empty (unaligned) cluster */
		for (; last_in_cluster <= si->highest_bit; offset++) {
			if (si->swap_map[offset])
				last_in_cluster = offset + SWAPFILE_CLUSTER;
			else if (offset == last_in_cluster) {
				spin_lock(&si->lock);
				offset -= SWAPFILE_CLUSTER - 1;
				si->cluster_next = offset;
				si->cluster_nr = SWAPFILE_CLUSTER - 1;
				goto checks;
			}
			if (unlikely(--latency_ration < 0)) {
				cond_resched();
				latency_ration = LATENCY_LIMIT;
			}
		}

		offset = scan_base;
		spin_lock(&si->lock);
		si->cluster_nr = SWAPFILE_CLUSTER - 1;
	}

checks:
	if (si->cluster_info) {
		while (scan_swap_map_ssd_cluster_conflict(si, offset)) {
			/* take a break if we already got some slots */
			if (n_ret)
				goto done;
			if (!scan_swap_map_try_ssd_cluster(si, &offset,
							   &scan_base))
				goto scan;
		}
	}
	if (!(si->flags & SWP_WRITEOK))
		goto no_page;
	if (!si->highest_bit)
		goto no_page;
	if (offset > si->highest_bit)
		scan_base = offset = si->lowest_bit;

	ci = lock_cluster(si, offset);
	/* reuse swap entry of cache-only swap if not busy. */
	if (vm_swap_full() && si->swap_map[offset] == SWAP_HAS_CACHE) {
		int swap_was_freed;
		unlock_cluster(ci);
		spin_unlock(&si->lock);
		swap_was_freed = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY);
		spin_lock(&si->lock);
		/* entry was freed successfully, try to use this again */
		if (swap_was_freed)
			goto checks;
		goto scan; /* check next one */
	}

	if (si->swap_map[offset]) {
		unlock_cluster(ci);
		if (!n_ret)
			goto scan;
		else
			goto done;
	}
	WRITE_ONCE(si->swap_map[offset], usage);
	inc_cluster_info_page(si, si->cluster_info, offset);
	unlock_cluster(ci);

	swap_range_alloc(si, offset, 1);
	slots[n_ret++] = swp_entry(si->type, offset);

	/* got enough slots or reach max slots? */
	if ((n_ret == nr) || (offset >= si->highest_bit))
		goto done;

	/* search for next available slot */

	/* time to take a break? */
	if (unlikely(--latency_ration < 0)) {
		if (n_ret)
			goto done;
		spin_unlock(&si->lock);
		cond_resched();
		spin_lock(&si->lock);
		latency_ration = LATENCY_LIMIT;
	}

	/* try to get more slots in cluster */
	if (si->cluster_info) {
		if (scan_swap_map_try_ssd_cluster(si, &offset, &scan_base))
			goto checks;
	} else if (si->cluster_nr && !si->swap_map[++offset]) {
		/* non-ssd case, still more slots in cluster? */
		--si->cluster_nr;
		goto checks;
	}

	/*
	 * Even if there are no free clusters available (fragmented),
	 * try to scan a little more quickly with lock held unless we
	 * have scanned too many slots already.
	 */
	if (!scanned_many) {
		unsigned long scan_limit;

		if (offset < scan_base)
			scan_limit = scan_base;
		else
			scan_limit = si->highest_bit;
		for (; offset <= scan_limit && --latency_ration > 0;
		     offset++) {
			if (!si->swap_map[offset])
				goto checks;
		}
	}

done:
	set_cluster_next(si, offset + 1);
	si->flags -= SWP_SCANNING;
	return n_ret;

scan:
	spin_unlock(&si->lock);
	while (++offset <= READ_ONCE(si->highest_bit)) {
		if (data_race(!si->swap_map[offset])) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (vm_swap_full() &&
		    READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
			scanned_many = true;
		}
	}
	offset = si->lowest_bit;
	while (offset < scan_base) {
		if (data_race(!si->swap_map[offset])) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (vm_swap_full() &&
		    READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
			spin_lock(&si->lock);
			goto checks;
		}
		if (unlikely(--latency_ration < 0)) {
			cond_resched();
			latency_ration = LATENCY_LIMIT;
			scanned_many = true;
		}
		offset++;
	}
	spin_lock(&si->lock);

no_page:
	si->flags -= SWP_SCANNING;
	return n_ret;
}
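/*
 * Illustrative usage sketch (added for exposition; see get_swap_pages()
 * further down for the real call site):
 *
 *	swp_entry_t slots[SWAP_BATCH];
 *	int got;
 *
 *	spin_lock(&si->lock);
 *	got = scan_swap_map_slots(si, SWAP_HAS_CACHE, n, slots);
 *	spin_unlock(&si->lock);
 *
 * The returned entries are marked SWAP_HAS_CACHE in si->swap_map until a
 * page is actually installed in the swap cache.
 */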
static int swap_alloc_cluster(struct swap_info_struct *si, swp_entry_t *slot)
{
	unsigned long idx;
	struct swap_cluster_info *ci;
	unsigned long offset, i;
	unsigned char *map;

	/*
	 * Should not even be attempting cluster allocations when huge
	 * page swap is disabled.  Warn and fail the allocation.
	 */
	if (!IS_ENABLED(CONFIG_THP_SWAP)) {
		VM_WARN_ON_ONCE(1);
		return 0;
	}

	if (cluster_list_empty(&si->free_clusters))
		return 0;

	idx = cluster_list_first(&si->free_clusters);
	offset = idx * SWAPFILE_CLUSTER;
	ci = lock_cluster(si, offset);
	alloc_cluster(si, idx);
	cluster_set_count_flag(ci, SWAPFILE_CLUSTER, CLUSTER_FLAG_HUGE);

	map = si->swap_map + offset;
	for (i = 0; i < SWAPFILE_CLUSTER; i++)
		map[i] = SWAP_HAS_CACHE;
	unlock_cluster(ci);
	swap_range_alloc(si, offset, SWAPFILE_CLUSTER);
	*slot = swp_entry(si->type, offset);

	return 1;
}

static void swap_free_cluster(struct swap_info_struct *si, unsigned long idx)
{
	unsigned long offset = idx * SWAPFILE_CLUSTER;
	struct swap_cluster_info *ci;

	ci = lock_cluster(si, offset);
	memset(si->swap_map + offset, 0, SWAPFILE_CLUSTER);
	cluster_set_count_flag(ci, 0, 0);
	free_cluster(si, idx);
	unlock_cluster(ci);
	swap_range_free(si, offset, SWAPFILE_CLUSTER);
}

static unsigned long scan_swap_map(struct swap_info_struct *si,
				   unsigned char usage)
{
	swp_entry_t entry;
	int n_ret;

	n_ret = scan_swap_map_slots(si, usage, 1, &entry);

	if (n_ret)
		return swp_offset(entry);
	else
		return 0;
}

int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_size)
{
	unsigned long size = swap_entry_size(entry_size);
	struct swap_info_struct *si, *next;
	long avail_pgs;
	int n_ret = 0;
	int node;

	/* Only single cluster request supported */
	WARN_ON_ONCE(n_goal > 1 && size == SWAPFILE_CLUSTER);

	avail_pgs = atomic_long_read(&nr_swap_pages) / size;
	if (avail_pgs <= 0)
		goto noswap;

	n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);

	atomic_long_sub(n_goal * size, &nr_swap_pages);

	spin_lock(&swap_avail_lock);

start_over:
	node = numa_node_id();
	plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
		/* requeue si to after same-priority siblings */
		plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
		spin_unlock(&swap_avail_lock);
		spin_lock(&si->lock);
		if (!si->highest_bit || !(si->flags & SWP_WRITEOK)) {
			spin_lock(&swap_avail_lock);
			if (plist_node_empty(&si->avail_lists[node])) {
				spin_unlock(&si->lock);
				goto nextsi;
			}
			WARN(!si->highest_bit,
			     "swap_info %d in list but !highest_bit\n",
			     si->type);
			WARN(!(si->flags & SWP_WRITEOK),
			     "swap_info %d in list but !SWP_WRITEOK\n",
			     si->type);
			__del_from_avail_list(si);
			spin_unlock(&si->lock);
			goto nextsi;
		}
		if (size == SWAPFILE_CLUSTER) {
			if (si->flags & SWP_BLKDEV)
				n_ret = swap_alloc_cluster(si, swp_entries);
		} else
			n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
						    n_goal, swp_entries);
		spin_unlock(&si->lock);
		if (n_ret || size == SWAPFILE_CLUSTER)
			goto check_out;
		pr_debug("scan_swap_map of si %d failed to find offset\n",
			 si->type);

		spin_lock(&swap_avail_lock);
nextsi:
		/*
		 * If we got here, it's likely that si was almost full before,
		 * and since scan_swap_map() can drop the si->lock, multiple
		 * callers probably all tried to get a page from the same si
		 * and it filled up before we could get one; or, the si filled
		 * up between us dropping swap_avail_lock and taking si->lock.
		 * Since we dropped the swap_avail_lock, the swap_avail_head
		 * list may have been modified; so if next is still in the
		 * swap_avail_head list then try it, otherwise start over
		 * if we have not gotten any slots.
		 */
		if (plist_node_empty(&next->avail_lists[node]))
			goto start_over;
	}

	spin_unlock(&swap_avail_lock);

check_out:
	if (n_ret < n_goal)
		atomic_long_add((long)(n_goal - n_ret) * size,
				&nr_swap_pages);
noswap:
	return n_ret;
}

/* The only caller of this function is now suspend routine */
swp_entry_t get_swap_page_of_type(int type)
{
	struct swap_info_struct *si = swap_type_to_swap_info(type);
	pgoff_t offset;

	if (!si)
		goto fail;

	spin_lock(&si->lock);
	if (si->flags & SWP_WRITEOK) {
		atomic_long_dec(&nr_swap_pages);
		/* This is called for allocating swap entry, not cache */
		offset = scan_swap_map(si, 1);
		if (offset) {
			spin_unlock(&si->lock);
			return swp_entry(type, offset);
		}
		atomic_long_inc(&nr_swap_pages);
	}
	spin_unlock(&si->lock);
fail:
	return (swp_entry_t) {0};
}

static struct swap_info_struct *__swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *p;
	unsigned long offset;

	if (!entry.val)
		goto out;
	p = swp_swap_info(entry);
	if (!p)
		goto bad_nofile;
	if (data_race(!(p->flags & SWP_USED)))
		goto bad_device;
	offset = swp_offset(entry);
	if (offset >= p->max)
		goto bad_offset;
	return p;

bad_offset:
	pr_err("swap_info_get: %s%08lx\n", Bad_offset, entry.val);
	goto out;
bad_device:
	pr_err("swap_info_get: %s%08lx\n", Unused_file, entry.val);
	goto out;
bad_nofile:
	pr_err("swap_info_get: %s%08lx\n", Bad_file, entry.val);
out:
	return NULL;
}

static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *p;

	p = __swap_info_get(entry);
	if (!p)
		goto out;
	if (data_race(!p->swap_map[swp_offset(entry)]))
		goto bad_free;
	return p;

bad_free:
	pr_err("swap_info_get: %s%08lx\n", Unused_offset, entry.val);
	goto out;
out:
	return NULL;
}

static struct swap_info_struct *swap_info_get(swp_entry_t entry)
{
	struct swap_info_struct *p;

	p = _swap_info_get(entry);
	if (p)
		spin_lock(&p->lock);
	return p;
}

static struct swap_info_struct *swap_info_get_cont(swp_entry_t entry,
					struct swap_info_struct *q)
{
	struct swap_info_struct *p;

	p = _swap_info_get(entry);

	if (p != q) {
		if (q != NULL)
			spin_unlock(&q->lock);
		if (p != NULL)
			spin_lock(&p->lock);
	}
	return p;
}
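/*
 * Illustrative note (added for exposition): each byte of si->swap_map
 * encodes the state of one swap slot.  The low bits hold the reference
 * count (up to SWAP_MAP_MAX), SWAP_HAS_CACHE says a swap cache page
 * exists for the slot, COUNT_CONTINUED says the count overflows into
 * continuation pages (see swap_count_continued()), and the special
 * values SWAP_MAP_SHMEM and SWAP_MAP_BAD mark shmem-owned and unusable
 * slots respectively.  __swap_entry_free_locked() below is the one place
 * that decodes and re-encodes all of this.
 */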
static unsigned char __swap_entry_free_locked(struct swap_info_struct *p,
					      unsigned long offset,
					      unsigned char usage)
{
	unsigned char count;
	unsigned char has_cache;

	count = p->swap_map[offset];

	has_cache = count & SWAP_HAS_CACHE;
	count &= ~SWAP_HAS_CACHE;

	if (usage == SWAP_HAS_CACHE) {
		VM_BUG_ON(!has_cache);
		has_cache = 0;
	} else if (count == SWAP_MAP_SHMEM) {
		/*
		 * Or we could insist on shmem.c using a special
		 * swap_shmem_free() and free_shmem_swap_and_cache()...
		 */
		count = 0;
	} else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
		if (count == COUNT_CONTINUED) {
			if (swap_count_continued(p, offset, count))
				count = SWAP_MAP_MAX | COUNT_CONTINUED;
			else
				count = SWAP_MAP_MAX;
		} else
			count--;
	}

	usage = count | has_cache;
	if (usage)
		WRITE_ONCE(p->swap_map[offset], usage);
	else
		WRITE_ONCE(p->swap_map[offset], SWAP_HAS_CACHE);

	return usage;
}
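/*
 * Illustrative usage sketch (added for exposition) for the helper
 * documented below:
 *
 *	struct swap_info_struct *si = get_swap_device(entry);
 *
 *	if (si) {
 *		... safely dereference si->swap_map etc. ...
 *		put_swap_device(si);
 *	}
 *
 * Between the two calls the device cannot complete a swapoff, because
 * get_swap_device() holds the RCU read lock and swapoff waits for a
 * grace period after clearing SWP_VALID.
 */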
/*
 * Check whether swap entry is valid in the swap device.  If so,
 * return pointer to swap_info_struct, and keep the swap entry valid
 * by preventing the swap device from being swapped off, until
 * put_swap_device() is called.  Otherwise return NULL.
 *
 * The entirety of the RCU read critical section must come before the
 * return from or after the call to synchronize_rcu() in
 * enable_swap_info() or swapoff().  So if "si->flags & SWP_VALID" is
 * true, the si->map, si->cluster_info, etc. must be valid in the
 * critical section.
 *
 * Notice that swapoff or swapoff+swapon can still happen before the
 * rcu_read_lock() in get_swap_device() or after the rcu_read_unlock()
 * in put_swap_device() if there isn't any other way to prevent
 * swapoff, such as page lock, page table lock, etc.  The caller must
 * be prepared for that.  For example, the following situation is
 * possible.
 *
 *   CPU1				CPU2
 *   do_swap_page()
 *     ...				swapoff+swapon
 *     __read_swap_cache_async()
 *       swapcache_prepare()
 *         __swap_duplicate()
 *           // check swap_map
 *     // verify PTE not changed
 *
 * In __swap_duplicate(), the swap_map needs to be checked before
 * changing partly because the specified swap entry may be for another
 * swap device which has been swapped off.  And in do_swap_page(), after
 * the page is read from the swap device, the PTE is verified not to
 * have changed, with the page table locked, to check whether the swap
 * device has been swapped off or swapped off and on again.
 */
struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
	struct swap_info_struct *si;
	unsigned long offset;

	if (!entry.val)
		goto out;
	si = swp_swap_info(entry);
	if (!si)
		goto bad_nofile;

	rcu_read_lock();
	if (data_race(!(si->flags & SWP_VALID)))
		goto unlock_out;
	offset = swp_offset(entry);
	if (offset >= si->max)
		goto unlock_out;

	return si;
bad_nofile:
	pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
out:
	return NULL;
unlock_out:
	rcu_read_unlock();
	return NULL;
}

static unsigned char __swap_entry_free(struct swap_info_struct *p,
				       swp_entry_t entry)
{
	struct swap_cluster_info *ci;
	unsigned long offset = swp_offset(entry);
	unsigned char usage;

	ci = lock_cluster_or_swap_info(p, offset);
	usage = __swap_entry_free_locked(p, offset, 1);
	unlock_cluster_or_swap_info(p, ci);
	if (!usage)
		free_swap_slot(entry);

	return usage;
}

static void swap_entry_free(struct swap_info_struct *p, swp_entry_t entry)
{
	struct swap_cluster_info *ci;
	unsigned long offset = swp_offset(entry);
	unsigned char count;

	ci = lock_cluster(p, offset);
	count = p->swap_map[offset];
	VM_BUG_ON(count != SWAP_HAS_CACHE);
	p->swap_map[offset] = 0;
	dec_cluster_info_page(p, p->cluster_info, offset);
	unlock_cluster(ci);

	mem_cgroup_uncharge_swap(entry, 1);
	swap_range_free(p, offset, 1);
}

/*
 * Caller has made sure that the swap device corresponding to entry
 * is still around or has not been recycled.
 */
void swap_free(swp_entry_t entry)
{
	struct swap_info_struct *p;

	p = _swap_info_get(entry);
	if (p)
		__swap_entry_free(p, entry);
}
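/*
 * Illustrative note (added for exposition): there are two layers of
 * freeing here.  swap_free() above and put_swap_page() below drop
 * references and, via free_swap_slot(), only queue fully-unused entries
 * in the percpu slot cache; swap_entry_free() (reached later through
 * swapcache_free_entries()) is what finally clears the swap_map byte
 * and returns the range to the device.
 */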
/*
 * Called after dropping swapcache to decrease refcnt to swap entries.
 */
void put_swap_page(struct page *page, swp_entry_t entry)
{
	unsigned long offset = swp_offset(entry);
	unsigned long idx = offset / SWAPFILE_CLUSTER;
	struct swap_cluster_info *ci;
	struct swap_info_struct *si;
	unsigned char *map;
	unsigned int i, free_entries = 0;
	unsigned char val;
	int size = swap_entry_size(thp_nr_pages(page));

	si = _swap_info_get(entry);
	if (!si)
		return;

	ci = lock_cluster_or_swap_info(si, offset);
	if (size == SWAPFILE_CLUSTER) {
		VM_BUG_ON(!cluster_is_huge(ci));
		map = si->swap_map + offset;
		for (i = 0; i < SWAPFILE_CLUSTER; i++) {
			val = map[i];
			VM_BUG_ON(!(val & SWAP_HAS_CACHE));
			if (val == SWAP_HAS_CACHE)
				free_entries++;
		}
		cluster_clear_huge(ci);
		if (free_entries == SWAPFILE_CLUSTER) {
			unlock_cluster_or_swap_info(si, ci);
			spin_lock(&si->lock);
			mem_cgroup_uncharge_swap(entry, SWAPFILE_CLUSTER);
			swap_free_cluster(si, idx);
			spin_unlock(&si->lock);
			return;
		}
	}
	for (i = 0; i < size; i++, entry.val++) {
		if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE)) {
			unlock_cluster_or_swap_info(si, ci);
			free_swap_slot(entry);
			if (i == size - 1)
				return;
			lock_cluster_or_swap_info(si, offset);
		}
	}
	unlock_cluster_or_swap_info(si, ci);
}

#ifdef CONFIG_THP_SWAP
int split_swap_cluster(swp_entry_t entry)
{
	struct swap_info_struct *si;
	struct swap_cluster_info *ci;
	unsigned long offset = swp_offset(entry);

	si = _swap_info_get(entry);
	if (!si)
		return -EBUSY;
	ci = lock_cluster(si, offset);
	cluster_clear_huge(ci);
	unlock_cluster(ci);
	return 0;
}
#endif

static int swp_entry_cmp(const void *ent1, const void *ent2)
{
	const swp_entry_t *e1 = ent1, *e2 = ent2;

	return (int)swp_type(*e1) - (int)swp_type(*e2);
}

void swapcache_free_entries(swp_entry_t *entries, int n)
{
	struct swap_info_struct *p, *prev;
	int i;

	if (n <= 0)
		return;

	prev = NULL;
	p = NULL;

	/*
	 * Sort swap entries by swap device, so each lock is only taken once.
	 * nr_swapfiles isn't absolutely correct, but the overhead of sort() is
	 * so low that it isn't necessary to optimize further.
	 */
	if (nr_swapfiles > 1)
		sort(entries, n, sizeof(entries[0]), swp_entry_cmp, NULL);
	for (i = 0; i < n; ++i) {
		p = swap_info_get_cont(entries[i], prev);
		if (p)
			swap_entry_free(p, entries[i]);
		prev = p;
	}
	if (p)
		spin_unlock(&p->lock);
}
/*
 * How many references to page are currently swapped out?
 * This does not give an exact answer when swap count is continued,
 * but does include the high COUNT_CONTINUED flag to allow for that.
 */
int page_swapcount(struct page *page)
{
	int count = 0;
	struct swap_info_struct *p;
	struct swap_cluster_info *ci;
	swp_entry_t entry;
	unsigned long offset;

	entry.val = page_private(page);
	p = _swap_info_get(entry);
	if (p) {
		offset = swp_offset(entry);
		ci = lock_cluster_or_swap_info(p, offset);
		count = swap_count(p->swap_map[offset]);
		unlock_cluster_or_swap_info(p, ci);
	}
	return count;
}

int __swap_count(swp_entry_t entry)
{
	struct swap_info_struct *si;
	pgoff_t offset = swp_offset(entry);
	int count = 0;

	si = get_swap_device(entry);
	if (si) {
		count = swap_count(si->swap_map[offset]);
		put_swap_device(si);
	}
	return count;
}

static int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
{
	int count = 0;
	pgoff_t offset = swp_offset(entry);
	struct swap_cluster_info *ci;

	ci = lock_cluster_or_swap_info(si, offset);
	count = swap_count(si->swap_map[offset]);
	unlock_cluster_or_swap_info(si, ci);
	return count;
}

/*
 * How many references to @entry are currently swapped out?
 * This does not give an exact answer when swap count is continued,
 * but does include the high COUNT_CONTINUED flag to allow for that.
 */
int __swp_swapcount(swp_entry_t entry)
{
	int count = 0;
	struct swap_info_struct *si;

	si = get_swap_device(entry);
	if (si) {
		count = swap_swapcount(si, entry);
		put_swap_device(si);
	}
	return count;
}
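/*
 * Illustrative note (added for exposition): swp_swapcount() below
 * reconstructs the exact count from the continuation chain.  With a base
 * count of SWAP_MAP_MAX and continuation bytes c1, c2, ... the total is
 *
 *	count = SWAP_MAP_MAX
 *	      + c1 * (SWAP_MAP_MAX + 1)
 *	      + c2 * (SWAP_MAP_MAX + 1) * (SWAP_CONT_MAX + 1)
 *	      + ...
 *
 * i.e. each continuation page holds one more "digit" of a mixed-radix
 * representation of the reference count.
 */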
/*
 * How many references to @entry are currently swapped out?
 * This considers COUNT_CONTINUED so it returns exact answer.
 */
int swp_swapcount(swp_entry_t entry)
{
	int count, tmp_count, n;
	struct swap_info_struct *p;
	struct swap_cluster_info *ci;
	struct page *page;
	pgoff_t offset;
	unsigned char *map;

	p = _swap_info_get(entry);
	if (!p)
		return 0;

	offset = swp_offset(entry);

	ci = lock_cluster_or_swap_info(p, offset);

	count = swap_count(p->swap_map[offset]);
	if (!(count & COUNT_CONTINUED))
		goto out;

	count &= ~COUNT_CONTINUED;
	n = SWAP_MAP_MAX + 1;

	page = vmalloc_to_page(p->swap_map + offset);
	offset &= ~PAGE_MASK;
	VM_BUG_ON(page_private(page) != SWP_CONTINUED);

	do {
		page = list_next_entry(page, lru);
		map = kmap_atomic(page);
		tmp_count = map[offset];
		kunmap_atomic(map);

		count += (tmp_count & ~COUNT_CONTINUED) * n;
		n *= (SWAP_CONT_MAX + 1);
	} while (tmp_count & COUNT_CONTINUED);
out:
	unlock_cluster_or_swap_info(p, ci);
	return count;
}

static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
					 swp_entry_t entry)
{
	struct swap_cluster_info *ci;
	unsigned char *map = si->swap_map;
	unsigned long roffset = swp_offset(entry);
	unsigned long offset = round_down(roffset, SWAPFILE_CLUSTER);
	int i;
	bool ret = false;

	ci = lock_cluster_or_swap_info(si, offset);
	if (!ci || !cluster_is_huge(ci)) {
		if (swap_count(map[roffset]))
			ret = true;
		goto unlock_out;
	}
	for (i = 0; i < SWAPFILE_CLUSTER; i++) {
		if (swap_count(map[offset + i])) {
			ret = true;
			break;
		}
	}
unlock_out:
	unlock_cluster_or_swap_info(si, ci);
	return ret;
}

static bool page_swapped(struct page *page)
{
	swp_entry_t entry;
	struct swap_info_struct *si;

	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page)))
		return page_swapcount(page) != 0;

	page = compound_head(page);
	entry.val = page_private(page);
	si = _swap_info_get(entry);
	if (si)
		return swap_page_trans_huge_swapped(si, entry);
	return false;
}

static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
					 int *total_swapcount)
{
	int i, map_swapcount, _total_mapcount, _total_swapcount;
	unsigned long offset = 0;
	struct swap_info_struct *si;
	struct swap_cluster_info *ci = NULL;
	unsigned char *map = NULL;
	int mapcount, swapcount = 0;

	/* hugetlbfs shouldn't call it */
	VM_BUG_ON_PAGE(PageHuge(page), page);

	if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!PageTransCompound(page))) {
		mapcount = page_trans_huge_mapcount(page, total_mapcount);
		if (PageSwapCache(page))
			swapcount = page_swapcount(page);
		if (total_swapcount)
			*total_swapcount = swapcount;
		return mapcount + swapcount;
	}

	page = compound_head(page);

	_total_mapcount = _total_swapcount = map_swapcount = 0;
	if (PageSwapCache(page)) {
		swp_entry_t entry;

		entry.val = page_private(page);
		si = _swap_info_get(entry);
		if (si) {
			map = si->swap_map;
			offset = swp_offset(entry);
		}
	}
	if (map)
		ci = lock_cluster(si, offset);
	for (i = 0; i < HPAGE_PMD_NR; i++) {
		mapcount = atomic_read(&page[i]._mapcount) + 1;
		_total_mapcount += mapcount;
		if (map) {
			swapcount = swap_count(map[offset + i]);
			_total_swapcount += swapcount;
		}
		map_swapcount = max(map_swapcount, mapcount + swapcount);
	}
	unlock_cluster(ci);
	if (PageDoubleMap(page)) {
		map_swapcount -= 1;
		_total_mapcount -= HPAGE_PMD_NR;
	}
	mapcount = compound_mapcount(page);
	map_swapcount += mapcount;
	_total_mapcount += mapcount;
	if (total_mapcount)
		*total_mapcount = _total_mapcount;
	if (total_swapcount)
		*total_swapcount = _total_swapcount;

	return map_swapcount;
}

/*
 * We can write to an anon page without COW if there are no other references
 * to it.  And as a side-effect, free up its swap: because the old content
 * on disk will never be read, and seeking back there to write new content
 * later would only waste time away from clustering.
 *
 * NOTE: total_map_swapcount should not be relied upon by the caller if
 * reuse_swap_page() returns false, but it may always be overwritten
 * (see the other implementation for CONFIG_SWAP=n).
 */
bool reuse_swap_page(struct page *page, int *total_map_swapcount)
{
	int count, total_mapcount, total_swapcount;

	VM_BUG_ON_PAGE(!PageLocked(page), page);
	if (unlikely(PageKsm(page)))
		return false;
	count = page_trans_huge_map_swapcount(page, &total_mapcount,
					      &total_swapcount);
	if (total_map_swapcount)
		*total_map_swapcount = total_mapcount + total_swapcount;
	if (count == 1 && PageSwapCache(page) &&
	    (likely(!PageTransCompound(page)) ||
	     /* The remaining swap count will be freed soon */
	     total_swapcount == page_swapcount(page))) {
		if (!PageWriteback(page)) {
			page = compound_head(page);
			delete_from_swap_cache(page);
			SetPageDirty(page);
		} else {
			swp_entry_t entry;
			struct swap_info_struct *p;

			entry.val = page_private(page);
			p = swap_info_get(entry);
			if (p->flags & SWP_STABLE_WRITES) {
				spin_unlock(&p->lock);
				return false;
			}
			spin_unlock(&p->lock);
		}
	}

	return count <= 1;
}

/*
 * If swap is getting full, or if there are no more mappings of this page,
 * then try_to_free_swap is called to free its swap space.
 */
int try_to_free_swap(struct page *page)
{
	VM_BUG_ON_PAGE(!PageLocked(page), page);

	if (!PageSwapCache(page))
		return 0;
	if (PageWriteback(page))
		return 0;
	if (page_swapped(page))
		return 0;

	/*
	 * Once hibernation has begun to create its image of memory,
	 * there's a danger that one of the calls to try_to_free_swap()
	 * - most probably a call from __try_to_reclaim_swap() while
	 * hibernation is allocating its own swap pages for the image,
	 * but conceivably even a call from memory reclaim - will free
	 * the swap from a page which has already been recorded in the
	 * image as a clean swapcache page, and then reuse its swap for
	 * another page of the image.  On waking from hibernation, the
	 * original page might be freed under memory pressure, then
	 * later read back in from swap, now with the wrong data.
	 *
	 * Hibernation suspends storage while it is writing the image
	 * to disk so check that here.
	 */
	if (pm_suspended_storage())
		return 0;

	page = compound_head(page);
	delete_from_swap_cache(page);
	SetPageDirty(page);
	return 1;
}
/*
 * Free the swap entry like above, but also try to
 * free the page cache entry if it is the last user.
 */
int free_swap_and_cache(swp_entry_t entry)
{
	struct swap_info_struct *p;
	unsigned char count;

	if (non_swap_entry(entry))
		return 1;

	p = _swap_info_get(entry);
	if (p) {
		count = __swap_entry_free(p, entry);
		if (count == SWAP_HAS_CACHE &&
		    !swap_page_trans_huge_swapped(p, entry))
			__try_to_reclaim_swap(p, swp_offset(entry),
					      TTRS_UNMAPPED | TTRS_FULL);
	}
	return p != NULL;
}

#ifdef CONFIG_HIBERNATION
/*
 * Find the swap type that corresponds to given device (if any).
 *
 * @offset - number of the PAGE_SIZE-sized block of the device, starting
 * from 0, in which the swap header is expected to be located.
 *
 * This is needed for the suspend to disk (aka swsusp).
 */
int swap_type_of(dev_t device, sector_t offset)
{
	int type;

	if (!device)
		return -1;

	spin_lock(&swap_lock);
	for (type = 0; type < nr_swapfiles; type++) {
		struct swap_info_struct *sis = swap_info[type];

		if (!(sis->flags & SWP_WRITEOK))
			continue;

		if (device == sis->bdev->bd_dev) {
			struct swap_extent *se = first_se(sis);

			if (se->start_block == offset) {
				spin_unlock(&swap_lock);
				return type;
			}
		}
	}
	spin_unlock(&swap_lock);
	return -ENODEV;
}

int find_first_swap(dev_t *device)
{
	int type;

	spin_lock(&swap_lock);
	for (type = 0; type < nr_swapfiles; type++) {
		struct swap_info_struct *sis = swap_info[type];

		if (!(sis->flags & SWP_WRITEOK))
			continue;
		*device = sis->bdev->bd_dev;
		spin_unlock(&swap_lock);
		return type;
	}
	spin_unlock(&swap_lock);
	return -ENODEV;
}

/*
 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev
 * corresponding to given index in swap_info (swap type).
 */
sector_t swapdev_block(int type, pgoff_t offset)
{
	struct block_device *bdev;
	struct swap_info_struct *si = swap_type_to_swap_info(type);

	if (!si || !(si->flags & SWP_WRITEOK))
		return 0;
	return map_swap_entry(swp_entry(type, offset), &bdev);
}

/*
 * Return either the total number of swap pages of given type, or the number
 * of free pages of that type (depending on @free)
 *
 * This is needed for software suspend
 */
unsigned int count_swap_pages(int type, int free)
{
	unsigned int n = 0;

	spin_lock(&swap_lock);
	if ((unsigned int)type < nr_swapfiles) {
		struct swap_info_struct *sis = swap_info[type];

		spin_lock(&sis->lock);
		if (sis->flags & SWP_WRITEOK) {
			n = sis->pages;
			if (free)
				n -= sis->inuse_pages;
		}
		spin_unlock(&sis->lock);
	}
	spin_unlock(&swap_lock);
	return n;
}
#endif /* CONFIG_HIBERNATION */

static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
{
	return pte_same(pte_swp_clear_soft_dirty(pte), swp_pte);
}
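/*
 * Illustrative overview (added for exposition): the unuse_*() family
 * below is the swapoff page-table walk.  try_to_unuse() iterates over
 * every mm on init_mm.mmlist and calls, in descending order,
 *
 *	unuse_mm() -> unuse_vma() -> unuse_p4d_range() ->
 *	unuse_pud_range() -> unuse_pmd_range() -> unuse_pte_range() ->
 *	unuse_pte()
 *
 * so each swap PTE belonging to the dying device is faulted back in and
 * replaced with a present PTE pointing at the swap cache page.
 */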
/*
 * No need to decide whether this PTE shares the swap entry with others,
 * just let do_wp_page work it out if a write is requested later - to
 * force COW, vm_page_prot omits write permission from any private vma.
 */
static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long addr, swp_entry_t entry, struct page *page)
{
	struct page *swapcache;
	spinlock_t *ptl;
	pte_t *pte;
	int ret = 1;

	swapcache = page;
	page = ksm_might_need_to_copy(page, vma, addr);
	if (unlikely(!page))
		return -ENOMEM;

	pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
	if (unlikely(!pte_same_as_swp(*pte, swp_entry_to_pte(entry)))) {
		ret = 0;
		goto out;
	}

	dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
	inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
	get_page(page);
	set_pte_at(vma->vm_mm, addr, pte,
		   pte_mkold(mk_pte(page, vma->vm_page_prot)));
	if (page == swapcache) {
		page_add_anon_rmap(page, vma, addr, false);
	} else { /* ksm created a completely new copy */
		page_add_new_anon_rmap(page, vma, addr, false);
		lru_cache_add_inactive_or_unevictable(page, vma);
	}
	swap_free(entry);
	/*
	 * Move the page to the active list so it is not
	 * immediately swapped out again after swapon.
	 */
	activate_page(page);
out:
	pte_unmap_unlock(pte, ptl);
	if (page != swapcache) {
		unlock_page(page);
		put_page(page);
	}
	return ret;
}

static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
			unsigned long addr, unsigned long end,
			unsigned int type, bool frontswap,
			unsigned long *fs_pages_to_unuse)
{
	struct page *page;
	swp_entry_t entry;
	pte_t *pte;
	struct swap_info_struct *si;
	unsigned long offset;
	int ret = 0;
	volatile unsigned char *swap_map;

	si = swap_info[type];
	pte = pte_offset_map(pmd, addr);
	do {
		struct vm_fault vmf;

		if (!is_swap_pte(*pte))
			continue;

		entry = pte_to_swp_entry(*pte);
		if (swp_type(entry) != type)
			continue;

		offset = swp_offset(entry);
		if (frontswap && !frontswap_test(si, offset))
			continue;

		pte_unmap(pte);
		swap_map = &si->swap_map[offset];
		page = lookup_swap_cache(entry, vma, addr);
		if (!page) {
			vmf.vma = vma;
			vmf.address = addr;
			vmf.pmd = pmd;
			page = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
						&vmf);
		}
		if (!page) {
			if (*swap_map == 0 || *swap_map == SWAP_MAP_BAD)
				goto try_next;
			return -ENOMEM;
		}

		lock_page(page);
		wait_on_page_writeback(page);
		ret = unuse_pte(vma, pmd, addr, entry, page);
		if (ret < 0) {
			unlock_page(page);
			put_page(page);
			goto out;
		}

		try_to_free_swap(page);
		unlock_page(page);
		put_page(page);

		if (*fs_pages_to_unuse && !--(*fs_pages_to_unuse)) {
			ret = FRONTSWAP_PAGES_UNUSED;
			goto out;
		}
try_next:
		pte = pte_offset_map(pmd, addr);
	} while (pte++, addr += PAGE_SIZE, addr != end);
	pte_unmap(pte - 1);

	ret = 0;
out:
	return ret;
}
fs_pages_to_unuse); 2035 if (ret) 2036 return ret; 2037 } while (pmd++, addr = next, addr != end); 2038 return 0; 2039 } 2040 2041 static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d, 2042 unsigned long addr, unsigned long end, 2043 unsigned int type, bool frontswap, 2044 unsigned long *fs_pages_to_unuse) 2045 { 2046 pud_t *pud; 2047 unsigned long next; 2048 int ret; 2049 2050 pud = pud_offset(p4d, addr); 2051 do { 2052 next = pud_addr_end(addr, end); 2053 if (pud_none_or_clear_bad(pud)) 2054 continue; 2055 ret = unuse_pmd_range(vma, pud, addr, next, type, 2056 frontswap, fs_pages_to_unuse); 2057 if (ret) 2058 return ret; 2059 } while (pud++, addr = next, addr != end); 2060 return 0; 2061 } 2062 2063 static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd, 2064 unsigned long addr, unsigned long end, 2065 unsigned int type, bool frontswap, 2066 unsigned long *fs_pages_to_unuse) 2067 { 2068 p4d_t *p4d; 2069 unsigned long next; 2070 int ret; 2071 2072 p4d = p4d_offset(pgd, addr); 2073 do { 2074 next = p4d_addr_end(addr, end); 2075 if (p4d_none_or_clear_bad(p4d)) 2076 continue; 2077 ret = unuse_pud_range(vma, p4d, addr, next, type, 2078 frontswap, fs_pages_to_unuse); 2079 if (ret) 2080 return ret; 2081 } while (p4d++, addr = next, addr != end); 2082 return 0; 2083 } 2084 2085 static int unuse_vma(struct vm_area_struct *vma, unsigned int type, 2086 bool frontswap, unsigned long *fs_pages_to_unuse) 2087 { 2088 pgd_t *pgd; 2089 unsigned long addr, end, next; 2090 int ret; 2091 2092 addr = vma->vm_start; 2093 end = vma->vm_end; 2094 2095 pgd = pgd_offset(vma->vm_mm, addr); 2096 do { 2097 next = pgd_addr_end(addr, end); 2098 if (pgd_none_or_clear_bad(pgd)) 2099 continue; 2100 ret = unuse_p4d_range(vma, pgd, addr, next, type, 2101 frontswap, fs_pages_to_unuse); 2102 if (ret) 2103 return ret; 2104 } while (pgd++, addr = next, addr != end); 2105 return 0; 2106 } 2107 2108 static int unuse_mm(struct mm_struct *mm, unsigned int type, 2109 bool frontswap, unsigned long *fs_pages_to_unuse) 2110 { 2111 struct vm_area_struct *vma; 2112 int ret = 0; 2113 2114 mmap_read_lock(mm); 2115 for (vma = mm->mmap; vma; vma = vma->vm_next) { 2116 if (vma->anon_vma) { 2117 ret = unuse_vma(vma, type, frontswap, 2118 fs_pages_to_unuse); 2119 if (ret) 2120 break; 2121 } 2122 cond_resched(); 2123 } 2124 mmap_read_unlock(mm); 2125 return ret; 2126 } 2127 2128 /* 2129 * Scan swap_map (or frontswap_map if frontswap parameter is true) 2130 * from current position to next entry still in use. Return 0 2131 * if there are no inuse entries after prev till end of the map. 2132 */ 2133 static unsigned int find_next_to_unuse(struct swap_info_struct *si, 2134 unsigned int prev, bool frontswap) 2135 { 2136 unsigned int i; 2137 unsigned char count; 2138 2139 /* 2140 * No need for swap_lock here: we're just looking 2141 * for whether an entry is in use, not modifying it; false 2142 * hits are okay, and sys_swapoff() has already prevented new 2143 * allocations from this area (while holding swap_lock). 
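 *
 * A false hit is cheap: if swap_map[i] was non-zero but the entry has
 * since been freed, the caller's find_get_page() simply finds no page
 * for that offset and moves on.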
2144 */ 2145 for (i = prev + 1; i < si->max; i++) { 2146 count = READ_ONCE(si->swap_map[i]); 2147 if (count && swap_count(count) != SWAP_MAP_BAD) 2148 if (!frontswap || frontswap_test(si, i)) 2149 break; 2150 if ((i % LATENCY_LIMIT) == 0) 2151 cond_resched(); 2152 } 2153 2154 if (i == si->max) 2155 i = 0; 2156 2157 return i; 2158 } 2159 2160 /* 2161 * If the boolean frontswap is true, only unuse pages_to_unuse pages; 2162 * pages_to_unuse==0 means all pages; ignored if frontswap is false 2163 */ 2164 int try_to_unuse(unsigned int type, bool frontswap, 2165 unsigned long pages_to_unuse) 2166 { 2167 struct mm_struct *prev_mm; 2168 struct mm_struct *mm; 2169 struct list_head *p; 2170 int retval = 0; 2171 struct swap_info_struct *si = swap_info[type]; 2172 struct page *page; 2173 swp_entry_t entry; 2174 unsigned int i; 2175 2176 if (!READ_ONCE(si->inuse_pages)) 2177 return 0; 2178 2179 if (!frontswap) 2180 pages_to_unuse = 0; 2181 2182 retry: 2183 retval = shmem_unuse(type, frontswap, &pages_to_unuse); 2184 if (retval) 2185 goto out; 2186 2187 prev_mm = &init_mm; 2188 mmget(prev_mm); 2189 2190 spin_lock(&mmlist_lock); 2191 p = &init_mm.mmlist; 2192 while (READ_ONCE(si->inuse_pages) && 2193 !signal_pending(current) && 2194 (p = p->next) != &init_mm.mmlist) { 2195 2196 mm = list_entry(p, struct mm_struct, mmlist); 2197 if (!mmget_not_zero(mm)) 2198 continue; 2199 spin_unlock(&mmlist_lock); 2200 mmput(prev_mm); 2201 prev_mm = mm; 2202 retval = unuse_mm(mm, type, frontswap, &pages_to_unuse); 2203 2204 if (retval) { 2205 mmput(prev_mm); 2206 goto out; 2207 } 2208 2209 /* 2210 * Make sure that we aren't completely killing 2211 * interactive performance. 2212 */ 2213 cond_resched(); 2214 spin_lock(&mmlist_lock); 2215 } 2216 spin_unlock(&mmlist_lock); 2217 2218 mmput(prev_mm); 2219 2220 i = 0; 2221 while (READ_ONCE(si->inuse_pages) && 2222 !signal_pending(current) && 2223 (i = find_next_to_unuse(si, i, frontswap)) != 0) { 2224 2225 entry = swp_entry(type, i); 2226 page = find_get_page(swap_address_space(entry), i); 2227 if (!page) 2228 continue; 2229 2230 /* 2231 * It is conceivable that a racing task removed this page from 2232 * swap cache just before we acquired the page lock. The page 2233 * might even be back in swap cache on another swap area. But 2234 * that is okay, try_to_free_swap() only removes stale pages. 2235 */ 2236 lock_page(page); 2237 wait_on_page_writeback(page); 2238 try_to_free_swap(page); 2239 unlock_page(page); 2240 put_page(page); 2241 2242 /* 2243 * For frontswap, we just need to unuse pages_to_unuse, if 2244 * it was specified. Need not check frontswap again here as 2245 * we already zeroed out pages_to_unuse if not frontswap. 2246 */ 2247 if (pages_to_unuse && --pages_to_unuse == 0) 2248 goto out; 2249 } 2250 2251 /* 2252 * Let's check again to see if there are still swap entries in the map. 2253 * If yes, we need to retry the unuse logic again. 2254 * Under global memory pressure, swap entries can be reinserted back 2255 * into process space after the mmlist loop above passes over them. 2256 * 2257 * Limit the number of retries? No: when mmget_not_zero() above fails, 2258 * that mm is likely to be freeing swap from exit_mmap(), which proceeds 2259 * at its own independent pace; and even shmem_writepage() could have 2260 * been preempted after get_swap_page(), temporarily hiding that swap. 2261 * It's easy and robust (though cpu-intensive) just to keep retrying.
2262 */ 2263 if (READ_ONCE(si->inuse_pages)) { 2264 if (!signal_pending(current)) 2265 goto retry; 2266 retval = -EINTR; 2267 } 2268 out: 2269 return (retval == FRONTSWAP_PAGES_UNUSED) ? 0 : retval; 2270 } 2271 2272 /* 2273 * After a successful try_to_unuse, if no swap is now in use, we know 2274 * we can empty the mmlist. swap_lock must be held on entry and exit. 2275 * Note that mmlist_lock nests inside swap_lock, and an mm must be 2276 * added to the mmlist just after swap_duplicate() - before would be racy. 2277 */ 2278 static void drain_mmlist(void) 2279 { 2280 struct list_head *p, *next; 2281 unsigned int type; 2282 2283 for (type = 0; type < nr_swapfiles; type++) 2284 if (swap_info[type]->inuse_pages) 2285 return; 2286 spin_lock(&mmlist_lock); 2287 list_for_each_safe(p, next, &init_mm.mmlist) 2288 list_del_init(p); 2289 spin_unlock(&mmlist_lock); 2290 } 2291 2292 /* 2293 * Use this swapdev's extent info to locate the (PAGE_SIZE) block which 2294 * corresponds to page offset for the specified swap entry. 2295 * Note that the return type of this function is sector_t, but it returns the 2296 * page offset into the bdev, not the sector offset. 2297 */ 2298 static sector_t map_swap_entry(swp_entry_t entry, struct block_device **bdev) 2299 { 2300 struct swap_info_struct *sis; 2301 struct swap_extent *se; 2302 pgoff_t offset; 2303 2304 sis = swp_swap_info(entry); 2305 *bdev = sis->bdev; 2306 2307 offset = swp_offset(entry); 2308 se = offset_to_swap_extent(sis, offset); 2309 return se->start_block + (offset - se->start_page); 2310 } 2311 2312 /* 2313 * Returns the page offset into bdev for the specified page's swap entry. 2314 */ 2315 sector_t map_swap_page(struct page *page, struct block_device **bdev) 2316 { 2317 swp_entry_t entry; 2318 entry.val = page_private(page); 2319 return map_swap_entry(entry, bdev); 2320 } 2321 2322 /* 2323 * Free all of a swapdev's extent information 2324 */ 2325 static void destroy_swap_extents(struct swap_info_struct *sis) 2326 { 2327 while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) { 2328 struct rb_node *rb = sis->swap_extent_root.rb_node; 2329 struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node); 2330 2331 rb_erase(rb, &sis->swap_extent_root); 2332 kfree(se); 2333 } 2334 2335 if (sis->flags & SWP_ACTIVATED) { 2336 struct file *swap_file = sis->swap_file; 2337 struct address_space *mapping = swap_file->f_mapping; 2338 2339 sis->flags &= ~SWP_ACTIVATED; 2340 if (mapping->a_ops->swap_deactivate) 2341 mapping->a_ops->swap_deactivate(swap_file); 2342 } 2343 } 2344 2345 /* 2346 * Add a block range (and the corresponding page range) into this swapdev's 2347 * extent tree. 2348 * 2349 * This function rather assumes that it is called in ascending page order. 2350 */ 2351 int 2352 add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, 2353 unsigned long nr_pages, sector_t start_block) 2354 { 2355 struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL; 2356 struct swap_extent *se; 2357 struct swap_extent *new_se; 2358 2359 /* 2360 * place the new node at the rightmost position since the 2361 * function is called in ascending page order. 2362 */ 2363 while (*link) { 2364 parent = *link; 2365 link = &parent->rb_right; 2366 } 2367 2368 if (parent) { 2369 se = rb_entry(parent, struct swap_extent, rb_node); 2370 BUG_ON(se->start_page + se->nr_pages != start_page); 2371 if (se->start_block + se->nr_pages == start_block) { 2372 /* Merge it */ 2373 se->nr_pages += nr_pages; 2374 return 0; 2375 } 2376 } 2377 2378 /* No merge, insert a new extent.
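 * Worked example (illustrative numbers only): with a rightmost extent
 * {start_page = 0, nr_pages = 64, start_block = 1000}, a call
 * add_swap_extent(sis, 64, 16, 1064) merges above into {0, 80, 1000},
 * while add_swap_extent(sis, 64, 16, 2000) reaches this point and
 * inserts a second extent {64, 16, 2000}.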
*/ 2379 new_se = kmalloc(sizeof(*se), GFP_KERNEL); 2380 if (new_se == NULL) 2381 return -ENOMEM; 2382 new_se->start_page = start_page; 2383 new_se->nr_pages = nr_pages; 2384 new_se->start_block = start_block; 2385 2386 rb_link_node(&new_se->rb_node, parent, link); 2387 rb_insert_color(&new_se->rb_node, &sis->swap_extent_root); 2388 return 1; 2389 } 2390 EXPORT_SYMBOL_GPL(add_swap_extent); 2391 2392 /* 2393 * A `swap extent' is a simple thing which maps a contiguous range of pages 2394 * onto a contiguous range of disk blocks. An ordered list of swap extents 2395 * is built at swapon time and is then used at swap_writepage/swap_readpage 2396 * time for locating where on disk a page belongs. 2397 * 2398 * If the swapfile is an S_ISBLK block device, a single extent is installed. 2399 * This is done so that the main operating code can treat S_ISBLK and S_ISREG 2400 * swap files identically. 2401 * 2402 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap 2403 * extent list operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK 2404 * swapfiles are handled *identically* after swapon time. 2405 * 2406 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks 2407 * and will parse them into an ordered extent list, in PAGE_SIZE chunks. If 2408 * some stray blocks are found which do not fall within the PAGE_SIZE alignment 2409 * requirements, they are simply tossed out - we will never use those blocks 2410 * for swapping. 2411 * 2412 * For all swap devices we set S_SWAPFILE across the life of the swapon. This 2413 * prevents users from writing to the swap device, which will corrupt memory. 2414 * 2415 * The amount of disk space which a single swap extent represents varies. 2416 * Typically it is in the 1-4 megabyte range. So we can have hundreds of 2417 * extents in the list. To avoid much list walking, we cache the previous 2418 * search location in `curr_swap_extent', and start new searches from there. 2419 * This is extremely effective. The average number of iterations in 2420 * map_swap_page() has been measured at about 0.3 per page. - akpm. 2421 */ 2422 static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span) 2423 { 2424 struct file *swap_file = sis->swap_file; 2425 struct address_space *mapping = swap_file->f_mapping; 2426 struct inode *inode = mapping->host; 2427 int ret; 2428 2429 if (S_ISBLK(inode->i_mode)) { 2430 ret = add_swap_extent(sis, 0, sis->max, 0); 2431 *span = sis->pages; 2432 return ret; 2433 } 2434 2435 if (mapping->a_ops->swap_activate) { 2436 ret = mapping->a_ops->swap_activate(sis, swap_file, span); 2437 if (ret >= 0) 2438 sis->flags |= SWP_ACTIVATED; 2439 if (!ret) { 2440 sis->flags |= SWP_FS; 2441 ret = add_swap_extent(sis, 0, sis->max, 0); 2442 *span = sis->pages; 2443 } 2444 return ret; 2445 } 2446 2447 return generic_swapfile_activate(sis, swap_file, span); 2448 } 2449 2450 static int swap_node(struct swap_info_struct *p) 2451 { 2452 struct block_device *bdev; 2453 2454 if (p->bdev) 2455 bdev = p->bdev; 2456 else 2457 bdev = p->swap_file->f_inode->i_sb->s_bdev; 2458 2459 return bdev ? 
bdev->bd_disk->node_id : NUMA_NO_NODE; 2460 } 2461 2462 static void setup_swap_info(struct swap_info_struct *p, int prio, 2463 unsigned char *swap_map, 2464 struct swap_cluster_info *cluster_info) 2465 { 2466 int i; 2467 2468 if (prio >= 0) 2469 p->prio = prio; 2470 else 2471 p->prio = --least_priority; 2472 /* 2473 * the plist prio is negated because plist ordering is 2474 * low-to-high, while swap ordering is high-to-low 2475 */ 2476 p->list.prio = -p->prio; 2477 for_each_node(i) { 2478 if (p->prio >= 0) 2479 p->avail_lists[i].prio = -p->prio; 2480 else { 2481 if (swap_node(p) == i) 2482 p->avail_lists[i].prio = 1; 2483 else 2484 p->avail_lists[i].prio = -p->prio; 2485 } 2486 } 2487 p->swap_map = swap_map; 2488 p->cluster_info = cluster_info; 2489 } 2490 2491 static void _enable_swap_info(struct swap_info_struct *p) 2492 { 2493 p->flags |= SWP_WRITEOK | SWP_VALID; 2494 atomic_long_add(p->pages, &nr_swap_pages); 2495 total_swap_pages += p->pages; 2496 2497 assert_spin_locked(&swap_lock); 2498 /* 2499 * both lists are plists, and thus priority ordered. 2500 * swap_active_head needs to be priority ordered for swapoff(), 2501 * which on removal of any swap_info_struct with an auto-assigned 2502 * (i.e. negative) priority increments the auto-assigned priority 2503 * of any lower-priority swap_info_structs. 2504 * swap_avail_head needs to be priority ordered for get_swap_page(), 2505 * which allocates swap pages from the highest available priority 2506 * swap_info_struct. 2507 */ 2508 plist_add(&p->list, &swap_active_head); 2509 add_to_avail_list(p); 2510 } 2511 2512 static void enable_swap_info(struct swap_info_struct *p, int prio, 2513 unsigned char *swap_map, 2514 struct swap_cluster_info *cluster_info, 2515 unsigned long *frontswap_map) 2516 { 2517 frontswap_init(p->type, frontswap_map); 2518 spin_lock(&swap_lock); 2519 spin_lock(&p->lock); 2520 setup_swap_info(p, prio, swap_map, cluster_info); 2521 spin_unlock(&p->lock); 2522 spin_unlock(&swap_lock); 2523 /* 2524 * Guarantee swap_map, cluster_info, etc. 
fields are valid 2525 * between get/put_swap_device() if SWP_VALID bit is set 2526 */ 2527 synchronize_rcu(); 2528 spin_lock(&swap_lock); 2529 spin_lock(&p->lock); 2530 _enable_swap_info(p); 2531 spin_unlock(&p->lock); 2532 spin_unlock(&swap_lock); 2533 } 2534 2535 static void reinsert_swap_info(struct swap_info_struct *p) 2536 { 2537 spin_lock(&swap_lock); 2538 spin_lock(&p->lock); 2539 setup_swap_info(p, p->prio, p->swap_map, p->cluster_info); 2540 _enable_swap_info(p); 2541 spin_unlock(&p->lock); 2542 spin_unlock(&swap_lock); 2543 } 2544 2545 bool has_usable_swap(void) 2546 { 2547 bool ret = true; 2548 2549 spin_lock(&swap_lock); 2550 if (plist_head_empty(&swap_active_head)) 2551 ret = false; 2552 spin_unlock(&swap_lock); 2553 return ret; 2554 } 2555 2556 SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) 2557 { 2558 struct swap_info_struct *p = NULL; 2559 unsigned char *swap_map; 2560 struct swap_cluster_info *cluster_info; 2561 unsigned long *frontswap_map; 2562 struct file *swap_file, *victim; 2563 struct address_space *mapping; 2564 struct inode *inode; 2565 struct filename *pathname; 2566 int err, found = 0; 2567 unsigned int old_block_size; 2568 2569 if (!capable(CAP_SYS_ADMIN)) 2570 return -EPERM; 2571 2572 BUG_ON(!current->mm); 2573 2574 pathname = getname(specialfile); 2575 if (IS_ERR(pathname)) 2576 return PTR_ERR(pathname); 2577 2578 victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0); 2579 err = PTR_ERR(victim); 2580 if (IS_ERR(victim)) 2581 goto out; 2582 2583 mapping = victim->f_mapping; 2584 spin_lock(&swap_lock); 2585 plist_for_each_entry(p, &swap_active_head, list) { 2586 if (p->flags & SWP_WRITEOK) { 2587 if (p->swap_file->f_mapping == mapping) { 2588 found = 1; 2589 break; 2590 } 2591 } 2592 } 2593 if (!found) { 2594 err = -EINVAL; 2595 spin_unlock(&swap_lock); 2596 goto out_dput; 2597 } 2598 if (!security_vm_enough_memory_mm(current->mm, p->pages)) 2599 vm_unacct_memory(p->pages); 2600 else { 2601 err = -ENOMEM; 2602 spin_unlock(&swap_lock); 2603 goto out_dput; 2604 } 2605 del_from_avail_list(p); 2606 spin_lock(&p->lock); 2607 if (p->prio < 0) { 2608 struct swap_info_struct *si = p; 2609 int nid; 2610 2611 plist_for_each_entry_continue(si, &swap_active_head, list) { 2612 si->prio++; 2613 si->list.prio--; 2614 for_each_node(nid) { 2615 if (si->avail_lists[nid].prio != 1) 2616 si->avail_lists[nid].prio--; 2617 } 2618 } 2619 least_priority++; 2620 } 2621 plist_del(&p->list, &swap_active_head); 2622 atomic_long_sub(p->pages, &nr_swap_pages); 2623 total_swap_pages -= p->pages; 2624 p->flags &= ~SWP_WRITEOK; 2625 spin_unlock(&p->lock); 2626 spin_unlock(&swap_lock); 2627 2628 disable_swap_slots_cache_lock(); 2629 2630 set_current_oom_origin(); 2631 err = try_to_unuse(p->type, false, 0); /* force unuse all pages */ 2632 clear_current_oom_origin(); 2633 2634 if (err) { 2635 /* re-insert swap space back into swap_list */ 2636 reinsert_swap_info(p); 2637 reenable_swap_slots_cache_unlock(); 2638 goto out_dput; 2639 } 2640 2641 reenable_swap_slots_cache_unlock(); 2642 2643 spin_lock(&swap_lock); 2644 spin_lock(&p->lock); 2645 p->flags &= ~SWP_VALID; /* mark swap device as invalid */ 2646 spin_unlock(&p->lock); 2647 spin_unlock(&swap_lock); 2648 /* 2649 * wait for swap operations protected by get/put_swap_device() 2650 * to complete 2651 */ 2652 synchronize_rcu(); 2653 2654 flush_work(&p->discard_work); 2655 2656 destroy_swap_extents(p); 2657 if (p->flags & SWP_CONTINUED) 2658 free_swap_count_continuations(p); 2659 2660 if (!p->bdev || 
!blk_queue_nonrot(bdev_get_queue(p->bdev))) 2661 atomic_dec(&nr_rotate_swap); 2662 2663 mutex_lock(&swapon_mutex); 2664 spin_lock(&swap_lock); 2665 spin_lock(&p->lock); 2666 drain_mmlist(); 2667 2668 /* wait for anyone still in scan_swap_map */ 2669 p->highest_bit = 0; /* cuts scans short */ 2670 while (p->flags >= SWP_SCANNING) { 2671 spin_unlock(&p->lock); 2672 spin_unlock(&swap_lock); 2673 schedule_timeout_uninterruptible(1); 2674 spin_lock(&swap_lock); 2675 spin_lock(&p->lock); 2676 } 2677 2678 swap_file = p->swap_file; 2679 old_block_size = p->old_block_size; 2680 p->swap_file = NULL; 2681 p->max = 0; 2682 swap_map = p->swap_map; 2683 p->swap_map = NULL; 2684 cluster_info = p->cluster_info; 2685 p->cluster_info = NULL; 2686 frontswap_map = frontswap_map_get(p); 2687 spin_unlock(&p->lock); 2688 spin_unlock(&swap_lock); 2689 arch_swap_invalidate_area(p->type); 2690 frontswap_invalidate_area(p->type); 2691 frontswap_map_set(p, NULL); 2692 mutex_unlock(&swapon_mutex); 2693 free_percpu(p->percpu_cluster); 2694 p->percpu_cluster = NULL; 2695 free_percpu(p->cluster_next_cpu); 2696 p->cluster_next_cpu = NULL; 2697 vfree(swap_map); 2698 kvfree(cluster_info); 2699 kvfree(frontswap_map); 2700 /* Destroy swap account information */ 2701 swap_cgroup_swapoff(p->type); 2702 exit_swap_address_space(p->type); 2703 2704 inode = mapping->host; 2705 if (S_ISBLK(inode->i_mode)) { 2706 struct block_device *bdev = I_BDEV(inode); 2707 2708 set_blocksize(bdev, old_block_size); 2709 blkdev_put(bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 2710 } 2711 2712 inode_lock(inode); 2713 inode->i_flags &= ~S_SWAPFILE; 2714 inode_unlock(inode); 2715 filp_close(swap_file, NULL); 2716 2717 /* 2718 * Clear the SWP_USED flag after all resources are freed so that swapon 2719 * can reuse this swap_info in alloc_swap_info() safely. It is ok to 2720 * not hold p->lock after we cleared its SWP_WRITEOK. 
2721 */ 2722 spin_lock(&swap_lock); 2723 p->flags = 0; 2724 spin_unlock(&swap_lock); 2725 2726 err = 0; 2727 atomic_inc(&proc_poll_event); 2728 wake_up_interruptible(&proc_poll_wait); 2729 2730 out_dput: 2731 filp_close(victim, NULL); 2732 out: 2733 putname(pathname); 2734 return err; 2735 } 2736 2737 #ifdef CONFIG_PROC_FS 2738 static __poll_t swaps_poll(struct file *file, poll_table *wait) 2739 { 2740 struct seq_file *seq = file->private_data; 2741 2742 poll_wait(file, &proc_poll_wait, wait); 2743 2744 if (seq->poll_event != atomic_read(&proc_poll_event)) { 2745 seq->poll_event = atomic_read(&proc_poll_event); 2746 return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI; 2747 } 2748 2749 return EPOLLIN | EPOLLRDNORM; 2750 } 2751 2752 /* iterator */ 2753 static void *swap_start(struct seq_file *swap, loff_t *pos) 2754 { 2755 struct swap_info_struct *si; 2756 int type; 2757 loff_t l = *pos; 2758 2759 mutex_lock(&swapon_mutex); 2760 2761 if (!l) 2762 return SEQ_START_TOKEN; 2763 2764 for (type = 0; (si = swap_type_to_swap_info(type)); type++) { 2765 if (!(si->flags & SWP_USED) || !si->swap_map) 2766 continue; 2767 if (!--l) 2768 return si; 2769 } 2770 2771 return NULL; 2772 } 2773 2774 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) 2775 { 2776 struct swap_info_struct *si = v; 2777 int type; 2778 2779 if (v == SEQ_START_TOKEN) 2780 type = 0; 2781 else 2782 type = si->type + 1; 2783 2784 ++(*pos); 2785 for (; (si = swap_type_to_swap_info(type)); type++) { 2786 if (!(si->flags & SWP_USED) || !si->swap_map) 2787 continue; 2788 return si; 2789 } 2790 2791 return NULL; 2792 } 2793 2794 static void swap_stop(struct seq_file *swap, void *v) 2795 { 2796 mutex_unlock(&swapon_mutex); 2797 } 2798 2799 static int swap_show(struct seq_file *swap, void *v) 2800 { 2801 struct swap_info_struct *si = v; 2802 struct file *file; 2803 int len; 2804 unsigned int bytes, inuse; 2805 2806 if (si == SEQ_START_TOKEN) { 2807 seq_puts(swap,"Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n"); 2808 return 0; 2809 } 2810 2811 bytes = si->pages << (PAGE_SHIFT - 10); 2812 inuse = si->inuse_pages << (PAGE_SHIFT - 10); 2813 2814 file = si->swap_file; 2815 len = seq_file_path(swap, file, " \t\n\\"); 2816 seq_printf(swap, "%*s%s\t%u\t%s%u\t%s%d\n", 2817 len < 40 ? 40 - len : 1, " ", 2818 S_ISBLK(file_inode(file)->i_mode) ? 2819 "partition" : "file\t", 2820 bytes, bytes < 10000000 ? "\t" : "", 2821 inuse, inuse < 10000000 ? 
"\t" : "", 2822 si->prio); 2823 return 0; 2824 } 2825 2826 static const struct seq_operations swaps_op = { 2827 .start = swap_start, 2828 .next = swap_next, 2829 .stop = swap_stop, 2830 .show = swap_show 2831 }; 2832 2833 static int swaps_open(struct inode *inode, struct file *file) 2834 { 2835 struct seq_file *seq; 2836 int ret; 2837 2838 ret = seq_open(file, &swaps_op); 2839 if (ret) 2840 return ret; 2841 2842 seq = file->private_data; 2843 seq->poll_event = atomic_read(&proc_poll_event); 2844 return 0; 2845 } 2846 2847 static const struct proc_ops swaps_proc_ops = { 2848 .proc_flags = PROC_ENTRY_PERMANENT, 2849 .proc_open = swaps_open, 2850 .proc_read = seq_read, 2851 .proc_lseek = seq_lseek, 2852 .proc_release = seq_release, 2853 .proc_poll = swaps_poll, 2854 }; 2855 2856 static int __init procswaps_init(void) 2857 { 2858 proc_create("swaps", 0, NULL, &swaps_proc_ops); 2859 return 0; 2860 } 2861 __initcall(procswaps_init); 2862 #endif /* CONFIG_PROC_FS */ 2863 2864 #ifdef MAX_SWAPFILES_CHECK 2865 static int __init max_swapfiles_check(void) 2866 { 2867 MAX_SWAPFILES_CHECK(); 2868 return 0; 2869 } 2870 late_initcall(max_swapfiles_check); 2871 #endif 2872 2873 static struct swap_info_struct *alloc_swap_info(void) 2874 { 2875 struct swap_info_struct *p; 2876 unsigned int type; 2877 int i; 2878 2879 p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL); 2880 if (!p) 2881 return ERR_PTR(-ENOMEM); 2882 2883 spin_lock(&swap_lock); 2884 for (type = 0; type < nr_swapfiles; type++) { 2885 if (!(swap_info[type]->flags & SWP_USED)) 2886 break; 2887 } 2888 if (type >= MAX_SWAPFILES) { 2889 spin_unlock(&swap_lock); 2890 kvfree(p); 2891 return ERR_PTR(-EPERM); 2892 } 2893 if (type >= nr_swapfiles) { 2894 p->type = type; 2895 WRITE_ONCE(swap_info[type], p); 2896 /* 2897 * Write swap_info[type] before nr_swapfiles, in case a 2898 * racing procfs swap_start() or swap_next() is reading them. 2899 * (We never shrink nr_swapfiles, we never free this entry.) 2900 */ 2901 smp_wmb(); 2902 WRITE_ONCE(nr_swapfiles, nr_swapfiles + 1); 2903 } else { 2904 kvfree(p); 2905 p = swap_info[type]; 2906 /* 2907 * Do not memset this entry: a racing procfs swap_next() 2908 * would be relying on p->type to remain valid. 2909 */ 2910 } 2911 p->swap_extent_root = RB_ROOT; 2912 plist_node_init(&p->list, 0); 2913 for_each_node(i) 2914 plist_node_init(&p->avail_lists[i], 0); 2915 p->flags = SWP_USED; 2916 spin_unlock(&swap_lock); 2917 spin_lock_init(&p->lock); 2918 spin_lock_init(&p->cont_lock); 2919 2920 return p; 2921 } 2922 2923 static int claim_swapfile(struct swap_info_struct *p, struct inode *inode) 2924 { 2925 int error; 2926 2927 if (S_ISBLK(inode->i_mode)) { 2928 p->bdev = blkdev_get_by_dev(inode->i_rdev, 2929 FMODE_READ | FMODE_WRITE | FMODE_EXCL, p); 2930 if (IS_ERR(p->bdev)) { 2931 error = PTR_ERR(p->bdev); 2932 p->bdev = NULL; 2933 return error; 2934 } 2935 p->old_block_size = block_size(p->bdev); 2936 error = set_blocksize(p->bdev, PAGE_SIZE); 2937 if (error < 0) 2938 return error; 2939 /* 2940 * Zoned block devices contain zones that have a sequential 2941 * write only restriction. Hence zoned block devices are not 2942 * suitable for swapping. Disallow them here. 2943 */ 2944 if (blk_queue_is_zoned(p->bdev->bd_disk->queue)) 2945 return -EINVAL; 2946 p->flags |= SWP_BLKDEV; 2947 } else if (S_ISREG(inode->i_mode)) { 2948 p->bdev = inode->i_sb->s_bdev; 2949 } 2950 2951 return 0; 2952 } 2953 2954 2955 /* 2956 * Find out how many pages are allowed for a single swap device. 
There 2957 * are two limiting factors: 2958 * 1) the number of bits for the swap offset in the swp_entry_t type, and 2959 * 2) the number of bits in the swap pte, as defined by the different 2960 * architectures. 2961 * 2962 * In order to find the largest possible bit mask, a swap entry with 2963 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte, 2964 * decoded to a swp_entry_t again, and finally the swap offset is 2965 * extracted. 2966 * 2967 * This will mask all the bits from the initial ~0UL mask that can't 2968 * be encoded in either the swp_entry_t or the architecture definition 2969 * of a swap pte. 2970 */ 2971 unsigned long generic_max_swapfile_size(void) 2972 { 2973 return swp_offset(pte_to_swp_entry( 2974 swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1; 2975 } 2976 2977 /* Can be overridden by an architecture for additional checks. */ 2978 __weak unsigned long max_swapfile_size(void) 2979 { 2980 return generic_max_swapfile_size(); 2981 } 2982 2983 static unsigned long read_swap_header(struct swap_info_struct *p, 2984 union swap_header *swap_header, 2985 struct inode *inode) 2986 { 2987 int i; 2988 unsigned long maxpages; 2989 unsigned long swapfilepages; 2990 unsigned long last_page; 2991 2992 if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) { 2993 pr_err("Unable to find swap-space signature\n"); 2994 return 0; 2995 } 2996 2997 /* swap partition endianness hack... */ 2998 if (swab32(swap_header->info.version) == 1) { 2999 swab32s(&swap_header->info.version); 3000 swab32s(&swap_header->info.last_page); 3001 swab32s(&swap_header->info.nr_badpages); 3002 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) 3003 return 0; 3004 for (i = 0; i < swap_header->info.nr_badpages; i++) 3005 swab32s(&swap_header->info.badpages[i]); 3006 } 3007 /* Check the swap header's sub-version */ 3008 if (swap_header->info.version != 1) { 3009 pr_warn("Unable to handle swap header version %d\n", 3010 swap_header->info.version); 3011 return 0; 3012 } 3013 3014 p->lowest_bit = 1; 3015 p->cluster_next = 1; 3016 p->cluster_nr = 0; 3017 3018 maxpages = max_swapfile_size(); 3019 last_page = swap_header->info.last_page; 3020 if (!last_page) { 3021 pr_warn("Empty swap-file\n"); 3022 return 0; 3023 } 3024 if (last_page > maxpages) { 3025 pr_warn("Truncating oversized swap area, only using %luk out of %luk\n", 3026 maxpages << (PAGE_SHIFT - 10), 3027 last_page << (PAGE_SHIFT - 10)); 3028 } 3029 if (maxpages > last_page) { 3030 maxpages = last_page + 1; 3031 /* p->max is an unsigned int: don't overflow it */ 3032 if ((unsigned int)maxpages == 0) 3033 maxpages = UINT_MAX; 3034 } 3035 p->highest_bit = maxpages - 1; 3036 3037 if (!maxpages) 3038 return 0; 3039 swapfilepages = i_size_read(inode) >> PAGE_SHIFT; 3040 if (swapfilepages && maxpages > swapfilepages) { 3041 pr_warn("Swap area shorter than signature indicates\n"); 3042 return 0; 3043 } 3044 if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode)) 3045 return 0; 3046 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) 3047 return 0; 3048 3049 return maxpages; 3050 } 3051 3052 #define SWAP_CLUSTER_INFO_COLS \ 3053 DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info)) 3054 #define SWAP_CLUSTER_SPACE_COLS \ 3055 DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER) 3056 #define SWAP_CLUSTER_COLS \ 3057 max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS) 3058 3059 static int setup_swap_map_and_extents(struct swap_info_struct *p, 3060 union swap_header *swap_header, 3061 unsigned char *swap_map, 3062 struct swap_cluster_info *cluster_info, 3063 unsigned long maxpages, 3064 sector_t *span) 3065 { 3066 unsigned int j, k; 3067 unsigned int nr_good_pages; 3068 int nr_extents; 3069 unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); 3070 unsigned long col = p->cluster_next / SWAPFILE_CLUSTER % SWAP_CLUSTER_COLS; 3071 unsigned long i, idx; 3072 3073 nr_good_pages = maxpages - 1; /* omit header page */ 3074 3075 cluster_list_init(&p->free_clusters); 3076 cluster_list_init(&p->discard_clusters); 3077 3078 for (i = 0; i < swap_header->info.nr_badpages; i++) { 3079 unsigned int page_nr = swap_header->info.badpages[i]; 3080 if (page_nr == 0 || page_nr > swap_header->info.last_page) 3081 return -EINVAL; 3082 if (page_nr < maxpages) { 3083 swap_map[page_nr] = SWAP_MAP_BAD; 3084 nr_good_pages--; 3085 /* 3086 * Haven't marked the cluster free yet, no list 3087 * operation involved 3088 */ 3089 inc_cluster_info_page(p, cluster_info, page_nr); 3090 } 3091 } 3092 3093 /* Haven't marked the cluster free yet, no list operation involved */ 3094 for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++) 3095 inc_cluster_info_page(p, cluster_info, i); 3096 3097 if (nr_good_pages) { 3098 swap_map[0] = SWAP_MAP_BAD; 3099 /* 3100 * Haven't marked the cluster free yet, no list 3101 * operation involved 3102 */ 3103 inc_cluster_info_page(p, cluster_info, 0); 3104 p->max = maxpages; 3105 p->pages = nr_good_pages; 3106 nr_extents = setup_swap_extents(p, span); 3107 if (nr_extents < 0) 3108 return nr_extents; 3109 nr_good_pages = p->pages; 3110 } 3111 if (!nr_good_pages) { 3112 pr_warn("Empty swap-file\n"); 3113 return -EINVAL; 3114 } 3115 3116 if (!cluster_info) 3117 return nr_extents; 3118 3119 3120 /* 3121 * Reduce false cache line sharing between cluster_info entries, and 3122 * spread out clusters serving the same swap address space. 3123 */ 3124 for (k = 0; k < SWAP_CLUSTER_COLS; k++) { 3125 j = (k + col) % SWAP_CLUSTER_COLS; 3126 for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) { 3127 idx = i * SWAP_CLUSTER_COLS + j; 3128 if (idx >= nr_clusters) 3129 continue; 3130 if (cluster_count(&cluster_info[idx])) 3131 continue; 3132 cluster_set_flag(&cluster_info[idx], CLUSTER_FLAG_FREE); 3133 cluster_list_add_tail(&p->free_clusters, cluster_info, 3134 idx); 3135 } 3136 } 3137 return nr_extents; 3138 } 3139 3140 /* 3141 * Helper for sys_swapon to determine whether a given swap 3142 * backing device queue supports DISCARD operations.
3143 */ 3144 static bool swap_discardable(struct swap_info_struct *si) 3145 { 3146 struct request_queue *q = bdev_get_queue(si->bdev); 3147 3148 if (!q || !blk_queue_discard(q)) 3149 return false; 3150 3151 return true; 3152 } 3153 3154 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) 3155 { 3156 struct swap_info_struct *p; 3157 struct filename *name; 3158 struct file *swap_file = NULL; 3159 struct address_space *mapping; 3160 int prio; 3161 int error; 3162 union swap_header *swap_header; 3163 int nr_extents; 3164 sector_t span; 3165 unsigned long maxpages; 3166 unsigned char *swap_map = NULL; 3167 struct swap_cluster_info *cluster_info = NULL; 3168 unsigned long *frontswap_map = NULL; 3169 struct page *page = NULL; 3170 struct inode *inode = NULL; 3171 bool inced_nr_rotate_swap = false; 3172 3173 if (swap_flags & ~SWAP_FLAGS_VALID) 3174 return -EINVAL; 3175 3176 if (!capable(CAP_SYS_ADMIN)) 3177 return -EPERM; 3178 3179 if (!swap_avail_heads) 3180 return -ENOMEM; 3181 3182 p = alloc_swap_info(); 3183 if (IS_ERR(p)) 3184 return PTR_ERR(p); 3185 3186 INIT_WORK(&p->discard_work, swap_discard_work); 3187 3188 name = getname(specialfile); 3189 if (IS_ERR(name)) { 3190 error = PTR_ERR(name); 3191 name = NULL; 3192 goto bad_swap; 3193 } 3194 swap_file = file_open_name(name, O_RDWR|O_LARGEFILE, 0); 3195 if (IS_ERR(swap_file)) { 3196 error = PTR_ERR(swap_file); 3197 swap_file = NULL; 3198 goto bad_swap; 3199 } 3200 3201 p->swap_file = swap_file; 3202 mapping = swap_file->f_mapping; 3203 inode = mapping->host; 3204 3205 error = claim_swapfile(p, inode); 3206 if (unlikely(error)) 3207 goto bad_swap; 3208 3209 inode_lock(inode); 3210 if (IS_SWAPFILE(inode)) { 3211 error = -EBUSY; 3212 goto bad_swap_unlock_inode; 3213 } 3214 3215 /* 3216 * Read the swap header. 
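 * The header always occupies page 0 of the swap area; note that
 * setup_swap_map_and_extents() marks that page SWAP_MAP_BAD so it is
 * never handed out as a swap slot.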
3217 */ 3218 if (!mapping->a_ops->readpage) { 3219 error = -EINVAL; 3220 goto bad_swap_unlock_inode; 3221 } 3222 page = read_mapping_page(mapping, 0, swap_file); 3223 if (IS_ERR(page)) { 3224 error = PTR_ERR(page); 3225 goto bad_swap_unlock_inode; 3226 } 3227 swap_header = kmap(page); 3228 3229 maxpages = read_swap_header(p, swap_header, inode); 3230 if (unlikely(!maxpages)) { 3231 error = -EINVAL; 3232 goto bad_swap_unlock_inode; 3233 } 3234 3235 /* OK, set up the swap map and apply the bad block list */ 3236 swap_map = vzalloc(maxpages); 3237 if (!swap_map) { 3238 error = -ENOMEM; 3239 goto bad_swap_unlock_inode; 3240 } 3241 3242 if (p->bdev && blk_queue_stable_writes(p->bdev->bd_disk->queue)) 3243 p->flags |= SWP_STABLE_WRITES; 3244 3245 if (p->bdev && p->bdev->bd_disk->fops->rw_page) 3246 p->flags |= SWP_SYNCHRONOUS_IO; 3247 3248 if (p->bdev && blk_queue_nonrot(bdev_get_queue(p->bdev))) { 3249 int cpu; 3250 unsigned long ci, nr_cluster; 3251 3252 p->flags |= SWP_SOLIDSTATE; 3253 p->cluster_next_cpu = alloc_percpu(unsigned int); 3254 if (!p->cluster_next_cpu) { 3255 error = -ENOMEM; 3256 goto bad_swap_unlock_inode; 3257 } 3258 /* 3259 * select a random position to start with, to help the SSD's 3260 * wear leveling 3261 */ 3262 for_each_possible_cpu(cpu) { 3263 per_cpu(*p->cluster_next_cpu, cpu) = 3264 1 + prandom_u32_max(p->highest_bit); 3265 } 3266 nr_cluster = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); 3267 3268 cluster_info = kvcalloc(nr_cluster, sizeof(*cluster_info), 3269 GFP_KERNEL); 3270 if (!cluster_info) { 3271 error = -ENOMEM; 3272 goto bad_swap_unlock_inode; 3273 } 3274 3275 for (ci = 0; ci < nr_cluster; ci++) 3276 spin_lock_init(&((cluster_info + ci)->lock)); 3277 3278 p->percpu_cluster = alloc_percpu(struct percpu_cluster); 3279 if (!p->percpu_cluster) { 3280 error = -ENOMEM; 3281 goto bad_swap_unlock_inode; 3282 } 3283 for_each_possible_cpu(cpu) { 3284 struct percpu_cluster *cluster; 3285 cluster = per_cpu_ptr(p->percpu_cluster, cpu); 3286 cluster_set_null(&cluster->index); 3287 } 3288 } else { 3289 atomic_inc(&nr_rotate_swap); 3290 inced_nr_rotate_swap = true; 3291 } 3292 3293 error = swap_cgroup_swapon(p->type, maxpages); 3294 if (error) 3295 goto bad_swap_unlock_inode; 3296 3297 nr_extents = setup_swap_map_and_extents(p, swap_header, swap_map, 3298 cluster_info, maxpages, &span); 3299 if (unlikely(nr_extents < 0)) { 3300 error = nr_extents; 3301 goto bad_swap_unlock_inode; 3302 } 3303 /* frontswap enabled? set up bit-per-page map for frontswap */ 3304 if (IS_ENABLED(CONFIG_FRONTSWAP)) 3305 frontswap_map = kvcalloc(BITS_TO_LONGS(maxpages), 3306 sizeof(long), 3307 GFP_KERNEL); 3308 3309 if (p->bdev && (swap_flags & SWAP_FLAG_DISCARD) && swap_discardable(p)) { 3310 /* 3311 * When discard is enabled for swap with no particular 3312 * policy flagged, we set all swap discard flags here in 3313 * order to sustain backward compatibility with older 3314 * swapon(8) releases. 3315 */ 3316 p->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD | 3317 SWP_PAGE_DISCARD); 3318 3319 /* 3320 * By flagging sys_swapon, a sysadmin can tell us to 3321 * either do single-time area discards only, or to just 3322 * perform discards for released swap page-clusters. 3323 * Now it's time to adjust the p->flags accordingly.
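 *
 * For example: SWAP_FLAG_DISCARD_ONCE leaves only SWP_AREA_DISCARD set
 * (a single discard of the whole area at swapon time), while
 * SWAP_FLAG_DISCARD_PAGES leaves only SWP_PAGE_DISCARD (discard of
 * page-clusters as they are freed); passing neither keeps both behaviours.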
3324 */ 3325 if (swap_flags & SWAP_FLAG_DISCARD_ONCE) 3326 p->flags &= ~SWP_PAGE_DISCARD; 3327 else if (swap_flags & SWAP_FLAG_DISCARD_PAGES) 3328 p->flags &= ~SWP_AREA_DISCARD; 3329 3330 /* issue a swapon-time discard if it's still required */ 3331 if (p->flags & SWP_AREA_DISCARD) { 3332 int err = discard_swap(p); 3333 if (unlikely(err)) 3334 pr_err("swapon: discard_swap(%p): %d\n", 3335 p, err); 3336 } 3337 } 3338 3339 error = init_swap_address_space(p->type, maxpages); 3340 if (error) 3341 goto bad_swap_unlock_inode; 3342 3343 /* 3344 * Flush any pending IO and dirty mappings before we start using this 3345 * swap device. 3346 */ 3347 inode->i_flags |= S_SWAPFILE; 3348 error = inode_drain_writes(inode); 3349 if (error) { 3350 inode->i_flags &= ~S_SWAPFILE; 3351 goto bad_swap_unlock_inode; 3352 } 3353 3354 mutex_lock(&swapon_mutex); 3355 prio = -1; 3356 if (swap_flags & SWAP_FLAG_PREFER) 3357 prio = 3358 (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT; 3359 enable_swap_info(p, prio, swap_map, cluster_info, frontswap_map); 3360 3361 pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s%s\n", 3362 p->pages<<(PAGE_SHIFT-10), name->name, p->prio, 3363 nr_extents, (unsigned long long)span<<(PAGE_SHIFT-10), 3364 (p->flags & SWP_SOLIDSTATE) ? "SS" : "", 3365 (p->flags & SWP_DISCARDABLE) ? "D" : "", 3366 (p->flags & SWP_AREA_DISCARD) ? "s" : "", 3367 (p->flags & SWP_PAGE_DISCARD) ? "c" : "", 3368 (frontswap_map) ? "FS" : ""); 3369 3370 mutex_unlock(&swapon_mutex); 3371 atomic_inc(&proc_poll_event); 3372 wake_up_interruptible(&proc_poll_wait); 3373 3374 error = 0; 3375 goto out; 3376 bad_swap_unlock_inode: 3377 inode_unlock(inode); 3378 bad_swap: 3379 free_percpu(p->percpu_cluster); 3380 p->percpu_cluster = NULL; 3381 free_percpu(p->cluster_next_cpu); 3382 p->cluster_next_cpu = NULL; 3383 if (inode && S_ISBLK(inode->i_mode) && p->bdev) { 3384 set_blocksize(p->bdev, p->old_block_size); 3385 blkdev_put(p->bdev, FMODE_READ | FMODE_WRITE | FMODE_EXCL); 3386 } 3387 inode = NULL; 3388 destroy_swap_extents(p); 3389 swap_cgroup_swapoff(p->type); 3390 spin_lock(&swap_lock); 3391 p->swap_file = NULL; 3392 p->flags = 0; 3393 spin_unlock(&swap_lock); 3394 vfree(swap_map); 3395 kvfree(cluster_info); 3396 kvfree(frontswap_map); 3397 if (inced_nr_rotate_swap) 3398 atomic_dec(&nr_rotate_swap); 3399 if (swap_file) 3400 filp_close(swap_file, NULL); 3401 out: 3402 if (page && !IS_ERR(page)) { 3403 kunmap(page); 3404 put_page(page); 3405 } 3406 if (name) 3407 putname(name); 3408 if (inode) 3409 inode_unlock(inode); 3410 if (!error) 3411 enable_swap_slots_cache(); 3412 return error; 3413 } 3414 3415 void si_swapinfo(struct sysinfo *val) 3416 { 3417 unsigned int type; 3418 unsigned long nr_to_be_unused = 0; 3419 3420 spin_lock(&swap_lock); 3421 for (type = 0; type < nr_swapfiles; type++) { 3422 struct swap_info_struct *si = swap_info[type]; 3423 3424 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) 3425 nr_to_be_unused += si->inuse_pages; 3426 } 3427 val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused; 3428 val->totalswap = total_swap_pages + nr_to_be_unused; 3429 spin_unlock(&swap_lock); 3430 } 3431 3432 /* 3433 * Verify that a swap entry is valid and increment its swap map count. 3434 * 3435 * Returns error code in following case. 3436 * - success -> 0 3437 * - swp_entry is invalid -> EINVAL 3438 * - swp_entry is migration entry -> EINVAL 3439 * - swap-cache reference is requested but there is already one. 
-> EEXIST 3440 * - swap-cache reference is requested but the entry is not used. -> ENOENT 3441 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM 3442 */ 3443 static int __swap_duplicate(swp_entry_t entry, unsigned char usage) 3444 { 3445 struct swap_info_struct *p; 3446 struct swap_cluster_info *ci; 3447 unsigned long offset; 3448 unsigned char count; 3449 unsigned char has_cache; 3450 int err = -EINVAL; 3451 3452 p = get_swap_device(entry); 3453 if (!p) 3454 goto out; 3455 3456 offset = swp_offset(entry); 3457 ci = lock_cluster_or_swap_info(p, offset); 3458 3459 count = p->swap_map[offset]; 3460 3461 /* 3462 * swapin_readahead() doesn't check if a swap entry is valid, so the 3463 * swap entry could be SWAP_MAP_BAD. Check here with lock held. 3464 */ 3465 if (unlikely(swap_count(count) == SWAP_MAP_BAD)) { 3466 err = -ENOENT; 3467 goto unlock_out; 3468 } 3469 3470 has_cache = count & SWAP_HAS_CACHE; 3471 count &= ~SWAP_HAS_CACHE; 3472 err = 0; 3473 3474 if (usage == SWAP_HAS_CACHE) { 3475 3476 /* set SWAP_HAS_CACHE if there is no cache and entry is used */ 3477 if (!has_cache && count) 3478 has_cache = SWAP_HAS_CACHE; 3479 else if (has_cache) /* someone else added cache */ 3480 err = -EEXIST; 3481 else /* no users remaining */ 3482 err = -ENOENT; 3483 3484 } else if (count || has_cache) { 3485 3486 if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX) 3487 count += usage; 3488 else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) 3489 err = -EINVAL; 3490 else if (swap_count_continued(p, offset, count)) 3491 count = COUNT_CONTINUED; 3492 else 3493 err = -ENOMEM; 3494 } else 3495 err = -ENOENT; /* unused swap entry */ 3496 3497 WRITE_ONCE(p->swap_map[offset], count | has_cache); 3498 3499 unlock_out: 3500 unlock_cluster_or_swap_info(p, ci); 3501 out: 3502 if (p) 3503 put_swap_device(p); 3504 return err; 3505 } 3506 3507 /* 3508 * Help swapoff by noting that swap entry belongs to shmem/tmpfs 3509 * (in which case its reference count is never incremented). 3510 */ 3511 void swap_shmem_alloc(swp_entry_t entry) 3512 { 3513 __swap_duplicate(entry, SWAP_MAP_SHMEM); 3514 } 3515 3516 /* 3517 * Increase reference count of swap entry by 1. 3518 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required 3519 * but could not be atomically allocated. Returns 0, just as if it succeeded, 3520 * if __swap_duplicate() fails for another reason (-EINVAL or -ENOENT), which 3521 * might occur if a page table entry has got corrupted. 3522 */ 3523 int swap_duplicate(swp_entry_t entry) 3524 { 3525 int err = 0; 3526 3527 while (!err && __swap_duplicate(entry, 1) == -ENOMEM) 3528 err = add_swap_count_continuation(entry, GFP_ATOMIC); 3529 return err; 3530 } 3531 3532 /* 3533 * @entry: swap entry for which we allocate swap cache. 3534 * 3535 * Called when allocating swap cache for existing swap entry, 3536 * This can return error codes. Returns 0 at success. 3537 * -EEXIST means there is a swap cache. 3538 * Note: return code is different from swap_duplicate(). 3539 */ 3540 int swapcache_prepare(swp_entry_t entry) 3541 { 3542 return __swap_duplicate(entry, SWAP_HAS_CACHE); 3543 } 3544 3545 struct swap_info_struct *swp_swap_info(swp_entry_t entry) 3546 { 3547 return swap_type_to_swap_info(swp_type(entry)); 3548 } 3549 3550 struct swap_info_struct *page_swap_info(struct page *page) 3551 { 3552 swp_entry_t entry = { .val = page_private(page) }; 3553 return swp_swap_info(entry); 3554 } 3555 3556 /* 3557 * out-of-line __page_file_ methods to avoid include hell. 
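 * (These are what the page_file_mapping()/page_file_index() wrappers fall
 * back to for swapcache pages; regular pages use page->mapping and
 * page->index directly.)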
3558 */ 3559 struct address_space *__page_file_mapping(struct page *page) 3560 { 3561 return page_swap_info(page)->swap_file->f_mapping; 3562 } 3563 EXPORT_SYMBOL_GPL(__page_file_mapping); 3564 3565 pgoff_t __page_file_index(struct page *page) 3566 { 3567 swp_entry_t swap = { .val = page_private(page) }; 3568 return swp_offset(swap); 3569 } 3570 EXPORT_SYMBOL_GPL(__page_file_index); 3571 3572 /* 3573 * add_swap_count_continuation - called when a swap count is duplicated 3574 * beyond SWAP_MAP_MAX, it allocates a new page and links that to the entry's 3575 * page of the original vmalloc'ed swap_map, to hold the continuation count 3576 * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called 3577 * again when count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc. 3578 * 3579 * These continuation pages are seldom referenced: the common paths all work 3580 * on the original swap_map, only referring to a continuation page when the 3581 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX. 3582 * 3583 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding 3584 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL) 3585 * can be called after dropping locks. 3586 */ 3587 int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask) 3588 { 3589 struct swap_info_struct *si; 3590 struct swap_cluster_info *ci; 3591 struct page *head; 3592 struct page *page; 3593 struct page *list_page; 3594 pgoff_t offset; 3595 unsigned char count; 3596 int ret = 0; 3597 3598 /* 3599 * When debugging, it's easier to use __GFP_ZERO here; but it's better 3600 * for latency not to zero a page while GFP_ATOMIC and holding locks. 3601 */ 3602 page = alloc_page(gfp_mask | __GFP_HIGHMEM); 3603 3604 si = get_swap_device(entry); 3605 if (!si) { 3606 /* 3607 * An acceptable race has occurred since the failing 3608 * __swap_duplicate(): the swap device may be swapoff 3609 */ 3610 goto outer; 3611 } 3612 spin_lock(&si->lock); 3613 3614 offset = swp_offset(entry); 3615 3616 ci = lock_cluster(si, offset); 3617 3618 count = si->swap_map[offset] & ~SWAP_HAS_CACHE; 3619 3620 if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) { 3621 /* 3622 * The higher the swap count, the more likely it is that tasks 3623 * will race to add swap count continuation: we need to avoid 3624 * over-provisioning. 3625 */ 3626 goto out; 3627 } 3628 3629 if (!page) { 3630 ret = -ENOMEM; 3631 goto out; 3632 } 3633 3634 /* 3635 * We are fortunate that although vmalloc_to_page uses pte_offset_map, 3636 * no architecture is using highmem pages for kernel page tables: so it 3637 * will not corrupt the GFP_ATOMIC caller's atomic page table kmaps. 3638 */ 3639 head = vmalloc_to_page(si->swap_map + offset); 3640 offset &= ~PAGE_MASK; 3641 3642 spin_lock(&si->cont_lock); 3643 /* 3644 * Page allocation does not initialize the page's lru field, 3645 * but it does always reset its private field. 3646 */ 3647 if (!page_private(head)) { 3648 BUG_ON(count & COUNT_CONTINUED); 3649 INIT_LIST_HEAD(&head->lru); 3650 set_page_private(head, SWP_CONTINUED); 3651 si->flags |= SWP_CONTINUED; 3652 } 3653 3654 list_for_each_entry(list_page, &head->lru, lru) { 3655 unsigned char *map; 3656 3657 /* 3658 * If the previous map said no continuation, but we've found 3659 * a continuation page, free our allocation and use this one. 
3660 */ 3661 if (!(count & COUNT_CONTINUED)) 3662 goto out_unlock_cont; 3663 3664 map = kmap_atomic(list_page) + offset; 3665 count = *map; 3666 kunmap_atomic(map); 3667 3668 /* 3669 * If this continuation count now has some space in it, 3670 * free our allocation and use this one. 3671 */ 3672 if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX) 3673 goto out_unlock_cont; 3674 } 3675 3676 list_add_tail(&page->lru, &head->lru); 3677 page = NULL; /* now it's attached, don't free it */ 3678 out_unlock_cont: 3679 spin_unlock(&si->cont_lock); 3680 out: 3681 unlock_cluster(ci); 3682 spin_unlock(&si->lock); 3683 put_swap_device(si); 3684 outer: 3685 if (page) 3686 __free_page(page); 3687 return ret; 3688 } 3689 3690 /* 3691 * swap_count_continued - when the original swap_map count is incremented 3692 * from SWAP_MAP_MAX, check if there is already a continuation page to carry 3693 * into, carry if so, or else fail until a new continuation page is allocated; 3694 * when the original swap_map count is decremented from 0 with continuation, 3695 * borrow from the continuation and report whether it still holds more. 3696 * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster 3697 * lock. 3698 */ 3699 static bool swap_count_continued(struct swap_info_struct *si, 3700 pgoff_t offset, unsigned char count) 3701 { 3702 struct page *head; 3703 struct page *page; 3704 unsigned char *map; 3705 bool ret; 3706 3707 head = vmalloc_to_page(si->swap_map + offset); 3708 if (page_private(head) != SWP_CONTINUED) { 3709 BUG_ON(count & COUNT_CONTINUED); 3710 return false; /* need to add count continuation */ 3711 } 3712 3713 spin_lock(&si->cont_lock); 3714 offset &= ~PAGE_MASK; 3715 page = list_next_entry(head, lru); 3716 map = kmap_atomic(page) + offset; 3717 3718 if (count == SWAP_MAP_MAX) /* initial increment from swap_map */ 3719 goto init_map; /* jump over SWAP_CONT_MAX checks */ 3720 3721 if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */ 3722 /* 3723 * Think of how you add 1 to 999 3724 */ 3725 while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) { 3726 kunmap_atomic(map); 3727 page = list_next_entry(page, lru); 3728 BUG_ON(page == head); 3729 map = kmap_atomic(page) + offset; 3730 } 3731 if (*map == SWAP_CONT_MAX) { 3732 kunmap_atomic(map); 3733 page = list_next_entry(page, lru); 3734 if (page == head) { 3735 ret = false; /* add count continuation */ 3736 goto out; 3737 } 3738 map = kmap_atomic(page) + offset; 3739 init_map: *map = 0; /* we didn't zero the page */ 3740 } 3741 *map += 1; 3742 kunmap_atomic(map); 3743 while ((page = list_prev_entry(page, lru)) != head) { 3744 map = kmap_atomic(page) + offset; 3745 *map = COUNT_CONTINUED; 3746 kunmap_atomic(map); 3747 } 3748 ret = true; /* incremented */ 3749 3750 } else { /* decrementing */ 3751 /* 3752 * Think of how you subtract 1 from 1000 3753 */ 3754 BUG_ON(count != COUNT_CONTINUED); 3755 while (*map == COUNT_CONTINUED) { 3756 kunmap_atomic(map); 3757 page = list_next_entry(page, lru); 3758 BUG_ON(page == head); 3759 map = kmap_atomic(page) + offset; 3760 } 3761 BUG_ON(*map == 0); 3762 *map -= 1; 3763 if (*map == 0) 3764 count = 0; 3765 kunmap_atomic(map); 3766 while ((page = list_prev_entry(page, lru)) != head) { 3767 map = kmap_atomic(page) + offset; 3768 *map = SWAP_CONT_MAX | count; 3769 count = COUNT_CONTINUED; 3770 kunmap_atomic(map); 3771 } 3772 ret = count == COUNT_CONTINUED; 3773 } 3774 out: 3775 spin_unlock(&si->cont_lock); 3776 return ret; 3777 } 3778 3779 /* 3780 * free_swap_count_continuations - swapoff free all 
the continuation pages 3781 * appended to the swap_map, after swap_map is quiesced, before vfree'ing it. 3782 */ 3783 static void free_swap_count_continuations(struct swap_info_struct *si) 3784 { 3785 pgoff_t offset; 3786 3787 for (offset = 0; offset < si->max; offset += PAGE_SIZE) { 3788 struct page *head; 3789 head = vmalloc_to_page(si->swap_map + offset); 3790 if (page_private(head)) { 3791 struct page *page, *next; 3792 3793 list_for_each_entry_safe(page, next, &head->lru, lru) { 3794 list_del(&page->lru); 3795 __free_page(page); 3796 } 3797 } 3798 } 3799 } 3800 3801 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) 3802 void cgroup_throttle_swaprate(struct page *page, gfp_t gfp_mask) 3803 { 3804 struct swap_info_struct *si, *next; 3805 int nid = page_to_nid(page); 3806 3807 if (!(gfp_mask & __GFP_IO)) 3808 return; 3809 3810 if (!blk_cgroup_congested()) 3811 return; 3812 3813 /* 3814 * We've already scheduled a throttle, avoid taking the global swap 3815 * lock. 3816 */ 3817 if (current->throttle_queue) 3818 return; 3819 3820 spin_lock(&swap_avail_lock); 3821 plist_for_each_entry_safe(si, next, &swap_avail_heads[nid], 3822 avail_lists[nid]) { 3823 if (si->bdev) { 3824 blkcg_schedule_throttle(bdev_get_queue(si->bdev), true); 3825 break; 3826 } 3827 } 3828 spin_unlock(&swap_avail_lock); 3829 } 3830 #endif 3831 3832 static int __init swapfile_init(void) 3833 { 3834 int nid; 3835 3836 swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head), 3837 GFP_KERNEL); 3838 if (!swap_avail_heads) { 3839 pr_emerg("Not enough memory for swap heads, swap is disabled\n"); 3840 return -ENOMEM; 3841 } 3842 3843 for_each_node(nid) 3844 plist_head_init(&swap_avail_heads[nid]); 3845 3846 return 0; 3847 } 3848 subsys_initcall(swapfile_init); 3849