1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * linux/mm/swapfile.c
4  *
5  * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
6  * Swap reorganised 29.12.95, Stephen Tweedie
7  */
8
9 #include <linux/blkdev.h>
10 #include <linux/mm.h>
11 #include <linux/sched/mm.h>
12 #include <linux/sched/task.h>
13 #include <linux/hugetlb.h>
14 #include <linux/mman.h>
15 #include <linux/slab.h>
16 #include <linux/kernel_stat.h>
17 #include <linux/swap.h>
18 #include <linux/vmalloc.h>
19 #include <linux/pagemap.h>
20 #include <linux/namei.h>
21 #include <linux/shmem_fs.h>
22 #include <linux/blk-cgroup.h>
23 #include <linux/random.h>
24 #include <linux/writeback.h>
25 #include <linux/proc_fs.h>
26 #include <linux/seq_file.h>
27 #include <linux/init.h>
28 #include <linux/ksm.h>
29 #include <linux/rmap.h>
30 #include <linux/security.h>
31 #include <linux/backing-dev.h>
32 #include <linux/mutex.h>
33 #include <linux/capability.h>
34 #include <linux/syscalls.h>
35 #include <linux/memcontrol.h>
36 #include <linux/poll.h>
37 #include <linux/oom.h>
38 #include <linux/swapfile.h>
39 #include <linux/export.h>
40 #include <linux/sort.h>
41 #include <linux/completion.h>
42 #include <linux/suspend.h>
43 #include <linux/zswap.h>
44 #include <linux/plist.h>
45
46 #include <asm/tlbflush.h>
47 #include <linux/leafops.h>
48 #include <linux/swap_cgroup.h>
49 #include "swap_table.h"
50 #include "internal.h"
51 #include "swap.h"
52
53 static void swap_range_alloc(struct swap_info_struct *si,
54 			     unsigned int nr_entries);
55 static bool folio_swapcache_freeable(struct folio *folio);
56 static void move_cluster(struct swap_info_struct *si,
57 			 struct swap_cluster_info *ci, struct list_head *list,
58 			 enum swap_cluster_flags new_flags);
59
60 /*
61  * Protects the swap_info array, and the SWP_USED flag. swap_info contains
62  * lazily allocated & freed swap device info structs, and SWP_USED indicates
63  * which device is in use; ~SWP_USED devices can be reused.
64  *
65  * Also protects swap_active_head, total_swap_pages, and the SWP_WRITEOK flag.
66  */
67 static DEFINE_SPINLOCK(swap_lock);
68 static unsigned int nr_swapfiles;
69 atomic_long_t nr_swap_pages;
70 /*
71  * Some modules use swappable objects and may try to swap them out under
72  * memory pressure (via the shrinker). Before doing so, they may wish to
73  * check to see if any swap space is available.
74  */
75 EXPORT_SYMBOL_GPL(nr_swap_pages);
76 /* protected with swap_lock. Reading it in vm_swap_full() doesn't need the lock */
77 long total_swap_pages;
78 #define DEF_SWAP_PRIO -1
79 unsigned long swapfile_maximum_size;
80 #ifdef CONFIG_MIGRATION
81 bool swap_migration_ad_supported;
82 #endif /* CONFIG_MIGRATION */
83
84 static const char Bad_file[] = "Bad swap file entry ";
85 static const char Bad_offset[] = "Bad swap offset entry ";
86
87 /*
88  * all active swap_info_structs,
89  * protected with swap_lock, and ordered by priority.
90  */
91 static PLIST_HEAD(swap_active_head);
92
93 /*
94  * all available (active, not full) swap_info_structs,
95  * protected with swap_avail_lock, ordered by priority.
96  * This is used by folio_alloc_swap() instead of swap_active_head
97  * because swap_active_head includes all swap_info_structs,
98  * but folio_alloc_swap() doesn't need to look at full ones.
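 *
 * As a rough illustration (simplified from swap_alloc_slow() further
 * below, not an additional API), allocation walks this list in priority
 * order and rotates devices of equal priority:
 *
 *	plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) {
 *		plist_requeue(&si->avail_list, &swap_avail_head);
 *		... try to allocate from si, stop once the folio has swap ...
 *	}
 *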
99 * This uses its own lock instead of swap_lock because when a 100 * swap_info_struct changes between not-full/full, it needs to 101 * add/remove itself to/from this list, but the swap_info_struct->lock 102 * is held and the locking order requires swap_lock to be taken 103 * before any swap_info_struct->lock. 104 */ 105 static PLIST_HEAD(swap_avail_head); 106 static DEFINE_SPINLOCK(swap_avail_lock); 107 108 struct swap_info_struct *swap_info[MAX_SWAPFILES]; 109 110 static struct kmem_cache *swap_table_cachep; 111 112 /* Protects si->swap_file for /proc/swaps usage */ 113 static DEFINE_MUTEX(swapon_mutex); 114 115 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait); 116 /* Activity counter to indicate that a swapon or swapoff has occurred */ 117 static atomic_t proc_poll_event = ATOMIC_INIT(0); 118 119 atomic_t nr_rotate_swap = ATOMIC_INIT(0); 120 121 struct percpu_swap_cluster { 122 struct swap_info_struct *si[SWAP_NR_ORDERS]; 123 unsigned long offset[SWAP_NR_ORDERS]; 124 local_lock_t lock; 125 }; 126 127 static DEFINE_PER_CPU(struct percpu_swap_cluster, percpu_swap_cluster) = { 128 .si = { NULL }, 129 .offset = { SWAP_ENTRY_INVALID }, 130 .lock = INIT_LOCAL_LOCK(), 131 }; 132 133 /* May return NULL on invalid type, caller must check for NULL return */ 134 static struct swap_info_struct *swap_type_to_info(int type) 135 { 136 if (type >= MAX_SWAPFILES) 137 return NULL; 138 return READ_ONCE(swap_info[type]); /* rcu_dereference() */ 139 } 140 141 /* May return NULL on invalid entry, caller must check for NULL return */ 142 static struct swap_info_struct *swap_entry_to_info(swp_entry_t entry) 143 { 144 return swap_type_to_info(swp_type(entry)); 145 } 146 147 /* 148 * Use the second highest bit of inuse_pages counter as the indicator 149 * if one swap device is on the available plist, so the atomic can 150 * still be updated arithmetically while having special data embedded. 151 * 152 * inuse_pages counter is the only thing indicating if a device should 153 * be on avail_lists or not (except swapon / swapoff). By embedding the 154 * off-list bit in the atomic counter, updates no longer need any lock 155 * to check the list status. 156 * 157 * This bit will be set if the device is not on the plist and not 158 * usable, will be cleared if the device is on the plist. 159 */ 160 #define SWAP_USAGE_OFFLIST_BIT (1UL << (BITS_PER_TYPE(atomic_t) - 2)) 161 #define SWAP_USAGE_COUNTER_MASK (~SWAP_USAGE_OFFLIST_BIT) 162 static long swap_usage_in_pages(struct swap_info_struct *si) 163 { 164 return atomic_long_read(&si->inuse_pages) & SWAP_USAGE_COUNTER_MASK; 165 } 166 167 /* Reclaim the swap entry anyway if possible */ 168 #define TTRS_ANYWAY 0x1 169 /* 170 * Reclaim the swap entry if there are no more mappings of the 171 * corresponding page 172 */ 173 #define TTRS_UNMAPPED 0x2 174 /* Reclaim the swap entry if swap is getting full */ 175 #define TTRS_FULL 0x4 176 177 static bool swap_only_has_cache(struct swap_cluster_info *ci, 178 unsigned long offset, int nr_pages) 179 { 180 unsigned int ci_off = offset % SWAPFILE_CLUSTER; 181 unsigned int ci_end = ci_off + nr_pages; 182 unsigned long swp_tb; 183 184 do { 185 swp_tb = __swap_table_get(ci, ci_off); 186 VM_WARN_ON_ONCE(!swp_tb_is_folio(swp_tb)); 187 if (swp_tb_get_count(swp_tb)) 188 return false; 189 } while (++ci_off < ci_end); 190 191 return true; 192 } 193 194 /* 195 * returns number of pages in the folio that backs the swap entry. If positive, 196 * the folio was reclaimed. If negative, the folio was not reclaimed. 
If 0, no 197 * folio was associated with the swap entry. 198 */ 199 static int __try_to_reclaim_swap(struct swap_info_struct *si, 200 unsigned long offset, unsigned long flags) 201 { 202 const swp_entry_t entry = swp_entry(si->type, offset); 203 struct swap_cluster_info *ci; 204 struct folio *folio; 205 int ret, nr_pages; 206 bool need_reclaim; 207 208 again: 209 folio = swap_cache_get_folio(entry); 210 if (!folio) 211 return 0; 212 213 nr_pages = folio_nr_pages(folio); 214 ret = -nr_pages; 215 216 /* 217 * We hold a folio lock here. We have to use trylock for 218 * avoiding deadlock. This is a special case and you should 219 * use folio_free_swap() with explicit folio_lock() in usual 220 * operations. 221 */ 222 if (!folio_trylock(folio)) 223 goto out; 224 225 /* 226 * Offset could point to the middle of a large folio, or folio 227 * may no longer point to the expected offset before it's locked. 228 */ 229 if (!folio_matches_swap_entry(folio, entry)) { 230 folio_unlock(folio); 231 folio_put(folio); 232 goto again; 233 } 234 offset = swp_offset(folio->swap); 235 236 need_reclaim = ((flags & TTRS_ANYWAY) || 237 ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) || 238 ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio))); 239 if (!need_reclaim || !folio_swapcache_freeable(folio)) 240 goto out_unlock; 241 242 /* 243 * It's safe to delete the folio from swap cache only if the folio 244 * is in swap cache with swap count == 0. The slots have no page table 245 * reference or pending writeback, and can't be allocated to others. 246 */ 247 ci = swap_cluster_lock(si, offset); 248 need_reclaim = swap_only_has_cache(ci, offset, nr_pages); 249 swap_cluster_unlock(ci); 250 if (!need_reclaim) 251 goto out_unlock; 252 253 swap_cache_del_folio(folio); 254 folio_set_dirty(folio); 255 ret = nr_pages; 256 out_unlock: 257 folio_unlock(folio); 258 out: 259 folio_put(folio); 260 return ret; 261 } 262 263 static inline struct swap_extent *first_se(struct swap_info_struct *sis) 264 { 265 struct rb_node *rb = rb_first(&sis->swap_extent_root); 266 return rb_entry(rb, struct swap_extent, rb_node); 267 } 268 269 static inline struct swap_extent *next_se(struct swap_extent *se) 270 { 271 struct rb_node *rb = rb_next(&se->rb_node); 272 return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL; 273 } 274 275 /* 276 * swapon tell device that all the old swap contents can be discarded, 277 * to allow the swap device to optimize its wear-levelling. 278 */ 279 static int discard_swap(struct swap_info_struct *si) 280 { 281 struct swap_extent *se; 282 sector_t start_block; 283 sector_t nr_blocks; 284 int err = 0; 285 286 /* Do not discard the swap header page! 
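	 * (The first page of the swap area holds the swap header and its
	 *  signature, which is why the first extent below starts discarding
	 *  at se->start_block + 1. The << (PAGE_SHIFT - 9) shifts convert
	 *  page numbers to 512-byte sectors; with 4K pages, one page spans
	 *  8 sectors.)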
*/ 287 se = first_se(si); 288 start_block = (se->start_block + 1) << (PAGE_SHIFT - 9); 289 nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9); 290 if (nr_blocks) { 291 err = blkdev_issue_discard(si->bdev, start_block, 292 nr_blocks, GFP_KERNEL); 293 if (err) 294 return err; 295 cond_resched(); 296 } 297 298 for (se = next_se(se); se; se = next_se(se)) { 299 start_block = se->start_block << (PAGE_SHIFT - 9); 300 nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9); 301 302 err = blkdev_issue_discard(si->bdev, start_block, 303 nr_blocks, GFP_KERNEL); 304 if (err) 305 break; 306 307 cond_resched(); 308 } 309 return err; /* That will often be -EOPNOTSUPP */ 310 } 311 312 static struct swap_extent * 313 offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset) 314 { 315 struct swap_extent *se; 316 struct rb_node *rb; 317 318 rb = sis->swap_extent_root.rb_node; 319 while (rb) { 320 se = rb_entry(rb, struct swap_extent, rb_node); 321 if (offset < se->start_page) 322 rb = rb->rb_left; 323 else if (offset >= se->start_page + se->nr_pages) 324 rb = rb->rb_right; 325 else 326 return se; 327 } 328 /* It *must* be present */ 329 BUG(); 330 } 331 332 sector_t swap_folio_sector(struct folio *folio) 333 { 334 struct swap_info_struct *sis = __swap_entry_to_info(folio->swap); 335 struct swap_extent *se; 336 sector_t sector; 337 pgoff_t offset; 338 339 offset = swp_offset(folio->swap); 340 se = offset_to_swap_extent(sis, offset); 341 sector = se->start_block + (offset - se->start_page); 342 return sector << (PAGE_SHIFT - 9); 343 } 344 345 /* 346 * swap allocation tell device that a cluster of swap can now be discarded, 347 * to allow the swap device to optimize its wear-levelling. 348 */ 349 static void discard_swap_cluster(struct swap_info_struct *si, 350 pgoff_t start_page, pgoff_t nr_pages) 351 { 352 struct swap_extent *se = offset_to_swap_extent(si, start_page); 353 354 while (nr_pages) { 355 pgoff_t offset = start_page - se->start_page; 356 sector_t start_block = se->start_block + offset; 357 sector_t nr_blocks = se->nr_pages - offset; 358 359 if (nr_blocks > nr_pages) 360 nr_blocks = nr_pages; 361 start_page += nr_blocks; 362 nr_pages -= nr_blocks; 363 364 start_block <<= PAGE_SHIFT - 9; 365 nr_blocks <<= PAGE_SHIFT - 9; 366 if (blkdev_issue_discard(si->bdev, start_block, 367 nr_blocks, GFP_NOIO)) 368 break; 369 370 se = next_se(se); 371 } 372 } 373 374 #define LATENCY_LIMIT 256 375 376 static inline bool cluster_is_empty(struct swap_cluster_info *info) 377 { 378 return info->count == 0; 379 } 380 381 static inline bool cluster_is_discard(struct swap_cluster_info *info) 382 { 383 return info->flags == CLUSTER_FLAG_DISCARD; 384 } 385 386 static inline bool cluster_table_is_alloced(struct swap_cluster_info *ci) 387 { 388 return rcu_dereference_protected(ci->table, lockdep_is_held(&ci->lock)); 389 } 390 391 static inline bool cluster_is_usable(struct swap_cluster_info *ci, int order) 392 { 393 if (unlikely(ci->flags > CLUSTER_FLAG_USABLE)) 394 return false; 395 if (!cluster_table_is_alloced(ci)) 396 return false; 397 if (!order) 398 return true; 399 return cluster_is_empty(ci) || order == ci->order; 400 } 401 402 static inline unsigned int cluster_index(struct swap_info_struct *si, 403 struct swap_cluster_info *ci) 404 { 405 return ci - si->cluster_info; 406 } 407 408 static inline unsigned int cluster_offset(struct swap_info_struct *si, 409 struct swap_cluster_info *ci) 410 { 411 return cluster_index(si, ci) * SWAPFILE_CLUSTER; 412 } 413 414 static struct swap_table 
*swap_table_alloc(gfp_t gfp) 415 { 416 struct folio *folio; 417 418 if (!SWP_TABLE_USE_PAGE) 419 return kmem_cache_zalloc(swap_table_cachep, gfp); 420 421 folio = folio_alloc(gfp | __GFP_ZERO, 0); 422 if (folio) 423 return folio_address(folio); 424 return NULL; 425 } 426 427 static void swap_table_free_folio_rcu_cb(struct rcu_head *head) 428 { 429 struct folio *folio; 430 431 folio = page_folio(container_of(head, struct page, rcu_head)); 432 folio_put(folio); 433 } 434 435 static void swap_table_free(struct swap_table *table) 436 { 437 if (!SWP_TABLE_USE_PAGE) { 438 kmem_cache_free(swap_table_cachep, table); 439 return; 440 } 441 442 call_rcu(&(folio_page(virt_to_folio(table), 0)->rcu_head), 443 swap_table_free_folio_rcu_cb); 444 } 445 446 /* 447 * Sanity check to ensure nothing leaked, and the specified range is empty. 448 * One special case is that bad slots can't be freed, so check the number of 449 * bad slots for swapoff, and non-swapoff path must never free bad slots. 450 */ 451 static void swap_cluster_assert_empty(struct swap_cluster_info *ci, 452 unsigned int ci_off, unsigned int nr, 453 bool swapoff) 454 { 455 unsigned int ci_end = ci_off + nr; 456 unsigned long swp_tb; 457 int bad_slots = 0; 458 459 if (!IS_ENABLED(CONFIG_DEBUG_VM) && !swapoff) 460 return; 461 462 do { 463 swp_tb = __swap_table_get(ci, ci_off); 464 if (swp_tb_is_bad(swp_tb)) 465 bad_slots++; 466 else 467 WARN_ON_ONCE(!swp_tb_is_null(swp_tb)); 468 } while (++ci_off < ci_end); 469 470 WARN_ON_ONCE(bad_slots != (swapoff ? ci->count : 0)); 471 WARN_ON_ONCE(nr == SWAPFILE_CLUSTER && ci->extend_table); 472 } 473 474 static void swap_cluster_free_table(struct swap_cluster_info *ci) 475 { 476 struct swap_table *table; 477 478 /* Only empty cluster's table is allow to be freed */ 479 lockdep_assert_held(&ci->lock); 480 table = (void *)rcu_dereference_protected(ci->table, true); 481 rcu_assign_pointer(ci->table, NULL); 482 483 swap_table_free(table); 484 } 485 486 /* 487 * Allocate swap table for one cluster. Attempt an atomic allocation first, 488 * then fallback to sleeping allocation. 489 */ 490 static struct swap_cluster_info * 491 swap_cluster_alloc_table(struct swap_info_struct *si, 492 struct swap_cluster_info *ci) 493 { 494 struct swap_table *table; 495 496 /* 497 * Only cluster isolation from the allocator does table allocation. 498 * Swap allocator uses percpu clusters and holds the local lock. 499 */ 500 lockdep_assert_held(&this_cpu_ptr(&percpu_swap_cluster)->lock); 501 if (!(si->flags & SWP_SOLIDSTATE)) 502 lockdep_assert_held(&si->global_cluster_lock); 503 lockdep_assert_held(&ci->lock); 504 505 /* The cluster must be free and was just isolated from the free list. */ 506 VM_WARN_ON_ONCE(ci->flags || !cluster_is_empty(ci)); 507 508 table = swap_table_alloc(__GFP_HIGH | __GFP_NOMEMALLOC | __GFP_NOWARN); 509 if (table) { 510 rcu_assign_pointer(ci->table, table); 511 return ci; 512 } 513 514 /* 515 * Try a sleep allocation. Each isolated free cluster may cause 516 * a sleep allocation, but there is a limited number of them, so 517 * the potential recursive allocation is limited. 518 */ 519 spin_unlock(&ci->lock); 520 if (!(si->flags & SWP_SOLIDSTATE)) 521 spin_unlock(&si->global_cluster_lock); 522 local_unlock(&percpu_swap_cluster.lock); 523 524 table = swap_table_alloc(__GFP_HIGH | __GFP_NOMEMALLOC | GFP_KERNEL); 525 526 /* 527 * Back to atomic context. We might have migrated to a new CPU with a 528 * usable percpu cluster. But just keep using the isolated cluster to 529 * make things easier. 
 * Migration indicates a slight change of workload, so using a new
530  * free cluster might not be a bad idea. The worst that can happen from
531  * ignoring the percpu cluster is fragmentation, which is acceptable
532  * since this fallback and race are rare.
533  */
534 	local_lock(&percpu_swap_cluster.lock);
535 	if (!(si->flags & SWP_SOLIDSTATE))
536 		spin_lock(&si->global_cluster_lock);
537 	spin_lock(&ci->lock);
538
539 	/* Nothing except this helper should touch a dangling empty cluster. */
540 	if (WARN_ON_ONCE(cluster_table_is_alloced(ci))) {
541 		if (table)
542 			swap_table_free(table);
543 		return ci;
544 	}
545
546 	if (!table) {
547 		move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE);
548 		spin_unlock(&ci->lock);
549 		return NULL;
550 	}
551
552 	rcu_assign_pointer(ci->table, table);
553 	return ci;
554 }
555
556 static void move_cluster(struct swap_info_struct *si,
557 			 struct swap_cluster_info *ci, struct list_head *list,
558 			 enum swap_cluster_flags new_flags)
559 {
560 	VM_WARN_ON(ci->flags == new_flags);
561
562 	BUILD_BUG_ON(1 << sizeof(ci->flags) * BITS_PER_BYTE < CLUSTER_FLAG_MAX);
563 	lockdep_assert_held(&ci->lock);
564
565 	spin_lock(&si->lock);
566 	if (ci->flags == CLUSTER_FLAG_NONE)
567 		list_add_tail(&ci->list, list);
568 	else
569 		list_move_tail(&ci->list, list);
570 	spin_unlock(&si->lock);
571 	ci->flags = new_flags;
572 }
573
574 /* Add a cluster to the discard list and schedule it to be discarded */
575 static void swap_cluster_schedule_discard(struct swap_info_struct *si,
576 					  struct swap_cluster_info *ci)
577 {
578 	VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
579 	move_cluster(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD);
580 	schedule_work(&si->discard_work);
581 }
582
583 static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
584 {
585 	swap_cluster_assert_empty(ci, 0, SWAPFILE_CLUSTER, false);
586 	swap_cluster_free_table(ci);
587 	move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE);
588 	ci->order = 0;
589 }
590
591 /*
592  * Isolate and lock the first cluster on a list that is not contended,
593  * clearing its flag before it is taken off-list. The cluster flag must be
594  * in sync with the list status, so cluster updaters can always know the
595  * cluster list status without touching the si lock.
596  *
597  * Note it's possible that all clusters on a list are contended, so
598  * this may return NULL for a non-empty list.
599  */
600 static struct swap_cluster_info *isolate_lock_cluster(
601 		struct swap_info_struct *si, struct list_head *list)
602 {
603 	struct swap_cluster_info *ci, *found = NULL;
604 	u8 flags = CLUSTER_FLAG_NONE;
605
606 	spin_lock(&si->lock);
607 	list_for_each_entry(ci, list, list) {
608 		if (!spin_trylock(&ci->lock))
609 			continue;
610
611 		/* We may only isolate and clear flags of the following lists */
612 		VM_BUG_ON(!ci->flags);
613 		VM_BUG_ON(ci->flags > CLUSTER_FLAG_USABLE &&
614 			  ci->flags != CLUSTER_FLAG_FULL);
615
616 		list_del(&ci->list);
617 		flags = ci->flags;
618 		ci->flags = CLUSTER_FLAG_NONE;
619 		found = ci;
620 		break;
621 	}
622 	spin_unlock(&si->lock);
623
624 	if (found && !cluster_table_is_alloced(found)) {
625 		/* Only an empty free cluster's swap table can be freed. */
626 		VM_WARN_ON_ONCE(flags != CLUSTER_FLAG_FREE);
627 		VM_WARN_ON_ONCE(list != &si->free_clusters);
628 		VM_WARN_ON_ONCE(!cluster_is_empty(found));
629 		return swap_cluster_alloc_table(si, found);
630 	}
631
632 	return found;
633 }
634
635 /*
636  * Do the actual discard work. After a cluster discard is finished, the
637  * cluster will be added to the free cluster list.
 * Discard clusters are a bit special: they don't participate in
638  * allocation or reclaim, so clusters marked as CLUSTER_FLAG_DISCARD
639  * must remain off-list or on the discard list.
640  */
641 static bool swap_do_scheduled_discard(struct swap_info_struct *si)
642 {
643 	struct swap_cluster_info *ci;
644 	bool ret = false;
645 	unsigned int idx;
646
647 	spin_lock(&si->lock);
648 	while (!list_empty(&si->discard_clusters)) {
649 		ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list);
650 		/*
651 		 * Delete the cluster from the list to prepare for discard, but keep
652 		 * the CLUSTER_FLAG_DISCARD flag: percpu_swap_cluster could still be
653 		 * pointing to it, or it could be run into by relocate_cluster.
654 		 */
655 		list_del(&ci->list);
656 		idx = cluster_index(si, ci);
657 		spin_unlock(&si->lock);
658 		discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
659 				     SWAPFILE_CLUSTER);
660
661 		spin_lock(&ci->lock);
662 		/*
663 		 * Discard is done, clear its flags as it's now off-list, then
664 		 * return the cluster to the allocation list.
665 		 */
666 		ci->flags = CLUSTER_FLAG_NONE;
667 		__free_cluster(si, ci);
668 		spin_unlock(&ci->lock);
669 		ret = true;
670 		spin_lock(&si->lock);
671 	}
672 	spin_unlock(&si->lock);
673 	return ret;
674 }
675
676 static void swap_discard_work(struct work_struct *work)
677 {
678 	struct swap_info_struct *si;
679
680 	si = container_of(work, struct swap_info_struct, discard_work);
681
682 	swap_do_scheduled_discard(si);
683 }
684
685 static void swap_users_ref_free(struct percpu_ref *ref)
686 {
687 	struct swap_info_struct *si;
688
689 	si = container_of(ref, struct swap_info_struct, users);
690 	complete(&si->comp);
691 }
692
693 /*
694  * Must be called after freeing if ci->count == 0; moves the cluster to the
695  * free or discard list.
696  */
697 static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
698 {
699 	VM_BUG_ON(ci->count != 0);
700 	VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
701 	lockdep_assert_held(&ci->lock);
702
703 	/*
704 	 * If the swap is discardable, prepare to discard the cluster
705 	 * instead of freeing it immediately. The cluster will be freed
706 	 * after the discard.
707 	 */
708 	if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
709 	    (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
710 		swap_cluster_schedule_discard(si, ci);
711 		return;
712 	}
713
714 	__free_cluster(si, ci);
715 }
716
717 /*
718  * Must be called after freeing if ci->count != 0; moves the cluster to the
719  * nonfull list.
720  */
721 static void partial_free_cluster(struct swap_info_struct *si,
722 				 struct swap_cluster_info *ci)
723 {
724 	VM_BUG_ON(!ci->count || ci->count == SWAPFILE_CLUSTER);
725 	lockdep_assert_held(&ci->lock);
726
727 	if (ci->flags != CLUSTER_FLAG_NONFULL)
728 		move_cluster(si, ci, &si->nonfull_clusters[ci->order],
729 			     CLUSTER_FLAG_NONFULL);
730 }
731
732 /*
733  * Must be called after allocation; moves the cluster to the full or frag
734  * list. Note: allocation doesn't acquire the si lock, and may drop the ci
735  * lock for reclaim, so the cluster could be anywhere when called.
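 *
 * As a quick reference (mirroring the checks in the function body below),
 * the cluster ends up on:
 *
 *	ci->count == 0                      -> free list (or discard list first)
 *	0 < ci->count < SWAPFILE_CLUSTER    -> frag list of ci->order
 *	ci->count == SWAPFILE_CLUSTER       -> full list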
736 */ 737 static void relocate_cluster(struct swap_info_struct *si, 738 struct swap_cluster_info *ci) 739 { 740 lockdep_assert_held(&ci->lock); 741 742 /* Discard cluster must remain off-list or on discard list */ 743 if (cluster_is_discard(ci)) 744 return; 745 746 if (!ci->count) { 747 if (ci->flags != CLUSTER_FLAG_FREE) 748 free_cluster(si, ci); 749 } else if (ci->count != SWAPFILE_CLUSTER) { 750 if (ci->flags != CLUSTER_FLAG_FRAG) 751 move_cluster(si, ci, &si->frag_clusters[ci->order], 752 CLUSTER_FLAG_FRAG); 753 } else { 754 if (ci->flags != CLUSTER_FLAG_FULL) 755 move_cluster(si, ci, &si->full_clusters, 756 CLUSTER_FLAG_FULL); 757 } 758 } 759 760 /* 761 * The cluster corresponding to @offset will be accounted as having one bad 762 * slot. The cluster will not be added to the free cluster list, and its 763 * usage counter will be increased by 1. Only used for initialization. 764 */ 765 static int swap_cluster_setup_bad_slot(struct swap_info_struct *si, 766 struct swap_cluster_info *cluster_info, 767 unsigned int offset, bool mask) 768 { 769 unsigned int ci_off = offset % SWAPFILE_CLUSTER; 770 unsigned long idx = offset / SWAPFILE_CLUSTER; 771 struct swap_cluster_info *ci; 772 struct swap_table *table; 773 int ret = 0; 774 775 /* si->max may got shrunk by swap swap_activate() */ 776 if (offset >= si->max && !mask) { 777 pr_debug("Ignoring bad slot %u (max: %u)\n", offset, si->max); 778 return 0; 779 } 780 /* 781 * Account it, skip header slot: si->pages is initiated as 782 * si->max - 1. Also skip the masking of last cluster, 783 * si->pages doesn't include that part. 784 */ 785 if (offset && !mask) 786 si->pages -= 1; 787 if (!si->pages) { 788 pr_warn("Empty swap-file\n"); 789 return -EINVAL; 790 } 791 792 ci = cluster_info + idx; 793 if (!ci->table) { 794 table = swap_table_alloc(GFP_KERNEL); 795 if (!table) 796 return -ENOMEM; 797 rcu_assign_pointer(ci->table, table); 798 } 799 spin_lock(&ci->lock); 800 /* Check for duplicated bad swap slots. */ 801 if (__swap_table_xchg(ci, ci_off, SWP_TB_BAD) != SWP_TB_NULL) { 802 pr_warn("Duplicated bad slot offset %d\n", offset); 803 ret = -EINVAL; 804 } else { 805 ci->count++; 806 } 807 spin_unlock(&ci->lock); 808 809 WARN_ON(ci->count > SWAPFILE_CLUSTER); 810 WARN_ON(ci->flags); 811 812 return ret; 813 } 814 815 /* 816 * Reclaim drops the ci lock, so the cluster may become unusable (freed or 817 * stolen by a lower order). @usable will be set to false if that happens. 818 */ 819 static bool cluster_reclaim_range(struct swap_info_struct *si, 820 struct swap_cluster_info *ci, 821 unsigned long start, unsigned int order, 822 bool *usable) 823 { 824 unsigned int nr_pages = 1 << order; 825 unsigned long offset = start, end = start + nr_pages; 826 unsigned long swp_tb; 827 828 spin_unlock(&ci->lock); 829 do { 830 swp_tb = swap_table_get(ci, offset % SWAPFILE_CLUSTER); 831 if (swp_tb_get_count(swp_tb)) 832 break; 833 if (swp_tb_is_folio(swp_tb)) 834 if (__try_to_reclaim_swap(si, offset, TTRS_ANYWAY) < 0) 835 break; 836 } while (++offset < end); 837 spin_lock(&ci->lock); 838 839 /* 840 * We just dropped ci->lock so cluster could be used by another 841 * order or got freed, check if it's still usable or empty. 
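	 *
	 * The caller reacts roughly as follows (simplified from
	 * alloc_swap_scan_cluster() below):
	 *
	 *	ret = cluster_reclaim_range(si, ci, offset, order, &usable);
	 *	if (!usable)
	 *		goto out;	... cluster was freed or stolen ...
	 *	if (cluster_is_empty(ci))
	 *		offset = start;	... rescan from the cluster start ...
	 *	if (!ret)
	 *		continue;	... reclaim failed, try the next range ...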
842 */ 843 if (!cluster_is_usable(ci, order)) { 844 *usable = false; 845 return false; 846 } 847 *usable = true; 848 849 /* Fast path, no need to scan if the whole cluster is empty */ 850 if (cluster_is_empty(ci)) 851 return true; 852 853 /* 854 * Recheck the range no matter reclaim succeeded or not, the slot 855 * could have been be freed while we are not holding the lock. 856 */ 857 for (offset = start; offset < end; offset++) { 858 swp_tb = __swap_table_get(ci, offset % SWAPFILE_CLUSTER); 859 if (!swp_tb_is_null(swp_tb)) 860 return false; 861 } 862 863 return true; 864 } 865 866 static bool cluster_scan_range(struct swap_info_struct *si, 867 struct swap_cluster_info *ci, 868 unsigned long offset, unsigned int nr_pages, 869 bool *need_reclaim) 870 { 871 unsigned int ci_off = offset % SWAPFILE_CLUSTER; 872 unsigned int ci_end = ci_off + nr_pages; 873 unsigned long swp_tb; 874 875 do { 876 swp_tb = __swap_table_get(ci, ci_off); 877 if (swp_tb_is_null(swp_tb)) 878 continue; 879 if (swp_tb_is_folio(swp_tb) && !__swp_tb_get_count(swp_tb)) { 880 if (!vm_swap_full()) 881 return false; 882 *need_reclaim = true; 883 continue; 884 } 885 /* Slot with zero count can only be NULL or folio */ 886 VM_WARN_ON(!swp_tb_get_count(swp_tb)); 887 return false; 888 } while (++ci_off < ci_end); 889 890 return true; 891 } 892 893 static bool __swap_cluster_alloc_entries(struct swap_info_struct *si, 894 struct swap_cluster_info *ci, 895 struct folio *folio, 896 unsigned int ci_off) 897 { 898 unsigned int order; 899 unsigned long nr_pages; 900 901 lockdep_assert_held(&ci->lock); 902 903 if (!(si->flags & SWP_WRITEOK)) 904 return false; 905 906 /* 907 * All mm swap allocation starts with a folio (folio_alloc_swap), 908 * it's also the only allocation path for large orders allocation. 909 * Such swap slots starts with count == 0 and will be increased 910 * upon folio unmap. 911 * 912 * Else, it's a exclusive order 0 allocation for hibernation. 913 * The slot starts with count == 1 and never increases. 914 */ 915 if (likely(folio)) { 916 order = folio_order(folio); 917 nr_pages = 1 << order; 918 swap_cluster_assert_empty(ci, ci_off, nr_pages, false); 919 __swap_cache_add_folio(ci, folio, swp_entry(si->type, 920 ci_off + cluster_offset(si, ci))); 921 } else if (IS_ENABLED(CONFIG_HIBERNATION)) { 922 order = 0; 923 nr_pages = 1; 924 swap_cluster_assert_empty(ci, ci_off, 1, false); 925 /* Sets a fake shadow as placeholder */ 926 __swap_table_set(ci, ci_off, shadow_to_swp_tb(NULL, 1)); 927 } else { 928 /* Allocation without folio is only possible with hibernation */ 929 WARN_ON_ONCE(1); 930 return false; 931 } 932 933 /* 934 * The first allocation in a cluster makes the 935 * cluster exclusive to this order 936 */ 937 if (cluster_is_empty(ci)) 938 ci->order = order; 939 ci->count += nr_pages; 940 swap_range_alloc(si, nr_pages); 941 942 return true; 943 } 944 945 /* Try use a new cluster for current CPU and allocate from it. */ 946 static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, 947 struct swap_cluster_info *ci, 948 struct folio *folio, unsigned long offset) 949 { 950 unsigned int next = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID; 951 unsigned long start = ALIGN_DOWN(offset, SWAPFILE_CLUSTER); 952 unsigned int order = likely(folio) ? 
folio_order(folio) : 0; 953 unsigned long end = start + SWAPFILE_CLUSTER; 954 unsigned int nr_pages = 1 << order; 955 bool need_reclaim, ret, usable; 956 957 lockdep_assert_held(&ci->lock); 958 VM_WARN_ON(!cluster_is_usable(ci, order)); 959 960 if (end < nr_pages || ci->count + nr_pages > SWAPFILE_CLUSTER) 961 goto out; 962 963 for (end -= nr_pages; offset <= end; offset += nr_pages) { 964 need_reclaim = false; 965 if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim)) 966 continue; 967 if (need_reclaim) { 968 ret = cluster_reclaim_range(si, ci, offset, order, &usable); 969 if (!usable) 970 goto out; 971 if (cluster_is_empty(ci)) 972 offset = start; 973 /* Reclaim failed but cluster is usable, try next */ 974 if (!ret) 975 continue; 976 } 977 if (!__swap_cluster_alloc_entries(si, ci, folio, offset % SWAPFILE_CLUSTER)) 978 break; 979 found = offset; 980 offset += nr_pages; 981 if (ci->count < SWAPFILE_CLUSTER && offset <= end) 982 next = offset; 983 break; 984 } 985 out: 986 relocate_cluster(si, ci); 987 swap_cluster_unlock(ci); 988 if (si->flags & SWP_SOLIDSTATE) { 989 this_cpu_write(percpu_swap_cluster.offset[order], next); 990 this_cpu_write(percpu_swap_cluster.si[order], si); 991 } else { 992 si->global_cluster->next[order] = next; 993 } 994 return found; 995 } 996 997 static unsigned int alloc_swap_scan_list(struct swap_info_struct *si, 998 struct list_head *list, 999 struct folio *folio, 1000 bool scan_all) 1001 { 1002 unsigned int found = SWAP_ENTRY_INVALID; 1003 1004 do { 1005 struct swap_cluster_info *ci = isolate_lock_cluster(si, list); 1006 unsigned long offset; 1007 1008 if (!ci) 1009 break; 1010 offset = cluster_offset(si, ci); 1011 found = alloc_swap_scan_cluster(si, ci, folio, offset); 1012 if (found) 1013 break; 1014 } while (scan_all); 1015 1016 return found; 1017 } 1018 1019 static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force) 1020 { 1021 long to_scan = 1; 1022 unsigned long offset, end; 1023 struct swap_cluster_info *ci; 1024 unsigned long swp_tb; 1025 int nr_reclaim; 1026 1027 if (force) 1028 to_scan = swap_usage_in_pages(si) / SWAPFILE_CLUSTER; 1029 1030 while ((ci = isolate_lock_cluster(si, &si->full_clusters))) { 1031 offset = cluster_offset(si, ci); 1032 end = min(si->max, offset + SWAPFILE_CLUSTER); 1033 to_scan--; 1034 1035 while (offset < end) { 1036 swp_tb = swap_table_get(ci, offset % SWAPFILE_CLUSTER); 1037 if (swp_tb_is_folio(swp_tb) && !__swp_tb_get_count(swp_tb)) { 1038 spin_unlock(&ci->lock); 1039 nr_reclaim = __try_to_reclaim_swap(si, offset, 1040 TTRS_ANYWAY); 1041 spin_lock(&ci->lock); 1042 if (nr_reclaim) { 1043 offset += abs(nr_reclaim); 1044 continue; 1045 } 1046 } 1047 offset++; 1048 } 1049 1050 /* in case no swap cache is reclaimed */ 1051 if (ci->flags == CLUSTER_FLAG_NONE) 1052 relocate_cluster(si, ci); 1053 1054 swap_cluster_unlock(ci); 1055 if (to_scan <= 0) 1056 break; 1057 } 1058 } 1059 1060 static void swap_reclaim_work(struct work_struct *work) 1061 { 1062 struct swap_info_struct *si; 1063 1064 si = container_of(work, struct swap_info_struct, reclaim_work); 1065 1066 swap_reclaim_full_clusters(si, true); 1067 } 1068 1069 /* 1070 * Try to allocate swap entries with specified order and try set a new 1071 * cluster for current CPU too. 1072 */ 1073 static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, 1074 struct folio *folio) 1075 { 1076 struct swap_cluster_info *ci; 1077 unsigned int order = likely(folio) ? 
folio_order(folio) : 0; 1078 unsigned int offset = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID; 1079 1080 /* 1081 * Swapfile is not block device so unable 1082 * to allocate large entries. 1083 */ 1084 if (order && !(si->flags & SWP_BLKDEV)) 1085 return 0; 1086 1087 if (!(si->flags & SWP_SOLIDSTATE)) { 1088 /* Serialize HDD SWAP allocation for each device. */ 1089 spin_lock(&si->global_cluster_lock); 1090 offset = si->global_cluster->next[order]; 1091 if (offset == SWAP_ENTRY_INVALID) 1092 goto new_cluster; 1093 1094 ci = swap_cluster_lock(si, offset); 1095 /* Cluster could have been used by another order */ 1096 if (cluster_is_usable(ci, order)) { 1097 if (cluster_is_empty(ci)) 1098 offset = cluster_offset(si, ci); 1099 found = alloc_swap_scan_cluster(si, ci, folio, offset); 1100 } else { 1101 swap_cluster_unlock(ci); 1102 } 1103 if (found) 1104 goto done; 1105 } 1106 1107 new_cluster: 1108 /* 1109 * If the device need discard, prefer new cluster over nonfull 1110 * to spread out the writes. 1111 */ 1112 if (si->flags & SWP_PAGE_DISCARD) { 1113 found = alloc_swap_scan_list(si, &si->free_clusters, folio, false); 1114 if (found) 1115 goto done; 1116 } 1117 1118 if (order < PMD_ORDER) { 1119 found = alloc_swap_scan_list(si, &si->nonfull_clusters[order], folio, true); 1120 if (found) 1121 goto done; 1122 } 1123 1124 if (!(si->flags & SWP_PAGE_DISCARD)) { 1125 found = alloc_swap_scan_list(si, &si->free_clusters, folio, false); 1126 if (found) 1127 goto done; 1128 } 1129 1130 /* Try reclaim full clusters if free and nonfull lists are drained */ 1131 if (vm_swap_full()) 1132 swap_reclaim_full_clusters(si, false); 1133 1134 if (order < PMD_ORDER) { 1135 /* 1136 * Scan only one fragment cluster is good enough. Order 0 1137 * allocation will surely success, and large allocation 1138 * failure is not critical. Scanning one cluster still 1139 * keeps the list rotated and reclaimed (for clean swap cache). 1140 */ 1141 found = alloc_swap_scan_list(si, &si->frag_clusters[order], folio, false); 1142 if (found) 1143 goto done; 1144 } 1145 1146 if (order) 1147 goto done; 1148 1149 /* Order 0 stealing from higher order */ 1150 for (int o = 1; o < SWAP_NR_ORDERS; o++) { 1151 /* 1152 * Clusters here have at least one usable slots and can't fail order 0 1153 * allocation, but reclaim may drop si->lock and race with another user. 1154 */ 1155 found = alloc_swap_scan_list(si, &si->frag_clusters[o], folio, true); 1156 if (found) 1157 goto done; 1158 1159 found = alloc_swap_scan_list(si, &si->nonfull_clusters[o], folio, true); 1160 if (found) 1161 goto done; 1162 } 1163 done: 1164 if (!(si->flags & SWP_SOLIDSTATE)) 1165 spin_unlock(&si->global_cluster_lock); 1166 1167 return found; 1168 } 1169 1170 /* SWAP_USAGE_OFFLIST_BIT can only be set by this helper. */ 1171 static void del_from_avail_list(struct swap_info_struct *si, bool swapoff) 1172 { 1173 unsigned long pages; 1174 1175 spin_lock(&swap_avail_lock); 1176 1177 if (swapoff) { 1178 /* 1179 * Forcefully remove it. Clear the SWP_WRITEOK flags for 1180 * swapoff here so it's synchronized by both si->lock and 1181 * swap_avail_lock, to ensure the result can be seen by 1182 * add_to_avail_list. 
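		 *
		 * (add_to_avail_list() below re-checks SWP_WRITEOK under
		 *  swap_avail_lock before clearing the off-list bit, so a
		 *  device in the middle of swapoff cannot be put back on the
		 *  plist concurrently.)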
1183 */ 1184 lockdep_assert_held(&si->lock); 1185 si->flags &= ~SWP_WRITEOK; 1186 atomic_long_or(SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages); 1187 } else { 1188 /* 1189 * If not called by swapoff, take it off-list only if it's 1190 * full and SWAP_USAGE_OFFLIST_BIT is not set (strictly 1191 * si->inuse_pages == pages), any concurrent slot freeing, 1192 * or device already removed from plist by someone else 1193 * will make this return false. 1194 */ 1195 pages = si->pages; 1196 if (!atomic_long_try_cmpxchg(&si->inuse_pages, &pages, 1197 pages | SWAP_USAGE_OFFLIST_BIT)) 1198 goto skip; 1199 } 1200 1201 plist_del(&si->avail_list, &swap_avail_head); 1202 1203 skip: 1204 spin_unlock(&swap_avail_lock); 1205 } 1206 1207 /* SWAP_USAGE_OFFLIST_BIT can only be cleared by this helper. */ 1208 static void add_to_avail_list(struct swap_info_struct *si, bool swapon) 1209 { 1210 long val; 1211 unsigned long pages; 1212 1213 spin_lock(&swap_avail_lock); 1214 1215 /* Corresponding to SWP_WRITEOK clearing in del_from_avail_list */ 1216 if (swapon) { 1217 lockdep_assert_held(&si->lock); 1218 si->flags |= SWP_WRITEOK; 1219 } else { 1220 if (!(READ_ONCE(si->flags) & SWP_WRITEOK)) 1221 goto skip; 1222 } 1223 1224 if (!(atomic_long_read(&si->inuse_pages) & SWAP_USAGE_OFFLIST_BIT)) 1225 goto skip; 1226 1227 val = atomic_long_fetch_and_relaxed(~SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages); 1228 1229 /* 1230 * When device is full and device is on the plist, only one updater will 1231 * see (inuse_pages == si->pages) and will call del_from_avail_list. If 1232 * that updater happen to be here, just skip adding. 1233 */ 1234 pages = si->pages; 1235 if (val == pages) { 1236 /* Just like the cmpxchg in del_from_avail_list */ 1237 if (atomic_long_try_cmpxchg(&si->inuse_pages, &pages, 1238 pages | SWAP_USAGE_OFFLIST_BIT)) 1239 goto skip; 1240 } 1241 1242 plist_add(&si->avail_list, &swap_avail_head); 1243 1244 skip: 1245 spin_unlock(&swap_avail_lock); 1246 } 1247 1248 /* 1249 * swap_usage_add / swap_usage_sub of each slot are serialized by ci->lock 1250 * within each cluster, so the total contribution to the global counter should 1251 * always be positive and cannot exceed the total number of usable slots. 1252 */ 1253 static bool swap_usage_add(struct swap_info_struct *si, unsigned int nr_entries) 1254 { 1255 long val = atomic_long_add_return_relaxed(nr_entries, &si->inuse_pages); 1256 1257 /* 1258 * If device is full, and SWAP_USAGE_OFFLIST_BIT is not set, 1259 * remove it from the plist. 1260 */ 1261 if (unlikely(val == si->pages)) { 1262 del_from_avail_list(si, false); 1263 return true; 1264 } 1265 1266 return false; 1267 } 1268 1269 static void swap_usage_sub(struct swap_info_struct *si, unsigned int nr_entries) 1270 { 1271 long val = atomic_long_sub_return_relaxed(nr_entries, &si->inuse_pages); 1272 1273 /* 1274 * If device is not full, and SWAP_USAGE_OFFLIST_BIT is set, 1275 * add it to the plist. 
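	 *
	 * Illustrative example (numbers made up): on a device with
	 * si->pages == 1000 that just went full, inuse_pages holds
	 * 1000 | SWAP_USAGE_OFFLIST_BIT. Freeing one slot makes this
	 * subtraction return 999 | SWAP_USAGE_OFFLIST_BIT, the bit test
	 * below fires, and add_to_avail_list() clears the bit and puts
	 * the device back on the plist.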
1276  */
1277 	if (unlikely(val & SWAP_USAGE_OFFLIST_BIT))
1278 		add_to_avail_list(si, false);
1279 }
1280
1281 static void swap_range_alloc(struct swap_info_struct *si,
1282 			     unsigned int nr_entries)
1283 {
1284 	if (swap_usage_add(si, nr_entries)) {
1285 		if (vm_swap_full())
1286 			schedule_work(&si->reclaim_work);
1287 	}
1288 	atomic_long_sub(nr_entries, &nr_swap_pages);
1289 }
1290
1291 static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
1292 			    unsigned int nr_entries)
1293 {
1294 	unsigned long end = offset + nr_entries - 1;
1295 	void (*swap_slot_free_notify)(struct block_device *, unsigned long);
1296 	unsigned int i;
1297
1298 	/*
1299 	 * Use atomic clear_bit operations only on zeromap instead of non-atomic
1300 	 * bitmap_clear to prevent corruption of adjacent bits due to simultaneous writes.
1301 	 */
1302 	for (i = 0; i < nr_entries; i++) {
1303 		clear_bit(offset + i, si->zeromap);
1304 		zswap_invalidate(swp_entry(si->type, offset + i));
1305 	}
1306
1307 	if (si->flags & SWP_BLKDEV)
1308 		swap_slot_free_notify =
1309 			si->bdev->bd_disk->fops->swap_slot_free_notify;
1310 	else
1311 		swap_slot_free_notify = NULL;
1312 	while (offset <= end) {
1313 		arch_swap_invalidate_page(si->type, offset);
1314 		if (swap_slot_free_notify)
1315 			swap_slot_free_notify(si->bdev, offset);
1316 		offset++;
1317 	}
1318
1319 	/*
1320 	 * Make sure that try_to_unuse() observes si->inuse_pages reaching 0
1321 	 * only after the above cleanups are done.
1322 	 */
1323 	smp_wmb();
1324 	atomic_long_add(nr_entries, &nr_swap_pages);
1325 	swap_usage_sub(si, nr_entries);
1326 }
1327
1328 static bool get_swap_device_info(struct swap_info_struct *si)
1329 {
1330 	if (!percpu_ref_tryget_live(&si->users))
1331 		return false;
1332 	/*
1333 	 * Guarantee that si->users is checked before accessing other
1334 	 * fields of swap_info_struct, and that si->flags (SWP_WRITEOK) is
1335 	 * up to date.
1336 	 *
1337 	 * Paired with the spin_unlock() after setup_swap_info() in
1338 	 * enable_swap_info(), and smp_wmb() in swapoff.
1339 	 */
1340 	smp_rmb();
1341 	return true;
1342 }
1343
1344 /*
1345  * Fast path: try to get swap entries with the specified order from the
1346  * current CPU's swap entry pool (a cluster).
1347  */
1348 static bool swap_alloc_fast(struct folio *folio)
1349 {
1350 	unsigned int order = folio_order(folio);
1351 	struct swap_cluster_info *ci;
1352 	struct swap_info_struct *si;
1353 	unsigned int offset;
1354
1355 	/*
1356 	 * Once allocated, a swap_info_struct is never completely freed,
1357 	 * so checking its liveness with get_swap_device_info() is enough.
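	 *
	 * Roughly speaking (illustrative only), the per-CPU cache caches the
	 * last used device and a next-slot hint per allocation order:
	 *
	 *	si     = percpu_swap_cluster.si[order];
	 *	offset = percpu_swap_cluster.offset[order];
	 *
	 * Both are revalidated below before use, since the device may have
	 * been swapped off and the cluster reused since they were cached.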
1358 	 */
1359 	si = this_cpu_read(percpu_swap_cluster.si[order]);
1360 	offset = this_cpu_read(percpu_swap_cluster.offset[order]);
1361 	if (!si || !offset || !get_swap_device_info(si))
1362 		return false;
1363
1364 	ci = swap_cluster_lock(si, offset);
1365 	if (cluster_is_usable(ci, order)) {
1366 		if (cluster_is_empty(ci))
1367 			offset = cluster_offset(si, ci);
1368 		alloc_swap_scan_cluster(si, ci, folio, offset);
1369 	} else {
1370 		swap_cluster_unlock(ci);
1371 	}
1372
1373 	put_swap_device(si);
1374 	return folio_test_swapcache(folio);
1375 }
1376
1377 /* Rotate the device and switch to a new cluster */
1378 static void swap_alloc_slow(struct folio *folio)
1379 {
1380 	struct swap_info_struct *si, *next;
1381
1382 	spin_lock(&swap_avail_lock);
1383 start_over:
1384 	plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) {
1385 		/* Rotate the device and switch to a new cluster */
1386 		plist_requeue(&si->avail_list, &swap_avail_head);
1387 		spin_unlock(&swap_avail_lock);
1388 		if (get_swap_device_info(si)) {
1389 			cluster_alloc_swap_entry(si, folio);
1390 			put_swap_device(si);
1391 			if (folio_test_swapcache(folio))
1392 				return;
1393 			if (folio_test_large(folio))
1394 				return;
1395 		}
1396
1397 		spin_lock(&swap_avail_lock);
1398 		/*
1399 		 * If we got here, it's likely that si was almost full before;
1400 		 * multiple callers probably all tried to get a page from the
1401 		 * same si and it filled up before we could get one, or the si
1402 		 * filled up after we dropped swap_avail_lock.
1403 		 * Since we dropped swap_avail_lock, the swap_avail_head list
1404 		 * may have been modified; so if next is still in the
1405 		 * swap_avail_head list then try it, otherwise start over if we
1406 		 * have not gotten any slots.
1407 		 */
1408 		if (plist_node_empty(&next->avail_list))
1409 			goto start_over;
1410 	}
1411 	spin_unlock(&swap_avail_lock);
1412 }
1413
1414 /*
1415  * Discard pending clusters synchronously when under high pressure.
1416  * Return: true if any cluster is discarded.
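 *
 * Within this file it is used by folio_alloc_swap() below, roughly:
 *
 *	if (!order && unlikely(!folio_test_swapcache(folio))) {
 *		if (swap_sync_discard())
 *			goto again;
 *	}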
1417 */ 1418 static bool swap_sync_discard(void) 1419 { 1420 bool ret = false; 1421 struct swap_info_struct *si, *next; 1422 1423 spin_lock(&swap_lock); 1424 start_over: 1425 plist_for_each_entry_safe(si, next, &swap_active_head, list) { 1426 spin_unlock(&swap_lock); 1427 if (get_swap_device_info(si)) { 1428 if (si->flags & SWP_PAGE_DISCARD) 1429 ret = swap_do_scheduled_discard(si); 1430 put_swap_device(si); 1431 } 1432 if (ret) 1433 return true; 1434 1435 spin_lock(&swap_lock); 1436 if (plist_node_empty(&next->list)) 1437 goto start_over; 1438 } 1439 spin_unlock(&swap_lock); 1440 1441 return false; 1442 } 1443 1444 static int swap_extend_table_alloc(struct swap_info_struct *si, 1445 struct swap_cluster_info *ci, gfp_t gfp) 1446 { 1447 void *table; 1448 1449 table = kzalloc(sizeof(ci->extend_table[0]) * SWAPFILE_CLUSTER, gfp); 1450 if (!table) 1451 return -ENOMEM; 1452 1453 spin_lock(&ci->lock); 1454 if (!ci->extend_table) 1455 ci->extend_table = table; 1456 else 1457 kfree(table); 1458 spin_unlock(&ci->lock); 1459 return 0; 1460 } 1461 1462 int swap_retry_table_alloc(swp_entry_t entry, gfp_t gfp) 1463 { 1464 int ret; 1465 struct swap_info_struct *si; 1466 struct swap_cluster_info *ci; 1467 unsigned long offset = swp_offset(entry); 1468 1469 si = get_swap_device(entry); 1470 if (!si) 1471 return 0; 1472 1473 ci = __swap_offset_to_cluster(si, offset); 1474 ret = swap_extend_table_alloc(si, ci, gfp); 1475 1476 put_swap_device(si); 1477 return ret; 1478 } 1479 1480 static void swap_extend_table_try_free(struct swap_cluster_info *ci) 1481 { 1482 unsigned long i; 1483 bool can_free = true; 1484 1485 if (!ci->extend_table) 1486 return; 1487 1488 for (i = 0; i < SWAPFILE_CLUSTER; i++) { 1489 if (ci->extend_table[i]) 1490 can_free = false; 1491 } 1492 1493 if (can_free) { 1494 kfree(ci->extend_table); 1495 ci->extend_table = NULL; 1496 } 1497 } 1498 1499 /* Decrease the swap count of one slot, without freeing it */ 1500 static void __swap_cluster_put_entry(struct swap_cluster_info *ci, 1501 unsigned int ci_off) 1502 { 1503 int count; 1504 unsigned long swp_tb; 1505 1506 lockdep_assert_held(&ci->lock); 1507 swp_tb = __swap_table_get(ci, ci_off); 1508 count = __swp_tb_get_count(swp_tb); 1509 1510 VM_WARN_ON_ONCE(count <= 0); 1511 VM_WARN_ON_ONCE(count > SWP_TB_COUNT_MAX); 1512 1513 if (count == SWP_TB_COUNT_MAX) { 1514 count = ci->extend_table[ci_off]; 1515 /* Overflow starts with SWP_TB_COUNT_MAX */ 1516 VM_WARN_ON_ONCE(count < SWP_TB_COUNT_MAX); 1517 count--; 1518 if (count == (SWP_TB_COUNT_MAX - 1)) { 1519 ci->extend_table[ci_off] = 0; 1520 __swap_table_set(ci, ci_off, __swp_tb_mk_count(swp_tb, count)); 1521 swap_extend_table_try_free(ci); 1522 } else { 1523 ci->extend_table[ci_off] = count; 1524 } 1525 } else { 1526 __swap_table_set(ci, ci_off, __swp_tb_mk_count(swp_tb, --count)); 1527 } 1528 } 1529 1530 /** 1531 * swap_put_entries_cluster - Decrease the swap count of slots within one cluster 1532 * @si: The swap device. 1533 * @offset: start offset of slots. 1534 * @nr: number of slots. 1535 * @reclaim_cache: if true, also reclaim the swap cache if slots are freed. 1536 * 1537 * This helper decreases the swap count of a set of slots and tries to 1538 * batch free them. Also reclaims the swap cache if @reclaim_cache is true. 1539 * 1540 * Context: The specified slots must be pinned by existing swap count or swap 1541 * cache reference, so they won't be released until this helper returns. 
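 *
 * Slots whose count drops to zero and that are not in the swap cache are
 * freed in batches via __swap_cluster_free_entries(); slots still cached
 * are only unreferenced here and get freed later when the folio leaves
 * the swap cache.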
1542 */ 1543 static void swap_put_entries_cluster(struct swap_info_struct *si, 1544 pgoff_t offset, int nr, 1545 bool reclaim_cache) 1546 { 1547 struct swap_cluster_info *ci; 1548 unsigned int ci_off, ci_end; 1549 pgoff_t end = offset + nr; 1550 bool need_reclaim = false; 1551 unsigned int nr_reclaimed; 1552 unsigned long swp_tb; 1553 int ci_batch = -1; 1554 1555 ci = swap_cluster_lock(si, offset); 1556 ci_off = offset % SWAPFILE_CLUSTER; 1557 ci_end = ci_off + nr; 1558 do { 1559 swp_tb = __swap_table_get(ci, ci_off); 1560 if (swp_tb_get_count(swp_tb) == 1) { 1561 /* count == 1 and non-cached slots will be batch freed. */ 1562 if (!swp_tb_is_folio(swp_tb)) { 1563 if (ci_batch == -1) 1564 ci_batch = ci_off; 1565 continue; 1566 } 1567 /* count will be 0 after put, slot can be reclaimed */ 1568 need_reclaim = true; 1569 } 1570 /* 1571 * A count != 1 or cached slot can't be freed. Put its swap 1572 * count and then free the interrupted pending batch. Cached 1573 * slots will be freed when folio is removed from swap cache 1574 * (__swap_cache_del_folio). 1575 */ 1576 __swap_cluster_put_entry(ci, ci_off); 1577 if (ci_batch != -1) { 1578 __swap_cluster_free_entries(si, ci, ci_batch, ci_off - ci_batch); 1579 ci_batch = -1; 1580 } 1581 } while (++ci_off < ci_end); 1582 1583 if (ci_batch != -1) 1584 __swap_cluster_free_entries(si, ci, ci_batch, ci_off - ci_batch); 1585 swap_cluster_unlock(ci); 1586 1587 if (!need_reclaim || !reclaim_cache) 1588 return; 1589 1590 do { 1591 nr_reclaimed = __try_to_reclaim_swap(si, offset, 1592 TTRS_UNMAPPED | TTRS_FULL); 1593 offset++; 1594 if (nr_reclaimed) 1595 offset = round_up(offset, abs(nr_reclaimed)); 1596 } while (offset < end); 1597 } 1598 1599 /* Increase the swap count of one slot. */ 1600 static int __swap_cluster_dup_entry(struct swap_cluster_info *ci, 1601 unsigned int ci_off) 1602 { 1603 int count; 1604 unsigned long swp_tb; 1605 1606 lockdep_assert_held(&ci->lock); 1607 swp_tb = __swap_table_get(ci, ci_off); 1608 /* Bad or special slots can't be handled */ 1609 if (WARN_ON_ONCE(swp_tb_is_bad(swp_tb))) 1610 return -EINVAL; 1611 count = __swp_tb_get_count(swp_tb); 1612 /* Must be either cached or have a count already */ 1613 if (WARN_ON_ONCE(!count && !swp_tb_is_folio(swp_tb))) 1614 return -ENOENT; 1615 1616 if (likely(count < (SWP_TB_COUNT_MAX - 1))) { 1617 __swap_table_set(ci, ci_off, __swp_tb_mk_count(swp_tb, count + 1)); 1618 VM_WARN_ON_ONCE(ci->extend_table && ci->extend_table[ci_off]); 1619 } else if (count == (SWP_TB_COUNT_MAX - 1)) { 1620 if (ci->extend_table) { 1621 VM_WARN_ON_ONCE(ci->extend_table[ci_off]); 1622 ci->extend_table[ci_off] = SWP_TB_COUNT_MAX; 1623 __swap_table_set(ci, ci_off, __swp_tb_mk_count(swp_tb, SWP_TB_COUNT_MAX)); 1624 } else { 1625 return -ENOMEM; 1626 } 1627 } else if (count == SWP_TB_COUNT_MAX) { 1628 VM_WARN_ON_ONCE(ci->extend_table[ci_off] >= 1629 type_max(typeof(ci->extend_table[0]))); 1630 ++ci->extend_table[ci_off]; 1631 } else { 1632 /* Never happens unless counting went wrong */ 1633 WARN_ON_ONCE(1); 1634 } 1635 1636 return 0; 1637 } 1638 1639 /** 1640 * swap_dup_entries_cluster: Increase the swap count of slots within one cluster. 1641 * @si: The swap device. 1642 * @offset: start offset of slots. 1643 * @nr: number of slots. 1644 * 1645 * Context: The specified slots must be pinned by existing swap count or swap 1646 * cache reference, so they won't be released until this helper returns. 1647 * Return: 0 on success. 
-ENOMEM if the swap count maxed out (SWP_TB_COUNT_MAX) 1648 * and failed to allocate an extended table, -EINVAL if any entry is bad entry. 1649 */ 1650 static int swap_dup_entries_cluster(struct swap_info_struct *si, 1651 pgoff_t offset, int nr) 1652 { 1653 int err; 1654 struct swap_cluster_info *ci; 1655 unsigned int ci_start, ci_off, ci_end; 1656 1657 ci_start = offset % SWAPFILE_CLUSTER; 1658 ci_end = ci_start + nr; 1659 ci_off = ci_start; 1660 ci = swap_cluster_lock(si, offset); 1661 restart: 1662 do { 1663 err = __swap_cluster_dup_entry(ci, ci_off); 1664 if (unlikely(err)) { 1665 if (err == -ENOMEM) { 1666 spin_unlock(&ci->lock); 1667 err = swap_extend_table_alloc(si, ci, GFP_ATOMIC); 1668 spin_lock(&ci->lock); 1669 if (!err) 1670 goto restart; 1671 } 1672 goto failed; 1673 } 1674 } while (++ci_off < ci_end); 1675 swap_cluster_unlock(ci); 1676 return 0; 1677 failed: 1678 while (ci_off-- > ci_start) 1679 __swap_cluster_put_entry(ci, ci_off); 1680 swap_extend_table_try_free(ci); 1681 swap_cluster_unlock(ci); 1682 return err; 1683 } 1684 1685 /** 1686 * folio_alloc_swap - allocate swap space for a folio 1687 * @folio: folio we want to move to swap 1688 * 1689 * Allocate swap space for the folio and add the folio to the 1690 * swap cache. 1691 * 1692 * Context: Caller needs to hold the folio lock. 1693 * Return: Whether the folio was added to the swap cache. 1694 */ 1695 int folio_alloc_swap(struct folio *folio) 1696 { 1697 unsigned int order = folio_order(folio); 1698 unsigned int size = 1 << order; 1699 1700 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 1701 VM_BUG_ON_FOLIO(!folio_test_uptodate(folio), folio); 1702 1703 if (order) { 1704 /* 1705 * Reject large allocation when THP_SWAP is disabled, 1706 * the caller should split the folio and try again. 1707 */ 1708 if (!IS_ENABLED(CONFIG_THP_SWAP)) 1709 return -EAGAIN; 1710 1711 /* 1712 * Allocation size should never exceed cluster size 1713 * (HPAGE_PMD_SIZE). 1714 */ 1715 if (size > SWAPFILE_CLUSTER) { 1716 VM_WARN_ON_ONCE(1); 1717 return -EINVAL; 1718 } 1719 } 1720 1721 again: 1722 local_lock(&percpu_swap_cluster.lock); 1723 if (!swap_alloc_fast(folio)) 1724 swap_alloc_slow(folio); 1725 local_unlock(&percpu_swap_cluster.lock); 1726 1727 if (!order && unlikely(!folio_test_swapcache(folio))) { 1728 if (swap_sync_discard()) 1729 goto again; 1730 } 1731 1732 /* Need to call this even if allocation failed, for MEMCG_SWAP_FAIL. */ 1733 if (unlikely(mem_cgroup_try_charge_swap(folio, folio->swap))) 1734 swap_cache_del_folio(folio); 1735 1736 if (unlikely(!folio_test_swapcache(folio))) 1737 return -ENOMEM; 1738 1739 return 0; 1740 } 1741 1742 /** 1743 * folio_dup_swap() - Increase swap count of swap entries of a folio. 1744 * @folio: folio with swap entries bounded. 1745 * @subpage: if not NULL, only increase the swap count of this subpage. 1746 * 1747 * Typically called when the folio is unmapped and have its swap entry to 1748 * take its place: Swap entries allocated to a folio has count == 0 and pinned 1749 * by swap cache. The swap cache pin doesn't increase the swap count. This 1750 * helper sets the initial count == 1 and increases the count as the folio is 1751 * unmapped and swap entries referencing the slots are generated to replace 1752 * the folio. 1753 * 1754 * Context: Caller must ensure the folio is locked and in the swap cache. 1755 * NOTE: The caller also has to ensure there is no raced call to 1756 * swap_put_entries_direct on its swap entry before this helper returns, or 1757 * the swap count may underflow. 
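 *
 * A rough usage sketch (the surrounding caller and variable names are
 * hypothetical, the pattern follows the description above): when
 * unmapping one PTE of a locked swap-cache folio, the swap count is
 * raised before the swap PTE is installed, e.g.
 *
 *	if (folio_dup_swap(folio, subpage))
 *		return error;	... e.g. -ENOMEM, caller must back out ...
 *	set_pte_at(mm, addr, ptep, swp_entry_to_pte(entry));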
1758 */ 1759 int folio_dup_swap(struct folio *folio, struct page *subpage) 1760 { 1761 swp_entry_t entry = folio->swap; 1762 unsigned long nr_pages = folio_nr_pages(folio); 1763 1764 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); 1765 VM_WARN_ON_FOLIO(!folio_test_swapcache(folio), folio); 1766 1767 if (subpage) { 1768 entry.val += folio_page_idx(folio, subpage); 1769 nr_pages = 1; 1770 } 1771 1772 return swap_dup_entries_cluster(swap_entry_to_info(entry), 1773 swp_offset(entry), nr_pages); 1774 } 1775 1776 /** 1777 * folio_put_swap() - Decrease swap count of swap entries of a folio. 1778 * @folio: folio with swap entries bounded, must be in swap cache and locked. 1779 * @subpage: if not NULL, only decrease the swap count of this subpage. 1780 * 1781 * This won't free the swap slots even if swap count drops to zero, they are 1782 * still pinned by the swap cache. User may call folio_free_swap to free them. 1783 * Context: Caller must ensure the folio is locked and in the swap cache. 1784 */ 1785 void folio_put_swap(struct folio *folio, struct page *subpage) 1786 { 1787 swp_entry_t entry = folio->swap; 1788 unsigned long nr_pages = folio_nr_pages(folio); 1789 struct swap_info_struct *si = __swap_entry_to_info(entry); 1790 1791 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); 1792 VM_WARN_ON_FOLIO(!folio_test_swapcache(folio), folio); 1793 1794 if (subpage) { 1795 entry.val += folio_page_idx(folio, subpage); 1796 nr_pages = 1; 1797 } 1798 1799 swap_put_entries_cluster(si, swp_offset(entry), nr_pages, false); 1800 } 1801 1802 /* 1803 * When we get a swap entry, if there aren't some other ways to 1804 * prevent swapoff, such as the folio in swap cache is locked, RCU 1805 * reader side is locked, etc., the swap entry may become invalid 1806 * because of swapoff. Then, we need to enclose all swap related 1807 * functions with get_swap_device() and put_swap_device(), unless the 1808 * swap functions call get/put_swap_device() by themselves. 1809 * 1810 * RCU reader side lock (including any spinlock) is sufficient to 1811 * prevent swapoff, because synchronize_rcu() is called in swapoff() 1812 * before freeing data structures. 1813 * 1814 * Check whether swap entry is valid in the swap device. If so, 1815 * return pointer to swap_info_struct, and keep the swap entry valid 1816 * via preventing the swap device from being swapoff, until 1817 * put_swap_device() is called. Otherwise return NULL. 1818 * 1819 * Notice that swapoff or swapoff+swapon can still happen before the 1820 * percpu_ref_tryget_live() in get_swap_device() or after the 1821 * percpu_ref_put() in put_swap_device() if there isn't any other way 1822 * to prevent swapoff. The caller must be prepared for that. For 1823 * example, the following situation is possible. 1824 * 1825 * CPU1 CPU2 1826 * do_swap_page() 1827 * ... swapoff+swapon 1828 * swap_cache_alloc_folio() 1829 * swap_cache_add_folio() 1830 * // check swap_map 1831 * // verify PTE not changed 1832 * 1833 * In __swap_duplicate(), the swap_map need to be checked before 1834 * changing partly because the specified swap entry may be for another 1835 * swap device which has been swapoff. And in do_swap_page(), after 1836 * the page is read from the swap device, the PTE is verified not 1837 * changed with the page table locked to check whether the swap device 1838 * has been swapoff or swapoff+swapon. 
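 *
 * A minimal usage sketch (illustrative only):
 *
 *	si = get_swap_device(entry);
 *	if (!si)
 *		return;		... entry is stale or device is gone ...
 *	... access the swap cache / swap table for entry ...
 *	put_swap_device(si);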
1839 */ 1840 struct swap_info_struct *get_swap_device(swp_entry_t entry) 1841 { 1842 struct swap_info_struct *si; 1843 unsigned long offset; 1844 1845 if (!entry.val) 1846 goto out; 1847 si = swap_entry_to_info(entry); 1848 if (!si) 1849 goto bad_nofile; 1850 if (!get_swap_device_info(si)) 1851 goto out; 1852 offset = swp_offset(entry); 1853 if (offset >= si->max) 1854 goto put_out; 1855 1856 return si; 1857 bad_nofile: 1858 pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val); 1859 out: 1860 return NULL; 1861 put_out: 1862 pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val); 1863 percpu_ref_put(&si->users); 1864 return NULL; 1865 } 1866 1867 /* 1868 * Free a set of swap slots after their swap count dropped to zero, or will be 1869 * zero after putting the last ref (saves one __swap_cluster_put_entry call). 1870 */ 1871 void __swap_cluster_free_entries(struct swap_info_struct *si, 1872 struct swap_cluster_info *ci, 1873 unsigned int ci_start, unsigned int nr_pages) 1874 { 1875 unsigned long old_tb; 1876 unsigned int ci_off = ci_start, ci_end = ci_start + nr_pages; 1877 unsigned long offset = cluster_offset(si, ci) + ci_start; 1878 1879 VM_WARN_ON(ci->count < nr_pages); 1880 1881 ci->count -= nr_pages; 1882 do { 1883 old_tb = __swap_table_get(ci, ci_off); 1884 /* Release the last ref, or after swap cache is dropped */ 1885 VM_WARN_ON(!swp_tb_is_shadow(old_tb) || __swp_tb_get_count(old_tb) > 1); 1886 __swap_table_set(ci, ci_off, null_to_swp_tb()); 1887 } while (++ci_off < ci_end); 1888 1889 mem_cgroup_uncharge_swap(swp_entry(si->type, offset), nr_pages); 1890 swap_range_free(si, offset, nr_pages); 1891 swap_cluster_assert_empty(ci, ci_start, nr_pages, false); 1892 1893 if (!ci->count) 1894 free_cluster(si, ci); 1895 else 1896 partial_free_cluster(si, ci); 1897 } 1898 1899 int __swap_count(swp_entry_t entry) 1900 { 1901 struct swap_cluster_info *ci = __swap_entry_to_cluster(entry); 1902 unsigned int ci_off = swp_cluster_offset(entry); 1903 1904 return swp_tb_get_count(__swap_table_get(ci, ci_off)); 1905 } 1906 1907 /** 1908 * swap_entry_swapped - Check if the swap entry is swapped. 1909 * @si: the swap device. 1910 * @entry: the swap entry. 1911 */ 1912 bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry) 1913 { 1914 pgoff_t offset = swp_offset(entry); 1915 struct swap_cluster_info *ci; 1916 unsigned long swp_tb; 1917 1918 ci = swap_cluster_lock(si, offset); 1919 swp_tb = swap_table_get(ci, offset % SWAPFILE_CLUSTER); 1920 swap_cluster_unlock(ci); 1921 1922 return swp_tb_get_count(swp_tb) > 0; 1923 } 1924 1925 /* 1926 * How many references to @entry are currently swapped out? 1927 * This returns exact answer. 1928 */ 1929 int swp_swapcount(swp_entry_t entry) 1930 { 1931 struct swap_info_struct *si; 1932 struct swap_cluster_info *ci; 1933 unsigned long swp_tb; 1934 int count; 1935 1936 si = get_swap_device(entry); 1937 if (!si) 1938 return 0; 1939 1940 ci = swap_cluster_lock(si, swp_offset(entry)); 1941 swp_tb = __swap_table_get(ci, swp_cluster_offset(entry)); 1942 count = swp_tb_get_count(swp_tb); 1943 if (count == SWP_TB_COUNT_MAX) 1944 count = ci->extend_table[swp_cluster_offset(entry)]; 1945 swap_cluster_unlock(ci); 1946 put_swap_device(si); 1947 1948 return count < 0 ? 0 : count; 1949 } 1950 1951 /* 1952 * folio_maybe_swapped - Test if a folio covers any swap slot with count > 0. 1953 * 1954 * Check if a folio is swapped. 
Holding the folio lock ensures the folio won't 1955 * go from not-swapped to swapped because the initial swap count increment can 1956 * only be done by folio_dup_swap, which also locks the folio. But a concurrent 1957 * decrease of swap count is possible through swap_put_entries_direct, so this 1958 * may return a false positive. 1959 * 1960 * Context: Caller must ensure the folio is locked and in the swap cache. 1961 */ 1962 static bool folio_maybe_swapped(struct folio *folio) 1963 { 1964 swp_entry_t entry = folio->swap; 1965 struct swap_cluster_info *ci; 1966 unsigned int ci_off, ci_end; 1967 bool ret = false; 1968 1969 VM_WARN_ON_ONCE_FOLIO(!folio_test_locked(folio), folio); 1970 VM_WARN_ON_ONCE_FOLIO(!folio_test_swapcache(folio), folio); 1971 1972 ci = __swap_entry_to_cluster(entry); 1973 ci_off = swp_cluster_offset(entry); 1974 ci_end = ci_off + folio_nr_pages(folio); 1975 /* 1976 * Extra locking not needed, folio lock ensures its swap entries 1977 * won't be released, the backing data won't be gone either. 1978 */ 1979 rcu_read_lock(); 1980 do { 1981 if (__swp_tb_get_count(__swap_table_get(ci, ci_off))) { 1982 ret = true; 1983 break; 1984 } 1985 } while (++ci_off < ci_end); 1986 rcu_read_unlock(); 1987 1988 return ret; 1989 } 1990 1991 static bool folio_swapcache_freeable(struct folio *folio) 1992 { 1993 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio); 1994 1995 if (!folio_test_swapcache(folio)) 1996 return false; 1997 if (folio_test_writeback(folio)) 1998 return false; 1999 2000 /* 2001 * Once hibernation has begun to create its image of memory, 2002 * there's a danger that one of the calls to folio_free_swap() 2003 * - most probably a call from __try_to_reclaim_swap() while 2004 * hibernation is allocating its own swap pages for the image, 2005 * but conceivably even a call from memory reclaim - will free 2006 * the swap from a folio which has already been recorded in the 2007 * image as a clean swapcache folio, and then reuse its swap for 2008 * another page of the image. On waking from hibernation, the 2009 * original folio might be freed under memory pressure, then 2010 * later read back in from swap, now with the wrong data. 2011 * 2012 * Hibernation suspends storage while it is writing the image 2013 * to disk so check that here. 2014 */ 2015 if (pm_suspended_storage()) 2016 return false; 2017 2018 return true; 2019 } 2020 2021 /** 2022 * folio_free_swap() - Free the swap space used for this folio. 2023 * @folio: The folio to remove. 2024 * 2025 * If swap is getting full, or if there are no more mappings of this folio, 2026 * then call folio_free_swap to free its swap space. 2027 * 2028 * Return: true if we were able to release the swap space. 2029 */ 2030 bool folio_free_swap(struct folio *folio) 2031 { 2032 if (!folio_swapcache_freeable(folio)) 2033 return false; 2034 if (folio_maybe_swapped(folio)) 2035 return false; 2036 2037 swap_cache_del_folio(folio); 2038 folio_set_dirty(folio); 2039 return true; 2040 } 2041 2042 /** 2043 * swap_put_entries_direct() - Release reference on range of swap entries and 2044 * reclaim their cache if no more references remain. 2045 * @entry: First entry of range. 2046 * @nr: Number of entries in range. 2047 * 2048 * For each swap entry in the contiguous range, release a reference. If any swap 2049 * entries become free, try to reclaim their underlying folios, if present. The 2050 * offset range is defined by [entry.offset, entry.offset + nr). 
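 * All entries in the range must belong to the same swap device; they are then released one cluster at a time.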
2051 * 2052 * Context: Caller must ensure there is no race condition on the reference 2053 * owner, e.g. by locking the PTL of a PTE containing the entry being released. 2054 */ 2055 void swap_put_entries_direct(swp_entry_t entry, int nr) 2056 { 2057 const unsigned long start_offset = swp_offset(entry); 2058 const unsigned long end_offset = start_offset + nr; 2059 unsigned long offset, cluster_end; 2060 struct swap_info_struct *si; 2061 2062 si = get_swap_device(entry); 2063 if (WARN_ON_ONCE(!si)) 2064 return; 2065 if (WARN_ON_ONCE(end_offset > si->max)) 2066 goto out; 2067 2068 /* Put entries and reclaim cache in each cluster */ 2069 offset = start_offset; 2070 do { 2071 cluster_end = min(round_up(offset + 1, SWAPFILE_CLUSTER), end_offset); 2072 swap_put_entries_cluster(si, offset, cluster_end - offset, true); 2073 offset = cluster_end; 2074 } while (offset < end_offset); 2075 out: 2076 put_swap_device(si); 2077 } 2078 2079 #ifdef CONFIG_HIBERNATION 2080 /* Allocate a slot for hibernation */ 2081 swp_entry_t swap_alloc_hibernation_slot(int type) 2082 { 2083 struct swap_info_struct *pcp_si, *si = swap_type_to_info(type); 2084 unsigned long pcp_offset, offset = SWAP_ENTRY_INVALID; 2085 struct swap_cluster_info *ci; 2086 swp_entry_t entry = {0}; 2087 2088 if (!si) 2089 goto fail; 2090 2091 /* This is called to allocate a swap entry, not swap cache */ 2092 if (get_swap_device_info(si)) { 2093 if (si->flags & SWP_WRITEOK) { 2094 /* 2095 * Try the local cluster first if it matches the device. If 2096 * not, try to grab a new cluster and override the local cluster. 2097 */ 2098 local_lock(&percpu_swap_cluster.lock); 2099 pcp_si = this_cpu_read(percpu_swap_cluster.si[0]); 2100 pcp_offset = this_cpu_read(percpu_swap_cluster.offset[0]); 2101 if (pcp_si == si && pcp_offset) { 2102 ci = swap_cluster_lock(si, pcp_offset); 2103 if (cluster_is_usable(ci, 0)) 2104 offset = alloc_swap_scan_cluster(si, ci, NULL, pcp_offset); 2105 else 2106 swap_cluster_unlock(ci); 2107 } 2108 if (!offset) 2109 offset = cluster_alloc_swap_entry(si, NULL); 2110 local_unlock(&percpu_swap_cluster.lock); 2111 if (offset) 2112 entry = swp_entry(si->type, offset); 2113 } 2114 put_swap_device(si); 2115 } 2116 fail: 2117 return entry; 2118 } 2119 2120 /* Free a slot allocated by swap_alloc_hibernation_slot */ 2121 void swap_free_hibernation_slot(swp_entry_t entry) 2122 { 2123 struct swap_info_struct *si; 2124 struct swap_cluster_info *ci; 2125 pgoff_t offset = swp_offset(entry); 2126 2127 si = get_swap_device(entry); 2128 if (WARN_ON(!si)) 2129 return; 2130 2131 ci = swap_cluster_lock(si, offset); 2132 __swap_cluster_put_entry(ci, offset % SWAPFILE_CLUSTER); 2133 __swap_cluster_free_entries(si, ci, offset % SWAPFILE_CLUSTER, 1); 2134 swap_cluster_unlock(ci); 2135 2136 /* In theory readahead might add it to the swap cache by accident */ 2137 __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); 2138 put_swap_device(si); 2139 } 2140 2141 /* 2142 * Find the swap type that corresponds to the given device (if any). 2143 * 2144 * @offset - number of the PAGE_SIZE-sized block of the device, starting 2145 * from 0, in which the swap header is expected to be located. 2146 * 2147 * This is needed for the suspend to disk (aka swsusp).
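 * * Returns the swap type on success, -1 if @device is zero, or -ENODEV if no matching swap device is found.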
2148 */ 2149 int swap_type_of(dev_t device, sector_t offset) 2150 { 2151 int type; 2152 2153 if (!device) 2154 return -1; 2155 2156 spin_lock(&swap_lock); 2157 for (type = 0; type < nr_swapfiles; type++) { 2158 struct swap_info_struct *sis = swap_info[type]; 2159 2160 if (!(sis->flags & SWP_WRITEOK)) 2161 continue; 2162 2163 if (device == sis->bdev->bd_dev) { 2164 struct swap_extent *se = first_se(sis); 2165 2166 if (se->start_block == offset) { 2167 spin_unlock(&swap_lock); 2168 return type; 2169 } 2170 } 2171 } 2172 spin_unlock(&swap_lock); 2173 return -ENODEV; 2174 } 2175 2176 int find_first_swap(dev_t *device) 2177 { 2178 int type; 2179 2180 spin_lock(&swap_lock); 2181 for (type = 0; type < nr_swapfiles; type++) { 2182 struct swap_info_struct *sis = swap_info[type]; 2183 2184 if (!(sis->flags & SWP_WRITEOK)) 2185 continue; 2186 *device = sis->bdev->bd_dev; 2187 spin_unlock(&swap_lock); 2188 return type; 2189 } 2190 spin_unlock(&swap_lock); 2191 return -ENODEV; 2192 } 2193 2194 /* 2195 * Get the (PAGE_SIZE) block corresponding to given offset on the swapdev 2196 * corresponding to given index in swap_info (swap type). 2197 */ 2198 sector_t swapdev_block(int type, pgoff_t offset) 2199 { 2200 struct swap_info_struct *si = swap_type_to_info(type); 2201 struct swap_extent *se; 2202 2203 if (!si || !(si->flags & SWP_WRITEOK)) 2204 return 0; 2205 se = offset_to_swap_extent(si, offset); 2206 return se->start_block + (offset - se->start_page); 2207 } 2208 2209 /* 2210 * Return either the total number of swap pages of given type, or the number 2211 * of free pages of that type (depending on @free) 2212 * 2213 * This is needed for software suspend 2214 */ 2215 unsigned int count_swap_pages(int type, int free) 2216 { 2217 unsigned int n = 0; 2218 2219 spin_lock(&swap_lock); 2220 if ((unsigned int)type < nr_swapfiles) { 2221 struct swap_info_struct *sis = swap_info[type]; 2222 2223 spin_lock(&sis->lock); 2224 if (sis->flags & SWP_WRITEOK) { 2225 n = sis->pages; 2226 if (free) 2227 n -= swap_usage_in_pages(sis); 2228 } 2229 spin_unlock(&sis->lock); 2230 } 2231 spin_unlock(&swap_lock); 2232 return n; 2233 } 2234 #endif /* CONFIG_HIBERNATION */ 2235 2236 static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte) 2237 { 2238 return pte_same(pte_swp_clear_flags(pte), swp_pte); 2239 } 2240 2241 /* 2242 * No need to decide whether this PTE shares the swap entry with others, 2243 * just let do_wp_page work it out if a write is requested later - to 2244 * force COW, vm_page_prot omits write permission from any private vma. 2245 */ 2246 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd, 2247 unsigned long addr, swp_entry_t entry, struct folio *folio) 2248 { 2249 struct page *page; 2250 struct folio *swapcache; 2251 spinlock_t *ptl; 2252 pte_t *pte, new_pte, old_pte; 2253 bool hwpoisoned = false; 2254 int ret = 1; 2255 2256 /* 2257 * If the folio is removed from swap cache by others, continue to 2258 * unuse other PTEs. try_to_unuse may try again if we missed this one. 
2259 */ 2260 if (!folio_matches_swap_entry(folio, entry)) 2261 return 0; 2262 2263 swapcache = folio; 2264 folio = ksm_might_need_to_copy(folio, vma, addr); 2265 if (unlikely(!folio)) 2266 return -ENOMEM; 2267 else if (unlikely(folio == ERR_PTR(-EHWPOISON))) { 2268 hwpoisoned = true; 2269 folio = swapcache; 2270 } 2271 2272 page = folio_file_page(folio, swp_offset(entry)); 2273 if (PageHWPoison(page)) 2274 hwpoisoned = true; 2275 2276 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); 2277 if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte), 2278 swp_entry_to_pte(entry)))) { 2279 ret = 0; 2280 goto out; 2281 } 2282 2283 old_pte = ptep_get(pte); 2284 2285 if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) { 2286 swp_entry_t swp_entry; 2287 2288 dec_mm_counter(vma->vm_mm, MM_SWAPENTS); 2289 if (hwpoisoned) { 2290 swp_entry = make_hwpoison_entry(page); 2291 } else { 2292 swp_entry = make_poisoned_swp_entry(); 2293 } 2294 new_pte = swp_entry_to_pte(swp_entry); 2295 ret = 0; 2296 goto setpte; 2297 } 2298 2299 /* 2300 * Some architectures may have to restore extra metadata to the page 2301 * when reading from swap. This metadata may be indexed by swap entry 2302 * so this must be called before folio_put_swap(). 2303 */ 2304 arch_swap_restore(folio_swap(entry, folio), folio); 2305 2306 dec_mm_counter(vma->vm_mm, MM_SWAPENTS); 2307 inc_mm_counter(vma->vm_mm, MM_ANONPAGES); 2308 folio_get(folio); 2309 if (folio == swapcache) { 2310 rmap_t rmap_flags = RMAP_NONE; 2311 2312 /* 2313 * See do_swap_page(): writeback would be problematic. 2314 * However, we do a folio_wait_writeback() just before this 2315 * call and have the folio locked. 2316 */ 2317 VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio); 2318 if (pte_swp_exclusive(old_pte)) 2319 rmap_flags |= RMAP_EXCLUSIVE; 2320 /* 2321 * We currently only expect small !anon folios, which are either 2322 * fully exclusive or fully shared. If we ever get large folios 2323 * here, we have to be careful. 
2324 */ 2325 if (!folio_test_anon(folio)) { 2326 VM_WARN_ON_ONCE(folio_test_large(folio)); 2327 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio); 2328 folio_add_new_anon_rmap(folio, vma, addr, rmap_flags); 2329 } else { 2330 folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags); 2331 } 2332 } else { /* ksm created a completely new copy */ 2333 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE); 2334 folio_add_lru_vma(folio, vma); 2335 } 2336 new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot)); 2337 if (pte_swp_soft_dirty(old_pte)) 2338 new_pte = pte_mksoft_dirty(new_pte); 2339 if (pte_swp_uffd_wp(old_pte)) 2340 new_pte = pte_mkuffd_wp(new_pte); 2341 setpte: 2342 set_pte_at(vma->vm_mm, addr, pte, new_pte); 2343 folio_put_swap(swapcache, folio_file_page(swapcache, swp_offset(entry))); 2344 out: 2345 if (pte) 2346 pte_unmap_unlock(pte, ptl); 2347 if (folio != swapcache) { 2348 folio_unlock(folio); 2349 folio_put(folio); 2350 } 2351 return ret; 2352 } 2353 2354 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd, 2355 unsigned long addr, unsigned long end, 2356 unsigned int type) 2357 { 2358 pte_t *pte = NULL; 2359 2360 do { 2361 struct folio *folio; 2362 unsigned long swp_tb; 2363 softleaf_t entry; 2364 int ret; 2365 pte_t ptent; 2366 2367 if (!pte++) { 2368 pte = pte_offset_map(pmd, addr); 2369 if (!pte) 2370 break; 2371 } 2372 2373 ptent = ptep_get_lockless(pte); 2374 entry = softleaf_from_pte(ptent); 2375 2376 if (!softleaf_is_swap(entry)) 2377 continue; 2378 if (swp_type(entry) != type) 2379 continue; 2380 2381 pte_unmap(pte); 2382 pte = NULL; 2383 2384 folio = swap_cache_get_folio(entry); 2385 if (!folio) { 2386 struct vm_fault vmf = { 2387 .vma = vma, 2388 .address = addr, 2389 .real_address = addr, 2390 .pmd = pmd, 2391 }; 2392 2393 folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE, 2394 &vmf); 2395 } 2396 if (!folio) { 2397 swp_tb = swap_table_get(__swap_entry_to_cluster(entry), 2398 swp_cluster_offset(entry)); 2399 if (swp_tb_get_count(swp_tb) <= 0) 2400 continue; 2401 return -ENOMEM; 2402 } 2403 2404 folio_lock(folio); 2405 folio_wait_writeback(folio); 2406 ret = unuse_pte(vma, pmd, addr, entry, folio); 2407 if (ret < 0) { 2408 folio_unlock(folio); 2409 folio_put(folio); 2410 return ret; 2411 } 2412 2413 folio_free_swap(folio); 2414 folio_unlock(folio); 2415 folio_put(folio); 2416 } while (addr += PAGE_SIZE, addr != end); 2417 2418 if (pte) 2419 pte_unmap(pte); 2420 return 0; 2421 } 2422 2423 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud, 2424 unsigned long addr, unsigned long end, 2425 unsigned int type) 2426 { 2427 pmd_t *pmd; 2428 unsigned long next; 2429 int ret; 2430 2431 pmd = pmd_offset(pud, addr); 2432 do { 2433 cond_resched(); 2434 next = pmd_addr_end(addr, end); 2435 ret = unuse_pte_range(vma, pmd, addr, next, type); 2436 if (ret) 2437 return ret; 2438 } while (pmd++, addr = next, addr != end); 2439 return 0; 2440 } 2441 2442 static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d, 2443 unsigned long addr, unsigned long end, 2444 unsigned int type) 2445 { 2446 pud_t *pud; 2447 unsigned long next; 2448 int ret; 2449 2450 pud = pud_offset(p4d, addr); 2451 do { 2452 next = pud_addr_end(addr, end); 2453 if (pud_none_or_clear_bad(pud)) 2454 continue; 2455 ret = unuse_pmd_range(vma, pud, addr, next, type); 2456 if (ret) 2457 return ret; 2458 } while (pud++, addr = next, addr != end); 2459 return 0; 2460 } 2461 2462 static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd, 2463 unsigned 
long addr, unsigned long end, 2464 unsigned int type) 2465 { 2466 p4d_t *p4d; 2467 unsigned long next; 2468 int ret; 2469 2470 p4d = p4d_offset(pgd, addr); 2471 do { 2472 next = p4d_addr_end(addr, end); 2473 if (p4d_none_or_clear_bad(p4d)) 2474 continue; 2475 ret = unuse_pud_range(vma, p4d, addr, next, type); 2476 if (ret) 2477 return ret; 2478 } while (p4d++, addr = next, addr != end); 2479 return 0; 2480 } 2481 2482 static int unuse_vma(struct vm_area_struct *vma, unsigned int type) 2483 { 2484 pgd_t *pgd; 2485 unsigned long addr, end, next; 2486 int ret; 2487 2488 addr = vma->vm_start; 2489 end = vma->vm_end; 2490 2491 pgd = pgd_offset(vma->vm_mm, addr); 2492 do { 2493 next = pgd_addr_end(addr, end); 2494 if (pgd_none_or_clear_bad(pgd)) 2495 continue; 2496 ret = unuse_p4d_range(vma, pgd, addr, next, type); 2497 if (ret) 2498 return ret; 2499 } while (pgd++, addr = next, addr != end); 2500 return 0; 2501 } 2502 2503 static int unuse_mm(struct mm_struct *mm, unsigned int type) 2504 { 2505 struct vm_area_struct *vma; 2506 int ret = 0; 2507 VMA_ITERATOR(vmi, mm, 0); 2508 2509 mmap_read_lock(mm); 2510 if (check_stable_address_space(mm)) 2511 goto unlock; 2512 for_each_vma(vmi, vma) { 2513 if (vma->anon_vma && !is_vm_hugetlb_page(vma)) { 2514 ret = unuse_vma(vma, type); 2515 if (ret) 2516 break; 2517 } 2518 2519 cond_resched(); 2520 } 2521 unlock: 2522 mmap_read_unlock(mm); 2523 return ret; 2524 } 2525 2526 /* 2527 * Scan swap table from current position to next entry still in use. 2528 * Return 0 if there are no inuse entries after prev till end of 2529 * the map. 2530 */ 2531 static unsigned int find_next_to_unuse(struct swap_info_struct *si, 2532 unsigned int prev) 2533 { 2534 unsigned int i; 2535 unsigned long swp_tb; 2536 2537 /* 2538 * No need for swap_lock here: we're just looking 2539 * for whether an entry is in use, not modifying it; false 2540 * hits are okay, and sys_swapoff() has already prevented new 2541 * allocations from this area (while holding swap_lock). 2542 */ 2543 for (i = prev + 1; i < si->max; i++) { 2544 swp_tb = swap_table_get(__swap_offset_to_cluster(si, i), 2545 i % SWAPFILE_CLUSTER); 2546 if (!swp_tb_is_null(swp_tb) && !swp_tb_is_bad(swp_tb)) 2547 break; 2548 if ((i % LATENCY_LIMIT) == 0) 2549 cond_resched(); 2550 } 2551 2552 if (i == si->max) 2553 i = 0; 2554 2555 return i; 2556 } 2557 2558 static int try_to_unuse(unsigned int type) 2559 { 2560 struct mm_struct *prev_mm; 2561 struct mm_struct *mm; 2562 struct list_head *p; 2563 int retval = 0; 2564 struct swap_info_struct *si = swap_info[type]; 2565 struct folio *folio; 2566 swp_entry_t entry; 2567 unsigned int i; 2568 2569 if (!swap_usage_in_pages(si)) 2570 goto success; 2571 2572 retry: 2573 retval = shmem_unuse(type); 2574 if (retval) 2575 return retval; 2576 2577 prev_mm = &init_mm; 2578 mmget(prev_mm); 2579 2580 spin_lock(&mmlist_lock); 2581 p = &init_mm.mmlist; 2582 while (swap_usage_in_pages(si) && 2583 !signal_pending(current) && 2584 (p = p->next) != &init_mm.mmlist) { 2585 2586 mm = list_entry(p, struct mm_struct, mmlist); 2587 if (!mmget_not_zero(mm)) 2588 continue; 2589 spin_unlock(&mmlist_lock); 2590 mmput(prev_mm); 2591 prev_mm = mm; 2592 retval = unuse_mm(mm, type); 2593 if (retval) { 2594 mmput(prev_mm); 2595 return retval; 2596 } 2597 2598 /* 2599 * Make sure that we aren't completely killing 2600 * interactive performance. 
2601 */ 2602 cond_resched(); 2603 spin_lock(&mmlist_lock); 2604 } 2605 spin_unlock(&mmlist_lock); 2606 2607 mmput(prev_mm); 2608 2609 i = 0; 2610 while (swap_usage_in_pages(si) && 2611 !signal_pending(current) && 2612 (i = find_next_to_unuse(si, i)) != 0) { 2613 2614 entry = swp_entry(type, i); 2615 folio = swap_cache_get_folio(entry); 2616 if (!folio) 2617 continue; 2618 2619 /* 2620 * It is conceivable that a racing task removed this folio from 2621 * swap cache just before we acquired the page lock. The folio 2622 * might even be back in swap cache on another swap area. But 2623 * that is okay, folio_free_swap() only removes stale folios. 2624 */ 2625 folio_lock(folio); 2626 folio_wait_writeback(folio); 2627 folio_free_swap(folio); 2628 folio_unlock(folio); 2629 folio_put(folio); 2630 } 2631 2632 /* 2633 * Lets check again to see if there are still swap entries in the map. 2634 * If yes, we would need to do retry the unuse logic again. 2635 * Under global memory pressure, swap entries can be reinserted back 2636 * into process space after the mmlist loop above passes over them. 2637 * 2638 * Limit the number of retries? No: when mmget_not_zero() 2639 * above fails, that mm is likely to be freeing swap from 2640 * exit_mmap(), which proceeds at its own independent pace; 2641 * and even shmem_writeout() could have been preempted after 2642 * folio_alloc_swap(), temporarily hiding that swap. It's easy 2643 * and robust (though cpu-intensive) just to keep retrying. 2644 */ 2645 if (swap_usage_in_pages(si)) { 2646 if (!signal_pending(current)) 2647 goto retry; 2648 return -EINTR; 2649 } 2650 2651 success: 2652 /* 2653 * Make sure that further cleanups after try_to_unuse() returns happen 2654 * after swap_range_free() reduces si->inuse_pages to 0. 2655 */ 2656 smp_mb(); 2657 return 0; 2658 } 2659 2660 /* 2661 * After a successful try_to_unuse, if no swap is now in use, we know 2662 * we can empty the mmlist. swap_lock must be held on entry and exit. 2663 * Note that mmlist_lock nests inside swap_lock, and an mm must be 2664 * added to the mmlist just after page_duplicate - before would be racy. 2665 */ 2666 static void drain_mmlist(void) 2667 { 2668 struct list_head *p, *next; 2669 unsigned int type; 2670 2671 for (type = 0; type < nr_swapfiles; type++) 2672 if (swap_usage_in_pages(swap_info[type])) 2673 return; 2674 spin_lock(&mmlist_lock); 2675 list_for_each_safe(p, next, &init_mm.mmlist) 2676 list_del_init(p); 2677 spin_unlock(&mmlist_lock); 2678 } 2679 2680 /* 2681 * Free all of a swapdev's extent information 2682 */ 2683 static void destroy_swap_extents(struct swap_info_struct *sis, 2684 struct file *swap_file) 2685 { 2686 while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) { 2687 struct rb_node *rb = sis->swap_extent_root.rb_node; 2688 struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node); 2689 2690 rb_erase(rb, &sis->swap_extent_root); 2691 kfree(se); 2692 } 2693 2694 if (sis->flags & SWP_ACTIVATED) { 2695 struct address_space *mapping = swap_file->f_mapping; 2696 2697 sis->flags &= ~SWP_ACTIVATED; 2698 if (mapping->a_ops->swap_deactivate) 2699 mapping->a_ops->swap_deactivate(swap_file); 2700 } 2701 } 2702 2703 /* 2704 * Add a block range (and the corresponding page range) into this swapdev's 2705 * extent tree. 2706 * 2707 * This function rather assumes that it is called in ascending page order. 
2708 */ 2709 int 2710 add_swap_extent(struct swap_info_struct *sis, unsigned long start_page, 2711 unsigned long nr_pages, sector_t start_block) 2712 { 2713 struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL; 2714 struct swap_extent *se; 2715 struct swap_extent *new_se; 2716 2717 /* 2718 * place the new node at the right most since the 2719 * function is called in ascending page order. 2720 */ 2721 while (*link) { 2722 parent = *link; 2723 link = &parent->rb_right; 2724 } 2725 2726 if (parent) { 2727 se = rb_entry(parent, struct swap_extent, rb_node); 2728 BUG_ON(se->start_page + se->nr_pages != start_page); 2729 if (se->start_block + se->nr_pages == start_block) { 2730 /* Merge it */ 2731 se->nr_pages += nr_pages; 2732 return 0; 2733 } 2734 } 2735 2736 /* No merge, insert a new extent. */ 2737 new_se = kmalloc_obj(*se); 2738 if (new_se == NULL) 2739 return -ENOMEM; 2740 new_se->start_page = start_page; 2741 new_se->nr_pages = nr_pages; 2742 new_se->start_block = start_block; 2743 2744 rb_link_node(&new_se->rb_node, parent, link); 2745 rb_insert_color(&new_se->rb_node, &sis->swap_extent_root); 2746 return 1; 2747 } 2748 EXPORT_SYMBOL_GPL(add_swap_extent); 2749 2750 /* 2751 * A `swap extent' is a simple thing which maps a contiguous range of pages 2752 * onto a contiguous range of disk blocks. A rbtree of swap extents is 2753 * built at swapon time and is then used at swap_writepage/swap_read_folio 2754 * time for locating where on disk a page belongs. 2755 * 2756 * If the swapfile is an S_ISBLK block device, a single extent is installed. 2757 * This is done so that the main operating code can treat S_ISBLK and S_ISREG 2758 * swap files identically. 2759 * 2760 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap 2761 * extent rbtree operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK 2762 * swapfiles are handled *identically* after swapon time. 2763 * 2764 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks 2765 * and will parse them into a rbtree, in PAGE_SIZE chunks. If some stray 2766 * blocks are found which do not fall within the PAGE_SIZE alignment 2767 * requirements, they are simply tossed out - we will never use those blocks 2768 * for swapping. 2769 * 2770 * For all swap devices we set S_SWAPFILE across the life of the swapon. This 2771 * prevents users from writing to the swap device, which will corrupt memory. 2772 * 2773 * The amount of disk space which a single swap extent represents varies. 2774 * Typically it is in the 1-4 megabyte range. So we can have hundreds of 2775 * extents in the rbtree. - akpm. 
2776 */ 2777 static int setup_swap_extents(struct swap_info_struct *sis, 2778 struct file *swap_file, sector_t *span) 2779 { 2780 struct address_space *mapping = swap_file->f_mapping; 2781 struct inode *inode = mapping->host; 2782 int ret; 2783 2784 if (S_ISBLK(inode->i_mode)) { 2785 ret = add_swap_extent(sis, 0, sis->max, 0); 2786 *span = sis->pages; 2787 return ret; 2788 } 2789 2790 if (mapping->a_ops->swap_activate) { 2791 ret = mapping->a_ops->swap_activate(sis, swap_file, span); 2792 if (ret < 0) 2793 return ret; 2794 sis->flags |= SWP_ACTIVATED; 2795 if ((sis->flags & SWP_FS_OPS) && 2796 sio_pool_init() != 0) { 2797 destroy_swap_extents(sis, swap_file); 2798 return -ENOMEM; 2799 } 2800 return ret; 2801 } 2802 2803 return generic_swapfile_activate(sis, swap_file, span); 2804 } 2805 2806 static void _enable_swap_info(struct swap_info_struct *si) 2807 { 2808 atomic_long_add(si->pages, &nr_swap_pages); 2809 total_swap_pages += si->pages; 2810 2811 assert_spin_locked(&swap_lock); 2812 2813 plist_add(&si->list, &swap_active_head); 2814 2815 /* Add back to available list */ 2816 add_to_avail_list(si, true); 2817 } 2818 2819 /* 2820 * Called after the swap device is ready, resurrect its percpu ref, it's now 2821 * safe to reference it. Add it to the list to expose it to the allocator. 2822 */ 2823 static void enable_swap_info(struct swap_info_struct *si) 2824 { 2825 percpu_ref_resurrect(&si->users); 2826 spin_lock(&swap_lock); 2827 spin_lock(&si->lock); 2828 _enable_swap_info(si); 2829 spin_unlock(&si->lock); 2830 spin_unlock(&swap_lock); 2831 } 2832 2833 static void reinsert_swap_info(struct swap_info_struct *si) 2834 { 2835 spin_lock(&swap_lock); 2836 spin_lock(&si->lock); 2837 _enable_swap_info(si); 2838 spin_unlock(&si->lock); 2839 spin_unlock(&swap_lock); 2840 } 2841 2842 /* 2843 * Called after clearing SWP_WRITEOK, ensures cluster_alloc_range 2844 * see the updated flags, so there will be no more allocations. 2845 */ 2846 static void wait_for_allocation(struct swap_info_struct *si) 2847 { 2848 unsigned long offset; 2849 unsigned long end = ALIGN(si->max, SWAPFILE_CLUSTER); 2850 struct swap_cluster_info *ci; 2851 2852 BUG_ON(si->flags & SWP_WRITEOK); 2853 2854 for (offset = 0; offset < end; offset += SWAPFILE_CLUSTER) { 2855 ci = swap_cluster_lock(si, offset); 2856 swap_cluster_unlock(ci); 2857 } 2858 } 2859 2860 static void free_swap_cluster_info(struct swap_cluster_info *cluster_info, 2861 unsigned long maxpages) 2862 { 2863 struct swap_cluster_info *ci; 2864 int i, nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); 2865 2866 if (!cluster_info) 2867 return; 2868 for (i = 0; i < nr_clusters; i++) { 2869 ci = cluster_info + i; 2870 /* Cluster with bad marks count will have a remaining table */ 2871 spin_lock(&ci->lock); 2872 if (rcu_dereference_protected(ci->table, true)) { 2873 swap_cluster_assert_empty(ci, 0, SWAPFILE_CLUSTER, true); 2874 swap_cluster_free_table(ci); 2875 } 2876 spin_unlock(&ci->lock); 2877 } 2878 kvfree(cluster_info); 2879 } 2880 2881 /* 2882 * Called after swap device's reference count is dead, so 2883 * neither scan nor allocation will use it. 2884 */ 2885 static void flush_percpu_swap_cluster(struct swap_info_struct *si) 2886 { 2887 int cpu, i; 2888 struct swap_info_struct **pcp_si; 2889 2890 for_each_possible_cpu(cpu) { 2891 pcp_si = per_cpu_ptr(percpu_swap_cluster.si, cpu); 2892 /* 2893 * Invalidate the percpu swap cluster cache, si->users 2894 * is dead, so no new user will point to it, just flush 2895 * any existing user. 
2896 */ 2897 for (i = 0; i < SWAP_NR_ORDERS; i++) 2898 cmpxchg(&pcp_si[i], si, NULL); 2899 } 2900 } 2901 2902 2903 SYSCALL_DEFINE1(swapoff, const char __user *, specialfile) 2904 { 2905 struct swap_info_struct *p = NULL; 2906 unsigned long *zeromap; 2907 struct swap_cluster_info *cluster_info; 2908 struct file *swap_file, *victim; 2909 struct address_space *mapping; 2910 struct inode *inode; 2911 unsigned int maxpages; 2912 int err, found = 0; 2913 2914 if (!capable(CAP_SYS_ADMIN)) 2915 return -EPERM; 2916 2917 BUG_ON(!current->mm); 2918 2919 CLASS(filename, pathname)(specialfile); 2920 victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0); 2921 if (IS_ERR(victim)) 2922 return PTR_ERR(victim); 2923 2924 mapping = victim->f_mapping; 2925 spin_lock(&swap_lock); 2926 plist_for_each_entry(p, &swap_active_head, list) { 2927 if (p->flags & SWP_WRITEOK) { 2928 if (p->swap_file->f_mapping == mapping) { 2929 found = 1; 2930 break; 2931 } 2932 } 2933 } 2934 if (!found) { 2935 err = -EINVAL; 2936 spin_unlock(&swap_lock); 2937 goto out_dput; 2938 } 2939 if (!security_vm_enough_memory_mm(current->mm, p->pages)) 2940 vm_unacct_memory(p->pages); 2941 else { 2942 err = -ENOMEM; 2943 spin_unlock(&swap_lock); 2944 goto out_dput; 2945 } 2946 spin_lock(&p->lock); 2947 del_from_avail_list(p, true); 2948 plist_del(&p->list, &swap_active_head); 2949 atomic_long_sub(p->pages, &nr_swap_pages); 2950 total_swap_pages -= p->pages; 2951 spin_unlock(&p->lock); 2952 spin_unlock(&swap_lock); 2953 2954 wait_for_allocation(p); 2955 2956 set_current_oom_origin(); 2957 err = try_to_unuse(p->type); 2958 clear_current_oom_origin(); 2959 2960 if (err) { 2961 /* re-insert swap space back into swap_list */ 2962 reinsert_swap_info(p); 2963 goto out_dput; 2964 } 2965 2966 /* 2967 * Wait for swap operations protected by get/put_swap_device() 2968 * to complete. Because of synchronize_rcu() here, all swap 2969 * operations protected by RCU reader side lock (including any 2970 * spinlock) will be waited too. This makes it easy to 2971 * prevent folio_test_swapcache() and the following swap cache 2972 * operations from racing with swapoff. 2973 */ 2974 percpu_ref_kill(&p->users); 2975 synchronize_rcu(); 2976 wait_for_completion(&p->comp); 2977 2978 flush_work(&p->discard_work); 2979 flush_work(&p->reclaim_work); 2980 flush_percpu_swap_cluster(p); 2981 2982 destroy_swap_extents(p, p->swap_file); 2983 2984 if (!(p->flags & SWP_SOLIDSTATE)) 2985 atomic_dec(&nr_rotate_swap); 2986 2987 mutex_lock(&swapon_mutex); 2988 spin_lock(&swap_lock); 2989 spin_lock(&p->lock); 2990 drain_mmlist(); 2991 2992 swap_file = p->swap_file; 2993 p->swap_file = NULL; 2994 zeromap = p->zeromap; 2995 p->zeromap = NULL; 2996 maxpages = p->max; 2997 cluster_info = p->cluster_info; 2998 p->max = 0; 2999 p->cluster_info = NULL; 3000 spin_unlock(&p->lock); 3001 spin_unlock(&swap_lock); 3002 arch_swap_invalidate_area(p->type); 3003 zswap_swapoff(p->type); 3004 mutex_unlock(&swapon_mutex); 3005 kfree(p->global_cluster); 3006 p->global_cluster = NULL; 3007 kvfree(zeromap); 3008 free_swap_cluster_info(cluster_info, maxpages); 3009 /* Destroy swap account information */ 3010 swap_cgroup_swapoff(p->type); 3011 3012 inode = mapping->host; 3013 3014 inode_lock(inode); 3015 inode->i_flags &= ~S_SWAPFILE; 3016 inode_unlock(inode); 3017 filp_close(swap_file, NULL); 3018 3019 /* 3020 * Clear the SWP_USED flag after all resources are freed so that swapon 3021 * can reuse this swap_info in alloc_swap_info() safely. 
It is ok to 3022 * not hold p->lock after we cleared its SWP_WRITEOK. 3023 */ 3024 spin_lock(&swap_lock); 3025 p->flags = 0; 3026 spin_unlock(&swap_lock); 3027 3028 err = 0; 3029 atomic_inc(&proc_poll_event); 3030 wake_up_interruptible(&proc_poll_wait); 3031 3032 out_dput: 3033 filp_close(victim, NULL); 3034 return err; 3035 } 3036 3037 #ifdef CONFIG_PROC_FS 3038 static __poll_t swaps_poll(struct file *file, poll_table *wait) 3039 { 3040 struct seq_file *seq = file->private_data; 3041 3042 poll_wait(file, &proc_poll_wait, wait); 3043 3044 if (seq->poll_event != atomic_read(&proc_poll_event)) { 3045 seq->poll_event = atomic_read(&proc_poll_event); 3046 return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI; 3047 } 3048 3049 return EPOLLIN | EPOLLRDNORM; 3050 } 3051 3052 /* iterator */ 3053 static void *swap_start(struct seq_file *swap, loff_t *pos) 3054 { 3055 struct swap_info_struct *si; 3056 int type; 3057 loff_t l = *pos; 3058 3059 mutex_lock(&swapon_mutex); 3060 3061 if (!l) 3062 return SEQ_START_TOKEN; 3063 3064 for (type = 0; (si = swap_type_to_info(type)); type++) { 3065 if (!(si->swap_file)) 3066 continue; 3067 if (!--l) 3068 return si; 3069 } 3070 3071 return NULL; 3072 } 3073 3074 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos) 3075 { 3076 struct swap_info_struct *si = v; 3077 int type; 3078 3079 if (v == SEQ_START_TOKEN) 3080 type = 0; 3081 else 3082 type = si->type + 1; 3083 3084 ++(*pos); 3085 for (; (si = swap_type_to_info(type)); type++) { 3086 if (!(si->swap_file)) 3087 continue; 3088 return si; 3089 } 3090 3091 return NULL; 3092 } 3093 3094 static void swap_stop(struct seq_file *swap, void *v) 3095 { 3096 mutex_unlock(&swapon_mutex); 3097 } 3098 3099 static int swap_show(struct seq_file *swap, void *v) 3100 { 3101 struct swap_info_struct *si = v; 3102 struct file *file; 3103 int len; 3104 unsigned long bytes, inuse; 3105 3106 if (si == SEQ_START_TOKEN) { 3107 seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n"); 3108 return 0; 3109 } 3110 3111 bytes = K(si->pages); 3112 inuse = K(swap_usage_in_pages(si)); 3113 3114 file = si->swap_file; 3115 len = seq_file_path(swap, file, " \t\n\\"); 3116 seq_printf(swap, "%*s%s\t%lu\t%s%lu\t%s%d\n", 3117 len < 40 ? 40 - len : 1, " ", 3118 S_ISBLK(file_inode(file)->i_mode) ? 3119 "partition" : "file\t", 3120 bytes, bytes < 10000000 ? "\t" : "", 3121 inuse, inuse < 10000000 ? 
"\t" : "", 3122 si->prio); 3123 return 0; 3124 } 3125 3126 static const struct seq_operations swaps_op = { 3127 .start = swap_start, 3128 .next = swap_next, 3129 .stop = swap_stop, 3130 .show = swap_show 3131 }; 3132 3133 static int swaps_open(struct inode *inode, struct file *file) 3134 { 3135 struct seq_file *seq; 3136 int ret; 3137 3138 ret = seq_open(file, &swaps_op); 3139 if (ret) 3140 return ret; 3141 3142 seq = file->private_data; 3143 seq->poll_event = atomic_read(&proc_poll_event); 3144 return 0; 3145 } 3146 3147 static const struct proc_ops swaps_proc_ops = { 3148 .proc_flags = PROC_ENTRY_PERMANENT, 3149 .proc_open = swaps_open, 3150 .proc_read = seq_read, 3151 .proc_lseek = seq_lseek, 3152 .proc_release = seq_release, 3153 .proc_poll = swaps_poll, 3154 }; 3155 3156 static int __init procswaps_init(void) 3157 { 3158 proc_create("swaps", 0, NULL, &swaps_proc_ops); 3159 return 0; 3160 } 3161 __initcall(procswaps_init); 3162 #endif /* CONFIG_PROC_FS */ 3163 3164 #ifdef MAX_SWAPFILES_CHECK 3165 static int __init max_swapfiles_check(void) 3166 { 3167 MAX_SWAPFILES_CHECK(); 3168 return 0; 3169 } 3170 late_initcall(max_swapfiles_check); 3171 #endif 3172 3173 static struct swap_info_struct *alloc_swap_info(void) 3174 { 3175 struct swap_info_struct *p; 3176 struct swap_info_struct *defer = NULL; 3177 unsigned int type; 3178 3179 p = kvzalloc_obj(struct swap_info_struct); 3180 if (!p) 3181 return ERR_PTR(-ENOMEM); 3182 3183 if (percpu_ref_init(&p->users, swap_users_ref_free, 3184 PERCPU_REF_INIT_DEAD, GFP_KERNEL)) { 3185 kvfree(p); 3186 return ERR_PTR(-ENOMEM); 3187 } 3188 3189 spin_lock(&swap_lock); 3190 for (type = 0; type < nr_swapfiles; type++) { 3191 if (!(swap_info[type]->flags & SWP_USED)) 3192 break; 3193 } 3194 if (type >= MAX_SWAPFILES) { 3195 spin_unlock(&swap_lock); 3196 percpu_ref_exit(&p->users); 3197 kvfree(p); 3198 return ERR_PTR(-EPERM); 3199 } 3200 if (type >= nr_swapfiles) { 3201 p->type = type; 3202 /* 3203 * Publish the swap_info_struct after initializing it. 3204 * Note that kvzalloc() above zeroes all its fields. 3205 */ 3206 smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */ 3207 nr_swapfiles++; 3208 } else { 3209 defer = p; 3210 p = swap_info[type]; 3211 /* 3212 * Do not memset this entry: a racing procfs swap_next() 3213 * would be relying on p->type to remain valid. 3214 */ 3215 } 3216 p->swap_extent_root = RB_ROOT; 3217 plist_node_init(&p->list, 0); 3218 plist_node_init(&p->avail_list, 0); 3219 p->flags = SWP_USED; 3220 spin_unlock(&swap_lock); 3221 if (defer) { 3222 percpu_ref_exit(&defer->users); 3223 kvfree(defer); 3224 } 3225 spin_lock_init(&p->lock); 3226 atomic_long_set(&p->inuse_pages, SWAP_USAGE_OFFLIST_BIT); 3227 init_completion(&p->comp); 3228 3229 return p; 3230 } 3231 3232 static int claim_swapfile(struct swap_info_struct *si, struct inode *inode) 3233 { 3234 if (S_ISBLK(inode->i_mode)) { 3235 si->bdev = I_BDEV(inode); 3236 /* 3237 * Zoned block devices contain zones that have a sequential 3238 * write only restriction. Hence zoned block devices are not 3239 * suitable for swapping. Disallow them here. 3240 */ 3241 if (bdev_is_zoned(si->bdev)) 3242 return -EINVAL; 3243 si->flags |= SWP_BLKDEV; 3244 } else if (S_ISREG(inode->i_mode)) { 3245 si->bdev = inode->i_sb->s_bdev; 3246 } 3247 3248 return 0; 3249 } 3250 3251 3252 /* 3253 * Find out how many pages are allowed for a single swap device. 
There 3254 * are two limiting factors: 3255 * 1) the number of bits for the swap offset in the swp_entry_t type, and 3256 * 2) the number of bits in the swap pte, as defined by the different 3257 * architectures. 3258 * 3259 * In order to find the largest possible bit mask, a swap entry with 3260 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte, 3261 * decoded to a swp_entry_t again, and finally the swap offset is 3262 * extracted. 3263 * 3264 * This will mask all the bits from the initial ~0UL mask that can't 3265 * be encoded in either the swp_entry_t or the architecture definition 3266 * of a swap pte. 3267 */ 3268 unsigned long generic_max_swapfile_size(void) 3269 { 3270 swp_entry_t entry = swp_entry(0, ~0UL); 3271 const pte_t pte = softleaf_to_pte(entry); 3272 3273 /* 3274 * Since the PTE can be an invalid softleaf entry (e.g. the none PTE), 3275 * we need to do this manually. 3276 */ 3277 entry = __pte_to_swp_entry(pte); 3278 entry = swp_entry(__swp_type(entry), __swp_offset(entry)); 3279 3280 return swp_offset(entry) + 1; 3281 } 3282 3283 /* Can be overridden by an architecture for additional checks. */ 3284 __weak unsigned long arch_max_swapfile_size(void) 3285 { 3286 return generic_max_swapfile_size(); 3287 } 3288 3289 static unsigned long read_swap_header(struct swap_info_struct *si, 3290 union swap_header *swap_header, 3291 struct inode *inode) 3292 { 3293 int i; 3294 unsigned long maxpages; 3295 unsigned long swapfilepages; 3296 unsigned long last_page; 3297 3298 if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) { 3299 pr_err("Unable to find swap-space signature\n"); 3300 return 0; 3301 } 3302 3303 /* swap partition endianness hack... */ 3304 if (swab32(swap_header->info.version) == 1) { 3305 swab32s(&swap_header->info.version); 3306 swab32s(&swap_header->info.last_page); 3307 swab32s(&swap_header->info.nr_badpages); 3308 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) 3309 return 0; 3310 for (i = 0; i < swap_header->info.nr_badpages; i++) 3311 swab32s(&swap_header->info.badpages[i]); 3312 } 3313 /* Check the swap header's sub-version */ 3314 if (swap_header->info.version != 1) { 3315 pr_warn("Unable to handle swap header version %d\n", 3316 swap_header->info.version); 3317 return 0; 3318 } 3319 3320 maxpages = swapfile_maximum_size; 3321 last_page = swap_header->info.last_page; 3322 if (!last_page) { 3323 pr_warn("Empty swap-file\n"); 3324 return 0; 3325 } 3326 if (last_page > maxpages) { 3327 pr_warn("Truncating oversized swap area, only using %luk out of %luk\n", 3328 K(maxpages), K(last_page)); 3329 } 3330 if (maxpages > last_page) { 3331 maxpages = last_page + 1; 3332 /* p->max is an unsigned int: don't overflow it */ 3333 if ((unsigned int)maxpages == 0) 3334 maxpages = UINT_MAX; 3335 } 3336 3337 if (!maxpages) 3338 return 0; 3339 swapfilepages = i_size_read(inode) >> PAGE_SHIFT; 3340 if (swapfilepages && maxpages > swapfilepages) { 3341 pr_warn("Swap area shorter than signature indicates\n"); 3342 return 0; 3343 } 3344 if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode)) 3345 return 0; 3346 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES) 3347 return 0; 3348 3349 return maxpages; 3350 } 3351 3352 static int setup_swap_clusters_info(struct swap_info_struct *si, 3353 union swap_header *swap_header, 3354 unsigned long maxpages) 3355 { 3356 unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER); 3357 struct swap_cluster_info *cluster_info; 3358 int err = -ENOMEM; 3359 unsigned long i; 3360 3361 cluster_info = 
kvzalloc_objs(*cluster_info, nr_clusters); 3362 if (!cluster_info) 3363 goto err; 3364 3365 for (i = 0; i < nr_clusters; i++) 3366 spin_lock_init(&cluster_info[i].lock); 3367 3368 if (!(si->flags & SWP_SOLIDSTATE)) { 3369 si->global_cluster = kmalloc_obj(*si->global_cluster); 3370 if (!si->global_cluster) 3371 goto err; 3372 for (i = 0; i < SWAP_NR_ORDERS; i++) 3373 si->global_cluster->next[i] = SWAP_ENTRY_INVALID; 3374 spin_lock_init(&si->global_cluster_lock); 3375 } 3376 3377 /* 3378 * Mark unusable pages (header page, bad pages, and the EOF part of 3379 * the last cluster) as unavailable. The clusters aren't marked free 3380 * yet, so no list operations are involved yet. 3381 */ 3382 err = swap_cluster_setup_bad_slot(si, cluster_info, 0, false); 3383 if (err) 3384 goto err; 3385 for (i = 0; i < swap_header->info.nr_badpages; i++) { 3386 unsigned int page_nr = swap_header->info.badpages[i]; 3387 3388 if (!page_nr || page_nr > swap_header->info.last_page) { 3389 pr_warn("Bad slot offset is out of border: %d (last_page: %d)\n", 3390 page_nr, swap_header->info.last_page); 3391 err = -EINVAL; 3392 goto err; 3393 } 3394 err = swap_cluster_setup_bad_slot(si, cluster_info, page_nr, false); 3395 if (err) 3396 goto err; 3397 } 3398 for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++) { 3399 err = swap_cluster_setup_bad_slot(si, cluster_info, i, true); 3400 if (err) 3401 goto err; 3402 } 3403 3404 INIT_LIST_HEAD(&si->free_clusters); 3405 INIT_LIST_HEAD(&si->full_clusters); 3406 INIT_LIST_HEAD(&si->discard_clusters); 3407 3408 for (i = 0; i < SWAP_NR_ORDERS; i++) { 3409 INIT_LIST_HEAD(&si->nonfull_clusters[i]); 3410 INIT_LIST_HEAD(&si->frag_clusters[i]); 3411 } 3412 3413 for (i = 0; i < nr_clusters; i++) { 3414 struct swap_cluster_info *ci = &cluster_info[i]; 3415 3416 if (ci->count) { 3417 ci->flags = CLUSTER_FLAG_NONFULL; 3418 list_add_tail(&ci->list, &si->nonfull_clusters[0]); 3419 } else { 3420 ci->flags = CLUSTER_FLAG_FREE; 3421 list_add_tail(&ci->list, &si->free_clusters); 3422 } 3423 } 3424 3425 si->cluster_info = cluster_info; 3426 return 0; 3427 err: 3428 free_swap_cluster_info(cluster_info, maxpages); 3429 return err; 3430 } 3431 3432 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags) 3433 { 3434 struct swap_info_struct *si; 3435 struct file *swap_file = NULL; 3436 struct address_space *mapping; 3437 struct dentry *dentry; 3438 int prio; 3439 int error; 3440 union swap_header *swap_header; 3441 int nr_extents; 3442 sector_t span; 3443 unsigned long maxpages; 3444 struct folio *folio = NULL; 3445 struct inode *inode = NULL; 3446 bool inced_nr_rotate_swap = false; 3447 3448 if (swap_flags & ~SWAP_FLAGS_VALID) 3449 return -EINVAL; 3450 3451 if (!capable(CAP_SYS_ADMIN)) 3452 return -EPERM; 3453 3454 /* 3455 * Allocate or reuse existing !SWP_USED swap_info. The returned 3456 * si will stay in a dying status, so nothing will access its content 3457 * until enable_swap_info resurrects its percpu ref and expose it. 
3458 */ 3459 si = alloc_swap_info(); 3460 if (IS_ERR(si)) 3461 return PTR_ERR(si); 3462 3463 INIT_WORK(&si->discard_work, swap_discard_work); 3464 INIT_WORK(&si->reclaim_work, swap_reclaim_work); 3465 3466 CLASS(filename, name)(specialfile); 3467 swap_file = file_open_name(name, O_RDWR | O_LARGEFILE | O_EXCL, 0); 3468 if (IS_ERR(swap_file)) { 3469 error = PTR_ERR(swap_file); 3470 swap_file = NULL; 3471 goto bad_swap; 3472 } 3473 3474 mapping = swap_file->f_mapping; 3475 dentry = swap_file->f_path.dentry; 3476 inode = mapping->host; 3477 3478 error = claim_swapfile(si, inode); 3479 if (unlikely(error)) 3480 goto bad_swap; 3481 3482 inode_lock(inode); 3483 if (d_unlinked(dentry) || cant_mount(dentry)) { 3484 error = -ENOENT; 3485 goto bad_swap_unlock_inode; 3486 } 3487 if (IS_SWAPFILE(inode)) { 3488 error = -EBUSY; 3489 goto bad_swap_unlock_inode; 3490 } 3491 3492 /* 3493 * The swap subsystem needs a major overhaul to support this. 3494 * It doesn't work yet so just disable it for now. 3495 */ 3496 if (mapping_min_folio_order(mapping) > 0) { 3497 error = -EINVAL; 3498 goto bad_swap_unlock_inode; 3499 } 3500 3501 /* 3502 * Read the swap header. 3503 */ 3504 if (!mapping->a_ops->read_folio) { 3505 error = -EINVAL; 3506 goto bad_swap_unlock_inode; 3507 } 3508 folio = read_mapping_folio(mapping, 0, swap_file); 3509 if (IS_ERR(folio)) { 3510 error = PTR_ERR(folio); 3511 goto bad_swap_unlock_inode; 3512 } 3513 swap_header = kmap_local_folio(folio, 0); 3514 3515 maxpages = read_swap_header(si, swap_header, inode); 3516 if (unlikely(!maxpages)) { 3517 error = -EINVAL; 3518 goto bad_swap_unlock_inode; 3519 } 3520 3521 si->max = maxpages; 3522 si->pages = maxpages - 1; 3523 nr_extents = setup_swap_extents(si, swap_file, &span); 3524 if (nr_extents < 0) { 3525 error = nr_extents; 3526 goto bad_swap_unlock_inode; 3527 } 3528 if (si->pages != si->max - 1) { 3529 pr_err("swap:%u != (max:%u - 1)\n", si->pages, si->max); 3530 error = -EINVAL; 3531 goto bad_swap_unlock_inode; 3532 } 3533 3534 maxpages = si->max; 3535 3536 /* Set up the swap cluster info */ 3537 error = setup_swap_clusters_info(si, swap_header, maxpages); 3538 if (error) 3539 goto bad_swap_unlock_inode; 3540 3541 error = swap_cgroup_swapon(si->type, maxpages); 3542 if (error) 3543 goto bad_swap_unlock_inode; 3544 3545 /* 3546 * Use kvmalloc_array instead of bitmap_zalloc as the allocation order might 3547 * be above MAX_PAGE_ORDER incase of a large swap file. 3548 */ 3549 si->zeromap = kvmalloc_array(BITS_TO_LONGS(maxpages), sizeof(long), 3550 GFP_KERNEL | __GFP_ZERO); 3551 if (!si->zeromap) { 3552 error = -ENOMEM; 3553 goto bad_swap_unlock_inode; 3554 } 3555 3556 if (si->bdev && bdev_stable_writes(si->bdev)) 3557 si->flags |= SWP_STABLE_WRITES; 3558 3559 if (si->bdev && bdev_synchronous(si->bdev)) 3560 si->flags |= SWP_SYNCHRONOUS_IO; 3561 3562 if (si->bdev && !bdev_rot(si->bdev)) { 3563 si->flags |= SWP_SOLIDSTATE; 3564 } else { 3565 atomic_inc(&nr_rotate_swap); 3566 inced_nr_rotate_swap = true; 3567 } 3568 3569 if ((swap_flags & SWAP_FLAG_DISCARD) && 3570 si->bdev && bdev_max_discard_sectors(si->bdev)) { 3571 /* 3572 * When discard is enabled for swap with no particular 3573 * policy flagged, we set all swap discard flags here in 3574 * order to sustain backward compatibility with older 3575 * swapon(8) releases. 
3576 */ 3577 si->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD | 3578 SWP_PAGE_DISCARD); 3579 3580 /* 3581 * By flagging sys_swapon, a sysadmin can tell us to 3582 * either do single-time area discards only, or to just 3583 * perform discards for released swap page-clusters. 3584 * Now it's time to adjust the p->flags accordingly. 3585 */ 3586 if (swap_flags & SWAP_FLAG_DISCARD_ONCE) 3587 si->flags &= ~SWP_PAGE_DISCARD; 3588 else if (swap_flags & SWAP_FLAG_DISCARD_PAGES) 3589 si->flags &= ~SWP_AREA_DISCARD; 3590 3591 /* issue a swapon-time discard if it's still required */ 3592 if (si->flags & SWP_AREA_DISCARD) { 3593 int err = discard_swap(si); 3594 if (unlikely(err)) 3595 pr_err("swapon: discard_swap(%p): %d\n", 3596 si, err); 3597 } 3598 } 3599 3600 error = zswap_swapon(si->type, maxpages); 3601 if (error) 3602 goto bad_swap_unlock_inode; 3603 3604 /* 3605 * Flush any pending IO and dirty mappings before we start using this 3606 * swap device. 3607 */ 3608 inode->i_flags |= S_SWAPFILE; 3609 error = inode_drain_writes(inode); 3610 if (error) { 3611 inode->i_flags &= ~S_SWAPFILE; 3612 goto free_swap_zswap; 3613 } 3614 3615 mutex_lock(&swapon_mutex); 3616 prio = DEF_SWAP_PRIO; 3617 if (swap_flags & SWAP_FLAG_PREFER) 3618 prio = swap_flags & SWAP_FLAG_PRIO_MASK; 3619 3620 /* 3621 * The plist prio is negated because plist ordering is 3622 * low-to-high, while swap ordering is high-to-low 3623 */ 3624 si->prio = prio; 3625 si->list.prio = -si->prio; 3626 si->avail_list.prio = -si->prio; 3627 si->swap_file = swap_file; 3628 3629 /* Sets SWP_WRITEOK, resurrect the percpu ref, expose the swap device */ 3630 enable_swap_info(si); 3631 3632 pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s\n", 3633 K(si->pages), name->name, si->prio, nr_extents, 3634 K((unsigned long long)span), 3635 (si->flags & SWP_SOLIDSTATE) ? "SS" : "", 3636 (si->flags & SWP_DISCARDABLE) ? "D" : "", 3637 (si->flags & SWP_AREA_DISCARD) ? "s" : "", 3638 (si->flags & SWP_PAGE_DISCARD) ? "c" : ""); 3639 3640 mutex_unlock(&swapon_mutex); 3641 atomic_inc(&proc_poll_event); 3642 wake_up_interruptible(&proc_poll_wait); 3643 3644 error = 0; 3645 goto out; 3646 free_swap_zswap: 3647 zswap_swapoff(si->type); 3648 bad_swap_unlock_inode: 3649 inode_unlock(inode); 3650 bad_swap: 3651 kfree(si->global_cluster); 3652 si->global_cluster = NULL; 3653 inode = NULL; 3654 destroy_swap_extents(si, swap_file); 3655 swap_cgroup_swapoff(si->type); 3656 free_swap_cluster_info(si->cluster_info, si->max); 3657 si->cluster_info = NULL; 3658 kvfree(si->zeromap); 3659 si->zeromap = NULL; 3660 /* 3661 * Clear the SWP_USED flag after all resources are freed so 3662 * alloc_swap_info can reuse this si safely. 
3663 */ 3664 spin_lock(&swap_lock); 3665 si->flags = 0; 3666 spin_unlock(&swap_lock); 3667 if (inced_nr_rotate_swap) 3668 atomic_dec(&nr_rotate_swap); 3669 if (swap_file) 3670 filp_close(swap_file, NULL); 3671 out: 3672 if (!IS_ERR_OR_NULL(folio)) 3673 folio_release_kmap(folio, swap_header); 3674 if (inode) 3675 inode_unlock(inode); 3676 return error; 3677 } 3678 3679 void si_swapinfo(struct sysinfo *val) 3680 { 3681 unsigned int type; 3682 unsigned long nr_to_be_unused = 0; 3683 3684 spin_lock(&swap_lock); 3685 for (type = 0; type < nr_swapfiles; type++) { 3686 struct swap_info_struct *si = swap_info[type]; 3687 3688 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) 3689 nr_to_be_unused += swap_usage_in_pages(si); 3690 } 3691 val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused; 3692 val->totalswap = total_swap_pages + nr_to_be_unused; 3693 spin_unlock(&swap_lock); 3694 } 3695 3696 /* 3697 * swap_dup_entry_direct() - Increase reference count of a swap entry by one. 3698 * @entry: first swap entry from which we want to increase the refcount. 3699 * 3700 * Returns 0 for success, or -ENOMEM if the extend table is required 3701 * but could not be atomically allocated. Returns -EINVAL if the swap 3702 * entry is invalid, which might occur if a page table entry has got 3703 * corrupted. 3704 * 3705 * Context: Caller must ensure there is no race condition on the reference 3706 * owner. e.g., locking the PTL of a PTE containing the entry being increased. 3707 * Also the swap entry must have a count >= 1. Otherwise folio_dup_swap should 3708 * be used. 3709 */ 3710 int swap_dup_entry_direct(swp_entry_t entry) 3711 { 3712 struct swap_info_struct *si; 3713 3714 si = swap_entry_to_info(entry); 3715 if (WARN_ON_ONCE(!si)) { 3716 pr_err("%s%08lx\n", Bad_file, entry.val); 3717 return -EINVAL; 3718 } 3719 3720 /* 3721 * The caller must be increasing the swap count from a direct 3722 * reference of the swap slot (e.g. a swap entry in page table). 3723 * So the swap count must be >= 1. 3724 */ 3725 VM_WARN_ON_ONCE(!swap_entry_swapped(si, entry)); 3726 3727 return swap_dup_entries_cluster(si, swp_offset(entry), 1); 3728 } 3729 3730 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP) 3731 static bool __has_usable_swap(void) 3732 { 3733 return !plist_head_empty(&swap_active_head); 3734 } 3735 3736 void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp) 3737 { 3738 struct swap_info_struct *si; 3739 3740 if (!(gfp & __GFP_IO)) 3741 return; 3742 3743 if (!__has_usable_swap()) 3744 return; 3745 3746 if (!blk_cgroup_congested()) 3747 return; 3748 3749 /* 3750 * We've already scheduled a throttle, avoid taking the global swap 3751 * lock. 3752 */ 3753 if (current->throttle_disk) 3754 return; 3755 3756 spin_lock(&swap_avail_lock); 3757 plist_for_each_entry(si, &swap_avail_head, avail_list) { 3758 if (si->bdev) { 3759 blkcg_schedule_throttle(si->bdev->bd_disk, true); 3760 break; 3761 } 3762 } 3763 spin_unlock(&swap_avail_lock); 3764 } 3765 #endif 3766 3767 static int __init swapfile_init(void) 3768 { 3769 swapfile_maximum_size = arch_max_swapfile_size(); 3770 3771 /* 3772 * Once a cluster is freed, it's swap table content is read 3773 * only, and all swap cache readers (swap_cache_*) verifies 3774 * the content before use. So it's safe to use RCU slab here. 
3775 */ 3776 if (!SWP_TABLE_USE_PAGE) 3777 swap_table_cachep = kmem_cache_create("swap_table", 3778 sizeof(struct swap_table), 3779 0, SLAB_PANIC | SLAB_TYPESAFE_BY_RCU, NULL); 3780 3781 #ifdef CONFIG_MIGRATION 3782 if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS)) 3783 swap_migration_ad_supported = true; 3784 #endif /* CONFIG_MIGRATION */ 3785 3786 return 0; 3787 } 3788 subsys_initcall(swapfile_init); 3789