Lines Matching full:si
56 static void swap_entries_free(struct swap_info_struct *si,
59 static void swap_range_alloc(struct swap_info_struct *si,
62 static void move_cluster(struct swap_info_struct *si,
122 struct swap_info_struct *si[SWAP_NR_ORDERS]; member
128 .si = { NULL },
167 static long swap_usage_in_pages(struct swap_info_struct *si) in swap_usage_in_pages() argument
169 return atomic_long_read(&si->inuse_pages) & SWAP_USAGE_COUNTER_MASK; in swap_usage_in_pages()
182 static bool swap_only_has_cache(struct swap_info_struct *si, in swap_only_has_cache() argument
185 unsigned char *map = si->swap_map + offset; in swap_only_has_cache()
197 static bool swap_is_last_map(struct swap_info_struct *si, in swap_is_last_map() argument
200 unsigned char *map = si->swap_map + offset; in swap_is_last_map()
221 static int __try_to_reclaim_swap(struct swap_info_struct *si, in __try_to_reclaim_swap() argument
224 const swp_entry_t entry = swp_entry(si->type, offset); in __try_to_reclaim_swap()
270 ci = swap_cluster_lock(si, offset); in __try_to_reclaim_swap()
271 need_reclaim = swap_only_has_cache(si, offset, nr_pages); in __try_to_reclaim_swap()
302 static int discard_swap(struct swap_info_struct *si) in discard_swap() argument
310 se = first_se(si); in discard_swap()
314 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
325 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
372 static void discard_swap_cluster(struct swap_info_struct *si, in discard_swap_cluster() argument
375 struct swap_extent *se = offset_to_swap_extent(si, start_page); in discard_swap_cluster()
389 if (blkdev_issue_discard(si->bdev, start_block, in discard_swap_cluster()
425 static inline unsigned int cluster_index(struct swap_info_struct *si, in cluster_index() argument
428 return ci - si->cluster_info; in cluster_index()
431 static inline unsigned int cluster_offset(struct swap_info_struct *si, in cluster_offset() argument
434 return cluster_index(si, ci) * SWAPFILE_CLUSTER; in cluster_offset()
490 swap_cluster_alloc_table(struct swap_info_struct *si, in swap_cluster_alloc_table() argument
517 if (!(si->flags & SWP_SOLIDSTATE)) in swap_cluster_alloc_table()
518 spin_unlock(&si->global_cluster_lock); in swap_cluster_alloc_table()
532 if (!(si->flags & SWP_SOLIDSTATE)) in swap_cluster_alloc_table()
533 spin_lock(&si->global_cluster_lock); in swap_cluster_alloc_table()
544 move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE); in swap_cluster_alloc_table()
553 static void move_cluster(struct swap_info_struct *si, in move_cluster() argument
562 spin_lock(&si->lock); in move_cluster()
567 spin_unlock(&si->lock); in move_cluster()
572 static void swap_cluster_schedule_discard(struct swap_info_struct *si, in swap_cluster_schedule_discard() argument
576 move_cluster(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD); in swap_cluster_schedule_discard()
577 schedule_work(&si->discard_work); in swap_cluster_schedule_discard()
580 static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) in __free_cluster() argument
583 move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE); in __free_cluster()
591 * list status without touching si lock.
597 struct swap_info_struct *si, struct list_head *list, int order) in isolate_lock_cluster() argument
601 spin_lock(&si->lock); in isolate_lock_cluster()
616 spin_unlock(&si->lock); in isolate_lock_cluster()
620 VM_WARN_ON_ONCE(list != &si->free_clusters); in isolate_lock_cluster()
622 return swap_cluster_alloc_table(si, found); in isolate_lock_cluster()
634 static bool swap_do_scheduled_discard(struct swap_info_struct *si) in swap_do_scheduled_discard() argument
640 spin_lock(&si->lock); in swap_do_scheduled_discard()
641 while (!list_empty(&si->discard_clusters)) { in swap_do_scheduled_discard()
642 ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list); in swap_do_scheduled_discard()
649 idx = cluster_index(si, ci); in swap_do_scheduled_discard()
650 spin_unlock(&si->lock); in swap_do_scheduled_discard()
651 discard_swap_cluster(si, idx * SWAPFILE_CLUSTER, in swap_do_scheduled_discard()
660 __free_cluster(si, ci); in swap_do_scheduled_discard()
663 spin_lock(&si->lock); in swap_do_scheduled_discard()
665 spin_unlock(&si->lock); in swap_do_scheduled_discard()
671 struct swap_info_struct *si; in swap_discard_work() local
673 si = container_of(work, struct swap_info_struct, discard_work); in swap_discard_work()
675 swap_do_scheduled_discard(si); in swap_discard_work()
680 struct swap_info_struct *si; in swap_users_ref_free() local
682 si = container_of(ref, struct swap_info_struct, users); in swap_users_ref_free()
683 complete(&si->comp); in swap_users_ref_free()
690 static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) in free_cluster() argument
701 if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) == in free_cluster()
703 swap_cluster_schedule_discard(si, ci); in free_cluster()
707 __free_cluster(si, ci); in free_cluster()
714 static void partial_free_cluster(struct swap_info_struct *si, in partial_free_cluster() argument
721 move_cluster(si, ci, &si->nonfull_clusters[ci->order], in partial_free_cluster()
727 * Note: allocation doesn't acquire si lock, and may drop the ci lock for
730 static void relocate_cluster(struct swap_info_struct *si, in relocate_cluster() argument
741 free_cluster(si, ci); in relocate_cluster()
744 move_cluster(si, ci, &si->frag_clusters[ci->order], in relocate_cluster()
748 move_cluster(si, ci, &si->full_clusters, in relocate_cluster()
758 static int inc_cluster_info_page(struct swap_info_struct *si, in inc_cluster_info_page() argument
781 static bool cluster_reclaim_range(struct swap_info_struct *si, in cluster_reclaim_range() argument
785 unsigned char *map = si->swap_map; in cluster_reclaim_range()
796 nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); in cluster_reclaim_range()
819 static bool cluster_scan_range(struct swap_info_struct *si, in cluster_scan_range() argument
825 unsigned char *map = si->swap_map; in cluster_scan_range()
867 static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci, in cluster_alloc_range() argument
875 if (!(si->flags & SWP_WRITEOK)) in cluster_alloc_range()
885 memset(si->swap_map + start, usage, nr_pages); in cluster_alloc_range()
887 swap_range_alloc(si, nr_pages); in cluster_alloc_range()
894 static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, in alloc_swap_scan_cluster() argument
902 unsigned long end = min(start + SWAPFILE_CLUSTER, si->max); in alloc_swap_scan_cluster()
913 if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim)) in alloc_swap_scan_cluster()
916 ret = cluster_reclaim_range(si, ci, offset, offset + nr_pages); in alloc_swap_scan_cluster()
931 if (!cluster_alloc_range(si, ci, offset, usage, order)) in alloc_swap_scan_cluster()
940 relocate_cluster(si, ci); in alloc_swap_scan_cluster()
942 if (si->flags & SWP_SOLIDSTATE) { in alloc_swap_scan_cluster()
944 this_cpu_write(percpu_swap_cluster.si[order], si); in alloc_swap_scan_cluster()
946 si->global_cluster->next[order] = next; in alloc_swap_scan_cluster()
951 static unsigned int alloc_swap_scan_list(struct swap_info_struct *si, in alloc_swap_scan_list() argument
960 struct swap_cluster_info *ci = isolate_lock_cluster(si, list, order); in alloc_swap_scan_list()
965 offset = cluster_offset(si, ci); in alloc_swap_scan_list()
966 found = alloc_swap_scan_cluster(si, ci, offset, order, usage); in alloc_swap_scan_list()
974 static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force) in swap_reclaim_full_clusters() argument
979 unsigned char *map = si->swap_map; in swap_reclaim_full_clusters()
983 to_scan = swap_usage_in_pages(si) / SWAPFILE_CLUSTER; in swap_reclaim_full_clusters()
985 while ((ci = isolate_lock_cluster(si, &si->full_clusters, 0))) { in swap_reclaim_full_clusters()
986 offset = cluster_offset(si, ci); in swap_reclaim_full_clusters()
987 end = min(si->max, offset + SWAPFILE_CLUSTER); in swap_reclaim_full_clusters()
993 nr_reclaim = __try_to_reclaim_swap(si, offset, in swap_reclaim_full_clusters()
1006 relocate_cluster(si, ci); in swap_reclaim_full_clusters()
1016 struct swap_info_struct *si; in swap_reclaim_work() local
1018 si = container_of(work, struct swap_info_struct, reclaim_work); in swap_reclaim_work()
1020 swap_reclaim_full_clusters(si, true); in swap_reclaim_work()
1027 static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order, in cluster_alloc_swap_entry() argument
1037 if (order && !(si->flags & SWP_BLKDEV)) in cluster_alloc_swap_entry()
1040 if (!(si->flags & SWP_SOLIDSTATE)) { in cluster_alloc_swap_entry()
1042 spin_lock(&si->global_cluster_lock); in cluster_alloc_swap_entry()
1043 offset = si->global_cluster->next[order]; in cluster_alloc_swap_entry()
1047 ci = swap_cluster_lock(si, offset); in cluster_alloc_swap_entry()
1051 offset = cluster_offset(si, ci); in cluster_alloc_swap_entry()
1052 found = alloc_swap_scan_cluster(si, ci, offset, in cluster_alloc_swap_entry()
1066 if (si->flags & SWP_PAGE_DISCARD) { in cluster_alloc_swap_entry()
1067 found = alloc_swap_scan_list(si, &si->free_clusters, order, usage, in cluster_alloc_swap_entry()
1074 found = alloc_swap_scan_list(si, &si->nonfull_clusters[order], in cluster_alloc_swap_entry()
1080 if (!(si->flags & SWP_PAGE_DISCARD)) { in cluster_alloc_swap_entry()
1081 found = alloc_swap_scan_list(si, &si->free_clusters, order, usage, in cluster_alloc_swap_entry()
1089 swap_reclaim_full_clusters(si, false); in cluster_alloc_swap_entry()
1098 found = alloc_swap_scan_list(si, &si->frag_clusters[order], order, in cluster_alloc_swap_entry()
1108 if ((si->flags & SWP_PAGE_DISCARD) && swap_do_scheduled_discard(si)) in cluster_alloc_swap_entry()
1118 * allocation, but reclaim may drop si->lock and race with another user. in cluster_alloc_swap_entry()
1120 found = alloc_swap_scan_list(si, &si->frag_clusters[o], in cluster_alloc_swap_entry()
1125 found = alloc_swap_scan_list(si, &si->nonfull_clusters[o], in cluster_alloc_swap_entry()
1131 if (!(si->flags & SWP_SOLIDSTATE)) in cluster_alloc_swap_entry()
1132 spin_unlock(&si->global_cluster_lock); in cluster_alloc_swap_entry()
1138 static void del_from_avail_list(struct swap_info_struct *si, bool swapoff) in del_from_avail_list() argument
1148 * swapoff here so it's synchronized by both si->lock and in del_from_avail_list()
1152 lockdep_assert_held(&si->lock); in del_from_avail_list()
1153 si->flags &= ~SWP_WRITEOK; in del_from_avail_list()
1154 atomic_long_or(SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages); in del_from_avail_list()
1159 * si->inuse_pages == pages), any concurrent slot freeing, in del_from_avail_list()
1163 pages = si->pages; in del_from_avail_list()
1164 if (!atomic_long_try_cmpxchg(&si->inuse_pages, &pages, in del_from_avail_list()
1170 plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]); in del_from_avail_list()
1177 static void add_to_avail_list(struct swap_info_struct *si, bool swapon) in add_to_avail_list() argument
1187 lockdep_assert_held(&si->lock); in add_to_avail_list()
1188 si->flags |= SWP_WRITEOK; in add_to_avail_list()
1190 if (!(READ_ONCE(si->flags) & SWP_WRITEOK)) in add_to_avail_list()
1194 if (!(atomic_long_read(&si->inuse_pages) & SWAP_USAGE_OFFLIST_BIT)) in add_to_avail_list()
1197 val = atomic_long_fetch_and_relaxed(~SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages); in add_to_avail_list()
1201 * see (inuse_pages == si->pages) and will call del_from_avail_list. If in add_to_avail_list()
1204 pages = si->pages; in add_to_avail_list()
1207 if (atomic_long_try_cmpxchg(&si->inuse_pages, &pages, in add_to_avail_list()
1213 plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]); in add_to_avail_list()
1224 static bool swap_usage_add(struct swap_info_struct *si, unsigned int nr_entries) in swap_usage_add() argument
1226 long val = atomic_long_add_return_relaxed(nr_entries, &si->inuse_pages); in swap_usage_add()
1232 if (unlikely(val == si->pages)) { in swap_usage_add()
1233 del_from_avail_list(si, false); in swap_usage_add()
1240 static void swap_usage_sub(struct swap_info_struct *si, unsigned int nr_entries) in swap_usage_sub() argument
1242 long val = atomic_long_sub_return_relaxed(nr_entries, &si->inuse_pages); in swap_usage_sub()
1249 add_to_avail_list(si, false); in swap_usage_sub()
1252 static void swap_range_alloc(struct swap_info_struct *si, in swap_range_alloc() argument
1255 if (swap_usage_add(si, nr_entries)) { in swap_range_alloc()
1257 schedule_work(&si->reclaim_work); in swap_range_alloc()
1262 static void swap_range_free(struct swap_info_struct *si, unsigned long offset, in swap_range_free() argument
1275 clear_bit(offset + i, si->zeromap); in swap_range_free()
1276 zswap_invalidate(swp_entry(si->type, offset + i)); in swap_range_free()
1279 if (si->flags & SWP_BLKDEV) in swap_range_free()
1281 si->bdev->bd_disk->fops->swap_slot_free_notify; in swap_range_free()
1285 arch_swap_invalidate_page(si->type, offset); in swap_range_free()
1287 swap_slot_free_notify(si->bdev, offset); in swap_range_free()
1290 __swap_cache_clear_shadow(swp_entry(si->type, begin), nr_entries); in swap_range_free()
1293 * Make sure that try_to_unuse() observes si->inuse_pages reaching 0 in swap_range_free()
1298 swap_usage_sub(si, nr_entries); in swap_range_free()
1301 static bool get_swap_device_info(struct swap_info_struct *si) in get_swap_device_info() argument
1303 if (!percpu_ref_tryget_live(&si->users)) in get_swap_device_info()
1306 * Guarantee the si->users are checked before accessing other in get_swap_device_info()
1307 * fields of swap_info_struct, and si->flags (SWP_WRITEOK) is in get_swap_device_info()
1325 struct swap_info_struct *si; in swap_alloc_fast() local
1332 si = this_cpu_read(percpu_swap_cluster.si[order]); in swap_alloc_fast()
1334 if (!si || !offset || !get_swap_device_info(si)) in swap_alloc_fast()
1337 ci = swap_cluster_lock(si, offset); in swap_alloc_fast()
1340 offset = cluster_offset(si, ci); in swap_alloc_fast()
1341 found = alloc_swap_scan_cluster(si, ci, offset, order, SWAP_HAS_CACHE); in swap_alloc_fast()
1343 *entry = swp_entry(si->type, found); in swap_alloc_fast()
1348 put_swap_device(si); in swap_alloc_fast()
1358 struct swap_info_struct *si, *next; in swap_alloc_slow() local
1363 plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) { in swap_alloc_slow()
1365 plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]); in swap_alloc_slow()
1367 if (get_swap_device_info(si)) { in swap_alloc_slow()
1368 offset = cluster_alloc_swap_entry(si, order, SWAP_HAS_CACHE); in swap_alloc_slow()
1369 put_swap_device(si); in swap_alloc_slow()
1371 *entry = swp_entry(si->type, offset); in swap_alloc_slow()
1380 * if we got here, it's likely that si was almost full before, in swap_alloc_slow()
1381 * and since scan_swap_map_slots() can drop the si->lock, in swap_alloc_slow()
1383 * same si and it filled up before we could get one; or, the si in swap_alloc_slow()
1385 * si->lock. Since we dropped the swap_avail_lock, the in swap_alloc_slow()
1458 struct swap_info_struct *si; in _swap_info_get() local
1463 si = swap_entry_to_info(entry); in _swap_info_get()
1464 if (!si) in _swap_info_get()
1466 if (data_race(!(si->flags & SWP_USED))) in _swap_info_get()
1469 if (offset >= si->max) in _swap_info_get()
1471 if (data_race(!si->swap_map[swp_offset(entry)])) in _swap_info_get()
1473 return si; in _swap_info_get()
1490 static unsigned char swap_entry_put_locked(struct swap_info_struct *si, in swap_entry_put_locked() argument
1499 count = si->swap_map[offset]; in swap_entry_put_locked()
1515 if (swap_count_continued(si, offset, count)) in swap_entry_put_locked()
1525 WRITE_ONCE(si->swap_map[offset], usage); in swap_entry_put_locked()
1527 swap_entries_free(si, ci, entry, 1); in swap_entry_put_locked()
1573 struct swap_info_struct *si; in get_swap_device() local
1578 si = swap_entry_to_info(entry); in get_swap_device()
1579 if (!si) in get_swap_device()
1581 if (!get_swap_device_info(si)) in get_swap_device()
1584 if (offset >= si->max) in get_swap_device()
1587 return si; in get_swap_device()
1594 percpu_ref_put(&si->users); in get_swap_device()
1598 static void swap_entries_put_cache(struct swap_info_struct *si, in swap_entries_put_cache() argument
1604 ci = swap_cluster_lock(si, offset); in swap_entries_put_cache()
1605 if (swap_only_has_cache(si, offset, nr)) { in swap_entries_put_cache()
1606 swap_entries_free(si, ci, entry, nr); in swap_entries_put_cache()
1609 swap_entry_put_locked(si, ci, entry, SWAP_HAS_CACHE); in swap_entries_put_cache()
1614 static bool swap_entries_put_map(struct swap_info_struct *si, in swap_entries_put_map() argument
1625 count = swap_count(data_race(si->swap_map[offset])); in swap_entries_put_map()
1629 ci = swap_cluster_lock(si, offset); in swap_entries_put_map()
1630 if (!swap_is_last_map(si, offset, nr, &has_cache)) { in swap_entries_put_map()
1634 swap_entries_free(si, ci, entry, nr); in swap_entries_put_map()
1637 WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE); in swap_entries_put_map()
1643 ci = swap_cluster_lock(si, offset); in swap_entries_put_map()
1646 count = swap_entry_put_locked(si, ci, entry, 1); in swap_entries_put_map()
1659 static bool swap_entries_put_map_nr(struct swap_info_struct *si, in swap_entries_put_map_nr() argument
1669 has_cache |= swap_entries_put_map(si, entry, cluster_nr); in swap_entries_put_map_nr()
1692 static void swap_entries_free(struct swap_info_struct *si, in swap_entries_free() argument
1697 unsigned char *map = si->swap_map + offset; in swap_entries_free()
1701 VM_BUG_ON(ci != __swap_offset_to_cluster(si, offset + nr_pages - 1)); in swap_entries_free()
1712 swap_range_free(si, offset, nr_pages); in swap_entries_free()
1716 free_cluster(si, ci); in swap_entries_free()
1718 partial_free_cluster(si, ci); in swap_entries_free()
1748 struct swap_info_struct *si; in put_swap_folio() local
1751 si = _swap_info_get(entry); in put_swap_folio()
1752 if (!si) in put_swap_folio()
1755 swap_entries_put_cache(si, entry, size); in put_swap_folio()
1760 struct swap_info_struct *si = __swap_entry_to_info(entry); in __swap_count() local
1763 return swap_count(si->swap_map[offset]); in __swap_count()
1771 bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry) in swap_entry_swapped() argument
1777 ci = swap_cluster_lock(si, offset); in swap_entry_swapped()
1778 count = swap_count(si->swap_map[offset]); in swap_entry_swapped()
1790 struct swap_info_struct *si; in swp_swapcount() local
1796 si = _swap_info_get(entry); in swp_swapcount()
1797 if (!si) in swp_swapcount()
1802 ci = swap_cluster_lock(si, offset); in swp_swapcount()
1804 count = swap_count(si->swap_map[offset]); in swp_swapcount()
1811 page = vmalloc_to_page(si->swap_map + offset); in swp_swapcount()
1829 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si, in swap_page_trans_huge_swapped() argument
1833 unsigned char *map = si->swap_map; in swap_page_trans_huge_swapped()
1840 ci = swap_cluster_lock(si, offset); in swap_page_trans_huge_swapped()
1860 struct swap_info_struct *si = _swap_info_get(entry); in folio_swapped() local
1862 if (!si) in folio_swapped()
1866 return swap_entry_swapped(si, entry); in folio_swapped()
1868 return swap_page_trans_huge_swapped(si, entry, folio_order(folio)); in folio_swapped()
1936 struct swap_info_struct *si; in free_swap_and_cache_nr() local
1940 si = get_swap_device(entry); in free_swap_and_cache_nr()
1941 if (!si) in free_swap_and_cache_nr()
1944 if (WARN_ON(end_offset > si->max)) in free_swap_and_cache_nr()
1950 any_only_cache = swap_entries_put_map_nr(si, entry, nr); in free_swap_and_cache_nr()
1964 if (READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) { in free_swap_and_cache_nr()
1973 nr = __try_to_reclaim_swap(si, offset, in free_swap_and_cache_nr()
1984 put_swap_device(si); in free_swap_and_cache_nr()
1991 struct swap_info_struct *si = swap_type_to_info(type); in get_swap_page_of_type() local
1995 if (!si) in get_swap_page_of_type()
1999 if (get_swap_device_info(si)) { in get_swap_page_of_type()
2000 if (si->flags & SWP_WRITEOK) { in get_swap_page_of_type()
2006 offset = cluster_alloc_swap_entry(si, 0, 1); in get_swap_page_of_type()
2009 entry = swp_entry(si->type, offset); in get_swap_page_of_type()
2013 put_swap_device(si); in get_swap_page_of_type()
2078 struct swap_info_struct *si = swap_type_to_info(type); in swapdev_block() local
2081 if (!si || !(si->flags & SWP_WRITEOK)) in swapdev_block()
2083 se = offset_to_swap_extent(si, offset); in swapdev_block()
2237 struct swap_info_struct *si; in unuse_pte_range() local
2239 si = swap_info[type]; in unuse_pte_range()
2280 swp_count = READ_ONCE(si->swap_map[offset]); in unuse_pte_range()
2413 static unsigned int find_next_to_unuse(struct swap_info_struct *si, in find_next_to_unuse() argument
2425 for (i = prev + 1; i < si->max; i++) { in find_next_to_unuse()
2426 count = READ_ONCE(si->swap_map[i]); in find_next_to_unuse()
2433 if (i == si->max) in find_next_to_unuse()
2445 struct swap_info_struct *si = swap_info[type]; in try_to_unuse() local
2450 if (!swap_usage_in_pages(si)) in try_to_unuse()
2463 while (swap_usage_in_pages(si) && in try_to_unuse()
2491 while (swap_usage_in_pages(si) && in try_to_unuse()
2493 (i = find_next_to_unuse(si, i)) != 0) { in try_to_unuse()
2526 if (swap_usage_in_pages(si)) { in try_to_unuse()
2535 * after swap_range_free() reduces si->inuse_pages to 0. in try_to_unuse()
2687 static int swap_node(struct swap_info_struct *si) in swap_node() argument
2691 if (si->bdev) in swap_node()
2692 bdev = si->bdev; in swap_node()
2694 bdev = si->swap_file->f_inode->i_sb->s_bdev; in swap_node()
2699 static void setup_swap_info(struct swap_info_struct *si, int prio, in setup_swap_info() argument
2707 si->prio = prio; in setup_swap_info()
2709 si->prio = --least_priority; in setup_swap_info()
2714 si->list.prio = -si->prio; in setup_swap_info()
2716 if (si->prio >= 0) in setup_swap_info()
2717 si->avail_lists[i].prio = -si->prio; in setup_swap_info()
2719 if (swap_node(si) == i) in setup_swap_info()
2720 si->avail_lists[i].prio = 1; in setup_swap_info()
2722 si->avail_lists[i].prio = -si->prio; in setup_swap_info()
2725 si->swap_map = swap_map; in setup_swap_info()
2726 si->cluster_info = cluster_info; in setup_swap_info()
2727 si->zeromap = zeromap; in setup_swap_info()
2730 static void _enable_swap_info(struct swap_info_struct *si) in _enable_swap_info() argument
2732 atomic_long_add(si->pages, &nr_swap_pages); in _enable_swap_info()
2733 total_swap_pages += si->pages; in _enable_swap_info()
2746 plist_add(&si->list, &swap_active_head); in _enable_swap_info()
2749 add_to_avail_list(si, true); in _enable_swap_info()
2752 static void enable_swap_info(struct swap_info_struct *si, int prio, in enable_swap_info() argument
2758 spin_lock(&si->lock); in enable_swap_info()
2759 setup_swap_info(si, prio, swap_map, cluster_info, zeromap); in enable_swap_info()
2760 spin_unlock(&si->lock); in enable_swap_info()
2765 percpu_ref_resurrect(&si->users); in enable_swap_info()
2767 spin_lock(&si->lock); in enable_swap_info()
2768 _enable_swap_info(si); in enable_swap_info()
2769 spin_unlock(&si->lock); in enable_swap_info()
2773 static void reinsert_swap_info(struct swap_info_struct *si) in reinsert_swap_info() argument
2776 spin_lock(&si->lock); in reinsert_swap_info()
2777 setup_swap_info(si, si->prio, si->swap_map, si->cluster_info, si->zeromap); in reinsert_swap_info()
2778 _enable_swap_info(si); in reinsert_swap_info()
2779 spin_unlock(&si->lock); in reinsert_swap_info()
2787 static void wait_for_allocation(struct swap_info_struct *si) in wait_for_allocation() argument
2790 unsigned long end = ALIGN(si->max, SWAPFILE_CLUSTER); in wait_for_allocation()
2793 BUG_ON(si->flags & SWP_WRITEOK); in wait_for_allocation()
2796 ci = swap_cluster_lock(si, offset); in wait_for_allocation()
2826 static void flush_percpu_swap_cluster(struct swap_info_struct *si) in flush_percpu_swap_cluster() argument
2832 pcp_si = per_cpu_ptr(percpu_swap_cluster.si, cpu); in flush_percpu_swap_cluster()
2834 * Invalidate the percpu swap cluster cache, si->users in flush_percpu_swap_cluster()
2839 cmpxchg(&pcp_si[i], si, NULL); in flush_percpu_swap_cluster()
2896 struct swap_info_struct *si = p; in SYSCALL_DEFINE1() local
2899 plist_for_each_entry_continue(si, &swap_active_head, list) { in SYSCALL_DEFINE1()
2900 si->prio++; in SYSCALL_DEFINE1()
2901 si->list.prio--; in SYSCALL_DEFINE1()
2903 if (si->avail_lists[nid].prio != 1) in SYSCALL_DEFINE1()
2904 si->avail_lists[nid].prio--; in SYSCALL_DEFINE1()
3023 struct swap_info_struct *si; in swap_start() local
3032 for (type = 0; (si = swap_type_to_info(type)); type++) { in swap_start()
3033 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_start()
3036 return si; in swap_start()
3044 struct swap_info_struct *si = v; in swap_next() local
3050 type = si->type + 1; in swap_next()
3053 for (; (si = swap_type_to_info(type)); type++) { in swap_next()
3054 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_next()
3056 return si; in swap_next()
3069 struct swap_info_struct *si = v; in swap_show() local
3074 if (si == SEQ_START_TOKEN) { in swap_show()
3079 bytes = K(si->pages); in swap_show()
3080 inuse = K(swap_usage_in_pages(si)); in swap_show()
3082 file = si->swap_file; in swap_show()
3090 si->prio); in swap_show()
3203 static int claim_swapfile(struct swap_info_struct *si, struct inode *inode) in claim_swapfile() argument
3206 si->bdev = I_BDEV(inode); in claim_swapfile()
3212 if (bdev_is_zoned(si->bdev)) in claim_swapfile()
3214 si->flags |= SWP_BLKDEV; in claim_swapfile()
3216 si->bdev = inode->i_sb->s_bdev; in claim_swapfile()
3251 static unsigned long read_swap_header(struct swap_info_struct *si, in read_swap_header() argument
3314 static int setup_swap_map(struct swap_info_struct *si, in setup_swap_map() argument
3328 si->pages--; in setup_swap_map()
3332 if (!si->pages) { in setup_swap_map()
3340 static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, in setup_clusters() argument
3356 if (!(si->flags & SWP_SOLIDSTATE)) { in setup_clusters()
3357 si->global_cluster = kmalloc(sizeof(*si->global_cluster), in setup_clusters()
3359 if (!si->global_cluster) in setup_clusters()
3362 si->global_cluster->next[i] = SWAP_ENTRY_INVALID; in setup_clusters()
3363 spin_lock_init(&si->global_cluster_lock); in setup_clusters()
3373 err = inc_cluster_info_page(si, cluster_info, 0); in setup_clusters()
3381 err = inc_cluster_info_page(si, cluster_info, page_nr); in setup_clusters()
3386 err = inc_cluster_info_page(si, cluster_info, i); in setup_clusters()
3391 INIT_LIST_HEAD(&si->free_clusters); in setup_clusters()
3392 INIT_LIST_HEAD(&si->full_clusters); in setup_clusters()
3393 INIT_LIST_HEAD(&si->discard_clusters); in setup_clusters()
3396 INIT_LIST_HEAD(&si->nonfull_clusters[i]); in setup_clusters()
3397 INIT_LIST_HEAD(&si->frag_clusters[i]); in setup_clusters()
3405 list_add_tail(&ci->list, &si->nonfull_clusters[0]); in setup_clusters()
3408 list_add_tail(&ci->list, &si->free_clusters); in setup_clusters()
3421 struct swap_info_struct *si; in SYSCALL_DEFINE2() local
3448 si = alloc_swap_info(); in SYSCALL_DEFINE2()
3449 if (IS_ERR(si)) in SYSCALL_DEFINE2()
3450 return PTR_ERR(si); in SYSCALL_DEFINE2()
3452 INIT_WORK(&si->discard_work, swap_discard_work); in SYSCALL_DEFINE2()
3453 INIT_WORK(&si->reclaim_work, swap_reclaim_work); in SYSCALL_DEFINE2()
3468 si->swap_file = swap_file; in SYSCALL_DEFINE2()
3473 error = claim_swapfile(si, inode); in SYSCALL_DEFINE2()
3510 maxpages = read_swap_header(si, swap_header, inode); in SYSCALL_DEFINE2()
3516 si->max = maxpages; in SYSCALL_DEFINE2()
3517 si->pages = maxpages - 1; in SYSCALL_DEFINE2()
3518 nr_extents = setup_swap_extents(si, &span); in SYSCALL_DEFINE2()
3523 if (si->pages != si->max - 1) { in SYSCALL_DEFINE2()
3524 pr_err("swap:%u != (max:%u - 1)\n", si->pages, si->max); in SYSCALL_DEFINE2()
3529 maxpages = si->max; in SYSCALL_DEFINE2()
3538 error = swap_cgroup_swapon(si->type, maxpages); in SYSCALL_DEFINE2()
3542 error = setup_swap_map(si, swap_header, swap_map, maxpages); in SYSCALL_DEFINE2()
3557 if (si->bdev && bdev_stable_writes(si->bdev)) in SYSCALL_DEFINE2()
3558 si->flags |= SWP_STABLE_WRITES; in SYSCALL_DEFINE2()
3560 if (si->bdev && bdev_synchronous(si->bdev)) in SYSCALL_DEFINE2()
3561 si->flags |= SWP_SYNCHRONOUS_IO; in SYSCALL_DEFINE2()
3563 if (si->bdev && bdev_nonrot(si->bdev)) { in SYSCALL_DEFINE2()
3564 si->flags |= SWP_SOLIDSTATE; in SYSCALL_DEFINE2()
3570 cluster_info = setup_clusters(si, swap_header, maxpages); in SYSCALL_DEFINE2()
3578 si->bdev && bdev_max_discard_sectors(si->bdev)) { in SYSCALL_DEFINE2()
3585 si->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD | in SYSCALL_DEFINE2()
3595 si->flags &= ~SWP_PAGE_DISCARD; in SYSCALL_DEFINE2()
3597 si->flags &= ~SWP_AREA_DISCARD; in SYSCALL_DEFINE2()
3600 if (si->flags & SWP_AREA_DISCARD) { in SYSCALL_DEFINE2()
3601 int err = discard_swap(si); in SYSCALL_DEFINE2()
3604 si, err); in SYSCALL_DEFINE2()
3608 error = zswap_swapon(si->type, maxpages); in SYSCALL_DEFINE2()
3627 enable_swap_info(si, prio, swap_map, cluster_info, zeromap); in SYSCALL_DEFINE2()
3630 K(si->pages), name->name, si->prio, nr_extents, in SYSCALL_DEFINE2()
3632 (si->flags & SWP_SOLIDSTATE) ? "SS" : "", in SYSCALL_DEFINE2()
3633 (si->flags & SWP_DISCARDABLE) ? "D" : "", in SYSCALL_DEFINE2()
3634 (si->flags & SWP_AREA_DISCARD) ? "s" : "", in SYSCALL_DEFINE2()
3635 (si->flags & SWP_PAGE_DISCARD) ? "c" : ""); in SYSCALL_DEFINE2()
3644 zswap_swapoff(si->type); in SYSCALL_DEFINE2()
3648 kfree(si->global_cluster); in SYSCALL_DEFINE2()
3649 si->global_cluster = NULL; in SYSCALL_DEFINE2()
3651 destroy_swap_extents(si); in SYSCALL_DEFINE2()
3652 swap_cgroup_swapoff(si->type); in SYSCALL_DEFINE2()
3654 si->swap_file = NULL; in SYSCALL_DEFINE2()
3655 si->flags = 0; in SYSCALL_DEFINE2()
3682 struct swap_info_struct *si = swap_info[type]; in si_swapinfo() local
3684 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) in si_swapinfo()
3685 nr_to_be_unused += swap_usage_in_pages(si); in si_swapinfo()
3704 struct swap_info_struct *si; in __swap_duplicate() local
3711 si = swap_entry_to_info(entry); in __swap_duplicate()
3712 if (WARN_ON_ONCE(!si)) { in __swap_duplicate()
3720 ci = swap_cluster_lock(si, offset); in __swap_duplicate()
3724 count = si->swap_map[offset + i]; in __swap_duplicate()
3752 count = si->swap_map[offset + i]; in __swap_duplicate()
3760 else if (swap_count_continued(si, offset + i, count)) in __swap_duplicate()
3771 WRITE_ONCE(si->swap_map[offset + i], count | has_cache); in __swap_duplicate()
3821 void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr) in swapcache_clear() argument
3823 swap_entries_put_cache(si, entry, nr); in swapcache_clear()
3843 struct swap_info_struct *si; in add_swap_count_continuation() local
3858 si = get_swap_device(entry); in add_swap_count_continuation()
3859 if (!si) { in add_swap_count_continuation()
3869 ci = swap_cluster_lock(si, offset); in add_swap_count_continuation()
3871 count = swap_count(si->swap_map[offset]); in add_swap_count_continuation()
3887 head = vmalloc_to_page(si->swap_map + offset); in add_swap_count_continuation()
3890 spin_lock(&si->cont_lock); in add_swap_count_continuation()
3899 si->flags |= SWP_CONTINUED; in add_swap_count_continuation()
3927 spin_unlock(&si->cont_lock); in add_swap_count_continuation()
3930 put_swap_device(si); in add_swap_count_continuation()
3946 static bool swap_count_continued(struct swap_info_struct *si, in swap_count_continued() argument
3954 head = vmalloc_to_page(si->swap_map + offset); in swap_count_continued()
3960 spin_lock(&si->cont_lock); in swap_count_continued()
4022 spin_unlock(&si->cont_lock); in swap_count_continued()
4030 static void free_swap_count_continuations(struct swap_info_struct *si) in free_swap_count_continuations() argument
4034 for (offset = 0; offset < si->max; offset += PAGE_SIZE) { in free_swap_count_continuations()
4036 head = vmalloc_to_page(si->swap_map + offset); in free_swap_count_continuations()
4056 struct swap_info_struct *si, *next; in __folio_throttle_swaprate() local
4076 plist_for_each_entry_safe(si, next, &swap_avail_heads[nid], in __folio_throttle_swaprate()
4078 if (si->bdev) { in __folio_throttle_swaprate()
4079 blkcg_schedule_throttle(si->bdev->bd_disk, true); in __folio_throttle_swaprate()
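
The references above share one lifetime pattern: a caller pins the swap device with get_swap_device() (which takes a reference on si->users through get_swap_device_info(), as seen around lines 1301-1307 and 1573-1594 of the hits) before dereferencing si->swap_map or si->max, and drops the pin with put_swap_device() when finished. A minimal illustrative sketch of that pattern follows; example_entry_is_cached() is a hypothetical helper written for this listing, not code taken from mm/swapfile.c.

/*
 * Illustrative sketch only, assembled from the references listed above:
 * pin the device, bounds-check the offset, read the map entry, unpin.
 */
static bool example_entry_is_cached(swp_entry_t entry)
{
	struct swap_info_struct *si;
	unsigned long offset = swp_offset(entry);
	bool cached = false;

	si = get_swap_device(entry);	/* pins si->users, or returns NULL */
	if (!si)
		return false;		/* entry does not map to a live device */

	if (offset < si->max)
		cached = READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE;

	put_swap_device(si);		/* drops the si->users reference */
	return cached;
}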