Lines matching refs: si

56 static void swap_entries_free(struct swap_info_struct *si,
59 static void swap_range_alloc(struct swap_info_struct *si,
62 static void move_cluster(struct swap_info_struct *si,
122 struct swap_info_struct *si[SWAP_NR_ORDERS]; member
128 .si = { NULL },
167 static long swap_usage_in_pages(struct swap_info_struct *si) in swap_usage_in_pages() argument
169 return atomic_long_read(&si->inuse_pages) & SWAP_USAGE_COUNTER_MASK; in swap_usage_in_pages()
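
The read at lines 167-169 masks si->inuse_pages with SWAP_USAGE_COUNTER_MASK because, as lines 1145 and 1183 show, the same atomic word also carries SWAP_USAGE_OFFLIST_BIT. A minimal userspace sketch of that flag-plus-counter packing; the bit positions below are made-up stand-ins, not the kernel's actual constants:

#include <stdatomic.h>
#include <stdio.h>

#define OFFLIST_BIT  (1L << 62)       /* assumed stand-in for SWAP_USAGE_OFFLIST_BIT */
#define COUNTER_MASK (OFFLIST_BIT - 1) /* assumed stand-in for SWAP_USAGE_COUNTER_MASK */

static atomic_long inuse_pages;

static long usage_in_pages(void)
{
	/* like swap_usage_in_pages(): strip the flag, keep only the count */
	return atomic_load(&inuse_pages) & COUNTER_MASK;
}

int main(void)
{
	atomic_fetch_add(&inuse_pages, 100);        /* allocate 100 entries */
	atomic_fetch_or(&inuse_pages, OFFLIST_BIT); /* device leaves the avail list */
	printf("in use: %ld\n", usage_in_pages());  /* still prints 100 */
	return 0;
}
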
182 static bool swap_only_has_cache(struct swap_info_struct *si, in swap_only_has_cache() argument
185 unsigned char *map = si->swap_map + offset; in swap_only_has_cache()
197 static bool swap_is_last_map(struct swap_info_struct *si, in swap_is_last_map() argument
200 unsigned char *map = si->swap_map + offset; in swap_is_last_map()
221 static int __try_to_reclaim_swap(struct swap_info_struct *si, in __try_to_reclaim_swap() argument
224 const swp_entry_t entry = swp_entry(si->type, offset); in __try_to_reclaim_swap()
269 ci = swap_cluster_lock(si, offset); in __try_to_reclaim_swap()
270 need_reclaim = swap_only_has_cache(si, offset, nr_pages); in __try_to_reclaim_swap()
301 static int discard_swap(struct swap_info_struct *si) in discard_swap() argument
309 se = first_se(si); in discard_swap()
313 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
324 err = blkdev_issue_discard(si->bdev, start_block, in discard_swap()
371 static void discard_swap_cluster(struct swap_info_struct *si, in discard_swap_cluster() argument
374 struct swap_extent *se = offset_to_swap_extent(si, start_page); in discard_swap_cluster()
388 if (blkdev_issue_discard(si->bdev, start_block, in discard_swap_cluster()
424 static inline unsigned int cluster_index(struct swap_info_struct *si, in cluster_index() argument
427 return ci - si->cluster_info; in cluster_index()
430 static inline unsigned int cluster_offset(struct swap_info_struct *si, in cluster_offset() argument
433 return cluster_index(si, ci) * SWAPFILE_CLUSTER; in cluster_offset()
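
cluster_index() (line 427) and cluster_offset() (line 433) convert between a cluster_info pointer and a page offset purely by pointer arithmetic against the base of si->cluster_info. A self-contained illustration of the idiom; the SWAPFILE_CLUSTER value here is an assumption:

#include <stdio.h>

#define SWAPFILE_CLUSTER 512UL  /* assumed: swap entries per cluster */

struct cluster_info { unsigned int count; };

int main(void)
{
	struct cluster_info table[16];  /* stands in for si->cluster_info */
	struct cluster_info *ci = &table[3];

	unsigned long idx = ci - table;              /* cluster_index(): pointer difference */
	unsigned long off = idx * SWAPFILE_CLUSTER;  /* cluster_offset(): first slot in cluster */
	printf("cluster %lu starts at offset %lu\n", idx, off);
	return 0;
}
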
489 swap_cluster_alloc_table(struct swap_info_struct *si, in swap_cluster_alloc_table() argument
516 if (!(si->flags & SWP_SOLIDSTATE)) in swap_cluster_alloc_table()
517 spin_unlock(&si->global_cluster_lock); in swap_cluster_alloc_table()
531 if (!(si->flags & SWP_SOLIDSTATE)) in swap_cluster_alloc_table()
532 spin_lock(&si->global_cluster_lock); in swap_cluster_alloc_table()
543 move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE); in swap_cluster_alloc_table()
552 static void move_cluster(struct swap_info_struct *si, in move_cluster() argument
561 spin_lock(&si->lock); in move_cluster()
566 spin_unlock(&si->lock); in move_cluster()
571 static void swap_cluster_schedule_discard(struct swap_info_struct *si, in swap_cluster_schedule_discard() argument
575 move_cluster(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD); in swap_cluster_schedule_discard()
576 schedule_work(&si->discard_work); in swap_cluster_schedule_discard()
579 static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) in __free_cluster() argument
582 move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE); in __free_cluster()
596 struct swap_info_struct *si, struct list_head *list) in isolate_lock_cluster() argument
600 spin_lock(&si->lock); in isolate_lock_cluster()
615 spin_unlock(&si->lock); in isolate_lock_cluster()
619 VM_WARN_ON_ONCE(list != &si->free_clusters); in isolate_lock_cluster()
621 return swap_cluster_alloc_table(si, found); in isolate_lock_cluster()
633 static bool swap_do_scheduled_discard(struct swap_info_struct *si) in swap_do_scheduled_discard() argument
639 spin_lock(&si->lock); in swap_do_scheduled_discard()
640 while (!list_empty(&si->discard_clusters)) { in swap_do_scheduled_discard()
641 ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list); in swap_do_scheduled_discard()
648 idx = cluster_index(si, ci); in swap_do_scheduled_discard()
649 spin_unlock(&si->lock); in swap_do_scheduled_discard()
650 discard_swap_cluster(si, idx * SWAPFILE_CLUSTER, in swap_do_scheduled_discard()
659 __free_cluster(si, ci); in swap_do_scheduled_discard()
662 spin_lock(&si->lock); in swap_do_scheduled_discard()
664 spin_unlock(&si->lock); in swap_do_scheduled_discard()
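
swap_do_scheduled_discard() (lines 639-664) pops clusters from si->discard_clusters under si->lock but drops the lock before discard_swap_cluster(), since issuing the discard can block. A hedged userspace rendering of that unlock-around-I/O loop, with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static int pending = 3;  /* stands in for the si->discard_clusters list */

static void do_scheduled_discards(void)
{
	pthread_mutex_lock(&lock);
	while (pending > 0) {
		pending--;                    /* take one cluster off the list */
		pthread_mutex_unlock(&lock);  /* the discard I/O may sleep */
		printf("issuing discard\n");  /* blkdev_issue_discard() goes here */
		pthread_mutex_lock(&lock);    /* retake before touching the list */
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	do_scheduled_discards();
	return 0;
}
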
670 struct swap_info_struct *si; in swap_discard_work() local
672 si = container_of(work, struct swap_info_struct, discard_work); in swap_discard_work()
674 swap_do_scheduled_discard(si); in swap_discard_work()
679 struct swap_info_struct *si; in swap_users_ref_free() local
681 si = container_of(ref, struct swap_info_struct, users); in swap_users_ref_free()
682 complete(&si->comp); in swap_users_ref_free()
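
swap_discard_work() (line 672) and swap_users_ref_free() (line 681) both recover the enclosing swap_info_struct from a pointer to an embedded member. A simplified userspace container_of() showing the mechanism (the kernel macro additionally type-checks the pointer):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int pending; };
struct swap_info { int type; struct work discard_work; };

static void discard_fn(struct work *w)
{
	/* the callback only receives the member, yet finds its container */
	struct swap_info *si = container_of(w, struct swap_info, discard_work);
	printf("discard for swap device %d\n", si->type);
}

int main(void)
{
	struct swap_info si = { .type = 1 };
	discard_fn(&si.discard_work);
	return 0;
}
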
689 static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci) in free_cluster() argument
700 if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) == in free_cluster()
702 swap_cluster_schedule_discard(si, ci); in free_cluster()
706 __free_cluster(si, ci); in free_cluster()
713 static void partial_free_cluster(struct swap_info_struct *si, in partial_free_cluster() argument
720 move_cluster(si, ci, &si->nonfull_clusters[ci->order], in partial_free_cluster()
729 static void relocate_cluster(struct swap_info_struct *si, in relocate_cluster() argument
740 free_cluster(si, ci); in relocate_cluster()
743 move_cluster(si, ci, &si->frag_clusters[ci->order], in relocate_cluster()
747 move_cluster(si, ci, &si->full_clusters, in relocate_cluster()
780 static bool cluster_reclaim_range(struct swap_info_struct *si, in cluster_reclaim_range() argument
784 unsigned char *map = si->swap_map; in cluster_reclaim_range()
795 nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY); in cluster_reclaim_range()
818 static bool cluster_scan_range(struct swap_info_struct *si, in cluster_scan_range() argument
824 unsigned char *map = si->swap_map; in cluster_scan_range()
866 static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci, in cluster_alloc_range() argument
874 if (!(si->flags & SWP_WRITEOK)) in cluster_alloc_range()
884 memset(si->swap_map + start, usage, nr_pages); in cluster_alloc_range()
886 swap_range_alloc(si, nr_pages); in cluster_alloc_range()
893 static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si, in alloc_swap_scan_cluster() argument
901 unsigned long end = min(start + SWAPFILE_CLUSTER, si->max); in alloc_swap_scan_cluster()
912 if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim)) in alloc_swap_scan_cluster()
915 ret = cluster_reclaim_range(si, ci, offset, offset + nr_pages); in alloc_swap_scan_cluster()
930 if (!cluster_alloc_range(si, ci, offset, usage, order)) in alloc_swap_scan_cluster()
939 relocate_cluster(si, ci); in alloc_swap_scan_cluster()
941 if (si->flags & SWP_SOLIDSTATE) { in alloc_swap_scan_cluster()
943 this_cpu_write(percpu_swap_cluster.si[order], si); in alloc_swap_scan_cluster()
945 si->global_cluster->next[order] = next; in alloc_swap_scan_cluster()
950 static unsigned int alloc_swap_scan_list(struct swap_info_struct *si, in alloc_swap_scan_list() argument
959 struct swap_cluster_info *ci = isolate_lock_cluster(si, list); in alloc_swap_scan_list()
964 offset = cluster_offset(si, ci); in alloc_swap_scan_list()
965 found = alloc_swap_scan_cluster(si, ci, offset, order, usage); in alloc_swap_scan_list()
973 static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force) in swap_reclaim_full_clusters() argument
978 unsigned char *map = si->swap_map; in swap_reclaim_full_clusters()
982 to_scan = swap_usage_in_pages(si) / SWAPFILE_CLUSTER; in swap_reclaim_full_clusters()
984 while ((ci = isolate_lock_cluster(si, &si->full_clusters))) { in swap_reclaim_full_clusters()
985 offset = cluster_offset(si, ci); in swap_reclaim_full_clusters()
986 end = min(si->max, offset + SWAPFILE_CLUSTER); in swap_reclaim_full_clusters()
992 nr_reclaim = __try_to_reclaim_swap(si, offset, in swap_reclaim_full_clusters()
1005 relocate_cluster(si, ci); in swap_reclaim_full_clusters()
1015 struct swap_info_struct *si; in swap_reclaim_work() local
1017 si = container_of(work, struct swap_info_struct, reclaim_work); in swap_reclaim_work()
1019 swap_reclaim_full_clusters(si, true); in swap_reclaim_work()
1026 static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order, in cluster_alloc_swap_entry() argument
1036 if (order && !(si->flags & SWP_BLKDEV)) in cluster_alloc_swap_entry()
1039 if (!(si->flags & SWP_SOLIDSTATE)) { in cluster_alloc_swap_entry()
1041 spin_lock(&si->global_cluster_lock); in cluster_alloc_swap_entry()
1042 offset = si->global_cluster->next[order]; in cluster_alloc_swap_entry()
1046 ci = swap_cluster_lock(si, offset); in cluster_alloc_swap_entry()
1050 offset = cluster_offset(si, ci); in cluster_alloc_swap_entry()
1051 found = alloc_swap_scan_cluster(si, ci, offset, in cluster_alloc_swap_entry()
1065 if (si->flags & SWP_PAGE_DISCARD) { in cluster_alloc_swap_entry()
1066 found = alloc_swap_scan_list(si, &si->free_clusters, order, usage, in cluster_alloc_swap_entry()
1073 found = alloc_swap_scan_list(si, &si->nonfull_clusters[order], in cluster_alloc_swap_entry()
1079 if (!(si->flags & SWP_PAGE_DISCARD)) { in cluster_alloc_swap_entry()
1080 found = alloc_swap_scan_list(si, &si->free_clusters, order, usage, in cluster_alloc_swap_entry()
1088 swap_reclaim_full_clusters(si, false); in cluster_alloc_swap_entry()
1097 found = alloc_swap_scan_list(si, &si->frag_clusters[order], order, in cluster_alloc_swap_entry()
1112 found = alloc_swap_scan_list(si, &si->frag_clusters[o], in cluster_alloc_swap_entry()
1117 found = alloc_swap_scan_list(si, &si->nonfull_clusters[o], in cluster_alloc_swap_entry()
1123 if (!(si->flags & SWP_SOLIDSTATE)) in cluster_alloc_swap_entry()
1124 spin_unlock(&si->global_cluster_lock); in cluster_alloc_swap_entry()
1130 static void del_from_avail_list(struct swap_info_struct *si, bool swapoff) in del_from_avail_list() argument
1143 lockdep_assert_held(&si->lock); in del_from_avail_list()
1144 si->flags &= ~SWP_WRITEOK; in del_from_avail_list()
1145 atomic_long_or(SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages); in del_from_avail_list()
1154 pages = si->pages; in del_from_avail_list()
1155 if (!atomic_long_try_cmpxchg(&si->inuse_pages, &pages, in del_from_avail_list()
1160 plist_del(&si->avail_list, &swap_avail_head); in del_from_avail_list()
1167 static void add_to_avail_list(struct swap_info_struct *si, bool swapon) in add_to_avail_list() argument
1176 lockdep_assert_held(&si->lock); in add_to_avail_list()
1177 si->flags |= SWP_WRITEOK; in add_to_avail_list()
1179 if (!(READ_ONCE(si->flags) & SWP_WRITEOK)) in add_to_avail_list()
1183 if (!(atomic_long_read(&si->inuse_pages) & SWAP_USAGE_OFFLIST_BIT)) in add_to_avail_list()
1186 val = atomic_long_fetch_and_relaxed(~SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages); in add_to_avail_list()
1193 pages = si->pages; in add_to_avail_list()
1196 if (atomic_long_try_cmpxchg(&si->inuse_pages, &pages, in add_to_avail_list()
1201 plist_add(&si->avail_list, &swap_avail_head); in add_to_avail_list()
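
del_from_avail_list() and add_to_avail_list() (lines 1130-1201) flip SWAP_USAGE_OFFLIST_BIT with atomic_long_try_cmpxchg so the transition only commits if the usage count has not moved underneath; a failed exchange means an allocation or free raced in and the caller re-evaluates. A sketch of that guard in C11 atomics, with an assumed bit position:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define OFFLIST_BIT (1L << 62)  /* assumed stand-in for SWAP_USAGE_OFFLIST_BIT */

static atomic_long inuse_pages;

/* go off-list only if usage still equals the device size `pages` */
static bool try_mark_offlist(long pages)
{
	long expected = pages;

	/* fails if an entry was freed meanwhile; the caller then bails out */
	return atomic_compare_exchange_strong(&inuse_pages, &expected,
					      pages | OFFLIST_BIT);
}

int main(void)
{
	atomic_store(&inuse_pages, 8);               /* device full: 8 of 8 in use */
	printf("marked: %d\n", try_mark_offlist(8)); /* 1: committed */
	printf("marked: %d\n", try_mark_offlist(8)); /* 0: bit already set */
	return 0;
}
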
1212 static bool swap_usage_add(struct swap_info_struct *si, unsigned int nr_entries) in swap_usage_add() argument
1214 long val = atomic_long_add_return_relaxed(nr_entries, &si->inuse_pages); in swap_usage_add()
1220 if (unlikely(val == si->pages)) { in swap_usage_add()
1221 del_from_avail_list(si, false); in swap_usage_add()
1228 static void swap_usage_sub(struct swap_info_struct *si, unsigned int nr_entries) in swap_usage_sub() argument
1230 long val = atomic_long_sub_return_relaxed(nr_entries, &si->inuse_pages); in swap_usage_sub()
1237 add_to_avail_list(si, false); in swap_usage_sub()
1240 static void swap_range_alloc(struct swap_info_struct *si, in swap_range_alloc() argument
1243 if (swap_usage_add(si, nr_entries)) { in swap_range_alloc()
1245 schedule_work(&si->reclaim_work); in swap_range_alloc()
1250 static void swap_range_free(struct swap_info_struct *si, unsigned long offset, in swap_range_free() argument
1263 clear_bit(offset + i, si->zeromap); in swap_range_free()
1264 zswap_invalidate(swp_entry(si->type, offset + i)); in swap_range_free()
1267 if (si->flags & SWP_BLKDEV) in swap_range_free()
1269 si->bdev->bd_disk->fops->swap_slot_free_notify; in swap_range_free()
1273 arch_swap_invalidate_page(si->type, offset); in swap_range_free()
1275 swap_slot_free_notify(si->bdev, offset); in swap_range_free()
1278 __swap_cache_clear_shadow(swp_entry(si->type, begin), nr_entries); in swap_range_free()
1286 swap_usage_sub(si, nr_entries); in swap_range_free()
1289 static bool get_swap_device_info(struct swap_info_struct *si) in get_swap_device_info() argument
1291 if (!percpu_ref_tryget_live(&si->users)) in get_swap_device_info()
1313 struct swap_info_struct *si; in swap_alloc_fast() local
1320 si = this_cpu_read(percpu_swap_cluster.si[order]); in swap_alloc_fast()
1322 if (!si || !offset || !get_swap_device_info(si)) in swap_alloc_fast()
1325 ci = swap_cluster_lock(si, offset); in swap_alloc_fast()
1328 offset = cluster_offset(si, ci); in swap_alloc_fast()
1329 found = alloc_swap_scan_cluster(si, ci, offset, order, SWAP_HAS_CACHE); in swap_alloc_fast()
1331 *entry = swp_entry(si->type, found); in swap_alloc_fast()
1336 put_swap_device(si); in swap_alloc_fast()
1345 struct swap_info_struct *si, *next; in swap_alloc_slow() local
1349 plist_for_each_entry_safe(si, next, &swap_avail_head, avail_list) { in swap_alloc_slow()
1351 plist_requeue(&si->avail_list, &swap_avail_head); in swap_alloc_slow()
1353 if (get_swap_device_info(si)) { in swap_alloc_slow()
1354 offset = cluster_alloc_swap_entry(si, order, SWAP_HAS_CACHE); in swap_alloc_slow()
1355 put_swap_device(si); in swap_alloc_slow()
1357 *entry = swp_entry(si->type, offset); in swap_alloc_slow()
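
When the per-CPU fast path misses, swap_alloc_slow() (lines 1345-1357) walks swap_avail_head in priority order and plist_requeue()s the device it allocated from, rotating it behind peers of equal priority so same-priority devices share the load. A toy array-based version of that pick-and-rotate policy, not the plist implementation itself:

#include <stdio.h>

struct dev { int prio; int free; };  /* array kept sorted, higher prio first */

/* pick the first device with space, then rotate it behind equal-prio peers */
static int pick(struct dev *devs, int n)
{
	for (int i = 0; i < n; i++) {
		if (devs[i].free > 0) {
			struct dev cur = devs[i];
			int j = i;

			cur.free--;
			while (j + 1 < n && devs[j + 1].prio == cur.prio) {
				devs[j] = devs[j + 1];  /* shift peers forward */
				j++;
			}
			devs[j] = cur;  /* requeued at the end of its prio run */
			return i;
		}
	}
	return -1;  /* all devices full */
}

int main(void)
{
	struct dev devs[] = { { 2, 1 }, { 2, 1 }, { 1, 4 } };

	for (int k = 0; k < 4; k++)
		printf("allocated from slot %d\n", pick(devs, 3));
	return 0;
}
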
1388 struct swap_info_struct *si, *next; in swap_sync_discard() local
1392 plist_for_each_entry_safe(si, next, &swap_active_head, list) { in swap_sync_discard()
1394 if (get_swap_device_info(si)) { in swap_sync_discard()
1395 if (si->flags & SWP_PAGE_DISCARD) in swap_sync_discard()
1396 ret = swap_do_scheduled_discard(si); in swap_sync_discard()
1397 put_swap_device(si); in swap_sync_discard()
1477 struct swap_info_struct *si; in _swap_info_get() local
1482 si = swap_entry_to_info(entry); in _swap_info_get()
1483 if (!si) in _swap_info_get()
1485 if (data_race(!(si->flags & SWP_USED))) in _swap_info_get()
1488 if (offset >= si->max) in _swap_info_get()
1490 if (data_race(!si->swap_map[swp_offset(entry)])) in _swap_info_get()
1492 return si; in _swap_info_get()
1509 static unsigned char swap_entry_put_locked(struct swap_info_struct *si, in swap_entry_put_locked() argument
1518 count = si->swap_map[offset]; in swap_entry_put_locked()
1534 if (swap_count_continued(si, offset, count)) in swap_entry_put_locked()
1544 WRITE_ONCE(si->swap_map[offset], usage); in swap_entry_put_locked()
1546 swap_entries_free(si, ci, entry, 1); in swap_entry_put_locked()
1592 struct swap_info_struct *si; in get_swap_device() local
1597 si = swap_entry_to_info(entry); in get_swap_device()
1598 if (!si) in get_swap_device()
1600 if (!get_swap_device_info(si)) in get_swap_device()
1603 if (offset >= si->max) in get_swap_device()
1606 return si; in get_swap_device()
1613 percpu_ref_put(&si->users); in get_swap_device()
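
get_swap_device() (lines 1592-1613) pins the device through get_swap_device_info(), i.e. percpu_ref_tryget_live(&si->users) (line 1291), so swapoff cannot tear the structures down while a reference is held; once the ref is marked dying, the tryget fails. A single-counter analogue of that live/dead gate (the real percpu_ref is per-CPU for scalability, which this sketch ignores):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

#define REF_DEAD (1L << 62)  /* assumed: high bit marks the ref as dying */

static atomic_long users = 1;  /* starts live, like si->users after swapon */

static bool tryget_live(void)
{
	long v = atomic_load(&users);

	while (!(v & REF_DEAD)) {
		/* on failure, v is reloaded and the dead bit re-checked */
		if (atomic_compare_exchange_weak(&users, &v, v + 1))
			return true;
	}
	return false;  /* swapoff already marked the device dying */
}

static void put_ref(void)
{
	atomic_fetch_sub(&users, 1);
}

int main(void)
{
	if (tryget_live()) {  /* like get_swap_device_info(si) */
		/* ... safe to dereference the device here ... */
		put_ref();    /* like put_swap_device(si) */
	}
	atomic_fetch_or(&users, REF_DEAD);         /* swapoff kills the ref */
	printf("after dead: %d\n", tryget_live()); /* prints 0 */
	return 0;
}
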
1617 static void swap_entries_put_cache(struct swap_info_struct *si, in swap_entries_put_cache() argument
1623 ci = swap_cluster_lock(si, offset); in swap_entries_put_cache()
1624 if (swap_only_has_cache(si, offset, nr)) { in swap_entries_put_cache()
1625 swap_entries_free(si, ci, entry, nr); in swap_entries_put_cache()
1628 swap_entry_put_locked(si, ci, entry, SWAP_HAS_CACHE); in swap_entries_put_cache()
1633 static bool swap_entries_put_map(struct swap_info_struct *si, in swap_entries_put_map() argument
1644 count = swap_count(data_race(si->swap_map[offset])); in swap_entries_put_map()
1648 ci = swap_cluster_lock(si, offset); in swap_entries_put_map()
1649 if (!swap_is_last_map(si, offset, nr, &has_cache)) { in swap_entries_put_map()
1653 swap_entries_free(si, ci, entry, nr); in swap_entries_put_map()
1656 WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE); in swap_entries_put_map()
1662 ci = swap_cluster_lock(si, offset); in swap_entries_put_map()
1665 count = swap_entry_put_locked(si, ci, entry, 1); in swap_entries_put_map()
1678 static bool swap_entries_put_map_nr(struct swap_info_struct *si, in swap_entries_put_map_nr() argument
1688 has_cache |= swap_entries_put_map(si, entry, cluster_nr); in swap_entries_put_map_nr()
1711 static void swap_entries_free(struct swap_info_struct *si, in swap_entries_free() argument
1716 unsigned char *map = si->swap_map + offset; in swap_entries_free()
1720 VM_BUG_ON(ci != __swap_offset_to_cluster(si, offset + nr_pages - 1)); in swap_entries_free()
1731 swap_range_free(si, offset, nr_pages); in swap_entries_free()
1735 free_cluster(si, ci); in swap_entries_free()
1737 partial_free_cluster(si, ci); in swap_entries_free()
1767 struct swap_info_struct *si; in put_swap_folio() local
1770 si = _swap_info_get(entry); in put_swap_folio()
1771 if (!si) in put_swap_folio()
1774 swap_entries_put_cache(si, entry, size); in put_swap_folio()
1779 struct swap_info_struct *si = __swap_entry_to_info(entry); in __swap_count() local
1782 return swap_count(si->swap_map[offset]); in __swap_count()
1790 bool swap_entry_swapped(struct swap_info_struct *si, swp_entry_t entry) in swap_entry_swapped() argument
1796 ci = swap_cluster_lock(si, offset); in swap_entry_swapped()
1797 count = swap_count(si->swap_map[offset]); in swap_entry_swapped()
1809 struct swap_info_struct *si; in swp_swapcount() local
1815 si = _swap_info_get(entry); in swp_swapcount()
1816 if (!si) in swp_swapcount()
1821 ci = swap_cluster_lock(si, offset); in swp_swapcount()
1823 count = swap_count(si->swap_map[offset]); in swp_swapcount()
1830 page = vmalloc_to_page(si->swap_map + offset); in swp_swapcount()
1848 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si, in swap_page_trans_huge_swapped() argument
1852 unsigned char *map = si->swap_map; in swap_page_trans_huge_swapped()
1859 ci = swap_cluster_lock(si, offset); in swap_page_trans_huge_swapped()
1879 struct swap_info_struct *si = _swap_info_get(entry); in folio_swapped() local
1881 if (!si) in folio_swapped()
1885 return swap_entry_swapped(si, entry); in folio_swapped()
1887 return swap_page_trans_huge_swapped(si, entry, folio_order(folio)); in folio_swapped()
1955 struct swap_info_struct *si; in free_swap_and_cache_nr() local
1959 si = get_swap_device(entry); in free_swap_and_cache_nr()
1960 if (!si) in free_swap_and_cache_nr()
1963 if (WARN_ON(end_offset > si->max)) in free_swap_and_cache_nr()
1969 any_only_cache = swap_entries_put_map_nr(si, entry, nr); in free_swap_and_cache_nr()
1983 if (READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) { in free_swap_and_cache_nr()
1992 nr = __try_to_reclaim_swap(si, offset, in free_swap_and_cache_nr()
2003 put_swap_device(si); in free_swap_and_cache_nr()
2010 struct swap_info_struct *si = swap_type_to_info(type); in get_swap_page_of_type() local
2014 if (!si) in get_swap_page_of_type()
2018 if (get_swap_device_info(si)) { in get_swap_page_of_type()
2019 if (si->flags & SWP_WRITEOK) { in get_swap_page_of_type()
2025 offset = cluster_alloc_swap_entry(si, 0, 1); in get_swap_page_of_type()
2028 entry = swp_entry(si->type, offset); in get_swap_page_of_type()
2030 put_swap_device(si); in get_swap_page_of_type()
2095 struct swap_info_struct *si = swap_type_to_info(type); in swapdev_block() local
2098 if (!si || !(si->flags & SWP_WRITEOK)) in swapdev_block()
2100 se = offset_to_swap_extent(si, offset); in swapdev_block()
2254 struct swap_info_struct *si; in unuse_pte_range() local
2256 si = swap_info[type]; in unuse_pte_range()
2296 swp_count = READ_ONCE(si->swap_map[offset]); in unuse_pte_range()
2429 static unsigned int find_next_to_unuse(struct swap_info_struct *si, in find_next_to_unuse() argument
2441 for (i = prev + 1; i < si->max; i++) { in find_next_to_unuse()
2442 count = READ_ONCE(si->swap_map[i]); in find_next_to_unuse()
2449 if (i == si->max) in find_next_to_unuse()
2461 struct swap_info_struct *si = swap_info[type]; in try_to_unuse() local
2466 if (!swap_usage_in_pages(si)) in try_to_unuse()
2479 while (swap_usage_in_pages(si) && in try_to_unuse()
2507 while (swap_usage_in_pages(si) && in try_to_unuse()
2509 (i = find_next_to_unuse(si, i)) != 0) { in try_to_unuse()
2542 if (swap_usage_in_pages(si)) { in try_to_unuse()
2703 static void setup_swap_info(struct swap_info_struct *si, int prio, in setup_swap_info() argument
2708 si->prio = prio; in setup_swap_info()
2713 si->list.prio = -si->prio; in setup_swap_info()
2714 si->avail_list.prio = -si->prio; in setup_swap_info()
2715 si->swap_map = swap_map; in setup_swap_info()
2716 si->cluster_info = cluster_info; in setup_swap_info()
2717 si->zeromap = zeromap; in setup_swap_info()
2720 static void _enable_swap_info(struct swap_info_struct *si) in _enable_swap_info() argument
2722 atomic_long_add(si->pages, &nr_swap_pages); in _enable_swap_info()
2723 total_swap_pages += si->pages; in _enable_swap_info()
2727 plist_add(&si->list, &swap_active_head); in _enable_swap_info()
2730 add_to_avail_list(si, true); in _enable_swap_info()
2733 static void enable_swap_info(struct swap_info_struct *si, int prio, in enable_swap_info() argument
2739 spin_lock(&si->lock); in enable_swap_info()
2740 setup_swap_info(si, prio, swap_map, cluster_info, zeromap); in enable_swap_info()
2741 spin_unlock(&si->lock); in enable_swap_info()
2746 percpu_ref_resurrect(&si->users); in enable_swap_info()
2748 spin_lock(&si->lock); in enable_swap_info()
2749 _enable_swap_info(si); in enable_swap_info()
2750 spin_unlock(&si->lock); in enable_swap_info()
2754 static void reinsert_swap_info(struct swap_info_struct *si) in reinsert_swap_info() argument
2757 spin_lock(&si->lock); in reinsert_swap_info()
2758 setup_swap_info(si, si->prio, si->swap_map, si->cluster_info, si->zeromap); in reinsert_swap_info()
2759 _enable_swap_info(si); in reinsert_swap_info()
2760 spin_unlock(&si->lock); in reinsert_swap_info()
2768 static void wait_for_allocation(struct swap_info_struct *si) in wait_for_allocation() argument
2771 unsigned long end = ALIGN(si->max, SWAPFILE_CLUSTER); in wait_for_allocation()
2774 BUG_ON(si->flags & SWP_WRITEOK); in wait_for_allocation()
2777 ci = swap_cluster_lock(si, offset); in wait_for_allocation()
2807 static void flush_percpu_swap_cluster(struct swap_info_struct *si) in flush_percpu_swap_cluster() argument
2813 pcp_si = per_cpu_ptr(percpu_swap_cluster.si, cpu); in flush_percpu_swap_cluster()
2820 cmpxchg(&pcp_si[i], si, NULL); in flush_percpu_swap_cluster()
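
During swapoff, flush_percpu_swap_cluster() (lines 2807-2820) walks every CPU's cached pointer array and runs cmpxchg(&pcp_si[i], si, NULL): only slots still pointing at the dying device are cleared, and slots caching other devices are left alone. The same conditional clear in userspace C11:

#include <stdatomic.h>
#include <stdio.h>

#define NCPU 4

struct swap_info { int type; };

/* per-"CPU" cached device pointers, like percpu_swap_cluster.si */
static _Atomic(struct swap_info *) cached[NCPU];

static void flush(struct swap_info *dying)
{
	for (int cpu = 0; cpu < NCPU; cpu++) {
		struct swap_info *expected = dying;

		/* clear only if this slot still caches the dying device */
		atomic_compare_exchange_strong(&cached[cpu], &expected, NULL);
	}
}

int main(void)
{
	struct swap_info a = { 0 }, b = { 1 };

	cached[0] = &a;
	cached[1] = &b;
	cached[2] = &a;
	flush(&a);
	for (int cpu = 0; cpu < NCPU; cpu++)
		printf("cpu%d: %p\n", cpu, (void *)atomic_load(&cached[cpu]));
	return 0;
}
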
2990 struct swap_info_struct *si; in swap_start() local
2999 for (type = 0; (si = swap_type_to_info(type)); type++) { in swap_start()
3000 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_start()
3003 return si; in swap_start()
3011 struct swap_info_struct *si = v; in swap_next() local
3017 type = si->type + 1; in swap_next()
3020 for (; (si = swap_type_to_info(type)); type++) { in swap_next()
3021 if (!(si->flags & SWP_USED) || !si->swap_map) in swap_next()
3023 return si; in swap_next()
3036 struct swap_info_struct *si = v; in swap_show() local
3041 if (si == SEQ_START_TOKEN) { in swap_show()
3046 bytes = K(si->pages); in swap_show()
3047 inuse = K(swap_usage_in_pages(si)); in swap_show()
3049 file = si->swap_file; in swap_show()
3057 si->prio); in swap_show()
3168 static int claim_swapfile(struct swap_info_struct *si, struct inode *inode) in claim_swapfile() argument
3171 si->bdev = I_BDEV(inode); in claim_swapfile()
3177 if (bdev_is_zoned(si->bdev)) in claim_swapfile()
3179 si->flags |= SWP_BLKDEV; in claim_swapfile()
3181 si->bdev = inode->i_sb->s_bdev; in claim_swapfile()
3225 static unsigned long read_swap_header(struct swap_info_struct *si, in read_swap_header() argument
3288 static int setup_swap_map(struct swap_info_struct *si, in setup_swap_map() argument
3302 si->pages--; in setup_swap_map()
3306 if (!si->pages) { in setup_swap_map()
3314 static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si, in setup_clusters() argument
3330 if (!(si->flags & SWP_SOLIDSTATE)) { in setup_clusters()
3331 si->global_cluster = kmalloc(sizeof(*si->global_cluster), in setup_clusters()
3333 if (!si->global_cluster) in setup_clusters()
3336 si->global_cluster->next[i] = SWAP_ENTRY_INVALID; in setup_clusters()
3337 spin_lock_init(&si->global_cluster_lock); in setup_clusters()
3365 INIT_LIST_HEAD(&si->free_clusters); in setup_clusters()
3366 INIT_LIST_HEAD(&si->full_clusters); in setup_clusters()
3367 INIT_LIST_HEAD(&si->discard_clusters); in setup_clusters()
3370 INIT_LIST_HEAD(&si->nonfull_clusters[i]); in setup_clusters()
3371 INIT_LIST_HEAD(&si->frag_clusters[i]); in setup_clusters()
3379 list_add_tail(&ci->list, &si->nonfull_clusters[0]); in setup_clusters()
3382 list_add_tail(&ci->list, &si->free_clusters); in setup_clusters()
3394 struct swap_info_struct *si; in SYSCALL_DEFINE2() local
3418 si = alloc_swap_info(); in SYSCALL_DEFINE2()
3419 if (IS_ERR(si)) in SYSCALL_DEFINE2()
3420 return PTR_ERR(si); in SYSCALL_DEFINE2()
3422 INIT_WORK(&si->discard_work, swap_discard_work); in SYSCALL_DEFINE2()
3423 INIT_WORK(&si->reclaim_work, swap_reclaim_work); in SYSCALL_DEFINE2()
3438 si->swap_file = swap_file; in SYSCALL_DEFINE2()
3443 error = claim_swapfile(si, inode); in SYSCALL_DEFINE2()
3480 maxpages = read_swap_header(si, swap_header, inode); in SYSCALL_DEFINE2()
3486 si->max = maxpages; in SYSCALL_DEFINE2()
3487 si->pages = maxpages - 1; in SYSCALL_DEFINE2()
3488 nr_extents = setup_swap_extents(si, &span); in SYSCALL_DEFINE2()
3493 if (si->pages != si->max - 1) { in SYSCALL_DEFINE2()
3494 pr_err("swap:%u != (max:%u - 1)\n", si->pages, si->max); in SYSCALL_DEFINE2()
3499 maxpages = si->max; in SYSCALL_DEFINE2()
3508 error = swap_cgroup_swapon(si->type, maxpages); in SYSCALL_DEFINE2()
3512 error = setup_swap_map(si, swap_header, swap_map, maxpages); in SYSCALL_DEFINE2()
3527 if (si->bdev && bdev_stable_writes(si->bdev)) in SYSCALL_DEFINE2()
3528 si->flags |= SWP_STABLE_WRITES; in SYSCALL_DEFINE2()
3530 if (si->bdev && bdev_synchronous(si->bdev)) in SYSCALL_DEFINE2()
3531 si->flags |= SWP_SYNCHRONOUS_IO; in SYSCALL_DEFINE2()
3533 if (si->bdev && bdev_nonrot(si->bdev)) { in SYSCALL_DEFINE2()
3534 si->flags |= SWP_SOLIDSTATE; in SYSCALL_DEFINE2()
3540 cluster_info = setup_clusters(si, swap_header, maxpages); in SYSCALL_DEFINE2()
3548 si->bdev && bdev_max_discard_sectors(si->bdev)) { in SYSCALL_DEFINE2()
3555 si->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD | in SYSCALL_DEFINE2()
3565 si->flags &= ~SWP_PAGE_DISCARD; in SYSCALL_DEFINE2()
3567 si->flags &= ~SWP_AREA_DISCARD; in SYSCALL_DEFINE2()
3570 if (si->flags & SWP_AREA_DISCARD) { in SYSCALL_DEFINE2()
3571 int err = discard_swap(si); in SYSCALL_DEFINE2()
3574 si, err); in SYSCALL_DEFINE2()
3578 error = zswap_swapon(si->type, maxpages); in SYSCALL_DEFINE2()
3597 enable_swap_info(si, prio, swap_map, cluster_info, zeromap); in SYSCALL_DEFINE2()
3600 K(si->pages), name->name, si->prio, nr_extents, in SYSCALL_DEFINE2()
3602 (si->flags & SWP_SOLIDSTATE) ? "SS" : "", in SYSCALL_DEFINE2()
3603 (si->flags & SWP_DISCARDABLE) ? "D" : "", in SYSCALL_DEFINE2()
3604 (si->flags & SWP_AREA_DISCARD) ? "s" : "", in SYSCALL_DEFINE2()
3605 (si->flags & SWP_PAGE_DISCARD) ? "c" : ""); in SYSCALL_DEFINE2()
3614 zswap_swapoff(si->type); in SYSCALL_DEFINE2()
3618 kfree(si->global_cluster); in SYSCALL_DEFINE2()
3619 si->global_cluster = NULL; in SYSCALL_DEFINE2()
3621 destroy_swap_extents(si); in SYSCALL_DEFINE2()
3622 swap_cgroup_swapoff(si->type); in SYSCALL_DEFINE2()
3624 si->swap_file = NULL; in SYSCALL_DEFINE2()
3625 si->flags = 0; in SYSCALL_DEFINE2()
3652 struct swap_info_struct *si = swap_info[type]; in si_swapinfo() local
3654 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK)) in si_swapinfo()
3655 nr_to_be_unused += swap_usage_in_pages(si); in si_swapinfo()
3674 struct swap_info_struct *si; in __swap_duplicate() local
3681 si = swap_entry_to_info(entry); in __swap_duplicate()
3682 if (WARN_ON_ONCE(!si)) { in __swap_duplicate()
3690 ci = swap_cluster_lock(si, offset); in __swap_duplicate()
3694 count = si->swap_map[offset + i]; in __swap_duplicate()
3722 count = si->swap_map[offset + i]; in __swap_duplicate()
3730 else if (swap_count_continued(si, offset + i, count)) in __swap_duplicate()
3741 WRITE_ONCE(si->swap_map[offset + i], count | has_cache); in __swap_duplicate()
3791 void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr) in swapcache_clear() argument
3793 swap_entries_put_cache(si, entry, nr); in swapcache_clear()
3813 struct swap_info_struct *si; in add_swap_count_continuation() local
3828 si = get_swap_device(entry); in add_swap_count_continuation()
3829 if (!si) { in add_swap_count_continuation()
3839 ci = swap_cluster_lock(si, offset); in add_swap_count_continuation()
3841 count = swap_count(si->swap_map[offset]); in add_swap_count_continuation()
3857 head = vmalloc_to_page(si->swap_map + offset); in add_swap_count_continuation()
3860 spin_lock(&si->cont_lock); in add_swap_count_continuation()
3869 si->flags |= SWP_CONTINUED; in add_swap_count_continuation()
3897 spin_unlock(&si->cont_lock); in add_swap_count_continuation()
3900 put_swap_device(si); in add_swap_count_continuation()
3916 static bool swap_count_continued(struct swap_info_struct *si, in swap_count_continued() argument
3924 head = vmalloc_to_page(si->swap_map + offset); in swap_count_continued()
3930 spin_lock(&si->cont_lock); in swap_count_continued()
3992 spin_unlock(&si->cont_lock); in swap_count_continued()
4000 static void free_swap_count_continuations(struct swap_info_struct *si) in free_swap_count_continuations() argument
4004 for (offset = 0; offset < si->max; offset += PAGE_SIZE) { in free_swap_count_continuations()
4006 head = vmalloc_to_page(si->swap_map + offset); in free_swap_count_continuations()
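
add_swap_count_continuation() and swap_count_continued() (lines 3813-3992) handle swap_map counts that overflow one byte: the byte saturates, COUNT_CONTINUED is set, and the excess lives in continuation pages reached via vmalloc_to_page(si->swap_map + offset) under si->cont_lock. A toy model of the saturate-and-spill encoding; the constants are simplified stand-ins, since the real map byte also packs flags such as SWAP_HAS_CACHE:

#include <stdio.h>

#define TOY_COUNT_MAX 0x7f  /* simplified: max count held in the map byte */
#define TOY_CONTINUED 0x80  /* simplified stand-in for COUNT_CONTINUED */

static unsigned char swap_map[4];  /* one count byte per swap slot */
static unsigned int cont[4];       /* stands in for the continuation-page chain */

static void dup_entry(int slot)
{
	if ((swap_map[slot] & ~TOY_CONTINUED) == TOY_COUNT_MAX) {
		swap_map[slot] |= TOY_CONTINUED;  /* byte saturated: spill over */
		cont[slot]++;
	} else {
		swap_map[slot]++;
	}
}

int main(void)
{
	for (int i = 0; i < 130; i++)
		dup_entry(0);
	/* 127 counts fit in the byte; the remaining 3 spilled over */
	printf("map=0x%02x cont=%u\n", swap_map[0], cont[0]);
	return 0;
}
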
4026 struct swap_info_struct *si; in __folio_throttle_swaprate() local
4045 plist_for_each_entry(si, &swap_avail_head, avail_list) { in __folio_throttle_swaprate()
4046 if (si->bdev) { in __folio_throttle_swaprate()
4047 blkcg_schedule_throttle(si->bdev->bd_disk, true); in __folio_throttle_swaprate()