// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/mm/swapfile.c
 *
 * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
 * Swap reorganised 29.12.95, Stephen Tweedie
 */

#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/sched/task.h>
#include <linux/hugetlb.h>
#include <linux/mman.h>
#include <linux/slab.h>
#include <linux/kernel_stat.h>
#include <linux/swap.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/namei.h>
#include <linux/shmem_fs.h>
#include <linux/blk-cgroup.h>
#include <linux/random.h>
#include <linux/writeback.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/ksm.h>
#include <linux/rmap.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
#include <linux/mutex.h>
#include <linux/capability.h>
#include <linux/syscalls.h>
#include <linux/memcontrol.h>
#include <linux/poll.h>
#include <linux/oom.h>
#include <linux/swapfile.h>
#include <linux/export.h>
#include <linux/swap_slots.h>
#include <linux/sort.h>
#include <linux/completion.h>
#include <linux/suspend.h>
#include <linux/zswap.h>
#include <linux/plist.h>

#include <asm/tlbflush.h>
#include <linux/swapops.h>
#include <linux/swap_cgroup.h>
#include "internal.h"
#include "swap.h"

static bool swap_count_continued(struct swap_info_struct *, pgoff_t,
                                 unsigned char);
static void free_swap_count_continuations(struct swap_info_struct *);
static void swap_entry_range_free(struct swap_info_struct *si,
                                  struct swap_cluster_info *ci,
                                  swp_entry_t entry, unsigned int nr_pages);
static void swap_range_alloc(struct swap_info_struct *si,
                             unsigned int nr_entries);
static bool folio_swapcache_freeable(struct folio *folio);
static struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
                                              unsigned long offset);
static inline void unlock_cluster(struct swap_cluster_info *ci);

static DEFINE_SPINLOCK(swap_lock);
static unsigned int nr_swapfiles;
atomic_long_t nr_swap_pages;
/*
 * Some modules use swappable objects and may try to swap them out under
 * memory pressure (via the shrinker). Before doing so, they may wish to
 * check to see if any swap space is available.
 */
EXPORT_SYMBOL_GPL(nr_swap_pages);
/* protected with swap_lock. reading in vm_swap_full() doesn't need lock */
long total_swap_pages;
static int least_priority = -1;
unsigned long swapfile_maximum_size;
#ifdef CONFIG_MIGRATION
bool swap_migration_ad_supported;
#endif	/* CONFIG_MIGRATION */

static const char Bad_file[] = "Bad swap file entry ";
static const char Unused_file[] = "Unused swap file entry ";
static const char Bad_offset[] = "Bad swap offset entry ";
static const char Unused_offset[] = "Unused swap offset entry ";

/*
 * all active swap_info_structs
 * protected with swap_lock, and ordered by priority.
 */
static PLIST_HEAD(swap_active_head);

/*
 * all available (active, not full) swap_info_structs
 * protected with swap_avail_lock, ordered by priority.
 * This is used by folio_alloc_swap() instead of swap_active_head
 * because swap_active_head includes all swap_info_structs,
 * but folio_alloc_swap() doesn't need to look at full ones.
 * This uses its own lock instead of swap_lock because when a
 * swap_info_struct changes between not-full/full, it needs to
 * add/remove itself to/from this list, but the swap_info_struct->lock
 * is held and the locking order requires swap_lock to be taken
 * before any swap_info_struct->lock.
 */
static struct plist_head *swap_avail_heads;
static DEFINE_SPINLOCK(swap_avail_lock);

static struct swap_info_struct *swap_info[MAX_SWAPFILES];

static DEFINE_MUTEX(swapon_mutex);

static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
/* Activity counter to indicate that a swapon or swapoff has occurred */
static atomic_t proc_poll_event = ATOMIC_INIT(0);

atomic_t nr_rotate_swap = ATOMIC_INIT(0);

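/*
 * Look up the swap device for @type. Returns NULL if @type is out of
 * range; readers are protected by RCU, hence the READ_ONCE() below.
 */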
static struct swap_info_struct *swap_type_to_swap_info(int type)
{
        if (type >= MAX_SWAPFILES)
                return NULL;

        return READ_ONCE(swap_info[type]); /* rcu_dereference() */
}

static inline unsigned char swap_count(unsigned char ent)
{
        return ent & ~SWAP_HAS_CACHE;	/* may include COUNT_CONTINUED flag */
}

/*
 * Use the second highest bit of the inuse_pages counter as an indicator
 * of whether the swap device is on the available plist, so the atomic
 * can still be updated arithmetically while having special data embedded.
 *
 * The inuse_pages counter is the only thing indicating whether a device
 * should be on the avail_lists or not (except swapon / swapoff). By
 * embedding the off-list bit in the atomic counter, updates no longer
 * need any lock to check the list status.
 *
 * This bit is set if the device is not on the plist and not usable,
 * and cleared if the device is on the plist.
 */
#define SWAP_USAGE_OFFLIST_BIT (1UL << (BITS_PER_TYPE(atomic_t) - 2))
#define SWAP_USAGE_COUNTER_MASK (~SWAP_USAGE_OFFLIST_BIT)
static long swap_usage_in_pages(struct swap_info_struct *si)
{
        return atomic_long_read(&si->inuse_pages) & SWAP_USAGE_COUNTER_MASK;
}

/* Reclaim the swap entry anyway if possible */
#define TTRS_ANYWAY		0x1
/*
 * Reclaim the swap entry if there are no more mappings of the
 * corresponding page
 */
#define TTRS_UNMAPPED		0x2
/* Reclaim the swap entry if swap is getting full */
#define TTRS_FULL		0x4
/* Reclaim directly, bypass the slot cache and don't touch device lock */
#define TTRS_DIRECT		0x8

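/*
 * Check whether every slot in [offset, offset + nr_pages) is owned by the
 * swap cache only, i.e. the count in swap_map is exactly SWAP_HAS_CACHE.
 * Caller must hold the cluster lock covering the range.
 */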
static bool swap_is_has_cache(struct swap_info_struct *si,
                              unsigned long offset, int nr_pages)
{
        unsigned char *map = si->swap_map + offset;
        unsigned char *map_end = map + nr_pages;

        do {
                VM_BUG_ON(!(*map & SWAP_HAS_CACHE));
                if (*map != SWAP_HAS_CACHE)
                        return false;
        } while (++map < map_end);

        return true;
}

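/*
 * Check whether [offset, offset + nr_pages) holds the last map reference
 * for every slot: each slot must carry the same count byte, with a swap
 * count of exactly 1. *has_cache reports whether SWAP_HAS_CACHE is also
 * set on the range.
 */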
static bool swap_is_last_map(struct swap_info_struct *si,
                             unsigned long offset, int nr_pages, bool *has_cache)
{
        unsigned char *map = si->swap_map + offset;
        unsigned char *map_end = map + nr_pages;
        unsigned char count = *map;

        if (swap_count(count) != 1)
                return false;

        while (++map < map_end) {
                if (*map != count)
                        return false;
        }

        *has_cache = !!(count & SWAP_HAS_CACHE);
        return true;
}

/*
 * Returns the number of pages in the folio that backs the swap entry. If
 * positive, the folio was reclaimed. If negative, the folio was not
 * reclaimed. If 0, no folio was associated with the swap entry.
 */
static int __try_to_reclaim_swap(struct swap_info_struct *si,
                                 unsigned long offset, unsigned long flags)
{
        swp_entry_t entry = swp_entry(si->type, offset);
        struct address_space *address_space = swap_address_space(entry);
        struct swap_cluster_info *ci;
        struct folio *folio;
        int ret, nr_pages;
        bool need_reclaim;

        folio = filemap_get_folio(address_space, swap_cache_index(entry));
        if (IS_ERR(folio))
                return 0;

        nr_pages = folio_nr_pages(folio);
        ret = -nr_pages;

        /*
         * This function is called from scan_swap_map_slots(), which may be
         * invoked by vmscan.c while reclaiming folios, so a folio lock may
         * already be held. We have to use trylock to avoid deadlock. This
         * is a special case; in usual operations, use folio_free_swap()
         * with an explicit folio_lock() instead.
         */
        if (!folio_trylock(folio))
                goto out;

        /* offset could point to the middle of a large folio */
        entry = folio->swap;
        offset = swp_offset(entry);

        need_reclaim = ((flags & TTRS_ANYWAY) ||
                        ((flags & TTRS_UNMAPPED) && !folio_mapped(folio)) ||
                        ((flags & TTRS_FULL) && mem_cgroup_swap_full(folio)));
        if (!need_reclaim || !folio_swapcache_freeable(folio))
                goto out_unlock;

        /*
         * It's safe to delete the folio from swap cache only if the folio's
         * swap_map is HAS_CACHE only, which means the slots have no page table
         * reference or pending writeback, and can't be allocated to others.
         */
        ci = lock_cluster(si, offset);
        need_reclaim = swap_is_has_cache(si, offset, nr_pages);
        unlock_cluster(ci);
        if (!need_reclaim)
                goto out_unlock;

        if (!(flags & TTRS_DIRECT)) {
                /* Free through slot cache */
                delete_from_swap_cache(folio);
                folio_set_dirty(folio);
                ret = nr_pages;
                goto out_unlock;
        }

        xa_lock_irq(&address_space->i_pages);
        __delete_from_swap_cache(folio, entry, NULL);
        xa_unlock_irq(&address_space->i_pages);
        folio_ref_sub(folio, nr_pages);
        folio_set_dirty(folio);

        ci = lock_cluster(si, offset);
        swap_entry_range_free(si, ci, entry, nr_pages);
        unlock_cluster(ci);
        ret = nr_pages;
out_unlock:
        folio_unlock(folio);
out:
        folio_put(folio);
        return ret;
}

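/*
 * Swap extents are kept in an rbtree ordered by the page range they map;
 * first_se() and next_se() walk them in page order.
 */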
static inline struct swap_extent *first_se(struct swap_info_struct *sis)
{
        struct rb_node *rb = rb_first(&sis->swap_extent_root);
        return rb_entry(rb, struct swap_extent, rb_node);
}

static inline struct swap_extent *next_se(struct swap_extent *se)
{
        struct rb_node *rb = rb_next(&se->rb_node);
        return rb ? rb_entry(rb, struct swap_extent, rb_node) : NULL;
}

/*
 * swapon tells the device that all the old swap contents can be discarded,
 * to allow the swap device to optimize its wear-levelling.
 */
static int discard_swap(struct swap_info_struct *si)
{
        struct swap_extent *se;
        sector_t start_block;
        sector_t nr_blocks;
        int err = 0;

        /* Do not discard the swap header page! */
        se = first_se(si);
        start_block = (se->start_block + 1) << (PAGE_SHIFT - 9);
        nr_blocks = ((sector_t)se->nr_pages - 1) << (PAGE_SHIFT - 9);
        if (nr_blocks) {
                err = blkdev_issue_discard(si->bdev, start_block,
                                nr_blocks, GFP_KERNEL);
                if (err)
                        return err;
                cond_resched();
        }

        for (se = next_se(se); se; se = next_se(se)) {
                start_block = se->start_block << (PAGE_SHIFT - 9);
                nr_blocks = (sector_t)se->nr_pages << (PAGE_SHIFT - 9);

                err = blkdev_issue_discard(si->bdev, start_block,
                                nr_blocks, GFP_KERNEL);
                if (err)
                        break;

                cond_resched();
        }
        return err;		/* That will often be -EOPNOTSUPP */
}

static struct swap_extent *
offset_to_swap_extent(struct swap_info_struct *sis, unsigned long offset)
{
        struct swap_extent *se;
        struct rb_node *rb;

        rb = sis->swap_extent_root.rb_node;
        while (rb) {
                se = rb_entry(rb, struct swap_extent, rb_node);
                if (offset < se->start_page)
                        rb = rb->rb_left;
                else if (offset >= se->start_page + se->nr_pages)
                        rb = rb->rb_right;
                else
                        return se;
        }
        /* It *must* be present */
        BUG();
}

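/*
 * Translate a folio's swap entry into the 512-byte sector on the backing
 * device where the folio starts, using the extent that covers its offset.
 */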
sector_t swap_folio_sector(struct folio *folio)
{
        struct swap_info_struct *sis = swp_swap_info(folio->swap);
        struct swap_extent *se;
        sector_t sector;
        pgoff_t offset;

        offset = swp_offset(folio->swap);
        se = offset_to_swap_extent(sis, offset);
        sector = se->start_block + (offset - se->start_page);
        return sector << (PAGE_SHIFT - 9);
}

/*
 * swap allocation tells the device that a cluster of swap can now be
 * discarded, to allow the swap device to optimize its wear-levelling.
 */
static void discard_swap_cluster(struct swap_info_struct *si,
                                 pgoff_t start_page, pgoff_t nr_pages)
{
        struct swap_extent *se = offset_to_swap_extent(si, start_page);

        while (nr_pages) {
                pgoff_t offset = start_page - se->start_page;
                sector_t start_block = se->start_block + offset;
                sector_t nr_blocks = se->nr_pages - offset;

                if (nr_blocks > nr_pages)
                        nr_blocks = nr_pages;
                start_page += nr_blocks;
                nr_pages -= nr_blocks;

                start_block <<= PAGE_SHIFT - 9;
                nr_blocks <<= PAGE_SHIFT - 9;
                if (blkdev_issue_discard(si->bdev, start_block,
                                        nr_blocks, GFP_NOIO))
                        break;

                se = next_se(se);
        }
}

#ifdef CONFIG_THP_SWAP
#define SWAPFILE_CLUSTER	HPAGE_PMD_NR

#define swap_entry_order(order)	(order)
#else
#define SWAPFILE_CLUSTER	256

/*
 * Define swap_entry_order() as a constant to let the compiler optimize
 * out some code if !CONFIG_THP_SWAP
 */
#define swap_entry_order(order)	0
#endif
#define LATENCY_LIMIT		256

static inline bool cluster_is_empty(struct swap_cluster_info *info)
{
        return info->count == 0;
}

static inline bool cluster_is_discard(struct swap_cluster_info *info)
{
        return info->flags == CLUSTER_FLAG_DISCARD;
}

static inline bool cluster_is_usable(struct swap_cluster_info *ci, int order)
{
        if (unlikely(ci->flags > CLUSTER_FLAG_USABLE))
                return false;
        if (!order)
                return true;
        return cluster_is_empty(ci) || order == ci->order;
}

static inline unsigned int cluster_index(struct swap_info_struct *si,
                                         struct swap_cluster_info *ci)
{
        return ci - si->cluster_info;
}

static inline struct swap_cluster_info *offset_to_cluster(struct swap_info_struct *si,
                                                          unsigned long offset)
{
        return &si->cluster_info[offset / SWAPFILE_CLUSTER];
}

static inline unsigned int cluster_offset(struct swap_info_struct *si,
                                          struct swap_cluster_info *ci)
{
        return cluster_index(si, ci) * SWAPFILE_CLUSTER;
}

static inline struct swap_cluster_info *lock_cluster(struct swap_info_struct *si,
                                                     unsigned long offset)
{
        struct swap_cluster_info *ci;

        ci = offset_to_cluster(si, offset);
        spin_lock(&ci->lock);

        return ci;
}

static inline void unlock_cluster(struct swap_cluster_info *ci)
{
        spin_unlock(&ci->lock);
}

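/*
 * Move a cluster onto @list and update its flags. The cluster lists are
 * protected by si->lock, while each cluster's flags field is protected by
 * its own ci->lock; the per-order fragment counter is kept in sync here.
 */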
static void move_cluster(struct swap_info_struct *si,
                         struct swap_cluster_info *ci, struct list_head *list,
                         enum swap_cluster_flags new_flags)
{
        VM_WARN_ON(ci->flags == new_flags);

        BUILD_BUG_ON(1 << sizeof(ci->flags) * BITS_PER_BYTE < CLUSTER_FLAG_MAX);
        lockdep_assert_held(&ci->lock);

        spin_lock(&si->lock);
        if (ci->flags == CLUSTER_FLAG_NONE)
                list_add_tail(&ci->list, list);
        else
                list_move_tail(&ci->list, list);
        spin_unlock(&si->lock);

        if (ci->flags == CLUSTER_FLAG_FRAG)
                atomic_long_dec(&si->frag_cluster_nr[ci->order]);
        else if (new_flags == CLUSTER_FLAG_FRAG)
                atomic_long_inc(&si->frag_cluster_nr[ci->order]);
        ci->flags = new_flags;
}

/* Add a cluster to the discard list and schedule the discard work */
static void swap_cluster_schedule_discard(struct swap_info_struct *si,
                struct swap_cluster_info *ci)
{
        unsigned int idx = cluster_index(si, ci);
        /*
         * If scan_swap_map_slots() can't find a free cluster, it will check
         * si->swap_map directly. To make sure the discarding cluster isn't
         * taken by scan_swap_map_slots(), mark the swap entries bad (occupied).
         * They will be cleared after discard.
         */
        memset(si->swap_map + idx * SWAPFILE_CLUSTER,
                        SWAP_MAP_BAD, SWAPFILE_CLUSTER);
        VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
        move_cluster(si, ci, &si->discard_clusters, CLUSTER_FLAG_DISCARD);
        schedule_work(&si->discard_work);
}

static void __free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
{
        lockdep_assert_held(&ci->lock);
        move_cluster(si, ci, &si->free_clusters, CLUSTER_FLAG_FREE);
        ci->order = 0;
}

/*
 * Isolate and lock the first cluster that is not contended on a list,
 * clearing its flag before it is taken off the list. Cluster flags must
 * be in sync with the list status, so cluster updaters can always know
 * the cluster list status without touching the si lock.
 *
 * Note it's possible that all clusters on a list are contended, so this
 * may return NULL for a non-empty list.
 */
static struct swap_cluster_info *isolate_lock_cluster(
                struct swap_info_struct *si, struct list_head *list)
{
        struct swap_cluster_info *ci, *ret = NULL;

        spin_lock(&si->lock);

        if (unlikely(!(si->flags & SWP_WRITEOK)))
                goto out;

        list_for_each_entry(ci, list, list) {
                if (!spin_trylock(&ci->lock))
                        continue;

                /* We may only isolate and clear flags of the following lists */
                VM_BUG_ON(!ci->flags);
                VM_BUG_ON(ci->flags > CLUSTER_FLAG_USABLE &&
                          ci->flags != CLUSTER_FLAG_FULL);

                list_del(&ci->list);
                ci->flags = CLUSTER_FLAG_NONE;
                ret = ci;
                break;
        }
out:
        spin_unlock(&si->lock);

        return ret;
}

/*
 * Actually do the discards. After a cluster discard is finished, the
 * cluster will be added to the free cluster list. Discard clusters are a
 * bit special as they don't participate in allocation or reclaim, so
 * clusters marked as CLUSTER_FLAG_DISCARD must remain off-list or on the
 * discard list.
 */
static bool swap_do_scheduled_discard(struct swap_info_struct *si)
{
        struct swap_cluster_info *ci;
        bool ret = false;
        unsigned int idx;

        spin_lock(&si->lock);
        while (!list_empty(&si->discard_clusters)) {
                ci = list_first_entry(&si->discard_clusters, struct swap_cluster_info, list);
                /*
                 * Delete the cluster from the list to prepare for discard,
                 * but keep the CLUSTER_FLAG_DISCARD flag: a percpu_cluster
                 * could still be pointing to it, or relocate_cluster could
                 * run into it.
                 */
                list_del(&ci->list);
                idx = cluster_index(si, ci);
                spin_unlock(&si->lock);
                discard_swap_cluster(si, idx * SWAPFILE_CLUSTER,
                                SWAPFILE_CLUSTER);

                spin_lock(&ci->lock);
                /*
                 * Discard is done, clear its flags as it's off-list, then
                 * return the cluster to the allocation list.
                 */
                ci->flags = CLUSTER_FLAG_NONE;
                memset(si->swap_map + idx * SWAPFILE_CLUSTER,
                                0, SWAPFILE_CLUSTER);
                __free_cluster(si, ci);
                spin_unlock(&ci->lock);
                ret = true;
                spin_lock(&si->lock);
        }
        spin_unlock(&si->lock);
        return ret;
}

static void swap_discard_work(struct work_struct *work)
{
        struct swap_info_struct *si;

        si = container_of(work, struct swap_info_struct, discard_work);

        swap_do_scheduled_discard(si);
}

static void swap_users_ref_free(struct percpu_ref *ref)
{
        struct swap_info_struct *si;

        si = container_of(ref, struct swap_info_struct, users);
        complete(&si->comp);
}

/*
 * Must be called after freeing if ci->count == 0; moves the cluster to
 * the free or discard list.
 */
static void free_cluster(struct swap_info_struct *si, struct swap_cluster_info *ci)
{
        VM_BUG_ON(ci->count != 0);
        VM_BUG_ON(ci->flags == CLUSTER_FLAG_FREE);
        lockdep_assert_held(&ci->lock);

        /*
         * If the swap is discardable, prepare to discard the cluster
         * instead of freeing it immediately. The cluster will be freed
         * after discard.
         */
        if ((si->flags & (SWP_WRITEOK | SWP_PAGE_DISCARD)) ==
            (SWP_WRITEOK | SWP_PAGE_DISCARD)) {
                swap_cluster_schedule_discard(si, ci);
                return;
        }

        __free_cluster(si, ci);
}

/*
 * Must be called after freeing if ci->count != 0; moves the cluster to
 * the nonfull list.
 */
static void partial_free_cluster(struct swap_info_struct *si,
                                 struct swap_cluster_info *ci)
{
        VM_BUG_ON(!ci->count || ci->count == SWAPFILE_CLUSTER);
        lockdep_assert_held(&ci->lock);

        if (ci->flags != CLUSTER_FLAG_NONFULL)
                move_cluster(si, ci, &si->nonfull_clusters[ci->order],
                             CLUSTER_FLAG_NONFULL);
}

/*
 * Must be called after allocation; moves the cluster to the full or frag
 * list. Note: allocation doesn't acquire the si lock, and may drop the
 * ci lock for reclaim, so the cluster could be on any list when called.
 */
static void relocate_cluster(struct swap_info_struct *si,
                             struct swap_cluster_info *ci)
{
        lockdep_assert_held(&ci->lock);

        /* Discard cluster must remain off-list or on discard list */
        if (cluster_is_discard(ci))
                return;

        if (!ci->count) {
                if (ci->flags != CLUSTER_FLAG_FREE)
                        free_cluster(si, ci);
        } else if (ci->count != SWAPFILE_CLUSTER) {
                if (ci->flags != CLUSTER_FLAG_FRAG)
                        move_cluster(si, ci, &si->frag_clusters[ci->order],
                                     CLUSTER_FLAG_FRAG);
        } else {
                if (ci->flags != CLUSTER_FLAG_FULL)
                        move_cluster(si, ci, &si->full_clusters,
                                     CLUSTER_FLAG_FULL);
        }
}

/*
 * The cluster corresponding to page_nr will be used. The cluster will not
 * be added to the free cluster list and its usage counter will be
 * increased by 1. Only used for initialization.
 */
static void inc_cluster_info_page(struct swap_info_struct *si,
        struct swap_cluster_info *cluster_info, unsigned long page_nr)
{
        unsigned long idx = page_nr / SWAPFILE_CLUSTER;
        struct swap_cluster_info *ci;

        ci = cluster_info + idx;
        ci->count++;

        VM_BUG_ON(ci->count > SWAPFILE_CLUSTER);
        VM_BUG_ON(ci->flags);
}

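/*
 * Try to empty [start, end) by reclaiming any swap cache in the range.
 * Drops ci->lock while reclaiming, then rechecks the whole range under
 * the lock again; returns true only if every slot in the range is free.
 */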
static bool cluster_reclaim_range(struct swap_info_struct *si,
                                  struct swap_cluster_info *ci,
                                  unsigned long start, unsigned long end)
{
        unsigned char *map = si->swap_map;
        unsigned long offset = start;
        int nr_reclaim;

        spin_unlock(&ci->lock);
        do {
                switch (READ_ONCE(map[offset])) {
                case 0:
                        offset++;
                        break;
                case SWAP_HAS_CACHE:
                        nr_reclaim = __try_to_reclaim_swap(si, offset, TTRS_ANYWAY | TTRS_DIRECT);
                        if (nr_reclaim > 0)
                                offset += nr_reclaim;
                        else
                                goto out;
                        break;
                default:
                        goto out;
                }
        } while (offset < end);
out:
        spin_lock(&ci->lock);
        /*
         * Recheck the range no matter whether reclaim succeeded or not:
         * a slot could have been freed while we were not holding the lock.
         */
        for (offset = start; offset < end; offset++)
                if (READ_ONCE(map[offset]))
                        return false;

        return true;
}

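/*
 * Check whether [start, start + nr_pages) is free enough to allocate
 * from: empty slots are fine, and slots holding only swap cache may be
 * reclaimed when swap is nearly full, in which case *need_reclaim is set.
 */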
static bool cluster_scan_range(struct swap_info_struct *si,
                               struct swap_cluster_info *ci,
                               unsigned long start, unsigned int nr_pages,
                               bool *need_reclaim)
{
        unsigned long offset, end = start + nr_pages;
        unsigned char *map = si->swap_map;

        for (offset = start; offset < end; offset++) {
                switch (READ_ONCE(map[offset])) {
                case 0:
                        continue;
                case SWAP_HAS_CACHE:
                        if (!vm_swap_full())
                                return false;
                        *need_reclaim = true;
                        continue;
                default:
                        return false;
                }
        }

        return true;
}

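/*
 * Commit an allocation of 1 << order slots starting at @start: mark the
 * slots with @usage in swap_map and update the cluster and device usage
 * counters. Caller holds ci->lock and has verified the range is empty;
 * fails only if the device is no longer writable.
 */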
static bool cluster_alloc_range(struct swap_info_struct *si, struct swap_cluster_info *ci,
                                unsigned int start, unsigned char usage,
                                unsigned int order)
{
        unsigned int nr_pages = 1 << order;

        lockdep_assert_held(&ci->lock);

        if (!(si->flags & SWP_WRITEOK))
                return false;

        /*
         * The first allocation in a cluster makes the
         * cluster exclusive to this order
         */
        if (cluster_is_empty(ci))
                ci->order = order;

        memset(si->swap_map + start, usage, nr_pages);
        swap_range_alloc(si, nr_pages);
        ci->count += nr_pages;

        return true;
}

/* Try to use a new cluster for the current CPU and allocate from it. */
static unsigned int alloc_swap_scan_cluster(struct swap_info_struct *si,
                                            struct swap_cluster_info *ci,
                                            unsigned long offset,
                                            unsigned int order,
                                            unsigned char usage)
{
        unsigned int next = SWAP_ENTRY_INVALID, found = SWAP_ENTRY_INVALID;
        unsigned long start = ALIGN_DOWN(offset, SWAPFILE_CLUSTER);
        unsigned long end = min(start + SWAPFILE_CLUSTER, si->max);
        unsigned int nr_pages = 1 << order;
        bool need_reclaim, ret;

        lockdep_assert_held(&ci->lock);

        if (end < nr_pages || ci->count + nr_pages > SWAPFILE_CLUSTER)
                goto out;

        for (end -= nr_pages; offset <= end; offset += nr_pages) {
                need_reclaim = false;
                if (!cluster_scan_range(si, ci, offset, nr_pages, &need_reclaim))
                        continue;
                if (need_reclaim) {
                        ret = cluster_reclaim_range(si, ci, offset, offset + nr_pages);
                        /*
                         * Reclaim drops ci->lock and the cluster could be
                         * used by another order. Not checking flags as an
                         * off-list cluster has no flags set, and a change
                         * of list won't cause fragmentation.
                         */
                        if (!cluster_is_usable(ci, order))
                                goto out;
                        if (cluster_is_empty(ci))
                                offset = start;
                        /* Reclaim failed but cluster is usable, try next */
                        if (!ret)
                                continue;
                }
                if (!cluster_alloc_range(si, ci, offset, usage, order))
                        break;
                found = offset;
                offset += nr_pages;
                if (ci->count < SWAPFILE_CLUSTER && offset <= end)
                        next = offset;
                break;
        }
out:
        relocate_cluster(si, ci);
        unlock_cluster(ci);
        if (si->flags & SWP_SOLIDSTATE)
                __this_cpu_write(si->percpu_cluster->next[order], next);
        else
                si->global_cluster->next[order] = next;
        return found;
}

/* Reclaim swap-cache-only slots from full clusters to free up swap space */
static void swap_reclaim_full_clusters(struct swap_info_struct *si, bool force)
{
        long to_scan = 1;
        unsigned long offset, end;
        struct swap_cluster_info *ci;
        unsigned char *map = si->swap_map;
        int nr_reclaim;

        if (force)
                to_scan = swap_usage_in_pages(si) / SWAPFILE_CLUSTER;

        while ((ci = isolate_lock_cluster(si, &si->full_clusters))) {
                offset = cluster_offset(si, ci);
                end = min(si->max, offset + SWAPFILE_CLUSTER);
                to_scan--;

                while (offset < end) {
                        if (READ_ONCE(map[offset]) == SWAP_HAS_CACHE) {
                                spin_unlock(&ci->lock);
                                nr_reclaim = __try_to_reclaim_swap(si, offset,
                                                                   TTRS_ANYWAY | TTRS_DIRECT);
                                spin_lock(&ci->lock);
                                if (nr_reclaim) {
                                        offset += abs(nr_reclaim);
                                        continue;
                                }
                        }
                        offset++;
                }

                /* in case no swap cache is reclaimed */
                if (ci->flags == CLUSTER_FLAG_NONE)
                        relocate_cluster(si, ci);

                unlock_cluster(ci);
                if (to_scan <= 0)
                        break;
        }
}

static void swap_reclaim_work(struct work_struct *work)
{
        struct swap_info_struct *si;

        si = container_of(work, struct swap_info_struct, reclaim_work);

        swap_reclaim_full_clusters(si, true);
}

/*
 * Try to allocate swap entries with the specified order from the current
 * CPU's swap entry pool (a cluster). This might involve allocating a new
 * cluster for the current CPU too.
 */
static unsigned long cluster_alloc_swap_entry(struct swap_info_struct *si, int order,
                                              unsigned char usage)
{
        struct swap_cluster_info *ci;
        unsigned int offset, found = 0;

        if (si->flags & SWP_SOLIDSTATE) {
                /* Fast path using per CPU cluster */
                local_lock(&si->percpu_cluster->lock);
                offset = __this_cpu_read(si->percpu_cluster->next[order]);
        } else {
                /* Serialize HDD SWAP allocation for each device. */
                spin_lock(&si->global_cluster_lock);
                offset = si->global_cluster->next[order];
        }

        if (offset) {
                ci = lock_cluster(si, offset);
                /* Cluster could have been used by another order */
                if (cluster_is_usable(ci, order)) {
                        if (cluster_is_empty(ci))
                                offset = cluster_offset(si, ci);
                        found = alloc_swap_scan_cluster(si, ci, offset,
                                                        order, usage);
                } else {
                        unlock_cluster(ci);
                }
                if (found)
                        goto done;
        }

new_cluster:
        ci = isolate_lock_cluster(si, &si->free_clusters);
        if (ci) {
                found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
                                                order, usage);
                if (found)
                        goto done;
        }

        /* Try reclaim from full clusters if the free clusters list is drained */
        if (vm_swap_full())
                swap_reclaim_full_clusters(si, false);

        if (order < PMD_ORDER) {
                unsigned int frags = 0, frags_existing;

                while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[order]))) {
                        found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
                                                        order, usage);
                        if (found)
                                goto done;
                        /* Clusters that failed to allocate are moved to frag_clusters */
                        frags++;
                }

                frags_existing = atomic_long_read(&si->frag_cluster_nr[order]);
                while (frags < frags_existing &&
                       (ci = isolate_lock_cluster(si, &si->frag_clusters[order]))) {
                        atomic_long_dec(&si->frag_cluster_nr[order]);
                        /*
                         * Rotate the frag list while iterating: these
                         * clusters all failed a high-order allocation or
                         * were moved here due to per-CPU usage, but they
                         * could still contain newly released reclaimable
                         * (e.g. lazy-freed swap cache) slots.
                         */
                        found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
                                                        order, usage);
                        if (found)
                                goto done;
                        frags++;
                }
        }

        /*
         * We don't have a free cluster, but some clusters are pending
         * discard; do the discard now and reclaim them, then retry the
         * allocation from the free list.
         */
        if ((si->flags & SWP_PAGE_DISCARD) && swap_do_scheduled_discard(si))
                goto new_cluster;

        if (order)
                goto done;

        /* Order 0 stealing from higher order */
        for (int o = 1; o < SWAP_NR_ORDERS; o++) {
                /*
                 * Clusters here have at least one usable slot and can't fail
                 * an order 0 allocation, but reclaim may drop si->lock and
                 * race with another user.
                 */
                while ((ci = isolate_lock_cluster(si, &si->frag_clusters[o]))) {
                        atomic_long_dec(&si->frag_cluster_nr[o]);
                        found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
                                                        0, usage);
                        if (found)
                                goto done;
                }

                while ((ci = isolate_lock_cluster(si, &si->nonfull_clusters[o]))) {
                        found = alloc_swap_scan_cluster(si, ci, cluster_offset(si, ci),
                                                        0, usage);
                        if (found)
                                goto done;
                }
        }
done:
        if (si->flags & SWP_SOLIDSTATE)
                local_unlock(&si->percpu_cluster->lock);
        else
                spin_unlock(&si->global_cluster_lock);
        return found;
}

/* SWAP_USAGE_OFFLIST_BIT can only be set by this helper. */
static void del_from_avail_list(struct swap_info_struct *si, bool swapoff)
{
        int nid;
        unsigned long pages;

        spin_lock(&swap_avail_lock);

        if (swapoff) {
                /*
                 * Forcefully remove it. Clear the SWP_WRITEOK flag for
                 * swapoff here so it's synchronized by both si->lock and
                 * swap_avail_lock, to ensure the result can be seen by
                 * add_to_avail_list.
                 */
                lockdep_assert_held(&si->lock);
                si->flags &= ~SWP_WRITEOK;
                atomic_long_or(SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages);
        } else {
                /*
                 * If not called by swapoff, take it off-list only if it's
                 * full and SWAP_USAGE_OFFLIST_BIT is not set (strictly
                 * si->inuse_pages == pages). Any concurrent slot freeing,
                 * or the device already having been removed from the plist
                 * by someone else, will make the cmpxchg below fail.
                 */
                pages = si->pages;
                if (!atomic_long_try_cmpxchg(&si->inuse_pages, &pages,
                                             pages | SWAP_USAGE_OFFLIST_BIT))
                        goto skip;
        }

        for_each_node(nid)
                plist_del(&si->avail_lists[nid], &swap_avail_heads[nid]);

skip:
        spin_unlock(&swap_avail_lock);
}

/* SWAP_USAGE_OFFLIST_BIT can only be cleared by this helper. */
static void add_to_avail_list(struct swap_info_struct *si, bool swapon)
{
        int nid;
        long val;
        unsigned long pages;

        spin_lock(&swap_avail_lock);

        /* Corresponding to SWP_WRITEOK clearing in del_from_avail_list */
        if (swapon) {
                lockdep_assert_held(&si->lock);
                si->flags |= SWP_WRITEOK;
        } else {
                if (!(READ_ONCE(si->flags) & SWP_WRITEOK))
                        goto skip;
        }

        if (!(atomic_long_read(&si->inuse_pages) & SWAP_USAGE_OFFLIST_BIT))
                goto skip;

        val = atomic_long_fetch_and_relaxed(~SWAP_USAGE_OFFLIST_BIT, &si->inuse_pages);

        /*
         * When the device is full and on the plist, only one updater will
         * see (inuse_pages == si->pages) and will call del_from_avail_list.
         * If that updater happens to be here, just skip adding.
         */
        pages = si->pages;
        if (val == pages) {
                /* Just like the cmpxchg in del_from_avail_list */
                if (atomic_long_try_cmpxchg(&si->inuse_pages, &pages,
                                            pages | SWAP_USAGE_OFFLIST_BIT))
                        goto skip;
        }

        for_each_node(nid)
                plist_add(&si->avail_lists[nid], &swap_avail_heads[nid]);

skip:
        spin_unlock(&swap_avail_lock);
}

/*
 * swap_usage_add / swap_usage_sub of each slot are serialized by ci->lock
 * within each cluster, so the total contribution to the global counter
 * should always be positive and cannot exceed the total number of usable
 * slots.
 */
static bool swap_usage_add(struct swap_info_struct *si, unsigned int nr_entries)
{
        long val = atomic_long_add_return_relaxed(nr_entries, &si->inuse_pages);

        /*
         * If the device is now full, and SWAP_USAGE_OFFLIST_BIT is not set,
         * remove it from the plist.
         */
        if (unlikely(val == si->pages)) {
                del_from_avail_list(si, false);
                return true;
        }

        return false;
}

static void swap_usage_sub(struct swap_info_struct *si, unsigned int nr_entries)
{
        long val = atomic_long_sub_return_relaxed(nr_entries, &si->inuse_pages);

        /*
         * If the device is no longer full, and SWAP_USAGE_OFFLIST_BIT is
         * set, add it back to the plist.
         */
        if (unlikely(val & SWAP_USAGE_OFFLIST_BIT))
                add_to_avail_list(si, false);
}

static void swap_range_alloc(struct swap_info_struct *si,
                             unsigned int nr_entries)
{
        if (swap_usage_add(si, nr_entries)) {
                if (vm_swap_full())
                        schedule_work(&si->reclaim_work);
        }
}

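/*
 * Clean up after freeing [offset, offset + nr_entries): clear the zeromap
 * bits, invalidate zswap and arch state, notify the block driver of freed
 * slots, drop swap cache shadows, and finally update the usage counters.
 */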
static void swap_range_free(struct swap_info_struct *si, unsigned long offset,
                            unsigned int nr_entries)
{
        unsigned long begin = offset;
        unsigned long end = offset + nr_entries - 1;
        void (*swap_slot_free_notify)(struct block_device *, unsigned long);
        unsigned int i;

        /*
         * Use atomic clear_bit operations only on the zeromap instead of
         * non-atomic bitmap_clear to prevent adjacent bit corruption due
         * to simultaneous writes.
         */
        for (i = 0; i < nr_entries; i++) {
                clear_bit(offset + i, si->zeromap);
                zswap_invalidate(swp_entry(si->type, offset + i));
        }

        if (si->flags & SWP_BLKDEV)
                swap_slot_free_notify =
                        si->bdev->bd_disk->fops->swap_slot_free_notify;
        else
                swap_slot_free_notify = NULL;
        while (offset <= end) {
                arch_swap_invalidate_page(si->type, offset);
                if (swap_slot_free_notify)
                        swap_slot_free_notify(si->bdev, offset);
                offset++;
        }
        clear_shadow_from_swap_cache(si->type, begin, end);

        /*
         * Make sure that try_to_unuse() observes si->inuse_pages reaching 0
         * only after the above cleanups are done.
         */
        smp_wmb();
        atomic_long_add(nr_entries, &nr_swap_pages);
        swap_usage_sub(si, nr_entries);
}

static int cluster_alloc_swap(struct swap_info_struct *si,
                              unsigned char usage, int nr,
                              swp_entry_t slots[], int order)
{
        int n_ret = 0;

        while (n_ret < nr) {
                unsigned long offset = cluster_alloc_swap_entry(si, order, usage);

                if (!offset)
                        break;
                slots[n_ret++] = swp_entry(si->type, offset);
        }

        return n_ret;
}

static int scan_swap_map_slots(struct swap_info_struct *si,
                               unsigned char usage, int nr,
                               swp_entry_t slots[], int order)
{
        unsigned int nr_pages = 1 << order;

        /*
         * We try to cluster swap pages by allocating them sequentially
         * in swap. Once we've allocated SWAPFILE_CLUSTER pages this
         * way, however, we resort to first-free allocation, starting
         * a new cluster. This prevents us from scattering swap pages
         * all over the entire swap partition, so that we reduce
         * overall disk seek times between swap pages. -- sct
         * But we do now try to find an empty cluster. -Andrea
         * And we let swap pages go all over an SSD partition. Hugh
         */
        if (order > 0) {
                /*
                 * Should not even be attempting large allocations when huge
                 * page swap is disabled. Warn and fail the allocation.
                 */
                if (!IS_ENABLED(CONFIG_THP_SWAP) ||
                    nr_pages > SWAPFILE_CLUSTER) {
                        VM_WARN_ON_ONCE(1);
                        return 0;
                }

                /*
                 * The swapfile is not a block device, so we are unable to
                 * allocate large entries.
                 */
                if (!(si->flags & SWP_BLKDEV))
                        return 0;
        }

        return cluster_alloc_swap(si, usage, nr, slots, order);
}

static bool get_swap_device_info(struct swap_info_struct *si)
{
        if (!percpu_ref_tryget_live(&si->users))
                return false;
        /*
         * Guarantee that si->users is checked before accessing other
         * fields of swap_info_struct, and that si->flags (SWP_WRITEOK)
         * is up to date.
         *
         * Paired with the spin_unlock() after setup_swap_info() in
         * enable_swap_info(), and smp_wmb() in swapoff.
         */
        smp_rmb();
        return true;
}

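/*
 * Allocate up to n_goal swap entries of the given order, cycling through
 * the available devices in priority order. Returns the number of entries
 * actually allocated and stored in swp_entries[].
 */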
int get_swap_pages(int n_goal, swp_entry_t swp_entries[], int entry_order)
{
        int order = swap_entry_order(entry_order);
        unsigned long size = 1 << order;
        struct swap_info_struct *si, *next;
        long avail_pgs;
        int n_ret = 0;
        int node;

        spin_lock(&swap_avail_lock);

        avail_pgs = atomic_long_read(&nr_swap_pages) / size;
        if (avail_pgs <= 0) {
                spin_unlock(&swap_avail_lock);
                goto noswap;
        }

        n_goal = min3((long)n_goal, (long)SWAP_BATCH, avail_pgs);

        atomic_long_sub(n_goal * size, &nr_swap_pages);

start_over:
        node = numa_node_id();
        plist_for_each_entry_safe(si, next, &swap_avail_heads[node], avail_lists[node]) {
                /* requeue si to after same-priority siblings */
                plist_requeue(&si->avail_lists[node], &swap_avail_heads[node]);
                spin_unlock(&swap_avail_lock);
                if (get_swap_device_info(si)) {
                        n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE,
                                        n_goal, swp_entries, order);
                        put_swap_device(si);
                        if (n_ret || size > 1)
                                goto check_out;
                }

                spin_lock(&swap_avail_lock);
                /*
                 * if we got here, it's likely that si was almost full before,
                 * and since scan_swap_map_slots() can drop the si->lock,
                 * multiple callers probably all tried to get a page from the
                 * same si and it filled up before we could get one; or, the si
                 * filled up between us dropping swap_avail_lock and taking
                 * si->lock. Since we dropped the swap_avail_lock, the
                 * swap_avail_head list may have been modified; so if next is
                 * still in the swap_avail_head list then try it, otherwise
                 * start over if we have not gotten any slots.
                 */
                if (plist_node_empty(&next->avail_lists[node]))
                        goto start_over;
        }

        spin_unlock(&swap_avail_lock);

check_out:
        if (n_ret < n_goal)
                atomic_long_add((long)(n_goal - n_ret) * size,
                                &nr_swap_pages);
noswap:
        return n_ret;
}

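/*
 * Look up the swap device for @entry and sanity-check the entry against
 * it, printing a diagnostic and returning NULL if the entry is bad. Does
 * not take a reference; callers must otherwise keep the device alive.
 */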
static struct swap_info_struct *_swap_info_get(swp_entry_t entry)
{
        struct swap_info_struct *si;
        unsigned long offset;

        if (!entry.val)
                goto out;
        si = swp_swap_info(entry);
        if (!si)
                goto bad_nofile;
        if (data_race(!(si->flags & SWP_USED)))
                goto bad_device;
        offset = swp_offset(entry);
        if (offset >= si->max)
                goto bad_offset;
        if (data_race(!si->swap_map[swp_offset(entry)]))
                goto bad_free;
        return si;

bad_free:
        pr_err("%s: %s%08lx\n", __func__, Unused_offset, entry.val);
        goto out;
bad_offset:
        pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
        goto out;
bad_device:
        pr_err("%s: %s%08lx\n", __func__, Unused_file, entry.val);
        goto out;
bad_nofile:
        pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
out:
        return NULL;
}

static unsigned char __swap_entry_free_locked(struct swap_info_struct *si,
                                              unsigned long offset,
                                              unsigned char usage)
{
        unsigned char count;
        unsigned char has_cache;

        count = si->swap_map[offset];

        has_cache = count & SWAP_HAS_CACHE;
        count &= ~SWAP_HAS_CACHE;

        if (usage == SWAP_HAS_CACHE) {
                VM_BUG_ON(!has_cache);
                has_cache = 0;
        } else if (count == SWAP_MAP_SHMEM) {
                /*
                 * Or we could insist on shmem.c using a special
                 * swap_shmem_free() and free_shmem_swap_and_cache()...
                 */
                count = 0;
        } else if ((count & ~COUNT_CONTINUED) <= SWAP_MAP_MAX) {
                if (count == COUNT_CONTINUED) {
                        if (swap_count_continued(si, offset, count))
                                count = SWAP_MAP_MAX | COUNT_CONTINUED;
                        else
                                count = SWAP_MAP_MAX;
                } else
                        count--;
        }

        usage = count | has_cache;
        if (usage)
                WRITE_ONCE(si->swap_map[offset], usage);
        else
                WRITE_ONCE(si->swap_map[offset], SWAP_HAS_CACHE);

        return usage;
}

/*
 * When we get a swap entry, if there aren't some other ways to
 * prevent swapoff, such as the folio in swap cache is locked, RCU
 * reader side is locked, etc., the swap entry may become invalid
 * because of swapoff. Then, we need to enclose all swap related
 * functions with get_swap_device() and put_swap_device(), unless the
 * swap functions call get/put_swap_device() by themselves.
 *
 * RCU reader side lock (including any spinlock) is sufficient to
 * prevent swapoff, because synchronize_rcu() is called in swapoff()
 * before freeing data structures.
 *
 * Check whether the swap entry is valid in the swap device. If so,
 * return a pointer to the swap_info_struct, and keep the swap entry
 * valid by preventing the swap device from being swapped off, until
 * put_swap_device() is called. Otherwise return NULL.
 *
 * Notice that swapoff or swapoff+swapon can still happen before the
 * percpu_ref_tryget_live() in get_swap_device() or after the
 * percpu_ref_put() in put_swap_device() if there isn't any other way
 * to prevent swapoff. The caller must be prepared for that. For
 * example, the following situation is possible.
 *
 *   CPU1				CPU2
 *   do_swap_page()
 *     ...				swapoff+swapon
 *     __read_swap_cache_async()
 *       swapcache_prepare()
 *         __swap_duplicate()
 *           // check swap_map
 *     // verify PTE not changed
 *
 * In __swap_duplicate(), the swap_map needs to be checked before being
 * changed, partly because the specified swap entry may be for another
 * swap device which has been swapped off. And in do_swap_page(), after
 * the page is read from the swap device, the PTE is verified not
 * changed with the page table locked to check whether the swap device
 * has been swapped off, or swapped off and then on again.
 */
struct swap_info_struct *get_swap_device(swp_entry_t entry)
{
        struct swap_info_struct *si;
        unsigned long offset;

        if (!entry.val)
                goto out;
        si = swp_swap_info(entry);
        if (!si)
                goto bad_nofile;
        if (!get_swap_device_info(si))
                goto out;
        offset = swp_offset(entry);
        if (offset >= si->max)
                goto put_out;

        return si;
bad_nofile:
        pr_err("%s: %s%08lx\n", __func__, Bad_file, entry.val);
out:
        return NULL;
put_out:
        pr_err("%s: %s%08lx\n", __func__, Bad_offset, entry.val);
        percpu_ref_put(&si->users);
        return NULL;
}

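/*
 * Drop one map reference on @entry; if the slot's usage drops to zero,
 * free it entirely. Returns the remaining usage byte (SWAP_HAS_CACHE if
 * only the swap cache reference is left, 0 if the slot was freed).
 */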
static unsigned char __swap_entry_free(struct swap_info_struct *si,
                                       swp_entry_t entry)
{
        struct swap_cluster_info *ci;
        unsigned long offset = swp_offset(entry);
        unsigned char usage;

        ci = lock_cluster(si, offset);
        usage = __swap_entry_free_locked(si, offset, 1);
        if (!usage)
                swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
        unlock_cluster(ci);

        return usage;
}

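/*
 * Drop the last map reference on a contiguous range of @nr entries in one
 * batch when possible, falling back to freeing entry by entry otherwise.
 * Returns true if any entry is left owned by the swap cache only.
 */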
static bool __swap_entries_free(struct swap_info_struct *si,
                swp_entry_t entry, int nr)
{
        unsigned long offset = swp_offset(entry);
        unsigned int type = swp_type(entry);
        struct swap_cluster_info *ci;
        bool has_cache = false;
        unsigned char count;
        int i;

        if (nr <= 1 || swap_count(data_race(si->swap_map[offset])) != 1)
                goto fallback;
        /* cross into another cluster */
        if (nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER)
                goto fallback;

        ci = lock_cluster(si, offset);
        if (!swap_is_last_map(si, offset, nr, &has_cache)) {
                unlock_cluster(ci);
                goto fallback;
        }
        for (i = 0; i < nr; i++)
                WRITE_ONCE(si->swap_map[offset + i], SWAP_HAS_CACHE);
        if (!has_cache)
                swap_entry_range_free(si, ci, entry, nr);
        unlock_cluster(ci);

        return has_cache;

fallback:
        for (i = 0; i < nr; i++) {
                if (data_race(si->swap_map[offset + i])) {
                        count = __swap_entry_free(si, swp_entry(type, offset + i));
                        if (count == SWAP_HAS_CACHE)
                                has_cache = true;
                } else {
                        WARN_ON_ONCE(1);
                }
        }
        return has_cache;
}

/*
 * Drop the last HAS_CACHE flag of swap entries; the caller has to
 * ensure all entries belong to the same cgroup.
 */
static void swap_entry_range_free(struct swap_info_struct *si,
                                  struct swap_cluster_info *ci,
                                  swp_entry_t entry, unsigned int nr_pages)
{
        unsigned long offset = swp_offset(entry);
        unsigned char *map = si->swap_map + offset;
        unsigned char *map_end = map + nr_pages;

        /* It should never free entries across different clusters */
        VM_BUG_ON(ci != offset_to_cluster(si, offset + nr_pages - 1));
        VM_BUG_ON(cluster_is_empty(ci));
        VM_BUG_ON(ci->count < nr_pages);

        ci->count -= nr_pages;
        do {
                VM_BUG_ON(*map != SWAP_HAS_CACHE);
                *map = 0;
        } while (++map < map_end);

        mem_cgroup_uncharge_swap(entry, nr_pages);
        swap_range_free(si, offset, nr_pages);

        if (!ci->count)
                free_cluster(si, ci);
        else
                partial_free_cluster(si, ci);
}

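/*
 * Drop @usage on each entry in [offset, offset + nr_pages), which must
 * all lie within one cluster; slots whose usage drops to zero are freed.
 */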
static void cluster_swap_free_nr(struct swap_info_struct *si,
                unsigned long offset, int nr_pages,
                unsigned char usage)
{
        struct swap_cluster_info *ci;
        unsigned long end = offset + nr_pages;

        ci = lock_cluster(si, offset);
        do {
                if (!__swap_entry_free_locked(si, offset, usage))
                        swap_entry_range_free(si, ci, swp_entry(si->type, offset), 1);
        } while (++offset < end);
        unlock_cluster(ci);
}

/*
 * Caller has made sure that the swap device corresponding to entry
 * is still around or has not been recycled.
 */
void swap_free_nr(swp_entry_t entry, int nr_pages)
{
        int nr;
        struct swap_info_struct *sis;
        unsigned long offset = swp_offset(entry);

        sis = _swap_info_get(entry);
        if (!sis)
                return;

        while (nr_pages) {
                nr = min_t(int, nr_pages, SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
                cluster_swap_free_nr(sis, offset, nr, 1);
                offset += nr;
                nr_pages -= nr;
        }
}

/*
 * Called after dropping the swapcache to decrease the refcount of
 * swap entries.
 */
void put_swap_folio(struct folio *folio, swp_entry_t entry)
{
        unsigned long offset = swp_offset(entry);
        struct swap_cluster_info *ci;
        struct swap_info_struct *si;
        int size = 1 << swap_entry_order(folio_order(folio));

        si = _swap_info_get(entry);
        if (!si)
                return;

        ci = lock_cluster(si, offset);
        if (swap_is_has_cache(si, offset, size))
                swap_entry_range_free(si, ci, entry, size);
        else {
                for (int i = 0; i < size; i++, entry.val++) {
                        if (!__swap_entry_free_locked(si, offset + i, SWAP_HAS_CACHE))
                                swap_entry_range_free(si, ci, entry, 1);
                }
        }
        unlock_cluster(ci);
}

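/*
 * Directly free a batch of swap entries, typically those drained from
 * the swap slot cache. Invalid entries in @entries are skipped.
 */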
void swapcache_free_entries(swp_entry_t *entries, int n)
{
        int i;
        struct swap_cluster_info *ci;
        struct swap_info_struct *si = NULL;

        if (n <= 0)
                return;

        for (i = 0; i < n; ++i) {
                si = _swap_info_get(entries[i]);
                if (si) {
                        ci = lock_cluster(si, swp_offset(entries[i]));
                        swap_entry_range_free(si, ci, entries[i], 1);
                        unlock_cluster(ci);
                }
        }
}

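/*
 * Return the swap count of @entry without taking the cluster lock; the
 * caller must ensure the entry cannot be freed concurrently.
 */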
int __swap_count(swp_entry_t entry)
{
        struct swap_info_struct *si = swp_swap_info(entry);
        pgoff_t offset = swp_offset(entry);

        return swap_count(si->swap_map[offset]);
}

/*
 * How many references to @entry are currently swapped out?
 * This does not give an exact answer when swap count is continued,
 * but does include the high COUNT_CONTINUED flag to allow for that.
 */
int swap_swapcount(struct swap_info_struct *si, swp_entry_t entry)
{
        pgoff_t offset = swp_offset(entry);
        struct swap_cluster_info *ci;
        int count;

        ci = lock_cluster(si, offset);
        count = swap_count(si->swap_map[offset]);
        unlock_cluster(ci);
        return count;
}

/*
 * How many references to @entry are currently swapped out?
 * This considers COUNT_CONTINUED so it returns the exact answer.
 */
int swp_swapcount(swp_entry_t entry)
{
        int count, tmp_count, n;
        struct swap_info_struct *si;
        struct swap_cluster_info *ci;
        struct page *page;
        pgoff_t offset;
        unsigned char *map;

        si = _swap_info_get(entry);
        if (!si)
                return 0;

        offset = swp_offset(entry);

        ci = lock_cluster(si, offset);

        count = swap_count(si->swap_map[offset]);
        if (!(count & COUNT_CONTINUED))
                goto out;

        count &= ~COUNT_CONTINUED;
        n = SWAP_MAP_MAX + 1;

        page = vmalloc_to_page(si->swap_map + offset);
        offset &= ~PAGE_MASK;
        VM_BUG_ON(page_private(page) != SWP_CONTINUED);

        do {
                page = list_next_entry(page, lru);
                map = kmap_local_page(page);
                tmp_count = map[offset];
                kunmap_local(map);

                count += (tmp_count & ~COUNT_CONTINUED) * n;
                n *= (SWAP_CONT_MAX + 1);
        } while (tmp_count & COUNT_CONTINUED);
out:
        unlock_cluster(ci);
        return count;
}

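/*
 * Check whether any entry in the naturally aligned 1 << order block
 * around @entry still has a nonzero swap count, i.e. is still swapped
 * out by some user.
 */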
swap_page_trans_huge_swapped(struct swap_info_struct * si,swp_entry_t entry,int order)1678 static bool swap_page_trans_huge_swapped(struct swap_info_struct *si,
1679 swp_entry_t entry, int order)
1680 {
1681 struct swap_cluster_info *ci;
1682 unsigned char *map = si->swap_map;
1683 unsigned int nr_pages = 1 << order;
1684 unsigned long roffset = swp_offset(entry);
1685 unsigned long offset = round_down(roffset, nr_pages);
1686 int i;
1687 bool ret = false;
1688
1689 ci = lock_cluster(si, offset);
1690 if (nr_pages == 1) {
1691 if (swap_count(map[roffset]))
1692 ret = true;
1693 goto unlock_out;
1694 }
1695 for (i = 0; i < nr_pages; i++) {
1696 if (swap_count(map[offset + i])) {
1697 ret = true;
1698 break;
1699 }
1700 }
1701 unlock_out:
1702 unlock_cluster(ci);
1703 return ret;
1704 }
1705
folio_swapped(struct folio * folio)1706 static bool folio_swapped(struct folio *folio)
1707 {
1708 swp_entry_t entry = folio->swap;
1709 struct swap_info_struct *si = _swap_info_get(entry);
1710
1711 if (!si)
1712 return false;
1713
1714 if (!IS_ENABLED(CONFIG_THP_SWAP) || likely(!folio_test_large(folio)))
1715 return swap_swapcount(si, entry) != 0;
1716
1717 return swap_page_trans_huge_swapped(si, entry, folio_order(folio));
1718 }
1719
folio_swapcache_freeable(struct folio * folio)1720 static bool folio_swapcache_freeable(struct folio *folio)
1721 {
1722 VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
1723
1724 if (!folio_test_swapcache(folio))
1725 return false;
1726 if (folio_test_writeback(folio))
1727 return false;
1728
1729 /*
1730 * Once hibernation has begun to create its image of memory,
1731 * there's a danger that one of the calls to folio_free_swap()
1732 * - most probably a call from __try_to_reclaim_swap() while
1733 * hibernation is allocating its own swap pages for the image,
1734 * but conceivably even a call from memory reclaim - will free
1735 * the swap from a folio which has already been recorded in the
1736 * image as a clean swapcache folio, and then reuse its swap for
1737 * another page of the image. On waking from hibernation, the
1738 * original folio might be freed under memory pressure, then
1739 * later read back in from swap, now with the wrong data.
1740 *
1741 * Hibernation suspends storage while it is writing the image
1742 * to disk so check that here.
1743 */
1744 if (pm_suspended_storage())
1745 return false;
1746
1747 return true;
1748 }
1749
1750 /**
1751 * folio_free_swap() - Free the swap space used for this folio.
1752 * @folio: The folio to remove.
1753 *
1754 * If swap is getting full, or if there are no more mappings of this folio,
1755 * then call folio_free_swap() to free its swap space.
1756 *
1757 * Return: true if we were able to release the swap space.
1758 */
1759 bool folio_free_swap(struct folio *folio)
1760 {
1761 if (!folio_swapcache_freeable(folio))
1762 return false;
1763 if (folio_swapped(folio))
1764 return false;
1765
1766 delete_from_swap_cache(folio);
1767 folio_set_dirty(folio);
1768 return true;
1769 }
1770
1771 /**
1772 * free_swap_and_cache_nr() - Release reference on range of swap entries and
1773 * reclaim their cache if no more references remain.
1774 * @entry: First entry of range.
1775 * @nr: Number of entries in range.
1776 *
1777 * For each swap entry in the contiguous range, release a reference. If any swap
1778 * entries become free, try to reclaim their underlying folios, if present. The
1779 * offset range is defined by [entry.offset, entry.offset + nr).
1780 */
1781 void free_swap_and_cache_nr(swp_entry_t entry, int nr)
1782 {
1783 const unsigned long start_offset = swp_offset(entry);
1784 const unsigned long end_offset = start_offset + nr;
1785 struct swap_info_struct *si;
1786 bool any_only_cache = false;
1787 unsigned long offset;
1788
1789 if (non_swap_entry(entry))
1790 return;
1791
1792 si = get_swap_device(entry);
1793 if (!si)
1794 return;
1795
1796 if (WARN_ON(end_offset > si->max))
1797 goto out;
1798
1799 /*
1800 * First free all entries in the range.
1801 */
1802 any_only_cache = __swap_entries_free(si, entry, nr);
1803
1804 /*
1805 * Short-circuit the loop below if none of the entries had their
1806 * reference count drop to zero.
1807 */
1808 if (!any_only_cache)
1809 goto out;
1810
1811 /*
1812 * Now go back over the range trying to reclaim the swap cache. This is
1813 * more efficient for large folios because we will only try to reclaim
1814 * the swap once per folio in the common case. If we do
1815 * __swap_entry_free() and __try_to_reclaim_swap() in the same loop, the
1816 * latter will get a reference and lock the folio for every individual
1817 * page but will only succeed once the swap slot for every subpage is
1818 * zero.
1819 */
1820 for (offset = start_offset; offset < end_offset; offset += nr) {
1821 nr = 1;
1822 if (READ_ONCE(si->swap_map[offset]) == SWAP_HAS_CACHE) {
1823 /*
1824 * Folios are always naturally aligned in swap so
1825 * advance forward to the next boundary. Zero means no
1826 * folio was found for the swap entry, so advance by 1
1827 * in this case. Negative value means folio was found
1828 * but could not be reclaimed. Here we can still advance
1829 * to the next boundary.
1830 */
1831 nr = __try_to_reclaim_swap(si, offset,
1832 TTRS_UNMAPPED | TTRS_FULL);
1833 if (nr == 0)
1834 nr = 1;
1835 else if (nr < 0)
1836 nr = -nr;
1837 nr = ALIGN(offset + 1, nr) - offset;
1838 }
1839 }
1840
1841 out:
1842 put_swap_device(si);
1843 }
1844
1845 #ifdef CONFIG_HIBERNATION
1846
1847 swp_entry_t get_swap_page_of_type(int type)
1848 {
1849 struct swap_info_struct *si = swap_type_to_swap_info(type);
1850 swp_entry_t entry = {0};
1851
1852 if (!si)
1853 goto fail;
1854
1855 /* This is called for allocating swap entry, not cache */
1856 if (get_swap_device_info(si)) {
1857 if ((si->flags & SWP_WRITEOK) && scan_swap_map_slots(si, 1, 1, &entry, 0))
1858 atomic_long_dec(&nr_swap_pages);
1859 put_swap_device(si);
1860 }
1861 fail:
1862 return entry;
1863 }
1864
1865 /*
1866 * Find the swap type that corresponds to the given device (if any).
1867 *
1868 * @offset - number of the PAGE_SIZE-sized block of the device, starting
1869 * from 0, in which the swap header is expected to be located.
1870 *
1871 * This is needed for the suspend to disk (aka swsusp).
1872 */
1873 int swap_type_of(dev_t device, sector_t offset)
1874 {
1875 int type;
1876
1877 if (!device)
1878 return -1;
1879
1880 spin_lock(&swap_lock);
1881 for (type = 0; type < nr_swapfiles; type++) {
1882 struct swap_info_struct *sis = swap_info[type];
1883
1884 if (!(sis->flags & SWP_WRITEOK))
1885 continue;
1886
1887 if (device == sis->bdev->bd_dev) {
1888 struct swap_extent *se = first_se(sis);
1889
1890 if (se->start_block == offset) {
1891 spin_unlock(&swap_lock);
1892 return type;
1893 }
1894 }
1895 }
1896 spin_unlock(&swap_lock);
1897 return -ENODEV;
1898 }
1899
1900 int find_first_swap(dev_t *device)
1901 {
1902 int type;
1903
1904 spin_lock(&swap_lock);
1905 for (type = 0; type < nr_swapfiles; type++) {
1906 struct swap_info_struct *sis = swap_info[type];
1907
1908 if (!(sis->flags & SWP_WRITEOK))
1909 continue;
1910 *device = sis->bdev->bd_dev;
1911 spin_unlock(&swap_lock);
1912 return type;
1913 }
1914 spin_unlock(&swap_lock);
1915 return -ENODEV;
1916 }
1917
1918 /*
1919 * Get the (PAGE_SIZE) block corresponding to the given offset on the swapdev
1920 * that corresponds to the given index in swap_info (swap type).
1921 */
1922 sector_t swapdev_block(int type, pgoff_t offset)
1923 {
1924 struct swap_info_struct *si = swap_type_to_swap_info(type);
1925 struct swap_extent *se;
1926
1927 if (!si || !(si->flags & SWP_WRITEOK))
1928 return 0;
1929 se = offset_to_swap_extent(si, offset);
1930 return se->start_block + (offset - se->start_page);
1931 }
1932
1933 /*
1934 * Return either the total number of swap pages of the given type, or the
1935 * number of free pages of that type (depending on @free).
1936 *
1937 * This is needed for software suspend
1938 */
1939 unsigned int count_swap_pages(int type, int free)
1940 {
1941 unsigned int n = 0;
1942
1943 spin_lock(&swap_lock);
1944 if ((unsigned int)type < nr_swapfiles) {
1945 struct swap_info_struct *sis = swap_info[type];
1946
1947 spin_lock(&sis->lock);
1948 if (sis->flags & SWP_WRITEOK) {
1949 n = sis->pages;
1950 if (free)
1951 n -= swap_usage_in_pages(sis);
1952 }
1953 spin_unlock(&sis->lock);
1954 }
1955 spin_unlock(&swap_lock);
1956 return n;
1957 }
1958 #endif /* CONFIG_HIBERNATION */
1959
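/*
 * Compare a PTE against a swap PTE, ignoring the software swap-pte bits
 * (e.g. soft-dirty and uffd-wp) that pte_swp_clear_flags() masks off.
 */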
1960 static inline int pte_same_as_swp(pte_t pte, pte_t swp_pte)
1961 {
1962 return pte_same(pte_swp_clear_flags(pte), swp_pte);
1963 }
1964
1965 /*
1966 * No need to decide whether this PTE shares the swap entry with others,
1967 * just let do_wp_page work it out if a write is requested later - to
1968 * force COW, vm_page_prot omits write permission from any private vma.
1969 */
1970 static int unuse_pte(struct vm_area_struct *vma, pmd_t *pmd,
1971 unsigned long addr, swp_entry_t entry, struct folio *folio)
1972 {
1973 struct page *page;
1974 struct folio *swapcache;
1975 spinlock_t *ptl;
1976 pte_t *pte, new_pte, old_pte;
1977 bool hwpoisoned = false;
1978 int ret = 1;
1979
1980 swapcache = folio;
1981 folio = ksm_might_need_to_copy(folio, vma, addr);
1982 if (unlikely(!folio))
1983 return -ENOMEM;
1984 else if (unlikely(folio == ERR_PTR(-EHWPOISON))) {
1985 hwpoisoned = true;
1986 folio = swapcache;
1987 }
1988
1989 page = folio_file_page(folio, swp_offset(entry));
1990 if (PageHWPoison(page))
1991 hwpoisoned = true;
1992
1993 pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
1994 if (unlikely(!pte || !pte_same_as_swp(ptep_get(pte),
1995 swp_entry_to_pte(entry)))) {
1996 ret = 0;
1997 goto out;
1998 }
1999
2000 old_pte = ptep_get(pte);
2001
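	/*
	 * If the page is hwpoisoned or the swapin failed, do not map it:
	 * install a poison marker instead, so a later access faults
	 * rather than consuming bad data.
	 */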
2002 if (unlikely(hwpoisoned || !folio_test_uptodate(folio))) {
2003 swp_entry_t swp_entry;
2004
2005 dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
2006 if (hwpoisoned) {
2007 swp_entry = make_hwpoison_entry(page);
2008 } else {
2009 swp_entry = make_poisoned_swp_entry();
2010 }
2011 new_pte = swp_entry_to_pte(swp_entry);
2012 ret = 0;
2013 goto setpte;
2014 }
2015
2016 /*
2017 * Some architectures may have to restore extra metadata to the page
2018 * when reading from swap. This metadata may be indexed by swap entry
2019 * so this must be called before swap_free().
2020 */
2021 arch_swap_restore(folio_swap(entry, folio), folio);
2022
2023 dec_mm_counter(vma->vm_mm, MM_SWAPENTS);
2024 inc_mm_counter(vma->vm_mm, MM_ANONPAGES);
2025 folio_get(folio);
2026 if (folio == swapcache) {
2027 rmap_t rmap_flags = RMAP_NONE;
2028
2029 /*
2030 * See do_swap_page(): writeback would be problematic.
2031 * However, we do a folio_wait_writeback() just before this
2032 * call and have the folio locked.
2033 */
2034 VM_BUG_ON_FOLIO(folio_test_writeback(folio), folio);
2035 if (pte_swp_exclusive(old_pte))
2036 rmap_flags |= RMAP_EXCLUSIVE;
2037 /*
2038 * We currently only expect small !anon folios, which are either
2039 * fully exclusive or fully shared. If we ever get large folios
2040 * here, we have to be careful.
2041 */
2042 if (!folio_test_anon(folio)) {
2043 VM_WARN_ON_ONCE(folio_test_large(folio));
2044 VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
2045 folio_add_new_anon_rmap(folio, vma, addr, rmap_flags);
2046 } else {
2047 folio_add_anon_rmap_pte(folio, page, vma, addr, rmap_flags);
2048 }
2049 } else { /* ksm created a completely new copy */
2050 folio_add_new_anon_rmap(folio, vma, addr, RMAP_EXCLUSIVE);
2051 folio_add_lru_vma(folio, vma);
2052 }
2053 new_pte = pte_mkold(mk_pte(page, vma->vm_page_prot));
2054 if (pte_swp_soft_dirty(old_pte))
2055 new_pte = pte_mksoft_dirty(new_pte);
2056 if (pte_swp_uffd_wp(old_pte))
2057 new_pte = pte_mkuffd_wp(new_pte);
2058 setpte:
2059 set_pte_at(vma->vm_mm, addr, pte, new_pte);
2060 swap_free(entry);
2061 out:
2062 if (pte)
2063 pte_unmap_unlock(pte, ptl);
2064 if (folio != swapcache) {
2065 folio_unlock(folio);
2066 folio_put(folio);
2067 }
2068 return ret;
2069 }
2070
2071 static int unuse_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
2072 unsigned long addr, unsigned long end,
2073 unsigned int type)
2074 {
2075 pte_t *pte = NULL;
2076 struct swap_info_struct *si;
2077
2078 si = swap_info[type];
2079 do {
2080 struct folio *folio;
2081 unsigned long offset;
2082 unsigned char swp_count;
2083 swp_entry_t entry;
2084 int ret;
2085 pte_t ptent;
2086
2087 if (!pte++) {
2088 pte = pte_offset_map(pmd, addr);
2089 if (!pte)
2090 break;
2091 }
2092
2093 ptent = ptep_get_lockless(pte);
2094
2095 if (!is_swap_pte(ptent))
2096 continue;
2097
2098 entry = pte_to_swp_entry(ptent);
2099 if (swp_type(entry) != type)
2100 continue;
2101
2102 offset = swp_offset(entry);
2103 pte_unmap(pte);
2104 pte = NULL;
2105
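		/*
		 * The PTE map was dropped above because swapin may sleep;
		 * unuse_pte() will remap and revalidate the PTE under its
		 * page table lock.
		 */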
2106 folio = swap_cache_get_folio(entry, vma, addr);
2107 if (!folio) {
2108 struct vm_fault vmf = {
2109 .vma = vma,
2110 .address = addr,
2111 .real_address = addr,
2112 .pmd = pmd,
2113 };
2114
2115 folio = swapin_readahead(entry, GFP_HIGHUSER_MOVABLE,
2116 &vmf);
2117 }
2118 if (!folio) {
2119 swp_count = READ_ONCE(si->swap_map[offset]);
2120 if (swp_count == 0 || swp_count == SWAP_MAP_BAD)
2121 continue;
2122 return -ENOMEM;
2123 }
2124
2125 folio_lock(folio);
2126 folio_wait_writeback(folio);
2127 ret = unuse_pte(vma, pmd, addr, entry, folio);
2128 if (ret < 0) {
2129 folio_unlock(folio);
2130 folio_put(folio);
2131 return ret;
2132 }
2133
2134 folio_free_swap(folio);
2135 folio_unlock(folio);
2136 folio_put(folio);
2137 } while (addr += PAGE_SIZE, addr != end);
2138
2139 if (pte)
2140 pte_unmap(pte);
2141 return 0;
2142 }
2143
2144 static inline int unuse_pmd_range(struct vm_area_struct *vma, pud_t *pud,
2145 unsigned long addr, unsigned long end,
2146 unsigned int type)
2147 {
2148 pmd_t *pmd;
2149 unsigned long next;
2150 int ret;
2151
2152 pmd = pmd_offset(pud, addr);
2153 do {
2154 cond_resched();
2155 next = pmd_addr_end(addr, end);
2156 ret = unuse_pte_range(vma, pmd, addr, next, type);
2157 if (ret)
2158 return ret;
2159 } while (pmd++, addr = next, addr != end);
2160 return 0;
2161 }
2162
2163 static inline int unuse_pud_range(struct vm_area_struct *vma, p4d_t *p4d,
2164 unsigned long addr, unsigned long end,
2165 unsigned int type)
2166 {
2167 pud_t *pud;
2168 unsigned long next;
2169 int ret;
2170
2171 pud = pud_offset(p4d, addr);
2172 do {
2173 next = pud_addr_end(addr, end);
2174 if (pud_none_or_clear_bad(pud))
2175 continue;
2176 ret = unuse_pmd_range(vma, pud, addr, next, type);
2177 if (ret)
2178 return ret;
2179 } while (pud++, addr = next, addr != end);
2180 return 0;
2181 }
2182
2183 static inline int unuse_p4d_range(struct vm_area_struct *vma, pgd_t *pgd,
2184 unsigned long addr, unsigned long end,
2185 unsigned int type)
2186 {
2187 p4d_t *p4d;
2188 unsigned long next;
2189 int ret;
2190
2191 p4d = p4d_offset(pgd, addr);
2192 do {
2193 next = p4d_addr_end(addr, end);
2194 if (p4d_none_or_clear_bad(p4d))
2195 continue;
2196 ret = unuse_pud_range(vma, p4d, addr, next, type);
2197 if (ret)
2198 return ret;
2199 } while (p4d++, addr = next, addr != end);
2200 return 0;
2201 }
2202
2203 static int unuse_vma(struct vm_area_struct *vma, unsigned int type)
2204 {
2205 pgd_t *pgd;
2206 unsigned long addr, end, next;
2207 int ret;
2208
2209 addr = vma->vm_start;
2210 end = vma->vm_end;
2211
2212 pgd = pgd_offset(vma->vm_mm, addr);
2213 do {
2214 next = pgd_addr_end(addr, end);
2215 if (pgd_none_or_clear_bad(pgd))
2216 continue;
2217 ret = unuse_p4d_range(vma, pgd, addr, next, type);
2218 if (ret)
2219 return ret;
2220 } while (pgd++, addr = next, addr != end);
2221 return 0;
2222 }
2223
2224 static int unuse_mm(struct mm_struct *mm, unsigned int type)
2225 {
2226 struct vm_area_struct *vma;
2227 int ret = 0;
2228 VMA_ITERATOR(vmi, mm, 0);
2229
2230 mmap_read_lock(mm);
2231 for_each_vma(vmi, vma) {
2232 if (vma->anon_vma && !is_vm_hugetlb_page(vma)) {
2233 ret = unuse_vma(vma, type);
2234 if (ret)
2235 break;
2236 }
2237
2238 cond_resched();
2239 }
2240 mmap_read_unlock(mm);
2241 return ret;
2242 }
2243
2244 /*
2245 * Scan swap_map from current position to next entry still in use.
2246 * Return 0 if there are no in-use entries after prev through the end of
2247 * the map.
2248 */
2249 static unsigned int find_next_to_unuse(struct swap_info_struct *si,
2250 unsigned int prev)
2251 {
2252 unsigned int i;
2253 unsigned char count;
2254
2255 /*
2256 * No need for swap_lock here: we're just looking
2257 * for whether an entry is in use, not modifying it; false
2258 * hits are okay, and sys_swapoff() has already prevented new
2259 * allocations from this area (while holding swap_lock).
2260 */
2261 for (i = prev + 1; i < si->max; i++) {
2262 count = READ_ONCE(si->swap_map[i]);
2263 if (count && swap_count(count) != SWAP_MAP_BAD)
2264 break;
2265 if ((i % LATENCY_LIMIT) == 0)
2266 cond_resched();
2267 }
2268
2269 if (i == si->max)
2270 i = 0;
2271
2272 return i;
2273 }
2274
2275 static int try_to_unuse(unsigned int type)
2276 {
2277 struct mm_struct *prev_mm;
2278 struct mm_struct *mm;
2279 struct list_head *p;
2280 int retval = 0;
2281 struct swap_info_struct *si = swap_info[type];
2282 struct folio *folio;
2283 swp_entry_t entry;
2284 unsigned int i;
2285
2286 if (!swap_usage_in_pages(si))
2287 goto success;
2288
2289 retry:
2290 retval = shmem_unuse(type);
2291 if (retval)
2292 return retval;
2293
2294 prev_mm = &init_mm;
2295 mmget(prev_mm);
2296
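	/*
	 * Walk every mm hanging off init_mm.mmlist, holding a reference
	 * on the current one so it cannot go away while its swap entries
	 * are being unused.
	 */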
2297 spin_lock(&mmlist_lock);
2298 p = &init_mm.mmlist;
2299 while (swap_usage_in_pages(si) &&
2300 !signal_pending(current) &&
2301 (p = p->next) != &init_mm.mmlist) {
2302
2303 mm = list_entry(p, struct mm_struct, mmlist);
2304 if (!mmget_not_zero(mm))
2305 continue;
2306 spin_unlock(&mmlist_lock);
2307 mmput(prev_mm);
2308 prev_mm = mm;
2309 retval = unuse_mm(mm, type);
2310 if (retval) {
2311 mmput(prev_mm);
2312 return retval;
2313 }
2314
2315 /*
2316 * Make sure that we aren't completely killing
2317 * interactive performance.
2318 */
2319 cond_resched();
2320 spin_lock(&mmlist_lock);
2321 }
2322 spin_unlock(&mmlist_lock);
2323
2324 mmput(prev_mm);
2325
2326 i = 0;
2327 while (swap_usage_in_pages(si) &&
2328 !signal_pending(current) &&
2329 (i = find_next_to_unuse(si, i)) != 0) {
2330
2331 entry = swp_entry(type, i);
2332 folio = filemap_get_folio(swap_address_space(entry), swap_cache_index(entry));
2333 if (IS_ERR(folio))
2334 continue;
2335
2336 /*
2337 * It is conceivable that a racing task removed this folio from
2338 * swap cache just before we acquired the page lock. The folio
2339 * might even be back in swap cache on another swap area. But
2340 * that is okay, folio_free_swap() only removes stale folios.
2341 */
2342 folio_lock(folio);
2343 folio_wait_writeback(folio);
2344 folio_free_swap(folio);
2345 folio_unlock(folio);
2346 folio_put(folio);
2347 }
2348
2349 /*
2350 * Let's check again to see if there are still swap entries in the map.
2351 * If so, we need to retry the unuse logic.
2352 * Under global memory pressure, swap entries can be reinserted back
2353 * into process space after the mmlist loop above passes over them.
2354 *
2355 * Limit the number of retries? No: when mmget_not_zero()
2356 * above fails, that mm is likely to be freeing swap from
2357 * exit_mmap(), which proceeds at its own independent pace;
2358 * and even shmem_writepage() could have been preempted after
2359 * folio_alloc_swap(), temporarily hiding that swap. It's easy
2360 * and robust (though cpu-intensive) just to keep retrying.
2361 */
2362 if (swap_usage_in_pages(si)) {
2363 if (!signal_pending(current))
2364 goto retry;
2365 return -EINTR;
2366 }
2367
2368 success:
2369 /*
2370 * Make sure that further cleanups after try_to_unuse() returns happen
2371 * after swap_range_free() reduces si->inuse_pages to 0.
2372 */
2373 smp_mb();
2374 return 0;
2375 }
2376
2377 /*
2378 * After a successful try_to_unuse, if no swap is now in use, we know
2379 * we can empty the mmlist. swap_lock must be held on entry and exit.
2380 * Note that mmlist_lock nests inside swap_lock, and an mm must be
2381 * added to the mmlist just after page_duplicate - before would be racy.
2382 */
2383 static void drain_mmlist(void)
2384 {
2385 struct list_head *p, *next;
2386 unsigned int type;
2387
2388 for (type = 0; type < nr_swapfiles; type++)
2389 if (swap_usage_in_pages(swap_info[type]))
2390 return;
2391 spin_lock(&mmlist_lock);
2392 list_for_each_safe(p, next, &init_mm.mmlist)
2393 list_del_init(p);
2394 spin_unlock(&mmlist_lock);
2395 }
2396
2397 /*
2398 * Free all of a swapdev's extent information
2399 */
2400 static void destroy_swap_extents(struct swap_info_struct *sis)
2401 {
2402 while (!RB_EMPTY_ROOT(&sis->swap_extent_root)) {
2403 struct rb_node *rb = sis->swap_extent_root.rb_node;
2404 struct swap_extent *se = rb_entry(rb, struct swap_extent, rb_node);
2405
2406 rb_erase(rb, &sis->swap_extent_root);
2407 kfree(se);
2408 }
2409
2410 if (sis->flags & SWP_ACTIVATED) {
2411 struct file *swap_file = sis->swap_file;
2412 struct address_space *mapping = swap_file->f_mapping;
2413
2414 sis->flags &= ~SWP_ACTIVATED;
2415 if (mapping->a_ops->swap_deactivate)
2416 mapping->a_ops->swap_deactivate(swap_file);
2417 }
2418 }
2419
2420 /*
2421 * Add a block range (and the corresponding page range) into this swapdev's
2422 * extent tree.
2423 *
2424 * This function assumes that it is called in ascending page order.
2425 */
2426 int
2427 add_swap_extent(struct swap_info_struct *sis, unsigned long start_page,
2428 unsigned long nr_pages, sector_t start_block)
2429 {
2430 struct rb_node **link = &sis->swap_extent_root.rb_node, *parent = NULL;
2431 struct swap_extent *se;
2432 struct swap_extent *new_se;
2433
2434 /*
2435 * Place the new node at the rightmost position, since this
2436 * function is called in ascending page order.
2437 */
2438 while (*link) {
2439 parent = *link;
2440 link = &parent->rb_right;
2441 }
2442
2443 if (parent) {
2444 se = rb_entry(parent, struct swap_extent, rb_node);
2445 BUG_ON(se->start_page + se->nr_pages != start_page);
2446 if (se->start_block + se->nr_pages == start_block) {
2447 /* Merge it */
2448 se->nr_pages += nr_pages;
2449 return 0;
2450 }
2451 }
2452
2453 /* No merge, insert a new extent. */
2454 new_se = kmalloc(sizeof(*se), GFP_KERNEL);
2455 if (new_se == NULL)
2456 return -ENOMEM;
2457 new_se->start_page = start_page;
2458 new_se->nr_pages = nr_pages;
2459 new_se->start_block = start_block;
2460
2461 rb_link_node(&new_se->rb_node, parent, link);
2462 rb_insert_color(&new_se->rb_node, &sis->swap_extent_root);
2463 return 1;
2464 }
2465 EXPORT_SYMBOL_GPL(add_swap_extent);
2466
2467 /*
2468 * A `swap extent' is a simple thing which maps a contiguous range of pages
2469 * onto a contiguous range of disk blocks. An rbtree of swap extents is
2470 * built at swapon time and is then used at swap_writepage/swap_read_folio
2471 * time for locating where on disk a page belongs.
2472 *
2473 * If the swapfile is an S_ISBLK block device, a single extent is installed.
2474 * This is done so that the main operating code can treat S_ISBLK and S_ISREG
2475 * swap files identically.
2476 *
2477 * Whether the swapdev is an S_ISREG file or an S_ISBLK blockdev, the swap
2478 * extent rbtree operates in PAGE_SIZE disk blocks. Both S_ISREG and S_ISBLK
2479 * swapfiles are handled *identically* after swapon time.
2480 *
2481 * For S_ISREG swapfiles, setup_swap_extents() will walk all the file's blocks
2482 * and will parse them into an rbtree, in PAGE_SIZE chunks. If some stray
2483 * blocks are found which do not fall within the PAGE_SIZE alignment
2484 * requirements, they are simply tossed out - we will never use those blocks
2485 * for swapping.
2486 *
2487 * For all swap devices we set S_SWAPFILE across the life of the swapon. This
2488 * prevents users from writing to the swap device, which will corrupt memory.
2489 *
2490 * The amount of disk space which a single swap extent represents varies.
2491 * Typically it is in the 1-4 megabyte range. So we can have hundreds of
2492 * extents in the rbtree. - akpm.
2493 */
2494 static int setup_swap_extents(struct swap_info_struct *sis, sector_t *span)
2495 {
2496 struct file *swap_file = sis->swap_file;
2497 struct address_space *mapping = swap_file->f_mapping;
2498 struct inode *inode = mapping->host;
2499 int ret;
2500
2501 if (S_ISBLK(inode->i_mode)) {
2502 ret = add_swap_extent(sis, 0, sis->max, 0);
2503 *span = sis->pages;
2504 return ret;
2505 }
2506
2507 if (mapping->a_ops->swap_activate) {
2508 ret = mapping->a_ops->swap_activate(sis, swap_file, span);
2509 if (ret < 0)
2510 return ret;
2511 sis->flags |= SWP_ACTIVATED;
2512 if ((sis->flags & SWP_FS_OPS) &&
2513 sio_pool_init() != 0) {
2514 destroy_swap_extents(sis);
2515 return -ENOMEM;
2516 }
2517 return ret;
2518 }
2519
2520 return generic_swapfile_activate(sis, swap_file, span);
2521 }
2522
2523 static int swap_node(struct swap_info_struct *si)
2524 {
2525 struct block_device *bdev;
2526
2527 if (si->bdev)
2528 bdev = si->bdev;
2529 else
2530 bdev = si->swap_file->f_inode->i_sb->s_bdev;
2531
2532 return bdev ? bdev->bd_disk->node_id : NUMA_NO_NODE;
2533 }
2534
2535 static void setup_swap_info(struct swap_info_struct *si, int prio,
2536 unsigned char *swap_map,
2537 struct swap_cluster_info *cluster_info,
2538 unsigned long *zeromap)
2539 {
2540 int i;
2541
2542 if (prio >= 0)
2543 si->prio = prio;
2544 else
2545 si->prio = --least_priority;
2546 /*
2547 * the plist prio is negated because plist ordering is
2548 * low-to-high, while swap ordering is high-to-low
2549 */
2550 si->list.prio = -si->prio;
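	/*
	 * A device with an auto-assigned (negative) priority is given
	 * priority 1 on its own NUMA node, so node-local allocations
	 * prefer it over remote devices.
	 */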
2551 for_each_node(i) {
2552 if (si->prio >= 0)
2553 si->avail_lists[i].prio = -si->prio;
2554 else {
2555 if (swap_node(si) == i)
2556 si->avail_lists[i].prio = 1;
2557 else
2558 si->avail_lists[i].prio = -si->prio;
2559 }
2560 }
2561 si->swap_map = swap_map;
2562 si->cluster_info = cluster_info;
2563 si->zeromap = zeromap;
2564 }
2565
2566 static void _enable_swap_info(struct swap_info_struct *si)
2567 {
2568 atomic_long_add(si->pages, &nr_swap_pages);
2569 total_swap_pages += si->pages;
2570
2571 assert_spin_locked(&swap_lock);
2572 /*
2573 * both lists are plists, and thus priority ordered.
2574 * swap_active_head needs to be priority ordered for swapoff(),
2575 * which on removal of any swap_info_struct with an auto-assigned
2576 * (i.e. negative) priority increments the auto-assigned priority
2577 * of any lower-priority swap_info_structs.
2578 * swap_avail_head needs to be priority ordered for folio_alloc_swap(),
2579 * which allocates swap pages from the highest available priority
2580 * swap_info_struct.
2581 */
2582 plist_add(&si->list, &swap_active_head);
2583
2584 /* Add back to available list */
2585 add_to_avail_list(si, true);
2586 }
2587
2588 static void enable_swap_info(struct swap_info_struct *si, int prio,
2589 unsigned char *swap_map,
2590 struct swap_cluster_info *cluster_info,
2591 unsigned long *zeromap)
2592 {
2593 spin_lock(&swap_lock);
2594 spin_lock(&si->lock);
2595 setup_swap_info(si, prio, swap_map, cluster_info, zeromap);
2596 spin_unlock(&si->lock);
2597 spin_unlock(&swap_lock);
2598 /*
2599 * Finished initializing swap device, now it's safe to reference it.
2600 */
2601 percpu_ref_resurrect(&si->users);
2602 spin_lock(&swap_lock);
2603 spin_lock(&si->lock);
2604 _enable_swap_info(si);
2605 spin_unlock(&si->lock);
2606 spin_unlock(&swap_lock);
2607 }
2608
2609 static void reinsert_swap_info(struct swap_info_struct *si)
2610 {
2611 spin_lock(&swap_lock);
2612 spin_lock(&si->lock);
2613 setup_swap_info(si, si->prio, si->swap_map, si->cluster_info, si->zeromap);
2614 _enable_swap_info(si);
2615 spin_unlock(&si->lock);
2616 spin_unlock(&swap_lock);
2617 }
2618
2619 static bool __has_usable_swap(void)
2620 {
2621 return !plist_head_empty(&swap_active_head);
2622 }
2623
2624 bool has_usable_swap(void)
2625 {
2626 bool ret;
2627
2628 spin_lock(&swap_lock);
2629 ret = __has_usable_swap();
2630 spin_unlock(&swap_lock);
2631 return ret;
2632 }
2633
2634 /*
2635 * Called after clearing SWP_WRITEOK; ensures cluster_alloc_range
2636 * sees the updated flags, so there will be no more allocations.
2637 */
2638 static void wait_for_allocation(struct swap_info_struct *si)
2639 {
2640 unsigned long offset;
2641 unsigned long end = ALIGN(si->max, SWAPFILE_CLUSTER);
2642 struct swap_cluster_info *ci;
2643
2644 BUG_ON(si->flags & SWP_WRITEOK);
2645
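	/*
	 * Lock and unlock each cluster in turn: an allocation that raced
	 * with clearing SWP_WRITEOK still holds its cluster lock, so
	 * cycling every lock waits for in-flight allocations to drain.
	 */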
2646 for (offset = 0; offset < end; offset += SWAPFILE_CLUSTER) {
2647 ci = lock_cluster(si, offset);
2648 unlock_cluster(ci);
2649 }
2650 }
2651
2652 SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
2653 {
2654 struct swap_info_struct *p = NULL;
2655 unsigned char *swap_map;
2656 unsigned long *zeromap;
2657 struct swap_cluster_info *cluster_info;
2658 struct file *swap_file, *victim;
2659 struct address_space *mapping;
2660 struct inode *inode;
2661 struct filename *pathname;
2662 int err, found = 0;
2663
2664 if (!capable(CAP_SYS_ADMIN))
2665 return -EPERM;
2666
2667 BUG_ON(!current->mm);
2668
2669 pathname = getname(specialfile);
2670 if (IS_ERR(pathname))
2671 return PTR_ERR(pathname);
2672
2673 victim = file_open_name(pathname, O_RDWR|O_LARGEFILE, 0);
2674 err = PTR_ERR(victim);
2675 if (IS_ERR(victim))
2676 goto out;
2677
2678 mapping = victim->f_mapping;
2679 spin_lock(&swap_lock);
2680 plist_for_each_entry(p, &swap_active_head, list) {
2681 if (p->flags & SWP_WRITEOK) {
2682 if (p->swap_file->f_mapping == mapping) {
2683 found = 1;
2684 break;
2685 }
2686 }
2687 }
2688 if (!found) {
2689 err = -EINVAL;
2690 spin_unlock(&swap_lock);
2691 goto out_dput;
2692 }
2693 if (!security_vm_enough_memory_mm(current->mm, p->pages))
2694 vm_unacct_memory(p->pages);
2695 else {
2696 err = -ENOMEM;
2697 spin_unlock(&swap_lock);
2698 goto out_dput;
2699 }
2700 spin_lock(&p->lock);
2701 del_from_avail_list(p, true);
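	/*
	 * Removing a device with an auto-assigned (negative) priority:
	 * bump the auto-assigned priority of every lower-priority device
	 * so the negative range stays dense.
	 */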
2702 if (p->prio < 0) {
2703 struct swap_info_struct *si = p;
2704 int nid;
2705
2706 plist_for_each_entry_continue(si, &swap_active_head, list) {
2707 si->prio++;
2708 si->list.prio--;
2709 for_each_node(nid) {
2710 if (si->avail_lists[nid].prio != 1)
2711 si->avail_lists[nid].prio--;
2712 }
2713 }
2714 least_priority++;
2715 }
2716 plist_del(&p->list, &swap_active_head);
2717 atomic_long_sub(p->pages, &nr_swap_pages);
2718 total_swap_pages -= p->pages;
2719 spin_unlock(&p->lock);
2720 spin_unlock(&swap_lock);
2721
2722 wait_for_allocation(p);
2723
2724 disable_swap_slots_cache_lock();
2725
2726 set_current_oom_origin();
2727 err = try_to_unuse(p->type);
2728 clear_current_oom_origin();
2729
2730 if (err) {
2731 /* re-insert swap space into swap_list */
2732 reinsert_swap_info(p);
2733 reenable_swap_slots_cache_unlock();
2734 goto out_dput;
2735 }
2736
2737 reenable_swap_slots_cache_unlock();
2738
2739 /*
2740 * Wait for swap operations protected by get/put_swap_device()
2741 * to complete. Because of synchronize_rcu() here, all swap
2742 * operations protected by RCU reader side lock (including any
2743 * spinlock) will be waited for too. This makes it easy to
2744 * prevent folio_test_swapcache() and the following swap cache
2745 * operations from racing with swapoff.
2746 */
2747 percpu_ref_kill(&p->users);
2748 synchronize_rcu();
2749 wait_for_completion(&p->comp);
2750
2751 flush_work(&p->discard_work);
2752 flush_work(&p->reclaim_work);
2753
2754 destroy_swap_extents(p);
2755 if (p->flags & SWP_CONTINUED)
2756 free_swap_count_continuations(p);
2757
2758 if (!p->bdev || !bdev_nonrot(p->bdev))
2759 atomic_dec(&nr_rotate_swap);
2760
2761 mutex_lock(&swapon_mutex);
2762 spin_lock(&swap_lock);
2763 spin_lock(&p->lock);
2764 drain_mmlist();
2765
2766 swap_file = p->swap_file;
2767 p->swap_file = NULL;
2768 p->max = 0;
2769 swap_map = p->swap_map;
2770 p->swap_map = NULL;
2771 zeromap = p->zeromap;
2772 p->zeromap = NULL;
2773 cluster_info = p->cluster_info;
2774 p->cluster_info = NULL;
2775 spin_unlock(&p->lock);
2776 spin_unlock(&swap_lock);
2777 arch_swap_invalidate_area(p->type);
2778 zswap_swapoff(p->type);
2779 mutex_unlock(&swapon_mutex);
2780 free_percpu(p->percpu_cluster);
2781 p->percpu_cluster = NULL;
2782 kfree(p->global_cluster);
2783 p->global_cluster = NULL;
2784 vfree(swap_map);
2785 kvfree(zeromap);
2786 kvfree(cluster_info);
2787 /* Destroy swap account information */
2788 swap_cgroup_swapoff(p->type);
2789 exit_swap_address_space(p->type);
2790
2791 inode = mapping->host;
2792
2793 inode_lock(inode);
2794 inode->i_flags &= ~S_SWAPFILE;
2795 inode_unlock(inode);
2796 filp_close(swap_file, NULL);
2797
2798 /*
2799 * Clear the SWP_USED flag after all resources are freed so that swapon
2800 * can reuse this swap_info in alloc_swap_info() safely. It is ok to
2801 * not hold p->lock after we cleared its SWP_WRITEOK.
2802 */
2803 spin_lock(&swap_lock);
2804 p->flags = 0;
2805 spin_unlock(&swap_lock);
2806
2807 err = 0;
2808 atomic_inc(&proc_poll_event);
2809 wake_up_interruptible(&proc_poll_wait);
2810
2811 out_dput:
2812 filp_close(victim, NULL);
2813 out:
2814 putname(pathname);
2815 return err;
2816 }
2817
2818 #ifdef CONFIG_PROC_FS
2819 static __poll_t swaps_poll(struct file *file, poll_table *wait)
2820 {
2821 struct seq_file *seq = file->private_data;
2822
2823 poll_wait(file, &proc_poll_wait, wait);
2824
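	/*
	 * Signal an exceptional condition (EPOLLERR | EPOLLPRI) only the
	 * first time the swap table has changed since this seq_file last
	 * observed it; otherwise just report readability.
	 */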
2825 if (seq->poll_event != atomic_read(&proc_poll_event)) {
2826 seq->poll_event = atomic_read(&proc_poll_event);
2827 return EPOLLIN | EPOLLRDNORM | EPOLLERR | EPOLLPRI;
2828 }
2829
2830 return EPOLLIN | EPOLLRDNORM;
2831 }
2832
2833 /* iterator */
2834 static void *swap_start(struct seq_file *swap, loff_t *pos)
2835 {
2836 struct swap_info_struct *si;
2837 int type;
2838 loff_t l = *pos;
2839
2840 mutex_lock(&swapon_mutex);
2841
2842 if (!l)
2843 return SEQ_START_TOKEN;
2844
2845 for (type = 0; (si = swap_type_to_swap_info(type)); type++) {
2846 if (!(si->flags & SWP_USED) || !si->swap_map)
2847 continue;
2848 if (!--l)
2849 return si;
2850 }
2851
2852 return NULL;
2853 }
2854
2855 static void *swap_next(struct seq_file *swap, void *v, loff_t *pos)
2856 {
2857 struct swap_info_struct *si = v;
2858 int type;
2859
2860 if (v == SEQ_START_TOKEN)
2861 type = 0;
2862 else
2863 type = si->type + 1;
2864
2865 ++(*pos);
2866 for (; (si = swap_type_to_swap_info(type)); type++) {
2867 if (!(si->flags & SWP_USED) || !si->swap_map)
2868 continue;
2869 return si;
2870 }
2871
2872 return NULL;
2873 }
2874
2875 static void swap_stop(struct seq_file *swap, void *v)
2876 {
2877 mutex_unlock(&swapon_mutex);
2878 }
2879
2880 static int swap_show(struct seq_file *swap, void *v)
2881 {
2882 struct swap_info_struct *si = v;
2883 struct file *file;
2884 int len;
2885 unsigned long bytes, inuse;
2886
2887 if (si == SEQ_START_TOKEN) {
2888 seq_puts(swap, "Filename\t\t\t\tType\t\tSize\t\tUsed\t\tPriority\n");
2889 return 0;
2890 }
2891
2892 bytes = K(si->pages);
2893 inuse = K(swap_usage_in_pages(si));
2894
2895 file = si->swap_file;
2896 len = seq_file_path(swap, file, " \t\n\\");
2897 seq_printf(swap, "%*s%s\t%lu\t%s%lu\t%s%d\n",
2898 len < 40 ? 40 - len : 1, " ",
2899 S_ISBLK(file_inode(file)->i_mode) ?
2900 "partition" : "file\t",
2901 bytes, bytes < 10000000 ? "\t" : "",
2902 inuse, inuse < 10000000 ? "\t" : "",
2903 si->prio);
2904 return 0;
2905 }
2906
2907 static const struct seq_operations swaps_op = {
2908 .start = swap_start,
2909 .next = swap_next,
2910 .stop = swap_stop,
2911 .show = swap_show
2912 };
2913
2914 static int swaps_open(struct inode *inode, struct file *file)
2915 {
2916 struct seq_file *seq;
2917 int ret;
2918
2919 ret = seq_open(file, &swaps_op);
2920 if (ret)
2921 return ret;
2922
2923 seq = file->private_data;
2924 seq->poll_event = atomic_read(&proc_poll_event);
2925 return 0;
2926 }
2927
2928 static const struct proc_ops swaps_proc_ops = {
2929 .proc_flags = PROC_ENTRY_PERMANENT,
2930 .proc_open = swaps_open,
2931 .proc_read = seq_read,
2932 .proc_lseek = seq_lseek,
2933 .proc_release = seq_release,
2934 .proc_poll = swaps_poll,
2935 };
2936
2937 static int __init procswaps_init(void)
2938 {
2939 proc_create("swaps", 0, NULL, &swaps_proc_ops);
2940 return 0;
2941 }
2942 __initcall(procswaps_init);
2943 #endif /* CONFIG_PROC_FS */
2944
2945 #ifdef MAX_SWAPFILES_CHECK
2946 static int __init max_swapfiles_check(void)
2947 {
2948 MAX_SWAPFILES_CHECK();
2949 return 0;
2950 }
2951 late_initcall(max_swapfiles_check);
2952 #endif
2953
2954 static struct swap_info_struct *alloc_swap_info(void)
2955 {
2956 struct swap_info_struct *p;
2957 struct swap_info_struct *defer = NULL;
2958 unsigned int type;
2959 int i;
2960
2961 p = kvzalloc(struct_size(p, avail_lists, nr_node_ids), GFP_KERNEL);
2962 if (!p)
2963 return ERR_PTR(-ENOMEM);
2964
2965 if (percpu_ref_init(&p->users, swap_users_ref_free,
2966 PERCPU_REF_INIT_DEAD, GFP_KERNEL)) {
2967 kvfree(p);
2968 return ERR_PTR(-ENOMEM);
2969 }
2970
2971 spin_lock(&swap_lock);
2972 for (type = 0; type < nr_swapfiles; type++) {
2973 if (!(swap_info[type]->flags & SWP_USED))
2974 break;
2975 }
2976 if (type >= MAX_SWAPFILES) {
2977 spin_unlock(&swap_lock);
2978 percpu_ref_exit(&p->users);
2979 kvfree(p);
2980 return ERR_PTR(-EPERM);
2981 }
2982 if (type >= nr_swapfiles) {
2983 p->type = type;
2984 /*
2985 * Publish the swap_info_struct after initializing it.
2986 * Note that kvzalloc() above zeroes all its fields.
2987 */
2988 smp_store_release(&swap_info[type], p); /* rcu_assign_pointer() */
2989 nr_swapfiles++;
2990 } else {
2991 defer = p;
2992 p = swap_info[type];
2993 /*
2994 * Do not memset this entry: a racing procfs swap_next()
2995 * would be relying on p->type to remain valid.
2996 */
2997 }
2998 p->swap_extent_root = RB_ROOT;
2999 plist_node_init(&p->list, 0);
3000 for_each_node(i)
3001 plist_node_init(&p->avail_lists[i], 0);
3002 p->flags = SWP_USED;
3003 spin_unlock(&swap_lock);
3004 if (defer) {
3005 percpu_ref_exit(&defer->users);
3006 kvfree(defer);
3007 }
3008 spin_lock_init(&p->lock);
3009 spin_lock_init(&p->cont_lock);
3010 atomic_long_set(&p->inuse_pages, SWAP_USAGE_OFFLIST_BIT);
3011 init_completion(&p->comp);
3012
3013 return p;
3014 }
3015
3016 static int claim_swapfile(struct swap_info_struct *si, struct inode *inode)
3017 {
3018 if (S_ISBLK(inode->i_mode)) {
3019 si->bdev = I_BDEV(inode);
3020 /*
3021 * Zoned block devices contain zones with a sequential-write-only
3022 * restriction. Hence zoned block devices are not
3023 * suitable for swapping. Disallow them here.
3024 */
3025 if (bdev_is_zoned(si->bdev))
3026 return -EINVAL;
3027 si->flags |= SWP_BLKDEV;
3028 } else if (S_ISREG(inode->i_mode)) {
3029 si->bdev = inode->i_sb->s_bdev;
3030 }
3031
3032 return 0;
3033 }
3034
3035
3036 /*
3037 * Find out how many pages are allowed for a single swap device. There
3038 * are two limiting factors:
3039 * 1) the number of bits for the swap offset in the swp_entry_t type, and
3040 * 2) the number of bits in the swap pte, as defined by the different
3041 * architectures.
3042 *
3043 * In order to find the largest possible bit mask, a swap entry with
3044 * swap type 0 and swap offset ~0UL is created, encoded to a swap pte,
3045 * decoded to a swp_entry_t again, and finally the swap offset is
3046 * extracted.
3047 *
3048 * This will mask all the bits from the initial ~0UL mask that can't
3049 * be encoded in either the swp_entry_t or the architecture definition
3050 * of a swap pte.
3051 */
3052 unsigned long generic_max_swapfile_size(void)
3053 {
3054 return swp_offset(pte_to_swp_entry(
3055 swp_entry_to_pte(swp_entry(0, ~0UL)))) + 1;
3056 }
3057
3058 /* Can be overridden by an architecture for additional checks. */
3059 __weak unsigned long arch_max_swapfile_size(void)
3060 {
3061 return generic_max_swapfile_size();
3062 }
3063
3064 static unsigned long read_swap_header(struct swap_info_struct *si,
3065 union swap_header *swap_header,
3066 struct inode *inode)
3067 {
3068 int i;
3069 unsigned long maxpages;
3070 unsigned long swapfilepages;
3071 unsigned long last_page;
3072
3073 if (memcmp("SWAPSPACE2", swap_header->magic.magic, 10)) {
3074 pr_err("Unable to find swap-space signature\n");
3075 return 0;
3076 }
3077
3078 /* swap partition endianness hack: swab the header fields if it was written with the opposite endianness */
3079 if (swab32(swap_header->info.version) == 1) {
3080 swab32s(&swap_header->info.version);
3081 swab32s(&swap_header->info.last_page);
3082 swab32s(&swap_header->info.nr_badpages);
3083 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3084 return 0;
3085 for (i = 0; i < swap_header->info.nr_badpages; i++)
3086 swab32s(&swap_header->info.badpages[i]);
3087 }
3088 /* Check the swap header's sub-version */
3089 if (swap_header->info.version != 1) {
3090 pr_warn("Unable to handle swap header version %d\n",
3091 swap_header->info.version);
3092 return 0;
3093 }
3094
3095 maxpages = swapfile_maximum_size;
3096 last_page = swap_header->info.last_page;
3097 if (!last_page) {
3098 pr_warn("Empty swap-file\n");
3099 return 0;
3100 }
3101 if (last_page > maxpages) {
3102 pr_warn("Truncating oversized swap area, only using %luk out of %luk\n",
3103 K(maxpages), K(last_page));
3104 }
3105 if (maxpages > last_page) {
3106 maxpages = last_page + 1;
3107 /* p->max is an unsigned int: don't overflow it */
3108 if ((unsigned int)maxpages == 0)
3109 maxpages = UINT_MAX;
3110 }
3111
3112 if (!maxpages)
3113 return 0;
3114 swapfilepages = i_size_read(inode) >> PAGE_SHIFT;
3115 if (swapfilepages && maxpages > swapfilepages) {
3116 pr_warn("Swap area shorter than signature indicates\n");
3117 return 0;
3118 }
3119 if (swap_header->info.nr_badpages && S_ISREG(inode->i_mode))
3120 return 0;
3121 if (swap_header->info.nr_badpages > MAX_SWAP_BADPAGES)
3122 return 0;
3123
3124 return maxpages;
3125 }
3126
3127 #define SWAP_CLUSTER_INFO_COLS \
3128 DIV_ROUND_UP(L1_CACHE_BYTES, sizeof(struct swap_cluster_info))
3129 #define SWAP_CLUSTER_SPACE_COLS \
3130 DIV_ROUND_UP(SWAP_ADDRESS_SPACE_PAGES, SWAPFILE_CLUSTER)
3131 #define SWAP_CLUSTER_COLS \
3132 max_t(unsigned int, SWAP_CLUSTER_INFO_COLS, SWAP_CLUSTER_SPACE_COLS)
3133
3134 static int setup_swap_map_and_extents(struct swap_info_struct *si,
3135 union swap_header *swap_header,
3136 unsigned char *swap_map,
3137 unsigned long maxpages,
3138 sector_t *span)
3139 {
3140 unsigned int nr_good_pages;
3141 unsigned long i;
3142 int nr_extents;
3143
3144 nr_good_pages = maxpages - 1; /* omit header page */
3145
3146 for (i = 0; i < swap_header->info.nr_badpages; i++) {
3147 unsigned int page_nr = swap_header->info.badpages[i];
3148 if (page_nr == 0 || page_nr > swap_header->info.last_page)
3149 return -EINVAL;
3150 if (page_nr < maxpages) {
3151 swap_map[page_nr] = SWAP_MAP_BAD;
3152 nr_good_pages--;
3153 }
3154 }
3155
3156 if (nr_good_pages) {
3157 swap_map[0] = SWAP_MAP_BAD;
3158 si->max = maxpages;
3159 si->pages = nr_good_pages;
3160 nr_extents = setup_swap_extents(si, span);
3161 if (nr_extents < 0)
3162 return nr_extents;
3163 nr_good_pages = si->pages;
3164 }
3165 if (!nr_good_pages) {
3166 pr_warn("Empty swap-file\n");
3167 return -EINVAL;
3168 }
3169
3170 return nr_extents;
3171 }
3172
3173 static struct swap_cluster_info *setup_clusters(struct swap_info_struct *si,
3174 union swap_header *swap_header,
3175 unsigned long maxpages)
3176 {
3177 unsigned long nr_clusters = DIV_ROUND_UP(maxpages, SWAPFILE_CLUSTER);
3178 struct swap_cluster_info *cluster_info;
3179 unsigned long i, j, k, idx;
3180 int cpu, err = -ENOMEM;
3181
3182 cluster_info = kvcalloc(nr_clusters, sizeof(*cluster_info), GFP_KERNEL);
3183 if (!cluster_info)
3184 goto err;
3185
3186 for (i = 0; i < nr_clusters; i++)
3187 spin_lock_init(&cluster_info[i].lock);
3188
3189 if (si->flags & SWP_SOLIDSTATE) {
3190 si->percpu_cluster = alloc_percpu(struct percpu_cluster);
3191 if (!si->percpu_cluster)
3192 goto err_free;
3193
3194 for_each_possible_cpu(cpu) {
3195 struct percpu_cluster *cluster;
3196
3197 cluster = per_cpu_ptr(si->percpu_cluster, cpu);
3198 for (i = 0; i < SWAP_NR_ORDERS; i++)
3199 cluster->next[i] = SWAP_ENTRY_INVALID;
3200 local_lock_init(&cluster->lock);
3201 }
3202 } else {
3203 si->global_cluster = kmalloc(sizeof(*si->global_cluster),
3204 GFP_KERNEL);
3205 if (!si->global_cluster)
3206 goto err_free;
3207 for (i = 0; i < SWAP_NR_ORDERS; i++)
3208 si->global_cluster->next[i] = SWAP_ENTRY_INVALID;
3209 spin_lock_init(&si->global_cluster_lock);
3210 }
3211
3212 /*
3213 * Mark unusable pages as unavailable. The clusters aren't
3214 * marked free yet, so no list operations are involved yet.
3215 *
3216 * See setup_swap_map_and_extents(): header page, bad pages,
3217 * and the EOF part of the last cluster.
3218 */
3219 inc_cluster_info_page(si, cluster_info, 0);
3220 for (i = 0; i < swap_header->info.nr_badpages; i++)
3221 inc_cluster_info_page(si, cluster_info,
3222 swap_header->info.badpages[i]);
3223 for (i = maxpages; i < round_up(maxpages, SWAPFILE_CLUSTER); i++)
3224 inc_cluster_info_page(si, cluster_info, i);
3225
3226 INIT_LIST_HEAD(&si->free_clusters);
3227 INIT_LIST_HEAD(&si->full_clusters);
3228 INIT_LIST_HEAD(&si->discard_clusters);
3229
3230 for (i = 0; i < SWAP_NR_ORDERS; i++) {
3231 INIT_LIST_HEAD(&si->nonfull_clusters[i]);
3232 INIT_LIST_HEAD(&si->frag_clusters[i]);
3233 atomic_long_set(&si->frag_cluster_nr[i], 0);
3234 }
3235
3236 /*
3237 * Reduce false cache line sharing between cluster_info entries
3238 * belonging to the same address space when adding them to the lists.
3239 */
3240 for (k = 0; k < SWAP_CLUSTER_COLS; k++) {
3241 j = k % SWAP_CLUSTER_COLS;
3242 for (i = 0; i < DIV_ROUND_UP(nr_clusters, SWAP_CLUSTER_COLS); i++) {
3243 struct swap_cluster_info *ci;
3244 idx = i * SWAP_CLUSTER_COLS + j;
3245 ci = cluster_info + idx;
3246 if (idx >= nr_clusters)
3247 continue;
3248 if (ci->count) {
3249 ci->flags = CLUSTER_FLAG_NONFULL;
3250 list_add_tail(&ci->list, &si->nonfull_clusters[0]);
3251 continue;
3252 }
3253 ci->flags = CLUSTER_FLAG_FREE;
3254 list_add_tail(&ci->list, &si->free_clusters);
3255 }
3256 }
3257
3258 return cluster_info;
3259
3260 err_free:
3261 kvfree(cluster_info);
3262 err:
3263 return ERR_PTR(err);
3264 }
3265
3266 SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
3267 {
3268 struct swap_info_struct *si;
3269 struct filename *name;
3270 struct file *swap_file = NULL;
3271 struct address_space *mapping;
3272 struct dentry *dentry;
3273 int prio;
3274 int error;
3275 union swap_header *swap_header;
3276 int nr_extents;
3277 sector_t span;
3278 unsigned long maxpages;
3279 unsigned char *swap_map = NULL;
3280 unsigned long *zeromap = NULL;
3281 struct swap_cluster_info *cluster_info = NULL;
3282 struct folio *folio = NULL;
3283 struct inode *inode = NULL;
3284 bool inced_nr_rotate_swap = false;
3285
3286 if (swap_flags & ~SWAP_FLAGS_VALID)
3287 return -EINVAL;
3288
3289 if (!capable(CAP_SYS_ADMIN))
3290 return -EPERM;
3291
3292 if (!swap_avail_heads)
3293 return -ENOMEM;
3294
3295 si = alloc_swap_info();
3296 if (IS_ERR(si))
3297 return PTR_ERR(si);
3298
3299 INIT_WORK(&si->discard_work, swap_discard_work);
3300 INIT_WORK(&si->reclaim_work, swap_reclaim_work);
3301
3302 name = getname(specialfile);
3303 if (IS_ERR(name)) {
3304 error = PTR_ERR(name);
3305 name = NULL;
3306 goto bad_swap;
3307 }
3308 swap_file = file_open_name(name, O_RDWR | O_LARGEFILE | O_EXCL, 0);
3309 if (IS_ERR(swap_file)) {
3310 error = PTR_ERR(swap_file);
3311 swap_file = NULL;
3312 goto bad_swap;
3313 }
3314
3315 si->swap_file = swap_file;
3316 mapping = swap_file->f_mapping;
3317 dentry = swap_file->f_path.dentry;
3318 inode = mapping->host;
3319
3320 error = claim_swapfile(si, inode);
3321 if (unlikely(error))
3322 goto bad_swap;
3323
3324 inode_lock(inode);
3325 if (d_unlinked(dentry) || cant_mount(dentry)) {
3326 error = -ENOENT;
3327 goto bad_swap_unlock_inode;
3328 }
3329 if (IS_SWAPFILE(inode)) {
3330 error = -EBUSY;
3331 goto bad_swap_unlock_inode;
3332 }
3333
3334 /*
3335 * Read the swap header.
3336 */
3337 if (!mapping->a_ops->read_folio) {
3338 error = -EINVAL;
3339 goto bad_swap_unlock_inode;
3340 }
3341 folio = read_mapping_folio(mapping, 0, swap_file);
3342 if (IS_ERR(folio)) {
3343 error = PTR_ERR(folio);
3344 goto bad_swap_unlock_inode;
3345 }
3346 swap_header = kmap_local_folio(folio, 0);
3347
3348 maxpages = read_swap_header(si, swap_header, inode);
3349 if (unlikely(!maxpages)) {
3350 error = -EINVAL;
3351 goto bad_swap_unlock_inode;
3352 }
3353
3354 /* OK, set up the swap map and apply the bad block list */
3355 swap_map = vzalloc(maxpages);
3356 if (!swap_map) {
3357 error = -ENOMEM;
3358 goto bad_swap_unlock_inode;
3359 }
3360
3361 error = swap_cgroup_swapon(si->type, maxpages);
3362 if (error)
3363 goto bad_swap_unlock_inode;
3364
3365 nr_extents = setup_swap_map_and_extents(si, swap_header, swap_map,
3366 maxpages, &span);
3367 if (unlikely(nr_extents < 0)) {
3368 error = nr_extents;
3369 goto bad_swap_unlock_inode;
3370 }
3371
3372 /*
3373 * Use kvmalloc_array instead of bitmap_zalloc as the allocation order might
3374 * be above MAX_PAGE_ORDER in case of a large swap file.
3375 */
3376 zeromap = kvmalloc_array(BITS_TO_LONGS(maxpages), sizeof(long),
3377 GFP_KERNEL | __GFP_ZERO);
3378 if (!zeromap) {
3379 error = -ENOMEM;
3380 goto bad_swap_unlock_inode;
3381 }
3382
3383 if (si->bdev && bdev_stable_writes(si->bdev))
3384 si->flags |= SWP_STABLE_WRITES;
3385
3386 if (si->bdev && bdev_synchronous(si->bdev))
3387 si->flags |= SWP_SYNCHRONOUS_IO;
3388
3389 if (si->bdev && bdev_nonrot(si->bdev)) {
3390 si->flags |= SWP_SOLIDSTATE;
3391 } else {
3392 atomic_inc(&nr_rotate_swap);
3393 inced_nr_rotate_swap = true;
3394 }
3395
3396 cluster_info = setup_clusters(si, swap_header, maxpages);
3397 if (IS_ERR(cluster_info)) {
3398 error = PTR_ERR(cluster_info);
3399 cluster_info = NULL;
3400 goto bad_swap_unlock_inode;
3401 }
3402
3403 if ((swap_flags & SWAP_FLAG_DISCARD) &&
3404 si->bdev && bdev_max_discard_sectors(si->bdev)) {
3405 /*
3406 * When discard is enabled for swap with no particular
3407 * policy flagged, we set all swap discard flags here in
3408 * order to sustain backward compatibility with older
3409 * swapon(8) releases.
3410 */
3411 si->flags |= (SWP_DISCARDABLE | SWP_AREA_DISCARD |
3412 SWP_PAGE_DISCARD);
3413
3414 /*
3415 * By flagging sys_swapon, a sysadmin can tell us to
3416 * either do single-time area discards only, or to just
3417 * perform discards for released swap page-clusters.
3418 * Now it's time to adjust the p->flags accordingly.
3419 */
3420 if (swap_flags & SWAP_FLAG_DISCARD_ONCE)
3421 si->flags &= ~SWP_PAGE_DISCARD;
3422 else if (swap_flags & SWAP_FLAG_DISCARD_PAGES)
3423 si->flags &= ~SWP_AREA_DISCARD;
3424
3425 /* issue a swapon-time discard if it's still required */
3426 if (si->flags & SWP_AREA_DISCARD) {
3427 int err = discard_swap(si);
3428 if (unlikely(err))
3429 pr_err("swapon: discard_swap(%p): %d\n",
3430 si, err);
3431 }
3432 }
3433
3434 error = init_swap_address_space(si->type, maxpages);
3435 if (error)
3436 goto bad_swap_unlock_inode;
3437
3438 error = zswap_swapon(si->type, maxpages);
3439 if (error)
3440 goto free_swap_address_space;
3441
3442 /*
3443 * Flush any pending IO and dirty mappings before we start using this
3444 * swap device.
3445 */
3446 inode->i_flags |= S_SWAPFILE;
3447 error = inode_drain_writes(inode);
3448 if (error) {
3449 inode->i_flags &= ~S_SWAPFILE;
3450 goto free_swap_zswap;
3451 }
3452
3453 mutex_lock(&swapon_mutex);
3454 prio = -1;
3455 if (swap_flags & SWAP_FLAG_PREFER)
3456 prio =
3457 (swap_flags & SWAP_FLAG_PRIO_MASK) >> SWAP_FLAG_PRIO_SHIFT;
3458 enable_swap_info(si, prio, swap_map, cluster_info, zeromap);
3459
3460 pr_info("Adding %uk swap on %s. Priority:%d extents:%d across:%lluk %s%s%s%s\n",
3461 K(si->pages), name->name, si->prio, nr_extents,
3462 K((unsigned long long)span),
3463 (si->flags & SWP_SOLIDSTATE) ? "SS" : "",
3464 (si->flags & SWP_DISCARDABLE) ? "D" : "",
3465 (si->flags & SWP_AREA_DISCARD) ? "s" : "",
3466 (si->flags & SWP_PAGE_DISCARD) ? "c" : "");
3467
3468 mutex_unlock(&swapon_mutex);
3469 atomic_inc(&proc_poll_event);
3470 wake_up_interruptible(&proc_poll_wait);
3471
3472 error = 0;
3473 goto out;
3474 free_swap_zswap:
3475 zswap_swapoff(si->type);
3476 free_swap_address_space:
3477 exit_swap_address_space(si->type);
3478 bad_swap_unlock_inode:
3479 inode_unlock(inode);
3480 bad_swap:
3481 free_percpu(si->percpu_cluster);
3482 si->percpu_cluster = NULL;
3483 kfree(si->global_cluster);
3484 si->global_cluster = NULL;
3485 inode = NULL;
3486 destroy_swap_extents(si);
3487 swap_cgroup_swapoff(si->type);
3488 spin_lock(&swap_lock);
3489 si->swap_file = NULL;
3490 si->flags = 0;
3491 spin_unlock(&swap_lock);
3492 vfree(swap_map);
3493 kvfree(zeromap);
3494 kvfree(cluster_info);
3495 if (inced_nr_rotate_swap)
3496 atomic_dec(&nr_rotate_swap);
3497 if (swap_file)
3498 filp_close(swap_file, NULL);
3499 out:
3500 if (!IS_ERR_OR_NULL(folio))
3501 folio_release_kmap(folio, swap_header);
3502 if (name)
3503 putname(name);
3504 if (inode)
3505 inode_unlock(inode);
3506 if (!error)
3507 enable_swap_slots_cache();
3508 return error;
3509 }
3510
3511 void si_swapinfo(struct sysinfo *val)
3512 {
3513 unsigned int type;
3514 unsigned long nr_to_be_unused = 0;
3515
3516 spin_lock(&swap_lock);
3517 for (type = 0; type < nr_swapfiles; type++) {
3518 struct swap_info_struct *si = swap_info[type];
3519
3520 if ((si->flags & SWP_USED) && !(si->flags & SWP_WRITEOK))
3521 nr_to_be_unused += swap_usage_in_pages(si);
3522 }
3523 val->freeswap = atomic_long_read(&nr_swap_pages) + nr_to_be_unused;
3524 val->totalswap = total_swap_pages + nr_to_be_unused;
3525 spin_unlock(&swap_lock);
3526 }
3527
3528 /*
3529 * Verify that nr swap entries are valid and increment their swap map counts.
3530 *
3531 * Returns 0 on success, or an error code in the following cases:
3533 * - swp_entry is invalid -> EINVAL
3534 * - swp_entry is migration entry -> EINVAL
3535 * - swap-cache reference is requested but there is already one. -> EEXIST
3536 * - swap-cache reference is requested but the entry is not used. -> ENOENT
3537 * - swap-mapped reference requested but needs continued swap count. -> ENOMEM
3538 */
3539 static int __swap_duplicate(swp_entry_t entry, unsigned char usage, int nr)
3540 {
3541 struct swap_info_struct *si;
3542 struct swap_cluster_info *ci;
3543 unsigned long offset;
3544 unsigned char count;
3545 unsigned char has_cache;
3546 int err, i;
3547
3548 si = swp_swap_info(entry);
3549 if (WARN_ON_ONCE(!si)) {
3550 pr_err("%s%08lx\n", Bad_file, entry.val);
3551 return -EINVAL;
3552 }
3553
3554 offset = swp_offset(entry);
3555 VM_WARN_ON(nr > SWAPFILE_CLUSTER - offset % SWAPFILE_CLUSTER);
3556 VM_WARN_ON(usage == 1 && nr > 1);
3557 ci = lock_cluster(si, offset);
3558
3559 err = 0;
3560 for (i = 0; i < nr; i++) {
3561 count = si->swap_map[offset + i];
3562
3563 /*
3564 * swapin_readahead() doesn't check if a swap entry is valid, so the
3565 * swap entry could be SWAP_MAP_BAD. Check here with lock held.
3566 */
3567 if (unlikely(swap_count(count) == SWAP_MAP_BAD)) {
3568 err = -ENOENT;
3569 goto unlock_out;
3570 }
3571
3572 has_cache = count & SWAP_HAS_CACHE;
3573 count &= ~SWAP_HAS_CACHE;
3574
3575 if (!count && !has_cache) {
3576 err = -ENOENT;
3577 } else if (usage == SWAP_HAS_CACHE) {
3578 if (has_cache)
3579 err = -EEXIST;
3580 } else if ((count & ~COUNT_CONTINUED) > SWAP_MAP_MAX) {
3581 err = -EINVAL;
3582 }
3583
3584 if (err)
3585 goto unlock_out;
3586 }
3587
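	/* All nr entries passed validation above; now commit the updates. */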
3588 for (i = 0; i < nr; i++) {
3589 count = si->swap_map[offset + i];
3590 has_cache = count & SWAP_HAS_CACHE;
3591 count &= ~SWAP_HAS_CACHE;
3592
3593 if (usage == SWAP_HAS_CACHE)
3594 has_cache = SWAP_HAS_CACHE;
3595 else if ((count & ~COUNT_CONTINUED) < SWAP_MAP_MAX)
3596 count += usage;
3597 else if (swap_count_continued(si, offset + i, count))
3598 count = COUNT_CONTINUED;
3599 else {
3600 /*
3601 * No need to roll back the changes: if
3602 * usage == 1, then nr must be 1.
3603 */
3604 err = -ENOMEM;
3605 goto unlock_out;
3606 }
3607
3608 WRITE_ONCE(si->swap_map[offset + i], count | has_cache);
3609 }
3610
3611 unlock_out:
3612 unlock_cluster(ci);
3613 return err;
3614 }
3615
3616 /*
3617 * Help swapoff by noting that swap entry belongs to shmem/tmpfs
3618 * (in which case its reference count is never incremented).
3619 */
3620 void swap_shmem_alloc(swp_entry_t entry, int nr)
3621 {
3622 __swap_duplicate(entry, SWAP_MAP_SHMEM, nr);
3623 }
3624
/*
 * Increase the reference count of a swap entry by 1.
 * Returns 0 for success, or -ENOMEM if a swap_count_continuation is required
 * but could not be atomically allocated. Also returns 0, just as if it
 * succeeded, if __swap_duplicate() fails for another reason (-EINVAL or
 * -ENOENT), which might occur if a page table entry has been corrupted.
 */
int swap_duplicate(swp_entry_t entry)
{
	int err = 0;

	while (!err && __swap_duplicate(entry, 1, 1) == -ENOMEM)
		err = add_swap_count_continuation(entry, GFP_ATOMIC);
	return err;
}
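
/*
 * Illustrative sketch only (not part of the original file): how a
 * caller holding a page table lock is expected to combine the calls
 * above. swap_duplicate() already retries with a GFP_ATOMIC
 * continuation allocation internally, so when it still returns -ENOMEM
 * the caller must drop its locks and allocate the continuation page
 * with GFP_KERNEL before retrying. The helper name is hypothetical.
 */
static inline int example_dup_swap_entry(swp_entry_t entry)
{
	int err = swap_duplicate(entry);

	if (err == -ENOMEM) {
		/* ...the caller drops its page table lock here... */
		err = add_swap_count_continuation(entry, GFP_KERNEL);
		/* ...then retakes the lock and retries on success... */
	}
	return err;
}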

/*
 * @entry: first swap entry from which we allocate nr swap cache slots.
 *
 * Called when allocating swap cache for existing swap entries.
 * Returns 0 on success, or an error code otherwise:
 * -EEXIST means there is already a swap cache reference.
 * Note: the return codes differ from those of swap_duplicate().
 */
int swapcache_prepare(swp_entry_t entry, int nr)
{
	return __swap_duplicate(entry, SWAP_HAS_CACHE, nr);
}

void swapcache_clear(struct swap_info_struct *si, swp_entry_t entry, int nr)
{
	unsigned long offset = swp_offset(entry);

	cluster_swap_free_nr(si, offset, nr, SWAP_HAS_CACHE);
}
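
/*
 * Illustrative sketch only (not part of the original file): the
 * intended pairing of the two helpers above in a swapin path. The
 * helper name and the elided I/O step are hypothetical; compare the
 * synchronous swapin path of do_swap_page() for a real user.
 */
static inline int example_pin_swap_range(struct swap_info_struct *si,
					 swp_entry_t entry, int nr)
{
	/* Claim SWAP_HAS_CACHE on each entry; -EEXIST if already held. */
	int err = swapcache_prepare(entry, nr);

	if (err)
		return err;

	/* ...allocate a folio and read the range in; on failure: */
	swapcache_clear(si, entry, nr);
	return 0;
}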

struct swap_info_struct *swp_swap_info(swp_entry_t entry)
{
	return swap_type_to_swap_info(swp_type(entry));
}

/*
 * out-of-line methods to avoid include hell.
 */
struct address_space *swapcache_mapping(struct folio *folio)
{
	return swp_swap_info(folio->swap)->swap_file->f_mapping;
}
EXPORT_SYMBOL_GPL(swapcache_mapping);

pgoff_t __folio_swap_cache_index(struct folio *folio)
{
	return swap_cache_index(folio->swap);
}
EXPORT_SYMBOL_GPL(__folio_swap_cache_index);

/*
 * add_swap_count_continuation - called when a swap count is duplicated
 * beyond SWAP_MAP_MAX: it allocates a new page and links that to the entry's
 * page of the original vmalloc'ed swap_map, to hold the continuation count
 * (for that entry and for its neighbouring PAGE_SIZE swap entries). Called
 * again when a count is duplicated beyond SWAP_MAP_MAX * SWAP_CONT_MAX, etc.
 *
 * These continuation pages are seldom referenced: the common paths all work
 * on the original swap_map, only referring to a continuation page when the
 * low "digit" of a count is incremented or decremented through SWAP_MAP_MAX.
 *
 * add_swap_count_continuation(, GFP_ATOMIC) can be called while holding
 * page table locks; if it fails, add_swap_count_continuation(, GFP_KERNEL)
 * can be called after dropping locks.
 */
int add_swap_count_continuation(swp_entry_t entry, gfp_t gfp_mask)
{
	struct swap_info_struct *si;
	struct swap_cluster_info *ci;
	struct page *head;
	struct page *page;
	struct page *list_page;
	pgoff_t offset;
	unsigned char count;
	int ret = 0;

	/*
	 * When debugging, it's easier to use __GFP_ZERO here; but it's better
	 * for latency not to zero a page while GFP_ATOMIC and holding locks.
	 */
	page = alloc_page(gfp_mask | __GFP_HIGHMEM);

	si = get_swap_device(entry);
	if (!si) {
		/*
		 * An acceptable race has occurred since the failing
		 * __swap_duplicate(): the swap device may have been swapped off.
		 */
		goto outer;
	}

	offset = swp_offset(entry);

	ci = lock_cluster(si, offset);

	count = swap_count(si->swap_map[offset]);

	if ((count & ~COUNT_CONTINUED) != SWAP_MAP_MAX) {
		/*
		 * The higher the swap count, the more likely it is that tasks
		 * will race to add swap count continuation: we need to avoid
		 * over-provisioning.
		 */
		goto out;
	}

	if (!page) {
		ret = -ENOMEM;
		goto out;
	}

	head = vmalloc_to_page(si->swap_map + offset);
	offset &= ~PAGE_MASK;

	spin_lock(&si->cont_lock);
	/*
	 * Page allocation does not initialize the page's lru field,
	 * but it does always reset its private field.
	 */
	if (!page_private(head)) {
		BUG_ON(count & COUNT_CONTINUED);
		INIT_LIST_HEAD(&head->lru);
		set_page_private(head, SWP_CONTINUED);
		si->flags |= SWP_CONTINUED;
	}

	list_for_each_entry(list_page, &head->lru, lru) {
		unsigned char *map;

		/*
		 * If the previous map said no continuation, but we've found
		 * a continuation page, free our allocation and use this one.
		 */
		if (!(count & COUNT_CONTINUED))
			goto out_unlock_cont;

		map = kmap_local_page(list_page) + offset;
		count = *map;
		kunmap_local(map);

		/*
		 * If this continuation count now has some space in it,
		 * free our allocation and use this one.
		 */
		if ((count & ~COUNT_CONTINUED) != SWAP_CONT_MAX)
			goto out_unlock_cont;
	}

	list_add_tail(&page->lru, &head->lru);
	page = NULL;			/* now it's attached, don't free it */
out_unlock_cont:
	spin_unlock(&si->cont_lock);
out:
	unlock_cluster(ci);
	put_swap_device(si);
outer:
	if (page)
		__free_page(page);
	return ret;
}

/*
 * swap_count_continued - when the original swap_map count is incremented
 * from SWAP_MAP_MAX, check if there is already a continuation page to carry
 * into, carry if so, or else fail until a new continuation page is allocated;
 * when the original swap_map count is decremented from 0 with continuation,
 * borrow from the continuation and report whether it still holds more.
 * Called while __swap_duplicate() or swap_entry_free() holds swap or cluster
 * lock.
 */
static bool swap_count_continued(struct swap_info_struct *si,
				 pgoff_t offset, unsigned char count)
{
	struct page *head;
	struct page *page;
	unsigned char *map;
	bool ret;

	head = vmalloc_to_page(si->swap_map + offset);
	if (page_private(head) != SWP_CONTINUED) {
		BUG_ON(count & COUNT_CONTINUED);
		return false;		/* need to add count continuation */
	}

	spin_lock(&si->cont_lock);
	offset &= ~PAGE_MASK;
	page = list_next_entry(head, lru);
	map = kmap_local_page(page) + offset;

	if (count == SWAP_MAP_MAX)	/* initial increment from swap_map */
		goto init_map;		/* jump over SWAP_CONT_MAX checks */

	if (count == (SWAP_MAP_MAX | COUNT_CONTINUED)) { /* incrementing */
		/*
		 * Think of how you add 1 to 999
		 */
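		/*
		 * Concretely: the loop below skips continuation "digits"
		 * already at their maximum (SWAP_CONT_MAX | COUNT_CONTINUED),
		 * like the 9s in 999; the code after it either bumps the
		 * first digit with room left, or reports failure so a fresh
		 * page can be added for a new most-significant digit, as
		 * 999 + 1 = 1000 grows an extra digit. The final walk back
		 * resets each skipped digit to plain COUNT_CONTINUED, the
		 * way those 9s roll over to 0s.
		 */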
		while (*map == (SWAP_CONT_MAX | COUNT_CONTINUED)) {
			kunmap_local(map);
			page = list_next_entry(page, lru);
			BUG_ON(page == head);
			map = kmap_local_page(page) + offset;
		}
		if (*map == SWAP_CONT_MAX) {
			kunmap_local(map);
			page = list_next_entry(page, lru);
			if (page == head) {
				ret = false;	/* add count continuation */
				goto out;
			}
			map = kmap_local_page(page) + offset;
init_map:	*map = 0;		/* we didn't zero the page */
		}
		*map += 1;
		kunmap_local(map);
		while ((page = list_prev_entry(page, lru)) != head) {
			map = kmap_local_page(page) + offset;
			*map = COUNT_CONTINUED;
			kunmap_local(map);
		}
		ret = true;			/* incremented */

	} else {				/* decrementing */
		/*
		 * Think of how you subtract 1 from 1000
		 */
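		/*
		 * Concretely: digits holding bare COUNT_CONTINUED are zeros
		 * that must borrow, like the 0s in 1000; the first non-zero
		 * digit is decremented, and the walk back leaves each
		 * borrowed-from digit at SWAP_CONT_MAX, the way those 0s
		 * become 9s in 1000 - 1 = 999.
		 */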
		BUG_ON(count != COUNT_CONTINUED);
		while (*map == COUNT_CONTINUED) {
			kunmap_local(map);
			page = list_next_entry(page, lru);
			BUG_ON(page == head);
			map = kmap_local_page(page) + offset;
		}
		BUG_ON(*map == 0);
		*map -= 1;
		if (*map == 0)
			count = 0;
		kunmap_local(map);
		while ((page = list_prev_entry(page, lru)) != head) {
			map = kmap_local_page(page) + offset;
			*map = SWAP_CONT_MAX | count;
			count = COUNT_CONTINUED;
			kunmap_local(map);
		}
		ret = count == COUNT_CONTINUED;
	}
out:
	spin_unlock(&si->cont_lock);
	return ret;
}

/*
 * free_swap_count_continuations - called at swapoff time to free all the
 * continuation pages appended to the swap_map, after swap_map is quiesced,
 * before vfree'ing it.
 */
static void free_swap_count_continuations(struct swap_info_struct *si)
{
	pgoff_t offset;

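	/*
	 * swap_map holds one byte per swap entry, so stepping offset by
	 * PAGE_SIZE visits each vmalloc'ed page of the map exactly once;
	 * a page's private field marks whether continuations hang off it.
	 */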
	for (offset = 0; offset < si->max; offset += PAGE_SIZE) {
		struct page *head;

		head = vmalloc_to_page(si->swap_map + offset);
		if (page_private(head)) {
			struct page *page, *next;

			list_for_each_entry_safe(page, next, &head->lru, lru) {
				list_del(&page->lru);
				__free_page(page);
			}
		}
	}
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
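/*
 * __folio_throttle_swaprate - when the block cgroup is congested, ask the
 * block layer to throttle the current task against the first block-backed
 * swap device on this node, so swap writeback cannot overwhelm the device.
 */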
void __folio_throttle_swaprate(struct folio *folio, gfp_t gfp)
{
	struct swap_info_struct *si, *next;
	int nid = folio_nid(folio);

	if (!(gfp & __GFP_IO))
		return;

	if (!__has_usable_swap())
		return;

	if (!blk_cgroup_congested())
		return;

	/*
	 * We've already scheduled a throttle, avoid taking the global swap
	 * lock.
	 */
	if (current->throttle_disk)
		return;

	spin_lock(&swap_avail_lock);
	plist_for_each_entry_safe(si, next, &swap_avail_heads[nid],
				  avail_lists[nid]) {
		if (si->bdev) {
			blkcg_schedule_throttle(si->bdev->bd_disk, true);
			break;
		}
	}
	spin_unlock(&swap_avail_lock);
}
#endif

static int __init swapfile_init(void)
{
	int nid;

	swap_avail_heads = kmalloc_array(nr_node_ids, sizeof(struct plist_head),
					 GFP_KERNEL);
	if (!swap_avail_heads) {
		pr_emerg("Not enough memory for swap heads, swap is disabled\n");
		return -ENOMEM;
	}

	for_each_node(nid)
		plist_head_init(&swap_avail_heads[nid]);

	swapfile_maximum_size = arch_max_swapfile_size();

#ifdef CONFIG_MIGRATION
	if (swapfile_maximum_size >= (1UL << SWP_MIG_TOTAL_BITS))
		swap_migration_ad_supported = true;
#endif	/* CONFIG_MIGRATION */

	return 0;
}
subsys_initcall(swapfile_init);