xref: /linux/mm/page_io.c (revision 34dc1baba215b826e454b8d19e4f24adbeb7d00d)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  *  linux/mm/page_io.c
4  *
5  *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
6  *
7  *  Swap reorganised 29.12.95,
8  *  Asynchronous swapping added 30.12.95. Stephen Tweedie
9  *  Removed race in async swapping. 14.4.1996. Bruno Haible
10  *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
11  *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
12  */
13 
14 #include <linux/mm.h>
15 #include <linux/kernel_stat.h>
16 #include <linux/gfp.h>
17 #include <linux/pagemap.h>
18 #include <linux/swap.h>
19 #include <linux/bio.h>
20 #include <linux/swapops.h>
21 #include <linux/writeback.h>
22 #include <linux/blkdev.h>
23 #include <linux/psi.h>
24 #include <linux/uio.h>
25 #include <linux/sched/task.h>
26 #include <linux/delayacct.h>
27 #include <linux/zswap.h>
28 #include "swap.h"
29 
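/*
 * Completion handling shared by the synchronous and asynchronous bdev
 * write paths: swap_writepage_bdev_sync() calls __end_swap_bio_write()
 * directly on its on-stack bio, while swap_writepage_bdev_async() runs
 * it from the end_swap_bio_write() bi_end_io handler below.
 */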
30 static void __end_swap_bio_write(struct bio *bio)
31 {
32 	struct folio *folio = bio_first_folio_all(bio);
33 
34 	if (bio->bi_status) {
35 		/*
36 		 * We failed to write the page out to swap-space.
37 		 * Re-dirty the page in order to avoid it being reclaimed.
38 		 * Also print a dire warning that things will go BAD (tm)
39 		 * very quickly.
40 		 *
 41 		 * Also clear PG_reclaim so folio_rotate_reclaimable() is skipped for it.
42 		 */
43 		folio_mark_dirty(folio);
44 		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
45 				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
46 				     (unsigned long long)bio->bi_iter.bi_sector);
47 		folio_clear_reclaim(folio);
48 	}
49 	folio_end_writeback(folio);
50 }
51 
52 static void end_swap_bio_write(struct bio *bio)
53 {
54 	__end_swap_bio_write(bio);
55 	bio_put(bio);
56 }
57 
58 static void __end_swap_bio_read(struct bio *bio)
59 {
60 	struct folio *folio = bio_first_folio_all(bio);
61 
62 	if (bio->bi_status) {
63 		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
64 				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
65 				     (unsigned long long)bio->bi_iter.bi_sector);
66 	} else {
67 		folio_mark_uptodate(folio);
68 	}
69 	folio_unlock(folio);
70 }
71 
72 static void end_swap_bio_read(struct bio *bio)
73 {
74 	__end_swap_bio_read(bio);
75 	bio_put(bio);
76 }
77 
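/*
 * generic_swapfile_activate - build the swap extent tree for a swapfile
 *
 * Walk the file block by block with bmap(), looking for runs of blocks
 * that are physically contiguous and PAGE_SIZE-aligned on disk, and feed
 * them one page at a time to add_swap_extent(), which merges adjacent
 * pages into extents.  A block that cannot be mapped (a hole) fails the
 * whole activation with -EINVAL.
 *
 * This is the legacy fallback used when the filesystem does not provide
 * an ->swap_activate address_space operation; a filesystem could also
 * call it from its own ->swap_activate, roughly like the sketch below
 * (foo_* names are made up for illustration):
 *
 *	static int foo_swap_activate(struct swap_info_struct *sis,
 *				     struct file *file, sector_t *span)
 *	{
 *		return generic_swapfile_activate(sis, file, span);
 *	}
 *
 * Returns the number of extents added, or a negative errno.
 */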
78 int generic_swapfile_activate(struct swap_info_struct *sis,
79 				struct file *swap_file,
80 				sector_t *span)
81 {
82 	struct address_space *mapping = swap_file->f_mapping;
83 	struct inode *inode = mapping->host;
84 	unsigned blocks_per_page;
85 	unsigned long page_no;
86 	unsigned blkbits;
87 	sector_t probe_block;
88 	sector_t last_block;
89 	sector_t lowest_block = -1;
90 	sector_t highest_block = 0;
91 	int nr_extents = 0;
92 	int ret;
93 
94 	blkbits = inode->i_blkbits;
95 	blocks_per_page = PAGE_SIZE >> blkbits;
96 
97 	/*
98 	 * Map all the blocks into the extent tree.  This code doesn't try
99 	 * to be very smart.
100 	 */
101 	probe_block = 0;
102 	page_no = 0;
103 	last_block = i_size_read(inode) >> blkbits;
104 	while ((probe_block + blocks_per_page) <= last_block &&
105 			page_no < sis->max) {
106 		unsigned block_in_page;
107 		sector_t first_block;
108 
109 		cond_resched();
110 
111 		first_block = probe_block;
112 		ret = bmap(inode, &first_block);
113 		if (ret || !first_block)
114 			goto bad_bmap;
115 
116 		/*
117 		 * It must be PAGE_SIZE aligned on-disk
118 		 */
119 		if (first_block & (blocks_per_page - 1)) {
120 			probe_block++;
121 			goto reprobe;
122 		}
123 
124 		for (block_in_page = 1; block_in_page < blocks_per_page;
125 					block_in_page++) {
126 			sector_t block;
127 
128 			block = probe_block + block_in_page;
129 			ret = bmap(inode, &block);
130 			if (ret || !block)
131 				goto bad_bmap;
132 
133 			if (block != first_block + block_in_page) {
134 				/* Discontiguity */
135 				probe_block++;
136 				goto reprobe;
137 			}
138 		}
139 
140 		first_block >>= (PAGE_SHIFT - blkbits);
141 		if (page_no) {	/* exclude the header page */
142 			if (first_block < lowest_block)
143 				lowest_block = first_block;
144 			if (first_block > highest_block)
145 				highest_block = first_block;
146 		}
147 
148 		/*
149 		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
150 		 */
151 		ret = add_swap_extent(sis, page_no, 1, first_block);
152 		if (ret < 0)
153 			goto out;
154 		nr_extents += ret;
155 		page_no++;
156 		probe_block += blocks_per_page;
157 reprobe:
158 		continue;
159 	}
160 	ret = nr_extents;
161 	*span = 1 + highest_block - lowest_block;
162 	if (page_no == 0)
163 		page_no = 1;	/* force Empty message */
164 	sis->max = page_no;
165 	sis->pages = page_no - 1;
166 	sis->highest_bit = page_no - 1;
167 out:
168 	return ret;
169 bad_bmap:
170 	pr_err("swapon: swapfile has holes\n");
171 	ret = -EINVAL;
172 	goto out;
173 }
174 
175 /*
176  * We may have stale swap cache pages in memory: notice
177  * them here and get rid of the unnecessary final write.
178  */
179 int swap_writepage(struct page *page, struct writeback_control *wbc)
180 {
181 	struct folio *folio = page_folio(page);
182 	int ret;
183 
184 	if (folio_free_swap(folio)) {
185 		folio_unlock(folio);
186 		return 0;
187 	}
188 	/*
189 	 * Arch code may have to preserve more data than just the page
190 	 * contents, e.g. memory tags.
191 	 */
192 	ret = arch_prepare_to_swap(&folio->page);
193 	if (ret) {
194 		folio_mark_dirty(folio);
195 		folio_unlock(folio);
196 		return ret;
197 	}
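	/*
	 * If zswap takes the folio, no block I/O is needed.  Cycle the
	 * writeback state anyway so that reclaim accounting and the
	 * PG_reclaim handling in folio_end_writeback() see a normally
	 * completed write and are left with a clean, reclaimable folio.
	 */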
198 	if (zswap_store(folio)) {
199 		folio_start_writeback(folio);
200 		folio_unlock(folio);
201 		folio_end_writeback(folio);
202 		return 0;
203 	}
204 	__swap_writepage(&folio->page, wbc);
205 	return 0;
206 }
207 
208 static inline void count_swpout_vm_event(struct folio *folio)
209 {
210 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
211 	if (unlikely(folio_test_pmd_mappable(folio)))
212 		count_vm_event(THP_SWPOUT);
213 #endif
214 	count_vm_events(PSWPOUT, folio_nr_pages(folio));
215 }
216 
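/*
 * Associate a swap-out bio with the blk-cgroup that owns the folio's
 * memory cgroup, so the swap write is throttled and accounted against
 * the cgroup doing the swapping rather than the task submitting the bio.
 */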
217 #if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
218 static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
219 {
220 	struct cgroup_subsys_state *css;
221 	struct mem_cgroup *memcg;
222 
223 	memcg = folio_memcg(folio);
224 	if (!memcg)
225 		return;
226 
227 	rcu_read_lock();
228 	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
229 	bio_associate_blkg_from_css(bio, css);
230 	rcu_read_unlock();
231 }
232 #else
233 #define bio_associate_blkg_from_page(bio, folio)		do { } while (0)
234 #endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */
235 
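/*
 * A swap_iocb is the "plug" used for filesystem-backed swap (SWP_FS_OPS):
 * it batches up to SWAP_CLUSTER_MAX pages that are contiguous in the
 * swapfile into a single ->swap_rw call, submitted at unplug time.
 */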
236 struct swap_iocb {
237 	struct kiocb		iocb;
238 	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
239 	int			pages;
240 	int			len;
241 };
242 static mempool_t *sio_pool;
243 
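/*
 * Lazily create the mempool backing swap_iocb allocations, typically while
 * an SWP_FS_OPS swapfile is being activated.  The cmpxchg() lets concurrent
 * callers race safely: the loser destroys its pool and both end up using
 * the one that won.
 */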
244 int sio_pool_init(void)
245 {
246 	if (!sio_pool) {
247 		mempool_t *pool = mempool_create_kmalloc_pool(
248 			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
249 		if (cmpxchg(&sio_pool, NULL, pool))
250 			mempool_destroy(pool);
251 	}
252 	if (!sio_pool)
253 		return -ENOMEM;
254 	return 0;
255 }
256 
257 static void sio_write_complete(struct kiocb *iocb, long ret)
258 {
259 	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
260 	struct page *page = sio->bvec[0].bv_page;
261 	int p;
262 
263 	if (ret != sio->len) {
264 		/*
265 		 * In the case of swap-over-nfs, this can be a
266 		 * temporary failure if the system has limited
267 		 * memory for allocating transmit buffers.
 268 		 * Mark the pages dirty and clear PG_reclaim to avoid
 269 		 * folio_rotate_reclaimable, and rate-limit the
 270 		 * messages.  Unlike the normal direct-to-bio case,
 271 		 * do not flag PageError, since the failure may
 272 		 * only be temporary.
273 		 */
274 		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
275 				   ret, page_file_offset(page));
276 		for (p = 0; p < sio->pages; p++) {
277 			page = sio->bvec[p].bv_page;
278 			set_page_dirty(page);
279 			ClearPageReclaim(page);
280 		}
281 	} else {
282 		for (p = 0; p < sio->pages; p++)
283 			count_swpout_vm_event(page_folio(sio->bvec[p].bv_page));
284 	}
285 
286 	for (p = 0; p < sio->pages; p++)
287 		end_page_writeback(sio->bvec[p].bv_page);
288 
289 	mempool_free(sio, sio_pool);
290 }
291 
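/*
 * Queue a page for writeback through the filesystem's ->swap_rw.  If the
 * caller passed a swap plug and the page directly follows what is already
 * queued for the same file, it is appended to the pending swap_iocb;
 * otherwise the old sio is submitted via swap_write_unplug() and a new one
 * is started.  A full sio (SWAP_CLUSTER_MAX pages), or the absence of a
 * plug, forces immediate submission.
 */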
292 static void swap_writepage_fs(struct page *page, struct writeback_control *wbc)
293 {
294 	struct swap_iocb *sio = NULL;
295 	struct swap_info_struct *sis = page_swap_info(page);
296 	struct file *swap_file = sis->swap_file;
297 	loff_t pos = page_file_offset(page);
298 
299 	set_page_writeback(page);
300 	unlock_page(page);
301 	if (wbc->swap_plug)
302 		sio = *wbc->swap_plug;
303 	if (sio) {
304 		if (sio->iocb.ki_filp != swap_file ||
305 		    sio->iocb.ki_pos + sio->len != pos) {
306 			swap_write_unplug(sio);
307 			sio = NULL;
308 		}
309 	}
310 	if (!sio) {
311 		sio = mempool_alloc(sio_pool, GFP_NOIO);
312 		init_sync_kiocb(&sio->iocb, swap_file);
313 		sio->iocb.ki_complete = sio_write_complete;
314 		sio->iocb.ki_pos = pos;
315 		sio->pages = 0;
316 		sio->len = 0;
317 	}
318 	bvec_set_page(&sio->bvec[sio->pages], page, thp_size(page), 0);
319 	sio->len += thp_size(page);
320 	sio->pages += 1;
321 	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
322 		swap_write_unplug(sio);
323 		sio = NULL;
324 	}
325 	if (wbc->swap_plug)
326 		*wbc->swap_plug = sio;
327 }
328 
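/*
 * Write a page to the block device and wait for completion, used for
 * SWP_SYNCHRONOUS_IO devices.  The bio lives on the stack and
 * __end_swap_bio_write() is called directly once submit_bio_wait()
 * returns.  swap_writepage_bdev_async() below is the non-waiting variant.
 */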
329 static void swap_writepage_bdev_sync(struct page *page,
330 		struct writeback_control *wbc, struct swap_info_struct *sis)
331 {
332 	struct bio_vec bv;
333 	struct bio bio;
334 	struct folio *folio = page_folio(page);
335 
336 	bio_init(&bio, sis->bdev, &bv, 1,
337 		 REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
338 	bio.bi_iter.bi_sector = swap_page_sector(page);
339 	__bio_add_page(&bio, page, thp_size(page), 0);
340 
341 	bio_associate_blkg_from_page(&bio, folio);
342 	count_swpout_vm_event(folio);
343 
344 	folio_start_writeback(folio);
345 	folio_unlock(folio);
346 
347 	submit_bio_wait(&bio);
348 	__end_swap_bio_write(&bio);
349 }
350 
351 static void swap_writepage_bdev_async(struct page *page,
352 		struct writeback_control *wbc, struct swap_info_struct *sis)
353 {
354 	struct bio *bio;
355 	struct folio *folio = page_folio(page);
356 
357 	bio = bio_alloc(sis->bdev, 1,
358 			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
359 			GFP_NOIO);
360 	bio->bi_iter.bi_sector = swap_page_sector(page);
361 	bio->bi_end_io = end_swap_bio_write;
362 	__bio_add_page(bio, page, thp_size(page), 0);
363 
364 	bio_associate_blkg_from_page(bio, folio);
365 	count_swpout_vm_event(folio);
366 	folio_start_writeback(folio);
367 	folio_unlock(folio);
368 	submit_bio(bio);
369 }
370 
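/*
 * Dispatch a swap write that was not short-circuited by zswap: either
 * through the filesystem (SWP_FS_OPS), synchronously to the block device
 * (SWP_SYNCHRONOUS_IO), or as an ordinary asynchronous bio.  The folio
 * must be locked and in the swap cache.
 */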
371 void __swap_writepage(struct page *page, struct writeback_control *wbc)
372 {
373 	struct swap_info_struct *sis = page_swap_info(page);
374 
375 	VM_BUG_ON_PAGE(!PageSwapCache(page), page);
376 	/*
 377 	 * ->flags can be updated non-atomically (scan_swap_map_slots),
378 	 * but that will never affect SWP_FS_OPS, so the data_race
379 	 * is safe.
380 	 */
381 	if (data_race(sis->flags & SWP_FS_OPS))
382 		swap_writepage_fs(page, wbc);
383 	else if (sis->flags & SWP_SYNCHRONOUS_IO)
384 		swap_writepage_bdev_sync(page, wbc, sis);
385 	else
386 		swap_writepage_bdev_async(page, wbc, sis);
387 }
388 
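/*
 * Submit whatever is queued in @sio through ->swap_rw.  If the filesystem
 * completes the request synchronously (anything but -EIOCBQUEUED), run the
 * completion handler here; otherwise it runs from the iocb's ki_complete.
 */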
389 void swap_write_unplug(struct swap_iocb *sio)
390 {
391 	struct iov_iter from;
392 	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
393 	int ret;
394 
395 	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
396 	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
397 	if (ret != -EIOCBQUEUED)
398 		sio_write_complete(&sio->iocb, ret);
399 }
400 
401 static void sio_read_complete(struct kiocb *iocb, long ret)
402 {
403 	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
404 	int p;
405 
406 	if (ret == sio->len) {
407 		for (p = 0; p < sio->pages; p++) {
408 			struct folio *folio = page_folio(sio->bvec[p].bv_page);
409 
410 			folio_mark_uptodate(folio);
411 			folio_unlock(folio);
412 		}
413 		count_vm_events(PSWPIN, sio->pages);
414 	} else {
415 		for (p = 0; p < sio->pages; p++) {
416 			struct folio *folio = page_folio(sio->bvec[p].bv_page);
417 
418 			folio_unlock(folio);
419 		}
420 		pr_alert_ratelimited("Read-error on swap-device\n");
421 	}
422 	mempool_free(sio, sio_pool);
423 }
424 
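/*
 * Filesystem-backed swap-in, the read-side counterpart of
 * swap_writepage_fs(): contiguous pages are batched in a swap_iocb and
 * submitted through ->swap_rw by swap_read_unplug().
 */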
425 static void swap_readpage_fs(struct page *page,
426 			     struct swap_iocb **plug)
427 {
428 	struct swap_info_struct *sis = page_swap_info(page);
429 	struct swap_iocb *sio = NULL;
430 	loff_t pos = page_file_offset(page);
431 
432 	if (plug)
433 		sio = *plug;
434 	if (sio) {
435 		if (sio->iocb.ki_filp != sis->swap_file ||
436 		    sio->iocb.ki_pos + sio->len != pos) {
437 			swap_read_unplug(sio);
438 			sio = NULL;
439 		}
440 	}
441 	if (!sio) {
442 		sio = mempool_alloc(sio_pool, GFP_KERNEL);
443 		init_sync_kiocb(&sio->iocb, sis->swap_file);
444 		sio->iocb.ki_pos = pos;
445 		sio->iocb.ki_complete = sio_read_complete;
446 		sio->pages = 0;
447 		sio->len = 0;
448 	}
449 	bvec_set_page(&sio->bvec[sio->pages], page, thp_size(page), 0);
450 	sio->len += thp_size(page);
451 	sio->pages += 1;
452 	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
453 		swap_read_unplug(sio);
454 		sio = NULL;
455 	}
456 	if (plug)
457 		*plug = sio;
458 }
459 
460 static void swap_readpage_bdev_sync(struct page *page,
461 		struct swap_info_struct *sis)
462 {
463 	struct bio_vec bv;
464 	struct bio bio;
465 
466 	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
467 	bio.bi_iter.bi_sector = swap_page_sector(page);
468 	__bio_add_page(&bio, page, thp_size(page), 0);
469 	/*
470 	 * Keep this task valid during swap readpage because the oom killer may
471 	 * attempt to access it in the page fault retry time check.
472 	 */
473 	get_task_struct(current);
474 	count_vm_event(PSWPIN);
475 	submit_bio_wait(&bio);
476 	__end_swap_bio_read(&bio);
477 	put_task_struct(current);
478 }
479 
480 static void swap_readpage_bdev_async(struct page *page,
481 		struct swap_info_struct *sis)
482 {
483 	struct bio *bio;
484 
485 	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
486 	bio->bi_iter.bi_sector = swap_page_sector(page);
487 	bio->bi_end_io = end_swap_bio_read;
488 	__bio_add_page(bio, page, thp_size(page), 0);
489 	count_vm_event(PSWPIN);
490 	submit_bio(bio);
491 }
492 
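/*
 * Read a page back in from swap.  zswap is tried first; failing that the
 * read is dispatched to the filesystem, to a synchronous on-stack bio, or
 * to an ordinary asynchronous bio, mirroring __swap_writepage().  With
 * @synchronous the caller (e.g. the swap fault path) needs the data before
 * returning, so the waiting bdev path is used.  Submission time is charged
 * as a memory stall via psi and delayacct when the folio is part of the
 * workingset.
 */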
493 void swap_readpage(struct page *page, bool synchronous, struct swap_iocb **plug)
494 {
495 	struct folio *folio = page_folio(page);
496 	struct swap_info_struct *sis = page_swap_info(page);
497 	bool workingset = folio_test_workingset(folio);
498 	unsigned long pflags;
499 	bool in_thrashing;
500 
501 	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio);
502 	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
503 	VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);
504 
505 	/*
506 	 * Count submission time as memory stall and delay. When the device
507 	 * is congested, or the submitting cgroup IO-throttled, submission
508 	 * can be a significant part of overall IO time.
509 	 */
510 	if (workingset) {
511 		delayacct_thrashing_start(&in_thrashing);
512 		psi_memstall_enter(&pflags);
513 	}
514 	delayacct_swapin_start();
515 
516 	if (zswap_load(folio)) {
517 		folio_mark_uptodate(folio);
518 		folio_unlock(folio);
519 	} else if (data_race(sis->flags & SWP_FS_OPS)) {
520 		swap_readpage_fs(page, plug);
521 	} else if (synchronous || (sis->flags & SWP_SYNCHRONOUS_IO)) {
522 		swap_readpage_bdev_sync(page, sis);
523 	} else {
524 		swap_readpage_bdev_async(page, sis);
525 	}
526 
527 	if (workingset) {
528 		delayacct_thrashing_end(&in_thrashing);
529 		psi_memstall_leave(&pflags);
530 	}
531 	delayacct_swapin_end();
532 }
533 
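/*
 * Read-side unplug, normally reached through the swap_read_unplug()
 * wrapper in swap.h, which only calls it for a non-NULL sio.  Works like
 * swap_write_unplug() but with a destination iterator and
 * sio_read_complete().
 */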
534 void __swap_read_unplug(struct swap_iocb *sio)
535 {
536 	struct iov_iter from;
537 	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
538 	int ret;
539 
540 	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
541 	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
542 	if (ret != -EIOCBQUEUED)
543 		sio_read_complete(&sio->iocb, ret);
544 }
545