// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/mm/page_io.c
 *
 *  Copyright (C) 1991, 1992, 1993, 1994  Linus Torvalds
 *
 *  Swap reorganised 29.12.95,
 *  Asynchronous swapping added 30.12.95. Stephen Tweedie
 *  Removed race in async swapping. 14.4.1996. Bruno Haible
 *  Add swap of shared pages through the page cache. 20.2.1998. Stephen Tweedie
 *  Always use brw_page, life becomes simpler. 12 May 1998 Eric Biederman
 */

#include <linux/mm.h>
#include <linux/kernel_stat.h>
#include <linux/gfp.h>
#include <linux/pagemap.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/swapops.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/psi.h>
#include <linux/uio.h>
#include <linux/sched/task.h>
#include <linux/delayacct.h>
#include <linux/zswap.h>
#include "swap.h"

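/*
 * Completion handler for swap write bios: handle write errors, then end
 * writeback on the folio.
 */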
static void __end_swap_bio_write(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		/*
		 * We failed to write the folio out to swap-space.
		 * Re-dirty the folio in order to avoid it being reclaimed.
		 * Also print a dire warning that things will go BAD (tm)
		 * very quickly.
		 *
		 * Also clear PG_reclaim to avoid folio_rotate_reclaimable().
		 */
		folio_mark_dirty(folio);
		pr_alert_ratelimited("Write-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
		folio_clear_reclaim(folio);
	}
	folio_end_writeback(folio);
}

static void end_swap_bio_write(struct bio *bio)
{
	__end_swap_bio_write(bio);
	bio_put(bio);
}

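/*
 * Completion handler for swap read bios: mark the folio uptodate on success,
 * and unlock it in all cases so a waiting faulter can proceed.
 */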
static void __end_swap_bio_read(struct bio *bio)
{
	struct folio *folio = bio_first_folio_all(bio);

	if (bio->bi_status) {
		pr_alert_ratelimited("Read-error on swap-device (%u:%u:%llu)\n",
				     MAJOR(bio_dev(bio)), MINOR(bio_dev(bio)),
				     (unsigned long long)bio->bi_iter.bi_sector);
	} else {
		folio_mark_uptodate(folio);
	}
	folio_unlock(folio);
}

static void end_swap_bio_read(struct bio *bio)
{
	__end_swap_bio_read(bio);
	bio_put(bio);
}

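/*
 * Walk the swapfile with bmap() and map each PAGE_SIZE-aligned, physically
 * contiguous run of blocks into a swap extent. Returns the number of extents
 * added, or a negative error (-EINVAL if the file has holes).
 */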
int generic_swapfile_activate(struct swap_info_struct *sis,
				struct file *swap_file,
				sector_t *span)
{
	struct address_space *mapping = swap_file->f_mapping;
	struct inode *inode = mapping->host;
	unsigned blocks_per_page;
	unsigned long page_no;
	unsigned blkbits;
	sector_t probe_block;
	sector_t last_block;
	sector_t lowest_block = -1;
	sector_t highest_block = 0;
	int nr_extents = 0;
	int ret;

	blkbits = inode->i_blkbits;
	blocks_per_page = PAGE_SIZE >> blkbits;

	/*
	 * Map all the blocks into the extent tree.  This code doesn't try
	 * to be very smart.
	 */
	probe_block = 0;
	page_no = 0;
	last_block = i_size_read(inode) >> blkbits;
	while ((probe_block + blocks_per_page) <= last_block &&
			page_no < sis->max) {
		unsigned block_in_page;
		sector_t first_block;

		cond_resched();

		first_block = probe_block;
		ret = bmap(inode, &first_block);
		if (ret || !first_block)
			goto bad_bmap;

		/*
		 * It must be PAGE_SIZE aligned on-disk
		 */
		if (first_block & (blocks_per_page - 1)) {
			probe_block++;
			goto reprobe;
		}

		for (block_in_page = 1; block_in_page < blocks_per_page;
					block_in_page++) {
			sector_t block;

			block = probe_block + block_in_page;
			ret = bmap(inode, &block);
			if (ret || !block)
				goto bad_bmap;

			if (block != first_block + block_in_page) {
				/* Discontiguity */
				probe_block++;
				goto reprobe;
			}
		}

		first_block >>= (PAGE_SHIFT - blkbits);
		if (page_no) {	/* exclude the header page */
			if (first_block < lowest_block)
				lowest_block = first_block;
			if (first_block > highest_block)
				highest_block = first_block;
		}

		/*
		 * We found a PAGE_SIZE-length, PAGE_SIZE-aligned run of blocks
		 */
		ret = add_swap_extent(sis, page_no, 1, first_block);
		if (ret < 0)
			goto out;
		nr_extents += ret;
		page_no++;
		probe_block += blocks_per_page;
reprobe:
		continue;
	}
	ret = nr_extents;
	*span = 1 + highest_block - lowest_block;
	if (page_no == 0)
		page_no = 1;	/* force Empty message */
	sis->max = page_no;
	sis->pages = page_no - 1;
	sis->highest_bit = page_no - 1;
out:
	return ret;
bad_bmap:
	pr_err("swapon: swapfile has holes\n");
	ret = -EINVAL;
	goto out;
}

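/*
 * Returns true if every word of every subpage in the folio is zero. The
 * last word of each page is checked first, since pages that are zero at
 * the start but have data near the end are the common case.
 */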
static bool is_folio_zero_filled(struct folio *folio)
{
	unsigned int pos, last_pos;
	unsigned long *data;
	unsigned int i;

	last_pos = PAGE_SIZE / sizeof(*data) - 1;
	for (i = 0; i < folio_nr_pages(folio); i++) {
		data = kmap_local_folio(folio, i * PAGE_SIZE);
		/*
		 * Check the last word first, in case the page is zero-filled
		 * at the start and has non-zero data at the end, which is
		 * common in real-world workloads.
		 */
		if (data[last_pos]) {
			kunmap_local(data);
			return false;
		}
		for (pos = 0; pos < last_pos; pos++) {
			if (data[pos]) {
				kunmap_local(data);
				return false;
			}
		}
		kunmap_local(data);
	}

	return true;
}

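/*
 * Set or clear the zeromap bit for each swap entry backing the folio.
 * Callers hold the folio lock, which serialises updates for these entries;
 * set_bit()/clear_bit() keep neighbouring bits owned by other entries safe.
 */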
static void swap_zeromap_folio_set(struct folio *folio)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		entry = page_swap_entry(folio_page(folio, i));
		set_bit(swp_offset(entry), sis->zeromap);
	}
}

static void swap_zeromap_folio_clear(struct folio *folio)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		entry = page_swap_entry(folio_page(folio, i));
		clear_bit(swp_offset(entry), sis->zeromap);
	}
}

/*
 * Return the index of the first subpage that is not zero-filled
 * according to swap_info_struct->zeromap.
 * If all subpages are zero-filled according to zeromap, return
 * folio_nr_pages(folio).
 */
static unsigned int swap_zeromap_folio_test(struct folio *folio)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	swp_entry_t entry;
	unsigned int i;

	for (i = 0; i < folio_nr_pages(folio); i++) {
		entry = page_swap_entry(folio_page(folio, i));
		if (!test_bit(swp_offset(entry), sis->zeromap))
			return i;
	}
	return i;
}

/*
 * We may have stale swap cache pages in memory: notice
 * them here and get rid of the unnecessary final write.
 */
int swap_writepage(struct page *page, struct writeback_control *wbc)
{
	struct folio *folio = page_folio(page);
	int ret;

	if (folio_free_swap(folio)) {
		folio_unlock(folio);
		return 0;
	}
	/*
	 * Arch code may have to preserve more data than just the page
	 * contents, e.g. memory tags.
	 */
	ret = arch_prepare_to_swap(folio);
	if (ret) {
		folio_mark_dirty(folio);
		folio_unlock(folio);
		return ret;
	}

	/*
	 * Use a bitmap (zeromap) to avoid doing IO for zero-filled pages.
	 * The bits in zeromap are protected by the locked swapcache folio
	 * and atomic updates are used to protect against read-modify-write
	 * corruption due to other zero swap entries seeing concurrent updates.
	 */
	if (is_folio_zero_filled(folio)) {
		swap_zeromap_folio_set(folio);
		folio_unlock(folio);
		return 0;
	} else {
		/*
		 * Clear bits this folio occupies in the zeromap to prevent
		 * zero data being read in from any previous zero writes that
		 * occupied the same swap entries.
		 */
		swap_zeromap_folio_clear(folio);
	}
	if (zswap_store(folio)) {
		folio_unlock(folio);
		return 0;
	}
	if (!mem_cgroup_zswap_writeback_enabled(folio_memcg(folio))) {
		folio_mark_dirty(folio);
		return AOP_WRITEPAGE_ACTIVATE;
	}

	__swap_writepage(folio, wbc);
	return 0;
}

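/*
 * Bump the swapout counters: PSWPOUT for every subpage, plus THP/mTHP
 * statistics when CONFIG_TRANSPARENT_HUGEPAGE is enabled.
 */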
static inline void count_swpout_vm_event(struct folio *folio)
{
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	if (unlikely(folio_test_pmd_mappable(folio))) {
		count_memcg_folio_events(folio, THP_SWPOUT, 1);
		count_vm_event(THP_SWPOUT);
	}
	count_mthp_stat(folio_order(folio), MTHP_STAT_SWPOUT);
#endif
	count_vm_events(PSWPOUT, folio_nr_pages(folio));
}

#if defined(CONFIG_MEMCG) && defined(CONFIG_BLK_CGROUP)
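/*
 * Attribute the swap bio to the effective IO cgroup of the folio's memory
 * cgroup, so writeback IO is throttled and accounted against the right
 * cgroup.
 */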
static void bio_associate_blkg_from_page(struct bio *bio, struct folio *folio)
{
	struct cgroup_subsys_state *css;
	struct mem_cgroup *memcg;

	memcg = folio_memcg(folio);
	if (!memcg)
		return;

	rcu_read_lock();
	css = cgroup_e_css(memcg->css.cgroup, &io_cgrp_subsys);
	bio_associate_blkg_from_css(bio, css);
	rcu_read_unlock();
}
#else
#define bio_associate_blkg_from_page(bio, folio)		do { } while (0)
#endif /* CONFIG_MEMCG && CONFIG_BLK_CGROUP */

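/*
 * One in-flight swap IO to a swapfile accessed through the filesystem's
 * swap_rw (SWP_FS_OPS): a kiocb plus up to SWAP_CLUSTER_MAX pages batched
 * into a single read or write.
 */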
struct swap_iocb {
	struct kiocb		iocb;
	struct bio_vec		bvec[SWAP_CLUSTER_MAX];
	int			pages;
	int			len;
};
static mempool_t *sio_pool;

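/*
 * Lazily create the shared sio mempool. The cmpxchg() makes racing
 * initialisers safe: the loser destroys its pool and everyone uses the
 * winner's.
 */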
int sio_pool_init(void)
{
	if (!sio_pool) {
		mempool_t *pool = mempool_create_kmalloc_pool(
			SWAP_CLUSTER_MAX, sizeof(struct swap_iocb));
		if (cmpxchg(&sio_pool, NULL, pool))
			mempool_destroy(pool);
	}
	if (!sio_pool)
		return -ENOMEM;
	return 0;
}

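/*
 * Completion handler for swap_rw writes. A short or failed write re-dirties
 * every page in the batch so nothing is reclaimed with unwritten contents.
 */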
static void sio_write_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	struct page *page = sio->bvec[0].bv_page;
	int p;

	if (ret != sio->len) {
		/*
		 * In the case of swap-over-NFS, this can be a
		 * temporary failure if the system has limited
		 * memory for allocating transmit buffers.
		 * Mark the pages dirty and avoid
		 * folio_rotate_reclaimable(), but rate-limit the
		 * messages.
		 */
		pr_err_ratelimited("Write error %ld on dio swapfile (%llu)\n",
				   ret, swap_dev_pos(page_swap_entry(page)));
		for (p = 0; p < sio->pages; p++) {
			page = sio->bvec[p].bv_page;
			set_page_dirty(page);
			ClearPageReclaim(page);
		}
	}

	for (p = 0; p < sio->pages; p++)
		end_page_writeback(sio->bvec[p].bv_page);

	mempool_free(sio, sio_pool);
}

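/*
 * Queue a folio for writeout via the filesystem's swap_rw. Adjacent folios
 * are batched onto one swap_iocb while the caller's plug is held; the batch
 * is submitted when it fills up, becomes discontiguous, or is unplugged.
 */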
static void swap_writepage_fs(struct folio *folio, struct writeback_control *wbc)
{
	struct swap_iocb *sio = NULL;
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct file *swap_file = sis->swap_file;
	loff_t pos = swap_dev_pos(folio->swap);

	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	if (wbc->swap_plug)
		sio = *wbc->swap_plug;
	if (sio) {
		if (sio->iocb.ki_filp != swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_write_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_NOIO);
		init_sync_kiocb(&sio->iocb, swap_file);
		sio->iocb.ki_complete = sio_write_complete;
		sio->iocb.ki_pos = pos;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !wbc->swap_plug) {
		swap_write_unplug(sio);
		sio = NULL;
	}
	if (wbc->swap_plug)
		*wbc->swap_plug = sio;
}

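/*
 * Write a folio to a block device synchronously, using a bio on the stack
 * and submit_bio_wait(), then run write completion in the caller's context.
 */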
static void swap_writepage_bdev_sync(struct folio *folio,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1,
		 REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc));
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(&bio, folio);
	count_swpout_vm_event(folio);

	folio_start_writeback(folio);
	folio_unlock(folio);

	submit_bio_wait(&bio);
	__end_swap_bio_write(&bio);
}

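/*
 * Write a folio to a block device asynchronously; end_swap_bio_write() runs
 * at IO completion.
 */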
static void swap_writepage_bdev_async(struct folio *folio,
		struct writeback_control *wbc, struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1,
			REQ_OP_WRITE | REQ_SWAP | wbc_to_write_flags(wbc),
			GFP_NOIO);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_write;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);

	bio_associate_blkg_from_page(bio, folio);
	count_swpout_vm_event(folio);
	folio_start_writeback(folio);
	folio_unlock(folio);
	submit_bio(bio);
}

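/*
 * Dispatch the writeout: filesystem swap_rw for SWP_FS_OPS swapfiles,
 * synchronous bdev IO for SWP_SYNCHRONOUS_IO devices, and async bdev IO
 * otherwise.
 */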
void __swap_writepage(struct folio *folio, struct writeback_control *wbc)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio), folio);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_FS_OPS, so the data_race
	 * is safe.
	 */
	if (data_race(sis->flags & SWP_FS_OPS))
		swap_writepage_fs(folio, wbc);
	/*
	 * ->flags can be updated non-atomically (scan_swap_map_slots),
	 * but that will never affect SWP_SYNCHRONOUS_IO, so the data_race
	 * is safe.
	 */
	else if (data_race(sis->flags & SWP_SYNCHRONOUS_IO))
		swap_writepage_bdev_sync(folio, wbc, sis);
	else
		swap_writepage_bdev_async(folio, wbc, sis);
}

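/*
 * Submit a plugged batch of writes through the filesystem's swap_rw. If the
 * write completes synchronously (anything but -EIOCBQUEUED), call the
 * completion handler directly.
 */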
void swap_write_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_SOURCE, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_write_complete(&sio->iocb, ret);
}

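/*
 * Completion handler for swap_rw reads: on a full-length read, mark every
 * folio in the batch uptodate; either way, unlock them all.
 */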
static void sio_read_complete(struct kiocb *iocb, long ret)
{
	struct swap_iocb *sio = container_of(iocb, struct swap_iocb, iocb);
	int p;

	if (ret == sio->len) {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			folio_mark_uptodate(folio);
			folio_unlock(folio);
		}
		count_vm_events(PSWPIN, sio->pages);
	} else {
		for (p = 0; p < sio->pages; p++) {
			struct folio *folio = page_folio(sio->bvec[p].bv_page);

			folio_unlock(folio);
		}
		pr_alert_ratelimited("Read-error on swap-device\n");
	}
	mempool_free(sio, sio_pool);
}

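/*
 * Try to satisfy the read from the zeromap. Returns true if the folio was
 * handled here: either fully zero-filled and marked uptodate, or left
 * !uptodate because it straddles the zeromap (treated as an IO error).
 */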
static bool swap_read_folio_zeromap(struct folio *folio)
{
	unsigned int idx = swap_zeromap_folio_test(folio);

	if (idx == 0)
		return false;

	/*
	 * Swapping in a large folio that is partially in the zeromap is not
	 * currently handled. Return true without marking the folio uptodate so
	 * that an IO error is emitted (e.g. do_swap_page() will sigbus).
	 */
	if (WARN_ON_ONCE(idx < folio_nr_pages(folio)))
		return true;

	folio_zero_range(folio, 0, folio_size(folio));
	folio_mark_uptodate(folio);
	return true;
}

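/*
 * Queue a folio for reading via the filesystem's swap_rw, batching
 * contiguous reads onto one swap_iocb under the caller's plug, as in
 * swap_writepage_fs().
 */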
static void swap_read_folio_fs(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	struct swap_iocb *sio = NULL;
	loff_t pos = swap_dev_pos(folio->swap);

	if (plug)
		sio = *plug;
	if (sio) {
		if (sio->iocb.ki_filp != sis->swap_file ||
		    sio->iocb.ki_pos + sio->len != pos) {
			swap_read_unplug(sio);
			sio = NULL;
		}
	}
	if (!sio) {
		sio = mempool_alloc(sio_pool, GFP_KERNEL);
		init_sync_kiocb(&sio->iocb, sis->swap_file);
		sio->iocb.ki_pos = pos;
		sio->iocb.ki_complete = sio_read_complete;
		sio->pages = 0;
		sio->len = 0;
	}
	bvec_set_folio(&sio->bvec[sio->pages], folio, folio_size(folio), 0);
	sio->len += folio_size(folio);
	sio->pages += 1;
	if (sio->pages == ARRAY_SIZE(sio->bvec) || !plug) {
		swap_read_unplug(sio);
		sio = NULL;
	}
	if (plug)
		*plug = sio;
}

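/*
 * Read a folio from a block device synchronously with an on-stack bio,
 * running read completion in the caller's context.
 */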
static void swap_read_folio_bdev_sync(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio_vec bv;
	struct bio bio;

	bio_init(&bio, sis->bdev, &bv, 1, REQ_OP_READ);
	bio.bi_iter.bi_sector = swap_folio_sector(folio);
	bio_add_folio_nofail(&bio, folio, folio_size(folio), 0);
	/*
	 * Keep this task valid during swap readpage because the oom killer may
	 * attempt to access it in the page fault retry time check.
	 */
	get_task_struct(current);
	count_vm_event(PSWPIN);
	submit_bio_wait(&bio);
	__end_swap_bio_read(&bio);
	put_task_struct(current);
}

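/*
 * Read a folio from a block device asynchronously; end_swap_bio_read() runs
 * at IO completion.
 */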
static void swap_read_folio_bdev_async(struct folio *folio,
		struct swap_info_struct *sis)
{
	struct bio *bio;

	bio = bio_alloc(sis->bdev, 1, REQ_OP_READ, GFP_KERNEL);
	bio->bi_iter.bi_sector = swap_folio_sector(folio);
	bio->bi_end_io = end_swap_bio_read;
	bio_add_folio_nofail(bio, folio, folio_size(folio), 0);
	count_vm_event(PSWPIN);
	submit_bio(bio);
}

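/*
 * Main entry point for reading a folio back in from swap: charge the stall
 * to psi/delayacct, satisfy the read from the zeromap or zswap if possible,
 * and otherwise fall back to filesystem or block device IO.
 */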
void swap_read_folio(struct folio *folio, struct swap_iocb **plug)
{
	struct swap_info_struct *sis = swp_swap_info(folio->swap);
	bool synchronous = sis->flags & SWP_SYNCHRONOUS_IO;
	bool workingset = folio_test_workingset(folio);
	unsigned long pflags;
	bool in_thrashing;

	VM_BUG_ON_FOLIO(!folio_test_swapcache(folio) && !synchronous, folio);
	VM_BUG_ON_FOLIO(!folio_test_locked(folio), folio);
	VM_BUG_ON_FOLIO(folio_test_uptodate(folio), folio);

	/*
	 * Count submission time as memory stall and delay. When the device
	 * is congested, or the submitting cgroup is IO-throttled, submission
	 * can be a significant part of overall IO time.
	 */
	if (workingset) {
		delayacct_thrashing_start(&in_thrashing);
		psi_memstall_enter(&pflags);
	}
	delayacct_swapin_start();

	if (swap_read_folio_zeromap(folio)) {
		folio_unlock(folio);
		goto finish;
	} else if (zswap_load(folio)) {
		folio_unlock(folio);
		goto finish;
	}

	/* We have to read from slower devices. Increase zswap protection. */
	zswap_folio_swapin(folio);

	if (data_race(sis->flags & SWP_FS_OPS)) {
		swap_read_folio_fs(folio, plug);
	} else if (synchronous) {
		swap_read_folio_bdev_sync(folio, sis);
	} else {
		swap_read_folio_bdev_async(folio, sis);
	}

finish:
	if (workingset) {
		delayacct_thrashing_end(&in_thrashing);
		psi_memstall_leave(&pflags);
	}
	delayacct_swapin_end();
}

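/*
 * Submit a plugged batch of reads through the filesystem's swap_rw; as with
 * writes, a synchronous return short-circuits to the completion handler.
 */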
void __swap_read_unplug(struct swap_iocb *sio)
{
	struct iov_iter from;
	struct address_space *mapping = sio->iocb.ki_filp->f_mapping;
	int ret;

	iov_iter_bvec(&from, ITER_DEST, sio->bvec, sio->pages, sio->len);
	ret = mapping->a_ops->swap_rw(&sio->iocb, &from);
	if (ret != -EIOCBQUEUED)
		sio_read_complete(&sio->iocb, ret);
}
671