xref: /linux/fs/iomap/bio.c (revision 1885cdbfbb51ede3637166c895d0b8040c9899cc)
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Copyright (C) 2010 Red Hat, Inc.
4  * Copyright (C) 2016-2023 Christoph Hellwig.
5  */
6 #include <linux/iomap.h>
7 #include <linux/pagemap.h>
8 #include "internal.h"
9 #include "trace.h"
10 
iomap_read_end_io(struct bio * bio)11 static void iomap_read_end_io(struct bio *bio)
12 {
13 	int error = blk_status_to_errno(bio->bi_status);
14 	struct folio_iter fi;
15 
16 	bio_for_each_folio_all(fi, bio)
17 		iomap_finish_folio_read(fi.folio, fi.offset, fi.length, error);
18 	bio_put(bio);
19 }
20 
iomap_bio_submit_read(struct iomap_read_folio_ctx * ctx)21 static void iomap_bio_submit_read(struct iomap_read_folio_ctx *ctx)
22 {
23 	struct bio *bio = ctx->read_ctx;
24 
25 	if (bio)
26 		submit_bio(bio);
27 }
28 
/*
 * Queue a read of @plen bytes of @ctx->cur_folio at the iterator's current
 * position, merging into the bio already pending in @ctx->read_ctx when
 * possible.  Always returns 0; I/O errors are reported asynchronously via
 * iomap_read_end_io().
 */
static int iomap_bio_read_folio_range(const struct iomap_iter *iter,
		struct iomap_read_folio_ctx *ctx, size_t plen)
{
	struct folio *folio = ctx->cur_folio;
	const struct iomap *iomap = &iter->iomap;
	loff_t pos = iter->pos;
	size_t poff = offset_in_folio(folio, pos);
	loff_t length = iomap_length(iter);
	sector_t sector;
	struct bio *bio = ctx->read_ctx;

	/*
	 * A new bio is needed when there is no pending bio, when this range
	 * is not physically contiguous with the end of the pending bio, or
	 * when the pending bio has no room left for the folio.
	 */
	sector = iomap_sector(iomap, pos);
	if (!bio || bio_end_sector(bio) != sector ||
	    !bio_add_folio(bio, folio, plen, poff)) {
		gfp_t gfp = mapping_gfp_constraint(folio->mapping, GFP_KERNEL);
		gfp_t orig_gfp = gfp;
		/* Size the new bio for the whole remaining iteration range. */
		unsigned int nr_vecs = DIV_ROUND_UP(length, PAGE_SIZE);

		/* Send the pending bio on its way before building a new one. */
		if (bio)
			submit_bio(bio);

		if (ctx->rac) /* same as readahead_gfp_mask */
			gfp |= __GFP_NORETRY | __GFP_NOWARN;
		bio = bio_alloc(iomap->bdev, bio_max_segs(nr_vecs), REQ_OP_READ,
				gfp);
		/*
		 * If the bio_alloc fails, try it again for a single page to
		 * avoid having to deal with partial page reads.  This emulates
		 * what do_mpage_read_folio does.
		 */
		if (!bio)
			bio = bio_alloc(iomap->bdev, 1, REQ_OP_READ, orig_gfp);
		if (ctx->rac)
			bio->bi_opf |= REQ_RAHEAD;
		bio->bi_iter.bi_sector = sector;
		bio->bi_end_io = iomap_read_end_io;
		bio_add_folio_nofail(bio, folio, plen, poff);
		/* Leave the (possibly new) bio pending for further merging. */
		ctx->read_ctx = bio;
	}
	return 0;
}
70 
/*
 * Default read ops: build REQ_OP_READ bios against the iomap's block
 * device and submit them through the block layer.
 */
const struct iomap_read_ops iomap_bio_read_ops = {
	.read_folio_range = iomap_bio_read_folio_range,
	.submit_read = iomap_bio_submit_read,
};
EXPORT_SYMBOL_GPL(iomap_bio_read_ops);
76 
/*
 * Synchronously read @len bytes at @pos into @folio using a single
 * on-stack bio backed by one bio_vec.  Returns 0 or a negative errno.
 */
int iomap_bio_read_folio_range_sync(const struct iomap_iter *iter,
		struct folio *folio, loff_t pos, size_t len)
{
	const struct iomap *srcmap = iomap_iter_srcmap(iter);
	size_t folio_off = offset_in_folio(folio, pos);
	struct bio_vec vec;
	struct bio b;

	bio_init(&b, srcmap->bdev, &vec, 1, REQ_OP_READ);
	b.bi_iter.bi_sector = iomap_sector(srcmap, pos);
	bio_add_folio_nofail(&b, folio, len, folio_off);
	return submit_bio_wait(&b);
}
89