xref: /linux/fs/erofs/fileio.c (revision c532de5a67a70f8533d495f8f2aaa9a0491c3ad0)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2024, Alibaba Cloud
4  */
5 #include "internal.h"
6 #include <trace/events/erofs.h>
7 
/*
 * One file-backed read request: an inline bio (backed by the inline bvec
 * array) plus the kiocb used to issue the read against the backing file.
 * Allocated by erofs_fileio_rq_alloc(), freed in erofs_fileio_ki_complete().
 */
struct erofs_fileio_rq {
	struct bio_vec bvecs[BIO_MAX_VECS];	/* inline bvec storage for @bio */
	struct bio bio;				/* describes the folios to fill */
	struct kiocb iocb;			/* kiocb driving the backing-file read */
};
13 
/*
 * Per-operation read state shared across folios: the cached logical block
 * map, the resolved device mapping, and the request currently being batched.
 */
struct erofs_fileio {
	struct erofs_map_blocks map;	/* cached logical extent mapping */
	struct erofs_map_dev dev;	/* resolved device/file mapping; m_pa tracks
					 * the next expected contiguous position */
	struct erofs_fileio_rq *rq;	/* in-flight request being built, or NULL */
};
19 
/*
 * Completion callback for a file-backed read kiocb.
 *
 * @iocb: kiocb embedded in an erofs_fileio_rq
 * @ret:  number of bytes read, or a negative errno
 *
 * A short read (0 < ret < bi_size) zero-fills the unread tail of the bio,
 * then counts as success.  The request and its embedded bio are always
 * torn down and freed here, whether called synchronously from
 * erofs_fileio_rq_submit() or asynchronously on I/O completion.
 */
static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
{
	struct erofs_fileio_rq *rq =
			container_of(iocb, struct erofs_fileio_rq, iocb);
	struct folio_iter fi;

	if (ret > 0) {
		if (ret != rq->bio.bi_iter.bi_size) {
			/* short read: zero whatever the file couldn't supply */
			bio_advance(&rq->bio, ret);
			zero_fill_bio(&rq->bio);
		}
		ret = 0;
	}
	if (rq->bio.bi_end_io) {
		/*
		 * A caller-installed bio completion takes over folio
		 * handling entirely (NOTE(review): presumably set by users
		 * other than the plain folio read paths below — confirm).
		 */
		rq->bio.bi_end_io(&rq->bio);
	} else {
		/* default path: finish each attached folio with the result */
		bio_for_each_folio_all(fi, &rq->bio) {
			DBG_BUGON(folio_test_uptodate(fi.folio));
			erofs_onlinefolio_end(fi.folio, ret);
		}
	}
	bio_uninit(&rq->bio);
	kfree(rq);
}
44 
45 static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq)
46 {
47 	struct iov_iter iter;
48 	int ret;
49 
50 	if (!rq)
51 		return;
52 	rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT;
53 	rq->iocb.ki_ioprio = get_current_ioprio();
54 	rq->iocb.ki_complete = erofs_fileio_ki_complete;
55 	rq->iocb.ki_flags = (rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT) ?
56 				IOCB_DIRECT : 0;
57 	iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
58 		      rq->bio.bi_iter.bi_size);
59 	ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
60 	if (ret != -EIOCBQUEUED)
61 		erofs_fileio_ki_complete(&rq->iocb, ret);
62 }
63 
64 static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev)
65 {
66 	struct erofs_fileio_rq *rq = kzalloc(sizeof(*rq),
67 					     GFP_KERNEL | __GFP_NOFAIL);
68 
69 	bio_init(&rq->bio, NULL, rq->bvecs, BIO_MAX_VECS, REQ_OP_READ);
70 	rq->iocb.ki_filp = mdev->m_fp;
71 	return rq;
72 }
73 
74 struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev)
75 {
76 	return &erofs_fileio_rq_alloc(mdev)->bio;
77 }
78 
79 void erofs_fileio_submit_bio(struct bio *bio)
80 {
81 	return erofs_fileio_rq_submit(container_of(bio, struct erofs_fileio_rq,
82 						   bio));
83 }
84 
/*
 * Read one folio by walking its logical extents.  Each chunk is handled by
 * its mapping type: inline (metadata-packed) data is copied directly,
 * unmapped ranges (holes) are zero-filled, and mapped extents are batched
 * into file-backed read requests via io->rq, which is flushed whenever
 * physical contiguity breaks or the bio fills up.
 *
 * Returns 0 on success or a negative errno.
 */
static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	struct erofs_map_blocks *map = &io->map;
	unsigned int cur = 0, end = folio_size(folio), len, attached = 0;
	loff_t pos = folio_pos(folio), ofs;
	struct iov_iter iter;
	struct bio_vec bv;
	int err = 0;

	erofs_onlinefolio_init(folio);
	while (cur < end) {
		/* refresh the block map unless the cached extent still covers us */
		if (!in_range(pos + cur, map->m_la, map->m_llen)) {
			map->m_la = pos + cur;
			map->m_llen = end - cur;
			err = erofs_map_blocks(inode, map);
			if (err)
				break;
		}

		/* offset of the current position within the mapped extent */
		ofs = folio_pos(folio) + cur - map->m_la;
		/* chunk length: bounded by both the extent and the folio end */
		len = min_t(loff_t, map->m_llen - ofs, end - cur);
		if (map->m_flags & EROFS_MAP_META) {
			/* inline data: copy straight out of the metadata buffer */
			struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
			void *src;

			src = erofs_read_metabuf(&buf, inode->i_sb,
						 map->m_pa + ofs, EROFS_KMAP);
			if (IS_ERR(src)) {
				err = PTR_ERR(src);
				break;
			}
			bvec_set_folio(&bv, folio, len, cur);
			iov_iter_bvec(&iter, ITER_DEST, &bv, 1, len);
			if (copy_to_iter(src, len, &iter) != len) {
				erofs_put_metabuf(&buf);
				err = -EIO;
				break;
			}
			erofs_put_metabuf(&buf);
		} else if (!(map->m_flags & EROFS_MAP_MAPPED)) {
			/* hole: zero-fill this part of the folio */
			folio_zero_segment(folio, cur, cur + len);
			attached = 0;
		} else {
			/*
			 * Flush the pending request if this extent is not
			 * physically contiguous with it or sits on a
			 * different device.  A full bio (bio_add_folio
			 * failure below) jumps here to take the same path.
			 */
			if (io->rq && (map->m_pa + ofs != io->dev.m_pa ||
				       map->m_deviceid != io->dev.m_deviceid)) {
io_retry:
				erofs_fileio_rq_submit(io->rq);
				io->rq = NULL;
			}

			if (!io->rq) {
				/* start a new request at this physical position */
				io->dev = (struct erofs_map_dev) {
					.m_pa = io->map.m_pa + ofs,
					.m_deviceid = io->map.m_deviceid,
				};
				err = erofs_map_dev(inode->i_sb, &io->dev);
				if (err)
					break;
				io->rq = erofs_fileio_rq_alloc(&io->dev);
				io->rq->bio.bi_iter.bi_sector = io->dev.m_pa >> 9;
				attached = 0;
			}
			/*
			 * Split once per request this folio joins
			 * (NOTE(review): presumably a sub-folio refcount —
			 * confirm against erofs_onlinefolio_split()).
			 */
			if (!attached++)
				erofs_onlinefolio_split(folio);
			if (!bio_add_folio(&io->rq->bio, folio, len, cur))
				goto io_retry;
			io->dev.m_pa += len;	/* next expected contiguous pos */
		}
		cur += len;
	}
	/* drop the initial reference taken by erofs_onlinefolio_init() */
	erofs_onlinefolio_end(folio, err);
	return err;
}
159 
160 static int erofs_fileio_read_folio(struct file *file, struct folio *folio)
161 {
162 	struct erofs_fileio io = {};
163 	int err;
164 
165 	trace_erofs_read_folio(folio, true);
166 	err = erofs_fileio_scan_folio(&io, folio);
167 	erofs_fileio_rq_submit(io.rq);
168 	return err;
169 }
170 
171 static void erofs_fileio_readahead(struct readahead_control *rac)
172 {
173 	struct inode *inode = rac->mapping->host;
174 	struct erofs_fileio io = {};
175 	struct folio *folio;
176 	int err;
177 
178 	trace_erofs_readpages(inode, readahead_index(rac),
179 			      readahead_count(rac), true);
180 	while ((folio = readahead_folio(rac))) {
181 		err = erofs_fileio_scan_folio(&io, folio);
182 		if (err && err != -EINTR)
183 			erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
184 				  folio->index, EROFS_I(inode)->nid);
185 	}
186 	erofs_fileio_rq_submit(io.rq);
187 }
188 
/* Address-space operations for file-backed (uncompressed) erofs data. */
const struct address_space_operations erofs_fileio_aops = {
	.read_folio = erofs_fileio_read_folio,
	.readahead = erofs_fileio_readahead,
};
193