xref: /linux/fs/erofs/fileio.c (revision afcefc58fdfd687e3a9a9bef0be5846b96f710b7)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2024, Alibaba Cloud
4  */
5 #include "internal.h"
6 #include <trace/events/erofs.h>
7 
/*
 * A file-backed read request: a kiocb issued against the backing file
 * plus an embedded bio tracking the folios being filled.  bvecs[] is the
 * bio's inline vector storage (up to 16 segments); container_of() is used
 * to recover the request from either the bio or the iocb.
 */
struct erofs_fileio_rq {
	struct bio_vec bvecs[16];	/* inline segment array for @bio */
	struct bio bio;			/* tracks target folios and status */
	struct kiocb iocb;		/* I/O control block for the backing file */
	struct super_block *sb;		/* erofs superblock (for mount options) */
};
14 
/* Per-call iteration state shared across folios of one read(ahead) pass */
struct erofs_fileio {
	struct erofs_map_blocks map;	/* cached logical extent mapping */
	struct erofs_map_dev dev;	/* device mapping of the extent being batched */
	struct erofs_fileio_rq *rq;	/* in-flight request being built, or NULL */
};
20 
/*
 * Completion handler for the kiocb embedded in an erofs_fileio_rq.
 * @ret is the number of bytes read on success or a negative errno.
 * Called either asynchronously by the backing filesystem or directly
 * from erofs_fileio_rq_submit() on synchronous completion.
 */
static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
{
	struct erofs_fileio_rq *rq =
			container_of(iocb, struct erofs_fileio_rq, iocb);
	struct folio_iter fi;

	if (ret > 0) {
		/* short read: zero-fill the unread tail of the bio */
		if (ret != rq->bio.bi_iter.bi_size) {
			bio_advance(&rq->bio, ret);
			zero_fill_bio(&rq->bio);
		}
		ret = 0;
	}
	if (rq->bio.bi_end_io) {
		/*
		 * A caller installed its own bio completion (presumably the
		 * compressed-data path -- confirm against callers); propagate
		 * any error via bi_status and let it finish the folios.
		 */
		if (ret < 0 && !rq->bio.bi_status)
			rq->bio.bi_status = errno_to_blk_status(ret);
		rq->bio.bi_end_io(&rq->bio);
	} else {
		/* uncompressed path: finish each attached folio directly */
		bio_for_each_folio_all(fi, &rq->bio) {
			DBG_BUGON(folio_test_uptodate(fi.folio));
			erofs_onlinefolio_end(fi.folio, ret);
		}
	}
	bio_uninit(&rq->bio);
	kfree(rq);
}
47 
/*
 * Issue the batched request @rq against the backing file via its kiocb.
 * A NULL @rq is a no-op.  If vfs_iocb_iter_read() completes synchronously
 * (any return other than -EIOCBQUEUED), the completion handler is invoked
 * directly; otherwise it runs asynchronously via iocb->ki_complete.
 */
static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq)
{
	const struct cred *old_cred;
	struct iov_iter iter;
	int ret;

	if (!rq)
		return;
	/* the start sector was recorded at alloc time; turn it into a byte offset */
	rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	rq->iocb.ki_ioprio = get_current_ioprio();
	rq->iocb.ki_complete = erofs_fileio_ki_complete;
	/* use direct I/O only if the mount opted in and the file supports it */
	if (test_opt(&EROFS_SB(rq->sb)->opt, DIRECT_IO) &&
	    rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT)
		rq->iocb.ki_flags = IOCB_DIRECT;
	iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
		      rq->bio.bi_iter.bi_size);
	/* read with the backing file's credentials, not the current task's */
	old_cred = override_creds(rq->iocb.ki_filp->f_cred);
	ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
	revert_creds(old_cred);
	if (ret != -EIOCBQUEUED)
		erofs_fileio_ki_complete(&rq->iocb, ret);
}
70 
71 static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev)
72 {
73 	struct erofs_fileio_rq *rq = kzalloc(sizeof(*rq),
74 					     GFP_KERNEL | __GFP_NOFAIL);
75 
76 	bio_init(&rq->bio, NULL, rq->bvecs, ARRAY_SIZE(rq->bvecs), REQ_OP_READ);
77 	rq->iocb.ki_filp = mdev->m_dif->file;
78 	rq->sb = mdev->m_sb;
79 	return rq;
80 }
81 
82 struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev)
83 {
84 	return &erofs_fileio_rq_alloc(mdev)->bio;
85 }
86 
87 void erofs_fileio_submit_bio(struct bio *bio)
88 {
89 	return erofs_fileio_rq_submit(container_of(bio, struct erofs_fileio_rq,
90 						   bio));
91 }
92 
/*
 * Read one folio by walking its byte range: each chunk is mapped with
 * erofs_map_blocks() and served either from inline metadata, by
 * zero-filling (holes), or by attaching it to a batched read request
 * (@io->rq) against the backing file.  The final request, if any, is
 * left in @io->rq for the caller to submit.
 * Returns 0 on success or a negative errno.
 */
static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio)
{
	struct inode *inode = folio_inode(folio);
	struct erofs_map_blocks *map = &io->map;
	unsigned int cur = 0, end = folio_size(folio), len, attached = 0;
	loff_t pos = folio_pos(folio), ofs;
	struct iov_iter iter;
	struct bio_vec bv;
	int err = 0;

	erofs_onlinefolio_init(folio);
	while (cur < end) {
		/* refresh the cached mapping if the current offset falls outside it */
		if (!in_range(pos + cur, map->m_la, map->m_llen)) {
			map->m_la = pos + cur;
			map->m_llen = end - cur;
			err = erofs_map_blocks(inode, map);
			if (err)
				break;
		}

		/* @ofs: offset of the current position within the mapped extent */
		ofs = folio_pos(folio) + cur - map->m_la;
		len = min_t(loff_t, map->m_llen - ofs, end - cur);
		if (map->m_flags & EROFS_MAP_META) {
			/* inline (tail-packed) data: copy from the metadata buffer */
			struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
			void *src;

			src = erofs_read_metabuf(&buf, inode->i_sb,
						 map->m_pa + ofs, true);
			if (IS_ERR(src)) {
				err = PTR_ERR(src);
				break;
			}
			bvec_set_folio(&bv, folio, len, cur);
			iov_iter_bvec(&iter, ITER_DEST, &bv, 1, len);
			if (copy_to_iter(src, len, &iter) != len) {
				erofs_put_metabuf(&buf);
				err = -EIO;
				break;
			}
			erofs_put_metabuf(&buf);
		} else if (!(map->m_flags & EROFS_MAP_MAPPED)) {
			/* hole: no backing data, fill with zeroes */
			folio_zero_segment(folio, cur, cur + len);
			attached = 0;
		} else {
			/* flush the pending request if this chunk isn't contiguous with it */
			if (io->rq && (map->m_pa + ofs != io->dev.m_pa ||
				       map->m_deviceid != io->dev.m_deviceid)) {
io_retry:
				erofs_fileio_rq_submit(io->rq);
				io->rq = NULL;
			}

			if (!io->rq) {
				/* start a new request at the mapped device address */
				io->dev = (struct erofs_map_dev) {
					.m_pa = io->map.m_pa + ofs,
					.m_deviceid = io->map.m_deviceid,
				};
				err = erofs_map_dev(inode->i_sb, &io->dev);
				if (err)
					break;
				io->rq = erofs_fileio_rq_alloc(&io->dev);
				io->rq->bio.bi_iter.bi_sector =
					(io->dev.m_dif->fsoff + io->dev.m_pa) >> 9;
				attached = 0;
			}
			/* bio full: submit it and retry this chunk with a fresh request */
			if (!bio_add_folio(&io->rq->bio, folio, len, cur))
				goto io_retry;
			/* take one folio reference per request it is attached to */
			if (!attached++)
				erofs_onlinefolio_split(folio);
			io->dev.m_pa += len;
		}
		cur += len;
	}
	/* drop the initial reference; marks the folio uptodate once all I/O lands */
	erofs_onlinefolio_end(folio, err);
	return err;
}
168 
169 static int erofs_fileio_read_folio(struct file *file, struct folio *folio)
170 {
171 	struct erofs_fileio io = {};
172 	int err;
173 
174 	trace_erofs_read_folio(folio, true);
175 	err = erofs_fileio_scan_folio(&io, folio);
176 	erofs_fileio_rq_submit(io.rq);
177 	return err;
178 }
179 
180 static void erofs_fileio_readahead(struct readahead_control *rac)
181 {
182 	struct inode *inode = rac->mapping->host;
183 	struct erofs_fileio io = {};
184 	struct folio *folio;
185 	int err;
186 
187 	trace_erofs_readahead(inode, readahead_index(rac),
188 			      readahead_count(rac), true);
189 	while ((folio = readahead_folio(rac))) {
190 		err = erofs_fileio_scan_folio(&io, folio);
191 		if (err && err != -EINTR)
192 			erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
193 				  folio->index, EROFS_I(inode)->nid);
194 	}
195 	erofs_fileio_rq_submit(io.rq);
196 }
197 
/* address_space operations for erofs inodes on file-backed mounts */
const struct address_space_operations erofs_fileio_aops = {
	.read_folio = erofs_fileio_read_folio,
	.readahead = erofs_fileio_readahead,
};
202