xref: /linux/fs/erofs/fileio.c (revision 80bb50e2d459213cccff3111d5ef98ed4238c0d5)
1 // SPDX-License-Identifier: GPL-2.0-or-later
2 /*
3  * Copyright (C) 2024, Alibaba Cloud
4  */
5 #include "internal.h"
6 #include <trace/events/erofs.h>
7 
/*
 * One in-flight file-backed read request: a bio built over an inline
 * bvec array plus the kiocb used to issue it against the backing file.
 */
struct erofs_fileio_rq {
	struct bio_vec bvecs[16];	/* inline vector; caps a request at 16 segments */
	struct bio bio;			/* embedded bio filled by the caller */
	struct kiocb iocb;		/* kiocb driving vfs_iocb_iter_read() */
	struct super_block *sb;		/* superblock, for mount options at submit time */
	refcount_t ref;			/* set to 2 at alloc: submit path + completion path */
};
15 
/* Per-scan state carried across folios: cached mappings and the open request. */
struct erofs_fileio {
	struct erofs_map_blocks map;	/* cached logical extent mapping */
	struct erofs_map_dev dev;	/* device mapping of the current request */
	struct erofs_fileio_rq *rq;	/* request being filled, or NULL if none */
};
21 
/*
 * Completion callback for a file-backed read request.  Runs either
 * asynchronously when the backing read was queued, or synchronously from
 * erofs_fileio_rq_submit() when the read returned immediately.
 * @ret is the number of bytes read, or a negative errno.
 */
static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
{
	struct erofs_fileio_rq *rq =
			container_of(iocb, struct erofs_fileio_rq, iocb);
	struct folio_iter fi;

	/* a short read cannot satisfy the request; treat it as an I/O error */
	if (ret >= 0 && ret != rq->bio.bi_iter.bi_size)
		ret = -EIO;
	if (!rq->bio.bi_end_io) {
		/* no chained completion: finish each attached folio directly */
		bio_for_each_folio_all(fi, &rq->bio) {
			DBG_BUGON(folio_test_uptodate(fi.folio));
			erofs_onlinefolio_end(fi.folio, ret < 0, false);
		}
	} else if (ret < 0 && !rq->bio.bi_status) {
		/* record only the first error in the bio status */
		rq->bio.bi_status = errno_to_blk_status(ret);
	}
	bio_endio(&rq->bio);
	bio_uninit(&rq->bio);
	/* drop the completion-side ref; free only after the submitter drops its */
	if (refcount_dec_and_test(&rq->ref))
		kfree(rq);
}
43 
/*
 * Issue a built-up read request against the backing file.  Accepts a
 * NULL @rq so callers can unconditionally flush any pending request.
 * Completion, synchronous or asynchronous, runs erofs_fileio_ki_complete().
 */
static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq)
{
	struct iov_iter iter;
	ssize_t ret;

	if (!rq)
		return;
	/* the bio tracks the device position in sectors; convert to bytes */
	rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	rq->iocb.ki_ioprio = get_current_ioprio();
	rq->iocb.ki_complete = erofs_fileio_ki_complete;
	/* use direct I/O only when requested by mount option and supported */
	if (test_opt(&EROFS_SB(rq->sb)->opt, DIRECT_IO) &&
	    rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT)
		rq->iocb.ki_flags = IOCB_DIRECT;
	iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
		      rq->bio.bi_iter.bi_size);
	/* read with the backing file's credentials, not the current task's */
	scoped_with_creds(rq->iocb.ki_filp->f_cred)
		ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
	/* anything but -EIOCBQUEUED completed (or failed) synchronously */
	if (ret != -EIOCBQUEUED)
		erofs_fileio_ki_complete(&rq->iocb, ret);
	/* drop the submit-side ref; kfree happens after completion has run */
	if (refcount_dec_and_test(&rq->ref))
		kfree(rq);
}
66 
67 static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev)
68 {
69 	struct erofs_fileio_rq *rq = kzalloc_obj(*rq, GFP_KERNEL | __GFP_NOFAIL);
70 
71 	bio_init(&rq->bio, NULL, rq->bvecs, ARRAY_SIZE(rq->bvecs), REQ_OP_READ);
72 	rq->iocb.ki_filp = mdev->m_dif->file;
73 	rq->sb = mdev->m_sb;
74 	refcount_set(&rq->ref, 2);
75 	return rq;
76 }
77 
78 struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev)
79 {
80 	return &erofs_fileio_rq_alloc(mdev)->bio;
81 }
82 
83 void erofs_fileio_submit_bio(struct bio *bio)
84 {
85 	return erofs_fileio_rq_submit(container_of(bio, struct erofs_fileio_rq,
86 						   bio));
87 }
88 
/*
 * Fill one folio of @inode by walking it extent by extent: inline
 * metadata extents are copied out of the metabuf, unmapped ranges are
 * zero-filled, and mapped data is accumulated into @io->rq bios which
 * are submitted whenever contiguity breaks or the bvec array fills.
 * Returns 0 on success or a negative errno.
 */
static int erofs_fileio_scan_folio(struct erofs_fileio *io,
				   struct inode *inode, struct folio *folio)
{
	struct erofs_map_blocks *map = &io->map;
	unsigned int cur = 0, end = folio_size(folio), len, attached = 0;
	loff_t pos = folio_pos(folio), ofs;
	int err = 0;

	erofs_onlinefolio_init(folio);
	while (cur < end) {
		/* refresh the cached mapping once we step past its extent */
		if (!in_range(pos + cur, map->m_la, map->m_llen)) {
			map->m_la = pos + cur;
			map->m_llen = end - cur;
			err = erofs_map_blocks(inode, map);
			if (err)
				break;
		}

		/* offset into the current extent and bytes usable from it */
		ofs = folio_pos(folio) + cur - map->m_la;
		len = min_t(loff_t, map->m_llen - ofs, end - cur);
		if (map->m_flags & EROFS_MAP_META) {
			/* inline metadata extent: copy from the metabuf */
			struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
			void *src;

			src = erofs_read_metabuf(&buf, inode->i_sb,
				map->m_pa + ofs, erofs_inode_in_metabox(inode));
			if (IS_ERR(src)) {
				err = PTR_ERR(src);
				break;
			}
			memcpy_to_folio(folio, cur, src, len);
			erofs_put_metabuf(&buf);
		} else if (!(map->m_flags & EROFS_MAP_MAPPED)) {
			/* hole: zero-fill and force a new split for the next rq */
			folio_zero_segment(folio, cur, cur + len);
			attached = 0;
		} else {
			/* flush the pending rq if this extent isn't contiguous */
			if (io->rq && (map->m_pa + ofs != io->dev.m_pa ||
				       map->m_deviceid != io->dev.m_deviceid)) {
io_retry:			/* also entered below when the rq's bio is full */
				erofs_fileio_rq_submit(io->rq);
				io->rq = NULL;
			}

			if (!io->rq) {
				/* start a new rq at this extent's device address */
				io->dev = (struct erofs_map_dev) {
					.m_pa = io->map.m_pa + ofs,
					.m_deviceid = io->map.m_deviceid,
				};
				err = erofs_map_dev(inode->i_sb, &io->dev);
				if (err)
					break;
				io->rq = erofs_fileio_rq_alloc(&io->dev);
				io->rq->bio.bi_iter.bi_sector =
					(io->dev.m_dif->fsoff + io->dev.m_pa) >> 9;
				attached = 0;
			}
			/* bvec array exhausted: submit and retry with a new rq */
			if (!bio_add_folio(&io->rq->bio, folio, len, cur))
				goto io_retry;
			/* take one folio sub-reference per rq attachment */
			if (!attached++)
				erofs_onlinefolio_split(folio);
			io->dev.m_pa += len;
		}
		cur += len;
	}
	erofs_onlinefolio_end(folio, err, false);
	return err;
}
156 
157 static int erofs_fileio_read_folio(struct file *file, struct folio *folio)
158 {
159 	bool need_iput;
160 	struct inode *realinode = erofs_real_inode(folio_inode(folio), &need_iput);
161 	struct erofs_fileio io = {};
162 	int err;
163 
164 	trace_erofs_read_folio(realinode, folio, true);
165 	err = erofs_fileio_scan_folio(&io, realinode, folio);
166 	erofs_fileio_rq_submit(io.rq);
167 	if (need_iput)
168 		iput(realinode);
169 	return err;
170 }
171 
172 static void erofs_fileio_readahead(struct readahead_control *rac)
173 {
174 	bool need_iput;
175 	struct inode *realinode = erofs_real_inode(rac->mapping->host, &need_iput);
176 	struct erofs_fileio io = {};
177 	struct folio *folio;
178 	int err;
179 
180 	trace_erofs_readahead(realinode, readahead_index(rac),
181 			      readahead_count(rac), true);
182 	while ((folio = readahead_folio(rac))) {
183 		err = erofs_fileio_scan_folio(&io, realinode, folio);
184 		if (err && err != -EINTR)
185 			erofs_err(realinode->i_sb, "readahead error at folio %lu @ nid %llu",
186 				  folio->index, EROFS_I(realinode)->nid);
187 	}
188 	erofs_fileio_rq_submit(io.rq);
189 	if (need_iput)
190 		iput(realinode);
191 }
192 
/* Address-space ops for file-backed erofs inodes; read paths only. */
const struct address_space_operations erofs_fileio_aops = {
	.read_folio = erofs_fileio_read_folio,
	.readahead = erofs_fileio_readahead,
};
197