// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright (C) 2024, Alibaba Cloud
 */
#include "internal.h"
#include <trace/events/erofs.h>
7
/*
 * One in-flight read request against the backing file.  Allocated in
 * erofs_fileio_rq_alloc() and freed by erofs_fileio_ki_complete() once the
 * kiocb finishes.
 */
struct erofs_fileio_rq {
	struct bio_vec bvecs[BIO_MAX_VECS];	/* segments backing @bio */
	struct bio bio;			/* folio bookkeeping only; no bdev attached */
	struct kiocb iocb;		/* kiocb issued on the backing file */
	struct super_block *sb;		/* superblock of this erofs mount */
};
14
/*
 * Per-read iteration state carried across folios so that physically
 * contiguous extents can be merged into a single request.
 */
struct erofs_fileio {
	struct erofs_map_blocks map;	/* current logical extent mapping */
	struct erofs_map_dev dev;	/* device/offset the mapping resolves to */
	struct erofs_fileio_rq *rq;	/* request being filled, or NULL */
};
20
/*
 * Completion handler for a file-backed read request.
 *
 * A short read (0 < ret < bi_size) zero-fills the unread tail so every
 * attached folio ends up fully initialized; any positive byte count is then
 * treated as overall success (ret = 0).  If the bio carries an end_io
 * callback (set by whoever obtained the bio via erofs_fileio_bio_alloc()),
 * delegate completion to it; otherwise finish each attached folio subrange
 * directly.  The request is freed here in both cases.
 */
static void erofs_fileio_ki_complete(struct kiocb *iocb, long ret)
{
	struct erofs_fileio_rq *rq =
			container_of(iocb, struct erofs_fileio_rq, iocb);
	struct folio_iter fi;

	if (ret > 0) {
		if (ret != rq->bio.bi_iter.bi_size) {
			/* short read: zero everything past the bytes we got */
			bio_advance(&rq->bio, ret);
			zero_fill_bio(&rq->bio);
		}
		ret = 0;
	}
	if (rq->bio.bi_end_io) {
		rq->bio.bi_end_io(&rq->bio);
	} else {
		bio_for_each_folio_all(fi, &rq->bio) {
			DBG_BUGON(folio_test_uptodate(fi.folio));
			erofs_onlinefolio_end(fi.folio, ret);
		}
	}
	bio_uninit(&rq->bio);
	kfree(rq);
}
45
/*
 * Submit a prepared request as a kiocb read on the backing file.
 *
 * The embedded bio only records which bvecs/folios make up the read; the
 * actual I/O is a vfs_iocb_iter_read() on the file, optionally O_DIRECT when
 * the DIRECT_IO mount option is set and the file supports it.  If the read
 * completes synchronously (any return other than -EIOCBQUEUED), run the
 * completion handler here; otherwise it fires asynchronously via ki_complete.
 * A NULL @rq is a no-op so callers can submit unconditionally.
 */
static void erofs_fileio_rq_submit(struct erofs_fileio_rq *rq)
{
	struct iov_iter iter;
	int ret;

	if (!rq)
		return;
	rq->iocb.ki_pos = rq->bio.bi_iter.bi_sector << SECTOR_SHIFT;
	rq->iocb.ki_ioprio = get_current_ioprio();
	rq->iocb.ki_complete = erofs_fileio_ki_complete;
	if (test_opt(&EROFS_SB(rq->sb)->opt, DIRECT_IO) &&
	    rq->iocb.ki_filp->f_mode & FMODE_CAN_ODIRECT)
		rq->iocb.ki_flags = IOCB_DIRECT;
	iov_iter_bvec(&iter, ITER_DEST, rq->bvecs, rq->bio.bi_vcnt,
		      rq->bio.bi_iter.bi_size);
	ret = vfs_iocb_iter_read(rq->iocb.ki_filp, &rq->iocb, &iter);
	if (ret != -EIOCBQUEUED)
		erofs_fileio_ki_complete(&rq->iocb, ret);
}
65
/*
 * Allocate and initialize a read request targeting @mdev.
 *
 * __GFP_NOFAIL guarantees the allocation never returns NULL, which is why
 * callers use the result without checking.  The embedded bio is initialized
 * without a block device — it is used purely for folio bookkeeping.
 */
static struct erofs_fileio_rq *erofs_fileio_rq_alloc(struct erofs_map_dev *mdev)
{
	struct erofs_fileio_rq *rq = kzalloc(sizeof(*rq),
					     GFP_KERNEL | __GFP_NOFAIL);

	bio_init(&rq->bio, NULL, rq->bvecs, BIO_MAX_VECS, REQ_OP_READ);
	rq->iocb.ki_filp = mdev->m_dif->file;
	rq->sb = mdev->m_sb;
	return rq;
}
76
erofs_fileio_bio_alloc(struct erofs_map_dev * mdev)77 struct bio *erofs_fileio_bio_alloc(struct erofs_map_dev *mdev)
78 {
79 return &erofs_fileio_rq_alloc(mdev)->bio;
80 }
81
erofs_fileio_submit_bio(struct bio * bio)82 void erofs_fileio_submit_bio(struct bio *bio)
83 {
84 return erofs_fileio_rq_submit(container_of(bio, struct erofs_fileio_rq,
85 bio));
86 }
87
erofs_fileio_scan_folio(struct erofs_fileio * io,struct folio * folio)88 static int erofs_fileio_scan_folio(struct erofs_fileio *io, struct folio *folio)
89 {
90 struct inode *inode = folio_inode(folio);
91 struct erofs_map_blocks *map = &io->map;
92 unsigned int cur = 0, end = folio_size(folio), len, attached = 0;
93 loff_t pos = folio_pos(folio), ofs;
94 struct iov_iter iter;
95 struct bio_vec bv;
96 int err = 0;
97
98 erofs_onlinefolio_init(folio);
99 while (cur < end) {
100 if (!in_range(pos + cur, map->m_la, map->m_llen)) {
101 map->m_la = pos + cur;
102 map->m_llen = end - cur;
103 err = erofs_map_blocks(inode, map);
104 if (err)
105 break;
106 }
107
108 ofs = folio_pos(folio) + cur - map->m_la;
109 len = min_t(loff_t, map->m_llen - ofs, end - cur);
110 if (map->m_flags & EROFS_MAP_META) {
111 struct erofs_buf buf = __EROFS_BUF_INITIALIZER;
112 void *src;
113
114 src = erofs_read_metabuf(&buf, inode->i_sb,
115 map->m_pa + ofs, EROFS_KMAP);
116 if (IS_ERR(src)) {
117 err = PTR_ERR(src);
118 break;
119 }
120 bvec_set_folio(&bv, folio, len, cur);
121 iov_iter_bvec(&iter, ITER_DEST, &bv, 1, len);
122 if (copy_to_iter(src, len, &iter) != len) {
123 erofs_put_metabuf(&buf);
124 err = -EIO;
125 break;
126 }
127 erofs_put_metabuf(&buf);
128 } else if (!(map->m_flags & EROFS_MAP_MAPPED)) {
129 folio_zero_segment(folio, cur, cur + len);
130 attached = 0;
131 } else {
132 if (io->rq && (map->m_pa + ofs != io->dev.m_pa ||
133 map->m_deviceid != io->dev.m_deviceid)) {
134 io_retry:
135 erofs_fileio_rq_submit(io->rq);
136 io->rq = NULL;
137 }
138
139 if (!io->rq) {
140 io->dev = (struct erofs_map_dev) {
141 .m_pa = io->map.m_pa + ofs,
142 .m_deviceid = io->map.m_deviceid,
143 };
144 err = erofs_map_dev(inode->i_sb, &io->dev);
145 if (err)
146 break;
147 io->rq = erofs_fileio_rq_alloc(&io->dev);
148 io->rq->bio.bi_iter.bi_sector = io->dev.m_pa >> 9;
149 attached = 0;
150 }
151 if (!attached++)
152 erofs_onlinefolio_split(folio);
153 if (!bio_add_folio(&io->rq->bio, folio, len, cur))
154 goto io_retry;
155 io->dev.m_pa += len;
156 }
157 cur += len;
158 }
159 erofs_onlinefolio_end(folio, err);
160 return err;
161 }
162
erofs_fileio_read_folio(struct file * file,struct folio * folio)163 static int erofs_fileio_read_folio(struct file *file, struct folio *folio)
164 {
165 struct erofs_fileio io = {};
166 int err;
167
168 trace_erofs_read_folio(folio, true);
169 err = erofs_fileio_scan_folio(&io, folio);
170 erofs_fileio_rq_submit(io.rq);
171 return err;
172 }
173
erofs_fileio_readahead(struct readahead_control * rac)174 static void erofs_fileio_readahead(struct readahead_control *rac)
175 {
176 struct inode *inode = rac->mapping->host;
177 struct erofs_fileio io = {};
178 struct folio *folio;
179 int err;
180
181 trace_erofs_readpages(inode, readahead_index(rac),
182 readahead_count(rac), true);
183 while ((folio = readahead_folio(rac))) {
184 err = erofs_fileio_scan_folio(&io, folio);
185 if (err && err != -EINTR)
186 erofs_err(inode->i_sb, "readahead error at folio %lu @ nid %llu",
187 folio->index, EROFS_I(inode)->nid);
188 }
189 erofs_fileio_rq_submit(io.rq);
190 }
191
/* Address-space operations for uncompressed file-backed erofs mounts. */
const struct address_space_operations erofs_fileio_aops = {
	.read_folio = erofs_fileio_read_folio,
	.readahead = erofs_fileio_readahead,
};
196