/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_IO_READ_H
#define _BCACHEFS_IO_READ_H

#include "bkey_buf.h"
#include "reflink.h"

struct bch_read_bio {
	struct bch_fs		*c;
	u64			start_time;
	u64			submit_time;

	/*
	 * Reads will often have to be split, and if the extent being read from
	 * was checksummed or compressed we'll also have to allocate bounce
	 * buffers and copy the data back into the original bio.
	 *
	 * If we didn't have to split, we have to save and restore the original
	 * bi_end_io - @split below indicates which:
	 */
	union {
	struct bch_read_bio	*parent;
	bio_end_io_t		*end_io;
	};

	/*
	 * Saved copy of bio->bi_iter, from submission time - allows us to
	 * resubmit on IO error, and also to copy data back to the original bio
	 * when we're bouncing:
	 */
	struct bvec_iter	bvec_iter;

	unsigned		offset_into_extent;

	u16			flags;
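	/*
	 * Per-read state bits; _state aliases the whole bitfield so it can be
	 * cleared with a single store (see rbio_init()):
	 */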
	union {
	struct {
	u16			bounce:1,
				split:1,
				kmalloc:1,
				have_ioref:1,
				narrow_crcs:1,
				hole:1,
				retry:2,
				context:2;
	};
	u16			_state;
	};

	struct bch_devs_list	devs_have;

	struct extent_ptr_decoded pick;

	/*
	 * pos we read from - different from data_pos for indirect extents:
	 */
	u32			subvol;
	struct bpos		read_pos;

	/*
	 * start pos of data we read (may not be pos of data we want) - for
	 * promote, narrow extents paths:
	 */
	enum btree_id		data_btree;
	struct bpos		data_pos;
	struct bversion		version;

	struct promote_op	*promote;

	struct bch_io_opts	opts;

	struct work_struct	work;

	struct bio		bio;
};

#define to_rbio(_bio)	container_of((_bio), struct bch_read_bio, bio)

struct bch_devs_mask;
struct cache_promote_op;
struct extent_ptr_decoded;

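/*
 * If @extent is a KEY_TYPE_reflink_p key, look up the indirect extent it
 * points to and replace @extent with it; *data_btree is set to the btree the
 * data actually lives in. Keys that aren't reflink pointers are left
 * untouched.
 */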
static inline int bch2_read_indirect_extent(struct btree_trans *trans,
					    enum btree_id *data_btree,
					    s64 *offset_into_extent,
					    struct bkey_buf *extent)
{
	if (extent->k->k.type != KEY_TYPE_reflink_p)
		return 0;

	*data_btree = BTREE_ID_reflink;
	struct btree_iter iter;
	struct bkey_s_c k = bch2_lookup_indirect_extent(trans, &iter,
						offset_into_extent,
						bkey_i_to_s_c_reflink_p(extent->k),
						true, 0);
	int ret = bkey_err(k);
	if (ret)
		return ret;

	if (bkey_deleted(k.k)) {
		bch2_trans_iter_exit(trans, &iter);
		return -BCH_ERR_missing_indirect_extent;
	}

	bch2_bkey_buf_reassemble(extent, trans->c, k);
	bch2_trans_iter_exit(trans, &iter);
	return 0;
}

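/*
 * Flags passed to __bch2_read_extent()/__bch2_read(); the ones below the
 * internal marker are set by the read path itself, not by callers:
 */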
enum bch_read_flags {
	BCH_READ_RETRY_IF_STALE		= 1 << 0,
	BCH_READ_MAY_PROMOTE		= 1 << 1,
	BCH_READ_USER_MAPPED		= 1 << 2,
	BCH_READ_NODECODE		= 1 << 3,
	BCH_READ_LAST_FRAGMENT		= 1 << 4,

	/* internal: */
	BCH_READ_MUST_BOUNCE		= 1 << 5,
	BCH_READ_MUST_CLONE		= 1 << 6,
	BCH_READ_IN_RETRY		= 1 << 7,
};

int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
		       struct bvec_iter, struct bpos, enum btree_id,
		       struct bkey_s_c, unsigned,
		       struct bch_io_failures *, unsigned);

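/*
 * Convenience wrapper around __bch2_read_extent(): reads at the bio's own
 * bvec_iter position, with no accumulated failure state (the bch_io_failures
 * argument is NULL).
 */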
static inline void bch2_read_extent(struct btree_trans *trans,
			struct bch_read_bio *rbio, struct bpos read_pos,
			enum btree_id data_btree, struct bkey_s_c k,
			unsigned offset_into_extent, unsigned flags)
{
	__bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
			   data_btree, k, offset_into_extent, NULL, flags);
}

void __bch2_read(struct bch_fs *, struct bch_read_bio *, struct bvec_iter,
		 subvol_inum, struct bch_io_failures *, unsigned flags);

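/*
 * High-level read entry point for a (subvolume, inode) pair: fills in the
 * rbio's filesystem pointer, start time and subvolume, then submits the read
 * with the default set of caller-visible flags.
 */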
static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
			     subvol_inum inum)
{
	struct bch_io_failures failed = { .nr = 0 };

	BUG_ON(rbio->_state);

	rbio->c = c;
	rbio->start_time = local_clock();
	rbio->subvol = inum.subvol;

	__bch2_read(c, rbio, rbio->bio.bi_iter, inum, &failed,
		    BCH_READ_RETRY_IF_STALE|
		    BCH_READ_MAY_PROMOTE|
		    BCH_READ_USER_MAPPED);
}

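/*
 * Initialize the bch_read_bio embedding @bio: only the per-read state bits,
 * the promote pointer and the IO options are set here; the rest of the bio
 * setup is up to the caller.
 */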
static inline struct bch_read_bio *rbio_init(struct bio *bio,
					     struct bch_io_opts opts)
{
	struct bch_read_bio *rbio = to_rbio(bio);

	rbio->_state	= 0;
	rbio->promote	= NULL;
	rbio->opts	= opts;
	return rbio;
}
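
/*
 * Minimal usage sketch (illustrative assumptions, not taken from this file):
 * the bio must be embedded in a struct bch_read_bio - e.g. allocated from a
 * bioset with enough front_pad - and bi_iter/bi_end_io set up by the caller
 * before submission:
 *
 *	struct bch_read_bio *rbio = rbio_init(bio, io_opts);
 *	bch2_read(c, rbio, inum);
 */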

void bch2_fs_io_read_exit(struct bch_fs *);
int bch2_fs_io_read_init(struct bch_fs *);

#endif /* _BCACHEFS_IO_READ_H */