xref: /linux/fs/bcachefs/io_read.h (revision 36df6f734a7ad69880c5262543165c47cb57169f)
1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_IO_READ_H
3 #define _BCACHEFS_IO_READ_H
4 
5 #include "bkey_buf.h"
6 #include "btree_iter.h"
7 #include "extents_types.h"
8 #include "reflink.h"
9 
struct bch_read_bio {
	struct bch_fs		*c;
	/* Timestamps for accounting; start_time is set by rbio_init() */
	u64			start_time;
	u64			submit_time;

	/*
	 * Reads will often have to be split, and if the extent being read from
	 * was checksummed or compressed we'll also have to allocate bounce
	 * buffers and copy the data back into the original bio.
	 *
	 * If we didn't have to split, we have to save and restore the original
	 * bi_end_io - @split below indicates which:
	 */
	union {
	struct bch_read_bio	*parent;	/* if @split: rbio we were split from */
	bio_end_io_t		*end_io;	/* else: saved original completion */
	};

	/*
	 * Saved copy of bio->bi_iter, from submission time - allows us to
	 * resubmit on IO error, and also to copy data back to the original bio
	 * when we're bouncing:
	 */
	struct bvec_iter	bvec_iter;

	/* offset of the read within the extent (see __bch2_read_extent()) */
	unsigned		offset_into_extent;

	u16			flags;		/* BCH_READ_* flags, defined below */
	union {
	struct {
	u16			data_update:1,
				promote:1,
				bounce:1,
				split:1,
				have_ioref:1,
				narrow_crcs:1,
				saw_error:1,
				self_healing:1,
				context:2;
	};
	u16			_state;		/* all of the bits above at once */
	};
	s16			ret;		/* result of this read */
#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS
	unsigned		list_idx;	/* cleared by rbio_init*() */
#endif

	/* The extent pointer we are reading from */
	struct extent_ptr_decoded pick;

	/*
	 * pos we read from - different from data_pos for indirect extents:
	 */
	u32			subvol;
	struct bpos		read_pos;

	/*
	 * start pos of data we read (may not be pos of data we want) - for
	 * promote, narrow extents paths:
	 */
	enum btree_id		data_btree;
	struct bpos		data_pos;
	struct bversion		version;

	struct bch_io_opts	opts;

	struct work_struct	work;

	/* The embedded bio (see to_rbio()) */
	struct bio		bio;
};
79 
/* Get the bch_read_bio embedding @_bio (which must be &rbio->bio): */
#define to_rbio(_bio)		container_of((_bio), struct bch_read_bio, bio)

struct bch_devs_mask;
struct cache_promote_op;
struct extent_ptr_decoded;
85 
/*
 * If @extent is a reflink pointer, chase it: look up the indirect extent it
 * points to, switch @data_btree to the reflink btree, adjust
 * @offset_into_extent, and overwrite @extent with the resolved key.
 *
 * Returns 0 on success (including the non-reflink no-op case), or a negative
 * error - missing_indirect_extent if the destination key was deleted.
 */
static inline int bch2_read_indirect_extent(struct btree_trans *trans,
					    enum btree_id *data_btree,
					    s64 *offset_into_extent,
					    struct bkey_buf *extent)
{
	if (extent->k->k.type != KEY_TYPE_reflink_p)
		return 0;

	*data_btree = BTREE_ID_reflink;

	struct bch_fs *c = trans->c;
	struct btree_iter reflink_iter;
	struct bkey_s_c reflink_k =
		bch2_lookup_indirect_extent(trans, &reflink_iter,
					    offset_into_extent,
					    bkey_i_to_s_c_reflink_p(extent->k),
					    true, 0);
	int ret = bkey_err(reflink_k);
	if (ret)
		return ret;

	if (bkey_deleted(reflink_k.k)) {
		bch2_trans_iter_exit(trans, &reflink_iter);
		return bch_err_throw(c, missing_indirect_extent);
	}

	bch2_bkey_buf_reassemble(extent, c, reflink_k);
	bch2_trans_iter_exit(trans, &reflink_iter);
	return 0;
}
115 
/*
 * Read flags, as an x-macro list: each entry expands to a __BCH_READ_* bit
 * number and a BCH_READ_* bitmask in the two enums below.
 */
#define BCH_READ_FLAGS()		\
	x(retry_if_stale)		\
	x(may_promote)			\
	x(user_mapped)			\
	x(last_fragment)		\
	x(must_bounce)			\
	x(must_clone)			\
	x(in_retry)

/* Bit numbers: __BCH_READ_retry_if_stale == 0, etc. */
enum __bch_read_flags {
#define x(n)	__BCH_READ_##n,
	BCH_READ_FLAGS()
#undef x
};

/* Bitmasks: BCH_READ_n == BIT(__BCH_READ_n) */
enum bch_read_flags {
#define x(n)	BCH_READ_##n = BIT(__BCH_READ_##n),
	BCH_READ_FLAGS()
#undef x
};
136 
/* Only returns errors if BCH_READ_in_retry is set (see bch2_read_extent()) */
int __bch2_read_extent(struct btree_trans *, struct bch_read_bio *,
		       struct bvec_iter, struct bpos, enum btree_id,
		       struct bkey_s_c, unsigned,
		       struct bch_io_failures *, unsigned, int);
141 
bch2_read_extent(struct btree_trans * trans,struct bch_read_bio * rbio,struct bpos read_pos,enum btree_id data_btree,struct bkey_s_c k,unsigned offset_into_extent,unsigned flags)142 static inline void bch2_read_extent(struct btree_trans *trans,
143 			struct bch_read_bio *rbio, struct bpos read_pos,
144 			enum btree_id data_btree, struct bkey_s_c k,
145 			unsigned offset_into_extent, unsigned flags)
146 {
147 	int ret = __bch2_read_extent(trans, rbio, rbio->bio.bi_iter, read_pos,
148 				     data_btree, k, offset_into_extent, NULL, flags, -1);
149 	/* __bch2_read_extent only returns errors if BCH_READ_in_retry is set */
150 	WARN(ret, "unhandled error from __bch2_read_extent()");
151 }
152 
/* Full read path entry point; bch2_read() below is the common wrapper */
int __bch2_read(struct btree_trans *, struct bch_read_bio *, struct bvec_iter,
		subvol_inum,
		struct bch_io_failures *, struct bkey_buf *, unsigned flags);
156 
/*
 * Standard read: runs __bch2_read() in a transaction with the default flag
 * set for user-mapped IO. @rbio must be freshly initialized (no state bits
 * set yet).
 */
static inline void bch2_read(struct bch_fs *c, struct bch_read_bio *rbio,
			     subvol_inum inum)
{
	const unsigned flags = BCH_READ_retry_if_stale|
			       BCH_READ_may_promote|
			       BCH_READ_user_mapped;

	BUG_ON(rbio->_state);

	rbio->subvol = inum.subvol;

	bch2_trans_run(c,
		__bch2_read(trans, rbio, rbio->bio.bi_iter, inum,
			    NULL, NULL, flags));
}
170 
rbio_init_fragment(struct bio * bio,struct bch_read_bio * orig)171 static inline struct bch_read_bio *rbio_init_fragment(struct bio *bio,
172 						      struct bch_read_bio *orig)
173 {
174 	struct bch_read_bio *rbio = to_rbio(bio);
175 
176 	rbio->c			= orig->c;
177 	rbio->_state		= 0;
178 	rbio->flags		= 0;
179 	rbio->ret		= 0;
180 	rbio->split		= true;
181 	rbio->parent		= orig;
182 	rbio->opts		= orig->opts;
183 #ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS
184 	rbio->list_idx	= 0;
185 #endif
186 	return rbio;
187 }
188 
/*
 * Initialize @bio as a top-level read bio: records the start time for
 * accounting, clears all state, and installs @end_io as the completion
 * (valid because @split is left clear).
 */
static inline struct bch_read_bio *rbio_init(struct bio *bio,
					     struct bch_fs *c,
					     struct bch_io_opts opts,
					     bio_end_io_t end_io)
{
	struct bch_read_bio *rbio = to_rbio(bio);

	rbio->start_time	= local_clock();
	rbio->c			= c;
	rbio->opts		= opts;
	rbio->_state		= 0;
	rbio->flags		= 0;
	rbio->ret		= 0;
	rbio->bio.bi_end_io	= end_io;
#ifdef CONFIG_BCACHEFS_ASYNC_OBJECT_LISTS
	rbio->list_idx		= 0;
#endif
	return rbio;
}
208 
struct promote_op;

/* debug output helpers (printbuf-based to_text convention) */
void bch2_promote_op_to_text(struct printbuf *, struct promote_op *);
void bch2_read_bio_to_text(struct printbuf *, struct bch_read_bio *);

/* per-fs read path setup/teardown */
void bch2_fs_io_read_exit(struct bch_fs *);
int bch2_fs_io_read_init(struct bch_fs *);
215 
216 #endif /* _BCACHEFS_IO_READ_H */
217