/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_EC_H
#define _BCACHEFS_EC_H

#include "ec_types.h"
#include "buckets_types.h"
#include "extents_types.h"

enum bch_validate_flags;

int bch2_stripe_validate(struct bch_fs *, struct bkey_s_c, enum bch_validate_flags);
void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
			 struct bkey_s_c);
int bch2_trigger_stripe(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);

#define bch2_bkey_ops_stripe ((struct bkey_ops) {	\
	.key_validate	= bch2_stripe_validate,		\
	.val_to_text	= bch2_stripe_to_text,		\
	.swab		= bch2_ptr_swab,		\
	.trigger	= bch2_trigger_stripe,		\
	.min_val_size	= 8,				\
})

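/*
 * A stripe key's value is variable length: the fixed bch_stripe header is
 * followed by nr_blocks extent pointers, then the checksums for each block
 * (stripe_csums_per_device() of them per block, one per csum_granularity
 * chunk), then one little-endian u16 blockcount per block. The helpers below
 * compute byte offsets into that layout.
 */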
static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
{
	return DIV_ROUND_UP(le16_to_cpu(s->sectors),
			    1 << s->csum_granularity_bits);
}

static inline unsigned stripe_csum_offset(const struct bch_stripe *s,
					  unsigned dev, unsigned csum_idx)
{
	EBUG_ON(s->csum_type >= BCH_CSUM_NR);

	unsigned csum_bytes = bch_crc_bytes[s->csum_type];

	return sizeof(struct bch_stripe) +
		sizeof(struct bch_extent_ptr) * s->nr_blocks +
		(dev * stripe_csums_per_device(s) + csum_idx) * csum_bytes;
}
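
/*
 * Illustrative example (made-up numbers): with crc32c checksums (4 bytes
 * each), nr_blocks = 6 and stripe_csums_per_device() = 4, the first checksum
 * of block 2 lives at
 *	sizeof(struct bch_stripe) +
 *	6 * sizeof(struct bch_extent_ptr) +
 *	(2 * 4 + 0) * 4
 * bytes into the stripe's value.
 */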

static inline unsigned stripe_blockcount_offset(const struct bch_stripe *s,
						unsigned idx)
{
	return stripe_csum_offset(s, s->nr_blocks, 0) +
		sizeof(u16) * idx;
}

static inline unsigned stripe_blockcount_get(const struct bch_stripe *s,
					     unsigned idx)
{
	return le16_to_cpup((void *) s + stripe_blockcount_offset(s, idx));
}

static inline void stripe_blockcount_set(struct bch_stripe *s,
					 unsigned idx, unsigned v)
{
	__le16 *p = (void *) s + stripe_blockcount_offset(s, idx);

	*p = cpu_to_le16(v);
}

static inline unsigned stripe_val_u64s(const struct bch_stripe *s)
{
	return DIV_ROUND_UP(stripe_blockcount_offset(s, s->nr_blocks),
			    sizeof(u64));
}

static inline void *stripe_csum(struct bch_stripe *s,
				unsigned block, unsigned csum_idx)
{
	EBUG_ON(block >= s->nr_blocks);
	EBUG_ON(csum_idx >= stripe_csums_per_device(s));

	return (void *) s + stripe_csum_offset(s, block, csum_idx);
}

static inline struct bch_csum stripe_csum_get(struct bch_stripe *s,
				   unsigned block, unsigned csum_idx)
{
	struct bch_csum csum = { 0 };

	memcpy(&csum, stripe_csum(s, block, csum_idx), bch_crc_bytes[s->csum_type]);
	return csum;
}

static inline void stripe_csum_set(struct bch_stripe *s,
				   unsigned block, unsigned csum_idx,
				   struct bch_csum csum)
{
	memcpy(stripe_csum(s, block, csum_idx), &csum, bch_crc_bytes[s->csum_type]);
}

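/*
 * A data pointer matches a stripe block when it points at the same device
 * (or either device is BCH_SB_MEMBER_INVALID), has the same generation, and
 * its offset falls inside [stripe_ptr->offset, stripe_ptr->offset + sectors).
 */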
static inline bool __bch2_ptr_matches_stripe(const struct bch_extent_ptr *stripe_ptr,
					     const struct bch_extent_ptr *data_ptr,
					     unsigned sectors)
{
	return  (data_ptr->dev    == stripe_ptr->dev ||
		 data_ptr->dev    == BCH_SB_MEMBER_INVALID ||
		 stripe_ptr->dev  == BCH_SB_MEMBER_INVALID) &&
		data_ptr->gen    == stripe_ptr->gen &&
		data_ptr->offset >= stripe_ptr->offset &&
		data_ptr->offset  < stripe_ptr->offset + sectors;
}

static inline bool bch2_ptr_matches_stripe(const struct bch_stripe *s,
					   struct extent_ptr_decoded p)
{
	unsigned nr_data = s->nr_blocks - s->nr_redundant;

	BUG_ON(!p.has_ec);

	if (p.ec.block >= nr_data)
		return false;

	return __bch2_ptr_matches_stripe(&s->ptrs[p.ec.block], &p.ptr,
					 le16_to_cpu(s->sectors));
}

static inline bool bch2_ptr_matches_stripe_m(const struct gc_stripe *m,
					     struct extent_ptr_decoded p)
{
	unsigned nr_data = m->nr_blocks - m->nr_redundant;

	BUG_ON(!p.has_ec);

	if (p.ec.block >= nr_data)
		return false;

	return __bch2_ptr_matches_stripe(&m->ptrs[p.ec.block], &p.ptr,
					 m->sectors);
}

struct bch_read_bio;

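/*
 * In-memory buffer for stripe reads/reconstruction: data[] holds one buffer
 * per block covering sectors [offset, offset + size) of the stripe, the valid
 * bitmap tracks which blocks hold usable data, and key holds the stripe key
 * itself (interpretation inferred from the fields; see ec.c for the users).
 */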
struct ec_stripe_buf {
	/* might not be buffering the entire stripe: */
	unsigned		offset;
	unsigned		size;
	unsigned long		valid[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];

	void			*data[BCH_BKEY_PTRS_MAX];

	__BKEY_PADDED(key, 255);
};

struct ec_stripe_head;

enum ec_stripe_ref {
	STRIPE_REF_io,
	STRIPE_REF_stripe,
	STRIPE_REF_NR
};

struct ec_stripe_new {
	struct bch_fs		*c;
	struct ec_stripe_head	*h;
	struct mutex		lock;
	struct list_head	list;

	struct hlist_node	hash;
	u64			idx;

	struct closure		iodone;

	atomic_t		ref[STRIPE_REF_NR];

	int			err;

	u8			nr_data;
	u8			nr_parity;
	bool			allocated;
	bool			pending;
	bool			have_existing_stripe;

	unsigned long		blocks_gotten[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
	unsigned long		blocks_allocated[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
	open_bucket_idx_t	blocks[BCH_BKEY_PTRS_MAX];
	struct disk_reservation	res;

	struct ec_stripe_buf	new_stripe;
	struct ec_stripe_buf	existing_stripe;
};

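/*
 * An ec_stripe_head tracks in-flight stripe creation for one combination of
 * disk_label, algorithm, redundancy and watermark; s points at the
 * ec_stripe_new currently being filled (a rough summary, inferred from the
 * fields below and their use in ec.c).
 */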
struct ec_stripe_head {
	struct list_head	list;
	struct mutex		lock;

	unsigned		disk_label;
	unsigned		algo;
	unsigned		redundancy;
	enum bch_watermark	watermark;
	bool			insufficient_devs;

	unsigned long		rw_devs_change_count;

	u64			nr_created;

	struct bch_devs_mask	devs;
	unsigned		nr_active_devs;

	unsigned		blocksize;

	struct dev_stripe_state	block_stripe;
	struct dev_stripe_state	parity_stripe;

	struct ec_stripe_new	*s;
};

int bch2_ec_read_extent(struct btree_trans *, struct bch_read_bio *, struct bkey_s_c);

void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);

void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *);

int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *);

void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);
struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
			unsigned, unsigned, unsigned,
			enum bch_watermark, struct closure *);

void bch2_stripes_heap_update(struct bch_fs *, struct stripe *, size_t);
void bch2_stripes_heap_del(struct bch_fs *, struct stripe *, size_t);
void bch2_stripes_heap_insert(struct bch_fs *, struct stripe *, size_t);

void bch2_do_stripe_deletes(struct bch_fs *);
void bch2_ec_do_stripe_creates(struct bch_fs *);
void bch2_ec_stripe_new_free(struct bch_fs *, struct ec_stripe_new *);

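/*
 * ec_stripe_new is refcounted per enum ec_stripe_ref: dropping the last
 * STRIPE_REF_stripe reference frees the stripe, dropping the last
 * STRIPE_REF_io reference kicks off stripe creation.
 */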
static inline void ec_stripe_new_get(struct ec_stripe_new *s,
				     enum ec_stripe_ref ref)
{
	atomic_inc(&s->ref[ref]);
}

static inline void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s,
				     enum ec_stripe_ref ref)
{
	BUG_ON(atomic_read(&s->ref[ref]) <= 0);

	if (atomic_dec_and_test(&s->ref[ref]))
		switch (ref) {
		case STRIPE_REF_stripe:
			bch2_ec_stripe_new_free(c, s);
			break;
		case STRIPE_REF_io:
			bch2_ec_do_stripe_creates(c);
			break;
		default:
			BUG();
		}
}

int bch2_dev_remove_stripes(struct bch_fs *, unsigned);

void bch2_ec_stop_dev(struct bch_fs *, struct bch_dev *);
void bch2_fs_ec_stop(struct bch_fs *);
void bch2_fs_ec_flush(struct bch_fs *);

int bch2_stripes_read(struct bch_fs *);

void bch2_stripes_heap_to_text(struct printbuf *, struct bch_fs *);
void bch2_new_stripes_to_text(struct printbuf *, struct bch_fs *);

void bch2_fs_ec_exit(struct bch_fs *);
void bch2_fs_ec_init_early(struct bch_fs *);
int bch2_fs_ec_init(struct bch_fs *);

#endif /* _BCACHEFS_EC_H */