1 /* SPDX-License-Identifier: GPL-2.0 */
2 #ifndef _BCACHEFS_EC_H
3 #define _BCACHEFS_EC_H
4
5 #include "ec_types.h"
6 #include "buckets_types.h"
7 #include "extents_types.h"
8
/* bkey methods for KEY_TYPE_stripe (implemented in ec.c): */
int bch2_stripe_validate(struct bch_fs *, struct bkey_s_c,
			 struct bkey_validate_context);
void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
			 struct bkey_s_c);
int bch2_trigger_stripe(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);
16
/*
 * Method table entry for KEY_TYPE_stripe: validation, printing, endian
 * swabbing and update triggers. min_val_size is the smallest legal value
 * size in bytes for this key type.
 */
#define bch2_bkey_ops_stripe ((struct bkey_ops) {	\
	.key_validate	= bch2_stripe_validate,		\
	.val_to_text	= bch2_stripe_to_text,		\
	.swab		= bch2_ptr_swab,		\
	.trigger	= bch2_trigger_stripe,		\
	.min_val_size	= 8,				\
})
24
stripe_csums_per_device(const struct bch_stripe * s)25 static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
26 {
27 return DIV_ROUND_UP(le16_to_cpu(s->sectors),
28 1 << s->csum_granularity_bits);
29 }
30
stripe_csum_offset(const struct bch_stripe * s,unsigned dev,unsigned csum_idx)31 static inline unsigned stripe_csum_offset(const struct bch_stripe *s,
32 unsigned dev, unsigned csum_idx)
33 {
34 EBUG_ON(s->csum_type >= BCH_CSUM_NR);
35
36 unsigned csum_bytes = bch_crc_bytes[s->csum_type];
37
38 return sizeof(struct bch_stripe) +
39 sizeof(struct bch_extent_ptr) * s->nr_blocks +
40 (dev * stripe_csums_per_device(s) + csum_idx) * csum_bytes;
41 }
42
stripe_blockcount_offset(const struct bch_stripe * s,unsigned idx)43 static inline unsigned stripe_blockcount_offset(const struct bch_stripe *s,
44 unsigned idx)
45 {
46 return stripe_csum_offset(s, s->nr_blocks, 0) +
47 sizeof(u16) * idx;
48 }
49
stripe_blockcount_get(const struct bch_stripe * s,unsigned idx)50 static inline unsigned stripe_blockcount_get(const struct bch_stripe *s,
51 unsigned idx)
52 {
53 return le16_to_cpup((void *) s + stripe_blockcount_offset(s, idx));
54 }
55
stripe_blockcount_set(struct bch_stripe * s,unsigned idx,unsigned v)56 static inline void stripe_blockcount_set(struct bch_stripe *s,
57 unsigned idx, unsigned v)
58 {
59 __le16 *p = (void *) s + stripe_blockcount_offset(s, idx);
60
61 *p = cpu_to_le16(v);
62 }
63
stripe_val_u64s(const struct bch_stripe * s)64 static inline unsigned stripe_val_u64s(const struct bch_stripe *s)
65 {
66 return DIV_ROUND_UP(stripe_blockcount_offset(s, s->nr_blocks),
67 sizeof(u64));
68 }
69
stripe_csum(struct bch_stripe * s,unsigned block,unsigned csum_idx)70 static inline void *stripe_csum(struct bch_stripe *s,
71 unsigned block, unsigned csum_idx)
72 {
73 EBUG_ON(block >= s->nr_blocks);
74 EBUG_ON(csum_idx >= stripe_csums_per_device(s));
75
76 return (void *) s + stripe_csum_offset(s, block, csum_idx);
77 }
78
stripe_csum_get(struct bch_stripe * s,unsigned block,unsigned csum_idx)79 static inline struct bch_csum stripe_csum_get(struct bch_stripe *s,
80 unsigned block, unsigned csum_idx)
81 {
82 struct bch_csum csum = { 0 };
83
84 memcpy(&csum, stripe_csum(s, block, csum_idx), bch_crc_bytes[s->csum_type]);
85 return csum;
86 }
87
stripe_csum_set(struct bch_stripe * s,unsigned block,unsigned csum_idx,struct bch_csum csum)88 static inline void stripe_csum_set(struct bch_stripe *s,
89 unsigned block, unsigned csum_idx,
90 struct bch_csum csum)
91 {
92 memcpy(stripe_csum(s, block, csum_idx), &csum, bch_crc_bytes[s->csum_type]);
93 }
94
/*
 * LRU position of a stripe whose data blocks are all empty: smallest
 * nonzero position, so such stripes sort first (see stripe_lru_pos()).
 */
#define STRIPE_LRU_POS_EMPTY	1
96
stripe_lru_pos(const struct bch_stripe * s)97 static inline u64 stripe_lru_pos(const struct bch_stripe *s)
98 {
99 if (!s)
100 return 0;
101
102 unsigned nr_data = s->nr_blocks - s->nr_redundant, blocks_empty = 0;
103
104 for (unsigned i = 0; i < nr_data; i++)
105 blocks_empty += !stripe_blockcount_get(s, i);
106
107 /* Will be picked up by the stripe_delete worker */
108 if (blocks_empty == nr_data)
109 return STRIPE_LRU_POS_EMPTY;
110
111 if (!blocks_empty)
112 return 0;
113
114 /* invert: more blocks empty = reuse first */
115 return LRU_TIME_MAX - blocks_empty;
116 }
117
__bch2_ptr_matches_stripe(const struct bch_extent_ptr * stripe_ptr,const struct bch_extent_ptr * data_ptr,unsigned sectors)118 static inline bool __bch2_ptr_matches_stripe(const struct bch_extent_ptr *stripe_ptr,
119 const struct bch_extent_ptr *data_ptr,
120 unsigned sectors)
121 {
122 return (data_ptr->dev == stripe_ptr->dev ||
123 data_ptr->dev == BCH_SB_MEMBER_INVALID ||
124 stripe_ptr->dev == BCH_SB_MEMBER_INVALID) &&
125 data_ptr->gen == stripe_ptr->gen &&
126 data_ptr->offset >= stripe_ptr->offset &&
127 data_ptr->offset < stripe_ptr->offset + sectors;
128 }
129
bch2_ptr_matches_stripe(const struct bch_stripe * s,struct extent_ptr_decoded p)130 static inline bool bch2_ptr_matches_stripe(const struct bch_stripe *s,
131 struct extent_ptr_decoded p)
132 {
133 unsigned nr_data = s->nr_blocks - s->nr_redundant;
134
135 BUG_ON(!p.has_ec);
136
137 if (p.ec.block >= nr_data)
138 return false;
139
140 return __bch2_ptr_matches_stripe(&s->ptrs[p.ec.block], &p.ptr,
141 le16_to_cpu(s->sectors));
142 }
143
bch2_ptr_matches_stripe_m(const struct gc_stripe * m,struct extent_ptr_decoded p)144 static inline bool bch2_ptr_matches_stripe_m(const struct gc_stripe *m,
145 struct extent_ptr_decoded p)
146 {
147 unsigned nr_data = m->nr_blocks - m->nr_redundant;
148
149 BUG_ON(!p.has_ec);
150
151 if (p.ec.block >= nr_data)
152 return false;
153
154 return __bch2_ptr_matches_stripe(&m->ptrs[p.ec.block], &p.ptr,
155 m->sectors);
156 }
157
/*
 * Release the per-gc_stripe bit lock and wake any waiters.
 * NOTE(review): memory-ordering sensitive — clear_bit_unlock() provides
 * release semantics, and the smp_mb__after_atomic() orders the clear
 * against the waitqueue check in wake_up_bit(); do not reorder.
 */
static inline void gc_stripe_unlock(struct gc_stripe *s)
{
	/* the lock bit must live in the byte the assert inspects */
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &s->lock);
	smp_mb__after_atomic();
	wake_up_bit((void *) &s->lock, BUCKET_LOCK_BITNR);
}
166
/*
 * Acquire the per-gc_stripe bit lock, sleeping uninterruptibly until it
 * is free; paired with gc_stripe_unlock().
 */
static inline void gc_stripe_lock(struct gc_stripe *s)
{
	wait_on_bit_lock((void *) &s->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}
172
struct bch_read_bio;

/* In-memory buffer holding (part of) a stripe's data blocks: */
struct ec_stripe_buf {
	/* might not be buffering the entire stripe: */
	unsigned		offset;
	unsigned		size;
	/* bitmap of blocks whose data[] buffer contents are valid */
	unsigned long		valid[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];

	/* one buffer per stripe block */
	void			*data[BCH_BKEY_PTRS_MAX];

	__BKEY_PADDED(key, 255);
};
185
struct ec_stripe_head;

/*
 * Reference types on an ec_stripe_new — see ec_stripe_new_put():
 * dropping the last _io ref kicks off stripe creation, dropping the last
 * _stripe ref frees the object.
 */
enum ec_stripe_ref {
	STRIPE_REF_io,
	STRIPE_REF_stripe,
	STRIPE_REF_NR
};
193
/* A new stripe being built (or an existing one being reused): */
struct ec_stripe_new {
	struct bch_fs		*c;
	struct ec_stripe_head	*h;	/* head this stripe belongs to */
	struct mutex		lock;
	struct list_head	list;

	/* presumably keyed by idx — confirm against the hash table user */
	struct hlist_node	hash;
	u64			idx;

	struct closure		iodone;	/* completion for in-flight stripe IO */

	/* refcounts, indexed by enum ec_stripe_ref */
	atomic_t		ref[STRIPE_REF_NR];

	int			err;

	u8			nr_data;
	u8			nr_parity;
	bool			allocated;
	bool			pending;
	bool			have_existing_stripe;

	/* bitmaps over stripe blocks; indices match blocks[] below */
	unsigned long		blocks_gotten[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
	unsigned long		blocks_allocated[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
	open_bucket_idx_t	blocks[BCH_BKEY_PTRS_MAX];
	struct disk_reservation	res;

	struct ec_stripe_buf	new_stripe;
	struct ec_stripe_buf	existing_stripe;
};
223
/*
 * Allocation state for stripes of a given (disk_label, algo, redundancy,
 * watermark) combination; owns the ec_stripe_new currently being filled.
 */
struct ec_stripe_head {
	struct list_head	list;
	struct mutex		lock;

	unsigned		disk_label;
	unsigned		algo;
	unsigned		redundancy;
	enum bch_watermark	watermark;
	bool			insufficient_devs;

	/* detects device set changes so devs/nr_active_devs can be rebuilt */
	unsigned long		rw_devs_change_count;

	u64			nr_created;

	struct bch_devs_mask	devs;
	unsigned		nr_active_devs;

	unsigned		blocksize;

	/* round-robin allocation state for data and parity blocks */
	struct dev_stripe_state	block_stripe;
	struct dev_stripe_state	parity_stripe;

	/* stripe currently being created, if any */
	struct ec_stripe_new	*s;
};
248
/* erasure-coded read/write paths and stripe lifecycle (see ec.c): */
int bch2_ec_read_extent(struct btree_trans *, struct bch_read_bio *, struct bkey_s_c);

void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);

void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *, int);

int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *);

void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);

struct alloc_request;
struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
			struct alloc_request *, unsigned, struct closure *);

void bch2_do_stripe_deletes(struct bch_fs *);
void bch2_ec_do_stripe_creates(struct bch_fs *);
void bch2_ec_stripe_new_free(struct bch_fs *, struct ec_stripe_new *);
266
/* Take a reference of type @ref on @s; paired with ec_stripe_new_put(). */
static inline void ec_stripe_new_get(struct ec_stripe_new *s,
				     enum ec_stripe_ref ref)
{
	atomic_inc(&s->ref[ref]);
}
272
ec_stripe_new_put(struct bch_fs * c,struct ec_stripe_new * s,enum ec_stripe_ref ref)273 static inline void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s,
274 enum ec_stripe_ref ref)
275 {
276 BUG_ON(atomic_read(&s->ref[ref]) <= 0);
277
278 if (atomic_dec_and_test(&s->ref[ref]))
279 switch (ref) {
280 case STRIPE_REF_stripe:
281 bch2_ec_stripe_new_free(c, s);
282 break;
283 case STRIPE_REF_io:
284 bch2_ec_do_stripe_creates(c);
285 break;
286 default:
287 BUG();
288 }
289 }
290
/* device removal: */
int bch2_invalidate_stripe_to_dev(struct btree_trans *, struct btree_iter *,
				  struct bkey_s_c, unsigned, unsigned);
int bch2_dev_remove_stripes(struct bch_fs *, unsigned, unsigned);

/* shutdown/flush: */
void bch2_ec_stop_dev(struct bch_fs *, struct bch_dev *);
void bch2_fs_ec_stop(struct bch_fs *);
void bch2_fs_ec_flush(struct bch_fs *);

int bch2_stripes_read(struct bch_fs *);

void bch2_new_stripes_to_text(struct printbuf *, struct bch_fs *);

/* fs init/exit: */
void bch2_fs_ec_exit(struct bch_fs *);
void bch2_fs_ec_init_early(struct bch_fs *);
int bch2_fs_ec_init(struct bch_fs *);

int bch2_check_stripe_to_lru_refs(struct bch_fs *);
308
309 #endif /* _BCACHEFS_EC_H */
310