/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_EC_H
#define _BCACHEFS_EC_H

#include "ec_types.h"
#include "buckets_types.h"
#include "extents_types.h"

int bch2_stripe_validate(struct bch_fs *, struct bkey_s_c,
			 struct bkey_validate_context);
void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
			 struct bkey_s_c);
int bch2_trigger_stripe(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);

#define bch2_bkey_ops_stripe ((struct bkey_ops) {	\
	.key_validate	= bch2_stripe_validate,		\
	.val_to_text	= bch2_stripe_to_text,		\
	.swab		= bch2_ptr_swab,		\
	.trigger	= bch2_trigger_stripe,		\
	.min_val_size	= 8,				\
})

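/*
 * Layout of a stripe key's value, as implied by the offset helpers below
 * (a sketch for orientation, not a normative description):
 *
 *	struct bch_stripe			fixed header
 *	struct bch_extent_ptr[nr_blocks]	one pointer per block
 *	checksums				nr_blocks * stripe_csums_per_device()
 *						entries, each bch_crc_bytes[csum_type] wide
 *	__le16[nr_blocks]			per-block counts
 */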
static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
{
	return DIV_ROUND_UP(le16_to_cpu(s->sectors),
			    1 << s->csum_granularity_bits);
}

static inline unsigned stripe_csum_offset(const struct bch_stripe *s,
					  unsigned dev, unsigned csum_idx)
{
	EBUG_ON(s->csum_type >= BCH_CSUM_NR);

	unsigned csum_bytes = bch_crc_bytes[s->csum_type];

	return sizeof(struct bch_stripe) +
		sizeof(struct bch_extent_ptr) * s->nr_blocks +
		(dev * stripe_csums_per_device(s) + csum_idx) * csum_bytes;
}

static inline unsigned stripe_blockcount_offset(const struct bch_stripe *s,
						unsigned idx)
{
	return stripe_csum_offset(s, s->nr_blocks, 0) +
		sizeof(u16) * idx;
}

static inline unsigned stripe_blockcount_get(const struct bch_stripe *s,
					     unsigned idx)
{
	return le16_to_cpup((void *) s + stripe_blockcount_offset(s, idx));
}

static inline void stripe_blockcount_set(struct bch_stripe *s,
					 unsigned idx, unsigned v)
{
	__le16 *p = (void *) s + stripe_blockcount_offset(s, idx);

	*p = cpu_to_le16(v);
}

static inline unsigned stripe_val_u64s(const struct bch_stripe *s)
{
	return DIV_ROUND_UP(stripe_blockcount_offset(s, s->nr_blocks),
			    sizeof(u64));
}

static inline void *stripe_csum(struct bch_stripe *s,
				unsigned block, unsigned csum_idx)
{
	EBUG_ON(block >= s->nr_blocks);
	EBUG_ON(csum_idx >= stripe_csums_per_device(s));

	return (void *) s + stripe_csum_offset(s, block, csum_idx);
}

static inline struct bch_csum stripe_csum_get(struct bch_stripe *s,
				   unsigned block, unsigned csum_idx)
{
	struct bch_csum csum = { 0 };

	memcpy(&csum, stripe_csum(s, block, csum_idx), bch_crc_bytes[s->csum_type]);
	return csum;
}

static inline void stripe_csum_set(struct bch_stripe *s,
				   unsigned block, unsigned csum_idx,
				   struct bch_csum csum)
{
	memcpy(stripe_csum(s, block, csum_idx), &csum, bch_crc_bytes[s->csum_type]);
}
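
/*
 * Hypothetical usage sketch (not a caller from this file): storing and
 * reading back the checksum in slot @i of block @b, where b < s->nr_blocks
 * and i < stripe_csums_per_device(s):
 *
 *	stripe_csum_set(s, b, i, csum);
 *	struct bch_csum got = stripe_csum_get(s, b, i);
 *
 * Only the low bch_crc_bytes[s->csum_type] bytes of the returned csum are
 * meaningful; the rest are zeroed.
 */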

#define STRIPE_LRU_POS_EMPTY	1

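/*
 * LRU position for a stripe, as computed below: 0 means no data blocks are
 * empty and the stripe isn't put on the LRU; STRIPE_LRU_POS_EMPTY means every
 * data block is empty and the stripe can be deleted; otherwise the position is
 * LRU_TIME_MAX - blocks_empty, so that emptier stripes sort first for reuse.
 */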
static inline u64 stripe_lru_pos(const struct bch_stripe *s)
{
	if (!s)
		return 0;

	unsigned nr_data = s->nr_blocks - s->nr_redundant, blocks_empty = 0;

	for (unsigned i = 0; i < nr_data; i++)
		blocks_empty += !stripe_blockcount_get(s, i);

	/* Will be picked up by the stripe_delete worker */
	if (blocks_empty == nr_data)
		return STRIPE_LRU_POS_EMPTY;

	if (!blocks_empty)
		return 0;

	/* invert: more blocks empty = reuse first */
	return LRU_TIME_MAX - blocks_empty;
}

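/*
 * A data pointer matches a stripe block's pointer when it points to the same
 * device (or either device is BCH_SB_MEMBER_INVALID), has the same generation,
 * and its offset falls within the @sectors covered by the stripe block.
 */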
static inline bool __bch2_ptr_matches_stripe(const struct bch_extent_ptr *stripe_ptr,
					     const struct bch_extent_ptr *data_ptr,
					     unsigned sectors)
{
	return  (data_ptr->dev    == stripe_ptr->dev ||
		 data_ptr->dev    == BCH_SB_MEMBER_INVALID ||
		 stripe_ptr->dev  == BCH_SB_MEMBER_INVALID) &&
		data_ptr->gen    == stripe_ptr->gen &&
		data_ptr->offset >= stripe_ptr->offset &&
		data_ptr->offset  < stripe_ptr->offset + sectors;
}

static inline bool bch2_ptr_matches_stripe(const struct bch_stripe *s,
					   struct extent_ptr_decoded p)
{
	unsigned nr_data = s->nr_blocks - s->nr_redundant;

	BUG_ON(!p.has_ec);

	if (p.ec.block >= nr_data)
		return false;

	return __bch2_ptr_matches_stripe(&s->ptrs[p.ec.block], &p.ptr,
					 le16_to_cpu(s->sectors));
}

static inline bool bch2_ptr_matches_stripe_m(const struct gc_stripe *m,
					     struct extent_ptr_decoded p)
{
	unsigned nr_data = m->nr_blocks - m->nr_redundant;

	BUG_ON(!p.has_ec);

	if (p.ec.block >= nr_data)
		return false;

	return __bch2_ptr_matches_stripe(&m->ptrs[p.ec.block], &p.ptr,
					 m->sectors);
}

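/*
 * gc_stripe locking: a bit lock (BUCKET_LOCK_BITNR) in gc_stripe->lock,
 * apparently mirroring the bucket lock helpers; unlock wakes any waiters.
 */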
static inline void gc_stripe_unlock(struct gc_stripe *s)
{
	BUILD_BUG_ON(!((union ulong_byte_assert) { .ulong = 1UL << BUCKET_LOCK_BITNR }).byte);

	clear_bit_unlock(BUCKET_LOCK_BITNR, (void *) &s->lock);
	wake_up_bit((void *) &s->lock, BUCKET_LOCK_BITNR);
}

static inline void gc_stripe_lock(struct gc_stripe *s)
{
	wait_on_bit_lock((void *) &s->lock, BUCKET_LOCK_BITNR,
			 TASK_UNINTERRUPTIBLE);
}

struct bch_read_bio;

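/* In-memory buffer for (a portion of) a stripe: per-block data plus the key */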
struct ec_stripe_buf {
	/* might not be buffering the entire stripe: */
	unsigned		offset;
	unsigned		size;
	unsigned long		valid[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];

	void			*data[BCH_BKEY_PTRS_MAX];

	__BKEY_PADDED(key, 255);
};

struct ec_stripe_head;

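/*
 * Reference classes on a stripe being created: per ec_stripe_new_put() below,
 * dropping the last STRIPE_REF_io reference kicks off stripe creation, and
 * dropping the last STRIPE_REF_stripe reference frees the stripe.
 */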
enum ec_stripe_ref {
	STRIPE_REF_io,
	STRIPE_REF_stripe,
	STRIPE_REF_NR
};

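/* State for a stripe being created: */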
struct ec_stripe_new {
	struct bch_fs		*c;
	struct ec_stripe_head	*h;
	struct mutex		lock;
	struct list_head	list;

	struct hlist_node	hash;
	u64			idx;

	struct closure		iodone;

	atomic_t		ref[STRIPE_REF_NR];

	int			err;

	u8			nr_data;
	u8			nr_parity;
	bool			allocated;
	bool			pending;
	bool			have_existing_stripe;

	unsigned long		blocks_gotten[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
	unsigned long		blocks_allocated[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
	open_bucket_idx_t	blocks[BCH_BKEY_PTRS_MAX];
	struct disk_reservation	res;

	struct ec_stripe_buf	new_stripe;
	struct ec_stripe_buf	existing_stripe;
};

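/*
 * A stripe head appears to track in-progress stripe creation for one
 * combination of disk label, algorithm, redundancy and watermark (see
 * bch2_ec_stripe_head_get()), including the set of devices to allocate from
 * and the stripe currently being built.
 */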
struct ec_stripe_head {
	struct list_head	list;
	struct mutex		lock;

	unsigned		disk_label;
	unsigned		algo;
	unsigned		redundancy;
	enum bch_watermark	watermark;
	bool			insufficient_devs;

	unsigned long		rw_devs_change_count;

	u64			nr_created;

	struct bch_devs_mask	devs;
	unsigned		nr_active_devs;

	unsigned		blocksize;

	struct dev_stripe_state	block_stripe;
	struct dev_stripe_state	parity_stripe;

	struct ec_stripe_new	*s;
};

int bch2_ec_read_extent(struct btree_trans *, struct bch_read_bio *, struct bkey_s_c);

void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);

void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *, int);

int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *);

void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);
struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
			unsigned, unsigned, unsigned,
			enum bch_watermark, struct closure *);

void bch2_do_stripe_deletes(struct bch_fs *);
void bch2_ec_do_stripe_creates(struct bch_fs *);
void bch2_ec_stripe_new_free(struct bch_fs *, struct ec_stripe_new *);

static inline void ec_stripe_new_get(struct ec_stripe_new *s,
				     enum ec_stripe_ref ref)
{
	atomic_inc(&s->ref[ref]);
}

static inline void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s,
				     enum ec_stripe_ref ref)
{
	BUG_ON(atomic_read(&s->ref[ref]) <= 0);

	if (atomic_dec_and_test(&s->ref[ref]))
		switch (ref) {
		case STRIPE_REF_stripe:
			bch2_ec_stripe_new_free(c, s);
			break;
		case STRIPE_REF_io:
			bch2_ec_do_stripe_creates(c);
			break;
		default:
			BUG();
		}
}
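
/*
 * Hypothetical usage sketch (not a caller from this file): pin a stripe
 * under construction across an IO, letting the final put trigger
 * bch2_ec_do_stripe_creates():
 *
 *	ec_stripe_new_get(s, STRIPE_REF_io);
 *	...submit the write...
 *	ec_stripe_new_put(c, s, STRIPE_REF_io);
 */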

int bch2_dev_remove_stripes(struct bch_fs *, unsigned);

void bch2_ec_stop_dev(struct bch_fs *, struct bch_dev *);
void bch2_fs_ec_stop(struct bch_fs *);
void bch2_fs_ec_flush(struct bch_fs *);

int bch2_stripes_read(struct bch_fs *);

void bch2_new_stripes_to_text(struct printbuf *, struct bch_fs *);

void bch2_fs_ec_exit(struct bch_fs *);
void bch2_fs_ec_init_early(struct bch_fs *);
int bch2_fs_ec_init(struct bch_fs *);

int bch2_check_stripe_to_lru_refs(struct bch_fs *);

#endif /* _BCACHEFS_EC_H */