/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _BCACHEFS_EC_H
#define _BCACHEFS_EC_H

#include "ec_types.h"
#include "buckets_types.h"
#include "extents_types.h"

enum bch_validate_flags;

int bch2_stripe_invalid(struct bch_fs *, struct bkey_s_c,
			enum bch_validate_flags, struct printbuf *);
void bch2_stripe_to_text(struct printbuf *, struct bch_fs *,
			 struct bkey_s_c);
int bch2_trigger_stripe(struct btree_trans *, enum btree_id, unsigned,
			struct bkey_s_c, struct bkey_s,
			enum btree_iter_update_trigger_flags);

#define bch2_bkey_ops_stripe ((struct bkey_ops) {	\
	.key_invalid	= bch2_stripe_invalid,		\
	.val_to_text	= bch2_stripe_to_text,		\
	.swab		= bch2_ptr_swab,		\
	.trigger	= bch2_trigger_stripe,		\
	.min_val_size	= 8,				\
})

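/*
 * Number of checksums stored per stripe block: each block's sectors are
 * checksummed in chunks of (1 << csum_granularity_bits) sectors.
 */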
static inline unsigned stripe_csums_per_device(const struct bch_stripe *s)
{
	return DIV_ROUND_UP(le16_to_cpu(s->sectors),
			    1 << s->csum_granularity_bits);
}

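/*
 * Byte offset, within the stripe value, of checksum @csum_idx for block @dev:
 * checksums are stored after the extent pointers, grouped by block.
 */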
static inline unsigned stripe_csum_offset(const struct bch_stripe *s,
					  unsigned dev, unsigned csum_idx)
{
	EBUG_ON(s->csum_type >= BCH_CSUM_NR);

	unsigned csum_bytes = bch_crc_bytes[s->csum_type];

	return sizeof(struct bch_stripe) +
		sizeof(struct bch_extent_ptr) * s->nr_blocks +
		(dev * stripe_csums_per_device(s) + csum_idx) * csum_bytes;
}

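/*
 * Byte offset, within the stripe value, of the sector count for block @idx:
 * block counts follow the checksum area.
 */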
static inline unsigned stripe_blockcount_offset(const struct bch_stripe *s,
						unsigned idx)
{
	return stripe_csum_offset(s, s->nr_blocks, 0) +
		sizeof(u16) * idx;
}

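/* Read/write the little-endian per-block sector count for block @idx: */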
static inline unsigned stripe_blockcount_get(const struct bch_stripe *s,
					     unsigned idx)
{
	return le16_to_cpup((void *) s + stripe_blockcount_offset(s, idx));
}

static inline void stripe_blockcount_set(struct bch_stripe *s,
					 unsigned idx, unsigned v)
{
	__le16 *p = (void *) s + stripe_blockcount_offset(s, idx);

	*p = cpu_to_le16(v);
}

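/* Size of the entire stripe value, in u64s (bkey values are sized in u64s): */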
static inline unsigned stripe_val_u64s(const struct bch_stripe *s)
{
	return DIV_ROUND_UP(stripe_blockcount_offset(s, s->nr_blocks),
			    sizeof(u64));
}

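/*
 * Pointer to checksum @csum_idx of block @block, plus get/set helpers that
 * copy only the bytes used by the stripe's checksum type:
 */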
static inline void *stripe_csum(struct bch_stripe *s,
				unsigned block, unsigned csum_idx)
{
	EBUG_ON(block >= s->nr_blocks);
	EBUG_ON(csum_idx >= stripe_csums_per_device(s));

	return (void *) s + stripe_csum_offset(s, block, csum_idx);
}

static inline struct bch_csum stripe_csum_get(struct bch_stripe *s,
				   unsigned block, unsigned csum_idx)
{
	struct bch_csum csum = { 0 };

	memcpy(&csum, stripe_csum(s, block, csum_idx), bch_crc_bytes[s->csum_type]);
	return csum;
}

static inline void stripe_csum_set(struct bch_stripe *s,
				   unsigned block, unsigned csum_idx,
				   struct bch_csum csum)
{
	memcpy(stripe_csum(s, block, csum_idx), &csum, bch_crc_bytes[s->csum_type]);
}

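/*
 * True if @data_ptr lies within the @sectors-sector region pointed to by
 * @stripe_ptr, on the same device and with the same generation:
 */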
static inline bool __bch2_ptr_matches_stripe(const struct bch_extent_ptr *stripe_ptr,
					     const struct bch_extent_ptr *data_ptr,
					     unsigned sectors)
{
	return  data_ptr->dev    == stripe_ptr->dev &&
		data_ptr->gen    == stripe_ptr->gen &&
		data_ptr->offset >= stripe_ptr->offset &&
		data_ptr->offset  < stripe_ptr->offset + sectors;
}

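/*
 * True if the decoded extent pointer @p, which claims to be part of a stripe,
 * actually points into one of the data (non-parity) blocks of @s:
 */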
static inline bool bch2_ptr_matches_stripe(const struct bch_stripe *s,
					   struct extent_ptr_decoded p)
{
	unsigned nr_data = s->nr_blocks - s->nr_redundant;

	BUG_ON(!p.has_ec);

	if (p.ec.block >= nr_data)
		return false;

	return __bch2_ptr_matches_stripe(&s->ptrs[p.ec.block], &p.ptr,
					 le16_to_cpu(s->sectors));
}

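/* As above, but checked against an in-memory struct gc_stripe: */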
static inline bool bch2_ptr_matches_stripe_m(const struct gc_stripe *m,
					     struct extent_ptr_decoded p)
{
	unsigned nr_data = m->nr_blocks - m->nr_redundant;

	BUG_ON(!p.has_ec);

	if (p.ec.block >= nr_data)
		return false;

	return __bch2_ptr_matches_stripe(&m->ptrs[p.ec.block], &p.ptr,
					 m->sectors);
}

struct bch_read_bio;

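/* In-memory buffer holding (some or all of) a stripe's data, plus its key: */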
struct ec_stripe_buf {
	/* might not be buffering the entire stripe: */
	unsigned		offset;
	unsigned		size;
	unsigned long		valid[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];

	void			*data[BCH_BKEY_PTRS_MAX];

	__BKEY_PADDED(key, 255);
};

struct ec_stripe_head;

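/*
 * Reference counts on an ec_stripe_new: dropping the last STRIPE_REF_io ref
 * kicks off stripe creation, dropping the last STRIPE_REF_stripe ref frees
 * the object (see ec_stripe_new_put()).
 */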
enum ec_stripe_ref {
	STRIPE_REF_io,
	STRIPE_REF_stripe,
	STRIPE_REF_NR
};

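/* A stripe in the process of being created: */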
struct ec_stripe_new {
	struct bch_fs		*c;
	struct ec_stripe_head	*h;
	struct mutex		lock;
	struct list_head	list;

	struct hlist_node	hash;
	u64			idx;

	struct closure		iodone;

	atomic_t		ref[STRIPE_REF_NR];

	int			err;

	u8			nr_data;
	u8			nr_parity;
	bool			allocated;
	bool			pending;
	bool			have_existing_stripe;

	unsigned long		blocks_gotten[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
	unsigned long		blocks_allocated[BITS_TO_LONGS(BCH_BKEY_PTRS_MAX)];
	open_bucket_idx_t	blocks[BCH_BKEY_PTRS_MAX];
	struct disk_reservation	res;

	struct ec_stripe_buf	new_stripe;
	struct ec_stripe_buf	existing_stripe;
};

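/*
 * Per target/algorithm/redundancy/watermark state for allocating new stripes;
 * @s points at the stripe currently being created from this head, if any:
 */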
struct ec_stripe_head {
	struct list_head	list;
	struct mutex		lock;

	unsigned		target;
	unsigned		algo;
	unsigned		redundancy;
	enum bch_watermark	watermark;

	struct bch_devs_mask	devs;
	unsigned		nr_active_devs;

	unsigned		blocksize;

	struct dev_stripe_state	block_stripe;
	struct dev_stripe_state	parity_stripe;

	struct ec_stripe_new	*s;
};

int bch2_ec_read_extent(struct btree_trans *, struct bch_read_bio *);

void *bch2_writepoint_ec_buf(struct bch_fs *, struct write_point *);

void bch2_ec_bucket_cancel(struct bch_fs *, struct open_bucket *);

int bch2_ec_stripe_new_alloc(struct bch_fs *, struct ec_stripe_head *);

void bch2_ec_stripe_head_put(struct bch_fs *, struct ec_stripe_head *);
struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *,
			unsigned, unsigned, unsigned,
			enum bch_watermark, struct closure *);

void bch2_stripes_heap_update(struct bch_fs *, struct stripe *, size_t);
void bch2_stripes_heap_del(struct bch_fs *, struct stripe *, size_t);
void bch2_stripes_heap_insert(struct bch_fs *, struct stripe *, size_t);

void bch2_do_stripe_deletes(struct bch_fs *);
void bch2_ec_do_stripe_creates(struct bch_fs *);
void bch2_ec_stripe_new_free(struct bch_fs *, struct ec_stripe_new *);

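/* Take/drop a reference on an in-progress stripe; see enum ec_stripe_ref: */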
static inline void ec_stripe_new_get(struct ec_stripe_new *s,
				     enum ec_stripe_ref ref)
{
	atomic_inc(&s->ref[ref]);
}

static inline void ec_stripe_new_put(struct bch_fs *c, struct ec_stripe_new *s,
				     enum ec_stripe_ref ref)
{
	BUG_ON(atomic_read(&s->ref[ref]) <= 0);

	if (atomic_dec_and_test(&s->ref[ref]))
		switch (ref) {
		case STRIPE_REF_stripe:
			bch2_ec_stripe_new_free(c, s);
			break;
		case STRIPE_REF_io:
			bch2_ec_do_stripe_creates(c);
			break;
		default:
			BUG();
		}
}

void bch2_ec_stop_dev(struct bch_fs *, struct bch_dev *);
void bch2_fs_ec_stop(struct bch_fs *);
void bch2_fs_ec_flush(struct bch_fs *);

int bch2_stripes_read(struct bch_fs *);

void bch2_stripes_heap_to_text(struct printbuf *, struct bch_fs *);
void bch2_new_stripes_to_text(struct printbuf *, struct bch_fs *);

void bch2_fs_ec_exit(struct bch_fs *);
void bch2_fs_ec_init_early(struct bch_fs *);
int bch2_fs_ec_init(struct bch_fs *);

#endif /* _BCACHEFS_EC_H */