xref: /linux/fs/bcachefs/ec.c (revision 4a4b30ea80d8cb5e8c4c62bb86201f4ea0d9b030)
1 // SPDX-License-Identifier: GPL-2.0
2 
3 /* erasure coding */
4 
5 #include "bcachefs.h"
6 #include "alloc_background.h"
7 #include "alloc_foreground.h"
8 #include "backpointers.h"
9 #include "bkey_buf.h"
10 #include "bset.h"
11 #include "btree_gc.h"
12 #include "btree_update.h"
13 #include "btree_write_buffer.h"
14 #include "buckets.h"
15 #include "checksum.h"
16 #include "disk_accounting.h"
17 #include "disk_groups.h"
18 #include "ec.h"
19 #include "error.h"
20 #include "io_read.h"
21 #include "io_write.h"
22 #include "keylist.h"
23 #include "lru.h"
24 #include "recovery.h"
25 #include "replicas.h"
26 #include "super-io.h"
27 #include "util.h"
28 
29 #include <linux/sort.h>
30 #include <linux/string_choices.h>
31 
32 #ifdef __KERNEL__
33 
34 #include <linux/raid/pq.h>
35 #include <linux/raid/xor.h>
36 
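/*
 * Recover a single failed block as the XOR of all the others: swap the
 * failed block into slot 0, seed it with block 1, then fold in blocks
 * 2..disks-1 (which, after the swap, include the original block 0).
 *
 * E.g. with three data blocks and one parity block P = D0 ^ D1 ^ D2,
 * a failed D1 is rebuilt as D1 = D0 ^ D2 ^ P.
 */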
37 static void raid5_recov(unsigned disks, unsigned failed_idx,
38 			size_t size, void **data)
39 {
40 	unsigned i = 2, nr;
41 
42 	BUG_ON(failed_idx >= disks);
43 
44 	swap(data[0], data[failed_idx]);
45 	memcpy(data[0], data[1], size);
46 
47 	while (i < disks) {
48 		nr = min_t(unsigned, disks - i, MAX_XOR_BLOCKS);
49 		xor_blocks(nr, size, data[0], data + i);
50 		i += nr;
51 	}
52 
53 	swap(data[0], data[failed_idx]);
54 }
55 
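/*
 * Generate parity: calling raid5_recov() with failed_idx == nd treats the
 * P block as the "failed" block, filling it with the XOR of the data
 * blocks; with two redundant blocks, the raid6 syndrome then computes
 * both P and Q from the data blocks.
 */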
56 static void raid_gen(int nd, int np, size_t size, void **v)
57 {
58 	if (np >= 1)
59 		raid5_recov(nd + np, nd, size, v);
60 	if (np >= 2)
61 		raid6_call.gen_syndrome(nd + np, size, v);
62 	BUG_ON(np > 2);
63 }
64 
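/*
 * Recover from up to two failed blocks: ir[] holds the failed block
 * indices in ascending order (data blocks are 0..nd-1, then parity).
 */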
65 static void raid_rec(int nr, int *ir, int nd, int np, size_t size, void **v)
66 {
67 	switch (nr) {
68 	case 0:
69 		break;
70 	case 1:
71 		if (ir[0] < nd + 1)
72 			raid5_recov(nd + 1, ir[0], size, v);
73 		else
74 			raid6_call.gen_syndrome(nd + np, size, v);
75 		break;
76 	case 2:
77 		if (ir[1] < nd) {
78 			/* data+data failure. */
79 			raid6_2data_recov(nd + np, size, ir[0], ir[1], v);
80 		} else if (ir[0] < nd) {
81 			/* data + p/q failure */
82 
83 			if (ir[1] == nd) /* data + p failure */
84 				raid6_datap_recov(nd + np, size, ir[0], v);
85 			else { /* data + q failure */
86 				raid5_recov(nd + 1, ir[0], size, v);
87 				raid6_call.gen_syndrome(nd + np, size, v);
88 			}
89 		} else {
90 			raid_gen(nd, np, size, v);
91 		}
92 		break;
93 	default:
94 		BUG();
95 	}
96 }
97 
98 #else
99 
100 #include <raid/raid.h>
101 
102 #endif
103 
104 struct ec_bio {
105 	struct bch_dev		*ca;
106 	struct ec_stripe_buf	*buf;
107 	size_t			idx;
108 	u64			submit_time;
109 	struct bio		bio;
110 };
111 
112 /* Stripes btree keys: */
113 
114 int bch2_stripe_validate(struct bch_fs *c, struct bkey_s_c k,
115 			 struct bkey_validate_context from)
116 {
117 	const struct bch_stripe *s = bkey_s_c_to_stripe(k).v;
118 	int ret = 0;
119 
120 	bkey_fsck_err_on(bkey_eq(k.k->p, POS_MIN) ||
121 			 bpos_gt(k.k->p, POS(0, U32_MAX)),
122 			 c, stripe_pos_bad,
123 			 "stripe at bad pos");
124 
125 	bkey_fsck_err_on(bkey_val_u64s(k.k) < stripe_val_u64s(s),
126 			 c, stripe_val_size_bad,
127 			 "incorrect value size (%zu < %u)",
128 			 bkey_val_u64s(k.k), stripe_val_u64s(s));
129 
130 	bkey_fsck_err_on(s->csum_granularity_bits >= 64,
131 			 c, stripe_csum_granularity_bad,
132 			 "invalid csum granularity (%u >= 64)",
133 			 s->csum_granularity_bits);
134 
135 	ret = bch2_bkey_ptrs_validate(c, k, from);
136 fsck_err:
137 	return ret;
138 }
139 
140 void bch2_stripe_to_text(struct printbuf *out, struct bch_fs *c,
141 			 struct bkey_s_c k)
142 {
143 	const struct bch_stripe *sp = bkey_s_c_to_stripe(k).v;
144 	struct bch_stripe s = {};
145 
146 	memcpy(&s, sp, min(sizeof(s), bkey_val_bytes(k.k)));
147 
148 	unsigned nr_data = s.nr_blocks - s.nr_redundant;
149 
150 	prt_printf(out, "algo %u sectors %u blocks %u:%u csum ",
151 		   s.algorithm,
152 		   le16_to_cpu(s.sectors),
153 		   nr_data,
154 		   s.nr_redundant);
155 	bch2_prt_csum_type(out, s.csum_type);
156 	prt_str(out, " gran ");
157 	if (s.csum_granularity_bits < 64)
158 		prt_printf(out, "%llu", 1ULL << s.csum_granularity_bits);
159 	else
160 		prt_printf(out, "(invalid shift %u)", s.csum_granularity_bits);
161 
162 	if (s.disk_label) {
163 		prt_str(out, " label");
164 		bch2_disk_path_to_text(out, c, s.disk_label - 1);
165 	}
166 
167 	for (unsigned i = 0; i < s.nr_blocks; i++) {
168 		const struct bch_extent_ptr *ptr = sp->ptrs + i;
169 
170 		if ((void *) ptr >= bkey_val_end(k))
171 			break;
172 
173 		prt_char(out, ' ');
174 		bch2_extent_ptr_to_text(out, c, ptr);
175 
176 		if (s.csum_type < BCH_CSUM_NR &&
177 		    i < nr_data &&
178 		    stripe_blockcount_offset(&s, i) < bkey_val_bytes(k.k))
179 			prt_printf(out,  "#%u", stripe_blockcount_get(sp, i));
180 	}
181 }
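/*
 * Illustrative output for a 2+1 stripe might look like
 *   algo 0 sectors 512 blocks 2:1 csum crc32c gran 8
 * followed by one pointer per block, with "#<sectors>" appended to each
 * data block showing its block sector count.
 */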
182 
183 /* Triggers: */
184 
185 static int __mark_stripe_bucket(struct btree_trans *trans,
186 				struct bch_dev *ca,
187 				struct bkey_s_c_stripe s,
188 				unsigned ptr_idx, bool deleting,
189 				struct bpos bucket,
190 				struct bch_alloc_v4 *a,
191 				enum btree_iter_update_trigger_flags flags)
192 {
193 	const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
194 	unsigned nr_data = s.v->nr_blocks - s.v->nr_redundant;
195 	bool parity = ptr_idx >= nr_data;
196 	enum bch_data_type data_type = parity ? BCH_DATA_parity : BCH_DATA_stripe;
197 	s64 sectors = parity ? le16_to_cpu(s.v->sectors) : 0;
198 	struct printbuf buf = PRINTBUF;
199 	int ret = 0;
200 
201 	struct bch_fs *c = trans->c;
202 	if (deleting)
203 		sectors = -sectors;
204 
205 	if (!deleting) {
206 		if (bch2_trans_inconsistent_on(a->stripe ||
207 					       a->stripe_redundancy, trans,
208 				"bucket %llu:%llu gen %u data type %s dirty_sectors %u: multiple stripes using same bucket (%u, %llu)\n%s",
209 				bucket.inode, bucket.offset, a->gen,
210 				bch2_data_type_str(a->data_type),
211 				a->dirty_sectors,
212 				a->stripe, s.k->p.offset,
213 				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
214 			ret = -BCH_ERR_mark_stripe;
215 			goto err;
216 		}
217 
218 		if (bch2_trans_inconsistent_on(parity && bch2_bucket_sectors_total(*a), trans,
219 				"bucket %llu:%llu gen %u data type %s dirty_sectors %u cached_sectors %u: data already in parity bucket\n%s",
220 				bucket.inode, bucket.offset, a->gen,
221 				bch2_data_type_str(a->data_type),
222 				a->dirty_sectors,
223 				a->cached_sectors,
224 				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
225 			ret = -BCH_ERR_mark_stripe;
226 			goto err;
227 		}
228 	} else {
229 		if (bch2_trans_inconsistent_on(a->stripe != s.k->p.offset ||
230 					       a->stripe_redundancy != s.v->nr_redundant, trans,
231 				"bucket %llu:%llu gen %u: not marked as stripe when deleting stripe (got %u)\n%s",
232 				bucket.inode, bucket.offset, a->gen,
233 				a->stripe,
234 				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
235 			ret = -BCH_ERR_mark_stripe;
236 			goto err;
237 		}
238 
239 		if (bch2_trans_inconsistent_on(a->data_type != data_type, trans,
240 				"bucket %llu:%llu gen %u data type %s: wrong data type when stripe, should be %s\n%s",
241 				bucket.inode, bucket.offset, a->gen,
242 				bch2_data_type_str(a->data_type),
243 				bch2_data_type_str(data_type),
244 				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
245 			ret = -BCH_ERR_mark_stripe;
246 			goto err;
247 		}
248 
249 		if (bch2_trans_inconsistent_on(parity &&
250 					       (a->dirty_sectors != -sectors ||
251 						a->cached_sectors), trans,
252 				"bucket %llu:%llu gen %u dirty_sectors %u cached_sectors %u: wrong sectors when deleting parity block of stripe\n%s",
253 				bucket.inode, bucket.offset, a->gen,
254 				a->dirty_sectors,
255 				a->cached_sectors,
256 				(bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
257 			ret = -BCH_ERR_mark_stripe;
258 			goto err;
259 		}
260 	}
261 
262 	if (sectors) {
263 		ret = bch2_bucket_ref_update(trans, ca, s.s_c, ptr, sectors, data_type,
264 					     a->gen, a->data_type, &a->dirty_sectors);
265 		if (ret)
266 			goto err;
267 	}
268 
269 	if (!deleting) {
270 		a->stripe		= s.k->p.offset;
271 		a->stripe_redundancy	= s.v->nr_redundant;
272 		alloc_data_type_set(a, data_type);
273 	} else {
274 		a->stripe		= 0;
275 		a->stripe_redundancy	= 0;
276 		alloc_data_type_set(a, BCH_DATA_user);
277 	}
278 err:
279 	printbuf_exit(&buf);
280 	return ret;
281 }
282 
283 static int mark_stripe_bucket(struct btree_trans *trans,
284 			      struct bkey_s_c_stripe s,
285 			      unsigned ptr_idx, bool deleting,
286 			      enum btree_iter_update_trigger_flags flags)
287 {
288 	struct bch_fs *c = trans->c;
289 	const struct bch_extent_ptr *ptr = s.v->ptrs + ptr_idx;
290 	struct printbuf buf = PRINTBUF;
291 	int ret = 0;
292 
293 	struct bch_dev *ca = bch2_dev_tryget(c, ptr->dev);
294 	if (unlikely(!ca)) {
295 		if (ptr->dev != BCH_SB_MEMBER_INVALID && !(flags & BTREE_TRIGGER_overwrite))
296 			ret = -BCH_ERR_mark_stripe;
297 		goto err;
298 	}
299 
300 	struct bpos bucket = PTR_BUCKET_POS(ca, ptr);
301 
302 	if (flags & BTREE_TRIGGER_transactional) {
303 		struct extent_ptr_decoded p = {
304 			.ptr = *ptr,
305 			.crc = bch2_extent_crc_unpack(s.k, NULL),
306 		};
307 		struct bkey_i_backpointer bp;
308 		bch2_extent_ptr_to_bp(c, BTREE_ID_stripes, 0, s.s_c, p,
309 				      (const union bch_extent_entry *) ptr, &bp);
310 
311 		struct bkey_i_alloc_v4 *a =
312 			bch2_trans_start_alloc_update(trans, bucket, 0);
313 		ret   = PTR_ERR_OR_ZERO(a) ?:
314 			__mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &a->v, flags) ?:
315 			bch2_bucket_backpointer_mod(trans, s.s_c, &bp,
316 						    !(flags & BTREE_TRIGGER_overwrite));
317 		if (ret)
318 			goto err;
319 	}
320 
321 	if (flags & BTREE_TRIGGER_gc) {
322 		struct bucket *g = gc_bucket(ca, bucket.offset);
323 		if (bch2_fs_inconsistent_on(!g, c, "reference to invalid bucket on device %u\n  %s",
324 					    ptr->dev,
325 					    (bch2_bkey_val_to_text(&buf, c, s.s_c), buf.buf))) {
326 			ret = -BCH_ERR_mark_stripe;
327 			goto err;
328 		}
329 
330 		bucket_lock(g);
331 		struct bch_alloc_v4 old = bucket_m_to_alloc(*g), new = old;
332 		ret = __mark_stripe_bucket(trans, ca, s, ptr_idx, deleting, bucket, &new, flags);
333 		alloc_to_bucket(g, new);
334 		bucket_unlock(g);
335 
336 		if (!ret)
337 			ret = bch2_alloc_key_to_dev_counters(trans, ca, &old, &new, flags);
338 	}
339 err:
340 	bch2_dev_put(ca);
341 	printbuf_exit(&buf);
342 	return ret;
343 }
344 
345 static int mark_stripe_buckets(struct btree_trans *trans,
346 			       struct bkey_s_c old, struct bkey_s_c new,
347 			       enum btree_iter_update_trigger_flags flags)
348 {
349 	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
350 		? bkey_s_c_to_stripe(old).v : NULL;
351 	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
352 		? bkey_s_c_to_stripe(new).v : NULL;
353 
354 	BUG_ON(old_s && new_s && old_s->nr_blocks != new_s->nr_blocks);
355 
356 	unsigned nr_blocks = new_s ? new_s->nr_blocks : old_s->nr_blocks;
357 
358 	for (unsigned i = 0; i < nr_blocks; i++) {
359 		if (new_s && old_s &&
360 		    !memcmp(&new_s->ptrs[i],
361 			    &old_s->ptrs[i],
362 			    sizeof(new_s->ptrs[i])))
363 			continue;
364 
365 		if (new_s) {
366 			int ret = mark_stripe_bucket(trans,
367 					bkey_s_c_to_stripe(new), i, false, flags);
368 			if (ret)
369 				return ret;
370 		}
371 
372 		if (old_s) {
373 			int ret = mark_stripe_bucket(trans,
374 					bkey_s_c_to_stripe(old), i, true, flags);
375 			if (ret)
376 				return ret;
377 		}
378 	}
379 
380 	return 0;
381 }
382 
383 int bch2_trigger_stripe(struct btree_trans *trans,
384 			enum btree_id btree, unsigned level,
385 			struct bkey_s_c old, struct bkey_s _new,
386 			enum btree_iter_update_trigger_flags flags)
387 {
388 	struct bkey_s_c new = _new.s_c;
389 	struct bch_fs *c = trans->c;
390 	u64 idx = new.k->p.offset;
391 	const struct bch_stripe *old_s = old.k->type == KEY_TYPE_stripe
392 		? bkey_s_c_to_stripe(old).v : NULL;
393 	const struct bch_stripe *new_s = new.k->type == KEY_TYPE_stripe
394 		? bkey_s_c_to_stripe(new).v : NULL;
395 
396 	if (unlikely(flags & BTREE_TRIGGER_check_repair))
397 		return bch2_check_fix_ptrs(trans, btree, level, _new.s_c, flags);
398 
399 	BUG_ON(new_s && old_s &&
400 	       (new_s->nr_blocks	!= old_s->nr_blocks ||
401 		new_s->nr_redundant	!= old_s->nr_redundant));
402 
403 	if (flags & BTREE_TRIGGER_transactional) {
404 		int ret = bch2_lru_change(trans,
405 					  BCH_LRU_STRIPE_FRAGMENTATION,
406 					  idx,
407 					  stripe_lru_pos(old_s),
408 					  stripe_lru_pos(new_s));
409 		if (ret)
410 			return ret;
411 	}
412 
413 	if (flags & (BTREE_TRIGGER_transactional|BTREE_TRIGGER_gc)) {
414 		/*
415 		 * If the pointers aren't changing, we don't need to do anything:
416 		 */
417 		if (new_s && old_s &&
418 		    new_s->nr_blocks	== old_s->nr_blocks &&
419 		    new_s->nr_redundant	== old_s->nr_redundant &&
420 		    !memcmp(old_s->ptrs, new_s->ptrs,
421 			    new_s->nr_blocks * sizeof(struct bch_extent_ptr)))
422 			return 0;
423 
424 		struct gc_stripe *gc = NULL;
425 		if (flags & BTREE_TRIGGER_gc) {
426 			gc = genradix_ptr_alloc(&c->gc_stripes, idx, GFP_KERNEL);
427 			if (!gc) {
428 				bch_err(c, "error allocating memory for gc_stripes, idx %llu", idx);
429 				return -BCH_ERR_ENOMEM_mark_stripe;
430 			}
431 
432 			/*
433 			 * This will be wrong when we bring back runtime gc: we should
434 			 * be unmarking the old key and then marking the new key
435 			 *
436 			 * Also: when we bring back runtime gc, locking
437 			 */
438 			gc->alive	= true;
439 			gc->sectors	= le16_to_cpu(new_s->sectors);
440 			gc->nr_blocks	= new_s->nr_blocks;
441 			gc->nr_redundant	= new_s->nr_redundant;
442 
443 			for (unsigned i = 0; i < new_s->nr_blocks; i++)
444 				gc->ptrs[i] = new_s->ptrs[i];
445 
446 			/*
447 			 * gc recalculates this field from stripe ptr
448 			 * references:
449 			 */
450 			memset(gc->block_sectors, 0, sizeof(gc->block_sectors));
451 		}
452 
453 		if (new_s) {
454 			s64 sectors = (u64) le16_to_cpu(new_s->sectors) * new_s->nr_redundant;
455 
456 			struct disk_accounting_pos acc = {
457 				.type = BCH_DISK_ACCOUNTING_replicas,
458 			};
459 			bch2_bkey_to_replicas(&acc.replicas, new);
460 			int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc);
461 			if (ret)
462 				return ret;
463 
464 			if (gc)
465 				memcpy(&gc->r.e, &acc.replicas, replicas_entry_bytes(&acc.replicas));
466 		}
467 
468 		if (old_s) {
469 			s64 sectors = -((s64) le16_to_cpu(old_s->sectors)) * old_s->nr_redundant;
470 
471 			struct disk_accounting_pos acc = {
472 				.type = BCH_DISK_ACCOUNTING_replicas,
473 			};
474 			bch2_bkey_to_replicas(&acc.replicas, old);
475 			int ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, gc);
476 			if (ret)
477 				return ret;
478 		}
479 
480 		int ret = mark_stripe_buckets(trans, old, new, flags);
481 		if (ret)
482 			return ret;
483 	}
484 
485 	return 0;
486 }
487 
488 /* returns the extent ptr that matched; *block is set to the matching block in the stripe: */
489 static const struct bch_extent_ptr *bkey_matches_stripe(struct bch_stripe *s,
490 						struct bkey_s_c k, unsigned *block)
491 {
492 	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
493 	unsigned i, nr_data = s->nr_blocks - s->nr_redundant;
494 
495 	bkey_for_each_ptr(ptrs, ptr)
496 		for (i = 0; i < nr_data; i++)
497 			if (__bch2_ptr_matches_stripe(&s->ptrs[i], ptr,
498 						      le16_to_cpu(s->sectors))) {
499 				*block = i;
500 				return ptr;
501 			}
502 
503 	return NULL;
504 }
505 
506 static bool extent_has_stripe_ptr(struct bkey_s_c k, u64 idx)
507 {
508 	switch (k.k->type) {
509 	case KEY_TYPE_extent: {
510 		struct bkey_s_c_extent e = bkey_s_c_to_extent(k);
511 		const union bch_extent_entry *entry;
512 
513 		extent_for_each_entry(e, entry)
514 			if (extent_entry_type(entry) ==
515 			    BCH_EXTENT_ENTRY_stripe_ptr &&
516 			    entry->stripe_ptr.idx == idx)
517 				return true;
518 
519 		break;
520 	}
521 	}
522 
523 	return false;
524 }
525 
526 /* Stripe bufs: */
527 
528 static void ec_stripe_buf_exit(struct ec_stripe_buf *buf)
529 {
530 	if (buf->key.k.type == KEY_TYPE_stripe) {
531 		struct bkey_i_stripe *s = bkey_i_to_stripe(&buf->key);
532 		unsigned i;
533 
534 		for (i = 0; i < s->v.nr_blocks; i++) {
535 			kvfree(buf->data[i]);
536 			buf->data[i] = NULL;
537 		}
538 	}
539 }
540 
541 /* XXX: this is a non-mempoolified memory allocation: */
542 static int ec_stripe_buf_init(struct ec_stripe_buf *buf,
543 			      unsigned offset, unsigned size)
544 {
545 	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
546 	unsigned csum_granularity = 1U << v->csum_granularity_bits;
547 	unsigned end = offset + size;
548 	unsigned i;
549 
550 	BUG_ON(end > le16_to_cpu(v->sectors));
551 
552 	offset	= round_down(offset, csum_granularity);
553 	end	= min_t(unsigned, le16_to_cpu(v->sectors),
554 			round_up(end, csum_granularity));
555 
556 	buf->offset	= offset;
557 	buf->size	= end - offset;
558 
559 	memset(buf->valid, 0xFF, sizeof(buf->valid));
560 
561 	for (i = 0; i < v->nr_blocks; i++) {
562 		buf->data[i] = kvmalloc(buf->size << 9, GFP_KERNEL);
563 		if (!buf->data[i])
564 			goto err;
565 	}
566 
567 	return 0;
568 err:
569 	ec_stripe_buf_exit(buf);
570 	return -BCH_ERR_ENOMEM_stripe_buf;
571 }
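/*
 * The buffer range is widened to checksum granularity so that complete
 * checksum blocks can be verified: e.g. with an 8 sector granularity, a
 * request for sectors 10..30 allocates sectors 8..32, i.e. 24 sectors
 * (12KiB) per block.
 */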
572 
573 /* Checksumming: */
574 
575 static struct bch_csum ec_block_checksum(struct ec_stripe_buf *buf,
576 					 unsigned block, unsigned offset)
577 {
578 	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
579 	unsigned csum_granularity = 1 << v->csum_granularity_bits;
580 	unsigned end = buf->offset + buf->size;
581 	unsigned len = min(csum_granularity, end - offset);
582 
583 	BUG_ON(offset >= end);
584 	BUG_ON(offset <  buf->offset);
585 	BUG_ON(offset & (csum_granularity - 1));
586 	BUG_ON(offset + len != le16_to_cpu(v->sectors) &&
587 	       (len & (csum_granularity - 1)));
588 
589 	return bch2_checksum(NULL, v->csum_type,
590 			     null_nonce(),
591 			     buf->data[block] + ((offset - buf->offset) << 9),
592 			     len << 9);
593 }
594 
595 static void ec_generate_checksums(struct ec_stripe_buf *buf)
596 {
597 	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
598 	unsigned i, j, csums_per_device = stripe_csums_per_device(v);
599 
600 	if (!v->csum_type)
601 		return;
602 
603 	BUG_ON(buf->offset);
604 	BUG_ON(buf->size != le16_to_cpu(v->sectors));
605 
606 	for (i = 0; i < v->nr_blocks; i++)
607 		for (j = 0; j < csums_per_device; j++)
608 			stripe_csum_set(v, i, j,
609 				ec_block_checksum(buf, i, j << v->csum_granularity_bits));
610 }
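/*
 * Checksums are stored per block, one per csum_granularity chunk:
 * checksum j covers sectors [j << csum_granularity_bits, (j + 1) <<
 * csum_granularity_bits), so e.g. 512 sector blocks with granularity 8
 * carry 64 checksums per block.
 */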
611 
612 static void ec_validate_checksums(struct bch_fs *c, struct ec_stripe_buf *buf)
613 {
614 	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
615 	unsigned csum_granularity = 1 << v->csum_granularity_bits;
616 	unsigned i;
617 
618 	if (!v->csum_type)
619 		return;
620 
621 	for (i = 0; i < v->nr_blocks; i++) {
622 		unsigned offset = buf->offset;
623 		unsigned end = buf->offset + buf->size;
624 
625 		if (!test_bit(i, buf->valid))
626 			continue;
627 
628 		while (offset < end) {
629 			unsigned j = offset >> v->csum_granularity_bits;
630 			unsigned len = min(csum_granularity, end - offset);
631 			struct bch_csum want = stripe_csum_get(v, i, j);
632 			struct bch_csum got = ec_block_checksum(buf, i, offset);
633 
634 			if (bch2_crc_cmp(want, got)) {
635 				struct bch_dev *ca = bch2_dev_tryget(c, v->ptrs[i].dev);
636 				if (ca) {
637 					struct printbuf err = PRINTBUF;
638 
639 					prt_str(&err, "stripe ");
640 					bch2_csum_err_msg(&err, v->csum_type, want, got);
641 					prt_printf(&err, "  for %ps at %u of\n  ", (void *) _RET_IP_, i);
642 					bch2_bkey_val_to_text(&err, c, bkey_i_to_s_c(&buf->key));
643 					bch_err_ratelimited(ca, "%s", err.buf);
644 					printbuf_exit(&err);
645 
646 					bch2_io_error(ca, BCH_MEMBER_ERROR_checksum);
647 				}
648 
649 				clear_bit(i, buf->valid);
650 				break;
651 			}
652 
653 			offset += len;
654 		}
655 	}
656 }
657 
658 /* Erasure coding: */
659 
660 static void ec_generate_ec(struct ec_stripe_buf *buf)
661 {
662 	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
663 	unsigned nr_data = v->nr_blocks - v->nr_redundant;
664 	unsigned bytes = le16_to_cpu(v->sectors) << 9;
665 
666 	raid_gen(nr_data, v->nr_redundant, bytes, buf->data);
667 }
668 
669 static unsigned ec_nr_failed(struct ec_stripe_buf *buf)
670 {
671 	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
672 
673 	return v->nr_blocks - bitmap_weight(buf->valid, v->nr_blocks);
674 }
675 
676 static int ec_do_recov(struct bch_fs *c, struct ec_stripe_buf *buf)
677 {
678 	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
679 	unsigned i, failed[BCH_BKEY_PTRS_MAX], nr_failed = 0;
680 	unsigned nr_data = v->nr_blocks - v->nr_redundant;
681 	unsigned bytes = buf->size << 9;
682 
683 	if (ec_nr_failed(buf) > v->nr_redundant) {
684 		bch_err_ratelimited(c,
685 			"error doing reconstruct read: unable to read enough blocks");
686 		return -1;
687 	}
688 
689 	for (i = 0; i < nr_data; i++)
690 		if (!test_bit(i, buf->valid))
691 			failed[nr_failed++] = i;
692 
693 	raid_rec(nr_failed, failed, nr_data, v->nr_redundant, bytes, buf->data);
694 	return 0;
695 }
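/*
 * Note that only failed *data* blocks are collected into failed[]:
 * raid_rec() rebuilds those from the remaining data and parity blocks;
 * failed parity blocks are not rebuilt here.
 */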
696 
697 /* IO: */
698 
699 static void ec_block_endio(struct bio *bio)
700 {
701 	struct ec_bio *ec_bio = container_of(bio, struct ec_bio, bio);
702 	struct bch_stripe *v = &bkey_i_to_stripe(&ec_bio->buf->key)->v;
703 	struct bch_extent_ptr *ptr = &v->ptrs[ec_bio->idx];
704 	struct bch_dev *ca = ec_bio->ca;
705 	struct closure *cl = bio->bi_private;
706 
707 	bch2_account_io_completion(ca, bio_data_dir(bio),
708 				   ec_bio->submit_time, !bio->bi_status);
709 
710 	if (bio->bi_status) {
711 		bch_err_dev_ratelimited(ca, "erasure coding %s error: %s",
712 			       str_write_read(bio_data_dir(bio)),
713 			       bch2_blk_status_to_str(bio->bi_status));
714 		clear_bit(ec_bio->idx, ec_bio->buf->valid);
715 	}
716 
717 	int stale = dev_ptr_stale(ca, ptr);
718 	if (stale) {
719 		bch_err_ratelimited(ca->fs,
720 				    "error %s stripe: stale/invalid pointer (%i) after io",
721 				    bio_data_dir(bio) == READ ? "reading from" : "writing to",
722 				    stale);
723 		clear_bit(ec_bio->idx, ec_bio->buf->valid);
724 	}
725 
726 	bio_put(&ec_bio->bio);
727 	percpu_ref_put(&ca->io_ref);
728 	closure_put(cl);
729 }
730 
731 static void ec_block_io(struct bch_fs *c, struct ec_stripe_buf *buf,
732 			blk_opf_t opf, unsigned idx, struct closure *cl)
733 {
734 	struct bch_stripe *v = &bkey_i_to_stripe(&buf->key)->v;
735 	unsigned offset = 0, bytes = buf->size << 9;
736 	struct bch_extent_ptr *ptr = &v->ptrs[idx];
737 	enum bch_data_type data_type = idx < v->nr_blocks - v->nr_redundant
738 		? BCH_DATA_user
739 		: BCH_DATA_parity;
740 	int rw = op_is_write(opf);
741 
742 	struct bch_dev *ca = bch2_dev_get_ioref(c, ptr->dev, rw);
743 	if (!ca) {
744 		clear_bit(idx, buf->valid);
745 		return;
746 	}
747 
748 	int stale = dev_ptr_stale(ca, ptr);
749 	if (stale) {
750 		bch_err_ratelimited(c,
751 				    "error %s stripe: stale pointer (%i)",
752 				    rw == READ ? "reading from" : "writing to",
753 				    stale);
754 		clear_bit(idx, buf->valid);
755 		return;
756 	}
757 
758 
759 	this_cpu_add(ca->io_done->sectors[rw][data_type], buf->size);
760 
761 	while (offset < bytes) {
762 		unsigned nr_iovecs = min_t(size_t, BIO_MAX_VECS,
763 					   DIV_ROUND_UP(bytes, PAGE_SIZE));
764 		unsigned b = min_t(size_t, bytes - offset,
765 				   nr_iovecs << PAGE_SHIFT);
766 		struct ec_bio *ec_bio;
767 
768 		ec_bio = container_of(bio_alloc_bioset(ca->disk_sb.bdev,
769 						       nr_iovecs,
770 						       opf,
771 						       GFP_KERNEL,
772 						       &c->ec_bioset),
773 				      struct ec_bio, bio);
774 
775 		ec_bio->ca			= ca;
776 		ec_bio->buf			= buf;
777 		ec_bio->idx			= idx;
778 		ec_bio->submit_time		= local_clock();
779 
780 		ec_bio->bio.bi_iter.bi_sector	= ptr->offset + buf->offset + (offset >> 9);
781 		ec_bio->bio.bi_end_io		= ec_block_endio;
782 		ec_bio->bio.bi_private		= cl;
783 
784 		bch2_bio_map(&ec_bio->bio, buf->data[idx] + offset, b);
785 
786 		closure_get(cl);
787 		percpu_ref_get(&ca->io_ref);
788 
789 		submit_bio(&ec_bio->bio);
790 
791 		offset += b;
792 	}
793 
794 	percpu_ref_put(&ca->io_ref);
795 }
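/*
 * Large stripe blocks are split across multiple bios: each bio carries
 * at most BIO_MAX_VECS pages (with BIO_MAX_VECS == 256 and 4KiB pages,
 * 1MiB per bio), all completing via ec_block_endio() against the same
 * closure.
 */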
796 
797 static int get_stripe_key_trans(struct btree_trans *trans, u64 idx,
798 				struct ec_stripe_buf *stripe)
799 {
800 	struct btree_iter iter;
801 	struct bkey_s_c k;
802 	int ret;
803 
804 	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
805 			       POS(0, idx), BTREE_ITER_slots);
806 	ret = bkey_err(k);
807 	if (ret)
808 		goto err;
809 	if (k.k->type != KEY_TYPE_stripe) {
810 		ret = -ENOENT;
811 		goto err;
812 	}
813 	bkey_reassemble(&stripe->key, k);
814 err:
815 	bch2_trans_iter_exit(trans, &iter);
816 	return ret;
817 }
818 
819 /* recovery read path: */
820 int bch2_ec_read_extent(struct btree_trans *trans, struct bch_read_bio *rbio,
821 			struct bkey_s_c orig_k)
822 {
823 	struct bch_fs *c = trans->c;
824 	struct ec_stripe_buf *buf = NULL;
825 	struct closure cl;
826 	struct bch_stripe *v;
827 	unsigned i, offset;
828 	const char *msg = NULL;
829 	struct printbuf msgbuf = PRINTBUF;
830 	int ret = 0;
831 
832 	closure_init_stack(&cl);
833 
834 	BUG_ON(!rbio->pick.has_ec);
835 
836 	buf = kzalloc(sizeof(*buf), GFP_NOFS);
837 	if (!buf)
838 		return -BCH_ERR_ENOMEM_ec_read_extent;
839 
840 	ret = lockrestart_do(trans, get_stripe_key_trans(trans, rbio->pick.ec.idx, buf));
841 	if (ret) {
842 		msg = "stripe not found";
843 		goto err;
844 	}
845 
846 	v = &bkey_i_to_stripe(&buf->key)->v;
847 
848 	if (!bch2_ptr_matches_stripe(v, rbio->pick)) {
849 		msg = "pointer doesn't match stripe";
850 		goto err;
851 	}
852 
853 	offset = rbio->bio.bi_iter.bi_sector - v->ptrs[rbio->pick.ec.block].offset;
854 	if (offset + bio_sectors(&rbio->bio) > le16_to_cpu(v->sectors)) {
855 		msg = "read is bigger than stripe";
856 		goto err;
857 	}
858 
859 	ret = ec_stripe_buf_init(buf, offset, bio_sectors(&rbio->bio));
860 	if (ret) {
861 		msg = "-ENOMEM";
862 		goto err;
863 	}
864 
865 	for (i = 0; i < v->nr_blocks; i++)
866 		ec_block_io(c, buf, REQ_OP_READ, i, &cl);
867 
868 	closure_sync(&cl);
869 
870 	if (ec_nr_failed(buf) > v->nr_redundant) {
871 		msg = "unable to read enough blocks";
872 		goto err;
873 	}
874 
875 	ec_validate_checksums(c, buf);
876 
877 	ret = ec_do_recov(c, buf);
878 	if (ret)
879 		goto err;
880 
881 	memcpy_to_bio(&rbio->bio, rbio->bio.bi_iter,
882 		      buf->data[rbio->pick.ec.block] + ((offset - buf->offset) << 9));
883 out:
884 	ec_stripe_buf_exit(buf);
885 	kfree(buf);
886 	return ret;
887 err:
888 	bch2_bkey_val_to_text(&msgbuf, c, orig_k);
889 	bch_err_ratelimited(c,
890 			    "error doing reconstruct read: %s\n  %s", msg, msgbuf.buf);
891 	printbuf_exit(&msgbuf);
892 	ret = -BCH_ERR_stripe_reconstruct;
893 	goto out;
894 }
895 
896 /* stripe bucket accounting: */
897 
898 static int __ec_stripe_mem_alloc(struct bch_fs *c, size_t idx, gfp_t gfp)
899 {
900 	if (c->gc_pos.phase != GC_PHASE_not_running &&
901 	    !genradix_ptr_alloc(&c->gc_stripes, idx, gfp))
902 		return -BCH_ERR_ENOMEM_ec_stripe_mem_alloc;
903 
904 	return 0;
905 }
906 
907 static int ec_stripe_mem_alloc(struct btree_trans *trans,
908 			       struct btree_iter *iter)
909 {
910 	return allocate_dropping_locks_errcode(trans,
911 			__ec_stripe_mem_alloc(trans->c, iter->pos.offset, _gfp));
912 }
913 
914 /*
915  * Hash table of open stripes:
916  * Stripes that are being created or modified are kept in a hash table, so that
917  * stripe deletion can skip them.
918  */
919 
920 static bool __bch2_stripe_is_open(struct bch_fs *c, u64 idx)
921 {
922 	unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
923 	struct ec_stripe_new *s;
924 
925 	hlist_for_each_entry(s, &c->ec_stripes_new[hash], hash)
926 		if (s->idx == idx)
927 			return true;
928 	return false;
929 }
930 
931 static bool bch2_stripe_is_open(struct bch_fs *c, u64 idx)
932 {
933 	bool ret = false;
934 
935 	spin_lock(&c->ec_stripes_new_lock);
936 	ret = __bch2_stripe_is_open(c, idx);
937 	spin_unlock(&c->ec_stripes_new_lock);
938 
939 	return ret;
940 }
941 
942 static bool bch2_try_open_stripe(struct bch_fs *c,
943 				 struct ec_stripe_new *s,
944 				 u64 idx)
945 {
946 	bool ret;
947 
948 	spin_lock(&c->ec_stripes_new_lock);
949 	ret = !__bch2_stripe_is_open(c, idx);
950 	if (ret) {
951 		unsigned hash = hash_64(idx, ilog2(ARRAY_SIZE(c->ec_stripes_new)));
952 
953 		s->idx = idx;
954 		hlist_add_head(&s->hash, &c->ec_stripes_new[hash]);
955 	}
956 	spin_unlock(&c->ec_stripes_new_lock);
957 
958 	return ret;
959 }
960 
961 static void bch2_stripe_close(struct bch_fs *c, struct ec_stripe_new *s)
962 {
963 	BUG_ON(!s->idx);
964 
965 	spin_lock(&c->ec_stripes_new_lock);
966 	hlist_del_init(&s->hash);
967 	spin_unlock(&c->ec_stripes_new_lock);
968 
969 	s->idx = 0;
970 }
971 
972 /* stripe deletion */
973 
974 static int ec_stripe_delete(struct btree_trans *trans, u64 idx)
975 {
976 	struct btree_iter iter;
977 	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter,
978 					       BTREE_ID_stripes, POS(0, idx),
979 					       BTREE_ITER_intent);
980 	int ret = bkey_err(k);
981 	if (ret)
982 		goto err;
983 
984 	/*
985 	 * We expect write buffer races here
986 	 * Important: check stripe_is_open with stripe key locked:
987 	 */
988 	if (k.k->type == KEY_TYPE_stripe &&
989 	    !bch2_stripe_is_open(trans->c, idx) &&
990 	    stripe_lru_pos(bkey_s_c_to_stripe(k).v) == 1)
991 		ret = bch2_btree_delete_at(trans, &iter, 0);
992 err:
993 	bch2_trans_iter_exit(trans, &iter);
994 	return ret;
995 }
996 
997 /*
998  * XXX
999  * can we kill this and delete stripes from the trigger?
1000  */
1001 static void ec_stripe_delete_work(struct work_struct *work)
1002 {
1003 	struct bch_fs *c =
1004 		container_of(work, struct bch_fs, ec_stripe_delete_work);
1005 
1006 	bch2_trans_run(c,
1007 		bch2_btree_write_buffer_tryflush(trans) ?:
1008 		for_each_btree_key_max_commit(trans, lru_iter, BTREE_ID_lru,
1009 				lru_pos(BCH_LRU_STRIPE_FRAGMENTATION, 1, 0),
1010 				lru_pos(BCH_LRU_STRIPE_FRAGMENTATION, 1, LRU_TIME_MAX),
1011 				0, lru_k,
1012 				NULL, NULL,
1013 				BCH_TRANS_COMMIT_no_enospc, ({
1014 			ec_stripe_delete(trans, lru_k.k->p.offset);
1015 		})));
1016 	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
1017 }
1018 
1019 void bch2_do_stripe_deletes(struct bch_fs *c)
1020 {
1021 	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_stripe_delete) &&
1022 	    !queue_work(c->write_ref_wq, &c->ec_stripe_delete_work))
1023 		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_delete);
1024 }
1025 
1026 /* stripe creation: */
1027 
1028 static int ec_stripe_key_update(struct btree_trans *trans,
1029 				struct bkey_i_stripe *old,
1030 				struct bkey_i_stripe *new)
1031 {
1032 	struct bch_fs *c = trans->c;
1033 	bool create = !old;
1034 
1035 	struct btree_iter iter;
1036 	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_stripes,
1037 					       new->k.p, BTREE_ITER_intent);
1038 	int ret = bkey_err(k);
1039 	if (ret)
1040 		goto err;
1041 
1042 	if (bch2_fs_inconsistent_on(k.k->type != (create ? KEY_TYPE_deleted : KEY_TYPE_stripe),
1043 				    c, "error %s stripe: got existing key type %s",
1044 				    create ? "creating" : "updating",
1045 				    bch2_bkey_types[k.k->type])) {
1046 		ret = -EINVAL;
1047 		goto err;
1048 	}
1049 
1050 	if (k.k->type == KEY_TYPE_stripe) {
1051 		const struct bch_stripe *v = bkey_s_c_to_stripe(k).v;
1052 
1053 		BUG_ON(old->v.nr_blocks != new->v.nr_blocks);
1054 		BUG_ON(old->v.nr_blocks != v->nr_blocks);
1055 
1056 		for (unsigned i = 0; i < new->v.nr_blocks; i++) {
1057 			unsigned sectors = stripe_blockcount_get(v, i);
1058 
1059 			if (!bch2_extent_ptr_eq(old->v.ptrs[i], new->v.ptrs[i]) && sectors) {
1060 				struct printbuf buf = PRINTBUF;
1061 
1062 				prt_printf(&buf, "stripe changed nonempty block %u", i);
1063 				prt_str(&buf, "\nold: ");
1064 				bch2_bkey_val_to_text(&buf, c, k);
1065 				prt_str(&buf, "\nnew: ");
1066 				bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&new->k_i));
1067 				bch2_fs_inconsistent(c, "%s", buf.buf);
1068 				printbuf_exit(&buf);
1069 				ret = -EINVAL;
1070 				goto err;
1071 			}
1072 
1073 			/*
1074 			 * If the stripe ptr changed underneath us, it must have
1075 			 * been dev_remove_stripes() -> invalidate_stripe_to_dev()
1076 			 */
1077 			if (!bch2_extent_ptr_eq(old->v.ptrs[i], v->ptrs[i])) {
1078 				BUG_ON(v->ptrs[i].dev != BCH_SB_MEMBER_INVALID);
1079 
1080 				if (bch2_extent_ptr_eq(old->v.ptrs[i], new->v.ptrs[i]))
1081 					new->v.ptrs[i].dev = BCH_SB_MEMBER_INVALID;
1082 			}
1083 
1084 			stripe_blockcount_set(&new->v, i, sectors);
1085 		}
1086 	}
1087 
1088 	ret = bch2_trans_update(trans, &iter, &new->k_i, 0);
1089 err:
1090 	bch2_trans_iter_exit(trans, &iter);
1091 	return ret;
1092 }
1093 
1094 static int ec_stripe_update_extent(struct btree_trans *trans,
1095 				   struct bch_dev *ca,
1096 				   struct bpos bucket, u8 gen,
1097 				   struct ec_stripe_buf *s,
1098 				   struct bkey_s_c_backpointer bp,
1099 				   struct bkey_buf *last_flushed)
1100 {
1101 	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
1102 	struct bch_fs *c = trans->c;
1103 	struct btree_iter iter;
1104 	struct bkey_s_c k;
1105 	const struct bch_extent_ptr *ptr_c;
1106 	struct bch_extent_ptr *ec_ptr = NULL;
1107 	struct bch_extent_stripe_ptr stripe_ptr;
1108 	struct bkey_i *n;
1109 	int ret, dev, block;
1110 
1111 	if (bp.v->level) {
1112 		struct printbuf buf = PRINTBUF;
1113 		struct btree_iter node_iter;
1114 		struct btree *b;
1115 
1116 		b = bch2_backpointer_get_node(trans, bp, &node_iter, last_flushed);
1117 		bch2_trans_iter_exit(trans, &node_iter);
1118 
1119 		if (!b)
1120 			return 0;
1121 
1122 		prt_printf(&buf, "found btree node in erasure coded bucket: b=%px\n", b);
1123 		bch2_bkey_val_to_text(&buf, c, bp.s_c);
1124 
1125 		bch2_fs_inconsistent(c, "%s", buf.buf);
1126 		printbuf_exit(&buf);
1127 		return -BCH_ERR_erasure_coding_found_btree_node;
1128 	}
1129 
1130 	k = bch2_backpointer_get_key(trans, bp, &iter, BTREE_ITER_intent, last_flushed);
1131 	ret = bkey_err(k);
1132 	if (ret)
1133 		return ret;
1134 	if (!k.k) {
1135 		/*
1136 		 * extent no longer exists - we could flush the btree
1137 		 * write buffer and retry to verify, but no need:
1138 		 */
1139 		return 0;
1140 	}
1141 
1142 	if (extent_has_stripe_ptr(k, s->key.k.p.offset))
1143 		goto out;
1144 
1145 	ptr_c = bkey_matches_stripe(v, k, &block);
1146 	/*
1147 	 * It doesn't generally make sense to erasure code cached ptrs:
1148 	 * XXX: should we be incrementing a counter?
1149 	 */
1150 	if (!ptr_c || ptr_c->cached)
1151 		goto out;
1152 
1153 	dev = v->ptrs[block].dev;
1154 
1155 	n = bch2_trans_kmalloc(trans, bkey_bytes(k.k) + sizeof(stripe_ptr));
1156 	ret = PTR_ERR_OR_ZERO(n);
1157 	if (ret)
1158 		goto out;
1159 
1160 	bkey_reassemble(n, k);
1161 
1162 	bch2_bkey_drop_ptrs_noerror(bkey_i_to_s(n), ptr, ptr->dev != dev);
1163 	ec_ptr = bch2_bkey_has_device(bkey_i_to_s(n), dev);
1164 	BUG_ON(!ec_ptr);
1165 
1166 	stripe_ptr = (struct bch_extent_stripe_ptr) {
1167 		.type = 1 << BCH_EXTENT_ENTRY_stripe_ptr,
1168 		.block		= block,
1169 		.redundancy	= v->nr_redundant,
1170 		.idx		= s->key.k.p.offset,
1171 	};
1172 
1173 	__extent_entry_insert(n,
1174 			(union bch_extent_entry *) ec_ptr,
1175 			(union bch_extent_entry *) &stripe_ptr);
1176 
1177 	ret = bch2_trans_update(trans, &iter, n, 0);
1178 out:
1179 	bch2_trans_iter_exit(trans, &iter);
1180 	return ret;
1181 }
1182 
1183 static int ec_stripe_update_bucket(struct btree_trans *trans, struct ec_stripe_buf *s,
1184 				   unsigned block)
1185 {
1186 	struct bch_fs *c = trans->c;
1187 	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
1188 	struct bch_extent_ptr ptr = v->ptrs[block];
1189 	int ret = 0;
1190 
1191 	struct bch_dev *ca = bch2_dev_tryget(c, ptr.dev);
1192 	if (!ca)
1193 		return -BCH_ERR_ENOENT_dev_not_found;
1194 
1195 	struct bpos bucket_pos = PTR_BUCKET_POS(ca, &ptr);
1196 
1197 	struct bkey_buf last_flushed;
1198 	bch2_bkey_buf_init(&last_flushed);
1199 	bkey_init(&last_flushed.k->k);
1200 
1201 	ret = for_each_btree_key_max_commit(trans, bp_iter, BTREE_ID_backpointers,
1202 			bucket_pos_to_bp_start(ca, bucket_pos),
1203 			bucket_pos_to_bp_end(ca, bucket_pos), 0, bp_k,
1204 			NULL, NULL,
1205 			BCH_TRANS_COMMIT_no_check_rw|
1206 			BCH_TRANS_COMMIT_no_enospc, ({
1207 		if (bkey_ge(bp_k.k->p, bucket_pos_to_bp(ca, bpos_nosnap_successor(bucket_pos), 0)))
1208 			break;
1209 
1210 		if (bp_k.k->type != KEY_TYPE_backpointer)
1211 			continue;
1212 
1213 		struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(bp_k);
1214 		if (bp.v->btree_id == BTREE_ID_stripes)
1215 			continue;
1216 
1217 		ec_stripe_update_extent(trans, ca, bucket_pos, ptr.gen, s,
1218 					bp, &last_flushed);
1219 	}));
1220 
1221 	bch2_bkey_buf_exit(&last_flushed, c);
1222 	bch2_dev_put(ca);
1223 	return ret;
1224 }
1225 
1226 static int ec_stripe_update_extents(struct bch_fs *c, struct ec_stripe_buf *s)
1227 {
1228 	struct btree_trans *trans = bch2_trans_get(c);
1229 	struct bch_stripe *v = &bkey_i_to_stripe(&s->key)->v;
1230 	unsigned nr_data = v->nr_blocks - v->nr_redundant;
1231 
1232 	int ret = bch2_btree_write_buffer_flush_sync(trans);
1233 	if (ret)
1234 		goto err;
1235 
1236 	for (unsigned i = 0; i < nr_data; i++) {
1237 		ret = ec_stripe_update_bucket(trans, s, i);
1238 		if (ret)
1239 			break;
1240 	}
1241 err:
1242 	bch2_trans_put(trans);
1243 	return ret;
1244 }
1245 
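/*
 * The unwritten tail of a partially filled data bucket must be zeroed
 * both in the in-memory stripe buffer and on disk, so that the parity
 * we compute from the buffer matches what is actually stored.
 */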
1246 static void zero_out_rest_of_ec_bucket(struct bch_fs *c,
1247 				       struct ec_stripe_new *s,
1248 				       unsigned block,
1249 				       struct open_bucket *ob)
1250 {
1251 	struct bch_dev *ca = bch2_dev_get_ioref(c, ob->dev, WRITE);
1252 	if (!ca) {
1253 		s->err = -BCH_ERR_erofs_no_writes;
1254 		return;
1255 	}
1256 
1257 	unsigned offset = ca->mi.bucket_size - ob->sectors_free;
1258 	memset(s->new_stripe.data[block] + (offset << 9),
1259 	       0,
1260 	       ob->sectors_free << 9);
1261 
1262 	int ret = blkdev_issue_zeroout(ca->disk_sb.bdev,
1263 			ob->bucket * ca->mi.bucket_size + offset,
1264 			ob->sectors_free,
1265 			GFP_KERNEL, 0);
1266 
1267 	percpu_ref_put(&ca->io_ref);
1268 
1269 	if (ret)
1270 		s->err = ret;
1271 }
1272 
1273 void bch2_ec_stripe_new_free(struct bch_fs *c, struct ec_stripe_new *s)
1274 {
1275 	if (s->idx)
1276 		bch2_stripe_close(c, s);
1277 	kfree(s);
1278 }
1279 
1280 /*
1281  * data buckets of new stripe all written: create the stripe
1282  */
1283 static void ec_stripe_create(struct ec_stripe_new *s)
1284 {
1285 	struct bch_fs *c = s->c;
1286 	struct open_bucket *ob;
1287 	struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
1288 	unsigned i, nr_data = v->nr_blocks - v->nr_redundant;
1289 	int ret;
1290 
1291 	BUG_ON(s->h->s == s);
1292 
1293 	closure_sync(&s->iodone);
1294 
1295 	if (!s->err) {
1296 		for (i = 0; i < nr_data; i++)
1297 			if (s->blocks[i]) {
1298 				ob = c->open_buckets + s->blocks[i];
1299 
1300 				if (ob->sectors_free)
1301 					zero_out_rest_of_ec_bucket(c, s, i, ob);
1302 			}
1303 	}
1304 
1305 	if (s->err) {
1306 		if (!bch2_err_matches(s->err, EROFS))
1307 			bch_err(c, "error creating stripe: error writing data buckets");
1308 		ret = s->err;
1309 		goto err;
1310 	}
1311 
1312 	if (s->have_existing_stripe) {
1313 		ec_validate_checksums(c, &s->existing_stripe);
1314 
1315 		if (ec_do_recov(c, &s->existing_stripe)) {
1316 			bch_err(c, "error creating stripe: error reading existing stripe");
1317 			ret = -BCH_ERR_ec_block_read;
1318 			goto err;
1319 		}
1320 
1321 		for (i = 0; i < nr_data; i++)
1322 			if (stripe_blockcount_get(&bkey_i_to_stripe(&s->existing_stripe.key)->v, i))
1323 				swap(s->new_stripe.data[i],
1324 				     s->existing_stripe.data[i]);
1325 
1326 		ec_stripe_buf_exit(&s->existing_stripe);
1327 	}
1328 
1329 	BUG_ON(!s->allocated);
1330 	BUG_ON(!s->idx);
1331 
1332 	ec_generate_ec(&s->new_stripe);
1333 
1334 	ec_generate_checksums(&s->new_stripe);
1335 
1336 	/* write p/q: */
1337 	for (i = nr_data; i < v->nr_blocks; i++)
1338 		ec_block_io(c, &s->new_stripe, REQ_OP_WRITE, i, &s->iodone);
1339 	closure_sync(&s->iodone);
1340 
1341 	if (ec_nr_failed(&s->new_stripe)) {
1342 		bch_err(c, "error creating stripe: error writing redundancy buckets");
1343 		ret = -BCH_ERR_ec_block_write;
1344 		goto err;
1345 	}
1346 
1347 	ret = bch2_trans_commit_do(c, &s->res, NULL,
1348 		BCH_TRANS_COMMIT_no_check_rw|
1349 		BCH_TRANS_COMMIT_no_enospc,
1350 		ec_stripe_key_update(trans,
1351 				     s->have_existing_stripe
1352 				     ? bkey_i_to_stripe(&s->existing_stripe.key)
1353 				     : NULL,
1354 				     bkey_i_to_stripe(&s->new_stripe.key)));
1355 	bch_err_msg(c, ret, "creating stripe key");
1356 	if (ret) {
1357 		goto err;
1358 	}
1359 
1360 	ret = ec_stripe_update_extents(c, &s->new_stripe);
1361 	bch_err_msg(c, ret, "error updating extents");
1362 	if (ret)
1363 		goto err;
1364 err:
1365 	trace_stripe_create(c, s->idx, ret);
1366 
1367 	bch2_disk_reservation_put(c, &s->res);
1368 
1369 	for (i = 0; i < v->nr_blocks; i++)
1370 		if (s->blocks[i]) {
1371 			ob = c->open_buckets + s->blocks[i];
1372 
1373 			if (i < nr_data) {
1374 				ob->ec = NULL;
1375 				__bch2_open_bucket_put(c, ob);
1376 			} else {
1377 				bch2_open_bucket_put(c, ob);
1378 			}
1379 		}
1380 
1381 	mutex_lock(&c->ec_stripe_new_lock);
1382 	list_del(&s->list);
1383 	mutex_unlock(&c->ec_stripe_new_lock);
1384 	wake_up(&c->ec_stripe_new_wait);
1385 
1386 	ec_stripe_buf_exit(&s->existing_stripe);
1387 	ec_stripe_buf_exit(&s->new_stripe);
1388 	closure_debug_destroy(&s->iodone);
1389 
1390 	ec_stripe_new_put(c, s, STRIPE_REF_stripe);
1391 }
1392 
1393 static struct ec_stripe_new *get_pending_stripe(struct bch_fs *c)
1394 {
1395 	struct ec_stripe_new *s;
1396 
1397 	mutex_lock(&c->ec_stripe_new_lock);
1398 	list_for_each_entry(s, &c->ec_stripe_new_list, list)
1399 		if (!atomic_read(&s->ref[STRIPE_REF_io]))
1400 			goto out;
1401 	s = NULL;
1402 out:
1403 	mutex_unlock(&c->ec_stripe_new_lock);
1404 
1405 	return s;
1406 }
1407 
1408 static void ec_stripe_create_work(struct work_struct *work)
1409 {
1410 	struct bch_fs *c = container_of(work,
1411 		struct bch_fs, ec_stripe_create_work);
1412 	struct ec_stripe_new *s;
1413 
1414 	while ((s = get_pending_stripe(c)))
1415 		ec_stripe_create(s);
1416 
1417 	bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
1418 }
1419 
1420 void bch2_ec_do_stripe_creates(struct bch_fs *c)
1421 {
1422 	bch2_write_ref_get(c, BCH_WRITE_REF_stripe_create);
1423 
1424 	if (!queue_work(system_long_wq, &c->ec_stripe_create_work))
1425 		bch2_write_ref_put(c, BCH_WRITE_REF_stripe_create);
1426 }
1427 
1428 static void ec_stripe_new_set_pending(struct bch_fs *c, struct ec_stripe_head *h)
1429 {
1430 	struct ec_stripe_new *s = h->s;
1431 
1432 	lockdep_assert_held(&h->lock);
1433 
1434 	BUG_ON(!s->allocated && !s->err);
1435 
1436 	h->s		= NULL;
1437 	s->pending	= true;
1438 
1439 	mutex_lock(&c->ec_stripe_new_lock);
1440 	list_add(&s->list, &c->ec_stripe_new_list);
1441 	mutex_unlock(&c->ec_stripe_new_lock);
1442 
1443 	ec_stripe_new_put(c, s, STRIPE_REF_io);
1444 }
1445 
1446 static void ec_stripe_new_cancel(struct bch_fs *c, struct ec_stripe_head *h, int err)
1447 {
1448 	h->s->err = err;
1449 	ec_stripe_new_set_pending(c, h);
1450 }
1451 
1452 void bch2_ec_bucket_cancel(struct bch_fs *c, struct open_bucket *ob, int err)
1453 {
1454 	struct ec_stripe_new *s = ob->ec;
1455 
1456 	s->err = err;
1457 }
1458 
1459 void *bch2_writepoint_ec_buf(struct bch_fs *c, struct write_point *wp)
1460 {
1461 	struct open_bucket *ob = ec_open_bucket(c, &wp->ptrs);
1462 	if (!ob)
1463 		return NULL;
1464 
1465 	BUG_ON(!ob->ec->new_stripe.data[ob->ec_idx]);
1466 
1467 	struct bch_dev *ca	= ob_dev(c, ob);
1468 	unsigned offset		= ca->mi.bucket_size - ob->sectors_free;
1469 
1470 	return ob->ec->new_stripe.data[ob->ec_idx] + (offset << 9);
1471 }
1472 
1473 static int unsigned_cmp(const void *_l, const void *_r)
1474 {
1475 	unsigned l = *((const unsigned *) _l);
1476 	unsigned r = *((const unsigned *) _r);
1477 
1478 	return cmp_int(l, r);
1479 }
1480 
1481 /* pick most common bucket size: */
1482 static unsigned pick_blocksize(struct bch_fs *c,
1483 			       struct bch_devs_mask *devs)
1484 {
1485 	unsigned nr = 0, sizes[BCH_SB_MEMBERS_MAX];
1486 	struct {
1487 		unsigned nr, size;
1488 	} cur = { 0, 0 }, best = { 0, 0 };
1489 
1490 	for_each_member_device_rcu(c, ca, devs)
1491 		sizes[nr++] = ca->mi.bucket_size;
1492 
1493 	sort(sizes, nr, sizeof(unsigned), unsigned_cmp, NULL);
1494 
1495 	for (unsigned i = 0; i < nr; i++) {
1496 		if (sizes[i] != cur.size) {
1497 			if (cur.nr > best.nr)
1498 				best = cur;
1499 
1500 			cur.nr = 0;
1501 			cur.size = sizes[i];
1502 		}
1503 
1504 		cur.nr++;
1505 	}
1506 
1507 	if (cur.nr > best.nr)
1508 		best = cur;
1509 
1510 	return best.size;
1511 }
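/*
 * E.g. bucket sizes { 1024, 1024, 2048 } pick 1024; on a tie the smaller
 * size wins, since a later run must be strictly longer to replace the
 * current best.
 */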
1512 
1513 static bool may_create_new_stripe(struct bch_fs *c)
1514 {
1515 	return false;
1516 }
1517 
1518 static void ec_stripe_key_init(struct bch_fs *c,
1519 			       struct bkey_i *k,
1520 			       unsigned nr_data,
1521 			       unsigned nr_parity,
1522 			       unsigned stripe_size,
1523 			       unsigned disk_label)
1524 {
1525 	struct bkey_i_stripe *s = bkey_stripe_init(k);
1526 	unsigned u64s;
1527 
1528 	s->v.sectors			= cpu_to_le16(stripe_size);
1529 	s->v.algorithm			= 0;
1530 	s->v.nr_blocks			= nr_data + nr_parity;
1531 	s->v.nr_redundant		= nr_parity;
1532 	s->v.csum_granularity_bits	= ilog2(c->opts.encoded_extent_max >> 9);
1533 	s->v.csum_type			= BCH_CSUM_crc32c;
1534 	s->v.disk_label			= disk_label;
1535 
1536 	while ((u64s = stripe_val_u64s(&s->v)) > BKEY_VAL_U64s_MAX) {
1537 		BUG_ON(1 << s->v.csum_granularity_bits >=
1538 		       le16_to_cpu(s->v.sectors) ||
1539 		       s->v.csum_granularity_bits == U8_MAX);
1540 		s->v.csum_granularity_bits++;
1541 	}
1542 
1543 	set_bkey_val_u64s(&s->k, u64s);
1544 }
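/*
 * If the stripe key won't fit in a bkey (too many checksums), the
 * checksum granularity is doubled until it does: halving the number of
 * checksums per block shrinks stripe_val_u64s().
 */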
1545 
1546 static struct ec_stripe_new *ec_new_stripe_alloc(struct bch_fs *c, struct ec_stripe_head *h)
1547 {
1548 	struct ec_stripe_new *s;
1549 
1550 	lockdep_assert_held(&h->lock);
1551 
1552 	s = kzalloc(sizeof(*s), GFP_KERNEL);
1553 	if (!s)
1554 		return NULL;
1555 
1556 	mutex_init(&s->lock);
1557 	closure_init(&s->iodone, NULL);
1558 	atomic_set(&s->ref[STRIPE_REF_stripe], 1);
1559 	atomic_set(&s->ref[STRIPE_REF_io], 1);
1560 	s->c		= c;
1561 	s->h		= h;
1562 	s->nr_data	= min_t(unsigned, h->nr_active_devs,
1563 				BCH_BKEY_PTRS_MAX) - h->redundancy;
1564 	s->nr_parity	= h->redundancy;
1565 
1566 	ec_stripe_key_init(c, &s->new_stripe.key,
1567 			   s->nr_data, s->nr_parity,
1568 			   h->blocksize, h->disk_label);
1569 	return s;
1570 }
1571 
1572 static void ec_stripe_head_devs_update(struct bch_fs *c, struct ec_stripe_head *h)
1573 {
1574 	struct bch_devs_mask devs = h->devs;
1575 
1576 	rcu_read_lock();
1577 	h->devs = target_rw_devs(c, BCH_DATA_user, h->disk_label
1578 				 ? group_to_target(h->disk_label - 1)
1579 				 : 0);
1580 	unsigned nr_devs = dev_mask_nr(&h->devs);
1581 
1582 	for_each_member_device_rcu(c, ca, &h->devs)
1583 		if (!ca->mi.durability)
1584 			__clear_bit(ca->dev_idx, h->devs.d);
1585 	unsigned nr_devs_with_durability = dev_mask_nr(&h->devs);
1586 
1587 	h->blocksize = pick_blocksize(c, &h->devs);
1588 
1589 	h->nr_active_devs = 0;
1590 	for_each_member_device_rcu(c, ca, &h->devs)
1591 		if (ca->mi.bucket_size == h->blocksize)
1592 			h->nr_active_devs++;
1593 
1594 	rcu_read_unlock();
1595 
1596 	/*
1597 	 * If we only have redundancy + 1 devices, we're better off with just
1598 	 * replication:
1599 	 */
1600 	h->insufficient_devs = h->nr_active_devs < h->redundancy + 2;
1601 
1602 	if (h->insufficient_devs) {
1603 		const char *err;
1604 
1605 		if (nr_devs < h->redundancy + 2)
1606 			err = NULL;
1607 		else if (nr_devs_with_durability < h->redundancy + 2)
1608 			err = "cannot use durability=0 devices";
1609 		else
1610 			err = "mismatched bucket sizes";
1611 
1612 		if (err)
1613 			bch_err(c, "insufficient devices available to create stripe (have %u, need %u): %s",
1614 				h->nr_active_devs, h->redundancy + 2, err);
1615 	}
1616 
1617 	struct bch_devs_mask devs_leaving;
1618 	bitmap_andnot(devs_leaving.d, devs.d, h->devs.d, BCH_SB_MEMBERS_MAX);
1619 
1620 	if (h->s && !h->s->allocated && dev_mask_nr(&devs_leaving))
1621 		ec_stripe_new_cancel(c, h, -EINTR);
1622 
1623 	h->rw_devs_change_count = c->rw_devs_change_count;
1624 }
1625 
1626 static struct ec_stripe_head *
1627 ec_new_stripe_head_alloc(struct bch_fs *c, unsigned disk_label,
1628 			 unsigned algo, unsigned redundancy,
1629 			 enum bch_watermark watermark)
1630 {
1631 	struct ec_stripe_head *h;
1632 
1633 	h = kzalloc(sizeof(*h), GFP_KERNEL);
1634 	if (!h)
1635 		return NULL;
1636 
1637 	mutex_init(&h->lock);
1638 	BUG_ON(!mutex_trylock(&h->lock));
1639 
1640 	h->disk_label	= disk_label;
1641 	h->algo		= algo;
1642 	h->redundancy	= redundancy;
1643 	h->watermark	= watermark;
1644 
1645 	list_add(&h->list, &c->ec_stripe_head_list);
1646 	return h;
1647 }
1648 
1649 void bch2_ec_stripe_head_put(struct bch_fs *c, struct ec_stripe_head *h)
1650 {
1651 	if (h->s &&
1652 	    h->s->allocated &&
1653 	    bitmap_weight(h->s->blocks_allocated,
1654 			  h->s->nr_data) == h->s->nr_data)
1655 		ec_stripe_new_set_pending(c, h);
1656 
1657 	mutex_unlock(&h->lock);
1658 }
1659 
1660 static struct ec_stripe_head *
1661 __bch2_ec_stripe_head_get(struct btree_trans *trans,
1662 			  unsigned disk_label,
1663 			  unsigned algo,
1664 			  unsigned redundancy,
1665 			  enum bch_watermark watermark)
1666 {
1667 	struct bch_fs *c = trans->c;
1668 	struct ec_stripe_head *h;
1669 	int ret;
1670 
1671 	if (!redundancy)
1672 		return NULL;
1673 
1674 	ret = bch2_trans_mutex_lock(trans, &c->ec_stripe_head_lock);
1675 	if (ret)
1676 		return ERR_PTR(ret);
1677 
1678 	if (test_bit(BCH_FS_going_ro, &c->flags)) {
1679 		h = ERR_PTR(-BCH_ERR_erofs_no_writes);
1680 		goto err;
1681 	}
1682 
1683 	list_for_each_entry(h, &c->ec_stripe_head_list, list)
1684 		if (h->disk_label	== disk_label &&
1685 		    h->algo		== algo &&
1686 		    h->redundancy	== redundancy &&
1687 		    h->watermark	== watermark) {
1688 			ret = bch2_trans_mutex_lock(trans, &h->lock);
1689 			if (ret) {
1690 				h = ERR_PTR(ret);
1691 				goto err;
1692 			}
1693 			goto found;
1694 		}
1695 
1696 	h = ec_new_stripe_head_alloc(c, disk_label, algo, redundancy, watermark);
1697 	if (!h) {
1698 		h = ERR_PTR(-BCH_ERR_ENOMEM_stripe_head_alloc);
1699 		goto err;
1700 	}
1701 found:
1702 	if (h->rw_devs_change_count != c->rw_devs_change_count)
1703 		ec_stripe_head_devs_update(c, h);
1704 
1705 	if (h->insufficient_devs) {
1706 		mutex_unlock(&h->lock);
1707 		h = NULL;
1708 	}
1709 err:
1710 	mutex_unlock(&c->ec_stripe_head_lock);
1711 	return h;
1712 }
1713 
1714 static int new_stripe_alloc_buckets(struct btree_trans *trans,
1715 				    struct ec_stripe_head *h, struct ec_stripe_new *s,
1716 				    enum bch_watermark watermark, struct closure *cl)
1717 {
1718 	struct bch_fs *c = trans->c;
1719 	struct bch_devs_mask devs = h->devs;
1720 	struct open_bucket *ob;
1721 	struct open_buckets buckets;
1722 	struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
1723 	unsigned i, j, nr_have_parity = 0, nr_have_data = 0;
1724 	bool have_cache = true;
1725 	int ret = 0;
1726 
1727 	BUG_ON(v->nr_blocks	!= s->nr_data + s->nr_parity);
1728 	BUG_ON(v->nr_redundant	!= s->nr_parity);
1729 
1730 	/* We bypass the sector allocator which normally does this: */
1731 	bitmap_and(devs.d, devs.d, c->rw_devs[BCH_DATA_user].d, BCH_SB_MEMBERS_MAX);
1732 
1733 	for_each_set_bit(i, s->blocks_gotten, v->nr_blocks) {
1734 		/*
1735 		 * Note: we don't yet repair invalid blocks (failed/removed
1736 		 * devices) when reusing stripes - we still need a codepath to
1737 		 * walk backpointers and update all extents that point to that
1738 		 * block when updating the stripe
1739 		 */
1740 		if (v->ptrs[i].dev != BCH_SB_MEMBER_INVALID)
1741 			__clear_bit(v->ptrs[i].dev, devs.d);
1742 
1743 		if (i < s->nr_data)
1744 			nr_have_data++;
1745 		else
1746 			nr_have_parity++;
1747 	}
1748 
1749 	BUG_ON(nr_have_data	> s->nr_data);
1750 	BUG_ON(nr_have_parity	> s->nr_parity);
1751 
1752 	buckets.nr = 0;
1753 	if (nr_have_parity < s->nr_parity) {
1754 		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
1755 					    &h->parity_stripe,
1756 					    &devs,
1757 					    s->nr_parity,
1758 					    &nr_have_parity,
1759 					    &have_cache, 0,
1760 					    BCH_DATA_parity,
1761 					    watermark,
1762 					    cl);
1763 
1764 		open_bucket_for_each(c, &buckets, ob, i) {
1765 			j = find_next_zero_bit(s->blocks_gotten,
1766 					       s->nr_data + s->nr_parity,
1767 					       s->nr_data);
1768 			BUG_ON(j >= s->nr_data + s->nr_parity);
1769 
1770 			s->blocks[j] = buckets.v[i];
1771 			v->ptrs[j] = bch2_ob_ptr(c, ob);
1772 			__set_bit(j, s->blocks_gotten);
1773 		}
1774 
1775 		if (ret)
1776 			return ret;
1777 	}
1778 
1779 	buckets.nr = 0;
1780 	if (nr_have_data < s->nr_data) {
1781 		ret = bch2_bucket_alloc_set_trans(trans, &buckets,
1782 					    &h->block_stripe,
1783 					    &devs,
1784 					    s->nr_data,
1785 					    &nr_have_data,
1786 					    &have_cache, 0,
1787 					    BCH_DATA_user,
1788 					    watermark,
1789 					    cl);
1790 
1791 		open_bucket_for_each(c, &buckets, ob, i) {
1792 			j = find_next_zero_bit(s->blocks_gotten,
1793 					       s->nr_data, 0);
1794 			BUG_ON(j >= s->nr_data);
1795 
1796 			s->blocks[j] = buckets.v[i];
1797 			v->ptrs[j] = bch2_ob_ptr(c, ob);
1798 			__set_bit(j, s->blocks_gotten);
1799 		}
1800 
1801 		if (ret)
1802 			return ret;
1803 	}
1804 
1805 	return 0;
1806 }
1807 
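/*
 * Try to reuse the existing stripe at @idx: returns 1 if it matches this head
 * and we won the race to open it (its key is copied into @stripe), 0 on a
 * benign mismatch or write buffer race, or a negative error code.
 */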
1808 static int __get_existing_stripe(struct btree_trans *trans,
1809 				 struct ec_stripe_head *head,
1810 				 struct ec_stripe_buf *stripe,
1811 				 u64 idx)
1812 {
1813 	struct bch_fs *c = trans->c;
1814 
1815 	struct btree_iter iter;
1816 	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter,
1817 					  BTREE_ID_stripes, POS(0, idx), 0);
1818 	int ret = bkey_err(k);
1819 	if (ret)
1820 		goto err;
1821 
1822 	/* We expect write buffer races here */
1823 	if (k.k->type != KEY_TYPE_stripe)
1824 		goto out;
1825 
1826 	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
1827 	if (stripe_lru_pos(s.v) <= 1)
1828 		goto out;
1829 
1830 	if (s.v->disk_label		== head->disk_label &&
1831 	    s.v->algorithm		== head->algo &&
1832 	    s.v->nr_redundant		== head->redundancy &&
1833 	    le16_to_cpu(s.v->sectors)	== head->blocksize &&
1834 	    bch2_try_open_stripe(c, head->s, idx)) {
1835 		bkey_reassemble(&stripe->key, k);
1836 		ret = 1;
1837 	}
1838 out:
1839 	bch2_set_btree_iter_dontneed(&iter);
1840 err:
1841 	bch2_trans_iter_exit(trans, &iter);
1842 	return ret;
1843 }
1844 
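/*
 * Prepare @s to fill in the empty blocks of the existing stripe it's reusing:
 * buckets we'd already allocated are released (they might conflict with the
 * existing blocks), blocks that still hold data are marked allocated, and the
 * whole existing stripe is read in so that new parity can be computed.
 */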
1845 static int init_new_stripe_from_existing(struct bch_fs *c, struct ec_stripe_new *s)
1846 {
1847 	struct bch_stripe *new_v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
1848 	struct bch_stripe *existing_v = &bkey_i_to_stripe(&s->existing_stripe.key)->v;
1849 	unsigned i;
1850 
1851 	BUG_ON(existing_v->nr_redundant != s->nr_parity);
1852 	s->nr_data = existing_v->nr_blocks -
1853 		existing_v->nr_redundant;
1854 
1855 	int ret = ec_stripe_buf_init(&s->existing_stripe, 0, le16_to_cpu(existing_v->sectors));
1856 	if (ret) {
1857 		bch2_stripe_close(c, s);
1858 		return ret;
1859 	}
1860 
1861 	BUG_ON(s->existing_stripe.size != le16_to_cpu(existing_v->sectors));
1862 
1863 	/*
1864 	 * Free buckets we initially allocated - they might conflict with
1865 	 * blocks from the stripe we're reusing:
1866 	 */
1867 	for_each_set_bit(i, s->blocks_gotten, new_v->nr_blocks) {
1868 		bch2_open_bucket_put(c, c->open_buckets + s->blocks[i]);
1869 		s->blocks[i] = 0;
1870 	}
1871 	memset(s->blocks_gotten, 0, sizeof(s->blocks_gotten));
1872 	memset(s->blocks_allocated, 0, sizeof(s->blocks_allocated));
1873 
1874 	for (unsigned i = 0; i < existing_v->nr_blocks; i++) {
1875 		if (stripe_blockcount_get(existing_v, i)) {
1876 			__set_bit(i, s->blocks_gotten);
1877 			__set_bit(i, s->blocks_allocated);
1878 		}
1879 
1880 		ec_block_io(c, &s->existing_stripe, READ, i, &s->iodone);
1881 	}
1882 
1883 	bkey_copy(&s->new_stripe.key, &s->existing_stripe.key);
1884 	s->have_existing_stripe = true;
1885 
1886 	return 0;
1887 }
1888 
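/*
 * Find an existing stripe with empty blocks to reuse, by walking the stripe
 * fragmentation LRU. Returns -BCH_ERR_stripe_alloc_blocked if no candidate
 * was found and the caller has to wait (e.g. on copygc).
 */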
1889 static int __bch2_ec_stripe_head_reuse(struct btree_trans *trans, struct ec_stripe_head *h,
1890 				       struct ec_stripe_new *s)
1891 {
1892 	struct bch_fs *c = trans->c;
1893 
1894 	/*
1895 	 * If we can't allocate a new stripe, and there's no stripes with empty
1896 	 * blocks for us to reuse, that means we have to wait on copygc:
1897 	 */
1898 	if (may_create_new_stripe(c))
1899 		return -1;
1900 
1901 	struct btree_iter lru_iter;
1902 	struct bkey_s_c lru_k;
1903 	int ret = 0;
1904 
1905 	for_each_btree_key_max_norestart(trans, lru_iter, BTREE_ID_lru,
1906 			lru_pos(BCH_LRU_STRIPE_FRAGMENTATION, 2, 0),
1907 			lru_pos(BCH_LRU_STRIPE_FRAGMENTATION, 2, LRU_TIME_MAX),
1908 			0, lru_k, ret) {
1909 		ret = __get_existing_stripe(trans, h, &s->existing_stripe, lru_k.k->p.offset);
1910 		if (ret)
1911 			break;
1912 	}
1913 	bch2_trans_iter_exit(trans, &lru_iter);
1914 	if (!ret)
1915 		ret = -BCH_ERR_stripe_alloc_blocked;
1916 	if (ret == 1)
1917 		ret = 0;
1918 	if (ret)
1919 		return ret;
1920 
1921 	return init_new_stripe_from_existing(c, s);
1922 }
1923 
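/*
 * Reserve disk space and a slot in the stripes btree for @s: scan forward
 * from c->ec_stripe_hint for a deleted slot we can open, wrapping around to
 * the start once before failing with -BCH_ERR_ENOSPC_stripe_create.
 */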
1924 static int __bch2_ec_stripe_head_reserve(struct btree_trans *trans, struct ec_stripe_head *h,
1925 					 struct ec_stripe_new *s)
1926 {
1927 	struct bch_fs *c = trans->c;
1928 	struct btree_iter iter;
1929 	struct bkey_s_c k;
1930 	struct bpos min_pos = POS(0, 1);
1931 	struct bpos start_pos = bpos_max(min_pos, POS(0, c->ec_stripe_hint));
1932 	int ret;
1933 
1934 	if (!s->res.sectors) {
1935 		ret = bch2_disk_reservation_get(c, &s->res,
1936 					h->blocksize,
1937 					s->nr_parity,
1938 					BCH_DISK_RESERVATION_NOFAIL);
1939 		if (ret)
1940 			return ret;
1941 	}
1942 
1943 	/*
1944 	 * Allocate stripe slot
1945 	 * XXX: we're going to need a bitrange btree of free stripes
1946 	 */
1947 	for_each_btree_key_norestart(trans, iter, BTREE_ID_stripes, start_pos,
1948 			   BTREE_ITER_slots|BTREE_ITER_intent, k, ret) {
1949 		if (bkey_gt(k.k->p, POS(0, U32_MAX))) {
1950 			if (start_pos.offset) {
1951 				start_pos = min_pos;
1952 				bch2_btree_iter_set_pos(&iter, start_pos);
1953 				continue;
1954 			}
1955 
1956 			ret = -BCH_ERR_ENOSPC_stripe_create;
1957 			break;
1958 		}
1959 
1960 		if (bkey_deleted(k.k) &&
1961 		    bch2_try_open_stripe(c, s, k.k->p.offset))
1962 			break;
1963 	}
1964 
1965 	c->ec_stripe_hint = iter.pos.offset;
1966 
1967 	if (ret)
1968 		goto err;
1969 
1970 	ret = ec_stripe_mem_alloc(trans, &iter);
1971 	if (ret) {
1972 		bch2_stripe_close(c, s);
1973 		goto err;
1974 	}
1975 
1976 	s->new_stripe.key.k.p = iter.pos;
1977 out:
1978 	bch2_trans_iter_exit(trans, &iter);
1979 	return ret;
1980 err:
1981 	bch2_disk_reservation_put(c, &s->res);
1982 	goto out;
1983 }
1984 
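/*
 * Hypothetical usage sketch (illustrative only, not copied from a real
 * caller; the live callers are in the foreground allocator):
 *
 *	struct ec_stripe_head *h =
 *		bch2_ec_stripe_head_get(trans, target, 0, nr_replicas - 1,
 *					watermark, cl);
 *	if (IS_ERR_OR_NULL(h))
 *		return h;
 *	... take buckets from h->s->blocks ...
 *	bch2_ec_stripe_head_put(c, h);
 */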
1985 struct ec_stripe_head *bch2_ec_stripe_head_get(struct btree_trans *trans,
1986 					       unsigned target,
1987 					       unsigned algo,
1988 					       unsigned redundancy,
1989 					       enum bch_watermark watermark,
1990 					       struct closure *cl)
1991 {
1992 	struct bch_fs *c = trans->c;
1993 	struct ec_stripe_head *h;
1994 	bool waiting = false;
1995 	unsigned disk_label = 0;
1996 	struct target t = target_decode(target);
1997 	int ret;
1998 
1999 	if (t.type == TARGET_GROUP) {
2000 		if (t.group > U8_MAX) {
2001 			bch_err(c, "cannot create a stripe when disk_label > U8_MAX");
2002 			return NULL;
2003 		}
2004 		disk_label = t.group + 1; /* 0 == no label */
2005 	}
2006 
2007 	h = __bch2_ec_stripe_head_get(trans, disk_label, algo, redundancy, watermark);
2008 	if (IS_ERR_OR_NULL(h))
2009 		return h;
2010 
2011 	if (!h->s) {
2012 		h->s = ec_new_stripe_alloc(c, h);
2013 		if (!h->s) {
2014 			ret = -BCH_ERR_ENOMEM_ec_new_stripe_alloc;
2015 			bch_err(c, "failed to allocate new stripe");
2016 			goto err;
2017 		}
2018 
2019 		h->nr_created++;
2020 	}
2021 
2022 	struct ec_stripe_new *s = h->s;
2023 
2024 	if (s->allocated)
2025 		goto allocated;
2026 
2027 	if (s->have_existing_stripe)
2028 		goto alloc_existing;
2029 
2030 	/* First, try to allocate a full stripe: */
2031 	ret =   new_stripe_alloc_buckets(trans, h, s, BCH_WATERMARK_stripe, NULL) ?:
2032 		__bch2_ec_stripe_head_reserve(trans, h, s);
2033 	if (!ret)
2034 		goto allocate_buf;
2035 	if (bch2_err_matches(ret, BCH_ERR_transaction_restart) ||
2036 	    bch2_err_matches(ret, ENOMEM))
2037 		goto err;
2038 
2039 	/*
2040 	 * Not enough buckets available for a full stripe: we must reuse an
2041 	 * existing stripe:
2042 	 */
2043 	while (1) {
2044 		ret = __bch2_ec_stripe_head_reuse(trans, h, s);
2045 		if (!ret)
2046 			break;
2047 		if (waiting || !cl || ret != -BCH_ERR_stripe_alloc_blocked)
2048 			goto err;
2049 
2050 		if (watermark == BCH_WATERMARK_copygc) {
2051 			ret =   new_stripe_alloc_buckets(trans, h, s, watermark, NULL) ?:
2052 				__bch2_ec_stripe_head_reserve(trans, h, s);
2053 			if (ret)
2054 				goto err;
2055 			goto allocate_buf;
2056 		}
2057 
2058 		/* XXX freelist_wait? */
2059 		closure_wait(&c->freelist_wait, cl);
2060 		waiting = true;
2061 	}
2062 
2063 	if (waiting)
2064 		closure_wake_up(&c->freelist_wait);
2065 alloc_existing:
2066 	/*
2067 	 * Retry allocating buckets, with the watermark for this
2068 	 * particular write:
2069 	 */
2070 	ret = new_stripe_alloc_buckets(trans, h, s, watermark, cl);
2071 	if (ret)
2072 		goto err;
2073 
2074 allocate_buf:
2075 	ret = ec_stripe_buf_init(&s->new_stripe, 0, h->blocksize);
2076 	if (ret)
2077 		goto err;
2078 
2079 	s->allocated = true;
2080 allocated:
2081 	BUG_ON(!s->idx);
2082 	BUG_ON(!s->new_stripe.data[0]);
2083 	BUG_ON(trans->restarted);
2084 	return h;
2085 err:
2086 	bch2_ec_stripe_head_put(c, h);
2087 	return ERR_PTR(ret);
2088 }
2089 
2090 /* device removal */
2091 
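/*
 * Drop a dying device from the stripe its alloc key @k_a points to: the
 * bucket must hold no live stripe data, the matching stripe pointer is marked
 * BCH_SB_MEMBER_INVALID, and the replicas accounting is moved from the old
 * device set to the new one.
 */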
2092 static int bch2_invalidate_stripe_to_dev(struct btree_trans *trans, struct bkey_s_c k_a)
2093 {
2094 	struct bch_alloc_v4 a_convert;
2095 	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k_a, &a_convert);
2096 
2097 	if (!a->stripe)
2098 		return 0;
2099 
2100 	if (a->stripe_sectors) {
2101 		bch_err(trans->c, "trying to invalidate device in stripe when bucket has stripe data");
2102 		return -BCH_ERR_invalidate_stripe_to_dev;
2103 	}
2104 
2105 	struct btree_iter iter;
2106 	struct bkey_i_stripe *s =
2107 		bch2_bkey_get_mut_typed(trans, &iter, BTREE_ID_stripes, POS(0, a->stripe),
2108 					BTREE_ITER_slots, stripe);
2109 	int ret = PTR_ERR_OR_ZERO(s);
2110 	if (ret)
2111 		return ret;
2112 
2113 	struct disk_accounting_pos acc = {
2114 		.type = BCH_DISK_ACCOUNTING_replicas,
2115 	};
2116 
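	/*
	 * Move the accounting: subtract the stripe's sectors under the old
	 * replicas entry, then re-add them once the dying device's pointer
	 * has been marked invalid:
	 */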
2117 	s64 sectors = 0;
2118 	for (unsigned i = 0; i < s->v.nr_blocks; i++)
2119 		sectors -= stripe_blockcount_get(&s->v, i);
2120 
2121 	bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
2122 	acc.replicas.data_type = BCH_DATA_user;
2123 	ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
2124 	if (ret)
2125 		goto err;
2126 
2127 	struct bkey_ptrs ptrs = bch2_bkey_ptrs(bkey_i_to_s(&s->k_i));
2128 	bkey_for_each_ptr(ptrs, ptr)
2129 		if (ptr->dev == k_a.k->p.inode)
2130 			ptr->dev = BCH_SB_MEMBER_INVALID;
2131 
2132 	sectors = -sectors;
2133 
2134 	bch2_bkey_to_replicas(&acc.replicas, bkey_i_to_s_c(&s->k_i));
2135 	acc.replicas.data_type = BCH_DATA_user;
2136 	ret = bch2_disk_accounting_mod(trans, &acc, &sectors, 1, false);
2137 	if (ret)
2138 		goto err;
2139 err:
2140 	bch2_trans_iter_exit(trans, &iter);
2141 	return ret;
2142 }
2143 
2144 int bch2_dev_remove_stripes(struct bch_fs *c, unsigned dev_idx)
2145 {
2146 	return bch2_trans_run(c,
2147 		for_each_btree_key_max_commit(trans, iter,
2148 				  BTREE_ID_alloc, POS(dev_idx, 0), POS(dev_idx, U64_MAX),
2149 				  BTREE_ITER_intent, k,
2150 				  NULL, NULL, 0, ({
2151 			bch2_invalidate_stripe_to_dev(trans, k);
2152 	})));
2153 }
2154 
2155 /* startup/shutdown */
2156 
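/*
 * Cancel any pending stripe that has a block open on @ca; with @ca == NULL,
 * cancel all pending stripes (filesystem shutdown).
 */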
2157 static void __bch2_ec_stop(struct bch_fs *c, struct bch_dev *ca)
2158 {
2159 	struct ec_stripe_head *h;
2160 	struct open_bucket *ob;
2161 	unsigned i;
2162 
2163 	mutex_lock(&c->ec_stripe_head_lock);
2164 	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
2165 		mutex_lock(&h->lock);
2166 		if (!h->s)
2167 			goto unlock;
2168 
2169 		if (!ca)
2170 			goto found;
2171 
2172 		for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++) {
2173 			if (!h->s->blocks[i])
2174 				continue;
2175 
2176 			ob = c->open_buckets + h->s->blocks[i];
2177 			if (ob->dev == ca->dev_idx)
2178 				goto found;
2179 		}
2180 		goto unlock;
2181 found:
2182 		ec_stripe_new_cancel(c, h, -BCH_ERR_erofs_no_writes);
2183 unlock:
2184 		mutex_unlock(&h->lock);
2185 	}
2186 	mutex_unlock(&c->ec_stripe_head_lock);
2187 }
2188 
2189 void bch2_ec_stop_dev(struct bch_fs *c, struct bch_dev *ca)
2190 {
2191 	__bch2_ec_stop(c, ca);
2192 }
2193 
2194 void bch2_fs_ec_stop(struct bch_fs *c)
2195 {
2196 	__bch2_ec_stop(c, NULL);
2197 }
2198 
2199 static bool bch2_fs_ec_flush_done(struct bch_fs *c)
2200 {
2201 	bool ret;
2202 
2203 	mutex_lock(&c->ec_stripe_new_lock);
2204 	ret = list_empty(&c->ec_stripe_new_list);
2205 	mutex_unlock(&c->ec_stripe_new_lock);
2206 
2207 	return ret;
2208 }
2209 
2210 void bch2_fs_ec_flush(struct bch_fs *c)
2211 {
2212 	wait_event(c->ec_stripe_new_wait, bch2_fs_ec_flush_done(c));
2213 }
2214 
2215 int bch2_stripes_read(struct bch_fs *c)
2216 {
2217 	return 0;
2218 }
2219 
2220 static void bch2_new_stripe_to_text(struct printbuf *out, struct bch_fs *c,
2221 				    struct ec_stripe_new *s)
2222 {
2223 	prt_printf(out, "\tidx %llu blocks %u+%u allocated %u ref %u %u %s obs",
2224 		   s->idx, s->nr_data, s->nr_parity,
2225 		   bitmap_weight(s->blocks_allocated, s->nr_data),
2226 		   atomic_read(&s->ref[STRIPE_REF_io]),
2227 		   atomic_read(&s->ref[STRIPE_REF_stripe]),
2228 		   bch2_watermarks[s->h->watermark]);
2229 
2230 	struct bch_stripe *v = &bkey_i_to_stripe(&s->new_stripe.key)->v;
2231 	unsigned i;
2232 	for_each_set_bit(i, s->blocks_gotten, v->nr_blocks)
2233 		prt_printf(out, " %u", s->blocks[i]);
2234 	prt_newline(out);
2235 	bch2_bkey_val_to_text(out, c, bkey_i_to_s_c(&s->new_stripe.key));
2236 	prt_newline(out);
2237 }
2238 
2239 void bch2_new_stripes_to_text(struct printbuf *out, struct bch_fs *c)
2240 {
2241 	struct ec_stripe_head *h;
2242 	struct ec_stripe_new *s;
2243 
2244 	mutex_lock(&c->ec_stripe_head_lock);
2245 	list_for_each_entry(h, &c->ec_stripe_head_list, list) {
2246 		prt_printf(out, "disk label %u algo %u redundancy %u %s nr created %llu:\n",
2247 		       h->disk_label, h->algo, h->redundancy,
2248 		       bch2_watermarks[h->watermark],
2249 		       h->nr_created);
2250 
2251 		if (h->s)
2252 			bch2_new_stripe_to_text(out, c, h->s);
2253 	}
2254 	mutex_unlock(&c->ec_stripe_head_lock);
2255 
2256 	prt_printf(out, "in flight:\n");
2257 
2258 	mutex_lock(&c->ec_stripe_new_lock);
2259 	list_for_each_entry(s, &c->ec_stripe_new_list, list)
2260 		bch2_new_stripe_to_text(out, c, s);
2261 	mutex_unlock(&c->ec_stripe_new_lock);
2262 }
2263 
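/*
 * Tear down the stripe heads at shutdown; by now bch2_fs_ec_stop() should
 * have cancelled every pending stripe, so no head may still hold open
 * buckets and the new-stripe list must be empty.
 */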
2264 void bch2_fs_ec_exit(struct bch_fs *c)
2265 {
2266 	struct ec_stripe_head *h;
2267 	unsigned i;
2268 
2269 	while (1) {
2270 		mutex_lock(&c->ec_stripe_head_lock);
2271 		h = list_pop_entry(&c->ec_stripe_head_list, struct ec_stripe_head, list);
2272 		mutex_unlock(&c->ec_stripe_head_lock);
2273 
2274 		if (!h)
2275 			break;
2276 
2277 		if (h->s) {
2278 			for (i = 0; i < bkey_i_to_stripe(&h->s->new_stripe.key)->v.nr_blocks; i++)
2279 				BUG_ON(h->s->blocks[i]);
2280 
2281 			kfree(h->s);
2282 		}
2283 		kfree(h);
2284 	}
2285 
2286 	BUG_ON(!list_empty(&c->ec_stripe_new_list));
2287 
2288 	bioset_exit(&c->ec_bioset);
2289 }
2290 
2291 void bch2_fs_ec_init_early(struct bch_fs *c)
2292 {
2293 	spin_lock_init(&c->ec_stripes_new_lock);
2294 
2295 	INIT_LIST_HEAD(&c->ec_stripe_head_list);
2296 	mutex_init(&c->ec_stripe_head_lock);
2297 
2298 	INIT_LIST_HEAD(&c->ec_stripe_new_list);
2299 	mutex_init(&c->ec_stripe_new_lock);
2300 	init_waitqueue_head(&c->ec_stripe_new_wait);
2301 
2302 	INIT_WORK(&c->ec_stripe_create_work, ec_stripe_create_work);
2303 	INIT_WORK(&c->ec_stripe_delete_work, ec_stripe_delete_work);
2304 }
2305 
2306 int bch2_fs_ec_init(struct bch_fs *c)
2307 {
2308 	return bioset_init(&c->ec_bioset, 1, offsetof(struct ec_bio, bio),
2309 			   BIOSET_NEED_BVECS);
2310 }
2311 
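/*
 * fsck: verify that every stripe with a nonzero stripe_lru_pos() has a
 * matching entry in the fragmentation LRU, which
 * __bch2_ec_stripe_head_reuse() relies on to find reusable stripes.
 */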
2312 static int bch2_check_stripe_to_lru_ref(struct btree_trans *trans,
2313 					struct bkey_s_c k,
2314 					struct bkey_buf *last_flushed)
2315 {
2316 	if (k.k->type != KEY_TYPE_stripe)
2317 		return 0;
2318 
2319 	struct bkey_s_c_stripe s = bkey_s_c_to_stripe(k);
2320 
2321 	u64 lru_idx = stripe_lru_pos(s.v);
2322 	if (lru_idx) {
2323 		int ret = bch2_lru_check_set(trans, BCH_LRU_STRIPE_FRAGMENTATION,
2324 					     k.k->p.offset, lru_idx, k, last_flushed);
2325 		if (ret)
2326 			return ret;
2327 	}
2328 	return 0;
2329 }
2330 
2331 int bch2_check_stripe_to_lru_refs(struct bch_fs *c)
2332 {
2333 	struct bkey_buf last_flushed;
2334 
2335 	bch2_bkey_buf_init(&last_flushed);
2336 	bkey_init(&last_flushed.k->k);
2337 
2338 	int ret = bch2_trans_run(c,
2339 		for_each_btree_key_commit(trans, iter, BTREE_ID_stripes,
2340 				POS_MIN, BTREE_ITER_prefetch, k,
2341 				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
2342 			bch2_check_stripe_to_lru_ref(trans, k, &last_flushed)));
2343 
2344 	bch2_bkey_buf_exit(&last_flushed, c);
2345 	bch_err_fn(c, ret);
2346 	return ret;
2347 }
2348