xref: /linux/fs/bcachefs/alloc_background.c (revision 39daa09d34ada1bc7227d68def63e0a2105b5496)
// SPDX-License-Identifier: GPL-2.0
#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "backpointers.h"
#include "btree_cache.h"
#include "btree_io.h"
#include "btree_key_cache.h"
#include "btree_update.h"
#include "btree_update_interior.h"
#include "btree_gc.h"
#include "btree_write_buffer.h"
#include "buckets.h"
#include "buckets_waiting_for_journal.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "lru.h"
#include "recovery.h"
#include "trace.h"
#include "varint.h"

#include <linux/kthread.h>
#include <linux/math64.h>
#include <linux/random.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>
#include <linux/sort.h>

static void bch2_discard_one_bucket_fast(struct bch_dev *, u64);

/* Persistent alloc info: */

static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
#define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
	BCH_ALLOC_FIELDS_V1()
#undef x
};
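
/*
 * A v1 alloc key is packed: a->fields is a bitmask of which fields are
 * present, and each present field is stored little endian at the fixed byte
 * width given by BCH_ALLOC_V1_FIELD_BYTES, in field number order.
 */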

struct bkey_alloc_unpacked {
	u64		journal_seq;
	u8		gen;
	u8		oldest_gen;
	u8		data_type;
	bool		need_discard:1;
	bool		need_inc_gen:1;
#define x(_name, _bits)	u##_bits _name;
	BCH_ALLOC_FIELDS_V2()
#undef  x
};

static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
				     const void **p, unsigned field)
{
	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
	u64 v;

	if (!(a->fields & (1 << field)))
		return 0;

	switch (bytes) {
	case 1:
		v = *((const u8 *) *p);
		break;
	case 2:
		v = le16_to_cpup(*p);
		break;
	case 4:
		v = le32_to_cpup(*p);
		break;
	case 8:
		v = le64_to_cpup(*p);
		break;
	default:
		BUG();
	}

	*p += bytes;
	return v;
}

static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
				 struct bkey_s_c k)
{
	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
	const void *d = in->data;
	unsigned idx = 0;

	out->gen = in->gen;

#define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
	BCH_ALLOC_FIELDS_V1()
#undef  x
}

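/*
 * v2 and v3 alloc keys encode their fields as varints; fields past
 * nr_fields default to zero, and the "v != out->_name" check below rejects
 * decoded values that don't fit in the unpacked field's width.
 */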
static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef  x
	return 0;
}

static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
				struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
	const u8 *in = a.v->data;
	const u8 *end = bkey_val_end(a);
	unsigned fieldnr = 0;
	int ret;
	u64 v;

	out->gen	= a.v->gen;
	out->oldest_gen	= a.v->oldest_gen;
	out->data_type	= a.v->data_type;
	out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
	out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
	out->journal_seq = le64_to_cpu(a.v->journal_seq);

#define x(_name, _bits)							\
	if (fieldnr < a.v->nr_fields) {					\
		ret = bch2_varint_decode_fast(in, end, &v);		\
		if (ret < 0)						\
			return ret;					\
		in += ret;						\
	} else {							\
		v = 0;							\
	}								\
	out->_name = v;							\
	if (v != out->_name)						\
		return -1;						\
	fieldnr++;

	BCH_ALLOC_FIELDS_V2()
#undef  x
	return 0;
}

static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
{
	struct bkey_alloc_unpacked ret = { .gen	= 0 };

	switch (k.k->type) {
	case KEY_TYPE_alloc:
		bch2_alloc_unpack_v1(&ret, k);
		break;
	case KEY_TYPE_alloc_v2:
		bch2_alloc_unpack_v2(&ret, k);
		break;
	case KEY_TYPE_alloc_v3:
		bch2_alloc_unpack_v3(&ret, k);
		break;
	}

	return ret;
}

static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
{
	unsigned i, bytes = offsetof(struct bch_alloc, data);

	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
		if (a->fields & (1 << i))
			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];

	return DIV_ROUND_UP(bytes, sizeof(u64));
}

int bch2_alloc_v1_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bch_validate_flags flags,
			  struct printbuf *err)
{
	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
	int ret = 0;

	/* allow for unknown fields */
	bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v), c, err,
			 alloc_v1_val_size_bad,
			 "incorrect value size (%zu < %u)",
			 bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
fsck_err:
	return ret;
}

int bch2_alloc_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bch_validate_flags flags,
			  struct printbuf *err)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k), c, err,
			 alloc_v2_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}

int bch2_alloc_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bch_validate_flags flags,
			  struct printbuf *err)
{
	struct bkey_alloc_unpacked u;
	int ret = 0;

	bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k), c, err,
			 alloc_v2_unpack_error,
			 "unpack error");
fsck_err:
	return ret;
}

int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
			  enum bch_validate_flags flags, struct printbuf *err)
{
	struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
	int ret = 0;

	bkey_fsck_err_on(alloc_v4_u64s_noerror(a.v) > bkey_val_u64s(k.k), c, err,
			 alloc_v4_val_size_bad,
			 "bad val size (%u > %zu)",
			 alloc_v4_u64s_noerror(a.v), bkey_val_u64s(k.k));

	bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
			 BCH_ALLOC_V4_NR_BACKPOINTERS(a.v), c, err,
			 alloc_v4_backpointers_start_bad,
			 "invalid backpointers_start");

	bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type, c, err,
			 alloc_key_data_type_bad,
			 "invalid data type (got %u should be %u)",
			 a.v->data_type, alloc_data_type(*a.v, a.v->data_type));

	for (unsigned i = 0; i < 2; i++)
		bkey_fsck_err_on(a.v->io_time[i] > LRU_TIME_MAX,
				 c, err,
				 alloc_key_io_time_bad,
				 "invalid io_time[%s]: %llu, max %llu",
				 i == READ ? "read" : "write",
				 a.v->io_time[i], LRU_TIME_MAX);

	switch (a.v->data_type) {
	case BCH_DATA_free:
	case BCH_DATA_need_gc_gens:
	case BCH_DATA_need_discard:
		bkey_fsck_err_on(bch2_bucket_sectors_total(*a.v) || a.v->stripe,
				 c, err, alloc_key_empty_but_have_data,
				 "empty data type free but have data");
		break;
	case BCH_DATA_sb:
	case BCH_DATA_journal:
	case BCH_DATA_btree:
	case BCH_DATA_user:
	case BCH_DATA_parity:
		bkey_fsck_err_on(!bch2_bucket_sectors_dirty(*a.v),
				 c, err, alloc_key_dirty_sectors_0,
				 "data_type %s but dirty_sectors==0",
				 bch2_data_type_str(a.v->data_type));
		break;
	case BCH_DATA_cached:
		bkey_fsck_err_on(!a.v->cached_sectors ||
				 bch2_bucket_sectors_dirty(*a.v) ||
				 a.v->stripe,
				 c, err, alloc_key_cached_inconsistency,
				 "data type inconsistency");

		bkey_fsck_err_on(!a.v->io_time[READ] &&
				 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
				 c, err, alloc_key_cached_but_read_time_zero,
				 "cached bucket with read_time == 0");
		break;
	case BCH_DATA_stripe:
		break;
	}
fsck_err:
	return ret;
}

void bch2_alloc_v4_swab(struct bkey_s k)
{
	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
	struct bch_backpointer *bp, *bps;

	a->journal_seq		= swab64(a->journal_seq);
	a->flags		= swab32(a->flags);
	a->dirty_sectors	= swab32(a->dirty_sectors);
	a->cached_sectors	= swab32(a->cached_sectors);
	a->io_time[0]		= swab64(a->io_time[0]);
	a->io_time[1]		= swab64(a->io_time[1]);
	a->stripe		= swab32(a->stripe);
	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
	a->fragmentation_lru	= swab64(a->fragmentation_lru);

	bps = alloc_v4_backpointers(a);
	for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
		bp->bucket_offset	= swab40(bp->bucket_offset);
		bp->bucket_len		= swab32(bp->bucket_len);
		bch2_bpos_swab(&bp->pos);
	}
}

void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bch_alloc_v4 _a;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);

	prt_newline(out);
	printbuf_indent_add(out, 2);

	prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
	bch2_prt_data_type(out, a->data_type);
	prt_newline(out);
	prt_printf(out, "journal_seq       %llu\n",	a->journal_seq);
	prt_printf(out, "need_discard      %llu\n",	BCH_ALLOC_V4_NEED_DISCARD(a));
	prt_printf(out, "need_inc_gen      %llu\n",	BCH_ALLOC_V4_NEED_INC_GEN(a));
	prt_printf(out, "dirty_sectors     %u\n",	a->dirty_sectors);
	prt_printf(out, "cached_sectors    %u\n",	a->cached_sectors);
	prt_printf(out, "stripe            %u\n",	a->stripe);
	prt_printf(out, "stripe_redundancy %u\n",	a->stripe_redundancy);
	prt_printf(out, "io_time[READ]     %llu\n",	a->io_time[READ]);
	prt_printf(out, "io_time[WRITE]    %llu\n",	a->io_time[WRITE]);
	prt_printf(out, "fragmentation     %llu\n",	a->fragmentation_lru);
	prt_printf(out, "bp_start          %llu\n", BCH_ALLOC_V4_BACKPOINTERS_START(a));
	printbuf_indent_sub(out, 2);
}

void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
{
	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		*out = *bkey_s_c_to_alloc_v4(k).v;

		src = alloc_v4_backpointers(out);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(out);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
	} else {
		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);

		*out = (struct bch_alloc_v4) {
			.journal_seq		= u.journal_seq,
			.flags			= u.need_discard,
			.gen			= u.gen,
			.oldest_gen		= u.oldest_gen,
			.data_type		= u.data_type,
			.stripe_redundancy	= u.stripe_redundancy,
			.dirty_sectors		= u.dirty_sectors,
			.cached_sectors		= u.cached_sectors,
			.io_time[READ]		= u.read_time,
			.io_time[WRITE]		= u.write_time,
			.stripe			= u.stripe,
		};

		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
	}
}

static noinline struct bkey_i_alloc_v4 *
__bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_i_alloc_v4 *ret;

	ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
	if (IS_ERR(ret))
		return ret;

	if (k.k->type == KEY_TYPE_alloc_v4) {
		void *src, *dst;

		bkey_reassemble(&ret->k_i, k);

		src = alloc_v4_backpointers(&ret->v);
		SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
		dst = alloc_v4_backpointers(&ret->v);

		if (src < dst)
			memset(src, 0, dst - src);

		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
		set_alloc_v4_u64s(ret);
	} else {
		bkey_alloc_v4_init(&ret->k_i);
		ret->k.p = k.k->p;
		bch2_alloc_to_v4(k, &ret->v);
	}
	return ret;
}

static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
{
	struct bkey_s_c_alloc_v4 a;

	if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
	    ((a = bkey_s_c_to_alloc_v4(k), true) &&
	     BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
		return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);

	return __bch2_alloc_to_v4_mut(trans, k);
}

struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
{
	return bch2_alloc_to_v4_mut_inlined(trans, k);
}

struct bkey_i_alloc_v4 *
bch2_trans_start_alloc_update_noupdate(struct btree_trans *trans, struct btree_iter *iter,
				       struct bpos pos)
{
	struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
					       BTREE_ITER_with_updates|
					       BTREE_ITER_cached|
					       BTREE_ITER_intent);
	int ret = bkey_err(k);
	if (unlikely(ret))
		return ERR_PTR(ret);

	struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (unlikely(ret))
		goto err;
	return a;
err:
	bch2_trans_iter_exit(trans, iter);
	return ERR_PTR(ret);
}

__flatten
struct bkey_i_alloc_v4 *bch2_trans_start_alloc_update(struct btree_trans *trans, struct bpos pos)
{
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update_noupdate(trans, &iter, pos);
	int ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ERR_PTR(ret);

	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
	return unlikely(ret) ? ERR_PTR(ret) : a;
}

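/*
 * Each bucket_gens key packs the generation numbers of
 * KEY_TYPE_BUCKET_GENS_NR consecutive buckets: the low bits of an alloc
 * key's offset index into the gens[] array, the remaining bits select the
 * bucket_gens key.
 */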
static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
{
	*offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;

	pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
	return pos;
}

static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
{
	pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
	pos.offset += offset;
	return pos;
}

static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
{
	return k.k->type == KEY_TYPE_bucket_gens
		? bkey_s_c_to_bucket_gens(k).v->gens[offset]
		: 0;
}

int bch2_bucket_gens_invalid(struct bch_fs *c, struct bkey_s_c k,
			     enum bch_validate_flags flags,
			     struct printbuf *err)
{
	int ret = 0;

	bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens), c, err,
			 bucket_gens_val_size_bad,
			 "bad val size (%zu != %zu)",
			 bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
fsck_err:
	return ret;
}

void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
{
	struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
		if (i)
			prt_char(out, ' ');
		prt_printf(out, "%u", g.v->gens[i]);
	}
}

int bch2_bucket_gens_init(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bkey_i_bucket_gens g;
	bool have_bucket_gens_key = false;
	int ret;

	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
				 BTREE_ITER_prefetch, k, ({
		/*
		 * Not a fsck error because this is checked/repaired by
		 * bch2_check_alloc_key() which runs later:
		 */
		if (!bch2_dev_bucket_exists(c, k.k->p))
			continue;

		struct bch_alloc_v4 a;
		u8 gen = bch2_alloc_to_v4(k, &a)->gen;
		unsigned offset;
		struct bpos pos = alloc_gens_pos(iter.pos, &offset);
		int ret2 = 0;

		if (have_bucket_gens_key && !bkey_eq(g.k.p, pos)) {
			ret2 =  bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0) ?:
				bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
			if (ret2)
				goto iter_err;
			have_bucket_gens_key = false;
		}

		if (!have_bucket_gens_key) {
			bkey_bucket_gens_init(&g.k_i);
			g.k.p = pos;
			have_bucket_gens_key = true;
		}

		g.v.gens[offset] = gen;
iter_err:
		ret2;
	}));

	if (have_bucket_gens_key && !ret)
		ret = commit_do(trans, NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc,
			bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));

	bch2_trans_put(trans);

	bch_err_fn(c, ret);
	return ret;
}

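/*
 * At startup, populate the in-memory bucket generation numbers from the
 * bucket_gens btree when the on-disk format supports it, otherwise by
 * scanning every alloc key:
 */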
int bch2_alloc_read(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct bch_dev *ca = NULL;
	int ret;

	down_read(&c->gc_lock);

	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
		ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
					 BTREE_ITER_prefetch, k, ({
			u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
			u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;

			if (k.k->type != KEY_TYPE_bucket_gens)
				continue;

			ca = bch2_dev_iterate(c, ca, k.k->p.inode);
			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!ca) {
				bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
				continue;
			}

			const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;

			for (u64 b = max_t(u64, ca->mi.first_bucket, start);
			     b < min_t(u64, ca->mi.nbuckets, end);
			     b++)
				*bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
			0;
		}));
	} else {
		ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
					 BTREE_ITER_prefetch, k, ({
			ca = bch2_dev_iterate(c, ca, k.k->p.inode);
			/*
			 * Not a fsck error because this is checked/repaired by
			 * bch2_check_alloc_key() which runs later:
			 */
			if (!ca) {
				bch2_btree_iter_set_pos(&iter, POS(k.k->p.inode + 1, 0));
				continue;
			}

			struct bch_alloc_v4 a;
			*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
			0;
		}));
	}

	bch2_dev_put(ca);
	bch2_trans_put(trans);
	up_read(&c->gc_lock);

	bch_err_fn(c, ret);
	return ret;
}

/* Free space/discard btree: */

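/*
 * Keep the need_discard and freespace btrees in sync with a bucket's alloc
 * state: buckets in those two states are tracked by a KEY_TYPE_set key in
 * the corresponding btree, which this sets or clears:
 */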
static int bch2_bucket_do_index(struct btree_trans *trans,
				struct bch_dev *ca,
				struct bkey_s_c alloc_k,
				const struct bch_alloc_v4 *a,
				bool set)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_s_c old;
	struct bkey_i *k;
	enum btree_id btree;
	enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
	enum bch_bkey_type new_type =  set ? KEY_TYPE_set : KEY_TYPE_deleted;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (a->data_type != BCH_DATA_free &&
	    a->data_type != BCH_DATA_need_discard)
		return 0;

	k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
	if (IS_ERR(k))
		return PTR_ERR(k);

	bkey_init(&k->k);
	k->k.type = new_type;

	switch (a->data_type) {
	case BCH_DATA_free:
		btree = BTREE_ID_freespace;
		k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
		bch2_key_resize(&k->k, 1);
		break;
	case BCH_DATA_need_discard:
		btree = BTREE_ID_need_discard;
		k->k.p = alloc_k.k->p;
		break;
	default:
		return 0;
	}

	old = bch2_bkey_get_iter(trans, &iter, btree,
			     bkey_start_pos(&k->k),
			     BTREE_ITER_intent);
	ret = bkey_err(old);
	if (ret)
		return ret;

	if (ca->mi.freespace_initialized &&
	    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info &&
	    bch2_trans_inconsistent_on(old.k->type != old_type, trans,
			"incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
			"  for %s",
			set ? "setting" : "clearing",
			bch2_btree_id_str(btree),
			iter.pos.inode,
			iter.pos.offset,
			bch2_bkey_types[old.k->type],
			bch2_bkey_types[old_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = -EIO;
		goto err;
	}

	ret = bch2_trans_update(trans, &iter, k, 0);
err:
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}

static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
					   struct bpos bucket, u8 gen)
{
	struct btree_iter iter;
	unsigned offset;
	struct bpos pos = alloc_gens_pos(bucket, &offset);
	struct bkey_i_bucket_gens *g;
	struct bkey_s_c k;
	int ret;

	g = bch2_trans_kmalloc(trans, sizeof(*g));
	ret = PTR_ERR_OR_ZERO(g);
	if (ret)
		return ret;

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
			       BTREE_ITER_intent|
			       BTREE_ITER_with_updates);
	ret = bkey_err(k);
	if (ret)
		return ret;

	if (k.k->type != KEY_TYPE_bucket_gens) {
		bkey_bucket_gens_init(&g->k_i);
		g->k.p = iter.pos;
	} else {
		bkey_reassemble(&g->k_i, k);
	}

	g->v.gens[offset] = gen;

	ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

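/*
 * Alloc triggers run in two phases: the transactional phase queues the
 * dependent updates (freespace/need_discard index, LRUs, bucket_gens) within
 * the same transaction, while the atomic phase runs at commit time to update
 * in-memory state, wake waiters and kick off discards/invalidates:
 */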
int bch2_trigger_alloc(struct btree_trans *trans,
		       enum btree_id btree, unsigned level,
		       struct bkey_s_c old, struct bkey_s new,
		       enum btree_iter_update_trigger_flags flags)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
	if (!ca)
		return -EIO;

	struct bch_alloc_v4 old_a_convert;
	const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);

	if (flags & BTREE_TRIGGER_transactional) {
		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;

		alloc_data_type_set(new_a, new_a->data_type);

		if (bch2_bucket_sectors_total(*new_a) > bch2_bucket_sectors_total(*old_a)) {
			new_a->io_time[READ] = bch2_current_io_time(c, READ);
			new_a->io_time[WRITE] = bch2_current_io_time(c, WRITE);
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
			SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
		}

		if (data_type_is_empty(new_a->data_type) &&
		    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
			new_a->gen++;
			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
			alloc_data_type_set(new_a, new_a->data_type);
		}

		if (old_a->data_type != new_a->data_type ||
		    (new_a->data_type == BCH_DATA_free &&
		     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
			ret =   bch2_bucket_do_index(trans, ca, old, old_a, false) ?:
				bch2_bucket_do_index(trans, ca, new.s_c, new_a, true);
			if (ret)
				goto err;
		}

		if (new_a->data_type == BCH_DATA_cached &&
		    !new_a->io_time[READ])
			new_a->io_time[READ] = bch2_current_io_time(c, READ);

		u64 old_lru = alloc_lru_idx_read(*old_a);
		u64 new_lru = alloc_lru_idx_read(*new_a);
		if (old_lru != new_lru) {
			ret = bch2_lru_change(trans, new.k->p.inode,
					      bucket_to_u64(new.k->p),
					      old_lru, new_lru);
			if (ret)
				goto err;
		}

		new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a, ca);
		if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
			ret = bch2_lru_change(trans,
					BCH_LRU_FRAGMENTATION_START,
					bucket_to_u64(new.k->p),
					old_a->fragmentation_lru, new_a->fragmentation_lru);
			if (ret)
				goto err;
		}

		if (old_a->gen != new_a->gen) {
			ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
			if (ret)
				goto err;
		}

		/*
		 * We need to know whether we're getting called from the
		 * invalidate path or not:
		 */

		if ((flags & BTREE_TRIGGER_bucket_invalidate) &&
		    old_a->cached_sectors) {
			ret = bch2_update_cached_sectors_list(trans, new.k->p.inode,
							      -((s64) old_a->cached_sectors));
			if (ret)
				goto err;
		}
	}

	if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
		u64 journal_seq = trans->journal_res.seq;
		u64 bucket_journal_seq = new_a->journal_seq;

		if ((flags & BTREE_TRIGGER_insert) &&
		    data_type_is_empty(old_a->data_type) !=
		    data_type_is_empty(new_a->data_type) &&
		    new.k->type == KEY_TYPE_alloc_v4) {
			struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v;

			/*
			 * If the btree updates referring to a bucket weren't flushed
			 * before the bucket became empty again, then we don't have
			 * to wait on a journal flush before we can reuse the bucket:
			 */
			v->journal_seq = bucket_journal_seq =
				data_type_is_empty(new_a->data_type) &&
				(journal_seq == v->journal_seq ||
				 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
				? 0 : journal_seq;
		}

		if (!data_type_is_empty(old_a->data_type) &&
		    data_type_is_empty(new_a->data_type) &&
		    bucket_journal_seq) {
			ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
					c->journal.flushed_seq_ondisk,
					new.k->p.inode, new.k->p.offset,
					bucket_journal_seq);
			if (ret) {
				bch2_fs_fatal_error(c,
					"setting bucket_needs_journal_commit: %s", bch2_err_str(ret));
				goto err;
			}
		}

		percpu_down_read(&c->mark_lock);
		if (new_a->gen != old_a->gen) {
			u8 *gen = bucket_gen(ca, new.k->p.offset);
			if (unlikely(!gen)) {
				percpu_up_read(&c->mark_lock);
				goto invalid_bucket;
			}
			*gen = new_a->gen;
		}

		bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false);
		percpu_up_read(&c->mark_lock);

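/*
 * statechange(expr) evaluates @expr against both the old and the new alloc
 * state, and is true on a false -> true transition:
 */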
#define eval_state(_a, expr)		({ const struct bch_alloc_v4 *a = _a; expr; })
#define statechange(expr)		!eval_state(old_a, expr) && eval_state(new_a, expr)
#define bucket_flushed(a)		(!a->journal_seq || a->journal_seq <= c->journal.flushed_seq_ondisk)

		if (statechange(a->data_type == BCH_DATA_free) &&
		    bucket_flushed(new_a))
			closure_wake_up(&c->freelist_wait);

		if (statechange(a->data_type == BCH_DATA_need_discard) &&
		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset) &&
		    bucket_flushed(new_a))
			bch2_discard_one_bucket_fast(ca, new.k->p.offset);

		if (statechange(a->data_type == BCH_DATA_cached) &&
		    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
		    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
			bch2_dev_do_invalidates(ca);

		if (statechange(a->data_type == BCH_DATA_need_gc_gens))
			bch2_gc_gens_async(c);
	}

	if ((flags & BTREE_TRIGGER_gc) &&
	    (flags & BTREE_TRIGGER_bucket_invalidate)) {
		struct bch_alloc_v4 new_a_convert;
		const struct bch_alloc_v4 *new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);

		percpu_down_read(&c->mark_lock);
		struct bucket *g = gc_bucket(ca, new.k->p.offset);
		if (unlikely(!g)) {
			percpu_up_read(&c->mark_lock);
			goto invalid_bucket;
		}
		g->gen_valid	= 1;

		bucket_lock(g);

		g->gen_valid		= 1;
		g->gen			= new_a->gen;
		g->data_type		= new_a->data_type;
		g->stripe		= new_a->stripe;
		g->stripe_redundancy	= new_a->stripe_redundancy;
		g->dirty_sectors	= new_a->dirty_sectors;
		g->cached_sectors	= new_a->cached_sectors;

		bucket_unlock(g);
		percpu_up_read(&c->mark_lock);
	}
err:
	printbuf_exit(&buf);
	bch2_dev_put(ca);
	return ret;
invalid_bucket:
	bch2_fs_inconsistent(c, "reference to invalid bucket\n  %s",
			     (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf));
	ret = -EIO;
	goto err;
}

/*
 * This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for
 * extents-style btrees, but works on non-extents btrees:
 */
static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
{
	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);

	if (bkey_err(k))
		return k;

	if (k.k->type) {
		return k;
	} else {
		struct btree_iter iter2;
		struct bpos next;

		bch2_trans_copy_iter(&iter2, iter);

		struct btree_path *path = btree_iter_path(iter->trans, iter);
		if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
			end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));

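		/*
		 * A bkey's size is a 32 bit field, so cap the size of the hole
		 * we synthesize; the BUG_ON() below verifies we stayed within
		 * it:
		 */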
		end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));

		/*
		 * btree node min/max is a closed interval, upto takes a half
		 * open interval:
		 */
		k = bch2_btree_iter_peek_upto(&iter2, end);
		next = iter2.pos;
		bch2_trans_iter_exit(iter->trans, &iter2);

		BUG_ON(next.offset >= iter->pos.offset + U32_MAX);

		if (bkey_err(k))
			return k;

		bkey_init(hole);
		hole->p = iter->pos;

		bch2_key_resize(hole, next.offset - iter->pos.offset);
		return (struct bkey_s_c) { hole, NULL };
	}
}

static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket)
{
	if (*ca) {
		if (bucket->offset < (*ca)->mi.first_bucket)
			bucket->offset = (*ca)->mi.first_bucket;

		if (bucket->offset < (*ca)->mi.nbuckets)
			return true;

		bch2_dev_put(*ca);
		*ca = NULL;
		bucket->inode++;
		bucket->offset = 0;
	}

	rcu_read_lock();
	*ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
	if (*ca) {
		*bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket);
		bch2_dev_get(*ca);
	}
	rcu_read_unlock();

	return *ca != NULL;
}

static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter,
					struct bch_dev **ca, struct bkey *hole)
{
	struct bch_fs *c = iter->trans->c;
	struct bkey_s_c k;
again:
	k = bch2_get_key_or_hole(iter, POS_MAX, hole);
	if (bkey_err(k))
		return k;

	*ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode);

	if (!k.k->type) {
		struct bpos hole_start = bkey_start_pos(k.k);

		if (!*ca || !bucket_valid(*ca, hole_start.offset)) {
			if (!next_bucket(c, ca, &hole_start))
				return bkey_s_c_null;

			bch2_btree_iter_set_pos(iter, hole_start);
			goto again;
		}

		if (k.k->p.offset > (*ca)->mi.nbuckets)
			bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset);
	}

	return k;
}

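/*
 * Check a single alloc key against the btrees that index it: the
 * need_discard, freespace and bucket_gens btrees must all agree with the
 * bucket's alloc state:
 */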
static noinline_for_stack
int bch2_check_alloc_key(struct btree_trans *trans,
			 struct bkey_s_c alloc_k,
			 struct btree_iter *alloc_iter,
			 struct btree_iter *discard_iter,
			 struct btree_iter *freespace_iter,
			 struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	unsigned discard_key_type, freespace_key_type;
	unsigned gens_offset;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p);
	if (fsck_err_on(!ca,
			c, alloc_key_to_missing_dev_bucket,
			"alloc key for invalid device:bucket %llu:%llu",
			alloc_k.k->p.inode, alloc_k.k->p.offset))
		ret = bch2_btree_delete_at(trans, alloc_iter, 0);
	if (!ca)
		return ret;

	if (!ca->mi.freespace_initialized)
		goto out;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
	k = bch2_btree_iter_peek_slot(discard_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(k.k->type != discard_key_type,
			c, need_discard_key_wrong,
			"incorrect key in need_discard btree (got %s should be %s)\n"
			"  %s",
			bch2_bkey_types[k.k->type],
			bch2_bkey_types[discard_key_type],
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= discard_key_type;
		update->k.p	= discard_iter->pos;

		ret = bch2_trans_update(trans, discard_iter, update, 0);
		if (ret)
			goto err;
	}

	freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
	bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(k.k->type != freespace_key_type,
			c, freespace_key_wrong,
			"incorrect key in freespace btree (got %s should be %s)\n"
			"  %s",
			bch2_bkey_types[k.k->type],
			bch2_bkey_types[freespace_key_type],
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= freespace_key_type;
		update->k.p	= freespace_iter->pos;
		bch2_key_resize(&update->k, 1);

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (fsck_err_on(a->gen != alloc_gen(k, gens_offset),
			c, bucket_gens_key_wrong,
			"incorrect gen in bucket_gens btree (got %u should be %u)\n"
			"  %s",
			alloc_gen(k, gens_offset), a->gen,
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i_bucket_gens *g =
			bch2_trans_kmalloc(trans, sizeof(*g));

		ret = PTR_ERR_OR_ZERO(g);
		if (ret)
			goto err;

		if (k.k->type == KEY_TYPE_bucket_gens) {
			bkey_reassemble(&g->k_i, k);
		} else {
			bkey_bucket_gens_init(&g->k_i);
			g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
		}

		g->v.gens[gens_offset] = a->gen;

		ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
		if (ret)
			goto err;
	}
out:
err:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack
int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
				    struct bch_dev *ca,
				    struct bpos start,
				    struct bpos *end,
				    struct btree_iter *freespace_iter)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	int ret;

	if (!ca->mi.freespace_initialized)
		return 0;

	bch2_btree_iter_set_pos(freespace_iter, start);

	k = bch2_btree_iter_peek_slot(freespace_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	*end = bkey_min(k.k->p, *end);

	if (fsck_err_on(k.k->type != KEY_TYPE_set,
			c, freespace_hole_missing,
			"hole in alloc btree missing in freespace btree\n"
			"  device %llu buckets %llu-%llu",
			freespace_iter->pos.inode,
			freespace_iter->pos.offset,
			end->offset)) {
		struct bkey_i *update =
			bch2_trans_kmalloc(trans, sizeof(*update));

		ret = PTR_ERR_OR_ZERO(update);
		if (ret)
			goto err;

		bkey_init(&update->k);
		update->k.type	= KEY_TYPE_set;
		update->k.p	= freespace_iter->pos;
		bch2_key_resize(&update->k,
				min_t(u64, U32_MAX, end->offset -
				      freespace_iter->pos.offset));

		ret = bch2_trans_update(trans, freespace_iter, update, 0);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack
int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
				      struct bpos start,
				      struct bpos *end,
				      struct btree_iter *bucket_gens_iter)
{
	struct bch_fs *c = trans->c;
	struct bkey_s_c k;
	struct printbuf buf = PRINTBUF;
	unsigned i, gens_offset, gens_end_offset;
	int ret;

	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));

	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
	ret = bkey_err(k);
	if (ret)
		goto err;

	if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
		     alloc_gens_pos(*end,  &gens_end_offset)))
		gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;

	if (k.k->type == KEY_TYPE_bucket_gens) {
		struct bkey_i_bucket_gens g;
		bool need_update = false;

		bkey_reassemble(&g.k_i, k);

		for (i = gens_offset; i < gens_end_offset; i++) {
			if (fsck_err_on(g.v.gens[i], c,
					bucket_gens_hole_wrong,
					"hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
					bucket_gens_pos_to_alloc(k.k->p, i).inode,
					bucket_gens_pos_to_alloc(k.k->p, i).offset,
					g.v.gens[i])) {
				g.v.gens[i] = 0;
				need_update = true;
			}
		}

		if (need_update) {
			struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

			ret = PTR_ERR_OR_ZERO(u);
			if (ret)
				goto err;

			memcpy(u, &g, sizeof(g));

			ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
			if (ret)
				goto err;
		}
	}

	*end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_trans *trans,
					      struct btree_iter *iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter;
	struct bkey_s_c alloc_k;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	u64 genbits;
	struct bpos pos;
	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
		? BCH_DATA_need_discard
		: BCH_DATA_free;
	struct printbuf buf = PRINTBUF;
	int ret;

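	/*
	 * Keys in the freespace btree encode generation bits in the high 8
	 * bits of the offset (see alloc_freespace_pos()); strip them off to
	 * recover the bucket, and save them for the check below:
	 */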
	pos = iter->pos;
	pos.offset &= ~(~0ULL << 56);
	genbits = iter->pos.offset & (~0ULL << 56);

	alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
			need_discard_freespace_key_to_invalid_dev_bucket,
			"entry in %s btree for nonexistent dev:bucket %llu:%llu",
			bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset))
		goto delete;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (fsck_err_on(a->data_type != state ||
			(state == BCH_DATA_free &&
			 genbits != alloc_freespace_genbits(*a)), c,
			need_discard_freespace_key_bad,
			"%s\n  incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
			bch2_btree_id_str(iter->btree_id),
			iter->pos.inode,
			iter->pos.offset,
			a->data_type == state,
			genbits >> 56, alloc_freespace_genbits(*a) >> 56))
		goto delete;
out:
fsck_err:
	bch2_set_btree_iter_dontneed(&alloc_iter);
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
delete:
	ret =   bch2_btree_delete_extent_at(trans, iter,
			iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
			BCH_TRANS_COMMIT_no_enospc);
	goto out;
}

/*
 * We've already checked that generation numbers in the bucket_gens btree are
 * valid for buckets that exist; this just checks for keys for nonexistent
 * buckets.
 */
static noinline_for_stack
int bch2_check_bucket_gens_key(struct btree_trans *trans,
			       struct btree_iter *iter,
			       struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_i_bucket_gens g;
	u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
	u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
	u64 b;
	bool need_update = false;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
	bkey_reassemble(&g.k_i, k);

	struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode);
	if (!ca) {
		if (fsck_err(c, bucket_gens_to_invalid_dev,
			     "bucket_gens key for invalid device:\n  %s",
			     (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	if (fsck_err_on(end <= ca->mi.first_bucket ||
			start >= ca->mi.nbuckets, c,
			bucket_gens_to_invalid_buckets,
			"bucket_gens key for invalid buckets:\n  %s",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
		ret = bch2_btree_delete_at(trans, iter, 0);
		goto out;
	}

	for (b = start; b < ca->mi.first_bucket; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
				bucket_gens_nonzero_for_invalid_buckets,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	for (b = ca->mi.nbuckets; b < end; b++)
		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
				bucket_gens_nonzero_for_invalid_buckets,
				"bucket_gens key has nonzero gen for invalid bucket")) {
			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
			need_update = true;
		}

	if (need_update) {
		struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));

		ret = PTR_ERR_OR_ZERO(u);
		if (ret)
			goto out;

		memcpy(u, &g, sizeof(g));
		ret = bch2_trans_update(trans, iter, u, 0);
	}
out:
fsck_err:
	bch2_dev_put(ca);
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_alloc_info(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
	struct bch_dev *ca = NULL;
	struct bkey hole;
	struct bkey_s_c k;
	int ret = 0;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_prefetch);
	bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
			     BTREE_ITER_prefetch);

	while (1) {
		struct bpos next;

		bch2_trans_begin(trans);

		k = bch2_get_key_or_real_bucket_hole(&iter, &ca, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (!k.k)
			break;

		if (k.k->type) {
			next = bpos_nosnap_successor(k.k->p);

			ret = bch2_check_alloc_key(trans,
						   k, &iter,
						   &discard_iter,
						   &freespace_iter,
						   &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		} else {
			next = k.k->p;

			ret = bch2_check_alloc_hole_freespace(trans, ca,
						    bkey_start_pos(k.k),
						    &next,
						    &freespace_iter) ?:
				bch2_check_alloc_hole_bucket_gens(trans,
						    bkey_start_pos(k.k),
						    &next,
						    &bucket_gens_iter);
			if (ret)
				goto bkey_err;
		}

		ret = bch2_trans_commit(trans, NULL, NULL,
					BCH_TRANS_COMMIT_no_enospc);
		if (ret)
			goto bkey_err;

		bch2_btree_iter_set_pos(&iter, next);
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &bucket_gens_iter);
	bch2_trans_iter_exit(trans, &freespace_iter);
	bch2_trans_iter_exit(trans, &discard_iter);
	bch2_trans_iter_exit(trans, &iter);
	bch2_dev_put(ca);
	ca = NULL;

	if (ret < 0)
		goto err;

	ret = for_each_btree_key(trans, iter,
			BTREE_ID_need_discard, POS_MIN,
			BTREE_ITER_prefetch, k,
		bch2_check_discard_freespace_key(trans, &iter));
	if (ret)
		goto err;

	bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
			     BTREE_ITER_prefetch);
	while (1) {
		bch2_trans_begin(trans);
		k = bch2_btree_iter_peek(&iter);
		if (!k.k)
			break;

		ret = bkey_err(k) ?:
			bch2_check_discard_freespace_key(trans, &iter);
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
			ret = 0;
			continue;
		}
		if (ret) {
			struct printbuf buf = PRINTBUF;
			bch2_bkey_val_to_text(&buf, c, k);

			bch_err(c, "while checking %s", buf.buf);
			printbuf_exit(&buf);
			break;
		}

		bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
	}
	bch2_trans_iter_exit(trans, &iter);
	if (ret)
		goto err;

	ret = for_each_btree_key_commit(trans, iter,
			BTREE_ID_bucket_gens, POS_MIN,
			BTREE_ITER_prefetch, k,
			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
		bch2_check_bucket_gens_key(trans, &iter, k));
err:
	bch2_trans_put(trans);
	bch_err_fn(c, ret);
	return ret;
}

static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
				       struct btree_iter *alloc_iter)
{
	struct bch_fs *c = trans->c;
	struct btree_iter lru_iter;
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a;
	struct bkey_s_c alloc_k, lru_k;
	struct printbuf buf = PRINTBUF;
	int ret;

	alloc_k = bch2_btree_iter_peek(alloc_iter);
	if (!alloc_k.k)
		return 0;

	ret = bkey_err(alloc_k);
	if (ret)
		return ret;

	a = bch2_alloc_to_v4(alloc_k, &a_convert);

	if (a->data_type != BCH_DATA_cached)
		return 0;

	if (fsck_err_on(!a->io_time[READ], c,
			alloc_key_cached_but_read_time_zero,
			"cached bucket with read_time 0\n"
			"  %s",
		(printbuf_reset(&buf),
		 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		struct bkey_i_alloc_v4 *a_mut =
			bch2_alloc_to_v4_mut(trans, alloc_k);
		ret = PTR_ERR_OR_ZERO(a_mut);
		if (ret)
			goto err;

		a_mut->v.io_time[READ] = bch2_current_io_time(c, READ);
		ret = bch2_trans_update(trans, alloc_iter,
					&a_mut->k_i, BTREE_TRIGGER_norun);
		if (ret)
			goto err;

		a = &a_mut->v;
	}

	lru_k = bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
			     lru_pos(alloc_k.k->p.inode,
				     bucket_to_u64(alloc_k.k->p),
				     a->io_time[READ]), 0);
	ret = bkey_err(lru_k);
	if (ret)
		return ret;

	if (fsck_err_on(lru_k.k->type != KEY_TYPE_set, c,
			alloc_key_to_missing_lru_entry,
			"missing lru entry\n"
			"  %s",
			(printbuf_reset(&buf),
			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
		ret = bch2_lru_set(trans,
				   alloc_k.k->p.inode,
				   bucket_to_u64(alloc_k.k->p),
				   a->io_time[READ]);
		if (ret)
			goto err;
	}
err:
fsck_err:
	bch2_trans_iter_exit(trans, &lru_iter);
	printbuf_exit(&buf);
	return ret;
}

int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
{
	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
				POS_MIN, BTREE_ITER_prefetch, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			bch2_check_alloc_to_lru_ref(trans, &iter)));
	bch_err_fn(c, ret);
	return ret;
}

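/*
 * Track discards currently in flight, so the same bucket is never queued
 * twice; returns -BCH_ERR_EEXIST_discard_in_flight_add if it already is:
 */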
static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress)
{
	int ret;

	mutex_lock(&ca->discard_buckets_in_flight_lock);
	darray_for_each(ca->discard_buckets_in_flight, i)
		if (i->bucket == bucket) {
			ret = -BCH_ERR_EEXIST_discard_in_flight_add;
			goto out;
		}

	ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) {
			   .in_progress = in_progress,
			   .bucket	= bucket,
	}));
out:
	mutex_unlock(&ca->discard_buckets_in_flight_lock);
	return ret;
}

static void discard_in_flight_remove(struct bch_dev *ca, u64 bucket)
{
	mutex_lock(&ca->discard_buckets_in_flight_lock);
	darray_for_each(ca->discard_buckets_in_flight, i)
		if (i->bucket == bucket) {
			BUG_ON(!i->in_progress);
			darray_remove_item(&ca->discard_buckets_in_flight, i);
			goto found;
		}
	BUG();
found:
	mutex_unlock(&ca->discard_buckets_in_flight_lock);
}

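/* Counters for a single pass of the discard worker, reported via tracepoint: */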
struct discard_buckets_state {
	u64		seen;
	u64		open;
	u64		need_journal_commit;
	u64		discarded;
	u64		need_journal_commit_this_dev;
};

static int bch2_discard_one_bucket(struct btree_trans *trans,
				   struct bch_dev *ca,
				   struct btree_iter *need_discard_iter,
				   struct bpos *discard_pos_done,
				   struct discard_buckets_state *s)
{
	struct bch_fs *c = trans->c;
	struct bpos pos = need_discard_iter->pos;
	struct btree_iter iter = { NULL };
	struct bkey_s_c k;
	struct bkey_i_alloc_v4 *a;
	struct printbuf buf = PRINTBUF;
	bool discard_locked = false;
	int ret = 0;

	if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
		s->open++;
		goto out;
	}

	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
			c->journal.flushed_seq_ondisk,
			pos.inode, pos.offset)) {
		s->need_journal_commit++;
		s->need_journal_commit_this_dev++;
		goto out;
	}

	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
			       need_discard_iter->pos,
			       BTREE_ITER_cached);
	ret = bkey_err(k);
	if (ret)
		goto out;

	a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	if (bch2_bucket_sectors_total(a->v)) {
		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
					       trans, "attempting to discard bucket with dirty data\n%s",
					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = -EIO;
		goto out;
	}

	if (a->v.data_type != BCH_DATA_need_discard) {
		if (data_type_is_empty(a->v.data_type) &&
		    BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
			a->v.gen++;
			SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
			goto write;
		}

		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
					       trans, "bucket incorrectly set in need_discard btree\n"
					       "%s",
					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = -EIO;
		goto out;
	}

	if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
		if (bch2_trans_inconsistent_on(c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info,
					       trans, "clearing need_discard but journal_seq %llu > flushed_seq %llu\n%s",
					       a->v.journal_seq,
					       c->journal.flushed_seq_ondisk,
					       (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
			ret = -EIO;
		goto out;
	}

	if (discard_in_flight_add(ca, iter.pos.offset, true))
		goto out;

	discard_locked = true;

	if (!bkey_eq(*discard_pos_done, iter.pos) &&
	    ca->mi.discard && !c->opts.nochanges) {
		/*
		 * This works without any other locks because this is the only
		 * thread that removes items from the need_discard tree
		 */
		bch2_trans_unlock_long(trans);
		blkdev_issue_discard(ca->disk_sb.bdev,
				     k.k->p.offset * ca->mi.bucket_size,
				     ca->mi.bucket_size,
				     GFP_KERNEL);
		*discard_pos_done = iter.pos;

		ret = bch2_trans_relock_notrace(trans);
		if (ret)
			goto out;
	}

	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
write:
	alloc_data_type_set(&a->v, a->v.data_type);

	ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_WATERMARK_btree|
				  BCH_TRANS_COMMIT_no_enospc);
	if (ret)
		goto out;

	count_event(c, bucket_discard);
	s->discarded++;
out:
	if (discard_locked)
		discard_in_flight_remove(ca, iter.pos.offset);
	s->seen++;
	bch2_trans_iter_exit(trans, &iter);
	printbuf_exit(&buf);
	return ret;
}
1799 
1800 static void bch2_do_discards_work(struct work_struct *work)
1801 {
1802 	struct bch_dev *ca = container_of(work, struct bch_dev, discard_work);
1803 	struct bch_fs *c = ca->fs;
1804 	struct discard_buckets_state s = {};
1805 	struct bpos discard_pos_done = POS_MAX;
1806 	int ret;
1807 
1808 	/*
1809 	 * We're doing the commit in bch2_discard_one_bucket instead of using
1810 	 * for_each_btree_key_commit() so that we can increment counters after
1811 	 * successful commit:
1812 	 */
1813 	ret = bch2_trans_run(c,
1814 		for_each_btree_key_upto(trans, iter,
1815 				   BTREE_ID_need_discard,
1816 				   POS(ca->dev_idx, 0),
1817 				   POS(ca->dev_idx, U64_MAX), 0, k,
1818 			bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s)));
1819 
1820 	trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
1821 			      bch2_err_str(ret));
1822 
1823 	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
1824 	percpu_ref_put(&ca->io_ref);
1825 }
1826 
1827 void bch2_dev_do_discards(struct bch_dev *ca)
1828 {
1829 	struct bch_fs *c = ca->fs;
1830 
1831 	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
1832 		return;
1833 
1834 	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard))
1835 		goto put_ioref;
1836 
1837 	if (queue_work(c->write_ref_wq, &ca->discard_work))
1838 		return;
1839 
1840 	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
1841 put_ioref:
1842 	percpu_ref_put(&ca->io_ref);
1843 }
1844 
1845 void bch2_do_discards(struct bch_fs *c)
1846 {
1847 	for_each_member_device(c, ca)
1848 		bch2_dev_do_discards(ca);
1849 }
1850 
1851 static int bch2_clear_bucket_needs_discard(struct btree_trans *trans, struct bpos bucket)
1852 {
1853 	struct btree_iter iter;
1854 	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, BTREE_ITER_intent);
1855 	struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
1856 	int ret = bkey_err(k);
1857 	if (ret)
1858 		goto err;
1859 
1860 	struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, k);
1861 	ret = PTR_ERR_OR_ZERO(a);
1862 	if (ret)
1863 		goto err;
1864 
1865 	BUG_ON(a->v.dirty_sectors);
1866 	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
1867 	alloc_data_type_set(&a->v, a->v.data_type);
1868 
1869 	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
1870 err:
1871 	bch2_trans_iter_exit(trans, &iter);
1872 	return ret;
1873 }
1874 
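/*
 * Worker for the fast discard path: pull buckets off the
 * discard_buckets_in_flight list one at a time, issue the discard with no
 * btree transaction held, then clear NEED_DISCARD in a separate transaction:
 */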
1875 static void bch2_do_discards_fast_work(struct work_struct *work)
1876 {
1877 	struct bch_dev *ca = container_of(work, struct bch_dev, discard_fast_work);
1878 	struct bch_fs *c = ca->fs;
1879 
1880 	while (1) {
1881 		bool got_bucket = false;
1882 		u64 bucket;
1883 
1884 		mutex_lock(&ca->discard_buckets_in_flight_lock);
1885 		darray_for_each(ca->discard_buckets_in_flight, i) {
1886 			if (i->in_progress)
1887 				continue;
1888 
1889 			got_bucket = true;
1890 			bucket = i->bucket;
1891 			i->in_progress = true;
1892 			break;
1893 		}
1894 		mutex_unlock(&ca->discard_buckets_in_flight_lock);
1895 
1896 		if (!got_bucket)
1897 			break;
1898 
1899 		if (ca->mi.discard && !c->opts.nochanges)
1900 			blkdev_issue_discard(ca->disk_sb.bdev,
1901 					     bucket_to_sector(ca, bucket),
1902 					     ca->mi.bucket_size,
1903 					     GFP_KERNEL);
1904 
1905 		int ret = bch2_trans_do(c, NULL, NULL,
1906 			BCH_WATERMARK_btree|
1907 			BCH_TRANS_COMMIT_no_enospc,
1908 			bch2_clear_bucket_needs_discard(trans, POS(ca->dev_idx, bucket)));
1909 		bch_err_fn(c, ret);
1910 
1911 		discard_in_flight_remove(ca, bucket);
1912 
1913 		if (ret)
1914 			break;
1915 	}
1916 
1917 	bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
1918 	percpu_ref_put(&ca->io_ref);
1919 }
1920 
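/*
 * Queue a single bucket for the fast discard path. A nonzero return from
 * discard_in_flight_add() means the bucket couldn't be tracked - typically
 * because it's already queued - so there's nothing more to do:
 */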
1921 static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
1922 {
1923 	struct bch_fs *c = ca->fs;
1924 
1925 	if (discard_in_flight_add(ca, bucket, false))
1926 		return;
1927 
1928 	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
1929 		return;
1930 
1931 	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast))
1932 		goto put_ioref;
1933 
1934 	if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
1935 		return;
1936 
1937 	bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
1938 put_ioref:
1939 	percpu_ref_put(&ca->io_ref);
1940 }
1941 
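/*
 * Invalidate a bucket holding only cached (clean) data, picked from the LRU:
 * incrementing the generation number invalidates any live pointers into the
 * bucket, after which it can be reallocated:
 */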
1942 static int invalidate_one_bucket(struct btree_trans *trans,
1943 				 struct btree_iter *lru_iter,
1944 				 struct bkey_s_c lru_k,
1945 				 s64 *nr_to_invalidate)
1946 {
1947 	struct bch_fs *c = trans->c;
1948 	struct bkey_i_alloc_v4 *a = NULL;
1949 	struct printbuf buf = PRINTBUF;
1950 	struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
1951 	unsigned cached_sectors;
1952 	int ret = 0;
1953 
1954 	if (*nr_to_invalidate <= 0)
1955 		return 1;
1956 
1957 	if (!bch2_dev_bucket_exists(c, bucket)) {
1958 		prt_str(&buf, "lru entry points to invalid bucket");
1959 		goto err;
1960 	}
1961 
1962 	if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
1963 		return 0;
1964 
1965 	a = bch2_trans_start_alloc_update(trans, bucket);
1966 	ret = PTR_ERR_OR_ZERO(a);
1967 	if (ret)
1968 		goto out;
1969 
1970 	/* We expect harmless races here due to the btree write buffer: */
1971 	if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
1972 		goto out;
1973 
1974 	BUG_ON(a->v.data_type != BCH_DATA_cached);
1975 	BUG_ON(a->v.dirty_sectors);
1976 
1977 	if (!a->v.cached_sectors)
1978 		bch_err(c, "invalidating empty bucket, confused");
1979 
1980 	cached_sectors = a->v.cached_sectors;
1981 
1982 	SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
1983 	a->v.gen++;
1984 	a->v.data_type		= 0;
1985 	a->v.dirty_sectors	= 0;
1986 	a->v.cached_sectors	= 0;
1987 	a->v.io_time[READ]	= bch2_current_io_time(c, READ);
1988 	a->v.io_time[WRITE]	= bch2_current_io_time(c, WRITE);
1989 
1990 	ret = bch2_trans_commit(trans, NULL, NULL,
1991 				BCH_WATERMARK_btree|
1992 				BCH_TRANS_COMMIT_no_enospc);
1993 	if (ret)
1994 		goto out;
1995 
1996 	trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
1997 	--*nr_to_invalidate;
1998 out:
1999 	printbuf_exit(&buf);
2000 	return ret;
2001 err:
2002 	prt_str(&buf, "\n  lru key: ");
2003 	bch2_bkey_val_to_text(&buf, c, lru_k);
2004 
2005 	prt_str(&buf, "\n  lru entry: ");
2006 	bch2_lru_pos_to_text(&buf, lru_iter->pos);
2007 
2008 	prt_str(&buf, "\n  alloc key: ");
2009 	if (!a)
2010 		bch2_bpos_to_text(&buf, bucket);
2011 	else
2012 		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));
2013 
2014 	bch_err(c, "%s", buf.buf);
2015 	if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_lrus) {
2016 		bch2_inconsistent_error(c);
2017 		ret = -EINVAL;
2018 	}
2019 
2020 	goto out;
2021 }
2022 
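/*
 * Peek the next LRU entry for @ca, wrapping around to the start of the
 * device's LRU range once we hit the end - the caller starts iterating
 * partway through the keyspace:
 */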
2023 static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter *iter,
2024 				    struct bch_dev *ca, bool *wrapped)
2025 {
2026 	struct bkey_s_c k;
2027 again:
2028 	k = bch2_btree_iter_peek_upto(iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
2029 	if (!k.k && !*wrapped) {
2030 		bch2_btree_iter_set_pos(iter, lru_pos(ca->dev_idx, 0, 0));
2031 		*wrapped = true;
2032 		goto again;
2033 	}
2034 
2035 	return k;
2036 }
2037 
2038 static void bch2_do_invalidates_work(struct work_struct *work)
2039 {
2040 	struct bch_dev *ca = container_of(work, struct bch_dev, invalidate_work);
2041 	struct bch_fs *c = ca->fs;
2042 	struct btree_trans *trans = bch2_trans_get(c);
2043 	int ret = 0;
2044 
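	/*
	 * LRU updates arrive via the btree write buffer; flush it first so
	 * the LRU btree we're about to scan is current:
	 */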
2045 	ret = bch2_btree_write_buffer_tryflush(trans);
2046 	if (ret)
2047 		goto err;
2048 
2049 	s64 nr_to_invalidate =
2050 		should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
2051 	struct btree_iter iter;
2052 	bool wrapped = false;
2053 
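	/*
	 * Start scanning partway through the LRU keyspace, offset from the
	 * current read clock; next_lru_key() wraps around once, so every LRU
	 * entry for this device is still visited:
	 */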
2054 	bch2_trans_iter_init(trans, &iter, BTREE_ID_lru,
2055 			     lru_pos(ca->dev_idx, 0,
2056 				     ((bch2_current_io_time(c, READ) + U32_MAX) &
2057 				      LRU_TIME_MAX)), 0);
2058 
2059 	while (true) {
2060 		bch2_trans_begin(trans);
2061 
2062 		struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped);
2063 		ret = bkey_err(k);
2064 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
2065 			continue;
2066 		if (ret)
2067 			break;
2068 		if (!k.k)
2069 			break;
2070 
2071 		ret = invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate);
2072 		if (ret)
2073 			break;
2074 
2075 		bch2_btree_iter_advance(&iter);
2076 	}
2077 	bch2_trans_iter_exit(trans, &iter);
2078 err:
2079 	bch2_trans_put(trans);
2080 	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
2081 	percpu_ref_put(&ca->io_ref);
2082 }
2083 
2084 void bch2_dev_do_invalidates(struct bch_dev *ca)
2085 {
2086 	struct bch_fs *c = ca->fs;
2087 
2088 	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
2089 		return;
2090 
2091 	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate))
2092 		goto put_ioref;
2093 
2094 	if (queue_work(c->write_ref_wq, &ca->invalidate_work))
2095 		return;
2096 
2097 	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
2098 put_ioref:
2099 	percpu_ref_put(&ca->io_ref);
2100 }
2101 
2102 void bch2_do_invalidates(struct bch_fs *c)
2103 {
2104 	for_each_member_device(c, ca)
2105 		bch2_dev_do_invalidates(ca);
2106 }
2107 
2108 int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
2109 			    u64 bucket_start, u64 bucket_end)
2110 {
2111 	struct btree_trans *trans = bch2_trans_get(c);
2112 	struct btree_iter iter;
2113 	struct bkey_s_c k;
2114 	struct bkey hole;
2115 	struct bpos end = POS(ca->dev_idx, bucket_end);
2116 	struct bch_member *m;
2117 	unsigned long last_updated = jiffies;
2118 	int ret;
2119 
2120 	BUG_ON(bucket_start > bucket_end);
2121 	BUG_ON(bucket_end > ca->mi.nbuckets);
2122 
2123 	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
2124 		POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
2125 		BTREE_ITER_prefetch);
2126 	/*
2127 	 * Scan the alloc btree for every bucket on @ca, and add buckets to the
2128 	 * freespace/need_discard/need_gc_gens btrees as needed:
2129 	 */
2130 	while (1) {
2131 		if (time_after(jiffies, last_updated + HZ * 10)) {
2132 			bch_info(ca, "%s: currently at %llu/%llu",
2133 				 __func__, iter.pos.offset, ca->mi.nbuckets);
2134 			last_updated = jiffies;
2135 		}
2136 
2137 		bch2_trans_begin(trans);
2138 
2139 		if (bkey_ge(iter.pos, end)) {
2140 			ret = 0;
2141 			break;
2142 		}
2143 
2144 		k = bch2_get_key_or_hole(&iter, end, &hole);
2145 		ret = bkey_err(k);
2146 		if (ret)
2147 			goto bkey_err;
2148 
2149 		if (k.k->type) {
2150 			/*
2151 			 * We process live keys in the alloc btree one at a
2152 			 * time:
2153 			 */
2154 			struct bch_alloc_v4 a_convert;
2155 			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
2156 
2157 			ret =   bch2_bucket_do_index(trans, ca, k, a, true) ?:
2158 				bch2_trans_commit(trans, NULL, NULL,
2159 						  BCH_TRANS_COMMIT_no_enospc);
2160 			if (ret)
2161 				goto bkey_err;
2162 
2163 			bch2_btree_iter_advance(&iter);
2164 		} else {
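			/*
			 * A hole in the alloc btree is a range of buckets
			 * with no alloc keys, i.e. entirely free space: it
			 * can be added to the freespace btree as a single
			 * KEY_TYPE_set extent:
			 */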
2165 			struct bkey_i *freespace;
2166 
2167 			freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
2168 			ret = PTR_ERR_OR_ZERO(freespace);
2169 			if (ret)
2170 				goto bkey_err;
2171 
2172 			bkey_init(&freespace->k);
2173 			freespace->k.type	= KEY_TYPE_set;
2174 			freespace->k.p		= k.k->p;
2175 			freespace->k.size	= k.k->size;
2176 
2177 			ret = bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
2178 				bch2_trans_commit(trans, NULL, NULL,
2179 						  BCH_TRANS_COMMIT_no_enospc);
2180 			if (ret)
2181 				goto bkey_err;
2182 
2183 			bch2_btree_iter_set_pos(&iter, k.k->p);
2184 		}
2185 bkey_err:
2186 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
2187 			continue;
2188 		if (ret)
2189 			break;
2190 	}
2191 
2192 	bch2_trans_iter_exit(trans, &iter);
2193 	bch2_trans_put(trans);
2194 
2195 	if (ret < 0) {
2196 		bch_err_msg(ca, ret, "initializing free space");
2197 		return ret;
2198 	}
2199 
2200 	mutex_lock(&c->sb_lock);
2201 	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
2202 	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
2203 	mutex_unlock(&c->sb_lock);
2204 
2205 	return 0;
2206 }
2207 
2208 int bch2_fs_freespace_init(struct bch_fs *c)
2209 {
2210 	int ret = 0;
2211 	bool doing_init = false;
2212 
2213 	/*
2214 	 * We can crash during the device add path, so we need to check this on
2215 	 * every mount:
2216 	 */
2217 
2218 	for_each_member_device(c, ca) {
2219 		if (ca->mi.freespace_initialized)
2220 			continue;
2221 
2222 		if (!doing_init) {
2223 			bch_info(c, "initializing freespace");
2224 			doing_init = true;
2225 		}
2226 
2227 		ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
2228 		if (ret) {
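			/*
			 * for_each_member_device() holds a ref on @ca; drop
			 * it, since we're exiting the loop early:
			 */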
2229 			bch2_dev_put(ca);
2230 			bch_err_fn(c, ret);
2231 			return ret;
2232 		}
2233 	}
2234 
2235 	if (doing_init) {
2236 		mutex_lock(&c->sb_lock);
2237 		bch2_write_super(c);
2238 		mutex_unlock(&c->sb_lock);
2239 		bch_verbose(c, "done initializing freespace");
2240 	}
2241 
2242 	return 0;
2243 }
2244 
2245 /* Bucket IO clocks: */
2246 
2247 int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
2248 			      size_t bucket_nr, int rw)
2249 {
2250 	struct bch_fs *c = trans->c;
2251 	struct btree_iter iter;
2252 	struct bkey_i_alloc_v4 *a;
2253 	u64 now;
2254 	int ret = 0;
2255 
2256 	if (bch2_trans_relock(trans))
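	/*
	 * bch2_trans_relock() returns an error if the relock failed; restart
	 * the transaction in that case before going any further:
	 */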
2257 		bch2_trans_begin(trans);
2258 
2259 	a = bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(dev, bucket_nr));
2260 	ret = PTR_ERR_OR_ZERO(a);
2261 	if (ret)
2262 		return ret;
2263 
2264 	now = bch2_current_io_time(c, rw);
2265 	if (a->v.io_time[rw] == now)
2266 		goto out;
2267 
2268 	a->v.io_time[rw] = now;
2269 
2270 	ret   = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
2271 		bch2_trans_commit(trans, NULL, NULL, 0);
2272 out:
2273 	bch2_trans_iter_exit(trans, &iter);
2274 	return ret;
2275 }
2276 
2277 /* Startup/shutdown (ro/rw): */
2278 
2279 void bch2_recalc_capacity(struct bch_fs *c)
2280 {
2281 	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
2282 	unsigned bucket_size_max = 0;
2283 	unsigned long ra_pages = 0;
2284 
2285 	lockdep_assert_held(&c->state_lock);
2286 
2287 	for_each_online_member(c, ca) {
2288 		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
2289 
2290 		ra_pages += bdi->ra_pages;
2291 	}
2292 
2293 	bch2_set_ra_pages(c, ra_pages);
2294 
2295 	for_each_rw_member(c, ca) {
2296 		u64 dev_reserve = 0;
2297 
2298 		/*
2299 		 * We need to reserve buckets (from the number
2300 		 * of currently available buckets) against
2301 		 * foreground writes, so that copygc in
2302 		 * particular can make forward progress.
2303 		 *
2304 		 * We need enough to refill the various reserves
2305 		 * from scratch - copygc will use its entire
2306 		 * reserve all at once, then run again when
2307 		 * its reserve is refilled (from the formerly
2308 		 * available buckets).
2309 		 *
2310 		 * This reserve is only used when deciding whether
2311 		 * allocations for foreground writes must wait -
2312 		 * not for -ENOSPC calculations.
2313 		 */
2314 
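		/*
		 * Rough worked example, with made-up numbers: given
		 * nr_btree_reserve = 8, nbuckets = 1 << 20 and a bucket size
		 * of 1024 sectors, this comes to 16 + 16384 + 3 = 16403
		 * buckets, or roughly 8GiB of reserve on this device:
		 */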
2315 		dev_reserve += ca->nr_btree_reserve * 2;
2316 		dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */
2317 
2318 		dev_reserve += 1;	/* btree write point */
2319 		dev_reserve += 1;	/* copygc write point */
2320 		dev_reserve += 1;	/* rebalance write point */
2321 
2322 		dev_reserve *= ca->mi.bucket_size;
2323 
2324 		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
2325 					     ca->mi.first_bucket);
2326 
2327 		reserved_sectors += dev_reserve * 2;
2328 
2329 		bucket_size_max = max_t(unsigned, bucket_size_max,
2330 					ca->mi.bucket_size);
2331 	}
2332 
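	/*
	 * The -ENOSPC reserve: either an explicit byte count from the
	 * gc_reserve_bytes option (converted here to 512-byte sectors), or a
	 * percentage of total capacity:
	 */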
2333 	gc_reserve = c->opts.gc_reserve_bytes
2334 		? c->opts.gc_reserve_bytes >> 9
2335 		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);
2336 
2337 	reserved_sectors = max(gc_reserve, reserved_sectors);
2338 
2339 	reserved_sectors = min(reserved_sectors, capacity);
2340 
2341 	c->capacity = capacity - reserved_sectors;
2342 
2343 	c->bucket_size_max = bucket_size_max;
2344 
2345 	/* Wake up in case someone was waiting for buckets */
2346 	closure_wake_up(&c->freelist_wait);
2347 }
2348 
2349 u64 bch2_min_rw_member_capacity(struct bch_fs *c)
2350 {
2351 	u64 ret = U64_MAX;
2352 
2353 	for_each_rw_member(c, ca)
2354 		ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
2355 	return ret;
2356 }
2357 
2358 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
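/*
 * Check whether any open bucket still points at @ca; used below to wait for
 * in-flight writes to drain when a device is going read-only:
 */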
2359 {
2360 	struct open_bucket *ob;
2361 	bool ret = false;
2362 
2363 	for (ob = c->open_buckets;
2364 	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
2365 	     ob++) {
2366 		spin_lock(&ob->lock);
2367 		if (ob->valid && !ob->on_partial_list &&
2368 		    ob->dev == ca->dev_idx)
2369 			ret = true;
2370 		spin_unlock(&ob->lock);
2371 	}
2372 
2373 	return ret;
2374 }
2375 
2376 /* device goes ro: */
2377 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
2378 {
2379 	unsigned i;
2380 
2381 	/* First, remove device from allocation groups: */
2382 
2383 	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
2384 		clear_bit(ca->dev_idx, c->rw_devs[i].d);
2385 
2386 	/*
2387 	 * Capacity is calculated based on the devices in allocation groups:
2388 	 */
2389 	bch2_recalc_capacity(c);
2390 
2391 	bch2_open_buckets_stop(c, ca, false);
2392 
2393 	/*
2394 	 * Wake up threads that were blocked on allocation, so they can notice
2395 	 * the device can no longer be allocated from and the capacity has changed:
2396 	 */
2397 	closure_wake_up(&c->freelist_wait);
2398 
2399 	/*
2400 	 * journal_res_get() can block waiting for free space in the journal -
2401 	 * it needs to notice there may not be devices to allocate from anymore:
2402 	 */
2403 	wake_up(&c->journal.wait);
2404 
2405 	/* Now wait for any in flight writes: */
2406 
2407 	closure_wait_event(&c->open_buckets_wait,
2408 			   !bch2_dev_has_open_write_point(c, ca));
2409 }
2410 
2411 /* device goes rw: */
2412 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
2413 {
2414 	unsigned i;
2415 
2416 	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
2417 		if (ca->mi.data_allowed & (1 << i))
2418 			set_bit(ca->dev_idx, c->rw_devs[i].d);
2419 }
2420 
2421 void bch2_dev_allocator_background_exit(struct bch_dev *ca)
2422 {
2423 	darray_exit(&ca->discard_buckets_in_flight);
2424 }
2425 
2426 void bch2_dev_allocator_background_init(struct bch_dev *ca)
2427 {
2428 	mutex_init(&ca->discard_buckets_in_flight_lock);
2429 	INIT_WORK(&ca->discard_work, bch2_do_discards_work);
2430 	INIT_WORK(&ca->discard_fast_work, bch2_do_discards_fast_work);
2431 	INIT_WORK(&ca->invalidate_work, bch2_do_invalidates_work);
2432 }
2433 
2434 void bch2_fs_allocator_background_init(struct bch_fs *c)
2435 {
2436 	spin_lock_init(&c->freelist_lock);
2437 }
2438