xref: /linux/fs/bcachefs/alloc_background.c (revision ae22a94997b8a03dcb3c922857c203246711f9d4)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "alloc_background.h"
4 #include "alloc_foreground.h"
5 #include "backpointers.h"
6 #include "btree_cache.h"
7 #include "btree_io.h"
8 #include "btree_key_cache.h"
9 #include "btree_update.h"
10 #include "btree_update_interior.h"
11 #include "btree_gc.h"
12 #include "btree_write_buffer.h"
13 #include "buckets.h"
14 #include "buckets_waiting_for_journal.h"
15 #include "clock.h"
16 #include "debug.h"
17 #include "ec.h"
18 #include "error.h"
19 #include "lru.h"
20 #include "recovery.h"
21 #include "trace.h"
22 #include "varint.h"
23 
24 #include <linux/kthread.h>
25 #include <linux/math64.h>
26 #include <linux/random.h>
27 #include <linux/rculist.h>
28 #include <linux/rcupdate.h>
29 #include <linux/sched/task.h>
30 #include <linux/sort.h>
31 
32 static void bch2_discard_one_bucket_fast(struct bch_fs *c, struct bpos bucket);
33 
34 /* Persistent alloc info: */
35 
36 static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
37 #define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
38 	BCH_ALLOC_FIELDS_V1()
39 #undef x
40 };
41 
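/*
 * A minimal sketch of the x-macro pattern used throughout this file: each
 * BCH_ALLOC_FIELDS_* list supplies x(name, bits) entries, so a hypothetical
 * entry x(foo, 16) would expand to a 2-byte entry in the table above and to
 * a "u16 foo;" member in the struct below.
 */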
42 struct bkey_alloc_unpacked {
43 	u64		journal_seq;
44 	u8		gen;
45 	u8		oldest_gen;
46 	u8		data_type;
47 	bool		need_discard:1;
48 	bool		need_inc_gen:1;
49 #define x(_name, _bits)	u##_bits _name;
50 	BCH_ALLOC_FIELDS_V2()
51 #undef  x
52 };
53 
54 static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
55 				     const void **p, unsigned field)
56 {
57 	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
58 	u64 v;
59 
60 	if (!(a->fields & (1 << field)))
61 		return 0;
62 
63 	switch (bytes) {
64 	case 1:
65 		v = *((const u8 *) *p);
66 		break;
67 	case 2:
68 		v = le16_to_cpup(*p);
69 		break;
70 	case 4:
71 		v = le32_to_cpup(*p);
72 		break;
73 	case 8:
74 		v = le64_to_cpup(*p);
75 		break;
76 	default:
77 		BUG();
78 	}
79 
80 	*p += bytes;
81 	return v;
82 }
83 
84 static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
85 				 struct bkey_s_c k)
86 {
87 	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
88 	const void *d = in->data;
89 	unsigned idx = 0;
90 
91 	out->gen = in->gen;
92 
93 #define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
94 	BCH_ALLOC_FIELDS_V1()
95 #undef  x
96 }
97 
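/*
 * v2 alloc keys are a fixed header (gen, oldest_gen, data_type, nr_fields)
 * followed by varint-encoded field values. Decoding stops at nr_fields and
 * any remaining fields default to zero, so older keys keep working as new
 * fields are added; the "v != out->_name" check rejects decoded values too
 * wide for the unpacked field:
 */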
98 static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
99 				struct bkey_s_c k)
100 {
101 	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
102 	const u8 *in = a.v->data;
103 	const u8 *end = bkey_val_end(a);
104 	unsigned fieldnr = 0;
105 	int ret;
106 	u64 v;
107 
108 	out->gen	= a.v->gen;
109 	out->oldest_gen	= a.v->oldest_gen;
110 	out->data_type	= a.v->data_type;
111 
112 #define x(_name, _bits)							\
113 	if (fieldnr < a.v->nr_fields) {					\
114 		ret = bch2_varint_decode_fast(in, end, &v);		\
115 		if (ret < 0)						\
116 			return ret;					\
117 		in += ret;						\
118 	} else {							\
119 		v = 0;							\
120 	}								\
121 	out->_name = v;							\
122 	if (v != out->_name)						\
123 		return -1;						\
124 	fieldnr++;
125 
126 	BCH_ALLOC_FIELDS_V2()
127 #undef  x
128 	return 0;
129 }
130 
131 static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
132 				struct bkey_s_c k)
133 {
134 	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
135 	const u8 *in = a.v->data;
136 	const u8 *end = bkey_val_end(a);
137 	unsigned fieldnr = 0;
138 	int ret;
139 	u64 v;
140 
141 	out->gen	= a.v->gen;
142 	out->oldest_gen	= a.v->oldest_gen;
143 	out->data_type	= a.v->data_type;
144 	out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
145 	out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
146 	out->journal_seq = le64_to_cpu(a.v->journal_seq);
147 
148 #define x(_name, _bits)							\
149 	if (fieldnr < a.v->nr_fields) {					\
150 		ret = bch2_varint_decode_fast(in, end, &v);		\
151 		if (ret < 0)						\
152 			return ret;					\
153 		in += ret;						\
154 	} else {							\
155 		v = 0;							\
156 	}								\
157 	out->_name = v;							\
158 	if (v != out->_name)						\
159 		return -1;						\
160 	fieldnr++;
161 
162 	BCH_ALLOC_FIELDS_V2()
163 #undef  x
164 	return 0;
165 }
166 
167 static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
168 {
169 	struct bkey_alloc_unpacked ret = { .gen	= 0 };
170 
171 	switch (k.k->type) {
172 	case KEY_TYPE_alloc:
173 		bch2_alloc_unpack_v1(&ret, k);
174 		break;
175 	case KEY_TYPE_alloc_v2:
176 		bch2_alloc_unpack_v2(&ret, k);
177 		break;
178 	case KEY_TYPE_alloc_v3:
179 		bch2_alloc_unpack_v3(&ret, k);
180 		break;
181 	}
182 
183 	return ret;
184 }
185 
186 static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
187 {
188 	unsigned i, bytes = offsetof(struct bch_alloc, data);
189 
190 	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
191 		if (a->fields & (1 << i))
192 			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];
193 
194 	return DIV_ROUND_UP(bytes, sizeof(u64));
195 }
196 
197 int bch2_alloc_v1_invalid(struct bch_fs *c, struct bkey_s_c k,
198 			  enum bkey_invalid_flags flags,
199 			  struct printbuf *err)
200 {
201 	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
202 	int ret = 0;
203 
204 	/* allow for unknown fields */
205 	bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v), c, err,
206 			 alloc_v1_val_size_bad,
207 			 "incorrect value size (%zu < %u)",
208 			 bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
209 fsck_err:
210 	return ret;
211 }
212 
213 int bch2_alloc_v2_invalid(struct bch_fs *c, struct bkey_s_c k,
214 			  enum bkey_invalid_flags flags,
215 			  struct printbuf *err)
216 {
217 	struct bkey_alloc_unpacked u;
218 	int ret = 0;
219 
220 	bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k), c, err,
221 			 alloc_v2_unpack_error,
222 			 "unpack error");
223 fsck_err:
224 	return ret;
225 }
226 
227 int bch2_alloc_v3_invalid(struct bch_fs *c, struct bkey_s_c k,
228 			  enum bkey_invalid_flags flags,
229 			  struct printbuf *err)
230 {
231 	struct bkey_alloc_unpacked u;
232 	int ret = 0;
233 
234 	bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k), c, err,
235 			 alloc_v2_unpack_error,
236 			 "unpack error");
237 fsck_err:
238 	return ret;
239 }
240 
241 int bch2_alloc_v4_invalid(struct bch_fs *c, struct bkey_s_c k,
242 			  enum bkey_invalid_flags flags, struct printbuf *err)
243 {
244 	struct bkey_s_c_alloc_v4 a = bkey_s_c_to_alloc_v4(k);
245 	int ret = 0;
246 
247 	bkey_fsck_err_on(alloc_v4_u64s(a.v) > bkey_val_u64s(k.k), c, err,
248 			 alloc_v4_val_size_bad,
249 			 "bad val size (%u > %zu)",
250 			 alloc_v4_u64s(a.v), bkey_val_u64s(k.k));
251 
252 	bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(a.v) &&
253 			 BCH_ALLOC_V4_NR_BACKPOINTERS(a.v), c, err,
254 			 alloc_v4_backpointers_start_bad,
255 			 "invalid backpointers_start");
256 
257 	bkey_fsck_err_on(alloc_data_type(*a.v, a.v->data_type) != a.v->data_type, c, err,
258 			 alloc_key_data_type_bad,
259 			 "invalid data type (got %u should be %u)",
260 			 a.v->data_type, alloc_data_type(*a.v, a.v->data_type));
261 
262 	switch (a.v->data_type) {
263 	case BCH_DATA_free:
264 	case BCH_DATA_need_gc_gens:
265 	case BCH_DATA_need_discard:
266 		bkey_fsck_err_on(bch2_bucket_sectors(*a.v) || a.v->stripe,
267 				 c, err, alloc_key_empty_but_have_data,
268 				 "empty data type but have data");
269 		break;
270 	case BCH_DATA_sb:
271 	case BCH_DATA_journal:
272 	case BCH_DATA_btree:
273 	case BCH_DATA_user:
274 	case BCH_DATA_parity:
275 		bkey_fsck_err_on(!bch2_bucket_sectors_dirty(*a.v),
276 				 c, err, alloc_key_dirty_sectors_0,
277 				 "data_type %s but dirty_sectors==0",
278 				 bch2_data_type_str(a.v->data_type));
279 		break;
280 	case BCH_DATA_cached:
281 		bkey_fsck_err_on(!a.v->cached_sectors ||
282 				 bch2_bucket_sectors_dirty(*a.v) ||
283 				 a.v->stripe,
284 				 c, err, alloc_key_cached_inconsistency,
285 				 "data type inconsistency");
286 
287 		bkey_fsck_err_on(!a.v->io_time[READ] &&
288 				 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
289 				 c, err, alloc_key_cached_but_read_time_zero,
290 				 "cached bucket with read_time == 0");
291 		break;
292 	case BCH_DATA_stripe:
293 		break;
294 	}
295 fsck_err:
296 	return ret;
297 }
298 
299 void bch2_alloc_v4_swab(struct bkey_s k)
300 {
301 	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
302 	struct bch_backpointer *bp, *bps;
303 
304 	a->journal_seq		= swab64(a->journal_seq);
305 	a->flags		= swab32(a->flags);
306 	a->dirty_sectors	= swab32(a->dirty_sectors);
307 	a->cached_sectors	= swab32(a->cached_sectors);
308 	a->io_time[0]		= swab64(a->io_time[0]);
309 	a->io_time[1]		= swab64(a->io_time[1]);
310 	a->stripe		= swab32(a->stripe);
311 	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
312 	a->fragmentation_lru	= swab64(a->fragmentation_lru);
313 
314 	bps = alloc_v4_backpointers(a);
315 	for (bp = bps; bp < bps + BCH_ALLOC_V4_NR_BACKPOINTERS(a); bp++) {
316 		bp->bucket_offset	= swab40(bp->bucket_offset);
317 		bp->bucket_len		= swab32(bp->bucket_len);
318 		bch2_bpos_swab(&bp->pos);
319 	}
320 }
321 
322 void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
323 {
324 	struct bch_alloc_v4 _a;
325 	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
326 
327 	prt_newline(out);
328 	printbuf_indent_add(out, 2);
329 
330 	prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
331 	bch2_prt_data_type(out, a->data_type);
332 	prt_newline(out);
333 	prt_printf(out, "journal_seq       %llu",	a->journal_seq);
334 	prt_newline(out);
335 	prt_printf(out, "need_discard      %llu",	BCH_ALLOC_V4_NEED_DISCARD(a));
336 	prt_newline(out);
337 	prt_printf(out, "need_inc_gen      %llu",	BCH_ALLOC_V4_NEED_INC_GEN(a));
338 	prt_newline(out);
339 	prt_printf(out, "dirty_sectors     %u",	a->dirty_sectors);
340 	prt_newline(out);
341 	prt_printf(out, "cached_sectors    %u",	a->cached_sectors);
342 	prt_newline(out);
343 	prt_printf(out, "stripe            %u",	a->stripe);
344 	prt_newline(out);
345 	prt_printf(out, "stripe_redundancy %u",	a->stripe_redundancy);
346 	prt_newline(out);
347 	prt_printf(out, "io_time[READ]     %llu",	a->io_time[READ]);
348 	prt_newline(out);
349 	prt_printf(out, "io_time[WRITE]    %llu",	a->io_time[WRITE]);
350 	prt_newline(out);
351 	prt_printf(out, "fragmentation     %llu",	a->fragmentation_lru);
352 	prt_newline(out);
353 	prt_printf(out, "bp_start          %llu", BCH_ALLOC_V4_BACKPOINTERS_START(a));
354 	printbuf_indent_sub(out, 2);
355 }
356 
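/*
 * Convert any alloc key version to the in-memory v4 form; for keys that are
 * already v4, this also normalizes BACKPOINTERS_START to BCH_ALLOC_V4_U64s,
 * zeroing any gap that opens up, and clears the backpointer count:
 */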
357 void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
358 {
359 	if (k.k->type == KEY_TYPE_alloc_v4) {
360 		void *src, *dst;
361 
362 		*out = *bkey_s_c_to_alloc_v4(k).v;
363 
364 		src = alloc_v4_backpointers(out);
365 		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
366 		dst = alloc_v4_backpointers(out);
367 
368 		if (src < dst)
369 			memset(src, 0, dst - src);
370 
371 		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
372 	} else {
373 		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
374 
375 		*out = (struct bch_alloc_v4) {
376 			.journal_seq		= u.journal_seq,
377 			.flags			= u.need_discard,
378 			.gen			= u.gen,
379 			.oldest_gen		= u.oldest_gen,
380 			.data_type		= u.data_type,
381 			.stripe_redundancy	= u.stripe_redundancy,
382 			.dirty_sectors		= u.dirty_sectors,
383 			.cached_sectors		= u.cached_sectors,
384 			.io_time[READ]		= u.read_time,
385 			.io_time[WRITE]		= u.write_time,
386 			.stripe			= u.stripe,
387 		};
388 
389 		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
390 	}
391 }
392 
393 static noinline struct bkey_i_alloc_v4 *
394 __bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
395 {
396 	struct bkey_i_alloc_v4 *ret;
397 
398 	ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
399 	if (IS_ERR(ret))
400 		return ret;
401 
402 	if (k.k->type == KEY_TYPE_alloc_v4) {
403 		void *src, *dst;
404 
405 		bkey_reassemble(&ret->k_i, k);
406 
407 		src = alloc_v4_backpointers(&ret->v);
408 		SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
409 		dst = alloc_v4_backpointers(&ret->v);
410 
411 		if (src < dst)
412 			memset(src, 0, dst - src);
413 
414 		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
415 		set_alloc_v4_u64s(ret);
416 	} else {
417 		bkey_alloc_v4_init(&ret->k_i);
418 		ret->k.p = k.k->p;
419 		bch2_alloc_to_v4(k, &ret->v);
420 	}
421 	return ret;
422 }
423 
424 static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
425 {
426 	struct bkey_s_c_alloc_v4 a;
427 
428 	if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
429 	    ((a = bkey_s_c_to_alloc_v4(k), true) &&
430 	     BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
431 		return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);
432 
433 	return __bch2_alloc_to_v4_mut(trans, k);
434 }
435 
436 struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
437 {
438 	return bch2_alloc_to_v4_mut_inlined(trans, k);
439 }
440 
441 struct bkey_i_alloc_v4 *
442 bch2_trans_start_alloc_update(struct btree_trans *trans, struct btree_iter *iter,
443 			      struct bpos pos)
444 {
445 	struct bkey_s_c k;
446 	struct bkey_i_alloc_v4 *a;
447 	int ret;
448 
449 	k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
450 			     BTREE_ITER_WITH_UPDATES|
451 			     BTREE_ITER_CACHED|
452 			     BTREE_ITER_INTENT);
453 	ret = bkey_err(k);
454 	if (unlikely(ret))
455 		return ERR_PTR(ret);
456 
457 	a = bch2_alloc_to_v4_mut_inlined(trans, k);
458 	ret = PTR_ERR_OR_ZERO(a);
459 	if (unlikely(ret))
460 		goto err;
461 	return a;
462 err:
463 	bch2_trans_iter_exit(trans, iter);
464 	return ERR_PTR(ret);
465 }
466 
467 static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
468 {
469 	*offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;
470 
471 	pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
472 	return pos;
473 }
474 
475 static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
476 {
477 	pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
478 	pos.offset += offset;
479 	return pos;
480 }
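
/*
 * Worked example, assuming KEY_TYPE_BUCKET_GENS_BITS == 8: each bucket_gens
 * key covers 256 buckets, so bucket 0x12345 maps to the key at offset 0x123
 * with gens[] index 0x45; bucket_gens_pos_to_alloc() inverts the mapping.
 */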
481 
482 static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
483 {
484 	return k.k->type == KEY_TYPE_bucket_gens
485 		? bkey_s_c_to_bucket_gens(k).v->gens[offset]
486 		: 0;
487 }
488 
489 int bch2_bucket_gens_invalid(struct bch_fs *c, struct bkey_s_c k,
490 			     enum bkey_invalid_flags flags,
491 			     struct printbuf *err)
492 {
493 	int ret = 0;
494 
495 	bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens), c, err,
496 			 bucket_gens_val_size_bad,
497 			 "bad val size (%zu != %zu)",
498 			 bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
499 fsck_err:
500 	return ret;
501 }
502 
503 void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
504 {
505 	struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
506 	unsigned i;
507 
508 	for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
509 		if (i)
510 			prt_char(out, ' ');
511 		prt_printf(out, "%u", g.v->gens[i]);
512 	}
513 }
514 
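/*
 * Build the initial bucket_gens btree from the alloc btree: the gens of up
 * to KEY_TYPE_BUCKET_GENS_NR consecutive buckets are packed into a single
 * bucket_gens key, which is committed each time the iterator crosses into
 * the range of the next key:
 */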
515 int bch2_bucket_gens_init(struct bch_fs *c)
516 {
517 	struct btree_trans *trans = bch2_trans_get(c);
518 	struct bkey_i_bucket_gens g;
519 	bool have_bucket_gens_key = false;
520 	int ret;
521 
522 	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
523 				 BTREE_ITER_PREFETCH, k, ({
524 		/*
525 		 * Not a fsck error because this is checked/repaired by
526 		 * bch2_check_alloc_key() which runs later:
527 		 */
528 		if (!bch2_dev_bucket_exists(c, k.k->p))
529 			continue;
530 
531 		struct bch_alloc_v4 a;
532 		u8 gen = bch2_alloc_to_v4(k, &a)->gen;
533 		unsigned offset;
534 		struct bpos pos = alloc_gens_pos(iter.pos, &offset);
535 		int ret2 = 0;
536 
537 		if (have_bucket_gens_key && bkey_cmp(iter.pos, pos)) {
538 			ret2 =  bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0) ?:
539 				bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
540 			if (ret2)
541 				goto iter_err;
542 			have_bucket_gens_key = false;
543 		}
544 
545 		if (!have_bucket_gens_key) {
546 			bkey_bucket_gens_init(&g.k_i);
547 			g.k.p = pos;
548 			have_bucket_gens_key = true;
549 		}
550 
551 		g.v.gens[offset] = gen;
552 iter_err:
553 		ret2; /* statement expression value: this iteration's error code */
554 	}));
555 
556 	if (have_bucket_gens_key && !ret)
557 		ret = commit_do(trans, NULL, NULL,
558 				BCH_TRANS_COMMIT_no_enospc,
559 			bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));
560 
561 	bch2_trans_put(trans);
562 
563 	bch_err_fn(c, ret);
564 	return ret;
565 }
566 
567 int bch2_alloc_read(struct bch_fs *c)
568 {
569 	struct btree_trans *trans = bch2_trans_get(c);
570 	int ret;
571 
572 	down_read(&c->gc_lock);
573 
574 	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
575 		ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
576 					 BTREE_ITER_PREFETCH, k, ({
577 			u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
578 			u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
579 
580 			if (k.k->type != KEY_TYPE_bucket_gens)
581 				continue;
582 
583 			const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;
584 
585 			/*
586 			 * Not a fsck error because this is checked/repaired by
587 			 * bch2_check_alloc_key() which runs later:
588 			 */
589 			if (!bch2_dev_exists2(c, k.k->p.inode))
590 				continue;
591 
592 			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
593 
594 			for (u64 b = max_t(u64, ca->mi.first_bucket, start);
595 			     b < min_t(u64, ca->mi.nbuckets, end);
596 			     b++)
597 				*bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
598 			0; /* statement expression value: success */
599 		}));
600 	} else {
601 		ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
602 					 BTREE_ITER_PREFETCH, k, ({
603 			/*
604 			 * Not a fsck error because this is checked/repaired by
605 			 * bch2_check_alloc_key() which runs later:
606 			 */
607 			if (!bch2_dev_bucket_exists(c, k.k->p))
608 				continue;
609 
610 			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
611 
612 			struct bch_alloc_v4 a;
613 			*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
614 			0; /* success */
615 		}));
616 	}
617 
618 	bch2_trans_put(trans);
619 	up_read(&c->gc_lock);
620 
621 	bch_err_fn(c, ret);
622 	return ret;
623 }
624 
625 /* Free space/discard btree: */
626 
627 static int bch2_bucket_do_index(struct btree_trans *trans,
628 				struct bkey_s_c alloc_k,
629 				const struct bch_alloc_v4 *a,
630 				bool set)
631 {
632 	struct bch_fs *c = trans->c;
633 	struct bch_dev *ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
634 	struct btree_iter iter;
635 	struct bkey_s_c old;
636 	struct bkey_i *k;
637 	enum btree_id btree;
638 	enum bch_bkey_type old_type = !set ? KEY_TYPE_set : KEY_TYPE_deleted;
639 	enum bch_bkey_type new_type =  set ? KEY_TYPE_set : KEY_TYPE_deleted;
640 	struct printbuf buf = PRINTBUF;
641 	int ret;
642 
643 	if (a->data_type != BCH_DATA_free &&
644 	    a->data_type != BCH_DATA_need_discard)
645 		return 0;
646 
647 	k = bch2_trans_kmalloc_nomemzero(trans, sizeof(*k));
648 	if (IS_ERR(k))
649 		return PTR_ERR(k);
650 
651 	bkey_init(&k->k);
652 	k->k.type = new_type;
653 
654 	switch (a->data_type) {
655 	case BCH_DATA_free:
656 		btree = BTREE_ID_freespace;
657 		k->k.p = alloc_freespace_pos(alloc_k.k->p, *a);
658 		bch2_key_resize(&k->k, 1);
659 		break;
660 	case BCH_DATA_need_discard:
661 		btree = BTREE_ID_need_discard;
662 		k->k.p = alloc_k.k->p;
663 		break;
664 	default:
665 		return 0;
666 	}
667 
668 	old = bch2_bkey_get_iter(trans, &iter, btree,
669 			     bkey_start_pos(&k->k),
670 			     BTREE_ITER_INTENT);
671 	ret = bkey_err(old);
672 	if (ret)
673 		return ret;
674 
675 	if (ca->mi.freespace_initialized &&
676 	    c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info &&
677 	    bch2_trans_inconsistent_on(old.k->type != old_type, trans,
678 			"incorrect key when %s %s:%llu:%llu:0 (got %s should be %s)\n"
679 			"  for %s",
680 			set ? "setting" : "clearing",
681 			bch2_btree_id_str(btree),
682 			iter.pos.inode,
683 			iter.pos.offset,
684 			bch2_bkey_types[old.k->type],
685 			bch2_bkey_types[old_type],
686 			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
687 		ret = -EIO;
688 		goto err;
689 	}
690 
691 	ret = bch2_trans_update(trans, &iter, k, 0);
692 err:
693 	bch2_trans_iter_exit(trans, &iter);
694 	printbuf_exit(&buf);
695 	return ret;
696 }
697 
698 static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
699 					   struct bpos bucket, u8 gen)
700 {
701 	struct btree_iter iter;
702 	unsigned offset;
703 	struct bpos pos = alloc_gens_pos(bucket, &offset);
704 	struct bkey_i_bucket_gens *g;
705 	struct bkey_s_c k;
706 	int ret;
707 
708 	g = bch2_trans_kmalloc(trans, sizeof(*g));
709 	ret = PTR_ERR_OR_ZERO(g);
710 	if (ret)
711 		return ret;
712 
713 	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
714 			       BTREE_ITER_INTENT|
715 			       BTREE_ITER_WITH_UPDATES);
716 	ret = bkey_err(k);
717 	if (ret)
718 		return ret;
719 
720 	if (k.k->type != KEY_TYPE_bucket_gens) {
721 		bkey_bucket_gens_init(&g->k_i);
722 		g->k.p = iter.pos;
723 	} else {
724 		bkey_reassemble(&g->k_i, k);
725 	}
726 
727 	g->v.gens[offset] = gen;
728 
729 	ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
730 	bch2_trans_iter_exit(trans, &iter);
731 	return ret;
732 }
733 
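/*
 * Trigger for alloc key updates: the transactional phase keeps derived
 * state (the freespace, need_discard, bucket_gens and LRU btrees) in sync
 * with the new alloc state, and the atomic phase updates in-memory state
 * and kicks off background work (discards, invalidates, gc_gens) as needed:
 */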
734 int bch2_trigger_alloc(struct btree_trans *trans,
735 		       enum btree_id btree, unsigned level,
736 		       struct bkey_s_c old, struct bkey_s new,
737 		       unsigned flags)
738 {
739 	struct bch_fs *c = trans->c;
740 	int ret = 0;
741 
742 	if (bch2_trans_inconsistent_on(!bch2_dev_bucket_exists(c, new.k->p), trans,
743 				       "alloc key for invalid device or bucket"))
744 		return -EIO;
745 
746 	struct bch_dev *ca = bch_dev_bkey_exists(c, new.k->p.inode);
747 
748 	struct bch_alloc_v4 old_a_convert;
749 	const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);
750 
751 	if (flags & BTREE_TRIGGER_TRANSACTIONAL) {
752 		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
753 
754 		new_a->data_type = alloc_data_type(*new_a, new_a->data_type);
755 
756 		if (bch2_bucket_sectors(*new_a) > bch2_bucket_sectors(*old_a)) {
757 			new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
758 			new_a->io_time[WRITE] = max_t(u64, 1, atomic64_read(&c->io_clock[WRITE].now));
759 			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
760 			SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
761 		}
762 
763 		if (data_type_is_empty(new_a->data_type) &&
764 		    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
765 		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
766 			new_a->gen++;
767 			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
768 		}
769 
770 		if (old_a->data_type != new_a->data_type ||
771 		    (new_a->data_type == BCH_DATA_free &&
772 		     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
773 			ret =   bch2_bucket_do_index(trans, old, old_a, false) ?:
774 				bch2_bucket_do_index(trans, new.s_c, new_a, true);
775 			if (ret)
776 				return ret;
777 		}
778 
779 		if (new_a->data_type == BCH_DATA_cached &&
780 		    !new_a->io_time[READ])
781 			new_a->io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));
782 
783 		u64 old_lru = alloc_lru_idx_read(*old_a);
784 		u64 new_lru = alloc_lru_idx_read(*new_a);
785 		if (old_lru != new_lru) {
786 			ret = bch2_lru_change(trans, new.k->p.inode,
787 					      bucket_to_u64(new.k->p),
788 					      old_lru, new_lru);
789 			if (ret)
790 				return ret;
791 		}
792 
793 		new_a->fragmentation_lru = alloc_lru_idx_fragmentation(*new_a,
794 						bch_dev_bkey_exists(c, new.k->p.inode));
795 		if (old_a->fragmentation_lru != new_a->fragmentation_lru) {
796 			ret = bch2_lru_change(trans,
797 					BCH_LRU_FRAGMENTATION_START,
798 					bucket_to_u64(new.k->p),
799 					old_a->fragmentation_lru, new_a->fragmentation_lru);
800 			if (ret)
801 				return ret;
802 		}
803 
804 		if (old_a->gen != new_a->gen) {
805 			ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
806 			if (ret)
807 				return ret;
808 		}
809 
810 		/*
811 		 * need to know if we're getting called from the invalidate path or
812 		 * not:
813 		 */
814 
815 		if ((flags & BTREE_TRIGGER_BUCKET_INVALIDATE) &&
816 		    old_a->cached_sectors) {
817 			ret = bch2_update_cached_sectors_list(trans, new.k->p.inode,
818 							      -((s64) old_a->cached_sectors));
819 			if (ret)
820 				return ret;
821 		}
822 	}
823 
824 	if ((flags & BTREE_TRIGGER_ATOMIC) && (flags & BTREE_TRIGGER_INSERT)) {
825 		struct bch_alloc_v4 *new_a = bkey_s_to_alloc_v4(new).v;
826 		u64 journal_seq = trans->journal_res.seq;
827 		u64 bucket_journal_seq = new_a->journal_seq;
828 
829 		if ((flags & BTREE_TRIGGER_INSERT) &&
830 		    data_type_is_empty(old_a->data_type) !=
831 		    data_type_is_empty(new_a->data_type) &&
832 		    new.k->type == KEY_TYPE_alloc_v4) {
833 			struct bch_alloc_v4 *v = bkey_s_to_alloc_v4(new).v;
834 
835 			/*
836 			 * If the btree updates referring to a bucket weren't flushed
837 			 * before the bucket became empty again, then we don't have
838 			 * to wait on a journal flush before we can reuse the bucket:
839 			 */
840 			v->journal_seq = bucket_journal_seq =
841 				data_type_is_empty(new_a->data_type) &&
842 				(journal_seq == v->journal_seq ||
843 				 bch2_journal_noflush_seq(&c->journal, v->journal_seq))
844 				? 0 : journal_seq;
845 		}
846 
847 		if (!data_type_is_empty(old_a->data_type) &&
848 		    data_type_is_empty(new_a->data_type) &&
849 		    bucket_journal_seq) {
850 			ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
851 					c->journal.flushed_seq_ondisk,
852 					new.k->p.inode, new.k->p.offset,
853 					bucket_journal_seq);
854 			if (ret) {
855 				bch2_fs_fatal_error(c,
856 					"setting bucket_needs_journal_commit: %s", bch2_err_str(ret));
857 				return ret;
858 			}
859 		}
860 
861 		percpu_down_read(&c->mark_lock);
862 		if (new_a->gen != old_a->gen)
863 			*bucket_gen(ca, new.k->p.offset) = new_a->gen;
864 
865 		bch2_dev_usage_update(c, ca, old_a, new_a, journal_seq, false);
866 		percpu_up_read(&c->mark_lock);
867 
868 #define eval_state(_a, expr)		({ const struct bch_alloc_v4 *a = _a; expr; })
869 #define statechange(expr)		!eval_state(old_a, expr) && eval_state(new_a, expr)
870 #define bucket_flushed(a)		(!a->journal_seq || a->journal_seq <= c->journal.flushed_seq_ondisk)
871 
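		/*
		 * statechange() fires only on a transition into the given
		 * state: e.g. statechange(a->data_type == BCH_DATA_free) is
		 * true iff the old bucket wasn't free and the new one is.
		 */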
872 		if (statechange(a->data_type == BCH_DATA_free) &&
873 		    bucket_flushed(new_a))
874 			closure_wake_up(&c->freelist_wait);
875 
876 		if (statechange(a->data_type == BCH_DATA_need_discard) &&
877 		    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
878 		    bucket_flushed(new_a))
879 			bch2_discard_one_bucket_fast(c, new.k->p);
880 
881 		if (statechange(a->data_type == BCH_DATA_cached) &&
882 		    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
883 		    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
884 			bch2_do_invalidates(c);
885 
886 		if (statechange(a->data_type == BCH_DATA_need_gc_gens))
887 			bch2_do_gc_gens(c);
888 	}
889 
890 	if ((flags & BTREE_TRIGGER_GC) &&
891 	    (flags & BTREE_TRIGGER_BUCKET_INVALIDATE)) {
892 		struct bch_alloc_v4 new_a_convert;
893 		const struct bch_alloc_v4 *new_a = bch2_alloc_to_v4(new.s_c, &new_a_convert);
894 
895 		percpu_down_read(&c->mark_lock);
896 		struct bucket *g = gc_bucket(ca, new.k->p.offset);
897 
898 		bucket_lock(g);
899 
900 		g->gen_valid		= 1;
901 		g->gen			= new_a->gen;
902 		g->data_type		= new_a->data_type;
903 		g->stripe		= new_a->stripe;
904 		g->stripe_redundancy	= new_a->stripe_redundancy;
905 		g->dirty_sectors	= new_a->dirty_sectors;
906 		g->cached_sectors	= new_a->cached_sectors;
907 
908 		bucket_unlock(g);
909 		percpu_up_read(&c->mark_lock);
910 	}
911 
912 	return 0;
913 }
914 
915 /*
916  * This synthesizes deleted extents for holes, similar to BTREE_ITER_SLOTS for
917  * extents-style btrees, but works on non-extents btrees:
918  */
919 static struct bkey_s_c bch2_get_key_or_hole(struct btree_iter *iter, struct bpos end, struct bkey *hole)
920 {
921 	struct bkey_s_c k = bch2_btree_iter_peek_slot(iter);
922 
923 	if (bkey_err(k))
924 		return k;
925 
926 	if (k.k->type) {
927 		return k;
928 	} else {
929 		struct btree_iter iter2;
930 		struct bpos next;
931 
932 		bch2_trans_copy_iter(&iter2, iter);
933 
934 		struct btree_path *path = btree_iter_path(iter->trans, iter);
935 		if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
936 			end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));
937 
938 		end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));
939 
940 		/*
941 		 * A btree node's min/max is a closed interval, while peek_upto()
942 		 * takes a half-open interval:
943 		 */
944 		k = bch2_btree_iter_peek_upto(&iter2, end);
945 		next = iter2.pos;
946 		bch2_trans_iter_exit(iter->trans, &iter2);
947 
948 		BUG_ON(next.offset >= iter->pos.offset + U32_MAX);
949 
950 		if (bkey_err(k))
951 			return k;
952 
953 		bkey_init(hole);
954 		hole->p = iter->pos;
955 
956 		bch2_key_resize(hole, next.offset - iter->pos.offset);
957 		return (struct bkey_s_c) { hole, NULL };
958 	}
959 }
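
/*
 * Example: with the iterator at offset 40 and the next key starting at
 * offset 100, this returns a synthesized hole [40, 100). The hole is
 * clamped to the end of the current btree node, and to U32_MAX - 1 sectors
 * since a bkey's size field is 32 bits.
 */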
960 
961 static bool next_bucket(struct bch_fs *c, struct bpos *bucket)
962 {
963 	struct bch_dev *ca;
964 
965 	if (bch2_dev_bucket_exists(c, *bucket))
966 		return true;
967 
968 	if (bch2_dev_exists2(c, bucket->inode)) {
969 		ca = bch_dev_bkey_exists(c, bucket->inode);
970 
971 		if (bucket->offset < ca->mi.first_bucket) {
972 			bucket->offset = ca->mi.first_bucket;
973 			return true;
974 		}
975 
976 		bucket->inode++;
977 		bucket->offset = 0;
978 	}
979 
980 	rcu_read_lock();
981 	ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
982 	if (ca)
983 		*bucket = POS(ca->dev_idx, ca->mi.first_bucket);
984 	rcu_read_unlock();
985 
986 	return ca != NULL;
987 }
988 
989 static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_iter *iter, struct bkey *hole)
990 {
991 	struct bch_fs *c = iter->trans->c;
992 	struct bkey_s_c k;
993 again:
994 	k = bch2_get_key_or_hole(iter, POS_MAX, hole);
995 	if (bkey_err(k))
996 		return k;
997 
998 	if (!k.k->type) {
999 		struct bpos bucket = bkey_start_pos(k.k);
1000 
1001 		if (!bch2_dev_bucket_exists(c, bucket)) {
1002 			if (!next_bucket(c, &bucket))
1003 				return bkey_s_c_null;
1004 
1005 			bch2_btree_iter_set_pos(iter, bucket);
1006 			goto again;
1007 		}
1008 
1009 		if (!bch2_dev_bucket_exists(c, k.k->p)) {
1010 			struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);
1011 
1012 			bch2_key_resize(hole, ca->mi.nbuckets - bucket.offset);
1013 		}
1014 	}
1015 
1016 	return k;
1017 }
1018 
1019 static noinline_for_stack
1020 int bch2_check_alloc_key(struct btree_trans *trans,
1021 			 struct bkey_s_c alloc_k,
1022 			 struct btree_iter *alloc_iter,
1023 			 struct btree_iter *discard_iter,
1024 			 struct btree_iter *freespace_iter,
1025 			 struct btree_iter *bucket_gens_iter)
1026 {
1027 	struct bch_fs *c = trans->c;
1028 	struct bch_dev *ca;
1029 	struct bch_alloc_v4 a_convert;
1030 	const struct bch_alloc_v4 *a;
1031 	unsigned discard_key_type, freespace_key_type;
1032 	unsigned gens_offset;
1033 	struct bkey_s_c k;
1034 	struct printbuf buf = PRINTBUF;
1035 	int ret;
1036 
1037 	if (fsck_err_on(!bch2_dev_bucket_exists(c, alloc_k.k->p), c,
1038 			alloc_key_to_missing_dev_bucket,
1039 			"alloc key for invalid device:bucket %llu:%llu",
1040 			alloc_k.k->p.inode, alloc_k.k->p.offset))
1041 		return bch2_btree_delete_at(trans, alloc_iter, 0);
1042 
1043 	ca = bch_dev_bkey_exists(c, alloc_k.k->p.inode);
1044 	if (!ca->mi.freespace_initialized)
1045 		return 0;
1046 
1047 	a = bch2_alloc_to_v4(alloc_k, &a_convert);
1048 
1049 	discard_key_type = a->data_type == BCH_DATA_need_discard ? KEY_TYPE_set : 0;
1050 	bch2_btree_iter_set_pos(discard_iter, alloc_k.k->p);
1051 	k = bch2_btree_iter_peek_slot(discard_iter);
1052 	ret = bkey_err(k);
1053 	if (ret)
1054 		goto err;
1055 
1056 	if (fsck_err_on(k.k->type != discard_key_type,
1057 			c, need_discard_key_wrong,
1058 			"incorrect key in need_discard btree (got %s should be %s)\n"
1059 			"  %s",
1060 			bch2_bkey_types[k.k->type],
1061 			bch2_bkey_types[discard_key_type],
1062 			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
1063 		struct bkey_i *update =
1064 			bch2_trans_kmalloc(trans, sizeof(*update));
1065 
1066 		ret = PTR_ERR_OR_ZERO(update);
1067 		if (ret)
1068 			goto err;
1069 
1070 		bkey_init(&update->k);
1071 		update->k.type	= discard_key_type;
1072 		update->k.p	= discard_iter->pos;
1073 
1074 		ret = bch2_trans_update(trans, discard_iter, update, 0);
1075 		if (ret)
1076 			goto err;
1077 	}
1078 
1079 	freespace_key_type = a->data_type == BCH_DATA_free ? KEY_TYPE_set : 0;
1080 	bch2_btree_iter_set_pos(freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
1081 	k = bch2_btree_iter_peek_slot(freespace_iter);
1082 	ret = bkey_err(k);
1083 	if (ret)
1084 		goto err;
1085 
1086 	if (fsck_err_on(k.k->type != freespace_key_type,
1087 			c, freespace_key_wrong,
1088 			"incorrect key in freespace btree (got %s should be %s)\n"
1089 			"  %s",
1090 			bch2_bkey_types[k.k->type],
1091 			bch2_bkey_types[freespace_key_type],
1092 			(printbuf_reset(&buf),
1093 			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
1094 		struct bkey_i *update =
1095 			bch2_trans_kmalloc(trans, sizeof(*update));
1096 
1097 		ret = PTR_ERR_OR_ZERO(update);
1098 		if (ret)
1099 			goto err;
1100 
1101 		bkey_init(&update->k);
1102 		update->k.type	= freespace_key_type;
1103 		update->k.p	= freespace_iter->pos;
1104 		bch2_key_resize(&update->k, 1);
1105 
1106 		ret = bch2_trans_update(trans, freespace_iter, update, 0);
1107 		if (ret)
1108 			goto err;
1109 	}
1110 
1111 	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
1112 	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
1113 	ret = bkey_err(k);
1114 	if (ret)
1115 		goto err;
1116 
1117 	if (fsck_err_on(a->gen != alloc_gen(k, gens_offset),
1118 			c, bucket_gens_key_wrong,
1119 			"incorrect gen in bucket_gens btree (got %u should be %u)\n"
1120 			"  %s",
1121 			alloc_gen(k, gens_offset), a->gen,
1122 			(printbuf_reset(&buf),
1123 			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
1124 		struct bkey_i_bucket_gens *g =
1125 			bch2_trans_kmalloc(trans, sizeof(*g));
1126 
1127 		ret = PTR_ERR_OR_ZERO(g);
1128 		if (ret)
1129 			goto err;
1130 
1131 		if (k.k->type == KEY_TYPE_bucket_gens) {
1132 			bkey_reassemble(&g->k_i, k);
1133 		} else {
1134 			bkey_bucket_gens_init(&g->k_i);
1135 			g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
1136 		}
1137 
1138 		g->v.gens[gens_offset] = a->gen;
1139 
1140 		ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
1141 		if (ret)
1142 			goto err;
1143 	}
1144 err:
1145 fsck_err:
1146 	printbuf_exit(&buf);
1147 	return ret;
1148 }
1149 
1150 static noinline_for_stack
1151 int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
1152 				    struct bpos start,
1153 				    struct bpos *end,
1154 				    struct btree_iter *freespace_iter)
1155 {
1156 	struct bch_fs *c = trans->c;
1157 	struct bch_dev *ca;
1158 	struct bkey_s_c k;
1159 	struct printbuf buf = PRINTBUF;
1160 	int ret;
1161 
1162 	ca = bch_dev_bkey_exists(c, start.inode);
1163 	if (!ca->mi.freespace_initialized)
1164 		return 0;
1165 
1166 	bch2_btree_iter_set_pos(freespace_iter, start);
1167 
1168 	k = bch2_btree_iter_peek_slot(freespace_iter);
1169 	ret = bkey_err(k);
1170 	if (ret)
1171 		goto err;
1172 
1173 	*end = bkey_min(k.k->p, *end);
1174 
1175 	if (fsck_err_on(k.k->type != KEY_TYPE_set,
1176 			c, freespace_hole_missing,
1177 			"hole in alloc btree missing in freespace btree\n"
1178 			"  device %llu buckets %llu-%llu",
1179 			freespace_iter->pos.inode,
1180 			freespace_iter->pos.offset,
1181 			end->offset)) {
1182 		struct bkey_i *update =
1183 			bch2_trans_kmalloc(trans, sizeof(*update));
1184 
1185 		ret = PTR_ERR_OR_ZERO(update);
1186 		if (ret)
1187 			goto err;
1188 
1189 		bkey_init(&update->k);
1190 		update->k.type	= KEY_TYPE_set;
1191 		update->k.p	= freespace_iter->pos;
1192 		bch2_key_resize(&update->k,
1193 				min_t(u64, U32_MAX, end->offset -
1194 				      freespace_iter->pos.offset));
1195 
1196 		ret = bch2_trans_update(trans, freespace_iter, update, 0);
1197 		if (ret)
1198 			goto err;
1199 	}
1200 err:
1201 fsck_err:
1202 	printbuf_exit(&buf);
1203 	return ret;
1204 }
1205 
1206 static noinline_for_stack
1207 int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
1208 				      struct bpos start,
1209 				      struct bpos *end,
1210 				      struct btree_iter *bucket_gens_iter)
1211 {
1212 	struct bch_fs *c = trans->c;
1213 	struct bkey_s_c k;
1214 	struct printbuf buf = PRINTBUF;
1215 	unsigned i, gens_offset, gens_end_offset;
1216 	int ret;
1217 
1218 	bch2_btree_iter_set_pos(bucket_gens_iter, alloc_gens_pos(start, &gens_offset));
1219 
1220 	k = bch2_btree_iter_peek_slot(bucket_gens_iter);
1221 	ret = bkey_err(k);
1222 	if (ret)
1223 		goto err;
1224 
1225 	if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
1226 		     alloc_gens_pos(*end,  &gens_end_offset)))
1227 		gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;
1228 
1229 	if (k.k->type == KEY_TYPE_bucket_gens) {
1230 		struct bkey_i_bucket_gens g;
1231 		bool need_update = false;
1232 
1233 		bkey_reassemble(&g.k_i, k);
1234 
1235 		for (i = gens_offset; i < gens_end_offset; i++) {
1236 			if (fsck_err_on(g.v.gens[i], c,
1237 					bucket_gens_hole_wrong,
1238 					"hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
1239 					bucket_gens_pos_to_alloc(k.k->p, i).inode,
1240 					bucket_gens_pos_to_alloc(k.k->p, i).offset,
1241 					g.v.gens[i])) {
1242 				g.v.gens[i] = 0;
1243 				need_update = true;
1244 			}
1245 		}
1246 
1247 		if (need_update) {
1248 			struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
1249 
1250 			ret = PTR_ERR_OR_ZERO(u);
1251 			if (ret)
1252 				goto err;
1253 
1254 			memcpy(u, &g, sizeof(g));
1255 
1256 			ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
1257 			if (ret)
1258 				goto err;
1259 		}
1260 	}
1261 
1262 	*end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
1263 err:
1264 fsck_err:
1265 	printbuf_exit(&buf);
1266 	return ret;
1267 }
1268 
1269 static noinline_for_stack int bch2_check_discard_freespace_key(struct btree_trans *trans,
1270 					      struct btree_iter *iter)
1271 {
1272 	struct bch_fs *c = trans->c;
1273 	struct btree_iter alloc_iter;
1274 	struct bkey_s_c alloc_k;
1275 	struct bch_alloc_v4 a_convert;
1276 	const struct bch_alloc_v4 *a;
1277 	u64 genbits;
1278 	struct bpos pos;
1279 	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
1280 		? BCH_DATA_need_discard
1281 		: BCH_DATA_free;
1282 	struct printbuf buf = PRINTBUF;
1283 	int ret;
1284 
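	/*
	 * In the freespace btree the bucket is packed into the low 56 bits of
	 * the key's offset, with the generation bits in the top 8 (need_discard
	 * keys carry no genbits); split them apart before looking the bucket up
	 * in the alloc btree:
	 */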
1285 	pos = iter->pos;
1286 	pos.offset &= ~(~0ULL << 56);
1287 	genbits = iter->pos.offset & (~0ULL << 56);
1288 
1289 	alloc_k = bch2_bkey_get_iter(trans, &alloc_iter, BTREE_ID_alloc, pos, 0);
1290 	ret = bkey_err(alloc_k);
1291 	if (ret)
1292 		return ret;
1293 
1294 	if (fsck_err_on(!bch2_dev_bucket_exists(c, pos), c,
1295 			need_discard_freespace_key_to_invalid_dev_bucket,
1296 			"entry in %s btree for nonexistent dev:bucket %llu:%llu",
1297 			bch2_btree_id_str(iter->btree_id), pos.inode, pos.offset))
1298 		goto delete;
1299 
1300 	a = bch2_alloc_to_v4(alloc_k, &a_convert);
1301 
1302 	if (fsck_err_on(a->data_type != state ||
1303 			(state == BCH_DATA_free &&
1304 			 genbits != alloc_freespace_genbits(*a)), c,
1305 			need_discard_freespace_key_bad,
1306 			"%s\n  incorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
1307 			(bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
1308 			bch2_btree_id_str(iter->btree_id),
1309 			iter->pos.inode,
1310 			iter->pos.offset,
1311 			a->data_type == state,
1312 			genbits >> 56, alloc_freespace_genbits(*a) >> 56))
1313 		goto delete;
1314 out:
1315 fsck_err:
1316 	set_btree_iter_dontneed(&alloc_iter);
1317 	bch2_trans_iter_exit(trans, &alloc_iter);
1318 	printbuf_exit(&buf);
1319 	return ret;
1320 delete:
1321 	ret =   bch2_btree_delete_extent_at(trans, iter,
1322 			iter->btree_id == BTREE_ID_freespace ? 1 : 0, 0) ?:
1323 		bch2_trans_commit(trans, NULL, NULL,
1324 			BCH_TRANS_COMMIT_no_enospc);
1325 	goto out;
1326 }
1327 
1328 /*
1329  * We've already checked that generation numbers in the bucket_gens btree are
1330  * valid for buckets that exist; this just checks for keys for nonexistent
1331  * buckets.
1332  */
1333 static noinline_for_stack
1334 int bch2_check_bucket_gens_key(struct btree_trans *trans,
1335 			       struct btree_iter *iter,
1336 			       struct bkey_s_c k)
1337 {
1338 	struct bch_fs *c = trans->c;
1339 	struct bkey_i_bucket_gens g;
1340 	struct bch_dev *ca;
1341 	u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
1342 	u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
1343 	u64 b;
1344 	bool need_update = false, dev_exists;
1345 	struct printbuf buf = PRINTBUF;
1346 	int ret = 0;
1347 
1348 	BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
1349 	bkey_reassemble(&g.k_i, k);
1350 
1351 	/* if there's no bch_dev, skip the key whether we repair or not */
1352 	dev_exists = bch2_dev_exists2(c, k.k->p.inode);
1353 	if (!dev_exists) {
1354 		if (fsck_err_on(!dev_exists, c,
1355 				bucket_gens_to_invalid_dev,
1356 				"bucket_gens key for invalid device:\n  %s",
1357 				(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1358 			ret = bch2_btree_delete_at(trans, iter, 0);
1359 		}
1360 		goto out;
1361 	}
1362 
1363 	ca = bch_dev_bkey_exists(c, k.k->p.inode);
1364 	if (fsck_err_on(end <= ca->mi.first_bucket ||
1365 			start >= ca->mi.nbuckets, c,
1366 			bucket_gens_to_invalid_buckets,
1367 			"bucket_gens key for invalid buckets:\n  %s",
1368 			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1369 		ret = bch2_btree_delete_at(trans, iter, 0);
1370 		goto out;
1371 	}
1372 
1373 	for (b = start; b < ca->mi.first_bucket; b++)
1374 		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
1375 				bucket_gens_nonzero_for_invalid_buckets,
1376 				"bucket_gens key has nonzero gen for invalid bucket")) {
1377 			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
1378 			need_update = true;
1379 		}
1380 
1381 	for (b = ca->mi.nbuckets; b < end; b++)
1382 		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK], c,
1383 				bucket_gens_nonzero_for_invalid_buckets,
1384 				"bucket_gens key has nonzero gen for invalid bucket")) {
1385 			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
1386 			need_update = true;
1387 		}
1388 
1389 	if (need_update) {
1390 		struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
1391 
1392 		ret = PTR_ERR_OR_ZERO(u);
1393 		if (ret)
1394 			goto out;
1395 
1396 		memcpy(u, &g, sizeof(g));
1397 		ret = bch2_trans_update(trans, iter, u, 0);
1398 	}
1399 out:
1400 fsck_err:
1401 	printbuf_exit(&buf);
1402 	return ret;
1403 }
1404 
1405 int bch2_check_alloc_info(struct bch_fs *c)
1406 {
1407 	struct btree_trans *trans = bch2_trans_get(c);
1408 	struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
1409 	struct bkey hole;
1410 	struct bkey_s_c k;
1411 	int ret = 0;
1412 
1413 	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
1414 			     BTREE_ITER_PREFETCH);
1415 	bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
1416 			     BTREE_ITER_PREFETCH);
1417 	bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
1418 			     BTREE_ITER_PREFETCH);
1419 	bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
1420 			     BTREE_ITER_PREFETCH);
1421 
1422 	while (1) {
1423 		struct bpos next;
1424 
1425 		bch2_trans_begin(trans);
1426 
1427 		k = bch2_get_key_or_real_bucket_hole(&iter, &hole);
1428 		ret = bkey_err(k);
1429 		if (ret)
1430 			goto bkey_err;
1431 
1432 		if (!k.k)
1433 			break;
1434 
1435 		if (k.k->type) {
1436 			next = bpos_nosnap_successor(k.k->p);
1437 
1438 			ret = bch2_check_alloc_key(trans,
1439 						   k, &iter,
1440 						   &discard_iter,
1441 						   &freespace_iter,
1442 						   &bucket_gens_iter);
1443 			if (ret)
1444 				goto bkey_err;
1445 		} else {
1446 			next = k.k->p;
1447 
1448 			ret = bch2_check_alloc_hole_freespace(trans,
1449 						    bkey_start_pos(k.k),
1450 						    &next,
1451 						    &freespace_iter) ?:
1452 				bch2_check_alloc_hole_bucket_gens(trans,
1453 						    bkey_start_pos(k.k),
1454 						    &next,
1455 						    &bucket_gens_iter);
1456 			if (ret)
1457 				goto bkey_err;
1458 		}
1459 
1460 		ret = bch2_trans_commit(trans, NULL, NULL,
1461 					BCH_TRANS_COMMIT_no_enospc);
1462 		if (ret)
1463 			goto bkey_err;
1464 
1465 		bch2_btree_iter_set_pos(&iter, next);
1466 bkey_err:
1467 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1468 			continue;
1469 		if (ret)
1470 			break;
1471 	}
1472 	bch2_trans_iter_exit(trans, &bucket_gens_iter);
1473 	bch2_trans_iter_exit(trans, &freespace_iter);
1474 	bch2_trans_iter_exit(trans, &discard_iter);
1475 	bch2_trans_iter_exit(trans, &iter);
1476 
1477 	if (ret < 0)
1478 		goto err;
1479 
1480 	ret = for_each_btree_key(trans, iter,
1481 			BTREE_ID_need_discard, POS_MIN,
1482 			BTREE_ITER_PREFETCH, k,
1483 		bch2_check_discard_freespace_key(trans, &iter));
1484 	if (ret)
1485 		goto err;
1486 
1487 	bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
1488 			     BTREE_ITER_PREFETCH);
1489 	while (1) {
1490 		bch2_trans_begin(trans);
1491 		k = bch2_btree_iter_peek(&iter);
1492 		if (!k.k)
1493 			break;
1494 
1495 		ret = bkey_err(k) ?:
1496 			bch2_check_discard_freespace_key(trans, &iter);
1497 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
1498 			ret = 0;
1499 			continue;
1500 		}
1501 		if (ret) {
1502 			struct printbuf buf = PRINTBUF;
1503 			bch2_bkey_val_to_text(&buf, c, k);
1504 
1505 			bch_err(c, "while checking %s", buf.buf);
1506 			printbuf_exit(&buf);
1507 			break;
1508 		}
1509 
1510 		bch2_btree_iter_set_pos(&iter, bpos_nosnap_successor(iter.pos));
1511 	}
1512 	bch2_trans_iter_exit(trans, &iter);
1513 	if (ret)
1514 		goto err;
1515 
1516 	ret = for_each_btree_key_commit(trans, iter,
1517 			BTREE_ID_bucket_gens, POS_MIN,
1518 			BTREE_ITER_PREFETCH, k,
1519 			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1520 		bch2_check_bucket_gens_key(trans, &iter, k));
1521 err:
1522 	bch2_trans_put(trans);
1523 	bch_err_fn(c, ret);
1524 	return ret;
1525 }
1526 
1527 static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
1528 				       struct btree_iter *alloc_iter)
1529 {
1530 	struct bch_fs *c = trans->c;
1531 	struct btree_iter lru_iter;
1532 	struct bch_alloc_v4 a_convert;
1533 	const struct bch_alloc_v4 *a;
1534 	struct bkey_s_c alloc_k, lru_k;
1535 	struct printbuf buf = PRINTBUF;
1536 	int ret;
1537 
1538 	alloc_k = bch2_btree_iter_peek(alloc_iter);
1539 	if (!alloc_k.k)
1540 		return 0;
1541 
1542 	ret = bkey_err(alloc_k);
1543 	if (ret)
1544 		return ret;
1545 
1546 	a = bch2_alloc_to_v4(alloc_k, &a_convert);
1547 
1548 	if (a->data_type != BCH_DATA_cached)
1549 		return 0;
1550 
1551 	if (fsck_err_on(!a->io_time[READ], c,
1552 			alloc_key_cached_but_read_time_zero,
1553 			"cached bucket with read_time 0\n"
1554 			"  %s",
1555 		(printbuf_reset(&buf),
1556 		 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
1557 		struct bkey_i_alloc_v4 *a_mut =
1558 			bch2_alloc_to_v4_mut(trans, alloc_k);
1559 		ret = PTR_ERR_OR_ZERO(a_mut);
1560 		if (ret)
1561 			goto err;
1562 
1563 		a_mut->v.io_time[READ] = atomic64_read(&c->io_clock[READ].now);
1564 		ret = bch2_trans_update(trans, alloc_iter,
1565 					&a_mut->k_i, BTREE_TRIGGER_NORUN);
1566 		if (ret)
1567 			goto err;
1568 
1569 		a = &a_mut->v;
1570 	}
1571 
1572 	lru_k = bch2_bkey_get_iter(trans, &lru_iter, BTREE_ID_lru,
1573 			     lru_pos(alloc_k.k->p.inode,
1574 				     bucket_to_u64(alloc_k.k->p),
1575 				     a->io_time[READ]), 0);
1576 	ret = bkey_err(lru_k);
1577 	if (ret)
1578 		return ret;
1579 
1580 	if (fsck_err_on(lru_k.k->type != KEY_TYPE_set, c,
1581 			alloc_key_to_missing_lru_entry,
1582 			"missing lru entry\n"
1583 			"  %s",
1584 			(printbuf_reset(&buf),
1585 			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
1586 		ret = bch2_lru_set(trans,
1587 				   alloc_k.k->p.inode,
1588 				   bucket_to_u64(alloc_k.k->p),
1589 				   a->io_time[READ]);
1590 		if (ret)
1591 			goto err;
1592 	}
1593 err:
1594 fsck_err:
1595 	bch2_trans_iter_exit(trans, &lru_iter);
1596 	printbuf_exit(&buf);
1597 	return ret;
1598 }
1599 
1600 int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
1601 {
1602 	int ret = bch2_trans_run(c,
1603 		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
1604 				POS_MIN, BTREE_ITER_PREFETCH, k,
1605 				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1606 			bch2_check_alloc_to_lru_ref(trans, &iter)));
1607 	bch_err_fn(c, ret);
1608 	return ret;
1609 }
1610 
1611 static int discard_in_flight_add(struct bch_fs *c, struct bpos bucket)
1612 {
1613 	int ret;
1614 
1615 	mutex_lock(&c->discard_buckets_in_flight_lock);
1616 	darray_for_each(c->discard_buckets_in_flight, i)
1617 		if (bkey_eq(*i, bucket)) {
1618 			ret = -EEXIST;
1619 			goto out;
1620 		}
1621 
1622 	ret = darray_push(&c->discard_buckets_in_flight, bucket);
1623 out:
1624 	mutex_unlock(&c->discard_buckets_in_flight_lock);
1625 	return ret;
1626 }
1627 
1628 static void discard_in_flight_remove(struct bch_fs *c, struct bpos bucket)
1629 {
1630 	mutex_lock(&c->discard_buckets_in_flight_lock);
1631 	darray_for_each(c->discard_buckets_in_flight, i)
1632 		if (bkey_eq(*i, bucket)) {
1633 			darray_remove_item(&c->discard_buckets_in_flight, i);
1634 			goto found;
1635 		}
1636 	BUG();
1637 found:
1638 	mutex_unlock(&c->discard_buckets_in_flight_lock);
1639 }
1640 
1641 struct discard_buckets_state {
1642 	u64		seen;
1643 	u64		open;
1644 	u64		need_journal_commit;
1645 	u64		discarded;
1646 	struct bch_dev	*ca;
1647 	u64		need_journal_commit_this_dev;
1648 };
1649 
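/*
 * Called when the discard pass moves on to the next device (or finishes,
 * with ca == NULL): if the previous device has more buckets waiting on a
 * journal commit than it has free buckets, kick off an async journal flush
 * so those buckets can be discarded sooner:
 */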
1650 static void discard_buckets_next_dev(struct bch_fs *c, struct discard_buckets_state *s, struct bch_dev *ca)
1651 {
1652 	if (s->ca == ca)
1653 		return;
1654 
1655 	if (s->ca && s->need_journal_commit_this_dev >
1656 	    bch2_dev_usage_read(s->ca).d[BCH_DATA_free].buckets)
1657 		bch2_journal_flush_async(&c->journal, NULL);
1658 
1659 	if (s->ca)
1660 		percpu_ref_put(&s->ca->ref);
1661 	if (ca)
1662 		percpu_ref_get(&ca->ref);
1663 	s->ca = ca;
1664 	s->need_journal_commit_this_dev = 0;
1665 }
1666 
1667 static int bch2_discard_one_bucket(struct btree_trans *trans,
1668 				   struct btree_iter *need_discard_iter,
1669 				   struct bpos *discard_pos_done,
1670 				   struct discard_buckets_state *s)
1671 {
1672 	struct bch_fs *c = trans->c;
1673 	struct bpos pos = need_discard_iter->pos;
1674 	struct btree_iter iter = { NULL };
1675 	struct bkey_s_c k;
1676 	struct bch_dev *ca;
1677 	struct bkey_i_alloc_v4 *a;
1678 	struct printbuf buf = PRINTBUF;
1679 	bool discard_locked = false;
1680 	int ret = 0;
1681 
1682 	ca = bch_dev_bkey_exists(c, pos.inode);
1683 
1684 	if (!percpu_ref_tryget(&ca->io_ref)) {
1685 		bch2_btree_iter_set_pos(need_discard_iter, POS(pos.inode + 1, 0));
1686 		return 0;
1687 	}
1688 
1689 	discard_buckets_next_dev(c, s, ca);
1690 
1691 	if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
1692 		s->open++;
1693 		goto out;
1694 	}
1695 
1696 	if (bch2_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
1697 			c->journal.flushed_seq_ondisk,
1698 			pos.inode, pos.offset)) {
1699 		s->need_journal_commit++;
1700 		s->need_journal_commit_this_dev++;
1701 		goto out;
1702 	}
1703 
1704 	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
1705 			       need_discard_iter->pos,
1706 			       BTREE_ITER_CACHED);
1707 	ret = bkey_err(k);
1708 	if (ret)
1709 		goto out;
1710 
1711 	a = bch2_alloc_to_v4_mut(trans, k);
1712 	ret = PTR_ERR_OR_ZERO(a);
1713 	if (ret)
1714 		goto out;
1715 
1716 	if (BCH_ALLOC_V4_NEED_INC_GEN(&a->v)) {
1717 		a->v.gen++;
1718 		SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
1719 		goto write;
1720 	}
1721 
1722 	if (a->v.journal_seq > c->journal.flushed_seq_ondisk) {
1723 		if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
1724 			bch2_trans_inconsistent(trans,
1725 				"clearing need_discard but journal_seq %llu > flushed_seq %llu\n"
1726 				"%s",
1727 				a->v.journal_seq,
1728 				c->journal.flushed_seq_ondisk,
1729 				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
1730 			ret = -EIO;
1731 		}
1732 		goto out;
1733 	}
1734 
1735 	if (a->v.data_type != BCH_DATA_need_discard) {
1736 		if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_info) {
1737 			bch2_trans_inconsistent(trans,
1738 				"bucket incorrectly set in need_discard btree\n"
1739 				"%s",
1740 				(bch2_bkey_val_to_text(&buf, c, k), buf.buf));
1741 			ret = -EIO;
1742 		}
1743 
1744 		goto out;
1745 	}
1746 
1747 	if (discard_in_flight_add(c, SPOS(iter.pos.inode, iter.pos.offset, true)))
1748 		goto out;
1749 
1750 	discard_locked = true;
1751 
1752 	if (!bkey_eq(*discard_pos_done, iter.pos) &&
1753 	    ca->mi.discard && !c->opts.nochanges) {
1754 		/*
1755 		 * This works without any other locks because this is the only
1756 		 * thread that removes items from the need_discard tree
1757 		 */
1758 		bch2_trans_unlock_long(trans);
1759 		blkdev_issue_discard(ca->disk_sb.bdev,
1760 				     k.k->p.offset * ca->mi.bucket_size,
1761 				     ca->mi.bucket_size,
1762 				     GFP_KERNEL);
1763 		*discard_pos_done = iter.pos;
1764 
1765 		ret = bch2_trans_relock_notrace(trans);
1766 		if (ret)
1767 			goto out;
1768 	}
1769 
1770 	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
1771 	a->v.data_type = alloc_data_type(a->v, a->v.data_type);
1772 write:
1773 	ret =   bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
1774 		bch2_trans_commit(trans, NULL, NULL,
1775 				  BCH_WATERMARK_btree|
1776 				  BCH_TRANS_COMMIT_no_enospc);
1777 	if (ret)
1778 		goto out;
1779 
1780 	count_event(c, bucket_discard);
1781 	s->discarded++;
1782 out:
1783 	if (discard_locked)
1784 		discard_in_flight_remove(c, iter.pos);
1785 	s->seen++;
1786 	bch2_trans_iter_exit(trans, &iter);
1787 	percpu_ref_put(&ca->io_ref);
1788 	printbuf_exit(&buf);
1789 	return ret;
1790 }
1791 
1792 static void bch2_do_discards_work(struct work_struct *work)
1793 {
1794 	struct bch_fs *c = container_of(work, struct bch_fs, discard_work);
1795 	struct discard_buckets_state s = {};
1796 	struct bpos discard_pos_done = POS_MAX;
1797 	int ret;
1798 
1799 	/*
1800 	 * We're doing the commit in bch2_discard_one_bucket instead of using
1801 	 * for_each_btree_key_commit() so that we can increment counters after a
1802 	 * successful commit:
1803 	 */
1804 	ret = bch2_trans_run(c,
1805 		for_each_btree_key(trans, iter,
1806 				   BTREE_ID_need_discard, POS_MIN, 0, k,
1807 			bch2_discard_one_bucket(trans, &iter, &discard_pos_done, &s)));
1808 
1809 	discard_buckets_next_dev(c, &s, NULL);
1810 
1811 	trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
1812 			      bch2_err_str(ret));
1813 
1814 	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
1815 }
1816 
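/*
 * Kick off the background discard worker: take a write ref so this can't
 * race with the filesystem going read-only, dropping it again if the work
 * item was already queued:
 */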
1817 void bch2_do_discards(struct bch_fs *c)
1818 {
1819 	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_discard) &&
1820 	    !queue_work(c->write_ref_wq, &c->discard_work))
1821 		bch2_write_ref_put(c, BCH_WRITE_REF_discard);
1822 }
1823 
1824 static int bch2_clear_bucket_needs_discard(struct btree_trans *trans, struct bpos bucket)
1825 {
1826 	struct btree_iter iter;
1827 	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, bucket, BTREE_ITER_INTENT);
1828 	struct bkey_s_c k = bch2_btree_iter_peek_slot(&iter);
1829 	int ret = bkey_err(k);
1830 	if (ret)
1831 		goto err;
1832 
1833 	struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut(trans, k);
1834 	ret = PTR_ERR_OR_ZERO(a);
1835 	if (ret)
1836 		goto err;
1837 
1838 	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
1839 	a->v.data_type = alloc_data_type(a->v, a->v.data_type);
1840 
1841 	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
1842 err:
1843 	bch2_trans_iter_exit(trans, &iter);
1844 	return ret;
1845 }
1846 
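/*
 * The discard_buckets_in_flight darray doubles as this worker's queue: the
 * otherwise-unused snapshot field of each bpos acts as an "in progress"
 * flag, so entries are claimed by setting it and skipped once it's set:
 */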
static void bch2_do_discards_fast_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, discard_fast_work);

	while (1) {
		bool got_bucket = false;
		struct bpos bucket;
		struct bch_dev *ca;

		mutex_lock(&c->discard_buckets_in_flight_lock);
		darray_for_each(c->discard_buckets_in_flight, i) {
			if (i->snapshot)
				continue;

			ca = bch_dev_bkey_exists(c, i->inode);

			if (!percpu_ref_tryget(&ca->io_ref)) {
				darray_remove_item(&c->discard_buckets_in_flight, i);
				continue;
			}

			got_bucket = true;
			bucket = *i;
			i->snapshot = true;
			break;
		}
		mutex_unlock(&c->discard_buckets_in_flight_lock);

		if (!got_bucket)
			break;

		if (ca->mi.discard && !c->opts.nochanges)
			blkdev_issue_discard(ca->disk_sb.bdev,
					     bucket.offset * ca->mi.bucket_size,
					     ca->mi.bucket_size,
					     GFP_KERNEL);

		int ret = bch2_trans_do(c, NULL, NULL,
					BCH_WATERMARK_btree|
					BCH_TRANS_COMMIT_no_enospc,
					bch2_clear_bucket_needs_discard(trans, bucket));
		bch_err_fn(c, ret);

		percpu_ref_put(&ca->io_ref);
		discard_in_flight_remove(c, bucket);

		if (ret)
			break;
	}

	bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}

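/*
 * Queue a bucket for the fast discard path: a no-op if the device is going
 * away, the bucket is already queued, or the discard_fast write ref can't be
 * taken:
 */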
static void bch2_discard_one_bucket_fast(struct bch_fs *c, struct bpos bucket)
{
	struct bch_dev *ca = bch_dev_bkey_exists(c, bucket.inode);

	if (!percpu_ref_is_dying(&ca->io_ref) &&
	    !discard_in_flight_add(c, bucket) &&
	    bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast) &&
	    !queue_work(c->write_ref_wq, &c->discard_fast_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
}

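/*
 * Invalidate a single cached-data bucket, given its LRU entry: bumping the
 * gen invalidates every cached pointer into the bucket, so the counters can
 * be zeroed. Returns 1 to halt iteration once *nr_to_invalidate is used up:
 */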
static int invalidate_one_bucket(struct btree_trans *trans,
				 struct btree_iter *lru_iter,
				 struct bkey_s_c lru_k,
				 s64 *nr_to_invalidate)
{
	struct bch_fs *c = trans->c;
	struct btree_iter alloc_iter = { NULL };
	struct bkey_i_alloc_v4 *a = NULL;
	struct printbuf buf = PRINTBUF;
	struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
	unsigned cached_sectors;
	int ret = 0;

	if (*nr_to_invalidate <= 0)
		return 1;

	if (!bch2_dev_bucket_exists(c, bucket)) {
		prt_str(&buf, "lru entry points to invalid bucket");
		goto err;
	}

	if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
		return 0;

	a = bch2_trans_start_alloc_update(trans, &alloc_iter, bucket);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		goto out;

	/* We expect harmless races here due to the btree write buffer: */
	if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(a->v))
		goto out;

	BUG_ON(a->v.data_type != BCH_DATA_cached);

	if (!a->v.cached_sectors)
		bch_err(c, "invalidating empty bucket, confused");

	cached_sectors = a->v.cached_sectors;

	SET_BCH_ALLOC_V4_NEED_INC_GEN(&a->v, false);
	a->v.gen++;
	a->v.data_type		= 0;
	a->v.dirty_sectors	= 0;
	a->v.cached_sectors	= 0;
	a->v.io_time[READ]	= atomic64_read(&c->io_clock[READ].now);
	a->v.io_time[WRITE]	= atomic64_read(&c->io_clock[WRITE].now);

	ret =   bch2_trans_update(trans, &alloc_iter, &a->k_i,
				BTREE_TRIGGER_BUCKET_INVALIDATE) ?:
		bch2_trans_commit(trans, NULL, NULL,
				  BCH_WATERMARK_btree|
				  BCH_TRANS_COMMIT_no_enospc);
	if (ret)
		goto out;

	trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
	--*nr_to_invalidate;
out:
	bch2_trans_iter_exit(trans, &alloc_iter);
	printbuf_exit(&buf);
	return ret;
err:
	prt_str(&buf, "\n  lru key: ");
	bch2_bkey_val_to_text(&buf, c, lru_k);

	prt_str(&buf, "\n  lru entry: ");
	bch2_lru_pos_to_text(&buf, lru_iter->pos);

	prt_str(&buf, "\n  alloc key: ");
	if (!a)
		bch2_bpos_to_text(&buf, bucket);
	else
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&a->k_i));

	bch_err(c, "%s", buf.buf);
	if (c->curr_recovery_pass > BCH_RECOVERY_PASS_check_lrus) {
		bch2_inconsistent_error(c);
		ret = -EINVAL;
	}

	goto out;
}

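/*
 * Worker that invalidates cached-data buckets, oldest (by LRU position)
 * first: the write buffer is flushed so the LRU btree is current, then each
 * device's LRU is walked until should_invalidate_buckets() is satisfied:
 */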
static void bch2_do_invalidates_work(struct work_struct *work)
{
	struct bch_fs *c = container_of(work, struct bch_fs, invalidate_work);
	struct btree_trans *trans = bch2_trans_get(c);
	int ret = 0;

	ret = bch2_btree_write_buffer_tryflush(trans);
	if (ret)
		goto err;

	for_each_member_device(c, ca) {
		s64 nr_to_invalidate =
			should_invalidate_buckets(ca, bch2_dev_usage_read(ca));

		ret = for_each_btree_key_upto(trans, iter, BTREE_ID_lru,
				lru_pos(ca->dev_idx, 0, 0),
				lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX),
				BTREE_ITER_INTENT, k,
			invalidate_one_bucket(trans, &iter, k, &nr_to_invalidate));

		if (ret < 0) {
			percpu_ref_put(&ca->ref);
			break;
		}
	}
err:
	bch2_trans_put(trans);
	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}

void bch2_do_invalidates(struct bch_fs *c)
{
	if (bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate) &&
	    !queue_work(c->write_ref_wq, &c->invalidate_work))
		bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
}

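/*
 * Walk the alloc btree over buckets [bucket_start, bucket_end) of @ca: live
 * keys are indexed one at a time via bch2_bucket_do_index(), while each hole
 * becomes one extents-style KEY_TYPE_set key in the freespace btree (pos is
 * the end of the range, size its length). On success the member's
 * FREESPACE_INITIALIZED bit is set; writing the superblock is left to the
 * caller:
 */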
int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
			    u64 bucket_start, u64 bucket_end)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree_iter iter;
	struct bkey_s_c k;
	struct bkey hole;
	struct bpos end = POS(ca->dev_idx, bucket_end);
	struct bch_member *m;
	unsigned long last_updated = jiffies;
	int ret;

	BUG_ON(bucket_start > bucket_end);
	BUG_ON(bucket_end > ca->mi.nbuckets);

	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
		POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
		BTREE_ITER_PREFETCH);
	/*
	 * Scan the alloc btree for every bucket on @ca, and add buckets to the
	 * freespace/need_discard/need_gc_gens btrees as needed:
	 */
	while (1) {
		if (time_after(jiffies, last_updated + HZ * 10)) {
			bch_info(ca, "%s: currently at %llu/%llu",
				 __func__, iter.pos.offset, ca->mi.nbuckets);
			last_updated = jiffies;
		}

		bch2_trans_begin(trans);

		if (bkey_ge(iter.pos, end)) {
			ret = 0;
			break;
		}

		k = bch2_get_key_or_hole(&iter, end, &hole);
		ret = bkey_err(k);
		if (ret)
			goto bkey_err;

		if (k.k->type) {
			/*
			 * We process live keys in the alloc btree one at a
			 * time:
			 */
			struct bch_alloc_v4 a_convert;
			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

			ret =   bch2_bucket_do_index(trans, k, a, true) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_advance(&iter);
		} else {
			struct bkey_i *freespace;

			freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
			ret = PTR_ERR_OR_ZERO(freespace);
			if (ret)
				goto bkey_err;

			bkey_init(&freespace->k);
			freespace->k.type	= KEY_TYPE_set;
			freespace->k.p		= k.k->p;
			freespace->k.size	= k.k->size;

			ret = bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
				bch2_trans_commit(trans, NULL, NULL,
						  BCH_TRANS_COMMIT_no_enospc);
			if (ret)
				goto bkey_err;

			bch2_btree_iter_set_pos(&iter, k.k->p);
		}
bkey_err:
		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
			continue;
		if (ret)
			break;
	}

	bch2_trans_iter_exit(trans, &iter);
	bch2_trans_put(trans);

	if (ret < 0) {
		bch_err_msg(ca, ret, "initializing free space");
		return ret;
	}

	mutex_lock(&c->sb_lock);
	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
	mutex_unlock(&c->sb_lock);

	return 0;
}

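/*
 * Run on every mount: initialize the freespace btree for any member device
 * that doesn't yet have it, then write the superblock once if any work was
 * done:
 */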
int bch2_fs_freespace_init(struct bch_fs *c)
{
	int ret = 0;
	bool doing_init = false;

	/*
	 * We can crash during the device add path, so we need to check this on
	 * every mount:
	 */

	for_each_member_device(c, ca) {
		if (ca->mi.freespace_initialized)
			continue;

		if (!doing_init) {
			bch_info(c, "initializing freespace");
			doing_init = true;
		}

		ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
		if (ret) {
			percpu_ref_put(&ca->ref);
			bch_err_fn(c, ret);
			return ret;
		}
	}

	if (doing_init) {
		mutex_lock(&c->sb_lock);
		bch2_write_super(c);
		mutex_unlock(&c->sb_lock);
		bch_verbose(c, "done initializing freespace");
	}

	return 0;
}

/* Bucket IO clocks: */

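/*
 * Reset a bucket's io_time for the given clock (READ or WRITE) to "now",
 * skipping the commit when it's already current; the read clock is what
 * determines a cached-data bucket's LRU position (see alloc_lru_idx_read()
 * above):
 */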
int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
			      size_t bucket_nr, int rw)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct bkey_i_alloc_v4 *a;
	u64 now;
	int ret = 0;

	a = bch2_trans_start_alloc_update(trans, &iter, POS(dev, bucket_nr));
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ret;

	now = atomic64_read(&c->io_clock[rw].now);
	if (a->v.io_time[rw] == now)
		goto out;

	a->v.io_time[rw] = now;

	ret   = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
		bch2_trans_commit(trans, NULL, NULL, 0);
out:
	bch2_trans_iter_exit(trans, &iter);
	return ret;
}

/* Startup/shutdown (ro/rw): */

void bch2_recalc_capacity(struct bch_fs *c)
{
	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
	unsigned bucket_size_max = 0;
	unsigned long ra_pages = 0;

	lockdep_assert_held(&c->state_lock);

	for_each_online_member(c, ca) {
		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;

		ra_pages += bdi->ra_pages;
	}

	bch2_set_ra_pages(c, ra_pages);

	for_each_rw_member(c, ca) {
		u64 dev_reserve = 0;

		/*
		 * We need to reserve buckets (from the number
		 * of currently available buckets) against
		 * foreground writes so that mainly copygc can
		 * make forward progress.
		 *
		 * We need enough to refill the various reserves
		 * from scratch - copygc will use its entire
		 * reserve all at once, then run again when
		 * its reserve is refilled (from the formerly
		 * available buckets).
		 *
		 * This reserve is just used when considering if
		 * allocations for foreground writes must wait -
		 * not -ENOSPC calculations.
		 */

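		/*
		 * Illustrative arithmetic only, with made-up numbers: for
		 * nbuckets = 1 << 20, bucket_size = 1024 sectors and
		 * nr_btree_reserve = 512, this comes to
		 * 512 * 2 + ((1 << 20) >> 6) + 3 = 17411 buckets, i.e.
		 * 17411 * 1024 sectors (~8.5 GiB), doubled below for this
		 * device's contribution to reserved_sectors:
		 */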
		dev_reserve += ca->nr_btree_reserve * 2;
		dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */

		dev_reserve += 1;	/* btree write point */
		dev_reserve += 1;	/* copygc write point */
		dev_reserve += 1;	/* rebalance write point */

		dev_reserve *= ca->mi.bucket_size;

		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
					     ca->mi.first_bucket);

		reserved_sectors += dev_reserve * 2;

		bucket_size_max = max_t(unsigned, bucket_size_max,
					ca->mi.bucket_size);
	}

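	/*
	 * gc_reserve_bytes is in bytes, so >> 9 converts it to 512-byte
	 * sectors; otherwise the reserve is taken as a percentage of total
	 * capacity:
	 */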
	gc_reserve = c->opts.gc_reserve_bytes
		? c->opts.gc_reserve_bytes >> 9
		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);

	reserved_sectors = max(gc_reserve, reserved_sectors);

	reserved_sectors = min(reserved_sectors, capacity);

	c->capacity = capacity - reserved_sectors;

	c->bucket_size_max = bucket_size_max;

	/* Wake up in case someone was waiting for buckets */
	closure_wake_up(&c->freelist_wait);
}

u64 bch2_min_rw_member_capacity(struct bch_fs *c)
{
	u64 ret = U64_MAX;

	for_each_rw_member(c, ca)
		ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
	return ret;
}

static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
{
	struct open_bucket *ob;
	bool ret = false;

	for (ob = c->open_buckets;
	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
	     ob++) {
		spin_lock(&ob->lock);
		if (ob->valid && !ob->on_partial_list &&
		    ob->dev == ca->dev_idx)
			ret = true;
		spin_unlock(&ob->lock);
	}

	return ret;
}

/* device goes ro: */
void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	/* First, remove device from allocation groups: */

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		clear_bit(ca->dev_idx, c->rw_devs[i].d);

	/*
	 * Capacity is calculated based on the devices in allocation groups:
	 */
	bch2_recalc_capacity(c);

	bch2_open_buckets_stop(c, ca, false);

	/*
	 * Wake up threads that were blocked on allocation, so they can notice
	 * the device can no longer be removed and the capacity has changed:
	 */
	closure_wake_up(&c->freelist_wait);

	/*
	 * journal_res_get() can block waiting for free space in the journal -
	 * it needs to notice there may not be devices to allocate from anymore:
	 */
	wake_up(&c->journal.wait);

	/* Now wait for any in flight writes: */

	closure_wait_event(&c->open_buckets_wait,
			   !bch2_dev_has_open_write_point(c, ca));
}

/* device goes rw: */
void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
{
	unsigned i;

	for (i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
		if (ca->mi.data_allowed & (1 << i))
			set_bit(ca->dev_idx, c->rw_devs[i].d);
}

void bch2_fs_allocator_background_exit(struct bch_fs *c)
{
	darray_exit(&c->discard_buckets_in_flight);
}

void bch2_fs_allocator_background_init(struct bch_fs *c)
{
	spin_lock_init(&c->freelist_lock);
	mutex_init(&c->discard_buckets_in_flight_lock);
	INIT_WORK(&c->discard_work, bch2_do_discards_work);
	INIT_WORK(&c->discard_fast_work, bch2_do_discards_fast_work);
	INIT_WORK(&c->invalidate_work, bch2_do_invalidates_work);
}