xref: /linux/fs/bcachefs/alloc_background.c (revision 56770e24f678a84a21f21bcc1ae9cbc1364677bd)
1 // SPDX-License-Identifier: GPL-2.0
2 #include "bcachefs.h"
3 #include "alloc_background.h"
4 #include "alloc_foreground.h"
5 #include "backpointers.h"
6 #include "bkey_buf.h"
7 #include "btree_cache.h"
8 #include "btree_io.h"
9 #include "btree_key_cache.h"
10 #include "btree_update.h"
11 #include "btree_update_interior.h"
12 #include "btree_gc.h"
13 #include "btree_write_buffer.h"
14 #include "buckets.h"
15 #include "buckets_waiting_for_journal.h"
16 #include "clock.h"
17 #include "debug.h"
18 #include "disk_accounting.h"
19 #include "ec.h"
20 #include "error.h"
21 #include "lru.h"
22 #include "recovery.h"
23 #include "trace.h"
24 #include "varint.h"
25 
26 #include <linux/kthread.h>
27 #include <linux/math64.h>
28 #include <linux/random.h>
29 #include <linux/rculist.h>
30 #include <linux/rcupdate.h>
31 #include <linux/sched/task.h>
32 #include <linux/sort.h>
33 #include <linux/jiffies.h>
34 
35 static void bch2_discard_one_bucket_fast(struct bch_dev *, u64);
36 
37 /* Persistent alloc info: */
38 
39 static const unsigned BCH_ALLOC_V1_FIELD_BYTES[] = {
40 #define x(name, bits) [BCH_ALLOC_FIELD_V1_##name] = bits / 8,
41 	BCH_ALLOC_FIELDS_V1()
42 #undef x
43 };
44 
45 struct bkey_alloc_unpacked {
46 	u64		journal_seq;
47 	u8		gen;
48 	u8		oldest_gen;
49 	u8		data_type;
50 	bool		need_discard:1;
51 	bool		need_inc_gen:1;
52 #define x(_name, _bits)	u##_bits _name;
53 	BCH_ALLOC_FIELDS_V2()
54 #undef  x
55 };
56 
57 static inline u64 alloc_field_v1_get(const struct bch_alloc *a,
58 				     const void **p, unsigned field)
59 {
60 	unsigned bytes = BCH_ALLOC_V1_FIELD_BYTES[field];
61 	u64 v;
62 
63 	if (!(a->fields & (1 << field)))
64 		return 0;
65 
66 	switch (bytes) {
67 	case 1:
68 		v = *((const u8 *) *p);
69 		break;
70 	case 2:
71 		v = le16_to_cpup(*p);
72 		break;
73 	case 4:
74 		v = le32_to_cpup(*p);
75 		break;
76 	case 8:
77 		v = le64_to_cpup(*p);
78 		break;
79 	default:
80 		BUG();
81 	}
82 
83 	*p += bytes;
84 	return v;
85 }
86 
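/*
 * Editor's sketch (not part of the original file): the v1 layout decoded by
 * alloc_field_v1_get() above is a presence bitmap (a->fields) followed by the
 * present fields packed back-to-back, little-endian, each at the width given
 * by BCH_ALLOC_V1_FIELD_BYTES.  A standalone illustration, assuming a
 * little-endian host; all demo_* names are hypothetical:
 */
#if 0	/* illustration only */
#include <stdint.h>
#include <string.h>

static uint64_t demo_v1_field_get(const uint8_t **p, unsigned fields,
				  unsigned field, unsigned bytes)
{
	uint64_t v = 0;

	if (!(fields & (1U << field)))
		return 0;		/* absent fields decode as 0 */

	memcpy(&v, *p, bytes);		/* little-endian host assumed */
	*p += bytes;
	return v;
}
#endif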
87 static void bch2_alloc_unpack_v1(struct bkey_alloc_unpacked *out,
88 				 struct bkey_s_c k)
89 {
90 	const struct bch_alloc *in = bkey_s_c_to_alloc(k).v;
91 	const void *d = in->data;
92 	unsigned idx = 0;
93 
94 	out->gen = in->gen;
95 
96 #define x(_name, _bits) out->_name = alloc_field_v1_get(in, &d, idx++);
97 	BCH_ALLOC_FIELDS_V1()
98 #undef  x
99 }
100 
101 static int bch2_alloc_unpack_v2(struct bkey_alloc_unpacked *out,
102 				struct bkey_s_c k)
103 {
104 	struct bkey_s_c_alloc_v2 a = bkey_s_c_to_alloc_v2(k);
105 	const u8 *in = a.v->data;
106 	const u8 *end = bkey_val_end(a);
107 	unsigned fieldnr = 0;
108 	int ret;
109 	u64 v;
110 
111 	out->gen	= a.v->gen;
112 	out->oldest_gen	= a.v->oldest_gen;
113 	out->data_type	= a.v->data_type;
114 
115 #define x(_name, _bits)							\
116 	if (fieldnr < a.v->nr_fields) {					\
117 		ret = bch2_varint_decode_fast(in, end, &v);		\
118 		if (ret < 0)						\
119 			return ret;					\
120 		in += ret;						\
121 	} else {							\
122 		v = 0;							\
123 	}								\
124 	out->_name = v;							\
125 	if (v != out->_name)						\
126 		return -1;						\
127 	fieldnr++;
128 
129 	BCH_ALLOC_FIELDS_V2()
130 #undef  x
131 	return 0;
132 }
133 
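/*
 * Editor's note (illustration, not part of the original file): the
 * "out->_name = v; if (v != out->_name) return -1;" pattern in the macro
 * above is an overflow check — the decoded u64 is stored into the narrower
 * struct field and read back, so any truncation makes the comparison fail.
 * Minimal standalone equivalent:
 */
#if 0	/* illustration only */
#include <stdint.h>

static int demo_store_checked(uint8_t *dst, uint64_t v)
{
	*dst = (uint8_t) v;
	return *dst == v ? 0 : -1;	/* -1: value didn't fit in 8 bits */
}
#endif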
134 static int bch2_alloc_unpack_v3(struct bkey_alloc_unpacked *out,
135 				struct bkey_s_c k)
136 {
137 	struct bkey_s_c_alloc_v3 a = bkey_s_c_to_alloc_v3(k);
138 	const u8 *in = a.v->data;
139 	const u8 *end = bkey_val_end(a);
140 	unsigned fieldnr = 0;
141 	int ret;
142 	u64 v;
143 
144 	out->gen	= a.v->gen;
145 	out->oldest_gen	= a.v->oldest_gen;
146 	out->data_type	= a.v->data_type;
147 	out->need_discard = BCH_ALLOC_V3_NEED_DISCARD(a.v);
148 	out->need_inc_gen = BCH_ALLOC_V3_NEED_INC_GEN(a.v);
149 	out->journal_seq = le64_to_cpu(a.v->journal_seq);
150 
151 #define x(_name, _bits)							\
152 	if (fieldnr < a.v->nr_fields) {					\
153 		ret = bch2_varint_decode_fast(in, end, &v);		\
154 		if (ret < 0)						\
155 			return ret;					\
156 		in += ret;						\
157 	} else {							\
158 		v = 0;							\
159 	}								\
160 	out->_name = v;							\
161 	if (v != out->_name)						\
162 		return -1;						\
163 	fieldnr++;
164 
165 	BCH_ALLOC_FIELDS_V2()
166 #undef  x
167 	return 0;
168 }
169 
170 static struct bkey_alloc_unpacked bch2_alloc_unpack(struct bkey_s_c k)
171 {
172 	struct bkey_alloc_unpacked ret = { .gen	= 0 };
173 
174 	switch (k.k->type) {
175 	case KEY_TYPE_alloc:
176 		bch2_alloc_unpack_v1(&ret, k);
177 		break;
178 	case KEY_TYPE_alloc_v2:
179 		bch2_alloc_unpack_v2(&ret, k);
180 		break;
181 	case KEY_TYPE_alloc_v3:
182 		bch2_alloc_unpack_v3(&ret, k);
183 		break;
184 	}
185 
186 	return ret;
187 }
188 
189 static unsigned bch_alloc_v1_val_u64s(const struct bch_alloc *a)
190 {
191 	unsigned i, bytes = offsetof(struct bch_alloc, data);
192 
193 	for (i = 0; i < ARRAY_SIZE(BCH_ALLOC_V1_FIELD_BYTES); i++)
194 		if (a->fields & (1 << i))
195 			bytes += BCH_ALLOC_V1_FIELD_BYTES[i];
196 
197 	return DIV_ROUND_UP(bytes, sizeof(u64));
198 }
199 
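/*
 * Editor's note (illustration, not part of the original file): bkey values
 * are sized in units of u64s, so the byte total above is rounded up to the
 * next multiple of sizeof(u64):
 */
#if 0	/* illustration only */
#include <stdint.h>

#define DEMO_DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* e.g. 18 bytes of header + fields -> DEMO_DIV_ROUND_UP(18, 8) == 3 u64s */
#endif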
200 int bch2_alloc_v1_validate(struct bch_fs *c, struct bkey_s_c k,
201 			   struct bkey_validate_context from)
202 {
203 	struct bkey_s_c_alloc a = bkey_s_c_to_alloc(k);
204 	int ret = 0;
205 
206 	/* allow for unknown fields */
207 	bkey_fsck_err_on(bkey_val_u64s(a.k) < bch_alloc_v1_val_u64s(a.v),
208 			 c, alloc_v1_val_size_bad,
209 			 "incorrect value size (%zu < %u)",
210 			 bkey_val_u64s(a.k), bch_alloc_v1_val_u64s(a.v));
211 fsck_err:
212 	return ret;
213 }
214 
215 int bch2_alloc_v2_validate(struct bch_fs *c, struct bkey_s_c k,
216 			   struct bkey_validate_context from)
217 {
218 	struct bkey_alloc_unpacked u;
219 	int ret = 0;
220 
221 	bkey_fsck_err_on(bch2_alloc_unpack_v2(&u, k),
222 			 c, alloc_v2_unpack_error,
223 			 "unpack error");
224 fsck_err:
225 	return ret;
226 }
227 
228 int bch2_alloc_v3_validate(struct bch_fs *c, struct bkey_s_c k,
229 			   struct bkey_validate_context from)
230 {
231 	struct bkey_alloc_unpacked u;
232 	int ret = 0;
233 
234 	bkey_fsck_err_on(bch2_alloc_unpack_v3(&u, k),
235 			 c, alloc_v3_unpack_error,
236 			 "unpack error");
237 fsck_err:
238 	return ret;
239 }
240 
241 int bch2_alloc_v4_validate(struct bch_fs *c, struct bkey_s_c k,
242 			   struct bkey_validate_context from)
243 {
244 	struct bch_alloc_v4 a;
245 	int ret = 0;
246 
247 	bkey_val_copy(&a, bkey_s_c_to_alloc_v4(k));
248 
249 	bkey_fsck_err_on(alloc_v4_u64s_noerror(&a) > bkey_val_u64s(k.k),
250 			 c, alloc_v4_val_size_bad,
251 			 "bad val size (%u > %zu)",
252 			 alloc_v4_u64s_noerror(&a), bkey_val_u64s(k.k));
253 
254 	bkey_fsck_err_on(!BCH_ALLOC_V4_BACKPOINTERS_START(&a) &&
255 			 BCH_ALLOC_V4_NR_BACKPOINTERS(&a),
256 			 c, alloc_v4_backpointers_start_bad,
257 			 "invalid backpointers_start");
258 
259 	bkey_fsck_err_on(alloc_data_type(a, a.data_type) != a.data_type,
260 			 c, alloc_key_data_type_bad,
261 			 "invalid data type (got %u should be %u)",
262 			 a.data_type, alloc_data_type(a, a.data_type));
263 
264 	for (unsigned i = 0; i < 2; i++)
265 		bkey_fsck_err_on(a.io_time[i] > LRU_TIME_MAX,
266 				 c, alloc_key_io_time_bad,
267 				 "invalid io_time[%s]: %llu, max %llu",
268 				 i == READ ? "read" : "write",
269 				 a.io_time[i], LRU_TIME_MAX);
270 
271 	unsigned stripe_sectors = BCH_ALLOC_V4_BACKPOINTERS_START(&a) * sizeof(u64) >
272 		offsetof(struct bch_alloc_v4, stripe_sectors)
273 		? a.stripe_sectors
274 		: 0;
275 
276 	switch (a.data_type) {
277 	case BCH_DATA_free:
278 	case BCH_DATA_need_gc_gens:
279 	case BCH_DATA_need_discard:
280 		bkey_fsck_err_on(stripe_sectors ||
281 				 a.dirty_sectors ||
282 				 a.cached_sectors ||
283 				 a.stripe,
284 				 c, alloc_key_empty_but_have_data,
285 				 "empty data type free but have data %u.%u.%u %u",
286 				 stripe_sectors,
287 				 a.dirty_sectors,
288 				 a.cached_sectors,
289 				 a.stripe);
290 		break;
291 	case BCH_DATA_sb:
292 	case BCH_DATA_journal:
293 	case BCH_DATA_btree:
294 	case BCH_DATA_user:
295 	case BCH_DATA_parity:
296 		bkey_fsck_err_on(!a.dirty_sectors &&
297 				 !stripe_sectors,
298 				 c, alloc_key_dirty_sectors_0,
299 				 "data_type %s but dirty_sectors==0",
300 				 bch2_data_type_str(a.data_type));
301 		break;
302 	case BCH_DATA_cached:
303 		bkey_fsck_err_on(!a.cached_sectors ||
304 				 a.dirty_sectors ||
305 				 stripe_sectors ||
306 				 a.stripe,
307 				 c, alloc_key_cached_inconsistency,
308 				 "data type inconsistency");
309 
310 		bkey_fsck_err_on(!a.io_time[READ] &&
311 				 c->curr_recovery_pass > BCH_RECOVERY_PASS_check_alloc_to_lru_refs,
312 				 c, alloc_key_cached_but_read_time_zero,
313 				 "cached bucket with read_time == 0");
314 		break;
315 	case BCH_DATA_stripe:
316 		break;
317 	}
318 fsck_err:
319 	return ret;
320 }
321 
322 void bch2_alloc_v4_swab(struct bkey_s k)
323 {
324 	struct bch_alloc_v4 *a = bkey_s_to_alloc_v4(k).v;
325 
326 	a->journal_seq_nonempty	= swab64(a->journal_seq_nonempty);
327 	a->journal_seq_empty	= swab64(a->journal_seq_empty);
328 	a->flags		= swab32(a->flags);
329 	a->dirty_sectors	= swab32(a->dirty_sectors);
330 	a->cached_sectors	= swab32(a->cached_sectors);
331 	a->io_time[0]		= swab64(a->io_time[0]);
332 	a->io_time[1]		= swab64(a->io_time[1]);
333 	a->stripe		= swab32(a->stripe);
334 	a->nr_external_backpointers = swab32(a->nr_external_backpointers);
335 	a->stripe_sectors	= swab32(a->stripe_sectors);
336 }
337 
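/*
 * Editor's note (illustration, not part of the original file): ->swab runs
 * when a filesystem written on a machine of the opposite endianness is read,
 * so every multi-byte field is byte-reversed in place.  A portable
 * equivalent of one swab64():
 */
#if 0	/* illustration only */
#include <stdint.h>

static uint64_t demo_swab64(uint64_t v)
{
	v = (v >> 32) | (v << 32);
	v = ((v & 0xffff0000ffff0000ULL) >> 16) | ((v & 0x0000ffff0000ffffULL) << 16);
	v = ((v & 0xff00ff00ff00ff00ULL) >>  8) | ((v & 0x00ff00ff00ff00ffULL) <<  8);
	return v;
}
#endif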
338 void bch2_alloc_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
339 {
340 	struct bch_alloc_v4 _a;
341 	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &_a);
342 	struct bch_dev *ca = c ? bch2_dev_bucket_tryget_noerror(c, k.k->p) : NULL;
343 
344 	prt_newline(out);
345 	printbuf_indent_add(out, 2);
346 
347 	prt_printf(out, "gen %u oldest_gen %u data_type ", a->gen, a->oldest_gen);
348 	bch2_prt_data_type(out, a->data_type);
349 	prt_newline(out);
350 	prt_printf(out, "journal_seq_nonempty %llu\n",	a->journal_seq_nonempty);
351 	prt_printf(out, "journal_seq_empty    %llu\n",	a->journal_seq_empty);
352 	prt_printf(out, "need_discard         %llu\n",	BCH_ALLOC_V4_NEED_DISCARD(a));
353 	prt_printf(out, "need_inc_gen         %llu\n",	BCH_ALLOC_V4_NEED_INC_GEN(a));
354 	prt_printf(out, "dirty_sectors        %u\n",	a->dirty_sectors);
355 	prt_printf(out, "stripe_sectors       %u\n",	a->stripe_sectors);
356 	prt_printf(out, "cached_sectors       %u\n",	a->cached_sectors);
357 	prt_printf(out, "stripe               %u\n",	a->stripe);
358 	prt_printf(out, "stripe_redundancy    %u\n",	a->stripe_redundancy);
359 	prt_printf(out, "io_time[READ]        %llu\n",	a->io_time[READ]);
360 	prt_printf(out, "io_time[WRITE]       %llu\n",	a->io_time[WRITE]);
361 
362 	if (ca)
363 		prt_printf(out, "fragmentation     %llu\n",	alloc_lru_idx_fragmentation(*a, ca));
364 	prt_printf(out, "bp_start          %llu\n", BCH_ALLOC_V4_BACKPOINTERS_START(a));
365 	printbuf_indent_sub(out, 2);
366 
367 	bch2_dev_put(ca);
368 }
369 
370 void __bch2_alloc_to_v4(struct bkey_s_c k, struct bch_alloc_v4 *out)
371 {
372 	if (k.k->type == KEY_TYPE_alloc_v4) {
373 		void *src, *dst;
374 
375 		*out = *bkey_s_c_to_alloc_v4(k).v;
376 
377 		src = alloc_v4_backpointers(out);
378 		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
379 		dst = alloc_v4_backpointers(out);
380 
381 		if (src < dst)
382 			memset(src, 0, dst - src);
383 
384 		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(out, 0);
385 	} else {
386 		struct bkey_alloc_unpacked u = bch2_alloc_unpack(k);
387 
388 		*out = (struct bch_alloc_v4) {
389 			.journal_seq_nonempty	= u.journal_seq,
390 			.flags			= u.need_discard,
391 			.gen			= u.gen,
392 			.oldest_gen		= u.oldest_gen,
393 			.data_type		= u.data_type,
394 			.stripe_redundancy	= u.stripe_redundancy,
395 			.dirty_sectors		= u.dirty_sectors,
396 			.cached_sectors		= u.cached_sectors,
397 			.io_time[READ]		= u.read_time,
398 			.io_time[WRITE]		= u.write_time,
399 			.stripe			= u.stripe,
400 		};
401 
402 		SET_BCH_ALLOC_V4_BACKPOINTERS_START(out, BCH_ALLOC_V4_U64s);
403 	}
404 }
405 
406 static noinline struct bkey_i_alloc_v4 *
407 __bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
408 {
409 	struct bkey_i_alloc_v4 *ret;
410 
411 	ret = bch2_trans_kmalloc(trans, max(bkey_bytes(k.k), sizeof(struct bkey_i_alloc_v4)));
412 	if (IS_ERR(ret))
413 		return ret;
414 
415 	if (k.k->type == KEY_TYPE_alloc_v4) {
416 		void *src, *dst;
417 
418 		bkey_reassemble(&ret->k_i, k);
419 
420 		src = alloc_v4_backpointers(&ret->v);
421 		SET_BCH_ALLOC_V4_BACKPOINTERS_START(&ret->v, BCH_ALLOC_V4_U64s);
422 		dst = alloc_v4_backpointers(&ret->v);
423 
424 		if (src < dst)
425 			memset(src, 0, dst - src);
426 
427 		SET_BCH_ALLOC_V4_NR_BACKPOINTERS(&ret->v, 0);
428 		set_alloc_v4_u64s(ret);
429 	} else {
430 		bkey_alloc_v4_init(&ret->k_i);
431 		ret->k.p = k.k->p;
432 		bch2_alloc_to_v4(k, &ret->v);
433 	}
434 	return ret;
435 }
436 
437 static inline struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut_inlined(struct btree_trans *trans, struct bkey_s_c k)
438 {
439 	struct bkey_s_c_alloc_v4 a;
440 
441 	if (likely(k.k->type == KEY_TYPE_alloc_v4) &&
442 	    ((a = bkey_s_c_to_alloc_v4(k), true) &&
443 	     BCH_ALLOC_V4_NR_BACKPOINTERS(a.v) == 0))
444 		return bch2_bkey_make_mut_noupdate_typed(trans, k, alloc_v4);
445 
446 	return __bch2_alloc_to_v4_mut(trans, k);
447 }
448 
449 struct bkey_i_alloc_v4 *bch2_alloc_to_v4_mut(struct btree_trans *trans, struct bkey_s_c k)
450 {
451 	return bch2_alloc_to_v4_mut_inlined(trans, k);
452 }
453 
454 struct bkey_i_alloc_v4 *
455 bch2_trans_start_alloc_update_noupdate(struct btree_trans *trans, struct btree_iter *iter,
456 				       struct bpos pos)
457 {
458 	struct bkey_s_c k = bch2_bkey_get_iter(trans, iter, BTREE_ID_alloc, pos,
459 					       BTREE_ITER_with_updates|
460 					       BTREE_ITER_cached|
461 					       BTREE_ITER_intent);
462 	int ret = bkey_err(k);
463 	if (unlikely(ret))
464 		return ERR_PTR(ret);
465 
466 	struct bkey_i_alloc_v4 *a = bch2_alloc_to_v4_mut_inlined(trans, k);
467 	ret = PTR_ERR_OR_ZERO(a);
468 	if (unlikely(ret))
469 		goto err;
470 	return a;
471 err:
472 	bch2_trans_iter_exit(trans, iter);
473 	return ERR_PTR(ret);
474 }
475 
476 __flatten
477 struct bkey_i_alloc_v4 *bch2_trans_start_alloc_update(struct btree_trans *trans, struct bpos pos,
478 						      enum btree_iter_update_trigger_flags flags)
479 {
480 	struct btree_iter iter;
481 	struct bkey_i_alloc_v4 *a = bch2_trans_start_alloc_update_noupdate(trans, &iter, pos);
482 	int ret = PTR_ERR_OR_ZERO(a);
483 	if (ret)
484 		return ERR_PTR(ret);
485 
486 	ret = bch2_trans_update(trans, &iter, &a->k_i, flags);
487 	bch2_trans_iter_exit(trans, &iter);
488 	return unlikely(ret) ? ERR_PTR(ret) : a;
489 }
490 
491 static struct bpos alloc_gens_pos(struct bpos pos, unsigned *offset)
492 {
493 	*offset = pos.offset & KEY_TYPE_BUCKET_GENS_MASK;
494 
495 	pos.offset >>= KEY_TYPE_BUCKET_GENS_BITS;
496 	return pos;
497 }
498 
499 static struct bpos bucket_gens_pos_to_alloc(struct bpos pos, unsigned offset)
500 {
501 	pos.offset <<= KEY_TYPE_BUCKET_GENS_BITS;
502 	pos.offset += offset;
503 	return pos;
504 }
505 
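/*
 * Editor's sketch (not part of the original file): one bucket_gens key
 * stores the generations for a power-of-two-sized group of buckets, so the
 * two helpers above are a shift/mask pair that compose to the identity.
 * Standalone illustration with an assumed group width (the real value is
 * KEY_TYPE_BUCKET_GENS_BITS from the headers):
 */
#if 0	/* illustration only */
#include <assert.h>
#include <stdint.h>

#define DEMO_GENS_BITS	8
#define DEMO_GENS_MASK	((1U << DEMO_GENS_BITS) - 1)

static void demo_gens_pos_roundtrip(uint64_t bucket)
{
	uint64_t key_offset = bucket >> DEMO_GENS_BITS;	/* alloc_gens_pos() */
	unsigned slot	    = bucket & DEMO_GENS_MASK;

	/* bucket_gens_pos_to_alloc() inverts it: */
	assert((key_offset << DEMO_GENS_BITS) + slot == bucket);
}
#endif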
506 static unsigned alloc_gen(struct bkey_s_c k, unsigned offset)
507 {
508 	return k.k->type == KEY_TYPE_bucket_gens
509 		? bkey_s_c_to_bucket_gens(k).v->gens[offset]
510 		: 0;
511 }
512 
513 int bch2_bucket_gens_validate(struct bch_fs *c, struct bkey_s_c k,
514 			      struct bkey_validate_context from)
515 {
516 	int ret = 0;
517 
518 	bkey_fsck_err_on(bkey_val_bytes(k.k) != sizeof(struct bch_bucket_gens),
519 			 c, bucket_gens_val_size_bad,
520 			 "bad val size (%zu != %zu)",
521 			 bkey_val_bytes(k.k), sizeof(struct bch_bucket_gens));
522 fsck_err:
523 	return ret;
524 }
525 
526 void bch2_bucket_gens_to_text(struct printbuf *out, struct bch_fs *c, struct bkey_s_c k)
527 {
528 	struct bkey_s_c_bucket_gens g = bkey_s_c_to_bucket_gens(k);
529 	unsigned i;
530 
531 	for (i = 0; i < ARRAY_SIZE(g.v->gens); i++) {
532 		if (i)
533 			prt_char(out, ' ');
534 		prt_printf(out, "%u", g.v->gens[i]);
535 	}
536 }
537 
538 int bch2_bucket_gens_init(struct bch_fs *c)
539 {
540 	struct btree_trans *trans = bch2_trans_get(c);
541 	struct bkey_i_bucket_gens g;
542 	bool have_bucket_gens_key = false;
543 	int ret;
544 
545 	ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
546 				 BTREE_ITER_prefetch, k, ({
547 		/*
548 		 * Not a fsck error because this is checked/repaired by
549 		 * bch2_check_alloc_key() which runs later:
550 		 */
551 		if (!bch2_dev_bucket_exists(c, k.k->p))
552 			continue;
553 
554 		struct bch_alloc_v4 a;
555 		u8 gen = bch2_alloc_to_v4(k, &a)->gen;
556 		unsigned offset;
557 		struct bpos pos = alloc_gens_pos(iter.pos, &offset);
558 		int ret2 = 0;
559 
560 		if (have_bucket_gens_key && !bkey_eq(g.k.p, pos)) {
561 			ret2 =  bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0) ?:
562 				bch2_trans_commit(trans, NULL, NULL, BCH_TRANS_COMMIT_no_enospc);
563 			if (ret2)
564 				goto iter_err;
565 			have_bucket_gens_key = false;
566 		}
567 
568 		if (!have_bucket_gens_key) {
569 			bkey_bucket_gens_init(&g.k_i);
570 			g.k.p = pos;
571 			have_bucket_gens_key = true;
572 		}
573 
574 		g.v.gens[offset] = gen;
575 iter_err:
576 		ret2;
577 	}));
578 
579 	if (have_bucket_gens_key && !ret)
580 		ret = commit_do(trans, NULL, NULL,
581 				BCH_TRANS_COMMIT_no_enospc,
582 			bch2_btree_insert_trans(trans, BTREE_ID_bucket_gens, &g.k_i, 0));
583 
584 	bch2_trans_put(trans);
585 
586 	bch_err_fn(c, ret);
587 	return ret;
588 }
589 
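/*
 * Editor's note (illustration, not part of the original file): the loop in
 * bch2_bucket_gens_init() above is the usual "accumulate while the group key
 * matches, flush on change" pattern — gens are collected into one
 * bucket_gens key until the computed key position moves, then the key is
 * committed, with a final flush for the last partial group.  Generic sketch:
 */
#if 0	/* illustration only */
#include <stdbool.h>
#include <stdint.h>

static void demo_group_and_flush(const uint64_t *bucket, unsigned n,
				 void (*flush)(uint64_t group))
{
	bool have_group = false;
	uint64_t group = 0;

	for (unsigned i = 0; i < n; i++) {
		if (have_group && (bucket[i] >> 8) != group) {
			flush(group);
			have_group = false;
		}
		if (!have_group) {
			group = bucket[i] >> 8;
			have_group = true;
		}
	}
	if (have_group)
		flush(group);	/* final partial group */
}
#endif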
590 int bch2_alloc_read(struct bch_fs *c)
591 {
592 	down_read(&c->state_lock);
593 
594 	struct btree_trans *trans = bch2_trans_get(c);
595 	struct bch_dev *ca = NULL;
596 	int ret;
597 
598 	if (c->sb.version_upgrade_complete >= bcachefs_metadata_version_bucket_gens) {
599 		ret = for_each_btree_key(trans, iter, BTREE_ID_bucket_gens, POS_MIN,
600 					 BTREE_ITER_prefetch, k, ({
601 			u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
602 			u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
603 
604 			if (k.k->type != KEY_TYPE_bucket_gens)
605 				continue;
606 
607 			ca = bch2_dev_iterate(c, ca, k.k->p.inode);
608 			/*
609 			 * Not a fsck error because this is checked/repaired by
610 			 * bch2_check_alloc_key() which runs later:
611 			 */
612 			if (!ca) {
613 				bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
614 				continue;
615 			}
616 
617 			const struct bch_bucket_gens *g = bkey_s_c_to_bucket_gens(k).v;
618 
619 			for (u64 b = max_t(u64, ca->mi.first_bucket, start);
620 			     b < min_t(u64, ca->mi.nbuckets, end);
621 			     b++)
622 				*bucket_gen(ca, b) = g->gens[b & KEY_TYPE_BUCKET_GENS_MASK];
623 			0;
624 		}));
625 	} else {
626 		ret = for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
627 					 BTREE_ITER_prefetch, k, ({
628 			ca = bch2_dev_iterate(c, ca, k.k->p.inode);
629 			/*
630 			 * Not a fsck error because this is checked/repaired by
631 			 * bch2_check_alloc_key() which runs later:
632 			 */
633 			if (!ca) {
634 				bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
635 				continue;
636 			}
637 
638 			if (k.k->p.offset < ca->mi.first_bucket) {
639 				bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode, ca->mi.first_bucket));
640 				continue;
641 			}
642 
643 			if (k.k->p.offset >= ca->mi.nbuckets) {
644 				bch2_btree_iter_set_pos(trans, &iter, POS(k.k->p.inode + 1, 0));
645 				continue;
646 			}
647 
648 			struct bch_alloc_v4 a;
649 			*bucket_gen(ca, k.k->p.offset) = bch2_alloc_to_v4(k, &a)->gen;
650 			0;
651 		}));
652 	}
653 
654 	bch2_dev_put(ca);
655 	bch2_trans_put(trans);
656 
657 	up_read(&c->state_lock);
658 	bch_err_fn(c, ret);
659 	return ret;
660 }
661 
662 /* Free space/discard btree: */
663 
664 static int __need_discard_or_freespace_err(struct btree_trans *trans,
665 					   struct bkey_s_c alloc_k,
666 					   bool set, bool discard, bool repair)
667 {
668 	struct bch_fs *c = trans->c;
669 	enum bch_fsck_flags flags = FSCK_CAN_IGNORE|(repair ? FSCK_CAN_FIX : 0);
670 	enum bch_sb_error_id err_id = discard
671 		? BCH_FSCK_ERR_need_discard_key_wrong
672 		: BCH_FSCK_ERR_freespace_key_wrong;
673 	enum btree_id btree = discard ? BTREE_ID_need_discard : BTREE_ID_freespace;
674 	struct printbuf buf = PRINTBUF;
675 
676 	bch2_bkey_val_to_text(&buf, c, alloc_k);
677 
678 	int ret = __bch2_fsck_err(NULL, trans, flags, err_id,
679 				  "bucket incorrectly %sset in %s btree\n%s",
680 				  set ? "" : "un",
681 				  bch2_btree_id_str(btree),
682 				  buf.buf);
683 	if (ret == -BCH_ERR_fsck_ignore ||
684 	    ret == -BCH_ERR_fsck_errors_not_fixed)
685 		ret = 0;
686 
687 	printbuf_exit(&buf);
688 	return ret;
689 }
690 
691 #define need_discard_or_freespace_err(...)		\
692 	fsck_err_wrap(__need_discard_or_freespace_err(__VA_ARGS__))
693 
694 #define need_discard_or_freespace_err_on(cond, ...)		\
695 	(unlikely(cond) ?  need_discard_or_freespace_err(__VA_ARGS__) : false)
696 
697 static int bch2_bucket_do_index(struct btree_trans *trans,
698 				struct bch_dev *ca,
699 				struct bkey_s_c alloc_k,
700 				const struct bch_alloc_v4 *a,
701 				bool set)
702 {
703 	enum btree_id btree;
704 	struct bpos pos;
705 
706 	if (a->data_type != BCH_DATA_free &&
707 	    a->data_type != BCH_DATA_need_discard)
708 		return 0;
709 
710 	switch (a->data_type) {
711 	case BCH_DATA_free:
712 		btree = BTREE_ID_freespace;
713 		pos = alloc_freespace_pos(alloc_k.k->p, *a);
714 		break;
715 	case BCH_DATA_need_discard:
716 		btree = BTREE_ID_need_discard;
717 		pos = alloc_k.k->p;
718 		break;
719 	default:
720 		return 0;
721 	}
722 
723 	struct btree_iter iter;
724 	struct bkey_s_c old = bch2_bkey_get_iter(trans, &iter, btree, pos, BTREE_ITER_intent);
725 	int ret = bkey_err(old);
726 	if (ret)
727 		return ret;
728 
729 	need_discard_or_freespace_err_on(ca->mi.freespace_initialized &&
730 					 !old.k->type != set,
731 					 trans, alloc_k, set,
732 					 btree == BTREE_ID_need_discard, false);
733 
734 	ret = bch2_btree_bit_mod_iter(trans, &iter, set);
735 fsck_err:
736 	bch2_trans_iter_exit(trans, &iter);
737 	return ret;
738 }
739 
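/*
 * Editor's sketch (not part of the original file): freespace btree positions
 * pack the bucket number into the low 56 bits of the key offset and the
 * freespace generation bits into the high 8 bits — see the inverse unpacking
 * in bch2_check_discard_freespace_key() below.  Illustration, assuming
 * genbits < 2^8 and bucket < 2^56:
 */
#if 0	/* illustration only */
#include <stdint.h>

#define DEMO_BUCKET_BITS 56

static uint64_t demo_freespace_offset(uint64_t bucket, uint64_t genbits)
{
	return (genbits << DEMO_BUCKET_BITS) | bucket;
}

static uint64_t demo_freespace_bucket(uint64_t offset)
{
	return offset & ~(~0ULL << DEMO_BUCKET_BITS);	/* low 56 bits */
}
#endif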
740 static noinline int bch2_bucket_gen_update(struct btree_trans *trans,
741 					   struct bpos bucket, u8 gen)
742 {
743 	struct btree_iter iter;
744 	unsigned offset;
745 	struct bpos pos = alloc_gens_pos(bucket, &offset);
746 	struct bkey_i_bucket_gens *g;
747 	struct bkey_s_c k;
748 	int ret;
749 
750 	g = bch2_trans_kmalloc(trans, sizeof(*g));
751 	ret = PTR_ERR_OR_ZERO(g);
752 	if (ret)
753 		return ret;
754 
755 	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_bucket_gens, pos,
756 			       BTREE_ITER_intent|
757 			       BTREE_ITER_with_updates);
758 	ret = bkey_err(k);
759 	if (ret)
760 		return ret;
761 
762 	if (k.k->type != KEY_TYPE_bucket_gens) {
763 		bkey_bucket_gens_init(&g->k_i);
764 		g->k.p = iter.pos;
765 	} else {
766 		bkey_reassemble(&g->k_i, k);
767 	}
768 
769 	g->v.gens[offset] = gen;
770 
771 	ret = bch2_trans_update(trans, &iter, &g->k_i, 0);
772 	bch2_trans_iter_exit(trans, &iter);
773 	return ret;
774 }
775 
776 static inline int bch2_dev_data_type_accounting_mod(struct btree_trans *trans, struct bch_dev *ca,
777 						    enum bch_data_type data_type,
778 						    s64 delta_buckets,
779 						    s64 delta_sectors,
780 						    s64 delta_fragmented, unsigned flags)
781 {
782 	s64 d[3] = { delta_buckets, delta_sectors, delta_fragmented };
783 
784 	return bch2_disk_accounting_mod2(trans, flags & BTREE_TRIGGER_gc,
785 					 d, dev_data_type,
786 					 .dev		= ca->dev_idx,
787 					 .data_type	= data_type);
788 }
789 
790 int bch2_alloc_key_to_dev_counters(struct btree_trans *trans, struct bch_dev *ca,
791 				   const struct bch_alloc_v4 *old,
792 				   const struct bch_alloc_v4 *new,
793 				   unsigned flags)
794 {
795 	s64 old_sectors = bch2_bucket_sectors(*old);
796 	s64 new_sectors = bch2_bucket_sectors(*new);
797 	if (old->data_type != new->data_type) {
798 		int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
799 				 1,  new_sectors,  bch2_bucket_sectors_fragmented(ca, *new), flags) ?:
800 			  bch2_dev_data_type_accounting_mod(trans, ca, old->data_type,
801 				-1, -old_sectors, -bch2_bucket_sectors_fragmented(ca, *old), flags);
802 		if (ret)
803 			return ret;
804 	} else if (old_sectors != new_sectors) {
805 		int ret = bch2_dev_data_type_accounting_mod(trans, ca, new->data_type,
806 					 0,
807 					 new_sectors - old_sectors,
808 					 bch2_bucket_sectors_fragmented(ca, *new) -
809 					 bch2_bucket_sectors_fragmented(ca, *old), flags);
810 		if (ret)
811 			return ret;
812 	}
813 
814 	s64 old_unstriped = bch2_bucket_sectors_unstriped(*old);
815 	s64 new_unstriped = bch2_bucket_sectors_unstriped(*new);
816 	if (old_unstriped != new_unstriped) {
817 		int ret = bch2_dev_data_type_accounting_mod(trans, ca, BCH_DATA_unstriped,
818 					 !!new_unstriped - !!old_unstriped,
819 					 new_unstriped - old_unstriped,
820 					 0,
821 					 flags);
822 		if (ret)
823 			return ret;
824 	}
825 
826 	return 0;
827 }
828 
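/*
 * Editor's note (illustration, not part of the original file): device usage
 * is maintained with signed deltas — on a data type change the function
 * above issues one (+1 bucket, +sectors, +fragmented) triple for the new
 * type and one negated triple for the old.  Minimal model of the triple:
 */
#if 0	/* illustration only */
#include <stdint.h>

struct demo_dev_usage { int64_t buckets, sectors, fragmented; };

static void demo_usage_mod(struct demo_dev_usage *u,
			   int64_t buckets, int64_t sectors, int64_t fragmented)
{
	u->buckets	+= buckets;
	u->sectors	+= sectors;
	u->fragmented	+= fragmented;
}
#endif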
829 int bch2_trigger_alloc(struct btree_trans *trans,
830 		       enum btree_id btree, unsigned level,
831 		       struct bkey_s_c old, struct bkey_s new,
832 		       enum btree_iter_update_trigger_flags flags)
833 {
834 	struct bch_fs *c = trans->c;
835 	struct printbuf buf = PRINTBUF;
836 	int ret = 0;
837 
838 	struct bch_dev *ca = bch2_dev_bucket_tryget(c, new.k->p);
839 	if (!ca)
840 		return -BCH_ERR_trigger_alloc;
841 
842 	struct bch_alloc_v4 old_a_convert;
843 	const struct bch_alloc_v4 *old_a = bch2_alloc_to_v4(old, &old_a_convert);
844 
845 	struct bch_alloc_v4 *new_a;
846 	if (likely(new.k->type == KEY_TYPE_alloc_v4)) {
847 		new_a = bkey_s_to_alloc_v4(new).v;
848 	} else {
849 		BUG_ON(!(flags & (BTREE_TRIGGER_gc|BTREE_TRIGGER_check_repair)));
850 
851 		struct bkey_i_alloc_v4 *new_ka = bch2_alloc_to_v4_mut_inlined(trans, new.s_c);
852 		ret = PTR_ERR_OR_ZERO(new_ka);
853 		if (unlikely(ret))
854 			goto err;
855 		new_a = &new_ka->v;
856 	}
857 
858 	if (flags & BTREE_TRIGGER_transactional) {
859 		alloc_data_type_set(new_a, new_a->data_type);
860 
861 		int is_empty_delta = (int) data_type_is_empty(new_a->data_type) -
862 				     (int) data_type_is_empty(old_a->data_type);
863 
864 		if (is_empty_delta < 0) {
865 			new_a->io_time[READ] = bch2_current_io_time(c, READ);
866 			new_a->io_time[WRITE] = bch2_current_io_time(c, WRITE);
867 			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, true);
868 			SET_BCH_ALLOC_V4_NEED_DISCARD(new_a, true);
869 		}
870 
871 		if (data_type_is_empty(new_a->data_type) &&
872 		    BCH_ALLOC_V4_NEED_INC_GEN(new_a) &&
873 		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset)) {
874 			if (new_a->oldest_gen == new_a->gen &&
875 			    !bch2_bucket_sectors_total(*new_a))
876 				new_a->oldest_gen++;
877 			new_a->gen++;
878 			SET_BCH_ALLOC_V4_NEED_INC_GEN(new_a, false);
879 			alloc_data_type_set(new_a, new_a->data_type);
880 		}
881 
882 		if (old_a->data_type != new_a->data_type ||
883 		    (new_a->data_type == BCH_DATA_free &&
884 		     alloc_freespace_genbits(*old_a) != alloc_freespace_genbits(*new_a))) {
885 			ret =   bch2_bucket_do_index(trans, ca, old, old_a, false) ?:
886 				bch2_bucket_do_index(trans, ca, new.s_c, new_a, true);
887 			if (ret)
888 				goto err;
889 		}
890 
891 		if (new_a->data_type == BCH_DATA_cached &&
892 		    !new_a->io_time[READ])
893 			new_a->io_time[READ] = bch2_current_io_time(c, READ);
894 
895 		ret = bch2_lru_change(trans, new.k->p.inode,
896 				      bucket_to_u64(new.k->p),
897 				      alloc_lru_idx_read(*old_a),
898 				      alloc_lru_idx_read(*new_a));
899 		if (ret)
900 			goto err;
901 
902 		ret = bch2_lru_change(trans,
903 				      BCH_LRU_BUCKET_FRAGMENTATION,
904 				      bucket_to_u64(new.k->p),
905 				      alloc_lru_idx_fragmentation(*old_a, ca),
906 				      alloc_lru_idx_fragmentation(*new_a, ca));
907 		if (ret)
908 			goto err;
909 
910 		if (old_a->gen != new_a->gen) {
911 			ret = bch2_bucket_gen_update(trans, new.k->p, new_a->gen);
912 			if (ret)
913 				goto err;
914 		}
915 
916 		if ((flags & BTREE_TRIGGER_bucket_invalidate) &&
917 		    old_a->cached_sectors) {
918 			ret = bch2_mod_dev_cached_sectors(trans, ca->dev_idx,
919 					 -((s64) old_a->cached_sectors),
920 					 flags & BTREE_TRIGGER_gc);
921 			if (ret)
922 				goto err;
923 		}
924 
925 		ret = bch2_alloc_key_to_dev_counters(trans, ca, old_a, new_a, flags);
926 		if (ret)
927 			goto err;
928 	}
929 
930 	if ((flags & BTREE_TRIGGER_atomic) && (flags & BTREE_TRIGGER_insert)) {
931 		u64 transaction_seq = trans->journal_res.seq;
932 		BUG_ON(!transaction_seq);
933 
934 		if (log_fsck_err_on(transaction_seq && new_a->journal_seq_nonempty > transaction_seq,
935 				    trans, alloc_key_journal_seq_in_future,
936 				    "bucket journal seq in future (currently at %llu)\n%s",
937 				    journal_cur_seq(&c->journal),
938 				    (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf)))
939 			new_a->journal_seq_nonempty = transaction_seq;
940 
941 		int is_empty_delta = (int) data_type_is_empty(new_a->data_type) -
942 				     (int) data_type_is_empty(old_a->data_type);
943 
944 		/*
945 		 * Record journal sequence number of empty -> nonempty transition:
946 		 * Note that there may be multiple empty -> nonempty
947 		 * transitions, data in a bucket may be overwritten while we're
948 		 * still writing to it - so be careful to only record the first:
949 		 */
950 		if (is_empty_delta < 0 &&
951 		    new_a->journal_seq_empty <= c->journal.flushed_seq_ondisk) {
952 			new_a->journal_seq_nonempty	= transaction_seq;
953 			new_a->journal_seq_empty	= 0;
954 		}
955 
956 		/*
957 		 * Bucket becomes empty: mark it as waiting for a journal flush,
958 		 * unless updates since empty -> nonempty transition were never
959 		 * flushed - we may need to ask the journal not to flush
960 		 * intermediate sequence numbers:
961 		 */
962 		if (is_empty_delta > 0) {
963 			if (new_a->journal_seq_nonempty == transaction_seq ||
964 			    bch2_journal_noflush_seq(&c->journal,
965 						     new_a->journal_seq_nonempty,
966 						     transaction_seq)) {
967 				new_a->journal_seq_nonempty = new_a->journal_seq_empty = 0;
968 			} else {
969 				new_a->journal_seq_empty = transaction_seq;
970 
971 				ret = bch2_set_bucket_needs_journal_commit(&c->buckets_waiting_for_journal,
972 									   c->journal.flushed_seq_ondisk,
973 									   new.k->p.inode, new.k->p.offset,
974 									   transaction_seq);
975 				if (bch2_fs_fatal_err_on(ret, c,
976 						"setting bucket_needs_journal_commit: %s",
977 						bch2_err_str(ret)))
978 					goto err;
979 			}
980 		}
981 
982 		if (new_a->gen != old_a->gen) {
983 			rcu_read_lock();
984 			u8 *gen = bucket_gen(ca, new.k->p.offset);
985 			if (unlikely(!gen)) {
986 				rcu_read_unlock();
987 				goto invalid_bucket;
988 			}
989 			*gen = new_a->gen;
990 			rcu_read_unlock();
991 		}
992 
993 #define eval_state(_a, expr)		({ const struct bch_alloc_v4 *a = _a; expr; })
994 #define statechange(expr)		!eval_state(old_a, expr) && eval_state(new_a, expr)
995 #define bucket_flushed(a)		(a->journal_seq_empty <= c->journal.flushed_seq_ondisk)
996 
997 		if (statechange(a->data_type == BCH_DATA_free) &&
998 		    bucket_flushed(new_a))
999 			closure_wake_up(&c->freelist_wait);
1000 
1001 		if (statechange(a->data_type == BCH_DATA_need_discard) &&
1002 		    !bch2_bucket_is_open_safe(c, new.k->p.inode, new.k->p.offset) &&
1003 		    bucket_flushed(new_a))
1004 			bch2_discard_one_bucket_fast(ca, new.k->p.offset);
1005 
1006 		if (statechange(a->data_type == BCH_DATA_cached) &&
1007 		    !bch2_bucket_is_open(c, new.k->p.inode, new.k->p.offset) &&
1008 		    should_invalidate_buckets(ca, bch2_dev_usage_read(ca)))
1009 			bch2_dev_do_invalidates(ca);
1010 
1011 		if (statechange(a->data_type == BCH_DATA_need_gc_gens))
1012 			bch2_gc_gens_async(c);
1013 	}
1014 
1015 	if ((flags & BTREE_TRIGGER_gc) && (flags & BTREE_TRIGGER_insert)) {
1016 		rcu_read_lock();
1017 		struct bucket *g = gc_bucket(ca, new.k->p.offset);
1018 		if (unlikely(!g)) {
1019 			rcu_read_unlock();
1020 			goto invalid_bucket;
1021 		}
1022 		g->gen_valid	= 1;
1023 		g->gen		= new_a->gen;
1024 		rcu_read_unlock();
1025 	}
1026 err:
1027 fsck_err:
1028 	printbuf_exit(&buf);
1029 	bch2_dev_put(ca);
1030 	return ret;
1031 invalid_bucket:
1032 	bch2_fs_inconsistent(c, "reference to invalid bucket\n%s",
1033 			     (bch2_bkey_val_to_text(&buf, c, new.s_c), buf.buf));
1034 	ret = -BCH_ERR_trigger_alloc;
1035 	goto err;
1036 }
1037 
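/*
 * Editor's note (illustration, not part of the original file): the
 * statechange() macro used above is edge detection — it is true only on the
 * transition where the predicate goes from false on the old key to true on
 * the new key, so wakeups and async work fire once per transition rather
 * than on every update.  Equivalent:
 */
#if 0	/* illustration only */
#include <stdbool.h>

static bool demo_statechange(bool old_matches, bool new_matches)
{
	return !old_matches && new_matches;
}
#endif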
1038 /*
1039  * This synthesizes deleted extents for holes, similar to BTREE_ITER_slots for
1040  * extents style btrees, but works on non-extents btrees:
1041  */
1042 static struct bkey_s_c bch2_get_key_or_hole(struct btree_trans *trans, struct btree_iter *iter,
1043 					    struct bpos end, struct bkey *hole)
1044 {
1045 	struct bkey_s_c k = bch2_btree_iter_peek_slot(trans, iter);
1046 
1047 	if (bkey_err(k))
1048 		return k;
1049 
1050 	if (k.k->type) {
1051 		return k;
1052 	} else {
1053 		struct btree_iter iter2;
1054 		struct bpos next;
1055 
1056 		bch2_trans_copy_iter(trans, &iter2, iter);
1057 
1058 		struct btree_path *path = btree_iter_path(trans, iter);
1059 		if (!bpos_eq(path->l[0].b->key.k.p, SPOS_MAX))
1060 			end = bkey_min(end, bpos_nosnap_successor(path->l[0].b->key.k.p));
1061 
1062 		end = bkey_min(end, POS(iter->pos.inode, iter->pos.offset + U32_MAX - 1));
1063 
1064 		/*
1065 		 * btree node min/max is a closed interval, while peek_max()
1066 		 * takes a half-open interval:
1067 		 */
1068 		k = bch2_btree_iter_peek_max(trans, &iter2, end);
1069 		next = iter2.pos;
1070 		bch2_trans_iter_exit(trans, &iter2);
1071 
1072 		BUG_ON(next.offset >= iter->pos.offset + U32_MAX);
1073 
1074 		if (bkey_err(k))
1075 			return k;
1076 
1077 		bkey_init(hole);
1078 		hole->p = iter->pos;
1079 
1080 		bch2_key_resize(hole, next.offset - iter->pos.offset);
1081 		return (struct bkey_s_c) { hole, NULL };
1082 	}
1083 }
1084 
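/*
 * Editor's note (illustration, not part of the original file): when the slot
 * at iter->pos is empty, the function above synthesizes a deleted key
 * spanning from the iterator position up to the next real key, clamped to
 * the node boundary and to the maximum bkey size.  Interval sketch:
 */
#if 0	/* illustration only */
#include <stdint.h>

struct demo_hole { uint64_t start, size; };

static struct demo_hole demo_make_hole(uint64_t pos, uint64_t next)
{
	/* half-open interval [pos, next) */
	return (struct demo_hole) { .start = pos, .size = next - pos };
}
#endif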
1085 static bool next_bucket(struct bch_fs *c, struct bch_dev **ca, struct bpos *bucket)
1086 {
1087 	if (*ca) {
1088 		if (bucket->offset < (*ca)->mi.first_bucket)
1089 			bucket->offset = (*ca)->mi.first_bucket;
1090 
1091 		if (bucket->offset < (*ca)->mi.nbuckets)
1092 			return true;
1093 
1094 		bch2_dev_put(*ca);
1095 		*ca = NULL;
1096 		bucket->inode++;
1097 		bucket->offset = 0;
1098 	}
1099 
1100 	rcu_read_lock();
1101 	*ca = __bch2_next_dev_idx(c, bucket->inode, NULL);
1102 	if (*ca) {
1103 		*bucket = POS((*ca)->dev_idx, (*ca)->mi.first_bucket);
1104 		bch2_dev_get(*ca);
1105 	}
1106 	rcu_read_unlock();
1107 
1108 	return *ca != NULL;
1109 }
1110 
1111 static struct bkey_s_c bch2_get_key_or_real_bucket_hole(struct btree_trans *trans,
1112 							struct btree_iter *iter,
1113 							struct bch_dev **ca, struct bkey *hole)
1114 {
1115 	struct bch_fs *c = trans->c;
1116 	struct bkey_s_c k;
1117 again:
1118 	k = bch2_get_key_or_hole(trans, iter, POS_MAX, hole);
1119 	if (bkey_err(k))
1120 		return k;
1121 
1122 	*ca = bch2_dev_iterate_noerror(c, *ca, k.k->p.inode);
1123 
1124 	if (!k.k->type) {
1125 		struct bpos hole_start = bkey_start_pos(k.k);
1126 
1127 		if (!*ca || !bucket_valid(*ca, hole_start.offset)) {
1128 			if (!next_bucket(c, ca, &hole_start))
1129 				return bkey_s_c_null;
1130 
1131 			bch2_btree_iter_set_pos(trans, iter, hole_start);
1132 			goto again;
1133 		}
1134 
1135 		if (k.k->p.offset > (*ca)->mi.nbuckets)
1136 			bch2_key_resize(hole, (*ca)->mi.nbuckets - hole_start.offset);
1137 	}
1138 
1139 	return k;
1140 }
1141 
1142 static noinline_for_stack
1143 int bch2_check_alloc_key(struct btree_trans *trans,
1144 			 struct bkey_s_c alloc_k,
1145 			 struct btree_iter *alloc_iter,
1146 			 struct btree_iter *discard_iter,
1147 			 struct btree_iter *freespace_iter,
1148 			 struct btree_iter *bucket_gens_iter)
1149 {
1150 	struct bch_fs *c = trans->c;
1151 	struct bch_alloc_v4 a_convert;
1152 	const struct bch_alloc_v4 *a;
1153 	unsigned gens_offset;
1154 	struct bkey_s_c k;
1155 	struct printbuf buf = PRINTBUF;
1156 	int ret = 0;
1157 
1158 	struct bch_dev *ca = bch2_dev_bucket_tryget_noerror(c, alloc_k.k->p);
1159 	if (fsck_err_on(!ca,
1160 			trans, alloc_key_to_missing_dev_bucket,
1161 			"alloc key for invalid device:bucket %llu:%llu",
1162 			alloc_k.k->p.inode, alloc_k.k->p.offset))
1163 		ret = bch2_btree_delete_at(trans, alloc_iter, 0);
1164 	if (!ca)
1165 		return ret;
1166 
1167 	if (!ca->mi.freespace_initialized)
1168 		goto out;
1169 
1170 	a = bch2_alloc_to_v4(alloc_k, &a_convert);
1171 
1172 	bch2_btree_iter_set_pos(trans, discard_iter, alloc_k.k->p);
1173 	k = bch2_btree_iter_peek_slot(trans, discard_iter);
1174 	ret = bkey_err(k);
1175 	if (ret)
1176 		goto err;
1177 
1178 	bool is_discarded = a->data_type == BCH_DATA_need_discard;
1179 	if (need_discard_or_freespace_err_on(!!k.k->type != is_discarded,
1180 					     trans, alloc_k, !is_discarded, true, true)) {
1181 		ret = bch2_btree_bit_mod_iter(trans, discard_iter, is_discarded);
1182 		if (ret)
1183 			goto err;
1184 	}
1185 
1186 	bch2_btree_iter_set_pos(trans, freespace_iter, alloc_freespace_pos(alloc_k.k->p, *a));
1187 	k = bch2_btree_iter_peek_slot(trans, freespace_iter);
1188 	ret = bkey_err(k);
1189 	if (ret)
1190 		goto err;
1191 
1192 	bool is_free = a->data_type == BCH_DATA_free;
1193 	if (need_discard_or_freespace_err_on(!!k.k->type != is_free,
1194 					     trans, alloc_k, !is_free, false, true)) {
1195 		ret = bch2_btree_bit_mod_iter(trans, freespace_iter, is_free);
1196 		if (ret)
1197 			goto err;
1198 	}
1199 
1200 	bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(alloc_k.k->p, &gens_offset));
1201 	k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter);
1202 	ret = bkey_err(k);
1203 	if (ret)
1204 		goto err;
1205 
1206 	if (fsck_err_on(a->gen != alloc_gen(k, gens_offset),
1207 			trans, bucket_gens_key_wrong,
1208 			"incorrect gen in bucket_gens btree (got %u should be %u)\n%s",
1209 			alloc_gen(k, gens_offset), a->gen,
1210 			(printbuf_reset(&buf),
1211 			 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
1212 		struct bkey_i_bucket_gens *g =
1213 			bch2_trans_kmalloc(trans, sizeof(*g));
1214 
1215 		ret = PTR_ERR_OR_ZERO(g);
1216 		if (ret)
1217 			goto err;
1218 
1219 		if (k.k->type == KEY_TYPE_bucket_gens) {
1220 			bkey_reassemble(&g->k_i, k);
1221 		} else {
1222 			bkey_bucket_gens_init(&g->k_i);
1223 			g->k.p = alloc_gens_pos(alloc_k.k->p, &gens_offset);
1224 		}
1225 
1226 		g->v.gens[gens_offset] = a->gen;
1227 
1228 		ret = bch2_trans_update(trans, bucket_gens_iter, &g->k_i, 0);
1229 		if (ret)
1230 			goto err;
1231 	}
1232 out:
1233 err:
1234 fsck_err:
1235 	bch2_dev_put(ca);
1236 	printbuf_exit(&buf);
1237 	return ret;
1238 }
1239 
1240 static noinline_for_stack
1241 int bch2_check_alloc_hole_freespace(struct btree_trans *trans,
1242 				    struct bch_dev *ca,
1243 				    struct bpos start,
1244 				    struct bpos *end,
1245 				    struct btree_iter *freespace_iter)
1246 {
1247 	struct bkey_s_c k;
1248 	struct printbuf buf = PRINTBUF;
1249 	int ret;
1250 
1251 	if (!ca->mi.freespace_initialized)
1252 		return 0;
1253 
1254 	bch2_btree_iter_set_pos(trans, freespace_iter, start);
1255 
1256 	k = bch2_btree_iter_peek_slot(trans, freespace_iter);
1257 	ret = bkey_err(k);
1258 	if (ret)
1259 		goto err;
1260 
1261 	*end = bkey_min(k.k->p, *end);
1262 
1263 	if (fsck_err_on(k.k->type != KEY_TYPE_set,
1264 			trans, freespace_hole_missing,
1265 			"hole in alloc btree missing in freespace btree\n"
1266 			"device %llu buckets %llu-%llu",
1267 			freespace_iter->pos.inode,
1268 			freespace_iter->pos.offset,
1269 			end->offset)) {
1270 		struct bkey_i *update =
1271 			bch2_trans_kmalloc(trans, sizeof(*update));
1272 
1273 		ret = PTR_ERR_OR_ZERO(update);
1274 		if (ret)
1275 			goto err;
1276 
1277 		bkey_init(&update->k);
1278 		update->k.type	= KEY_TYPE_set;
1279 		update->k.p	= freespace_iter->pos;
1280 		bch2_key_resize(&update->k,
1281 				min_t(u64, U32_MAX, end->offset -
1282 				      freespace_iter->pos.offset));
1283 
1284 		ret = bch2_trans_update(trans, freespace_iter, update, 0);
1285 		if (ret)
1286 			goto err;
1287 	}
1288 err:
1289 fsck_err:
1290 	printbuf_exit(&buf);
1291 	return ret;
1292 }
1293 
1294 static noinline_for_stack
1295 int bch2_check_alloc_hole_bucket_gens(struct btree_trans *trans,
1296 				      struct bpos start,
1297 				      struct bpos *end,
1298 				      struct btree_iter *bucket_gens_iter)
1299 {
1300 	struct bkey_s_c k;
1301 	struct printbuf buf = PRINTBUF;
1302 	unsigned i, gens_offset, gens_end_offset;
1303 	int ret;
1304 
1305 	bch2_btree_iter_set_pos(trans, bucket_gens_iter, alloc_gens_pos(start, &gens_offset));
1306 
1307 	k = bch2_btree_iter_peek_slot(trans, bucket_gens_iter);
1308 	ret = bkey_err(k);
1309 	if (ret)
1310 		goto err;
1311 
1312 	if (bkey_cmp(alloc_gens_pos(start, &gens_offset),
1313 		     alloc_gens_pos(*end,  &gens_end_offset)))
1314 		gens_end_offset = KEY_TYPE_BUCKET_GENS_NR;
1315 
1316 	if (k.k->type == KEY_TYPE_bucket_gens) {
1317 		struct bkey_i_bucket_gens g;
1318 		bool need_update = false;
1319 
1320 		bkey_reassemble(&g.k_i, k);
1321 
1322 		for (i = gens_offset; i < gens_end_offset; i++) {
1323 			if (fsck_err_on(g.v.gens[i], trans,
1324 					bucket_gens_hole_wrong,
1325 					"hole in alloc btree at %llu:%llu with nonzero gen in bucket_gens btree (%u)",
1326 					bucket_gens_pos_to_alloc(k.k->p, i).inode,
1327 					bucket_gens_pos_to_alloc(k.k->p, i).offset,
1328 					g.v.gens[i])) {
1329 				g.v.gens[i] = 0;
1330 				need_update = true;
1331 			}
1332 		}
1333 
1334 		if (need_update) {
1335 			struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
1336 
1337 			ret = PTR_ERR_OR_ZERO(u);
1338 			if (ret)
1339 				goto err;
1340 
1341 			memcpy(u, &g, sizeof(g));
1342 
1343 			ret = bch2_trans_update(trans, bucket_gens_iter, u, 0);
1344 			if (ret)
1345 				goto err;
1346 		}
1347 	}
1348 
1349 	*end = bkey_min(*end, bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0));
1350 err:
1351 fsck_err:
1352 	printbuf_exit(&buf);
1353 	return ret;
1354 }
1355 
1356 struct check_discard_freespace_key_async {
1357 	struct work_struct	work;
1358 	struct bch_fs		*c;
1359 	struct bbpos		pos;
1360 };
1361 
1362 static int bch2_recheck_discard_freespace_key(struct btree_trans *trans, struct bbpos pos)
1363 {
1364 	struct btree_iter iter;
1365 	struct bkey_s_c k = bch2_bkey_get_iter(trans, &iter, pos.btree, pos.pos, 0);
1366 	int ret = bkey_err(k);
1367 	if (ret)
1368 		return ret;
1369 
1370 	u8 gen;
1371 	ret = k.k->type != KEY_TYPE_set
1372 		? bch2_check_discard_freespace_key(trans, &iter, &gen, false)
1373 		: 0;
1374 	bch2_trans_iter_exit(trans, &iter);
1375 	return ret;
1376 }
1377 
1378 static void check_discard_freespace_key_work(struct work_struct *work)
1379 {
1380 	struct check_discard_freespace_key_async *w =
1381 		container_of(work, struct check_discard_freespace_key_async, work);
1382 
1383 	bch2_trans_do(w->c, bch2_recheck_discard_freespace_key(trans, w->pos));
1384 	bch2_write_ref_put(w->c, BCH_WRITE_REF_check_discard_freespace_key);
1385 	kfree(w);
1386 }
1387 
1388 int bch2_check_discard_freespace_key(struct btree_trans *trans, struct btree_iter *iter, u8 *gen,
1389 				     bool async_repair)
1390 {
1391 	struct bch_fs *c = trans->c;
1392 	enum bch_data_type state = iter->btree_id == BTREE_ID_need_discard
1393 		? BCH_DATA_need_discard
1394 		: BCH_DATA_free;
1395 	struct printbuf buf = PRINTBUF;
1396 
1397 	struct bpos bucket = iter->pos;
1398 	bucket.offset &= ~(~0ULL << 56);
1399 	u64 genbits = iter->pos.offset & (~0ULL << 56);
1400 
1401 	struct btree_iter alloc_iter;
1402 	struct bkey_s_c alloc_k = bch2_bkey_get_iter(trans, &alloc_iter,
1403 						     BTREE_ID_alloc, bucket,
1404 						     async_repair ? BTREE_ITER_cached : 0);
1405 	int ret = bkey_err(alloc_k);
1406 	if (ret)
1407 		return ret;
1408 
1409 	if (!bch2_dev_bucket_exists(c, bucket)) {
1410 		if (fsck_err(trans, need_discard_freespace_key_to_invalid_dev_bucket,
1411 			     "entry in %s btree for nonexistent dev:bucket %llu:%llu",
1412 			     bch2_btree_id_str(iter->btree_id), bucket.inode, bucket.offset))
1413 			goto delete;
1414 		ret = 1;
1415 		goto out;
1416 	}
1417 
1418 	struct bch_alloc_v4 a_convert;
1419 	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert);
1420 
1421 	if (a->data_type != state ||
1422 	    (state == BCH_DATA_free &&
1423 	     genbits != alloc_freespace_genbits(*a))) {
1424 		if (fsck_err(trans, need_discard_freespace_key_bad,
1425 			     "%s\nincorrectly set at %s:%llu:%llu:0 (free %u, genbits %llu should be %llu)",
1426 			     (bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf),
1427 			     bch2_btree_id_str(iter->btree_id),
1428 			     iter->pos.inode,
1429 			     iter->pos.offset,
1430 			     a->data_type == state,
1431 			     genbits >> 56, alloc_freespace_genbits(*a) >> 56))
1432 			goto delete;
1433 		ret = 1;
1434 		goto out;
1435 	}
1436 
1437 	*gen = a->gen;
1438 out:
1439 fsck_err:
1440 	bch2_set_btree_iter_dontneed(trans, &alloc_iter);
1441 	bch2_trans_iter_exit(trans, &alloc_iter);
1442 	printbuf_exit(&buf);
1443 	return ret;
1444 delete:
1445 	if (!async_repair) {
1446 		ret =   bch2_btree_bit_mod_iter(trans, iter, false) ?:
1447 			bch2_trans_commit(trans, NULL, NULL,
1448 				BCH_TRANS_COMMIT_no_enospc) ?:
1449 			-BCH_ERR_transaction_restart_commit;
1450 		goto out;
1451 	} else {
1452 		/*
1453 		 * We can't repair here when called from the allocator path: the
1454 		 * commit will recurse back into the allocator
1455 		 */
1456 		struct check_discard_freespace_key_async *w =
1457 			kzalloc(sizeof(*w), GFP_KERNEL);
1458 		if (!w)
1459 			goto out;
1460 
1461 		if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_check_discard_freespace_key)) {
1462 			kfree(w);
1463 			goto out;
1464 		}
1465 
1466 		INIT_WORK(&w->work, check_discard_freespace_key_work);
1467 		w->c = c;
1468 		w->pos = BBPOS(iter->btree_id, iter->pos);
1469 		queue_work(c->write_ref_wq, &w->work);
1470 		goto out;
1471 	}
1472 }
1473 
1474 static int bch2_check_discard_freespace_key_fsck(struct btree_trans *trans, struct btree_iter *iter)
1475 {
1476 	u8 gen;
1477 	int ret = bch2_check_discard_freespace_key(trans, iter, &gen, false);
1478 	return ret < 0 ? ret : 0;
1479 }
1480 
1481 /*
1482  * We've already checked that generation numbers in the bucket_gens btree are
1483  * valid for buckets that exist; this just checks for keys for nonexistent
1484  * buckets.
1485  */
1486 static noinline_for_stack
1487 int bch2_check_bucket_gens_key(struct btree_trans *trans,
1488 			       struct btree_iter *iter,
1489 			       struct bkey_s_c k)
1490 {
1491 	struct bch_fs *c = trans->c;
1492 	struct bkey_i_bucket_gens g;
1493 	u64 start = bucket_gens_pos_to_alloc(k.k->p, 0).offset;
1494 	u64 end = bucket_gens_pos_to_alloc(bpos_nosnap_successor(k.k->p), 0).offset;
1495 	u64 b;
1496 	bool need_update = false;
1497 	struct printbuf buf = PRINTBUF;
1498 	int ret = 0;
1499 
1500 	BUG_ON(k.k->type != KEY_TYPE_bucket_gens);
1501 	bkey_reassemble(&g.k_i, k);
1502 
1503 	struct bch_dev *ca = bch2_dev_tryget_noerror(c, k.k->p.inode);
1504 	if (!ca) {
1505 		if (fsck_err(trans, bucket_gens_to_invalid_dev,
1506 			     "bucket_gens key for invalid device:\n%s",
1507 			     (bch2_bkey_val_to_text(&buf, c, k), buf.buf)))
1508 			ret = bch2_btree_delete_at(trans, iter, 0);
1509 		goto out;
1510 	}
1511 
1512 	if (fsck_err_on(end <= ca->mi.first_bucket ||
1513 			start >= ca->mi.nbuckets,
1514 			trans, bucket_gens_to_invalid_buckets,
1515 			"bucket_gens key for invalid buckets:\n%s",
1516 			(bch2_bkey_val_to_text(&buf, c, k), buf.buf))) {
1517 		ret = bch2_btree_delete_at(trans, iter, 0);
1518 		goto out;
1519 	}
1520 
1521 	for (b = start; b < ca->mi.first_bucket; b++)
1522 		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK],
1523 				trans, bucket_gens_nonzero_for_invalid_buckets,
1524 				"bucket_gens key has nonzero gen for invalid bucket")) {
1525 			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
1526 			need_update = true;
1527 		}
1528 
1529 	for (b = ca->mi.nbuckets; b < end; b++)
1530 		if (fsck_err_on(g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK],
1531 				trans, bucket_gens_nonzero_for_invalid_buckets,
1532 				"bucket_gens key has nonzero gen for invalid bucket")) {
1533 			g.v.gens[b & KEY_TYPE_BUCKET_GENS_MASK] = 0;
1534 			need_update = true;
1535 		}
1536 
1537 	if (need_update) {
1538 		struct bkey_i *u = bch2_trans_kmalloc(trans, sizeof(g));
1539 
1540 		ret = PTR_ERR_OR_ZERO(u);
1541 		if (ret)
1542 			goto out;
1543 
1544 		memcpy(u, &g, sizeof(g));
1545 		ret = bch2_trans_update(trans, iter, u, 0);
1546 	}
1547 out:
1548 fsck_err:
1549 	bch2_dev_put(ca);
1550 	printbuf_exit(&buf);
1551 	return ret;
1552 }
1553 
1554 int bch2_check_alloc_info(struct bch_fs *c)
1555 {
1556 	struct btree_trans *trans = bch2_trans_get(c);
1557 	struct btree_iter iter, discard_iter, freespace_iter, bucket_gens_iter;
1558 	struct bch_dev *ca = NULL;
1559 	struct bkey hole;
1560 	struct bkey_s_c k;
1561 	int ret = 0;
1562 
1563 	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc, POS_MIN,
1564 			     BTREE_ITER_prefetch);
1565 	bch2_trans_iter_init(trans, &discard_iter, BTREE_ID_need_discard, POS_MIN,
1566 			     BTREE_ITER_prefetch);
1567 	bch2_trans_iter_init(trans, &freespace_iter, BTREE_ID_freespace, POS_MIN,
1568 			     BTREE_ITER_prefetch);
1569 	bch2_trans_iter_init(trans, &bucket_gens_iter, BTREE_ID_bucket_gens, POS_MIN,
1570 			     BTREE_ITER_prefetch);
1571 
1572 	while (1) {
1573 		struct bpos next;
1574 
1575 		bch2_trans_begin(trans);
1576 
1577 		k = bch2_get_key_or_real_bucket_hole(trans, &iter, &ca, &hole);
1578 		ret = bkey_err(k);
1579 		if (ret)
1580 			goto bkey_err;
1581 
1582 		if (!k.k)
1583 			break;
1584 
1585 		if (k.k->type) {
1586 			next = bpos_nosnap_successor(k.k->p);
1587 
1588 			ret = bch2_check_alloc_key(trans,
1589 						   k, &iter,
1590 						   &discard_iter,
1591 						   &freespace_iter,
1592 						   &bucket_gens_iter);
1593 			if (ret)
1594 				goto bkey_err;
1595 		} else {
1596 			next = k.k->p;
1597 
1598 			ret = bch2_check_alloc_hole_freespace(trans, ca,
1599 						    bkey_start_pos(k.k),
1600 						    &next,
1601 						    &freespace_iter) ?:
1602 				bch2_check_alloc_hole_bucket_gens(trans,
1603 						    bkey_start_pos(k.k),
1604 						    &next,
1605 						    &bucket_gens_iter);
1606 			if (ret)
1607 				goto bkey_err;
1608 		}
1609 
1610 		ret = bch2_trans_commit(trans, NULL, NULL,
1611 					BCH_TRANS_COMMIT_no_enospc);
1612 		if (ret)
1613 			goto bkey_err;
1614 
1615 		bch2_btree_iter_set_pos(trans, &iter, next);
1616 bkey_err:
1617 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
1618 			continue;
1619 		if (ret)
1620 			break;
1621 	}
1622 	bch2_trans_iter_exit(trans, &bucket_gens_iter);
1623 	bch2_trans_iter_exit(trans, &freespace_iter);
1624 	bch2_trans_iter_exit(trans, &discard_iter);
1625 	bch2_trans_iter_exit(trans, &iter);
1626 	bch2_dev_put(ca);
1627 	ca = NULL;
1628 
1629 	if (ret < 0)
1630 		goto err;
1631 
1632 	ret = for_each_btree_key(trans, iter,
1633 			BTREE_ID_need_discard, POS_MIN,
1634 			BTREE_ITER_prefetch, k,
1635 		bch2_check_discard_freespace_key_fsck(trans, &iter));
1636 	if (ret)
1637 		goto err;
1638 
1639 	bch2_trans_iter_init(trans, &iter, BTREE_ID_freespace, POS_MIN,
1640 			     BTREE_ITER_prefetch);
1641 	while (1) {
1642 		bch2_trans_begin(trans);
1643 		k = bch2_btree_iter_peek(trans, &iter);
1644 		if (!k.k)
1645 			break;
1646 
1647 		ret = bkey_err(k) ?:
1648 			bch2_check_discard_freespace_key_fsck(trans, &iter);
1649 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart)) {
1650 			ret = 0;
1651 			continue;
1652 		}
1653 		if (ret) {
1654 			struct printbuf buf = PRINTBUF;
1655 			bch2_bkey_val_to_text(&buf, c, k);
1656 
1657 			bch_err(c, "while checking %s", buf.buf);
1658 			printbuf_exit(&buf);
1659 			break;
1660 		}
1661 
1662 		bch2_btree_iter_set_pos(trans, &iter, bpos_nosnap_successor(iter.pos));
1663 	}
1664 	bch2_trans_iter_exit(trans, &iter);
1665 	if (ret)
1666 		goto err;
1667 
1668 	ret = for_each_btree_key_commit(trans, iter,
1669 			BTREE_ID_bucket_gens, POS_MIN,
1670 			BTREE_ITER_prefetch, k,
1671 			NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1672 		bch2_check_bucket_gens_key(trans, &iter, k));
1673 err:
1674 	bch2_trans_put(trans);
1675 	bch_err_fn(c, ret);
1676 	return ret;
1677 }
1678 
1679 static int bch2_check_alloc_to_lru_ref(struct btree_trans *trans,
1680 				       struct btree_iter *alloc_iter,
1681 				       struct bkey_buf *last_flushed)
1682 {
1683 	struct bch_fs *c = trans->c;
1684 	struct bch_alloc_v4 a_convert;
1685 	const struct bch_alloc_v4 *a;
1686 	struct bkey_s_c alloc_k;
1687 	struct printbuf buf = PRINTBUF;
1688 	int ret;
1689 
1690 	alloc_k = bch2_btree_iter_peek(trans, alloc_iter);
1691 	if (!alloc_k.k)
1692 		return 0;
1693 
1694 	ret = bkey_err(alloc_k);
1695 	if (ret)
1696 		return ret;
1697 
1698 	struct bch_dev *ca = bch2_dev_tryget_noerror(c, alloc_k.k->p.inode);
1699 	if (!ca)
1700 		return 0;
1701 
1702 	a = bch2_alloc_to_v4(alloc_k, &a_convert);
1703 
1704 	u64 lru_idx = alloc_lru_idx_fragmentation(*a, ca);
1705 	if (lru_idx) {
1706 		ret = bch2_lru_check_set(trans, BCH_LRU_BUCKET_FRAGMENTATION,
1707 					 bucket_to_u64(alloc_k.k->p),
1708 					 lru_idx, alloc_k, last_flushed);
1709 		if (ret)
1710 			goto err;
1711 	}
1712 
1713 	if (a->data_type != BCH_DATA_cached)
1714 		goto err;
1715 
1716 	if (fsck_err_on(!a->io_time[READ],
1717 			trans, alloc_key_cached_but_read_time_zero,
1718 			"cached bucket with read_time 0\n%s",
1719 		(printbuf_reset(&buf),
1720 		 bch2_bkey_val_to_text(&buf, c, alloc_k), buf.buf))) {
1721 		struct bkey_i_alloc_v4 *a_mut =
1722 			bch2_alloc_to_v4_mut(trans, alloc_k);
1723 		ret = PTR_ERR_OR_ZERO(a_mut);
1724 		if (ret)
1725 			goto err;
1726 
1727 		a_mut->v.io_time[READ] = bch2_current_io_time(c, READ);
1728 		ret = bch2_trans_update(trans, alloc_iter,
1729 					&a_mut->k_i, BTREE_TRIGGER_norun);
1730 		if (ret)
1731 			goto err;
1732 
1733 		a = &a_mut->v;
1734 	}
1735 
1736 	ret = bch2_lru_check_set(trans, alloc_k.k->p.inode,
1737 				 bucket_to_u64(alloc_k.k->p),
1738 				 a->io_time[READ],
1739 				 alloc_k, last_flushed);
1740 	if (ret)
1741 		goto err;
1742 err:
1743 fsck_err:
1744 	bch2_dev_put(ca);
1745 	printbuf_exit(&buf);
1746 	return ret;
1747 }
1748 
bch2_check_alloc_to_lru_refs(struct bch_fs * c)1749 int bch2_check_alloc_to_lru_refs(struct bch_fs *c)
1750 {
1751 	struct bkey_buf last_flushed;
1752 
1753 	bch2_bkey_buf_init(&last_flushed);
1754 	bkey_init(&last_flushed.k->k);
1755 
1756 	int ret = bch2_trans_run(c,
1757 		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
1758 				POS_MIN, BTREE_ITER_prefetch, k,
1759 				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
1760 			bch2_check_alloc_to_lru_ref(trans, &iter, &last_flushed))) ?:
1761 		bch2_check_stripe_to_lru_refs(c);
1762 
1763 	bch2_bkey_buf_exit(&last_flushed, c);
1764 	bch_err_fn(c, ret);
1765 	return ret;
1766 }
1767 
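/*
 * Track buckets with discards in flight: returns
 * -BCH_ERR_EEXIST_discard_in_flight_add if the bucket is already queued, which
 * callers treat as "nothing to do":
 */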
discard_in_flight_add(struct bch_dev * ca,u64 bucket,bool in_progress)1768 static int discard_in_flight_add(struct bch_dev *ca, u64 bucket, bool in_progress)
1769 {
1770 	int ret;
1771 
1772 	mutex_lock(&ca->discard_buckets_in_flight_lock);
1773 	darray_for_each(ca->discard_buckets_in_flight, i)
1774 		if (i->bucket == bucket) {
1775 			ret = -BCH_ERR_EEXIST_discard_in_flight_add;
1776 			goto out;
1777 		}
1778 
1779 	ret = darray_push(&ca->discard_buckets_in_flight, ((struct discard_in_flight) {
1780 			   .in_progress = in_progress,
1781 			   .bucket	= bucket,
1782 	}));
1783 out:
1784 	mutex_unlock(&ca->discard_buckets_in_flight_lock);
1785 	return ret;
1786 }
1787 
discard_in_flight_remove(struct bch_dev * ca,u64 bucket)1788 static void discard_in_flight_remove(struct bch_dev *ca, u64 bucket)
1789 {
1790 	mutex_lock(&ca->discard_buckets_in_flight_lock);
1791 	darray_for_each(ca->discard_buckets_in_flight, i)
1792 		if (i->bucket == bucket) {
1793 			BUG_ON(!i->in_progress);
1794 			darray_remove_item(&ca->discard_buckets_in_flight, i);
1795 			goto found;
1796 		}
1797 	BUG();
1798 found:
1799 	mutex_unlock(&ca->discard_buckets_in_flight_lock);
1800 }
1801 
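/*
 * Counters for one discard pass, reported via the discard_buckets
 * tracepoints: buckets seen, skipped because they were open, skipped pending
 * a journal flush, and actually discarded:
 */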
1802 struct discard_buckets_state {
1803 	u64		seen;
1804 	u64		open;
1805 	u64		need_journal_commit;
1806 	u64		discarded;
1807 };
1808 
1809 /*
1810  * This is needed because discard is both a filesystem option and a device
1811  * option, and mount options are supposed to apply to that mount and not be
1812  * persisted, i.e. if it's set as a mount option we can't propagate it to the
1813  * device.
1814  */
discard_opt_enabled(struct bch_fs * c,struct bch_dev * ca)1815 static inline bool discard_opt_enabled(struct bch_fs *c, struct bch_dev *ca)
1816 {
1817 	return test_bit(BCH_FS_discard_mount_opt_set, &c->flags)
1818 		? c->opts.discard
1819 		: ca->mi.discard;
1820 }
1821 
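/*
 * Discard a single bucket from the need_discard btree: skip buckets that are
 * still open or whose journal sequence number hasn't been flushed, verify that
 * the alloc key really is in the need_discard state, issue the block layer
 * discard if enabled, then clear the NEED_DISCARD flag and commit:
 */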
bch2_discard_one_bucket(struct btree_trans * trans,struct bch_dev * ca,struct btree_iter * need_discard_iter,struct bpos * discard_pos_done,struct discard_buckets_state * s,bool fastpath)1822 static int bch2_discard_one_bucket(struct btree_trans *trans,
1823 				   struct bch_dev *ca,
1824 				   struct btree_iter *need_discard_iter,
1825 				   struct bpos *discard_pos_done,
1826 				   struct discard_buckets_state *s,
1827 				   bool fastpath)
1828 {
1829 	struct bch_fs *c = trans->c;
1830 	struct bpos pos = need_discard_iter->pos;
1831 	struct btree_iter iter = {};
1832 	struct bkey_s_c k;
1833 	struct bkey_i_alloc_v4 *a;
1834 	struct printbuf buf = PRINTBUF;
1835 	bool discard_locked = false;
1836 	int ret = 0;
1837 
1838 	if (bch2_bucket_is_open_safe(c, pos.inode, pos.offset)) {
1839 		s->open++;
1840 		goto out;
1841 	}
1842 
1843 	u64 seq_ready = bch2_bucket_journal_seq_ready(&c->buckets_waiting_for_journal,
1844 						      pos.inode, pos.offset);
1845 	if (seq_ready > c->journal.flushed_seq_ondisk) {
1846 		if (seq_ready > c->journal.flushing_seq)
1847 			s->need_journal_commit++;
1848 		goto out;
1849 	}
1850 
1851 	k = bch2_bkey_get_iter(trans, &iter, BTREE_ID_alloc,
1852 			       need_discard_iter->pos,
1853 			       BTREE_ITER_cached);
1854 	ret = bkey_err(k);
1855 	if (ret)
1856 		goto out;
1857 
1858 	a = bch2_alloc_to_v4_mut(trans, k);
1859 	ret = PTR_ERR_OR_ZERO(a);
1860 	if (ret)
1861 		goto out;
1862 
1863 	if (a->v.data_type != BCH_DATA_need_discard) {
1864 		if (need_discard_or_freespace_err(trans, k, true, true, true)) {
1865 			ret = bch2_btree_bit_mod_iter(trans, need_discard_iter, false);
1866 			if (ret)
1867 				goto out;
1868 			goto commit;
1869 		}
1870 
1871 		goto out;
1872 	}
1873 
1874 	if (!fastpath) {
1875 		if (discard_in_flight_add(ca, iter.pos.offset, true))
1876 			goto out;
1877 
1878 		discard_locked = true;
1879 	}
1880 
1881 	if (!bkey_eq(*discard_pos_done, iter.pos)) {
1882 		s->discarded++;
1883 		*discard_pos_done = iter.pos;
1884 
1885 		if (discard_opt_enabled(c, ca) && !c->opts.nochanges) {
1886 			/*
1887 			 * This works without any other locks because this is the only
1888 			 * thread that removes items from the need_discard btree
1889 			 */
1890 			bch2_trans_unlock_long(trans);
1891 			blkdev_issue_discard(ca->disk_sb.bdev,
1892 					     k.k->p.offset * ca->mi.bucket_size,
1893 					     ca->mi.bucket_size,
1894 					     GFP_KERNEL);
1895 			ret = bch2_trans_relock_notrace(trans);
1896 			if (ret)
1897 				goto out;
1898 		}
1899 	}
1900 
1901 	SET_BCH_ALLOC_V4_NEED_DISCARD(&a->v, false);
1902 	alloc_data_type_set(&a->v, a->v.data_type);
1903 
1904 	ret = bch2_trans_update(trans, &iter, &a->k_i, 0);
1905 	if (ret)
1906 		goto out;
1907 commit:
1908 	ret = bch2_trans_commit(trans, NULL, NULL,
1909 				BCH_WATERMARK_btree|
1910 				BCH_TRANS_COMMIT_no_enospc);
1911 	if (ret)
1912 		goto out;
1913 
1914 	if (!fastpath)
1915 		count_event(c, bucket_discard);
1916 	else
1917 		count_event(c, bucket_discard_fast);
1918 out:
1919 fsck_err:
1920 	if (discard_locked)
1921 		discard_in_flight_remove(ca, iter.pos.offset);
1922 	if (!ret)
1923 		s->seen++;
1924 	bch2_trans_iter_exit(trans, &iter);
1925 	printbuf_exit(&buf);
1926 	return ret;
1927 }
1928 
bch2_do_discards_work(struct work_struct * work)1929 static void bch2_do_discards_work(struct work_struct *work)
1930 {
1931 	struct bch_dev *ca = container_of(work, struct bch_dev, discard_work);
1932 	struct bch_fs *c = ca->fs;
1933 	struct discard_buckets_state s = {};
1934 	struct bpos discard_pos_done = POS_MAX;
1935 	int ret;
1936 
1937 	/*
1938 	 * We're doing the commit in bch2_discard_one_bucket instead of using
1939 	 * for_each_btree_key_commit() so that we can increment counters after
1940 	 * successful commit:
1941 	 */
1942 	ret = bch2_trans_run(c,
1943 		for_each_btree_key_max(trans, iter,
1944 				   BTREE_ID_need_discard,
1945 				   POS(ca->dev_idx, 0),
1946 				   POS(ca->dev_idx, U64_MAX), 0, k,
1947 			bch2_discard_one_bucket(trans, ca, &iter, &discard_pos_done, &s, false)));
1948 
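	/*
	 * If more buckets are blocked waiting on a journal flush than are
	 * currently available, kick off an async flush:
	 */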
1949 	if (s.need_journal_commit > dev_buckets_available(ca, BCH_WATERMARK_normal))
1950 		bch2_journal_flush_async(&c->journal, NULL);
1951 
1952 	trace_discard_buckets(c, s.seen, s.open, s.need_journal_commit, s.discarded,
1953 			      bch2_err_str(ret));
1954 
1955 	percpu_ref_put(&ca->io_ref[WRITE]);
1956 	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
1957 }
1958 
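/*
 * Queue the discard worker for @ca: the worker owns the filesystem write ref
 * and device io ref taken here, so we drop them ourselves if the work was
 * already queued:
 */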
bch2_dev_do_discards(struct bch_dev * ca)1959 void bch2_dev_do_discards(struct bch_dev *ca)
1960 {
1961 	struct bch_fs *c = ca->fs;
1962 
1963 	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard))
1964 		return;
1965 
1966 	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
1967 		goto put_write_ref;
1968 
1969 	if (queue_work(c->write_ref_wq, &ca->discard_work))
1970 		return;
1971 
1972 	percpu_ref_put(&ca->io_ref[WRITE]);
1973 put_write_ref:
1974 	bch2_write_ref_put(c, BCH_WRITE_REF_discard);
1975 }
1976 
bch2_do_discards(struct bch_fs * c)1977 void bch2_do_discards(struct bch_fs *c)
1978 {
1979 	for_each_member_device(c, ca)
1980 		bch2_dev_do_discards(ca);
1981 }
1982 
bch2_do_discards_fast_one(struct btree_trans * trans,struct bch_dev * ca,u64 bucket,struct bpos * discard_pos_done,struct discard_buckets_state * s)1983 static int bch2_do_discards_fast_one(struct btree_trans *trans,
1984 				     struct bch_dev *ca,
1985 				     u64 bucket,
1986 				     struct bpos *discard_pos_done,
1987 				     struct discard_buckets_state *s)
1988 {
1989 	struct btree_iter need_discard_iter;
1990 	struct bkey_s_c discard_k = bch2_bkey_get_iter(trans, &need_discard_iter,
1991 					BTREE_ID_need_discard, POS(ca->dev_idx, bucket), 0);
1992 	int ret = bkey_err(discard_k);
1993 	if (ret)
1994 		return ret;
1995 
1996 	if (log_fsck_err_on(discard_k.k->type != KEY_TYPE_set,
1997 			    trans, discarding_bucket_not_in_need_discard_btree,
1998 			    "attempting to discard bucket %u:%llu not in need_discard btree",
1999 			    ca->dev_idx, bucket))
2000 		goto out;
2001 
2002 	ret = bch2_discard_one_bucket(trans, ca, &need_discard_iter, discard_pos_done, s, true);
2003 out:
2004 fsck_err:
2005 	bch2_trans_iter_exit(trans, &need_discard_iter);
2006 	return ret;
2007 }
2008 
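/*
 * Drain buckets queued by bch2_discard_one_bucket_fast(): claim the next
 * bucket not already in progress, check that it's still in the need_discard
 * btree, and discard it:
 */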
bch2_do_discards_fast_work(struct work_struct * work)2009 static void bch2_do_discards_fast_work(struct work_struct *work)
2010 {
2011 	struct bch_dev *ca = container_of(work, struct bch_dev, discard_fast_work);
2012 	struct bch_fs *c = ca->fs;
2013 	struct discard_buckets_state s = {};
2014 	struct bpos discard_pos_done = POS_MAX;
2015 	struct btree_trans *trans = bch2_trans_get(c);
2016 	int ret = 0;
2017 
2018 	while (1) {
2019 		bool got_bucket = false;
2020 		u64 bucket;
2021 
2022 		mutex_lock(&ca->discard_buckets_in_flight_lock);
2023 		darray_for_each(ca->discard_buckets_in_flight, i) {
2024 			if (i->in_progress)
2025 				continue;
2026 
2027 			got_bucket = true;
2028 			bucket = i->bucket;
2029 			i->in_progress = true;
2030 			break;
2031 		}
2032 		mutex_unlock(&ca->discard_buckets_in_flight_lock);
2033 
2034 		if (!got_bucket)
2035 			break;
2036 
2037 		ret = lockrestart_do(trans,
2038 			bch2_do_discards_fast_one(trans, ca, bucket, &discard_pos_done, &s));
2039 		bch_err_fn(c, ret);
2040 
2041 		discard_in_flight_remove(ca, bucket);
2042 
2043 		if (ret)
2044 			break;
2045 	}
2046 
2047 	trace_discard_buckets_fast(c, s.seen, s.open, s.need_journal_commit, s.discarded, bch2_err_str(ret));
2048 
2049 	bch2_trans_put(trans);
2050 	percpu_ref_put(&ca->io_ref[WRITE]);
2051 	bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
2052 }
2053 
bch2_discard_one_bucket_fast(struct bch_dev * ca,u64 bucket)2054 static void bch2_discard_one_bucket_fast(struct bch_dev *ca, u64 bucket)
2055 {
2056 	struct bch_fs *c = ca->fs;
2057 
2058 	if (discard_in_flight_add(ca, bucket, false))
2059 		return;
2060 
2061 	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_discard_fast))
2062 		return;
2063 
2064 	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
2065 		goto put_ref;
2066 
2067 	if (queue_work(c->write_ref_wq, &ca->discard_fast_work))
2068 		return;
2069 
2070 	percpu_ref_put(&ca->io_ref[WRITE]);
2071 put_ref:
2072 	bch2_write_ref_put(c, BCH_WRITE_REF_discard_fast);
2073 }
2074 
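/*
 * Invalidate a single cached extent: look up the extent this backpointer
 * points to and drop this device's pointer from it:
 */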
invalidate_one_bp(struct btree_trans * trans,struct bch_dev * ca,struct bkey_s_c_backpointer bp,struct bkey_buf * last_flushed)2075 static int invalidate_one_bp(struct btree_trans *trans,
2076 			     struct bch_dev *ca,
2077 			     struct bkey_s_c_backpointer bp,
2078 			     struct bkey_buf *last_flushed)
2079 {
2080 	struct btree_iter extent_iter;
2081 	struct bkey_s_c extent_k =
2082 		bch2_backpointer_get_key(trans, bp, &extent_iter, 0, last_flushed);
2083 	int ret = bkey_err(extent_k);
2084 	if (ret)
2085 		return ret;
2086 
2087 	if (!extent_k.k)
2088 		return 0;
2089 
2090 	struct bkey_i *n =
2091 		bch2_bkey_make_mut(trans, &extent_iter, &extent_k,
2092 				   BTREE_UPDATE_internal_snapshot_node);
2093 	ret = PTR_ERR_OR_ZERO(n);
2094 	if (ret)
2095 		goto err;
2096 
2097 	bch2_bkey_drop_device(bkey_i_to_s(n), ca->dev_idx);
2098 err:
2099 	bch2_trans_iter_exit(trans, &extent_iter);
2100 	return ret;
2101 }
2102 
invalidate_one_bucket_by_bps(struct btree_trans * trans,struct bch_dev * ca,struct bpos bucket,u8 gen,struct bkey_buf * last_flushed)2103 static int invalidate_one_bucket_by_bps(struct btree_trans *trans,
2104 					struct bch_dev *ca,
2105 					struct bpos bucket,
2106 					u8 gen,
2107 					struct bkey_buf *last_flushed)
2108 {
2109 	struct bpos bp_start	= bucket_pos_to_bp_start(ca,	bucket);
2110 	struct bpos bp_end	= bucket_pos_to_bp_end(ca,	bucket);
2111 
2112 	return for_each_btree_key_max_commit(trans, iter, BTREE_ID_backpointers,
2113 				      bp_start, bp_end, 0, k,
2114 				      NULL, NULL,
2115 				      BCH_WATERMARK_btree|
2116 				      BCH_TRANS_COMMIT_no_enospc, ({
2117 		if (k.k->type != KEY_TYPE_backpointer)
2118 			continue;
2119 
2120 		struct bkey_s_c_backpointer bp = bkey_s_c_to_backpointer(k);
2121 
2122 		/* filter out bps with gens that don't match */
2123 		if (bp.v->bucket_gen != gen)
2124 			continue;
2125 
2126 		invalidate_one_bp(trans, ca, bp, last_flushed);
2128 	}));
2129 }
2130 
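/*
 * Invalidate one cached bucket from the LRU: drop this device's pointers from
 * every extent in the bucket (found via backpointers), after which it holds
 * no live data and can be reused:
 */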
2131 noinline_for_stack
invalidate_one_bucket(struct btree_trans * trans,struct bch_dev * ca,struct btree_iter * lru_iter,struct bkey_s_c lru_k,struct bkey_buf * last_flushed,s64 * nr_to_invalidate)2132 static int invalidate_one_bucket(struct btree_trans *trans,
2133 				 struct bch_dev *ca,
2134 				 struct btree_iter *lru_iter,
2135 				 struct bkey_s_c lru_k,
2136 				 struct bkey_buf *last_flushed,
2137 				 s64 *nr_to_invalidate)
2138 {
2139 	struct bch_fs *c = trans->c;
2140 	struct printbuf buf = PRINTBUF;
2141 	struct bpos bucket = u64_to_bucket(lru_k.k->p.offset);
2142 	struct btree_iter alloc_iter = {};
2143 	int ret = 0;
2144 
2145 	if (*nr_to_invalidate <= 0)
2146 		return 1;
2147 
2148 	if (!bch2_dev_bucket_exists(c, bucket)) {
2149 		if (fsck_err(trans, lru_entry_to_invalid_bucket,
2150 			     "lru key points to nonexistent device:bucket %llu:%llu",
2151 			     bucket.inode, bucket.offset))
2152 			return bch2_btree_bit_mod_buffered(trans, BTREE_ID_lru, lru_iter->pos, false);
2153 		goto out;
2154 	}
2155 
2156 	if (bch2_bucket_is_open_safe(c, bucket.inode, bucket.offset))
2157 		return 0;
2158 
2159 	struct bkey_s_c alloc_k = bch2_bkey_get_iter(trans, &alloc_iter,
2160 						     BTREE_ID_alloc, bucket,
2161 						     BTREE_ITER_cached);
2162 	ret = bkey_err(alloc_k);
2163 	if (ret)
2164 		return ret;
2165 
2166 	struct bch_alloc_v4 a_convert;
2167 	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(alloc_k, &a_convert);
2168 
2169 	/* We expect harmless races here due to the btree write buffer: */
2170 	if (lru_pos_time(lru_iter->pos) != alloc_lru_idx_read(*a))
2171 		goto out;
2172 
2173 	/*
2174 	 * Impossible since alloc_lru_idx_read() only returns nonzero if the
2175 	 * bucket is supposed to be on the cached bucket LRU (i.e.
2176 	 * BCH_DATA_cached)
2177 	 *
2178 	 * bch2_lru_validate() also disallows lru keys with lru_pos_time() == 0
2179 	 */
2180 	BUG_ON(a->data_type != BCH_DATA_cached);
2181 	BUG_ON(a->dirty_sectors);
2182 
2183 	if (!a->cached_sectors)
2184 		bch_err(c, "invalidating empty bucket, confused");
2185 
2186 	unsigned cached_sectors = a->cached_sectors;
2187 	u8 gen = a->gen;
2188 
2189 	ret = invalidate_one_bucket_by_bps(trans, ca, bucket, gen, last_flushed);
2190 	if (ret)
2191 		goto out;
2192 
2193 	trace_and_count(c, bucket_invalidate, c, bucket.inode, bucket.offset, cached_sectors);
2194 	--*nr_to_invalidate;
2195 out:
2196 fsck_err:
2197 	bch2_trans_iter_exit(trans, &alloc_iter);
2198 	printbuf_exit(&buf);
2199 	return ret;
2200 }
2201 
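/*
 * Peek the next LRU entry for @ca, wrapping around to the start of the
 * device's LRU keyspace once so that a scan starting partway through still
 * visits every entry:
 */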
next_lru_key(struct btree_trans * trans,struct btree_iter * iter,struct bch_dev * ca,bool * wrapped)2202 static struct bkey_s_c next_lru_key(struct btree_trans *trans, struct btree_iter *iter,
2203 				    struct bch_dev *ca, bool *wrapped)
2204 {
2205 	struct bkey_s_c k;
2206 again:
2207 	k = bch2_btree_iter_peek_max(trans, iter, lru_pos(ca->dev_idx, U64_MAX, LRU_TIME_MAX));
2208 	if (!k.k && !*wrapped) {
2209 		bch2_btree_iter_set_pos(trans, iter, lru_pos(ca->dev_idx, 0, 0));
2210 		*wrapped = true;
2211 		goto again;
2212 	}
2213 
2214 	return k;
2215 }
2216 
bch2_do_invalidates_work(struct work_struct * work)2217 static void bch2_do_invalidates_work(struct work_struct *work)
2218 {
2219 	struct bch_dev *ca = container_of(work, struct bch_dev, invalidate_work);
2220 	struct bch_fs *c = ca->fs;
2221 	struct btree_trans *trans = bch2_trans_get(c);
2222 	int ret = 0;
2223 
2224 	struct bkey_buf last_flushed;
2225 	bch2_bkey_buf_init(&last_flushed);
2226 	bkey_init(&last_flushed.k->k);
2227 
2228 	ret = bch2_btree_write_buffer_tryflush(trans);
2229 	if (ret)
2230 		goto err;
2231 
2232 	s64 nr_to_invalidate =
2233 		should_invalidate_buckets(ca, bch2_dev_usage_read(ca));
2234 	struct btree_iter iter;
2235 	bool wrapped = false;
2236 
2237 	bch2_trans_iter_init(trans, &iter, BTREE_ID_lru,
2238 			     lru_pos(ca->dev_idx, 0,
2239 				     ((bch2_current_io_time(c, READ) + U32_MAX) &
2240 				      LRU_TIME_MAX)), 0);
2241 
2242 	while (true) {
2243 		bch2_trans_begin(trans);
2244 
2245 		struct bkey_s_c k = next_lru_key(trans, &iter, ca, &wrapped);
2246 		ret = bkey_err(k);
2247 		if (ret)
2248 			goto restart_err;
2249 		if (!k.k)
2250 			break;
2251 
2252 		ret = invalidate_one_bucket(trans, ca, &iter, k, &last_flushed, &nr_to_invalidate);
2253 restart_err:
2254 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
2255 			continue;
2256 		if (ret)
2257 			break;
2258 
2259 		bch2_btree_iter_advance(trans, &iter);
2260 	}
2261 	bch2_trans_iter_exit(trans, &iter);
2262 err:
2263 	bch2_trans_put(trans);
2264 	percpu_ref_put(&ca->io_ref[WRITE]);
2265 	bch2_bkey_buf_exit(&last_flushed, c);
2266 	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
2267 }
2268 
bch2_dev_do_invalidates(struct bch_dev * ca)2269 void bch2_dev_do_invalidates(struct bch_dev *ca)
2270 {
2271 	struct bch_fs *c = ca->fs;
2272 
2273 	if (!bch2_write_ref_tryget(c, BCH_WRITE_REF_invalidate))
2274 		return;
2275 
2276 	if (!bch2_dev_get_ioref(c, ca->dev_idx, WRITE))
2277 		goto put_ref;
2278 
2279 	if (queue_work(c->write_ref_wq, &ca->invalidate_work))
2280 		return;
2281 
2282 	percpu_ref_put(&ca->io_ref[WRITE]);
2283 put_ref:
2284 	bch2_write_ref_put(c, BCH_WRITE_REF_invalidate);
2285 }
2286 
bch2_do_invalidates(struct bch_fs * c)2287 void bch2_do_invalidates(struct bch_fs *c)
2288 {
2289 	for_each_member_device(c, ca)
2290 		bch2_dev_do_invalidates(ca);
2291 }
2292 
bch2_dev_freespace_init(struct bch_fs * c,struct bch_dev * ca,u64 bucket_start,u64 bucket_end)2293 int bch2_dev_freespace_init(struct bch_fs *c, struct bch_dev *ca,
2294 			    u64 bucket_start, u64 bucket_end)
2295 {
2296 	struct btree_trans *trans = bch2_trans_get(c);
2297 	struct btree_iter iter;
2298 	struct bkey_s_c k;
2299 	struct bkey hole;
2300 	struct bpos end = POS(ca->dev_idx, bucket_end);
2301 	struct bch_member *m;
2302 	unsigned long last_updated = jiffies;
2303 	int ret;
2304 
2305 	BUG_ON(bucket_start > bucket_end);
2306 	BUG_ON(bucket_end > ca->mi.nbuckets);
2307 
2308 	bch2_trans_iter_init(trans, &iter, BTREE_ID_alloc,
2309 		POS(ca->dev_idx, max_t(u64, ca->mi.first_bucket, bucket_start)),
2310 		BTREE_ITER_prefetch);
2311 	/*
2312 	 * Scan the alloc btree for every bucket on @ca, and add buckets to the
2313 	 * freespace/need_discard/need_gc_gens btrees as needed:
2314 	 */
2315 	while (1) {
2316 		if (time_after(jiffies, last_updated + HZ * 10)) {
2317 			bch_info(ca, "%s: currently at %llu/%llu",
2318 				 __func__, iter.pos.offset, ca->mi.nbuckets);
2319 			last_updated = jiffies;
2320 		}
2321 
2322 		bch2_trans_begin(trans);
2323 
2324 		if (bkey_ge(iter.pos, end)) {
2325 			ret = 0;
2326 			break;
2327 		}
2328 
2329 		k = bch2_get_key_or_hole(trans, &iter, end, &hole);
2330 		ret = bkey_err(k);
2331 		if (ret)
2332 			goto bkey_err;
2333 
2334 		if (k.k->type) {
2335 			/*
2336 			 * We process live keys in the alloc btree one at a
2337 			 * time:
2338 			 */
2339 			struct bch_alloc_v4 a_convert;
2340 			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
2341 
2342 			ret =   bch2_bucket_do_index(trans, ca, k, a, true) ?:
2343 				bch2_trans_commit(trans, NULL, NULL,
2344 						  BCH_TRANS_COMMIT_no_enospc);
2345 			if (ret)
2346 				goto bkey_err;
2347 
2348 			bch2_btree_iter_advance(trans, &iter);
2349 		} else {
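			/*
			 * Holes are represented in the freespace btree by
			 * extent-style KEY_TYPE_set keys: one key whose size
			 * spans the hole marks every bucket in it free:
			 */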
2350 			struct bkey_i *freespace;
2351 
2352 			freespace = bch2_trans_kmalloc(trans, sizeof(*freespace));
2353 			ret = PTR_ERR_OR_ZERO(freespace);
2354 			if (ret)
2355 				goto bkey_err;
2356 
2357 			bkey_init(&freespace->k);
2358 			freespace->k.type	= KEY_TYPE_set;
2359 			freespace->k.p		= k.k->p;
2360 			freespace->k.size	= k.k->size;
2361 
2362 			ret = bch2_btree_insert_trans(trans, BTREE_ID_freespace, freespace, 0) ?:
2363 				bch2_trans_commit(trans, NULL, NULL,
2364 						  BCH_TRANS_COMMIT_no_enospc);
2365 			if (ret)
2366 				goto bkey_err;
2367 
2368 			bch2_btree_iter_set_pos(trans, &iter, k.k->p);
2369 		}
2370 bkey_err:
2371 		if (bch2_err_matches(ret, BCH_ERR_transaction_restart))
2372 			continue;
2373 		if (ret)
2374 			break;
2375 	}
2376 
2377 	bch2_trans_iter_exit(trans, &iter);
2378 	bch2_trans_put(trans);
2379 
2380 	if (ret < 0) {
2381 		bch_err_msg(ca, ret, "initializing free space");
2382 		return ret;
2383 	}
2384 
2385 	mutex_lock(&c->sb_lock);
2386 	m = bch2_members_v2_get_mut(c->disk_sb.sb, ca->dev_idx);
2387 	SET_BCH_MEMBER_FREESPACE_INITIALIZED(m, true);
2388 	mutex_unlock(&c->sb_lock);
2389 
2390 	return 0;
2391 }
2392 
bch2_fs_freespace_init(struct bch_fs * c)2393 int bch2_fs_freespace_init(struct bch_fs *c)
2394 {
2395 	int ret = 0;
2396 	bool doing_init = false;
2397 
2398 	/*
2399 	 * We can crash during the device add path, so we need to check this on
2400 	 * every mount:
2401 	 */
2402 
2403 	for_each_member_device(c, ca) {
2404 		if (ca->mi.freespace_initialized)
2405 			continue;
2406 
2407 		if (!doing_init) {
2408 			bch_info(c, "initializing freespace");
2409 			doing_init = true;
2410 		}
2411 
2412 		ret = bch2_dev_freespace_init(c, ca, 0, ca->mi.nbuckets);
2413 		if (ret) {
2414 			bch2_dev_put(ca);
2415 			bch_err_fn(c, ret);
2416 			return ret;
2417 		}
2418 	}
2419 
2420 	if (doing_init) {
2421 		mutex_lock(&c->sb_lock);
2422 		bch2_write_super(c);
2423 		mutex_unlock(&c->sb_lock);
2424 		bch_verbose(c, "done initializing freespace");
2425 	}
2426 
2427 	return 0;
2428 }
2429 
2430 /* device removal */
2431 
bch2_dev_remove_alloc(struct bch_fs * c,struct bch_dev * ca)2432 int bch2_dev_remove_alloc(struct bch_fs *c, struct bch_dev *ca)
2433 {
2434 	struct bpos start	= POS(ca->dev_idx, 0);
2435 	struct bpos end		= POS(ca->dev_idx, U64_MAX);
2436 	int ret;
2437 
2438 	/*
2439 	 * We clear the LRU and need_discard btrees first so that we don't race
2440 	 * with bch2_do_invalidates() and bch2_do_discards()
2441 	 */
2442 	ret =   bch2_dev_remove_stripes(c, ca->dev_idx) ?:
2443 		bch2_btree_delete_range(c, BTREE_ID_lru, start, end,
2444 					BTREE_TRIGGER_norun, NULL) ?:
2445 		bch2_btree_delete_range(c, BTREE_ID_need_discard, start, end,
2446 					BTREE_TRIGGER_norun, NULL) ?:
2447 		bch2_btree_delete_range(c, BTREE_ID_freespace, start, end,
2448 					BTREE_TRIGGER_norun, NULL) ?:
2449 		bch2_btree_delete_range(c, BTREE_ID_backpointers, start, end,
2450 					BTREE_TRIGGER_norun, NULL) ?:
2451 		bch2_btree_delete_range(c, BTREE_ID_bucket_gens, start, end,
2452 					BTREE_TRIGGER_norun, NULL) ?:
2453 		bch2_btree_delete_range(c, BTREE_ID_alloc, start, end,
2454 					BTREE_TRIGGER_norun, NULL) ?:
2455 		bch2_dev_usage_remove(c, ca->dev_idx);
2456 	bch_err_msg(ca, ret, "removing dev alloc info");
2457 	return ret;
2458 }
2459 
2460 /* Bucket IO clocks: */
2461 
__bch2_bucket_io_time_reset(struct btree_trans * trans,unsigned dev,size_t bucket_nr,int rw)2462 static int __bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
2463 				size_t bucket_nr, int rw)
2464 {
2465 	struct bch_fs *c = trans->c;
2466 
2467 	struct btree_iter iter;
2468 	struct bkey_i_alloc_v4 *a =
2469 		bch2_trans_start_alloc_update_noupdate(trans, &iter, POS(dev, bucket_nr));
2470 	int ret = PTR_ERR_OR_ZERO(a);
2471 	if (ret)
2472 		return ret;
2473 
2474 	u64 now = bch2_current_io_time(c, rw);
2475 	if (a->v.io_time[rw] == now)
2476 		goto out;
2477 
2478 	a->v.io_time[rw] = now;
2479 
2480 	ret   = bch2_trans_update(trans, &iter, &a->k_i, 0) ?:
2481 		bch2_trans_commit(trans, NULL, NULL, 0);
2482 out:
2483 	bch2_trans_iter_exit(trans, &iter);
2484 	return ret;
2485 }
2486 
bch2_bucket_io_time_reset(struct btree_trans * trans,unsigned dev,size_t bucket_nr,int rw)2487 int bch2_bucket_io_time_reset(struct btree_trans *trans, unsigned dev,
2488 			      size_t bucket_nr, int rw)
2489 {
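	/*
	 * bch2_trans_relock() returns an error if the relock failed; in that
	 * case, restart the transaction before doing the update:
	 */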
2490 	if (bch2_trans_relock(trans))
2491 		bch2_trans_begin(trans);
2492 
2493 	return nested_lockrestart_do(trans, __bch2_bucket_io_time_reset(trans, dev, bucket_nr, rw));
2494 }
2495 
2496 /* Startup/shutdown (ro/rw): */
2497 
bch2_recalc_capacity(struct bch_fs * c)2498 void bch2_recalc_capacity(struct bch_fs *c)
2499 {
2500 	u64 capacity = 0, reserved_sectors = 0, gc_reserve;
2501 	unsigned bucket_size_max = 0;
2502 	unsigned long ra_pages = 0;
2503 
2504 	lockdep_assert_held(&c->state_lock);
2505 
2506 	for_each_online_member(c, ca) {
2507 		struct backing_dev_info *bdi = ca->disk_sb.bdev->bd_disk->bdi;
2508 
2509 		ra_pages += bdi->ra_pages;
2510 	}
2511 
2512 	bch2_set_ra_pages(c, ra_pages);
2513 
2514 	__for_each_online_member(c, ca, BIT(BCH_MEMBER_STATE_rw), READ) {
2515 		u64 dev_reserve = 0;
2516 
2517 		/*
2518 		 * We need to reserve buckets (from the number
2519 		 * of currently available buckets) against
2520 		 * foreground writes so that mainly copygc can
2521 		 * make forward progress.
2522 		 *
2523 		 * We need enough to refill the various reserves
2524 		 * from scratch - copygc will use its entire
2525 		 * reserve all at once, then run again when
2526 		 * its reserve is refilled (from the formerly
2527 		 * available buckets).
2528 		 *
2529 		 * This reserve is only used when considering whether
2530 		 * allocations for foreground writes must wait - it is
2531 		 * not used for -ENOSPC calculations.
2532 		 */
2533 
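		/*
		 * For illustration, with hypothetical numbers: nbuckets = 2^20
		 * and nr_btree_reserve = 512, the sum below comes to
		 * 512 * 2 + 2^20 / 64 + 3 = 17411 buckets, ~1.7% of the
		 * device, before conversion to sectors:
		 */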
2534 		dev_reserve += ca->nr_btree_reserve * 2;
2535 		dev_reserve += ca->mi.nbuckets >> 6; /* copygc reserve */
2536 
2537 		dev_reserve += 1;	/* btree write point */
2538 		dev_reserve += 1;	/* copygc write point */
2539 		dev_reserve += 1;	/* rebalance write point */
2540 
2541 		dev_reserve *= ca->mi.bucket_size;
2542 
2543 		capacity += bucket_to_sector(ca, ca->mi.nbuckets -
2544 					     ca->mi.first_bucket);
2545 
2546 		reserved_sectors += dev_reserve * 2;
2547 
2548 		bucket_size_max = max_t(unsigned, bucket_size_max,
2549 					ca->mi.bucket_size);
2550 	}
2551 
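	/* gc_reserve_bytes is in bytes, hence the shift by 9 to sectors: */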
2552 	gc_reserve = c->opts.gc_reserve_bytes
2553 		? c->opts.gc_reserve_bytes >> 9
2554 		: div64_u64(capacity * c->opts.gc_reserve_percent, 100);
2555 
2556 	reserved_sectors = max(gc_reserve, reserved_sectors);
2557 
2558 	reserved_sectors = min(reserved_sectors, capacity);
2559 
2560 	c->reserved = reserved_sectors;
2561 	c->capacity = capacity - reserved_sectors;
2562 
2563 	c->bucket_size_max = bucket_size_max;
2564 
2565 	/* Wake up in case someone was waiting for buckets */
2566 	closure_wake_up(&c->freelist_wait);
2567 }
2568 
bch2_min_rw_member_capacity(struct bch_fs * c)2569 u64 bch2_min_rw_member_capacity(struct bch_fs *c)
2570 {
2571 	u64 ret = U64_MAX;
2572 
2573 	for_each_rw_member(c, ca)
2574 		ret = min(ret, ca->mi.nbuckets * ca->mi.bucket_size);
2575 	return ret;
2576 }
2577 
bch2_dev_has_open_write_point(struct bch_fs * c,struct bch_dev * ca)2578 static bool bch2_dev_has_open_write_point(struct bch_fs *c, struct bch_dev *ca)
2579 {
2580 	struct open_bucket *ob;
2581 	bool ret = false;
2582 
2583 	for (ob = c->open_buckets;
2584 	     ob < c->open_buckets + ARRAY_SIZE(c->open_buckets);
2585 	     ob++) {
2586 		spin_lock(&ob->lock);
2587 		if (ob->valid && !ob->on_partial_list &&
2588 		    ob->dev == ca->dev_idx)
2589 			ret = true;
2590 		spin_unlock(&ob->lock);
2591 	}
2592 
2593 	return ret;
2594 }
2595 
2596 /* device goes ro: */
bch2_dev_allocator_remove(struct bch_fs * c,struct bch_dev * ca)2597 void bch2_dev_allocator_remove(struct bch_fs *c, struct bch_dev *ca)
2598 {
2599 	lockdep_assert_held(&c->state_lock);
2600 
2601 	/* First, remove device from allocation groups: */
2602 
2603 	for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
2604 		clear_bit(ca->dev_idx, c->rw_devs[i].d);
2605 
2606 	c->rw_devs_change_count++;
2607 
2608 	/*
2609 	 * Capacity is calculated based on devices in allocation groups:
2610 	 */
2611 	bch2_recalc_capacity(c);
2612 
2613 	bch2_open_buckets_stop(c, ca, false);
2614 
2615 	/*
2616 	 * Wake up threads that were blocked on allocation, so they can notice
2617 	 * the device can no longer be removed and the capacity has changed:
2618 	 */
2619 	closure_wake_up(&c->freelist_wait);
2620 
2621 	/*
2622 	 * journal_res_get() can block waiting for free space in the journal -
2623 	 * it needs to notice there may not be devices to allocate from anymore:
2624 	 */
2625 	wake_up(&c->journal.wait);
2626 
2627 	/* Now wait for any in flight writes: */
2628 
2629 	closure_wait_event(&c->open_buckets_wait,
2630 			   !bch2_dev_has_open_write_point(c, ca));
2631 }
2632 
2633 /* device goes rw: */
bch2_dev_allocator_add(struct bch_fs * c,struct bch_dev * ca)2634 void bch2_dev_allocator_add(struct bch_fs *c, struct bch_dev *ca)
2635 {
2636 	lockdep_assert_held(&c->state_lock);
2637 
2638 	for (unsigned i = 0; i < ARRAY_SIZE(c->rw_devs); i++)
2639 		if (ca->mi.data_allowed & (1 << i))
2640 			set_bit(ca->dev_idx, c->rw_devs[i].d);
2641 
2642 	c->rw_devs_change_count++;
2643 }
2644 
bch2_dev_allocator_background_exit(struct bch_dev * ca)2645 void bch2_dev_allocator_background_exit(struct bch_dev *ca)
2646 {
2647 	darray_exit(&ca->discard_buckets_in_flight);
2648 }
2649 
bch2_dev_allocator_background_init(struct bch_dev * ca)2650 void bch2_dev_allocator_background_init(struct bch_dev *ca)
2651 {
2652 	mutex_init(&ca->discard_buckets_in_flight_lock);
2653 	INIT_WORK(&ca->discard_work, bch2_do_discards_work);
2654 	INIT_WORK(&ca->discard_fast_work, bch2_do_discards_fast_work);
2655 	INIT_WORK(&ca->invalidate_work, bch2_do_invalidates_work);
2656 }
2657 
bch2_fs_allocator_background_init(struct bch_fs * c)2658 void bch2_fs_allocator_background_init(struct bch_fs *c)
2659 {
2660 	spin_lock_init(&c->freelist_lock);
2661 }
2662