xref: /linux/fs/bcachefs/btree_gc.c (revision 72bea132f3680ee51e7ed2cee62892b6f5121909)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2010 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright (C) 2014 Datera Inc.
 */

#include "bcachefs.h"
#include "alloc_background.h"
#include "alloc_foreground.h"
#include "bkey_methods.h"
#include "bkey_buf.h"
#include "btree_journal_iter.h"
#include "btree_key_cache.h"
#include "btree_locking.h"
#include "btree_update_interior.h"
#include "btree_io.h"
#include "btree_gc.h"
#include "buckets.h"
#include "clock.h"
#include "debug.h"
#include "ec.h"
#include "error.h"
#include "extents.h"
#include "journal.h"
#include "keylist.h"
#include "move.h"
#include "recovery.h"
#include "reflink.h"
#include "replicas.h"
#include "super-io.h"
#include "trace.h"

#include <linux/slab.h>
#include <linux/bitops.h>
#include <linux/freezer.h>
#include <linux/kthread.h>
#include <linux/preempt.h>
#include <linux/rcupdate.h>
#include <linux/sched/task.h>

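/*
 * Positive sentinel return values used by the topology repair code below to
 * tell the caller to drop a btree node; kept distinct from (negative) error
 * codes.
 */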
#define DROP_THIS_NODE		10
#define DROP_PREV_NODE		11

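/*
 * Casts a read-only bkey_s_c to a mutable bkey_s by dropping const; "unsafe"
 * because the caller must guarantee the key is never actually written through
 * the mutable view (here it's only handed to the GC trigger path):
 */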
static struct bkey_s unsafe_bkey_s_c_to_s(struct bkey_s_c k)
{
	return (struct bkey_s) {{{
		(struct bkey *) k.k,
		(struct bch_val *) k.v
	}}};
}

static bool should_restart_for_topology_repair(struct bch_fs *c)
{
	return c->opts.fix_errors != FSCK_FIX_no &&
		!(c->recovery_passes_complete & BIT_ULL(BCH_RECOVERY_PASS_check_topology));
}

static inline void __gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
	preempt_disable();
	write_seqcount_begin(&c->gc_pos_lock);
	c->gc_pos = new_pos;
	write_seqcount_end(&c->gc_pos_lock);
	preempt_enable();
}

static inline void gc_pos_set(struct bch_fs *c, struct gc_pos new_pos)
{
	BUG_ON(gc_pos_cmp(new_pos, c->gc_pos) <= 0);
	__gc_pos_set(c, new_pos);
}
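
/*
 * The seqcount writes above pair with lockless readers; a minimal sketch of
 * the reader side (hypothetical helper, shown for illustration only - the
 * real readers live in the buckets code):
 */
static inline bool __maybe_unused gc_pos_visited(struct bch_fs *c,
						 struct gc_pos pos)
{
	unsigned seq;
	bool ret;

	do {
		seq = read_seqcount_begin(&c->gc_pos_lock);
		ret = gc_pos_cmp(pos, c->gc_pos) < 0;
	} while (read_seqcount_retry(&c->gc_pos_lock, seq));

	return ret;
}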

/*
 * Missing: if an interior btree node is empty, we need to do something -
 * perhaps just kill it
 */
static int bch2_gc_check_topology(struct bch_fs *c,
				  struct btree *b,
				  struct bkey_buf *prev,
				  struct bkey_buf cur,
				  bool is_last)
{
	struct bpos node_start	= b->data->min_key;
	struct bpos node_end	= b->data->max_key;
	struct bpos expected_start = bkey_deleted(&prev->k->k)
		? node_start
		: bpos_successor(prev->k->k.p);
	struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
	int ret = 0;

	if (cur.k->k.type == KEY_TYPE_btree_ptr_v2) {
		struct bkey_i_btree_ptr_v2 *bp = bkey_i_to_btree_ptr_v2(cur.k);

		if (!bpos_eq(expected_start, bp->v.min_key)) {
			bch2_topology_error(c);

			if (bkey_deleted(&prev->k->k)) {
				prt_printf(&buf1, "start of node: ");
				bch2_bpos_to_text(&buf1, node_start);
			} else {
				bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(prev->k));
			}
			bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(cur.k));

			if (__fsck_err(c,
				       FSCK_CAN_FIX|
				       FSCK_CAN_IGNORE|
				       FSCK_NO_RATELIMIT,
				       btree_node_topology_bad_min_key,
				       "btree node with incorrect min_key at btree %s level %u:\n"
				       "  prev %s\n"
				       "  cur %s",
				       bch2_btree_id_str(b->c.btree_id), b->c.level,
				       buf1.buf, buf2.buf) && should_restart_for_topology_repair(c)) {
				bch_info(c, "Halting mark and sweep to start topology repair pass");
				ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
				goto err;
			} else {
				set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
			}
		}
	}

	if (is_last && !bpos_eq(cur.k->k.p, node_end)) {
		bch2_topology_error(c);

		printbuf_reset(&buf1);
		printbuf_reset(&buf2);

		bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(cur.k));
		bch2_bpos_to_text(&buf2, node_end);

		if (__fsck_err(c, FSCK_CAN_FIX|FSCK_CAN_IGNORE|FSCK_NO_RATELIMIT,
			  btree_node_topology_bad_max_key,
			  "btree node with incorrect max_key at btree %s level %u:\n"
			  "  %s\n"
			  "  expected %s",
			  bch2_btree_id_str(b->c.btree_id), b->c.level,
			  buf1.buf, buf2.buf) &&
		    should_restart_for_topology_repair(c)) {
			bch_info(c, "Halting mark and sweep to start topology repair pass");
			ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
			goto err;
		} else {
			set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
		}
	}

	bch2_bkey_buf_copy(prev, c, cur.k);
err:
fsck_err:
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
}

static void btree_ptr_to_v2(struct btree *b, struct bkey_i_btree_ptr_v2 *dst)
{
	switch (b->key.k.type) {
	case KEY_TYPE_btree_ptr: {
		struct bkey_i_btree_ptr *src = bkey_i_to_btree_ptr(&b->key);

		dst->k.p		= src->k.p;
		dst->v.mem_ptr		= 0;
		dst->v.seq		= b->data->keys.seq;
		dst->v.sectors_written	= 0;
		dst->v.flags		= 0;
		dst->v.min_key		= b->data->min_key;
		set_bkey_val_bytes(&dst->k, sizeof(dst->v) + bkey_val_bytes(&src->k));
		memcpy(dst->v.start, src->v.start, bkey_val_bytes(&src->k));
		break;
	}
	case KEY_TYPE_btree_ptr_v2:
		bkey_copy(&dst->k_i, &b->key);
		break;
	default:
		BUG();
	}
}

static void bch2_btree_node_update_key_early(struct btree_trans *trans,
					     enum btree_id btree, unsigned level,
					     struct bkey_s_c old, struct bkey_i *new)
{
	struct bch_fs *c = trans->c;
	struct btree *b;
	struct bkey_buf tmp;
	int ret;

	bch2_bkey_buf_init(&tmp);
	bch2_bkey_buf_reassemble(&tmp, c, old);

	b = bch2_btree_node_get_noiter(trans, tmp.k, btree, level, true);
	if (!IS_ERR_OR_NULL(b)) {
		mutex_lock(&c->btree_cache.lock);

		bch2_btree_node_hash_remove(&c->btree_cache, b);

		bkey_copy(&b->key, new);
		ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
		BUG_ON(ret);

		mutex_unlock(&c->btree_cache.lock);
		six_unlock_read(&b->c.lock);
	}

	bch2_bkey_buf_exit(&tmp, c);
}

static int set_node_min(struct bch_fs *c, struct btree *b, struct bpos new_min)
{
	struct bkey_i_btree_ptr_v2 *new;
	int ret;

	new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
	if (!new)
		return -BCH_ERR_ENOMEM_gc_repair_key;

	btree_ptr_to_v2(b, new);
	b->data->min_key	= new_min;
	new->v.min_key		= new_min;
	SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);

	ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
	if (ret) {
		kfree(new);
		return ret;
	}

	bch2_btree_node_drop_keys_outside_node(b);
	bkey_copy(&b->key, &new->k_i);
	return 0;
}

static int set_node_max(struct bch_fs *c, struct btree *b, struct bpos new_max)
{
	struct bkey_i_btree_ptr_v2 *new;
	int ret;

	ret = bch2_journal_key_delete(c, b->c.btree_id, b->c.level + 1, b->key.k.p);
	if (ret)
		return ret;

	new = kmalloc_array(BKEY_BTREE_PTR_U64s_MAX, sizeof(u64), GFP_KERNEL);
	if (!new)
		return -BCH_ERR_ENOMEM_gc_repair_key;

	btree_ptr_to_v2(b, new);
	b->data->max_key	= new_max;
	new->k.p		= new_max;
	SET_BTREE_PTR_RANGE_UPDATED(&new->v, true);

	ret = bch2_journal_key_insert_take(c, b->c.btree_id, b->c.level + 1, &new->k_i);
	if (ret) {
		kfree(new);
		return ret;
	}

	bch2_btree_node_drop_keys_outside_node(b);

	mutex_lock(&c->btree_cache.lock);
	bch2_btree_node_hash_remove(&c->btree_cache, b);

	bkey_copy(&b->key, &new->k_i);
	ret = __bch2_btree_node_hash_insert(&c->btree_cache, b);
	BUG_ON(ret);
	mutex_unlock(&c->btree_cache.lock);
	return 0;
}
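
/*
 * Note the asymmetry with set_node_min(): changing max_key changes the node's
 * position (b->key.k.p), which is what both the journal keys and the btree
 * node hash table are indexed by - hence the delete/reinsert of the journal
 * key and the rehash above.
 */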

static int btree_repair_node_boundaries(struct bch_fs *c, struct btree *b,
					struct btree *prev, struct btree *cur)
{
	struct bpos expected_start = !prev
		? b->data->min_key
		: bpos_successor(prev->key.k.p);
	struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
	int ret = 0;

	if (!prev) {
		prt_printf(&buf1, "start of node: ");
		bch2_bpos_to_text(&buf1, b->data->min_key);
	} else {
		bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&prev->key));
	}

	bch2_bkey_val_to_text(&buf2, c, bkey_i_to_s_c(&cur->key));

	if (prev &&
	    bpos_gt(expected_start, cur->data->min_key) &&
	    BTREE_NODE_SEQ(cur->data) > BTREE_NODE_SEQ(prev->data)) {
		/* cur overwrites prev: */

		if (mustfix_fsck_err_on(bpos_ge(prev->data->min_key,
						cur->data->min_key), c,
				btree_node_topology_overwritten_by_next_node,
				"btree node overwritten by next node at btree %s level %u:\n"
				"  node %s\n"
				"  next %s",
				bch2_btree_id_str(b->c.btree_id), b->c.level,
				buf1.buf, buf2.buf)) {
			ret = DROP_PREV_NODE;
			goto out;
		}

		if (mustfix_fsck_err_on(!bpos_eq(prev->key.k.p,
						 bpos_predecessor(cur->data->min_key)), c,
				btree_node_topology_bad_max_key,
				"btree node with incorrect max_key at btree %s level %u:\n"
				"  node %s\n"
				"  next %s",
				bch2_btree_id_str(b->c.btree_id), b->c.level,
				buf1.buf, buf2.buf))
			ret = set_node_max(c, prev,
					   bpos_predecessor(cur->data->min_key));
	} else {
		/* prev overwrites cur: */

		if (mustfix_fsck_err_on(bpos_ge(expected_start,
						cur->data->max_key), c,
				btree_node_topology_overwritten_by_prev_node,
				"btree node overwritten by prev node at btree %s level %u:\n"
				"  prev %s\n"
				"  node %s",
				bch2_btree_id_str(b->c.btree_id), b->c.level,
				buf1.buf, buf2.buf)) {
			ret = DROP_THIS_NODE;
			goto out;
		}

		if (mustfix_fsck_err_on(!bpos_eq(expected_start, cur->data->min_key), c,
				btree_node_topology_bad_min_key,
				"btree node with incorrect min_key at btree %s level %u:\n"
				"  prev %s\n"
				"  node %s",
				bch2_btree_id_str(b->c.btree_id), b->c.level,
				buf1.buf, buf2.buf))
			ret = set_node_min(c, cur, expected_start);
	}
out:
fsck_err:
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
}

static int btree_repair_node_end(struct bch_fs *c, struct btree *b,
				 struct btree *child)
{
	struct printbuf buf1 = PRINTBUF, buf2 = PRINTBUF;
	int ret = 0;

	bch2_bkey_val_to_text(&buf1, c, bkey_i_to_s_c(&child->key));
	bch2_bpos_to_text(&buf2, b->key.k.p);

	if (mustfix_fsck_err_on(!bpos_eq(child->key.k.p, b->key.k.p), c,
				btree_node_topology_bad_max_key,
			"btree node with incorrect max_key at btree %s level %u:\n"
			"  %s\n"
			"  expected %s",
			bch2_btree_id_str(b->c.btree_id), b->c.level,
			buf1.buf, buf2.buf)) {
		ret = set_node_max(c, child, b->key.k.p);
		if (ret)
			goto err;
	}
err:
fsck_err:
	printbuf_exit(&buf2);
	printbuf_exit(&buf1);
	return ret;
}

static int bch2_btree_repair_topology_recurse(struct btree_trans *trans, struct btree *b)
{
	struct bch_fs *c = trans->c;
	struct btree_and_journal_iter iter;
	struct bkey_s_c k;
	struct bkey_buf prev_k, cur_k;
	struct btree *prev = NULL, *cur = NULL;
	bool have_child, dropped_children = false;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	if (!b->c.level)
		return 0;
again:
	prev = NULL;
	have_child = dropped_children = false;
	bch2_bkey_buf_init(&prev_k);
	bch2_bkey_buf_init(&cur_k);
	bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
	iter.prefetch = true;

	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
		BUG_ON(bpos_gt(k.k->p, b->data->max_key));

		bch2_btree_and_journal_iter_advance(&iter);
		bch2_bkey_buf_reassemble(&cur_k, c, k);

		cur = bch2_btree_node_get_noiter(trans, cur_k.k,
					b->c.btree_id, b->c.level - 1,
					false);
		ret = PTR_ERR_OR_ZERO(cur);

		printbuf_reset(&buf);
		bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur_k.k));

		if (mustfix_fsck_err_on(bch2_err_matches(ret, EIO), c,
				btree_node_unreadable,
				"Topology repair: unreadable btree node at btree %s level %u:\n"
				"  %s",
				bch2_btree_id_str(b->c.btree_id),
				b->c.level - 1,
				buf.buf)) {
			bch2_btree_node_evict(trans, cur_k.k);
			ret = bch2_journal_key_delete(c, b->c.btree_id,
						      b->c.level, cur_k.k->k.p);
			cur = NULL;
			if (ret)
				break;
			continue;
		}

		bch_err_msg(c, ret, "getting btree node");
		if (ret)
			break;

		ret = btree_repair_node_boundaries(c, b, prev, cur);

		if (ret == DROP_THIS_NODE) {
			six_unlock_read(&cur->c.lock);
			bch2_btree_node_evict(trans, cur_k.k);
			ret = bch2_journal_key_delete(c, b->c.btree_id,
						      b->c.level, cur_k.k->k.p);
			cur = NULL;
			if (ret)
				break;
			continue;
		}

		if (prev)
			six_unlock_read(&prev->c.lock);
		prev = NULL;

		if (ret == DROP_PREV_NODE) {
			bch2_btree_node_evict(trans, prev_k.k);
			ret = bch2_journal_key_delete(c, b->c.btree_id,
						      b->c.level, prev_k.k->k.p);
			if (ret)
				break;

			bch2_btree_and_journal_iter_exit(&iter);
			bch2_bkey_buf_exit(&prev_k, c);
			bch2_bkey_buf_exit(&cur_k, c);
			goto again;
		} else if (ret)
			break;

		prev = cur;
		cur = NULL;
		bch2_bkey_buf_copy(&prev_k, c, cur_k.k);
	}

	if (!ret && !IS_ERR_OR_NULL(prev)) {
		BUG_ON(cur);
		ret = btree_repair_node_end(c, b, prev);
	}

	if (!IS_ERR_OR_NULL(prev))
		six_unlock_read(&prev->c.lock);
	prev = NULL;
	if (!IS_ERR_OR_NULL(cur))
		six_unlock_read(&cur->c.lock);
	cur = NULL;

	if (ret)
		goto err;

	bch2_btree_and_journal_iter_exit(&iter);
	bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
	iter.prefetch = true;

	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		bch2_bkey_buf_reassemble(&cur_k, c, k);
		bch2_btree_and_journal_iter_advance(&iter);

		cur = bch2_btree_node_get_noiter(trans, cur_k.k,
					b->c.btree_id, b->c.level - 1,
					false);
		ret = PTR_ERR_OR_ZERO(cur);

		bch_err_msg(c, ret, "getting btree node");
		if (ret)
			goto err;

		ret = bch2_btree_repair_topology_recurse(trans, cur);
		six_unlock_read(&cur->c.lock);
		cur = NULL;

		if (ret == DROP_THIS_NODE) {
			bch2_btree_node_evict(trans, cur_k.k);
			ret = bch2_journal_key_delete(c, b->c.btree_id,
						      b->c.level, cur_k.k->k.p);
			dropped_children = true;
		}

		if (ret)
			goto err;

		have_child = true;
	}

	printbuf_reset(&buf);
	bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(&b->key));

	if (mustfix_fsck_err_on(!have_child, c,
			btree_node_topology_interior_node_empty,
			"empty interior btree node at btree %s level %u\n"
			"  %s",
			bch2_btree_id_str(b->c.btree_id),
			b->c.level, buf.buf))
		ret = DROP_THIS_NODE;
err:
fsck_err:
	if (!IS_ERR_OR_NULL(prev))
		six_unlock_read(&prev->c.lock);
	if (!IS_ERR_OR_NULL(cur))
		six_unlock_read(&cur->c.lock);

	bch2_btree_and_journal_iter_exit(&iter);
	bch2_bkey_buf_exit(&prev_k, c);
	bch2_bkey_buf_exit(&cur_k, c);

	if (!ret && dropped_children)
		goto again;

	printbuf_exit(&buf);
	return ret;
}

int bch2_check_topology(struct bch_fs *c)
{
	struct btree_trans *trans = bch2_trans_get(c);
	struct btree *b;
	unsigned i;
	int ret = 0;

	for (i = 0; i < btree_id_nr_alive(c) && !ret; i++) {
		struct btree_root *r = bch2_btree_id_root(c, i);

		if (!r->alive)
			continue;

		b = r->b;
		if (btree_node_fake(b))
			continue;

		btree_node_lock_nopath_nofail(trans, &b->c, SIX_LOCK_read);
		ret = bch2_btree_repair_topology_recurse(trans, b);
		six_unlock_read(&b->c.lock);

		if (ret == DROP_THIS_NODE) {
			bch_err(c, "empty btree root - repair unimplemented");
			ret = -BCH_ERR_fsck_repair_unimplemented;
		}
	}

	bch2_trans_put(trans);

	return ret;
}

static int bch2_check_fix_ptrs(struct btree_trans *trans, enum btree_id btree_id,
			       unsigned level, bool is_root,
			       struct bkey_s_c *k)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs_c = bch2_bkey_ptrs_c(*k);
	const union bch_extent_entry *entry_c;
	struct extent_ptr_decoded p = { 0 };
	bool do_update = false;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	/*
	 * XXX
	 * use check_bucket_ref here
	 */
	bkey_for_each_ptr_decode(k->k, ptrs_c, p, entry_c) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, p.ptr.dev);
		struct bucket *g = PTR_GC_BUCKET(ca, &p.ptr);
		enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, &entry_c->ptr);

		if (fsck_err_on(!g->gen_valid,
				c, ptr_to_missing_alloc_key,
				"bucket %u:%zu data type %s ptr gen %u missing in alloc btree\n"
				"while marking %s",
				p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
				bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
				p.ptr.gen,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
			if (!p.ptr.cached) {
				g->gen_valid		= true;
				g->gen			= p.ptr.gen;
			} else {
				do_update = true;
			}
		}

		if (fsck_err_on(gen_cmp(p.ptr.gen, g->gen) > 0,
				c, ptr_gen_newer_than_bucket_gen,
				"bucket %u:%zu data type %s ptr gen in the future: %u > %u\n"
				"while marking %s",
				p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
				bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
				p.ptr.gen, g->gen,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
			if (!p.ptr.cached) {
				g->gen_valid		= true;
				g->gen			= p.ptr.gen;
				g->data_type		= 0;
				g->dirty_sectors	= 0;
				g->cached_sectors	= 0;
				set_bit(BCH_FS_need_another_gc, &c->flags);
			} else {
				do_update = true;
			}
		}

		if (fsck_err_on(gen_cmp(g->gen, p.ptr.gen) > BUCKET_GC_GEN_MAX,
				c, ptr_gen_newer_than_bucket_gen,
				"bucket %u:%zu gen %u data type %s: ptr gen %u too stale\n"
				"while marking %s",
				p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr), g->gen,
				bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
				p.ptr.gen,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
			do_update = true;

		if (fsck_err_on(!p.ptr.cached && gen_cmp(p.ptr.gen, g->gen) < 0,
				c, stale_dirty_ptr,
				"bucket %u:%zu data type %s stale dirty ptr: %u < %u\n"
				"while marking %s",
				p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
				bch2_data_type_str(ptr_data_type(k->k, &p.ptr)),
				p.ptr.gen, g->gen,
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
			do_update = true;

		if (data_type != BCH_DATA_btree && p.ptr.gen != g->gen)
			continue;

		if (fsck_err_on(bucket_data_type(g->data_type) &&
				bucket_data_type(g->data_type) != data_type, c,
				ptr_bucket_data_type_mismatch,
				"bucket %u:%zu different types of data in same bucket: %s, %s\n"
				"while marking %s",
				p.ptr.dev, PTR_BUCKET_NR(ca, &p.ptr),
				bch2_data_type_str(g->data_type),
				bch2_data_type_str(data_type),
				(printbuf_reset(&buf),
				 bch2_bkey_val_to_text(&buf, c, *k), buf.buf))) {
			if (data_type == BCH_DATA_btree) {
				g->data_type	= data_type;
				set_bit(BCH_FS_need_another_gc, &c->flags);
			} else {
				do_update = true;
			}
		}

		if (p.has_ec) {
			struct gc_stripe *m = genradix_ptr(&c->gc_stripes, p.ec.idx);

			if (fsck_err_on(!m || !m->alive, c,
					ptr_to_missing_stripe,
					"pointer to nonexistent stripe %llu\n"
					"while marking %s",
					(u64) p.ec.idx,
					(printbuf_reset(&buf),
					 bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
				do_update = true;

			if (fsck_err_on(m && m->alive && !bch2_ptr_matches_stripe_m(m, p), c,
					ptr_to_incorrect_stripe,
					"pointer does not match stripe %llu\n"
					"while marking %s",
					(u64) p.ec.idx,
					(printbuf_reset(&buf),
					 bch2_bkey_val_to_text(&buf, c, *k), buf.buf)))
				do_update = true;
		}
	}

	if (do_update) {
		struct bkey_ptrs ptrs;
		union bch_extent_entry *entry;
		struct bch_extent_ptr *ptr;
		struct bkey_i *new;

		if (is_root) {
			bch_err(c, "cannot update btree roots yet");
			ret = -EINVAL;
			goto err;
		}

		new = kmalloc(bkey_bytes(k->k), GFP_KERNEL);
		if (!new) {
			ret = -BCH_ERR_ENOMEM_gc_repair_key;
			bch_err_msg(c, ret, "allocating new key");
			goto err;
		}

		bkey_reassemble(new, *k);

		if (level) {
			/*
			 * We don't want to drop btree node pointers - if the
			 * btree node isn't there anymore, the read path will
			 * sort it out:
			 */
			ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			bkey_for_each_ptr(ptrs, ptr) {
				struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
				struct bucket *g = PTR_GC_BUCKET(ca, ptr);

				ptr->gen = g->gen;
			}
		} else {
			bch2_bkey_drop_ptrs(bkey_i_to_s(new), ptr, ({
				struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
				struct bucket *g = PTR_GC_BUCKET(ca, ptr);
				enum bch_data_type data_type = bch2_bkey_ptr_data_type(*k, ptr);

				(ptr->cached &&
				 (!g->gen_valid || gen_cmp(ptr->gen, g->gen) > 0)) ||
				(!ptr->cached &&
				 gen_cmp(ptr->gen, g->gen) < 0) ||
				gen_cmp(g->gen, ptr->gen) > BUCKET_GC_GEN_MAX ||
				(g->data_type &&
				 g->data_type != data_type);
			}));
again:
			ptrs = bch2_bkey_ptrs(bkey_i_to_s(new));
			bkey_extent_entry_for_each(ptrs, entry) {
				if (extent_entry_type(entry) == BCH_EXTENT_ENTRY_stripe_ptr) {
					struct gc_stripe *m = genradix_ptr(&c->gc_stripes,
									entry->stripe_ptr.idx);
					union bch_extent_entry *next_ptr;

					bkey_extent_entry_for_each_from(ptrs, next_ptr, entry)
						if (extent_entry_type(next_ptr) == BCH_EXTENT_ENTRY_ptr)
							goto found;
					next_ptr = NULL;
found:
					if (!next_ptr) {
						bch_err(c, "aieee, found stripe ptr with no data ptr");
						continue;
					}

					if (!m || !m->alive ||
					    !__bch2_ptr_matches_stripe(&m->ptrs[entry->stripe_ptr.block],
								       &next_ptr->ptr,
								       m->sectors)) {
						bch2_bkey_extent_entry_drop(new, entry);
						goto again;
					}
				}
			}
		}

		ret = bch2_journal_key_insert_take(c, btree_id, level, new);
		if (ret) {
			kfree(new);
			goto err;
		}

		if (level)
			bch2_btree_node_update_key_early(trans, btree_id, level - 1, *k, new);

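		/* compile-time disabled debug output: */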
		if (0) {
			printbuf_reset(&buf);
			bch2_bkey_val_to_text(&buf, c, *k);
			bch_info(c, "updated %s", buf.buf);

			printbuf_reset(&buf);
			bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(new));
			bch_info(c, "new key %s", buf.buf);
		}

		*k = bkey_i_to_s_c(new);
	}
err:
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

/* marking of btree keys/nodes: */

static int bch2_gc_mark_key(struct btree_trans *trans, enum btree_id btree_id,
			    unsigned level, bool is_root,
			    struct bkey_s_c *k,
			    bool initial)
{
	struct bch_fs *c = trans->c;
	struct bkey deleted = KEY(0, 0, 0);
	struct bkey_s_c old = (struct bkey_s_c) { &deleted, NULL };
	int ret = 0;

	deleted.p = k->k->p;

	if (initial) {
		BUG_ON(bch2_journal_seq_verify &&
		       k->k->version.lo > atomic64_read(&c->journal.seq));

		ret = bch2_check_fix_ptrs(trans, btree_id, level, is_root, k);
		if (ret)
			goto err;

		if (fsck_err_on(k->k->version.lo > atomic64_read(&c->key_version), c,
				bkey_version_in_future,
				"key version number higher than recorded: %llu > %llu",
				k->k->version.lo,
				atomic64_read(&c->key_version)))
			atomic64_set(&c->key_version, k->k->version.lo);
	}

	ret = commit_do(trans, NULL, NULL, 0,
			bch2_key_trigger(trans, btree_id, level, old, unsafe_bkey_s_c_to_s(*k), BTREE_TRIGGER_GC));
fsck_err:
err:
	bch_err_fn(c, ret);
	return ret;
}

static int btree_gc_mark_node(struct btree_trans *trans, struct btree *b, bool initial)
{
	struct bch_fs *c = trans->c;
	struct btree_node_iter iter;
	struct bkey unpacked;
	struct bkey_s_c k;
	struct bkey_buf prev, cur;
	int ret = 0;

	if (!btree_node_type_needs_gc(btree_node_type(b)))
		return 0;

	bch2_btree_node_iter_init_from_start(&iter, b);
	bch2_bkey_buf_init(&prev);
	bch2_bkey_buf_init(&cur);
	bkey_init(&prev.k->k);

	while ((k = bch2_btree_node_iter_peek_unpack(&iter, b, &unpacked)).k) {
		ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level, false,
				       &k, initial);
		if (ret)
			break;

		bch2_btree_node_iter_advance(&iter, b);

		if (b->c.level) {
			bch2_bkey_buf_reassemble(&cur, c, k);

			ret = bch2_gc_check_topology(c, b, &prev, cur,
					bch2_btree_node_iter_end(&iter));
			if (ret)
				break;
		}
	}

	bch2_bkey_buf_exit(&cur, c);
	bch2_bkey_buf_exit(&prev, c);
	return ret;
}

static int bch2_gc_btree(struct btree_trans *trans, enum btree_id btree_id,
			 bool initial, bool metadata_only)
{
	struct bch_fs *c = trans->c;
	struct btree_iter iter;
	struct btree *b;
	unsigned depth = metadata_only ? 1 : 0;
	int ret = 0;

	gc_pos_set(c, gc_pos_btree(btree_id, POS_MIN, 0));

	__for_each_btree_node(trans, iter, btree_id, POS_MIN,
			      0, depth, BTREE_ITER_PREFETCH, b, ret) {
		bch2_verify_btree_nr_keys(b);

		gc_pos_set(c, gc_pos_btree_node(b));

		ret = btree_gc_mark_node(trans, b, initial);
		if (ret)
			break;
	}
	bch2_trans_iter_exit(trans, &iter);

	if (ret)
		return ret;

	mutex_lock(&c->btree_root_lock);
	b = bch2_btree_id_root(c, btree_id)->b;
	if (!btree_node_fake(b)) {
		struct bkey_s_c k = bkey_i_to_s_c(&b->key);

		ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level + 1,
				       true, &k, initial);
	}
	gc_pos_set(c, gc_pos_btree_root(b->c.btree_id));
	mutex_unlock(&c->btree_root_lock);

	return ret;
}

static int bch2_gc_btree_init_recurse(struct btree_trans *trans, struct btree *b,
				      unsigned target_depth)
{
	struct bch_fs *c = trans->c;
	struct btree_and_journal_iter iter;
	struct bkey_s_c k;
	struct bkey_buf cur, prev;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
	bch2_bkey_buf_init(&prev);
	bch2_bkey_buf_init(&cur);
	bkey_init(&prev.k->k);

	while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
		BUG_ON(bpos_lt(k.k->p, b->data->min_key));
		BUG_ON(bpos_gt(k.k->p, b->data->max_key));

		ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level,
				       false, &k, true);
		if (ret)
			goto fsck_err;

		if (b->c.level) {
			bch2_bkey_buf_reassemble(&cur, c, k);
			k = bkey_i_to_s_c(cur.k);

			bch2_btree_and_journal_iter_advance(&iter);

			ret = bch2_gc_check_topology(c, b,
					&prev, cur,
					!bch2_btree_and_journal_iter_peek(&iter).k);
			if (ret)
				goto fsck_err;
		} else {
			bch2_btree_and_journal_iter_advance(&iter);
		}
	}

	if (b->c.level > target_depth) {
		bch2_btree_and_journal_iter_exit(&iter);
		bch2_btree_and_journal_iter_init_node_iter(trans, &iter, b);
		iter.prefetch = true;

		while ((k = bch2_btree_and_journal_iter_peek(&iter)).k) {
			struct btree *child;

			bch2_bkey_buf_reassemble(&cur, c, k);
			bch2_btree_and_journal_iter_advance(&iter);

			child = bch2_btree_node_get_noiter(trans, cur.k,
						b->c.btree_id, b->c.level - 1,
						false);
			ret = PTR_ERR_OR_ZERO(child);

			if (bch2_err_matches(ret, EIO)) {
				bch2_topology_error(c);

				if (__fsck_err(c,
					  FSCK_CAN_FIX|
					  FSCK_CAN_IGNORE|
					  FSCK_NO_RATELIMIT,
					  btree_node_read_error,
					  "Unreadable btree node at btree %s level %u:\n"
					  "  %s",
					  bch2_btree_id_str(b->c.btree_id),
					  b->c.level - 1,
					  (printbuf_reset(&buf),
					   bch2_bkey_val_to_text(&buf, c, bkey_i_to_s_c(cur.k)), buf.buf)) &&
				    should_restart_for_topology_repair(c)) {
					bch_info(c, "Halting mark and sweep to start topology repair pass");
					ret = bch2_run_explicit_recovery_pass(c, BCH_RECOVERY_PASS_check_topology);
					goto fsck_err;
				} else {
					/* Continue marking when opted to not
					 * fix the error: */
					ret = 0;
					set_bit(BCH_FS_initial_gc_unfixed, &c->flags);
					continue;
				}
			} else if (ret) {
				bch_err_msg(c, ret, "getting btree node");
				break;
			}

			ret = bch2_gc_btree_init_recurse(trans, child,
							 target_depth);
			six_unlock_read(&child->c.lock);

			if (ret)
				break;
		}
	}
fsck_err:
	bch2_bkey_buf_exit(&cur, c);
	bch2_bkey_buf_exit(&prev, c);
	bch2_btree_and_journal_iter_exit(&iter);
	printbuf_exit(&buf);
	return ret;
}

static int bch2_gc_btree_init(struct btree_trans *trans,
			      enum btree_id btree_id,
			      bool metadata_only)
{
	struct bch_fs *c = trans->c;
	struct btree *b;
	unsigned target_depth = metadata_only ? 1 : 0;
	struct printbuf buf = PRINTBUF;
	int ret = 0;

	b = bch2_btree_id_root(c, btree_id)->b;

	if (btree_node_fake(b))
		return 0;

	six_lock_read(&b->c.lock, NULL, NULL);
	printbuf_reset(&buf);
	bch2_bpos_to_text(&buf, b->data->min_key);
	if (mustfix_fsck_err_on(!bpos_eq(b->data->min_key, POS_MIN), c,
				btree_root_bad_min_key,
			"btree root with incorrect min_key: %s", buf.buf)) {
		bch_err(c, "repair unimplemented");
		ret = -BCH_ERR_fsck_repair_unimplemented;
		goto fsck_err;
	}

	printbuf_reset(&buf);
	bch2_bpos_to_text(&buf, b->data->max_key);
	if (mustfix_fsck_err_on(!bpos_eq(b->data->max_key, SPOS_MAX), c,
				btree_root_bad_max_key,
			"btree root with incorrect max_key: %s", buf.buf)) {
		bch_err(c, "repair unimplemented");
		ret = -BCH_ERR_fsck_repair_unimplemented;
		goto fsck_err;
	}

	if (b->c.level >= target_depth)
		ret = bch2_gc_btree_init_recurse(trans, b, target_depth);

	if (!ret) {
		struct bkey_s_c k = bkey_i_to_s_c(&b->key);

		ret = bch2_gc_mark_key(trans, b->c.btree_id, b->c.level + 1, true,
				       &k, true);
	}
fsck_err:
	six_unlock_read(&b->c.lock);

	bch_err_fn(c, ret);
	printbuf_exit(&buf);
	return ret;
}

static inline int btree_id_gc_phase_cmp(enum btree_id l, enum btree_id r)
{
	return  (int) btree_id_to_gc_phase(l) -
		(int) btree_id_to_gc_phase(r);
}

static int bch2_gc_btrees(struct bch_fs *c, bool initial, bool metadata_only)
{
	struct btree_trans *trans = bch2_trans_get(c);
	enum btree_id ids[BTREE_ID_NR];
	unsigned i;
	int ret = 0;

	for (i = 0; i < BTREE_ID_NR; i++)
		ids[i] = i;
	bubble_sort(ids, BTREE_ID_NR, btree_id_gc_phase_cmp);

	for (i = 0; i < BTREE_ID_NR && !ret; i++)
		ret = initial
			? bch2_gc_btree_init(trans, ids[i], metadata_only)
			: bch2_gc_btree(trans, ids[i], initial, metadata_only);

	for (i = BTREE_ID_NR; i < btree_id_nr_alive(c) && !ret; i++) {
		if (!bch2_btree_id_root(c, i)->alive)
			continue;

		ret = initial
			? bch2_gc_btree_init(trans, i, metadata_only)
			: bch2_gc_btree(trans, i, initial, metadata_only);
	}

	bch2_trans_put(trans);
	bch_err_fn(c, ret);
	return ret;
}

static void mark_metadata_sectors(struct bch_fs *c, struct bch_dev *ca,
				  u64 start, u64 end,
				  enum bch_data_type type,
				  unsigned flags)
{
	u64 b = sector_to_bucket(ca, start);

	do {
		unsigned sectors =
			min_t(u64, bucket_to_sector(ca, b + 1), end) - start;

		bch2_mark_metadata_bucket(c, ca, b, type, sectors,
					  gc_phase(GC_PHASE_SB), flags);
		b++;
		start += sectors;
	} while (start < end);
}
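
/*
 * e.g. with 512-sector buckets, marking sectors [1000, 1537) marks 24 sectors
 * in bucket 1, all 512 in bucket 2, and 1 sector in bucket 3.
 */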

static void bch2_mark_dev_superblock(struct bch_fs *c, struct bch_dev *ca,
				     unsigned flags)
{
	struct bch_sb_layout *layout = &ca->disk_sb.sb->layout;
	unsigned i;
	u64 b;

	for (i = 0; i < layout->nr_superblocks; i++) {
		u64 offset = le64_to_cpu(layout->sb_offset[i]);

		if (offset == BCH_SB_SECTOR)
			mark_metadata_sectors(c, ca, 0, BCH_SB_SECTOR,
					      BCH_DATA_sb, flags);

		mark_metadata_sectors(c, ca, offset,
				      offset + (1 << layout->sb_max_size_bits),
				      BCH_DATA_sb, flags);
	}

	for (i = 0; i < ca->journal.nr; i++) {
		b = ca->journal.buckets[i];
		bch2_mark_metadata_bucket(c, ca, b, BCH_DATA_journal,
					  ca->mi.bucket_size,
					  gc_phase(GC_PHASE_SB), flags);
	}
}

static void bch2_mark_superblocks(struct bch_fs *c)
{
	mutex_lock(&c->sb_lock);
	gc_pos_set(c, gc_phase(GC_PHASE_SB));

	for_each_online_member(c, ca)
		bch2_mark_dev_superblock(c, ca, BTREE_TRIGGER_GC);
	mutex_unlock(&c->sb_lock);
}

#if 0
/* Also see bch2_pending_btree_node_free_insert_done() */
static void bch2_mark_pending_btree_node_frees(struct bch_fs *c)
{
	struct btree_update *as;
	struct pending_btree_node_free *d;

	mutex_lock(&c->btree_interior_update_lock);
	gc_pos_set(c, gc_phase(GC_PHASE_PENDING_DELETE));

	for_each_pending_btree_node_free(c, as, d)
		if (d->index_update_done)
			bch2_mark_key(c, bkey_i_to_s_c(&d->key), BTREE_TRIGGER_GC);

	mutex_unlock(&c->btree_interior_update_lock);
}
#endif

static void bch2_gc_free(struct bch_fs *c)
{
	genradix_free(&c->reflink_gc_table);
	genradix_free(&c->gc_stripes);

	for_each_member_device(c, ca) {
		kvfree(rcu_dereference_protected(ca->buckets_gc, 1));
		ca->buckets_gc = NULL;

		free_percpu(ca->usage_gc);
		ca->usage_gc = NULL;
	}

	free_percpu(c->usage_gc);
	c->usage_gc = NULL;
}

static int bch2_gc_done(struct bch_fs *c,
			bool initial, bool metadata_only)
{
	struct bch_dev *ca = NULL;
	struct printbuf buf = PRINTBUF;
	bool verify = !metadata_only &&
		!c->opts.reconstruct_alloc &&
		(!initial || (c->sb.compat & (1ULL << BCH_COMPAT_alloc_info)));
	unsigned i;
	int ret = 0;

	percpu_down_write(&c->mark_lock);

#define copy_field(_err, _f, _msg, ...)					\
	if (dst->_f != src->_f &&					\
	    (!verify ||							\
	     fsck_err(c, _err, _msg ": got %llu, should be %llu"	\
		      , ##__VA_ARGS__, dst->_f, src->_f)))		\
		dst->_f = src->_f
#define copy_dev_field(_err, _f, _msg, ...)				\
	copy_field(_err, _f, "dev %u has wrong " _msg, ca->dev_idx, ##__VA_ARGS__)
#define copy_fs_field(_err, _f, _msg, ...)				\
	copy_field(_err, _f, "fs has wrong " _msg, ##__VA_ARGS__)

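	/*
	 * e.g. copy_dev_field(dev_usage_buckets_wrong, d[i].buckets, "%s buckets", ...)
	 * expands (roughly) to:
	 *
	 *	if (dst->d[i].buckets != src->d[i].buckets &&
	 *	    (!verify ||
	 *	     fsck_err(c, dev_usage_buckets_wrong,
	 *		      "dev %u has wrong %s buckets: got %llu, should be %llu",
	 *		      ca->dev_idx, bch2_data_type_str(i),
	 *		      dst->d[i].buckets, src->d[i].buckets)))
	 *		dst->d[i].buckets = src->d[i].buckets;
	 */
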
	for (i = 0; i < ARRAY_SIZE(c->usage); i++)
		bch2_fs_usage_acc_to_base(c, i);

	__for_each_member_device(c, ca) {
		struct bch_dev_usage *dst = ca->usage_base;
		struct bch_dev_usage *src = (void *)
			bch2_acc_percpu_u64s((u64 __percpu *) ca->usage_gc,
					     dev_usage_u64s());

		for (i = 0; i < BCH_DATA_NR; i++) {
			copy_dev_field(dev_usage_buckets_wrong,
				       d[i].buckets,	"%s buckets", bch2_data_type_str(i));
			copy_dev_field(dev_usage_sectors_wrong,
				       d[i].sectors,	"%s sectors", bch2_data_type_str(i));
			copy_dev_field(dev_usage_fragmented_wrong,
				       d[i].fragmented,	"%s fragmented", bch2_data_type_str(i));
		}
	}

	{
		unsigned nr = fs_usage_u64s(c);
		struct bch_fs_usage *dst = c->usage_base;
		struct bch_fs_usage *src = (void *)
			bch2_acc_percpu_u64s((u64 __percpu *) c->usage_gc, nr);

		copy_fs_field(fs_usage_hidden_wrong,
			      b.hidden,		"hidden");
		copy_fs_field(fs_usage_btree_wrong,
			      b.btree,		"btree");

		if (!metadata_only) {
			copy_fs_field(fs_usage_data_wrong,
				      b.data,	"data");
			copy_fs_field(fs_usage_cached_wrong,
				      b.cached,	"cached");
			copy_fs_field(fs_usage_reserved_wrong,
				      b.reserved,	"reserved");
			copy_fs_field(fs_usage_nr_inodes_wrong,
				      b.nr_inodes,"nr_inodes");

			for (i = 0; i < BCH_REPLICAS_MAX; i++)
				copy_fs_field(fs_usage_persistent_reserved_wrong,
					      persistent_reserved[i],
					      "persistent_reserved[%i]", i);
		}

		for (i = 0; i < c->replicas.nr; i++) {
			struct bch_replicas_entry_v1 *e =
				cpu_replicas_entry(&c->replicas, i);

			if (metadata_only &&
			    (e->data_type == BCH_DATA_user ||
			     e->data_type == BCH_DATA_cached))
				continue;

			printbuf_reset(&buf);
			bch2_replicas_entry_to_text(&buf, e);

			copy_fs_field(fs_usage_replicas_wrong,
				      replicas[i], "%s", buf.buf);
		}
	}

#undef copy_fs_field
#undef copy_dev_field
#undef copy_stripe_field
#undef copy_field
fsck_err:
	if (ca)
		percpu_ref_put(&ca->ref);
	bch_err_fn(c, ret);

	percpu_up_write(&c->mark_lock);
	printbuf_exit(&buf);
	return ret;
}

static int bch2_gc_start(struct bch_fs *c)
{
	BUG_ON(c->usage_gc);

	c->usage_gc = __alloc_percpu_gfp(fs_usage_u64s(c) * sizeof(u64),
					 sizeof(u64), GFP_KERNEL);
	if (!c->usage_gc) {
		bch_err(c, "error allocating c->usage_gc");
		return -BCH_ERR_ENOMEM_gc_start;
	}

	for_each_member_device(c, ca) {
		BUG_ON(ca->usage_gc);

		ca->usage_gc = alloc_percpu(struct bch_dev_usage);
		if (!ca->usage_gc) {
			bch_err(c, "error allocating ca->usage_gc");
			percpu_ref_put(&ca->ref);
			return -BCH_ERR_ENOMEM_gc_start;
		}

		this_cpu_write(ca->usage_gc->d[BCH_DATA_free].buckets,
			       ca->mi.nbuckets - ca->mi.first_bucket);
	}

	return 0;
}

static int bch2_gc_reset(struct bch_fs *c)
{
	for_each_member_device(c, ca) {
		free_percpu(ca->usage_gc);
		ca->usage_gc = NULL;
	}

	free_percpu(c->usage_gc);
	c->usage_gc = NULL;

	return bch2_gc_start(c);
}

/* returns true if not equal */
static inline bool bch2_alloc_v4_cmp(struct bch_alloc_v4 l,
				     struct bch_alloc_v4 r)
{
	return  l.gen != r.gen				||
		l.oldest_gen != r.oldest_gen		||
		l.data_type != r.data_type		||
		l.dirty_sectors	!= r.dirty_sectors	||
		l.cached_sectors != r.cached_sectors	||
		l.stripe_redundancy != r.stripe_redundancy ||
		l.stripe != r.stripe;
}

static int bch2_alloc_write_key(struct btree_trans *trans,
				struct btree_iter *iter,
				struct bkey_s_c k,
				bool metadata_only)
{
	struct bch_fs *c = trans->c;
	struct bch_dev *ca = bch_dev_bkey_exists(c, iter->pos.inode);
	struct bucket old_gc, gc, *b;
	struct bkey_i_alloc_v4 *a;
	struct bch_alloc_v4 old_convert, new;
	const struct bch_alloc_v4 *old;
	int ret;

	old = bch2_alloc_to_v4(k, &old_convert);
	new = *old;

	percpu_down_read(&c->mark_lock);
	b = gc_bucket(ca, iter->pos.offset);
	old_gc = *b;

	if ((old->data_type == BCH_DATA_sb ||
	     old->data_type == BCH_DATA_journal) &&
	    !bch2_dev_is_online(ca)) {
		b->data_type = old->data_type;
		b->dirty_sectors = old->dirty_sectors;
	}

	/*
	 * b->data_type doesn't yet include need_discard & need_gc_gen states -
	 * fix that here:
	 */
	b->data_type = __alloc_data_type(b->dirty_sectors,
					 b->cached_sectors,
					 b->stripe,
					 *old,
					 b->data_type);
	gc = *b;

	if (gc.data_type != old_gc.data_type ||
	    gc.dirty_sectors != old_gc.dirty_sectors)
		bch2_dev_usage_update_m(c, ca, &old_gc, &gc);
	percpu_up_read(&c->mark_lock);

	if (metadata_only &&
	    gc.data_type != BCH_DATA_sb &&
	    gc.data_type != BCH_DATA_journal &&
	    gc.data_type != BCH_DATA_btree)
		return 0;

	if (gen_after(old->gen, gc.gen))
		return 0;

	if (fsck_err_on(new.data_type != gc.data_type, c,
			alloc_key_data_type_wrong,
			"bucket %llu:%llu gen %u has wrong data_type"
			": got %s, should be %s",
			iter->pos.inode, iter->pos.offset,
			gc.gen,
			bch2_data_type_str(new.data_type),
			bch2_data_type_str(gc.data_type)))
		new.data_type = gc.data_type;

#define copy_bucket_field(_errtype, _f)					\
	if (fsck_err_on(new._f != gc._f, c, _errtype,			\
			"bucket %llu:%llu gen %u data type %s has wrong " #_f	\
			": got %u, should be %u",			\
			iter->pos.inode, iter->pos.offset,		\
			gc.gen,						\
			bch2_data_type_str(gc.data_type),		\
			new._f, gc._f))					\
		new._f = gc._f;						\

	copy_bucket_field(alloc_key_gen_wrong,
			  gen);
	copy_bucket_field(alloc_key_dirty_sectors_wrong,
			  dirty_sectors);
	copy_bucket_field(alloc_key_cached_sectors_wrong,
			  cached_sectors);
	copy_bucket_field(alloc_key_stripe_wrong,
			  stripe);
	copy_bucket_field(alloc_key_stripe_redundancy_wrong,
			  stripe_redundancy);
#undef copy_bucket_field

	if (!bch2_alloc_v4_cmp(*old, new))
		return 0;

	a = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a);
	if (ret)
		return ret;

	a->v = new;

	/*
	 * The trigger normally makes sure this is set, but we're not running
	 * triggers:
	 */
	if (a->v.data_type == BCH_DATA_cached && !a->v.io_time[READ])
		a->v.io_time[READ] = max_t(u64, 1, atomic64_read(&c->io_clock[READ].now));

	ret = bch2_trans_update(trans, iter, &a->k_i, BTREE_TRIGGER_NORUN);
fsck_err:
	return ret;
}

static int bch2_gc_alloc_done(struct bch_fs *c, bool metadata_only)
{
	int ret = 0;

	for_each_member_device(c, ca) {
		ret = bch2_trans_run(c,
			for_each_btree_key_upto_commit(trans, iter, BTREE_ID_alloc,
					POS(ca->dev_idx, ca->mi.first_bucket),
					POS(ca->dev_idx, ca->mi.nbuckets - 1),
					BTREE_ITER_SLOTS|BTREE_ITER_PREFETCH, k,
					NULL, NULL, BCH_TRANS_COMMIT_lazy_rw,
				bch2_alloc_write_key(trans, &iter, k, metadata_only)));
		if (ret) {
			percpu_ref_put(&ca->ref);
			break;
		}
	}

	bch_err_fn(c, ret);
	return ret;
}

static int bch2_gc_alloc_start(struct bch_fs *c, bool metadata_only)
{
	for_each_member_device(c, ca) {
		struct bucket_array *buckets = kvmalloc(sizeof(struct bucket_array) +
				ca->mi.nbuckets * sizeof(struct bucket),
				GFP_KERNEL|__GFP_ZERO);
		if (!buckets) {
			percpu_ref_put(&ca->ref);
			bch_err(c, "error allocating ca->buckets[gc]");
			return -BCH_ERR_ENOMEM_gc_alloc_start;
		}

		buckets->first_bucket	= ca->mi.first_bucket;
		buckets->nbuckets	= ca->mi.nbuckets;
		rcu_assign_pointer(ca->buckets_gc, buckets);
	}

	int ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_alloc, POS_MIN,
					 BTREE_ITER_PREFETCH, k, ({
			struct bch_dev *ca = bch_dev_bkey_exists(c, k.k->p.inode);
			struct bucket *g = gc_bucket(ca, k.k->p.offset);

			struct bch_alloc_v4 a_convert;
			const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);

			g->gen_valid	= 1;
			g->gen		= a->gen;

			if (metadata_only &&
			    (a->data_type == BCH_DATA_user ||
			     a->data_type == BCH_DATA_cached ||
			     a->data_type == BCH_DATA_parity)) {
				g->data_type		= a->data_type;
				g->dirty_sectors	= a->dirty_sectors;
				g->cached_sectors	= a->cached_sectors;
				g->stripe		= a->stripe;
				g->stripe_redundancy	= a->stripe_redundancy;
			}

			0;
		})));
	bch_err_fn(c, ret);
	return ret;
}

static void bch2_gc_alloc_reset(struct bch_fs *c, bool metadata_only)
{
	for_each_member_device(c, ca) {
		struct bucket_array *buckets = gc_bucket_array(ca);
		struct bucket *g;

		for_each_bucket(g, buckets) {
			if (metadata_only &&
			    (g->data_type == BCH_DATA_user ||
			     g->data_type == BCH_DATA_cached ||
			     g->data_type == BCH_DATA_parity))
				continue;
			g->data_type = 0;
			g->dirty_sectors = 0;
			g->cached_sectors = 0;
		}
	}
}

static int bch2_gc_write_reflink_key(struct btree_trans *trans,
				     struct btree_iter *iter,
				     struct bkey_s_c k,
				     size_t *idx)
{
	struct bch_fs *c = trans->c;
	const __le64 *refcount = bkey_refcount_c(k);
	struct printbuf buf = PRINTBUF;
	struct reflink_gc *r;
	int ret = 0;

	if (!refcount)
		return 0;

	while ((r = genradix_ptr(&c->reflink_gc_table, *idx)) &&
	       r->offset < k.k->p.offset)
		++*idx;

	if (!r ||
	    r->offset != k.k->p.offset ||
	    r->size != k.k->size) {
		bch_err(c, "unexpected inconsistency walking reflink table at gc finish");
		return -EINVAL;
	}

	if (fsck_err_on(r->refcount != le64_to_cpu(*refcount), c,
			reflink_v_refcount_wrong,
			"reflink key has wrong refcount:\n"
			"  %s\n"
			"  should be %u",
			(bch2_bkey_val_to_text(&buf, c, k), buf.buf),
			r->refcount)) {
		struct bkey_i *new = bch2_bkey_make_mut_noupdate(trans, k);
		ret = PTR_ERR_OR_ZERO(new);
		if (ret)
			return ret;

		if (!r->refcount)
			new->k.type = KEY_TYPE_deleted;
		else
			*bkey_refcount(bkey_i_to_s(new)) = cpu_to_le64(r->refcount);
		ret = bch2_trans_update(trans, iter, new, 0);
	}
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static int bch2_gc_reflink_done(struct bch_fs *c, bool metadata_only)
{
	size_t idx = 0;

	if (metadata_only)
		return 0;

	int ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter,
				BTREE_ID_reflink, POS_MIN,
				BTREE_ITER_PREFETCH, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			bch2_gc_write_reflink_key(trans, &iter, k, &idx)));
	c->reflink_gc_nr = 0;
	return ret;
}

static int bch2_gc_reflink_start(struct bch_fs *c,
				 bool metadata_only)
{
	if (metadata_only)
		return 0;

	c->reflink_gc_nr = 0;

	int ret = bch2_trans_run(c,
		for_each_btree_key(trans, iter, BTREE_ID_reflink, POS_MIN,
				   BTREE_ITER_PREFETCH, k, ({
			const __le64 *refcount = bkey_refcount_c(k);

			if (!refcount)
				continue;

			struct reflink_gc *r = genradix_ptr_alloc(&c->reflink_gc_table,
							c->reflink_gc_nr++, GFP_KERNEL);
			if (!r) {
				ret = -BCH_ERR_ENOMEM_gc_reflink_start;
				break;
			}

			r->offset	= k.k->p.offset;
			r->size		= k.k->size;
			r->refcount	= 0;
			0;
		})));

	bch_err_fn(c, ret);
	return ret;
}

static void bch2_gc_reflink_reset(struct bch_fs *c, bool metadata_only)
{
	struct genradix_iter iter;
	struct reflink_gc *r;

	genradix_for_each(&c->reflink_gc_table, iter, r)
		r->refcount = 0;
}

static int bch2_gc_write_stripes_key(struct btree_trans *trans,
				     struct btree_iter *iter,
				     struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct printbuf buf = PRINTBUF;
	const struct bch_stripe *s;
	struct gc_stripe *m;
	bool bad = false;
	unsigned i;
	int ret = 0;

	if (k.k->type != KEY_TYPE_stripe)
		return 0;

	s = bkey_s_c_to_stripe(k).v;
	m = genradix_ptr(&c->gc_stripes, k.k->p.offset);

	for (i = 0; i < s->nr_blocks; i++) {
		u32 old = stripe_blockcount_get(s, i);
		u32 new = (m ? m->block_sectors[i] : 0);

		if (old != new) {
			prt_printf(&buf, "stripe block %u has wrong sector count: got %u, should be %u\n",
				   i, old, new);
			bad = true;
		}
	}

	if (bad)
		bch2_bkey_val_to_text(&buf, c, k);

	if (fsck_err_on(bad, c, stripe_sector_count_wrong,
			"%s", buf.buf)) {
		struct bkey_i_stripe *new;

		new = bch2_trans_kmalloc(trans, bkey_bytes(k.k));
		ret = PTR_ERR_OR_ZERO(new);
		if (ret)
			return ret;

		bkey_reassemble(&new->k_i, k);

		for (i = 0; i < new->v.nr_blocks; i++)
			stripe_blockcount_set(&new->v, i, m ? m->block_sectors[i] : 0);

		ret = bch2_trans_update(trans, iter, &new->k_i, 0);
	}
fsck_err:
	printbuf_exit(&buf);
	return ret;
}

static int bch2_gc_stripes_done(struct bch_fs *c, bool metadata_only)
{
	if (metadata_only)
		return 0;

	return bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter,
				BTREE_ID_stripes, POS_MIN,
				BTREE_ITER_PREFETCH, k,
				NULL, NULL, BCH_TRANS_COMMIT_no_enospc,
			bch2_gc_write_stripes_key(trans, &iter, k)));
}

static void bch2_gc_stripes_reset(struct bch_fs *c, bool metadata_only)
{
	genradix_free(&c->gc_stripes);
}

/**
 * bch2_gc - walk _all_ references to buckets, and recompute them:
 *
 * @c:			filesystem object
 * @initial:		are we in recovery?
 * @metadata_only:	are we just checking metadata references, or everything?
 *
 * Returns: 0 on success, or standard errcode on failure
 *
 * Order matters here:
 *  - Concurrent GC relies on the fact that we have a total ordering for
 *    everything that GC walks - see gc_will_visit_node(),
 *    gc_will_visit_root()
 *
 *  - also, references move around in the course of index updates and
 *    various other crap: everything needs to agree on the ordering
 *    references are allowed to move around in - e.g., we're allowed to
 *    start with a reference owned by an open_bucket (the allocator) and
 *    move it to the btree, but not the reverse.
 *
 *    This is necessary to ensure that gc doesn't miss references that
 *    move around - if references move backwards in the ordering GC
 *    uses, GC could skip past them
 */
int bch2_gc(struct bch_fs *c, bool initial, bool metadata_only)
{
	unsigned iter = 0;
	int ret;

	lockdep_assert_held(&c->state_lock);

	down_write(&c->gc_lock);

	bch2_btree_interior_updates_flush(c);

	ret   = bch2_gc_start(c) ?:
		bch2_gc_alloc_start(c, metadata_only) ?:
		bch2_gc_reflink_start(c, metadata_only);
	if (ret)
		goto out;
again:
	gc_pos_set(c, gc_phase(GC_PHASE_START));

	bch2_mark_superblocks(c);

	ret = bch2_gc_btrees(c, initial, metadata_only);

	if (ret)
		goto out;

#if 0
	bch2_mark_pending_btree_node_frees(c);
#endif
	c->gc_count++;

	if (test_bit(BCH_FS_need_another_gc, &c->flags) ||
	    (!iter && bch2_test_restart_gc)) {
		if (iter++ > 2) {
			bch_info(c, "Unable to fix bucket gens, looping");
			ret = -EINVAL;
			goto out;
		}

		/*
		 * XXX: make sure gens we fixed got saved
		 */
		bch_info(c, "Second GC pass needed, restarting:");
		clear_bit(BCH_FS_need_another_gc, &c->flags);
		__gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));

		bch2_gc_stripes_reset(c, metadata_only);
		bch2_gc_alloc_reset(c, metadata_only);
		bch2_gc_reflink_reset(c, metadata_only);
		ret = bch2_gc_reset(c);
		if (ret)
			goto out;

		/* flush fsck errors, reset counters */
		bch2_flush_fsck_errs(c);
		goto again;
	}
out:
	if (!ret) {
		bch2_journal_block(&c->journal);

		ret   = bch2_gc_alloc_done(c, metadata_only) ?:
			bch2_gc_done(c, initial, metadata_only) ?:
			bch2_gc_stripes_done(c, metadata_only) ?:
			bch2_gc_reflink_done(c, metadata_only);

		bch2_journal_unblock(&c->journal);
	}

	percpu_down_write(&c->mark_lock);
	/* Indicates that gc is no longer in progress: */
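	/* (via __gc_pos_set(), since the position moves backwards here, which gc_pos_set() would BUG on) */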
	__gc_pos_set(c, gc_phase(GC_PHASE_NOT_RUNNING));

	bch2_gc_free(c);
	percpu_up_write(&c->mark_lock);

	up_write(&c->gc_lock);

	/*
	 * At startup, allocations can happen directly instead of via the
	 * allocator thread - issue wakeup in case they blocked on gc_lock:
	 */
	closure_wake_up(&c->freelist_wait);
	bch_err_fn(c, ret);
	return ret;
}

static int gc_btree_gens_key(struct btree_trans *trans,
			     struct btree_iter *iter,
			     struct bkey_s_c k)
{
	struct bch_fs *c = trans->c;
	struct bkey_ptrs_c ptrs = bch2_bkey_ptrs_c(k);
	struct bkey_i *u;
	int ret;

	percpu_down_read(&c->mark_lock);
	bkey_for_each_ptr(ptrs, ptr) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);

		if (ptr_stale(ca, ptr) > 16) {
			percpu_up_read(&c->mark_lock);
			goto update;
		}
	}

	bkey_for_each_ptr(ptrs, ptr) {
		struct bch_dev *ca = bch_dev_bkey_exists(c, ptr->dev);
		u8 *gen = &ca->oldest_gen[PTR_BUCKET_NR(ca, ptr)];

		if (gen_after(*gen, ptr->gen))
			*gen = ptr->gen;
	}
	percpu_up_read(&c->mark_lock);
	return 0;
update:
	u = bch2_bkey_make_mut(trans, iter, &k, 0);
	ret = PTR_ERR_OR_ZERO(u);
	if (ret)
		return ret;

	bch2_extent_normalize(c, bkey_i_to_s(u));
	return 0;
}
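
/*
 * Rationale for the threshold above: a pointer more than 16 generations
 * behind its bucket is worth rewriting the key for - bch2_extent_normalize()
 * drops stale cached pointers, so the update trims dead references instead
 * of leaving them around until the key is next rewritten.
 */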

static int bch2_alloc_write_oldest_gen(struct btree_trans *trans, struct btree_iter *iter,
				       struct bkey_s_c k)
{
	struct bch_dev *ca = bch_dev_bkey_exists(trans->c, iter->pos.inode);
	struct bch_alloc_v4 a_convert;
	const struct bch_alloc_v4 *a = bch2_alloc_to_v4(k, &a_convert);
	struct bkey_i_alloc_v4 *a_mut;
	int ret;

	if (a->oldest_gen == ca->oldest_gen[iter->pos.offset])
		return 0;

	a_mut = bch2_alloc_to_v4_mut(trans, k);
	ret = PTR_ERR_OR_ZERO(a_mut);
	if (ret)
		return ret;

	a_mut->v.oldest_gen = ca->oldest_gen[iter->pos.offset];
	a_mut->v.data_type = alloc_data_type(a_mut->v, a_mut->v.data_type);

	return bch2_trans_update(trans, iter, &a_mut->k_i, 0);
}

int bch2_gc_gens(struct bch_fs *c)
{
	u64 b, start_time = local_clock();
	int ret;

	/*
	 * Ideally we would be using state_lock and not gc_lock here, but that
	 * introduces a deadlock in the RO path - we currently take the state
	 * lock at the start of going RO, thus the gc thread may get stuck:
	 */
	if (!mutex_trylock(&c->gc_gens_lock))
		return 0;

	trace_and_count(c, gc_gens_start, c);
	down_read(&c->gc_lock);

	for_each_member_device(c, ca) {
		struct bucket_gens *gens = bucket_gens(ca);

		BUG_ON(ca->oldest_gen);

		ca->oldest_gen = kvmalloc(gens->nbuckets, GFP_KERNEL);
		if (!ca->oldest_gen) {
			percpu_ref_put(&ca->ref);
			ret = -BCH_ERR_ENOMEM_gc_gens;
			goto err;
		}

		for (b = gens->first_bucket;
		     b < gens->nbuckets; b++)
			ca->oldest_gen[b] = gens->b[b];
	}

	for (unsigned i = 0; i < BTREE_ID_NR; i++)
		if (btree_type_has_ptrs(i)) {
			c->gc_gens_btree = i;
			c->gc_gens_pos = POS_MIN;

			ret = bch2_trans_run(c,
				for_each_btree_key_commit(trans, iter, i,
						POS_MIN,
						BTREE_ITER_PREFETCH|BTREE_ITER_ALL_SNAPSHOTS,
						k,
						NULL, NULL,
						BCH_TRANS_COMMIT_no_enospc,
					gc_btree_gens_key(trans, &iter, k)));
			if (ret)
				goto err;
		}

	ret = bch2_trans_run(c,
		for_each_btree_key_commit(trans, iter, BTREE_ID_alloc,
				POS_MIN,
				BTREE_ITER_PREFETCH,
				k,
				NULL, NULL,
				BCH_TRANS_COMMIT_no_enospc,
			bch2_alloc_write_oldest_gen(trans, &iter, k)));
	if (ret)
		goto err;

	c->gc_gens_btree	= 0;
	c->gc_gens_pos		= POS_MIN;

	c->gc_count++;

	bch2_time_stats_update(&c->times[BCH_TIME_btree_gc], start_time);
	trace_and_count(c, gc_gens_end, c);
err:
	for_each_member_device(c, ca) {
		kvfree(ca->oldest_gen);
		ca->oldest_gen = NULL;
	}

	up_read(&c->gc_lock);
	mutex_unlock(&c->gc_gens_lock);
	if (!bch2_err_matches(ret, EROFS))
		bch_err_fn(c, ret);
	return ret;
}

static int bch2_gc_thread(void *arg)
{
	struct bch_fs *c = arg;
	struct io_clock *clock = &c->io_clock[WRITE];
	unsigned long last = atomic64_read(&clock->now);
	unsigned last_kick = atomic_read(&c->kick_gc);

	set_freezable();

	while (1) {
		while (1) {
			set_current_state(TASK_INTERRUPTIBLE);

			if (kthread_should_stop()) {
				__set_current_state(TASK_RUNNING);
				return 0;
			}

			if (atomic_read(&c->kick_gc) != last_kick)
				break;

			if (c->btree_gc_periodic) {
				unsigned long next = last + c->capacity / 16;

				if (atomic64_read(&clock->now) >= next)
					break;

				bch2_io_clock_schedule_timeout(clock, next);
			} else {
				schedule();
			}

			try_to_freeze();
		}
		__set_current_state(TASK_RUNNING);

		last = atomic64_read(&clock->now);
		last_kick = atomic_read(&c->kick_gc);

		/*
		 * Full gc is currently incompatible with btree key cache:
		 */
#if 0
		ret = bch2_gc(c, false, false);
#else
		bch2_gc_gens(c);
#endif
		debug_check_no_locks_held();
	}

	return 0;
}
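
/*
 * The periodic heuristic above keys off the write io_clock, which advances
 * with sectors written: with btree_gc_periodic set, the thread wakes after
 * roughly 1/16th of the filesystem's capacity has been written since the
 * last pass.
 */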

void bch2_gc_thread_stop(struct bch_fs *c)
{
	struct task_struct *p;

	p = c->gc_thread;
	c->gc_thread = NULL;

	if (p) {
		kthread_stop(p);
		put_task_struct(p);
	}
}

int bch2_gc_thread_start(struct bch_fs *c)
{
	struct task_struct *p;

	if (c->gc_thread)
		return 0;

	p = kthread_create(bch2_gc_thread, c, "bch-gc/%s", c->name);
	if (IS_ERR(p)) {
		bch_err_fn(c, PTR_ERR(p));
		return PTR_ERR(p);
	}

	get_task_struct(p);
	c->gc_thread = p;
	wake_up_process(p);
	return 0;
}
2067